Diffstat (limited to 'drivers'): 740 files changed, 41479 insertions, 12794 deletions
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 145ec0b6f20b..1af2125e17d5 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -2016,9 +2016,13 @@ bool acpi_ec_dispatch_gpe(void) * to allow the caller to process events properly after that. */ ret = acpi_dispatch_gpe(NULL, first_ec->gpe); - if (ret == ACPI_INTERRUPT_HANDLED) + if (ret == ACPI_INTERRUPT_HANDLED) { pm_pr_dbg("EC GPE dispatched\n"); + /* Flush the event and query workqueues. */ + acpi_ec_flush_work(); + } + return false; } #endif /* CONFIG_PM_SLEEP */ diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 3850704570c0..fd9d4e8318e9 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -980,13 +980,6 @@ static int acpi_s2idle_prepare_late(void) return 0; } -static void acpi_s2idle_sync(void) -{ - /* The EC driver uses special workqueues that need to be flushed. */ - acpi_ec_flush_work(); - acpi_os_wait_events_complete(); /* synchronize Notify handling */ -} - static bool acpi_s2idle_wake(void) { if (!acpi_sci_irq_valid()) @@ -1018,7 +1011,7 @@ static bool acpi_s2idle_wake(void) return true; /* - * Cancel the wakeup and process all pending events in case + * Cancel the SCI wakeup and process all pending events in case * there are any wakeup ones in there. * * Note that if any non-EC GPEs are active at this point, the @@ -1026,8 +1019,7 @@ static bool acpi_s2idle_wake(void) * should be missed by canceling the wakeup here. */ pm_system_cancel_wakeup(); - - acpi_s2idle_sync(); + acpi_os_wait_events_complete(); /* * The SCI is in the "suspended" state now and it cannot produce @@ -1060,7 +1052,8 @@ static void acpi_s2idle_restore(void) * of GPEs. */ acpi_os_wait_events_complete(); /* synchronize GPE processing */ - acpi_s2idle_sync(); + acpi_ec_flush_work(); /* flush the EC driver's workqueues */ + acpi_os_wait_events_complete(); /* synchronize Notify handling */ s2idle_wakeup = false; diff --git a/drivers/base/core.c b/drivers/base/core.c index 073045cb214e..0cad34f1eede 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -365,6 +365,7 @@ struct device_link *device_link_add(struct device *consumer, link->flags |= DL_FLAG_STATELESS; goto reorder; } else { + link->flags |= DL_FLAG_STATELESS; goto out; } } @@ -433,12 +434,16 @@ struct device_link *device_link_add(struct device *consumer, flags & DL_FLAG_PM_RUNTIME) pm_runtime_resume(supplier); + list_add_tail_rcu(&link->s_node, &supplier->links.consumers); + list_add_tail_rcu(&link->c_node, &consumer->links.suppliers); + if (flags & DL_FLAG_SYNC_STATE_ONLY) { dev_dbg(consumer, "Linked as a sync state only consumer to %s\n", dev_name(supplier)); goto out; } + reorder: /* * Move the consumer and all of the devices depending on it to the end @@ -449,12 +454,9 @@ reorder: */ device_reorder_to_tail(consumer, NULL); - list_add_tail_rcu(&link->s_node, &supplier->links.consumers); - list_add_tail_rcu(&link->c_node, &consumer->links.suppliers); - dev_dbg(consumer, "Linked as a consumer to %s\n", dev_name(supplier)); - out: +out: device_pm_unlock(); device_links_write_unlock(); @@ -829,6 +831,13 @@ static void __device_links_supplier_defer_sync(struct device *sup) list_add_tail(&sup->links.defer_sync, &deferred_sync); } +static void device_link_drop_managed(struct device_link *link) +{ + link->flags &= ~DL_FLAG_MANAGED; + WRITE_ONCE(link->status, DL_STATE_NONE); + kref_put(&link->kref, __device_link_del); +} + /** * device_links_driver_bound - Update device links after probing its driver. * @dev: Device to update the links for. 
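For readers unfamiliar with the API being reworked in the drivers/base/core.c hunks above: a device link ties a consumer device to a supplier so that runtime PM state and probe/unbind ordering propagate between the two. A minimal sketch of how a consumer driver typically creates such a link (hypothetical function name; flags as defined in include/linux/device.h in this series):

#include <linux/device.h>

/* Hypothetical helper called from a consumer's probe path. */
static int foo_link_supplier(struct device *consumer, struct device *supplier)
{
	struct device_link *link;

	/*
	 * DL_FLAG_PM_RUNTIME makes runtime PM follow the supplier;
	 * DL_FLAG_AUTOREMOVE_CONSUMER drops the link automatically
	 * when the consumer driver unbinds.
	 */
	link = device_link_add(consumer, supplier,
			       DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_CONSUMER);
	if (!link)
		return -ENODEV; /* link refused or allocation failed */

	return 0;
}

The hunks above adjust exactly this machinery: device_link_add() now also marks the existing-link early-return path DL_FLAG_STATELESS and moves the consumer/supplier list insertion ahead of the DL_FLAG_SYNC_STATE_ONLY early return, and device_links_driver_bound() drops DL_FLAG_SYNC_STATE_ONLY managed links once the consumer has probed, while still giving the supplier its sync_state() callback.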
@@ -842,7 +851,7 @@ static void __device_links_supplier_defer_sync(struct device *sup) */ void device_links_driver_bound(struct device *dev) { - struct device_link *link; + struct device_link *link, *ln; LIST_HEAD(sync_list); /* @@ -882,18 +891,35 @@ void device_links_driver_bound(struct device *dev) else __device_links_queue_sync_state(dev, &sync_list); - list_for_each_entry(link, &dev->links.suppliers, c_node) { + list_for_each_entry_safe(link, ln, &dev->links.suppliers, c_node) { + struct device *supplier; + if (!(link->flags & DL_FLAG_MANAGED)) continue; - WARN_ON(link->status != DL_STATE_CONSUMER_PROBE); - WRITE_ONCE(link->status, DL_STATE_ACTIVE); + supplier = link->supplier; + if (link->flags & DL_FLAG_SYNC_STATE_ONLY) { + /* + * When DL_FLAG_SYNC_STATE_ONLY is set, it means no + * other DL_MANAGED_LINK_FLAGS have been set. So, it's + * safe to drop the managed link completely. + */ + device_link_drop_managed(link); + } else { + WARN_ON(link->status != DL_STATE_CONSUMER_PROBE); + WRITE_ONCE(link->status, DL_STATE_ACTIVE); + } + /* + * This needs to be done even for the deleted + * DL_FLAG_SYNC_STATE_ONLY device link in case it was the last + * device link that was preventing the supplier from getting a + * sync_state() call. + */ if (defer_sync_state_count) - __device_links_supplier_defer_sync(link->supplier); + __device_links_supplier_defer_sync(supplier); else - __device_links_queue_sync_state(link->supplier, - &sync_list); + __device_links_queue_sync_state(supplier, &sync_list); } dev->links.status = DL_DEV_DRIVER_BOUND; @@ -903,13 +929,6 @@ void device_links_driver_bound(struct device *dev) device_links_flush_sync_list(&sync_list, dev); } -static void device_link_drop_managed(struct device_link *link) -{ - link->flags &= ~DL_FLAG_MANAGED; - WRITE_ONCE(link->status, DL_STATE_NONE); - kref_put(&link->kref, __device_link_del); -} - /** * __device_links_no_driver - Update links of a device without a driver. * @dev: Device without a driver. diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index aae99a2d7bd4..14345a87c7cc 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -1570,34 +1570,6 @@ extern void drbd_set_recv_tcq(struct drbd_device *device, int tcq_enabled); extern void _drbd_clear_done_ee(struct drbd_device *device, struct list_head *to_be_freed); extern int drbd_connected(struct drbd_peer_device *); -static inline void drbd_tcp_cork(struct socket *sock) -{ - int val = 1; - (void) kernel_setsockopt(sock, SOL_TCP, TCP_CORK, - (char*)&val, sizeof(val)); -} - -static inline void drbd_tcp_uncork(struct socket *sock) -{ - int val = 0; - (void) kernel_setsockopt(sock, SOL_TCP, TCP_CORK, - (char*)&val, sizeof(val)); -} - -static inline void drbd_tcp_nodelay(struct socket *sock) -{ - int val = 1; - (void) kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY, - (char*)&val, sizeof(val)); -} - -static inline void drbd_tcp_quickack(struct socket *sock) -{ - int val = 2; - (void) kernel_setsockopt(sock, SOL_TCP, TCP_QUICKACK, - (char*)&val, sizeof(val)); -} - /* sets the number of 512 byte sectors of our virtual device */ void drbd_set_my_capacity(struct drbd_device *device, sector_t size); diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index c094c3c2c5d4..45fbd526c453 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -660,7 +660,7 @@ static int __send_command(struct drbd_connection *connection, int vnr, /* DRBD protocol "pings" are latency critical. 
* This is supposed to trigger tcp_push_pending_frames() */ if (!err && (cmd == P_PING || cmd == P_PING_ACK)) - drbd_tcp_nodelay(sock->socket); + tcp_sock_set_nodelay(sock->socket->sk); return err; } diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index c15e7083b13a..3a3f2b6a821f 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -1051,8 +1051,8 @@ randomize: /* we don't want delays. * we use TCP_CORK where appropriate, though */ - drbd_tcp_nodelay(sock.socket); - drbd_tcp_nodelay(msock.socket); + tcp_sock_set_nodelay(sock.socket->sk); + tcp_sock_set_nodelay(msock.socket->sk); connection->data.socket = sock.socket; connection->meta.socket = msock.socket; @@ -1223,7 +1223,7 @@ static int drbd_recv_header_maybe_unplug(struct drbd_connection *connection, str * quickly as possible, and let remote TCP know what we have * received so far. */ if (err == -EAGAIN) { - drbd_tcp_quickack(connection->data.socket); + tcp_sock_set_quickack(connection->data.socket->sk, 2); drbd_unplug_all_devices(connection); } if (err > 0) { @@ -4959,8 +4959,7 @@ static int receive_UnplugRemote(struct drbd_connection *connection, struct packe { /* Make sure we've acked all the TCP data associated * with the data requests being unplugged */ - drbd_tcp_quickack(connection->data.socket); - + tcp_sock_set_quickack(connection->data.socket->sk, 2); return 0; } @@ -6162,7 +6161,7 @@ void drbd_send_acks_wf(struct work_struct *ws) rcu_read_unlock(); if (tcp_cork) - drbd_tcp_cork(connection->meta.socket); + tcp_sock_set_cork(connection->meta.socket->sk, true); err = drbd_finish_peer_reqs(device); kref_put(&device->kref, drbd_destroy_device); @@ -6175,7 +6174,7 @@ void drbd_send_acks_wf(struct work_struct *ws) } if (tcp_cork) - drbd_tcp_uncork(connection->meta.socket); + tcp_sock_set_cork(connection->meta.socket->sk, false); return; } diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index 0dc019da1f8d..2b89c9f2ca70 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -2098,7 +2098,7 @@ static void wait_for_work(struct drbd_connection *connection, struct list_head * if (uncork) { mutex_lock(&connection->data.mutex); if (connection->data.socket) - drbd_tcp_uncork(connection->data.socket); + tcp_sock_set_cork(connection->data.socket->sk, false); mutex_unlock(&connection->data.mutex); } @@ -2153,9 +2153,9 @@ static void wait_for_work(struct drbd_connection *connection, struct list_head * mutex_lock(&connection->data.mutex); if (connection->data.socket) { if (cork) - drbd_tcp_cork(connection->data.socket); + tcp_sock_set_cork(connection->data.socket->sk, true); else if (!uncork) - drbd_tcp_uncork(connection->data.socket); + tcp_sock_set_cork(connection->data.socket->sk, false); } mutex_unlock(&connection->data.mutex); } diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c index 8efd8778e209..ce9e33603a4d 100644 --- a/drivers/block/null_blk_main.c +++ b/drivers/block/null_blk_main.c @@ -1535,6 +1535,13 @@ static void null_config_discard(struct nullb *nullb) { if (nullb->dev->discard == false) return; + + if (nullb->dev->zoned) { + nullb->dev->discard = false; + pr_info("discard option is ignored in zoned mode\n"); + return; + } + nullb->q->limits.discard_granularity = nullb->dev->blocksize; nullb->q->limits.discard_alignment = nullb->dev->blocksize; blk_queue_max_discard_sectors(nullb->q, UINT_MAX >> 9); diff --git a/drivers/block/null_blk_zoned.c 
b/drivers/block/null_blk_zoned.c index 9e4bcdad1a80..ed5458f2d367 100644 --- a/drivers/block/null_blk_zoned.c +++ b/drivers/block/null_blk_zoned.c @@ -23,6 +23,10 @@ int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q) pr_err("zone_size must be power-of-two\n"); return -EINVAL; } + if (dev->zone_size > dev->size) { + pr_err("Zone size larger than device capacity\n"); + return -EINVAL; + } dev->zone_size_sects = dev->zone_size << ZONE_SIZE_SHIFT; dev->nr_zones = dev_size >> diff --git a/drivers/bus/mhi/core/init.c b/drivers/bus/mhi/core/init.c index eb2ab058a01d..1f8c82603179 100644 --- a/drivers/bus/mhi/core/init.c +++ b/drivers/bus/mhi/core/init.c @@ -291,6 +291,7 @@ int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl) } /* Setup cmd context */ + ret = -ENOMEM; mhi_ctxt->cmd_ctxt = mhi_alloc_coherent(mhi_cntrl, sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS, @@ -1100,6 +1101,7 @@ static int mhi_driver_probe(struct device *dev) } } + ret = -EINVAL; if (dl_chan) { /* * If channel supports LPM notifications then status_cb should diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c index b7145f370d3b..2704470e021d 100644 --- a/drivers/char/ipmi/ipmi_ssif.c +++ b/drivers/char/ipmi/ipmi_ssif.c @@ -1947,8 +1947,8 @@ static int ssif_adapter_handler(struct device *adev, void *opaque) if (adev->type != &i2c_adapter_type) return 0; - addr_info->added_client = i2c_new_device(to_i2c_adapter(adev), - &addr_info->binfo); + addr_info->added_client = i2c_new_client_device(to_i2c_adapter(adev), + &addr_info->binfo); if (!addr_info->adapter_name) return 1; /* Only try the first I2C adapter by default. */ diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 39c59f063aa0..2dfb30b963c4 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -3519,6 +3519,9 @@ static int __clk_core_init(struct clk_core *core) out: clk_pm_runtime_put(core); unlock: + if (ret) + hlist_del_init(&core->child_node); + clk_prepare_unlock(); if (!ret) diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig index 11ec6f466467..abb121f8de52 100644 --- a/drivers/clk/qcom/Kconfig +++ b/drivers/clk/qcom/Kconfig @@ -377,6 +377,7 @@ config SM_GCC_8150 config SM_GCC_8250 tristate "SM8250 Global Clock Controller" + select QCOM_GDSC help Support for the global clock controller on SM8250 devices. 
Say Y if you want to use peripheral devices such as UART, diff --git a/drivers/clk/qcom/gcc-sm8150.c b/drivers/clk/qcom/gcc-sm8150.c index ef98fdc51755..732bc7c937e6 100644 --- a/drivers/clk/qcom/gcc-sm8150.c +++ b/drivers/clk/qcom/gcc-sm8150.c @@ -76,8 +76,7 @@ static struct clk_alpha_pll_postdiv gpll0_out_even = { .clkr.hw.init = &(struct clk_init_data){ .name = "gpll0_out_even", .parent_data = &(const struct clk_parent_data){ - .fw_name = "bi_tcxo", - .name = "bi_tcxo", + .hw = &gpll0.clkr.hw, }, .num_parents = 1, .ops = &clk_trion_pll_postdiv_ops, diff --git a/drivers/clk/rockchip/clk-rk3228.c b/drivers/clk/rockchip/clk-rk3228.c index d17cfb7a3ff4..d7243c09cc84 100644 --- a/drivers/clk/rockchip/clk-rk3228.c +++ b/drivers/clk/rockchip/clk-rk3228.c @@ -156,8 +156,6 @@ PNAME(mux_i2s_out_p) = { "i2s1_pre", "xin12m" }; PNAME(mux_i2s2_p) = { "i2s2_src", "i2s2_frac", "xin12m" }; PNAME(mux_sclk_spdif_p) = { "sclk_spdif_src", "spdif_frac", "xin12m" }; -PNAME(mux_aclk_gpu_pre_p) = { "cpll_gpu", "gpll_gpu", "hdmiphy_gpu", "usb480m_gpu" }; - PNAME(mux_uart0_p) = { "uart0_src", "uart0_frac", "xin24m" }; PNAME(mux_uart1_p) = { "uart1_src", "uart1_frac", "xin24m" }; PNAME(mux_uart2_p) = { "uart2_src", "uart2_frac", "xin24m" }; @@ -468,16 +466,9 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = { RK2928_CLKSEL_CON(24), 6, 10, DFLAGS, RK2928_CLKGATE_CON(2), 8, GFLAGS), - GATE(0, "cpll_gpu", "cpll", 0, - RK2928_CLKGATE_CON(3), 13, GFLAGS), - GATE(0, "gpll_gpu", "gpll", 0, - RK2928_CLKGATE_CON(3), 13, GFLAGS), - GATE(0, "hdmiphy_gpu", "hdmiphy", 0, - RK2928_CLKGATE_CON(3), 13, GFLAGS), - GATE(0, "usb480m_gpu", "usb480m", 0, + COMPOSITE(0, "aclk_gpu_pre", mux_pll_src_4plls_p, 0, + RK2928_CLKSEL_CON(34), 5, 2, MFLAGS, 0, 5, DFLAGS, RK2928_CLKGATE_CON(3), 13, GFLAGS), - COMPOSITE_NOGATE(0, "aclk_gpu_pre", mux_aclk_gpu_pre_p, 0, - RK2928_CLKSEL_CON(34), 5, 2, MFLAGS, 0, 5, DFLAGS), COMPOSITE(SCLK_SPI0, "sclk_spi0", mux_pll_src_2plls_p, 0, RK2928_CLKSEL_CON(25), 8, 1, MFLAGS, 0, 7, DFLAGS, @@ -582,8 +573,8 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = { GATE(0, "pclk_peri_noc", "pclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(12), 2, GFLAGS), /* PD_GPU */ - GATE(ACLK_GPU, "aclk_gpu", "aclk_gpu_pre", 0, RK2928_CLKGATE_CON(13), 14, GFLAGS), - GATE(0, "aclk_gpu_noc", "aclk_gpu_pre", 0, RK2928_CLKGATE_CON(13), 15, GFLAGS), + GATE(ACLK_GPU, "aclk_gpu", "aclk_gpu_pre", 0, RK2928_CLKGATE_CON(7), 14, GFLAGS), + GATE(0, "aclk_gpu_noc", "aclk_gpu_pre", 0, RK2928_CLKGATE_CON(7), 15, GFLAGS), /* PD_BUS */ GATE(0, "sclk_initmem_mbist", "aclk_cpu", 0, RK2928_CLKGATE_CON(8), 1, GFLAGS), diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c index 64e229ddf2a5..e931319dcc9d 100644 --- a/drivers/clk/tegra/clk-tegra124.c +++ b/drivers/clk/tegra/clk-tegra124.c @@ -1292,7 +1292,7 @@ static struct tegra_clk_init_table common_init_table[] __initdata = { { TEGRA124_CLK_UARTB, TEGRA124_CLK_PLL_P, 408000000, 0 }, { TEGRA124_CLK_UARTC, TEGRA124_CLK_PLL_P, 408000000, 0 }, { TEGRA124_CLK_UARTD, TEGRA124_CLK_PLL_P, 408000000, 0 }, - { TEGRA124_CLK_PLL_A, TEGRA124_CLK_CLK_MAX, 564480000, 0 }, + { TEGRA124_CLK_PLL_A, TEGRA124_CLK_CLK_MAX, 282240000, 0 }, { TEGRA124_CLK_PLL_A_OUT0, TEGRA124_CLK_CLK_MAX, 11289600, 0 }, { TEGRA124_CLK_I2S0, TEGRA124_CLK_PLL_A_OUT0, 11289600, 0 }, { TEGRA124_CLK_I2S1, TEGRA124_CLK_PLL_A_OUT0, 11289600, 0 }, diff --git a/drivers/clk/ti/clk-33xx.c b/drivers/clk/ti/clk-33xx.c index e001b9bcb6bf..7dc30dd6c8d5 100644 --- 
a/drivers/clk/ti/clk-33xx.c +++ b/drivers/clk/ti/clk-33xx.c @@ -212,7 +212,7 @@ static const struct omap_clkctrl_reg_data am3_mpu_clkctrl_regs[] __initconst = { }; static const struct omap_clkctrl_reg_data am3_l4_rtc_clkctrl_regs[] __initconst = { - { AM3_L4_RTC_RTC_CLKCTRL, NULL, CLKF_SW_SUP, "clk_32768_ck" }, + { AM3_L4_RTC_RTC_CLKCTRL, NULL, CLKF_SW_SUP, "clk-24mhz-clkctrl:0000:0" }, { 0 }, }; diff --git a/drivers/clk/ti/clkctrl.c b/drivers/clk/ti/clkctrl.c index 062266034d84..864c484bde1b 100644 --- a/drivers/clk/ti/clkctrl.c +++ b/drivers/clk/ti/clkctrl.c @@ -255,24 +255,53 @@ static struct clk_hw *_ti_omap4_clkctrl_xlate(struct of_phandle_args *clkspec, return entry->clk; } +/* Get clkctrl clock base name based on clkctrl_name or dts node */ +static const char * __init clkctrl_get_clock_name(struct device_node *np, + const char *clkctrl_name, + int offset, int index, + bool legacy_naming) +{ + char *clock_name; + + /* l4per-clkctrl:1234:0 style naming based on clkctrl_name */ + if (clkctrl_name && !legacy_naming) { + clock_name = kasprintf(GFP_KERNEL, "%s-clkctrl:%04x:%d", + clkctrl_name, offset, index); + strreplace(clock_name, '_', '-'); + + return clock_name; + } + + /* l4per:1234:0 old style naming based on clkctrl_name */ + if (clkctrl_name) + return kasprintf(GFP_KERNEL, "%s_cm:clk:%04x:%d", + clkctrl_name, offset, index); + + /* l4per_cm:1234:0 old style naming based on parent node name */ + if (legacy_naming) + return kasprintf(GFP_KERNEL, "%pOFn:clk:%04x:%d", + np->parent, offset, index); + + /* l4per-clkctrl:1234:0 style naming based on node name */ + return kasprintf(GFP_KERNEL, "%pOFn:%04x:%d", np, offset, index); +} + static int __init _ti_clkctrl_clk_register(struct omap_clkctrl_provider *provider, struct device_node *node, struct clk_hw *clk_hw, u16 offset, u8 bit, const char * const *parents, - int num_parents, const struct clk_ops *ops) + int num_parents, const struct clk_ops *ops, + const char *clkctrl_name) { struct clk_init_data init = { NULL }; struct clk *clk; struct omap_clkctrl_clk *clkctrl_clk; int ret = 0; - if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT) - init.name = kasprintf(GFP_KERNEL, "%pOFn:%pOFn:%04x:%d", - node->parent, node, offset, - bit); - else - init.name = kasprintf(GFP_KERNEL, "%pOFn:%04x:%d", node, - offset, bit); + init.name = clkctrl_get_clock_name(node, clkctrl_name, offset, bit, + ti_clk_get_features()->flags & + TI_CLK_CLKCTRL_COMPAT); + clkctrl_clk = kzalloc(sizeof(*clkctrl_clk), GFP_KERNEL); if (!init.name || !clkctrl_clk) { ret = -ENOMEM; @@ -309,7 +338,7 @@ static void __init _ti_clkctrl_setup_gate(struct omap_clkctrl_provider *provider, struct device_node *node, u16 offset, const struct omap_clkctrl_bit_data *data, - void __iomem *reg) + void __iomem *reg, const char *clkctrl_name) { struct clk_hw_omap *clk_hw; @@ -322,7 +351,7 @@ _ti_clkctrl_setup_gate(struct omap_clkctrl_provider *provider, if (_ti_clkctrl_clk_register(provider, node, &clk_hw->hw, offset, data->bit, data->parents, 1, - &omap_gate_clk_ops)) + &omap_gate_clk_ops, clkctrl_name)) kfree(clk_hw); } @@ -330,7 +359,7 @@ static void __init _ti_clkctrl_setup_mux(struct omap_clkctrl_provider *provider, struct device_node *node, u16 offset, const struct omap_clkctrl_bit_data *data, - void __iomem *reg) + void __iomem *reg, const char *clkctrl_name) { struct clk_omap_mux *mux; int num_parents = 0; @@ -357,7 +386,7 @@ _ti_clkctrl_setup_mux(struct omap_clkctrl_provider *provider, if (_ti_clkctrl_clk_register(provider, node, &mux->hw, offset, data->bit, data->parents, 
num_parents, - &ti_clk_mux_ops)) + &ti_clk_mux_ops, clkctrl_name)) kfree(mux); } @@ -365,7 +394,7 @@ static void __init _ti_clkctrl_setup_div(struct omap_clkctrl_provider *provider, struct device_node *node, u16 offset, const struct omap_clkctrl_bit_data *data, - void __iomem *reg) + void __iomem *reg, const char *clkctrl_name) { struct clk_omap_divider *div; const struct omap_clkctrl_div_data *div_data = data->data; @@ -393,7 +422,7 @@ _ti_clkctrl_setup_div(struct omap_clkctrl_provider *provider, if (_ti_clkctrl_clk_register(provider, node, &div->hw, offset, data->bit, data->parents, 1, - &ti_clk_divider_ops)) + &ti_clk_divider_ops, clkctrl_name)) kfree(div); } @@ -401,7 +430,7 @@ static void __init _ti_clkctrl_setup_subclks(struct omap_clkctrl_provider *provider, struct device_node *node, const struct omap_clkctrl_reg_data *data, - void __iomem *reg) + void __iomem *reg, const char *clkctrl_name) { const struct omap_clkctrl_bit_data *bits = data->bit_data; @@ -412,17 +441,17 @@ _ti_clkctrl_setup_subclks(struct omap_clkctrl_provider *provider, switch (bits->type) { case TI_CLK_GATE: _ti_clkctrl_setup_gate(provider, node, data->offset, - bits, reg); + bits, reg, clkctrl_name); break; case TI_CLK_DIVIDER: _ti_clkctrl_setup_div(provider, node, data->offset, - bits, reg); + bits, reg, clkctrl_name); break; case TI_CLK_MUX: _ti_clkctrl_setup_mux(provider, node, data->offset, - bits, reg); + bits, reg, clkctrl_name); break; default: @@ -461,42 +490,10 @@ static char * __init clkctrl_get_name(struct device_node *np) return name; } } - of_node_put(np); return NULL; } -/* Get clkctrl clock base name based on clkctrl_name or dts node */ -static const char * __init clkctrl_get_clock_name(struct device_node *np, - const char *clkctrl_name, - int offset, int index, - bool legacy_naming) -{ - char *clock_name; - - /* l4per-clkctrl:1234:0 style naming based on clkctrl_name */ - if (clkctrl_name && !legacy_naming) { - clock_name = kasprintf(GFP_KERNEL, "%s-clkctrl:%04x:%d", - clkctrl_name, offset, index); - strreplace(clock_name, '_', '-'); - - return clock_name; - } - - /* l4per:1234:0 old style naming based on clkctrl_name */ - if (clkctrl_name) - return kasprintf(GFP_KERNEL, "%s_cm:clk:%04x:%d", - clkctrl_name, offset, index); - - /* l4per_cm:1234:0 old style naming based on parent node name */ - if (legacy_naming) - return kasprintf(GFP_KERNEL, "%pOFn:clk:%04x:%d", - np->parent, offset, index); - - /* l4per-clkctrl:1234:0 style naming based on node name */ - return kasprintf(GFP_KERNEL, "%pOFn:%04x:%d", np, offset, index); -} - static void __init _ti_omap4_clkctrl_setup(struct device_node *node) { struct omap_clkctrl_provider *provider; @@ -664,7 +661,7 @@ clkdm_found: hw->enable_reg.ptr = provider->base + reg_data->offset; _ti_clkctrl_setup_subclks(provider, node, reg_data, - hw->enable_reg.ptr); + hw->enable_reg.ptr, clkctrl_name); if (reg_data->flags & CLKF_SW_SUP) hw->enable_bit = MODULEMODE_SWCTRL; diff --git a/drivers/clk/versatile/clk-impd1.c b/drivers/clk/versatile/clk-impd1.c index b05da8516d4c..f9f4babe3ca6 100644 --- a/drivers/clk/versatile/clk-impd1.c +++ b/drivers/clk/versatile/clk-impd1.c @@ -206,6 +206,7 @@ static int integrator_impd1_clk_spawn(struct device *dev, return -ENODEV; } + of_property_read_string(np, "clock-output-names", &name); parent_name = of_clk_get_parent_name(np, 0); clk = icst_clk_setup(NULL, desc, name, parent_name, map, ICST_INTEGRATOR_IM_PD1); diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c index 
dccef3a2908b..e1401d9cc756 100644 --- a/drivers/crypto/chelsio/chtls/chtls_io.c +++ b/drivers/crypto/chelsio/chtls/chtls_io.c @@ -682,7 +682,7 @@ int chtls_push_frames(struct chtls_sock *csk, int comp) make_tx_data_wr(sk, skb, immdlen, len, credits_needed, completion); tp->snd_nxt += len; - tp->lsndtime = tcp_time_stamp(tp); + tp->lsndtime = tcp_jiffies32; if (completion) ULP_SKB_CB(skb)->flags &= ~ULPCB_FLAG_NEED_HDR; } else { diff --git a/drivers/dax/kmem.c b/drivers/dax/kmem.c index 3d0a7e702c94..1e678bdf5aed 100644 --- a/drivers/dax/kmem.c +++ b/drivers/dax/kmem.c @@ -22,6 +22,7 @@ int dev_dax_kmem_probe(struct device *dev) resource_size_t kmem_size; resource_size_t kmem_end; struct resource *new_res; + const char *new_res_name; int numa_node; int rc; @@ -48,11 +49,16 @@ int dev_dax_kmem_probe(struct device *dev) kmem_size &= ~(memory_block_size_bytes() - 1); kmem_end = kmem_start + kmem_size; - /* Region is permanently reserved. Hot-remove not yet implemented. */ - new_res = request_mem_region(kmem_start, kmem_size, dev_name(dev)); + new_res_name = kstrdup(dev_name(dev), GFP_KERNEL); + if (!new_res_name) + return -ENOMEM; + + /* Region is permanently reserved if hotremove fails. */ + new_res = request_mem_region(kmem_start, kmem_size, new_res_name); if (!new_res) { dev_warn(dev, "could not reserve region [%pa-%pa]\n", &kmem_start, &kmem_end); + kfree(new_res_name); return -EBUSY; } @@ -63,12 +69,12 @@ int dev_dax_kmem_probe(struct device *dev) * unknown to us that will break add_memory() below. */ new_res->flags = IORESOURCE_SYSTEM_RAM; - new_res->name = dev_name(dev); rc = add_memory(numa_node, new_res->start, resource_size(new_res)); if (rc) { release_resource(new_res); kfree(new_res); + kfree(new_res_name); return rc; } dev_dax->dax_kmem_res = new_res; @@ -83,6 +89,7 @@ static int dev_dax_kmem_remove(struct device *dev) struct resource *res = dev_dax->dax_kmem_res; resource_size_t kmem_start = res->start; resource_size_t kmem_size = resource_size(res); + const char *res_name = res->name; int rc; /* @@ -102,6 +109,7 @@ static int dev_dax_kmem_remove(struct device *dev) /* Release and free dax resources */ release_resource(res); kfree(res); + kfree(res_name); dev_dax->dax_kmem_res = NULL; return 0; diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 364dd34799d4..0425984db118 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -1166,10 +1166,11 @@ static int dmatest_run_set(const char *val, const struct kernel_param *kp) mutex_unlock(&info->lock); return ret; } else if (dmatest_run) { - if (is_threaded_test_pending(info)) - start_threaded_tests(info); - else - pr_info("Could not start test, no channels configured\n"); + if (!is_threaded_test_pending(info)) { + pr_info("No channels configured, continue with any\n"); + add_threaded_test(info); + } + start_threaded_tests(info); } else { stop_threaded_test(info); } diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c index f6f49f0f6fae..8d79a8787104 100644 --- a/drivers/dma/idxd/device.c +++ b/drivers/dma/idxd/device.c @@ -62,6 +62,13 @@ int idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id) perm.ignore = 0; iowrite32(perm.bits, idxd->reg_base + offset); + /* + * A readback from the device ensures that any previously generated + * completion record writes are visible to software based on PCI + * ordering rules. 
+ */ + perm.bits = ioread32(idxd->reg_base + offset); + return 0; } diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c index d6fcd2e60103..6510791b9921 100644 --- a/drivers/dma/idxd/irq.c +++ b/drivers/dma/idxd/irq.c @@ -173,6 +173,7 @@ static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry, struct llist_node *head; int queued = 0; + *processed = 0; head = llist_del_all(&irq_entry->pending_llist); if (!head) return 0; @@ -197,6 +198,7 @@ static int irq_process_work_list(struct idxd_irq_entry *irq_entry, struct list_head *node, *next; int queued = 0; + *processed = 0; if (list_empty(&irq_entry->work_list)) return 0; @@ -218,10 +220,9 @@ static int irq_process_work_list(struct idxd_irq_entry *irq_entry, return queued; } -irqreturn_t idxd_wq_thread(int irq, void *data) +static int idxd_desc_process(struct idxd_irq_entry *irq_entry) { - struct idxd_irq_entry *irq_entry = data; - int rc, processed = 0, retry = 0; + int rc, processed, total = 0; /* * There are two lists we are processing. The pending_llist is where @@ -244,15 +245,26 @@ irqreturn_t idxd_wq_thread(int irq, void *data) */ do { rc = irq_process_work_list(irq_entry, &processed); - if (rc != 0) { - retry++; + total += processed; + if (rc != 0) continue; - } rc = irq_process_pending_llist(irq_entry, &processed); - } while (rc != 0 && retry != 10); + total += processed; + } while (rc != 0); + + return total; +} + +irqreturn_t idxd_wq_thread(int irq, void *data) +{ + struct idxd_irq_entry *irq_entry = data; + int processed; + processed = idxd_desc_process(irq_entry); idxd_unmask_msix_vector(irq_entry->idxd, irq_entry->id); + /* catch anything unprocessed after unmasking */ + processed += idxd_desc_process(irq_entry); if (processed == 0) return IRQ_NONE; diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c index c683051257fd..66ef70b00ec0 100644 --- a/drivers/dma/owl-dma.c +++ b/drivers/dma/owl-dma.c @@ -175,13 +175,11 @@ struct owl_dma_txd { * @id: physical index to this channel * @base: virtual memory base for the dma channel * @vchan: the virtual channel currently being served by this physical channel - * @lock: a lock to use when altering an instance of this struct */ struct owl_dma_pchan { u32 id; void __iomem *base; struct owl_dma_vchan *vchan; - spinlock_t lock; }; /** @@ -437,14 +435,14 @@ static struct owl_dma_pchan *owl_dma_get_pchan(struct owl_dma *od, for (i = 0; i < od->nr_pchans; i++) { pchan = &od->pchans[i]; - spin_lock_irqsave(&pchan->lock, flags); + spin_lock_irqsave(&od->lock, flags); if (!pchan->vchan) { pchan->vchan = vchan; - spin_unlock_irqrestore(&pchan->lock, flags); + spin_unlock_irqrestore(&od->lock, flags); break; } - spin_unlock_irqrestore(&pchan->lock, flags); + spin_unlock_irqrestore(&od->lock, flags); } return pchan; diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c index c4ce5dfb149b..db58d7e4f9fe 100644 --- a/drivers/dma/tegra210-adma.c +++ b/drivers/dma/tegra210-adma.c @@ -900,7 +900,7 @@ static int tegra_adma_probe(struct platform_device *pdev) ret = dma_async_device_register(&tdma->dma_dev); if (ret < 0) { dev_err(&pdev->dev, "ADMA registration failed: %d\n", ret); - goto irq_dispose; + goto rpm_put; } ret = of_dma_controller_register(pdev->dev.of_node, diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c index a9c0251adf1a..a90e154b0ae0 100644 --- a/drivers/dma/ti/k3-udma.c +++ b/drivers/dma/ti/k3-udma.c @@ -2156,7 +2156,8 @@ udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl, d->residue += sg_dma_len(sgent); } - 
cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags, CPPI5_TR_CSF_EOP); + cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags, + CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP); return d; } @@ -2733,7 +2734,8 @@ udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, tr_req[1].dicnt3 = 1; } - cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, CPPI5_TR_CSF_EOP); + cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, + CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP); if (uc->config.metadata_size) d->vd.tx.metadata_ops = &metadata_ops; diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c index d47749a35863..ff253696d183 100644 --- a/drivers/dma/xilinx/zynqmp_dma.c +++ b/drivers/dma/xilinx/zynqmp_dma.c @@ -434,6 +434,7 @@ static void zynqmp_dma_free_descriptor(struct zynqmp_dma_chan *chan, struct zynqmp_dma_desc_sw *child, *next; chan->desc_free_cnt++; + list_del(&sdesc->node); list_add_tail(&sdesc->node, &chan->free_list); list_for_each_entry_safe(child, next, &sdesc->tx_list, node) { chan->desc_free_cnt++; @@ -608,8 +609,6 @@ static void zynqmp_dma_chan_desc_cleanup(struct zynqmp_dma_chan *chan) dma_async_tx_callback callback; void *callback_param; - list_del(&desc->node); - callback = desc->async_tx.callback; callback_param = desc->async_tx.callback_param; if (callback) { diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c index 9d2512913d25..f564e15fbc7e 100644 --- a/drivers/firmware/efi/cper.c +++ b/drivers/firmware/efi/cper.c @@ -407,6 +407,58 @@ static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie, } } +static const char * const fw_err_rec_type_strs[] = { + "IPF SAL Error Record", + "SOC Firmware Error Record Type1 (Legacy CrashLog Support)", + "SOC Firmware Error Record Type2", +}; + +static void cper_print_fw_err(const char *pfx, + struct acpi_hest_generic_data *gdata, + const struct cper_sec_fw_err_rec_ref *fw_err) +{ + void *buf = acpi_hest_get_payload(gdata); + u32 offset, length = gdata->error_data_length; + + printk("%s""Firmware Error Record Type: %s\n", pfx, + fw_err->record_type < ARRAY_SIZE(fw_err_rec_type_strs) ? + fw_err_rec_type_strs[fw_err->record_type] : "unknown"); + printk("%s""Revision: %d\n", pfx, fw_err->revision); + + /* Record Type based on UEFI 2.7 */ + if (fw_err->revision == 0) { + printk("%s""Record Identifier: %08llx\n", pfx, + fw_err->record_identifier); + } else if (fw_err->revision == 2) { + printk("%s""Record Identifier: %pUl\n", pfx, + &fw_err->record_identifier_guid); + } + + /* + * The FW error record may contain trailing data beyond the + * structure defined by the specification. As the fields + * defined (and hence the offset of any trailing data) vary + * with the revision, set the offset to account for this + * variation. 
+ */ + if (fw_err->revision == 0) { + /* record_identifier_guid not defined */ + offset = offsetof(struct cper_sec_fw_err_rec_ref, + record_identifier_guid); + } else if (fw_err->revision == 1) { + /* record_identifier not defined */ + offset = offsetof(struct cper_sec_fw_err_rec_ref, + record_identifier); + } else { + offset = sizeof(*fw_err); + } + + buf += offset; + length -= offset; + + print_hex_dump(pfx, "", DUMP_PREFIX_OFFSET, 16, 4, buf, length, true); +} + static void cper_print_tstamp(const char *pfx, struct acpi_hest_generic_data_v300 *gdata) { @@ -494,6 +546,16 @@ cper_estatus_print_section(const char *pfx, struct acpi_hest_generic_data *gdata else goto err_section_too_small; #endif + } else if (guid_equal(sec_type, &CPER_SEC_FW_ERR_REC_REF)) { + struct cper_sec_fw_err_rec_ref *fw_err = acpi_hest_get_payload(gdata); + + printk("%ssection_type: Firmware Error Record Reference\n", + newpfx); + /* The minimal FW Error Record contains 16 bytes */ + if (gdata->error_data_length >= SZ_16) + cper_print_fw_err(newpfx, gdata, fw_err); + else + goto err_section_too_small; } else { const void *err = acpi_hest_get_payload(gdata); diff --git a/drivers/firmware/efi/earlycon.c b/drivers/firmware/efi/earlycon.c index 5d4f84781aa0..a52236e11e5f 100644 --- a/drivers/firmware/efi/earlycon.c +++ b/drivers/firmware/efi/earlycon.c @@ -114,14 +114,16 @@ static void efi_earlycon_write_char(u32 *dst, unsigned char c, unsigned int h) const u32 color_black = 0x00000000; const u32 color_white = 0x00ffffff; const u8 *src; - u8 s8; - int m; + int m, n, bytes; + u8 x; - src = font->data + c * font->height; - s8 = *(src + h); + bytes = BITS_TO_BYTES(font->width); + src = font->data + c * font->height * bytes + h * bytes; - for (m = 0; m < 8; m++) { - if ((s8 >> (7 - m)) & 1) + for (m = 0; m < font->width; m++) { + n = m % 8; + x = *(src + m / 8); + if ((x >> (7 - n)) & 1) *dst = color_white; else *dst = color_black; diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index 911a2bd0f6b7..4e3055238f31 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -130,11 +130,8 @@ static ssize_t systab_show(struct kobject *kobj, if (efi.smbios != EFI_INVALID_TABLE_ADDR) str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios); - if (IS_ENABLED(CONFIG_IA64) || IS_ENABLED(CONFIG_X86)) { - extern char *efi_systab_show_arch(char *str); - + if (IS_ENABLED(CONFIG_IA64) || IS_ENABLED(CONFIG_X86)) str = efi_systab_show_arch(str); - } return str - buf; } diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c index 99a5cde7c2d8..48161b1dd098 100644 --- a/drivers/firmware/efi/libstub/arm-stub.c +++ b/drivers/firmware/efi/libstub/arm-stub.c @@ -60,7 +60,11 @@ static struct screen_info *setup_graphics(void) si = alloc_screen_info(); if (!si) return NULL; - efi_setup_gop(si, &gop_proto, size); + status = efi_setup_gop(si, &gop_proto, size); + if (status != EFI_SUCCESS) { + free_screen_info(si); + return NULL; + } } return si; } diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h index 67d26949fd26..62943992f02f 100644 --- a/drivers/firmware/efi/libstub/efistub.h +++ b/drivers/firmware/efi/libstub/efistub.h @@ -92,6 +92,19 @@ extern __pure efi_system_table_t *efi_system_table(void); #define EFI_LOCATE_BY_REGISTER_NOTIFY 1 #define EFI_LOCATE_BY_PROTOCOL 2 +/* + * An efi_boot_memmap is used by efi_get_memory_map() to return the + * EFI memory map in a dynamically allocated buffer. 
+ * + * The buffer allocated for the EFI memory map includes extra room for + * a minimum of EFI_MMAP_NR_SLACK_SLOTS additional EFI memory descriptors. + * This facilitates the reuse of the EFI memory map buffer when a second + * call to ExitBootServices() is needed because of intervening changes to + * the EFI memory map. Other related structures, e.g. x86 e820ext, need + * to factor in this headroom requirement as well. + */ +#define EFI_MMAP_NR_SLACK_SLOTS 8 + struct efi_boot_memmap { efi_memory_desc_t **map; unsigned long *map_size; diff --git a/drivers/firmware/efi/libstub/mem.c b/drivers/firmware/efi/libstub/mem.c index 869a79c8946f..09f4fa01914e 100644 --- a/drivers/firmware/efi/libstub/mem.c +++ b/drivers/firmware/efi/libstub/mem.c @@ -5,8 +5,6 @@ #include "efistub.h" -#define EFI_MMAP_NR_SLACK_SLOTS 8 - static inline bool mmap_has_headroom(unsigned long buff_size, unsigned long map_size, unsigned long desc_size) diff --git a/drivers/firmware/efi/libstub/tpm.c b/drivers/firmware/efi/libstub/tpm.c index 1d59e103a2e3..e9a684637b70 100644 --- a/drivers/firmware/efi/libstub/tpm.c +++ b/drivers/firmware/efi/libstub/tpm.c @@ -54,7 +54,7 @@ void efi_retrieve_tpm2_eventlog(void) efi_status_t status; efi_physical_addr_t log_location = 0, log_last_entry = 0; struct linux_efi_tpm_eventlog *log_tbl = NULL; - struct efi_tcg2_final_events_table *final_events_table; + struct efi_tcg2_final_events_table *final_events_table = NULL; unsigned long first_entry_addr, last_entry_addr; size_t log_size, last_entry_size; efi_bool_t truncated; @@ -127,7 +127,8 @@ void efi_retrieve_tpm2_eventlog(void) * Figure out whether any events have already been logged to the * final events structure, and if so how much space they take up */ - final_events_table = get_efi_config_table(LINUX_EFI_TPM_FINAL_LOG_GUID); + if (version == EFI_TCG2_EVENT_LOG_FORMAT_TCG_2) + final_events_table = get_efi_config_table(LINUX_EFI_TPM_FINAL_LOG_GUID); if (final_events_table && final_events_table->nr_events) { struct tcg_pcr_event2_head *header; int offset; diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c index 05ccb229fb45..f0339b5d3658 100644 --- a/drivers/firmware/efi/libstub/x86-stub.c +++ b/drivers/firmware/efi/libstub/x86-stub.c @@ -606,24 +606,18 @@ static efi_status_t allocate_e820(struct boot_params *params, struct setup_data **e820ext, u32 *e820ext_size) { - unsigned long map_size, desc_size, buff_size; - struct efi_boot_memmap boot_map; - efi_memory_desc_t *map; + unsigned long map_size, desc_size, map_key; efi_status_t status; - __u32 nr_desc; + __u32 nr_desc, desc_version; - boot_map.map = ↦ - boot_map.map_size = &map_size; - boot_map.desc_size = &desc_size; - boot_map.desc_ver = NULL; - boot_map.key_ptr = NULL; - boot_map.buff_size = &buff_size; + /* Only need the size of the mem map and size of each mem descriptor */ + map_size = 0; + status = efi_bs_call(get_memory_map, &map_size, NULL, &map_key, + &desc_size, &desc_version); + if (status != EFI_BUFFER_TOO_SMALL) + return (status != EFI_SUCCESS) ? 
status : EFI_UNSUPPORTED; - status = efi_get_memory_map(&boot_map); - if (status != EFI_SUCCESS) - return status; - - nr_desc = buff_size / desc_size; + nr_desc = map_size / desc_size + EFI_MMAP_NR_SLACK_SLOTS; if (nr_desc > ARRAY_SIZE(params->e820_table)) { u32 nr_e820ext = nr_desc - ARRAY_SIZE(params->e820_table); diff --git a/drivers/firmware/efi/tpm.c b/drivers/firmware/efi/tpm.c index 55b031d2c989..c1955d320fec 100644 --- a/drivers/firmware/efi/tpm.c +++ b/drivers/firmware/efi/tpm.c @@ -62,8 +62,11 @@ int __init efi_tpm_eventlog_init(void) tbl_size = sizeof(*log_tbl) + log_tbl->size; memblock_reserve(efi.tpm_log, tbl_size); - if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR) + if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR || + log_tbl->version != EFI_TCG2_EVENT_LOG_FORMAT_TCG_2) { + pr_warn(FW_BUG "TPM Final Events table missing or invalid\n"); goto out; + } final_tbl = early_memremap(efi.tpm_final_log, sizeof(*final_tbl)); diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c index baee8c3f06ad..cf3687a7925f 100644 --- a/drivers/gpio/gpio-bcm-kona.c +++ b/drivers/gpio/gpio-bcm-kona.c @@ -625,7 +625,7 @@ static int bcm_kona_gpio_probe(struct platform_device *pdev) kona_gpio->reg_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(kona_gpio->reg_base)) { - ret = -ENXIO; + ret = PTR_ERR(kona_gpio->reg_base); goto err_irq_domain; } diff --git a/drivers/gpio/gpio-exar.c b/drivers/gpio/gpio-exar.c index da1ef0b1c291..b1accfba017d 100644 --- a/drivers/gpio/gpio-exar.c +++ b/drivers/gpio/gpio-exar.c @@ -148,8 +148,10 @@ static int gpio_exar_probe(struct platform_device *pdev) mutex_init(&exar_gpio->lock); index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL); - if (index < 0) - goto err_destroy; + if (index < 0) { + ret = index; + goto err_mutex_destroy; + } sprintf(exar_gpio->name, "exar_gpio%d", index); exar_gpio->gpio_chip.label = exar_gpio->name; @@ -176,6 +178,7 @@ static int gpio_exar_probe(struct platform_device *pdev) err_destroy: ida_simple_remove(&ida_index, index); +err_mutex_destroy: mutex_destroy(&exar_gpio->lock); return ret; } diff --git a/drivers/gpio/gpio-mlxbf2.c b/drivers/gpio/gpio-mlxbf2.c index 7b7085050219..da570e63589d 100644 --- a/drivers/gpio/gpio-mlxbf2.c +++ b/drivers/gpio/gpio-mlxbf2.c @@ -127,8 +127,8 @@ static int mlxbf2_gpio_lock_acquire(struct mlxbf2_gpio_context *gs) { u32 arm_gpio_lock_val; - spin_lock(&gs->gc.bgpio_lock); mutex_lock(yu_arm_gpio_lock_param.lock); + spin_lock(&gs->gc.bgpio_lock); arm_gpio_lock_val = readl(yu_arm_gpio_lock_param.io); @@ -136,8 +136,8 @@ static int mlxbf2_gpio_lock_acquire(struct mlxbf2_gpio_context *gs) * When lock active bit[31] is set, ModeX is write enabled */ if (YU_LOCK_ACTIVE_BIT(arm_gpio_lock_val)) { - mutex_unlock(yu_arm_gpio_lock_param.lock); spin_unlock(&gs->gc.bgpio_lock); + mutex_unlock(yu_arm_gpio_lock_param.lock); return -EINVAL; } @@ -152,8 +152,8 @@ static int mlxbf2_gpio_lock_acquire(struct mlxbf2_gpio_context *gs) static void mlxbf2_gpio_lock_release(struct mlxbf2_gpio_context *gs) { writel(YU_ARM_GPIO_LOCK_RELEASE, yu_arm_gpio_lock_param.io); - mutex_unlock(yu_arm_gpio_lock_param.lock); spin_unlock(&gs->gc.bgpio_lock); + mutex_unlock(yu_arm_gpio_lock_param.lock); } /* diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c index 3c9f4fb3d5a2..bd65114eb170 100644 --- a/drivers/gpio/gpio-mvebu.c +++ b/drivers/gpio/gpio-mvebu.c @@ -782,6 +782,15 @@ static int mvebu_pwm_probe(struct platform_device *pdev, "marvell,armada-370-gpio")) return 0; + /* + * There are only two sets 
of PWM configuration registers for + * all the GPIO lines on those SoCs which this driver reserves + * for the first two GPIO chips. So if the resource is missing + * we can't treat it as an error. + */ + if (!platform_get_resource_byname(pdev, IORESOURCE_MEM, "pwm")) + return 0; + if (IS_ERR(mvchip->clk)) return PTR_ERR(mvchip->clk); @@ -804,12 +813,6 @@ static int mvebu_pwm_probe(struct platform_device *pdev, mvchip->mvpwm = mvpwm; mvpwm->mvchip = mvchip; - /* - * There are only two sets of PWM configuration registers for - * all the GPIO lines on those SoCs which this driver reserves - * for the first two GPIO chips. So if the resource is missing - * we can't treat it as an error. - */ mvpwm->membase = devm_platform_ioremap_resource_byname(pdev, "pwm"); if (IS_ERR(mvpwm->membase)) return PTR_ERR(mvpwm->membase); diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c index 1361270ecf8c..0cb6600b8eee 100644 --- a/drivers/gpio/gpio-pxa.c +++ b/drivers/gpio/gpio-pxa.c @@ -660,8 +660,8 @@ static int pxa_gpio_probe(struct platform_device *pdev) pchip->irq1 = irq1; gpio_reg_base = devm_platform_ioremap_resource(pdev, 0); - if (!gpio_reg_base) - return -EINVAL; + if (IS_ERR(gpio_reg_base)) + return PTR_ERR(gpio_reg_base); clk = clk_get(&pdev->dev, NULL); if (IS_ERR(clk)) { diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 182136d98b97..c14f0784274a 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -729,6 +729,10 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip) if (ret) goto out_free_descs; } + + atomic_notifier_call_chain(&desc->gdev->notifier, + GPIOLINE_CHANGED_REQUESTED, desc); + dev_dbg(&gdev->dev, "registered chardev handle for line %d\n", offset); } @@ -1083,6 +1087,9 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip) if (ret) goto out_free_desc; + atomic_notifier_call_chain(&desc->gdev->notifier, + GPIOLINE_CHANGED_REQUESTED, desc); + le->irq = gpiod_to_irq(desc); if (le->irq <= 0) { ret = -ENODEV; @@ -2998,8 +3005,6 @@ static int gpiod_request_commit(struct gpio_desc *desc, const char *label) } done: spin_unlock_irqrestore(&gpio_lock, flags); - atomic_notifier_call_chain(&desc->gdev->notifier, - GPIOLINE_CHANGED_REQUESTED, desc); return ret; } @@ -4215,7 +4220,9 @@ int gpiochip_lock_as_irq(struct gpio_chip *gc, unsigned int offset) } } - if (test_bit(FLAG_IS_OUT, &desc->flags)) { + /* To be valid for IRQ the line needs to be input or open drain */ + if (test_bit(FLAG_IS_OUT, &desc->flags) && + !test_bit(FLAG_OPEN_DRAIN, &desc->flags)) { chip_err(gc, "%s: tried to flag a GPIO set as output for IRQ\n", __func__); @@ -4278,7 +4285,12 @@ void gpiochip_enable_irq(struct gpio_chip *gc, unsigned int offset) if (!IS_ERR(desc) && !WARN_ON(!test_bit(FLAG_USED_AS_IRQ, &desc->flags))) { - WARN_ON(test_bit(FLAG_IS_OUT, &desc->flags)); + /* + * We must not be output when using IRQ UNLESS we are + * open drain. 
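The exception being carved out here rests on how open-drain lines work electrically: the controller only ever drives the line low and otherwise leaves it floating, so even when flagged as output the line can be read back and can legitimately fire an interrupt. A rough consumer-side sketch of the pattern this permits (hypothetical names, not taken from the patch):

#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>

static irqreturn_t foo_alert_handler(int irq, void *data)
{
	/* The peer pulled the shared open-drain line low. */
	return IRQ_HANDLED;
}

/* Hypothetical: one open-drain "alert" line used as both output and IRQ. */
static int foo_init_alert(struct device *dev)
{
	struct gpio_desc *alert;
	int irq;

	/* Open-drain output, initially released (pulled high externally). */
	alert = devm_gpiod_get(dev, "alert", GPIOD_OUT_HIGH_OPEN_DRAIN);
	if (IS_ERR(alert))
		return PTR_ERR(alert);

	irq = gpiod_to_irq(alert);
	if (irq < 0)
		return irq;

	return devm_request_irq(dev, irq, foo_alert_handler,
				IRQF_TRIGGER_FALLING, "foo-alert", dev);
}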
+ */ + WARN_ON(test_bit(FLAG_IS_OUT, &desc->flags) && + !test_bit(FLAG_OPEN_DRAIN, &desc->flags)); set_bit(FLAG_IRQ_IS_ENABLED, &desc->flags); } } @@ -4961,6 +4973,9 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev, return ERR_PTR(ret); } + atomic_notifier_call_chain(&desc->gdev->notifier, + GPIOLINE_CHANGED_REQUESTED, desc); + return desc; } EXPORT_SYMBOL_GPL(gpiod_get_index); @@ -5026,6 +5041,9 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode, return ERR_PTR(ret); } + atomic_notifier_call_chain(&desc->gdev->notifier, + GPIOLINE_CHANGED_REQUESTED, desc); + return desc; } EXPORT_SYMBOL_GPL(fwnode_get_named_gpiod); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 4a3049841086..c24cad3c64ed 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -1050,7 +1050,7 @@ void kfd_dec_compute_active(struct kfd_dev *dev); /* Check with device cgroup if @kfd device is accessible */ static inline int kfd_devcgroup_check_permission(struct kfd_dev *kfd) { -#if defined(CONFIG_CGROUP_DEVICE) +#if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF) struct drm_device *ddev = kfd->ddev; return devcgroup_check_permission(DEVCG_DEV_CHAR, ddev->driver->major, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 28e651b173ab..7fc15b82fe48 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -7880,13 +7880,6 @@ static int dm_update_plane_state(struct dc *dc, return -EINVAL; } - if (new_plane_state->crtc_x <= -new_acrtc->max_cursor_width || - new_plane_state->crtc_y <= -new_acrtc->max_cursor_height) { - DRM_DEBUG_ATOMIC("Bad cursor position %d, %d\n", - new_plane_state->crtc_x, new_plane_state->crtc_y); - return -EINVAL; - } - return 0; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 27a7d2a58079..caa090d0b6ac 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -220,6 +220,30 @@ static enum dpcd_training_patterns return dpcd_tr_pattern; } +static uint8_t dc_dp_initialize_scrambling_data_symbols( + struct dc_link *link, + enum dc_dp_training_pattern pattern) +{ + uint8_t disable_scrabled_data_symbols = 0; + + switch (pattern) { + case DP_TRAINING_PATTERN_SEQUENCE_1: + case DP_TRAINING_PATTERN_SEQUENCE_2: + case DP_TRAINING_PATTERN_SEQUENCE_3: + disable_scrabled_data_symbols = 1; + break; + case DP_TRAINING_PATTERN_SEQUENCE_4: + disable_scrabled_data_symbols = 0; + break; + default: + ASSERT(0); + DC_LOG_HW_LINK_TRAINING("%s: Invalid HW Training pattern: %d\n", + __func__, pattern); + break; + } + return disable_scrabled_data_symbols; +} + static inline bool is_repeater(struct dc_link *link, uint32_t offset) { return (!link->is_lttpr_mode_transparent && offset != 0); @@ -252,6 +276,9 @@ static void dpcd_set_lt_pattern_and_lane_settings( dpcd_pattern.v1_4.TRAINING_PATTERN_SET = dc_dp_training_pattern_to_dpcd_training_pattern(link, pattern); + dpcd_pattern.v1_4.SCRAMBLING_DISABLE = + dc_dp_initialize_scrambling_data_symbols(link, pattern); + dpcd_lt_buffer[DP_TRAINING_PATTERN_SET - DP_TRAINING_PATTERN_SET] = dpcd_pattern.raw; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 085c1a39b313..416afb99529d 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -1625,12 +1625,81 @@ void dcn10_pipe_control_lock( hws->funcs.verify_allow_pstate_change_high(dc); } +/** + * delay_cursor_until_vupdate() - Delay cursor update if too close to VUPDATE. + * + * Software keepout workaround to prevent cursor update locking from stalling + * out cursor updates indefinitely or from old values from being retained in + * the case where the viewport changes in the same frame as the cursor. + * + * The idea is to calculate the remaining time from VPOS to VUPDATE. If it's + * too close to VUPDATE, then stall out until VUPDATE finishes. + * + * TODO: Optimize cursor programming to be once per frame before VUPDATE + * to avoid the need for this workaround. + */ +static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx) +{ + struct dc_stream_state *stream = pipe_ctx->stream; + struct crtc_position position; + uint32_t vupdate_start, vupdate_end; + unsigned int lines_to_vupdate, us_to_vupdate, vpos; + unsigned int us_per_line, us_vupdate; + + if (!dc->hwss.calc_vupdate_position || !dc->hwss.get_position) + return; + + if (!pipe_ctx->stream_res.stream_enc || !pipe_ctx->stream_res.tg) + return; + + dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start, + &vupdate_end); + + dc->hwss.get_position(&pipe_ctx, 1, &position); + vpos = position.vertical_count; + + /* Avoid wraparound calculation issues */ + vupdate_start += stream->timing.v_total; + vupdate_end += stream->timing.v_total; + vpos += stream->timing.v_total; + + if (vpos <= vupdate_start) { + /* VPOS is in VACTIVE or back porch. */ + lines_to_vupdate = vupdate_start - vpos; + } else if (vpos > vupdate_end) { + /* VPOS is in the front porch. */ + return; + } else { + /* VPOS is in VUPDATE. */ + lines_to_vupdate = 0; + } + + /* Calculate time until VUPDATE in microseconds. */ + us_per_line = + stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz; + us_to_vupdate = lines_to_vupdate * us_per_line; + + /* 70 us is a conservative estimate of cursor update time*/ + if (us_to_vupdate > 70) + return; + + /* Stall out until the cursor update completes. */ + if (vupdate_end < vupdate_start) + vupdate_end += stream->timing.v_total; + us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line; + udelay(us_to_vupdate + us_vupdate); +} + void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock) { /* cursor lock is per MPCC tree, so only need to lock one pipe per stream */ if (!pipe || pipe->top_pipe) return; + /* Prevent cursor lock from stalling out cursor updates. 
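To put numbers on the keepout window computed by delay_cursor_until_vupdate() above: with assumed 1080p60 CEA timings (h_total = 2200, pix_clk_100hz = 1485000, i.e. 148.5 MHz), us_per_line = 2200 * 10000 / 1485000, about 14 us per scanline with the integer division in the code, so the conservative 70 us cursor-update estimate covers roughly five lines ahead of VUPDATE; only updates landing inside that narrow window pay the udelay() stall.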
*/ + if (lock) + delay_cursor_until_vupdate(dc, pipe); + dc->res_pool->mpc->funcs->cursor_lock(dc->res_pool->mpc, pipe->stream_res.opp->inst, lock); } @@ -3236,7 +3305,7 @@ int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx) return vertical_line_start; } -static void dcn10_calc_vupdate_position( +void dcn10_calc_vupdate_position( struct dc *dc, struct pipe_ctx *pipe_ctx, uint32_t *start_line, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h index af51424315d5..42b6e016d71e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h @@ -34,6 +34,11 @@ struct dc; void dcn10_hw_sequencer_construct(struct dc *dc); int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx); +void dcn10_calc_vupdate_position( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + uint32_t *start_line, + uint32_t *end_line); void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx); enum dc_status dcn10_enable_stream_timing( struct pipe_ctx *pipe_ctx, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c index 700509bdf503..9e8e32629e47 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c @@ -72,6 +72,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = { .set_clock = dcn10_set_clock, .get_clock = dcn10_get_clock, .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, + .calc_vupdate_position = dcn10_calc_vupdate_position, }; static const struct hwseq_private_funcs dcn10_private_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c index 6a21228893ee..8334bbd6eabb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c @@ -83,6 +83,7 @@ static const struct hw_sequencer_funcs dcn20_funcs = { .init_vm_ctx = dcn20_init_vm_ctx, .set_flip_control_gsl = dcn20_set_flip_control_gsl, .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, + .calc_vupdate_position = dcn10_calc_vupdate_position, }; static const struct hwseq_private_funcs dcn20_private_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c index 707ce0f28fab..4dd634118df2 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c @@ -86,6 +86,7 @@ static const struct hw_sequencer_funcs dcn21_funcs = { .optimize_pwr_state = dcn21_optimize_pwr_state, .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state, .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, + .calc_vupdate_position = dcn10_calc_vupdate_position, .set_cursor_position = dcn10_set_cursor_position, .set_cursor_attribute = dcn10_set_cursor_attribute, .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level, diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile index 7ee8b8460a9b..e34c3376efc1 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile @@ -63,10 +63,8 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_rq_dlg_calc_21.o := $(dml_ccflags) endif CFLAGS_$(AMDDALPATH)/dc/dml/dml1_display_rq_dlg_calc.o := $(dml_ccflags) 
CFLAGS_$(AMDDALPATH)/dc/dml/display_rq_dlg_helpers.o := $(dml_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml/dml_common_defs.o := $(dml_ccflags) DML = display_mode_lib.o display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o \ - dml_common_defs.o ifdef CONFIG_DRM_AMD_DC_DCN DML += display_mode_vba.o dcn20/display_rq_dlg_calc_20.o dcn20/display_mode_vba_20.o diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.h b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.h index 8c86b63ddf07..1e557ddcb638 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.h @@ -26,7 +26,6 @@ #ifndef __DML20_DISPLAY_RQ_DLG_CALC_H__ #define __DML20_DISPLAY_RQ_DLG_CALC_H__ -#include "../dml_common_defs.h" #include "../display_rq_dlg_helpers.h" struct display_mode_lib; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h index 0378406bf7e7..0d53e871a9d1 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h @@ -26,7 +26,6 @@ #ifndef __DML20V2_DISPLAY_RQ_DLG_CALC_H__ #define __DML20V2_DISPLAY_RQ_DLG_CALC_H__ -#include "../dml_common_defs.h" #include "../display_rq_dlg_helpers.h" struct display_mode_lib; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.h b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.h index 83e95f8cbff2..e8f7785e3fc6 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.h @@ -26,7 +26,7 @@ #ifndef __DML21_DISPLAY_RQ_DLG_CALC_H__ #define __DML21_DISPLAY_RQ_DLG_CALC_H__ -#include "../dml_common_defs.h" +#include "dm_services.h" #include "../display_rq_dlg_helpers.h" struct display_mode_lib; diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h index cf2758ca5b02..c77c3d827e4a 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h @@ -25,8 +25,10 @@ #ifndef __DISPLAY_MODE_LIB_H__ #define __DISPLAY_MODE_LIB_H__ - -#include "dml_common_defs.h" +#include "dm_services.h" +#include "dc_features.h" +#include "display_mode_structs.h" +#include "display_mode_enums.h" #include "display_mode_vba.h" enum dml_project { diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h index 5d82fc5a7ed7..3a734171f083 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h @@ -27,8 +27,6 @@ #ifndef __DML2_DISPLAY_MODE_VBA_H__ #define __DML2_DISPLAY_MODE_VBA_H__ -#include "dml_common_defs.h" - struct display_mode_lib; void ModeSupportAndSystemConfiguration(struct display_mode_lib *mode_lib); diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h index 1f24db830737..2555ef0358c2 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h @@ -26,7 +26,6 @@ #ifndef __DISPLAY_RQ_DLG_HELPERS_H__ #define __DISPLAY_RQ_DLG_HELPERS_H__ -#include "dml_common_defs.h" #include "display_mode_lib.h" /* Function: Printer functions diff --git 
a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h index 304164986bd8..9c06913ad767 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h @@ -26,8 +26,6 @@ #ifndef __DISPLAY_RQ_DLG_CALC_H__ #define __DISPLAY_RQ_DLG_CALC_H__ -#include "dml_common_defs.h" - struct display_mode_lib; #include "display_rq_dlg_helpers.h" diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c b/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c deleted file mode 100644 index 723af0b2dda0..000000000000 --- a/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright 2017 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#include "dml_common_defs.h" -#include "dcn_calc_math.h" - -#include "dml_inline_defs.h" - -double dml_round(double a) -{ - double round_pt = 0.5; - double ceil = dml_ceil(a, 1); - double floor = dml_floor(a, 1); - - if (a - floor >= round_pt) - return ceil; - else - return floor; -} - - diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.h b/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.h deleted file mode 100644 index f78cbae9db88..000000000000 --- a/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.h +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright 2017 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#ifndef __DC_COMMON_DEFS_H__ -#define __DC_COMMON_DEFS_H__ - -#include "dm_services.h" -#include "dc_features.h" -#include "display_mode_structs.h" -#include "display_mode_enums.h" - - -double dml_round(double a); - -#endif /* __DC_COMMON_DEFS_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h index ded71ea82413..02e06c9b3230 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h @@ -26,7 +26,6 @@ #ifndef __DML_INLINE_DEFS_H__ #define __DML_INLINE_DEFS_H__ -#include "dml_common_defs.h" #include "dcn_calc_math.h" #include "dml_logger.h" @@ -75,6 +74,18 @@ static inline double dml_floor(double a, double granularity) return (double) dcn_bw_floor2(a, granularity); } +static inline double dml_round(double a) +{ + double round_pt = 0.5; + double ceil = dml_ceil(a, 1); + double floor = dml_floor(a, 1); + + if (a - floor >= round_pt) + return ceil; + else + return floor; +} + static inline int dml_log2(double x) { return dml_round((double)dcn_bw_log(x, 2)); @@ -112,7 +123,7 @@ static inline double dml_log(double x, double base) static inline unsigned int dml_round_to_multiple(unsigned int num, unsigned int multiple, - bool up) + unsigned char up) { unsigned int remainder; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h index e57467d99d66..08307f3796e3 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h @@ -92,6 +92,11 @@ struct hw_sequencer_funcs { void (*get_position)(struct pipe_ctx **pipe_ctx, int num_pipes, struct crtc_position *position); int (*get_vupdate_offset_from_vsync)(struct pipe_ctx *pipe_ctx); + void (*calc_vupdate_position)( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + uint32_t *start_line, + uint32_t *end_line); void (*enable_per_frame_crtc_position_reset)(struct dc *dc, int group_size, struct pipe_ctx *grouped_pipes[]); void (*enable_timing_synchronization)(struct dc *dc, diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 4ede08a84e37..d96e3ce3e535 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -191,10 +191,11 @@ static const struct edid_quirk { { "HVR", 0xaa01, EDID_QUIRK_NON_DESKTOP }, { "HVR", 0xaa02, EDID_QUIRK_NON_DESKTOP }, - /* Oculus Rift DK1, DK2, and CV1 VR Headsets */ + /* Oculus Rift DK1, DK2, CV1 and Rift S VR Headsets */ { "OVR", 0x0001, EDID_QUIRK_NON_DESKTOP }, { "OVR", 0x0003, EDID_QUIRK_NON_DESKTOP }, { "OVR", 0x0004, EDID_QUIRK_NON_DESKTOP }, + { "OVR", 0x0012, EDID_QUIRK_NON_DESKTOP }, /* Windows Mixed Reality Headsets */ { "ACR", 0x7fce, EDID_QUIRK_NON_DESKTOP }, diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c index 3b0afa156d92..54def341c1db 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c @@ -238,8 +238,10 @@ static int submit_pin_objects(struct etnaviv_gem_submit *submit) } if ((submit->flags & ETNA_SUBMIT_SOFTPIN) && - submit->bos[i].va != mapping->iova) + submit->bos[i].va != mapping->iova) { + 
etnaviv_gem_mapping_unreference(mapping); return -EINVAL; + } atomic_inc(&etnaviv_obj->gpu_active); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c index e6795bafcbb9..75f9db8f7bec 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c @@ -453,7 +453,7 @@ static const struct etnaviv_pm_domain *pm_domain(const struct etnaviv_gpu *gpu, if (!(gpu->identity.features & meta->feature)) continue; - if (meta->nr_domains < (index - offset)) { + if (index - offset >= meta->nr_domains) { offset += meta->nr_domains; continue; } diff --git a/drivers/gpu/drm/ingenic/ingenic-drm.c b/drivers/gpu/drm/ingenic/ingenic-drm.c index 1754c0547069..548cc25ea4ab 100644 --- a/drivers/gpu/drm/ingenic/ingenic-drm.c +++ b/drivers/gpu/drm/ingenic/ingenic-drm.c @@ -328,8 +328,8 @@ static int ingenic_drm_crtc_atomic_check(struct drm_crtc *crtc, if (!drm_atomic_crtc_needs_modeset(state)) return 0; - if (state->mode.hdisplay > priv->soc_info->max_height || - state->mode.vdisplay > priv->soc_info->max_width) + if (state->mode.hdisplay > priv->soc_info->max_width || + state->mode.vdisplay > priv->soc_info->max_height) return -EINVAL; rate = clk_round_rate(priv->pix_clk, @@ -474,7 +474,7 @@ static int ingenic_drm_encoder_atomic_check(struct drm_encoder *encoder, static irqreturn_t ingenic_drm_irq_handler(int irq, void *arg) { - struct ingenic_drm *priv = arg; + struct ingenic_drm *priv = drm_device_get_priv(arg); unsigned int state; regmap_read(priv->map, JZ_REG_LCD_STATE, &state); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 8cdcd6e5f9e1..3596f3923ea3 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -850,7 +850,7 @@ extern void vmw_bo_bo_free(struct ttm_buffer_object *bo); extern int vmw_bo_init(struct vmw_private *dev_priv, struct vmw_buffer_object *vmw_bo, size_t size, struct ttm_placement *placement, - bool interuptable, + bool interruptible, void (*bo_free)(struct ttm_buffer_object *bo)); extern int vmw_user_bo_verify_access(struct ttm_buffer_object *bo, struct ttm_object_file *tfile); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c index 178a6cd1a06f..0f8d29397157 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c @@ -515,7 +515,7 @@ bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence) struct vmw_fence_manager *fman = fman_from_fence(fence); if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags)) - return 1; + return true; vmw_fences_update(fman); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index 7ef51fa84b01..126f93c0b0b8 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c @@ -1651,7 +1651,7 @@ vmw_gb_surface_reference_internal(struct drm_device *dev, struct vmw_surface_metadata *metadata; struct ttm_base_object *base; uint32_t backup_handle; - int ret = -EINVAL; + int ret; ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid, req->handle_type, &base); diff --git a/drivers/hwtracing/coresight/coresight-cti-platform.c b/drivers/hwtracing/coresight/coresight-cti-platform.c index b44d83142b62..2fdaeec80ee5 100644 --- a/drivers/hwtracing/coresight/coresight-cti-platform.c +++ b/drivers/hwtracing/coresight/coresight-cti-platform.c @@ -120,7 +120,7 @@ static int cti_plat_create_v8_etm_connection(struct device *dev, /* Can optionally 
have an etm node - return if not */ cs_fwnode = fwnode_find_reference(root_fwnode, CTI_DT_CSDEV_ASSOC, 0); - if (IS_ERR_OR_NULL(cs_fwnode)) + if (IS_ERR(cs_fwnode)) return 0; /* allocate memory */ @@ -393,7 +393,7 @@ static int cti_plat_create_connection(struct device *dev, /* associated device ? */ cs_fwnode = fwnode_find_reference(fwnode, CTI_DT_CSDEV_ASSOC, 0); - if (!IS_ERR_OR_NULL(cs_fwnode)) { + if (!IS_ERR(cs_fwnode)) { assoc_name = cti_plat_get_csdev_or_node_name(cs_fwnode, &csdev); fwnode_handle_put(cs_fwnode); diff --git a/drivers/i2c/algos/i2c-algo-pca.c b/drivers/i2c/algos/i2c-algo-pca.c index dff4e178c732..7f10312d1b88 100644 --- a/drivers/i2c/algos/i2c-algo-pca.c +++ b/drivers/i2c/algos/i2c-algo-pca.c @@ -542,7 +542,7 @@ int i2c_pca_add_numbered_bus(struct i2c_adapter *adap) EXPORT_SYMBOL(i2c_pca_add_numbered_bus); MODULE_AUTHOR("Ian Campbell <icampbell@arcom.com>, " - "Wolfram Sang <w.sang@pengutronix.de>"); + "Wolfram Sang <kernel@pengutronix.de>"); MODULE_DESCRIPTION("I2C-Bus PCA9564/PCA9665 algorithm"); MODULE_LICENSE("GPL"); diff --git a/drivers/i2c/busses/i2c-altera.c b/drivers/i2c/busses/i2c-altera.c index f5c00f903df3..16ddc26c00e6 100644 --- a/drivers/i2c/busses/i2c-altera.c +++ b/drivers/i2c/busses/i2c-altera.c @@ -70,6 +70,7 @@ * @isr_mask: cached copy of local ISR enables. * @isr_status: cached copy of local ISR status. * @lock: spinlock for IRQ synchronization. + * @isr_mutex: mutex for IRQ thread. */ struct altr_i2c_dev { void __iomem *base; @@ -86,6 +87,7 @@ struct altr_i2c_dev { u32 isr_mask; u32 isr_status; spinlock_t lock; /* IRQ synchronization */ + struct mutex isr_mutex; }; static void @@ -245,10 +247,11 @@ static irqreturn_t altr_i2c_isr(int irq, void *_dev) struct altr_i2c_dev *idev = _dev; u32 status = idev->isr_status; + mutex_lock(&idev->isr_mutex); if (!idev->msg) { dev_warn(idev->dev, "unexpected interrupt\n"); altr_i2c_int_clear(idev, ALTR_I2C_ALL_IRQ); - return IRQ_HANDLED; + goto out; } read = (idev->msg->flags & I2C_M_RD) != 0; @@ -301,6 +304,8 @@ static irqreturn_t altr_i2c_isr(int irq, void *_dev) complete(&idev->msg_complete); dev_dbg(idev->dev, "Message Complete\n"); } +out: + mutex_unlock(&idev->isr_mutex); return IRQ_HANDLED; } @@ -312,6 +317,7 @@ static int altr_i2c_xfer_msg(struct altr_i2c_dev *idev, struct i2c_msg *msg) u32 value; u8 addr = i2c_8bit_addr_from_msg(msg); + mutex_lock(&idev->isr_mutex); idev->msg = msg; idev->msg_len = msg->len; idev->buf = msg->buf; @@ -336,6 +342,7 @@ static int altr_i2c_xfer_msg(struct altr_i2c_dev *idev, struct i2c_msg *msg) altr_i2c_int_enable(idev, imask, true); altr_i2c_fill_tx_fifo(idev); } + mutex_unlock(&idev->isr_mutex); time_left = wait_for_completion_timeout(&idev->msg_complete, ALTR_I2C_XFER_TIMEOUT); @@ -409,6 +416,7 @@ static int altr_i2c_probe(struct platform_device *pdev) idev->dev = &pdev->dev; init_completion(&idev->msg_complete); spin_lock_init(&idev->lock); + mutex_init(&idev->isr_mutex); ret = device_property_read_u32(idev->dev, "fifo-size", &idev->fifo_size); diff --git a/drivers/i2c/busses/i2c-at91-master.c b/drivers/i2c/busses/i2c-at91-master.c index 0aba51a7df32..37b96ac9dfae 100644 --- a/drivers/i2c/busses/i2c-at91-master.c +++ b/drivers/i2c/busses/i2c-at91-master.c @@ -845,6 +845,18 @@ static int at91_init_twi_recovery_info(struct platform_device *pdev, PINCTRL_STATE_DEFAULT); dev->pinctrl_pins_gpio = pinctrl_lookup_state(dev->pinctrl, "gpio"); + if (IS_ERR(dev->pinctrl_pins_default) || + IS_ERR(dev->pinctrl_pins_gpio)) { + dev_info(&pdev->dev, "pinctrl states incomplete for 
recovery\n"); + return -EINVAL; + } + + /* + * pins will be taken as GPIO, so we might as well inform pinctrl about + * this and move the state to GPIO + */ + pinctrl_select_state(dev->pinctrl, dev->pinctrl_pins_gpio); + rinfo->sda_gpiod = devm_gpiod_get(&pdev->dev, "sda", GPIOD_IN); if (PTR_ERR(rinfo->sda_gpiod) == -EPROBE_DEFER) return -EPROBE_DEFER; @@ -855,9 +867,7 @@ static int at91_init_twi_recovery_info(struct platform_device *pdev, return -EPROBE_DEFER; if (IS_ERR(rinfo->sda_gpiod) || - IS_ERR(rinfo->scl_gpiod) || - IS_ERR(dev->pinctrl_pins_default) || - IS_ERR(dev->pinctrl_pins_gpio)) { + IS_ERR(rinfo->scl_gpiod)) { dev_info(&pdev->dev, "recovery information incomplete\n"); if (!IS_ERR(rinfo->sda_gpiod)) { gpiod_put(rinfo->sda_gpiod); @@ -867,9 +877,13 @@ static int at91_init_twi_recovery_info(struct platform_device *pdev, gpiod_put(rinfo->scl_gpiod); rinfo->scl_gpiod = NULL; } + pinctrl_select_state(dev->pinctrl, dev->pinctrl_pins_default); return -EINVAL; } + /* change the state of the pins back to their default state */ + pinctrl_select_state(dev->pinctrl, dev->pinctrl_pins_default); + dev_info(&pdev->dev, "using scl, sda for recovery\n"); rinfo->prepare_recovery = at91_prepare_twi_recovery; diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index a66912782064..1f1442dfcad7 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c @@ -7,7 +7,7 @@ * Mux support by Rodolfo Giometti <giometti@enneenne.com> and * Michael Lawnick <michael.lawnick.ext@nsn.com> * - * Copyright (C) 2013-2017 Wolfram Sang <wsa@the-dreams.de> + * Copyright (C) 2013-2017 Wolfram Sang <wsa@kernel.org> */ #define pr_fmt(fmt) "i2c-core: " fmt @@ -338,8 +338,10 @@ static int i2c_device_probe(struct device *dev) } else if (ACPI_COMPANION(dev)) { irq = i2c_acpi_get_irq(client); } - if (irq == -EPROBE_DEFER) - return irq; + if (irq == -EPROBE_DEFER) { + status = irq; + goto put_sync_adapter; + } if (irq < 0) irq = 0; @@ -353,15 +355,19 @@ static int i2c_device_probe(struct device *dev) */ if (!driver->id_table && !i2c_acpi_match_device(dev->driver->acpi_match_table, client) && - !i2c_of_match_device(dev->driver->of_match_table, client)) - return -ENODEV; + !i2c_of_match_device(dev->driver->of_match_table, client)) { + status = -ENODEV; + goto put_sync_adapter; + } if (client->flags & I2C_CLIENT_WAKE) { int wakeirq; wakeirq = of_irq_get_byname(dev->of_node, "wakeup"); - if (wakeirq == -EPROBE_DEFER) - return wakeirq; + if (wakeirq == -EPROBE_DEFER) { + status = wakeirq; + goto put_sync_adapter; + } device_init_wakeup(&client->dev, true); @@ -408,6 +414,10 @@ err_detach_pm_domain: err_clear_wakeup_irq: dev_pm_clear_wake_irq(&client->dev); device_init_wakeup(&client->dev, false); +put_sync_adapter: + if (client->flags & I2C_CLIENT_HOST_NOTIFY) + pm_runtime_put_sync(&client->adapter->dev); + return status; } diff --git a/drivers/i2c/i2c-core-of.c b/drivers/i2c/i2c-core-of.c index 6787c1f71483..3ed74aa4b44b 100644 --- a/drivers/i2c/i2c-core-of.c +++ b/drivers/i2c/i2c-core-of.c @@ -5,7 +5,7 @@ * Copyright (C) 2008 Jochen Friedrich <jochen@scram.de> * based on a previous patch from Jon Smirl <jonsmirl@gmail.com> * - * Copyright (C) 2013, 2018 Wolfram Sang <wsa@the-dreams.de> + * Copyright (C) 2013, 2018 Wolfram Sang <wsa@kernel.org> */ #include <dt-bindings/i2c/i2c.h> diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c index 0e16490eb3a1..5365199a31f4 100644 --- a/drivers/i2c/muxes/i2c-demux-pinctrl.c +++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c 
@@ -272,6 +272,7 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev) err_rollback_available: device_remove_file(&pdev->dev, &dev_attr_available_masters); err_rollback: + i2c_demux_deactivate_master(priv); for (j = 0; j < i; j++) { of_node_put(priv->chan[j].parent_np); of_changeset_destroy(&priv->chan[j].chgset); diff --git a/drivers/iio/accel/sca3000.c b/drivers/iio/accel/sca3000.c index 66d768d971e1..6e429072e44a 100644 --- a/drivers/iio/accel/sca3000.c +++ b/drivers/iio/accel/sca3000.c @@ -980,7 +980,7 @@ static int sca3000_read_data(struct sca3000_state *st, st->tx[0] = SCA3000_READ_REG(reg_address_high); ret = spi_sync_transfer(st->us, xfer, ARRAY_SIZE(xfer)); if (ret) { - dev_err(get_device(&st->us->dev), "problem reading register"); + dev_err(&st->us->dev, "problem reading register\n"); return ret; } diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c index ae622ee6d08c..dfc3a306c667 100644 --- a/drivers/iio/adc/stm32-adc.c +++ b/drivers/iio/adc/stm32-adc.c @@ -1812,18 +1812,18 @@ static int stm32_adc_chan_of_init(struct iio_dev *indio_dev) return 0; } -static int stm32_adc_dma_request(struct iio_dev *indio_dev) +static int stm32_adc_dma_request(struct device *dev, struct iio_dev *indio_dev) { struct stm32_adc *adc = iio_priv(indio_dev); struct dma_slave_config config; int ret; - adc->dma_chan = dma_request_chan(&indio_dev->dev, "rx"); + adc->dma_chan = dma_request_chan(dev, "rx"); if (IS_ERR(adc->dma_chan)) { ret = PTR_ERR(adc->dma_chan); if (ret != -ENODEV) { if (ret != -EPROBE_DEFER) - dev_err(&indio_dev->dev, + dev_err(dev, "DMA channel request failed with %d\n", ret); return ret; @@ -1930,7 +1930,7 @@ static int stm32_adc_probe(struct platform_device *pdev) if (ret < 0) return ret; - ret = stm32_adc_dma_request(indio_dev); + ret = stm32_adc_dma_request(dev, indio_dev); if (ret < 0) return ret; diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c index 76a60d93fe23..506bf519f64c 100644 --- a/drivers/iio/adc/stm32-dfsdm-adc.c +++ b/drivers/iio/adc/stm32-dfsdm-adc.c @@ -62,7 +62,7 @@ enum sd_converter_type { struct stm32_dfsdm_dev_data { int type; - int (*init)(struct iio_dev *indio_dev); + int (*init)(struct device *dev, struct iio_dev *indio_dev); unsigned int num_channels; const struct regmap_config *regmap_cfg; }; @@ -1365,11 +1365,12 @@ static void stm32_dfsdm_dma_release(struct iio_dev *indio_dev) } } -static int stm32_dfsdm_dma_request(struct iio_dev *indio_dev) +static int stm32_dfsdm_dma_request(struct device *dev, + struct iio_dev *indio_dev) { struct stm32_dfsdm_adc *adc = iio_priv(indio_dev); - adc->dma_chan = dma_request_chan(&indio_dev->dev, "rx"); + adc->dma_chan = dma_request_chan(dev, "rx"); if (IS_ERR(adc->dma_chan)) { int ret = PTR_ERR(adc->dma_chan); @@ -1425,7 +1426,7 @@ static int stm32_dfsdm_adc_chan_init_one(struct iio_dev *indio_dev, &adc->dfsdm->ch_list[ch->channel]); } -static int stm32_dfsdm_audio_init(struct iio_dev *indio_dev) +static int stm32_dfsdm_audio_init(struct device *dev, struct iio_dev *indio_dev) { struct iio_chan_spec *ch; struct stm32_dfsdm_adc *adc = iio_priv(indio_dev); @@ -1452,10 +1453,10 @@ static int stm32_dfsdm_audio_init(struct iio_dev *indio_dev) indio_dev->num_channels = 1; indio_dev->channels = ch; - return stm32_dfsdm_dma_request(indio_dev); + return stm32_dfsdm_dma_request(dev, indio_dev); } -static int stm32_dfsdm_adc_init(struct iio_dev *indio_dev) +static int stm32_dfsdm_adc_init(struct device *dev, struct iio_dev *indio_dev) { struct iio_chan_spec *ch; struct 
stm32_dfsdm_adc *adc = iio_priv(indio_dev); @@ -1499,17 +1500,17 @@ static int stm32_dfsdm_adc_init(struct iio_dev *indio_dev) init_completion(&adc->completion); /* Optionally request DMA */ - ret = stm32_dfsdm_dma_request(indio_dev); + ret = stm32_dfsdm_dma_request(dev, indio_dev); if (ret) { if (ret != -ENODEV) { if (ret != -EPROBE_DEFER) - dev_err(&indio_dev->dev, + dev_err(dev, "DMA channel request failed with %d\n", ret); return ret; } - dev_dbg(&indio_dev->dev, "No DMA support\n"); + dev_dbg(dev, "No DMA support\n"); return 0; } @@ -1622,7 +1623,7 @@ static int stm32_dfsdm_adc_probe(struct platform_device *pdev) adc->dfsdm->fl_list[adc->fl_id].sync_mode = val; adc->dev_data = dev_data; - ret = dev_data->init(iio); + ret = dev_data->init(dev, iio); if (ret < 0) return ret; diff --git a/drivers/iio/adc/ti-ads8344.c b/drivers/iio/adc/ti-ads8344.c index abe4b56c847c..8a8792010c20 100644 --- a/drivers/iio/adc/ti-ads8344.c +++ b/drivers/iio/adc/ti-ads8344.c @@ -32,16 +32,17 @@ struct ads8344 { u8 rx_buf[3]; }; -#define ADS8344_VOLTAGE_CHANNEL(chan, si) \ +#define ADS8344_VOLTAGE_CHANNEL(chan, addr) \ { \ .type = IIO_VOLTAGE, \ .indexed = 1, \ .channel = chan, \ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \ + .address = addr, \ } -#define ADS8344_VOLTAGE_CHANNEL_DIFF(chan1, chan2, si) \ +#define ADS8344_VOLTAGE_CHANNEL_DIFF(chan1, chan2, addr) \ { \ .type = IIO_VOLTAGE, \ .indexed = 1, \ @@ -50,6 +51,7 @@ struct ads8344 { .differential = 1, \ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \ + .address = addr, \ } static const struct iio_chan_spec ads8344_channels[] = { @@ -105,7 +107,7 @@ static int ads8344_read_raw(struct iio_dev *iio, switch (mask) { case IIO_CHAN_INFO_RAW: mutex_lock(&adc->lock); - *value = ads8344_adc_conversion(adc, channel->scan_index, + *value = ads8344_adc_conversion(adc, channel->address, channel->differential); mutex_unlock(&adc->lock); if (*value < 0) diff --git a/drivers/iio/chemical/atlas-sensor.c b/drivers/iio/chemical/atlas-sensor.c index 82d470561ad3..7b199ce16ecf 100644 --- a/drivers/iio/chemical/atlas-sensor.c +++ b/drivers/iio/chemical/atlas-sensor.c @@ -194,7 +194,19 @@ static const struct iio_chan_spec atlas_orp_channels[] = { }; static const struct iio_chan_spec atlas_do_channels[] = { - ATLAS_CONCENTRATION_CHANNEL(0, ATLAS_REG_DO_DATA), + { + .type = IIO_CONCENTRATION, + .address = ATLAS_REG_DO_DATA, + .info_mask_separate = + BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE), + .scan_index = 0, + .scan_type = { + .sign = 'u', + .realbits = 32, + .storagebits = 32, + .endianness = IIO_BE, + }, + }, IIO_CHAN_SOFT_TIMESTAMP(1), { .type = IIO_TEMP, diff --git a/drivers/iio/dac/vf610_dac.c b/drivers/iio/dac/vf610_dac.c index 71f8a5c471c4..7f1e9317c3f3 100644 --- a/drivers/iio/dac/vf610_dac.c +++ b/drivers/iio/dac/vf610_dac.c @@ -223,6 +223,7 @@ static int vf610_dac_probe(struct platform_device *pdev) return 0; error_iio_device_register: + vf610_dac_exit(info); clk_disable_unprepare(info->clk); return ret; diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c index 64ef07a30726..1cf98195f84d 100644 --- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c +++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c @@ -544,8 +544,10 @@ st_lsm6dsx_shub_write_raw(struct iio_dev *iio_dev, ref_sensor = iio_priv(hw->iio_devs[ST_LSM6DSX_ID_ACC]); odr = st_lsm6dsx_check_odr(ref_sensor, val, &odr_val); - if (odr < 0) - 
return odr; + if (odr < 0) { + err = odr; + goto release; + } sensor->ext_info.slv_odr = val; sensor->odr = odr; @@ -557,6 +559,7 @@ st_lsm6dsx_shub_write_raw(struct iio_dev *iio_dev, break; } +release: iio_device_release_direct_mode(iio_dev); return err; diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c index bf8e149d3191..e0a5e897e4b1 100644 --- a/drivers/infiniband/core/rdma_core.c +++ b/drivers/infiniband/core/rdma_core.c @@ -153,9 +153,9 @@ static int uverbs_destroy_uobject(struct ib_uobject *uobj, uobj->context = NULL; /* - * For DESTROY the usecnt is held write locked, the caller is expected - * to put it unlock and put the object when done with it. Only DESTROY - * can remove the IDR handle. + * For DESTROY the usecnt is not changed, the caller is expected to + * manage it via uobj_put_destroy(). Only DESTROY can remove the IDR + * handle. */ if (reason != RDMA_REMOVE_DESTROY) atomic_set(&uobj->usecnt, 0); @@ -187,7 +187,7 @@ static int uverbs_destroy_uobject(struct ib_uobject *uobj, /* * This calls uverbs_destroy_uobject() using the RDMA_REMOVE_DESTROY * sequence. It should only be used from command callbacks. On success the - * caller must pair this with rdma_lookup_put_uobject(LOOKUP_WRITE). This + * caller must pair this with uobj_put_destroy(). This * version requires the caller to have already obtained an * LOOKUP_DESTROY uobject kref. */ @@ -198,6 +198,13 @@ int uobj_destroy(struct ib_uobject *uobj, struct uverbs_attr_bundle *attrs) down_read(&ufile->hw_destroy_rwsem); + /* + * Once the uobject is destroyed by RDMA_REMOVE_DESTROY then it is left + * write locked as the callers put it back with UVERBS_LOOKUP_DESTROY. + * This is because any other concurrent thread can still see the object + * in the xarray due to RCU. Leaving it locked ensures nothing else will + * touch it. + */ ret = uverbs_try_lock_object(uobj, UVERBS_LOOKUP_WRITE); if (ret) goto out_unlock; @@ -216,7 +223,7 @@ out_unlock: /* * uobj_get_destroy destroys the HW object and returns a handle to the uobj * with a NULL object pointer. The caller must pair this with - * uverbs_put_destroy. + * uobj_put_destroy(). 
*/ struct ib_uobject *__uobj_get_destroy(const struct uverbs_api_object *obj, u32 id, struct uverbs_attr_bundle *attrs) @@ -250,8 +257,7 @@ int __uobj_perform_destroy(const struct uverbs_api_object *obj, u32 id, uobj = __uobj_get_destroy(obj, id, attrs); if (IS_ERR(uobj)) return PTR_ERR(uobj); - - rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE); + uobj_put_destroy(uobj); return 0; } diff --git a/drivers/infiniband/hw/mlx5/flow.c b/drivers/infiniband/hw/mlx5/flow.c index 69cb7e6e8955..3a0601c2052c 100644 --- a/drivers/infiniband/hw/mlx5/flow.c +++ b/drivers/infiniband/hw/mlx5/flow.c @@ -404,7 +404,10 @@ static bool mlx5_ib_modify_header_supported(struct mlx5_ib_dev *dev) { return MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, max_modify_header_actions) || - MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, max_modify_header_actions); + MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, + max_modify_header_actions) || + MLX5_CAP_FLOWTABLE_RDMA_TX(dev->mdev, + max_modify_header_actions); } static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_ACTION_CREATE_MODIFY_HEADER)( diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 65e0e24d463b..566b42f3fb18 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -3698,12 +3698,13 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev, if (!dest_num) rule_dst = NULL; } else { + if (flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) + flow_act.action |= + MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO; if (is_egress) flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW; - else - flow_act.action |= - dest_num ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST : - MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO; + else if (dest_num) + flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; } if ((spec->flow_context.flags & FLOW_CONTEXT_HAS_TAG) && @@ -3747,30 +3748,6 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev, return _create_flow_rule(dev, ft_prio, flow_attr, dst, 0, NULL); } -static struct mlx5_ib_flow_handler *create_dont_trap_rule(struct mlx5_ib_dev *dev, - struct mlx5_ib_flow_prio *ft_prio, - struct ib_flow_attr *flow_attr, - struct mlx5_flow_destination *dst) -{ - struct mlx5_ib_flow_handler *handler_dst = NULL; - struct mlx5_ib_flow_handler *handler = NULL; - - handler = create_flow_rule(dev, ft_prio, flow_attr, NULL); - if (!IS_ERR(handler)) { - handler_dst = create_flow_rule(dev, ft_prio, - flow_attr, dst); - if (IS_ERR(handler_dst)) { - mlx5_del_flow_rules(handler->rule); - ft_prio->refcount--; - kfree(handler); - handler = handler_dst; - } else { - list_add(&handler_dst->list, &handler->list); - } - } - - return handler; -} enum { LEFTOVERS_MC, LEFTOVERS_UC, @@ -3974,15 +3951,11 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp, } if (flow_attr->type == IB_FLOW_ATTR_NORMAL) { - if (flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) { - handler = create_dont_trap_rule(dev, ft_prio, - flow_attr, dst); - } else { - underlay_qpn = (mqp->flags & MLX5_IB_QP_UNDERLAY) ? - mqp->underlay_qpn : 0; - handler = _create_flow_rule(dev, ft_prio, flow_attr, - dst, underlay_qpn, ucmd); - } + underlay_qpn = (mqp->flags & IB_QP_CREATE_SOURCE_QPN) ? 
+ mqp->underlay_qpn : + 0; + handler = _create_flow_rule(dev, ft_prio, flow_attr, dst, + underlay_qpn, ucmd); } else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT || flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) { handler = create_leftovers_rule(dev, ft_prio, flow_attr, diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index a401931189b7..44683073be0c 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -1439,6 +1439,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, if (is_odp_mr(mr)) { to_ib_umem_odp(mr->umem)->private = mr; + init_waitqueue_head(&mr->q_deferred_work); atomic_set(&mr->num_deferred_work, 0); err = xa_err(xa_store(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key), &mr->mmkey, diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c index 568b21eb6ea1..021df0654ba7 100644 --- a/drivers/infiniband/hw/qib/qib_sysfs.c +++ b/drivers/infiniband/hw/qib/qib_sysfs.c @@ -760,7 +760,7 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num, qib_dev_err(dd, "Skipping linkcontrol sysfs info, (err %d) port %u\n", ret, port_num); - goto bail; + goto bail_link; } kobject_uevent(&ppd->pport_kobj, KOBJ_ADD); @@ -770,7 +770,7 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num, qib_dev_err(dd, "Skipping sl2vl sysfs info, (err %d) port %u\n", ret, port_num); - goto bail_link; + goto bail_sl; } kobject_uevent(&ppd->sl2vl_kobj, KOBJ_ADD); @@ -780,7 +780,7 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num, qib_dev_err(dd, "Skipping diag_counters sysfs info, (err %d) port %u\n", ret, port_num); - goto bail_sl; + goto bail_diagc; } kobject_uevent(&ppd->diagc_kobj, KOBJ_ADD); @@ -793,7 +793,7 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num, qib_dev_err(dd, "Skipping Congestion Control sysfs info, (err %d) port %u\n", ret, port_num); - goto bail_diagc; + goto bail_cc; } kobject_uevent(&ppd->pport_cc_kobj, KOBJ_ADD); @@ -854,6 +854,7 @@ void qib_verbs_unregister_sysfs(struct qib_devdata *dd) &cc_table_bin_attr); kobject_put(&ppd->pport_cc_kobj); } + kobject_put(&ppd->diagc_kobj); kobject_put(&ppd->sl2vl_kobj); kobject_put(&ppd->pport_kobj); } diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c index e580ae9cc55a..780fd2dfc07e 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c @@ -829,7 +829,7 @@ static int pvrdma_pci_probe(struct pci_dev *pdev, !(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) { dev_err(&pdev->dev, "PCI BAR region not MMIO\n"); ret = -ENOMEM; - goto err_free_device; + goto err_disable_pdev; } ret = pci_request_regions(pdev, DRV_NAME); diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c index 559e5fd3bad8..1662216be66d 100644 --- a/drivers/infiniband/sw/siw/siw_cm.c +++ b/drivers/infiniband/sw/siw/siw_cm.c @@ -947,16 +947,8 @@ static void siw_accept_newconn(struct siw_cep *cep) siw_cep_get(new_cep); new_s->sk->sk_user_data = new_cep; - if (siw_tcp_nagle == false) { - int val = 1; - - rv = kernel_setsockopt(new_s, SOL_TCP, TCP_NODELAY, - (char *)&val, sizeof(val)); - if (rv) { - siw_dbg_cep(cep, "setsockopt NODELAY error: %d\n", rv); - goto error; - } - } + if (siw_tcp_nagle == false) + tcp_sock_set_nodelay(new_s->sk); new_cep->state = SIW_EPSTATE_AWAIT_MPAREQ; rv = siw_cm_queue_work(new_cep, SIW_CM_WORK_MPATIMEOUT); @@ -1312,17 +1304,14 @@ static void 
siw_cm_llp_state_change(struct sock *sk) static int kernel_bindconnect(struct socket *s, struct sockaddr *laddr, struct sockaddr *raddr) { - int rv, flags = 0, s_val = 1; + int rv, flags = 0; size_t size = laddr->sa_family == AF_INET ? sizeof(struct sockaddr_in) : sizeof(struct sockaddr_in6); /* * Make address available again asap. */ - rv = kernel_setsockopt(s, SOL_SOCKET, SO_REUSEADDR, (char *)&s_val, - sizeof(s_val)); - if (rv < 0) - return rv; + sock_set_reuseaddr(s->sk); rv = s->ops->bind(s, laddr, size); if (rv < 0) @@ -1389,16 +1378,8 @@ int siw_connect(struct iw_cm_id *id, struct iw_cm_conn_param *params) siw_dbg_qp(qp, "kernel_bindconnect: error %d\n", rv); goto error; } - if (siw_tcp_nagle == false) { - int val = 1; - - rv = kernel_setsockopt(s, SOL_TCP, TCP_NODELAY, (char *)&val, - sizeof(val)); - if (rv) { - siw_dbg_qp(qp, "setsockopt NODELAY error: %d\n", rv); - goto error; - } - } + if (siw_tcp_nagle == false) + tcp_sock_set_nodelay(s->sk); cep = siw_cep_alloc(sdev); if (!cep) { rv = -ENOMEM; @@ -1781,7 +1762,7 @@ int siw_create_listen(struct iw_cm_id *id, int backlog) struct siw_cep *cep = NULL; struct siw_device *sdev = to_siw_dev(id->device); int addr_family = id->local_addr.ss_family; - int rv = 0, s_val; + int rv = 0; if (addr_family != AF_INET && addr_family != AF_INET6) return -EAFNOSUPPORT; @@ -1793,13 +1774,8 @@ int siw_create_listen(struct iw_cm_id *id, int backlog) /* * Allow binding local port when still in TIME_WAIT from last close. */ - s_val = 1; - rv = kernel_setsockopt(s, SOL_SOCKET, SO_REUSEADDR, (char *)&s_val, - sizeof(s_val)); - if (rv) { - siw_dbg(id->device, "setsockopt error: %d\n", rv); - goto error; - } + sock_set_reuseaddr(s->sk); + if (addr_family == AF_INET) { struct sockaddr_in *laddr = &to_sockaddr_in(id->local_addr); diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index e188a95984b5..9a3379c49541 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h @@ -377,8 +377,12 @@ struct ipoib_dev_priv { struct ipoib_rx_buf *rx_ring; struct ipoib_tx_buf *tx_ring; + /* cyclic ring variables for managing tx_ring, for UD only */ unsigned int tx_head; unsigned int tx_tail; + /* cyclic ring variables for counting overall outstanding send WRs */ + unsigned int global_tx_head; + unsigned int global_tx_tail; struct ib_sge tx_sge[MAX_SKB_FRAGS + 1]; struct ib_ud_wr tx_wr; struct ib_wc send_wc[MAX_SEND_CQE]; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index c59e00a0881f..9bf0fa30df28 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c @@ -756,7 +756,8 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_ return; } - if ((priv->tx_head - priv->tx_tail) == ipoib_sendq_size - 1) { + if ((priv->global_tx_head - priv->global_tx_tail) == + ipoib_sendq_size - 1) { ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n", tx->qp->qp_num); netif_stop_queue(dev); @@ -786,7 +787,7 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_ } else { netif_trans_update(dev); ++tx->tx_head; - ++priv->tx_head; + ++priv->global_tx_head; } } @@ -820,10 +821,11 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc) netif_tx_lock(dev); ++tx->tx_tail; - ++priv->tx_tail; + ++priv->global_tx_tail; if (unlikely(netif_queue_stopped(dev) && - (priv->tx_head - priv->tx_tail) <= ipoib_sendq_size >> 1 && + ((priv->global_tx_head - 
priv->global_tx_tail) <= + ipoib_sendq_size >> 1) && test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))) netif_wake_queue(dev); @@ -1232,8 +1234,9 @@ timeout: dev_kfree_skb_any(tx_req->skb); netif_tx_lock_bh(p->dev); ++p->tx_tail; - ++priv->tx_tail; - if (unlikely(priv->tx_head - priv->tx_tail == ipoib_sendq_size >> 1) && + ++priv->global_tx_tail; + if (unlikely((priv->global_tx_head - priv->global_tx_tail) <= + ipoib_sendq_size >> 1) && netif_queue_stopped(p->dev) && test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) netif_wake_queue(p->dev); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index c332b4761816..da3c5315bbb5 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -407,9 +407,11 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc) dev_kfree_skb_any(tx_req->skb); ++priv->tx_tail; + ++priv->global_tx_tail; if (unlikely(netif_queue_stopped(dev) && - ((priv->tx_head - priv->tx_tail) <= ipoib_sendq_size >> 1) && + ((priv->global_tx_head - priv->global_tx_tail) <= + ipoib_sendq_size >> 1) && test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))) netif_wake_queue(dev); @@ -634,7 +636,8 @@ int ipoib_send(struct net_device *dev, struct sk_buff *skb, else priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM; /* increase the tx_head after send success, but use it for queue state */ - if (priv->tx_head - priv->tx_tail == ipoib_sendq_size - 1) { + if ((priv->global_tx_head - priv->global_tx_tail) == + ipoib_sendq_size - 1) { ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n"); netif_stop_queue(dev); } @@ -662,6 +665,7 @@ int ipoib_send(struct net_device *dev, struct sk_buff *skb, rc = priv->tx_head; ++priv->tx_head; + ++priv->global_tx_head; } return rc; } @@ -807,6 +811,7 @@ int ipoib_ib_dev_stop_default(struct net_device *dev) ipoib_dma_unmap_tx(priv, tx_req); dev_kfree_skb_any(tx_req->skb); ++priv->tx_tail; + ++priv->global_tx_tail; } for (i = 0; i < ipoib_recvq_size; ++i) { diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 81b8227214f1..ceec24d45185 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -1184,9 +1184,11 @@ static void ipoib_timeout(struct net_device *dev, unsigned int txqueue) ipoib_warn(priv, "transmit timeout: latency %d msecs\n", jiffies_to_msecs(jiffies - dev_trans_start(dev))); - ipoib_warn(priv, "queue stopped %d, tx_head %u, tx_tail %u\n", - netif_queue_stopped(dev), - priv->tx_head, priv->tx_tail); + ipoib_warn(priv, + "queue stopped %d, tx_head %u, tx_tail %u, global_tx_head %u, global_tx_tail %u\n", + netif_queue_stopped(dev), priv->tx_head, priv->tx_tail, + priv->global_tx_head, priv->global_tx_tail); + /* XXX reset QP, etc. 
*/ } @@ -1701,7 +1703,7 @@ static int ipoib_dev_init_default(struct net_device *dev) goto out_rx_ring_cleanup; } - /* priv->tx_head, tx_tail & tx_outstanding are already 0 */ + /* priv->tx_head, tx_tail and global_tx_tail/head are already 0 */ if (ipoib_transport_dev_init(dev, priv->ca)) { pr_warn("%s: ipoib_transport_dev_init failed\n", diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c index cb6e3a5f509c..0d57e51b8ba1 100644 --- a/drivers/input/evdev.c +++ b/drivers/input/evdev.c @@ -326,20 +326,6 @@ static int evdev_fasync(int fd, struct file *file, int on) return fasync_helper(fd, file, on, &client->fasync); } -static int evdev_flush(struct file *file, fl_owner_t id) -{ - struct evdev_client *client = file->private_data; - struct evdev *evdev = client->evdev; - - mutex_lock(&evdev->mutex); - - if (evdev->exist && !client->revoked) - input_flush_device(&evdev->handle, file); - - mutex_unlock(&evdev->mutex); - return 0; -} - static void evdev_free(struct device *dev) { struct evdev *evdev = container_of(dev, struct evdev, dev); @@ -453,6 +439,10 @@ static int evdev_release(struct inode *inode, struct file *file) unsigned int i; mutex_lock(&evdev->mutex); + + if (evdev->exist && !client->revoked) + input_flush_device(&evdev->handle, file); + evdev_ungrab(evdev, client); mutex_unlock(&evdev->mutex); @@ -1310,7 +1300,6 @@ static const struct file_operations evdev_fops = { .compat_ioctl = evdev_ioctl_compat, #endif .fasync = evdev_fasync, - .flush = evdev_flush, .llseek = no_llseek, }; diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index 6b40a1c68f9f..c77cdb3b62b5 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c @@ -459,6 +459,16 @@ static const u8 xboxone_fw2015_init[] = { }; /* + * This packet is required for Xbox One S (0x045e:0x02ea) + * and Xbox One Elite Series 2 (0x045e:0x0b00) pads to + * initialize the controller that was previously used in + * Bluetooth mode. + */ +static const u8 xboxone_s_init[] = { + 0x05, 0x20, 0x00, 0x0f, 0x06 +}; + +/* * This packet is required for the Titanfall 2 Xbox One pads * (0x0e6f:0x0165) to finish initialization and for Hori pads * (0x0f0d:0x0067) to make the analog sticks work. 
@@ -516,6 +526,8 @@ static const struct xboxone_init_packet xboxone_init_packets[] = { XBOXONE_INIT_PKT(0x0e6f, 0x0165, xboxone_hori_init), XBOXONE_INIT_PKT(0x0f0d, 0x0067, xboxone_hori_init), XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_fw2015_init), + XBOXONE_INIT_PKT(0x045e, 0x02ea, xboxone_s_init), + XBOXONE_INIT_PKT(0x045e, 0x0b00, xboxone_s_init), XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_init1), XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_init2), XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init), diff --git a/drivers/input/keyboard/applespi.c b/drivers/input/keyboard/applespi.c index d38398526965..14362ebab9a9 100644 --- a/drivers/input/keyboard/applespi.c +++ b/drivers/input/keyboard/applespi.c @@ -186,7 +186,7 @@ struct touchpad_protocol { u8 number_of_fingers; u8 clicked2; u8 unknown3[16]; - struct tp_finger fingers[0]; + struct tp_finger fingers[]; }; /** diff --git a/drivers/input/keyboard/cros_ec_keyb.c b/drivers/input/keyboard/cros_ec_keyb.c index 2b71c5a51f90..fc1793ca2f17 100644 --- a/drivers/input/keyboard/cros_ec_keyb.c +++ b/drivers/input/keyboard/cros_ec_keyb.c @@ -347,18 +347,14 @@ static int cros_ec_keyb_info(struct cros_ec_device *ec_dev, params->info_type = info_type; params->event_type = event_type; - ret = cros_ec_cmd_xfer(ec_dev, msg); - if (ret < 0) { - dev_warn(ec_dev->dev, "Transfer error %d/%d: %d\n", - (int)info_type, (int)event_type, ret); - } else if (msg->result == EC_RES_INVALID_VERSION) { + ret = cros_ec_cmd_xfer_status(ec_dev, msg); + if (ret == -ENOTSUPP) { /* With older ECs we just return 0 for everything */ memset(result, 0, result_size); ret = 0; - } else if (msg->result != EC_RES_SUCCESS) { - dev_warn(ec_dev->dev, "Error getting info %d/%d: %d\n", - (int)info_type, (int)event_type, msg->result); - ret = -EPROTO; + } else if (ret < 0) { + dev_warn(ec_dev->dev, "Transfer error %d/%d: %d\n", + (int)info_type, (int)event_type, ret); } else if (ret != result_size) { dev_warn(ec_dev->dev, "Wrong size %d/%d: %d != %zu\n", (int)info_type, (int)event_type, diff --git a/drivers/input/keyboard/dlink-dir685-touchkeys.c b/drivers/input/keyboard/dlink-dir685-touchkeys.c index b0ead7199c40..a69dcc3bd30c 100644 --- a/drivers/input/keyboard/dlink-dir685-touchkeys.c +++ b/drivers/input/keyboard/dlink-dir685-touchkeys.c @@ -143,7 +143,7 @@ MODULE_DEVICE_TABLE(of, dir685_tk_of_match); static struct i2c_driver dir685_tk_i2c_driver = { .driver = { - .name = "dlin-dir685-touchkeys", + .name = "dlink-dir685-touchkeys", .of_match_table = of_match_ptr(dir685_tk_of_match), }, .probe = dir685_tk_probe, diff --git a/drivers/input/misc/axp20x-pek.c b/drivers/input/misc/axp20x-pek.c index c8f87df93a50..9c6386b2af33 100644 --- a/drivers/input/misc/axp20x-pek.c +++ b/drivers/input/misc/axp20x-pek.c @@ -205,8 +205,11 @@ ATTRIBUTE_GROUPS(axp20x); static irqreturn_t axp20x_pek_irq(int irq, void *pwr) { - struct input_dev *idev = pwr; - struct axp20x_pek *axp20x_pek = input_get_drvdata(idev); + struct axp20x_pek *axp20x_pek = pwr; + struct input_dev *idev = axp20x_pek->input; + + if (!idev) + return IRQ_HANDLED; /* * The power-button is connected to ground so a falling edge (dbf) @@ -225,22 +228,9 @@ static irqreturn_t axp20x_pek_irq(int irq, void *pwr) static int axp20x_pek_probe_input_device(struct axp20x_pek *axp20x_pek, struct platform_device *pdev) { - struct axp20x_dev *axp20x = axp20x_pek->axp20x; struct input_dev *idev; int error; - axp20x_pek->irq_dbr = platform_get_irq_byname(pdev, "PEK_DBR"); - if (axp20x_pek->irq_dbr < 0) - return axp20x_pek->irq_dbr; - 
axp20x_pek->irq_dbr = regmap_irq_get_virq(axp20x->regmap_irqc, - axp20x_pek->irq_dbr); - - axp20x_pek->irq_dbf = platform_get_irq_byname(pdev, "PEK_DBF"); - if (axp20x_pek->irq_dbf < 0) - return axp20x_pek->irq_dbf; - axp20x_pek->irq_dbf = regmap_irq_get_virq(axp20x->regmap_irqc, - axp20x_pek->irq_dbf); - axp20x_pek->input = devm_input_allocate_device(&pdev->dev); if (!axp20x_pek->input) return -ENOMEM; @@ -255,24 +245,6 @@ static int axp20x_pek_probe_input_device(struct axp20x_pek *axp20x_pek, input_set_drvdata(idev, axp20x_pek); - error = devm_request_any_context_irq(&pdev->dev, axp20x_pek->irq_dbr, - axp20x_pek_irq, 0, - "axp20x-pek-dbr", idev); - if (error < 0) { - dev_err(&pdev->dev, "Failed to request dbr IRQ#%d: %d\n", - axp20x_pek->irq_dbr, error); - return error; - } - - error = devm_request_any_context_irq(&pdev->dev, axp20x_pek->irq_dbf, - axp20x_pek_irq, 0, - "axp20x-pek-dbf", idev); - if (error < 0) { - dev_err(&pdev->dev, "Failed to request dbf IRQ#%d: %d\n", - axp20x_pek->irq_dbf, error); - return error; - } - error = input_register_device(idev); if (error) { dev_err(&pdev->dev, "Can't register input device: %d\n", @@ -280,8 +252,6 @@ static int axp20x_pek_probe_input_device(struct axp20x_pek *axp20x_pek, return error; } - device_init_wakeup(&pdev->dev, true); - return 0; } @@ -339,6 +309,18 @@ static int axp20x_pek_probe(struct platform_device *pdev) axp20x_pek->axp20x = dev_get_drvdata(pdev->dev.parent); + axp20x_pek->irq_dbr = platform_get_irq_byname(pdev, "PEK_DBR"); + if (axp20x_pek->irq_dbr < 0) + return axp20x_pek->irq_dbr; + axp20x_pek->irq_dbr = regmap_irq_get_virq( + axp20x_pek->axp20x->regmap_irqc, axp20x_pek->irq_dbr); + + axp20x_pek->irq_dbf = platform_get_irq_byname(pdev, "PEK_DBF"); + if (axp20x_pek->irq_dbf < 0) + return axp20x_pek->irq_dbf; + axp20x_pek->irq_dbf = regmap_irq_get_virq( + axp20x_pek->axp20x->regmap_irqc, axp20x_pek->irq_dbf); + if (axp20x_pek_should_register_input(axp20x_pek, pdev)) { error = axp20x_pek_probe_input_device(axp20x_pek, pdev); if (error) @@ -347,6 +329,26 @@ static int axp20x_pek_probe(struct platform_device *pdev) axp20x_pek->info = (struct axp20x_info *)match->driver_data; + error = devm_request_any_context_irq(&pdev->dev, axp20x_pek->irq_dbr, + axp20x_pek_irq, 0, + "axp20x-pek-dbr", axp20x_pek); + if (error < 0) { + dev_err(&pdev->dev, "Failed to request dbr IRQ#%d: %d\n", + axp20x_pek->irq_dbr, error); + return error; + } + + error = devm_request_any_context_irq(&pdev->dev, axp20x_pek->irq_dbf, + axp20x_pek_irq, 0, + "axp20x-pek-dbf", axp20x_pek); + if (error < 0) { + dev_err(&pdev->dev, "Failed to request dbf IRQ#%d: %d\n", + axp20x_pek->irq_dbf, error); + return error; + } + + device_init_wakeup(&pdev->dev, true); + platform_set_drvdata(pdev, axp20x_pek); return 0; diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index 4d2036209b45..758dae8d6500 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -170,6 +170,7 @@ static const char * const smbus_pnp_ids[] = { "LEN005b", /* P50 */ "LEN005e", /* T560 */ "LEN006c", /* T470s */ + "LEN007a", /* T470s */ "LEN0071", /* T480 */ "LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */ "LEN0073", /* X1 Carbon G5 (Elantech) */ diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c index 190b9974526b..258d5fe3d395 100644 --- a/drivers/input/rmi4/rmi_driver.c +++ b/drivers/input/rmi4/rmi_driver.c @@ -205,7 +205,7 @@ static irqreturn_t rmi_irq_fn(int irq, void *dev_id) if (count) { 
kfree(attn_data.data); - attn_data.data = NULL; + drvdata->attn_data.data = NULL; } if (!kfifo_is_empty(&drvdata->attn_fifo)) @@ -1210,7 +1210,8 @@ static int rmi_driver_probe(struct device *dev) if (data->input) { rmi_driver_set_input_name(rmi_dev, data->input); if (!rmi_dev->xport->input) { - if (input_register_device(data->input)) { + retval = input_register_device(data->input); + if (retval) { dev_err(dev, "%s: Failed to register input device.\n", __func__); goto err_destroy_functions; diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index 08e919dbeb5d..7e048b557462 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -662,6 +662,13 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "P65xRP"), }, }, + { + /* Lenovo ThinkPad Twist S230u */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "33474HU"), + }, + }, { } }; diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c index 14c577c16b16..2289f9638116 100644 --- a/drivers/input/touchscreen/elants_i2c.c +++ b/drivers/input/touchscreen/elants_i2c.c @@ -19,6 +19,7 @@ */ +#include <linux/bits.h> #include <linux/module.h> #include <linux/input.h> #include <linux/interrupt.h> @@ -73,6 +74,7 @@ #define FW_POS_STATE 1 #define FW_POS_TOTAL 2 #define FW_POS_XY 3 +#define FW_POS_TOOL_TYPE 33 #define FW_POS_CHECKSUM 34 #define FW_POS_WIDTH 35 #define FW_POS_PRESSURE 45 @@ -842,6 +844,7 @@ static void elants_i2c_mt_event(struct elants_data *ts, u8 *buf) { struct input_dev *input = ts->input; unsigned int n_fingers; + unsigned int tool_type; u16 finger_state; int i; @@ -852,6 +855,10 @@ static void elants_i2c_mt_event(struct elants_data *ts, u8 *buf) dev_dbg(&ts->client->dev, "n_fingers: %u, state: %04x\n", n_fingers, finger_state); + /* Note: all fingers have the same tool type */ + tool_type = buf[FW_POS_TOOL_TYPE] & BIT(0) ? 
+ MT_TOOL_FINGER : MT_TOOL_PALM; + for (i = 0; i < MAX_CONTACT_NUM && n_fingers; i++) { if (finger_state & 1) { unsigned int x, y, p, w; @@ -867,7 +874,7 @@ static void elants_i2c_mt_event(struct elants_data *ts, u8 *buf) i, x, y, p, w); input_mt_slot(input, i); - input_mt_report_slot_state(input, MT_TOOL_FINGER, true); + input_mt_report_slot_state(input, tool_type, true); input_event(input, EV_ABS, ABS_MT_POSITION_X, x); input_event(input, EV_ABS, ABS_MT_POSITION_Y, y); input_event(input, EV_ABS, ABS_MT_PRESSURE, p); @@ -1307,6 +1314,8 @@ static int elants_i2c_probe(struct i2c_client *client, input_set_abs_params(ts->input, ABS_MT_POSITION_Y, 0, ts->y_max, 0, 0); input_set_abs_params(ts->input, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0); input_set_abs_params(ts->input, ABS_MT_PRESSURE, 0, 255, 0, 0); + input_set_abs_params(ts->input, ABS_MT_TOOL_TYPE, + 0, MT_TOOL_PALM, 0, 0); input_abs_set_res(ts->input, ABS_MT_POSITION_X, ts->x_res); input_abs_set_res(ts->input, ABS_MT_POSITION_Y, ts->y_res); input_abs_set_res(ts->input, ABS_MT_TOUCH_MAJOR, 1); diff --git a/drivers/input/touchscreen/mms114.c b/drivers/input/touchscreen/mms114.c index 69c6d559eeb0..2ef1adaed9af 100644 --- a/drivers/input/touchscreen/mms114.c +++ b/drivers/input/touchscreen/mms114.c @@ -91,15 +91,15 @@ static int __mms114_read_reg(struct mms114_data *data, unsigned int reg, if (reg <= MMS114_MODE_CONTROL && reg + len > MMS114_MODE_CONTROL) BUG(); - /* Write register: use repeated start */ + /* Write register */ xfer[0].addr = client->addr; - xfer[0].flags = I2C_M_TEN | I2C_M_NOSTART; + xfer[0].flags = client->flags & I2C_M_TEN; xfer[0].len = 1; xfer[0].buf = &buf; /* Read data */ xfer[1].addr = client->addr; - xfer[1].flags = I2C_M_RD; + xfer[1].flags = (client->flags & I2C_M_TEN) | I2C_M_RD; xfer[1].len = len; xfer[1].buf = val; @@ -428,10 +428,8 @@ static int mms114_probe(struct i2c_client *client, const void *match_data; int error; - if (!i2c_check_functionality(client->adapter, - I2C_FUNC_PROTOCOL_MANGLING)) { - dev_err(&client->dev, - "Need i2c bus that supports protocol mangling\n"); + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { + dev_err(&client->dev, "Not supported I2C adapter\n"); return -ENODEV; } diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c index 16d70201de4a..397cb1d3f481 100644 --- a/drivers/input/touchscreen/usbtouchscreen.c +++ b/drivers/input/touchscreen/usbtouchscreen.c @@ -182,6 +182,7 @@ static const struct usb_device_id usbtouch_devices[] = { #endif #ifdef CONFIG_TOUCHSCREEN_USB_IRTOUCH + {USB_DEVICE(0x255e, 0x0001), .driver_info = DEVTYPE_IRTOUCH}, {USB_DEVICE(0x595a, 0x0001), .driver_info = DEVTYPE_IRTOUCH}, {USB_DEVICE(0x6615, 0x0001), .driver_info = DEVTYPE_IRTOUCH}, {USB_DEVICE(0x6615, 0x0012), .driver_info = DEVTYPE_IRTOUCH_HIRES}, diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 1dc3718560d0..2883ac389abb 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -127,7 +127,8 @@ static inline int get_acpihid_device_id(struct device *dev, return -ENODEV; list_for_each_entry(p, &acpihid_map, list) { - if (acpi_dev_hid_uid_match(adev, p->hid, p->uid)) { + if (acpi_dev_hid_uid_match(adev, p->hid, + p->uid[0] ? 
p->uid : NULL)) { if (entry) *entry = p; return p->devid; diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index 2b9a67ecc6ac..5b81fd16f5fa 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -1329,8 +1329,8 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu, } case IVHD_DEV_ACPI_HID: { u16 devid; - u8 hid[ACPIHID_HID_LEN] = {0}; - u8 uid[ACPIHID_UID_LEN] = {0}; + u8 hid[ACPIHID_HID_LEN]; + u8 uid[ACPIHID_UID_LEN]; int ret; if (h->type != 0x40) { @@ -1347,6 +1347,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu, break; } + uid[0] = '\0'; switch (e->uidf) { case UID_NOT_PRESENT: @@ -1361,8 +1362,8 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu, break; case UID_IS_CHARACTER: - memcpy(uid, (u8 *)(&e->uid), ACPIHID_UID_LEN - 1); - uid[ACPIHID_UID_LEN - 1] = '\0'; + memcpy(uid, &e->uid, e->uidl); + uid[e->uidl] = '\0'; break; default: diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 7b375421afba..03d6a26687bc 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -510,7 +510,7 @@ struct iommu_group *iommu_group_alloc(void) NULL, "%d", group->id); if (ret) { ida_simple_remove(&iommu_group_ida, group->id); - kfree(group); + kobject_put(&group->kobj); return ERR_PTR(ret); } @@ -693,6 +693,15 @@ out: return ret; } +static bool iommu_is_attach_deferred(struct iommu_domain *domain, + struct device *dev) +{ + if (domain->ops->is_attach_deferred) + return domain->ops->is_attach_deferred(domain, dev); + + return false; +} + /** * iommu_group_add_device - add a device to an iommu group * @group: the group into which to add the device (reference should be held) @@ -747,7 +756,7 @@ rename: mutex_lock(&group->mutex); list_add_tail(&device->list, &group->devices); - if (group->domain) + if (group->domain && !iommu_is_attach_deferred(group->domain, dev)) ret = __iommu_attach_device(group->domain, dev); mutex_unlock(&group->mutex); if (ret) @@ -1653,9 +1662,6 @@ static int __iommu_attach_device(struct iommu_domain *domain, struct device *dev) { int ret; - if ((domain->ops->is_attach_deferred != NULL) && - domain->ops->is_attach_deferred(domain, dev)) - return 0; if (unlikely(domain->ops->attach_dev == NULL)) return -ENODEV; @@ -1727,8 +1733,7 @@ EXPORT_SYMBOL_GPL(iommu_sva_unbind_gpasid); static void __iommu_detach_device(struct iommu_domain *domain, struct device *dev) { - if ((domain->ops->is_attach_deferred != NULL) && - domain->ops->is_attach_deferred(domain, dev)) + if (iommu_is_attach_deferred(domain, dev)) return; if (unlikely(domain->ops->detach_dev == NULL)) diff --git a/drivers/ipack/carriers/tpci200.c b/drivers/ipack/carriers/tpci200.c index 23445ebfda5c..ec71063fff76 100644 --- a/drivers/ipack/carriers/tpci200.c +++ b/drivers/ipack/carriers/tpci200.c @@ -306,6 +306,7 @@ static int tpci200_register(struct tpci200_board *tpci200) "(bn 0x%X, sn 0x%X) failed to map driver user space!", tpci200->info->pdev->bus->number, tpci200->info->pdev->devfn); + res = -ENOMEM; goto out_release_mem8_space; } diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c index 06038b325b02..55da6428ceb0 100644 --- a/drivers/misc/cardreader/rtsx_pcr.c +++ b/drivers/misc/cardreader/rtsx_pcr.c @@ -142,6 +142,9 @@ static void rtsx_comm_pm_full_on(struct rtsx_pcr *pcr) rtsx_disable_aspm(pcr); + /* Fixes DMA transfer timeout issue after disabling ASPM on RTS5260 */ + msleep(1); + if (option->ltr_enabled) rtsx_set_ltr_latency(pcr, option->ltr_active_latency); diff 
--git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c index 204d807e755b..b32c825a0945 100644 --- a/drivers/misc/mei/client.c +++ b/drivers/misc/mei/client.c @@ -266,6 +266,7 @@ void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid) down_write(&dev->me_clients_rwsem); me_cl = __mei_me_cl_by_uuid(dev, uuid); __mei_me_cl_del(dev, me_cl); + mei_me_cl_put(me_cl); up_write(&dev->me_clients_rwsem); } @@ -287,6 +288,7 @@ void mei_me_cl_rm_by_uuid_id(struct mei_device *dev, const uuid_le *uuid, u8 id) down_write(&dev->me_clients_rwsem); me_cl = __mei_me_cl_by_uuid_id(dev, uuid, id); __mei_me_cl_del(dev, me_cl); + mei_me_cl_put(me_cl); up_write(&dev->me_clients_rwsem); } diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index c5367e2c8487..7896952de1ac 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -2484,8 +2484,8 @@ static int mmc_rpmb_chrdev_release(struct inode *inode, struct file *filp) struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev, struct mmc_rpmb_data, chrdev); - put_device(&rpmb->dev); mmc_blk_put(rpmb->md); + put_device(&rpmb->dev); return 0; } diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 3f716466fcfd..e368f2dabf20 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -4000,9 +4000,6 @@ int sdhci_setup_host(struct sdhci_host *host) mmc_hostname(mmc), host->version); } - if (host->quirks & SDHCI_QUIRK_BROKEN_CQE) - mmc->caps2 &= ~MMC_CAP2_CQE; - if (host->quirks & SDHCI_QUIRK_FORCE_DMA) host->flags |= SDHCI_USE_SDMA; else if (!(host->caps & SDHCI_CAN_DO_SDMA)) @@ -4539,6 +4536,12 @@ int __sdhci_add_host(struct sdhci_host *host) struct mmc_host *mmc = host->mmc; int ret; + if ((mmc->caps2 & MMC_CAP2_CQE) && + (host->quirks & SDHCI_QUIRK_BROKEN_CQE)) { + mmc->caps2 &= ~MMC_CAP2_CQE; + mmc->cqe_ops = NULL; + } + host->complete_wq = alloc_workqueue("sdhci", flags, 0); if (!host->complete_wq) return -ENOMEM; diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index 2916674208b3..29d41003d6e0 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c @@ -555,7 +555,7 @@ static int mtd_nvmem_add(struct mtd_info *mtd) config.id = -1; config.dev = &mtd->dev; - config.name = mtd->name; + config.name = dev_name(&mtd->dev); config.owner = THIS_MODULE; config.reg_read = mtd_nvmem_reg_read; config.size = mtd->size; diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c index e4e3ceeac38f..8f9ffb46a09f 100644 --- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c +++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c @@ -2728,9 +2728,8 @@ static int brcmnand_resume(struct device *dev) flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0); } - if (has_edu(ctrl)) + if (has_edu(ctrl)) { ctrl->edu_config = edu_readl(ctrl, EDU_CONFIG); - else { edu_writel(ctrl, EDU_CONFIG, ctrl->edu_config); edu_readl(ctrl, EDU_CONFIG); brcmnand_edu_init(ctrl); diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c index b6bb358b96ce..e2c382ffc5b6 100644 --- a/drivers/mtd/nand/spi/core.c +++ b/drivers/mtd/nand/spi/core.c @@ -1089,6 +1089,10 @@ static int spinand_init(struct spinand_device *spinand) mtd->oobavail = ret; + /* Propagate ECC information to mtd_info */ + mtd->ecc_strength = nand->eccreq.strength; + mtd->ecc_step_size = nand->eccreq.step_size; + return 0; err_cleanup_nanddev: diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c index 54646c2c2744..ac2bdba8bb1a 100644 --- a/drivers/mtd/ubi/debug.c +++ b/drivers/mtd/ubi/debug.c @@ -393,9 +393,6 
@@ static void *eraseblk_count_seq_start(struct seq_file *s, loff_t *pos) { struct ubi_device *ubi = s->private; - if (*pos == 0) - return SEQ_START_TOKEN; - if (*pos < ubi->peb_count) return pos; @@ -409,8 +406,6 @@ static void *eraseblk_count_seq_next(struct seq_file *s, void *v, loff_t *pos) { struct ubi_device *ubi = s->private; - if (v == SEQ_START_TOKEN) - return pos; (*pos)++; if (*pos < ubi->peb_count) @@ -432,11 +427,8 @@ static int eraseblk_count_seq_show(struct seq_file *s, void *iter) int err; /* If this is the start, print a header */ - if (iter == SEQ_START_TOKEN) { - seq_puts(s, - "physical_block_number\terase_count\tblock_status\tread_status\n"); - return 0; - } + if (*block_number == 0) + seq_puts(s, "physical_block_number\terase_count\n"); err = ubi_io_is_bad(ubi, *block_number); if (err) diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c index 007481557191..9b8346638f69 100644 --- a/drivers/net/bonding/bond_sysfs_slave.c +++ b/drivers/net/bonding/bond_sysfs_slave.c @@ -149,8 +149,10 @@ int bond_sysfs_slave_add(struct slave *slave) err = kobject_init_and_add(&slave->kobj, &slave_ktype, &(slave->dev->dev.kobj), "bonding_slave"); - if (err) + if (err) { + kobject_put(&slave->kobj); return err; + } for (a = slave_attrs; *a; ++a) { err = sysfs_create_file(&slave->kobj, &((*a)->attr)); diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c index 04d59bede5ea..74503cacf594 100644 --- a/drivers/net/can/ifi_canfd/ifi_canfd.c +++ b/drivers/net/can/ifi_canfd/ifi_canfd.c @@ -947,8 +947,11 @@ static int ifi_canfd_plat_probe(struct platform_device *pdev) u32 id, rev; addr = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(addr)) + return PTR_ERR(addr); + irq = platform_get_irq(pdev, 0); - if (IS_ERR(addr) || irq < 0) + if (irq < 0) return -EINVAL; id = readl(addr + IFI_CANFD_IP_ID); diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c index e3ba8ab0cbf4..e2c6cf4b2228 100644 --- a/drivers/net/can/sun4i_can.c +++ b/drivers/net/can/sun4i_can.c @@ -792,7 +792,7 @@ static int sun4ican_probe(struct platform_device *pdev) addr = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(addr)) { - err = -EBUSY; + err = PTR_ERR(addr); goto exit; } diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index ceb8be653182..1df05841ab6b 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -1325,7 +1325,6 @@ int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering) u16 pvid, new_pvid; b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid); - new_pvid = pvid; if (!vlan_filtering) { /* Filtering is currently enabled, use the default PVID since * the bridge does not expect tagging anymore diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c index 1207c3095027..aaa12d73784e 100644 --- a/drivers/net/dsa/b53/b53_srab.c +++ b/drivers/net/dsa/b53/b53_srab.c @@ -609,7 +609,7 @@ static int b53_srab_probe(struct platform_device *pdev) priv->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->regs)) - return -ENOMEM; + return PTR_ERR(priv->regs); dev = b53_switch_alloc(&pdev->dev, &b53_srab_ops, priv); if (!dev) diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index d30542fc556a..8dcb8a49ab67 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -628,11 +628,8 @@ mt7530_cpu_port_enable(struct mt7530_priv *priv, mt7530_write(priv, MT7530_PVC_P(port), 
PORT_SPEC_TAG); - /* Disable auto learning on the cpu port */ - mt7530_set(priv, MT7530_PSC_P(port), SA_DIS); - - /* Unknown unicast frame fordwarding to the cpu port */ - mt7530_set(priv, MT7530_MFC, UNU_FFP(BIT(port))); + /* Unknown multicast frame forwarding to the cpu port */ + mt7530_rmw(priv, MT7530_MFC, UNM_FFP_MASK, UNM_FFP(BIT(port))); /* Set CPU port number */ if (priv->id == ID_MT7621) @@ -1288,8 +1285,6 @@ mt7530_setup(struct dsa_switch *ds) /* Enable and reset MIB counters */ mt7530_mib_reset(ds); - mt7530_clear(priv, MT7530_MFC, UNU_FFP_MASK); - for (i = 0; i < MT7530_NUM_PORTS; i++) { /* Disable forwarding by default on all ports */ mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK, diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h index d45eb7540703..14de60d0b9ca 100644 --- a/drivers/net/dsa/mt7530.h +++ b/drivers/net/dsa/mt7530.h @@ -31,6 +31,7 @@ enum { #define MT7530_MFC 0x10 #define BC_FFP(x) (((x) & 0xff) << 24) #define UNM_FFP(x) (((x) & 0xff) << 16) +#define UNM_FFP_MASK UNM_FFP(~0) #define UNU_FFP(x) (((x) & 0xff) << 8) #define UNU_FFP_MASK UNU_FFP(~0) #define CPU_EN BIT(7) diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index d2b114c96952..66648986e6e3 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -103,13 +103,17 @@ static void felix_vlan_add(struct dsa_switch *ds, int port, const struct switchdev_obj_port_vlan *vlan) { struct ocelot *ocelot = ds->priv; + u16 flags = vlan->flags; u16 vid; int err; + if (dsa_is_cpu_port(ds, port)) + flags &= ~BRIDGE_VLAN_INFO_UNTAGGED; + for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { err = ocelot_vlan_add(ocelot, port, vid, - vlan->flags & BRIDGE_VLAN_INFO_PVID, - vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED); + flags & BRIDGE_VLAN_INFO_PVID, + flags & BRIDGE_VLAN_INFO_UNTAGGED); if (err) { dev_err(ds->dev, "Failed to add VLAN %d to port %d: %d\n", vid, port, err); @@ -414,6 +418,7 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports) struct ocelot *ocelot = &felix->ocelot; phy_interface_t *port_phy_modes; resource_size_t switch_base; + struct resource res; int port, i, err; ocelot->num_phys_ports = num_phys_ports; @@ -448,17 +453,16 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports) for (i = 0; i < TARGET_MAX; i++) { struct regmap *target; - struct resource *res; if (!felix->info->target_io_res[i].name) continue; - res = &felix->info->target_io_res[i]; - res->flags = IORESOURCE_MEM; - res->start += switch_base; - res->end += switch_base; + memcpy(&res, &felix->info->target_io_res[i], sizeof(res)); + res.flags = IORESOURCE_MEM; + res.start += switch_base; + res.end += switch_base; - target = ocelot_regmap_init(ocelot, res); + target = ocelot_regmap_init(ocelot, &res); if (IS_ERR(target)) { dev_err(ocelot->dev, "Failed to map device memory space\n"); @@ -479,7 +483,6 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports) for (port = 0; port < num_phys_ports; port++) { struct ocelot_port *ocelot_port; void __iomem *port_regs; - struct resource *res; ocelot_port = devm_kzalloc(ocelot->dev, sizeof(struct ocelot_port), @@ -491,12 +494,12 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports) return -ENOMEM; } - res = &felix->info->port_io_res[port]; - res->flags = IORESOURCE_MEM; - res->start += switch_base; - res->end += switch_base; + memcpy(&res, &felix->info->port_io_res[port], sizeof(res)); + res.flags = IORESOURCE_MEM; + res.start += switch_base; + res.end += 
switch_base; - port_regs = devm_ioremap_resource(ocelot->dev, res); + port_regs = devm_ioremap_resource(ocelot->dev, &res); if (IS_ERR(port_regs)) { dev_err(ocelot->dev, "failed to map registers for port %d\n", port); @@ -595,6 +598,7 @@ static int felix_setup(struct dsa_switch *ds) ANA_FLOODING, tc); ds->mtu_enforcement_ingress = true; + ds->configure_vlan_while_not_filtering = true; /* It looks like the MAC/PCS interrupt register - PM0_IEVENT (0x8040) * isn't instantiated for the Felix PF. * In-band AN may take a few ms to complete, so we need to poll. diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h index 352f7b940af7..a891736ca006 100644 --- a/drivers/net/dsa/ocelot/felix.h +++ b/drivers/net/dsa/ocelot/felix.h @@ -9,9 +9,9 @@ /* Platform-specific information */ struct felix_info { - struct resource *target_io_res; - struct resource *port_io_res; - struct resource *imdio_res; + const struct resource *target_io_res; + const struct resource *port_io_res; + const struct resource *imdio_res; const struct reg_field *regfields; const u32 *const *map; const struct ocelot_ops *ops; diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c index 85e34d85cc51..1dd9e348152d 100644 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -340,10 +340,8 @@ static const u32 *vsc9959_regmap[] = { [GCB] = vsc9959_gcb_regmap, }; -/* Addresses are relative to the PCI device's base address and - * will be fixed up at ioremap time. - */ -static struct resource vsc9959_target_io_res[] = { +/* Addresses are relative to the PCI device's base address */ +static const struct resource vsc9959_target_io_res[] = { [ANA] = { .start = 0x0280000, .end = 0x028ffff, @@ -386,7 +384,7 @@ static struct resource vsc9959_target_io_res[] = { }, }; -static struct resource vsc9959_port_io_res[] = { +static const struct resource vsc9959_port_io_res[] = { { .start = 0x0100000, .end = 0x010ffff, @@ -422,7 +420,7 @@ static struct resource vsc9959_port_io_res[] = { /* Port MAC 0 Internal MDIO bus through which the SerDes acting as an * SGMII/QSGMII MAC PCS can be found. */ -static struct resource vsc9959_imdio_res = { +static const struct resource vsc9959_imdio_res = { .start = 0x8030, .end = 0x8040, .name = "imdio", @@ -1118,7 +1116,7 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot) struct device *dev = ocelot->dev; resource_size_t imdio_base; void __iomem *imdio_regs; - struct resource *res; + struct resource res; struct enetc_hw *hw; struct mii_bus *bus; int port; @@ -1135,12 +1133,12 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot) imdio_base = pci_resource_start(felix->pdev, felix->info->imdio_pci_bar); - res = felix->info->imdio_res; - res->flags = IORESOURCE_MEM; - res->start += imdio_base; - res->end += imdio_base; + memcpy(&res, felix->info->imdio_res, sizeof(res)); + res.flags = IORESOURCE_MEM; + res.start += imdio_base; + res.end += imdio_base; - imdio_regs = devm_ioremap_resource(dev, res); + imdio_regs = devm_ioremap_resource(dev, &res); if (IS_ERR(imdio_regs)) { dev_err(dev, "failed to map internal MDIO registers\n"); return PTR_ERR(imdio_regs); diff --git a/drivers/net/dsa/sja1105/sja1105.h b/drivers/net/dsa/sja1105/sja1105.h index 198d2a7d7f95..29ed21687295 100644 --- a/drivers/net/dsa/sja1105/sja1105.h +++ b/drivers/net/dsa/sja1105/sja1105.h @@ -84,6 +84,7 @@ struct sja1105_info { * the egress timestamps. 
*/ int ptpegr_ts_bytes; + int num_cbs_shapers; const struct sja1105_dynamic_table_ops *dyn_ops; const struct sja1105_table_ops *static_ops; const struct sja1105_regs *regs; @@ -218,6 +219,7 @@ struct sja1105_private { struct mutex mgmt_lock; bool expect_dsa_8021q; enum sja1105_vlan_state vlan_state; + struct sja1105_cbs_entry *cbs; struct sja1105_tagger_data tagger_data; struct sja1105_ptp_data ptp_data; struct sja1105_tas_data tas_data; @@ -321,24 +323,6 @@ int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port, int sja1105pqrs_fdb_del(struct dsa_switch *ds, int port, const unsigned char *addr, u16 vid); -/* Common implementations for the static and dynamic configs */ -size_t sja1105_l2_forwarding_entry_packing(void *buf, void *entry_ptr, - enum packing_op op); -size_t sja1105pqrs_l2_lookup_entry_packing(void *buf, void *entry_ptr, - enum packing_op op); -size_t sja1105et_l2_lookup_entry_packing(void *buf, void *entry_ptr, - enum packing_op op); -size_t sja1105_vlan_lookup_entry_packing(void *buf, void *entry_ptr, - enum packing_op op); -size_t sja1105_retagging_entry_packing(void *buf, void *entry_ptr, - enum packing_op op); -size_t sja1105pqrs_mac_config_entry_packing(void *buf, void *entry_ptr, - enum packing_op op); -size_t sja1105pqrs_avb_params_entry_packing(void *buf, void *entry_ptr, - enum packing_op op); -size_t sja1105_vl_lookup_entry_packing(void *buf, void *entry_ptr, - enum packing_op op); - /* From sja1105_flower.c */ int sja1105_cls_flower_del(struct dsa_switch *ds, int port, struct flow_cls_offload *cls, bool ingress); diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c index 2a8fbd7fdedc..4471eeccc293 100644 --- a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c +++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c @@ -127,17 +127,29 @@ #define SJA1105ET_SIZE_L2_LOOKUP_PARAMS_DYN_CMD \ SJA1105_SIZE_DYN_CMD +#define SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_DYN_CMD \ + (SJA1105_SIZE_DYN_CMD + SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY) + #define SJA1105ET_SIZE_GENERAL_PARAMS_DYN_CMD \ SJA1105_SIZE_DYN_CMD +#define SJA1105PQRS_SIZE_GENERAL_PARAMS_DYN_CMD \ + (SJA1105_SIZE_DYN_CMD + SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY) + #define SJA1105PQRS_SIZE_AVB_PARAMS_DYN_CMD \ (SJA1105_SIZE_DYN_CMD + SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY) #define SJA1105_SIZE_RETAGGING_DYN_CMD \ (SJA1105_SIZE_DYN_CMD + SJA1105_SIZE_RETAGGING_ENTRY) +#define SJA1105ET_SIZE_CBS_DYN_CMD \ + (SJA1105_SIZE_DYN_CMD + SJA1105ET_SIZE_CBS_ENTRY) + +#define SJA1105PQRS_SIZE_CBS_DYN_CMD \ + (SJA1105_SIZE_DYN_CMD + SJA1105PQRS_SIZE_CBS_ENTRY) + #define SJA1105_MAX_DYN_CMD_SIZE \ - SJA1105PQRS_SIZE_MAC_CONFIG_DYN_CMD + SJA1105PQRS_SIZE_GENERAL_PARAMS_DYN_CMD struct sja1105_dyn_cmd { bool search; @@ -495,6 +507,18 @@ sja1105et_l2_lookup_params_entry_packing(void *buf, void *entry_ptr, } static void +sja1105pqrs_l2_lookup_params_cmd_packing(void *buf, + struct sja1105_dyn_cmd *cmd, + enum packing_op op) +{ + u8 *p = buf + SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY; + const int size = SJA1105_SIZE_DYN_CMD; + + sja1105_packing(p, &cmd->valid, 31, 31, size, op); + sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op); +} + +static void sja1105et_general_params_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd, enum packing_op op) { @@ -517,6 +541,18 @@ sja1105et_general_params_entry_packing(void *buf, void *entry_ptr, } static void +sja1105pqrs_general_params_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd, + enum packing_op op) +{ + u8 *p = buf + 
SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY; + const int size = SJA1105_SIZE_DYN_CMD; + + sja1105_packing(p, &cmd->valid, 31, 31, size, op); + sja1105_packing(p, &cmd->errors, 30, 30, size, op); + sja1105_packing(p, &cmd->rdwrset, 28, 28, size, op); +} + +static void sja1105pqrs_avb_params_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd, enum packing_op op) { @@ -542,6 +578,60 @@ sja1105_retagging_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd, sja1105_packing(p, &cmd->index, 5, 0, size, op); } +static void sja1105et_cbs_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd, + enum packing_op op) +{ + u8 *p = buf + SJA1105ET_SIZE_CBS_ENTRY; + const int size = SJA1105_SIZE_DYN_CMD; + + sja1105_packing(p, &cmd->valid, 31, 31, size, op); + sja1105_packing(p, &cmd->index, 19, 16, size, op); +} + +static size_t sja1105et_cbs_entry_packing(void *buf, void *entry_ptr, + enum packing_op op) +{ + const size_t size = SJA1105ET_SIZE_CBS_ENTRY; + struct sja1105_cbs_entry *entry = entry_ptr; + u8 *cmd = buf + size; + u32 *p = buf; + + sja1105_packing(cmd, &entry->port, 5, 3, SJA1105_SIZE_DYN_CMD, op); + sja1105_packing(cmd, &entry->prio, 2, 0, SJA1105_SIZE_DYN_CMD, op); + sja1105_packing(p + 3, &entry->credit_lo, 31, 0, size, op); + sja1105_packing(p + 2, &entry->credit_hi, 31, 0, size, op); + sja1105_packing(p + 1, &entry->send_slope, 31, 0, size, op); + sja1105_packing(p + 0, &entry->idle_slope, 31, 0, size, op); + return size; +} + +static void sja1105pqrs_cbs_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd, + enum packing_op op) +{ + u8 *p = buf + SJA1105PQRS_SIZE_CBS_ENTRY; + const int size = SJA1105_SIZE_DYN_CMD; + + sja1105_packing(p, &cmd->valid, 31, 31, size, op); + sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op); + sja1105_packing(p, &cmd->errors, 29, 29, size, op); + sja1105_packing(p, &cmd->index, 3, 0, size, op); +} + +static size_t sja1105pqrs_cbs_entry_packing(void *buf, void *entry_ptr, + enum packing_op op) +{ + const size_t size = SJA1105PQRS_SIZE_CBS_ENTRY; + struct sja1105_cbs_entry *entry = entry_ptr; + + sja1105_packing(buf, &entry->port, 159, 157, size, op); + sja1105_packing(buf, &entry->prio, 156, 154, size, op); + sja1105_packing(buf, &entry->credit_lo, 153, 122, size, op); + sja1105_packing(buf, &entry->credit_hi, 121, 90, size, op); + sja1105_packing(buf, &entry->send_slope, 89, 58, size, op); + sja1105_packing(buf, &entry->idle_slope, 57, 26, size, op); + return size; +} + #define OP_READ BIT(0) #define OP_WRITE BIT(1) #define OP_DEL BIT(2) @@ -631,6 +721,14 @@ struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = { .packed_size = SJA1105_SIZE_RETAGGING_DYN_CMD, .addr = 0x31, }, + [BLK_IDX_CBS] = { + .entry_packing = sja1105et_cbs_entry_packing, + .cmd_packing = sja1105et_cbs_cmd_packing, + .max_entry_count = SJA1105ET_MAX_CBS_COUNT, + .access = OP_WRITE, + .packed_size = SJA1105ET_SIZE_CBS_DYN_CMD, + .addr = 0x2c, + }, [BLK_IDX_XMII_PARAMS] = {0}, }; @@ -693,12 +791,12 @@ struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN] = { [BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS] = {0}, [BLK_IDX_VL_FORWARDING_PARAMS] = {0}, [BLK_IDX_L2_LOOKUP_PARAMS] = { - .entry_packing = sja1105et_l2_lookup_params_entry_packing, - .cmd_packing = sja1105et_l2_lookup_params_cmd_packing, + .entry_packing = sja1105pqrs_l2_lookup_params_entry_packing, + .cmd_packing = sja1105pqrs_l2_lookup_params_cmd_packing, .max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT, .access = (OP_READ | OP_WRITE), - .packed_size = SJA1105ET_SIZE_L2_LOOKUP_PARAMS_DYN_CMD, - .addr = 0x38, + 
.packed_size = SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_DYN_CMD, + .addr = 0x54, }, [BLK_IDX_L2_FORWARDING_PARAMS] = {0}, [BLK_IDX_AVB_PARAMS] = { @@ -710,12 +808,12 @@ struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN] = { .addr = 0x8003, }, [BLK_IDX_GENERAL_PARAMS] = { - .entry_packing = sja1105et_general_params_entry_packing, - .cmd_packing = sja1105et_general_params_cmd_packing, + .entry_packing = sja1105pqrs_general_params_entry_packing, + .cmd_packing = sja1105pqrs_general_params_cmd_packing, .max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT, - .access = OP_WRITE, - .packed_size = SJA1105ET_SIZE_GENERAL_PARAMS_DYN_CMD, - .addr = 0x34, + .access = (OP_READ | OP_WRITE), + .packed_size = SJA1105PQRS_SIZE_GENERAL_PARAMS_DYN_CMD, + .addr = 0x3B, }, [BLK_IDX_RETAGGING] = { .entry_packing = sja1105_retagging_entry_packing, @@ -725,6 +823,14 @@ struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN] = { .packed_size = SJA1105_SIZE_RETAGGING_DYN_CMD, .addr = 0x38, }, + [BLK_IDX_CBS] = { + .entry_packing = sja1105pqrs_cbs_entry_packing, + .cmd_packing = sja1105pqrs_cbs_cmd_packing, + .max_entry_count = SJA1105PQRS_MAX_CBS_COUNT, + .access = OP_WRITE, + .packed_size = SJA1105PQRS_SIZE_CBS_DYN_CMD, + .addr = 0x32, + }, [BLK_IDX_XMII_PARAMS] = {0}, }; diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c index 44ce7882dfb1..789b288cc78b 100644 --- a/drivers/net/dsa/sja1105/sja1105_main.c +++ b/drivers/net/dsa/sja1105/sja1105_main.c @@ -1640,6 +1640,92 @@ static void sja1105_bridge_leave(struct dsa_switch *ds, int port, sja1105_bridge_member(ds, port, br, false); } +#define BYTES_PER_KBIT (1000LL / 8) + +static int sja1105_find_unused_cbs_shaper(struct sja1105_private *priv) +{ + int i; + + for (i = 0; i < priv->info->num_cbs_shapers; i++) + if (!priv->cbs[i].idle_slope && !priv->cbs[i].send_slope) + return i; + + return -1; +} + +static int sja1105_delete_cbs_shaper(struct sja1105_private *priv, int port, + int prio) +{ + int i; + + for (i = 0; i < priv->info->num_cbs_shapers; i++) { + struct sja1105_cbs_entry *cbs = &priv->cbs[i]; + + if (cbs->port == port && cbs->prio == prio) { + memset(cbs, 0, sizeof(*cbs)); + return sja1105_dynamic_config_write(priv, BLK_IDX_CBS, + i, cbs, true); + } + } + + return 0; +} + +static int sja1105_setup_tc_cbs(struct dsa_switch *ds, int port, + struct tc_cbs_qopt_offload *offload) +{ + struct sja1105_private *priv = ds->priv; + struct sja1105_cbs_entry *cbs; + int index; + + if (!offload->enable) + return sja1105_delete_cbs_shaper(priv, port, offload->queue); + + index = sja1105_find_unused_cbs_shaper(priv); + if (index < 0) + return -ENOSPC; + + cbs = &priv->cbs[index]; + cbs->port = port; + cbs->prio = offload->queue; + /* locredit and sendslope are negative by definition. In hardware, + * positive values must be provided, and the negative sign is implicit. + */ + cbs->credit_hi = offload->hicredit; + cbs->credit_lo = abs(offload->locredit); + /* User space is in kbits/sec, hardware in bytes/sec */ + cbs->idle_slope = offload->idleslope * BYTES_PER_KBIT; + cbs->send_slope = abs(offload->sendslope * BYTES_PER_KBIT); + /* Convert the negative values from 64-bit 2's complement + * to 32-bit 2's complement (for the case of 0x80000000 whose + * negative is still negative). 
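+ * + * (A worked example with hypothetical tc-cbs parameters: on a 1 Gbps port, idleslope = 20000 kbit/s and sendslope = -980000 kbit/s map to idle_slope = 20000 * 125 = 2500000 bytes/s and send_slope = abs(-980000) * 125 = 122500000 bytes/s, since BYTES_PER_KBIT = 1000 / 8 = 125.)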
+ */ + cbs->credit_lo &= GENMASK_ULL(31, 0); + cbs->send_slope &= GENMASK_ULL(31, 0); + + return sja1105_dynamic_config_write(priv, BLK_IDX_CBS, index, cbs, + true); +} + +static int sja1105_reload_cbs(struct sja1105_private *priv) +{ + int rc = 0, i; + + for (i = 0; i < priv->info->num_cbs_shapers; i++) { + struct sja1105_cbs_entry *cbs = &priv->cbs[i]; + + if (!cbs->idle_slope && !cbs->send_slope) + continue; + + rc = sja1105_dynamic_config_write(priv, BLK_IDX_CBS, i, cbs, + true); + if (rc) + break; + } + + return rc; +} + static const char * const sja1105_reset_reasons[] = { [SJA1105_VLAN_FILTERING] = "VLAN filtering", [SJA1105_RX_HWTSTAMPING] = "RX timestamping", @@ -1754,6 +1840,10 @@ out_unlock_ptp: sja1105_sgmii_pcs_force_speed(priv, speed); } } + + rc = sja1105_reload_cbs(priv); + if (rc < 0) + goto out; out: mutex_unlock(&priv->mgmt_lock); @@ -2656,6 +2746,10 @@ static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled) sja1105_frame_memory_partitioning(priv); + rc = sja1105_build_vlan_table(priv, false); + if (rc) + return rc; + rc = sja1105_static_config_reload(priv, SJA1105_VLAN_FILTERING); if (rc) dev_err(ds->dev, "Failed to change VLAN Ethertype\n"); @@ -3131,6 +3225,8 @@ static int sja1105_port_setup_tc(struct dsa_switch *ds, int port, switch (type) { case TC_SETUP_QDISC_TAPRIO: return sja1105_setup_tc_taprio(ds, port, type_data); + case TC_SETUP_QDISC_CBS: + return sja1105_setup_tc_cbs(ds, port, type_data); default: return -EOPNOTSUPP; } @@ -3408,6 +3504,14 @@ static int sja1105_probe(struct spi_device *spi) if (rc) return rc; + if (IS_ENABLED(CONFIG_NET_SCH_CBS)) { + priv->cbs = devm_kcalloc(dev, priv->info->num_cbs_shapers, + sizeof(struct sja1105_cbs_entry), + GFP_KERNEL); + if (!priv->cbs) + return -ENOMEM; + } + /* Connections between dsa_port and sja1105_port */ for (port = 0; port < SJA1105_NUM_PORTS; port++) { struct sja1105_port *sp = &priv->ports[port]; diff --git a/drivers/net/dsa/sja1105/sja1105_spi.c b/drivers/net/dsa/sja1105/sja1105_spi.c index a0dacae803cc..bb52b9c841b2 100644 --- a/drivers/net/dsa/sja1105/sja1105_spi.c +++ b/drivers/net/dsa/sja1105/sja1105_spi.c @@ -515,6 +515,7 @@ struct sja1105_info sja1105e_info = { .qinq_tpid = ETH_P_8021Q, .ptp_ts_bits = 24, .ptpegr_ts_bytes = 4, + .num_cbs_shapers = SJA1105ET_MAX_CBS_COUNT, .reset_cmd = sja1105et_reset_cmd, .fdb_add_cmd = sja1105et_fdb_add, .fdb_del_cmd = sja1105et_fdb_del, @@ -530,6 +531,7 @@ struct sja1105_info sja1105t_info = { .qinq_tpid = ETH_P_8021Q, .ptp_ts_bits = 24, .ptpegr_ts_bytes = 4, + .num_cbs_shapers = SJA1105ET_MAX_CBS_COUNT, .reset_cmd = sja1105et_reset_cmd, .fdb_add_cmd = sja1105et_fdb_add, .fdb_del_cmd = sja1105et_fdb_del, @@ -545,6 +547,7 @@ struct sja1105_info sja1105p_info = { .qinq_tpid = ETH_P_8021AD, .ptp_ts_bits = 32, .ptpegr_ts_bytes = 8, + .num_cbs_shapers = SJA1105PQRS_MAX_CBS_COUNT, .setup_rgmii_delay = sja1105pqrs_setup_rgmii_delay, .reset_cmd = sja1105pqrs_reset_cmd, .fdb_add_cmd = sja1105pqrs_fdb_add, @@ -561,6 +564,7 @@ struct sja1105_info sja1105q_info = { .qinq_tpid = ETH_P_8021AD, .ptp_ts_bits = 32, .ptpegr_ts_bytes = 8, + .num_cbs_shapers = SJA1105PQRS_MAX_CBS_COUNT, .setup_rgmii_delay = sja1105pqrs_setup_rgmii_delay, .reset_cmd = sja1105pqrs_reset_cmd, .fdb_add_cmd = sja1105pqrs_fdb_add, @@ -577,6 +581,7 @@ struct sja1105_info sja1105r_info = { .qinq_tpid = ETH_P_8021AD, .ptp_ts_bits = 32, .ptpegr_ts_bytes = 8, + .num_cbs_shapers = SJA1105PQRS_MAX_CBS_COUNT, .setup_rgmii_delay = sja1105pqrs_setup_rgmii_delay, .reset_cmd = 
sja1105pqrs_reset_cmd, .fdb_add_cmd = sja1105pqrs_fdb_add, @@ -594,6 +599,7 @@ struct sja1105_info sja1105s_info = { .qinq_tpid = ETH_P_8021AD, .ptp_ts_bits = 32, .ptpegr_ts_bytes = 8, + .num_cbs_shapers = SJA1105PQRS_MAX_CBS_COUNT, .setup_rgmii_delay = sja1105pqrs_setup_rgmii_delay, .reset_cmd = sja1105pqrs_reset_cmd, .fdb_add_cmd = sja1105pqrs_fdb_add, diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.c b/drivers/net/dsa/sja1105/sja1105_static_config.c index 780aca034cdc..ff3fe471efc2 100644 --- a/drivers/net/dsa/sja1105/sja1105_static_config.c +++ b/drivers/net/dsa/sja1105/sja1105_static_config.c @@ -146,9 +146,8 @@ static size_t sja1105et_general_params_entry_packing(void *buf, void *entry_ptr, /* TPID and TPID2 are intentionally reversed so that semantic * compatibility with E/T is kept. */ -static size_t -sja1105pqrs_general_params_entry_packing(void *buf, void *entry_ptr, - enum packing_op op) +size_t sja1105pqrs_general_params_entry_packing(void *buf, void *entry_ptr, + enum packing_op op) { const size_t size = SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY; struct sja1105_general_params_entry *entry = entry_ptr; @@ -228,9 +227,8 @@ sja1105et_l2_lookup_params_entry_packing(void *buf, void *entry_ptr, return size; } -static size_t -sja1105pqrs_l2_lookup_params_entry_packing(void *buf, void *entry_ptr, - enum packing_op op) +size_t sja1105pqrs_l2_lookup_params_entry_packing(void *buf, void *entry_ptr, + enum packing_op op) { const size_t size = SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY; struct sja1105_l2_lookup_params_entry *entry = entry_ptr; diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.h b/drivers/net/dsa/sja1105/sja1105_static_config.h index 5946847bb5b9..ee0f10062763 100644 --- a/drivers/net/dsa/sja1105/sja1105_static_config.h +++ b/drivers/net/dsa/sja1105/sja1105_static_config.h @@ -30,11 +30,13 @@ #define SJA1105ET_SIZE_L2_LOOKUP_PARAMS_ENTRY 4 #define SJA1105ET_SIZE_GENERAL_PARAMS_ENTRY 40 #define SJA1105ET_SIZE_AVB_PARAMS_ENTRY 12 +#define SJA1105ET_SIZE_CBS_ENTRY 16 #define SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY 20 #define SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY 32 #define SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY 16 #define SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY 44 #define SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY 16 +#define SJA1105PQRS_SIZE_CBS_ENTRY 20 /* UM10944.pdf Page 11, Table 2. 
Configuration Blocks */ enum { @@ -56,6 +58,7 @@ enum { BLKID_AVB_PARAMS = 0x10, BLKID_GENERAL_PARAMS = 0x11, BLKID_RETAGGING = 0x12, + BLKID_CBS = 0x13, BLKID_XMII_PARAMS = 0x4E, }; @@ -78,6 +81,7 @@ enum sja1105_blk_idx { BLK_IDX_AVB_PARAMS, BLK_IDX_GENERAL_PARAMS, BLK_IDX_RETAGGING, + BLK_IDX_CBS, BLK_IDX_XMII_PARAMS, BLK_IDX_MAX, /* Fake block indices that are only valid for dynamic access */ @@ -105,6 +109,8 @@ enum sja1105_blk_idx { #define SJA1105_MAX_RETAGGING_COUNT 32 #define SJA1105_MAX_XMII_PARAMS_COUNT 1 #define SJA1105_MAX_AVB_PARAMS_COUNT 1 +#define SJA1105ET_MAX_CBS_COUNT 10 +#define SJA1105PQRS_MAX_CBS_COUNT 16 #define SJA1105_MAX_FRAME_MEMORY 929 #define SJA1105_MAX_FRAME_MEMORY_RETAGGING 910 @@ -289,6 +295,15 @@ struct sja1105_retagging_entry { u64 destports; }; +struct sja1105_cbs_entry { + u64 port; + u64 prio; + u64 credit_hi; + u64 credit_lo; + u64 send_slope; + u64 idle_slope; +}; + struct sja1105_xmii_params_entry { u64 phy_mac[5]; u64 xmii_mode[5]; @@ -415,4 +430,26 @@ void sja1105_unpack(const void *buf, u64 *val, int start, int end, size_t len); void sja1105_packing(void *buf, u64 *val, int start, int end, size_t len, enum packing_op op); +/* Common implementations for the static and dynamic configs */ +size_t sja1105pqrs_general_params_entry_packing(void *buf, void *entry_ptr, + enum packing_op op); +size_t sja1105pqrs_l2_lookup_params_entry_packing(void *buf, void *entry_ptr, + enum packing_op op); +size_t sja1105_l2_forwarding_entry_packing(void *buf, void *entry_ptr, + enum packing_op op); +size_t sja1105pqrs_l2_lookup_entry_packing(void *buf, void *entry_ptr, + enum packing_op op); +size_t sja1105et_l2_lookup_entry_packing(void *buf, void *entry_ptr, + enum packing_op op); +size_t sja1105_vlan_lookup_entry_packing(void *buf, void *entry_ptr, + enum packing_op op); +size_t sja1105_retagging_entry_packing(void *buf, void *entry_ptr, + enum packing_op op); +size_t sja1105pqrs_mac_config_entry_packing(void *buf, void *entry_ptr, + enum packing_op op); +size_t sja1105pqrs_avb_params_entry_packing(void *buf, void *entry_ptr, + enum packing_op op); +size_t sja1105_vl_lookup_entry_packing(void *buf, void *entry_ptr, + enum packing_op op); + #endif diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c index 42985a82321a..77d78b4c59c4 100644 --- a/drivers/net/ethernet/8390/ne2k-pci.c +++ b/drivers/net/ethernet/8390/ne2k-pci.c @@ -1,39 +1,43 @@ -/* ne2k-pci.c: A NE2000 clone on PCI bus driver for Linux. */ -/* - A Linux device driver for PCI NE2000 clones. - - Authors and other copyright holders: - 1992-2000 by Donald Becker, NE2000 core and various modifications. - 1995-1998 by Paul Gortmaker, core modifications and PCI support. - Copyright 1993 assigned to the United States Government as represented - by the Director, National Security Agency. - - This software may be used and distributed according to the terms of - the GNU General Public License (GPL), incorporated herein by reference. - Drivers based on or derived from this code fall under the GPL and must - retain the authorship, copyright and license notice. This file is not - a complete program and may only be used when the entire operating - system is licensed under the GPL. - - The author may be reached as becker@scyld.com, or C/O - Scyld Computing Corporation - 410 Severn Ave., Suite 210 - Annapolis MD 21403 - - Issues remaining: - People are making PCI ne2000 clones! Oh the horror, the horror... - Limited full-duplex support. 
-*/ +/* A Linux device driver for PCI NE2000 clones. + * + * Authors and other copyright holders: + * 1992-2000 by Donald Becker, NE2000 core and various modifications. + * 1995-1998 by Paul Gortmaker, core modifications and PCI support. + * Copyright 1993 assigned to the United States Government as represented + * by the Director, National Security Agency. + * + * This software may be used and distributed according to the terms of + * the GNU General Public License (GPL), incorporated herein by reference. + * Drivers based on or derived from this code fall under the GPL and must + * retain the authorship, copyright and license notice. This file is not + * a complete program and may only be used when the entire operating + * system is licensed under the GPL. + * + * The author may be reached as becker@scyld.com, or C/O + * Scyld Computing Corporation + * 410 Severn Ave., Suite 210 + * Annapolis MD 21403 + * + * Issues remaining: + * People are making PCI NE2000 clones! Oh the horror, the horror... + * Limited full-duplex support. + */ #define DRV_NAME "ne2k-pci" +#define DRV_DESCRIPTION "PCI NE2000 clone driver" +#define DRV_AUTHOR "Donald Becker / Paul Gortmaker" #define DRV_VERSION "1.03" #define DRV_RELDATE "9/22/2003" +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* The user-configurable values. - These may be modified when a driver module is loaded.*/ + * These may be modified when a driver module is loaded. + */ + +/* More are supported, limit only on options */ +#define MAX_UNITS 8 -#define MAX_UNITS 8 /* More are supported, limit only on options */ /* Used to pass the full-duplex flag, etc. */ static int full_duplex[MAX_UNITS]; static int options[MAX_UNITS]; @@ -52,7 +56,7 @@ static int options[MAX_UNITS]; #include <linux/netdevice.h> #include <linux/etherdevice.h> -#include <asm/io.h> +#include <linux/io.h> #include <asm/irq.h> #include <linux/uaccess.h> @@ -60,20 +64,14 @@ static int options[MAX_UNITS]; static u32 ne2k_msg_enable; -/* These identify the driver base version and may not be removed. */ -static const char version[] = - KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE - " D. Becker/P. Gortmaker\n"; - #if defined(__powerpc__) #define inl_le(addr) le32_to_cpu(inl(addr)) #define inw_le(addr) le16_to_cpu(inw(addr)) #endif -#define PFX DRV_NAME ": " - -MODULE_AUTHOR("Donald Becker / Paul Gortmaker"); -MODULE_DESCRIPTION("PCI NE2000 clone driver"); +MODULE_AUTHOR(DRV_AUTHOR); +MODULE_DESCRIPTION(DRV_DESCRIPTION); +MODULE_VERSION(DRV_VERSION); MODULE_LICENSE("GPL"); module_param_named(msg_enable, ne2k_msg_enable, uint, 0444); @@ -83,7 +81,8 @@ MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bit MODULE_PARM_DESC(options, "Bit 5: full duplex"); MODULE_PARM_DESC(full_duplex, "full duplex setting(s) (1)"); -/* Some defines that people can play with if so inclined. */ +/* Some defines that people can play with if so inclined. + */ /* Use 32 bit data-movement operations instead of 16 bit. */ #define USE_LONGIO @@ -91,14 +90,18 @@ MODULE_PARM_DESC(full_duplex, "full duplex setting(s) (1)"); /* Do we implement the read before write bugfix ? */ /* #define NE_RW_BUGFIX */ -/* Flags. We rename an existing ei_status field to store flags! */ -/* Thus only the low 8 bits are usable for non-init-time flags. */ +/* Flags. We rename an existing ei_status field to store flags! + * Thus only the low 8 bits are usable for non-init-time flags. + */ #define ne2k_flags reg0 + enum { - ONLY_16BIT_IO=8, ONLY_32BIT_IO=4, /* Chip can do only 16/32-bit xfers. 
*/ - FORCE_FDX=0x20, /* User override. */ - REALTEK_FDX=0x40, HOLTEK_FDX=0x80, - STOP_PG_0x60=0x100, + /* Chip can do only 16/32-bit xfers. */ + ONLY_16BIT_IO = 8, ONLY_32BIT_IO = 4, + /* User override. */ + FORCE_FDX = 0x20, + REALTEK_FDX = 0x40, HOLTEK_FDX = 0x80, + STOP_PG_0x60 = 0x100, }; enum ne2k_pci_chipsets { @@ -120,7 +123,7 @@ static struct { char *name; int flags; } pci_clone_list[] = { - {"RealTek RTL-8029", REALTEK_FDX}, + {"RealTek RTL-8029(AS)", REALTEK_FDX}, {"Winbond 89C940", 0}, {"Compex RL2000", 0}, {"KTI ET32P2", 0}, @@ -149,13 +152,14 @@ static const struct pci_device_id ne2k_pci_tbl[] = { { 0x8c4a, 0x1980, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Winbond_89C940_8c4a }, { 0, } }; + MODULE_DEVICE_TABLE(pci, ne2k_pci_tbl); /* ---- No user-serviceable parts below ---- */ #define NE_BASE (dev->base_addr) -#define NE_CMD 0x00 +#define NE_CMD 0x00 #define NE_DATAPORT 0x10 /* NatSemi-defined port window offset. */ #define NE_RESET 0x1f /* Issue a read to reset, a write to clear. */ #define NE_IO_EXTENT 0x20 @@ -168,18 +172,20 @@ static int ne2k_pci_open(struct net_device *dev); static int ne2k_pci_close(struct net_device *dev); static void ne2k_pci_reset_8390(struct net_device *dev); -static void ne2k_pci_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, - int ring_page); +static void ne2k_pci_get_8390_hdr(struct net_device *dev, + struct e8390_pkt_hdr *hdr, int ring_page); static void ne2k_pci_block_input(struct net_device *dev, int count, - struct sk_buff *skb, int ring_offset); + struct sk_buff *skb, int ring_offset); static void ne2k_pci_block_output(struct net_device *dev, const int count, - const unsigned char *buf, const int start_page); + const unsigned char *buf, + const int start_page); static const struct ethtool_ops ne2k_pci_ethtool_ops; /* There is no room in the standard 8390 structure for extra info we need, - so we build a meta/outer-wrapper structure.. */ + * so we build a meta/outer-wrapper structure.. + */ struct ne2k_pci_card { struct net_device *dev; struct pci_dev *pci_dev; @@ -187,18 +193,17 @@ struct ne2k_pci_card { -/* - NEx000-clone boards have a Station Address (SA) PROM (SAPROM) in the packet - buffer memory space. By-the-spec NE2000 clones have 0x57,0x57 in bytes - 0x0e,0x0f of the SAPROM, while other supposed NE2000 clones must be - detected by their SA prefix. - - Reading the SAPROM from a word-wide card with the 8390 set in byte-wide - mode results in doubled values, which can be detected and compensated for. - - The probe is also responsible for initializing the card and filling - in the 'dev' and 'ei_status' structures. -*/ +/* NEx000-clone boards have a Station Address (SA) PROM (SAPROM) in the packet + * buffer memory space. By-the-spec NE2000 clones have 0x57,0x57 in bytes + * 0x0e,0x0f of the SAPROM, while other supposed NE2000 clones must be + * detected by their SA prefix. + * + * Reading the SAPROM from a word-wide card with the 8390 set in byte-wide + * mode results in doubled values, which can be detected and compensated for. + * + * The probe is also responsible for initializing the card and filling + * in the 'dev' and 'ei_status' structures. 
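+ * + * (Illustration of the doubled-value case: a word-wide card read in byte-wide mode returns each PROM byte twice, e.g. 0x57 0x57 0x57 0x57 for the two signature bytes; the usual compensation, as in the ISA ne.c probe, is to keep every other byte: SA_prom[i] = SA_prom[i + i].)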
+ */ static const struct net_device_ops ne2k_netdev_ops = { .ndo_open = ne2k_pci_open, @@ -208,7 +213,7 @@ static const struct net_device_ops ne2k_netdev_ops = { .ndo_get_stats = ei_get_stats, .ndo_set_rx_mode = ei_set_multicast_list, .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = eth_mac_addr, + .ndo_set_mac_address = eth_mac_addr, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = ei_poll, #endif @@ -227,28 +232,21 @@ static int ne2k_pci_init_one(struct pci_dev *pdev, int flags = pci_clone_list[chip_idx].flags; struct ei_device *ei_local; -/* when built into the kernel, we only print version if device is found */ -#ifndef MODULE - static int printed_version; - if (!printed_version++) - printk(version); -#endif - fnd_cnt++; - i = pci_enable_device (pdev); + i = pci_enable_device(pdev); if (i) return i; - ioaddr = pci_resource_start (pdev, 0); + ioaddr = pci_resource_start(pdev, 0); irq = pdev->irq; - if (!ioaddr || ((pci_resource_flags (pdev, 0) & IORESOURCE_IO) == 0)) { + if (!ioaddr || ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) == 0)) { dev_err(&pdev->dev, "no I/O resource at PCI BAR #0\n"); goto err_out; } - if (request_region (ioaddr, NE_IO_EXTENT, DRV_NAME) == NULL) { + if (!request_region(ioaddr, NE_IO_EXTENT, DRV_NAME)) { dev_err(&pdev->dev, "I/O resource 0x%x @ 0x%lx busy\n", NE_IO_EXTENT, ioaddr); goto err_out; @@ -261,14 +259,17 @@ static int ne2k_pci_init_one(struct pci_dev *pdev, /* Do a preliminary verification that we have a 8390. */ { int regd; - outb(E8390_NODMA+E8390_PAGE1+E8390_STOP, ioaddr + E8390_CMD); + + outb(E8390_NODMA + E8390_PAGE1 + E8390_STOP, ioaddr + E8390_CMD); regd = inb(ioaddr + 0x0d); outb(0xff, ioaddr + 0x0d); - outb(E8390_NODMA+E8390_PAGE0, ioaddr + E8390_CMD); - inb(ioaddr + EN0_COUNTER0); /* Clear the counter by reading. */ + outb(E8390_NODMA + E8390_PAGE0, ioaddr + E8390_CMD); + /* Clear the counter by reading. */ + inb(ioaddr + EN0_COUNTER0); if (inb(ioaddr + EN0_COUNTER0) != 0) { outb(reg0, ioaddr); - outb(regd, ioaddr + 0x0d); /* Restore the old values. */ + /* Restore the old values. */ + outb(regd, ioaddr + 0x0d); goto err_out_free_res; } } @@ -291,9 +292,9 @@ static int ne2k_pci_init_one(struct pci_dev *pdev, outb(inb(ioaddr + NE_RESET), ioaddr + NE_RESET); - /* This looks like a horrible timing loop, but it should never take - more than a few cycles. - */ + /* This looks like a horrible timing loop, but it should never + * take more than a few cycles. + */ while ((inb(ioaddr + EN0_ISR) & ENISR_RESET) == 0) /* Limit wait: '2' avoids jiffy roll-over. */ if (jiffies - reset_start_time > 2) { @@ -301,42 +302,53 @@ static int ne2k_pci_init_one(struct pci_dev *pdev, "Card failure (no reset ack).\n"); goto err_out_free_netdev; } - - outb(0xff, ioaddr + EN0_ISR); /* Ack all intr. */ + /* Ack all intr. */ + outb(0xff, ioaddr + EN0_ISR); } /* Read the 16 bytes of station address PROM. - We must first initialize registers, similar to NS8390_init(eifdev, 0). - We can't reliably read the SAPROM address without this. - (I learned the hard way!). */ + * We must first initialize registers, similar + * to NS8390_init(eifdev, 0). + * We can't reliably read the SAPROM address without this. + * (I learned the hard way!). + */ { struct {unsigned char value, offset; } program_seq[] = { - {E8390_NODMA+E8390_PAGE0+E8390_STOP, E8390_CMD}, /* Select page 0*/ - {0x49, EN0_DCFG}, /* Set word-wide access. */ - {0x00, EN0_RCNTLO}, /* Clear the count regs. 
*/ + /* Select page 0 */ + {E8390_NODMA + E8390_PAGE0 + E8390_STOP, E8390_CMD}, + /* Set word-wide access */ + {0x49, EN0_DCFG}, + /* Clear the count regs. */ + {0x00, EN0_RCNTLO}, {0x00, EN0_RCNTHI}, - {0x00, EN0_IMR}, /* Mask completion irq. */ + /* Mask completion IRQ */ + {0x00, EN0_IMR}, {0xFF, EN0_ISR}, - {E8390_RXOFF, EN0_RXCR}, /* 0x20 Set to monitor */ - {E8390_TXOFF, EN0_TXCR}, /* 0x02 and loopback mode. */ + /* 0x20 Set to monitor */ + {E8390_RXOFF, EN0_RXCR}, + /* 0x02 and loopback mode */ + {E8390_TXOFF, EN0_TXCR}, {32, EN0_RCNTLO}, {0x00, EN0_RCNTHI}, - {0x00, EN0_RSARLO}, /* DMA starting at 0x0000. */ + /* DMA starting at 0x0000 */ + {0x00, EN0_RSARLO}, {0x00, EN0_RSARHI}, {E8390_RREAD+E8390_START, E8390_CMD}, }; for (i = 0; i < ARRAY_SIZE(program_seq); i++) - outb(program_seq[i].value, ioaddr + program_seq[i].offset); + outb(program_seq[i].value, + ioaddr + program_seq[i].offset); } /* Note: all PCI cards have at least 16 bit access, so we don't have - to check for 8 bit cards. Most cards permit 32 bit access. */ + * to check for 8 bit cards. Most cards permit 32 bit access. + */ if (flags & ONLY_32BIT_IO) { for (i = 0; i < 4 ; i++) ((u32 *)SA_prom)[i] = le32_to_cpu(inl(ioaddr + NE_DATAPORT)); } else - for(i = 0; i < 32 /*sizeof(SA_prom)*/; i++) + for (i = 0; i < 32 /* sizeof(SA_prom) */; i++) SA_prom[i] = inb(ioaddr + NE_DATAPORT); /* We always set the 8390 registers for word mode. */ @@ -356,7 +368,7 @@ static int ne2k_pci_init_one(struct pci_dev *pdev, ei_status.word16 = 1; ei_status.ne2k_flags = flags; if (fnd_cnt < MAX_UNITS) { - if (full_duplex[fnd_cnt] > 0 || (options[fnd_cnt] & FORCE_FDX)) + if (full_duplex[fnd_cnt] > 0 || (options[fnd_cnt] & FORCE_FDX)) ei_status.ne2k_flags |= FORCE_FDX; } @@ -388,16 +400,15 @@ static int ne2k_pci_init_one(struct pci_dev *pdev, return 0; err_out_free_netdev: - free_netdev (dev); + free_netdev(dev); err_out_free_res: - release_region (ioaddr, NE_IO_EXTENT); + release_region(ioaddr, NE_IO_EXTENT); err_out: pci_disable_device(pdev); return -ENODEV; } -/* - * Magic incantation sequence for full duplex on the supported cards. +/* Magic incantation sequence for full duplex on the supported cards. */ static inline int set_realtek_fdx(struct net_device *dev) { @@ -431,7 +442,9 @@ static int ne2k_pci_set_fdx(struct net_device *dev) static int ne2k_pci_open(struct net_device *dev) { - int ret = request_irq(dev->irq, ei_interrupt, IRQF_SHARED, dev->name, dev); + int ret = request_irq(dev->irq, ei_interrupt, IRQF_SHARED, + dev->name, dev); + if (ret) return ret; @@ -450,7 +463,8 @@ static int ne2k_pci_close(struct net_device *dev) } /* Hard reset the card. This used to pause for the same period that a - 8390 reset command required, but that shouldn't be necessary. */ + * 8390 reset command required, but that shouldn't be necessary. + */ static void ne2k_pci_reset_8390(struct net_device *dev) { unsigned long reset_start_time = jiffies; @@ -467,31 +481,34 @@ static void ne2k_pci_reset_8390(struct net_device *dev) /* This check _should_not_ be necessary, omit eventually. */ while ((inb(NE_BASE+EN0_ISR) & ENISR_RESET) == 0) if (jiffies - reset_start_time > 2) { - netdev_err(dev, "ne2k_pci_reset_8390() did not complete.\n"); + netdev_err(dev, "%s did not complete.\n", __func__); break; } - outb(ENISR_RESET, NE_BASE + EN0_ISR); /* Ack intr. */ + /* Ack intr. */ + outb(ENISR_RESET, NE_BASE + EN0_ISR); } /* Grab the 8390 specific header.
Similar to the block_input routine, but - we don't need to be concerned with ring wrap as the header will be at - the start of a page, so we optimize accordingly. */ + * we don't need to be concerned with ring wrap as the header will be at + * the start of a page, so we optimize accordingly. + */ -static void ne2k_pci_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) +static void ne2k_pci_get_8390_hdr(struct net_device *dev, + struct e8390_pkt_hdr *hdr, int ring_page) { long nic_base = dev->base_addr; - /* This *shouldn't* happen. If it does, it's the last thing you'll see */ + /* This *shouldn't* happen. If it does, it's the last thing you'll see + */ if (ei_status.dmaing) { - netdev_err(dev, "DMAing conflict in ne2k_pci_get_8390_hdr " - "[DMAstat:%d][irqlock:%d].\n", - ei_status.dmaing, ei_status.irqlock); + netdev_err(dev, "DMAing conflict in %s [DMAstat:%d][irqlock:%d].\n", + __func__, ei_status.dmaing, ei_status.irqlock); return; } ei_status.dmaing |= 0x01; - outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD); + outb(E8390_NODMA + E8390_PAGE0 + E8390_START, nic_base + NE_CMD); outb(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO); outb(0, nic_base + EN0_RCNTHI); outb(0, nic_base + EN0_RSARLO); /* On page boundary */ @@ -499,20 +516,22 @@ static void ne2k_pci_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr * outb(E8390_RREAD+E8390_START, nic_base + NE_CMD); if (ei_status.ne2k_flags & ONLY_16BIT_IO) { - insw(NE_BASE + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)>>1); + insw(NE_BASE + NE_DATAPORT, hdr, + sizeof(struct e8390_pkt_hdr) >> 1); } else { - *(u32*)hdr = le32_to_cpu(inl(NE_BASE + NE_DATAPORT)); + *(u32 *)hdr = le32_to_cpu(inl(NE_BASE + NE_DATAPORT)); le16_to_cpus(&hdr->count); } - - outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ + /* Ack intr. */ + outb(ENISR_RDC, nic_base + EN0_ISR); ei_status.dmaing &= ~0x01; } /* Block input and output, similar to the Crynwr packet driver. If you - are porting to a new ethercard, look at the packet driver source for hints. - The NEx000 doesn't share the on-board packet memory -- you have to put - the packet out through the "remote DMA" dataport using outb. */ + * are porting to a new ethercard, look at the packet driver source for hints. + * The NEx000 doesn't share the on-board packet memory -- you have to put + * the packet out through the "remote DMA" dataport using outb. + */ static void ne2k_pci_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) @@ -520,30 +539,30 @@ static void ne2k_pci_block_input(struct net_device *dev, int count, long nic_base = dev->base_addr; char *buf = skb->data; - /* This *shouldn't* happen. If it does, it's the last thing you'll see */ + /* This *shouldn't* happen. + * If it does, it's the last thing you'll see.
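+ * (ei_status.dmaing acts as a simple busy flag: bit 0x01 is set for the duration of a remote-DMA transfer and cleared when it completes -- see the |= 0x01 and &= ~0x01 pairs in these routines -- so a nonzero value at entry indicates a reentrant call.)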
+ */ if (ei_status.dmaing) { - netdev_err(dev, "DMAing conflict in ne2k_pci_block_input " - "[DMAstat:%d][irqlock:%d].\n", - ei_status.dmaing, ei_status.irqlock); + netdev_err(dev, "DMAing conflict in %s [DMAstat:%d][irqlock:%d]\n", + __func__, ei_status.dmaing, ei_status.irqlock); return; } ei_status.dmaing |= 0x01; if (ei_status.ne2k_flags & ONLY_32BIT_IO) count = (count + 3) & 0xFFFC; - outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD); + outb(E8390_NODMA + E8390_PAGE0 + E8390_START, nic_base + NE_CMD); outb(count & 0xff, nic_base + EN0_RCNTLO); outb(count >> 8, nic_base + EN0_RCNTHI); outb(ring_offset & 0xff, nic_base + EN0_RSARLO); outb(ring_offset >> 8, nic_base + EN0_RSARHI); - outb(E8390_RREAD+E8390_START, nic_base + NE_CMD); + outb(E8390_RREAD + E8390_START, nic_base + NE_CMD); if (ei_status.ne2k_flags & ONLY_16BIT_IO) { - insw(NE_BASE + NE_DATAPORT,buf,count>>1); - if (count & 0x01) { + insw(NE_BASE + NE_DATAPORT, buf, count >> 1); + if (count & 0x01) buf[count-1] = inb(NE_BASE + NE_DATAPORT); - } } else { - insl(NE_BASE + NE_DATAPORT, buf, count>>2); + insl(NE_BASE + NE_DATAPORT, buf, count >> 2); if (count & 3) { buf += count & ~3; if (count & 2) { @@ -556,30 +575,32 @@ static void ne2k_pci_block_input(struct net_device *dev, int count, *buf = inb(NE_BASE + NE_DATAPORT); } } - - outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ + /* Ack intr. */ + outb(ENISR_RDC, nic_base + EN0_ISR); ei_status.dmaing &= ~0x01; } static void ne2k_pci_block_output(struct net_device *dev, int count, - const unsigned char *buf, const int start_page) + const unsigned char *buf, const int start_page) { long nic_base = NE_BASE; unsigned long dma_start; /* On little-endian it's always safe to round the count up for - word writes. */ + * word writes. + */ if (ei_status.ne2k_flags & ONLY_32BIT_IO) count = (count + 3) & 0xFFFC; else if (count & 0x01) count++; - /* This *shouldn't* happen. If it does, it's the last thing you'll see */ + /* This *shouldn't* happen. + * If it does, it's the last thing you'll see. + */ if (ei_status.dmaing) { - netdev_err(dev, "DMAing conflict in ne2k_pci_block_output." - "[DMAstat:%d][irqlock:%d]\n", - ei_status.dmaing, ei_status.irqlock); + netdev_err(dev, "DMAing conflict in %s [DMAstat:%d][irqlock:%d]\n", + __func__, ei_status.dmaing, ei_status.irqlock); return; } ei_status.dmaing |= 0x01; @@ -588,9 +609,10 @@ static void ne2k_pci_block_output(struct net_device *dev, int count, #ifdef NE8390_RW_BUGFIX /* Handle the read-before-write bug the same way as the - Crynwr packet driver -- the NatSemi method doesn't work. - Actually this doesn't always work either, but if you have - problems with your NEx000 this is better than nothing! */ + * Crynwr packet driver -- the NatSemi method doesn't work. + * Actually this doesn't always work either, but if you have + * problems with your NEx000 this is better than nothing! + */ outb(0x42, nic_base + EN0_RCNTLO); outb(0x00, nic_base + EN0_RCNTHI); outb(0x42, nic_base + EN0_RSARLO); @@ -599,16 +621,16 @@ static void ne2k_pci_block_output(struct net_device *dev, int count, #endif outb(ENISR_RDC, nic_base + EN0_ISR); - /* Now the normal output. */ + /* Now the normal output. 
*/ outb(count & 0xff, nic_base + EN0_RCNTLO); outb(count >> 8, nic_base + EN0_RCNTHI); outb(0x00, nic_base + EN0_RSARLO); outb(start_page, nic_base + EN0_RSARHI); outb(E8390_RWRITE+E8390_START, nic_base + NE_CMD); if (ei_status.ne2k_flags & ONLY_16BIT_IO) { - outsw(NE_BASE + NE_DATAPORT, buf, count>>1); + outsw(NE_BASE + NE_DATAPORT, buf, count >> 1); } else { - outsl(NE_BASE + NE_DATAPORT, buf, count>>2); + outsl(NE_BASE + NE_DATAPORT, buf, count >> 2); if (count & 3) { buf += count & ~3; if (count & 2) { @@ -623,14 +645,15 @@ static void ne2k_pci_block_output(struct net_device *dev, int count, dma_start = jiffies; while ((inb(nic_base + EN0_ISR) & ENISR_RDC) == 0) - if (jiffies - dma_start > 2) { /* Avoid clock roll-over. */ + /* Avoid clock roll-over. */ + if (jiffies - dma_start > 2) { netdev_warn(dev, "timeout waiting for Tx RDC.\n"); ne2k_pci_reset_8390(dev); - NS8390_init(dev,1); + NS8390_init(dev, 1); break; } - - outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ + /* Ack intr. */ + outb(ENISR_RDC, nic_base + EN0_ISR); ei_status.dmaing &= ~0x01; } @@ -640,9 +663,9 @@ static void ne2k_pci_get_drvinfo(struct net_device *dev, struct ei_device *ei = netdev_priv(dev); struct pci_dev *pci_dev = (struct pci_dev *) ei->priv; - strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); - strlcpy(info->version, DRV_VERSION, sizeof(info->version)); - strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info)); + strscpy(info->driver, DRV_NAME, sizeof(info->driver)); + strscpy(info->version, DRV_VERSION, sizeof(info->version)); + strscpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info)); } static u32 ne2k_pci_get_msglevel(struct net_device *dev) @@ -677,9 +700,9 @@ static void ne2k_pci_remove_one(struct pci_dev *pdev) } #ifdef CONFIG_PM -static int ne2k_pci_suspend (struct pci_dev *pdev, pm_message_t state) +static int ne2k_pci_suspend(struct pci_dev *pdev, pm_message_t state) { - struct net_device *dev = pci_get_drvdata (pdev); + struct net_device *dev = pci_get_drvdata(pdev); netif_device_detach(dev); pci_save_state(pdev); @@ -689,9 +712,9 @@ static int ne2k_pci_suspend (struct pci_dev *pdev, pm_message_t state) return 0; } -static int ne2k_pci_resume (struct pci_dev *pdev) +static int ne2k_pci_resume(struct pci_dev *pdev) { - struct net_device *dev = pci_get_drvdata (pdev); + struct net_device *dev = pci_get_drvdata(pdev); int rc; pci_set_power_state(pdev, PCI_D0); @@ -718,24 +741,20 @@ static struct pci_driver ne2k_driver = { #ifdef CONFIG_PM .suspend = ne2k_pci_suspend, .resume = ne2k_pci_resume, -#endif /* CONFIG_PM */ +#endif }; static int __init ne2k_pci_init(void) { -/* when a module, this is printed whether or not devices are found in probe */ -#ifdef MODULE - printk(version); -#endif return pci_register_driver(&ne2k_driver); } static void __exit ne2k_pci_cleanup(void) { - pci_unregister_driver (&ne2k_driver); + pci_unregister_driver(&ne2k_driver); } module_init(ne2k_pci_init); diff --git a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h index 7be3dcbf3d16..336742f6e3c3 100644 --- a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h +++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h @@ -768,8 +768,8 @@ enum ena_admin_os_type { ENA_ADMIN_OS_DPDK = 3, ENA_ADMIN_OS_FREEBSD = 4, ENA_ADMIN_OS_IPXE = 5, - ENA_ADMIN_OS_ESXI = 6, - ENA_ADMIN_OS_GROUPS_NUM = 6, + ENA_ADMIN_OS_ESXI = 6, + ENA_ADMIN_OS_GROUPS_NUM = 6, }; struct ena_admin_host_info { @@ -813,7 +813,8 @@ struct ena_admin_host_info { u16 reserved; - /* 1 :0 : 
reserved + /* 0 : reserved + * 1 : rx_offset * 2 : interrupt_moderation * 31:3 : reserved */ @@ -1124,6 +1125,8 @@ struct ena_admin_ena_mmio_req_read_less_resp { #define ENA_ADMIN_HOST_INFO_DEVICE_MASK GENMASK(7, 3) #define ENA_ADMIN_HOST_INFO_BUS_SHIFT 8 #define ENA_ADMIN_HOST_INFO_BUS_MASK GENMASK(15, 8) +#define ENA_ADMIN_HOST_INFO_RX_OFFSET_SHIFT 1 +#define ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK BIT(1) #define ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_SHIFT 2 #define ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK BIT(2) @@ -1133,4 +1136,4 @@ struct ena_admin_ena_mmio_req_read_less_resp { /* aenq_link_change_desc */ #define ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK BIT(0) -#endif /*_ENA_ADMIN_H_ */ +#endif /* _ENA_ADMIN_H_ */ diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c index b51bf62af11b..432f143559a1 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_com.c @@ -62,7 +62,9 @@ #define ENA_REGS_ADMIN_INTR_MASK 1 -#define ENA_POLL_MS 5 +#define ENA_MIN_ADMIN_POLL_US 100 + +#define ENA_MAX_ADMIN_POLL_US 5000 /*****************************************************************************/ /*****************************************************************************/ @@ -200,17 +202,17 @@ static void comp_ctxt_release(struct ena_com_admin_queue *queue, static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue, u16 command_id, bool capture) { - if (unlikely(!queue->comp_ctx)) { - pr_err("Completion context is NULL\n"); - return NULL; - } - if (unlikely(command_id >= queue->q_depth)) { pr_err("command id is larger than the queue size. cmd_id: %u queue size %d\n", command_id, queue->q_depth); return NULL; } + if (unlikely(!queue->comp_ctx)) { + pr_err("Completion context is NULL\n"); + return NULL; + } + if (unlikely(queue->comp_ctx[command_id].occupied && capture)) { pr_err("Completion context is occupied\n"); return NULL; @@ -375,7 +377,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev, io_sq->bounce_buf_ctrl.next_to_use = 0; size = io_sq->bounce_buf_ctrl.buffer_size * - io_sq->bounce_buf_ctrl.buffers_num; + io_sq->bounce_buf_ctrl.buffers_num; dev_node = dev_to_node(ena_dev->dmadev); set_dev_node(ena_dev->dmadev, ctx->numa_node); @@ -523,9 +525,6 @@ static int ena_com_comp_status_to_errno(u8 comp_status) if (unlikely(comp_status != 0)) pr_err("admin command failed[%u]\n", comp_status); - if (unlikely(comp_status > ENA_ADMIN_UNKNOWN_ERROR)) - return -EINVAL; - switch (comp_status) { case ENA_ADMIN_SUCCESS: return 0; @@ -540,7 +539,14 @@ static int ena_com_comp_status_to_errno(u8 comp_status) return -EINVAL; } - return 0; + return -EINVAL; +} + +static void ena_delay_exponential_backoff_us(u32 exp, u32 delay_us) +{ + delay_us = max_t(u32, ENA_MIN_ADMIN_POLL_US, delay_us); + delay_us = min_t(u32, delay_us * (1U << exp), ENA_MAX_ADMIN_POLL_US); + usleep_range(delay_us, 2 * delay_us); } static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx, @@ -549,6 +555,7 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c unsigned long flags = 0; unsigned long timeout; int ret; + u32 exp = 0; timeout = jiffies + usecs_to_jiffies(admin_queue->completion_timeout); @@ -572,7 +579,8 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c goto err; } - msleep(ENA_POLL_MS); + ena_delay_exponential_backoff_us(exp++, + admin_queue->ena_dev->ena_min_poll_delay_us); } if (unlikely(comp_ctx->status == 
ENA_CMD_ABORTED)) { @@ -702,8 +710,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev, /* The desc list entry size should be whole multiply of 8 * This requirement comes from __iowrite64_copy() */ - pr_err("illegal entry size %d\n", - llq_info->desc_list_entry_size); + pr_err("illegal entry size %d\n", llq_info->desc_list_entry_size); return -EINVAL; } @@ -775,7 +782,7 @@ static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *com if (admin_queue->auto_polling) admin_queue->polling = true; } else { - pr_err("The ena device doesn't send a completion for the admin cmd %d status %d\n", + pr_err("The ena device didn't send a completion for the admin cmd %d status %d\n", comp_ctx->cmd_opcode, comp_ctx->status); } /* Check if shifted to polling mode. @@ -943,12 +950,13 @@ static void ena_com_io_queue_free(struct ena_com_dev *ena_dev, static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout, u16 exp_state) { - u32 val, i; + u32 val, exp = 0; + unsigned long timeout_stamp; - /* Convert timeout from resolution of 100ms to ENA_POLL_MS */ - timeout = (timeout * 100) / ENA_POLL_MS; + /* Convert timeout from resolution of 100ms to us resolution. */ + timeout_stamp = jiffies + usecs_to_jiffies(100 * 1000 * timeout); - for (i = 0; i < timeout; i++) { + while (1) { val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF); if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) { @@ -960,10 +968,11 @@ static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout, exp_state) return 0; - msleep(ENA_POLL_MS); - } + if (time_is_before_jiffies(timeout_stamp)) + return -ETIME; - return -ETIME; + ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us); + } } static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev, @@ -1284,13 +1293,9 @@ static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev) static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev, u16 intr_delay_resolution) { - /* Initial value of intr_delay_resolution might be 0 */ - u16 prev_intr_delay_resolution = - ena_dev->intr_delay_resolution ? - ena_dev->intr_delay_resolution : - ENA_DEFAULT_INTR_DELAY_RESOLUTION; + u16 prev_intr_delay_resolution = ena_dev->intr_delay_resolution; - if (!intr_delay_resolution) { + if (unlikely(!intr_delay_resolution)) { pr_err("Illegal intr_delay_resolution provided. 
Going to use default 1 usec resolution\n"); intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION; } @@ -1444,11 +1449,13 @@ void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev) { struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; unsigned long flags = 0; + u32 exp = 0; spin_lock_irqsave(&admin_queue->q_lock, flags); while (atomic_read(&admin_queue->outstanding_cmds) != 0) { spin_unlock_irqrestore(&admin_queue->q_lock, flags); - msleep(ENA_POLL_MS); + ena_delay_exponential_backoff_us(exp++, + ena_dev->ena_min_poll_delay_us); spin_lock_irqsave(&admin_queue->q_lock, flags); } spin_unlock_irqrestore(&admin_queue->q_lock, flags); @@ -1796,6 +1803,7 @@ int ena_com_admin_init(struct ena_com_dev *ena_dev, if (ret) goto error; + admin_queue->ena_dev = ena_dev; admin_queue->running_state = true; return 0; @@ -2003,7 +2011,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data) struct ena_admin_aenq_entry *aenq_e; struct ena_admin_aenq_common_desc *aenq_common; struct ena_com_aenq *aenq = &dev->aenq; - unsigned long long timestamp; + u64 timestamp; ena_aenq_handler handler_cb; u16 masked_head, processed = 0; u8 phase; @@ -2021,9 +2029,8 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data) */ dma_rmb(); - timestamp = - (unsigned long long)aenq_common->timestamp_low | - ((unsigned long long)aenq_common->timestamp_high << 32); + timestamp = (u64)aenq_common->timestamp_low | + ((u64)aenq_common->timestamp_high << 32); pr_debug("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n", aenq_common->group, aenq_common->syndrom, timestamp); @@ -2053,8 +2060,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data) /* write the aenq doorbell after all AENQ descriptors were read */ mb(); - writel_relaxed((u32)aenq->head, - dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF); + writel_relaxed((u32)aenq->head, dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF); } int ena_com_dev_reset(struct ena_com_dev *ena_dev, @@ -2276,13 +2282,14 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev, enum ena_admin_hash_functions func, const u8 *key, u16 key_len, u32 init_val) { - struct ena_rss *rss = &ena_dev->rss; + struct ena_admin_feature_rss_flow_hash_control *hash_key; struct ena_admin_get_feat_resp get_resp; - struct ena_admin_feature_rss_flow_hash_control *hash_key = - rss->hash_key; enum ena_admin_hash_functions old_func; + struct ena_rss *rss = &ena_dev->rss; int rc; + hash_key = rss->hash_key; + /* Make sure size is a mult of DWs */ if (unlikely(key_len & 0x3)) return -EINVAL; @@ -2294,7 +2301,7 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev, if (unlikely(rc)) return rc; - if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) { + if (!(BIT(func) & get_resp.u.flow_hash_func.supported_func)) { pr_err("Flow hash function %d isn't supported\n", func); return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h index 13a1b7812c46..bc187adf54e4 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.h +++ b/drivers/net/ethernet/amazon/ena/ena_com.h @@ -77,6 +77,8 @@ #define ENA_INTR_INITIAL_RX_INTERVAL_USECS 0 #define ENA_DEFAULT_INTR_DELAY_RESOLUTION 1 +#define ENA_HASH_KEY_SIZE 40 + #define ENA_HW_HINTS_NO_TIMEOUT 0xFFFF #define ENA_FEATURE_MAX_QUEUE_EXT_VER 1 @@ -237,6 +239,7 @@ struct ena_com_stats_admin { struct ena_com_admin_queue { void *q_dmadev; + struct ena_com_dev *ena_dev; spinlock_t q_lock; /* spinlock for the admin queue */ struct ena_comp_ctx *comp_ctx; @@ 
-349,6 +352,8 @@ struct ena_com_dev { struct ena_intr_moder_entry *intr_moder_tbl; struct ena_com_llq_info llq_info; + + u32 ena_min_poll_delay_us; }; struct ena_com_dev_get_features_ctx { @@ -393,7 +398,7 @@ struct ena_aenq_handlers { */ int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev); -/* ena_com_set_mmio_read_mode - Enable/disable the mmio reg read mechanism +/* ena_com_set_mmio_read_mode - Enable/disable the indirect mmio reg read mechanism * @ena_dev: ENA communication layer struct * @readless_supported: readless mode (enable/disable) */ @@ -515,7 +520,7 @@ void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev, /* ena_com_admin_q_comp_intr_handler - admin queue interrupt handler * @ena_dev: ENA communication layer struct * - * This method go over the admin completion queue and wake up all the pending + * This method goes over the admin completion queue and wakes up all the pending * threads that wait on the commands wait event. * * @note: Should be called after MSI-X interrupt. @@ -525,7 +530,7 @@ void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev); /* ena_com_aenq_intr_handler - AENQ interrupt handler * @ena_dev: ENA communication layer struct * - * This method go over the async event notification queue and call the proper + * This method goes over the async event notification queue and calls the proper * aenq handler. */ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data); @@ -542,14 +547,14 @@ void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev); /* ena_com_wait_for_abort_completion - Wait for admin commands abort. * @ena_dev: ENA communication layer struct * - * This method wait until all the outstanding admin commands will be completed. + * This method waits until all the outstanding admin commands are completed. */ void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev); /* ena_com_validate_version - Validate the device parameters * @ena_dev: ENA communication layer struct * - * This method validate the device parameters are the same as the saved + * This method verifies the device parameters are the same as the saved * parameters in ena_dev. * This method is useful after device reset, to validate the device mac address * and the device offloads are the same as before the reset. @@ -689,7 +694,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev); * * Retrieve the hash function from the device. * - * @note: If the caller called ena_com_fill_hash_function but didn't flash + * @note: If the caller called ena_com_fill_hash_function but didn't flush * it to the device, the new configuration will be lost. * * @return: 0 on Success and negative value otherwise. @@ -703,7 +708,7 @@ int ena_com_get_hash_function(struct ena_com_dev *ena_dev, * * Retrieve the hash key. * - * @note: If the caller called ena_com_fill_hash_key but didn't flash + * @note: If the caller called ena_com_fill_hash_key but didn't flush * it to the device, the new configuration will be lost. * * @return: 0 on Success and negative value otherwise. @@ -743,7 +748,7 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev); * * Retrieve the hash control from the device. * - * @note, If the caller called ena_com_fill_hash_ctrl but didn't flash + * @note: If the caller called ena_com_fill_hash_ctrl but didn't flush * it to the device, the new configuration will be lost. * * @return: 0 on Success and negative value otherwise. 
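The @note corrections above all document the same two-step contract: an ena_com_fill_*() call only stages new RSS state in the host-side copy (ena_dev->rss), and the matching ena_com_set_*() admin command is what flushes it to the device. A minimal sketch of that pattern, assuming the ena_com.h declarations shown in this patch; the wrapper name, the Toeplitz key, and the 0xffffffff init value are illustrative, and error handling beyond the return codes is elided:

static int ena_rss_set_key_sketch(struct ena_com_dev *ena_dev,
				  const u8 *key, u16 key_len)
{
	int rc;

	/* Stage the key in the host-side copy only; key_len must be a
	 * multiple of 4, since ena_com_fill_hash_function() rejects
	 * sizes that are not whole DWs.
	 */
	rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ,
					key, key_len, 0xffffffff);
	if (rc)
		return rc;

	/* Flush the staged key to the device. Skipping this step is
	 * exactly the case the @note comments warn about: the next
	 * fill would silently overwrite the unflushed configuration.
	 */
	return ena_com_set_hash_function(ena_dev);
}

The same fill-then-set split applies to ena_com_fill_hash_ctrl()/ena_com_set_hash_ctrl() and to ena_com_indirect_table_fill_entry()/ena_com_indirect_table_set().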
@@ -795,7 +800,7 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev); * * Retrieve the RSS indirection table from the device. * - * @note: If the caller called ena_com_indirect_table_fill_entry but didn't flash + * @note: If the caller called ena_com_indirect_table_fill_entry but didn't flush * it to the device, the new configuration will be lost. * * @return: 0 on Success and negative value otherwise. @@ -821,14 +826,14 @@ int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev, /* ena_com_delete_debug_area - Free the debug area resources. * @ena_dev: ENA communication layer struct * - * Free the allocate debug area. + * Free the allocated debug area. */ void ena_com_delete_debug_area(struct ena_com_dev *ena_dev); /* ena_com_delete_host_info - Free the host info resources. * @ena_dev: ENA communication layer struct * - * Free the allocate host info. + * Free the allocated host info. */ void ena_com_delete_host_info(struct ena_com_dev *ena_dev); @@ -869,9 +874,9 @@ int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev, * @cmd_completion: command completion return value. * @cmd_comp_size: command completion size. - * Submit an admin command and then wait until the device will return a + * Submit an admin command and then wait until the device returns a * completion. - * The completion will be copyed into cmd_comp. + * The completion will be copied into cmd_comp. * * @return - 0 on success, negative value on failure. */ @@ -934,7 +939,7 @@ unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev * /* ena_com_config_dev_mode - Configure the placement policy of the device. * @ena_dev: ENA communication layer struct * @llq_features: LLQ feature descriptor, retrieve via - * ena_com_get_dev_attr_feat. + * ena_com_get_dev_attr_feat. * @ena_llq_config: The default driver LLQ parameters configurations */ int ena_com_config_dev_mode(struct ena_com_dev *ena_dev, @@ -960,7 +965,7 @@ static inline void ena_com_disable_adaptive_moderation(struct ena_com_dev *ena_d * @intr_reg: interrupt register to update. * @rx_delay_interval: Rx interval in usecs * @tx_delay_interval: Tx interval in usecs - * @unmask: unask enable/disable + * @unmask: unmask enable/disable * * Prepare interrupt update register with the supplied parameters. 
*/ diff --git a/drivers/net/ethernet/amazon/ena/ena_common_defs.h b/drivers/net/ethernet/amazon/ena/ena_common_defs.h index 23beb7e7ed7b..8a8ded0de9ac 100644 --- a/drivers/net/ethernet/amazon/ena/ena_common_defs.h +++ b/drivers/net/ethernet/amazon/ena/ena_common_defs.h @@ -45,4 +45,4 @@ struct ena_common_mem_addr { u16 reserved16; }; -#endif /*_ENA_COMMON_H_ */ +#endif /* _ENA_COMMON_H_ */ diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c index 2845ac277724..ec8ea25e988d 100644 --- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c @@ -519,7 +519,7 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq, struct ena_eth_io_rx_cdesc_base *cdesc = NULL; u16 cdesc_idx = 0; u16 nb_hw_desc; - u16 i; + u16 i = 0; WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type"); @@ -538,13 +538,19 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq, return -ENOSPC; } - for (i = 0; i < nb_hw_desc; i++) { + cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx); + ena_rx_ctx->pkt_offset = cdesc->offset; + + do { + ena_buf[i].len = cdesc->length; + ena_buf[i].req_id = cdesc->req_id; + + if (++i >= nb_hw_desc) + break; + cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx + i); - ena_buf->len = cdesc->length; - ena_buf->req_id = cdesc->req_id; - ena_buf++; - } + } while (1); /* Update SQ head ptr */ io_sq->next_to_comp += nb_hw_desc; @@ -578,10 +584,10 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq, desc->length = ena_buf->len; - desc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK; - desc->ctrl |= ENA_ETH_IO_RX_DESC_LAST_MASK; - desc->ctrl |= io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK; - desc->ctrl |= ENA_ETH_IO_RX_DESC_COMP_REQ_MASK; + desc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK | + ENA_ETH_IO_RX_DESC_LAST_MASK | + (io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK) | + ENA_ETH_IO_RX_DESC_COMP_REQ_MASK; desc->req_id = req_id; diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h index 77986c0ea52c..8b1afd3b32f2 100644 --- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h +++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h @@ -73,6 +73,7 @@ struct ena_com_rx_ctx { u32 hash; u16 descs; int max_bufs; + u8 pkt_offset; }; int ena_com_prepare_tx(struct ena_com_io_sq *io_sq, @@ -95,7 +96,7 @@ static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq, writel(intr_reg->intr_control, io_cq->unmask_reg); } -static inline int ena_com_free_desc(struct ena_com_io_sq *io_sq) +static inline int ena_com_free_q_entries(struct ena_com_io_sq *io_sq) { u16 tail, next_to_comp, cnt; @@ -113,7 +114,7 @@ static inline bool ena_com_sq_have_enough_space(struct ena_com_io_sq *io_sq, int temp; if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) - return ena_com_free_desc(io_sq) >= required_buffers; + return ena_com_free_q_entries(io_sq) >= required_buffers; /* This calculation doesn't need to be 100% accurate. 
So to reduce * the calculation overhead just Subtract 2 lines from the free descs @@ -122,7 +123,7 @@ static inline bool ena_com_sq_have_enough_space(struct ena_com_io_sq *io_sq, */ temp = required_buffers / io_sq->llq_info.descs_per_entry + 2; - return ena_com_free_desc(io_sq) > temp; + return ena_com_free_q_entries(io_sq) > temp; } static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq, diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h b/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h index 00e0f056a741..d105c9c56192 100644 --- a/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h +++ b/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h @@ -264,7 +264,9 @@ struct ena_eth_io_rx_cdesc_base { u16 sub_qid; - u16 reserved; + u8 offset; + + u8 reserved; }; /* 8-word format */ @@ -412,4 +414,4 @@ struct ena_eth_io_numa_node_cfg_reg { #define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_SHIFT 31 #define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK BIT(31) -#endif /*_ENA_ETH_IO_H_ */ +#endif /* _ENA_ETH_IO_H_ */ diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c index 830d3711d6ee..e340b65af08c 100644 --- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c +++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c @@ -206,7 +206,7 @@ int ena_get_sset_count(struct net_device *netdev, int sset) if (sset != ETH_SS_STATS) return -EOPNOTSUPP; - return adapter->num_io_queues * (ENA_STATS_ARRAY_TX + ENA_STATS_ARRAY_RX) + return adapter->num_io_queues * (ENA_STATS_ARRAY_TX + ENA_STATS_ARRAY_RX) + ENA_STATS_ARRAY_GLOBAL + ENA_STATS_ARRAY_ENA_COM; } @@ -260,7 +260,6 @@ static void ena_get_strings(struct net_device *netdev, u32 sset, u8 *data) for (i = 0; i < ENA_STATS_ARRAY_GLOBAL; i++) { ena_stats = &ena_stats_global_strings[i]; - memcpy(data, ena_stats->name, ETH_GSTRING_LEN); data += ETH_GSTRING_LEN; } @@ -307,10 +306,8 @@ static int ena_get_coalesce(struct net_device *net_dev, struct ena_adapter *adapter = netdev_priv(net_dev); struct ena_com_dev *ena_dev = adapter->ena_dev; - if (!ena_com_interrupt_moderation_supported(ena_dev)) { - /* the devie doesn't support interrupt moderation */ + if (!ena_com_interrupt_moderation_supported(ena_dev)) return -EOPNOTSUPP; - } coalesce->tx_coalesce_usecs = ena_com_get_nonadaptive_moderation_interval_tx(ena_dev) * @@ -326,7 +323,7 @@ static int ena_get_coalesce(struct net_device *net_dev, return 0; } -static void ena_update_tx_rings_intr_moderation(struct ena_adapter *adapter) +static void ena_update_tx_rings_nonadaptive_intr_moderation(struct ena_adapter *adapter) { unsigned int val; int i; @@ -337,7 +334,7 @@ static void ena_update_tx_rings_intr_moderation(struct ena_adapter *adapter) adapter->tx_ring[i].smoothed_interval = val; } -static void ena_update_rx_rings_intr_moderation(struct ena_adapter *adapter) +static void ena_update_rx_rings_nonadaptive_intr_moderation(struct ena_adapter *adapter) { unsigned int val; int i; @@ -355,24 +352,22 @@ static int ena_set_coalesce(struct net_device *net_dev, struct ena_com_dev *ena_dev = adapter->ena_dev; int rc; - if (!ena_com_interrupt_moderation_supported(ena_dev)) { - /* the devie doesn't support interrupt moderation */ + if (!ena_com_interrupt_moderation_supported(ena_dev)) return -EOPNOTSUPP; - } rc = ena_com_update_nonadaptive_moderation_interval_tx(ena_dev, coalesce->tx_coalesce_usecs); if (rc) return rc; - ena_update_tx_rings_intr_moderation(adapter); + ena_update_tx_rings_nonadaptive_intr_moderation(adapter); rc = 
ena_com_update_nonadaptive_moderation_interval_rx(ena_dev, coalesce->rx_coalesce_usecs); if (rc) return rc; - ena_update_rx_rings_intr_moderation(adapter); + ena_update_rx_rings_nonadaptive_intr_moderation(adapter); if (coalesce->use_adaptive_rx_coalesce && !ena_com_get_adaptive_moderation_enabled(ena_dev)) diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 85b87ed02dd5..46865d5bd7e7 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -1435,6 +1435,8 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring, skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page, rx_info->page_offset, len, ENA_PAGE_SIZE); + /* The offset is non zero only for the first buffer */ + rx_info->page_offset = 0; netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, "rx skb updated. len %d. data_len %d\n", @@ -1590,6 +1592,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi, { u16 next_to_clean = rx_ring->next_to_clean; struct ena_com_rx_ctx ena_rx_ctx; + struct ena_rx_buffer *rx_info; struct ena_adapter *adapter; u32 res_budget, work_done; int rx_copybreak_pkt = 0; @@ -1614,6 +1617,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi, ena_rx_ctx.ena_bufs = rx_ring->ena_bufs; ena_rx_ctx.max_bufs = rx_ring->sgl_size; ena_rx_ctx.descs = 0; + ena_rx_ctx.pkt_offset = 0; rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq, rx_ring->ena_com_io_sq, &ena_rx_ctx); @@ -1623,6 +1627,9 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi, if (unlikely(ena_rx_ctx.descs == 0)) break; + rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id]; + rx_info->page_offset = ena_rx_ctx.pkt_offset; + netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, "rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n", rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto, @@ -1684,7 +1691,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi, rx_ring->next_to_clean = next_to_clean; - refill_required = ena_com_free_desc(rx_ring->ena_com_io_sq); + refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq); refill_threshold = min_t(int, rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER, ENA_RX_REFILL_THRESH_PACKET); @@ -2235,7 +2242,7 @@ static int ena_rss_configure(struct ena_adapter *adapter) rc = ena_rss_init_default(adapter); if (rc && (rc != -EOPNOTSUPP)) { netif_err(adapter, ifup, adapter->netdev, - "Failed to init RSS rc: %d\n", rc); + "Failed to init RSS rc: %d\n", rc); return rc; } } @@ -2308,7 +2315,7 @@ static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid) if (rc) { netif_err(adapter, ifup, adapter->netdev, "Failed to create I/O TX queue num %d rc: %d\n", - qid, rc); + qid, rc); return rc; } @@ -2457,7 +2464,7 @@ static int create_queues_with_size_backoff(struct ena_adapter *adapter) * ones due to past queue allocation failures. 
*/ set_io_rings_size(adapter, adapter->requested_tx_ring_size, - adapter->requested_rx_ring_size); + adapter->requested_rx_ring_size); while (1) { if (ena_xdp_present(adapter)) { @@ -2498,7 +2505,7 @@ err_setup_tx: if (rc != -ENOMEM) { netif_err(adapter, ifup, adapter->netdev, "Queue creation failed with error code %d\n", - rc); + rc); return rc; } @@ -2521,7 +2528,7 @@ err_setup_tx: new_rx_ring_size = cur_rx_ring_size / 2; if (new_tx_ring_size < ENA_MIN_RING_SIZE || - new_rx_ring_size < ENA_MIN_RING_SIZE) { + new_rx_ring_size < ENA_MIN_RING_SIZE) { netif_err(adapter, ifup, adapter->netdev, "Queue creation failed with the smallest possible queue size of %d for both queues. Not retrying with smaller queues\n", ENA_MIN_RING_SIZE); @@ -3080,8 +3087,7 @@ static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb, return qid; } -static void ena_config_host_info(struct ena_com_dev *ena_dev, - struct pci_dev *pdev) +static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pdev) { struct ena_admin_host_info *host_info; int rc; @@ -3111,6 +3117,7 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev, host_info->num_cpus = num_online_cpus(); host_info->driver_supported_features = + ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK | ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK; rc = ena_com_set_host_attributes(ena_dev); @@ -3686,8 +3693,7 @@ static void check_for_empty_rx_ring(struct ena_adapter *adapter) for (i = 0; i < adapter->num_io_queues; i++) { rx_ring = &adapter->rx_ring[i]; - refill_required = - ena_com_free_desc(rx_ring->ena_com_io_sq); + refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq); if (unlikely(refill_required == (rx_ring->ring_size - 1))) { rx_ring->empty_rx_queue++; @@ -3825,11 +3831,11 @@ static void ena_timer_service(struct timer_list *t) mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); } -static int ena_calc_max_io_queue_num(struct pci_dev *pdev, +static u32 ena_calc_max_io_queue_num(struct pci_dev *pdev, struct ena_com_dev *ena_dev, struct ena_com_dev_get_features_ctx *get_feat_ctx) { - int io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues; + u32 io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues; if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { struct ena_admin_queue_ext_feature_fields *max_queue_ext = @@ -4115,8 +4121,8 @@ static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx) */ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { - struct ena_com_dev_get_features_ctx get_feat_ctx; struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 }; + struct ena_com_dev_get_features_ctx get_feat_ctx; struct ena_llq_configurations llq_config; struct ena_com_dev *ena_dev = NULL; struct ena_adapter *adapter; @@ -4160,6 +4166,8 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_free_region; } + ena_dev->ena_min_poll_delay_us = ENA_ADMIN_POLL_DELAY_US; + ena_dev->dmadev = &pdev->dev; rc = ena_device_init(ena_dev, pdev, &get_feat_ctx, &wd_state); @@ -4183,7 +4191,7 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) calc_queue_ctx.get_feat_ctx = &get_feat_ctx; calc_queue_ctx.pdev = pdev; - /* Initial Tx and RX interrupt delay. Assumes 1 usec granularity. + /* Initial TX and RX interrupt delay. Assumes 1 usec granularity. 
* Updated during device initialization with the real granularity */ ena_dev->intr_moder_tx_interval = ENA_INTR_INITIAL_TX_INTERVAL_USECS; @@ -4227,12 +4235,11 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adapter->num_io_queues = max_num_io_queues; adapter->max_num_io_queues = max_num_io_queues; + adapter->last_monitored_tx_qid = 0; adapter->xdp_first_ring = 0; adapter->xdp_num_queues = 0; - adapter->last_monitored_tx_qid = 0; - adapter->rx_copybreak = ENA_DEFAULT_RX_COPYBREAK; adapter->wd_state = wd_state; diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h index 680099afcccf..ba030d260940 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.h +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h @@ -50,12 +50,6 @@ #define DRV_MODULE_GEN_SUBMINOR 0 #define DRV_MODULE_NAME "ena" -#ifndef DRV_MODULE_GENERATION -#define DRV_MODULE_GENERATION \ - __stringify(DRV_MODULE_GEN_MAJOR) "." \ - __stringify(DRV_MODULE_GEN_MINOR) "." \ - __stringify(DRV_MODULE_GEN_SUBMINOR) "K" -#endif #define DEVICE_NAME "Elastic Network Adapter (ENA)" @@ -104,8 +98,6 @@ #define ENA_RX_RSS_TABLE_LOG_SIZE 7 #define ENA_RX_RSS_TABLE_SIZE (1 << ENA_RX_RSS_TABLE_LOG_SIZE) -#define ENA_HASH_KEY_SIZE 40 - /* The number of tx packet completions that will be handled each NAPI poll * cycle is ring_size / ENA_TX_POLL_BUDGET_DIVIDER. */ @@ -137,6 +129,8 @@ #define ENA_IO_IRQ_FIRST_IDX 1 #define ENA_IO_IRQ_IDX(q) (ENA_IO_IRQ_FIRST_IDX + (q)) +#define ENA_ADMIN_POLL_DELAY_US 100 + /* ENA device should send keep alive msg every 1 sec. * We wait for 6 sec just to be on the safe side. */ diff --git a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h index 04fcafcc059c..b514bb1b855d 100644 --- a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h +++ b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h @@ -154,4 +154,4 @@ enum ena_regs_reset_reason_types { #define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_SHIFT 16 #define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_MASK 0xffff0000 -#endif /*_ENA_REGS_H_ */ +#endif /* _ENA_REGS_H_ */ diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c index a58185b1d8bf..3e3711b60d01 100644 --- a/drivers/net/ethernet/apple/bmac.c +++ b/drivers/net/ethernet/apple/bmac.c @@ -1182,7 +1182,7 @@ bmac_get_station_address(struct net_device *dev, unsigned char *ea) int i; unsigned short data; - for (i = 0; i < 6; i++) + for (i = 0; i < 3; i++) { reset_and_select_srom(dev); data = read_srom(dev, i + EnetAddressOffset/2, SROMAddressBits); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c index 86fc77d85fda..743d3b13b39d 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c @@ -88,13 +88,13 @@ static const char aq_ethtool_stat_names[][ETH_GSTRING_LEN] = { "InDroppedDma", }; -static const char aq_ethtool_queue_stat_names[][ETH_GSTRING_LEN] = { - "Queue[%d] InPackets", - "Queue[%d] OutPackets", - "Queue[%d] Restarts", - "Queue[%d] InJumboPackets", - "Queue[%d] InLroPackets", - "Queue[%d] InErrors", +static const char * const aq_ethtool_queue_stat_names[] = { + "%sQueue[%d] InPackets", + "%sQueue[%d] OutPackets", + "%sQueue[%d] Restarts", + "%sQueue[%d] InJumboPackets", + "%sQueue[%d] InLroPackets", + "%sQueue[%d] InErrors", }; #if IS_ENABLED(CONFIG_MACSEC) @@ -166,7 +166,8 @@ static u32 aq_ethtool_n_stats(struct net_device *ndev) 
struct aq_nic_s *nic = netdev_priv(ndev); struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(nic); u32 n_stats = ARRAY_SIZE(aq_ethtool_stat_names) + - ARRAY_SIZE(aq_ethtool_queue_stat_names) * cfg->vecs; + ARRAY_SIZE(aq_ethtool_queue_stat_names) * cfg->vecs * + cfg->tcs; #if IS_ENABLED(CONFIG_MACSEC) if (nic->macsec_cfg) { @@ -223,7 +224,7 @@ static void aq_ethtool_get_drvinfo(struct net_device *ndev, static void aq_ethtool_get_strings(struct net_device *ndev, u32 stringset, u8 *data) { - struct aq_nic_s *aq_nic = netdev_priv(ndev); + struct aq_nic_s *nic = netdev_priv(ndev); struct aq_nic_cfg_s *cfg; u8 *p = data; int i, si; @@ -231,24 +232,35 @@ static void aq_ethtool_get_strings(struct net_device *ndev, int sa; #endif - cfg = aq_nic_get_cfg(aq_nic); + cfg = aq_nic_get_cfg(nic); switch (stringset) { - case ETH_SS_STATS: + case ETH_SS_STATS: { + const int stat_cnt = ARRAY_SIZE(aq_ethtool_queue_stat_names); + char tc_string[8]; + int tc; + + memset(tc_string, 0, sizeof(tc_string)); memcpy(p, aq_ethtool_stat_names, sizeof(aq_ethtool_stat_names)); p = p + sizeof(aq_ethtool_stat_names); - for (i = 0; i < cfg->vecs; i++) { - for (si = 0; - si < ARRAY_SIZE(aq_ethtool_queue_stat_names); - si++) { - snprintf(p, ETH_GSTRING_LEN, - aq_ethtool_queue_stat_names[si], i); - p += ETH_GSTRING_LEN; + + for (tc = 0; tc < cfg->tcs; tc++) { + if (cfg->is_qos) + snprintf(tc_string, 8, "TC%d ", tc); + + for (i = 0; i < cfg->vecs; i++) { + for (si = 0; si < stat_cnt; si++) { + snprintf(p, ETH_GSTRING_LEN, + aq_ethtool_queue_stat_names[si], + tc_string, + AQ_NIC_CFG_TCVEC2RING(cfg, tc, i)); + p += ETH_GSTRING_LEN; + } } } #if IS_ENABLED(CONFIG_MACSEC) - if (!aq_nic->macsec_cfg) + if (!nic->macsec_cfg) break; memcpy(p, aq_macsec_stat_names, sizeof(aq_macsec_stat_names)); @@ -256,7 +268,7 @@ static void aq_ethtool_get_strings(struct net_device *ndev, for (i = 0; i < AQ_MACSEC_MAX_SC; i++) { struct aq_macsec_txsc *aq_txsc; - if (!(test_bit(i, &aq_nic->macsec_cfg->txsc_idx_busy))) + if (!(test_bit(i, &nic->macsec_cfg->txsc_idx_busy))) continue; for (si = 0; @@ -266,7 +278,7 @@ static void aq_ethtool_get_strings(struct net_device *ndev, aq_macsec_txsc_stat_names[si], i); p += ETH_GSTRING_LEN; } - aq_txsc = &aq_nic->macsec_cfg->aq_txsc[i]; + aq_txsc = &nic->macsec_cfg->aq_txsc[i]; for (sa = 0; sa < MACSEC_NUM_AN; sa++) { if (!(test_bit(sa, &aq_txsc->tx_sa_idx_busy))) continue; @@ -283,10 +295,10 @@ static void aq_ethtool_get_strings(struct net_device *ndev, for (i = 0; i < AQ_MACSEC_MAX_SC; i++) { struct aq_macsec_rxsc *aq_rxsc; - if (!(test_bit(i, &aq_nic->macsec_cfg->rxsc_idx_busy))) + if (!(test_bit(i, &nic->macsec_cfg->rxsc_idx_busy))) continue; - aq_rxsc = &aq_nic->macsec_cfg->aq_rxsc[i]; + aq_rxsc = &nic->macsec_cfg->aq_rxsc[i]; for (sa = 0; sa < MACSEC_NUM_AN; sa++) { if (!(test_bit(sa, &aq_rxsc->rx_sa_idx_busy))) continue; @@ -302,6 +314,7 @@ static void aq_ethtool_get_strings(struct net_device *ndev, } #endif break; + } case ETH_SS_PRIV_FLAGS: memcpy(p, aq_ethtool_priv_flag_names, sizeof(aq_ethtool_priv_flag_names)); @@ -780,8 +793,6 @@ static int aq_set_ringparam(struct net_device *ndev, dev_close(ndev); } - aq_nic_free_vectors(aq_nic); - cfg->rxds = max(ring->rx_pending, hw_caps->rxds_min); cfg->rxds = min(cfg->rxds, hw_caps->rxds_max); cfg->rxds = ALIGN(cfg->rxds, AQ_HW_RXD_MULTIPLE); @@ -790,15 +801,10 @@ static int aq_set_ringparam(struct net_device *ndev, cfg->txds = min(cfg->txds, hw_caps->txds_max); cfg->txds = ALIGN(cfg->txds, AQ_HW_TXD_MULTIPLE); - for (aq_nic->aq_vecs = 0; aq_nic->aq_vecs < cfg->vecs; - 
aq_nic->aq_vecs++) { - aq_nic->aq_vec[aq_nic->aq_vecs] = - aq_vec_alloc(aq_nic, aq_nic->aq_vecs, cfg); - if (unlikely(!aq_nic->aq_vec[aq_nic->aq_vecs])) { - err = -ENOMEM; - goto err_exit; - } - } + err = aq_nic_realloc_vectors(aq_nic); + if (err) + goto err_exit; + if (ndev_running) err = dev_open(ndev, NULL); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c index 03ff92bc4a7f..1bc4d33a0ce5 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c @@ -153,6 +153,8 @@ aq_check_approve_fvlan(struct aq_nic_s *aq_nic, struct aq_hw_rx_fltrs_s *rx_fltrs, struct ethtool_rx_flow_spec *fsp) { + struct aq_nic_cfg_s *cfg = &aq_nic->aq_nic_cfg; + if (fsp->location < AQ_RX_FIRST_LOC_FVLANID || fsp->location > AQ_RX_LAST_LOC_FVLANID) { netdev_err(aq_nic->ndev, @@ -170,10 +172,10 @@ aq_check_approve_fvlan(struct aq_nic_s *aq_nic, return -EINVAL; } - if (fsp->ring_cookie > aq_nic->aq_nic_cfg.num_rss_queues) { + if (fsp->ring_cookie > cfg->num_rss_queues * cfg->tcs) { netdev_err(aq_nic->ndev, "ethtool: queue number must be in range [0, %d]", - aq_nic->aq_nic_cfg.num_rss_queues - 1); + cfg->num_rss_queues * cfg->tcs - 1); return -EINVAL; } return 0; @@ -262,6 +264,7 @@ static bool __must_check aq_rule_is_not_correct(struct aq_nic_s *aq_nic, struct ethtool_rx_flow_spec *fsp) { + struct aq_nic_cfg_s *cfg = &aq_nic->aq_nic_cfg; bool rule_is_not_correct = false; if (!aq_nic) { @@ -274,11 +277,11 @@ aq_rule_is_not_correct(struct aq_nic_s *aq_nic, } else if (aq_check_filter(aq_nic, fsp)) { rule_is_not_correct = true; } else if (fsp->ring_cookie != RX_CLS_FLOW_DISC) { - if (fsp->ring_cookie >= aq_nic->aq_nic_cfg.num_rss_queues) { + if (fsp->ring_cookie >= cfg->num_rss_queues * cfg->tcs) { netdev_err(aq_nic->ndev, "ethtool: The specified action is invalid.\n" "Maximum allowable value action is %u.\n", - aq_nic->aq_nic_cfg.num_rss_queues - 1); + cfg->num_rss_queues * cfg->tcs - 1); rule_is_not_correct = true; } } diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h index 03fea9469f01..ed5b465bc664 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h @@ -18,6 +18,12 @@ #define AQ_HW_MAC_COUNTER_HZ 312500000ll #define AQ_HW_PHY_COUNTER_HZ 160000000ll +enum aq_tc_mode { + AQ_TC_MODE_INVALID = -1, + AQ_TC_MODE_8TCS, + AQ_TC_MODE_4TCS, +}; + #define AQ_RX_FIRST_LOC_FVLANID 0U #define AQ_RX_LAST_LOC_FVLANID 15U #define AQ_RX_FIRST_LOC_FETHERT 16U @@ -29,6 +35,9 @@ (AQ_RX_LAST_LOC_FVLANID - AQ_RX_FIRST_LOC_FVLANID + 1U) #define AQ_RX_QUEUE_NOT_ASSIGNED 0xFFU +/* Used for rate to Mbps conversion */ +#define AQ_MBPS_DIVISOR 125000 /* 1000000 / 8 */ + /* NIC H/W capabilities */ struct aq_hw_caps_s { u64 hw_features; @@ -46,7 +55,7 @@ struct aq_hw_caps_s { u32 mac_regs_count; u32 hw_alive_check_addr; u8 msix_irqs; - u8 tcs; + u8 tcs_max; u8 rxd_alignment; u8 rxd_size; u8 txd_alignment; @@ -118,8 +127,11 @@ struct aq_stats_s { #define AQ_HW_TXD_MULTIPLE 8U #define AQ_HW_RXD_MULTIPLE 8U +#define AQ_HW_QUEUES_MAX 32U #define AQ_HW_MULTICAST_ADDRESS_MAX 32U +#define AQ_HW_PTP_TC 2U + #define AQ_HW_LED_BLINK 0x2U #define AQ_HW_LED_DEFAULT 0x0U @@ -268,6 +280,8 @@ struct aq_hw_ops { int (*hw_rss_hash_set)(struct aq_hw_s *self, struct aq_rss_parameters *rss_params); + int (*hw_tc_rate_limit_set)(struct aq_hw_s *self); + int (*hw_get_regs)(struct aq_hw_s *self, const struct aq_hw_caps_s 
*aq_hw_caps, u32 *regs_buff); @@ -279,10 +293,6 @@ struct aq_hw_ops { int (*hw_set_offload)(struct aq_hw_s *self, struct aq_nic_cfg_s *aq_nic_cfg); - int (*hw_tx_tc_mode_get)(struct aq_hw_s *self, u32 *tc_mode); - - int (*hw_rx_tc_mode_get)(struct aq_hw_s *self, u32 *tc_mode); - int (*hw_ring_hwts_rx_fill)(struct aq_hw_s *self, struct aq_ring_s *aq_ring); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c index 7dbf49adcea6..342c5179f846 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c @@ -79,3 +79,29 @@ int aq_hw_err_from_flags(struct aq_hw_s *hw) err_exit: return err; } + +int aq_hw_num_tcs(struct aq_hw_s *hw) +{ + switch (hw->aq_nic_cfg->tc_mode) { + case AQ_TC_MODE_8TCS: + return 8; + case AQ_TC_MODE_4TCS: + return 4; + default: + break; + } + + return 1; +} + +int aq_hw_q_per_tc(struct aq_hw_s *hw) +{ + switch (hw->aq_nic_cfg->tc_mode) { + case AQ_TC_MODE_8TCS: + return 4; + case AQ_TC_MODE_4TCS: + return 8; + default: + return 4; + } +} diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h index 9ef82d487e01..32aa5f2fb840 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h @@ -34,5 +34,7 @@ u32 aq_hw_read_reg(struct aq_hw_s *hw, u32 reg); void aq_hw_write_reg(struct aq_hw_s *hw, u32 reg, u32 value); u64 aq_hw_read_reg64(struct aq_hw_s *hw, u32 reg); int aq_hw_err_from_flags(struct aq_hw_s *hw); +int aq_hw_num_tcs(struct aq_hw_s *hw); +int aq_hw_q_per_tc(struct aq_hw_s *hw); #endif /* AQ_HW_UTILS_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c index 91870ceaf3fe..4a6dfac857ca 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c @@ -478,7 +478,7 @@ static int aq_mdo_add_secy(struct macsec_context *ctx) set_bit(txsc_idx, &cfg->txsc_idx_busy); - return 0; + return ret; } static int aq_mdo_upd_secy(struct macsec_context *ctx) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c index 9fcab646cbd5..8a1da044e908 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c @@ -12,11 +12,13 @@ #include "aq_ethtool.h" #include "aq_ptp.h" #include "aq_filters.h" +#include "aq_hw_utils.h" #include <linux/netdevice.h> #include <linux/module.h> #include <linux/ip.h> #include <linux/udp.h> +#include <net/pkt_cls.h> MODULE_LICENSE("GPL v2"); MODULE_AUTHOR(AQ_CFG_DRV_AUTHOR); @@ -38,7 +40,7 @@ struct net_device *aq_ndev_alloc(void) struct net_device *ndev = NULL; struct aq_nic_s *aq_nic = NULL; - ndev = alloc_etherdev_mq(sizeof(struct aq_nic_s), AQ_CFG_VECS_MAX); + ndev = alloc_etherdev_mq(sizeof(struct aq_nic_s), AQ_HW_QUEUES_MAX); if (!ndev) return NULL; @@ -330,6 +332,73 @@ static int aq_ndo_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, return 0; } +static int aq_validate_mqprio_opt(struct aq_nic_s *self, + struct tc_mqprio_qopt_offload *mqprio, + const unsigned int num_tc) +{ + const bool has_min_rate = !!(mqprio->flags & TC_MQPRIO_F_MIN_RATE); + struct aq_nic_cfg_s *aq_nic_cfg = aq_nic_get_cfg(self); + const unsigned int tcs_max = min_t(u8, aq_nic_cfg->aq_hw_caps->tcs_max, + AQ_CFG_TCS_MAX); + + if (num_tc > tcs_max) { + netdev_err(self->ndev, "Too 
many TCs requested\n"); + return -EOPNOTSUPP; + } + + if (num_tc != 0 && !is_power_of_2(num_tc)) { + netdev_err(self->ndev, "TC count should be power of 2\n"); + return -EOPNOTSUPP; + } + + if (has_min_rate && !ATL_HW_IS_CHIP_FEATURE(self->aq_hw, ANTIGUA)) { + netdev_err(self->ndev, "Min tx rate is not supported\n"); + return -EOPNOTSUPP; + } + + return 0; +} + +static int aq_ndo_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) +{ + struct tc_mqprio_qopt_offload *mqprio = type_data; + struct aq_nic_s *aq_nic = netdev_priv(dev); + bool has_min_rate; + bool has_max_rate; + int err; + int i; + + if (type != TC_SETUP_QDISC_MQPRIO) + return -EOPNOTSUPP; + + has_min_rate = !!(mqprio->flags & TC_MQPRIO_F_MIN_RATE); + has_max_rate = !!(mqprio->flags & TC_MQPRIO_F_MAX_RATE); + + err = aq_validate_mqprio_opt(aq_nic, mqprio, mqprio->qopt.num_tc); + if (err) + return err; + + for (i = 0; i < mqprio->qopt.num_tc; i++) { + if (has_max_rate) { + u64 max_rate = mqprio->max_rate[i]; + + do_div(max_rate, AQ_MBPS_DIVISOR); + aq_nic_setup_tc_max_rate(aq_nic, i, (u32)max_rate); + } + + if (has_min_rate) { + u64 min_rate = mqprio->min_rate[i]; + + do_div(min_rate, AQ_MBPS_DIVISOR); + aq_nic_setup_tc_min_rate(aq_nic, i, (u32)min_rate); + } + } + + return aq_nic_setup_tc_mqprio(aq_nic, mqprio->qopt.num_tc, + mqprio->qopt.prio_tc_map); +} + static const struct net_device_ops aq_ndev_ops = { .ndo_open = aq_ndev_open, .ndo_stop = aq_ndev_close, @@ -341,6 +410,7 @@ static const struct net_device_ops aq_ndev_ops = { .ndo_do_ioctl = aq_ndev_ioctl, .ndo_vlan_rx_add_vid = aq_ndo_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = aq_ndo_vlan_rx_kill_vid, + .ndo_setup_tc = aq_ndo_setup_tc, }; static int __init aq_ndev_init_module(void) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index 1c6d12deb47a..4435c6374f7e 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -26,6 +26,7 @@ #include <linux/ip.h> #include <linux/tcp.h> #include <net/ip.h> +#include <net/pkt_cls.h> static unsigned int aq_itr = AQ_CFG_INTERRUPT_MODERATION_AUTO; module_param_named(aq_itr, aq_itr, uint, 0644); @@ -64,10 +65,38 @@ static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues) rss_params->indirection_table[i] = i & (num_rss_queues - 1); } +/* Recalculate the number of vectors */ +static void aq_nic_cfg_update_num_vecs(struct aq_nic_s *self) +{ + struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg; + + cfg->vecs = min(cfg->aq_hw_caps->vecs, AQ_CFG_VECS_DEF); + cfg->vecs = min(cfg->vecs, num_online_cpus()); + if (self->irqvecs > AQ_HW_SERVICE_IRQS) + cfg->vecs = min(cfg->vecs, self->irqvecs - AQ_HW_SERVICE_IRQS); + /* cfg->vecs should be power of 2 for RSS */ + cfg->vecs = rounddown_pow_of_two(cfg->vecs); + + if (ATL_HW_IS_CHIP_FEATURE(self->aq_hw, ANTIGUA)) { + if (cfg->tcs > 2) + cfg->vecs = min(cfg->vecs, 4U); + } + + if (cfg->vecs <= 4) + cfg->tc_mode = AQ_TC_MODE_8TCS; + else + cfg->tc_mode = AQ_TC_MODE_4TCS; + + /*rss rings */ + cfg->num_rss_queues = min(cfg->vecs, AQ_CFG_NUM_RSS_QUEUES_DEF); + aq_nic_rss_init(self, cfg->num_rss_queues); +} + /* Checks hw_caps and 'corrects' aq_nic_cfg in runtime */ void aq_nic_cfg_start(struct aq_nic_s *self) { struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg; + int i; cfg->tcs = AQ_CFG_TCS_DEF; @@ -79,7 +108,6 @@ void aq_nic_cfg_start(struct aq_nic_s *self) cfg->rxpageorder = AQ_CFG_RX_PAGEORDER; cfg->is_rss = AQ_CFG_IS_RSS_DEF; - cfg->num_rss_queues 
= AQ_CFG_NUM_RSS_QUEUES_DEF; cfg->aq_rss.base_cpu_number = AQ_CFG_RSS_BASE_CPU_NUM_DEF; cfg->fc.req = AQ_CFG_FC_MODE; cfg->wol = AQ_CFG_WOL_MODES; @@ -89,29 +117,13 @@ void aq_nic_cfg_start(struct aq_nic_s *self) cfg->is_autoneg = AQ_CFG_IS_AUTONEG_DEF; cfg->is_lro = AQ_CFG_IS_LRO_DEF; + cfg->is_ptp = true; /*descriptors */ cfg->rxds = min(cfg->aq_hw_caps->rxds_max, AQ_CFG_RXDS_DEF); cfg->txds = min(cfg->aq_hw_caps->txds_max, AQ_CFG_TXDS_DEF); - /*rss rings */ - cfg->vecs = min(cfg->aq_hw_caps->vecs, AQ_CFG_VECS_DEF); - cfg->vecs = min(cfg->vecs, num_online_cpus()); - if (self->irqvecs > AQ_HW_SERVICE_IRQS) - cfg->vecs = min(cfg->vecs, self->irqvecs - AQ_HW_SERVICE_IRQS); - /* cfg->vecs should be power of 2 for RSS */ - if (cfg->vecs >= 8U) - cfg->vecs = 8U; - else if (cfg->vecs >= 4U) - cfg->vecs = 4U; - else if (cfg->vecs >= 2U) - cfg->vecs = 2U; - else - cfg->vecs = 1U; - - cfg->num_rss_queues = min(cfg->vecs, AQ_CFG_NUM_RSS_QUEUES_DEF); - - aq_nic_rss_init(self, cfg->num_rss_queues); + aq_nic_cfg_update_num_vecs(self); cfg->irq_type = aq_pci_func_get_irq_type(self); @@ -136,6 +148,9 @@ void aq_nic_cfg_start(struct aq_nic_s *self) cfg->is_vlan_rx_strip = !!(cfg->features & NETIF_F_HW_VLAN_CTAG_RX); cfg->is_vlan_tx_insert = !!(cfg->features & NETIF_F_HW_VLAN_CTAG_TX); cfg->is_vlan_force_promisc = true; + + for (i = 0; i < sizeof(cfg->prio_tc_map); i++) + cfg->prio_tc_map[i] = cfg->tcs * i / 8; } static int aq_nic_update_link_status(struct aq_nic_s *self) @@ -181,6 +196,9 @@ static int aq_nic_update_link_status(struct aq_nic_s *self) #if IS_ENABLED(CONFIG_MACSEC) aq_macsec_enable(self); #endif + if (self->aq_hw_ops->hw_tc_rate_limit_set) + self->aq_hw_ops->hw_tc_rate_limit_set(self->aq_hw); + netif_tx_wake_all_queues(self->ndev); } if (netif_carrier_ok(self->ndev) && !self->link_status.mbps) { @@ -399,21 +417,29 @@ int aq_nic_init(struct aq_nic_s *self) err = aq_phy_init(self->aq_hw); } - for (i = 0U, aq_vec = self->aq_vec[0]; - self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) + for (i = 0U; i < self->aq_vecs; i++) { + aq_vec = self->aq_vec[i]; + err = aq_vec_ring_alloc(aq_vec, self, i, + aq_nic_get_cfg(self)); + if (err) + goto err_exit; + aq_vec_init(aq_vec, self->aq_hw_ops, self->aq_hw); + } - err = aq_ptp_init(self, self->irqvecs - 1); - if (err < 0) - goto err_exit; + if (aq_nic_get_cfg(self)->is_ptp) { + err = aq_ptp_init(self, self->irqvecs - 1); + if (err < 0) + goto err_exit; - err = aq_ptp_ring_alloc(self); - if (err < 0) - goto err_exit; + err = aq_ptp_ring_alloc(self); + if (err < 0) + goto err_exit; - err = aq_ptp_ring_init(self); - if (err < 0) - goto err_exit; + err = aq_ptp_ring_init(self); + if (err < 0) + goto err_exit; + } netif_carrier_off(self->ndev); @@ -424,9 +450,12 @@ err_exit: int aq_nic_start(struct aq_nic_s *self) { struct aq_vec_s *aq_vec = NULL; + struct aq_nic_cfg_s *cfg; unsigned int i = 0U; int err = 0; + cfg = aq_nic_get_cfg(self); + err = self->aq_hw_ops->hw_multicast_list_set(self->aq_hw, self->mc_list.ar, self->mc_list.count); @@ -464,7 +493,7 @@ int aq_nic_start(struct aq_nic_s *self) timer_setup(&self->service_timer, aq_nic_service_timer_cb, 0); aq_nic_service_timer_cb(&self->service_timer); - if (self->aq_nic_cfg.is_polling) { + if (cfg->is_polling) { timer_setup(&self->polling_timer, aq_nic_polling_timer_cb, 0); mod_timer(&self->polling_timer, jiffies + AQ_CFG_POLLING_TIMER_INTERVAL); @@ -482,16 +511,16 @@ int aq_nic_start(struct aq_nic_s *self) if (err < 0) goto err_exit; - if (self->aq_nic_cfg.link_irq_vec) { + if (cfg->link_irq_vec) { int 
irqvec = pci_irq_vector(self->pdev, - self->aq_nic_cfg.link_irq_vec); + cfg->link_irq_vec); err = request_threaded_irq(irqvec, NULL, aq_linkstate_threaded_isr, IRQF_SHARED | IRQF_ONESHOT, self->ndev->name, self); if (err < 0) goto err_exit; - self->msix_entry_mask |= (1 << self->aq_nic_cfg.link_irq_vec); + self->msix_entry_mask |= (1 << cfg->link_irq_vec); } err = self->aq_hw_ops->hw_irq_enable(self->aq_hw, @@ -500,14 +529,21 @@ int aq_nic_start(struct aq_nic_s *self) goto err_exit; } - err = netif_set_real_num_tx_queues(self->ndev, self->aq_vecs); + err = netif_set_real_num_tx_queues(self->ndev, + self->aq_vecs * cfg->tcs); if (err < 0) goto err_exit; - err = netif_set_real_num_rx_queues(self->ndev, self->aq_vecs); + err = netif_set_real_num_rx_queues(self->ndev, + self->aq_vecs * cfg->tcs); if (err < 0) goto err_exit; + for (i = 0; i < cfg->tcs; i++) { + u16 offset = self->aq_vecs * i; + + netdev_set_tc_queue(self->ndev, i, self->aq_vecs, offset); + } netif_tx_start_all_queues(self->ndev); err_exit: @@ -518,6 +554,8 @@ unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb, struct aq_ring_s *ring) { unsigned int nr_frags = skb_shinfo(skb)->nr_frags; + struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(self); + struct device *dev = aq_nic_get_dev(self); struct aq_ring_buff_s *first = NULL; u8 ipver = ip_hdr(skb)->version; struct aq_ring_buff_s *dx_buff; @@ -559,7 +597,7 @@ unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb, need_context_tag = true; } - if (self->aq_nic_cfg.is_vlan_tx_insert && skb_vlan_tag_present(skb)) { + if (cfg->is_vlan_tx_insert && skb_vlan_tag_present(skb)) { dx_buff->vlan_tx_tag = skb_vlan_tag_get(skb); dx_buff->len_pkt = skb->len; dx_buff->is_vlan = 1U; @@ -574,12 +612,12 @@ unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb, } dx_buff->len = skb_headlen(skb); - dx_buff->pa = dma_map_single(aq_nic_get_dev(self), + dx_buff->pa = dma_map_single(dev, skb->data, dx_buff->len, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(aq_nic_get_dev(self), dx_buff->pa))) { + if (unlikely(dma_mapping_error(dev, dx_buff->pa))) { ret = 0; goto exit; } @@ -611,13 +649,13 @@ unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb, else buff_size = frag_len; - frag_pa = skb_frag_dma_map(aq_nic_get_dev(self), + frag_pa = skb_frag_dma_map(dev, frag, buff_offset, buff_size, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(aq_nic_get_dev(self), + if (unlikely(dma_mapping_error(dev, frag_pa))) goto mapping_error; @@ -651,12 +689,12 @@ mapping_error: if (!(dx_buff->is_gso_tcp || dx_buff->is_gso_udp) && !dx_buff->is_vlan && dx_buff->pa) { if (unlikely(dx_buff->is_sop)) { - dma_unmap_single(aq_nic_get_dev(self), + dma_unmap_single(dev, dx_buff->pa, dx_buff->len, DMA_TO_DEVICE); } else { - dma_unmap_page(aq_nic_get_dev(self), + dma_unmap_page(dev, dx_buff->pa, dx_buff->len, DMA_TO_DEVICE); @@ -670,15 +708,16 @@ exit: int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb) { - unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs; + struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(self); + unsigned int vec = skb->queue_mapping % cfg->vecs; + unsigned int tc = skb->queue_mapping / cfg->vecs; struct aq_ring_s *ring = NULL; unsigned int frags = 0U; int err = NETDEV_TX_OK; - unsigned int tc = 0U; frags = skb_shinfo(skb)->nr_frags + 1; - ring = self->aq_ring_tx[AQ_NIC_TCVEC2RING(self, tc, vec)]; + ring = self->aq_ring_tx[AQ_NIC_CFG_TCVEC2RING(cfg, tc, vec)]; if (frags > AQ_CFG_SKB_FRAGS_MAX) { dev_kfree_skb_any(skb); @@ -687,13 
+726,14 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb) aq_ring_update_queue_state(ring); - if (self->aq_nic_cfg.priv_flags & BIT(AQ_HW_LOOPBACK_DMA_NET)) { + if (cfg->priv_flags & BIT(AQ_HW_LOOPBACK_DMA_NET)) { err = NETDEV_TX_BUSY; goto err_exit; } /* Above status update may stop the queue. Check this. */ - if (__netif_subqueue_stopped(self->ndev, ring->idx)) { + if (__netif_subqueue_stopped(self->ndev, + AQ_NIC_RING2QMAP(self, ring->idx))) { err = NETDEV_TX_BUSY; goto err_exit; } @@ -823,6 +863,7 @@ u64 *aq_nic_get_stats(struct aq_nic_s *self, u64 *data) struct aq_stats_s *stats; unsigned int count = 0U; unsigned int i = 0U; + unsigned int tc; if (self->aq_fw_ops->update_stats) { mutex_lock(&self->fwreq_mutex); @@ -861,10 +902,13 @@ u64 *aq_nic_get_stats(struct aq_nic_s *self, u64 *data) data += i; - for (i = 0U, aq_vec = self->aq_vec[0]; - aq_vec && self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) { - data += count; - aq_vec_get_sw_stats(aq_vec, data, &count); + for (tc = 0U; tc < self->aq_nic_cfg.tcs; tc++) { + for (i = 0U, aq_vec = self->aq_vec[0]; + aq_vec && self->aq_vecs > i; + ++i, aq_vec = self->aq_vec[i]) { + data += count; + aq_vec_get_sw_stats(aq_vec, tc, data, &count); + } } data += count; @@ -1145,9 +1189,11 @@ void aq_nic_deinit(struct aq_nic_s *self, bool link_down) if (!self) goto err_exit; - for (i = 0U, aq_vec = self->aq_vec[0]; - self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) + for (i = 0U; i < self->aq_vecs; i++) { + aq_vec = self->aq_vec[i]; aq_vec_deinit(aq_vec); + aq_vec_ring_free(aq_vec); + } aq_ptp_unregister(self); aq_ptp_ring_deinit(self); @@ -1180,6 +1226,22 @@ void aq_nic_free_vectors(struct aq_nic_s *self) err_exit:; } +int aq_nic_realloc_vectors(struct aq_nic_s *self) +{ + struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(self); + + aq_nic_free_vectors(self); + + for (self->aq_vecs = 0; self->aq_vecs < cfg->vecs; self->aq_vecs++) { + self->aq_vec[self->aq_vecs] = aq_vec_alloc(self, self->aq_vecs, + cfg); + if (unlikely(!self->aq_vec[self->aq_vecs])) + return -ENOMEM; + } + + return 0; +} + void aq_nic_shutdown(struct aq_nic_s *self) { int err = 0; @@ -1245,3 +1307,98 @@ void aq_nic_release_filter(struct aq_nic_s *self, enum aq_rx_filter_type type, break; } } + +int aq_nic_setup_tc_mqprio(struct aq_nic_s *self, u32 tcs, u8 *prio_tc_map) +{ + struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg; + const unsigned int prev_vecs = cfg->vecs; + bool ndev_running; + int err = 0; + int i; + + /* if already the same configuration or + * disable request (tcs is 0) and we already is disabled + */ + if (tcs == cfg->tcs || (tcs == 0 && !cfg->is_qos)) + return 0; + + ndev_running = netif_running(self->ndev); + if (ndev_running) + dev_close(self->ndev); + + cfg->tcs = tcs; + if (cfg->tcs == 0) + cfg->tcs = 1; + if (prio_tc_map) + memcpy(cfg->prio_tc_map, prio_tc_map, sizeof(cfg->prio_tc_map)); + else + for (i = 0; i < sizeof(cfg->prio_tc_map); i++) + cfg->prio_tc_map[i] = cfg->tcs * i / 8; + + cfg->is_qos = (tcs != 0 ? 
true : false); + cfg->is_ptp = (cfg->tcs <= AQ_HW_PTP_TC); + if (!cfg->is_ptp) + netdev_warn(self->ndev, "%s\n", + "PTP is auto disabled due to requested TC count."); + + netdev_set_num_tc(self->ndev, cfg->tcs); + + /* Changing the number of TCs might change the number of vectors */ + aq_nic_cfg_update_num_vecs(self); + if (prev_vecs != cfg->vecs) { + err = aq_nic_realloc_vectors(self); + if (err) + goto err_exit; + } + + if (ndev_running) + err = dev_open(self->ndev, NULL); + +err_exit: + return err; +} + +int aq_nic_setup_tc_max_rate(struct aq_nic_s *self, const unsigned int tc, + const u32 max_rate) +{ + struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg; + + if (tc >= AQ_CFG_TCS_MAX) + return -EINVAL; + + if (max_rate && max_rate < 10) { + netdev_warn(self->ndev, + "Setting %s to the minimum usable value of %dMbps.\n", + "max rate", 10); + cfg->tc_max_rate[tc] = 10; + } else { + cfg->tc_max_rate[tc] = max_rate; + } + + return 0; +} + +int aq_nic_setup_tc_min_rate(struct aq_nic_s *self, const unsigned int tc, + const u32 min_rate) +{ + struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg; + + if (tc >= AQ_CFG_TCS_MAX) + return -EINVAL; + + if (min_rate) + set_bit(tc, &cfg->tc_min_rate_msk); + else + clear_bit(tc, &cfg->tc_min_rate_msk); + + if (min_rate && min_rate < 20) { + netdev_warn(self->ndev, + "Setting %s to the minimum usable value of %dMbps.\n", + "min rate", 20); + cfg->tc_min_rate[tc] = 20; + } else { + cfg->tc_min_rate[tc] = min_rate; + } + + return 0; +} diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h index 0663b8d0220d..2ab003065e62 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h @@ -59,8 +59,15 @@ struct aq_nic_cfg_s { bool is_polling; bool is_rss; bool is_lro; + bool is_qos; + bool is_ptp; + enum aq_tc_mode tc_mode; u32 priv_flags; u8 tcs; + u8 prio_tc_map[8]; + u32 tc_max_rate[AQ_CFG_TCS_MAX]; + unsigned long tc_min_rate_msk; + u32 tc_min_rate[AQ_CFG_TCS_MAX]; struct aq_rss_parameters aq_rss; u32 eee_speeds; }; @@ -77,8 +84,16 @@ struct aq_nic_cfg_s { #define AQ_NIC_WOL_MODES (WAKE_MAGIC |\ WAKE_PHY) -#define AQ_NIC_TCVEC2RING(_NIC_, _TC_, _VEC_) \ - ((_TC_) * AQ_CFG_TCS_MAX + (_VEC_)) +#define AQ_NIC_CFG_RING_PER_TC(_NIC_CFG_) \ + (((_NIC_CFG_)->tc_mode == AQ_TC_MODE_4TCS) ? 
8 : 4) + +#define AQ_NIC_CFG_TCVEC2RING(_NIC_CFG_, _TC_, _VEC_) \ + ((_TC_) * AQ_NIC_CFG_RING_PER_TC(_NIC_CFG_) + (_VEC_)) + +#define AQ_NIC_RING2QMAP(_NIC_, _ID_) \ + ((_ID_) / AQ_NIC_CFG_RING_PER_TC(&(_NIC_)->aq_nic_cfg) * \ + (_NIC_)->aq_vecs + \ + ((_ID_) % AQ_NIC_CFG_RING_PER_TC(&(_NIC_)->aq_nic_cfg))) struct aq_hw_rx_fl2 { struct aq_rx_filter_vlan aq_vlans[AQ_VLAN_MAX_FILTERS]; @@ -104,7 +119,7 @@ struct aq_nic_s { atomic_t flags; u32 msg_enable; struct aq_vec_s *aq_vec[AQ_CFG_VECS_MAX]; - struct aq_ring_s *aq_ring_tx[AQ_CFG_VECS_MAX * AQ_CFG_TCS_MAX]; + struct aq_ring_s *aq_ring_tx[AQ_HW_QUEUES_MAX]; struct aq_hw_s *aq_hw; struct net_device *ndev; unsigned int aq_vecs; @@ -164,6 +179,7 @@ void aq_nic_deinit(struct aq_nic_s *self, bool link_down); void aq_nic_set_power(struct aq_nic_s *self); void aq_nic_free_hot_resources(struct aq_nic_s *self); void aq_nic_free_vectors(struct aq_nic_s *self); +int aq_nic_realloc_vectors(struct aq_nic_s *self); int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu); int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev); int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags); @@ -181,4 +197,9 @@ void aq_nic_shutdown(struct aq_nic_s *self); u8 aq_nic_reserve_filter(struct aq_nic_s *self, enum aq_rx_filter_type type); void aq_nic_release_filter(struct aq_nic_s *self, enum aq_rx_filter_type type, u32 location); +int aq_nic_setup_tc_mqprio(struct aq_nic_s *self, u32 tcs, u8 *prio_tc_map); +int aq_nic_setup_tc_max_rate(struct aq_nic_s *self, const unsigned int tc, + const u32 max_rate); +int aq_nic_setup_tc_min_rate(struct aq_nic_s *self, const unsigned int tc, + const u32 min_rate); #endif /* AQ_NIC_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c index d10fff8a8c71..41c0f560f95b 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c @@ -431,6 +431,9 @@ static int atl_resume_common(struct device *dev, bool deep) netif_tx_start_all_queues(nic->ndev); err_exit: + if (ret < 0) + aq_nic_deinit(nic, true); + rtnl_unlock(); return ret; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c index 58e8c641e8b3..599ced261b2a 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c @@ -945,26 +945,29 @@ void aq_ptp_ring_deinit(struct aq_nic_s *aq_nic) #define PTP_4TC_RING_IDX 16 #define PTP_HWST_RING_IDX 31 +/* Index must be 8 (8 TCs) or 16 (4 TCs). + * It depends on Traffic Class mode. + */ +static unsigned int ptp_ring_idx(const enum aq_tc_mode tc_mode) +{ + if (tc_mode == AQ_TC_MODE_8TCS) + return PTP_8TC_RING_IDX; + + return PTP_4TC_RING_IDX; +} + int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic) { struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp; unsigned int tx_ring_idx, rx_ring_idx; struct aq_ring_s *hwts; - u32 tx_tc_mode, rx_tc_mode; struct aq_ring_s *ring; int err; if (!aq_ptp) return 0; - /* Index must to be 8 (8 TCs) or 16 (4 TCs). - * It depends from Traffic Class mode. 
- */ - aq_nic->aq_hw_ops->hw_tx_tc_mode_get(aq_nic->aq_hw, &tx_tc_mode); - if (tx_tc_mode == 0) - tx_ring_idx = PTP_8TC_RING_IDX; - else - tx_ring_idx = PTP_4TC_RING_IDX; + tx_ring_idx = ptp_ring_idx(aq_nic->aq_nic_cfg.tc_mode); ring = aq_ring_tx_alloc(&aq_ptp->ptp_tx, aq_nic, tx_ring_idx, &aq_nic->aq_nic_cfg); @@ -973,11 +976,7 @@ int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic) goto err_exit; } - aq_nic->aq_hw_ops->hw_rx_tc_mode_get(aq_nic->aq_hw, &rx_tc_mode); - if (rx_tc_mode == 0) - rx_ring_idx = PTP_8TC_RING_IDX; - else - rx_ring_idx = PTP_4TC_RING_IDX; + rx_ring_idx = ptp_ring_idx(aq_nic->aq_nic_cfg.tc_mode); ring = aq_ring_rx_alloc(&aq_ptp->ptp_rx, aq_nic, rx_ring_idx, &aq_nic->aq_nic_cfg); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index bae95a618560..68fdb3994088 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -232,8 +232,11 @@ void aq_ring_queue_wake(struct aq_ring_s *ring) { struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic); - if (__netif_subqueue_stopped(ndev, ring->idx)) { - netif_wake_subqueue(ndev, ring->idx); + if (__netif_subqueue_stopped(ndev, + AQ_NIC_RING2QMAP(ring->aq_nic, + ring->idx))) { + netif_wake_subqueue(ndev, + AQ_NIC_RING2QMAP(ring->aq_nic, ring->idx)); ring->stats.tx.queue_restarts++; } } @@ -242,8 +245,11 @@ void aq_ring_queue_stop(struct aq_ring_s *ring) { struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic); - if (!__netif_subqueue_stopped(ndev, ring->idx)) - netif_stop_subqueue(ndev, ring->idx); + if (!__netif_subqueue_stopped(ndev, + AQ_NIC_RING2QMAP(ring->aq_nic, + ring->idx))) + netif_stop_subqueue(ndev, + AQ_NIC_RING2QMAP(ring->aq_nic, ring->idx)); } bool aq_ring_tx_clean(struct aq_ring_s *self) @@ -466,7 +472,10 @@ int aq_ring_rx_clean(struct aq_ring_s *self, buff->is_hash_l4 ? PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_NONE); /* Send all PTP traffic to 0 queue */ - skb_record_rx_queue(skb, is_ptp_ring ? 0 : self->idx); + skb_record_rx_queue(skb, + is_ptp_ring ? 
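+			  /* PTP frames always map to queue 0; other rings are translated to their stack queue, e.g. with 4 vectors in 4-TC mode (8 rings per TC), ring 9 maps to (9 / 8) * 4 + (9 % 8) = 5 */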
0 + : AQ_NIC_RING2QMAP(self->aq_nic, + self->idx)); ++self->stats.rx.packets; self->stats.rx.bytes += skb->len; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c index f40a427970dc..d1d43c8ce400 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c @@ -103,16 +103,11 @@ err_exit: struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx, struct aq_nic_cfg_s *aq_nic_cfg) { - struct aq_ring_s *ring = NULL; struct aq_vec_s *self = NULL; - unsigned int i = 0U; - int err = 0; self = kzalloc(sizeof(*self), GFP_KERNEL); - if (!self) { - err = -ENOMEM; + if (!self) goto err_exit; - } self->aq_nic = aq_nic; self->aq_ring_param.vec_idx = idx; @@ -128,10 +123,20 @@ struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx, netif_napi_add(aq_nic_get_ndev(aq_nic), &self->napi, aq_vec_poll, AQ_CFG_NAPI_WEIGHT); +err_exit: + return self; +} + +int aq_vec_ring_alloc(struct aq_vec_s *self, struct aq_nic_s *aq_nic, + unsigned int idx, struct aq_nic_cfg_s *aq_nic_cfg) +{ + struct aq_ring_s *ring = NULL; + unsigned int i = 0U; + int err = 0; + for (i = 0; i < aq_nic_cfg->tcs; ++i) { - unsigned int idx_ring = AQ_NIC_TCVEC2RING(self->nic, - self->tx_rings, - self->aq_ring_param.vec_idx); + const unsigned int idx_ring = AQ_NIC_CFG_TCVEC2RING(aq_nic_cfg, + i, idx); ring = aq_ring_tx_alloc(&self->ring[i][AQ_VEC_TX_ID], aq_nic, idx_ring, aq_nic_cfg); @@ -156,11 +161,11 @@ struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx, err_exit: if (err < 0) { - aq_vec_free(self); + aq_vec_ring_free(self); self = NULL; } - return self; + return err; } int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops, @@ -270,6 +275,18 @@ err_exit:; void aq_vec_free(struct aq_vec_s *self) { + if (!self) + goto err_exit; + + netif_napi_del(&self->napi); + + kfree(self); + +err_exit:; +} + +void aq_vec_ring_free(struct aq_vec_s *self) +{ struct aq_ring_s *ring = NULL; unsigned int i = 0U; @@ -279,13 +296,12 @@ void aq_vec_free(struct aq_vec_s *self) for (i = 0U, ring = self->ring[0]; self->tx_rings > i; ++i, ring = self->ring[i]) { aq_ring_free(&ring[AQ_VEC_TX_ID]); - aq_ring_free(&ring[AQ_VEC_RX_ID]); + if (i < self->rx_rings) + aq_ring_free(&ring[AQ_VEC_RX_ID]); } - netif_napi_del(&self->napi); - - kfree(self); - + self->tx_rings = 0; + self->rx_rings = 0; err_exit:; } @@ -333,16 +349,14 @@ cpumask_t *aq_vec_get_affinity_mask(struct aq_vec_s *self) return &self->aq_ring_param.affinity_mask; } -void aq_vec_add_stats(struct aq_vec_s *self, - struct aq_ring_stats_rx_s *stats_rx, - struct aq_ring_stats_tx_s *stats_tx) +static void aq_vec_add_stats(struct aq_vec_s *self, + const unsigned int tc, + struct aq_ring_stats_rx_s *stats_rx, + struct aq_ring_stats_tx_s *stats_tx) { - struct aq_ring_s *ring = NULL; - unsigned int r = 0U; + struct aq_ring_s *ring = self->ring[tc]; - for (r = 0U, ring = self->ring[0]; - self->tx_rings > r; ++r, ring = self->ring[r]) { - struct aq_ring_stats_tx_s *tx = &ring[AQ_VEC_TX_ID].stats.tx; + if (tc < self->rx_rings) { struct aq_ring_stats_rx_s *rx = &ring[AQ_VEC_RX_ID].stats.rx; stats_rx->packets += rx->packets; @@ -353,6 +367,10 @@ void aq_vec_add_stats(struct aq_vec_s *self, stats_rx->pg_losts += rx->pg_losts; stats_rx->pg_flips += rx->pg_flips; stats_rx->pg_reuses += rx->pg_reuses; + } + + if (tc < self->tx_rings) { + struct aq_ring_stats_tx_s *tx = &ring[AQ_VEC_TX_ID].stats.tx; stats_tx->packets += tx->packets; 
stats_tx->bytes += tx->bytes; @@ -361,7 +379,8 @@ void aq_vec_add_stats(struct aq_vec_s *self, } } -int aq_vec_get_sw_stats(struct aq_vec_s *self, u64 *data, unsigned int *p_count) +int aq_vec_get_sw_stats(struct aq_vec_s *self, const unsigned int tc, u64 *data, + unsigned int *p_count) { struct aq_ring_stats_rx_s stats_rx; struct aq_ring_stats_tx_s stats_tx; @@ -369,7 +388,8 @@ int aq_vec_get_sw_stats(struct aq_vec_s *self, u64 *data, unsigned int *p_count) memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s)); memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s)); - aq_vec_add_stats(self, &stats_rx, &stats_tx); + + aq_vec_add_stats(self, tc, &stats_rx, &stats_tx); /* This data should mimic aq_ethtool_queue_stat_names structure */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.h b/drivers/net/ethernet/aquantia/atlantic/aq_vec.h index 0fe8e0904c7f..541af85e6510 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.h @@ -25,17 +25,17 @@ irqreturn_t aq_vec_isr(int irq, void *private); irqreturn_t aq_vec_isr_legacy(int irq, void *private); struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx, struct aq_nic_cfg_s *aq_nic_cfg); +int aq_vec_ring_alloc(struct aq_vec_s *self, struct aq_nic_s *aq_nic, + unsigned int idx, struct aq_nic_cfg_s *aq_nic_cfg); int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops, struct aq_hw_s *aq_hw); void aq_vec_deinit(struct aq_vec_s *self); void aq_vec_free(struct aq_vec_s *self); +void aq_vec_ring_free(struct aq_vec_s *self); int aq_vec_start(struct aq_vec_s *self); void aq_vec_stop(struct aq_vec_s *self); cpumask_t *aq_vec_get_affinity_mask(struct aq_vec_s *self); -int aq_vec_get_sw_stats(struct aq_vec_s *self, u64 *data, +int aq_vec_get_sw_stats(struct aq_vec_s *self, const unsigned int tc, u64 *data, unsigned int *p_count); -void aq_vec_add_stats(struct aq_vec_s *self, - struct aq_ring_stats_rx_s *stats_rx, - struct aq_ring_stats_tx_s *stats_tx); #endif /* AQ_VEC_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c index 1b0670a8ae33..a312864969af 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c @@ -21,7 +21,7 @@ .msix_irqs = 4U, \ .irq_mask = ~0U, \ .vecs = HW_ATL_A0_RSS_MAX, \ - .tcs = HW_ATL_A0_TC_MAX, \ + .tcs_max = HW_ATL_A0_TC_MAX, \ .rxd_alignment = 1U, \ .rxd_size = HW_ATL_A0_RXD_SIZE, \ .rxds_max = HW_ATL_A0_MAX_RXD, \ @@ -136,10 +136,10 @@ static int hw_atl_a0_hw_qos_set(struct aq_hw_s *self) hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U); hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, 0U); - hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U); - hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U); - hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U); - hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U); + hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0U, 0xFFF); + hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0U, 0x64); + hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0U, 0x50); + hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0U, 0x1E); /* Tx buf size */ buff_size = HW_ATL_A0_TXBUF_MAX; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c index fa3cd7e9954b..14d79f70cad7 100644 --- 
a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c @@ -23,7 +23,7 @@ .msix_irqs = 8U, \ .irq_mask = ~0U, \ .vecs = HW_ATL_B0_RSS_MAX, \ - .tcs = HW_ATL_B0_TC_MAX, \ + .tcs_max = HW_ATL_B0_TC_MAX, \ .rxd_alignment = 1U, \ .rxd_size = HW_ATL_B0_RXD_SIZE, \ .rxds_max = HW_ATL_B0_MAX_RXD, \ @@ -46,7 +46,8 @@ NETIF_F_HW_VLAN_CTAG_RX | \ NETIF_F_HW_VLAN_CTAG_TX | \ NETIF_F_GSO_UDP_L4 | \ - NETIF_F_GSO_PARTIAL, \ + NETIF_F_GSO_PARTIAL | \ + NETIF_F_HW_TC, \ .hw_priv_flags = IFF_UNICAST_FLT, \ .flow_control = true, \ .mtu = HW_ATL_B0_MTU_JUMBO, \ @@ -114,12 +115,34 @@ static int hw_atl_b0_set_fc(struct aq_hw_s *self, u32 fc, u32 tc) return 0; } +static int hw_atl_b0_tc_ptp_set(struct aq_hw_s *self) +{ + /* Init TC2 for PTP_TX */ + hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, HW_ATL_B0_PTP_TXBUF_SIZE, + AQ_HW_PTP_TC); + + /* Init TC2 for PTP_RX */ + hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, HW_ATL_B0_PTP_RXBUF_SIZE, + AQ_HW_PTP_TC); + /* No flow control for PTP */ + hw_atl_rpb_rx_xoff_en_per_tc_set(self, 0U, AQ_HW_PTP_TC); + + return aq_hw_err_from_flags(self); +} + static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self) { - unsigned int i_priority = 0U; - u32 buff_size = 0U; + struct aq_nic_cfg_s *cfg = self->aq_nic_cfg; + u32 tx_buff_size = HW_ATL_B0_TXBUF_MAX; + u32 rx_buff_size = HW_ATL_B0_RXBUF_MAX; + unsigned int prio = 0U; u32 tc = 0U; + if (cfg->is_ptp) { + tx_buff_size -= HW_ATL_B0_PTP_TXBUF_SIZE; + rx_buff_size -= HW_ATL_B0_PTP_RXBUF_SIZE; + } + /* TPS Descriptor rate init */ hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U); hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA); @@ -127,63 +150,39 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self) /* TPS VM init */ hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U); - /* TPS TC credits init */ - hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U); - hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, 0U); - - tc = 0; - - /* TX Packet Scheduler Data TC0 */ - hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, tc); - hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, tc); - hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, tc); - hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, tc); - - /* Tx buf size TC0 */ - buff_size = HW_ATL_B0_TXBUF_MAX - HW_ATL_B0_PTP_TXBUF_SIZE; - - hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc); - hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self, - (buff_size * - (1024 / 32U) * 66U) / - 100U, tc); - hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self, - (buff_size * - (1024 / 32U) * 50U) / - 100U, tc); - /* Init TC2 for PTP_TX */ - tc = 2; + tx_buff_size /= cfg->tcs; + rx_buff_size /= cfg->tcs; + for (tc = 0; tc < cfg->tcs; tc++) { + u32 threshold = 0U; - hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, HW_ATL_B0_PTP_TXBUF_SIZE, - tc); + /* Tx buf size TC0 */ + hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, tx_buff_size, tc); - /* QoS Rx buf size per TC */ - tc = 0; - buff_size = HW_ATL_B0_RXBUF_MAX - HW_ATL_B0_PTP_RXBUF_SIZE; + threshold = (tx_buff_size * (1024 / 32U) * 66U) / 100U; + hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self, threshold, tc); - hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc); - hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self, - (buff_size * - (1024U / 32U) * 66U) / - 100U, tc); - hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self, - (buff_size * - (1024U / 32U) * 50U) / - 100U, tc); + threshold = (tx_buff_size * (1024 / 32U) * 50U) / 100U; + 
hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self, threshold, tc); - hw_atl_b0_set_fc(self, self->aq_nic_cfg->fc.req, tc); + /* QoS Rx buf size per TC */ + hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, rx_buff_size, tc); - /* Init TC2 for PTP_RX */ - tc = 2; + threshold = (rx_buff_size * (1024U / 32U) * 66U) / 100U; + hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self, threshold, tc); - hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, HW_ATL_B0_PTP_RXBUF_SIZE, - tc); - /* No flow control for PTP */ - hw_atl_rpb_rx_xoff_en_per_tc_set(self, 0U, tc); + threshold = (rx_buff_size * (1024U / 32U) * 50U) / 100U; + hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self, threshold, tc); + + hw_atl_b0_set_fc(self, self->aq_nic_cfg->fc.req, tc); + } + + if (cfg->is_ptp) + hw_atl_b0_tc_ptp_set(self); /* QoS 802.1p priority -> TC mapping */ - for (i_priority = 8U; i_priority--;) - hw_atl_rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U); + for (prio = 0; prio < 8; ++prio) + hw_atl_rpf_rpb_user_priority_tc_map_set(self, prio, + cfg->prio_tc_map[prio]); return aq_hw_err_from_flags(self); } @@ -311,10 +310,124 @@ int hw_atl_b0_hw_offload_set(struct aq_hw_s *self, return aq_hw_err_from_flags(self); } +static int hw_atl_b0_hw_init_tx_tc_rate_limit(struct aq_hw_s *self) +{ + static const u32 max_weight = BIT(HW_ATL_TPS_DATA_TCTWEIGHT_WIDTH) - 1; + /* Scale factor is based on the number of bits in fractional portion */ + static const u32 scale = BIT(HW_ATL_TPS_DESC_RATE_Y_WIDTH); + static const u32 frac_msk = HW_ATL_TPS_DESC_RATE_Y_MSK >> + HW_ATL_TPS_DESC_RATE_Y_SHIFT; + const u32 link_speed = self->aq_link_status.mbps; + struct aq_nic_cfg_s *nic_cfg = self->aq_nic_cfg; + unsigned long num_min_rated_tcs = 0; + u32 tc_weight[AQ_CFG_TCS_MAX]; + u32 fixed_max_credit; + u8 min_rate_msk = 0; + u32 sum_weight = 0; + int tc; + + /* By default max_credit is based upon MTU (in unit of 64b) */ + fixed_max_credit = nic_cfg->aq_hw_caps->mtu / 64; + + if (link_speed) { + min_rate_msk = nic_cfg->tc_min_rate_msk & + (BIT(nic_cfg->tcs) - 1); + num_min_rated_tcs = hweight8(min_rate_msk); + } + + /* First, calculate weights where min_rate is specified */ + if (num_min_rated_tcs) { + for (tc = 0; tc != nic_cfg->tcs; tc++) { + if (!nic_cfg->tc_min_rate[tc]) { + tc_weight[tc] = 0; + continue; + } + + tc_weight[tc] = (-1L + link_speed + + nic_cfg->tc_min_rate[tc] * + max_weight) / + link_speed; + tc_weight[tc] = min(tc_weight[tc], max_weight); + sum_weight += tc_weight[tc]; + } + } + + /* WSP, if min_rate is set for at least one TC. + * RR otherwise. + */ + hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, min_rate_msk ? 1U : 0U); + /* Data TC Arbiter takes precedence over Descriptor TC Arbiter, + * leave Descriptor TC Arbiter as RR. + */ + hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U); + + hw_atl_tps_tx_desc_rate_mode_set(self, nic_cfg->is_qos ? 1U : 0U); + + for (tc = 0; tc != nic_cfg->tcs; tc++) { + const u32 en = (nic_cfg->tc_max_rate[tc] != 0) ? 
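+		/* the per-queue rate divider is only enabled for TCs that have a max_rate cap */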
1U : 0U; + const u32 desc = AQ_NIC_CFG_TCVEC2RING(nic_cfg, tc, 0); + u32 weight, max_credit; + + hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, tc, + fixed_max_credit); + hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, tc, 0x1E); + + if (num_min_rated_tcs) { + weight = tc_weight[tc]; + + if (!weight && sum_weight < max_weight) + weight = (max_weight - sum_weight) / + (nic_cfg->tcs - num_min_rated_tcs); + else if (!weight) + weight = 0x64; + + max_credit = max(8 * weight, fixed_max_credit); + } else { + weight = 0x64; + max_credit = 0xFFF; + } + + hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, tc, weight); + hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, tc, + max_credit); + + hw_atl_tps_tx_desc_rate_en_set(self, desc, en); + + if (en) { + /* Nominal rate is always 10G */ + const u32 rate = 10000U * scale / + nic_cfg->tc_max_rate[tc]; + const u32 rate_int = rate >> + HW_ATL_TPS_DESC_RATE_Y_WIDTH; + const u32 rate_frac = rate & frac_msk; + + hw_atl_tps_tx_desc_rate_x_set(self, desc, rate_int); + hw_atl_tps_tx_desc_rate_y_set(self, desc, rate_frac); + } else { + /* A value of 1 indicates the queue is not + * rate controlled. + */ + hw_atl_tps_tx_desc_rate_x_set(self, desc, 1U); + hw_atl_tps_tx_desc_rate_y_set(self, desc, 0U); + } + } + for (tc = nic_cfg->tcs; tc != AQ_CFG_TCS_MAX; tc++) { + const u32 desc = AQ_NIC_CFG_TCVEC2RING(nic_cfg, tc, 0); + + hw_atl_tps_tx_desc_rate_en_set(self, desc, 0U); + hw_atl_tps_tx_desc_rate_x_set(self, desc, 1U); + hw_atl_tps_tx_desc_rate_y_set(self, desc, 0U); + } + + return aq_hw_err_from_flags(self); +} + static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self) { + struct aq_nic_cfg_s *nic_cfg = self->aq_nic_cfg; + /* Tx TC/Queue number config */ - hw_atl_tpb_tps_tx_tc_mode_set(self, 1U); + hw_atl_tpb_tps_tx_tc_mode_set(self, nic_cfg->tc_mode); hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U); hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U); @@ -334,20 +447,32 @@ static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self) return aq_hw_err_from_flags(self); } +void hw_atl_b0_hw_init_rx_rss_ctrl1(struct aq_hw_s *self) +{ + struct aq_nic_cfg_s *cfg = self->aq_nic_cfg; + u32 rss_ctrl1 = HW_ATL_RSS_DISABLED; + + if (cfg->is_rss) + rss_ctrl1 = (cfg->tc_mode == AQ_TC_MODE_8TCS) ? + HW_ATL_RSS_ENABLED_8TCS_2INDEX_BITS : + HW_ATL_RSS_ENABLED_4TCS_3INDEX_BITS; + + hw_atl_reg_rx_flr_rss_control1set(self, rss_ctrl1); +} + static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self) { struct aq_nic_cfg_s *cfg = self->aq_nic_cfg; int i; /* Rx TC/RSS number config */ - hw_atl_rpb_rpf_rx_traf_class_mode_set(self, 1U); + hw_atl_rpb_rpf_rx_traf_class_mode_set(self, cfg->tc_mode); /* Rx flow control */ hw_atl_rpb_rx_flow_ctl_mode_set(self, 1U); /* RSS Ring selection */ - hw_atl_reg_rx_flr_rss_control1set(self, cfg->is_rss ? 
- 0xB3333333U : 0x00000000U); + hw_atl_b0_hw_init_rx_rss_ctrl1(self); /* Multicast filters */ for (i = HW_ATL_B0_MAC_MAX; i--;) { @@ -1078,18 +1203,6 @@ int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self, struct aq_ring_s *ring) return aq_hw_err_from_flags(self); } -static int hw_atl_b0_tx_tc_mode_get(struct aq_hw_s *self, u32 *tc_mode) -{ - *tc_mode = hw_atl_tpb_tps_tx_tc_mode_get(self); - return aq_hw_err_from_flags(self); -} - -static int hw_atl_b0_rx_tc_mode_get(struct aq_hw_s *self, u32 *tc_mode) -{ - *tc_mode = hw_atl_rpb_rpf_rx_traf_class_mode_get(self); - return aq_hw_err_from_flags(self); -} - #define get_ptp_ts_val_u64(self, indx) \ ((u64)(hw_atl_pcs_ptp_clock_get(self, indx) & 0xffff)) @@ -1503,13 +1616,11 @@ const struct aq_hw_ops hw_atl_ops_b0 = { .hw_interrupt_moderation_set = hw_atl_b0_hw_interrupt_moderation_set, .hw_rss_set = hw_atl_b0_hw_rss_set, .hw_rss_hash_set = hw_atl_b0_hw_rss_hash_set, + .hw_tc_rate_limit_set = hw_atl_b0_hw_init_tx_tc_rate_limit, .hw_get_regs = hw_atl_utils_hw_get_regs, .hw_get_hw_stats = hw_atl_utils_get_hw_stats, .hw_get_fw_version = hw_atl_utils_get_fw_version, - .hw_tx_tc_mode_get = hw_atl_b0_tx_tc_mode_get, - .hw_rx_tc_mode_get = hw_atl_b0_rx_tc_mode_get, - .hw_ring_hwts_rx_fill = hw_atl_b0_hw_ring_hwts_rx_fill, .hw_ring_hwts_rx_receive = hw_atl_b0_hw_ring_hwts_rx_receive, diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h index b855459272ca..30f468f2084d 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h @@ -58,6 +58,8 @@ int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self, int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self, struct aq_ring_s *ring); int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self, struct aq_ring_s *ring); +void hw_atl_b0_hw_init_rx_rss_ctrl1(struct aq_hw_s *self); + int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr); int hw_atl_b0_hw_start(struct aq_hw_s *self); diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h index 7ab23a1751d3..cf460d61a45e 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h @@ -75,7 +75,7 @@ #define HW_ATL_B0_RSS_HASHKEY_BITS 320U #define HW_ATL_B0_TCRSS_4_8 1 -#define HW_ATL_B0_TC_MAX 1U +#define HW_ATL_B0_TC_MAX 8U #define HW_ATL_B0_RSS_MAX 8U #define HW_ATL_B0_LRO_RXD_MAX 16U @@ -151,6 +151,10 @@ #define HW_ATL_B0_MAX_RXD 8184U #define HW_ATL_B0_MAX_TXD 8184U +#define HW_ATL_RSS_DISABLED 0x00000000U +#define HW_ATL_RSS_ENABLED_8TCS_2INDEX_BITS 0xA2222222U +#define HW_ATL_RSS_ENABLED_4TCS_3INDEX_BITS 0x80003333U + /* HW layer capabilities */ #endif /* HW_ATL_B0_INTERNAL_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c index 9e2d01a6aac8..3c8e8047ea1e 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c @@ -754,7 +754,7 @@ void hw_atl_rpfl2_accept_all_mc_packets_set(struct aq_hw_s *aq_hw, } void hw_atl_rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw, - u32 user_priority_tc_map, u32 tc) + u32 user_priority, u32 tc) { /* register address for bitfield rx_tc_up{t}[2:0] */ static u32 rpf_rpb_rx_tc_upt_adr[8] = { @@ -773,10 +773,9 @@ void 
hw_atl_rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw, 0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U }; - aq_hw_write_reg_bit(aq_hw, rpf_rpb_rx_tc_upt_adr[tc], - rpf_rpb_rx_tc_upt_msk[tc], - rpf_rpb_rx_tc_upt_shft[tc], - user_priority_tc_map); + aq_hw_write_reg_bit(aq_hw, rpf_rpb_rx_tc_upt_adr[user_priority], + rpf_rpb_rx_tc_upt_msk[user_priority], + rpf_rpb_rx_tc_upt_shft[user_priority], tc); } void hw_atl_rpf_rss_key_addr_set(struct aq_hw_s *aq_hw, u32 rss_key_addr) @@ -1464,8 +1463,8 @@ void hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw, } void hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw, - u32 max_credit, - u32 tc) + const u32 tc, + const u32 max_credit) { aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_TCTCREDIT_MAX_ADR(tc), HW_ATL_TPS_DESC_TCTCREDIT_MAX_MSK, @@ -1474,13 +1473,13 @@ void hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw, } void hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw, - u32 tx_pkt_shed_desc_tc_weight, - u32 tc) + const u32 tc, + const u32 weight) { aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_TCTWEIGHT_ADR(tc), HW_ATL_TPS_DESC_TCTWEIGHT_MSK, HW_ATL_TPS_DESC_TCTWEIGHT_SHIFT, - tx_pkt_shed_desc_tc_weight); + weight); } void hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw, @@ -1493,8 +1492,8 @@ void hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw, } void hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw, - u32 max_credit, - u32 tc) + const u32 tc, + const u32 max_credit) { aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DATA_TCTCREDIT_MAX_ADR(tc), HW_ATL_TPS_DATA_TCTCREDIT_MAX_MSK, @@ -1503,13 +1502,49 @@ void hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw, } void hw_atl_tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw, - u32 tx_pkt_shed_tc_data_weight, - u32 tc) + const u32 tc, + const u32 weight) { aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DATA_TCTWEIGHT_ADR(tc), HW_ATL_TPS_DATA_TCTWEIGHT_MSK, HW_ATL_TPS_DATA_TCTWEIGHT_SHIFT, - tx_pkt_shed_tc_data_weight); + weight); +} + +void hw_atl_tps_tx_desc_rate_mode_set(struct aq_hw_s *aq_hw, + const u32 rate_mode) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_TX_DESC_RATE_MODE_ADR, + HW_ATL_TPS_TX_DESC_RATE_MODE_MSK, + HW_ATL_TPS_TX_DESC_RATE_MODE_SHIFT, + rate_mode); +} + +void hw_atl_tps_tx_desc_rate_en_set(struct aq_hw_s *aq_hw, const u32 desc, + const u32 enable) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_RATE_EN_ADR(desc), + HW_ATL_TPS_DESC_RATE_EN_MSK, + HW_ATL_TPS_DESC_RATE_EN_SHIFT, + enable); +} + +void hw_atl_tps_tx_desc_rate_x_set(struct aq_hw_s *aq_hw, const u32 desc, + const u32 rate_int) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_RATE_X_ADR(desc), + HW_ATL_TPS_DESC_RATE_X_MSK, + HW_ATL_TPS_DESC_RATE_X_SHIFT, + rate_int); +} + +void hw_atl_tps_tx_desc_rate_y_set(struct aq_hw_s *aq_hw, const u32 desc, + const u32 rate_frac) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_RATE_Y_ADR(desc), + HW_ATL_TPS_DESC_RATE_Y_MSK, + HW_ATL_TPS_DESC_RATE_Y_SHIFT, + rate_frac); } /* tx */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h index b88cb84805d5..61a6f70c51cd 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h @@ -688,13 +688,13 @@ void hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw, /* set tx packet scheduler descriptor tc max credit */ void 
hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw, - u32 max_credit, - u32 tc); + const u32 tc, + const u32 max_credit); /* set tx packet scheduler descriptor tc weight */ void hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw, - u32 tx_pkt_shed_desc_tc_weight, - u32 tc); + const u32 tc, + const u32 weight); /* set tx packet scheduler descriptor vm arbitration mode */ void hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw, @@ -702,13 +702,29 @@ void hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw, /* set tx packet scheduler tc data max credit */ void hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw, - u32 max_credit, - u32 tc); + const u32 tc, + const u32 max_credit); /* set tx packet scheduler tc data weight */ void hw_atl_tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw, - u32 tx_pkt_shed_tc_data_weight, - u32 tc); + const u32 tc, + const u32 weight); + +/* set tx descriptor rate mode */ +void hw_atl_tps_tx_desc_rate_mode_set(struct aq_hw_s *aq_hw, + const u32 rate_mode); + +/* set tx packet scheduler descriptor rate enable */ +void hw_atl_tps_tx_desc_rate_en_set(struct aq_hw_s *aq_hw, const u32 desc, + const u32 enable); + +/* set tx packet scheduler descriptor rate integral value */ +void hw_atl_tps_tx_desc_rate_x_set(struct aq_hw_s *aq_hw, const u32 desc, + const u32 rate_int); + +/* set tx packet scheduler descriptor rate fractional value */ +void hw_atl_tps_tx_desc_rate_y_set(struct aq_hw_s *aq_hw, const u32 desc, + const u32 rate_frac); /* tx */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h index 18de2f7b8959..06220792daf1 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h @@ -2038,6 +2038,42 @@ /* default value of bitfield lso_tcp_flag_mid[b:0] */ #define HW_ATL_THM_LSO_TCP_FLAG_MID_DEFAULT 0x0 +/* tx tx_tc_mode bitfield definitions + * preprocessor definitions for the bitfield "tx_tc_mode". + * port="pif_tpb_tx_tc_mode_i,pif_tps_tx_tc_mode_i" + */ + +/* register address for bitfield tx_tc_mode */ +#define HW_ATL_TPB_TX_TC_MODE_ADDR 0x00007900 +/* bitmask for bitfield tx_tc_mode */ +#define HW_ATL_TPB_TX_TC_MODE_MSK 0x00000100 +/* inverted bitmask for bitfield tx_tc_mode */ +#define HW_ATL_TPB_TX_TC_MODE_MSKN 0xFFFFFEFF +/* lower bit position of bitfield tx_tc_mode */ +#define HW_ATL_TPB_TX_TC_MODE_SHIFT 8 +/* width of bitfield tx_tc_mode */ +#define HW_ATL_TPB_TX_TC_MODE_WIDTH 1 +/* default value of bitfield tx_tc_mode */ +#define HW_ATL_TPB_TX_TC_MODE_DEFAULT 0x0 + +/* tx tx_desc_rate_mode bitfield definitions + * preprocessor definitions for the bitfield "tx_desc_rate_mode". 
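+ * The driver sets this bit whenever QoS is enabled, so that the per-queue desc{r}_rate_x/y dividers take effect.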
+ * port="pif_tps_desc_rate_mode_i" + */ + +/* register address for bitfield tx_desc_rate_mode */ +#define HW_ATL_TPS_TX_DESC_RATE_MODE_ADR 0x00007900 +/* bitmask for bitfield tx_desc_rate_mode */ +#define HW_ATL_TPS_TX_DESC_RATE_MODE_MSK 0x00000080 +/* inverted bitmask for bitfield tx_desc_rate_mode */ +#define HW_ATL_TPS_TX_DESC_RATE_MODE_MSKN 0xFFFFFF7F +/* lower bit position of bitfield tx_desc_rate_mode */ +#define HW_ATL_TPS_TX_DESC_RATE_MODE_SHIFT 7 +/* width of bitfield tx_desc_rate_mode */ +#define HW_ATL_TPS_TX_DESC_RATE_MODE_WIDTH 1 +/* default value of bitfield tx_desc_rate_mode */ +#define HW_ATL_TPS_TX_DESC_RATE_MODE_DEFAULT 0x0 + /* tx tx_buf_en bitfield definitions * preprocessor definitions for the bitfield "tx_buf_en". * port="pif_tpb_tx_buf_en_i" @@ -2056,19 +2092,6 @@ /* default value of bitfield tx_buf_en */ #define HW_ATL_TPB_TX_BUF_EN_DEFAULT 0x0 -/* register address for bitfield tx_tc_mode */ -#define HW_ATL_TPB_TX_TC_MODE_ADDR 0x00007900 -/* bitmask for bitfield tx_tc_mode */ -#define HW_ATL_TPB_TX_TC_MODE_MSK 0x00000100 -/* inverted bitmask for bitfield tx_tc_mode */ -#define HW_ATL_TPB_TX_TC_MODE_MSKN 0xFFFFFEFF -/* lower bit position of bitfield tx_tc_mode */ -#define HW_ATL_TPB_TX_TC_MODE_SHIFT 8 -/* width of bitfield tx_tc_mode */ -#define HW_ATL_TPB_TX_TC_MODE_WIDTH 1 -/* default value of bitfield tx_tc_mode */ -#define HW_ATL_TPB_TX_TC_MODE_DEFAULT 0x0 - /* tx tx{b}_hi_thresh[c:0] bitfield definitions * preprocessor definitions for the bitfield "tx{b}_hi_thresh[c:0]". * parameter: buffer {b} | stride size 0x10 | range [0, 7] @@ -2270,6 +2293,58 @@ /* default value of bitfield data_tc_arb_mode */ #define HW_ATL_TPS_DATA_TC_ARB_MODE_DEFAULT 0x0 +/* tx desc{r}_rate_en bitfield definitions + * preprocessor definitions for the bitfield "desc{r}_rate_en". + * port="pif_tps_desc_rate_en_i[0]" + */ + +/* register address for bitfield desc{r}_rate_en */ +#define HW_ATL_TPS_DESC_RATE_EN_ADR(desc) (0x00007408 + (desc) * 0x10) +/* bitmask for bitfield desc{r}_rate_en */ +#define HW_ATL_TPS_DESC_RATE_EN_MSK 0x80000000 +/* inverted bitmask for bitfield desc{r}_rate_en */ +#define HW_ATL_TPS_DESC_RATE_EN_MSKN 0x7FFFFFFF +/* lower bit position of bitfield desc{r}_rate_en */ +#define HW_ATL_TPS_DESC_RATE_EN_SHIFT 31 +/* width of bitfield desc{r}_rate_en */ +#define HW_ATL_TPS_DESC_RATE_EN_WIDTH 1 +/* default value of bitfield desc{r}_rate_en */ +#define HW_ATL_TPS_DESC_RATE_EN_DEFAULT 0x0 + +/* tx desc{r}_rate_x bitfield definitions + * preprocessor definitions for the bitfield "desc{r}_rate_x". + * port="pif_tps_desc0_rate_x" + */ +/* register address for bitfield desc{r}_rate_x */ +#define HW_ATL_TPS_DESC_RATE_X_ADR(desc) (0x00007408 + (desc) * 0x10) +/* bitmask for bitfield desc{r}_rate_x */ +#define HW_ATL_TPS_DESC_RATE_X_MSK 0x03FF0000 +/* inverted bitmask for bitfield desc{r}_rate_x */ +#define HW_ATL_TPS_DESC_RATE_X_MSKN 0xFC00FFFF +/* lower bit position of bitfield desc{r}_rate_x */ +#define HW_ATL_TPS_DESC_RATE_X_SHIFT 16 +/* width of bitfield desc{r}_rate_x */ +#define HW_ATL_TPS_DESC_RATE_X_WIDTH 10 +/* default value of bitfield desc{r}_rate_x */ +#define HW_ATL_TPS_DESC_RATE_X_DEFAULT 0x0 + +/* tx desc{r}_rate_y bitfield definitions + * preprocessor definitions for the bitfield "desc{r}_rate_y". 
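+ * The 14-bit fractional part of the per-queue rate divider against the 10G nominal rate (divider = rate_x + rate_y / 2^14); e.g. a 2500 Mbps cap is programmed as 10000 * 2^14 / 2500 = 65536, i.e. rate_x = 4, rate_y = 0.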
+ * port="pif_tps_desc0_rate_y" + */ +/* register address for bitfield desc{r}_rate_y */ +#define HW_ATL_TPS_DESC_RATE_Y_ADR(desc) (0x00007408 + (desc) * 0x10) +/* bitmask for bitfield desc{r}_rate_y */ +#define HW_ATL_TPS_DESC_RATE_Y_MSK 0x00003FFF +/* inverted bitmask for bitfield desc{r}_rate_y */ +#define HW_ATL_TPS_DESC_RATE_Y_MSKN 0xFFFFC000 +/* lower bit position of bitfield desc{r}_rate_y */ +#define HW_ATL_TPS_DESC_RATE_Y_SHIFT 0 +/* width of bitfield desc{r}_rate_y */ +#define HW_ATL_TPS_DESC_RATE_Y_WIDTH 14 +/* default value of bitfield desc{r}_rate_y */ +#define HW_ATL_TPS_DESC_RATE_Y_DEFAULT 0x0 + /* tx desc_rate_ta_rst bitfield definitions * preprocessor definitions for the bitfield "desc_rate_ta_rst". * port="pif_tps_desc_rate_ta_rst_i" diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c index 6f2b33ae3d06..8df9d4ef36f0 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c @@ -10,6 +10,7 @@ #include "hw_atl/hw_atl_b0.h" #include "hw_atl/hw_atl_utils.h" #include "hw_atl/hw_atl_llh.h" +#include "hw_atl/hw_atl_llh_internal.h" #include "hw_atl2_utils.h" #include "hw_atl2_llh.h" #include "hw_atl2_internal.h" @@ -23,7 +24,7 @@ static int hw_atl2_act_rslvr_table_set(struct aq_hw_s *self, u8 location, .msix_irqs = 8U, \ .irq_mask = ~0U, \ .vecs = HW_ATL2_RSS_MAX, \ - .tcs = HW_ATL2_TC_MAX, \ + .tcs_max = HW_ATL2_TC_MAX, \ .rxd_alignment = 1U, \ .rxd_size = HW_ATL2_RXD_SIZE, \ .rxds_max = HW_ATL2_MAX_RXD, \ @@ -47,7 +48,8 @@ static int hw_atl2_act_rslvr_table_set(struct aq_hw_s *self, u8 location, NETIF_F_HW_VLAN_CTAG_RX | \ NETIF_F_HW_VLAN_CTAG_TX | \ NETIF_F_GSO_UDP_L4 | \ - NETIF_F_GSO_PARTIAL, \ + NETIF_F_GSO_PARTIAL | \ + NETIF_F_HW_TC, \ .hw_priv_flags = IFF_UNICAST_FLT, \ .flow_control = true, \ .mtu = HW_ATL2_MTU_JUMBO, \ @@ -91,16 +93,49 @@ static int hw_atl2_hw_reset(struct aq_hw_s *self) static int hw_atl2_hw_queue_to_tc_map_set(struct aq_hw_s *self) { - if (!hw_atl_rpb_rpf_rx_traf_class_mode_get(self)) { - aq_hw_write_reg(self, HW_ATL2_RX_Q_TC_MAP_ADR(0), 0x11110000); - aq_hw_write_reg(self, HW_ATL2_RX_Q_TC_MAP_ADR(8), 0x33332222); - aq_hw_write_reg(self, HW_ATL2_RX_Q_TC_MAP_ADR(16), 0x55554444); - aq_hw_write_reg(self, HW_ATL2_RX_Q_TC_MAP_ADR(24), 0x77776666); - } else { - aq_hw_write_reg(self, HW_ATL2_RX_Q_TC_MAP_ADR(0), 0x00000000); - aq_hw_write_reg(self, HW_ATL2_RX_Q_TC_MAP_ADR(8), 0x11111111); - aq_hw_write_reg(self, HW_ATL2_RX_Q_TC_MAP_ADR(16), 0x22222222); - aq_hw_write_reg(self, HW_ATL2_RX_Q_TC_MAP_ADR(24), 0x33333333); + struct aq_nic_cfg_s *cfg = self->aq_nic_cfg; + unsigned int tcs, q_per_tc; + unsigned int tc, q; + u32 rx_map = 0; + u32 tx_map = 0; + + hw_atl2_tpb_tx_tc_q_rand_map_en_set(self, 1U); + + switch (cfg->tc_mode) { + case AQ_TC_MODE_8TCS: + tcs = 8; + q_per_tc = 4; + break; + case AQ_TC_MODE_4TCS: + tcs = 4; + q_per_tc = 8; + break; + default: + return -EINVAL; + } + + for (tc = 0; tc != tcs; tc++) { + unsigned int tc_q_offset = tc * q_per_tc; + + for (q = tc_q_offset; q != tc_q_offset + q_per_tc; q++) { + rx_map |= tc << HW_ATL2_RX_Q_TC_MAP_SHIFT(q); + if (HW_ATL2_RX_Q_TC_MAP_ADR(q) != + HW_ATL2_RX_Q_TC_MAP_ADR(q + 1)) { + aq_hw_write_reg(self, + HW_ATL2_RX_Q_TC_MAP_ADR(q), + rx_map); + rx_map = 0; + } + + tx_map |= tc << HW_ATL2_TX_Q_TC_MAP_SHIFT(q); + if (HW_ATL2_TX_Q_TC_MAP_ADR(q) != + HW_ATL2_TX_Q_TC_MAP_ADR(q + 1)) { + aq_hw_write_reg(self, + HW_ATL2_TX_Q_TC_MAP_ADR(q), + tx_map); + tx_map 
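+			/* this 32-bit TX map word (four 8-bit queue fields) was just flushed, so restart accumulation */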
= 0; + } + } } return aq_hw_err_from_flags(self); @@ -112,7 +147,6 @@ static int hw_atl2_hw_qos_set(struct aq_hw_s *self) u32 tx_buff_size = HW_ATL2_TXBUF_MAX; u32 rx_buff_size = HW_ATL2_RXBUF_MAX; unsigned int prio = 0U; - u32 threshold = 0U; u32 tc = 0U; /* TPS Descriptor rate init */ @@ -122,42 +156,36 @@ static int hw_atl2_hw_qos_set(struct aq_hw_s *self) /* TPS VM init */ hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U); - /* TPS TC credits init */ - hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U); - hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, 0U); + tx_buff_size /= cfg->tcs; + rx_buff_size /= cfg->tcs; + for (tc = 0; tc < cfg->tcs; tc++) { + u32 threshold = 0U; - tc = 0; + /* Tx buf size TC0 */ + hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, tx_buff_size, tc); - /* TX Packet Scheduler Data TC0 */ - hw_atl2_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF0, tc); - hw_atl2_tps_tx_pkt_shed_tc_data_weight_set(self, 0x640, tc); - hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, tc); - hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, tc); + threshold = (tx_buff_size * (1024 / 32U) * 66U) / 100U; + hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self, threshold, tc); - /* Tx buf size TC0 */ - hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, tx_buff_size, tc); + threshold = (tx_buff_size * (1024 / 32U) * 50U) / 100U; + hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self, threshold, tc); - threshold = (tx_buff_size * (1024 / 32U) * 66U) / 100U; - hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self, threshold, tc); + /* QoS Rx buf size per TC */ + hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, rx_buff_size, tc); - threshold = (tx_buff_size * (1024 / 32U) * 50U) / 100U; - hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self, threshold, tc); + threshold = (rx_buff_size * (1024U / 32U) * 66U) / 100U; + hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self, threshold, tc); - /* QoS Rx buf size per TC */ - hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, rx_buff_size, tc); - - threshold = (rx_buff_size * (1024U / 32U) * 66U) / 100U; - hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self, threshold, tc); - - threshold = (rx_buff_size * (1024U / 32U) * 50U) / 100U; - hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self, threshold, tc); + threshold = (rx_buff_size * (1024U / 32U) * 50U) / 100U; + hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self, threshold, tc); + } /* QoS 802.1p priority -> TC mapping */ for (prio = 0; prio < 8; ++prio) hw_atl_rpf_rpb_user_priority_tc_map_set(self, prio, - cfg->tcs * prio / 8); + cfg->prio_tc_map[prio]); - /* ATL2 Apply legacy ring to TC mapping */ + /* ATL2 Apply ring to TC mapping */ hw_atl2_hw_queue_to_tc_map_set(self); return aq_hw_err_from_flags(self); @@ -166,19 +194,149 @@ static int hw_atl2_hw_qos_set(struct aq_hw_s *self) static int hw_atl2_hw_rss_set(struct aq_hw_s *self, struct aq_rss_parameters *rss_params) { - u8 *indirection_table = rss_params->indirection_table; + u8 *indirection_table = rss_params->indirection_table; + const u32 num_tcs = aq_hw_num_tcs(self); + u32 rpf_redir2_enable; + int tc; int i; - for (i = HW_ATL2_RSS_REDIRECTION_MAX; i--;) - hw_atl2_new_rpf_rss_redir_set(self, 0, i, indirection_table[i]); + rpf_redir2_enable = num_tcs > 4 ? 
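+	/* the driver only enables the second redirection table when more than 4 TCs are active */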
1 : 0; + + hw_atl2_rpf_redirection_table2_select_set(self, rpf_redir2_enable); + + for (i = HW_ATL2_RSS_REDIRECTION_MAX; i--;) { + for (tc = 0; tc != num_tcs; tc++) { + hw_atl2_new_rpf_rss_redir_set(self, tc, i, + tc * + aq_hw_q_per_tc(self) + + indirection_table[i]); + } + } + + return aq_hw_err_from_flags(self); +} + +static int hw_atl2_hw_init_tx_tc_rate_limit(struct aq_hw_s *self) +{ + static const u32 max_weight = BIT(HW_ATL2_TPS_DATA_TCTWEIGHT_WIDTH) - 1; + /* Scale factor is based on the number of bits in fractional portion */ + static const u32 scale = BIT(HW_ATL_TPS_DESC_RATE_Y_WIDTH); + static const u32 frac_msk = HW_ATL_TPS_DESC_RATE_Y_MSK >> + HW_ATL_TPS_DESC_RATE_Y_SHIFT; + const u32 link_speed = self->aq_link_status.mbps; + struct aq_nic_cfg_s *nic_cfg = self->aq_nic_cfg; + unsigned long num_min_rated_tcs = 0; + u32 tc_weight[AQ_CFG_TCS_MAX]; + u32 fixed_max_credit_4b; + u32 fixed_max_credit; + u8 min_rate_msk = 0; + u32 sum_weight = 0; + int tc; + + /* By default max_credit is based upon MTU (in unit of 64b) */ + fixed_max_credit = nic_cfg->aq_hw_caps->mtu / 64; + /* in unit of 4b */ + fixed_max_credit_4b = nic_cfg->aq_hw_caps->mtu / 4; + + if (link_speed) { + min_rate_msk = nic_cfg->tc_min_rate_msk & + (BIT(nic_cfg->tcs) - 1); + num_min_rated_tcs = hweight8(min_rate_msk); + } + + /* First, calculate weights where min_rate is specified */ + if (num_min_rated_tcs) { + for (tc = 0; tc != nic_cfg->tcs; tc++) { + if (!nic_cfg->tc_min_rate[tc]) { + tc_weight[tc] = 0; + continue; + } + + tc_weight[tc] = (-1L + link_speed + + nic_cfg->tc_min_rate[tc] * + max_weight) / + link_speed; + tc_weight[tc] = min(tc_weight[tc], max_weight); + sum_weight += tc_weight[tc]; + } + } + + /* WSP, if min_rate is set for at least one TC. + * RR otherwise. + */ + hw_atl2_tps_tx_pkt_shed_data_arb_mode_set(self, min_rate_msk ? 1U : 0U); + /* Data TC Arbiter takes precedence over Descriptor TC Arbiter, + * leave Descriptor TC Arbiter as RR. + */ + hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U); + + hw_atl_tps_tx_desc_rate_mode_set(self, nic_cfg->is_qos ? 1U : 0U); + + for (tc = 0; tc != nic_cfg->tcs; tc++) { + const u32 en = (nic_cfg->tc_max_rate[tc] != 0) ? 1U : 0U; + const u32 desc = AQ_NIC_CFG_TCVEC2RING(nic_cfg, tc, 0); + u32 weight, max_credit; + + hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, tc, + fixed_max_credit); + hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, tc, 0x1E); + + if (num_min_rated_tcs) { + weight = tc_weight[tc]; + + if (!weight && sum_weight < max_weight) + weight = (max_weight - sum_weight) / + (nic_cfg->tcs - num_min_rated_tcs); + else if (!weight) + weight = 0x640; + + max_credit = max(2 * weight, fixed_max_credit_4b); + } else { + weight = 0x640; + max_credit = 0xFFF0; + } + + hw_atl2_tps_tx_pkt_shed_tc_data_weight_set(self, tc, weight); + hw_atl2_tps_tx_pkt_shed_tc_data_max_credit_set(self, tc, + max_credit); + + hw_atl_tps_tx_desc_rate_en_set(self, desc, en); + + if (en) { + /* Nominal rate is always 10G */ + const u32 rate = 10000U * scale / + nic_cfg->tc_max_rate[tc]; + const u32 rate_int = rate >> + HW_ATL_TPS_DESC_RATE_Y_WIDTH; + const u32 rate_frac = rate & frac_msk; + + hw_atl_tps_tx_desc_rate_x_set(self, desc, rate_int); + hw_atl_tps_tx_desc_rate_y_set(self, desc, rate_frac); + } else { + /* A value of 1 indicates the queue is not + * rate controlled. 
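+			 * Note: unlike B0, the A2 data arbiter uses 15-bit weights and 16-bit credits, hence the 0x640/0xFFF0 defaults above instead of B0's 0x64/0xFFF.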
+ */ + hw_atl_tps_tx_desc_rate_x_set(self, desc, 1U); + hw_atl_tps_tx_desc_rate_y_set(self, desc, 0U); + } + } + for (tc = nic_cfg->tcs; tc != AQ_CFG_TCS_MAX; tc++) { + const u32 desc = AQ_NIC_CFG_TCVEC2RING(nic_cfg, tc, 0); + + hw_atl_tps_tx_desc_rate_en_set(self, desc, 0U); + hw_atl_tps_tx_desc_rate_x_set(self, desc, 1U); + hw_atl_tps_tx_desc_rate_y_set(self, desc, 0U); + } return aq_hw_err_from_flags(self); } static int hw_atl2_hw_init_tx_path(struct aq_hw_s *self) { + struct aq_nic_cfg_s *nic_cfg = self->aq_nic_cfg; + /* Tx TC/RSS number config */ - hw_atl_tpb_tps_tx_tc_mode_set(self, 1U); + hw_atl_tpb_tps_tx_tc_mode_set(self, nic_cfg->tc_mode); hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U); hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U); @@ -201,13 +359,29 @@ static int hw_atl2_hw_init_tx_path(struct aq_hw_s *self) static void hw_atl2_hw_init_new_rx_filters(struct aq_hw_s *self) { struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv; + u8 *prio_tc_map = self->aq_nic_cfg->prio_tc_map; + u16 action; u8 index; + int i; + /* Action Resolver Table (ART) is used by RPF to decide which action + * to take with a packet based upon input tag and tag mask, where: + * - input tag is a combination of 3-bit VLAN Prio (PCP) and + * 29-bit concatenation of all tags from filter block; + * - tag mask is a mask used for matching against input tag. + * The input_tag is compared with all the Requested_tags in the + * Record table to find a match. The Action field of the matching + * REC entry is used for further processing. If multiple entries + * match, the Action field of the lowest REC entry is selected. + */ hw_atl2_rpf_act_rslvr_section_en_set(self, 0xFFFF); hw_atl2_rpfl2_uc_flr_tag_set(self, HW_ATL2_RPF_TAG_BASE_UC, HW_ATL2_MAC_UC); hw_atl2_rpfl2_bc_flr_tag_set(self, HW_ATL2_RPF_TAG_BASE_UC); + /* FW reserves the beginning of ART, thus all driver entries must + * start from the offset specified in FW caps. + */ index = priv->art_base_index + HW_ATL2_RPF_L2_PROMISC_OFF_INDEX; hw_atl2_act_rslvr_table_set(self, index, 0, HW_ATL2_RPF_TAG_UC_MASK | @@ -220,33 +394,17 @@ static void hw_atl2_hw_init_new_rx_filters(struct aq_hw_s *self) HW_ATL2_RPF_TAG_UNTAG_MASK, HW_ATL2_ACTION_DROP); - index = priv->art_base_index + HW_ATL2_RPF_VLAN_INDEX; - hw_atl2_act_rslvr_table_set(self, index, HW_ATL2_RPF_TAG_BASE_VLAN, - HW_ATL2_RPF_TAG_VLAN_MASK, - HW_ATL2_ACTION_ASSIGN_TC(0)); - - index = priv->art_base_index + HW_ATL2_RPF_MAC_INDEX; - hw_atl2_act_rslvr_table_set(self, index, HW_ATL2_RPF_TAG_BASE_UC, - HW_ATL2_RPF_TAG_UC_MASK, - HW_ATL2_ACTION_ASSIGN_TC(0)); - - index = priv->art_base_index + HW_ATL2_RPF_ALLMC_INDEX; - hw_atl2_act_rslvr_table_set(self, index, HW_ATL2_RPF_TAG_BASE_ALLMC, - HW_ATL2_RPF_TAG_ALLMC_MASK, - HW_ATL2_ACTION_ASSIGN_TC(0)); + /* Configure ART to map a given VLAN Prio (PCP) to the TC index for + * the RSS redirection table. 
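+	 * One ART entry is written per PCP value: the input tag (pcp << HW_ATL2_RPF_TAG_PCP_OFFSET) masked with HW_ATL2_RPF_TAG_PCP_MASK resolves to HW_ATL2_ACTION_ASSIGN_TC(prio_tc_map[pcp]).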
+ */ + for (i = 0; i < 8; i++) { + action = HW_ATL2_ACTION_ASSIGN_TC(prio_tc_map[i]); - index = priv->art_base_index + HW_ATL2_RPF_UNTAG_INDEX; - hw_atl2_act_rslvr_table_set(self, index, HW_ATL2_RPF_TAG_UNTAG_MASK, - HW_ATL2_RPF_TAG_UNTAG_MASK, - HW_ATL2_ACTION_ASSIGN_TC(0)); - - index = priv->art_base_index + HW_ATL2_RPF_VLAN_PROMISC_ON_INDEX; - hw_atl2_act_rslvr_table_set(self, index, 0, HW_ATL2_RPF_TAG_VLAN_MASK, - HW_ATL2_ACTION_DISABLE); - - index = priv->art_base_index + HW_ATL2_RPF_L2_PROMISC_ON_INDEX; - hw_atl2_act_rslvr_table_set(self, index, 0, HW_ATL2_RPF_TAG_UC_MASK, - HW_ATL2_ACTION_DISABLE); + index = priv->art_base_index + HW_ATL2_RPF_PCP_TO_TC_INDEX + i; + hw_atl2_act_rslvr_table_set(self, index, + i << HW_ATL2_RPF_TAG_PCP_OFFSET, + HW_ATL2_RPF_TAG_PCP_MASK, action); + } } static void hw_atl2_hw_new_rx_filter_vlan_promisc(struct aq_hw_s *self, @@ -309,7 +467,7 @@ static int hw_atl2_hw_init_rx_path(struct aq_hw_s *self) int i; /* Rx TC/RSS number config */ - hw_atl_rpb_rpf_rx_traf_class_mode_set(self, 1U); + hw_atl_rpb_rpf_rx_traf_class_mode_set(self, cfg->tc_mode); /* Rx flow control */ hw_atl_rpb_rx_flow_ctl_mode_set(self, 1U); @@ -317,9 +475,7 @@ static int hw_atl2_hw_init_rx_path(struct aq_hw_s *self) hw_atl2_rpf_rss_hash_type_set(self, HW_ATL2_RPF_RSS_HASH_TYPE_ALL); /* RSS Ring selection */ - hw_atl_reg_rx_flr_rss_control1set(self, cfg->is_rss ? - HW_ATL_RSS_ENABLED_3INDEX_BITS : - HW_ATL_RSS_DISABLED); + hw_atl_b0_hw_init_rx_rss_ctrl1(self); /* Multicast filters */ for (i = HW_ATL2_MAC_MAX; i--;) { @@ -678,6 +834,7 @@ const struct aq_hw_ops hw_atl2_ops = { .hw_interrupt_moderation_set = hw_atl2_hw_interrupt_moderation_set, .hw_rss_set = hw_atl2_hw_rss_set, .hw_rss_hash_set = hw_atl_b0_hw_rss_hash_set, + .hw_tc_rate_limit_set = hw_atl2_hw_init_tx_tc_rate_limit, .hw_get_hw_stats = hw_atl2_utils_get_hw_stats, .hw_get_fw_version = hw_atl2_utils_get_fw_version, .hw_set_offload = hw_atl_b0_hw_offload_set, diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_internal.h index e66b3583bfe9..5a89bb8722f9 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_internal.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_internal.h @@ -31,7 +31,7 @@ #define HW_ATL2_RSS_REDIRECTION_MAX 64U -#define HW_ATL2_TC_MAX 1U +#define HW_ATL2_TC_MAX 8U #define HW_ATL2_RSS_MAX 8U #define HW_ATL2_INTR_MODER_MAX 0x1FF @@ -82,13 +82,6 @@ enum HW_ATL2_RPF_ART_INDEX { HW_ATL2_RPF_VLAN_USER_INDEX = HW_ATL2_RPF_ET_PCP_USER_INDEX + 16, HW_ATL2_RPF_PCP_TO_TC_INDEX = HW_ATL2_RPF_VLAN_USER_INDEX + HW_ATL_VLAN_MAX_FILTERS, - HW_ATL2_RPF_VLAN_INDEX = HW_ATL2_RPF_PCP_TO_TC_INDEX + - AQ_CFG_TCS_MAX, - HW_ATL2_RPF_MAC_INDEX, - HW_ATL2_RPF_ALLMC_INDEX, - HW_ATL2_RPF_UNTAG_INDEX, - HW_ATL2_RPF_VLAN_PROMISC_ON_INDEX, - HW_ATL2_RPF_L2_PROMISC_ON_INDEX, }; #define HW_ATL2_ACTION(ACTION, RSS, INDEX, VALID) \ @@ -124,9 +117,6 @@ enum HW_ATL2_RPF_RSS_HASH_TYPE { HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_EX_UDP, }; -#define HW_ATL_RSS_DISABLED 0x00000000U -#define HW_ATL_RSS_ENABLED_3INDEX_BITS 0xB3333333U - #define HW_ATL_MCAST_FLT_ANY_TO_HOST 0x00010FFFU struct hw_atl2_priv { diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh.c index e779d70fde66..cd954b11d24a 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh.c @@ -7,6 +7,14 @@ #include 
"hw_atl2_llh_internal.h" #include "aq_hw_utils.h" +void hw_atl2_rpf_redirection_table2_select_set(struct aq_hw_s *aq_hw, + u32 select) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_ADR, + HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_MSK, + HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_SHIFT, select); +} + void hw_atl2_rpf_rss_hash_type_set(struct aq_hw_s *aq_hw, u32 rss_hash_type) { aq_hw_write_reg_bit(aq_hw, HW_ATL2_RPF_PIF_RPF_RSS_HASH_TYPEI_ADR, @@ -60,6 +68,15 @@ void hw_atl2_rpf_vlan_flr_tag_set(struct aq_hw_s *aq_hw, u32 tag, u32 filter) /* TX */ +void hw_atl2_tpb_tx_tc_q_rand_map_en_set(struct aq_hw_s *aq_hw, + const u32 tc_q_rand_map_en) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_ADR, + HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_MSK, + HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_SHIFT, + tc_q_rand_map_en); +} + void hw_atl2_tpb_tx_buf_clk_gate_en_set(struct aq_hw_s *aq_hw, u32 clk_gate_en) { aq_hw_write_reg_bit(aq_hw, HW_ATL2_TPB_TX_BUF_CLK_GATE_EN_ADR, @@ -76,9 +93,18 @@ void hw_atl2_reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw, tx_intr_moderation_ctl); } +void hw_atl2_tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw, + const u32 data_arb_mode) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL2_TPS_DATA_TC_ARB_MODE_ADR, + HW_ATL2_TPS_DATA_TC_ARB_MODE_MSK, + HW_ATL2_TPS_DATA_TC_ARB_MODE_SHIFT, + data_arb_mode); +} + void hw_atl2_tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw, - u32 max_credit, - u32 tc) + const u32 tc, + const u32 max_credit) { aq_hw_write_reg_bit(aq_hw, HW_ATL2_TPS_DATA_TCTCREDIT_MAX_ADR(tc), HW_ATL2_TPS_DATA_TCTCREDIT_MAX_MSK, @@ -87,13 +113,13 @@ void hw_atl2_tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw, } void hw_atl2_tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw, - u32 tx_pkt_shed_tc_data_weight, - u32 tc) + const u32 tc, + const u32 weight) { aq_hw_write_reg_bit(aq_hw, HW_ATL2_TPS_DATA_TCTWEIGHT_ADR(tc), HW_ATL2_TPS_DATA_TCTWEIGHT_MSK, HW_ATL2_TPS_DATA_TCTWEIGHT_SHIFT, - tx_pkt_shed_tc_data_weight); + weight); } u32 hw_atl2_get_hw_version(struct aq_hw_s *aq_hw) diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh.h index 8c6d78a64d42..98c7a4621297 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh.h @@ -15,6 +15,10 @@ void hw_atl2_reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw, u32 tx_intr_moderation_ctl, u32 queue); +/* Set Redirection Table 2 Select */ +void hw_atl2_rpf_redirection_table2_select_set(struct aq_hw_s *aq_hw, + u32 select); + /** Set RSS HASH type */ void hw_atl2_rpf_rss_hash_type_set(struct aq_hw_s *aq_hw, u32 rss_hash_type); @@ -34,18 +38,25 @@ void hw_atl2_new_rpf_rss_redir_set(struct aq_hw_s *aq_hw, u32 tc, u32 index, /* Set VLAN filter tag */ void hw_atl2_rpf_vlan_flr_tag_set(struct aq_hw_s *aq_hw, u32 tag, u32 filter); +/* set tx random TC-queue mapping enable bit */ +void hw_atl2_tpb_tx_tc_q_rand_map_en_set(struct aq_hw_s *aq_hw, + const u32 tc_q_rand_map_en); + /* set tx buffer clock gate enable */ void hw_atl2_tpb_tx_buf_clk_gate_en_set(struct aq_hw_s *aq_hw, u32 clk_gate_en); +void hw_atl2_tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw, + const u32 data_arb_mode); + /* set tx packet scheduler tc data max credit */ void hw_atl2_tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw, - u32 max_credit, - u32 tc); + const u32 tc, + const u32 max_credit); /* set tx packet scheduler tc data weight */ void 
hw_atl2_tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw, - u32 tx_pkt_shed_tc_data_weight, - u32 tc); + const u32 tc, + const u32 weight); u32 hw_atl2_get_hw_version(struct aq_hw_s *aq_hw); diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh_internal.h index cde9e9d2836d..e34c5cda061e 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh_internal.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh_internal.h @@ -6,6 +6,16 @@ #ifndef HW_ATL2_LLH_INTERNAL_H #define HW_ATL2_LLH_INTERNAL_H +/* RX pif_rpf_redir_2_en_i Bitfield Definitions + * PORT="pif_rpf_redir_2_en_i" + */ +#define HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_ADR 0x000054C8 +#define HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_MSK 0x00001000 +#define HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_MSKN 0xFFFFEFFF +#define HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_SHIFT 12 +#define HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_WIDTH 1 +#define HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_DEFAULT 0x0 + /* RX pif_rpf_rss_hash_type_i Bitfield Definitions */ #define HW_ATL2_RPF_PIF_RPF_RSS_HASH_TYPEI_ADR 0x000054C8 @@ -122,6 +132,24 @@ /* Default value of bitfield rx_q{Q}_tc_map[2:0] */ #define HW_ATL2_RX_Q_TC_MAP_DEFAULT 0x0 +/* tx tx_tc_q_rand_map_en bitfield definitions + * preprocessor definitions for the bitfield "tx_tc_q_rand_map_en". + * port="pif_tpb_tx_tc_q_rand_map_en_i" + */ + +/* register address for bitfield tx_tc_q_rand_map_en */ +#define HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_ADR 0x00007900 +/* bitmask for bitfield tx_tc_q_rand_map_en */ +#define HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_MSK 0x00000200 +/* inverted bitmask for bitfield tx_tc_q_rand_map_en */ +#define HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_MSKN 0xFFFFFDFF +/* lower bit position of bitfield tx_tc_q_rand_map_en */ +#define HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_SHIFT 9 +/* width of bitfield tx_tc_q_rand_map_en */ +#define HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_WIDTH 1 +/* default value of bitfield tx_tc_q_rand_map_en */ +#define HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_DEFAULT 0x0 + /* tx tx_buffer_clk_gate_en bitfield definitions * preprocessor definitions for the bitfield "tx_buffer_clk_gate_en". * port="pif_tpb_tx_buffer_clk_gate_en_i" @@ -140,42 +168,77 @@ /* default value of bitfield tx_buffer_clk_gate_en */ #define HW_ATL2_TPB_TX_BUF_CLK_GATE_EN_DEFAULT 0x0 -/* tx data_tc{t}_credit_max[b:0] bitfield definitions - * preprocessor definitions for the bitfield "data_tc{t}_credit_max[b:0]". +/* tx tx_q_tc_map{q} bitfield definitions + * preprocessor definitions for the bitfield "tx_q_tc_map{q}". + * parameter: queue {q} | bit-level stride | range [0, 31] + * port="pif_tpb_tx_q_tc_map0_i[2:0]" + */ + +/* register address for bitfield tx_q_tc_map{q} */ +#define HW_ATL2_TX_Q_TC_MAP_ADR(queue) \ + (((queue) < 32) ? 0x0000799C + ((queue) / 4) * 4 : 0) +/* lower bit position of bitfield tx_q_tc_map{q} */ +#define HW_ATL2_TX_Q_TC_MAP_SHIFT(queue) \ + (((queue) < 32) ? ((queue) * 8) % 32 : 0) +/* width of bitfield tx_q_tc_map{q} */ +#define HW_ATL2_TX_Q_TC_MAP_WIDTH 3 +/* default value of bitfield tx_q_tc_map{q} */ +#define HW_ATL2_TX_Q_TC_MAP_DEFAULT 0x0 + +/* tx data_tc_arb_mode bitfield definitions + * preprocessor definitions for the bitfield "data_tc_arb_mode". 
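+ * 0 selects round-robin arbitration, 1 weighted strict priority; the driver switches to WSP whenever at least one TC has a min_rate configured.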
+ * port="pif_tps_data_tc_arb_mode_i" + */ + +/* register address for bitfield data_tc_arb_mode */ +#define HW_ATL2_TPS_DATA_TC_ARB_MODE_ADR 0x00007100 +/* bitmask for bitfield data_tc_arb_mode */ +#define HW_ATL2_TPS_DATA_TC_ARB_MODE_MSK 0x00000003 +/* inverted bitmask for bitfield data_tc_arb_mode */ +#define HW_ATL2_TPS_DATA_TC_ARB_MODE_MSKN 0xfffffffc +/* lower bit position of bitfield data_tc_arb_mode */ +#define HW_ATL2_TPS_DATA_TC_ARB_MODE_SHIFT 0 +/* width of bitfield data_tc_arb_mode */ +#define HW_ATL2_TPS_DATA_TC_ARB_MODE_WIDTH 2 +/* default value of bitfield data_tc_arb_mode */ +#define HW_ATL2_TPS_DATA_TC_ARB_MODE_DEFAULT 0x0 + +/* tx data_tc{t}_credit_max[f:0] bitfield definitions + * preprocessor definitions for the bitfield "data_tc{t}_credit_max[f:0]". * parameter: tc {t} | stride size 0x4 | range [0, 7] - * port="pif_tps_data_tc0_credit_max_i[11:0]" + * port="pif_tps_data_tc0_credit_max_i[15:0]" */ -/* register address for bitfield data_tc{t}_credit_max[b:0] */ +/* register address for bitfield data_tc{t}_credit_max[f:0] */ #define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_ADR(tc) (0x00007110 + (tc) * 0x4) -/* bitmask for bitfield data_tc{t}_credit_max[b:0] */ -#define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_MSK 0x0fff0000 -/* inverted bitmask for bitfield data_tc{t}_credit_max[b:0] */ -#define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_MSKN 0xf000ffff -/* lower bit position of bitfield data_tc{t}_credit_max[b:0] */ +/* bitmask for bitfield data_tc{t}_credit_max[f:0] */ +#define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_MSK 0xffff0000 +/* inverted bitmask for bitfield data_tc{t}_credit_max[f:0] */ +#define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_MSKN 0x0000ffff +/* lower bit position of bitfield data_tc{t}_credit_max[f:0] */ #define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_SHIFT 16 -/* width of bitfield data_tc{t}_credit_max[b:0] */ -#define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_WIDTH 12 -/* default value of bitfield data_tc{t}_credit_max[b:0] */ +/* width of bitfield data_tc{t}_credit_max[f:0] */ +#define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_WIDTH 16 +/* default value of bitfield data_tc{t}_credit_max[f:0] */ #define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_DEFAULT 0x0 -/* tx data_tc{t}_weight[8:0] bitfield definitions - * preprocessor definitions for the bitfield "data_tc{t}_weight[8:0]". +/* tx data_tc{t}_weight[e:0] bitfield definitions + * preprocessor definitions for the bitfield "data_tc{t}_weight[e:0]". 
* parameter: tc {t} | stride size 0x4 | range [0, 7] - * port="pif_tps_data_tc0_weight_i[8:0]" + * port="pif_tps_data_tc0_weight_i[14:0]" */ -/* register address for bitfield data_tc{t}_weight[8:0] */ +/* register address for bitfield data_tc{t}_weight[e:0] */ #define HW_ATL2_TPS_DATA_TCTWEIGHT_ADR(tc) (0x00007110 + (tc) * 0x4) -/* bitmask for bitfield data_tc{t}_weight[8:0] */ -#define HW_ATL2_TPS_DATA_TCTWEIGHT_MSK 0x000001ff -/* inverted bitmask for bitfield data_tc{t}_weight[8:0] */ -#define HW_ATL2_TPS_DATA_TCTWEIGHT_MSKN 0xfffffe00 -/* lower bit position of bitfield data_tc{t}_weight[8:0] */ +/* bitmask for bitfield data_tc{t}_weight[e:0] */ +#define HW_ATL2_TPS_DATA_TCTWEIGHT_MSK 0x00007fff +/* inverted bitmask for bitfield data_tc{t}_weight[e:0] */ +#define HW_ATL2_TPS_DATA_TCTWEIGHT_MSKN 0xffff8000 +/* lower bit position of bitfield data_tc{t}_weight[e:0] */ #define HW_ATL2_TPS_DATA_TCTWEIGHT_SHIFT 0 -/* width of bitfield data_tc{t}_weight[8:0] */ -#define HW_ATL2_TPS_DATA_TCTWEIGHT_WIDTH 9 -/* default value of bitfield data_tc{t}_weight[8:0] */ +/* width of bitfield data_tc{t}_weight[e:0] */ +#define HW_ATL2_TPS_DATA_TCTWEIGHT_WIDTH 15 +/* default value of bitfield data_tc{t}_weight[e:0] */ #define HW_ATL2_TPS_DATA_TCTWEIGHT_DEFAULT 0x0 /* tx interrupt moderation control register definitions diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index 517caedc0a87..1426c691c7c4 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -3085,6 +3085,7 @@ static int bnx2x_bsc_read(struct link_params *params, u8 xfer_cnt, u32 *data_array) { + u64 t0, delta; u32 val, i; int rc = 0; @@ -3114,17 +3115,18 @@ static int bnx2x_bsc_read(struct link_params *params, REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val); /* Poll for completion */ - i = 0; + t0 = ktime_get_ns(); val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND); while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) { - udelay(10); - val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND); - if (i++ > 1000) { - DP(NETIF_MSG_LINK, "wr 0 byte timed out after %d try\n", - i); + delta = ktime_get_ns() - t0; + if (delta > 10 * NSEC_PER_MSEC) { + DP(NETIF_MSG_LINK, "wr 0 byte timed out after %Lu ns\n", + delta); rc = -EFAULT; break; } + usleep_range(10, 20); + val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND); } if (rc == -EFAULT) return rc; @@ -3138,16 +3140,18 @@ static int bnx2x_bsc_read(struct link_params *params, REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val); /* Poll for completion */ - i = 0; + t0 = ktime_get_ns(); val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND); while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) { - udelay(10); - val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND); - if (i++ > 1000) { - DP(NETIF_MSG_LINK, "rd op timed out after %d try\n", i); + delta = ktime_get_ns() - t0; + if (delta > 10 * NSEC_PER_MSEC) { + DP(NETIF_MSG_LINK, "rd op timed out after %Lu ns\n", + delta); rc = -EFAULT; break; } + usleep_range(10, 20); + val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND); } if (rc == -EFAULT) return rc; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index f86b6217f829..c62589c266b2 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -4176,14 +4176,12 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, int i, intr_process, rc, tmo_count; struct input *req = msg; u32 *data = msg; - __le32 *resp_len; u8 
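/*
 * The bnx2x_bsc_read() loops above replace an iteration-counted udelay()
 * poll with an absolute ktime_get_ns() deadline, so the 10 ms budget
 * holds even when the loop body is delayed or preempted. A minimal
 * sketch of the pattern, assuming a hypothetical hw_op_done() predicate:
 *
 *	u64 t0 = ktime_get_ns();
 *
 *	while (!hw_op_done()) {
 *		if (ktime_get_ns() - t0 > 10 * NSEC_PER_MSEC)
 *			return -ETIMEDOUT;
 *		usleep_range(10, 20);
 *	}
 */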
*valid; u16 cp_ring_id, len = 0; struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr; u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN; struct hwrm_short_input short_input = {0}; u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER; - u8 *resp_addr = (u8 *)bp->hwrm_cmd_resp_addr; u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM; u16 dst = BNXT_HWRM_CHNL_CHIMP; @@ -4201,7 +4199,6 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, bar_offset = BNXT_GRCPF_REG_KONG_COMM; doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER; resp = bp->hwrm_cmd_kong_resp_addr; - resp_addr = (u8 *)bp->hwrm_cmd_kong_resp_addr; } memset(resp, 0, PAGE_SIZE); @@ -4270,7 +4267,6 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, tmo_count = HWRM_SHORT_TIMEOUT_COUNTER; timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER; tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT); - resp_len = (__le32 *)(resp_addr + HWRM_RESP_LEN_OFFSET); if (intr_process) { u16 seq_id = bp->hwrm_intr_seq_id; @@ -4298,9 +4294,8 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, le16_to_cpu(req->req_type)); return -EBUSY; } - len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >> - HWRM_RESP_LEN_SFT; - valid = resp_addr + len - 1; + len = le16_to_cpu(resp->resp_len); + valid = ((u8 *)resp) + len - 1; } else { int j; @@ -4311,8 +4306,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, */ if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) return -EBUSY; - len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >> - HWRM_RESP_LEN_SFT; + len = le16_to_cpu(resp->resp_len); if (len) break; /* on first few passes, just barely sleep */ @@ -4334,7 +4328,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, } /* Last byte of resp contains valid bit */ - valid = resp_addr + len - 1; + valid = ((u8 *)resp) + len - 1; for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) { /* make sure we read from updated DMA memory */ dma_rmb(); @@ -9333,7 +9327,7 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bnxt_free_skbs(bp); /* Save ring stats before shutdown */ - if (bp->bnapi) + if (bp->bnapi && irq_re_init) bnxt_get_ring_stats(bp, &bp->net_stats_prev); if (irq_re_init) { bnxt_free_irq(bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index c04ac4a36005..5c562e0aac67 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -659,11 +659,6 @@ struct nqe_cn { #define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout) #define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4) #define HWRM_COREDUMP_TIMEOUT ((HWRM_CMD_TIMEOUT) * 12) -#define HWRM_RESP_ERR_CODE_MASK 0xffff -#define HWRM_RESP_LEN_OFFSET 4 -#define HWRM_RESP_LEN_MASK 0xffff0000 -#define HWRM_RESP_LEN_SFT 16 -#define HWRM_RESP_VALID_MASK 0xff000000 #define BNXT_HWRM_REQ_MAX_SIZE 128 #define BNXT_HWRM_REQS_PER_PAGE (BNXT_PAGE_SIZE / \ BNXT_HWRM_REQ_MAX_SIZE) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index dd0c3f227009..6b88143af5ea 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -2119,11 +2119,12 @@ int bnxt_flash_package_from_file(struct net_device *dev, const char *filename, bnxt_hwrm_fw_set_time(bp); - if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE, - BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE, - &index, &item_len, NULL) != 0) { + 
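/*
 * The bnxt HWRM hunks above stop deriving the response length from a raw
 * offset/mask pair and read the header field directly, which is why the
 * HWRM_RESP_LEN_* constants can be dropped from bnxt.h below. The last
 * byte of the response carries the valid bit:
 *
 *	len = le16_to_cpu(resp->resp_len);
 *	valid = ((u8 *)resp) + len - 1;
 */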
rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE, + BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE, + &index, &item_len, NULL); + if (rc) { netdev_err(dev, "PKG update area not created in nvram\n"); - return -ENOBUFS; + return rc; } rc = request_firmware(&fw, filename, &dev->dev); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index fc1405a8ed74..5a41801acb6a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -60,6 +60,7 @@ #define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__) extern struct list_head adapter_list; +extern struct list_head uld_list; extern struct mutex uld_mutex; /* Suspend an Ethernet Tx queue with fewer available descriptors than this. @@ -822,6 +823,13 @@ struct sge_uld_txq_info { u16 ntxq; /* # of egress uld queues */ }; +/* struct to maintain ULD list to reallocate ULD resources on hotplug */ +struct cxgb4_uld_list { + struct cxgb4_uld_info uld_info; + struct list_head list_node; + enum cxgb4_uld uld_type; +}; + enum sge_eosw_state { CXGB4_EO_STATE_CLOSED = 0, /* Not ready to accept traffic */ CXGB4_EO_STATE_FLOWC_OPEN_SEND, /* Send FLOWC open request */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index 7818c392da50..c3dd50b45c48 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -1813,12 +1813,8 @@ static int mps_tcam_show(struct seq_file *seq, void *v) /* Inner header lookup */ if (lookup_type && (lookup_type != DATALKPTYPE_M)) { seq_printf(seq, - "%3u %02x:%02x:%02x:%02x:%02x:%02x " - "%012llx %06x %06x - - %3c" - " 'I' %4x " - "%3c %#x%4u%4d", idx, addr[0], - addr[1], addr[2], addr[3], - addr[4], addr[5], + "%3u %pM %012llx %06x %06x - - %3c 'I' %4x %3c %#x%4u%4d", + idx, addr, (unsigned long long)mask, vniy, (vnix | vniy), dip_hit ? 'Y' : 'N', @@ -1830,10 +1826,8 @@ static int mps_tcam_show(struct seq_file *seq, void *v) T6_VF_G(cls_lo) : -1); } else { seq_printf(seq, - "%3u %02x:%02x:%02x:%02x:%02x:%02x " - "%012llx - - ", - idx, addr[0], addr[1], addr[2], - addr[3], addr[4], addr[5], + "%3u %pM %012llx - - ", + idx, addr, (unsigned long long)mask); if (vlan_vld) @@ -1851,10 +1845,8 @@ static int mps_tcam_show(struct seq_file *seq, void *v) T6_VF_G(cls_lo) : -1); } } else - seq_printf(seq, "%3u %02x:%02x:%02x:%02x:%02x:%02x " - "%012llx%3c %#x%4u%4d", - idx, addr[0], addr[1], addr[2], addr[3], - addr[4], addr[5], (unsigned long long)mask, + seq_printf(seq, "%3u %pM %012llx%3c %#x%4u%4d", + idx, addr, (unsigned long long)mask, (cls_lo & SRAM_VLD_F) ? 
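/*
 * The mps_tcam_show() changes above fold six "%02x" specifiers into the
 * kernel's %pM printf extension, which formats a 6-byte MAC address from
 * a pointer in the usual colon-separated form. Illustrative usage,
 * assuming addr is the u8 MAC array already read from the TCAM:
 *
 *	seq_printf(seq, "%3u %pM %012llx", idx, addr, mask);
 */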
'Y' : 'N', PORTMAP_G(cls_hi), PF_G(cls_lo), diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index d05c2371d8c7..7a0414f379be 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -180,6 +180,7 @@ static struct dentry *cxgb4_debugfs_root; LIST_HEAD(adapter_list); DEFINE_MUTEX(uld_mutex); +LIST_HEAD(uld_list); static int cfg_queues(struct adapter *adap); @@ -6519,11 +6520,8 @@ fw_attach_fail: /* PCIe EEH recovery on powerpc platforms needs fundamental reset */ pdev->needs_freset = 1; - if (is_uld(adapter)) { - mutex_lock(&uld_mutex); - list_add_tail(&adapter->list_node, &adapter_list); - mutex_unlock(&uld_mutex); - } + if (is_uld(adapter)) + cxgb4_uld_enable(adapter); if (!is_t4(adapter->params.chip)) cxgb4_ptp_init(adapter); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c index e65b52375dd8..6b1d3df4b9ba 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c @@ -681,6 +681,74 @@ static void cxgb4_set_ktls_feature(struct adapter *adap, bool enable) } #endif +static void cxgb4_uld_alloc_resources(struct adapter *adap, + enum cxgb4_uld type, + const struct cxgb4_uld_info *p) +{ + int ret = 0; + + if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) || + (type != CXGB4_ULD_CRYPTO && !is_offload(adap))) + return; + if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip)) + return; + ret = cfg_queues_uld(adap, type, p); + if (ret) + goto out; + ret = setup_sge_queues_uld(adap, type, p->lro); + if (ret) + goto free_queues; + if (adap->flags & CXGB4_USING_MSIX) { + ret = request_msix_queue_irqs_uld(adap, type); + if (ret) + goto free_rxq; + } + if (adap->flags & CXGB4_FULL_INIT_DONE) + enable_rx_uld(adap, type); +#ifdef CONFIG_CHELSIO_TLS_DEVICE + /* send mbox to enable ktls related settings. 
*/ + if (type == CXGB4_ULD_CRYPTO && + (adap->params.crypto & FW_CAPS_CONFIG_TX_TLS_HW)) + cxgb4_set_ktls_feature(adap, 1); +#endif + if (adap->uld[type].add) + goto free_irq; + ret = setup_sge_txq_uld(adap, type, p); + if (ret) + goto free_irq; + adap->uld[type] = *p; + ret = uld_attach(adap, type); + if (ret) + goto free_txq; + return; +free_txq: + release_sge_txq_uld(adap, type); +free_irq: + if (adap->flags & CXGB4_FULL_INIT_DONE) + quiesce_rx_uld(adap, type); + if (adap->flags & CXGB4_USING_MSIX) + free_msix_queue_irqs_uld(adap, type); +free_rxq: + free_sge_queues_uld(adap, type); +free_queues: + free_queues_uld(adap, type); +out: + dev_warn(adap->pdev_dev, + "ULD registration failed for uld type %d\n", type); +} + +void cxgb4_uld_enable(struct adapter *adap) +{ + struct cxgb4_uld_list *uld_entry; + + mutex_lock(&uld_mutex); + list_add_tail(&adap->list_node, &adapter_list); + list_for_each_entry(uld_entry, &uld_list, list_node) + cxgb4_uld_alloc_resources(adap, uld_entry->uld_type, + &uld_entry->uld_info); + mutex_unlock(&uld_mutex); +} + /* cxgb4_register_uld - register an upper-layer driver * @type: the ULD type * @p: the ULD methods @@ -691,63 +759,23 @@ static void cxgb4_set_ktls_feature(struct adapter *adap, bool enable) void cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p) { + struct cxgb4_uld_list *uld_entry; struct adapter *adap; - int ret = 0; if (type >= CXGB4_ULD_MAX) return; + uld_entry = kzalloc(sizeof(*uld_entry), GFP_KERNEL); + if (!uld_entry) + return; + + memcpy(&uld_entry->uld_info, p, sizeof(struct cxgb4_uld_info)); mutex_lock(&uld_mutex); - list_for_each_entry(adap, &adapter_list, list_node) { - if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) || - (type != CXGB4_ULD_CRYPTO && !is_offload(adap))) - continue; - if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip)) - continue; - ret = cfg_queues_uld(adap, type, p); - if (ret) - goto out; - ret = setup_sge_queues_uld(adap, type, p->lro); - if (ret) - goto free_queues; - if (adap->flags & CXGB4_USING_MSIX) { - ret = request_msix_queue_irqs_uld(adap, type); - if (ret) - goto free_rxq; - } - if (adap->flags & CXGB4_FULL_INIT_DONE) - enable_rx_uld(adap, type); -#ifdef CONFIG_CHELSIO_TLS_DEVICE - /* send mbox to enable ktls related settings. 
*/ - if (type == CXGB4_ULD_CRYPTO && - (adap->params.crypto & FW_CAPS_CONFIG_TX_TLS_HW)) - cxgb4_set_ktls_feature(adap, 1); -#endif - if (adap->uld[type].add) - goto free_irq; - ret = setup_sge_txq_uld(adap, type, p); - if (ret) - goto free_irq; - adap->uld[type] = *p; - ret = uld_attach(adap, type); - if (ret) - goto free_txq; - continue; -free_txq: - release_sge_txq_uld(adap, type); -free_irq: - if (adap->flags & CXGB4_FULL_INIT_DONE) - quiesce_rx_uld(adap, type); - if (adap->flags & CXGB4_USING_MSIX) - free_msix_queue_irqs_uld(adap, type); -free_rxq: - free_sge_queues_uld(adap, type); -free_queues: - free_queues_uld(adap, type); -out: - dev_warn(adap->pdev_dev, - "ULD registration failed for uld type %d\n", type); - } + list_for_each_entry(adap, &adapter_list, list_node) + cxgb4_uld_alloc_resources(adap, type, p); + + uld_entry->uld_type = type; + list_add_tail(&uld_entry->list_node, &uld_list); mutex_unlock(&uld_mutex); return; } @@ -761,6 +789,7 @@ EXPORT_SYMBOL(cxgb4_register_uld); */ int cxgb4_unregister_uld(enum cxgb4_uld type) { + struct cxgb4_uld_list *uld_entry, *tmp; struct adapter *adap; if (type >= CXGB4_ULD_MAX) @@ -783,6 +812,13 @@ int cxgb4_unregister_uld(enum cxgb4_uld type) cxgb4_set_ktls_feature(adap, 0); #endif } + + list_for_each_entry_safe(uld_entry, tmp, &uld_list, list_node) { + if (uld_entry->uld_type == type) { + list_del(&uld_entry->list_node); + kfree(uld_entry); + } + } mutex_unlock(&uld_mutex); return 0; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h index 16796785eea3..085fa1424f9a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h @@ -327,6 +327,7 @@ enum cxgb4_control { CXGB4_CONTROL_DB_DROP, }; +struct adapter; struct pci_dev; struct l2t_data; struct net_device; @@ -465,6 +466,7 @@ struct cxgb4_uld_info { int (*tx_handler)(struct sk_buff *skb, struct net_device *dev); }; +void cxgb4_uld_enable(struct adapter *adap); void cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p); int cxgb4_unregister_uld(enum cxgb4_uld type); int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb); diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index c4416a5f8816..2972244e6eb0 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -2914,7 +2914,7 @@ static int dpaa_eth_probe(struct platform_device *pdev) } /* Do this here, so we can be verbose early */ - SET_NETDEV_DEV(net_dev, dev); + SET_NETDEV_DEV(net_dev, dev->parent); dev_set_drvdata(dev, net_dev); priv = netdev_priv(net_dev); diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 2e209142f2d1..4acb91dce5fc 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -88,8 +88,6 @@ static void fec_enet_itr_coal_init(struct net_device *ndev); struct fec_devinfo { u32 quirks; - u8 stop_gpr_reg; - u8 stop_gpr_bit; }; static const struct fec_devinfo fec_imx25_info = { @@ -112,8 +110,6 @@ static const struct fec_devinfo fec_imx6q_info = { FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 | FEC_QUIRK_HAS_RACC, - .stop_gpr_reg = 0x34, - .stop_gpr_bit = 27, }; static const struct fec_devinfo fec_mvf600_info = { @@ -3476,19 +3472,23 @@ static int fec_enet_get_irq_cnt(struct platform_device *pdev) } static int 
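/*
 * fec_enet_init_stop_mode() below switches from per-SoC hardcoded GPR
 * values to a three-cell "fsl,stop-mode" device-tree property: a phandle
 * to the GPR syscon, the register offset, and the bit number. A matching
 * DT fragment for i.MX6Q, using the 0x34/27 values that were previously
 * hardcoded in fec_imx6q_info:
 *
 *	fsl,stop-mode = <&gpr 0x34 27>;
 */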
fec_enet_init_stop_mode(struct fec_enet_private *fep, - struct fec_devinfo *dev_info, struct device_node *np) { struct device_node *gpr_np; + u32 out_val[3]; int ret = 0; - if (!dev_info) - return 0; - - gpr_np = of_parse_phandle(np, "gpr", 0); + gpr_np = of_parse_phandle(np, "fsl,stop-mode", 0); if (!gpr_np) return 0; + ret = of_property_read_u32_array(np, "fsl,stop-mode", out_val, + ARRAY_SIZE(out_val)); + if (ret) { + dev_dbg(&fep->pdev->dev, "no stop mode property\n"); + return ret; + } + fep->stop_gpr.gpr = syscon_node_to_regmap(gpr_np); if (IS_ERR(fep->stop_gpr.gpr)) { dev_err(&fep->pdev->dev, "could not find gpr regmap\n"); @@ -3497,8 +3497,8 @@ static int fec_enet_init_stop_mode(struct fec_enet_private *fep, goto out; } - fep->stop_gpr.reg = dev_info->stop_gpr_reg; - fep->stop_gpr.bit = dev_info->stop_gpr_bit; + fep->stop_gpr.reg = out_val[1]; + fep->stop_gpr.bit = out_val[2]; out: of_node_put(gpr_np); @@ -3575,7 +3575,7 @@ fec_probe(struct platform_device *pdev) if (of_get_property(np, "fsl,magic-packet", NULL)) fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET; - ret = fec_enet_init_stop_mode(fep, dev_info, np); + ret = fec_enet_init_stop_mode(fep, np); if (ret) goto failed_stop_mode; diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index 6e5f6dd169b5..552e7554a9f8 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -42,6 +42,7 @@ #include <soc/fsl/qe/ucc.h> #include <soc/fsl/qe/ucc_fast.h> #include <asm/machdep.h> +#include <net/sch_generic.h> #include "ucc_geth.h" @@ -1548,11 +1549,8 @@ static int ugeth_disable(struct ucc_geth_private *ugeth, enum comm_dir mode) static void ugeth_quiesce(struct ucc_geth_private *ugeth) { - /* Prevent any further xmits, plus detach the device. */ - netif_device_detach(ugeth->ndev); - - /* Wait for any current xmits to finish. */ - netif_tx_disable(ugeth->ndev); + /* Prevent any further xmits */ + netif_tx_stop_all_queues(ugeth->ndev); /* Disable the interrupt to avoid NAPI rescheduling. 
*/ disable_irq(ugeth->ug_info->uf_info.irq); @@ -1565,7 +1563,10 @@ static void ugeth_activate(struct ucc_geth_private *ugeth) { napi_enable(&ugeth->napi); enable_irq(ugeth->ug_info->uf_info.irq); - netif_device_attach(ugeth->ndev); + + /* allow to xmit again */ + netif_tx_wake_all_queues(ugeth->ndev); + __netdev_watchdog_up(ugeth->ndev); } /* Called every time the controller might need to be made diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 7506cabaa16e..d041cac9a487 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -145,7 +145,6 @@ enum hnae3_reset_notify_type { HNAE3_DOWN_CLIENT, HNAE3_INIT_CLIENT, HNAE3_UNINIT_CLIENT, - HNAE3_RESTORE_CLIENT, }; enum hnae3_hw_error_type { @@ -622,16 +621,6 @@ struct hnae3_roce_private_info { unsigned long state; }; -struct hnae3_unic_private_info { - struct net_device *netdev; - u16 rx_buf_len; - u16 num_tx_desc; - u16 num_rx_desc; - - u16 num_tqps; /* total number of tqps in this handle */ - struct hnae3_queue **tqp; /* array base of all TQPs of this instance */ -}; - #define HNAE3_SUPPORT_APP_LOOPBACK BIT(0) #define HNAE3_SUPPORT_PHY_LOOPBACK BIT(1) #define HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK BIT(2) @@ -657,7 +646,6 @@ struct hnae3_handle { union { struct net_device *netdev; /* first member */ struct hnae3_knic_private_info kinfo; - struct hnae3_unic_private_info uinfo; struct hnae3_roce_private_info rinfo; }; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 9fe40c7773b4..b14f2abc2425 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -1544,12 +1544,6 @@ static int hns3_nic_set_features(struct net_device *netdev, return ret; } - if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) && - h->ae_algo->ops->enable_vlan_filter) { - enable = !!(features & NETIF_F_HW_VLAN_CTAG_FILTER); - h->ae_algo->ops->enable_vlan_filter(h, enable); - } - if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && h->ae_algo->ops->enable_hw_strip_rxvtag) { enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index 60f82ad89957..66cd4395f781 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -469,21 +469,8 @@ struct hns3_enet_tqp_vector { unsigned long last_jiffies; } ____cacheline_internodealigned_in_smp; -enum hns3_udp_tnl_type { - HNS3_UDP_TNL_VXLAN, - HNS3_UDP_TNL_GENEVE, - HNS3_UDP_TNL_MAX, -}; - -struct hns3_udp_tunnel { - u16 dst_port; - int used; -}; - struct hns3_nic_priv { struct hnae3_handle *ae_handle; - u32 enet_ver; - u32 port_id; struct net_device *netdev; struct device *dev; @@ -495,19 +482,10 @@ struct hns3_nic_priv { struct hns3_enet_tqp_vector *tqp_vector; u16 vector_num; - /* The most recently read link state */ - int link; u64 tx_timeout_count; unsigned long state; - struct timer_list service_timer; - - struct work_struct service_task; - - struct notifier_block notifier_block; - /* Vxlan/Geneve information */ - struct hns3_udp_tunnel udp_tnl[HNS3_UDP_TNL_MAX]; struct hns3_enet_coalesce tx_coal; struct hns3_enet_coalesce rx_coal; }; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c index 7f509eff562e..1d6c328bd9fb 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c 
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c @@ -11,8 +11,6 @@ #include "hnae3.h" #include "hclge_main.h" -#define hclge_is_csq(ring) ((ring)->flag & HCLGE_TYPE_CSQ) - #define cmq_ring_to_dev(ring) (&(ring)->dev->pdev->dev) static int hclge_ring_space(struct hclge_cmq_ring *ring) @@ -426,6 +424,9 @@ int hclge_cmd_init(struct hclge_dev *hdev) * reset may happen when lower level reset is being processed. */ if ((hclge_is_reset_pending(hdev))) { + dev_err(&hdev->pdev->dev, + "failed to init cmd since reset %#lx pending\n", + hdev->reset_pending); ret = -EBUSY; goto err_cmd_init; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index e3bab8f3847f..463f29151ef0 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -884,8 +884,8 @@ struct hclge_cfg_tso_status_cmd { #define HCLGE_GRO_EN_B 0 struct hclge_cfg_gro_status_cmd { - __le16 gro_en; - u8 rsv[22]; + u8 gro_en; + u8 rsv[23]; }; #define HCLGE_TSO_MSS_MIN 256 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index b796d3fb5b0b..96bfad52630d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -1387,7 +1387,8 @@ static int hclge_configure(struct hclge_dev *hdev) ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed); if (ret) { - dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret); + dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n", + cfg.default_speed, ret); return ret; } @@ -1429,26 +1430,17 @@ static int hclge_configure(struct hclge_dev *hdev) return ret; } -static int hclge_config_tso(struct hclge_dev *hdev, unsigned int tso_mss_min, - unsigned int tso_mss_max) +static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min, + u16 tso_mss_max) { struct hclge_cfg_tso_status_cmd *req; struct hclge_desc desc; - u16 tso_mss; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false); req = (struct hclge_cfg_tso_status_cmd *)desc.data; - - tso_mss = 0; - hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M, - HCLGE_TSO_MSS_MIN_S, tso_mss_min); - req->tso_mss_min = cpu_to_le16(tso_mss); - - tso_mss = 0; - hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M, - HCLGE_TSO_MSS_MIN_S, tso_mss_max); - req->tso_mss_max = cpu_to_le16(tso_mss); + req->tso_mss_min = cpu_to_le16(tso_mss_min); + req->tso_mss_max = cpu_to_le16(tso_mss_max); return hclge_cmd_send(&hdev->hw, &desc, 1); } @@ -1465,7 +1457,7 @@ static int hclge_config_gro(struct hclge_dev *hdev, bool en) hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false); req = (struct hclge_cfg_gro_status_cmd *)desc.data; - req->gro_en = cpu_to_le16(en ? 1 : 0); + req->gro_en = en ? 
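/*
 * With gro_en reworked from __le16 to a single u8 in
 * hclge_cfg_gro_status_cmd above, the assignment here no longer needs
 * cpu_to_le16(): a one-byte field has no byte order to convert, so a
 * plain store is correct on any endianness:
 *
 *	req->gro_en = en ? 1 : 0;
 */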
1 : 0; ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) @@ -3770,11 +3762,6 @@ static int hclge_reset_rebuild(struct hclge_dev *hdev) hclge_clear_reset_cause(hdev); - ret = hclge_reset_prepare_up(hdev); - if (ret) - return ret; - - ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT); /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1 * times @@ -3783,6 +3770,10 @@ static int hclge_reset_rebuild(struct hclge_dev *hdev) hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1) return ret; + ret = hclge_reset_prepare_up(hdev); + if (ret) + return ret; + rtnl_lock(); ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT); rtnl_unlock(); @@ -6584,8 +6575,6 @@ static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en) /* 2 Then setup the loopback flag */ loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en); hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0); - hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0); - hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0); req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); @@ -9931,10 +9920,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) int ret; hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); - if (!hdev) { - ret = -ENOMEM; - goto out; - } + if (!hdev) + return -ENOMEM; hdev->pdev = pdev; hdev->ae_dev = ae_dev; @@ -10113,6 +10100,7 @@ err_pci_uninit: pci_release_regions(pdev); pci_disable_device(pdev); out: + mutex_destroy(&hdev->vport_lock); return ret; } @@ -10736,16 +10724,19 @@ static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num, int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc) { - /*prepare 4 commands to query DFX BD number*/ - hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true); - desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); - hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true); - desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); - hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true); - desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); - hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true); + int i; + + /* initialize command BD except the last one */ + for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) { + hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, + true); + desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + } + + /* initialize the last command BD */ + hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true); - return hclge_cmd_send(&hdev->hw, desc, 4); + return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT); } static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 913c4f677404..46e6e0fef3ba 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -771,12 +771,6 @@ struct hclge_dev { u16 num_roce_msi; /* Num of roce vectors for this PF */ int roce_base_vector; - u16 pending_udp_bitmap; - - u16 rx_itr_default; - u16 tx_itr_default; - - u16 adminq_work_limit; /* Num of admin receive queue desc to process */ unsigned long service_timer_period; unsigned long service_timer_previous; struct timer_list reset_timer; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c index 696c5ae922e3..e89820702540 100644 --- 
a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c @@ -155,7 +155,7 @@ int hclge_mac_mdio_config(struct hclge_dev *hdev) ret = mdiobus_register(mdio_bus); if (ret) { dev_err(mdio_bus->parent, - "Failed to register MDIO bus ret = %#x\n", ret); + "failed to register MDIO bus, ret = %d\n", ret); return ret; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c index f38d236ebf4f..fec65239a3c8 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c @@ -11,9 +11,6 @@ #include "hclgevf_main.h" #include "hnae3.h" -#define hclgevf_is_csq(ring) ((ring)->flag & HCLGEVF_TYPE_CSQ) -#define hclgevf_ring_to_dma_dir(ring) (hclgevf_is_csq(ring) ? \ - DMA_TO_DEVICE : DMA_FROM_DEVICE) #define cmq_ring_to_dev(ring) (&(ring)->dev->pdev->dev) static int hclgevf_ring_space(struct hclgevf_cmq_ring *ring) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h index f830eef02e5c..40d6e602ab51 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h @@ -161,8 +161,8 @@ struct hclgevf_query_res_cmd { #define HCLGEVF_GRO_EN_B 0 struct hclgevf_cfg_gro_status_cmd { - __le16 gro_en; - u8 rsv[22]; + u8 gro_en; + u8 rsv[23]; }; #define HCLGEVF_RSS_DEFAULT_OUTPORT_B 4 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 32341dcaa6c1..1b9578d0bd80 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -46,7 +46,7 @@ static const u32 cmdq_reg_addr_list[] = {HCLGEVF_CMDQ_TX_ADDR_L_REG, HCLGEVF_CMDQ_RX_TAIL_REG, HCLGEVF_CMDQ_RX_HEAD_REG, HCLGEVF_VECTOR0_CMDQ_SRC_REG, - HCLGEVF_CMDQ_INTR_STS_REG, + HCLGEVF_VECTOR0_CMDQ_STATE_REG, HCLGEVF_CMDQ_INTR_EN_REG, HCLGEVF_CMDQ_INTR_GEN_REG}; @@ -669,8 +669,8 @@ static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size) u16 tc_size[HCLGEVF_MAX_TC_NUM]; struct hclgevf_desc desc; u16 roundup_size; - int status; unsigned int i; + int status; req = (struct hclgevf_rss_tc_mode_cmd *)desc.data; @@ -1143,7 +1143,6 @@ static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, send_msg.en_mc = en_mc_pmc ? 
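/*
 * Further down, hclgevf_init_nic_client_instance() snapshots
 * rst_stats.rst_cnt before calling init_instance() and re-checks it
 * (together with HCLGEVF_STATE_RST_HANDLING) afterwards; if a reset
 * raced with the init, the client registration is unwound and -EBUSY
 * returned so the caller can retry against a consistent state.
 */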
1 : 0; ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); - if (ret) dev_err(&hdev->pdev->dev, "Set promisc mode fail, status is %d.\n", ret); @@ -1826,7 +1825,7 @@ static void hclgevf_dump_rst_info(struct hclgevf_dev *hdev) dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n", hclgevf_read_dev(&hdev->hw, HCLGEVF_MISC_VECTOR_REG_BASE)); dev_info(&hdev->pdev->dev, "vector0 interrupt status: 0x%x\n", - hclgevf_read_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_STAT_REG)); + hclgevf_read_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_STATE_REG)); dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n", hclgevf_read_dev(&hdev->hw, HCLGEVF_CMDQ_TX_DEPTH_REG)); dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n", @@ -2250,7 +2249,7 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev, /* fetch the events from their corresponding regs */ cmdq_stat_reg = hclgevf_read_dev(&hdev->hw, - HCLGEVF_VECTOR0_CMDQ_STAT_REG); + HCLGEVF_VECTOR0_CMDQ_STATE_REG); if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_stat_reg) { rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); @@ -2403,7 +2402,7 @@ static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en) false); req = (struct hclgevf_cfg_gro_status_cmd *)desc.data; - req->gro_en = cpu_to_le16(en ? 1 : 0); + req->gro_en = en ? 1 : 0; ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); if (ret) @@ -2713,6 +2712,7 @@ static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev *ae_dev, struct hnae3_client *client) { struct hclgevf_dev *hdev = ae_dev->priv; + int rst_cnt = hdev->rst_stats.rst_cnt; int ret; ret = client->ops->init_instance(&hdev->nic); @@ -2720,6 +2720,14 @@ static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev *ae_dev, return ret; set_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); + if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) || + rst_cnt != hdev->rst_stats.rst_cnt) { + clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); + + client->ops->uninit_instance(&hdev->nic, 0); + return -EBUSY; + } + hnae3_set_client_init_flag(client, ae_dev, 1); if (netif_msg_drv(&hdev->nic)) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h index f19583c4bc9b..c1fac8920ae3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h @@ -42,8 +42,6 @@ #define HCLGEVF_CMDQ_RX_DEPTH_REG 0x27020 #define HCLGEVF_CMDQ_RX_TAIL_REG 0x27024 #define HCLGEVF_CMDQ_RX_HEAD_REG 0x27028 -#define HCLGEVF_CMDQ_INTR_SRC_REG 0x27100 -#define HCLGEVF_CMDQ_INTR_STS_REG 0x27104 #define HCLGEVF_CMDQ_INTR_EN_REG 0x27108 #define HCLGEVF_CMDQ_INTR_GEN_REG 0x2710C @@ -88,7 +86,7 @@ /* Vector0 interrupt CMDQ event source register(RW) */ #define HCLGEVF_VECTOR0_CMDQ_SRC_REG 0x27100 /* Vector0 interrupt CMDQ event status register(RO) */ -#define HCLGEVF_VECTOR0_CMDQ_STAT_REG 0x27104 +#define HCLGEVF_VECTOR0_CMDQ_STATE_REG 0x27104 /* CMDQ register bits for RX event(=MBX event) */ #define HCLGEVF_VECTOR0_RX_CMDQ_INT_B 1 /* RST register bits for RESET event */ @@ -280,7 +278,7 @@ struct hclgevf_dev { struct semaphore reset_sem; /* protect reset process */ u32 fw_version; - u16 num_tqps; /* num task queue pairs of this PF */ + u16 num_tqps; /* num task queue pairs of this VF */ u16 alloc_rss_size; /* allocated RSS task queue */ u16 rss_size_max; /* HW defined max RSS task queue */ diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 
3de549c6c693..197dc5b2c090 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -4678,12 +4678,10 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq, dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc); break; } - dev_info(dev, "Partner protocol version is %d\n", - crq->version_exchange_rsp.version); - if (be16_to_cpu(crq->version_exchange_rsp.version) < - ibmvnic_version) - ibmvnic_version = + ibmvnic_version = be16_to_cpu(crq->version_exchange_rsp.version); + dev_info(dev, "Partner protocol version is %d\n", + ibmvnic_version); send_cap_queries(adapter); break; case QUERY_CAPABILITY_RSP: diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c index 48428d6a00be..623e516a9630 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_hw.c +++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c @@ -3960,7 +3960,7 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, * @hw: Struct containing variables accessed by shared code * * Reads the first 64 16 bit words of the EEPROM and sums the values read. - * If the the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is + * If the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is * valid. */ s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw) diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 05bc6e216bca..d9fa4600f745 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -542,8 +542,13 @@ void e1000_reinit_locked(struct e1000_adapter *adapter) WARN_ON(in_interrupt()); while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) msleep(1); - e1000_down(adapter); - e1000_up(adapter); + + /* only run the task if not already down */ + if (!test_bit(__E1000_DOWN, &adapter->flags)) { + e1000_down(adapter); + e1000_up(adapter); + } + clear_bit(__E1000_RESETTING, &adapter->flags); } @@ -1433,10 +1438,15 @@ int e1000_close(struct net_device *netdev) struct e1000_hw *hw = &adapter->hw; int count = E1000_CHECK_RESET_COUNT; - while (test_bit(__E1000_RESETTING, &adapter->flags) && count--) + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags) && count--) usleep_range(10000, 20000); - WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); + WARN_ON(count < 0); + + /* signal that we're down so that the reset task will no longer run */ + set_bit(__E1000_DOWN, &adapter->flags); + clear_bit(__E1000_RESETTING, &adapter->flags); + e1000_down(adapter); e1000_power_down_phy(adapter); e1000_free_irq(adapter); diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index 37a2314d3e6b..944abd5eae11 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -576,7 +576,6 @@ static inline u32 __er32(struct e1000_hw *hw, unsigned long reg) #define er32(reg) __er32(hw, E1000_##reg) -s32 __ew32_prepare(struct e1000_hw *hw); void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val); #define ew32(reg, val) __ew32(hw, E1000_##reg, (val)) diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index 735bf25952fc..f999cca37a8a 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -300,7 +300,11 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) * so forcibly disable it. 
*/ hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown; - e1000_disable_ulp_lpt_lp(hw, true); + ret_val = e1000_disable_ulp_lpt_lp(hw, true); + if (ret_val) { + e_warn("Failed to disable ULP\n"); + goto out; + } ret_val = hw->phy.ops.acquire(hw); if (ret_val) { diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index e0b074820b47..444532292588 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -107,6 +107,45 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = { {0, NULL} }; +struct e1000e_me_supported { + u16 device_id; /* supported device ID */ +}; + +static const struct e1000e_me_supported me_supported[] = { + {E1000_DEV_ID_PCH_LPT_I217_LM}, + {E1000_DEV_ID_PCH_LPTLP_I218_LM}, + {E1000_DEV_ID_PCH_I218_LM2}, + {E1000_DEV_ID_PCH_I218_LM3}, + {E1000_DEV_ID_PCH_SPT_I219_LM}, + {E1000_DEV_ID_PCH_SPT_I219_LM2}, + {E1000_DEV_ID_PCH_LBG_I219_LM3}, + {E1000_DEV_ID_PCH_SPT_I219_LM4}, + {E1000_DEV_ID_PCH_SPT_I219_LM5}, + {E1000_DEV_ID_PCH_CNP_I219_LM6}, + {E1000_DEV_ID_PCH_CNP_I219_LM7}, + {E1000_DEV_ID_PCH_ICP_I219_LM8}, + {E1000_DEV_ID_PCH_ICP_I219_LM9}, + {E1000_DEV_ID_PCH_CMP_I219_LM10}, + {E1000_DEV_ID_PCH_CMP_I219_LM11}, + {E1000_DEV_ID_PCH_CMP_I219_LM12}, + {E1000_DEV_ID_PCH_TGP_I219_LM13}, + {E1000_DEV_ID_PCH_TGP_I219_LM14}, + {E1000_DEV_ID_PCH_TGP_I219_LM15}, + {0} +}; + +static bool e1000e_check_me(u16 device_id) +{ + struct e1000e_me_supported *id; + + for (id = (struct e1000e_me_supported *)me_supported; + id->device_id; id++) + if (device_id == id->device_id) + return true; + + return false; +} + /** * __ew32_prepare - prepare to write to MAC CSR register on certain parts * @hw: pointer to the HW structure @@ -119,14 +158,12 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = { * has bit 24 set while ME is accessing MAC CSR registers, wait if it is set * and try again a number of times. 
**/ -s32 __ew32_prepare(struct e1000_hw *hw) +static void __ew32_prepare(struct e1000_hw *hw) { s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT; while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i) udelay(50); - - return i; } void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val) @@ -607,11 +644,11 @@ static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i) { struct e1000_adapter *adapter = rx_ring->adapter; struct e1000_hw *hw = &adapter->hw; - s32 ret_val = __ew32_prepare(hw); + __ew32_prepare(hw); writel(i, rx_ring->tail); - if (unlikely(!ret_val && (i != readl(rx_ring->tail)))) { + if (unlikely(i != readl(rx_ring->tail))) { u32 rctl = er32(RCTL); ew32(RCTL, rctl & ~E1000_RCTL_EN); @@ -624,11 +661,11 @@ static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i) { struct e1000_adapter *adapter = tx_ring->adapter; struct e1000_hw *hw = &adapter->hw; - s32 ret_val = __ew32_prepare(hw); + __ew32_prepare(hw); writel(i, tx_ring->tail); - if (unlikely(!ret_val && (i != readl(tx_ring->tail)))) { + if (unlikely(i != readl(tx_ring->tail))) { u32 tctl = er32(TCTL); ew32(TCTL, tctl & ~E1000_TCTL_EN); @@ -5294,6 +5331,10 @@ static void e1000_watchdog_task(struct work_struct *work) /* oops */ break; } + if (hw->mac.type == e1000_pch_spt) { + netdev->features &= ~NETIF_F_TSO; + netdev->features &= ~NETIF_F_TSO6; + } } /* enable transmits in the hardware, need to do this @@ -6912,7 +6953,8 @@ static int e1000e_pm_suspend(struct device *dev) e1000e_pm_thaw(dev); /* Introduce S0ix implementation */ - if (hw->mac.type >= e1000_pch_cnp) + if (hw->mac.type >= e1000_pch_cnp && + !e1000e_check_me(hw->adapter->pdev->device)) e1000e_s0ix_entry_flow(adapter); return rc; @@ -6927,7 +6969,8 @@ static int e1000e_pm_resume(struct device *dev) int rc; /* Introduce S0ix implementation */ - if (hw->mac.type >= e1000_pch_cnp) + if (hw->mac.type >= e1000_pch_cnp && + !e1000e_check_me(hw->adapter->pdev->device)) e1000e_s0ix_exit_flow(adapter); rc = __e1000_resume(pdev); diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index 37514a75f928..6a089848c857 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c @@ -694,10 +694,8 @@ init_adminq_exit: * i40e_shutdown_adminq - shutdown routine for the Admin Queue * @hw: pointer to the hardware structure **/ -i40e_status i40e_shutdown_adminq(struct i40e_hw *hw) +void i40e_shutdown_adminq(struct i40e_hw *hw) { - i40e_status ret_code = 0; - if (i40e_check_asq_alive(hw)) i40e_aq_queue_shutdown(hw, true); @@ -706,8 +704,6 @@ i40e_status i40e_shutdown_adminq(struct i40e_hw *hw) if (hw->nvm_buff.va) i40e_free_virt_mem(hw, &hw->nvm_buff); - - return ret_code; } /** diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 2a037ec244b9..5d807c8004f8 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -11,7 +11,7 @@ #include "i40e_diag.h" #include "i40e_xsk.h" #include <net/udp_tunnel.h> -#include <net/xdp_sock.h> +#include <net/xdp_sock_drv.h> /* All i40e tracepoints are defined by the include below, which * must be included exactly once across the whole kernel with * CREATE_TRACE_POINTS defined @@ -3260,26 +3260,31 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring) if (ring->vsi->type == I40E_VSI_MAIN) xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); + kfree(ring->rx_bi); ring->xsk_umem = i40e_xsk_umem(ring); if (ring->xsk_umem) 
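/*
 * This branch belongs to i40e's move from MEM_TYPE_ZERO_COPY with a
 * driver-owned zero_copy_allocator to the shared xsk_buff_pool API: the
 * ring now registers MEM_TYPE_XSK_BUFF_POOL and asks the core for the Rx
 * frame size instead of computing chunk_size_nohr - XDP_PACKET_HEADROOM
 * itself:
 *
 *	ring->rx_buf_len = xsk_umem_get_rx_frame_size(ring->xsk_umem);
 *	xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
 *				   MEM_TYPE_XSK_BUFF_POOL, NULL);
 */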
{ - ring->rx_buf_len = ring->xsk_umem->chunk_size_nohr - - XDP_PACKET_HEADROOM; + ret = i40e_alloc_rx_bi_zc(ring); + if (ret) + return ret; + ring->rx_buf_len = xsk_umem_get_rx_frame_size(ring->xsk_umem); /* For AF_XDP ZC, we disallow packets to span on * multiple buffers, thus letting us skip that * handling in the fast-path. */ chain_len = 1; - ring->zca.free = i40e_zca_free; ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, - MEM_TYPE_ZERO_COPY, - &ring->zca); + MEM_TYPE_XSK_BUFF_POOL, + NULL); if (ret) return ret; dev_info(&vsi->back->pdev->dev, - "Registered XDP mem model MEM_TYPE_ZERO_COPY on Rx ring %d\n", + "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n", ring->queue_index); } else { + ret = i40e_alloc_rx_bi(ring); + if (ret) + return ret; ring->rx_buf_len = vsi->rx_buf_len; if (ring->vsi->type == I40E_VSI_MAIN) { ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, @@ -3344,9 +3349,12 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring) ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q); writel(0, ring->tail); - ok = ring->xsk_umem ? - i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring)) : - !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring)); + if (ring->xsk_umem) { + xsk_buff_set_rxq_info(ring->xsk_umem, &ring->xdp_rxq); + ok = i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring)); + } else { + ok = !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring)); + } if (!ok) { /* Log this in case the user has forgotten to give the kernel * any buffers, even later in the application. @@ -14478,29 +14486,29 @@ static void i40e_print_features(struct i40e_pf *pf) i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id); #ifdef CONFIG_PCI_IOV - i += snprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs); + i += scnprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs); #endif - i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d", + i += scnprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d", pf->hw.func_caps.num_vsis, pf->vsi[pf->lan_vsi]->num_queue_pairs); if (pf->flags & I40E_FLAG_RSS_ENABLED) - i += snprintf(&buf[i], REMAIN(i), " RSS"); + i += scnprintf(&buf[i], REMAIN(i), " RSS"); if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) - i += snprintf(&buf[i], REMAIN(i), " FD_ATR"); + i += scnprintf(&buf[i], REMAIN(i), " FD_ATR"); if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { - i += snprintf(&buf[i], REMAIN(i), " FD_SB"); - i += snprintf(&buf[i], REMAIN(i), " NTUPLE"); + i += scnprintf(&buf[i], REMAIN(i), " FD_SB"); + i += scnprintf(&buf[i], REMAIN(i), " NTUPLE"); } if (pf->flags & I40E_FLAG_DCB_CAPABLE) - i += snprintf(&buf[i], REMAIN(i), " DCB"); - i += snprintf(&buf[i], REMAIN(i), " VxLAN"); - i += snprintf(&buf[i], REMAIN(i), " Geneve"); + i += scnprintf(&buf[i], REMAIN(i), " DCB"); + i += scnprintf(&buf[i], REMAIN(i), " VxLAN"); + i += scnprintf(&buf[i], REMAIN(i), " Geneve"); if (pf->flags & I40E_FLAG_PTP) - i += snprintf(&buf[i], REMAIN(i), " PTP"); + i += scnprintf(&buf[i], REMAIN(i), " PTP"); if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) - i += snprintf(&buf[i], REMAIN(i), " VEB"); + i += scnprintf(&buf[i], REMAIN(i), " VEB"); else - i += snprintf(&buf[i], REMAIN(i), " VEPA"); + i += scnprintf(&buf[i], REMAIN(i), " VEPA"); dev_info(&pf->pdev->dev, "%s\n", buf); kfree(buf); diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index bbb478f09093..5c1378641b3b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -17,7 +17,7 @@ /* adminq functions 
*/ i40e_status i40e_init_adminq(struct i40e_hw *hw); -i40e_status i40e_shutdown_adminq(struct i40e_hw *hw); +void i40e_shutdown_adminq(struct i40e_hw *hw); void i40e_adminq_init_ring_data(struct i40e_hw *hw); i40e_status i40e_clean_arq_element(struct i40e_hw *hw, struct i40e_arq_event_info *e, diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index a3772beffe02..f613782f2f56 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -521,28 +521,29 @@ int i40e_add_del_fdir(struct i40e_vsi *vsi, /** * i40e_fd_handle_status - check the Programming Status for FD * @rx_ring: the Rx ring for this descriptor - * @rx_desc: the Rx descriptor for programming Status, not a packet descriptor. + * @qword0_raw: qword0 + * @qword1: qword1 after le_to_cpu * @prog_id: the id originally used for programming * * This is used to verify if the FD programming or invalidation * requested by SW to the HW is successful or not and take actions accordingly. **/ -void i40e_fd_handle_status(struct i40e_ring *rx_ring, - union i40e_rx_desc *rx_desc, u8 prog_id) +static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u64 qword0_raw, + u64 qword1, u8 prog_id) { struct i40e_pf *pf = rx_ring->vsi->back; struct pci_dev *pdev = pf->pdev; + struct i40e_32b_rx_wb_qw0 *qw0; u32 fcnt_prog, fcnt_avail; u32 error; - u64 qw; - qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len); - error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >> + qw0 = (struct i40e_32b_rx_wb_qw0 *)&qword0_raw; + error = (qword1 & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >> I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT; if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) { - pf->fd_inv = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fd_id); - if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) || + pf->fd_inv = le32_to_cpu(qw0->hi_dword.fd_id); + if (qw0->hi_dword.fd_id != 0 || (I40E_DEBUG_FD & pf->hw.debug_mask)) dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n", pf->fd_inv); @@ -560,7 +561,7 @@ void i40e_fd_handle_status(struct i40e_ring *rx_ring, /* store the current atr filter count */ pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf); - if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) && + if (qw0->hi_dword.fd_id == 0 && test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) { /* These set_bit() calls aren't atomic with the * test_bit() here, but worse case we potentially @@ -589,7 +590,7 @@ void i40e_fd_handle_status(struct i40e_ring *rx_ring, } else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) { if (I40E_DEBUG_FD & pf->hw.debug_mask) dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n", - rx_desc->wb.qword0.hi_dword.fd_id); + qw0->hi_dword.fd_id); } } @@ -1195,6 +1196,11 @@ clear_counts: rc->total_packets = 0; } +static struct i40e_rx_buffer *i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx) +{ + return &rx_ring->rx_bi[idx]; +} + /** * i40e_reuse_rx_page - page flip buffer and store it back on the ring * @rx_ring: rx descriptor ring to store buffers on @@ -1208,7 +1214,7 @@ static void i40e_reuse_rx_page(struct i40e_ring *rx_ring, struct i40e_rx_buffer *new_buff; u16 nta = rx_ring->next_to_alloc; - new_buff = &rx_ring->rx_bi[nta]; + new_buff = i40e_rx_bi(rx_ring, nta); /* update, and store next to alloc */ nta++; @@ -1227,29 +1233,10 @@ static void i40e_reuse_rx_page(struct i40e_ring *rx_ring, } /** - * i40e_rx_is_programming_status - check for programming status descriptor - * @qw: qword representing 
status_error_len in CPU ordering - * - * The value of in the descriptor length field indicate if this - * is a programming status descriptor for flow director or FCoE - * by the value of I40E_RX_PROG_STATUS_DESC_LENGTH, otherwise - * it is a packet descriptor. - **/ -static inline bool i40e_rx_is_programming_status(u64 qw) -{ - /* The Rx filter programming status and SPH bit occupy the same - * spot in the descriptor. Since we don't support packet split we - * can just reuse the bit as an indication that this is a - * programming status descriptor. - */ - return qw & I40E_RXD_QW1_LENGTH_SPH_MASK; -} - -/** - * i40e_clean_programming_status - try clean the programming status descriptor + * i40e_clean_programming_status - clean the programming status descriptor * @rx_ring: the rx ring that has this descriptor - * @rx_desc: the rx descriptor written back by HW - * @qw: qword representing status_error_len in CPU ordering + * @qword0_raw: qword0 + * @qword1: qword1 representing status_error_len in CPU ordering * * Flow director should handle FD_FILTER_STATUS to check its filter programming * status being successful or not and take actions accordingly. FCoE should @@ -1257,34 +1244,16 @@ static inline bool i40e_rx_is_programming_status(u64 qw) * * Returns an i40e_rx_buffer to reuse if the cleanup occurred, otherwise NULL. **/ -struct i40e_rx_buffer *i40e_clean_programming_status( - struct i40e_ring *rx_ring, - union i40e_rx_desc *rx_desc, - u64 qw) +void i40e_clean_programming_status(struct i40e_ring *rx_ring, u64 qword0_raw, + u64 qword1) { - struct i40e_rx_buffer *rx_buffer; - u32 ntc; u8 id; - if (!i40e_rx_is_programming_status(qw)) - return NULL; - - ntc = rx_ring->next_to_clean; - - /* fetch, update, and store next to clean */ - rx_buffer = &rx_ring->rx_bi[ntc++]; - ntc = (ntc < rx_ring->count) ? ntc : 0; - rx_ring->next_to_clean = ntc; - - prefetch(I40E_RX_DESC(rx_ring, ntc)); - - id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >> + id = (qword1 & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >> I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT; if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS) - i40e_fd_handle_status(rx_ring, rx_desc, id); - - return rx_buffer; + i40e_fd_handle_status(rx_ring, qword0_raw, qword1, id); } /** @@ -1336,13 +1305,25 @@ err: return -ENOMEM; } +int i40e_alloc_rx_bi(struct i40e_ring *rx_ring) +{ + unsigned long sz = sizeof(*rx_ring->rx_bi) * rx_ring->count; + + rx_ring->rx_bi = kzalloc(sz, GFP_KERNEL); + return rx_ring->rx_bi ? 
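/*
 * Buffer-info allocation moves out of i40e_setup_rx_descriptors() into
 * per-mode helpers, so the copy path (rx_bi) and the zero-copy path
 * (rx_bi_zc) each size their own array when the ring is configured. The
 * caller in i40e_configure_rx_ring() reduces to, in essence:
 *
 *	kfree(ring->rx_bi);
 *	ret = ring->xsk_umem ? i40e_alloc_rx_bi_zc(ring)
 *			     : i40e_alloc_rx_bi(ring);
 */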
0 : -ENOMEM; +} + +static void i40e_clear_rx_bi(struct i40e_ring *rx_ring) +{ + memset(rx_ring->rx_bi, 0, sizeof(*rx_ring->rx_bi) * rx_ring->count); +} + /** * i40e_clean_rx_ring - Free Rx buffers * @rx_ring: ring to be cleaned **/ void i40e_clean_rx_ring(struct i40e_ring *rx_ring) { - unsigned long bi_size; u16 i; /* ring already cleared, nothing to do */ @@ -1361,7 +1342,7 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring) /* Free all the Rx ring sk_buffs */ for (i = 0; i < rx_ring->count; i++) { - struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i]; + struct i40e_rx_buffer *rx_bi = i40e_rx_bi(rx_ring, i); if (!rx_bi->page) continue; @@ -1388,8 +1369,10 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring) } skip_free: - bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count; - memset(rx_ring->rx_bi, 0, bi_size); + if (rx_ring->xsk_umem) + i40e_clear_rx_bi_zc(rx_ring); + else + i40e_clear_rx_bi(rx_ring); /* Zero out the descriptor ring */ memset(rx_ring->desc, 0, rx_ring->size); @@ -1430,15 +1413,7 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring) int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring) { struct device *dev = rx_ring->dev; - int err = -ENOMEM; - int bi_size; - - /* warn if we are about to overwrite the pointer */ - WARN_ON(rx_ring->rx_bi); - bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count; - rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL); - if (!rx_ring->rx_bi) - goto err; + int err; u64_stats_init(&rx_ring->syncp); @@ -1451,7 +1426,7 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring) if (!rx_ring->desc) { dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n", rx_ring->size); - goto err; + return -ENOMEM; } rx_ring->next_to_alloc = 0; @@ -1463,16 +1438,12 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring) err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, rx_ring->queue_index); if (err < 0) - goto err; + return err; } rx_ring->xdp_prog = rx_ring->vsi->xdp_prog; return 0; -err: - kfree(rx_ring->rx_bi); - rx_ring->rx_bi = NULL; - return err; } /** @@ -1592,7 +1563,7 @@ bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count) return false; rx_desc = I40E_RX_DESC(rx_ring, ntu); - bi = &rx_ring->rx_bi[ntu]; + bi = i40e_rx_bi(rx_ring, ntu); do { if (!i40e_alloc_mapped_page(rx_ring, bi)) @@ -1614,7 +1585,7 @@ bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count) ntu++; if (unlikely(ntu == rx_ring->count)) { rx_desc = I40E_RX_DESC(rx_ring, 0); - bi = rx_ring->rx_bi; + bi = i40e_rx_bi(rx_ring, 0); ntu = 0; } @@ -1981,7 +1952,7 @@ static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring, { struct i40e_rx_buffer *rx_buffer; - rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean]; + rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean); prefetchw(rx_buffer->page); /* we are reusing so sync this buffer for CPU use */ @@ -2382,9 +2353,12 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) */ dma_rmb(); - rx_buffer = i40e_clean_programming_status(rx_ring, rx_desc, - qword); - if (unlikely(rx_buffer)) { + if (i40e_rx_is_programming_status(qword)) { + i40e_clean_programming_status(rx_ring, + rx_desc->raw.qword[0], + qword); + rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean); + i40e_inc_ntc(rx_ring); i40e_reuse_rx_page(rx_ring, rx_buffer); cleaned_count++; continue; diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 36d37f31a287..5c255977fd58 100644 --- 
a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -296,17 +296,9 @@ struct i40e_tx_buffer { struct i40e_rx_buffer { dma_addr_t dma; - union { - struct { - struct page *page; - __u32 page_offset; - __u16 pagecnt_bias; - }; - struct { - void *addr; - u64 handle; - }; - }; + struct page *page; + __u32 page_offset; + __u16 pagecnt_bias; }; struct i40e_queue_stats { @@ -358,6 +350,7 @@ struct i40e_ring { union { struct i40e_tx_buffer *tx_bi; struct i40e_rx_buffer *rx_bi; + struct xdp_buff **rx_bi_zc; }; DECLARE_BITMAP(state, __I40E_RING_STATE_NBITS); u16 queue_index; /* Queue number of ring */ @@ -419,7 +412,6 @@ struct i40e_ring { struct i40e_channel *ch; struct xdp_rxq_info xdp_rxq; struct xdp_umem *xsk_umem; - struct zero_copy_allocator zca; /* ZC allocator anchor */ } ____cacheline_internodealigned_in_smp; static inline bool ring_uses_build_skb(struct i40e_ring *ring) @@ -495,6 +487,7 @@ int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size); bool __i40e_chk_linearize(struct sk_buff *skb); int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, u32 flags); +int i40e_alloc_rx_bi(struct i40e_ring *rx_ring); /** * i40e_get_head - Retrieve head from head writeback diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h index 8af0e99c6c0d..667c4dc4b39f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h @@ -4,13 +4,9 @@ #ifndef I40E_TXRX_COMMON_ #define I40E_TXRX_COMMON_ -void i40e_fd_handle_status(struct i40e_ring *rx_ring, - union i40e_rx_desc *rx_desc, u8 prog_id); int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring); -struct i40e_rx_buffer *i40e_clean_programming_status( - struct i40e_ring *rx_ring, - union i40e_rx_desc *rx_desc, - u64 qw); +void i40e_clean_programming_status(struct i40e_ring *rx_ring, u64 qword0_raw, + u64 qword1); void i40e_process_skb_fields(struct i40e_ring *rx_ring, union i40e_rx_desc *rx_desc, struct sk_buff *skb); void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring); @@ -84,6 +80,38 @@ static inline void i40e_arm_wb(struct i40e_ring *tx_ring, } } +/** + * i40e_rx_is_programming_status - check for programming status descriptor + * @qword1: qword1 representing status_error_len in CPU ordering + * + * The value in the descriptor length field indicates if this + * is a programming status descriptor for flow director or FCoE + * (length equal to I40E_RX_PROG_STATUS_DESC_LENGTH); otherwise + * it is a packet descriptor. + **/ +static inline bool i40e_rx_is_programming_status(u64 qword1) +{ + /* The Rx filter programming status and SPH bit occupy the same + * spot in the descriptor. Since we don't support packet split we + * can just reuse the bit as an indication that this is a + * programming status descriptor. + */ + return qword1 & I40E_RXD_QW1_LENGTH_SPH_MASK; +} + +/** + * i40e_inc_ntc: Advance the next_to_clean index + * @rx_ring: Rx ring + **/ +static inline void i40e_inc_ntc(struct i40e_ring *rx_ring) +{ + u32 ntc = rx_ring->next_to_clean + 1; + + ntc = (ntc < rx_ring->count) ?
ntc : 0; + rx_ring->next_to_clean = ntc; + prefetch(I40E_RX_DESC(rx_ring, ntc)); +} + void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring); void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring); bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi); diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index 6ea2867ff60f..63e098f7cb63 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -689,7 +689,7 @@ union i40e_32byte_rx_desc { __le64 rsvd2; } read; struct { - struct { + struct i40e_32b_rx_wb_qw0 { struct { union { __le16 mirroring_status; @@ -727,6 +727,9 @@ union i40e_32byte_rx_desc { } hi_dword; } qword3; } wb; /* writeback */ + struct { + u64 qword[4]; + } raw; }; enum i40e_rx_desc_status_bits { diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c index 2b9184aead5f..7276580cbe64 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c @@ -2,68 +2,30 @@ /* Copyright(c) 2018 Intel Corporation. */ #include <linux/bpf_trace.h> -#include <net/xdp_sock.h> +#include <net/xdp_sock_drv.h> #include <net/xdp.h> #include "i40e.h" #include "i40e_txrx_common.h" #include "i40e_xsk.h" -/** - * i40e_xsk_umem_dma_map - DMA maps all UMEM memory for the netdev - * @vsi: Current VSI - * @umem: UMEM to DMA map - * - * Returns 0 on success, <0 on failure - **/ -static int i40e_xsk_umem_dma_map(struct i40e_vsi *vsi, struct xdp_umem *umem) +int i40e_alloc_rx_bi_zc(struct i40e_ring *rx_ring) { - struct i40e_pf *pf = vsi->back; - struct device *dev; - unsigned int i, j; - dma_addr_t dma; - - dev = &pf->pdev->dev; - for (i = 0; i < umem->npgs; i++) { - dma = dma_map_page_attrs(dev, umem->pgs[i], 0, PAGE_SIZE, - DMA_BIDIRECTIONAL, I40E_RX_DMA_ATTR); - if (dma_mapping_error(dev, dma)) - goto out_unmap; + unsigned long sz = sizeof(*rx_ring->rx_bi_zc) * rx_ring->count; - umem->pages[i].dma = dma; - } - - return 0; - -out_unmap: - for (j = 0; j < i; j++) { - dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE, - DMA_BIDIRECTIONAL, I40E_RX_DMA_ATTR); - umem->pages[i].dma = 0; - } - - return -1; + rx_ring->rx_bi_zc = kzalloc(sz, GFP_KERNEL); + return rx_ring->rx_bi_zc ? 
0 : -ENOMEM; } -/** - * i40e_xsk_umem_dma_unmap - DMA unmaps all UMEM memory for the netdev - * @vsi: Current VSI - * @umem: UMEM to DMA map - **/ -static void i40e_xsk_umem_dma_unmap(struct i40e_vsi *vsi, struct xdp_umem *umem) +void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring) { - struct i40e_pf *pf = vsi->back; - struct device *dev; - unsigned int i; - - dev = &pf->pdev->dev; - - for (i = 0; i < umem->npgs; i++) { - dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE, - DMA_BIDIRECTIONAL, I40E_RX_DMA_ATTR); + memset(rx_ring->rx_bi_zc, 0, + sizeof(*rx_ring->rx_bi_zc) * rx_ring->count); +} - umem->pages[i].dma = 0; - } +static struct xdp_buff **i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx) +{ + return &rx_ring->rx_bi_zc[idx]; } /** @@ -78,7 +40,6 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem, u16 qid) { struct net_device *netdev = vsi->netdev; - struct xdp_umem_fq_reuse *reuseq; bool if_running; int err; @@ -92,13 +53,7 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem, qid >= netdev->real_num_tx_queues) return -EINVAL; - reuseq = xsk_reuseq_prepare(vsi->rx_rings[0]->count); - if (!reuseq) - return -ENOMEM; - - xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq)); - - err = i40e_xsk_umem_dma_map(vsi, umem); + err = xsk_buff_dma_map(umem, &vsi->back->pdev->dev, I40E_RX_DMA_ATTR); if (err) return err; @@ -151,7 +106,7 @@ static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid) } clear_bit(qid, vsi->af_xdp_zc_qps); - i40e_xsk_umem_dma_unmap(vsi, umem); + xsk_buff_dma_unmap(umem, I40E_RX_DMA_ATTR); if (if_running) { err = i40e_queue_pair_enable(vsi, qid); @@ -184,17 +139,13 @@ int i40e_xsk_umem_setup(struct i40e_vsi *vsi, struct xdp_umem *umem, * @rx_ring: Rx ring * @xdp: xdp_buff used as input to the XDP program * - * This function enables or disables a UMEM to a certain ring. - * * Returns any of I40E_XDP_{PASS, CONSUMED, TX, REDIR} **/ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp) { - struct xdp_umem *umem = rx_ring->xsk_umem; int err, result = I40E_XDP_PASS; struct i40e_ring *xdp_ring; struct bpf_prog *xdp_prog; - u64 offset; u32 act; rcu_read_lock(); @@ -203,9 +154,6 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp) */ xdp_prog = READ_ONCE(rx_ring->xdp_prog); act = bpf_prog_run_xdp(xdp_prog, xdp); - offset = xdp->data - xdp->data_hard_start; - - xdp->handle = xsk_umem_adjust_offset(umem, xdp->handle, offset); switch (act) { case XDP_PASS: @@ -232,107 +180,26 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp) return result; } -/** - * i40e_alloc_buffer_zc - Allocates an i40e_rx_buffer - * @rx_ring: Rx ring - * @bi: Rx buffer to populate - * - * This function allocates an Rx buffer. The buffer can come from fill - * queue, or via the recycle queue (next_to_alloc). 
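/* Editor's sketch, not part of the patch: how the new bookkeeping helpers
 * above fit together. rx_bi and rx_bi_zc alias in a union inside struct
 * i40e_ring, so setup must allocate whichever array matches the ring's
 * mode, and ZC teardown walks the slots through the i40e_rx_bi() accessor.
 * The call sites shown here are assumptions; only the helpers appear in
 * this hunk. */
static int example_setup_rx_bookkeeping(struct i40e_ring *ring)
{
	/* pick the array that matches the ring's current mode */
	return ring->xsk_umem ? i40e_alloc_rx_bi_zc(ring) :
				i40e_alloc_rx_bi(ring);
}

static void example_drain_zc_slots(struct i40e_ring *ring)
{
	u16 i;

	for (i = 0; i < ring->count; i++) {
		struct xdp_buff **bi = i40e_rx_bi(ring, i);

		if (!*bi)
			continue;	/* slot already empty */
		xsk_buff_free(*bi);	/* hand the buffer back to the pool */
		*bi = NULL;		/* mark the slot empty */
	}
}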
- * - * Returns true for a successful allocation, false otherwise - **/ -static bool i40e_alloc_buffer_zc(struct i40e_ring *rx_ring, - struct i40e_rx_buffer *bi) -{ - struct xdp_umem *umem = rx_ring->xsk_umem; - void *addr = bi->addr; - u64 handle, hr; - - if (addr) { - rx_ring->rx_stats.page_reuse_count++; - return true; - } - - if (!xsk_umem_peek_addr(umem, &handle)) { - rx_ring->rx_stats.alloc_page_failed++; - return false; - } - - hr = umem->headroom + XDP_PACKET_HEADROOM; - - bi->dma = xdp_umem_get_dma(umem, handle); - bi->dma += hr; - - bi->addr = xdp_umem_get_data(umem, handle); - bi->addr += hr; - - bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom); - - xsk_umem_release_addr(umem); - return true; -} - -/** - * i40e_alloc_buffer_slow_zc - Allocates an i40e_rx_buffer - * @rx_ring: Rx ring - * @bi: Rx buffer to populate - * - * This function allocates an Rx buffer. The buffer can come from fill - * queue, or via the reuse queue. - * - * Returns true for a successful allocation, false otherwise - **/ -static bool i40e_alloc_buffer_slow_zc(struct i40e_ring *rx_ring, - struct i40e_rx_buffer *bi) -{ - struct xdp_umem *umem = rx_ring->xsk_umem; - u64 handle, hr; - - if (!xsk_umem_peek_addr_rq(umem, &handle)) { - rx_ring->rx_stats.alloc_page_failed++; - return false; - } - - handle &= rx_ring->xsk_umem->chunk_mask; - - hr = umem->headroom + XDP_PACKET_HEADROOM; - - bi->dma = xdp_umem_get_dma(umem, handle); - bi->dma += hr; - - bi->addr = xdp_umem_get_data(umem, handle); - bi->addr += hr; - - bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom); - - xsk_umem_release_addr_rq(umem); - return true; -} - -static __always_inline bool -__i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count, - bool alloc(struct i40e_ring *rx_ring, - struct i40e_rx_buffer *bi)) +bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count) { u16 ntu = rx_ring->next_to_use; union i40e_rx_desc *rx_desc; - struct i40e_rx_buffer *bi; + struct xdp_buff **bi, *xdp; + dma_addr_t dma; bool ok = true; rx_desc = I40E_RX_DESC(rx_ring, ntu); - bi = &rx_ring->rx_bi[ntu]; + bi = i40e_rx_bi(rx_ring, ntu); do { - if (!alloc(rx_ring, bi)) { + xdp = xsk_buff_alloc(rx_ring->xsk_umem); + if (!xdp) { ok = false; goto no_buffers; } - - dma_sync_single_range_for_device(rx_ring->dev, bi->dma, 0, - rx_ring->rx_buf_len, - DMA_BIDIRECTIONAL); - - rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); + *bi = xdp; + dma = xsk_buff_xdp_get_dma(xdp); + rx_desc->read.pkt_addr = cpu_to_le64(dma); + rx_desc->read.hdr_addr = 0; rx_desc++; bi++; @@ -340,11 +207,10 @@ __i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count, if (unlikely(ntu == rx_ring->count)) { rx_desc = I40E_RX_DESC(rx_ring, 0); - bi = rx_ring->rx_bi; + bi = i40e_rx_bi(rx_ring, 0); ntu = 0; } - rx_desc->wb.qword1.status_error_len = 0; count--; } while (count); @@ -356,127 +222,8 @@ no_buffers: } /** - * i40e_alloc_rx_buffers_zc - Allocates a number of Rx buffers - * @rx_ring: Rx ring - * @count: The number of buffers to allocate - * - * This function allocates a number of Rx buffers from the reuse queue - * or fill ring and places them on the Rx ring. 
- * - * Returns true for a successful allocation, false otherwise - **/ -bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count) -{ - return __i40e_alloc_rx_buffers_zc(rx_ring, count, - i40e_alloc_buffer_slow_zc); -} - -/** - * i40e_alloc_rx_buffers_fast_zc - Allocates a number of Rx buffers - * @rx_ring: Rx ring - * @count: The number of buffers to allocate - * - * This function allocates a number of Rx buffers from the fill ring - * or the internal recycle mechanism and places them on the Rx ring. - * - * Returns true for a successful allocation, false otherwise - **/ -static bool i40e_alloc_rx_buffers_fast_zc(struct i40e_ring *rx_ring, u16 count) -{ - return __i40e_alloc_rx_buffers_zc(rx_ring, count, - i40e_alloc_buffer_zc); -} - -/** - * i40e_get_rx_buffer_zc - Return the current Rx buffer - * @rx_ring: Rx ring - * @size: The size of the rx buffer (read from descriptor) - * - * This function returns the current, received Rx buffer, and also - * does DMA synchronization. the Rx ring. - * - * Returns the received Rx buffer - **/ -static struct i40e_rx_buffer *i40e_get_rx_buffer_zc(struct i40e_ring *rx_ring, - const unsigned int size) -{ - struct i40e_rx_buffer *bi; - - bi = &rx_ring->rx_bi[rx_ring->next_to_clean]; - - /* we are reusing so sync this buffer for CPU use */ - dma_sync_single_range_for_cpu(rx_ring->dev, - bi->dma, 0, - size, - DMA_BIDIRECTIONAL); - - return bi; -} - -/** - * i40e_reuse_rx_buffer_zc - Recycle an Rx buffer - * @rx_ring: Rx ring - * @old_bi: The Rx buffer to recycle - * - * This function recycles a finished Rx buffer, and places it on the - * recycle queue (next_to_alloc). - **/ -static void i40e_reuse_rx_buffer_zc(struct i40e_ring *rx_ring, - struct i40e_rx_buffer *old_bi) -{ - struct i40e_rx_buffer *new_bi = &rx_ring->rx_bi[rx_ring->next_to_alloc]; - u16 nta = rx_ring->next_to_alloc; - - /* update, and store next to alloc */ - nta++; - rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; - - /* transfer page from old buffer to new buffer */ - new_bi->dma = old_bi->dma; - new_bi->addr = old_bi->addr; - new_bi->handle = old_bi->handle; - - old_bi->addr = NULL; -} - -/** - * i40e_zca_free - Free callback for MEM_TYPE_ZERO_COPY allocations - * @alloc: Zero-copy allocator - * @handle: Buffer handle - **/ -void i40e_zca_free(struct zero_copy_allocator *alloc, unsigned long handle) -{ - struct i40e_rx_buffer *bi; - struct i40e_ring *rx_ring; - u64 hr, mask; - u16 nta; - - rx_ring = container_of(alloc, struct i40e_ring, zca); - hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM; - mask = rx_ring->xsk_umem->chunk_mask; - - nta = rx_ring->next_to_alloc; - bi = &rx_ring->rx_bi[nta]; - - nta++; - rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; - - handle &= mask; - - bi->dma = xdp_umem_get_dma(rx_ring->xsk_umem, handle); - bi->dma += hr; - - bi->addr = xdp_umem_get_data(rx_ring->xsk_umem, handle); - bi->addr += hr; - - bi->handle = xsk_umem_adjust_offset(rx_ring->xsk_umem, (u64)handle, - rx_ring->xsk_umem->headroom); -} - -/** - * i40e_construct_skb_zc - Create skbufff from zero-copy Rx buffer + * i40e_construct_skb_zc - Create skbuff from zero-copy Rx buffer * @rx_ring: Rx ring - * @bi: Rx buffer * @xdp: xdp_buff * * This functions allocates a new skb from a zero-copy Rx buffer. @@ -484,7 +231,6 @@ void i40e_zca_free(struct zero_copy_allocator *alloc, unsigned long handle) * Returns the skb, or NULL on failure. 
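/* Editor's sketch of the copy-then-recycle pattern i40e_construct_skb_zc()
 * implements: copy the payload out of the zero-copy buffer into a fresh
 * skb, then return the xdp_buff to the pool. The __napi_alloc_skb() call
 * is an assumption for illustration; only the metadata and xsk_buff_free()
 * steps are visible in this hunk. */
static struct sk_buff *example_copy_to_skb(struct i40e_ring *rx_ring,
					   struct xdp_buff *xdp)
{
	unsigned int datasize = xdp->data_end - xdp->data;
	struct sk_buff *skb;

	skb = __napi_alloc_skb(&rx_ring->q_vector->napi, datasize,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
	xsk_buff_free(xdp);	/* payload copied out; recycle the buffer */
	return skb;
}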
**/ static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring, - struct i40e_rx_buffer *bi, struct xdp_buff *xdp) { unsigned int metasize = xdp->data - xdp->data_meta; @@ -503,24 +249,11 @@ static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring, if (metasize) skb_metadata_set(skb, metasize); - i40e_reuse_rx_buffer_zc(rx_ring, bi); + xsk_buff_free(xdp); return skb; } /** - * i40e_inc_ntc: Advance the next_to_clean index - * @rx_ring: Rx ring - **/ -static void i40e_inc_ntc(struct i40e_ring *rx_ring) -{ - u32 ntc = rx_ring->next_to_clean + 1; - - ntc = (ntc < rx_ring->count) ? ntc : 0; - rx_ring->next_to_clean = ntc; - prefetch(I40E_RX_DESC(rx_ring, ntc)); -} - -/** * i40e_clean_rx_irq_zc - Consumes Rx packets from the hardware ring * @rx_ring: Rx ring * @budget: NAPI budget @@ -531,25 +264,20 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget) { unsigned int total_rx_bytes = 0, total_rx_packets = 0; u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); - struct xdp_umem *umem = rx_ring->xsk_umem; unsigned int xdp_res, xdp_xmit = 0; bool failure = false; struct sk_buff *skb; - struct xdp_buff xdp; - - xdp.rxq = &rx_ring->xdp_rxq; - xdp.frame_sz = xsk_umem_xdp_frame_sz(umem); while (likely(total_rx_packets < (unsigned int)budget)) { - struct i40e_rx_buffer *bi; union i40e_rx_desc *rx_desc; + struct xdp_buff **bi; unsigned int size; u64 qword; if (cleaned_count >= I40E_RX_BUFFER_WRITE) { failure = failure || - !i40e_alloc_rx_buffers_fast_zc(rx_ring, - cleaned_count); + !i40e_alloc_rx_buffers_zc(rx_ring, + cleaned_count); cleaned_count = 0; } @@ -562,35 +290,36 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget) */ dma_rmb(); - bi = i40e_clean_programming_status(rx_ring, rx_desc, - qword); - if (unlikely(bi)) { - i40e_reuse_rx_buffer_zc(rx_ring, bi); + if (i40e_rx_is_programming_status(qword)) { + i40e_clean_programming_status(rx_ring, + rx_desc->raw.qword[0], + qword); + bi = i40e_rx_bi(rx_ring, rx_ring->next_to_clean); + xsk_buff_free(*bi); + *bi = NULL; cleaned_count++; + i40e_inc_ntc(rx_ring); continue; } + bi = i40e_rx_bi(rx_ring, rx_ring->next_to_clean); size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> I40E_RXD_QW1_LENGTH_PBUF_SHIFT; if (!size) break; - bi = i40e_get_rx_buffer_zc(rx_ring, size); - xdp.data = bi->addr; - xdp.data_meta = xdp.data; - xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM; - xdp.data_end = xdp.data + size; - xdp.handle = bi->handle; + bi = i40e_rx_bi(rx_ring, rx_ring->next_to_clean); + (*bi)->data_end = (*bi)->data + size; + xsk_buff_dma_sync_for_cpu(*bi); - xdp_res = i40e_run_xdp_zc(rx_ring, &xdp); + xdp_res = i40e_run_xdp_zc(rx_ring, *bi); if (xdp_res) { - if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) { + if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) xdp_xmit |= xdp_res; - bi->addr = NULL; - } else { - i40e_reuse_rx_buffer_zc(rx_ring, bi); - } + else + xsk_buff_free(*bi); + *bi = NULL; total_rx_bytes += size; total_rx_packets++; @@ -606,7 +335,8 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget) * BIT(I40E_RXD_QW1_ERROR_SHIFT). This is due to that * SBP is *not* set in PRT_SBPVSI (default not set). 
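/* Editor's note, a simplified restatement rather than the literal control
 * flow above: who owns (and frees) the xdp_buff for each XDP verdict in
 * the zero-copy clean loop. In every case the ring slot is NULLed so the
 * teardown path can tell live buffers from handed-off ones. */
switch (xdp_res) {
case I40E_XDP_TX:
case I40E_XDP_REDIR:
	*bi = NULL;		/* buffer now owned by the Tx/redirect path */
	break;
case I40E_XDP_CONSUMED:
	xsk_buff_free(*bi);	/* dropped: straight back to the pool */
	*bi = NULL;
	break;
default:			/* I40E_XDP_PASS */
	skb = i40e_construct_skb_zc(rx_ring, *bi);	/* copies, then frees */
	*bi = NULL;
	break;
}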
*/ - skb = i40e_construct_skb_zc(rx_ring, bi, &xdp); + skb = i40e_construct_skb_zc(rx_ring, *bi); + *bi = NULL; if (!skb) { rx_ring->rx_stats.alloc_buff_failed++; break; @@ -664,10 +394,9 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget) if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &desc)) break; - dma = xdp_umem_get_dma(xdp_ring->xsk_umem, desc.addr); - - dma_sync_single_for_device(xdp_ring->dev, dma, desc.len, - DMA_BIDIRECTIONAL); + dma = xsk_buff_raw_get_dma(xdp_ring->xsk_umem, desc.addr); + xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_umem, dma, + desc.len); tx_bi = &xdp_ring->tx_bi[xdp_ring->next_to_use]; tx_bi->bytecount = desc.len; @@ -826,13 +555,13 @@ void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring) u16 i; for (i = 0; i < rx_ring->count; i++) { - struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i]; + struct xdp_buff *rx_bi = *i40e_rx_bi(rx_ring, i); - if (!rx_bi->addr) + if (!rx_bi) continue; - xsk_umem_fq_reuse(rx_ring->xsk_umem, rx_bi->handle); - rx_bi->addr = NULL; + xsk_buff_free(rx_bi); + rx_bi = NULL; } } diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.h b/drivers/net/ethernet/intel/i40e/i40e_xsk.h index 9ed59c14eb55..ea919a7d60ec 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.h +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.h @@ -12,12 +12,13 @@ int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair); int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair); int i40e_xsk_umem_setup(struct i40e_vsi *vsi, struct xdp_umem *umem, u16 qid); -void i40e_zca_free(struct zero_copy_allocator *alloc, unsigned long handle); bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 cleaned_count); int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget); bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring, int napi_budget); int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags); +int i40e_alloc_rx_bi_zc(struct i40e_ring *rx_ring); +void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring); #endif /* _I40E_XSK_H_ */ diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile index 29c6c6743450..980bbcc64b4b 100644 --- a/drivers/net/ethernet/intel/ice/Makefile +++ b/drivers/net/ethernet/intel/ice/Makefile @@ -17,10 +17,14 @@ ice-y := ice_main.o \ ice_lib.o \ ice_txrx_lib.o \ ice_txrx.o \ + ice_fltr.o \ + ice_fdir.o \ + ice_ethtool_fdir.o \ ice_flex_pipe.o \ ice_flow.o \ ice_devlink.o \ ice_ethtool.o ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o +ice-$(CONFIG_RFS_ACCEL) += ice_arfs.o ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 5c11448bfbb3..5792ee616b5c 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -34,9 +34,14 @@ #include <linux/ctype.h> #include <linux/bpf.h> #include <linux/avf/virtchnl.h> +#include <linux/cpu_rmap.h> #include <net/devlink.h> #include <net/ipv6.h> #include <net/xdp_sock.h> +#include <net/geneve.h> +#include <net/gre.h> +#include <net/udp_tunnel.h> +#include <net/vxlan.h> #include "ice_devids.h" #include "ice_type.h" #include "ice_txrx.h" @@ -46,7 +51,9 @@ #include "ice_sched.h" #include "ice_virtchnl_pf.h" #include "ice_sriov.h" +#include "ice_fdir.h" #include "ice_xsk.h" +#include "ice_arfs.h" extern const char ice_drv_ver[]; #define ICE_BAR0 0 @@ -62,6 +69,7 @@ extern const char ice_drv_ver[]; #define ICE_AQ_LEN 64 #define 
ICE_MBXSQ_LEN 64 #define ICE_MIN_MSIX 2 +#define ICE_FDIR_MSIX 1 #define ICE_NO_VSI 0xffff #define ICE_VSI_MAP_CONTIG 0 #define ICE_VSI_MAP_SCATTER 1 @@ -90,6 +98,7 @@ extern const char ice_drv_ver[]; #define ICE_TX_DESC(R, i) (&(((struct ice_tx_desc *)((R)->desc))[i])) #define ICE_RX_DESC(R, i) (&(((union ice_32b_rx_flex_desc *)((R)->desc))[i])) #define ICE_TX_CTX_DESC(R, i) (&(((struct ice_tx_ctx_desc *)((R)->desc))[i])) +#define ICE_TX_FDIRDESC(R, i) (&(((struct ice_fltr_desc *)((R)->desc))[i])) /* Macro for each VSI in a PF */ #define ice_for_each_vsi(pf, i) \ @@ -210,6 +219,7 @@ enum ice_state { __ICE_CFG_BUSY, __ICE_SERVICE_SCHED, __ICE_SERVICE_DIS, + __ICE_FD_FLUSH_REQ, __ICE_OICR_INTR_DIS, /* Global OICR interrupt disabled */ __ICE_MDD_VF_PRINT_PENDING, /* set when MDD event handle */ __ICE_VF_RESETS_DISABLED, /* disable resets during ice_remove */ @@ -244,8 +254,8 @@ struct ice_vsi { u32 tx_busy; u32 rx_buf_failed; u32 rx_page_failed; - int num_q_vectors; - int base_vector; /* IRQ base for OS reserved vectors */ + u16 num_q_vectors; + u16 base_vector; /* IRQ base for OS reserved vectors */ enum ice_vsi_type type; u16 vsi_num; /* HW (absolute) index of this VSI */ u16 idx; /* software index in pf->vsi[] */ @@ -253,6 +263,8 @@ struct ice_vsi { s16 vf_id; /* VF ID for SR-IOV VSIs */ u16 ethtype; /* Ethernet protocol for pause frame */ + u16 num_gfltr; + u16 num_bfltr; /* RSS config */ u16 rss_table_size; /* HW RSS table size */ @@ -261,6 +273,14 @@ struct ice_vsi { u8 *rss_lut_user; /* User configured lookup table entries */ u8 rss_lut_type; /* used to configure Get/Set RSS LUT AQ call */ + /* aRFS members only allocated for the PF VSI */ +#define ICE_MAX_ARFS_LIST 1024 +#define ICE_ARFS_LST_MASK (ICE_MAX_ARFS_LIST - 1) + struct hlist_head *arfs_fltr_list; + struct ice_arfs_active_fltr_cntrs *arfs_fltr_cntrs; + spinlock_t arfs_lock; /* protects aRFS hash table and filter state */ + atomic_t *arfs_last_fltr_id; + u16 max_frame; u16 rx_buf_len; @@ -335,12 +355,14 @@ enum ice_pf_flags { ICE_FLAG_SRIOV_CAPABLE, ICE_FLAG_DCB_CAPABLE, ICE_FLAG_DCB_ENA, + ICE_FLAG_FD_ENA, ICE_FLAG_ADV_FEATURES, ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, ICE_FLAG_NO_MEDIA, ICE_FLAG_FW_LLDP_AGENT, ICE_FLAG_ETHTOOL_CTXT, /* set when ethtool holds RTNL lock */ ICE_FLAG_LEGACY_RX, + ICE_FLAG_VF_TRUE_PROMISC_ENA, ICE_FLAG_MDD_AUTO_RESET_VF, ICE_PF_FLAGS_NBITS /* must be last */ }; @@ -362,11 +384,13 @@ struct ice_pf { */ u16 sriov_base_vector; + u16 ctrl_vsi_idx; /* control VSI index in pf->vsi array */ + struct ice_vsi **vsi; /* VSIs created by the driver */ struct ice_sw *first_sw; /* first switch created by firmware */ /* Virtchnl/SR-IOV config info */ struct ice_vf *vf; - int num_alloc_vfs; /* actual number of VFs allocated */ + u16 num_alloc_vfs; /* actual number of VFs allocated */ u16 num_vfs_supported; /* num VFs supported for this PF */ u16 num_qps_per_vf; u16 num_msix_per_vf; @@ -385,11 +409,11 @@ struct ice_pf { struct mutex tc_mutex; /* lock to protect TC changes */ u32 msg_enable; u32 hw_csum_rx_error; - u32 oicr_idx; /* Other interrupt cause MSIX vector index */ - u32 num_avail_sw_msix; /* remaining MSIX SW vectors left unclaimed */ + u16 oicr_idx; /* Other interrupt cause MSIX vector index */ + u16 num_avail_sw_msix; /* remaining MSIX SW vectors left unclaimed */ u16 max_pf_txqs; /* Total Tx queues PF wide */ u16 max_pf_rxqs; /* Total Rx queues PF wide */ - u32 num_lan_msix; /* Total MSIX vectors for base driver */ + u16 num_lan_msix; /* Total MSIX vectors for base driver */ u16 num_lan_tx; /* num LAN Tx 
queues setup */ u16 num_lan_rx; /* num LAN Rx queues setup */ u16 next_vsi; /* Next free slot in pf->vsi[] - 0-based! */ @@ -500,8 +524,27 @@ static inline struct ice_vsi *ice_get_main_vsi(struct ice_pf *pf) return NULL; } +/** + * ice_get_ctrl_vsi - Get the control VSI + * @pf: PF instance + */ +static inline struct ice_vsi *ice_get_ctrl_vsi(struct ice_pf *pf) +{ + /* if pf->ctrl_vsi_idx is ICE_NO_VSI, control VSI was not set up */ + if (!pf->vsi || pf->ctrl_vsi_idx == ICE_NO_VSI) + return NULL; + + return pf->vsi[pf->ctrl_vsi_idx]; +} + +#define ICE_FD_STAT_CTR_BLOCK_COUNT 256 +#define ICE_FD_STAT_PF_IDX(base_idx) \ + ((base_idx) * ICE_FD_STAT_CTR_BLOCK_COUNT) +#define ICE_FD_SB_STAT_IDX(base_idx) ICE_FD_STAT_PF_IDX(base_idx) + int ice_vsi_setup_tx_rings(struct ice_vsi *vsi); int ice_vsi_setup_rx_rings(struct ice_vsi *vsi); +int ice_vsi_open_ctrl(struct ice_vsi *vsi); void ice_set_ethtool_ops(struct net_device *netdev); void ice_set_ethtool_safe_mode_ops(struct net_device *netdev); u16 ice_get_avail_txq_count(struct ice_pf *pf); @@ -523,7 +566,24 @@ int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size); void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size); int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset); void ice_print_link_msg(struct ice_vsi *vsi, bool isup); +const char *ice_stat_str(enum ice_status stat_err); +const char *ice_aq_str(enum ice_aq_err aq_err); +int +ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add, + bool is_tun); +void ice_vsi_manage_fdir(struct ice_vsi *vsi, bool ena); +int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd); +int ice_del_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd); +int ice_get_ethtool_fdir_entry(struct ice_hw *hw, struct ethtool_rxnfc *cmd); +int +ice_get_fdir_fltr_ids(struct ice_hw *hw, struct ethtool_rxnfc *cmd, + u32 *rule_locs); +void ice_fdir_release_flows(struct ice_hw *hw); +void ice_fdir_replay_flows(struct ice_hw *hw); +void ice_fdir_replay_fltrs(struct ice_pf *pf); +int ice_fdir_create_dflt_rules(struct ice_pf *pf); int ice_open(struct net_device *netdev); int ice_stop(struct net_device *netdev); +void ice_service_task_schedule(struct ice_pf *pf); #endif /* _ICE_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index 2381b4014ed6..f04c338fb6e0 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -107,6 +107,7 @@ struct ice_aqc_list_caps_elem { #define ICE_AQC_CAPS_RXQS 0x0041 #define ICE_AQC_CAPS_TXQS 0x0042 #define ICE_AQC_CAPS_MSIX 0x0043 +#define ICE_AQC_CAPS_FD 0x0045 #define ICE_AQC_CAPS_MAX_MTU 0x0047 u8 major_ver; @@ -155,13 +156,11 @@ struct ice_aqc_manage_mac_write { #define ICE_AQC_MAN_MAC_WR_MC_MAG_EN BIT(0) #define ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP BIT(1) #define ICE_AQC_MAN_MAC_WR_S 6 -#define ICE_AQC_MAN_MAC_WR_M (3 << ICE_AQC_MAN_MAC_WR_S) +#define ICE_AQC_MAN_MAC_WR_M ICE_M(3, ICE_AQC_MAN_MAC_WR_S) #define ICE_AQC_MAN_MAC_UPDATE_LAA 0 -#define ICE_AQC_MAN_MAC_UPDATE_LAA_WOL (BIT(0) << ICE_AQC_MAN_MAC_WR_S) - /* High 16 bits of MAC address in big endian order */ - __be16 sah; - /* Low 32 bits of MAC address in big endian order */ - __be32 sal; +#define ICE_AQC_MAN_MAC_UPDATE_LAA_WOL BIT(ICE_AQC_MAN_MAC_WR_S) + /* byte stream in network order */ + u8 mac_addr[ETH_ALEN]; __le32 addr_high; __le32 addr_low; }; @@ -232,6 +231,11 @@ struct ice_aqc_get_sw_cfg_resp { */ #define 
ICE_AQC_RES_TYPE_VSI_LIST_REP 0x03 #define ICE_AQC_RES_TYPE_VSI_LIST_PRUNE 0x04 +#define ICE_AQC_RES_TYPE_FDIR_COUNTER_BLOCK 0x21 +#define ICE_AQC_RES_TYPE_FDIR_GUARANTEED_ENTRIES 0x22 +#define ICE_AQC_RES_TYPE_FDIR_SHARED_ENTRIES 0x23 +#define ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID 0x58 +#define ICE_AQC_RES_TYPE_FD_PROF_BLDR_TCAM 0x59 #define ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID 0x60 #define ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM 0x61 @@ -240,6 +244,9 @@ struct ice_aqc_get_sw_cfg_resp { #define ICE_AQC_RES_TYPE_FLAG_DEDICATED 0x00 +#define ICE_AQC_RES_TYPE_S 0 +#define ICE_AQC_RES_TYPE_M (0x07F << ICE_AQC_RES_TYPE_S) + /* Allocate Resources command (indirect 0x0208) * Free Resources command (indirect 0x0209) */ @@ -541,7 +548,7 @@ struct ice_sw_rule_lkup_rx_tx { #define ICE_SINGLE_ACT_OTHER_ACTS 0x3 #define ICE_SINGLE_OTHER_ACT_IDENTIFIER_S 17 #define ICE_SINGLE_OTHER_ACT_IDENTIFIER_M \ - (0x3 << \ ICE_SINGLE_OTHER_ACT_IDENTIFIER_S) + (0x3 << ICE_SINGLE_OTHER_ACT_IDENTIFIER_S) /* Bit 17:18 - Defines other actions */ /* Other action = 0 - Mirror VSI */ @@ -1059,6 +1066,25 @@ struct ice_aqc_set_phy_cfg_data { u8 rsvd1; }; +/* Set MAC Config command data structure (direct 0x0603) */ +struct ice_aqc_set_mac_cfg { + __le16 max_frame_size; + u8 params; +#define ICE_AQ_SET_MAC_PACE_S 3 +#define ICE_AQ_SET_MAC_PACE_M (0xF << ICE_AQ_SET_MAC_PACE_S) +#define ICE_AQ_SET_MAC_PACE_TYPE_M BIT(7) +#define ICE_AQ_SET_MAC_PACE_TYPE_RATE 0 +#define ICE_AQ_SET_MAC_PACE_TYPE_FIXED ICE_AQ_SET_MAC_PACE_TYPE_M + u8 tx_tmr_priority; + __le16 tx_tmr_value; + __le16 fc_refresh_threshold; + u8 drop_opts; +#define ICE_AQ_SET_MAC_AUTO_DROP_MASK BIT(0) +#define ICE_AQ_SET_MAC_AUTO_DROP_NONE 0 +#define ICE_AQ_SET_MAC_AUTO_DROP_BLOCKING_PKTS BIT(0) + u8 reserved[7]; +}; + /* Restart AN command data structure (direct 0x0605) * Also used for response, with only the lport_num field present. */ @@ -1264,6 +1290,33 @@ struct ice_aqc_nvm_checksum { u8 rsvd2[12]; }; +/* The result of netlist NVM read comes in a TLV format. The actual data + * (netlist header) starts from word offset 1 (byte 2). The FW strips + * out the type field from the TLV header so all the netlist fields + * should adjust their offset value by 1 word (2 bytes) in order to map + * their correct location. 
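/* Editor's note: a worked example of the word-offset adjustment described
 * above, using the ICE_AQC_NVM_NETLIST_* defines that follow. Since the FW
 * strips the TLV type word, a field documented at word offset N lives at
 * byte offset 2 * N in the payload the driver reads back; 'payload' is a
 * hypothetical buffer holding that data. */
__le16 *words = (__le16 *)payload;
u16 node_count;

/* node count: word offset 2 -> byte offset 4; low 10 bits are valid */
node_count = le16_to_cpu(words[ICE_AQC_NVM_NETLIST_NODE_COUNT_OFFSET]) &
	     ICE_AQC_NVM_NETLIST_NODE_COUNT_M;
/* ID block: word offset 5 -> byte offset 10, 0x30 words long */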
+ */ +#define ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID 0x11B +#define ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN_OFFSET 1 +#define ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN 2 /* In bytes */ +#define ICE_AQC_NVM_NETLIST_NODE_COUNT_OFFSET 2 +#define ICE_AQC_NVM_NETLIST_NODE_COUNT_LEN 2 /* In bytes */ +#define ICE_AQC_NVM_NETLIST_NODE_COUNT_M ICE_M(0x3FF, 0) +#define ICE_AQC_NVM_NETLIST_ID_BLK_START_OFFSET 5 +#define ICE_AQC_NVM_NETLIST_ID_BLK_LEN 0x30 /* In words */ + +/* netlist ID block field offsets (word offsets) */ +#define ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_LOW 2 +#define ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_HIGH 3 +#define ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_LOW 4 +#define ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_HIGH 5 +#define ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_LOW 6 +#define ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_HIGH 7 +#define ICE_AQC_NVM_NETLIST_ID_BLK_REV_LOW 8 +#define ICE_AQC_NVM_NETLIST_ID_BLK_REV_HIGH 9 +#define ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH 0xA +#define ICE_AQC_NVM_NETLIST_ID_BLK_CUST_VER 0x2F + /** * Send to PF command (indirect 0x0801) ID is only used by PF * @@ -1648,10 +1701,12 @@ struct ice_pkg_ver { }; #define ICE_PKG_NAME_SIZE 32 +#define ICE_SEG_NAME_SIZE 28 struct ice_aqc_get_pkg_info { struct ice_pkg_ver ver; - char name[ICE_PKG_NAME_SIZE]; + char name[ICE_SEG_NAME_SIZE]; + __le32 track_id; u8 is_in_nvm; u8 is_active; u8 is_active_at_boot; @@ -1738,6 +1793,7 @@ struct ice_aq_desc { struct ice_aqc_download_pkg download_pkg; struct ice_aqc_set_mac_lb set_mac_lb; struct ice_aqc_alloc_free_res_cmd sw_res_ctrl; + struct ice_aqc_set_mac_cfg set_mac_cfg; struct ice_aqc_set_event_mask set_event_mask; struct ice_aqc_get_link_status get_link_status; struct ice_aqc_event_lan_overflow lan_overflow; @@ -1834,6 +1890,7 @@ enum ice_adminq_opc { /* PHY commands */ ice_aqc_opc_get_phy_caps = 0x0600, ice_aqc_opc_set_phy_cfg = 0x0601, + ice_aqc_opc_set_mac_cfg = 0x0603, ice_aqc_opc_restart_an = 0x0605, ice_aqc_opc_get_link_status = 0x0607, ice_aqc_opc_set_event_mask = 0x0613, diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.c b/drivers/net/ethernet/intel/ice/ice_arfs.c new file mode 100644 index 000000000000..6560acd76c94 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_arfs.c @@ -0,0 +1,663 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2018-2020, Intel Corporation. */ + +#include "ice.h" + +/** + * ice_is_arfs_active - helper to check if aRFS is active + * @vsi: VSI to check + */ +static bool ice_is_arfs_active(struct ice_vsi *vsi) +{ + return !!vsi->arfs_fltr_list; +} + +/** + * ice_is_arfs_using_perfect_flow - check if aRFS has active perfect filters + * @hw: pointer to the HW structure + * @flow_type: flow type as Flow Director understands it + * + * Flow Director will query this function to see if aRFS is currently using + * the specified flow_type for perfect (4-tuple) filters.
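/* Editor's sketch of a hypothetical Flow Director call site: this helper
 * is meant to be queried before reconfiguring a flow type, so the change
 * is refused while aRFS still owns active 4-tuple filters of that type.
 * The -EBUSY return is illustrative only. */
if (ice_is_arfs_using_perfect_flow(hw, ICE_FLTR_PTYPE_NONF_IPV4_TCP))
	return -EBUSY;	/* aRFS currently drives TCPv4 perfect filters */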
+ */ +bool +ice_is_arfs_using_perfect_flow(struct ice_hw *hw, enum ice_fltr_ptype flow_type) +{ + struct ice_arfs_active_fltr_cntrs *arfs_fltr_cntrs; + struct ice_pf *pf = hw->back; + struct ice_vsi *vsi; + + vsi = ice_get_main_vsi(pf); + if (!vsi) + return false; + + arfs_fltr_cntrs = vsi->arfs_fltr_cntrs; + + /* active counters can be updated by multiple CPUs */ + smp_mb__before_atomic(); + switch (flow_type) { + case ICE_FLTR_PTYPE_NONF_IPV4_UDP: + return atomic_read(&arfs_fltr_cntrs->active_udpv4_cnt) > 0; + case ICE_FLTR_PTYPE_NONF_IPV6_UDP: + return atomic_read(&arfs_fltr_cntrs->active_udpv6_cnt) > 0; + case ICE_FLTR_PTYPE_NONF_IPV4_TCP: + return atomic_read(&arfs_fltr_cntrs->active_tcpv4_cnt) > 0; + case ICE_FLTR_PTYPE_NONF_IPV6_TCP: + return atomic_read(&arfs_fltr_cntrs->active_tcpv6_cnt) > 0; + default: + return false; + } +} + +/** + * ice_arfs_update_active_fltr_cntrs - update active filter counters for aRFS + * @vsi: VSI that aRFS is active on + * @entry: aRFS entry used to change counters + * @add: true to increment counter, false to decrement + */ +static void +ice_arfs_update_active_fltr_cntrs(struct ice_vsi *vsi, + struct ice_arfs_entry *entry, bool add) +{ + struct ice_arfs_active_fltr_cntrs *fltr_cntrs = vsi->arfs_fltr_cntrs; + + switch (entry->fltr_info.flow_type) { + case ICE_FLTR_PTYPE_NONF_IPV4_TCP: + if (add) + atomic_inc(&fltr_cntrs->active_tcpv4_cnt); + else + atomic_dec(&fltr_cntrs->active_tcpv4_cnt); + break; + case ICE_FLTR_PTYPE_NONF_IPV6_TCP: + if (add) + atomic_inc(&fltr_cntrs->active_tcpv6_cnt); + else + atomic_dec(&fltr_cntrs->active_tcpv6_cnt); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_UDP: + if (add) + atomic_inc(&fltr_cntrs->active_udpv4_cnt); + else + atomic_dec(&fltr_cntrs->active_udpv4_cnt); + break; + case ICE_FLTR_PTYPE_NONF_IPV6_UDP: + if (add) + atomic_inc(&fltr_cntrs->active_udpv6_cnt); + else + atomic_dec(&fltr_cntrs->active_udpv6_cnt); + break; + default: + dev_err(ice_pf_to_dev(vsi->back), "aRFS: Failed to update filter counters, invalid filter type %d\n", + entry->fltr_info.flow_type); + } +} + +/** + * ice_arfs_del_flow_rules - delete the rules passed in from HW + * @vsi: VSI for the flow rules that need to be deleted + * @del_list_head: head of the list of ice_arfs_entry(s) for rule deletion + * + * Loop through the delete list passed in and remove the rules from HW. After + * each rule is deleted, disconnect and free the ice_arfs_entry because it is no + * longer being referenced by the aRFS hash table. + */ +static void +ice_arfs_del_flow_rules(struct ice_vsi *vsi, struct hlist_head *del_list_head) +{ + struct ice_arfs_entry *e; + struct hlist_node *n; + struct device *dev; + + dev = ice_pf_to_dev(vsi->back); + + hlist_for_each_entry_safe(e, n, del_list_head, list_entry) { + int result; + + result = ice_fdir_write_fltr(vsi->back, &e->fltr_info, false, + false); + if (!result) + ice_arfs_update_active_fltr_cntrs(vsi, e, false); + else + dev_dbg(dev, "Unable to delete aRFS entry, err %d fltr_state %d fltr_id %d flow_id %d Q %d\n", + result, e->fltr_state, e->fltr_info.fltr_id, + e->flow_id, e->fltr_info.q_index); + + /* The aRFS hash table is no longer referencing this entry */ + hlist_del(&e->list_entry); + devm_kfree(dev, e); + } +} + +/** + * ice_arfs_add_flow_rules - add the rules passed in to HW + * @vsi: VSI for the flow rules that need to be added + * @add_list_head: head of the list of ice_arfs_entry_ptr(s) for rule addition + * + * Loop through the add list passed in and add the rules to HW.
After each + * rule is added, disconnect and free the ice_arfs_entry_ptr node. Don't free + * the ice_arfs_entry(s) because they are still being referenced in the aRFS + * hash table. + */ +static void +ice_arfs_add_flow_rules(struct ice_vsi *vsi, struct hlist_head *add_list_head) +{ + struct ice_arfs_entry_ptr *ep; + struct hlist_node *n; + struct device *dev; + + dev = ice_pf_to_dev(vsi->back); + + hlist_for_each_entry_safe(ep, n, add_list_head, list_entry) { + int result; + + result = ice_fdir_write_fltr(vsi->back, + &ep->arfs_entry->fltr_info, true, + false); + if (!result) + ice_arfs_update_active_fltr_cntrs(vsi, ep->arfs_entry, + true); + else + dev_dbg(dev, "Unable to add aRFS entry, err %d fltr_state %d fltr_id %d flow_id %d Q %d\n", + result, ep->arfs_entry->fltr_state, + ep->arfs_entry->fltr_info.fltr_id, + ep->arfs_entry->flow_id, + ep->arfs_entry->fltr_info.q_index); + + hlist_del(&ep->list_entry); + devm_kfree(dev, ep); + } +} + +/** + * ice_arfs_is_flow_expired - check if the aRFS entry has expired + * @vsi: VSI containing the aRFS entry + * @arfs_entry: aRFS entry that's being checked for expiration + * + * Return true if the flow has expired, else false. This function should be used + * to determine whether or not an aRFS entry should be removed from the hardware + * and software structures. + */ +static bool +ice_arfs_is_flow_expired(struct ice_vsi *vsi, struct ice_arfs_entry *arfs_entry) +{ +#define ICE_ARFS_TIME_DELTA_EXPIRATION msecs_to_jiffies(5000) + if (rps_may_expire_flow(vsi->netdev, arfs_entry->fltr_info.q_index, + arfs_entry->flow_id, + arfs_entry->fltr_info.fltr_id)) + return true; + + /* expiration timer only used for UDP filters */ + if (arfs_entry->fltr_info.flow_type != ICE_FLTR_PTYPE_NONF_IPV4_UDP && + arfs_entry->fltr_info.flow_type != ICE_FLTR_PTYPE_NONF_IPV6_UDP) + return false; + + return time_in_range64(arfs_entry->time_activated + + ICE_ARFS_TIME_DELTA_EXPIRATION, + arfs_entry->time_activated, get_jiffies_64()); +} + +/** + * ice_arfs_update_flow_rules - add/delete aRFS rules in HW + * @vsi: the VSI to be forwarded to + * @idx: index into the table of aRFS filter lists. Obtained from skb->hash + * @add_list: list to populate with filters to be added to Flow Director + * @del_list: list to populate with filters to be deleted from Flow Director + * + * Iterate over the hlist at the index given in the aRFS hash table and + * determine if there are any aRFS entries that need to be either added or + * deleted in the HW. If the aRFS entry is marked as ICE_ARFS_INACTIVE the + * filter needs to be added to HW, else if it's marked as ICE_ARFS_ACTIVE and + * the flow has expired delete the filter from HW. The caller of this function + * is expected to add/delete rules on the add_list/del_list respectively. 
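/* Editor's note on the expiration test in ice_arfs_is_flow_expired()
 * above, which is easy to misread: time_in_range64(a, b, c) is true when
 * b <= a <= c in wrap-safe arithmetic, so with a = time_activated + delta,
 * b = time_activated and c = get_jiffies_64() it answers "have at least
 * 5 seconds elapsed since activation?". Concretely, with activation at
 * jiffy 1000 and a 5000-jiffy delta: now = 5500 -> false (keep the flow),
 * now = 6200 -> true (expire it). */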
+ */ +static void +ice_arfs_update_flow_rules(struct ice_vsi *vsi, u16 idx, + struct hlist_head *add_list, + struct hlist_head *del_list) +{ + struct ice_arfs_entry *e; + struct hlist_node *n; + struct device *dev; + + dev = ice_pf_to_dev(vsi->back); + + /* go through the aRFS hlist at this idx and check for needed updates */ + hlist_for_each_entry_safe(e, n, &vsi->arfs_fltr_list[idx], list_entry) + /* check if filter needs to be added to HW */ + if (e->fltr_state == ICE_ARFS_INACTIVE) { + enum ice_fltr_ptype flow_type = e->fltr_info.flow_type; + struct ice_arfs_entry_ptr *ep = + devm_kzalloc(dev, sizeof(*ep), GFP_ATOMIC); + + if (!ep) + continue; + INIT_HLIST_NODE(&ep->list_entry); + /* reference aRFS entry to add HW filter */ + ep->arfs_entry = e; + hlist_add_head(&ep->list_entry, add_list); + e->fltr_state = ICE_ARFS_ACTIVE; + /* expiration timer only used for UDP flows */ + if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP || + flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP) + e->time_activated = get_jiffies_64(); + } else if (e->fltr_state == ICE_ARFS_ACTIVE) { + /* check if filter needs to be removed from HW */ + if (ice_arfs_is_flow_expired(vsi, e)) { + /* remove aRFS entry from hash table for delete + * and to prevent referencing it the next time + * through this hlist index + */ + hlist_del(&e->list_entry); + e->fltr_state = ICE_ARFS_TODEL; + /* save reference to aRFS entry for delete */ + hlist_add_head(&e->list_entry, del_list); + } + } +} + +/** + * ice_sync_arfs_fltrs - update all aRFS filters + * @pf: board private structure + */ +void ice_sync_arfs_fltrs(struct ice_pf *pf) +{ + HLIST_HEAD(tmp_del_list); + HLIST_HEAD(tmp_add_list); + struct ice_vsi *pf_vsi; + unsigned int i; + + pf_vsi = ice_get_main_vsi(pf); + if (!pf_vsi) + return; + + if (!ice_is_arfs_active(pf_vsi)) + return; + + spin_lock_bh(&pf_vsi->arfs_lock); + /* Once we process aRFS for the PF VSI get out */ + for (i = 0; i < ICE_MAX_ARFS_LIST; i++) + ice_arfs_update_flow_rules(pf_vsi, i, &tmp_add_list, + &tmp_del_list); + spin_unlock_bh(&pf_vsi->arfs_lock); + + /* use list of ice_arfs_entry(s) for delete */ + ice_arfs_del_flow_rules(pf_vsi, &tmp_del_list); + + /* use list of ice_arfs_entry_ptr(s) for add */ + ice_arfs_add_flow_rules(pf_vsi, &tmp_add_list); +} + +/** + * ice_arfs_build_entry - builds an aRFS entry based on input + * @vsi: destination VSI for this flow + * @fk: flow dissector keys for creating the tuple + * @rxq_idx: Rx queue to steer this flow to + * @flow_id: passed down from the stack and saved for flow expiration + * + * returns an aRFS entry on success and NULL on failure + */ +static struct ice_arfs_entry * +ice_arfs_build_entry(struct ice_vsi *vsi, const struct flow_keys *fk, + u16 rxq_idx, u32 flow_id) +{ + struct ice_arfs_entry *arfs_entry; + struct ice_fdir_fltr *fltr_info; + u8 ip_proto; + + arfs_entry = devm_kzalloc(ice_pf_to_dev(vsi->back), + sizeof(*arfs_entry), + GFP_ATOMIC | __GFP_NOWARN); + if (!arfs_entry) + return NULL; + + fltr_info = &arfs_entry->fltr_info; + fltr_info->q_index = rxq_idx; + fltr_info->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX; + fltr_info->dest_vsi = vsi->idx; + ip_proto = fk->basic.ip_proto; + + if (fk->basic.n_proto == htons(ETH_P_IP)) { + fltr_info->ip.v4.proto = ip_proto; + fltr_info->flow_type = (ip_proto == IPPROTO_TCP) ? 
+ ICE_FLTR_PTYPE_NONF_IPV4_TCP : + ICE_FLTR_PTYPE_NONF_IPV4_UDP; + fltr_info->ip.v4.src_ip = fk->addrs.v4addrs.src; + fltr_info->ip.v4.dst_ip = fk->addrs.v4addrs.dst; + fltr_info->ip.v4.src_port = fk->ports.src; + fltr_info->ip.v4.dst_port = fk->ports.dst; + } else { /* ETH_P_IPV6 */ + fltr_info->ip.v6.proto = ip_proto; + fltr_info->flow_type = (ip_proto == IPPROTO_TCP) ? + ICE_FLTR_PTYPE_NONF_IPV6_TCP : + ICE_FLTR_PTYPE_NONF_IPV6_UDP; + memcpy(&fltr_info->ip.v6.src_ip, &fk->addrs.v6addrs.src, + sizeof(struct in6_addr)); + memcpy(&fltr_info->ip.v6.dst_ip, &fk->addrs.v6addrs.dst, + sizeof(struct in6_addr)); + fltr_info->ip.v6.src_port = fk->ports.src; + fltr_info->ip.v6.dst_port = fk->ports.dst; + } + + arfs_entry->flow_id = flow_id; + fltr_info->fltr_id = + atomic_inc_return(vsi->arfs_last_fltr_id) % RPS_NO_FILTER; + + return arfs_entry; +} + +/** + * ice_arfs_is_perfect_flow_set - Check to see if perfect flow is set + * @hw: pointer to HW structure + * @l3_proto: ETH_P_IP or ETH_P_IPV6 in network order + * @l4_proto: IPPROTO_UDP or IPPROTO_TCP + * + * We only support perfect (4-tuple) filters for aRFS. This function allows aRFS + * to check if perfect (4-tuple) flow rules are currently in place by Flow + * Director. + */ +static bool +ice_arfs_is_perfect_flow_set(struct ice_hw *hw, __be16 l3_proto, u8 l4_proto) +{ + unsigned long *perfect_fltr = hw->fdir_perfect_fltr; + + /* advanced Flow Director disabled, perfect filters always supported */ + if (!perfect_fltr) + return true; + + if (l3_proto == htons(ETH_P_IP) && l4_proto == IPPROTO_UDP) + return test_bit(ICE_FLTR_PTYPE_NONF_IPV4_UDP, perfect_fltr); + else if (l3_proto == htons(ETH_P_IP) && l4_proto == IPPROTO_TCP) + return test_bit(ICE_FLTR_PTYPE_NONF_IPV4_TCP, perfect_fltr); + else if (l3_proto == htons(ETH_P_IPV6) && l4_proto == IPPROTO_UDP) + return test_bit(ICE_FLTR_PTYPE_NONF_IPV6_UDP, perfect_fltr); + else if (l3_proto == htons(ETH_P_IPV6) && l4_proto == IPPROTO_TCP) + return test_bit(ICE_FLTR_PTYPE_NONF_IPV6_TCP, perfect_fltr); + + return false; +} + +/** + * ice_rx_flow_steer - steer the Rx flow to where application is being run + * @netdev: ptr to the netdev being adjusted + * @skb: buffer with required header information + * @rxq_idx: queue to which the flow needs to move + * @flow_id: flow identifier provided by the netdev + * + * Based on the skb, rxq_idx, and flow_id passed in add/update an entry in the + * aRFS hash table. Iterate over one of the hlists in the aRFS hash table and + * if the flow_id already exists in the hash table but the rxq_idx has changed + * mark the entry as ICE_ARFS_INACTIVE so it can get updated in HW, else + * if the entry is marked as ICE_ARFS_TODEL delete it from the aRFS hash table. + * If neither of the previous conditions are true then add a new entry in the + * aRFS hash table, which gets set to ICE_ARFS_INACTIVE by default so it can be + * added to HW. 
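/* Editor's sketch, assumed wiring not shown in this hunk: the stack
 * reaches ice_rx_flow_steer() through the standard aRFS hook in
 * net_device_ops once the netdev has an rx_cpu_rmap:
 *
 *	static const struct net_device_ops ice_netdev_ops = {
 *		...
 *	#ifdef CONFIG_RFS_ACCEL
 *		.ndo_rx_flow_steer = ice_rx_flow_steer,
 *	#endif
 *	};
 *
 * The filter ID returned here is later echoed back by the stack through
 * rps_may_expire_flow() in ice_arfs_is_flow_expired() above. */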
+ */ +int +ice_rx_flow_steer(struct net_device *netdev, const struct sk_buff *skb, + u16 rxq_idx, u32 flow_id) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_arfs_entry *arfs_entry; + struct ice_vsi *vsi = np->vsi; + struct flow_keys fk; + struct ice_pf *pf; + __be16 n_proto; + u8 ip_proto; + u16 idx; + int ret; + + /* failed to allocate memory for aRFS so don't crash */ + if (unlikely(!vsi->arfs_fltr_list)) + return -ENODEV; + + pf = vsi->back; + + if (skb->encapsulation) + return -EPROTONOSUPPORT; + + if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) + return -EPROTONOSUPPORT; + + n_proto = fk.basic.n_proto; + /* Support only IPV4 and IPV6 */ + if ((n_proto == htons(ETH_P_IP) && !ip_is_fragment(ip_hdr(skb))) || + n_proto == htons(ETH_P_IPV6)) + ip_proto = fk.basic.ip_proto; + else + return -EPROTONOSUPPORT; + + /* Support only TCP and UDP */ + if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) + return -EPROTONOSUPPORT; + + /* only support 4-tuple filters for aRFS */ + if (!ice_arfs_is_perfect_flow_set(&pf->hw, n_proto, ip_proto)) + return -EOPNOTSUPP; + + /* choose the aRFS list bucket based on skb hash */ + idx = skb_get_hash_raw(skb) & ICE_ARFS_LST_MASK; + /* search for entry in the bucket */ + spin_lock_bh(&vsi->arfs_lock); + hlist_for_each_entry(arfs_entry, &vsi->arfs_fltr_list[idx], + list_entry) { + struct ice_fdir_fltr *fltr_info; + + /* keep searching for the already existing arfs_entry flow */ + if (arfs_entry->flow_id != flow_id) + continue; + + fltr_info = &arfs_entry->fltr_info; + ret = fltr_info->fltr_id; + + if (fltr_info->q_index == rxq_idx || + arfs_entry->fltr_state != ICE_ARFS_ACTIVE) + goto out; + + /* update the queue to forward to on an already existing flow */ + fltr_info->q_index = rxq_idx; + arfs_entry->fltr_state = ICE_ARFS_INACTIVE; + ice_arfs_update_active_fltr_cntrs(vsi, arfs_entry, false); + goto out_schedule_service_task; + } + + arfs_entry = ice_arfs_build_entry(vsi, &fk, rxq_idx, flow_id); + if (!arfs_entry) { + ret = -ENOMEM; + goto out; + } + + ret = arfs_entry->fltr_info.fltr_id; + INIT_HLIST_NODE(&arfs_entry->list_entry); + hlist_add_head(&arfs_entry->list_entry, &vsi->arfs_fltr_list[idx]); +out_schedule_service_task: + ice_service_task_schedule(pf); +out: + spin_unlock_bh(&vsi->arfs_lock); + return ret; +} + +/** + * ice_init_arfs_cntrs - initialize aRFS counter values + * @vsi: VSI that aRFS counters need to be initialized on + */ +static int ice_init_arfs_cntrs(struct ice_vsi *vsi) +{ + if (!vsi || vsi->type != ICE_VSI_PF) + return -EINVAL; + + vsi->arfs_fltr_cntrs = kzalloc(sizeof(*vsi->arfs_fltr_cntrs), + GFP_KERNEL); + if (!vsi->arfs_fltr_cntrs) + return -ENOMEM; + + vsi->arfs_last_fltr_id = kzalloc(sizeof(*vsi->arfs_last_fltr_id), + GFP_KERNEL); + if (!vsi->arfs_last_fltr_id) { + kfree(vsi->arfs_fltr_cntrs); + vsi->arfs_fltr_cntrs = NULL; + return -ENOMEM; + } + + return 0; +} + +/** + * ice_init_arfs - initialize aRFS resources + * @vsi: the VSI to be forwarded to + */ +void ice_init_arfs(struct ice_vsi *vsi) +{ + struct hlist_head *arfs_fltr_list; + unsigned int i; + + if (!vsi || vsi->type != ICE_VSI_PF) + return; + + arfs_fltr_list = kzalloc(sizeof(*arfs_fltr_list) * ICE_MAX_ARFS_LIST, + GFP_KERNEL); + if (!arfs_fltr_list) + return; + + if (ice_init_arfs_cntrs(vsi)) + goto free_arfs_fltr_list; + + for (i = 0; i < ICE_MAX_ARFS_LIST; i++) + INIT_HLIST_HEAD(&arfs_fltr_list[i]); + + spin_lock_init(&vsi->arfs_lock); + + vsi->arfs_fltr_list = arfs_fltr_list; + + return; + +free_arfs_fltr_list: + kfree(arfs_fltr_list); +} + 
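/* Editor's note on two details of ice_rx_flow_steer() above:
 * - bucket selection relies on ICE_MAX_ARFS_LIST (1024) being a power of
 *   two, so "skb_get_hash_raw(skb) & ICE_ARFS_LST_MASK" is an exact
 *   modulo; e.g. hash 0x9e3779b9 & 0x3ff = 441;
 * - filter IDs are allocated modulo RPS_NO_FILTER, so a valid ID can
 *   never collide with the stack's "no filter" sentinel value. */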
+/** + * ice_clear_arfs - clear the aRFS hash table and any memory used for aRFS + * @vsi: the VSI to be forwarded to + */ +void ice_clear_arfs(struct ice_vsi *vsi) +{ + struct device *dev; + unsigned int i; + + if (!vsi || vsi->type != ICE_VSI_PF || !vsi->back || + !vsi->arfs_fltr_list) + return; + + dev = ice_pf_to_dev(vsi->back); + for (i = 0; i < ICE_MAX_ARFS_LIST; i++) { + struct ice_arfs_entry *r; + struct hlist_node *n; + + spin_lock_bh(&vsi->arfs_lock); + hlist_for_each_entry_safe(r, n, &vsi->arfs_fltr_list[i], + list_entry) { + hlist_del(&r->list_entry); + devm_kfree(dev, r); + } + spin_unlock_bh(&vsi->arfs_lock); + } + + kfree(vsi->arfs_fltr_list); + vsi->arfs_fltr_list = NULL; + kfree(vsi->arfs_last_fltr_id); + vsi->arfs_last_fltr_id = NULL; + kfree(vsi->arfs_fltr_cntrs); + vsi->arfs_fltr_cntrs = NULL; +} + +/** + * ice_free_cpu_rx_rmap - free setup CPU reverse map + * @vsi: the VSI to be forwarded to + */ +void ice_free_cpu_rx_rmap(struct ice_vsi *vsi) +{ + struct net_device *netdev; + + if (!vsi || vsi->type != ICE_VSI_PF || !vsi->arfs_fltr_list) + return; + + netdev = vsi->netdev; + if (!netdev || !netdev->rx_cpu_rmap || + netdev->reg_state != NETREG_REGISTERED) + return; + + free_irq_cpu_rmap(netdev->rx_cpu_rmap); + netdev->rx_cpu_rmap = NULL; +} + +/** + * ice_set_cpu_rx_rmap - setup CPU reverse map for each queue + * @vsi: the VSI to be forwarded to + */ +int ice_set_cpu_rx_rmap(struct ice_vsi *vsi) +{ + struct net_device *netdev; + struct ice_pf *pf; + int base_idx, i; + + if (!vsi || vsi->type != ICE_VSI_PF) + return -EINVAL; + + pf = vsi->back; + netdev = vsi->netdev; + if (!pf || !netdev || !vsi->num_q_vectors || + vsi->netdev->reg_state != NETREG_REGISTERED) + return -EINVAL; + + netdev_dbg(netdev, "Setup CPU RMAP: vsi type 0x%x, ifname %s, q_vectors %d\n", + vsi->type, netdev->name, vsi->num_q_vectors); + + netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(vsi->num_q_vectors); + if (unlikely(!netdev->rx_cpu_rmap)) + return -EINVAL; + + base_idx = vsi->base_vector; + for (i = 0; i < vsi->num_q_vectors; i++) + if (irq_cpu_rmap_add(netdev->rx_cpu_rmap, + pf->msix_entries[base_idx + i].vector)) { + ice_free_cpu_rx_rmap(vsi); + return -EINVAL; + } + + return 0; +} + +/** + * ice_remove_arfs - remove/clear all aRFS resources + * @pf: device private structure + */ +void ice_remove_arfs(struct ice_pf *pf) +{ + struct ice_vsi *pf_vsi; + + pf_vsi = ice_get_main_vsi(pf); + if (!pf_vsi) + return; + + ice_free_cpu_rx_rmap(pf_vsi); + ice_clear_arfs(pf_vsi); +} + +/** + * ice_rebuild_arfs - remove/clear all aRFS resources and rebuild after reset + * @pf: device private structure + */ +void ice_rebuild_arfs(struct ice_pf *pf) +{ + struct ice_vsi *pf_vsi; + + pf_vsi = ice_get_main_vsi(pf); + if (!pf_vsi) + return; + + ice_remove_arfs(pf); + if (ice_set_cpu_rx_rmap(pf_vsi)) { + dev_err(ice_pf_to_dev(pf), "Failed to rebuild aRFS\n"); + return; + } + ice_init_arfs(pf_vsi); +} diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.h b/drivers/net/ethernet/intel/ice/ice_arfs.h new file mode 100644 index 000000000000..f39cd16403ed --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_arfs.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018-2020, Intel Corporation. 
*/ + +#ifndef _ICE_ARFS_H_ +#define _ICE_ARFS_H_ +enum ice_arfs_fltr_state { + ICE_ARFS_INACTIVE, + ICE_ARFS_ACTIVE, + ICE_ARFS_TODEL, +}; + +struct ice_arfs_entry { + struct ice_fdir_fltr fltr_info; + struct hlist_node list_entry; + u64 time_activated; /* only valid for UDP flows */ + u32 flow_id; + /* fltr_state = 0 - ICE_ARFS_INACTIVE: + * filter needs to be updated or programmed in HW. + * fltr_state = 1 - ICE_ARFS_ACTIVE: + * filter is active and programmed in HW. + * fltr_state = 2 - ICE_ARFS_TODEL: + * filter has been deleted from HW and needs to be removed from + * the aRFS hash table. + */ + u8 fltr_state; +}; + +struct ice_arfs_entry_ptr { + struct ice_arfs_entry *arfs_entry; + struct hlist_node list_entry; +}; + +struct ice_arfs_active_fltr_cntrs { + atomic_t active_tcpv4_cnt; + atomic_t active_tcpv6_cnt; + atomic_t active_udpv4_cnt; + atomic_t active_udpv6_cnt; +}; + +#ifdef CONFIG_RFS_ACCEL +int +ice_rx_flow_steer(struct net_device *netdev, const struct sk_buff *skb, + u16 rxq_idx, u32 flow_id); +void ice_clear_arfs(struct ice_vsi *vsi); +void ice_free_cpu_rx_rmap(struct ice_vsi *vsi); +void ice_init_arfs(struct ice_vsi *vsi); +void ice_sync_arfs_fltrs(struct ice_pf *pf); +int ice_set_cpu_rx_rmap(struct ice_vsi *vsi); +void ice_remove_arfs(struct ice_pf *pf); +void ice_rebuild_arfs(struct ice_pf *pf); +bool +ice_is_arfs_using_perfect_flow(struct ice_hw *hw, + enum ice_fltr_ptype flow_type); +#else +#define ice_sync_arfs_fltrs(pf) do {} while (0) +#define ice_init_arfs(vsi) do {} while (0) +#define ice_clear_arfs(vsi) do {} while (0) +#define ice_remove_arfs(pf) do {} while (0) +#define ice_free_cpu_rx_rmap(vsi) do {} while (0) +#define ice_rebuild_arfs(pf) do {} while (0) + +static inline int ice_set_cpu_rx_rmap(struct ice_vsi __always_unused *vsi) +{ + return 0; +} + +static inline int +ice_rx_flow_steer(struct net_device __always_unused *netdev, + const struct sk_buff __always_unused *skb, + u16 __always_unused rxq_idx, u32 __always_unused flow_id) +{ + return -EOPNOTSUPP; +} + +static inline bool +ice_is_arfs_using_perfect_flow(struct ice_hw __always_unused *hw, + enum ice_fltr_ptype __always_unused flow_type) +{ + return false; +} +#endif /* CONFIG_RFS_ACCEL */ +#endif /* _ICE_ARFS_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c index a19cd6f5436b..a174911d8994 100644 --- a/drivers/net/ethernet/intel/ice/ice_base.c +++ b/drivers/net/ethernet/intel/ice/ice_base.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2019, Intel Corporation. 
*/ +#include <net/xdp_sock_drv.h> #include "ice_base.h" #include "ice_dcb_lib.h" @@ -12,7 +13,7 @@ */ static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg) { - int offset, i; + unsigned int offset, i; mutex_lock(qs_cfg->qs_mutex); offset = bitmap_find_next_zero_area(qs_cfg->pf_map, qs_cfg->pf_map_size, @@ -24,7 +25,7 @@ static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg) bitmap_set(qs_cfg->pf_map, offset, qs_cfg->q_count); for (i = 0; i < qs_cfg->q_count; i++) - qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = i + offset; + qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = (u16)(i + offset); mutex_unlock(qs_cfg->qs_mutex); return 0; @@ -38,7 +39,7 @@ static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg) */ static int __ice_vsi_get_qs_sc(struct ice_qs_cfg *qs_cfg) { - int i, index = 0; + unsigned int i, index = 0; mutex_lock(qs_cfg->qs_mutex); for (i = 0; i < qs_cfg->q_count; i++) { @@ -47,7 +48,7 @@ static int __ice_vsi_get_qs_sc(struct ice_qs_cfg *qs_cfg) if (index >= qs_cfg->pf_map_size) goto err_scatter; set_bit(index, qs_cfg->pf_map); - qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = index; + qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = (u16)index; } mutex_unlock(qs_cfg->qs_mutex); @@ -96,7 +97,7 @@ static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena) * We allocate one q_vector and set default value for ITR setting associated * with this q_vector. If allocation fails we return -ENOMEM. */ -static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx) +static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx) { struct ice_pf *pf = vsi->back; struct ice_q_vector *q_vector; @@ -246,6 +247,7 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q) */ switch (vsi->type) { case ICE_VSI_LB: + case ICE_VSI_CTRL: case ICE_VSI_PF: tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF; break; @@ -279,7 +281,9 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q) */ int ice_setup_rx_ctx(struct ice_ring *ring) { + struct device *dev = ice_pf_to_dev(ring->vsi->back); int chain_len = ICE_MAX_CHAINED_RX_BUFS; + u16 num_bufs = ICE_DESC_UNUSED(ring); struct ice_vsi *vsi = ring->vsi; u32 rxdid = ICE_RXDID_FLEX_NIC; struct ice_rlan_ctx rlan_ctx; @@ -308,24 +312,23 @@ int ice_setup_rx_ctx(struct ice_ring *ring) if (ring->xsk_umem) { xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); - ring->rx_buf_len = ring->xsk_umem->chunk_size_nohr - - XDP_PACKET_HEADROOM; + ring->rx_buf_len = + xsk_umem_get_rx_frame_size(ring->xsk_umem); /* For AF_XDP ZC, we disallow packets to span on * multiple buffers, thus letting us skip that * handling in the fast-path. 
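/* Editor's note: num_bufs above comes from ICE_DESC_UNUSED(), the usual
 * "keep one slot empty" descriptor-ring rule (next_to_use == next_to_clean
 * then means empty rather than full). Assuming the macro has the shape
 * these Intel drivers normally use:
 *
 *	unused = ((ntc > ntu) ? 0 : count) + ntc - ntu - 1
 *
 * e.g. count = 512, ntu = 10, ntc = 4 -> 512 + 4 - 10 - 1 = 505 slots. */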
*/ chain_len = 1; - ring->zca.free = ice_zca_free; err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, - MEM_TYPE_ZERO_COPY, - &ring->zca); + MEM_TYPE_XSK_BUFF_POOL, + NULL); if (err) return err; + xsk_buff_set_rxq_info(ring->xsk_umem, &ring->xdp_rxq); - dev_info(ice_pf_to_dev(vsi->back), "Registered XDP mem model MEM_TYPE_ZERO_COPY on Rx ring %d\n", + dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n", ring->q_index); } else { - ring->zca.free = NULL; if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) /* coverity[check_return] */ xdp_rxq_info_reg(&ring->xdp_rxq, @@ -376,7 +379,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring) /* Max packet size for this queue - must not be set to a larger value * than 5 x DBUF */ - rlan_ctx.rxmax = min_t(u16, vsi->max_frame, + rlan_ctx.rxmax = min_t(u32, vsi->max_frame, chain_len * ring->rx_buf_len); /* Rx queue threshold in units of 64 */ @@ -407,7 +410,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring) /* Absolute queue number out of 2K needs to be passed */ err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q); if (err) { - dev_err(ice_pf_to_dev(vsi->back), "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n", + dev_err(dev, "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n", pf_q, err); return -EIO; } @@ -425,13 +428,23 @@ int ice_setup_rx_ctx(struct ice_ring *ring) ring->tail = hw->hw_addr + QRX_TAIL(pf_q); writel(0, ring->tail); - err = ring->xsk_umem ? - ice_alloc_rx_bufs_slow_zc(ring, ICE_DESC_UNUSED(ring)) : - ice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring)); - if (err) - dev_info(ice_pf_to_dev(vsi->back), "Failed allocate some buffers on %sRx ring %d (pf_q %d)\n", - ring->xsk_umem ? "UMEM enabled " : "", - ring->q_index, pf_q); + if (ring->xsk_umem) { + if (!xsk_buff_can_alloc(ring->xsk_umem, num_bufs)) { + dev_warn(dev, "UMEM does not provide enough addresses to fill %d buffers on Rx ring %d\n", + num_bufs, ring->q_index); + dev_warn(dev, "Change Rx ring/fill queue size to avoid performance issues\n"); + + return 0; + } + + err = ice_alloc_rx_bufs_zc(ring, num_bufs); + if (err) + dev_info(dev, "Failed to allocate some buffers on UMEM enabled Rx ring %d (pf_q %d)\n", + ring->q_index, pf_q); + return 0; + } + + ice_alloc_rx_bufs(ring, num_bufs); return 0; } @@ -453,7 +466,7 @@ int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg) if (ret) { /* contig failed, so try with scatter approach */ qs_cfg->mapping_mode = ICE_VSI_MAP_SCATTER; - qs_cfg->q_count = min_t(u16, qs_cfg->q_count, + qs_cfg->q_count = min_t(unsigned int, qs_cfg->q_count, qs_cfg->scatter_count); ret = __ice_vsi_get_qs_sc(qs_cfg); } @@ -526,7 +539,8 @@ int ice_vsi_wait_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx) int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi) { struct device *dev = ice_pf_to_dev(vsi->back); - int v_idx, err; + u16 v_idx; + int err; if (vsi->q_vectors[0]) { dev_dbg(dev, "VSI %d has existing q_vectors\n", vsi->vsi_num); @@ -562,7 +576,7 @@ err_out: void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi) { int q_vectors = vsi->num_q_vectors; - int tx_rings_rem, rx_rings_rem; + u16 tx_rings_rem, rx_rings_rem; int v_id; /* initially assigning remaining rings count to VSIs num queue value */ @@ -571,10 +585,12 @@ void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi) for (v_id = 0; v_id < q_vectors; v_id++) { struct ice_q_vector *q_vector = vsi->q_vectors[v_id]; - int tx_rings_per_v, rx_rings_per_v, q_id, q_base; + u8 tx_rings_per_v, rx_rings_per_v; + u16 q_id, q_base; /* Tx rings mapping to vector */ - tx_rings_per_v = 
DIV_ROUND_UP(tx_rings_rem, q_vectors - v_id); + tx_rings_per_v = (u8)DIV_ROUND_UP(tx_rings_rem, + q_vectors - v_id); q_vector->num_ring_tx = tx_rings_per_v; q_vector->tx.ring = NULL; q_vector->tx.itr_idx = ICE_TX_ITR; @@ -590,7 +606,8 @@ void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi) tx_rings_rem -= tx_rings_per_v; /* Rx rings mapping to vector */ - rx_rings_per_v = DIV_ROUND_UP(rx_rings_rem, q_vectors - v_id); + rx_rings_per_v = (u8)DIV_ROUND_UP(rx_rings_rem, + q_vectors - v_id); q_vector->num_ring_rx = rx_rings_per_v; q_vector->rx.ring = NULL; q_vector->rx.itr_idx = ICE_RX_ITR; @@ -633,6 +650,7 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring, struct ice_aqc_add_txqs_perq *txq; struct ice_pf *pf = vsi->back; u8 buf_len = sizeof(*qg_buf); + struct ice_hw *hw = &pf->hw; enum ice_status status; u16 pf_q; u8 tc; @@ -641,13 +659,13 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring, ice_setup_tx_ctx(ring, &tlan_ctx, pf_q); /* copy context contents into the qg_buf */ qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q); - ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx, + ice_set_ctx(hw, (u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx, ice_tlan_ctx_info); /* init queue specific tail reg. It is referred as * transmit comm scheduler queue doorbell. */ - ring->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q); + ring->tail = hw->hw_addr + QTX_COMM_DBELL(pf_q); if (IS_ENABLED(CONFIG_DCB)) tc = ring->dcb_tc; @@ -662,8 +680,8 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring, status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, ring->q_handle, 1, qg_buf, buf_len, NULL); if (status) { - dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %d\n", - status); + dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %s\n", + ice_stat_str(status)); return -ENODEV; } @@ -832,8 +850,8 @@ ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, } else if (status == ICE_ERR_DOES_NOT_EXIST) { dev_dbg(ice_pf_to_dev(vsi->back), "LAN Tx queues do not exist, nothing to disable\n"); } else if (status) { - dev_err(ice_pf_to_dev(vsi->back), "Failed to disable LAN Tx queues, error: %d\n", - status); + dev_err(ice_pf_to_dev(vsi->back), "Failed to disable LAN Tx queues, error: %s\n", + ice_stat_str(status)); return -ENODEV; } diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 2c0d8fd3d5cd..8c73e161829d 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -316,12 +316,78 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse, } /** + * ice_fill_tx_timer_and_fc_thresh + * @hw: pointer to the HW struct + * @cmd: pointer to MAC cfg structure + * + * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command + * descriptor + */ +static void +ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw, + struct ice_aqc_set_mac_cfg *cmd) +{ + u16 fc_thres_val, tx_timer_val; + u32 val; + + /* We read back the transmit timer and FC threshold value of + * LFC. Thus, we will use index = + * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX. 
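ice_fill_tx_timer_and_fc_thresh() here, and the ICE_AQC_CAPS_FD capability parsing further below, both follow the same register idiom: read a 32-bit word, mask out a field, and shift it down to bit zero. A small sketch of that idiom with a hypothetical register layout loosely modeled on GLQF_FD_SIZE's guaranteed/best-effort split; the real field widths may differ.

#include <stdio.h>

/* Hypothetical 32-bit register carved into two 15-bit fields. */
#define FD_GSIZE_S	0
#define FD_GSIZE_M	(0x7FFFu << FD_GSIZE_S)
#define FD_BSIZE_S	16
#define FD_BSIZE_M	(0x7FFFu << FD_BSIZE_S)

/* Extract one field: mask first, then shift to bit zero. */
static unsigned int get_field(unsigned int reg, unsigned int mask,
			      unsigned int shift)
{
	return (reg & mask) >> shift;
}

int main(void)
{
	unsigned int reg = (512u << FD_BSIZE_S) | 2048u; /* fake readback */

	printf("guaranteed = %u, best effort = %u\n",
	       get_field(reg, FD_GSIZE_M, FD_GSIZE_S),
	       get_field(reg, FD_BSIZE_M, FD_BSIZE_S));
	return 0;
}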
+ * + * Also, because we are operating on transmit timer and FC + * threshold of LFC, we don't turn on any bit in tx_tmr_priority + */ +#define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX + + /* Retrieve the transmit timer */ + val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC)); + tx_timer_val = val & + PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M; + cmd->tx_tmr_value = cpu_to_le16(tx_timer_val); + + /* Retrieve the FC threshold */ + val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC)); + fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M; + + cmd->fc_refresh_threshold = cpu_to_le16(fc_thres_val); +} + +/** + * ice_aq_set_mac_cfg + * @hw: pointer to the HW struct + * @max_frame_size: Maximum Frame Size to be supported + * @cd: pointer to command details structure or NULL + * + * Set MAC configuration (0x0603) + */ +enum ice_status +ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd) +{ + struct ice_aqc_set_mac_cfg *cmd; + struct ice_aq_desc desc; + + cmd = &desc.params.set_mac_cfg; + + if (max_frame_size == 0) + return ICE_ERR_PARAM; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg); + + cmd->max_frame_size = cpu_to_le16(max_frame_size); + + ice_fill_tx_timer_and_fc_thresh(hw, cmd); + + return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); +} + +/** * ice_init_fltr_mgmt_struct - initializes filter management list and locks * @hw: pointer to the HW struct */ static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw) { struct ice_switch_info *sw; + enum ice_status status; hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*hw->switch_info), GFP_KERNEL); @@ -332,7 +398,12 @@ static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw) INIT_LIST_HEAD(&sw->vsi_list_map_head); - return ice_init_def_sw_recp(hw); + status = ice_init_def_sw_recp(hw); + if (status) { + devm_kfree(ice_hw_to_dev(hw), hw->switch_info); + return status; + } + return 0; } /** @@ -653,6 +724,10 @@ enum ice_status ice_init_hw(struct ice_hw *hw) if (status) goto err_unroll_cqinit; + /* Set bit to enable Flow Director filters */ + wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M); + INIT_LIST_HEAD(&hw->fdir_list_head); + ice_clear_pxe_mode(hw); status = ice_init_nvm(hw); @@ -743,9 +818,18 @@ enum ice_status ice_init_hw(struct ice_hw *hw) if (status) goto err_unroll_fltr_mgmt_struct; + /* enable jumbo frame support at MAC level */ + status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL); + if (status) + goto err_unroll_fltr_mgmt_struct; + /* Obtain counter base index which would be used by flow director */ + status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base); + if (status) + goto err_unroll_fltr_mgmt_struct; status = ice_init_hw_tbls(hw); if (status) goto err_unroll_fltr_mgmt_struct; + mutex_init(&hw->tnl_lock); return 0; err_unroll_fltr_mgmt_struct: @@ -769,12 +853,14 @@ err_unroll_cqinit: */ void ice_deinit_hw(struct ice_hw *hw) { + ice_free_fd_res_cntr(hw, hw->fd_ctr_base); ice_cleanup_fltr_mgmt_struct(hw); ice_sched_cleanup_all(hw); ice_sched_clear_agg(hw); ice_free_seg(hw); ice_free_hw_tbls(hw); + mutex_destroy(&hw->tnl_lock); if (hw->port_info) { devm_kfree(ice_hw_to_dev(hw), hw->port_info); @@ -1012,7 +1098,7 @@ ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx, rlan_ctx->prefena = 1; - ice_set_ctx((u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info); + ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info); return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index); } @@ -1678,6 
+1764,33 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count, "%s: msix_vector_first_id = %d\n", prefix, caps->msix_vector_first_id); break; + case ICE_AQC_CAPS_FD: + if (dev_p) { + dev_p->num_flow_director_fltr = number; + ice_debug(hw, ICE_DBG_INIT, + "%s: num_flow_director_fltr = %d\n", + prefix, + dev_p->num_flow_director_fltr); + } + if (func_p) { + u32 reg_val, val; + + reg_val = rd32(hw, GLQF_FD_SIZE); + val = (reg_val & GLQF_FD_SIZE_FD_GSIZE_M) >> + GLQF_FD_SIZE_FD_GSIZE_S; + func_p->fd_fltr_guar = + ice_get_num_per_func(hw, val); + val = (reg_val & GLQF_FD_SIZE_FD_BSIZE_M) >> + GLQF_FD_SIZE_FD_BSIZE_S; + func_p->fd_fltr_best_effort = val; + ice_debug(hw, ICE_DBG_INIT, + "%s: fd_fltr_guar = %d\n", + prefix, func_p->fd_fltr_guar); + ice_debug(hw, ICE_DBG_INIT, + "%s: fd_fltr_best_effort = %d\n", + prefix, func_p->fd_fltr_best_effort); + } + break; case ICE_AQC_CAPS_MAX_MTU: caps->max_mtu = number; ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n", @@ -1887,10 +2000,7 @@ ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags, ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write); cmd->flags = flags; - - /* Prep values for flags, sah, sal */ - cmd->sah = htons(*((const u16 *)mac_addr)); - cmd->sal = htonl(*((const u32 *)(mac_addr + 2))); + ether_addr_copy(cmd->mac_addr, mac_addr); return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); } @@ -3089,12 +3199,14 @@ ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) /** * ice_set_ctx - set context bits in packed structure + * @hw: pointer to the hardware structure * @src_ctx: pointer to a generic non-packed context structure * @dest_ctx: pointer to memory for the packed structure * @ce_info: a description of the structure to be transformed */ enum ice_status -ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) +ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx, + const struct ice_ctx_ele *ce_info) { int f; @@ -3103,6 +3215,12 @@ ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) * using the correct size so that we are correct regardless * of the endianness of the machine. */ + if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) { + ice_debug(hw, ICE_DBG_QCTX, + "Field %d width of %d bits larger than size of %d byte(s) ... 
skipping write\n", + f, ce_info[f].width, ce_info[f].size_of); + continue; + } switch (ce_info[f].size_of) { case sizeof(u8): ice_write_byte(src_ctx, dest_ctx, &ce_info[f]); diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index 8104f3d64d96..9b9e50d2398b 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -70,7 +70,8 @@ enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading); void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode); extern const struct ice_ctx_ele ice_tlan_ctx_info[]; enum ice_status -ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info); +ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx, + const struct ice_ctx_ele *ce_info); extern struct mutex ice_global_cfg_lock_sw; @@ -108,6 +109,8 @@ enum ice_status ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link, struct ice_sq_cd *cd); enum ice_status +ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd); +enum ice_status ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse, struct ice_link_status *link, struct ice_sq_cd *cd); enum ice_status diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c index dd946866d7b8..479a74efc536 100644 --- a/drivers/net/ethernet/intel/ice/ice_controlq.c +++ b/drivers/net/ethernet/intel/ice/ice_controlq.c @@ -12,6 +12,7 @@ do { \ (qinfo)->sq.bal = prefix##_ATQBAL; \ (qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M; \ (qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M; \ + (qinfo)->sq.len_crit_mask = prefix##_ATQLEN_ATQCRIT_M; \ (qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M; \ (qinfo)->rq.head = prefix##_ARQH; \ (qinfo)->rq.tail = prefix##_ARQT; \ @@ -20,6 +21,7 @@ do { \ (qinfo)->rq.bal = prefix##_ARQBAL; \ (qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M; \ (qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M; \ + (qinfo)->rq.len_crit_mask = prefix##_ARQLEN_ARQCRIT_M; \ (qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M; \ } while (0) @@ -199,7 +201,9 @@ unwind_alloc_rq_bufs: cq->rq.r.rq_bi[i].pa = 0; cq->rq.r.rq_bi[i].size = 0; } + cq->rq.r.rq_bi = NULL; devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head); + cq->rq.dma_head = NULL; return ICE_ERR_NO_MEMORY; } @@ -245,7 +249,9 @@ unwind_alloc_sq_bufs: cq->sq.r.sq_bi[i].pa = 0; cq->sq.r.sq_bi[i].size = 0; } + cq->sq.r.sq_bi = NULL; devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head); + cq->sq.dma_head = NULL; return ICE_ERR_NO_MEMORY; } @@ -304,6 +310,28 @@ ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq) return 0; } +#define ICE_FREE_CQ_BUFS(hw, qi, ring) \ +do { \ + int i; \ + /* free descriptors */ \ + if ((qi)->ring.r.ring##_bi) \ + for (i = 0; i < (qi)->num_##ring##_entries; i++) \ + if ((qi)->ring.r.ring##_bi[i].pa) { \ + dmam_free_coherent(ice_hw_to_dev(hw), \ + (qi)->ring.r.ring##_bi[i].size, \ + (qi)->ring.r.ring##_bi[i].va, \ + (qi)->ring.r.ring##_bi[i].pa); \ + (qi)->ring.r.ring##_bi[i].va = NULL;\ + (qi)->ring.r.ring##_bi[i].pa = 0;\ + (qi)->ring.r.ring##_bi[i].size = 0;\ + } \ + /* free the buffer info list */ \ + if ((qi)->ring.cmd_buf) \ + devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf); \ + /* free DMA head */ \ + devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head); \ +} while (0) + /** * ice_init_sq - main initialization routine for Control ATQ * @hw: pointer to the hardware structure @@ -357,6 +385,7 @@ static enum ice_status ice_init_sq(struct ice_hw *hw, struct 
ice_ctl_q_info *cq) goto init_ctrlq_exit; init_ctrlq_free_rings: + ICE_FREE_CQ_BUFS(hw, cq, sq); ice_free_cq_ring(hw, &cq->sq); init_ctrlq_exit: @@ -416,33 +445,13 @@ static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq) goto init_ctrlq_exit; init_ctrlq_free_rings: + ICE_FREE_CQ_BUFS(hw, cq, rq); ice_free_cq_ring(hw, &cq->rq); init_ctrlq_exit: return ret_code; } -#define ICE_FREE_CQ_BUFS(hw, qi, ring) \ -do { \ - int i; \ - /* free descriptors */ \ - for (i = 0; i < (qi)->num_##ring##_entries; i++) \ - if ((qi)->ring.r.ring##_bi[i].pa) { \ - dmam_free_coherent(ice_hw_to_dev(hw), \ - (qi)->ring.r.ring##_bi[i].size,\ - (qi)->ring.r.ring##_bi[i].va,\ - (qi)->ring.r.ring##_bi[i].pa);\ - (qi)->ring.r.ring##_bi[i].va = NULL; \ - (qi)->ring.r.ring##_bi[i].pa = 0; \ - (qi)->ring.r.ring##_bi[i].size = 0; \ - } \ - /* free the buffer info list */ \ - if ((qi)->ring.cmd_buf) \ - devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf); \ - /* free DMA head */ \ - devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head); \ -} while (0) - /** * ice_shutdown_sq - shutdown the Control ATQ * @hw: pointer to the hardware structure @@ -635,6 +644,50 @@ init_ctrlq_free_sq: } /** + * ice_shutdown_ctrlq - shutdown routine for any control queue + * @hw: pointer to the hardware structure + * @q_type: specific Control queue type + * + * NOTE: this function does not destroy the control queue locks. + */ +static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) +{ + struct ice_ctl_q_info *cq; + + switch (q_type) { + case ICE_CTL_Q_ADMIN: + cq = &hw->adminq; + if (ice_check_sq_alive(hw, cq)) + ice_aq_q_shutdown(hw, true); + break; + case ICE_CTL_Q_MAILBOX: + cq = &hw->mailboxq; + break; + default: + return; + } + + ice_shutdown_sq(hw, cq); + ice_shutdown_rq(hw, cq); +} + +/** + * ice_shutdown_all_ctrlq - shutdown routine for all control queues + * @hw: pointer to the hardware structure + * + * NOTE: this function does not destroy the control queue locks. The driver + * may call this at runtime to shutdown and later restart control queues, such + * as in response to a reset event. 
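ICE_FREE_CQ_BUFS above is a multi-statement cleanup macro wrapped in do { ... } while (0), which is what lets a call site use it like a single statement, even under an unbraced if/else. A compilable toy showing why the wrapper matters; free_one() and FREE_BOTH() are invented for the demonstration.

#include <stdio.h>

/* Without do/while(0), a multi-statement macro breaks under an
 * unbraced if/else; the wrapper makes it behave as one statement.
 */
#define FREE_BOTH(a, b)		\
do {				\
	free_one(a);		\
	free_one(b);		\
} while (0)

static void free_one(int id)
{
	printf("freed %d\n", id);
}

int main(void)
{
	int have_bufs = 1;

	if (have_bufs)
		FREE_BOTH(1, 2);	/* expands safely as one statement */
	else
		printf("nothing to free\n");
	return 0;
}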
+ */ +void ice_shutdown_all_ctrlq(struct ice_hw *hw) +{ + /* Shutdown FW admin queue */ + ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN); + /* Shutdown PF-VF Mailbox */ + ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX); +} + +/** * ice_init_all_ctrlq - main initialization routine for all control queues * @hw: pointer to the hardware structure * @@ -649,17 +702,27 @@ init_ctrlq_free_sq: */ enum ice_status ice_init_all_ctrlq(struct ice_hw *hw) { - enum ice_status ret_code; + enum ice_status status; + u32 retry = 0; /* Init FW admin queue */ - ret_code = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN); - if (ret_code) - return ret_code; + do { + status = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN); + if (status) + return status; - ret_code = ice_init_check_adminq(hw); - if (ret_code) - return ret_code; + status = ice_init_check_adminq(hw); + if (status != ICE_ERR_AQ_FW_CRITICAL) + break; + + ice_debug(hw, ICE_DBG_AQ_MSG, + "Retry Admin Queue init due to FW critical error\n"); + ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN); + msleep(ICE_CTL_Q_ADMIN_INIT_MSEC); + } while (retry++ < ICE_CTL_Q_ADMIN_INIT_TIMEOUT); + if (status) + return status; /* Init Mailbox queue */ return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX); } @@ -701,50 +764,6 @@ enum ice_status ice_create_all_ctrlq(struct ice_hw *hw) } /** - * ice_shutdown_ctrlq - shutdown routine for any control queue - * @hw: pointer to the hardware structure - * @q_type: specific Control queue type - * - * NOTE: this function does not destroy the control queue locks. - */ -static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) -{ - struct ice_ctl_q_info *cq; - - switch (q_type) { - case ICE_CTL_Q_ADMIN: - cq = &hw->adminq; - if (ice_check_sq_alive(hw, cq)) - ice_aq_q_shutdown(hw, true); - break; - case ICE_CTL_Q_MAILBOX: - cq = &hw->mailboxq; - break; - default: - return; - } - - ice_shutdown_sq(hw, cq); - ice_shutdown_rq(hw, cq); -} - -/** - * ice_shutdown_all_ctrlq - shutdown routine for all control queues - * @hw: pointer to the hardware structure - * - * NOTE: this function does not destroy the control queue locks. The driver - * may call this at runtime to shutdown and later restart control queues, such - * as in response to a reset event. 
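The reworked ice_init_all_ctrlq() below retries admin queue initialization a bounded number of times when the firmware reports a critical error, shutting the queue down and sleeping between attempts. A standalone sketch of that retry shape, with a fake init function standing in for the real bring-up.

#include <stdio.h>

#define INIT_RETRIES	10	/* mirrors ICE_CTL_Q_ADMIN_INIT_TIMEOUT */

enum status { OK, ERR_FW_CRITICAL, ERR_TIMEOUT };

/* Fake init that reports a critical FW error on the first two tries. */
static enum status init_adminq(void)
{
	static int calls;

	return ++calls < 3 ? ERR_FW_CRITICAL : OK;
}

int main(void)
{
	enum status status;
	unsigned int retry = 0;

	do {
		status = init_adminq();
		if (status != ERR_FW_CRITICAL)
			break;
		printf("retry %u after critical FW error\n", retry);
		/* the driver also shuts the queue down and sleeps here */
	} while (retry++ < INIT_RETRIES);

	printf("final status: %d\n", status);
	return 0;
}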
- */ -void ice_shutdown_all_ctrlq(struct ice_hw *hw) -{ - /* Shutdown FW admin queue */ - ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN); - /* Shutdown PF-VF Mailbox */ - ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX); -} - -/** * ice_destroy_ctrlq_locks - Destroy locks for a control queue * @cq: pointer to the control queue * @@ -1042,9 +1061,15 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, /* update the error if time out occurred */ if (!cmd_completed) { - ice_debug(hw, ICE_DBG_AQ_MSG, - "Control Send Queue Writeback timeout.\n"); - status = ICE_ERR_AQ_TIMEOUT; + if (rd32(hw, cq->rq.len) & cq->rq.len_crit_mask || + rd32(hw, cq->sq.len) & cq->sq.len_crit_mask) { + ice_debug(hw, ICE_DBG_AQ_MSG, "Critical FW error.\n"); + status = ICE_ERR_AQ_FW_CRITICAL; + } else { + ice_debug(hw, ICE_DBG_AQ_MSG, + "Control Send Queue Writeback timeout.\n"); + status = ICE_ERR_AQ_TIMEOUT; + } } sq_send_command_error: @@ -1128,7 +1153,7 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, } memcpy(&e->desc, desc, sizeof(e->desc)); datalen = le16_to_cpu(desc->datalen); - e->msg_len = min(datalen, e->buf_len); + e->msg_len = min_t(u16, datalen, e->buf_len); if (e->msg_buf && e->msg_len) memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va, e->msg_len); diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h index bf0ebe6149e8..faaa08e8171b 100644 --- a/drivers/net/ethernet/intel/ice/ice_controlq.h +++ b/drivers/net/ethernet/intel/ice/ice_controlq.h @@ -34,6 +34,8 @@ enum ice_ctl_q { /* Control Queue timeout settings - max delay 250ms */ #define ICE_CTL_Q_SQ_CMD_TIMEOUT 2500 /* Count 2500 times */ #define ICE_CTL_Q_SQ_CMD_USEC 100 /* Check every 100usec */ +#define ICE_CTL_Q_ADMIN_INIT_TIMEOUT 10 /* Count 10 times */ +#define ICE_CTL_Q_ADMIN_INIT_MSEC 100 /* Check every 100msec */ struct ice_ctl_q_ring { void *dma_head; /* Virtual address to DMA head */ @@ -59,6 +61,7 @@ struct ice_ctl_q_ring { u32 bal; u32 len_mask; u32 len_ena_mask; + u32 len_crit_mask; u32 head_mask; }; diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c index 7bea09363b42..3c7f604c0c49 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c @@ -63,6 +63,64 @@ u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg) } /** + * ice_is_pfc_causing_hung_q + * @pf: pointer to PF structure + * @txqueue: Tx queue which is supposedly hung queue + * + * find if PFC is causing the hung queue, if yes return true else false + */ +bool ice_is_pfc_causing_hung_q(struct ice_pf *pf, unsigned int txqueue) +{ + u8 num_tcs = 0, i, tc, up_mapped_tc, up_in_tc = 0; + u64 ref_prio_xoff[ICE_MAX_UP]; + struct ice_vsi *vsi; + u32 up2tc; + + vsi = ice_get_main_vsi(pf); + if (!vsi) + return false; + + ice_for_each_traffic_class(i) + if (vsi->tc_cfg.ena_tc & BIT(i)) + num_tcs++; + + /* first find out the TC to which the hung queue belongs to */ + for (tc = 0; tc < num_tcs - 1; tc++) + if (ice_find_q_in_range(vsi->tc_cfg.tc_info[tc].qoffset, + vsi->tc_cfg.tc_info[tc + 1].qoffset, + txqueue)) + break; + + /* Build a bit map of all UPs associated to the suspect hung queue TC, + * so that we check for its counter increment. + */ + up2tc = rd32(&pf->hw, PRTDCB_TUP2TC); + for (i = 0; i < ICE_MAX_UP; i++) { + up_mapped_tc = (up2tc >> (i * 3)) & 0x7; + if (up_mapped_tc == tc) + up_in_tc |= BIT(i); + } + + /* Now that we figured out that hung queue is PFC enabled, still the + * Tx timeout can be legitimate. 
So to make sure Tx timeout is + * absolutely caused by PFC storm, check if the counters are + * incrementing. + */ + for (i = 0; i < ICE_MAX_UP; i++) + if (up_in_tc & BIT(i)) + ref_prio_xoff[i] = pf->stats.priority_xoff_rx[i]; + + ice_update_dcb_stats(pf); + + for (i = 0; i < ICE_MAX_UP; i++) + if (up_in_tc & BIT(i)) + if (pf->stats.priority_xoff_rx[i] > ref_prio_xoff[i]) + return true; + + return false; +} + +/** * ice_dcb_get_mode - gets the DCB mode * @port_info: pointer to port info structure * @host: if set it's HOST if not it's MANAGED @@ -526,16 +584,21 @@ static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool ets_willing, bool locked) */ static bool ice_dcb_tc_contig(u8 *prio_table) { - u8 max_tc = 0; + bool found_empty = false; + u8 used_tc = 0; int i; - for (i = 0; i < CEE_DCBX_MAX_PRIO; i++) { - u8 cur_tc = prio_table[i]; + /* Create a bitmap of used TCs */ + for (i = 0; i < CEE_DCBX_MAX_PRIO; i++) + used_tc |= BIT(prio_table[i]); - if (cur_tc > max_tc) - return false; - else if (cur_tc == max_tc) - max_tc++; + for (i = 0; i < CEE_DCBX_MAX_PRIO; i++) { + if (used_tc & BIT(i)) { + if (found_empty) + return false; + } else { + found_empty = true; + } } return true; diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h index 37680e815b02..7c42324494d2 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h @@ -17,6 +17,8 @@ void ice_dcb_rebuild(struct ice_pf *pf); u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg); u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg); +void ice_vsi_set_dcb_tc_cfg(struct ice_vsi *vsi); +bool ice_is_pfc_causing_hung_q(struct ice_pf *pf, unsigned int txqueue); u8 ice_dcb_get_tc(struct ice_vsi *vsi, int queue_index); int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked); @@ -32,6 +34,20 @@ void ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, struct ice_rq_event_info *event); void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc); + +/** + * ice_find_q_in_range + * @low: start of queue range for a TC i.e. 
offset of TC + * @high: start of queue for next TC + * @tx_q: hung_queue/tx_queue + * + * finds if queue 'tx_q' falls between the two offsets of any given TC + */ +static inline bool ice_find_q_in_range(u16 low, u16 high, unsigned int tx_q) +{ + return (tx_q >= low) && (tx_q < high); +} + static inline void ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, struct ice_ring *ring) { @@ -79,6 +95,13 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_ring __always_unused *tx_ring, return 0; } +static inline bool +ice_is_pfc_causing_hung_q(struct ice_pf __always_unused *pf, + unsigned int __always_unused txqueue) +{ + return false; +} + #define ice_update_dcb_stats(pf) do {} while (0) #define ice_pf_dcb_recfg(pf) do {} while (0) #define ice_vsi_cfg_dcb_rings(vsi) do {} while (0) diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c index c4c12414083a..87f91b750d59 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c @@ -7,8 +7,6 @@ #include "ice_dcb_nl.h" #include <net/dcbnl.h> -#define ICE_APP_PROT_ID_ROCE 0x8915 - /** * ice_dcbnl_devreset - perform enough of a ifdown/ifup to sync DCBNL info * @netdev: device associated with interface that needs reset @@ -671,7 +669,7 @@ static bool ice_dcbnl_find_app(struct ice_dcbx_cfg *cfg, struct ice_dcb_app_priority_table *app) { - int i; + unsigned int i; for (i = 0; i < cfg->numapps; i++) { if (app->selector == cfg->app[i].selector && @@ -746,7 +744,8 @@ static int ice_dcbnl_delapp(struct net_device *netdev, struct dcb_app *app) { struct ice_pf *pf = ice_netdev_to_pf(netdev); struct ice_dcbx_cfg *old_cfg, *new_cfg; - int i, j, ret = 0; + unsigned int i, j; + int ret = 0; if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) return -EINVAL; @@ -869,7 +868,7 @@ void ice_dcbnl_set_all(struct ice_vsi *vsi) struct ice_port_info *pi; struct dcb_app sapp; struct ice_pf *pf; - int i; + unsigned int i; if (!netdev) return; @@ -941,7 +940,7 @@ ice_dcbnl_flush_apps(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg, struct ice_dcbx_cfg *new_cfg) { struct ice_vsi *main_vsi = ice_get_main_vsi(pf); - int i; + unsigned int i; if (!main_vsi) return; diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c index c6833944b90a..a73d06e06b5d 100644 --- a/drivers/net/ethernet/intel/ice/ice_devlink.c +++ b/drivers/net/ethernet/intel/ice/ice_devlink.c @@ -105,6 +105,27 @@ static int ice_info_ddp_pkg_version(struct ice_pf *pf, char *buf, size_t len) return 0; } +static int ice_info_netlist_ver(struct ice_pf *pf, char *buf, size_t len) +{ + struct ice_netlist_ver_info *netlist = &pf->hw.netlist_ver; + + /* The netlist version fields are BCD formatted */ + snprintf(buf, len, "%x.%x.%x-%x.%x.%x", netlist->major, netlist->minor, + netlist->type >> 16, netlist->type & 0xFFFF, netlist->rev, + netlist->cust_ver); + + return 0; +} + +static int ice_info_netlist_build(struct ice_pf *pf, char *buf, size_t len) +{ + struct ice_netlist_ver_info *netlist = &pf->hw.netlist_ver; + + snprintf(buf, len, "0x%08x", netlist->hash); + + return 0; +} + #define fixed(key, getter) { ICE_VERSION_FIXED, key, getter } #define running(key, getter) { ICE_VERSION_RUNNING, key, getter } @@ -128,6 +149,8 @@ static const struct ice_devlink_version { running(DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID, ice_info_eetrack), running("fw.app.name", ice_info_ddp_pkg_name), running(DEVLINK_INFO_VERSION_GENERIC_FW_APP, ice_info_ddp_pkg_version), + running("fw.netlist", ice_info_netlist_ver), + 
running("fw.netlist.build", ice_info_netlist_build), }; /** diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index 593fb37bd59e..fd1849155d85 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -5,6 +5,7 @@ #include "ice.h" #include "ice_flow.h" +#include "ice_fltr.h" #include "ice_lib.h" #include "ice_dcb_lib.h" @@ -129,6 +130,8 @@ static const struct ice_stats ice_gstrings_pf_stats[] = { ICE_PF_STAT("illegal_bytes.nic", stats.illegal_bytes), ICE_PF_STAT("mac_local_faults.nic", stats.mac_local_faults), ICE_PF_STAT("mac_remote_faults.nic", stats.mac_remote_faults), + ICE_PF_STAT("fdir_sb_match.nic", stats.fd_sb_match), + ICE_PF_STAT("fdir_sb_status.nic", stats.fd_sb_status), }; static const u32 ice_regs_dump_list[] = { @@ -139,9 +142,6 @@ static const u32 ice_regs_dump_list[] = { QINT_RQCTL(0), PFINT_OICR_ENA, QRX_ITR(0), - PF0INT_ITR_0(0), - PF0INT_ITR_1(0), - PF0INT_ITR_2(0), }; struct ice_priv_flag { @@ -157,6 +157,8 @@ struct ice_priv_flag { static const struct ice_priv_flag ice_gstrings_priv_flags[] = { ICE_PRIV_FLAG("link-down-on-close", ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA), ICE_PRIV_FLAG("fw-lldp-agent", ICE_FLAG_FW_LLDP_AGENT), + ICE_PRIV_FLAG("vf-true-promisc-support", + ICE_FLAG_VF_TRUE_PROMISC_ENA), ICE_PRIV_FLAG("mdd-auto-reset-vf", ICE_FLAG_MDD_AUTO_RESET_VF), ICE_PRIV_FLAG("legacy-rx", ICE_FLAG_LEGACY_RX), }; @@ -203,7 +205,7 @@ ice_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) struct ice_pf *pf = np->vsi->back; struct ice_hw *hw = &pf->hw; u32 *regs_buf = (u32 *)p; - int i; + unsigned int i; regs->version = 1; @@ -273,8 +275,9 @@ ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, status = ice_acquire_nvm(hw, ICE_RES_READ); if (status) { - dev_err(dev, "ice_acquire_nvm failed, err %d aq_err %d\n", - status, hw->adminq.sq_last_status); + dev_err(dev, "ice_acquire_nvm failed, err %s aq_err %s\n", + ice_stat_str(status), + ice_aq_str(hw->adminq.sq_last_status)); ret = -EIO; goto out; } @@ -282,8 +285,9 @@ ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, status = ice_read_flat_nvm(hw, eeprom->offset, &eeprom->len, buf, false); if (status) { - dev_err(dev, "ice_read_flat_nvm failed, err %d aq_err %d\n", - status, hw->adminq.sq_last_status); + dev_err(dev, "ice_read_flat_nvm failed, err %s aq_err %s\n", + ice_stat_str(status), + ice_aq_str(hw->adminq.sq_last_status)); ret = -EIO; goto release; } @@ -304,7 +308,7 @@ out: */ static bool ice_active_vfs(struct ice_pf *pf) { - int i; + unsigned int i; ice_for_each_vf(pf, i) { struct ice_vf *vf = &pf->vf[i]; @@ -332,7 +336,8 @@ static u64 ice_link_test(struct net_device *netdev) netdev_info(netdev, "link test\n"); status = ice_get_link_status(np->vsi->port_info, &link_up); if (status) { - netdev_err(netdev, "link query error, status = %d\n", status); + netdev_err(netdev, "link query error, status = %s\n", + ice_stat_str(status)); return 1; } @@ -373,7 +378,7 @@ static int ice_reg_pattern_test(struct ice_hw *hw, u32 reg, u32 mask) 0x00000000, 0xFFFFFFFF }; u32 val, orig_val; - int i; + unsigned int i; orig_val = rd32(hw, reg); for (i = 0; i < ARRAY_SIZE(patterns); ++i) { @@ -426,7 +431,7 @@ static u64 ice_reg_test(struct net_device *netdev) GLINT_ITR(2, 1) - GLINT_ITR(2, 0)}, {GLINT_CTL, 0xffff0001, 1, 0} }; - int i; + unsigned int i; netdev_dbg(netdev, "Register test\n"); for (i = 0; i < ARRAY_SIZE(ice_reg_list); ++i) { @@ -671,7 +676,6 @@ static u64 
ice_loopback_test(struct net_device *netdev) struct ice_ring *tx_ring, *rx_ring; u8 broadcast[ETH_ALEN], ret = 0; int num_frames, valid_frames; - LIST_HEAD(tmp_list); struct device *dev; u8 *tx_frame; int i; @@ -707,16 +711,11 @@ static u64 ice_loopback_test(struct net_device *netdev) /* Test VSI needs to receive broadcast packets */ eth_broadcast_addr(broadcast); - if (ice_add_mac_to_list(test_vsi, &tmp_list, broadcast)) { + if (ice_fltr_add_mac(test_vsi, broadcast, ICE_FWD_TO_VSI)) { ret = 5; goto lbtest_mac_dis; } - if (ice_add_mac(&pf->hw, &tmp_list)) { - ret = 6; - goto free_mac_list; - } - if (ice_lbtest_create_frame(pf, &tx_frame, ICE_LB_FRAME_SIZE)) { ret = 7; goto remove_mac_filters; @@ -739,10 +738,8 @@ static u64 ice_loopback_test(struct net_device *netdev) lbtest_free_frame: devm_kfree(dev, tx_frame); remove_mac_filters: - if (ice_remove_mac(&pf->hw, &tmp_list)) + if (ice_fltr_remove_mac(test_vsi, broadcast, ICE_FWD_TO_VSI)) netdev_err(netdev, "Could not remove MAC filter for the test VSI\n"); -free_mac_list: - ice_free_fltr_list(dev, &tmp_list); lbtest_mac_dis: /* Disable MAC loopback after the test is completed. */ if (ice_aq_set_mac_loopback(&pf->hw, false, NULL)) @@ -1158,8 +1155,9 @@ static int ice_nway_reset(struct net_device *netdev) status = ice_aq_set_link_restart_an(pi, false, NULL); if (status) { - netdev_info(netdev, "link restart failed, err %d aq_err %d\n", - status, pi->hw->adminq.sq_last_status); + netdev_info(netdev, "link restart failed, err %s aq_err %s\n", + ice_stat_str(status), + ice_aq_str(pi->hw->adminq.sq_last_status)); return -EIO; } @@ -1308,6 +1306,16 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags) ice_down(vsi); ice_up(vsi); } + /* don't allow modification of this flag when a single VF is in + * promiscuous mode because it's not supported + */ + if (test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, change_flags) && + ice_is_any_vf_in_promisc(pf)) { + dev_err(dev, "Changing vf-true-promisc-support flag while VF(s) are in promiscuous mode not supported\n"); + /* toggle bit back to previous state */ + change_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags); + ret = -EAGAIN; + } clear_bit(ICE_FLAG_ETHTOOL_CTXT, pf->flags); return ret; } @@ -2450,8 +2458,8 @@ ice_set_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc) status = ice_add_rss_cfg(&pf->hw, vsi->idx, hashed_flds, hdrs); if (status) { - dev_dbg(dev, "ice_add_rss_cfg failed, vsi num = %d, error = %d\n", - vsi->vsi_num, status); + dev_dbg(dev, "ice_add_rss_cfg failed, vsi num = %d, error = %s\n", + vsi->vsi_num, ice_stat_str(status)); return -EINVAL; } @@ -2526,6 +2534,10 @@ static int ice_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) struct ice_vsi *vsi = np->vsi; switch (cmd->cmd) { + case ETHTOOL_SRXCLSRLINS: + return ice_add_fdir_ethtool(vsi, cmd); + case ETHTOOL_SRXCLSRLDEL: + return ice_del_fdir_ethtool(vsi, cmd); case ETHTOOL_SRXFH: return ice_set_rss_hash_opt(vsi, cmd); default: @@ -2549,12 +2561,27 @@ ice_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; int ret = -EOPNOTSUPP; + struct ice_hw *hw; + + hw = &vsi->back->hw; switch (cmd->cmd) { case ETHTOOL_GRXRINGS: cmd->data = vsi->rss_size; ret = 0; break; + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = hw->fdir_active_fltr; + /* report total rule count */ + cmd->data = ice_get_fdir_cnt_all(hw); + ret = 0; + break; + case ETHTOOL_GRXCLSRULE: + ret = ice_get_ethtool_fdir_entry(hw, cmd); + break; + case 
ETHTOOL_GRXCLSRLALL: + ret = ice_get_fdir_fltr_ids(hw, cmd, (u32 *)rule_locs); + break; case ETHTOOL_GRXFH: ice_get_rss_hash_opt(vsi, cmd); ret = 0; @@ -2593,7 +2620,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; int i, timeout = 50, err = 0; - u32 new_rx_cnt, new_tx_cnt; + u16 new_rx_cnt, new_tx_cnt; if (ring->tx_pending > ICE_MAX_NUM_DESC || ring->tx_pending < ICE_MIN_NUM_DESC || @@ -2645,8 +2672,8 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) if (ice_is_xdp_ena_vsi(vsi)) for (i = 0; i < vsi->num_xdp_txq; i++) vsi->xdp_rings[i]->count = new_tx_cnt; - vsi->num_tx_desc = new_tx_cnt; - vsi->num_rx_desc = new_rx_cnt; + vsi->num_tx_desc = (u16)new_tx_cnt; + vsi->num_rx_desc = (u16)new_rx_cnt; netdev_dbg(netdev, "Link is down, descriptor count change happens when link is brought up\n"); goto done; } @@ -2952,31 +2979,22 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) status = ice_set_fc(pi, &aq_failures, link_up); if (aq_failures & ICE_SET_FC_AQ_FAIL_GET) { - netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %d aq_err %d\n", - status, hw->adminq.sq_last_status); + netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %s aq_err %s\n", + ice_stat_str(status), + ice_aq_str(hw->adminq.sq_last_status)); err = -EAGAIN; } else if (aq_failures & ICE_SET_FC_AQ_FAIL_SET) { - netdev_info(netdev, "Set fc failed on the set_phy_config call with err %d aq_err %d\n", - status, hw->adminq.sq_last_status); + netdev_info(netdev, "Set fc failed on the set_phy_config call with err %s aq_err %s\n", + ice_stat_str(status), + ice_aq_str(hw->adminq.sq_last_status)); err = -EAGAIN; } else if (aq_failures & ICE_SET_FC_AQ_FAIL_UPDATE) { - netdev_info(netdev, "Set fc failed on the get_link_info call with err %d aq_err %d\n", - status, hw->adminq.sq_last_status); + netdev_info(netdev, "Set fc failed on the get_link_info call with err %s aq_err %s\n", + ice_stat_str(status), + ice_aq_str(hw->adminq.sq_last_status)); err = -EAGAIN; } - if (!test_bit(__ICE_DOWN, pf->state)) { - /* Give it a little more time to try to come back. If still - * down, restart autoneg link or reinitialize the interface. - */ - msleep(75); - if (!test_bit(__ICE_DOWN, pf->state)) - return ice_nway_reset(netdev); - - ice_down(vsi); - ice_up(vsi); - } - return err; } @@ -3184,6 +3202,10 @@ ice_get_channels(struct net_device *dev, struct ethtool_channels *ch) ch->combined_count = ice_get_combined_cnt(vsi); ch->rx_count = vsi->num_rxq - ch->combined_count; ch->tx_count = vsi->num_txq - ch->combined_count; + + /* report other queues */ + ch->other_count = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0; + ch->max_other = ch->other_count; } /** @@ -3227,8 +3249,9 @@ static int ice_vsi_set_dflt_rss_lut(struct ice_vsi *vsi, int req_rss_size) status = ice_aq_set_rss_lut(hw, vsi->idx, vsi->rss_lut_type, lut, vsi->rss_table_size); if (status) { - dev_err(dev, "Cannot set RSS lut, err %d aq_err %d\n", - status, hw->adminq.rq_last_status); + dev_err(dev, "Cannot set RSS lut, err %s aq_err %s\n", + ice_stat_str(status), + ice_aq_str(hw->adminq.sq_last_status)); err = -EIO; } @@ -3255,9 +3278,14 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch) return -EOPNOTSUPP; } /* do not support changing other_count */ - if (ch->other_count) + if (ch->other_count != (test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 
1U : 0U)) return -EINVAL; + if (test_bit(ICE_FLAG_FD_ENA, pf->flags) && pf->hw.fdir_active_fltr) { + netdev_err(dev, "Cannot set channels when Flow Director filters are active\n"); + return -EOPNOTSUPP; + } + curr_combined = ice_get_combined_cnt(vsi); /* these checks are for cases where user didn't specify a particular @@ -3731,10 +3759,10 @@ ice_get_module_eeprom(struct net_device *netdev, struct ice_hw *hw = &pf->hw; enum ice_status status; bool is_sfp = false; + unsigned int i; u16 offset = 0; u8 value = 0; u8 page = 0; - int i; status = ice_aq_sff_eeprom(hw, 0, addr, offset, page, 0, &value, 1, 0, NULL); diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c new file mode 100644 index 000000000000..42803fc0ed18 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c @@ -0,0 +1,1672 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2018-2020, Intel Corporation. */ + +/* flow director ethtool support for ice */ + +#include "ice.h" +#include "ice_lib.h" +#include "ice_flow.h" + +static struct in6_addr full_ipv6_addr_mask = { + .in6_u = { + .u6_addr8 = { + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + } + } +}; + +static struct in6_addr zero_ipv6_addr_mask = { + .in6_u = { + .u6_addr8 = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + } + } +}; + +/* calls to ice_flow_add_prof require the number of segments in the array + * for segs_cnt. In this code that is one more than the index. + */ +#define TNL_SEG_CNT(_TNL_) ((_TNL_) + 1) + +/** + * ice_fltr_to_ethtool_flow - convert filter type values to ethtool + * flow type values + * @flow: filter type to be converted + * + * Returns the corresponding ethtool flow type. + */ +static int ice_fltr_to_ethtool_flow(enum ice_fltr_ptype flow) +{ + switch (flow) { + case ICE_FLTR_PTYPE_NONF_IPV4_TCP: + return TCP_V4_FLOW; + case ICE_FLTR_PTYPE_NONF_IPV4_UDP: + return UDP_V4_FLOW; + case ICE_FLTR_PTYPE_NONF_IPV4_SCTP: + return SCTP_V4_FLOW; + case ICE_FLTR_PTYPE_NONF_IPV4_OTHER: + return IPV4_USER_FLOW; + case ICE_FLTR_PTYPE_NONF_IPV6_TCP: + return TCP_V6_FLOW; + case ICE_FLTR_PTYPE_NONF_IPV6_UDP: + return UDP_V6_FLOW; + case ICE_FLTR_PTYPE_NONF_IPV6_SCTP: + return SCTP_V6_FLOW; + case ICE_FLTR_PTYPE_NONF_IPV6_OTHER: + return IPV6_USER_FLOW; + default: + /* 0 is undefined ethtool flow */ + return 0; + } +} + +/** + * ice_ethtool_flow_to_fltr - convert ethtool flow type to filter enum + * @eth: Ethtool flow type to be converted + * + * Returns flow enum + */ +static enum ice_fltr_ptype ice_ethtool_flow_to_fltr(int eth) +{ + switch (eth) { + case TCP_V4_FLOW: + return ICE_FLTR_PTYPE_NONF_IPV4_TCP; + case UDP_V4_FLOW: + return ICE_FLTR_PTYPE_NONF_IPV4_UDP; + case SCTP_V4_FLOW: + return ICE_FLTR_PTYPE_NONF_IPV4_SCTP; + case IPV4_USER_FLOW: + return ICE_FLTR_PTYPE_NONF_IPV4_OTHER; + case TCP_V6_FLOW: + return ICE_FLTR_PTYPE_NONF_IPV6_TCP; + case UDP_V6_FLOW: + return ICE_FLTR_PTYPE_NONF_IPV6_UDP; + case SCTP_V6_FLOW: + return ICE_FLTR_PTYPE_NONF_IPV6_SCTP; + case IPV6_USER_FLOW: + return ICE_FLTR_PTYPE_NONF_IPV6_OTHER; + default: + return ICE_FLTR_PTYPE_NONF_NONE; + } +} + +/** + * ice_is_mask_valid - check mask field set + * @mask: full mask to check + * @field: field for which mask should be valid + * + * If the mask is fully set return true. If it is not valid for field return + * false. 
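ice_is_mask_valid(), defined just below, reduces to a single identity: a mask covers a field exactly when (mask & field) == field. A tiny self-contained check of both the accepting and rejecting cases.

#include <stdio.h>
#include <stdint.h>

/* A mask is usable for a field only if every bit of the field is
 * covered: (mask & field) == field.
 */
static int is_mask_valid(uint64_t mask, uint64_t field)
{
	return (mask & field) == field;
}

int main(void)
{
	uint64_t field = 0x00000000FFFFFFFFull;	/* 32-bit field */

	printf("%d\n", is_mask_valid(0xFFFFFFFFFFFFFFFFull, field)); /* 1 */
	printf("%d\n", is_mask_valid(0x00000000FFFF0000ull, field)); /* 0 */
	return 0;
}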
+ */ +static bool ice_is_mask_valid(u64 mask, u64 field) +{ + return (mask & field) == field; +} + +/** + * ice_get_ethtool_fdir_entry - fill ethtool structure with fdir filter data + * @hw: hardware structure that contains filter list + * @cmd: ethtool command data structure to receive the filter data + * + * Returns 0 on success and -EINVAL on failure + */ +int ice_get_ethtool_fdir_entry(struct ice_hw *hw, struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp; + struct ice_fdir_fltr *rule; + int ret = 0; + u16 idx; + + fsp = (struct ethtool_rx_flow_spec *)&cmd->fs; + + mutex_lock(&hw->fdir_fltr_lock); + + rule = ice_fdir_find_fltr_by_idx(hw, fsp->location); + + if (!rule || fsp->location != rule->fltr_id) { + ret = -EINVAL; + goto release_lock; + } + + fsp->flow_type = ice_fltr_to_ethtool_flow(rule->flow_type); + + memset(&fsp->m_u, 0, sizeof(fsp->m_u)); + memset(&fsp->m_ext, 0, sizeof(fsp->m_ext)); + + switch (fsp->flow_type) { + case IPV4_USER_FLOW: + fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; + fsp->h_u.usr_ip4_spec.proto = 0; + fsp->h_u.usr_ip4_spec.l4_4_bytes = rule->ip.v4.l4_header; + fsp->h_u.usr_ip4_spec.tos = rule->ip.v4.tos; + fsp->h_u.usr_ip4_spec.ip4src = rule->ip.v4.src_ip; + fsp->h_u.usr_ip4_spec.ip4dst = rule->ip.v4.dst_ip; + fsp->m_u.usr_ip4_spec.ip4src = rule->mask.v4.src_ip; + fsp->m_u.usr_ip4_spec.ip4dst = rule->mask.v4.dst_ip; + fsp->m_u.usr_ip4_spec.ip_ver = 0xFF; + fsp->m_u.usr_ip4_spec.proto = 0; + fsp->m_u.usr_ip4_spec.l4_4_bytes = rule->mask.v4.l4_header; + fsp->m_u.usr_ip4_spec.tos = rule->mask.v4.tos; + break; + case TCP_V4_FLOW: + case UDP_V4_FLOW: + case SCTP_V4_FLOW: + fsp->h_u.tcp_ip4_spec.psrc = rule->ip.v4.src_port; + fsp->h_u.tcp_ip4_spec.pdst = rule->ip.v4.dst_port; + fsp->h_u.tcp_ip4_spec.ip4src = rule->ip.v4.src_ip; + fsp->h_u.tcp_ip4_spec.ip4dst = rule->ip.v4.dst_ip; + fsp->m_u.tcp_ip4_spec.psrc = rule->mask.v4.src_port; + fsp->m_u.tcp_ip4_spec.pdst = rule->mask.v4.dst_port; + fsp->m_u.tcp_ip4_spec.ip4src = rule->mask.v4.src_ip; + fsp->m_u.tcp_ip4_spec.ip4dst = rule->mask.v4.dst_ip; + break; + case IPV6_USER_FLOW: + fsp->h_u.usr_ip6_spec.l4_4_bytes = rule->ip.v6.l4_header; + fsp->h_u.usr_ip6_spec.tclass = rule->ip.v6.tc; + fsp->h_u.usr_ip6_spec.l4_proto = rule->ip.v6.proto; + memcpy(fsp->h_u.tcp_ip6_spec.ip6src, rule->ip.v6.src_ip, + sizeof(struct in6_addr)); + memcpy(fsp->h_u.tcp_ip6_spec.ip6dst, rule->ip.v6.dst_ip, + sizeof(struct in6_addr)); + memcpy(fsp->m_u.tcp_ip6_spec.ip6src, rule->mask.v6.src_ip, + sizeof(struct in6_addr)); + memcpy(fsp->m_u.tcp_ip6_spec.ip6dst, rule->mask.v6.dst_ip, + sizeof(struct in6_addr)); + fsp->m_u.usr_ip6_spec.l4_4_bytes = rule->mask.v6.l4_header; + fsp->m_u.usr_ip6_spec.tclass = rule->mask.v6.tc; + fsp->m_u.usr_ip6_spec.l4_proto = rule->mask.v6.proto; + break; + case TCP_V6_FLOW: + case UDP_V6_FLOW: + case SCTP_V6_FLOW: + memcpy(fsp->h_u.tcp_ip6_spec.ip6src, rule->ip.v6.src_ip, + sizeof(struct in6_addr)); + memcpy(fsp->h_u.tcp_ip6_spec.ip6dst, rule->ip.v6.dst_ip, + sizeof(struct in6_addr)); + fsp->h_u.tcp_ip6_spec.psrc = rule->ip.v6.src_port; + fsp->h_u.tcp_ip6_spec.pdst = rule->ip.v6.dst_port; + memcpy(fsp->m_u.tcp_ip6_spec.ip6src, + rule->mask.v6.src_ip, + sizeof(struct in6_addr)); + memcpy(fsp->m_u.tcp_ip6_spec.ip6dst, + rule->mask.v6.dst_ip, + sizeof(struct in6_addr)); + fsp->m_u.tcp_ip6_spec.psrc = rule->mask.v6.src_port; + fsp->m_u.tcp_ip6_spec.pdst = rule->mask.v6.dst_port; + fsp->h_u.tcp_ip6_spec.tclass = rule->ip.v6.tc; + fsp->m_u.tcp_ip6_spec.tclass = rule->mask.v6.tc; + break; + default: + 
break; + } + + if (rule->dest_ctl == ICE_FLTR_PRGM_DESC_DEST_DROP_PKT) + fsp->ring_cookie = RX_CLS_FLOW_DISC; + else + fsp->ring_cookie = rule->q_index; + + idx = ice_ethtool_flow_to_fltr(fsp->flow_type); + if (idx == ICE_FLTR_PTYPE_NONF_NONE) { + dev_err(ice_hw_to_dev(hw), "Missing input index for flow_type %d\n", + rule->flow_type); + ret = -EINVAL; + } + +release_lock: + mutex_unlock(&hw->fdir_fltr_lock); + return ret; +} + +/** + * ice_get_fdir_fltr_ids - fill buffer with filter IDs of active filters + * @hw: hardware structure containing the filter list + * @cmd: ethtool command data structure + * @rule_locs: ethtool array passed in from OS to receive filter IDs + * + * Returns 0 as expected for success by ethtool + */ +int +ice_get_fdir_fltr_ids(struct ice_hw *hw, struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct ice_fdir_fltr *f_rule; + unsigned int cnt = 0; + int val = 0; + + /* report total rule count */ + cmd->data = ice_get_fdir_cnt_all(hw); + + mutex_lock(&hw->fdir_fltr_lock); + + list_for_each_entry(f_rule, &hw->fdir_list_head, fltr_node) { + if (cnt == cmd->rule_cnt) { + val = -EMSGSIZE; + goto release_lock; + } + rule_locs[cnt] = f_rule->fltr_id; + cnt++; + } + +release_lock: + mutex_unlock(&hw->fdir_fltr_lock); + if (!val) + cmd->rule_cnt = cnt; + return val; +} + +/** + * ice_fdir_get_hw_prof - return the ice_fd_hw_proc associated with a flow + * @hw: hardware structure containing the filter list + * @blk: hardware block + * @flow: FDir flow type to release + */ +static struct ice_fd_hw_prof * +ice_fdir_get_hw_prof(struct ice_hw *hw, enum ice_block blk, int flow) +{ + if (blk == ICE_BLK_FD && hw->fdir_prof) + return hw->fdir_prof[flow]; + + return NULL; +} + +/** + * ice_fdir_erase_flow_from_hw - remove a flow from the HW profile tables + * @hw: hardware structure containing the filter list + * @blk: hardware block + * @flow: FDir flow type to release + */ +static void +ice_fdir_erase_flow_from_hw(struct ice_hw *hw, enum ice_block blk, int flow) +{ + struct ice_fd_hw_prof *prof = ice_fdir_get_hw_prof(hw, blk, flow); + int tun; + + if (!prof) + return; + + for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) { + u64 prof_id; + int j; + + prof_id = flow + tun * ICE_FLTR_PTYPE_MAX; + for (j = 0; j < prof->cnt; j++) { + u16 vsi_num; + + if (!prof->entry_h[j][tun] || !prof->vsi_h[j]) + continue; + vsi_num = ice_get_hw_vsi_num(hw, prof->vsi_h[j]); + ice_rem_prof_id_flow(hw, blk, vsi_num, prof_id); + ice_flow_rem_entry(hw, blk, prof->entry_h[j][tun]); + prof->entry_h[j][tun] = 0; + } + ice_flow_rem_prof(hw, blk, prof_id); + } +} + +/** + * ice_fdir_rem_flow - release the ice_flow structures for a filter type + * @hw: hardware structure containing the filter list + * @blk: hardware block + * @flow_type: FDir flow type to release + */ +static void +ice_fdir_rem_flow(struct ice_hw *hw, enum ice_block blk, + enum ice_fltr_ptype flow_type) +{ + int flow = (int)flow_type & ~FLOW_EXT; + struct ice_fd_hw_prof *prof; + int tun, i; + + prof = ice_fdir_get_hw_prof(hw, blk, flow); + if (!prof) + return; + + ice_fdir_erase_flow_from_hw(hw, blk, flow); + for (i = 0; i < prof->cnt; i++) + prof->vsi_h[i] = 0; + for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) { + if (!prof->fdir_seg[tun]) + continue; + devm_kfree(ice_hw_to_dev(hw), prof->fdir_seg[tun]); + prof->fdir_seg[tun] = NULL; + } + prof->cnt = 0; +} + +/** + * ice_fdir_release_flows - release all flows in use for later replay + * @hw: pointer to HW instance + */ +void ice_fdir_release_flows(struct ice_hw *hw) +{ + int flow; + + /* release 
Flow Director HW table entries */ + for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) + ice_fdir_erase_flow_from_hw(hw, ICE_BLK_FD, flow); +} + +/** + * ice_fdir_replay_flows - replay HW Flow Director filter info + * @hw: pointer to HW instance + */ +void ice_fdir_replay_flows(struct ice_hw *hw) +{ + int flow; + + for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) { + int tun; + + if (!hw->fdir_prof[flow] || !hw->fdir_prof[flow]->cnt) + continue; + for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) { + struct ice_flow_prof *hw_prof; + struct ice_fd_hw_prof *prof; + u64 prof_id; + int j; + + prof = hw->fdir_prof[flow]; + prof_id = flow + tun * ICE_FLTR_PTYPE_MAX; + ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, + prof->fdir_seg[tun], TNL_SEG_CNT(tun), + &hw_prof); + for (j = 0; j < prof->cnt; j++) { + enum ice_flow_priority prio; + u64 entry_h = 0; + int err; + + prio = ICE_FLOW_PRIO_NORMAL; + err = ice_flow_add_entry(hw, ICE_BLK_FD, + prof_id, + prof->vsi_h[0], + prof->vsi_h[j], + prio, prof->fdir_seg, + &entry_h); + if (err) { + dev_err(ice_hw_to_dev(hw), "Could not replay Flow Director, flow type %d\n", + flow); + continue; + } + prof->entry_h[j][tun] = entry_h; + } + } + } +} + +/** + * ice_parse_rx_flow_user_data - deconstruct user-defined data + * @fsp: pointer to ethtool Rx flow specification + * @data: pointer to userdef data structure for storage + * + * Returns 0 on success, negative error value on failure + */ +static int +ice_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp, + struct ice_rx_flow_userdef *data) +{ + u64 value, mask; + + memset(data, 0, sizeof(*data)); + if (!(fsp->flow_type & FLOW_EXT)) + return 0; + + value = be64_to_cpu(*((__force __be64 *)fsp->h_ext.data)); + mask = be64_to_cpu(*((__force __be64 *)fsp->m_ext.data)); + if (!mask) + return 0; + +#define ICE_USERDEF_FLEX_WORD_M GENMASK_ULL(15, 0) +#define ICE_USERDEF_FLEX_OFFS_S 16 +#define ICE_USERDEF_FLEX_OFFS_M GENMASK_ULL(31, ICE_USERDEF_FLEX_OFFS_S) +#define ICE_USERDEF_FLEX_FLTR_M GENMASK_ULL(31, 0) + + /* 0x1fe is the maximum value for offsets stored in the internal + * filtering tables. + */ +#define ICE_USERDEF_FLEX_MAX_OFFS_VAL 0x1fe + + if (!ice_is_mask_valid(mask, ICE_USERDEF_FLEX_FLTR_M) || + value > ICE_USERDEF_FLEX_FLTR_M) + return -EINVAL; + + data->flex_word = value & ICE_USERDEF_FLEX_WORD_M; + data->flex_offset = (value & ICE_USERDEF_FLEX_OFFS_M) >> + ICE_USERDEF_FLEX_OFFS_S; + if (data->flex_offset > ICE_USERDEF_FLEX_MAX_OFFS_VAL) + return -EINVAL; + + data->flex_fltr = true; + + return 0; +} + +/** + * ice_fdir_num_avail_fltr - return the number of unused flow director filters + * @hw: pointer to hardware structure + * @vsi: software VSI structure + * + * There are 2 filter pools: guaranteed and best effort(shared). Each VSI can + * use filters from either pool. The guaranteed pool is divided between VSIs. + * The best effort filter pool is common to all VSIs and is a device shared + * resource pool. The number of filters available to this VSI is the sum of + * the VSIs guaranteed filter pool and the global available best effort + * filter pool. 
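The filter budget described above is the sum of what remains in two pools: the VSI's slice of the guaranteed pool and the device-wide best effort pool, each reduced by its in-use count read back from hardware. A minimal model of that arithmetic, with invented counter values standing in for the VSIQF_FD_CNT/GLQF_FD_CNT readbacks.

#include <stdio.h>

/* Available filters = guaranteed remainder + best-effort remainder. */
static int avail_fltrs(int vsi_guar, int vsi_guar_used,
		       int dev_be, int dev_be_used)
{
	return (vsi_guar - vsi_guar_used) + (dev_be - dev_be_used);
}

int main(void)
{
	/* 64 guaranteed to this VSI (10 used), 2048 shared (500 used) */
	printf("available: %d\n", avail_fltrs(64, 10, 2048, 500));
	return 0;
}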
+ * + * Returns the number of available flow director filters to this VSI + */ +static int ice_fdir_num_avail_fltr(struct ice_hw *hw, struct ice_vsi *vsi) +{ + u16 vsi_num = ice_get_hw_vsi_num(hw, vsi->idx); + u16 num_guar; + u16 num_be; + + /* total guaranteed filters assigned to this VSI */ + num_guar = vsi->num_gfltr; + + /* minus the guaranteed filters programmed by this VSI */ + num_guar -= (rd32(hw, VSIQF_FD_CNT(vsi_num)) & + VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S; + + /* total global best effort filters */ + num_be = hw->func_caps.fd_fltr_best_effort; + + /* minus the global best effort filters programmed */ + num_be -= (rd32(hw, GLQF_FD_CNT) & GLQF_FD_CNT_FD_BCNT_M) >> + GLQF_FD_CNT_FD_BCNT_S; + + return num_guar + num_be; +} + +/** + * ice_fdir_alloc_flow_prof - allocate FDir flow profile structure(s) + * @hw: HW structure containing the FDir flow profile structure(s) + * @flow: flow type to allocate the flow profile for + * + * Allocate the fdir_prof and fdir_prof[flow] if not already created. Return 0 + * on success and negative on error. + */ +static int +ice_fdir_alloc_flow_prof(struct ice_hw *hw, enum ice_fltr_ptype flow) +{ + if (!hw) + return -EINVAL; + + if (!hw->fdir_prof) { + hw->fdir_prof = devm_kcalloc(ice_hw_to_dev(hw), + ICE_FLTR_PTYPE_MAX, + sizeof(*hw->fdir_prof), + GFP_KERNEL); + if (!hw->fdir_prof) + return -ENOMEM; + } + + if (!hw->fdir_prof[flow]) { + hw->fdir_prof[flow] = devm_kzalloc(ice_hw_to_dev(hw), + sizeof(**hw->fdir_prof), + GFP_KERNEL); + if (!hw->fdir_prof[flow]) + return -ENOMEM; + } + + return 0; +} + +/** + * ice_fdir_set_hw_fltr_rule - Configure HW tables to generate a FDir rule + * @pf: pointer to the PF structure + * @seg: protocol header description pointer + * @flow: filter enum + * @tun: FDir segment to program + */ +static int +ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg, + enum ice_fltr_ptype flow, enum ice_fd_hw_seg tun) +{ + struct device *dev = ice_pf_to_dev(pf); + struct ice_vsi *main_vsi, *ctrl_vsi; + struct ice_flow_seg_info *old_seg; + struct ice_flow_prof *prof = NULL; + struct ice_fd_hw_prof *hw_prof; + struct ice_hw *hw = &pf->hw; + enum ice_status status; + u64 entry1_h = 0; + u64 entry2_h = 0; + u64 prof_id; + int err; + + main_vsi = ice_get_main_vsi(pf); + if (!main_vsi) + return -EINVAL; + + ctrl_vsi = ice_get_ctrl_vsi(pf); + if (!ctrl_vsi) + return -EINVAL; + + err = ice_fdir_alloc_flow_prof(hw, flow); + if (err) + return err; + + hw_prof = hw->fdir_prof[flow]; + old_seg = hw_prof->fdir_seg[tun]; + if (old_seg) { + /* This flow_type already has a changed input set. + * If it matches the requested input set then we are + * done. Or, if it's different then it's an error. + */ + if (!memcmp(old_seg, seg, sizeof(*seg))) + return -EEXIST; + + /* if there are FDir filters using this flow, + * then return an error. + */ + if (hw->fdir_fltr_cnt[flow]) { + dev_err(dev, "Failed to add filter. Flow director filters on each port must have the same input set.\n"); + return -EINVAL; + } + + if (ice_is_arfs_using_perfect_flow(hw, flow)) { + dev_err(dev, "aRFS using perfect flow type %d, cannot change input set\n", + flow); + return -EINVAL; + } + + /* remove HW filter definition */ + ice_fdir_rem_flow(hw, ICE_BLK_FD, flow); + } + + /* Adding a profile, but there is only one header supported. + * That is, the final parameters are one header (segment), no + * actions (NULL), and an action count of zero.
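ice_fdir_set_hw_fltr_rule() derives its profile ID as flow + tun * ICE_FLTR_PTYPE_MAX, which gives the tunneled variant of every flow type its own ID range past the non-tunneled one. A compact sketch of that namespacing; the flow-type count here is a placeholder, not the real ICE_FLTR_PTYPE_MAX value.

#include <stdio.h>

#define FLTR_PTYPE_MAX	18	/* hypothetical count of flow types */

enum fd_seg { SEG_NON_TUN, SEG_TUN, SEG_MAX };

/* Each (flow type, tunnel segment) pair gets its own profile ID by
 * offsetting tunneled profiles past the whole non-tunneled range.
 */
static unsigned long prof_id(int flow, enum fd_seg tun)
{
	return flow + (unsigned long)tun * FLTR_PTYPE_MAX;
}

int main(void)
{
	int flow = 3;	/* e.g. an IPv4/TCP flow type */

	printf("outer prof %lu, tunnel prof %lu\n",
	       prof_id(flow, SEG_NON_TUN), prof_id(flow, SEG_TUN));
	return 0;
}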
+ */
+ prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
+ status = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg,
+ TNL_SEG_CNT(tun), &prof);
+ if (status)
+ return ice_status_to_errno(status);
+ status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx,
+ main_vsi->idx, ICE_FLOW_PRIO_NORMAL,
+ seg, &entry1_h);
+ if (status) {
+ err = ice_status_to_errno(status);
+ goto err_prof;
+ }
+ status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx,
+ ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
+ seg, &entry2_h);
+ if (status) {
+ err = ice_status_to_errno(status);
+ goto err_entry;
+ }
+
+ hw_prof->fdir_seg[tun] = seg;
+ hw_prof->entry_h[0][tun] = entry1_h;
+ hw_prof->entry_h[1][tun] = entry2_h;
+ hw_prof->vsi_h[0] = main_vsi->idx;
+ hw_prof->vsi_h[1] = ctrl_vsi->idx;
+ if (!hw_prof->cnt)
+ hw_prof->cnt = 2;
+
+ return 0;
+
+err_entry:
+ ice_rem_prof_id_flow(hw, ICE_BLK_FD,
+ ice_get_hw_vsi_num(hw, main_vsi->idx), prof_id);
+ ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h);
+err_prof:
+ ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
+ dev_err(dev, "Failed to add filter. Flow director filters on each port must have the same input set.\n");
+
+ return err;
+}
+
+/**
+ * ice_set_init_fdir_seg
+ * @seg: flow segment for programming
+ * @l3_proto: ICE_FLOW_SEG_HDR_IPV4 or ICE_FLOW_SEG_HDR_IPV6
+ * @l4_proto: ICE_FLOW_SEG_HDR_TCP or ICE_FLOW_SEG_HDR_UDP
+ *
+ * Set the configuration for perfect filters to the provided flow segment for
+ * programming the HW filter. This is to be called only when initializing
+ * filters as this function assumes no filters exist.
+ */
+static int
+ice_set_init_fdir_seg(struct ice_flow_seg_info *seg,
+ enum ice_flow_seg_hdr l3_proto,
+ enum ice_flow_seg_hdr l4_proto)
+{
+ enum ice_flow_field src_addr, dst_addr, src_port, dst_port;
+
+ if (!seg)
+ return -EINVAL;
+
+ if (l3_proto == ICE_FLOW_SEG_HDR_IPV4) {
+ src_addr = ICE_FLOW_FIELD_IDX_IPV4_SA;
+ dst_addr = ICE_FLOW_FIELD_IDX_IPV4_DA;
+ } else if (l3_proto == ICE_FLOW_SEG_HDR_IPV6) {
+ src_addr = ICE_FLOW_FIELD_IDX_IPV6_SA;
+ dst_addr = ICE_FLOW_FIELD_IDX_IPV6_DA;
+ } else {
+ return -EINVAL;
+ }
+
+ if (l4_proto == ICE_FLOW_SEG_HDR_TCP) {
+ src_port = ICE_FLOW_FIELD_IDX_TCP_SRC_PORT;
+ dst_port = ICE_FLOW_FIELD_IDX_TCP_DST_PORT;
+ } else if (l4_proto == ICE_FLOW_SEG_HDR_UDP) {
+ src_port = ICE_FLOW_FIELD_IDX_UDP_SRC_PORT;
+ dst_port = ICE_FLOW_FIELD_IDX_UDP_DST_PORT;
+ } else {
+ return -EINVAL;
+ }
+
+ ICE_FLOW_SET_HDRS(seg, l3_proto | l4_proto);
+
+ /* IP source address */
+ ice_flow_set_fld(seg, src_addr, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);
+
+ /* IP destination address */
+ ice_flow_set_fld(seg, dst_addr, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);
+
+ /* Layer 4 source port */
+ ice_flow_set_fld(seg, src_port, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);
+
+ /* Layer 4 destination port */
+ ice_flow_set_fld(seg, dst_port, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);
+
+ return 0;
+}
+
+/**
+ * ice_create_init_fdir_rule
+ * @pf: PF structure
+ * @flow: filter enum
+ *
+ * Returns 0 on success or an error value.
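+ *
+ * A minimal usage sketch: this is invoked once per default flow type at
+ * init time, e.g.
+ *
+ *	err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV4_TCP);
+ *
+ * (see ice_fdir_create_dflt_rules() later in this file).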
+ */ +static int +ice_create_init_fdir_rule(struct ice_pf *pf, enum ice_fltr_ptype flow) +{ + struct ice_flow_seg_info *seg, *tun_seg; + struct device *dev = ice_pf_to_dev(pf); + struct ice_hw *hw = &pf->hw; + int ret; + + /* if there is already a filter rule for kind return -EINVAL */ + if (hw->fdir_prof && hw->fdir_prof[flow] && + hw->fdir_prof[flow]->fdir_seg[0]) + return -EINVAL; + + seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL); + if (!seg) + return -ENOMEM; + + tun_seg = devm_kzalloc(dev, sizeof(*seg) * ICE_FD_HW_SEG_MAX, + GFP_KERNEL); + if (!tun_seg) { + devm_kfree(dev, seg); + return -ENOMEM; + } + + if (flow == ICE_FLTR_PTYPE_NONF_IPV4_TCP) + ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV4, + ICE_FLOW_SEG_HDR_TCP); + else if (flow == ICE_FLTR_PTYPE_NONF_IPV4_UDP) + ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV4, + ICE_FLOW_SEG_HDR_UDP); + else if (flow == ICE_FLTR_PTYPE_NONF_IPV6_TCP) + ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV6, + ICE_FLOW_SEG_HDR_TCP); + else if (flow == ICE_FLTR_PTYPE_NONF_IPV6_UDP) + ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV6, + ICE_FLOW_SEG_HDR_UDP); + else + ret = -EINVAL; + if (ret) + goto err_exit; + + /* add filter for outer headers */ + ret = ice_fdir_set_hw_fltr_rule(pf, seg, flow, ICE_FD_HW_SEG_NON_TUN); + if (ret) + /* could not write filter, free memory */ + goto err_exit; + + /* make tunneled filter HW entries if possible */ + memcpy(&tun_seg[1], seg, sizeof(*seg)); + ret = ice_fdir_set_hw_fltr_rule(pf, tun_seg, flow, ICE_FD_HW_SEG_TUN); + if (ret) + /* could not write tunnel filter, but outer header filter + * exists + */ + devm_kfree(dev, tun_seg); + + set_bit(flow, hw->fdir_perfect_fltr); + return ret; +err_exit: + devm_kfree(dev, tun_seg); + devm_kfree(dev, seg); + + return -EOPNOTSUPP; +} + +/** + * ice_set_fdir_ip4_seg + * @seg: flow segment for programming + * @tcp_ip4_spec: mask data from ethtool + * @l4_proto: Layer 4 protocol to program + * @perfect_fltr: only valid on success; returns true if perfect filter, + * false if not + * + * Set the mask data into the flow segment to be used to program HW + * table based on provided L4 protocol for IPv4 + */ +static int +ice_set_fdir_ip4_seg(struct ice_flow_seg_info *seg, + struct ethtool_tcpip4_spec *tcp_ip4_spec, + enum ice_flow_seg_hdr l4_proto, bool *perfect_fltr) +{ + enum ice_flow_field src_port, dst_port; + + /* make sure we don't have any empty rule */ + if (!tcp_ip4_spec->psrc && !tcp_ip4_spec->ip4src && + !tcp_ip4_spec->pdst && !tcp_ip4_spec->ip4dst) + return -EINVAL; + + /* filtering on TOS not supported */ + if (tcp_ip4_spec->tos) + return -EOPNOTSUPP; + + if (l4_proto == ICE_FLOW_SEG_HDR_TCP) { + src_port = ICE_FLOW_FIELD_IDX_TCP_SRC_PORT; + dst_port = ICE_FLOW_FIELD_IDX_TCP_DST_PORT; + } else if (l4_proto == ICE_FLOW_SEG_HDR_UDP) { + src_port = ICE_FLOW_FIELD_IDX_UDP_SRC_PORT; + dst_port = ICE_FLOW_FIELD_IDX_UDP_DST_PORT; + } else if (l4_proto == ICE_FLOW_SEG_HDR_SCTP) { + src_port = ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT; + dst_port = ICE_FLOW_FIELD_IDX_SCTP_DST_PORT; + } else { + return -EOPNOTSUPP; + } + + *perfect_fltr = true; + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 | l4_proto); + + /* IP source address */ + if (tcp_ip4_spec->ip4src == htonl(0xFFFFFFFF)) + ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_SA, + ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, + ICE_FLOW_FLD_OFF_INVAL, false); + else if (!tcp_ip4_spec->ip4src) + *perfect_fltr = false; + else + return -EOPNOTSUPP; + + /* IP destination address */ + if (tcp_ip4_spec->ip4dst == 
htonl(0xFFFFFFFF)) + ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_DA, + ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, + ICE_FLOW_FLD_OFF_INVAL, false); + else if (!tcp_ip4_spec->ip4dst) + *perfect_fltr = false; + else + return -EOPNOTSUPP; + + /* Layer 4 source port */ + if (tcp_ip4_spec->psrc == htons(0xFFFF)) + ice_flow_set_fld(seg, src_port, ICE_FLOW_FLD_OFF_INVAL, + ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, + false); + else if (!tcp_ip4_spec->psrc) + *perfect_fltr = false; + else + return -EOPNOTSUPP; + + /* Layer 4 destination port */ + if (tcp_ip4_spec->pdst == htons(0xFFFF)) + ice_flow_set_fld(seg, dst_port, ICE_FLOW_FLD_OFF_INVAL, + ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, + false); + else if (!tcp_ip4_spec->pdst) + *perfect_fltr = false; + else + return -EOPNOTSUPP; + + return 0; +} + +/** + * ice_set_fdir_ip4_usr_seg + * @seg: flow segment for programming + * @usr_ip4_spec: ethtool userdef packet offset + * @perfect_fltr: only valid on success; returns true if perfect filter, + * false if not + * + * Set the offset data into the flow segment to be used to program HW + * table for IPv4 + */ +static int +ice_set_fdir_ip4_usr_seg(struct ice_flow_seg_info *seg, + struct ethtool_usrip4_spec *usr_ip4_spec, + bool *perfect_fltr) +{ + /* first 4 bytes of Layer 4 header */ + if (usr_ip4_spec->l4_4_bytes) + return -EINVAL; + if (usr_ip4_spec->tos) + return -EINVAL; + if (usr_ip4_spec->ip_ver) + return -EINVAL; + /* Filtering on Layer 4 protocol not supported */ + if (usr_ip4_spec->proto) + return -EOPNOTSUPP; + /* empty rules are not valid */ + if (!usr_ip4_spec->ip4src && !usr_ip4_spec->ip4dst) + return -EINVAL; + + *perfect_fltr = true; + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4); + + /* IP source address */ + if (usr_ip4_spec->ip4src == htonl(0xFFFFFFFF)) + ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_SA, + ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, + ICE_FLOW_FLD_OFF_INVAL, false); + else if (!usr_ip4_spec->ip4src) + *perfect_fltr = false; + else + return -EOPNOTSUPP; + + /* IP destination address */ + if (usr_ip4_spec->ip4dst == htonl(0xFFFFFFFF)) + ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_DA, + ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, + ICE_FLOW_FLD_OFF_INVAL, false); + else if (!usr_ip4_spec->ip4dst) + *perfect_fltr = false; + else + return -EOPNOTSUPP; + + return 0; +} + +/** + * ice_set_fdir_ip6_seg + * @seg: flow segment for programming + * @tcp_ip6_spec: mask data from ethtool + * @l4_proto: Layer 4 protocol to program + * @perfect_fltr: only valid on success; returns true if perfect filter, + * false if not + * + * Set the mask data into the flow segment to be used to program HW + * table based on provided L4 protocol for IPv6 + */ +static int +ice_set_fdir_ip6_seg(struct ice_flow_seg_info *seg, + struct ethtool_tcpip6_spec *tcp_ip6_spec, + enum ice_flow_seg_hdr l4_proto, bool *perfect_fltr) +{ + enum ice_flow_field src_port, dst_port; + + /* make sure we don't have any empty rule */ + if (!memcmp(tcp_ip6_spec->ip6src, &zero_ipv6_addr_mask, + sizeof(struct in6_addr)) && + !memcmp(tcp_ip6_spec->ip6dst, &zero_ipv6_addr_mask, + sizeof(struct in6_addr)) && + !tcp_ip6_spec->psrc && !tcp_ip6_spec->pdst) + return -EINVAL; + + /* filtering on TC not supported */ + if (tcp_ip6_spec->tclass) + return -EOPNOTSUPP; + + if (l4_proto == ICE_FLOW_SEG_HDR_TCP) { + src_port = ICE_FLOW_FIELD_IDX_TCP_SRC_PORT; + dst_port = ICE_FLOW_FIELD_IDX_TCP_DST_PORT; + } else if (l4_proto == ICE_FLOW_SEG_HDR_UDP) { + src_port = ICE_FLOW_FIELD_IDX_UDP_SRC_PORT; + dst_port 
= ICE_FLOW_FIELD_IDX_UDP_DST_PORT; + } else if (l4_proto == ICE_FLOW_SEG_HDR_SCTP) { + src_port = ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT; + dst_port = ICE_FLOW_FIELD_IDX_SCTP_DST_PORT; + } else { + return -EINVAL; + } + + *perfect_fltr = true; + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 | l4_proto); + + if (!memcmp(tcp_ip6_spec->ip6src, &full_ipv6_addr_mask, + sizeof(struct in6_addr))) + ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV6_SA, + ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, + ICE_FLOW_FLD_OFF_INVAL, false); + else if (!memcmp(tcp_ip6_spec->ip6src, &zero_ipv6_addr_mask, + sizeof(struct in6_addr))) + *perfect_fltr = false; + else + return -EOPNOTSUPP; + + if (!memcmp(tcp_ip6_spec->ip6dst, &full_ipv6_addr_mask, + sizeof(struct in6_addr))) + ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV6_DA, + ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, + ICE_FLOW_FLD_OFF_INVAL, false); + else if (!memcmp(tcp_ip6_spec->ip6dst, &zero_ipv6_addr_mask, + sizeof(struct in6_addr))) + *perfect_fltr = false; + else + return -EOPNOTSUPP; + + /* Layer 4 source port */ + if (tcp_ip6_spec->psrc == htons(0xFFFF)) + ice_flow_set_fld(seg, src_port, ICE_FLOW_FLD_OFF_INVAL, + ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, + false); + else if (!tcp_ip6_spec->psrc) + *perfect_fltr = false; + else + return -EOPNOTSUPP; + + /* Layer 4 destination port */ + if (tcp_ip6_spec->pdst == htons(0xFFFF)) + ice_flow_set_fld(seg, dst_port, ICE_FLOW_FLD_OFF_INVAL, + ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, + false); + else if (!tcp_ip6_spec->pdst) + *perfect_fltr = false; + else + return -EOPNOTSUPP; + + return 0; +} + +/** + * ice_set_fdir_ip6_usr_seg + * @seg: flow segment for programming + * @usr_ip6_spec: ethtool userdef packet offset + * @perfect_fltr: only valid on success; returns true if perfect filter, + * false if not + * + * Set the offset data into the flow segment to be used to program HW + * table for IPv6 + */ +static int +ice_set_fdir_ip6_usr_seg(struct ice_flow_seg_info *seg, + struct ethtool_usrip6_spec *usr_ip6_spec, + bool *perfect_fltr) +{ + /* filtering on Layer 4 bytes not supported */ + if (usr_ip6_spec->l4_4_bytes) + return -EOPNOTSUPP; + /* filtering on TC not supported */ + if (usr_ip6_spec->tclass) + return -EOPNOTSUPP; + /* filtering on Layer 4 protocol not supported */ + if (usr_ip6_spec->l4_proto) + return -EOPNOTSUPP; + /* empty rules are not valid */ + if (!memcmp(usr_ip6_spec->ip6src, &zero_ipv6_addr_mask, + sizeof(struct in6_addr)) && + !memcmp(usr_ip6_spec->ip6dst, &zero_ipv6_addr_mask, + sizeof(struct in6_addr))) + return -EINVAL; + + *perfect_fltr = true; + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6); + + if (!memcmp(usr_ip6_spec->ip6src, &full_ipv6_addr_mask, + sizeof(struct in6_addr))) + ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV6_SA, + ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, + ICE_FLOW_FLD_OFF_INVAL, false); + else if (!memcmp(usr_ip6_spec->ip6src, &zero_ipv6_addr_mask, + sizeof(struct in6_addr))) + *perfect_fltr = false; + else + return -EOPNOTSUPP; + + if (!memcmp(usr_ip6_spec->ip6dst, &full_ipv6_addr_mask, + sizeof(struct in6_addr))) + ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV6_DA, + ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, + ICE_FLOW_FLD_OFF_INVAL, false); + else if (!memcmp(usr_ip6_spec->ip6dst, &zero_ipv6_addr_mask, + sizeof(struct in6_addr))) + *perfect_fltr = false; + else + return -EOPNOTSUPP; + + return 0; +} + +/** + * ice_cfg_fdir_xtrct_seq - Configure extraction sequence for the given filter + * @pf: PF structure + * @fsp: pointer to ethtool Rx 
flow specification + * @user: user defined data from flow specification + * + * Returns 0 on success. + */ +static int +ice_cfg_fdir_xtrct_seq(struct ice_pf *pf, struct ethtool_rx_flow_spec *fsp, + struct ice_rx_flow_userdef *user) +{ + struct ice_flow_seg_info *seg, *tun_seg; + struct device *dev = ice_pf_to_dev(pf); + enum ice_fltr_ptype fltr_idx; + struct ice_hw *hw = &pf->hw; + bool perfect_filter; + int ret; + + seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL); + if (!seg) + return -ENOMEM; + + tun_seg = devm_kzalloc(dev, sizeof(*seg) * ICE_FD_HW_SEG_MAX, + GFP_KERNEL); + if (!tun_seg) { + devm_kfree(dev, seg); + return -ENOMEM; + } + + switch (fsp->flow_type & ~FLOW_EXT) { + case TCP_V4_FLOW: + ret = ice_set_fdir_ip4_seg(seg, &fsp->m_u.tcp_ip4_spec, + ICE_FLOW_SEG_HDR_TCP, + &perfect_filter); + break; + case UDP_V4_FLOW: + ret = ice_set_fdir_ip4_seg(seg, &fsp->m_u.tcp_ip4_spec, + ICE_FLOW_SEG_HDR_UDP, + &perfect_filter); + break; + case SCTP_V4_FLOW: + ret = ice_set_fdir_ip4_seg(seg, &fsp->m_u.tcp_ip4_spec, + ICE_FLOW_SEG_HDR_SCTP, + &perfect_filter); + break; + case IPV4_USER_FLOW: + ret = ice_set_fdir_ip4_usr_seg(seg, &fsp->m_u.usr_ip4_spec, + &perfect_filter); + break; + case TCP_V6_FLOW: + ret = ice_set_fdir_ip6_seg(seg, &fsp->m_u.tcp_ip6_spec, + ICE_FLOW_SEG_HDR_TCP, + &perfect_filter); + break; + case UDP_V6_FLOW: + ret = ice_set_fdir_ip6_seg(seg, &fsp->m_u.tcp_ip6_spec, + ICE_FLOW_SEG_HDR_UDP, + &perfect_filter); + break; + case SCTP_V6_FLOW: + ret = ice_set_fdir_ip6_seg(seg, &fsp->m_u.tcp_ip6_spec, + ICE_FLOW_SEG_HDR_SCTP, + &perfect_filter); + break; + case IPV6_USER_FLOW: + ret = ice_set_fdir_ip6_usr_seg(seg, &fsp->m_u.usr_ip6_spec, + &perfect_filter); + break; + default: + ret = -EINVAL; + } + if (ret) + goto err_exit; + + /* tunnel segments are shifted up one. 
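+ * The outer/tunnel headers occupy segment 0, which is left zeroed here;
+ * the user-specified match is copied into segment 1, the inner headers.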
*/
+ memcpy(&tun_seg[1], seg, sizeof(*seg));
+
+ if (user && user->flex_fltr) {
+ perfect_filter = false;
+ ice_flow_add_fld_raw(seg, user->flex_offset,
+ ICE_FLTR_PRGM_FLEX_WORD_SIZE,
+ ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL);
+ ice_flow_add_fld_raw(&tun_seg[1], user->flex_offset,
+ ICE_FLTR_PRGM_FLEX_WORD_SIZE,
+ ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL);
+ }
+
+ /* add filter for outer headers */
+ fltr_idx = ice_ethtool_flow_to_fltr(fsp->flow_type & ~FLOW_EXT);
+ ret = ice_fdir_set_hw_fltr_rule(pf, seg, fltr_idx,
+ ICE_FD_HW_SEG_NON_TUN);
+ if (ret == -EEXIST)
+ /* Rule already exists, free memory and continue */
+ devm_kfree(dev, seg);
+ else if (ret)
+ /* could not write filter, free memory */
+ goto err_exit;
+
+ /* make tunneled filter HW entries if possible */
+ memcpy(&tun_seg[1], seg, sizeof(*seg));
+ ret = ice_fdir_set_hw_fltr_rule(pf, tun_seg, fltr_idx,
+ ICE_FD_HW_SEG_TUN);
+ if (ret == -EEXIST) {
+ /* Rule already exists, free memory and count as success */
+ devm_kfree(dev, tun_seg);
+ ret = 0;
+ } else if (ret) {
+ /* could not write tunnel filter, but outer filter exists */
+ devm_kfree(dev, tun_seg);
+ }
+
+ if (perfect_filter)
+ set_bit(fltr_idx, hw->fdir_perfect_fltr);
+ else
+ clear_bit(fltr_idx, hw->fdir_perfect_fltr);
+
+ return ret;
+
+err_exit:
+ devm_kfree(dev, tun_seg);
+ devm_kfree(dev, seg);
+
+ return -EOPNOTSUPP;
+}
+
+/**
+ * ice_fdir_write_fltr - send a flow director filter to the hardware
+ * @pf: PF data structure
+ * @input: filter structure
+ * @add: true adds filter and false removes filter
+ * @is_tun: true programs the inner (tunneled) headers, false the outer headers
+ *
+ * returns 0 on success and negative value on error
+ */
+int
+ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add,
+ bool is_tun)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ struct ice_fltr_desc desc;
+ struct ice_vsi *ctrl_vsi;
+ enum ice_status status;
+ u8 *pkt, *frag_pkt;
+ bool has_frag;
+ int err;
+
+ ctrl_vsi = ice_get_ctrl_vsi(pf);
+ if (!ctrl_vsi)
+ return -EINVAL;
+
+ pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);
+ if (!pkt)
+ return -ENOMEM;
+ frag_pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);
+ if (!frag_pkt) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+
+ ice_fdir_get_prgm_desc(hw, input, &desc, add);
+ status = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
+ if (status) {
+ err = ice_status_to_errno(status);
+ goto err_free_all;
+ }
+ err = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt);
+ if (err)
+ goto err_free_all;
+
+ /* repeat for fragment packet */
+ has_frag = ice_fdir_has_frag(input->flow_type);
+ if (has_frag) {
+ /* does not return error */
+ ice_fdir_get_prgm_desc(hw, input, &desc, add);
+ status = ice_fdir_get_gen_prgm_pkt(hw, input, frag_pkt, true,
+ is_tun);
+ if (status) {
+ err = ice_status_to_errno(status);
+ goto err_frag;
+ }
+ err = ice_prgm_fdir_fltr(ctrl_vsi, &desc, frag_pkt);
+ if (err)
+ goto err_frag;
+ } else {
+ devm_kfree(dev, frag_pkt);
+ }
+
+ return 0;
+
+err_free_all:
+ devm_kfree(dev, frag_pkt);
+err_free:
+ devm_kfree(dev, pkt);
+ return err;
+
+err_frag:
+ devm_kfree(dev, frag_pkt);
+ return err;
+}
+
+/**
+ * ice_fdir_write_all_fltr - send a flow director filter to the hardware for
+ * all tunnel types
+ * @pf: PF data structure
+ * @input: filter structure
+ * @add: true adds filter and false removes filter
+ *
+ * returns 0 on success and negative value on error
+ */
+static int
+ice_fdir_write_all_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input,
+ bool add) +{ + u16 port_num; + int tun; + + for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) { + bool is_tun = tun == ICE_FD_HW_SEG_TUN; + int err; + + if (is_tun && !ice_get_open_tunnel_port(&pf->hw, TNL_ALL, + &port_num)) + continue; + err = ice_fdir_write_fltr(pf, input, add, is_tun); + if (err) + return err; + } + return 0; +} + +/** + * ice_fdir_replay_fltrs - replay filters from the HW filter list + * @pf: board private structure + */ +void ice_fdir_replay_fltrs(struct ice_pf *pf) +{ + struct ice_fdir_fltr *f_rule; + struct ice_hw *hw = &pf->hw; + + list_for_each_entry(f_rule, &hw->fdir_list_head, fltr_node) { + int err = ice_fdir_write_all_fltr(pf, f_rule, true); + + if (err) + dev_dbg(ice_pf_to_dev(pf), "Flow Director error %d, could not reprogram filter %d\n", + err, f_rule->fltr_id); + } +} + +/** + * ice_fdir_create_dflt_rules - create default perfect filters + * @pf: PF data structure + * + * Returns 0 for success or error. + */ +int ice_fdir_create_dflt_rules(struct ice_pf *pf) +{ + int err; + + /* Create perfect TCP and UDP rules in hardware. */ + err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV4_TCP); + if (err) + return err; + + err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV4_UDP); + if (err) + return err; + + err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV6_TCP); + if (err) + return err; + + err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV6_UDP); + + return err; +} + +/** + * ice_vsi_manage_fdir - turn on/off flow director + * @vsi: the VSI being changed + * @ena: boolean value indicating if this is an enable or disable request + */ +void ice_vsi_manage_fdir(struct ice_vsi *vsi, bool ena) +{ + struct ice_fdir_fltr *f_rule, *tmp; + struct ice_pf *pf = vsi->back; + struct ice_hw *hw = &pf->hw; + enum ice_fltr_ptype flow; + + if (ena) { + set_bit(ICE_FLAG_FD_ENA, pf->flags); + ice_fdir_create_dflt_rules(pf); + return; + } + + mutex_lock(&hw->fdir_fltr_lock); + if (!test_and_clear_bit(ICE_FLAG_FD_ENA, pf->flags)) + goto release_lock; + list_for_each_entry_safe(f_rule, tmp, &hw->fdir_list_head, fltr_node) { + /* ignore return value */ + ice_fdir_write_all_fltr(pf, f_rule, false); + ice_fdir_update_cntrs(hw, f_rule->flow_type, false); + list_del(&f_rule->fltr_node); + devm_kfree(ice_hw_to_dev(hw), f_rule); + } + + if (hw->fdir_prof) + for (flow = ICE_FLTR_PTYPE_NONF_NONE; flow < ICE_FLTR_PTYPE_MAX; + flow++) + if (hw->fdir_prof[flow]) + ice_fdir_rem_flow(hw, ICE_BLK_FD, flow); + +release_lock: + mutex_unlock(&hw->fdir_fltr_lock); +} + +/** + * ice_fdir_update_list_entry - add or delete a filter from the filter list + * @pf: PF structure + * @input: filter structure + * @fltr_idx: ethtool index of filter to modify + * + * returns 0 on success and negative on errors + */ +static int +ice_fdir_update_list_entry(struct ice_pf *pf, struct ice_fdir_fltr *input, + int fltr_idx) +{ + struct ice_fdir_fltr *old_fltr; + struct ice_hw *hw = &pf->hw; + int err = -ENOENT; + + /* Do not update filters during reset */ + if (ice_is_reset_in_progress(pf->state)) + return -EBUSY; + + old_fltr = ice_fdir_find_fltr_by_idx(hw, fltr_idx); + if (old_fltr) { + err = ice_fdir_write_all_fltr(pf, old_fltr, false); + if (err) + return err; + ice_fdir_update_cntrs(hw, old_fltr->flow_type, false); + if (!input && !hw->fdir_fltr_cnt[old_fltr->flow_type]) + /* we just deleted the last filter of flow_type so we + * should also delete the HW filter info. 
+ */ + ice_fdir_rem_flow(hw, ICE_BLK_FD, old_fltr->flow_type); + list_del(&old_fltr->fltr_node); + devm_kfree(ice_hw_to_dev(hw), old_fltr); + } + if (!input) + return err; + ice_fdir_list_add_fltr(hw, input); + ice_fdir_update_cntrs(hw, input->flow_type, true); + return 0; +} + +/** + * ice_del_fdir_ethtool - delete Flow Director filter + * @vsi: pointer to target VSI + * @cmd: command to add or delete Flow Director filter + * + * Returns 0 on success and negative values for failure + */ +int ice_del_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct ice_pf *pf = vsi->back; + struct ice_hw *hw = &pf->hw; + int val; + + if (!test_bit(ICE_FLAG_FD_ENA, pf->flags)) + return -EOPNOTSUPP; + + /* Do not delete filters during reset */ + if (ice_is_reset_in_progress(pf->state)) { + dev_err(ice_pf_to_dev(pf), "Device is resetting - deleting Flow Director filters not supported during reset\n"); + return -EBUSY; + } + + if (test_bit(__ICE_FD_FLUSH_REQ, pf->state)) + return -EBUSY; + + mutex_lock(&hw->fdir_fltr_lock); + val = ice_fdir_update_list_entry(pf, NULL, fsp->location); + mutex_unlock(&hw->fdir_fltr_lock); + + return val; +} + +/** + * ice_set_fdir_input_set - Set the input set for Flow Director + * @vsi: pointer to target VSI + * @fsp: pointer to ethtool Rx flow specification + * @input: filter structure + */ +static int +ice_set_fdir_input_set(struct ice_vsi *vsi, struct ethtool_rx_flow_spec *fsp, + struct ice_fdir_fltr *input) +{ + u16 dest_vsi, q_index = 0; + struct ice_pf *pf; + struct ice_hw *hw; + int flow_type; + u8 dest_ctl; + + if (!vsi || !fsp || !input) + return -EINVAL; + + pf = vsi->back; + hw = &pf->hw; + + dest_vsi = vsi->idx; + if (fsp->ring_cookie == RX_CLS_FLOW_DISC) { + dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DROP_PKT; + } else { + u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie); + u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie); + + if (vf) { + dev_err(ice_pf_to_dev(pf), "Failed to add filter. 
Flow director filters are not supported on VF queues.\n"); + return -EINVAL; + } + + if (ring >= vsi->num_rxq) + return -EINVAL; + + dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX; + q_index = ring; + } + + input->fltr_id = fsp->location; + input->q_index = q_index; + flow_type = fsp->flow_type & ~FLOW_EXT; + + input->dest_vsi = dest_vsi; + input->dest_ctl = dest_ctl; + input->fltr_status = ICE_FLTR_PRGM_DESC_FD_STATUS_FD_ID; + input->cnt_index = ICE_FD_SB_STAT_IDX(hw->fd_ctr_base); + input->flow_type = ice_ethtool_flow_to_fltr(flow_type); + + if (fsp->flow_type & FLOW_EXT) { + memcpy(input->ext_data.usr_def, fsp->h_ext.data, + sizeof(input->ext_data.usr_def)); + input->ext_data.vlan_type = fsp->h_ext.vlan_etype; + input->ext_data.vlan_tag = fsp->h_ext.vlan_tci; + memcpy(input->ext_mask.usr_def, fsp->m_ext.data, + sizeof(input->ext_mask.usr_def)); + input->ext_mask.vlan_type = fsp->m_ext.vlan_etype; + input->ext_mask.vlan_tag = fsp->m_ext.vlan_tci; + } + + switch (flow_type) { + case TCP_V4_FLOW: + case UDP_V4_FLOW: + case SCTP_V4_FLOW: + input->ip.v4.dst_port = fsp->h_u.tcp_ip4_spec.pdst; + input->ip.v4.src_port = fsp->h_u.tcp_ip4_spec.psrc; + input->ip.v4.dst_ip = fsp->h_u.tcp_ip4_spec.ip4dst; + input->ip.v4.src_ip = fsp->h_u.tcp_ip4_spec.ip4src; + input->mask.v4.dst_port = fsp->m_u.tcp_ip4_spec.pdst; + input->mask.v4.src_port = fsp->m_u.tcp_ip4_spec.psrc; + input->mask.v4.dst_ip = fsp->m_u.tcp_ip4_spec.ip4dst; + input->mask.v4.src_ip = fsp->m_u.tcp_ip4_spec.ip4src; + break; + case IPV4_USER_FLOW: + input->ip.v4.dst_ip = fsp->h_u.usr_ip4_spec.ip4dst; + input->ip.v4.src_ip = fsp->h_u.usr_ip4_spec.ip4src; + input->ip.v4.l4_header = fsp->h_u.usr_ip4_spec.l4_4_bytes; + input->ip.v4.proto = fsp->h_u.usr_ip4_spec.proto; + input->ip.v4.ip_ver = fsp->h_u.usr_ip4_spec.ip_ver; + input->ip.v4.tos = fsp->h_u.usr_ip4_spec.tos; + input->mask.v4.dst_ip = fsp->m_u.usr_ip4_spec.ip4dst; + input->mask.v4.src_ip = fsp->m_u.usr_ip4_spec.ip4src; + input->mask.v4.l4_header = fsp->m_u.usr_ip4_spec.l4_4_bytes; + input->mask.v4.proto = fsp->m_u.usr_ip4_spec.proto; + input->mask.v4.ip_ver = fsp->m_u.usr_ip4_spec.ip_ver; + input->mask.v4.tos = fsp->m_u.usr_ip4_spec.tos; + break; + case TCP_V6_FLOW: + case UDP_V6_FLOW: + case SCTP_V6_FLOW: + memcpy(input->ip.v6.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst, + sizeof(struct in6_addr)); + memcpy(input->ip.v6.src_ip, fsp->h_u.usr_ip6_spec.ip6src, + sizeof(struct in6_addr)); + input->ip.v6.dst_port = fsp->h_u.tcp_ip6_spec.pdst; + input->ip.v6.src_port = fsp->h_u.tcp_ip6_spec.psrc; + input->ip.v6.tc = fsp->h_u.tcp_ip6_spec.tclass; + memcpy(input->mask.v6.dst_ip, fsp->m_u.tcp_ip6_spec.ip6dst, + sizeof(struct in6_addr)); + memcpy(input->mask.v6.src_ip, fsp->m_u.tcp_ip6_spec.ip6src, + sizeof(struct in6_addr)); + input->mask.v6.dst_port = fsp->m_u.tcp_ip6_spec.pdst; + input->mask.v6.src_port = fsp->m_u.tcp_ip6_spec.psrc; + input->mask.v6.tc = fsp->m_u.tcp_ip6_spec.tclass; + break; + case IPV6_USER_FLOW: + memcpy(input->ip.v6.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst, + sizeof(struct in6_addr)); + memcpy(input->ip.v6.src_ip, fsp->h_u.usr_ip6_spec.ip6src, + sizeof(struct in6_addr)); + input->ip.v6.l4_header = fsp->h_u.usr_ip6_spec.l4_4_bytes; + input->ip.v6.tc = fsp->h_u.usr_ip6_spec.tclass; + input->ip.v6.proto = fsp->h_u.usr_ip6_spec.l4_proto; + memcpy(input->mask.v6.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst, + sizeof(struct in6_addr)); + memcpy(input->mask.v6.src_ip, fsp->m_u.usr_ip6_spec.ip6src, + sizeof(struct in6_addr)); + input->mask.v6.l4_header = fsp->m_u.usr_ip6_spec.l4_4_bytes; 
+ input->mask.v6.tc = fsp->m_u.usr_ip6_spec.tclass; + input->mask.v6.proto = fsp->m_u.usr_ip6_spec.l4_proto; + break; + default: + /* not doing un-parsed flow types */ + return -EINVAL; + } + + return 0; +} + +/** + * ice_add_fdir_ethtool - Add/Remove Flow Director filter + * @vsi: pointer to target VSI + * @cmd: command to add or delete Flow Director filter + * + * Returns 0 on success and negative values for failure + */ +int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd) +{ + struct ice_rx_flow_userdef userdata; + struct ethtool_rx_flow_spec *fsp; + struct ice_fdir_fltr *input; + struct device *dev; + struct ice_pf *pf; + struct ice_hw *hw; + int fltrs_needed; + u16 tunnel_port; + int ret; + + if (!vsi) + return -EINVAL; + + pf = vsi->back; + hw = &pf->hw; + dev = ice_pf_to_dev(pf); + + if (!test_bit(ICE_FLAG_FD_ENA, pf->flags)) + return -EOPNOTSUPP; + + /* Do not program filters during reset */ + if (ice_is_reset_in_progress(pf->state)) { + dev_err(dev, "Device is resetting - adding Flow Director filters not supported during reset\n"); + return -EBUSY; + } + + fsp = (struct ethtool_rx_flow_spec *)&cmd->fs; + + if (ice_parse_rx_flow_user_data(fsp, &userdata)) + return -EINVAL; + + if (fsp->flow_type & FLOW_MAC_EXT) + return -EINVAL; + + ret = ice_cfg_fdir_xtrct_seq(pf, fsp, &userdata); + if (ret) + return ret; + + if (fsp->location >= ice_get_fdir_cnt_all(hw)) { + dev_err(dev, "Failed to add filter. The maximum number of flow director filters has been reached.\n"); + return -ENOSPC; + } + + /* return error if not an update and no available filters */ + fltrs_needed = ice_get_open_tunnel_port(hw, TNL_ALL, &tunnel_port) ? + 2 : 1; + if (!ice_fdir_find_fltr_by_idx(hw, fsp->location) && + ice_fdir_num_avail_fltr(hw, pf->vsi[vsi->idx]) < fltrs_needed) { + dev_err(dev, "Failed to add filter. The maximum number of flow director filters has been reached.\n"); + return -ENOSPC; + } + + input = devm_kzalloc(dev, sizeof(*input), GFP_KERNEL); + if (!input) + return -ENOMEM; + + ret = ice_set_fdir_input_set(vsi, fsp, input); + if (ret) + goto free_input; + + mutex_lock(&hw->fdir_fltr_lock); + if (ice_fdir_is_dup_fltr(hw, input)) { + ret = -EINVAL; + goto release_lock; + } + + if (userdata.flex_fltr) { + input->flex_fltr = true; + input->flex_word = cpu_to_be16(userdata.flex_word); + input->flex_offset = userdata.flex_offset; + } + + /* input struct is added to the HW filter list */ + ice_fdir_update_list_entry(pf, input, fsp->location); + + ret = ice_fdir_write_all_fltr(pf, input, true); + if (ret) + goto remove_sw_rule; + + goto release_lock; + +remove_sw_rule: + ice_fdir_update_cntrs(hw, input->flow_type, false); + list_del(&input->fltr_node); +release_lock: + mutex_unlock(&hw->fdir_fltr_lock); +free_input: + if (ret) + devm_kfree(dev, input); + + return ret; +} diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.c b/drivers/net/ethernet/intel/ice/ice_fdir.c new file mode 100644 index 000000000000..6834df14332f --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_fdir.c @@ -0,0 +1,840 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2018-2020, Intel Corporation. */ + +#include "ice_common.h" + +/* These are training packet headers used to program flow director filters. 
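+ * Each template below is a minimal Ethernet/IP/L4 frame with its match
+ * fields zeroed; ice_fdir_get_gen_prgm_pkt() copies a template into a
+ * buffer and patches addresses and ports in at the ICE_*_OFFSET
+ * positions before the packet is written to the programming queue.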
*/ +static const u8 ice_fdir_tcpv4_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x28, 0x00, 0x01, 0x00, 0x00, 0x40, 0x06, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x00, + 0x20, 0x00, 0x00, 0x00, 0x00, 0x00 +}; + +static const u8 ice_fdir_udpv4_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x1C, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, +}; + +static const u8 ice_fdir_sctpv4_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x20, 0x00, 0x00, 0x40, 0x00, 0x40, 0x84, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +}; + +static const u8 ice_fdir_ipv4_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x14, 0x00, 0x00, 0x40, 0x00, 0x40, 0x10, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00 +}; + +static const u8 ice_fdir_tcpv6_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x14, 0x06, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x50, 0x00, 0x20, 0x00, 0x00, 0x00, + 0x00, 0x00, +}; + +static const u8 ice_fdir_udpv6_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x08, 0x11, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, +}; + +static const u8 ice_fdir_sctpv6_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x0C, 0x84, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, +}; + +static const u8 ice_fdir_ipv6_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x3B, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +}; + +static const u8 ice_fdir_tcp4_tun_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x5a, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, + 0x45, 0x00, 
0x00, 0x28, 0x00, 0x00, 0x40, 0x00, + 0x40, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x50, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, +}; + +static const u8 ice_fdir_udp4_tun_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x4e, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, + 0x45, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x40, 0x00, + 0x40, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, +}; + +static const u8 ice_fdir_sctp4_tun_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x52, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, + 0x45, 0x00, 0x00, 0x20, 0x00, 0x01, 0x00, 0x00, + 0x40, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +}; + +static const u8 ice_fdir_ip4_tun_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x46, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, + 0x45, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, + 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, +}; + +static const u8 ice_fdir_tcp6_tun_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x6e, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd, + 0x60, 0x00, 0x00, 0x00, 0x00, 0x14, 0x06, 0x40, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x50, 0x00, 0x20, 0x00, + 0x00, 0x00, 0x00, 0x00, +}; + +static const u8 ice_fdir_udp6_tun_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x62, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd, + 0x60, 0x00, 0x00, 0x00, 0x00, 0x08, 0x11, 0x40, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +}; + +static const u8 ice_fdir_sctp6_tun_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x66, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd, + 0x60, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x84, 0x40, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, +}; + +static const u8 ice_fdir_ip6_tun_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x5a, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd, + 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3b, 0x40, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +}; + +/* Flow Director no-op training packet table */ +static const struct ice_fdir_base_pkt ice_fdir_pkt[] = { + { + ICE_FLTR_PTYPE_NONF_IPV4_TCP, + sizeof(ice_fdir_tcpv4_pkt), ice_fdir_tcpv4_pkt, + sizeof(ice_fdir_tcp4_tun_pkt), ice_fdir_tcp4_tun_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV4_UDP, + sizeof(ice_fdir_udpv4_pkt), ice_fdir_udpv4_pkt, + sizeof(ice_fdir_udp4_tun_pkt), ice_fdir_udp4_tun_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV4_SCTP, + sizeof(ice_fdir_sctpv4_pkt), ice_fdir_sctpv4_pkt, + sizeof(ice_fdir_sctp4_tun_pkt), ice_fdir_sctp4_tun_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV4_OTHER, + sizeof(ice_fdir_ipv4_pkt), ice_fdir_ipv4_pkt, + sizeof(ice_fdir_ip4_tun_pkt), ice_fdir_ip4_tun_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV6_TCP, + sizeof(ice_fdir_tcpv6_pkt), ice_fdir_tcpv6_pkt, + sizeof(ice_fdir_tcp6_tun_pkt), ice_fdir_tcp6_tun_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV6_UDP, + sizeof(ice_fdir_udpv6_pkt), ice_fdir_udpv6_pkt, + sizeof(ice_fdir_udp6_tun_pkt), ice_fdir_udp6_tun_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV6_SCTP, + sizeof(ice_fdir_sctpv6_pkt), ice_fdir_sctpv6_pkt, + sizeof(ice_fdir_sctp6_tun_pkt), ice_fdir_sctp6_tun_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV6_OTHER, + sizeof(ice_fdir_ipv6_pkt), ice_fdir_ipv6_pkt, + sizeof(ice_fdir_ip6_tun_pkt), ice_fdir_ip6_tun_pkt, + }, +}; + +#define ICE_FDIR_NUM_PKT ARRAY_SIZE(ice_fdir_pkt) + +/** + * ice_set_dflt_val_fd_desc + * @fd_fltr_ctx: pointer to fd filter descriptor + */ +static void ice_set_dflt_val_fd_desc(struct ice_fd_fltr_desc_ctx *fd_fltr_ctx) +{ + fd_fltr_ctx->comp_q = ICE_FXD_FLTR_QW0_COMP_Q_ZERO; + fd_fltr_ctx->comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW_FAIL; + fd_fltr_ctx->fd_space = ICE_FXD_FLTR_QW0_FD_SPACE_GUAR_BEST; + fd_fltr_ctx->cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS; + fd_fltr_ctx->evict_ena = ICE_FXD_FLTR_QW0_EVICT_ENA_TRUE; + fd_fltr_ctx->toq = ICE_FXD_FLTR_QW0_TO_Q_EQUALS_QINDEX; + fd_fltr_ctx->toq_prio = ICE_FXD_FLTR_QW0_TO_Q_PRIO1; + 
fd_fltr_ctx->dpu_recipe = ICE_FXD_FLTR_QW0_DPU_RECIPE_DFLT; + fd_fltr_ctx->drop = ICE_FXD_FLTR_QW0_DROP_NO; + fd_fltr_ctx->flex_prio = ICE_FXD_FLTR_QW0_FLEX_PRI_NONE; + fd_fltr_ctx->flex_mdid = ICE_FXD_FLTR_QW0_FLEX_MDID0; + fd_fltr_ctx->flex_val = ICE_FXD_FLTR_QW0_FLEX_VAL0; + fd_fltr_ctx->dtype = ICE_TX_DESC_DTYPE_FLTR_PROG; + fd_fltr_ctx->desc_prof_prio = ICE_FXD_FLTR_QW1_PROF_PRIO_ZERO; + fd_fltr_ctx->desc_prof = ICE_FXD_FLTR_QW1_PROF_ZERO; + fd_fltr_ctx->swap = ICE_FXD_FLTR_QW1_SWAP_SET; + fd_fltr_ctx->fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_ONE; + fd_fltr_ctx->fdid_mdid = ICE_FXD_FLTR_QW1_FDID_MDID_FD; + fd_fltr_ctx->fdid = ICE_FXD_FLTR_QW1_FDID_ZERO; +} + +/** + * ice_set_fd_desc_val + * @ctx: pointer to fd filter descriptor context + * @fdir_desc: populated with fd filter descriptor values + */ +static void +ice_set_fd_desc_val(struct ice_fd_fltr_desc_ctx *ctx, + struct ice_fltr_desc *fdir_desc) +{ + u64 qword; + + /* prep QW0 of FD filter programming desc */ + qword = ((u64)ctx->qindex << ICE_FXD_FLTR_QW0_QINDEX_S) & + ICE_FXD_FLTR_QW0_QINDEX_M; + qword |= ((u64)ctx->comp_q << ICE_FXD_FLTR_QW0_COMP_Q_S) & + ICE_FXD_FLTR_QW0_COMP_Q_M; + qword |= ((u64)ctx->comp_report << ICE_FXD_FLTR_QW0_COMP_REPORT_S) & + ICE_FXD_FLTR_QW0_COMP_REPORT_M; + qword |= ((u64)ctx->fd_space << ICE_FXD_FLTR_QW0_FD_SPACE_S) & + ICE_FXD_FLTR_QW0_FD_SPACE_M; + qword |= ((u64)ctx->cnt_index << ICE_FXD_FLTR_QW0_STAT_CNT_S) & + ICE_FXD_FLTR_QW0_STAT_CNT_M; + qword |= ((u64)ctx->cnt_ena << ICE_FXD_FLTR_QW0_STAT_ENA_S) & + ICE_FXD_FLTR_QW0_STAT_ENA_M; + qword |= ((u64)ctx->evict_ena << ICE_FXD_FLTR_QW0_EVICT_ENA_S) & + ICE_FXD_FLTR_QW0_EVICT_ENA_M; + qword |= ((u64)ctx->toq << ICE_FXD_FLTR_QW0_TO_Q_S) & + ICE_FXD_FLTR_QW0_TO_Q_M; + qword |= ((u64)ctx->toq_prio << ICE_FXD_FLTR_QW0_TO_Q_PRI_S) & + ICE_FXD_FLTR_QW0_TO_Q_PRI_M; + qword |= ((u64)ctx->dpu_recipe << ICE_FXD_FLTR_QW0_DPU_RECIPE_S) & + ICE_FXD_FLTR_QW0_DPU_RECIPE_M; + qword |= ((u64)ctx->drop << ICE_FXD_FLTR_QW0_DROP_S) & + ICE_FXD_FLTR_QW0_DROP_M; + qword |= ((u64)ctx->flex_prio << ICE_FXD_FLTR_QW0_FLEX_PRI_S) & + ICE_FXD_FLTR_QW0_FLEX_PRI_M; + qword |= ((u64)ctx->flex_mdid << ICE_FXD_FLTR_QW0_FLEX_MDID_S) & + ICE_FXD_FLTR_QW0_FLEX_MDID_M; + qword |= ((u64)ctx->flex_val << ICE_FXD_FLTR_QW0_FLEX_VAL_S) & + ICE_FXD_FLTR_QW0_FLEX_VAL_M; + fdir_desc->qidx_compq_space_stat = cpu_to_le64(qword); + + /* prep QW1 of FD filter programming desc */ + qword = ((u64)ctx->dtype << ICE_FXD_FLTR_QW1_DTYPE_S) & + ICE_FXD_FLTR_QW1_DTYPE_M; + qword |= ((u64)ctx->pcmd << ICE_FXD_FLTR_QW1_PCMD_S) & + ICE_FXD_FLTR_QW1_PCMD_M; + qword |= ((u64)ctx->desc_prof_prio << ICE_FXD_FLTR_QW1_PROF_PRI_S) & + ICE_FXD_FLTR_QW1_PROF_PRI_M; + qword |= ((u64)ctx->desc_prof << ICE_FXD_FLTR_QW1_PROF_S) & + ICE_FXD_FLTR_QW1_PROF_M; + qword |= ((u64)ctx->fd_vsi << ICE_FXD_FLTR_QW1_FD_VSI_S) & + ICE_FXD_FLTR_QW1_FD_VSI_M; + qword |= ((u64)ctx->swap << ICE_FXD_FLTR_QW1_SWAP_S) & + ICE_FXD_FLTR_QW1_SWAP_M; + qword |= ((u64)ctx->fdid_prio << ICE_FXD_FLTR_QW1_FDID_PRI_S) & + ICE_FXD_FLTR_QW1_FDID_PRI_M; + qword |= ((u64)ctx->fdid_mdid << ICE_FXD_FLTR_QW1_FDID_MDID_S) & + ICE_FXD_FLTR_QW1_FDID_MDID_M; + qword |= ((u64)ctx->fdid << ICE_FXD_FLTR_QW1_FDID_S) & + ICE_FXD_FLTR_QW1_FDID_M; + fdir_desc->dtype_cmd_vsi_fdid = cpu_to_le64(qword); +} + +/** + * ice_fdir_get_prgm_desc - set a fdir descriptor from a fdir filter struct + * @hw: pointer to the hardware structure + * @input: filter + * @fdesc: filter descriptor + * @add: if add is true, this is an add operation, false implies delete + */ +void 
+ice_fdir_get_prgm_desc(struct ice_hw *hw, struct ice_fdir_fltr *input, + struct ice_fltr_desc *fdesc, bool add) +{ + struct ice_fd_fltr_desc_ctx fdir_fltr_ctx = { 0 }; + + /* set default context info */ + ice_set_dflt_val_fd_desc(&fdir_fltr_ctx); + + /* change sideband filtering values */ + fdir_fltr_ctx.fdid = input->fltr_id; + if (input->dest_ctl == ICE_FLTR_PRGM_DESC_DEST_DROP_PKT) { + fdir_fltr_ctx.drop = ICE_FXD_FLTR_QW0_DROP_YES; + fdir_fltr_ctx.qindex = 0; + } else { + fdir_fltr_ctx.drop = ICE_FXD_FLTR_QW0_DROP_NO; + fdir_fltr_ctx.qindex = input->q_index; + } + fdir_fltr_ctx.cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS; + fdir_fltr_ctx.cnt_index = input->cnt_index; + fdir_fltr_ctx.fd_vsi = ice_get_hw_vsi_num(hw, input->dest_vsi); + fdir_fltr_ctx.evict_ena = ICE_FXD_FLTR_QW0_EVICT_ENA_FALSE; + fdir_fltr_ctx.toq_prio = 3; + fdir_fltr_ctx.pcmd = add ? ICE_FXD_FLTR_QW1_PCMD_ADD : + ICE_FXD_FLTR_QW1_PCMD_REMOVE; + fdir_fltr_ctx.swap = ICE_FXD_FLTR_QW1_SWAP_NOT_SET; + fdir_fltr_ctx.comp_q = ICE_FXD_FLTR_QW0_COMP_Q_ZERO; + fdir_fltr_ctx.comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW_FAIL; + fdir_fltr_ctx.fdid_prio = 3; + fdir_fltr_ctx.desc_prof = 1; + fdir_fltr_ctx.desc_prof_prio = 3; + ice_set_fd_desc_val(&fdir_fltr_ctx, fdesc); +} + +/** + * ice_alloc_fd_res_cntr - obtain counter resource for FD type + * @hw: pointer to the hardware structure + * @cntr_id: returns counter index + */ +enum ice_status ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id) +{ + return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_FDIR_COUNTER_BLOCK, + ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1, cntr_id); +} + +/** + * ice_free_fd_res_cntr - Free counter resource for FD type + * @hw: pointer to the hardware structure + * @cntr_id: counter index to be freed + */ +enum ice_status ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id) +{ + return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_FDIR_COUNTER_BLOCK, + ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1, cntr_id); +} + +/** + * ice_alloc_fd_guar_item - allocate resource for FD guaranteed entries + * @hw: pointer to the hardware structure + * @cntr_id: returns counter index + * @num_fltr: number of filter entries to be allocated + */ +enum ice_status +ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr) +{ + return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_FDIR_GUARANTEED_ENTRIES, + ICE_AQC_RES_TYPE_FLAG_DEDICATED, num_fltr, + cntr_id); +} + +/** + * ice_alloc_fd_shrd_item - allocate resource for flow director shared entries + * @hw: pointer to the hardware structure + * @cntr_id: returns counter index + * @num_fltr: number of filter entries to be allocated + */ +enum ice_status +ice_alloc_fd_shrd_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr) +{ + return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_FDIR_SHARED_ENTRIES, + ICE_AQC_RES_TYPE_FLAG_DEDICATED, num_fltr, + cntr_id); +} + +/** + * ice_get_fdir_cnt_all - get the number of Flow Director filters + * @hw: hardware data structure + * + * Returns the number of filters available on device + */ +int ice_get_fdir_cnt_all(struct ice_hw *hw) +{ + return hw->func_caps.fd_fltr_guar + hw->func_caps.fd_fltr_best_effort; +} + +/** + * ice_pkt_insert_ipv6_addr - insert a be32 IPv6 address into a memory buffer + * @pkt: packet buffer + * @offset: offset into buffer + * @addr: IPv6 address to convert and insert into pkt at offset + */ +static void ice_pkt_insert_ipv6_addr(u8 *pkt, int offset, __be32 *addr) +{ + int idx; + + for (idx = 0; idx < ICE_IPV6_ADDR_LEN_AS_U32; idx++) + memcpy(pkt + offset + idx * sizeof(*addr), &addr[idx], + 
sizeof(*addr)); +} + +/** + * ice_pkt_insert_u16 - insert a be16 value into a memory buffer + * @pkt: packet buffer + * @offset: offset into buffer + * @data: 16 bit value to convert and insert into pkt at offset + */ +static void ice_pkt_insert_u16(u8 *pkt, int offset, __be16 data) +{ + memcpy(pkt + offset, &data, sizeof(data)); +} + +/** + * ice_pkt_insert_u32 - insert a be32 value into a memory buffer + * @pkt: packet buffer + * @offset: offset into buffer + * @data: 32 bit value to convert and insert into pkt at offset + */ +static void ice_pkt_insert_u32(u8 *pkt, int offset, __be32 data) +{ + memcpy(pkt + offset, &data, sizeof(data)); +} + +/** + * ice_fdir_get_gen_prgm_pkt - generate a training packet + * @hw: pointer to the hardware structure + * @input: flow director filter data structure + * @pkt: pointer to return filter packet + * @frag: generate a fragment packet + * @tun: true implies generate a tunnel packet + */ +enum ice_status +ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, + u8 *pkt, bool frag, bool tun) +{ + enum ice_fltr_ptype flow; + u16 tnl_port; + u8 *loc; + u16 idx; + + if (input->flow_type == ICE_FLTR_PTYPE_NONF_IPV4_OTHER) { + switch (input->ip.v4.proto) { + case IPPROTO_TCP: + flow = ICE_FLTR_PTYPE_NONF_IPV4_TCP; + break; + case IPPROTO_UDP: + flow = ICE_FLTR_PTYPE_NONF_IPV4_UDP; + break; + case IPPROTO_SCTP: + flow = ICE_FLTR_PTYPE_NONF_IPV4_SCTP; + break; + case IPPROTO_IP: + flow = ICE_FLTR_PTYPE_NONF_IPV4_OTHER; + break; + default: + return ICE_ERR_PARAM; + } + } else if (input->flow_type == ICE_FLTR_PTYPE_NONF_IPV6_OTHER) { + switch (input->ip.v6.proto) { + case IPPROTO_TCP: + flow = ICE_FLTR_PTYPE_NONF_IPV6_TCP; + break; + case IPPROTO_UDP: + flow = ICE_FLTR_PTYPE_NONF_IPV6_UDP; + break; + case IPPROTO_SCTP: + flow = ICE_FLTR_PTYPE_NONF_IPV6_SCTP; + break; + case IPPROTO_IP: + flow = ICE_FLTR_PTYPE_NONF_IPV6_OTHER; + break; + default: + return ICE_ERR_PARAM; + } + } else { + flow = input->flow_type; + } + + for (idx = 0; idx < ICE_FDIR_NUM_PKT; idx++) + if (ice_fdir_pkt[idx].flow == flow) + break; + if (idx == ICE_FDIR_NUM_PKT) + return ICE_ERR_PARAM; + if (!tun) { + memcpy(pkt, ice_fdir_pkt[idx].pkt, ice_fdir_pkt[idx].pkt_len); + loc = pkt; + } else { + if (!ice_get_open_tunnel_port(hw, TNL_ALL, &tnl_port)) + return ICE_ERR_DOES_NOT_EXIST; + if (!ice_fdir_pkt[idx].tun_pkt) + return ICE_ERR_PARAM; + memcpy(pkt, ice_fdir_pkt[idx].tun_pkt, + ice_fdir_pkt[idx].tun_pkt_len); + ice_pkt_insert_u16(pkt, ICE_IPV4_UDP_DST_PORT_OFFSET, + htons(tnl_port)); + loc = &pkt[ICE_FDIR_TUN_PKT_OFF]; + } + + /* Reverse the src and dst, since the HW expects them to be from Tx + * perspective. The input from user is from Rx filter perspective. 
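+ *
+ * For example, a user's Rx match on a source IP is written into the
+ * training packet at the destination address offset:
+ *
+ *	ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET,
+ *			   input->ip.v4.src_ip);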
+ */
+ switch (flow) {
+ case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
+ ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET,
+ input->ip.v4.src_ip);
+ ice_pkt_insert_u16(loc, ICE_IPV4_TCP_DST_PORT_OFFSET,
+ input->ip.v4.src_port);
+ ice_pkt_insert_u32(loc, ICE_IPV4_SRC_ADDR_OFFSET,
+ input->ip.v4.dst_ip);
+ ice_pkt_insert_u16(loc, ICE_IPV4_TCP_SRC_PORT_OFFSET,
+ input->ip.v4.dst_port);
+ if (frag)
+ loc[20] = ICE_FDIR_IPV4_PKT_FLAG_DF;
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
+ ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET,
+ input->ip.v4.src_ip);
+ ice_pkt_insert_u16(loc, ICE_IPV4_UDP_DST_PORT_OFFSET,
+ input->ip.v4.src_port);
+ ice_pkt_insert_u32(loc, ICE_IPV4_SRC_ADDR_OFFSET,
+ input->ip.v4.dst_ip);
+ ice_pkt_insert_u16(loc, ICE_IPV4_UDP_SRC_PORT_OFFSET,
+ input->ip.v4.dst_port);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
+ ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET,
+ input->ip.v4.src_ip);
+ ice_pkt_insert_u16(loc, ICE_IPV4_SCTP_DST_PORT_OFFSET,
+ input->ip.v4.src_port);
+ ice_pkt_insert_u32(loc, ICE_IPV4_SRC_ADDR_OFFSET,
+ input->ip.v4.dst_ip);
+ ice_pkt_insert_u16(loc, ICE_IPV4_SCTP_SRC_PORT_OFFSET,
+ input->ip.v4.dst_port);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
+ ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET,
+ input->ip.v4.src_ip);
+ ice_pkt_insert_u32(loc, ICE_IPV4_SRC_ADDR_OFFSET,
+ input->ip.v4.dst_ip);
+ ice_pkt_insert_u16(loc, ICE_IPV4_PROTO_OFFSET, 0);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
+ ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_DST_ADDR_OFFSET,
+ input->ip.v6.src_ip);
+ ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_SRC_ADDR_OFFSET,
+ input->ip.v6.dst_ip);
+ ice_pkt_insert_u16(loc, ICE_IPV6_TCP_DST_PORT_OFFSET,
+ input->ip.v6.src_port);
+ ice_pkt_insert_u16(loc, ICE_IPV6_TCP_SRC_PORT_OFFSET,
+ input->ip.v6.dst_port);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
+ ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_DST_ADDR_OFFSET,
+ input->ip.v6.src_ip);
+ ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_SRC_ADDR_OFFSET,
+ input->ip.v6.dst_ip);
+ ice_pkt_insert_u16(loc, ICE_IPV6_UDP_DST_PORT_OFFSET,
+ input->ip.v6.src_port);
+ ice_pkt_insert_u16(loc, ICE_IPV6_UDP_SRC_PORT_OFFSET,
+ input->ip.v6.dst_port);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
+ ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_DST_ADDR_OFFSET,
+ input->ip.v6.src_ip);
+ ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_SRC_ADDR_OFFSET,
+ input->ip.v6.dst_ip);
+ ice_pkt_insert_u16(loc, ICE_IPV6_SCTP_DST_PORT_OFFSET,
+ input->ip.v6.src_port);
+ ice_pkt_insert_u16(loc, ICE_IPV6_SCTP_SRC_PORT_OFFSET,
+ input->ip.v6.dst_port);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
+ ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_DST_ADDR_OFFSET,
+ input->ip.v6.src_ip);
+ ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_SRC_ADDR_OFFSET,
+ input->ip.v6.dst_ip);
+ break;
+ default:
+ return ICE_ERR_PARAM;
+ }
+
+ if (input->flex_fltr)
+ ice_pkt_insert_u16(loc, input->flex_offset, input->flex_word);
+
+ return 0;
+}
+
+/**
+ * ice_fdir_has_frag - does flow type have 2 ptypes
+ * @flow: flow ptype
+ *
+ * returns true if there is a fragment packet for this ptype
+ */
+bool ice_fdir_has_frag(enum ice_fltr_ptype flow)
+{
+ if (flow == ICE_FLTR_PTYPE_NONF_IPV4_OTHER)
+ return true;
+ else
+ return false;
+}
+
+/**
+ * ice_fdir_find_fltr_by_idx - find filter with idx
+ * @hw: pointer to hardware structure
+ * @fltr_idx: index to find
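+ *
+ * The list is kept sorted by fltr_id (see ice_fdir_list_add_fltr()), so
+ * the walk can stop early once it passes the requested index.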
+ *
+ * Returns pointer to filter if found or NULL
+ */
+struct ice_fdir_fltr *
+ice_fdir_find_fltr_by_idx(struct ice_hw *hw, u32 fltr_idx)
+{
+ struct ice_fdir_fltr *rule;
+
+ list_for_each_entry(rule, &hw->fdir_list_head, fltr_node) {
+ /* rule ID found in the list */
+ if (fltr_idx == rule->fltr_id)
+ return rule;
+ if (fltr_idx < rule->fltr_id)
+ break;
+ }
+ return NULL;
+}
+
+/**
+ * ice_fdir_list_add_fltr - add a new node to the flow director filter list
+ * @hw: hardware structure
+ * @fltr: filter node to add to structure
+ */
+void ice_fdir_list_add_fltr(struct ice_hw *hw, struct ice_fdir_fltr *fltr)
+{
+ struct ice_fdir_fltr *rule, *parent = NULL;
+
+ list_for_each_entry(rule, &hw->fdir_list_head, fltr_node) {
+ /* rule ID found or pass its spot in the list */
+ if (rule->fltr_id >= fltr->fltr_id)
+ break;
+ parent = rule;
+ }
+
+ if (parent)
+ list_add(&fltr->fltr_node, &parent->fltr_node);
+ else
+ list_add(&fltr->fltr_node, &hw->fdir_list_head);
+}
+
+/**
+ * ice_fdir_update_cntrs - increment / decrement filter counter
+ * @hw: pointer to hardware structure
+ * @flow: filter flow type
+ * @add: true to increment the counters, false to decrement
+ */
+void
+ice_fdir_update_cntrs(struct ice_hw *hw, enum ice_fltr_ptype flow, bool add)
+{
+ int incr;
+
+ incr = add ? 1 : -1;
+ hw->fdir_active_fltr += incr;
+
+ if (flow == ICE_FLTR_PTYPE_NONF_NONE || flow >= ICE_FLTR_PTYPE_MAX)
+ ice_debug(hw, ICE_DBG_SW, "Unknown filter type %d\n", flow);
+ else
+ hw->fdir_fltr_cnt[flow] += incr;
+}
+
+/**
+ * ice_cmp_ipv6_addr - compare two IPv6 addresses
+ * @a: IPv6 address
+ * @b: IPv6 address
+ *
+ * Returns 0 if the addresses are equal, non-zero otherwise
+ */
+static int ice_cmp_ipv6_addr(__be32 *a, __be32 *b)
+{
+ return memcmp(a, b, 4 * sizeof(__be32));
+}
+
+/**
+ * ice_fdir_comp_rules - compare two filters
+ * @a: a Flow Director filter data structure
+ * @b: a Flow Director filter data structure
+ * @v6: bool true if v6 filter
+ *
+ * Returns true if the filters match
+ */
+static bool
+ice_fdir_comp_rules(struct ice_fdir_fltr *a, struct ice_fdir_fltr *b, bool v6)
+{
+ enum ice_fltr_ptype flow_type = a->flow_type;
+
+ /* The calling function already checks that the two filters have the
+ * same flow_type.
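+ * (Only the tuple fields relevant to that flow type are compared
+ * below.)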
+ */ + if (!v6) { + if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP || + flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP || + flow_type == ICE_FLTR_PTYPE_NONF_IPV4_SCTP) { + if (a->ip.v4.dst_ip == b->ip.v4.dst_ip && + a->ip.v4.src_ip == b->ip.v4.src_ip && + a->ip.v4.dst_port == b->ip.v4.dst_port && + a->ip.v4.src_port == b->ip.v4.src_port) + return true; + } else if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_OTHER) { + if (a->ip.v4.dst_ip == b->ip.v4.dst_ip && + a->ip.v4.src_ip == b->ip.v4.src_ip && + a->ip.v4.l4_header == b->ip.v4.l4_header && + a->ip.v4.proto == b->ip.v4.proto && + a->ip.v4.ip_ver == b->ip.v4.ip_ver && + a->ip.v4.tos == b->ip.v4.tos) + return true; + } + } else { + if (flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP || + flow_type == ICE_FLTR_PTYPE_NONF_IPV6_TCP || + flow_type == ICE_FLTR_PTYPE_NONF_IPV6_SCTP) { + if (a->ip.v6.dst_port == b->ip.v6.dst_port && + a->ip.v6.src_port == b->ip.v6.src_port && + !ice_cmp_ipv6_addr(a->ip.v6.dst_ip, + b->ip.v6.dst_ip) && + !ice_cmp_ipv6_addr(a->ip.v6.src_ip, + b->ip.v6.src_ip)) + return true; + } else if (flow_type == ICE_FLTR_PTYPE_NONF_IPV6_OTHER) { + if (a->ip.v6.dst_port == b->ip.v6.dst_port && + a->ip.v6.src_port == b->ip.v6.src_port) + return true; + } + } + + return false; +} + +/** + * ice_fdir_is_dup_fltr - test if filter is already in list for PF + * @hw: hardware data structure + * @input: Flow Director filter data structure + * + * Returns true if the filter is found in the list + */ +bool ice_fdir_is_dup_fltr(struct ice_hw *hw, struct ice_fdir_fltr *input) +{ + struct ice_fdir_fltr *rule; + bool ret = false; + + list_for_each_entry(rule, &hw->fdir_list_head, fltr_node) { + enum ice_fltr_ptype flow_type; + + if (rule->flow_type != input->flow_type) + continue; + + flow_type = input->flow_type; + if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP || + flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP || + flow_type == ICE_FLTR_PTYPE_NONF_IPV4_SCTP || + flow_type == ICE_FLTR_PTYPE_NONF_IPV4_OTHER) + ret = ice_fdir_comp_rules(rule, input, false); + else + ret = ice_fdir_comp_rules(rule, input, true); + if (ret) { + if (rule->fltr_id == input->fltr_id && + rule->q_index != input->q_index) + ret = false; + else + break; + } + } + + return ret; +} diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.h b/drivers/net/ethernet/intel/ice/ice_fdir.h new file mode 100644 index 000000000000..1c587766daab --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_fdir.h @@ -0,0 +1,166 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018-2020, Intel Corporation. */ + +#ifndef _ICE_FDIR_H_ +#define _ICE_FDIR_H_ + +#define ICE_FDIR_TUN_PKT_OFF 50 +#define ICE_FDIR_MAX_RAW_PKT_SIZE (512 + ICE_FDIR_TUN_PKT_OFF) + +/* macros for offsets into packets for flow director programming */ +#define ICE_IPV4_SRC_ADDR_OFFSET 26 +#define ICE_IPV4_DST_ADDR_OFFSET 30 +#define ICE_IPV4_TCP_SRC_PORT_OFFSET 34 +#define ICE_IPV4_TCP_DST_PORT_OFFSET 36 +#define ICE_IPV4_UDP_SRC_PORT_OFFSET 34 +#define ICE_IPV4_UDP_DST_PORT_OFFSET 36 +#define ICE_IPV4_SCTP_SRC_PORT_OFFSET 34 +#define ICE_IPV4_SCTP_DST_PORT_OFFSET 36 +#define ICE_IPV4_PROTO_OFFSET 23 +#define ICE_IPV6_SRC_ADDR_OFFSET 22 +#define ICE_IPV6_DST_ADDR_OFFSET 38 +#define ICE_IPV6_TCP_SRC_PORT_OFFSET 54 +#define ICE_IPV6_TCP_DST_PORT_OFFSET 56 +#define ICE_IPV6_UDP_SRC_PORT_OFFSET 54 +#define ICE_IPV6_UDP_DST_PORT_OFFSET 56 +#define ICE_IPV6_SCTP_SRC_PORT_OFFSET 54 +#define ICE_IPV6_SCTP_DST_PORT_OFFSET 56 +/* IP v4 has 2 flag bits that enable fragment processing: DF and MF. 
DF + * requests that the packet not be fragmented. MF indicates that a packet has + * been fragmented. + */ +#define ICE_FDIR_IPV4_PKT_FLAG_DF 0x20 + +enum ice_fltr_prgm_desc_dest { + ICE_FLTR_PRGM_DESC_DEST_DROP_PKT, + ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX, +}; + +enum ice_fltr_prgm_desc_fd_status { + ICE_FLTR_PRGM_DESC_FD_STATUS_NONE, + ICE_FLTR_PRGM_DESC_FD_STATUS_FD_ID, +}; + +/* Flow Director (FD) Filter Programming descriptor */ +struct ice_fd_fltr_desc_ctx { + u32 fdid; + u16 qindex; + u16 cnt_index; + u16 fd_vsi; + u16 flex_val; + u8 comp_q; + u8 comp_report; + u8 fd_space; + u8 cnt_ena; + u8 evict_ena; + u8 toq; + u8 toq_prio; + u8 dpu_recipe; + u8 drop; + u8 flex_prio; + u8 flex_mdid; + u8 dtype; + u8 pcmd; + u8 desc_prof_prio; + u8 desc_prof; + u8 swap; + u8 fdid_prio; + u8 fdid_mdid; +}; + +#define ICE_FLTR_PRGM_FLEX_WORD_SIZE sizeof(__be16) + +struct ice_rx_flow_userdef { + u16 flex_word; + u16 flex_offset; + u16 flex_fltr; +}; + +struct ice_fdir_v4 { + __be32 dst_ip; + __be32 src_ip; + __be16 dst_port; + __be16 src_port; + __be32 l4_header; + __be32 sec_parm_idx; /* security parameter index */ + u8 tos; + u8 ip_ver; + u8 proto; +}; + +#define ICE_IPV6_ADDR_LEN_AS_U32 4 + +struct ice_fdir_v6 { + __be32 dst_ip[ICE_IPV6_ADDR_LEN_AS_U32]; + __be32 src_ip[ICE_IPV6_ADDR_LEN_AS_U32]; + __be16 dst_port; + __be16 src_port; + __be32 l4_header; /* next header */ + __be32 sec_parm_idx; /* security parameter index */ + u8 tc; + u8 proto; +}; + +struct ice_fdir_extra { + u8 dst_mac[ETH_ALEN]; /* dest MAC address */ + u32 usr_def[2]; /* user data */ + __be16 vlan_type; /* VLAN ethertype */ + __be16 vlan_tag; /* VLAN tag info */ +}; + +struct ice_fdir_fltr { + struct list_head fltr_node; + enum ice_fltr_ptype flow_type; + + union { + struct ice_fdir_v4 v4; + struct ice_fdir_v6 v6; + } ip, mask; + + struct ice_fdir_extra ext_data; + struct ice_fdir_extra ext_mask; + + /* flex byte filter data */ + __be16 flex_word; + u16 flex_offset; + u16 flex_fltr; + + /* filter control */ + u16 q_index; + u16 dest_vsi; + u8 dest_ctl; + u8 fltr_status; + u16 cnt_index; + u32 fltr_id; +}; + +/* Dummy packet filter definition structure */ +struct ice_fdir_base_pkt { + enum ice_fltr_ptype flow; + u16 pkt_len; + const u8 *pkt; + u16 tun_pkt_len; + const u8 *tun_pkt; +}; + +enum ice_status ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id); +enum ice_status ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id); +enum ice_status +ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr); +enum ice_status +ice_alloc_fd_shrd_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr); +void +ice_fdir_get_prgm_desc(struct ice_hw *hw, struct ice_fdir_fltr *input, + struct ice_fltr_desc *fdesc, bool add); +enum ice_status +ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, + u8 *pkt, bool frag, bool tun); +int ice_get_fdir_cnt_all(struct ice_hw *hw); +bool ice_fdir_is_dup_fltr(struct ice_hw *hw, struct ice_fdir_fltr *input); +bool ice_fdir_has_frag(enum ice_fltr_ptype flow); +struct ice_fdir_fltr * +ice_fdir_find_fltr_by_idx(struct ice_hw *hw, u32 fltr_idx); +void +ice_fdir_update_cntrs(struct ice_hw *hw, enum ice_fltr_ptype flow, bool add); +void ice_fdir_list_add_fltr(struct ice_hw *hw, struct ice_fdir_fltr *input); +#endif /* _ICE_FDIR_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c index e7a2671222d2..4420fc02f7e7 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c +++ 
b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c @@ -5,6 +5,15 @@ #include "ice_flex_pipe.h" #include "ice_flow.h"
+/* To support tunneling entries by PF, the package will append the PF number to
+ * the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc.
+ */
+static const struct ice_tunnel_type_scan tnls[] = {
+ { TNL_VXLAN, "TNL_VXLAN_PF" },
+ { TNL_GENEVE, "TNL_GENEVE_PF" },
+ { TNL_LAST, "" }
+};
+
 static const u32 ice_sect_lkup[ICE_BLK_COUNT][ICE_SECT_COUNT] = {
 /* SWITCH */
 {
@@ -239,6 +248,268 @@ ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
 return state->sect;
 }
 
+/**
+ * ice_pkg_enum_entry
+ * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
+ * @state: pointer to the enum state
+ * @sect_type: section type to enumerate
+ * @offset: pointer to variable that receives the offset in the table (optional)
+ * @handler: function that handles access to the entries into the section type
+ *
+ * This function will enumerate all the entries in a particular section type in
+ * the ice segment. The first call is made with the ice_seg parameter non-NULL;
+ * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
+ * When the function returns a NULL pointer, then the end of the entries has
+ * been reached.
+ *
+ * Since each section may have a different header and entry size, the handler
+ * function is needed to determine the number and location of entries in each
+ * section.
+ *
+ * The offset parameter is optional, but should be used for sections that
+ * contain an offset for each section table. For such cases, the section handler
+ * function must return the appropriate offset + index to give the absolute
+ * offset for each entry. For example, if the base for a section's header
+ * indicates a base offset of 10, and the index for the entry is 2, then the
+ * section handler function should set the offset to 10 + 2 = 12.
+ */
+static void *
+ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
+ u32 sect_type, u32 *offset,
+ void *(*handler)(u32 sect_type, void *section,
+ u32 index, u32 *offset))
+{
+ void *entry;
+
+ if (ice_seg) {
+ if (!handler)
+ return NULL;
+
+ if (!ice_pkg_enum_section(ice_seg, state, sect_type))
+ return NULL;
+
+ state->entry_idx = 0;
+ state->handler = handler;
+ } else {
+ state->entry_idx++;
+ }
+
+ if (!state->handler)
+ return NULL;
+
+ /* get entry */
+ entry = state->handler(state->sect_type, state->sect, state->entry_idx,
+ offset);
+ if (!entry) {
+ /* end of a section, look for another section of this type */
+ if (!ice_pkg_enum_section(NULL, state, 0))
+ return NULL;
+
+ state->entry_idx = 0;
+ entry = state->handler(state->sect_type, state->sect,
+ state->entry_idx, offset);
+ }
+
+ return entry;
+}
+
+/**
+ * ice_boost_tcam_handler
+ * @sect_type: section type
+ * @section: pointer to section
+ * @index: index of the boost TCAM entry to be returned
+ * @offset: pointer to receive absolute offset, always 0 for boost TCAM sections
+ *
+ * This is a callback function that can be passed to ice_pkg_enum_entry.
+ * Handles enumeration of individual boost TCAM entries.
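+ *
+ * A minimal usage sketch (this is how ice_find_boost_entry below drives it):
+ *
+ *	tcam = ice_pkg_enum_entry(ice_seg, &state,
+ *				  ICE_SID_RXPARSER_BOOST_TCAM, NULL,
+ *				  ice_boost_tcam_handler);
+ *
+ * followed by calls with ice_seg == NULL to fetch the remaining entries.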
+ */
+static void *
+ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset)
+{
+ struct ice_boost_tcam_section *boost;
+
+ if (!section)
+ return NULL;
+
+ if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM)
+ return NULL;
+
+ if (index > ICE_MAX_BST_TCAMS_IN_BUF)
+ return NULL;
+
+ if (offset)
+ *offset = 0;
+
+ boost = section;
+ if (index >= le16_to_cpu(boost->count))
+ return NULL;
+
+ return boost->tcam + index;
+}
+
+/**
+ * ice_find_boost_entry
+ * @ice_seg: pointer to the ice segment (non-NULL)
+ * @addr: Boost TCAM address of entry to search for
+ * @entry: returns pointer to the entry
+ *
+ * Finds a particular Boost TCAM entry and returns a pointer to that entry
+ * if it is found. The ice_seg parameter must not be NULL since the first call
+ * to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure.
+ */
+static enum ice_status
+ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
+ struct ice_boost_tcam_entry **entry)
+{
+ struct ice_boost_tcam_entry *tcam;
+ struct ice_pkg_enum state;
+
+ memset(&state, 0, sizeof(state));
+
+ if (!ice_seg)
+ return ICE_ERR_PARAM;
+
+ do {
+ tcam = ice_pkg_enum_entry(ice_seg, &state,
+ ICE_SID_RXPARSER_BOOST_TCAM, NULL,
+ ice_boost_tcam_handler);
+ if (tcam && le16_to_cpu(tcam->addr) == addr) {
+ *entry = tcam;
+ return 0;
+ }
+
+ ice_seg = NULL;
+ } while (tcam);
+
+ *entry = NULL;
+ return ICE_ERR_CFG;
+}
+
+/**
+ * ice_label_enum_handler
+ * @sect_type: section type
+ * @section: pointer to section
+ * @index: index of the label entry to be returned
+ * @offset: pointer to receive absolute offset, always zero for label sections
+ *
+ * This is a callback function that can be passed to ice_pkg_enum_entry.
+ * Handles enumeration of individual label entries.
+ */
+static void *
+ice_label_enum_handler(u32 __always_unused sect_type, void *section, u32 index,
+ u32 *offset)
+{
+ struct ice_label_section *labels;
+
+ if (!section)
+ return NULL;
+
+ if (index > ICE_MAX_LABELS_IN_BUF)
+ return NULL;
+
+ if (offset)
+ *offset = 0;
+
+ labels = section;
+ if (index >= le16_to_cpu(labels->count))
+ return NULL;
+
+ return labels->label + index;
+}
+
+/**
+ * ice_enum_labels
+ * @ice_seg: pointer to the ice segment (NULL on subsequent calls)
+ * @type: the section type that will contain the label (0 on subsequent calls)
+ * @state: ice_pkg_enum structure that will hold the state of the enumeration
+ * @value: pointer to a value that will return the label's value if found
+ *
+ * Enumerates a list of labels in the package. The caller will call
+ * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call
+ * ice_enum_labels(NULL, 0, ...) to continue. When the function returns NULL,
+ * the end of the list has been reached.
+ */
+static char *
+ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state,
+ u16 *value)
+{
+ struct ice_label *label;
+
+ /* Check for valid label section on first call */
+ if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST))
+ return NULL;
+
+ label = ice_pkg_enum_entry(ice_seg, state, type, NULL,
+ ice_label_enum_handler);
+ if (!label)
+ return NULL;
+
+ *value = le16_to_cpu(label->value);
+ return label->name;
+}
+
+/**
+ * ice_init_pkg_hints
+ * @hw: pointer to the HW structure
+ * @ice_seg: pointer to the segment of the package scan (non-NULL)
+ *
+ * This function will scan the package and save off relevant information
+ * (hints or metadata) for driver use.
The ice_seg parameter must not be NULL
+ * since the first call to ice_enum_labels requires a pointer to an actual
+ * ice_seg structure.
+ */
+static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
+{
+ struct ice_pkg_enum state;
+ char *label_name;
+ u16 val;
+ int i;
+
+ memset(&hw->tnl, 0, sizeof(hw->tnl));
+ memset(&state, 0, sizeof(state));
+
+ if (!ice_seg)
+ return;
+
+ label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state,
+ &val);
+
+ while (label_name && hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) {
+ for (i = 0; tnls[i].type != TNL_LAST; i++) {
+ size_t len = strlen(tnls[i].label_prefix);
+
+ /* Look for a matching label prefix before continuing */
+ if (strncmp(label_name, tnls[i].label_prefix, len))
+ continue;
+
+ /* Make sure this label matches our PF. Note that the PF
+ * character ('0' - '7') will be located where our
+ * prefix string's null terminator is located.
+ */
+ if ((label_name[len] - '0') == hw->pf_id) {
+ hw->tnl.tbl[hw->tnl.count].type = tnls[i].type;
+ hw->tnl.tbl[hw->tnl.count].valid = false;
+ hw->tnl.tbl[hw->tnl.count].in_use = false;
+ hw->tnl.tbl[hw->tnl.count].marked = false;
+ hw->tnl.tbl[hw->tnl.count].boost_addr = val;
+ hw->tnl.tbl[hw->tnl.count].port = 0;
+ hw->tnl.count++;
+ break;
+ }
+ }
+
+ label_name = ice_enum_labels(NULL, 0, &state, &val);
+ }
+
+ /* Cache the appropriate boost TCAM entry pointers */
+ for (i = 0; i < hw->tnl.count; i++) {
+ ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr,
+ &hw->tnl.tbl[i].boost_entry);
+ if (hw->tnl.tbl[i].boost_entry)
+ hw->tnl.tbl[i].valid = true;
+ }
+}
+
 /* Key creation */
 #define ICE_DC_KEY 0x1 /* don't care */
@@ -593,8 +864,9 @@ ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type, u32 i; ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n",
- pkg_hdr->format_ver.major, pkg_hdr->format_ver.minor,
- pkg_hdr->format_ver.update, pkg_hdr->format_ver.draft);
+ pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor,
+ pkg_hdr->pkg_format_ver.update,
+ pkg_hdr->pkg_format_ver.draft);
 /* Search all package segments for the requested segment type */
 for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) {
@@ -764,13 +1036,15 @@ ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg) { struct ice_buf_table *ice_buf_tbl;
- ice_debug(hw, ICE_DBG_PKG, "Segment version: %d.%d.%d.%d\n",
- ice_seg->hdr.seg_ver.major, ice_seg->hdr.seg_ver.minor,
- ice_seg->hdr.seg_ver.update, ice_seg->hdr.seg_ver.draft);
+ ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n",
+ ice_seg->hdr.seg_format_ver.major,
+ ice_seg->hdr.seg_format_ver.minor,
+ ice_seg->hdr.seg_format_ver.update,
+ ice_seg->hdr.seg_format_ver.draft);
 ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n",
 le32_to_cpu(ice_seg->hdr.seg_type),
- le32_to_cpu(ice_seg->hdr.seg_size), ice_seg->hdr.seg_name);
+ le32_to_cpu(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id);
 ice_buf_tbl = ice_find_buf_table(ice_seg);
@@ -815,14 +1089,16 @@ ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr) seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr); if (seg_hdr) {
- hw->ice_pkg_ver = seg_hdr->seg_ver;
- memcpy(hw->ice_pkg_name, seg_hdr->seg_name,
+ hw->ice_pkg_ver = seg_hdr->seg_format_ver;
+ memcpy(hw->ice_pkg_name, seg_hdr->seg_id,
 sizeof(hw->ice_pkg_name));
- ice_debug(hw, ICE_DBG_PKG, "Ice Pkg: %d.%d.%d.%d, %s\n",
- seg_hdr->seg_ver.major, seg_hdr->seg_ver.minor,
- seg_hdr->seg_ver.update, seg_hdr->seg_ver.draft,
- seg_hdr->seg_name);
+ ice_debug(hw, ICE_DBG_PKG, "Ice
Seg: %d.%d.%d.%d, %s\n", + seg_hdr->seg_format_ver.major, + seg_hdr->seg_format_ver.minor, + seg_hdr->seg_format_ver.update, + seg_hdr->seg_format_ver.draft, + seg_hdr->seg_id); } else { ice_debug(hw, ICE_DBG_INIT, "Did not find ice segment in driver package\n"); @@ -863,9 +1139,11 @@ static enum ice_status ice_get_pkg_info(struct ice_hw *hw) if (pkg_info->pkg_info[i].is_active) { flags[place++] = 'A'; hw->active_pkg_ver = pkg_info->pkg_info[i].ver; + hw->active_track_id = + le32_to_cpu(pkg_info->pkg_info[i].track_id); memcpy(hw->active_pkg_name, pkg_info->pkg_info[i].name, - sizeof(hw->active_pkg_name)); + sizeof(pkg_info->pkg_info[i].name)); hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm; } if (pkg_info->pkg_info[i].is_active_at_boot) @@ -905,10 +1183,10 @@ static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len) if (len < sizeof(*pkg)) return ICE_ERR_BUF_TOO_SHORT; - if (pkg->format_ver.major != ICE_PKG_FMT_VER_MAJ || - pkg->format_ver.minor != ICE_PKG_FMT_VER_MNR || - pkg->format_ver.update != ICE_PKG_FMT_VER_UPD || - pkg->format_ver.draft != ICE_PKG_FMT_VER_DFT) + if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ || + pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR || + pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD || + pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT) return ICE_ERR_CFG; /* pkg must have at least one segment */ @@ -990,6 +1268,68 @@ static enum ice_status ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver) } /** + * ice_chk_pkg_compat + * @hw: pointer to the hardware structure + * @ospkg: pointer to the package hdr + * @seg: pointer to the package segment hdr + * + * This function checks the package version compatibility with driver and NVM + */ +static enum ice_status +ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg, + struct ice_seg **seg) +{ + struct ice_aqc_get_pkg_info_resp *pkg; + enum ice_status status; + u16 size; + u32 i; + + /* Check package version compatibility */ + status = ice_chk_pkg_version(&hw->pkg_ver); + if (status) { + ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n"); + return status; + } + + /* find ICE segment in given package */ + *seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, + ospkg); + if (!*seg) { + ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n"); + return ICE_ERR_CFG; + } + + /* Check if FW is compatible with the OS package */ + size = struct_size(pkg, pkg_info, ICE_PKG_CNT - 1); + pkg = kzalloc(size, GFP_KERNEL); + if (!pkg) + return ICE_ERR_NO_MEMORY; + + status = ice_aq_get_pkg_info_list(hw, pkg, size, NULL); + if (status) + goto fw_ddp_compat_free_alloc; + + for (i = 0; i < le32_to_cpu(pkg->count); i++) { + /* loop till we find the NVM package */ + if (!pkg->pkg_info[i].is_in_nvm) + continue; + if ((*seg)->hdr.seg_format_ver.major != + pkg->pkg_info[i].ver.major || + (*seg)->hdr.seg_format_ver.minor > + pkg->pkg_info[i].ver.minor) { + status = ICE_ERR_FW_DDP_MISMATCH; + ice_debug(hw, ICE_DBG_INIT, + "OS package is not compatible with NVM.\n"); + } + /* done processing NVM package so break */ + break; + } +fw_ddp_compat_free_alloc: + kfree(pkg); + return status; +} + +/** * ice_init_pkg - initialize/download package * @hw: pointer to the hardware structure * @buf: pointer to the package buffer @@ -1039,18 +1379,12 @@ enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len) /* before downloading the package, check package version for * compatibility with driver */ - status = ice_chk_pkg_version(&hw->pkg_ver); + status = 
ice_chk_pkg_compat(hw, pkg, &seg); if (status) return status; - /* find segment in given package */ - seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg); - if (!seg) { - ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n"); - return ICE_ERR_CFG; - } - - /* download package */ + /* initialize package hints and then download package */ + ice_init_pkg_hints(hw, seg); status = ice_download_pkg(hw, seg); if (status == ICE_ERR_AQ_NO_WORK) { ice_debug(hw, ICE_DBG_INIT, @@ -1292,6 +1626,284 @@ static struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld) return &bld->buf; } +/** + * ice_tunnel_port_in_use_hlpr - helper function to determine tunnel usage + * @hw: pointer to the HW structure + * @port: port to search for + * @index: optionally returns index + * + * Returns whether a port is already in use as a tunnel, and optionally its + * index + */ +static bool ice_tunnel_port_in_use_hlpr(struct ice_hw *hw, u16 port, u16 *index) +{ + u16 i; + + for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) + if (hw->tnl.tbl[i].in_use && hw->tnl.tbl[i].port == port) { + if (index) + *index = i; + return true; + } + + return false; +} + +/** + * ice_tunnel_port_in_use + * @hw: pointer to the HW structure + * @port: port to search for + * @index: optionally returns index + * + * Returns whether a port is already in use as a tunnel, and optionally its + * index + */ +bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index) +{ + bool res; + + mutex_lock(&hw->tnl_lock); + res = ice_tunnel_port_in_use_hlpr(hw, port, index); + mutex_unlock(&hw->tnl_lock); + + return res; +} + +/** + * ice_find_free_tunnel_entry + * @hw: pointer to the HW structure + * @type: tunnel type + * @index: optionally returns index + * + * Returns whether there is a free tunnel entry, and optionally its index + */ +static bool +ice_find_free_tunnel_entry(struct ice_hw *hw, enum ice_tunnel_type type, + u16 *index) +{ + u16 i; + + for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) + if (hw->tnl.tbl[i].valid && !hw->tnl.tbl[i].in_use && + hw->tnl.tbl[i].type == type) { + if (index) + *index = i; + return true; + } + + return false; +} + +/** + * ice_get_open_tunnel_port - retrieve an open tunnel port + * @hw: pointer to the HW structure + * @type: tunnel type (TNL_ALL will return any open port) + * @port: returns open port + */ +bool +ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type, + u16 *port) +{ + bool res = false; + u16 i; + + mutex_lock(&hw->tnl_lock); + + for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) + if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use && + (type == TNL_ALL || hw->tnl.tbl[i].type == type)) { + *port = hw->tnl.tbl[i].port; + res = true; + break; + } + + mutex_unlock(&hw->tnl_lock); + + return res; +} + +/** + * ice_create_tunnel + * @hw: pointer to the HW structure + * @type: type of tunnel + * @port: port of tunnel to create + * + * Create a tunnel by updating the parse graph in the parser. We do that by + * creating a package buffer with the tunnel info and issuing an update package + * command. 
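+ *
+ * For example (illustrative port value), enabling VXLAN on UDP port 4789,
+ *
+ *	ice_create_tunnel(hw, TNL_VXLAN, 4789);
+ *
+ * copies the cached boost entry of a free TNL_VXLAN slot, encodes the port
+ * into its dest port key and issues a single-buffer package update.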
+ */
+enum ice_status
+ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port)
+{
+ struct ice_boost_tcam_section *sect_rx, *sect_tx;
+ enum ice_status status = ICE_ERR_MAX_LIMIT;
+ struct ice_buf_build *bld;
+ u16 index;
+
+ mutex_lock(&hw->tnl_lock);
+
+ if (ice_tunnel_port_in_use_hlpr(hw, port, &index)) {
+ hw->tnl.tbl[index].ref++;
+ status = 0;
+ goto ice_create_tunnel_end;
+ }
+
+ if (!ice_find_free_tunnel_entry(hw, type, &index)) {
+ status = ICE_ERR_OUT_OF_RANGE;
+ goto ice_create_tunnel_end;
+ }
+
+ bld = ice_pkg_buf_alloc(hw);
+ if (!bld) {
+ status = ICE_ERR_NO_MEMORY;
+ goto ice_create_tunnel_end;
+ }
+
+ /* allocate 2 sections, one for Rx parser, one for Tx parser */
+ if (ice_pkg_buf_reserve_section(bld, 2))
+ goto ice_create_tunnel_err;
+
+ sect_rx = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
+ sizeof(*sect_rx));
+ if (!sect_rx)
+ goto ice_create_tunnel_err;
+ sect_rx->count = cpu_to_le16(1);
+
+ sect_tx = ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
+ sizeof(*sect_tx));
+ if (!sect_tx)
+ goto ice_create_tunnel_err;
+ sect_tx->count = cpu_to_le16(1);
+
+ /* copy original boost entry to update package buffer */
+ memcpy(sect_rx->tcam, hw->tnl.tbl[index].boost_entry,
+ sizeof(*sect_rx->tcam));
+
+ /* over-write the never-match dest port key bits with the encoded port
+ * bits
+ */
+ ice_set_key((u8 *)&sect_rx->tcam[0].key, sizeof(sect_rx->tcam[0].key),
+ (u8 *)&port, NULL, NULL, NULL,
+ (u16)offsetof(struct ice_boost_key_value, hv_dst_port_key),
+ sizeof(sect_rx->tcam[0].key.key.hv_dst_port_key));
+
+ /* exact copy of entry to Tx section entry */
+ memcpy(sect_tx->tcam, sect_rx->tcam, sizeof(*sect_tx->tcam));
+
+ status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
+ if (!status) {
+ hw->tnl.tbl[index].port = port;
+ hw->tnl.tbl[index].in_use = true;
+ hw->tnl.tbl[index].ref = 1;
+ }
+
+ice_create_tunnel_err:
+ ice_pkg_buf_free(hw, bld);
+
+ice_create_tunnel_end:
+ mutex_unlock(&hw->tnl_lock);
+
+ return status;
+}
+
+/**
+ * ice_destroy_tunnel
+ * @hw: pointer to the HW structure
+ * @port: port of tunnel to destroy (ignored if the all parameter is true)
+ * @all: flag that states to destroy all tunnels
+ *
+ * Destroys a tunnel or all tunnels by creating an update package buffer
+ * targeting the specific updates requested and then performing an update
+ * package.
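+ *
+ * For example, ice_destroy_tunnel(hw, 4789, false) tears down only the
+ * entry for port 4789 (or merely drops its reference count when ref > 1),
+ * while ice_destroy_tunnel(hw, 0, true) removes every in-use tunnel.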
+ */
+enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all)
+{
+ struct ice_boost_tcam_section *sect_rx, *sect_tx;
+ enum ice_status status = ICE_ERR_MAX_LIMIT;
+ struct ice_buf_build *bld;
+ u16 count = 0;
+ u16 index;
+ u16 size;
+ u16 i;
+
+ mutex_lock(&hw->tnl_lock);
+
+ if (!all && ice_tunnel_port_in_use_hlpr(hw, port, &index))
+ if (hw->tnl.tbl[index].ref > 1) {
+ hw->tnl.tbl[index].ref--;
+ status = 0;
+ goto ice_destroy_tunnel_end;
+ }
+
+ /* determine count */
+ for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
+ if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
+ (all || hw->tnl.tbl[i].port == port))
+ count++;
+
+ if (!count) {
+ status = ICE_ERR_PARAM;
+ goto ice_destroy_tunnel_end;
+ }
+
+ /* size of section - there is at least one entry */
+ size = struct_size(sect_rx, tcam, count - 1);
+
+ bld = ice_pkg_buf_alloc(hw);
+ if (!bld) {
+ status = ICE_ERR_NO_MEMORY;
+ goto ice_destroy_tunnel_end;
+ }
+
+ /* allocate 2 sections, one for Rx parser, one for Tx parser */
+ if (ice_pkg_buf_reserve_section(bld, 2))
+ goto ice_destroy_tunnel_err;
+
+ sect_rx = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
+ size);
+ if (!sect_rx)
+ goto ice_destroy_tunnel_err;
+ sect_rx->count = cpu_to_le16(1);
+
+ sect_tx = ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
+ size);
+ if (!sect_tx)
+ goto ice_destroy_tunnel_err;
+ sect_tx->count = cpu_to_le16(1);
+
+ /* copy original boost entry to update package buffer, one copy to Rx
+ * section, another copy to the Tx section
+ */
+ for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
+ if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
+ (all || hw->tnl.tbl[i].port == port)) {
+ memcpy(sect_rx->tcam + i, hw->tnl.tbl[i].boost_entry,
+ sizeof(*sect_rx->tcam));
+ memcpy(sect_tx->tcam + i, hw->tnl.tbl[i].boost_entry,
+ sizeof(*sect_tx->tcam));
+ hw->tnl.tbl[i].marked = true;
+ }
+
+ status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
+ if (!status)
+ for (i = 0; i < hw->tnl.count &&
+ i < ICE_TUNNEL_MAX_ENTRIES; i++)
+ if (hw->tnl.tbl[i].marked) {
+ hw->tnl.tbl[i].ref = 0;
+ hw->tnl.tbl[i].port = 0;
+ hw->tnl.tbl[i].in_use = false;
+ hw->tnl.tbl[i].marked = false;
+ }
+
+ice_destroy_tunnel_err:
+ ice_pkg_buf_free(hw, bld);
+
+ice_destroy_tunnel_end:
+ mutex_unlock(&hw->tnl_lock);
+
+ return status;
+}
+
 /* PTG Management */
 /**
@@ -1807,9 +2419,16 @@ ice_find_prof_id(struct ice_hw *hw, enum ice_block blk, struct ice_fv_word *fv, u8 *prof_id) { struct ice_es *es = &hw->blk[blk].es;
- u16 off, i;
+ u16 off;
+ u8 i;
+
+ /* For FD, we don't want to re-use an existing profile with the same
+ * field vector and mask. Doing so would cause rule interference.
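+ * Returning ICE_ERR_DOES_NOT_EXIST below makes the caller fall back to
+ * allocating a fresh profile ID for each FD profile.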
+ */
+ if (blk == ICE_BLK_FD)
+ return ICE_ERR_DOES_NOT_EXIST;
- for (i = 0; i < es->count; i++) {
+ for (i = 0; i < (u8)es->count; i++) {
 off = i * es->fvw;
 if (memcmp(&es->t[off], fv, es->fvw * sizeof(*fv)))
@@ -1830,6 +2449,9 @@ ice_find_prof_id(struct ice_hw *hw, enum ice_block blk, static bool ice_prof_id_rsrc_type(enum ice_block blk, u16 *rsrc_type) { switch (blk) {
+ case ICE_BLK_FD:
+ *rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID;
+ break;
 case ICE_BLK_RSS:
 *rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID;
 break;
@@ -1847,6 +2469,9 @@ static bool ice_prof_id_rsrc_type(enum ice_block blk, u16 *rsrc_type) static bool ice_tcam_ent_rsrc_type(enum ice_block blk, u16 *rsrc_type) { switch (blk) {
+ case ICE_BLK_FD:
+ *rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_TCAM;
+ break;
 case ICE_BLK_RSS:
 *rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM;
 break;
@@ -2290,6 +2915,12 @@ static void ice_free_flow_profs(struct ice_hw *hw, u8 blk_idx) mutex_lock(&hw->fl_profs_locks[blk_idx]); list_for_each_entry_safe(p, tmp, &hw->fl_profs[blk_idx], l_entry) {
+ struct ice_flow_entry *e, *t;
+
+ list_for_each_entry_safe(e, t, &p->entries, l_entry)
+ ice_flow_rem_entry(hw, (enum ice_block)blk_idx,
+ ICE_FLOW_ENTRY_HNDL(e));
+
 list_del(&p->l_entry);
 devm_kfree(ice_hw_to_dev(hw), p);
 }
@@ -2919,6 +3550,212 @@ error_tmp: }
 /**
+ * ice_update_fd_mask - set Flow Director Field Vector mask for a profile
+ * @hw: pointer to the HW struct
+ * @prof_id: profile ID
+ * @mask_sel: mask select
+ *
+ * This function enables any of the masks selected by the mask select parameter
+ * for the profile specified.
+ */
+static void ice_update_fd_mask(struct ice_hw *hw, u16 prof_id, u32 mask_sel)
+{
+ wr32(hw, GLQF_FDMASK_SEL(prof_id), mask_sel);
+
+ ice_debug(hw, ICE_DBG_INIT, "fd mask(%d): %x = %x\n", prof_id,
+ GLQF_FDMASK_SEL(prof_id), mask_sel);
+}
+
+struct ice_fd_src_dst_pair {
+ u8 prot_id;
+ u8 count;
+ u16 off;
+};
+
+static const struct ice_fd_src_dst_pair ice_fd_pairs[] = {
+ /* These are defined in pairs */
+ { ICE_PROT_IPV4_OF_OR_S, 2, 12 },
+ { ICE_PROT_IPV4_OF_OR_S, 2, 16 },
+
+ { ICE_PROT_IPV4_IL, 2, 12 },
+ { ICE_PROT_IPV4_IL, 2, 16 },
+
+ { ICE_PROT_IPV6_OF_OR_S, 8, 8 },
+ { ICE_PROT_IPV6_OF_OR_S, 8, 24 },
+
+ { ICE_PROT_IPV6_IL, 8, 8 },
+ { ICE_PROT_IPV6_IL, 8, 24 },
+
+ { ICE_PROT_TCP_IL, 1, 0 },
+ { ICE_PROT_TCP_IL, 1, 2 },
+
+ { ICE_PROT_UDP_OF, 1, 0 },
+ { ICE_PROT_UDP_OF, 1, 2 },
+
+ { ICE_PROT_UDP_IL_OR_S, 1, 0 },
+ { ICE_PROT_UDP_IL_OR_S, 1, 2 },
+
+ { ICE_PROT_SCTP_IL, 1, 0 },
+ { ICE_PROT_SCTP_IL, 1, 2 }
+};
+
+#define ICE_FD_SRC_DST_PAIR_COUNT ARRAY_SIZE(ice_fd_pairs)
+
+/**
+ * ice_update_fd_swap - set register appropriately for a FD FV extraction
+ * @hw: pointer to the HW struct
+ * @prof_id: profile ID
+ * @es: extraction sequence (length of array is determined by the block)
+ */
+static enum ice_status
+ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es)
+{
+ DECLARE_BITMAP(pair_list, ICE_FD_SRC_DST_PAIR_COUNT);
+ u8 pair_start[ICE_FD_SRC_DST_PAIR_COUNT] = { 0 };
+#define ICE_FD_FV_NOT_FOUND (-2)
+ s8 first_free = ICE_FD_FV_NOT_FOUND;
+ u8 used[ICE_MAX_FV_WORDS] = { 0 };
+ s8 orig_free, si;
+ u32 mask_sel = 0;
+ u8 i, j, k;
+
+ bitmap_zero(pair_list, ICE_FD_SRC_DST_PAIR_COUNT);
+
+ /* This code assumes that the Flow Director field vectors are assigned
+ * from the end of the FV indexes working towards the zero index, that
+ * only complete fields will be included and will be consecutive, and
+ * that there are no gaps between valid indexes.
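+ *
+ * For example (illustrative layout): if the top FV words hold an IPv4
+ * src/dst address pair plus a TCP dst port, first_free lands on the word
+ * just below them, and the missing paired TCP src port is appended there.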
+ */ + + /* Determine swap fields present */ + for (i = 0; i < hw->blk[ICE_BLK_FD].es.fvw; i++) { + /* Find the first free entry, assuming right to left population. + * This is where we can start adding additional pairs if needed. + */ + if (first_free == ICE_FD_FV_NOT_FOUND && es[i].prot_id != + ICE_PROT_INVALID) + first_free = i - 1; + + for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++) + if (es[i].prot_id == ice_fd_pairs[j].prot_id && + es[i].off == ice_fd_pairs[j].off) { + set_bit(j, pair_list); + pair_start[j] = i; + } + } + + orig_free = first_free; + + /* determine missing swap fields that need to be added */ + for (i = 0; i < ICE_FD_SRC_DST_PAIR_COUNT; i += 2) { + u8 bit1 = test_bit(i + 1, pair_list); + u8 bit0 = test_bit(i, pair_list); + + if (bit0 ^ bit1) { + u8 index; + + /* add the appropriate 'paired' entry */ + if (!bit0) + index = i; + else + index = i + 1; + + /* check for room */ + if (first_free + 1 < (s8)ice_fd_pairs[index].count) + return ICE_ERR_MAX_LIMIT; + + /* place in extraction sequence */ + for (k = 0; k < ice_fd_pairs[index].count; k++) { + es[first_free - k].prot_id = + ice_fd_pairs[index].prot_id; + es[first_free - k].off = + ice_fd_pairs[index].off + (k * 2); + + if (k > first_free) + return ICE_ERR_OUT_OF_RANGE; + + /* keep track of non-relevant fields */ + mask_sel |= BIT(first_free - k); + } + + pair_start[index] = first_free; + first_free -= ice_fd_pairs[index].count; + } + } + + /* fill in the swap array */ + si = hw->blk[ICE_BLK_FD].es.fvw - 1; + while (si >= 0) { + u8 indexes_used = 1; + + /* assume flat at this index */ +#define ICE_SWAP_VALID 0x80 + used[si] = si | ICE_SWAP_VALID; + + if (orig_free == ICE_FD_FV_NOT_FOUND || si <= orig_free) { + si -= indexes_used; + continue; + } + + /* check for a swap location */ + for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++) + if (es[si].prot_id == ice_fd_pairs[j].prot_id && + es[si].off == ice_fd_pairs[j].off) { + u8 idx; + + /* determine the appropriate matching field */ + idx = j + ((j % 2) ? 
-1 : 1);
+
+ indexes_used = ice_fd_pairs[idx].count;
+ for (k = 0; k < indexes_used; k++) {
+ used[si - k] = (pair_start[idx] - k) |
+ ICE_SWAP_VALID;
+ }
+
+ break;
+ }
+
+ si -= indexes_used;
+ }
+
+ /* for each set of 4 swap and 4 inset indexes, write the appropriate
+ * register
+ */
+ for (j = 0; j < hw->blk[ICE_BLK_FD].es.fvw / 4; j++) {
+ u32 raw_swap = 0;
+ u32 raw_in = 0;
+
+ for (k = 0; k < 4; k++) {
+ u8 idx;
+
+ idx = (j * 4) + k;
+ if (used[idx] && !(mask_sel & BIT(idx))) {
+ raw_swap |= used[idx] << (k * BITS_PER_BYTE);
+#define ICE_INSET_DFLT 0x9f
+ raw_in |= ICE_INSET_DFLT << (k * BITS_PER_BYTE);
+ }
+ }
+
+ /* write the appropriate swap register set */
+ wr32(hw, GLQF_FDSWAP(prof_id, j), raw_swap);
+
+ ice_debug(hw, ICE_DBG_INIT, "swap wr(%d, %d): %x = %08x\n",
+ prof_id, j, GLQF_FDSWAP(prof_id, j), raw_swap);
+
+ /* write the appropriate inset register set */
+ wr32(hw, GLQF_FDINSET(prof_id, j), raw_in);
+
+ ice_debug(hw, ICE_DBG_INIT, "inset wr(%d, %d): %x = %08x\n",
+ prof_id, j, GLQF_FDINSET(prof_id, j), raw_in);
+ }
+
+ /* initially clear the mask select for this profile */
+ ice_update_fd_mask(hw, prof_id, 0);
+
+ return 0;
+}
+
+/**
 * ice_add_prof - add profile
 * @hw: pointer to the HW struct
 * @blk: hardware block
@@ -2939,7 +3776,7 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT); struct ice_prof_map *prof; enum ice_status status;
- u32 byte = 0;
+ u8 byte = 0;
 u8 prof_id;
 bitmap_zero(ptgs_used, ICE_XLT1_CNT);
@@ -2953,6 +3790,18 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], status = ice_alloc_prof_id(hw, blk, &prof_id); if (status) goto err_ice_add_prof;
+ if (blk == ICE_BLK_FD) {
+ /* For Flow Director block, the extraction sequence may
+ * need to be altered in the case where there are paired
+ * fields that have no match. This is necessary because
+ * for Flow Director, src and dest fields need to be paired
+ * for filter programming and these values are swapped
+ * during Tx.
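+ * For example, a filter extracting only the TCP dst port still gets the
+ * paired src port added to the extraction sequence so the swap and inset
+ * registers can mirror src/dst for the Tx direction.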
+ */ + status = ice_update_fd_swap(hw, prof_id, es); + if (status) + goto err_ice_add_prof; + } /* and write new es */ ice_write_es(hw, blk, prof_id, es); @@ -2974,7 +3823,7 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], /* build list of ptgs */ while (bytes && prof->ptg_cnt < ICE_MAX_PTG_PER_PROFILE) { - u32 bit; + u8 bit; if (!ptypes[byte]) { bytes--; @@ -3008,7 +3857,7 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], break; /* nothing left in byte, then exit */ - m = ~((1 << (bit + 1)) - 1); + m = ~(u8)((1 << (bit + 1)) - 1); if (!(ptypes[byte] & m)) break; } @@ -3705,8 +4554,10 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, t->tcam[i].prof_id, t->tcam[i].ptg, vsig, 0, 0, vl_msk, dc_msk, nm_msk); - if (status) + if (status) { + devm_kfree(ice_hw_to_dev(hw), p); goto err_ice_add_prof_id_vsig; + } /* log change */ list_add(&p->list_entry, chg); diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h index c7b5e1a6ea2b..568ea519af51 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h @@ -18,6 +18,14 @@ #define ICE_PKG_CNT 4 +bool +ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type, + u16 *port); +enum ice_status +ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port); +enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all); +bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index); + enum ice_status ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], struct ice_fv_word *es); diff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h index 0fb3fe3ff3ea..a6f391eac8ff 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_type.h +++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h @@ -20,7 +20,7 @@ struct ice_fv { /* Package and segment headers and tables */ struct ice_pkg_hdr { - struct ice_pkg_ver format_ver; + struct ice_pkg_ver pkg_format_ver; __le32 seg_count; __le32 seg_offset[1]; }; @@ -30,9 +30,9 @@ struct ice_generic_seg_hdr { #define SEGMENT_TYPE_METADATA 0x00000001 #define SEGMENT_TYPE_ICE 0x00000010 __le32 seg_type; - struct ice_pkg_ver seg_ver; + struct ice_pkg_ver seg_format_ver; __le32 seg_size; - char seg_name[ICE_PKG_NAME_SIZE]; + char seg_id[ICE_PKG_NAME_SIZE]; }; /* ice specific segment */ @@ -75,7 +75,7 @@ struct ice_buf_table { struct ice_global_metadata_seg { struct ice_generic_seg_hdr hdr; struct ice_pkg_ver pkg_ver; - __le32 track_id; + __le32 rsvd; char pkg_name[ICE_PKG_NAME_SIZE]; }; @@ -149,6 +149,7 @@ struct ice_buf_hdr { #define ICE_SID_CDID_REDIR_RSS 48 #define ICE_SID_RXPARSER_BOOST_TCAM 56 +#define ICE_SID_TXPARSER_BOOST_TCAM 66 #define ICE_SID_XLT0_PE 80 #define ICE_SID_XLT_KEY_BUILDER_PE 81 @@ -291,6 +292,38 @@ struct ice_pkg_enum { void *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset); }; +/* Tunnel enabling */ + +enum ice_tunnel_type { + TNL_VXLAN = 0, + TNL_GENEVE, + TNL_LAST = 0xFF, + TNL_ALL = 0xFF, +}; + +struct ice_tunnel_type_scan { + enum ice_tunnel_type type; + const char *label_prefix; +}; + +struct ice_tunnel_entry { + enum ice_tunnel_type type; + u16 boost_addr; + u16 port; + u16 ref; + struct ice_boost_tcam_entry *boost_entry; + u8 valid; + u8 in_use; + u8 marked; +}; + +#define ICE_TUNNEL_MAX_ENTRIES 16 + +struct ice_tunnel_table { + struct ice_tunnel_entry tbl[ICE_TUNNEL_MAX_ENTRIES]; + u16 
count; +}; + struct ice_pkg_es { __le16 count; __le16 offset; diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c index 3de862a3c789..d74e5290677f 100644 --- a/drivers/net/ethernet/intel/ice/ice_flow.c +++ b/drivers/net/ethernet/intel/ice/ice_flow.c @@ -42,7 +42,10 @@ struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = { ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, sizeof(__be16)), /* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, sizeof(__be16)), - + /* GRE */ + /* ICE_FLOW_FIELD_IDX_GRE_KEYID */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, + sizeof_field(struct gre_full_hdr, key)), }; /* Bitmaps indicating relevant packet types for a particular protocol header @@ -134,6 +137,18 @@ static const u32 ice_ptypes_sctp_il[] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000, }; +/* Packet types for packets with an Outermost/First GRE header */ +static const u32 ice_ptypes_gre_of[] = { + 0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000, + 0x0000017E, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + /* Manage parameters and info. used during the creation of a flow profile */ struct ice_flow_prof_params { enum ice_block blk; @@ -178,6 +193,40 @@ ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt) return 0; } +/* Sizes of fixed known protocol headers without header options */ +#define ICE_FLOW_PROT_HDR_SZ_MAC 14 +#define ICE_FLOW_PROT_HDR_SZ_IPV4 20 +#define ICE_FLOW_PROT_HDR_SZ_IPV6 40 +#define ICE_FLOW_PROT_HDR_SZ_TCP 20 +#define ICE_FLOW_PROT_HDR_SZ_UDP 8 +#define ICE_FLOW_PROT_HDR_SZ_SCTP 12 + +/** + * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers + * @params: information about the flow to be processed + * @seg: index of packet segment whose header size is to be determined + */ +static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg) +{ + u16 sz = ICE_FLOW_PROT_HDR_SZ_MAC; + + /* L3 headers */ + if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4) + sz += ICE_FLOW_PROT_HDR_SZ_IPV4; + else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6) + sz += ICE_FLOW_PROT_HDR_SZ_IPV6; + + /* L4 headers */ + if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP) + sz += ICE_FLOW_PROT_HDR_SZ_TCP; + else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP) + sz += ICE_FLOW_PROT_HDR_SZ_UDP; + else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP) + sz += ICE_FLOW_PROT_HDR_SZ_SCTP; + + return sz; +} + /** * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments * @params: information about the flow to be processed @@ -225,6 +274,12 @@ ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params) src = (const unsigned long *)ice_ptypes_sctp_il; bitmap_and(params->ptypes, params->ptypes, src, ICE_FLOW_PTYPE_MAX); + } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) { + if (!i) { + src = (const unsigned long *)ice_ptypes_gre_of; + bitmap_and(params->ptypes, params->ptypes, + src, ICE_FLOW_PTYPE_MAX); + } } } @@ -275,6 +330,9 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params, case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT: prot_id = ICE_PROT_SCTP_IL; break; + case ICE_FLOW_FIELD_IDX_GRE_KEYID: + prot_id = ICE_PROT_GRE_OF; + 
break;
 default:
 return ICE_ERR_NOT_IMPL;
 }
@@ -324,6 +382,81 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params, }
 /**
+ * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
+ * @hw: pointer to the HW struct
+ * @params: information about the flow to be processed
+ * @seg: index of packet segment whose raw fields are to be extracted
+ */
+static enum ice_status
+ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
+ u8 seg)
+{
+ u16 fv_words;
+ u16 hdrs_sz;
+ u8 i;
+
+ if (!params->prof->segs[seg].raws_cnt)
+ return 0;
+
+ if (params->prof->segs[seg].raws_cnt >
+ ARRAY_SIZE(params->prof->segs[seg].raws))
+ return ICE_ERR_MAX_LIMIT;
+
+ /* Offsets within the segment headers are not supported */
+ hdrs_sz = ice_flow_calc_seg_sz(params, seg);
+ if (!hdrs_sz)
+ return ICE_ERR_PARAM;
+
+ fv_words = hw->blk[params->blk].es.fvw;
+
+ for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
+ struct ice_flow_seg_fld_raw *raw;
+ u16 off, cnt, j;
+
+ raw = &params->prof->segs[seg].raws[i];
+
+ /* Storing extraction information */
+ raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
+ raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
+ ICE_FLOW_FV_EXTRACT_SZ;
+ raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
+ BITS_PER_BYTE;
+ raw->info.xtrct.idx = params->es_cnt;
+
+ /* Determine the number of field vector entries this raw field
+ * consumes.
+ */
+ cnt = DIV_ROUND_UP(raw->info.xtrct.disp +
+ (raw->info.src.last * BITS_PER_BYTE),
+ (ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE));
+ off = raw->info.xtrct.off;
+ for (j = 0; j < cnt; j++) {
+ u16 idx;
+
+ /* Make sure the number of extraction sequence entries
+ * required does not exceed the block's capability
+ */
+ if (params->es_cnt >= hw->blk[params->blk].es.count ||
+ params->es_cnt >= ICE_MAX_FV_WORDS)
+ return ICE_ERR_MAX_LIMIT;
+
+ /* some blocks require a reversed field vector layout */
+ if (hw->blk[params->blk].es.reverse)
+ idx = fv_words - params->es_cnt - 1;
+ else
+ idx = params->es_cnt;
+
+ params->es[idx].prot_id = raw->info.xtrct.prot_id;
+ params->es[idx].off = off;
+ params->es_cnt++;
+ off += ICE_FLOW_FV_EXTRACT_SZ;
+ }
+ }
+
+ return 0;
+}
+
+/**
 * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
 * @hw: pointer to the HW struct
 * @params: information about the flow to be processed
@@ -349,6 +482,11 @@ ice_flow_create_xtrct_seq(struct ice_hw *hw, if (status) return status; }
+
+ /* Process raw matching bytes */
+ status = ice_flow_xtract_raws(hw, params, i);
+ if (status)
+ return status;
 }
 return status;
@@ -373,10 +511,8 @@ ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params) return status; switch (params->blk) {
+ case ICE_BLK_FD:
 case ICE_BLK_RSS:
- /* Only header information is provided for RSS configuration.
- * No further processing is needed.
- */ status = 0; break; default: @@ -458,6 +594,43 @@ ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id) } /** + * ice_dealloc_flow_entry - Deallocate flow entry memory + * @hw: pointer to the HW struct + * @entry: flow entry to be removed + */ +static void +ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry) +{ + if (!entry) + return; + + if (entry->entry) + devm_kfree(ice_hw_to_dev(hw), entry->entry); + + devm_kfree(ice_hw_to_dev(hw), entry); +} + +/** + * ice_flow_rem_entry_sync - Remove a flow entry + * @hw: pointer to the HW struct + * @blk: classification stage + * @entry: flow entry to be removed + */ +static enum ice_status +ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block __always_unused blk, + struct ice_flow_entry *entry) +{ + if (!entry) + return ICE_ERR_BAD_PTR; + + list_del(&entry->l_entry); + + ice_dealloc_flow_entry(hw, entry); + + return 0; +} + +/** * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields * @hw: pointer to the HW struct * @blk: classification stage @@ -544,6 +717,21 @@ ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk, { enum ice_status status; + /* Remove all remaining flow entries before removing the flow profile */ + if (!list_empty(&prof->entries)) { + struct ice_flow_entry *e, *t; + + mutex_lock(&prof->entries_lock); + + list_for_each_entry_safe(e, t, &prof->entries, l_entry) { + status = ice_flow_rem_entry_sync(hw, blk, e); + if (status) + break; + } + + mutex_unlock(&prof->entries_lock); + } + /* Remove all hardware profiles associated with this flow profile */ status = ice_rem_prof(hw, blk, prof->id); if (!status) { @@ -629,7 +817,7 @@ ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk, * @segs_cnt: number of packet segments provided * @prof: stores the returned flow profile added */ -static enum ice_status +enum ice_status ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir, u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt, struct ice_flow_prof **prof) @@ -667,7 +855,7 @@ ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir, * @blk: the block for which the flow profile is to be removed * @prof_id: unique ID of the flow profile to be removed */ -static enum ice_status +enum ice_status ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id) { struct ice_flow_prof *prof; @@ -691,6 +879,113 @@ out: } /** + * ice_flow_add_entry - Add a flow entry + * @hw: pointer to the HW struct + * @blk: classification stage + * @prof_id: ID of the profile to add a new flow entry to + * @entry_id: unique ID to identify this flow entry + * @vsi_handle: software VSI handle for the flow entry + * @prio: priority of the flow entry + * @data: pointer to a data buffer containing flow entry's match values/masks + * @entry_h: pointer to buffer that receives the new flow entry's handle + */ +enum ice_status +ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id, + u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio, + void *data, u64 *entry_h) +{ + struct ice_flow_entry *e = NULL; + struct ice_flow_prof *prof; + enum ice_status status; + + /* No flow entry data is expected for RSS */ + if (!entry_h || (!data && blk != ICE_BLK_RSS)) + return ICE_ERR_BAD_PTR; + + if (!ice_is_vsi_valid(hw, vsi_handle)) + return ICE_ERR_PARAM; + + mutex_lock(&hw->fl_profs_locks[blk]); + + prof = ice_flow_find_prof_id(hw, blk, prof_id); + if (!prof) { + status = ICE_ERR_DOES_NOT_EXIST; + } else { + /* Allocate 
memory for the entry being added and associate
+ * the VSI with the found flow profile
+ */
+ e = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*e), GFP_KERNEL);
+ if (!e)
+ status = ICE_ERR_NO_MEMORY;
+ else
+ status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
+ }
+
+ mutex_unlock(&hw->fl_profs_locks[blk]);
+ if (status)
+ goto out;
+
+ e->id = entry_id;
+ e->vsi_handle = vsi_handle;
+ e->prof = prof;
+ e->priority = prio;
+
+ switch (blk) {
+ case ICE_BLK_FD:
+ case ICE_BLK_RSS:
+ break;
+ default:
+ status = ICE_ERR_NOT_IMPL;
+ goto out;
+ }
+
+ mutex_lock(&prof->entries_lock);
+ list_add(&e->l_entry, &prof->entries);
+ mutex_unlock(&prof->entries_lock);
+
+ *entry_h = ICE_FLOW_ENTRY_HNDL(e);
+
+out:
+ if (status && e) {
+ if (e->entry)
+ devm_kfree(ice_hw_to_dev(hw), e->entry);
+ devm_kfree(ice_hw_to_dev(hw), e);
+ }
+
+ return status;
+}
+
+/**
+ * ice_flow_rem_entry - Remove a flow entry
+ * @hw: pointer to the HW struct
+ * @blk: classification stage
+ * @entry_h: handle to the flow entry to be removed
+ */
+enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
+ u64 entry_h)
+{
+ struct ice_flow_entry *entry;
+ struct ice_flow_prof *prof;
+ enum ice_status status = 0;
+
+ if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
+ return ICE_ERR_PARAM;
+
+ entry = ICE_FLOW_ENTRY_PTR(entry_h);
+
+ /* Retain the pointer to the flow profile as the entry will be freed */
+ prof = entry->prof;
+
+ if (prof) {
+ mutex_lock(&prof->entries_lock);
+ status = ice_flow_rem_entry_sync(hw, blk, entry);
+ mutex_unlock(&prof->entries_lock);
+ }
+
+ return status;
+}
+
+/**
 * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
 * @seg: packet segment the field being set belongs to
 * @fld: field to be set
@@ -752,7 +1047,7 @@ ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld, * create the content of a match entry. This function should only be used for * fixed-size data structures. */
-static void
+void
 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
 u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
 {
@@ -762,6 +1057,42 @@ ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld, ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc); }
+/**
+ * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
+ * @seg: packet segment the field being set belongs to
+ * @off: offset of the raw field from the beginning of the segment in bytes
+ * @len: length of the raw pattern to be matched
+ * @val_loc: location of the value to match from entry's input buffer
+ * @mask_loc: location of mask value from entry's input buffer
+ *
+ * This function specifies the offset of the raw field to be matched from the
+ * beginning of the specified packet segment, and the locations, in the form of
+ * byte offsets from the start of the input buffer for a flow entry, from where
+ * the value to match and the mask value are to be extracted. These locations
+ * are then stored in the flow profile. When adding flow entries to the
+ * associated flow profile, these locations can be used to quickly extract the
+ * values to create the content of a match entry. This function should only be
+ * used for fixed-size data structures.
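+ *
+ * Usage sketch (illustrative offsets): to match two raw bytes located four
+ * bytes into the segment, with the match value at byte 0 and its mask at
+ * byte 2 of the entry's input buffer:
+ *
+ *	ice_flow_add_fld_raw(seg, 4, 2, 0, 2);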
+ */ +void +ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len, + u16 val_loc, u16 mask_loc) +{ + if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) { + seg->raws[seg->raws_cnt].off = off; + seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE; + seg->raws[seg->raws_cnt].info.src.val = val_loc; + seg->raws[seg->raws_cnt].info.src.mask = mask_loc; + /* The "last" field is used to store the length of the field */ + seg->raws[seg->raws_cnt].info.src.last = len; + } + + /* Overflows of "raws" will be handled as an error condition later in + * the flow when this information is processed. + */ + seg->raws_cnt++; +} + #define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \ (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6) @@ -945,6 +1276,7 @@ ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof) #define ICE_FLOW_PROF_ENCAP_M (BIT_ULL(ICE_FLOW_PROF_ENCAP_S)) #define ICE_RSS_OUTER_HEADERS 1 +#define ICE_RSS_INNER_HEADERS 2 /* Flow profile ID format: * [0:31] - Packet match fields @@ -1085,6 +1417,9 @@ ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, mutex_lock(&hw->rss_locks); status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs, ICE_RSS_OUTER_HEADERS); + if (!status) + status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, + addl_hdrs, ICE_RSS_INNER_HEADERS); mutex_unlock(&hw->rss_locks); return status; @@ -1238,6 +1573,12 @@ enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle) ICE_RSS_OUTER_HEADERS); if (status) break; + status = ice_add_rss_cfg_sync(hw, vsi_handle, + r->hashed_flds, + r->packet_hdr, + ICE_RSS_INNER_HEADERS); + if (status) + break; } } mutex_unlock(&hw->rss_locks); diff --git a/drivers/net/ethernet/intel/ice/ice_flow.h b/drivers/net/ethernet/intel/ice/ice_flow.h index 5558627bd5eb..3913da2116d2 100644 --- a/drivers/net/ethernet/intel/ice/ice_flow.h +++ b/drivers/net/ethernet/intel/ice/ice_flow.h @@ -43,6 +43,7 @@ enum ice_flow_seg_hdr { ICE_FLOW_SEG_HDR_TCP = 0x00000040, ICE_FLOW_SEG_HDR_UDP = 0x00000080, ICE_FLOW_SEG_HDR_SCTP = 0x00000100, + ICE_FLOW_SEG_HDR_GRE = 0x00000200, }; enum ice_flow_field { @@ -58,6 +59,8 @@ enum ice_flow_field { ICE_FLOW_FIELD_IDX_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT, + /* GRE */ + ICE_FLOW_FIELD_IDX_GRE_KEYID, /* The total number of enums must not exceed 64 */ ICE_FLOW_FIELD_IDX_MAX }; @@ -125,6 +128,7 @@ enum ice_flow_priority { }; #define ICE_FLOW_SEG_MAX 2 +#define ICE_FLOW_SEG_RAW_FLD_MAX 2 #define ICE_FLOW_FV_EXTRACT_SZ 2 #define ICE_FLOW_SET_HDRS(seg, val) ((seg)->hdrs |= (u32)(val)) @@ -161,14 +165,38 @@ struct ice_flow_fld_info { struct ice_flow_seg_xtrct xtrct; }; +struct ice_flow_seg_fld_raw { + struct ice_flow_fld_info info; + u16 off; /* Offset from the start of the segment */ +}; + struct ice_flow_seg_info { u32 hdrs; /* Bitmask indicating protocol headers present */ u64 match; /* Bitmask indicating header fields to be matched */ u64 range; /* Bitmask indicating header fields matched as ranges */ struct ice_flow_fld_info fields[ICE_FLOW_FIELD_IDX_MAX]; + + u8 raws_cnt; /* Number of raw fields to be matched */ + struct ice_flow_seg_fld_raw raws[ICE_FLOW_SEG_RAW_FLD_MAX]; }; +/* This structure describes a flow entry, and is tracked only in this file */ +struct ice_flow_entry { + struct list_head l_entry; + + u64 id; + struct ice_flow_prof *prof; + /* Flow entry's content */ + void *entry; + enum ice_flow_priority priority; + u16 vsi_handle; + u16 entry_sz; +}; + +#define ICE_FLOW_ENTRY_HNDL(e) ((u64)e) +#define 
ICE_FLOW_ENTRY_PTR(h) ((struct ice_flow_entry *)(h)) + struct ice_flow_prof { struct list_head l_entry; @@ -194,7 +222,24 @@ struct ice_rss_cfg { u32 packet_hdr; }; -enum ice_status ice_flow_rem_entry(struct ice_hw *hw, u64 entry_h); +enum ice_status +ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir, + u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt, + struct ice_flow_prof **prof); +enum ice_status +ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id); +enum ice_status +ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id, + u64 entry_id, u16 vsi, enum ice_flow_priority prio, + void *data, u64 *entry_h); +enum ice_status +ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_h); +void +ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld, + u16 val_loc, u16 mask_loc, u16 last_loc, bool range); +void +ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len, + u16 val_loc, u16 mask_loc); void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle); enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle); enum ice_status diff --git a/drivers/net/ethernet/intel/ice/ice_fltr.c b/drivers/net/ethernet/intel/ice/ice_fltr.c new file mode 100644 index 000000000000..2418d4fff037 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_fltr.c @@ -0,0 +1,397 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2018-2020, Intel Corporation. */ + +#include "ice.h" +#include "ice_fltr.h" + +/** + * ice_fltr_free_list - free filter lists helper + * @dev: pointer to the device struct + * @h: pointer to the list head to be freed + * + * Helper function to free filter lists previously created using + * ice_fltr_add_mac_to_list + */ +void ice_fltr_free_list(struct device *dev, struct list_head *h) +{ + struct ice_fltr_list_entry *e, *tmp; + + list_for_each_entry_safe(e, tmp, h, list_entry) { + list_del(&e->list_entry); + devm_kfree(dev, e); + } +} + +/** + * ice_fltr_add_entry_to_list - allocate and add filter entry to list + * @dev: pointer to device needed by alloc function + * @info: filter info struct that gets added to the passed in list + * @list: pointer to the list which contains MAC filters entry + */ +static int +ice_fltr_add_entry_to_list(struct device *dev, struct ice_fltr_info *info, + struct list_head *list) +{ + struct ice_fltr_list_entry *entry; + + entry = devm_kzalloc(dev, sizeof(*entry), GFP_ATOMIC); + if (!entry) + return -ENOMEM; + + entry->fltr_info = *info; + + INIT_LIST_HEAD(&entry->list_entry); + list_add(&entry->list_entry, list); + + return 0; +} + +/** + * ice_fltr_add_mac_list - add list of MAC filters + * @vsi: pointer to VSI struct + * @list: list of filters + */ +enum ice_status +ice_fltr_add_mac_list(struct ice_vsi *vsi, struct list_head *list) +{ + return ice_add_mac(&vsi->back->hw, list); +} + +/** + * ice_fltr_remove_mac_list - remove list of MAC filters + * @vsi: pointer to VSI struct + * @list: list of filters + */ +enum ice_status +ice_fltr_remove_mac_list(struct ice_vsi *vsi, struct list_head *list) +{ + return ice_remove_mac(&vsi->back->hw, list); +} + +/** + * ice_fltr_add_vlan_list - add list of VLAN filters + * @vsi: pointer to VSI struct + * @list: list of filters + */ +static enum ice_status +ice_fltr_add_vlan_list(struct ice_vsi *vsi, struct list_head *list) +{ + return ice_add_vlan(&vsi->back->hw, list); +} + +/** + * ice_fltr_remove_vlan_list - remove list of VLAN filters + * @vsi: pointer to VSI struct + * @list: list of filters 
+ */ +static enum ice_status +ice_fltr_remove_vlan_list(struct ice_vsi *vsi, struct list_head *list) +{ + return ice_remove_vlan(&vsi->back->hw, list); +} + +/** + * ice_fltr_add_eth_list - add list of ethertype filters + * @vsi: pointer to VSI struct + * @list: list of filters + */ +static enum ice_status +ice_fltr_add_eth_list(struct ice_vsi *vsi, struct list_head *list) +{ + return ice_add_eth_mac(&vsi->back->hw, list); +} + +/** + * ice_fltr_remove_eth_list - remove list of ethertype filters + * @vsi: pointer to VSI struct + * @list: list of filters + */ +static enum ice_status +ice_fltr_remove_eth_list(struct ice_vsi *vsi, struct list_head *list) +{ + return ice_remove_eth_mac(&vsi->back->hw, list); +} + +/** + * ice_fltr_remove_all - remove all filters associated with VSI + * @vsi: pointer to VSI struct + */ +void ice_fltr_remove_all(struct ice_vsi *vsi) +{ + ice_remove_vsi_fltr(&vsi->back->hw, vsi->idx); +} + +/** + * ice_fltr_add_mac_to_list - add MAC filter info to existing list + * @vsi: pointer to VSI struct + * @list: list to add filter info to + * @mac: MAC address to add + * @action: filter action + */ +int +ice_fltr_add_mac_to_list(struct ice_vsi *vsi, struct list_head *list, + const u8 *mac, enum ice_sw_fwd_act_type action) +{ + struct ice_fltr_info info = { 0 }; + + info.flag = ICE_FLTR_TX; + info.src_id = ICE_SRC_ID_VSI; + info.lkup_type = ICE_SW_LKUP_MAC; + info.fltr_act = action; + info.vsi_handle = vsi->idx; + + ether_addr_copy(info.l_data.mac.mac_addr, mac); + + return ice_fltr_add_entry_to_list(ice_pf_to_dev(vsi->back), &info, + list); +} + +/** + * ice_fltr_add_vlan_to_list - add VLAN filter info to existing list + * @vsi: pointer to VSI struct + * @list: list to add filter info to + * @vlan_id: VLAN ID to add + * @action: filter action + */ +static int +ice_fltr_add_vlan_to_list(struct ice_vsi *vsi, struct list_head *list, + u16 vlan_id, enum ice_sw_fwd_act_type action) +{ + struct ice_fltr_info info = { 0 }; + + info.flag = ICE_FLTR_TX; + info.src_id = ICE_SRC_ID_VSI; + info.lkup_type = ICE_SW_LKUP_VLAN; + info.fltr_act = action; + info.vsi_handle = vsi->idx; + info.l_data.vlan.vlan_id = vlan_id; + + return ice_fltr_add_entry_to_list(ice_pf_to_dev(vsi->back), &info, + list); +} + +/** + * ice_fltr_add_eth_to_list - add ethertype filter info to existing list + * @vsi: pointer to VSI struct + * @list: list to add filter info to + * @ethertype: ethertype of packet that matches filter + * @flag: filter direction, Tx or Rx + * @action: filter action + */ +static int +ice_fltr_add_eth_to_list(struct ice_vsi *vsi, struct list_head *list, + u16 ethertype, u16 flag, + enum ice_sw_fwd_act_type action) +{ + struct ice_fltr_info info = { 0 }; + + info.flag = flag; + info.lkup_type = ICE_SW_LKUP_ETHERTYPE; + info.fltr_act = action; + info.vsi_handle = vsi->idx; + info.l_data.ethertype_mac.ethertype = ethertype; + + if (flag == ICE_FLTR_TX) + info.src_id = ICE_SRC_ID_VSI; + else + info.src_id = ICE_SRC_ID_LPORT; + + return ice_fltr_add_entry_to_list(ice_pf_to_dev(vsi->back), &info, + list); +} + +/** + * ice_fltr_prepare_mac - add or remove MAC rule + * @vsi: pointer to VSI struct + * @mac: MAC address to add + * @action: action to be performed on filter match + * @mac_action: pointer to add or remove MAC function + */ +static enum ice_status +ice_fltr_prepare_mac(struct ice_vsi *vsi, const u8 *mac, + enum ice_sw_fwd_act_type action, + enum ice_status (*mac_action)(struct ice_vsi *, + struct list_head *)) +{ + enum ice_status result; + LIST_HEAD(tmp_list); + + if
(ice_fltr_add_mac_to_list(vsi, &tmp_list, mac, action)) { + ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list); + return ICE_ERR_NO_MEMORY; + } + + result = mac_action(vsi, &tmp_list); + ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list); + return result; +} + +/** + * ice_fltr_prepare_mac_and_broadcast - add or remove MAC and broadcast filter + * @vsi: pointer to VSI struct + * @mac: MAC address to add + * @action: action to be performed on filter match + * @mac_action: pointer to add or remove MAC function + */ +static enum ice_status +ice_fltr_prepare_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac, + enum ice_sw_fwd_act_type action, + enum ice_status(*mac_action) + (struct ice_vsi *, struct list_head *)) +{ + u8 broadcast[ETH_ALEN]; + enum ice_status result; + LIST_HEAD(tmp_list); + + eth_broadcast_addr(broadcast); + if (ice_fltr_add_mac_to_list(vsi, &tmp_list, mac, action) || + ice_fltr_add_mac_to_list(vsi, &tmp_list, broadcast, action)) { + ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list); + return ICE_ERR_NO_MEMORY; + } + + result = mac_action(vsi, &tmp_list); + ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list); + return result; +} + +/** + * ice_fltr_prepare_vlan - add or remove VLAN filter + * @vsi: pointer to VSI struct + * @vlan_id: VLAN ID to add + * @action: action to be performed on filter match + * @vlan_action: pointer to add or remove VLAN function + */ +static enum ice_status +ice_fltr_prepare_vlan(struct ice_vsi *vsi, u16 vlan_id, + enum ice_sw_fwd_act_type action, + enum ice_status (*vlan_action)(struct ice_vsi *, + struct list_head *)) +{ + enum ice_status result; + LIST_HEAD(tmp_list); + + if (ice_fltr_add_vlan_to_list(vsi, &tmp_list, vlan_id, action)) + return ICE_ERR_NO_MEMORY; + + result = vlan_action(vsi, &tmp_list); + ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list); + return result; +} + +/** + * ice_fltr_prepare_eth - add or remove ethertype filter + * @vsi: pointer to VSI struct + * @ethertype: ethertype of packet to be filtered + * @flag: direction of packet, Tx or Rx + * @action: action to be performed on filter match + * @eth_action: pointer to add or remove ethertype function + */ +static enum ice_status +ice_fltr_prepare_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag, + enum ice_sw_fwd_act_type action, + enum ice_status (*eth_action)(struct ice_vsi *, + struct list_head *)) +{ + enum ice_status result; + LIST_HEAD(tmp_list); + + if (ice_fltr_add_eth_to_list(vsi, &tmp_list, ethertype, flag, action)) + return ICE_ERR_NO_MEMORY; + + result = eth_action(vsi, &tmp_list); + ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list); + return result; +} + +/** + * ice_fltr_add_mac - add single MAC filter + * @vsi: pointer to VSI struct + * @mac: MAC to add + * @action: action to be performed on filter match + */ +enum ice_status ice_fltr_add_mac(struct ice_vsi *vsi, const u8 *mac, + enum ice_sw_fwd_act_type action) +{ + return ice_fltr_prepare_mac(vsi, mac, action, ice_fltr_add_mac_list); +} + +/** + * ice_fltr_add_mac_and_broadcast - add single MAC and broadcast + * @vsi: pointer to VSI struct + * @mac: MAC to add + * @action: action to be performed on filter match + */ +enum ice_status +ice_fltr_add_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac, + enum ice_sw_fwd_act_type action) +{ + return ice_fltr_prepare_mac_and_broadcast(vsi, mac, action, + ice_fltr_add_mac_list); +} + +/** + * ice_fltr_remove_mac - remove MAC filter + * @vsi: pointer to VSI struct + * @mac: filter MAC to remove + * @action: action to remove + */ 
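+/* A usage sketch for these single-filter wrappers (illustrative only, not
+ * part of the original patch; assumes a VSI with a registered netdev):
+ *
+ *	if (ice_fltr_add_mac(vsi, vsi->netdev->dev_addr, ICE_FWD_TO_VSI))
+ *		netdev_err(vsi->netdev, "failed to add MAC filter\n");
+ */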
+enum ice_status ice_fltr_remove_mac(struct ice_vsi *vsi, const u8 *mac, + enum ice_sw_fwd_act_type action) +{ + return ice_fltr_prepare_mac(vsi, mac, action, ice_fltr_remove_mac_list); +} + +/** + * ice_fltr_add_vlan - add single VLAN filter + * @vsi: pointer to VSI struct + * @vlan_id: VLAN ID to add + * @action: action to be performed on filter match + */ +enum ice_status ice_fltr_add_vlan(struct ice_vsi *vsi, u16 vlan_id, + enum ice_sw_fwd_act_type action) +{ + return ice_fltr_prepare_vlan(vsi, vlan_id, action, + ice_fltr_add_vlan_list); +} + +/** + * ice_fltr_remove_vlan - remove VLAN filter + * @vsi: pointer to VSI struct + * @vlan_id: filter VLAN to remove + * @action: action to remove + */ +enum ice_status ice_fltr_remove_vlan(struct ice_vsi *vsi, u16 vlan_id, + enum ice_sw_fwd_act_type action) +{ + return ice_fltr_prepare_vlan(vsi, vlan_id, action, + ice_fltr_remove_vlan_list); +} + +/** + * ice_fltr_add_eth - add specific ethertype filter + * @vsi: pointer to VSI struct + * @ethertype: ethertype of filter + * @flag: direction of packet to be filtered, Tx or Rx + * @action: action to be performed on filter match + */ +enum ice_status ice_fltr_add_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag, + enum ice_sw_fwd_act_type action) +{ + return ice_fltr_prepare_eth(vsi, ethertype, flag, action, + ice_fltr_add_eth_list); +} + +/** + * ice_fltr_remove_eth - remove ethertype filter + * @vsi: pointer to VSI struct + * @ethertype: ethertype of filter + * @flag: direction of filter + * @action: action to remove + */ +enum ice_status ice_fltr_remove_eth(struct ice_vsi *vsi, u16 ethertype, + u16 flag, enum ice_sw_fwd_act_type action) +{ + return ice_fltr_prepare_eth(vsi, ethertype, flag, action, + ice_fltr_remove_eth_list); +} diff --git a/drivers/net/ethernet/intel/ice/ice_fltr.h b/drivers/net/ethernet/intel/ice/ice_fltr.h new file mode 100644 index 000000000000..361cb4da9b43 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_fltr.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018-2020, Intel Corporation.
*/ + +#ifndef _ICE_FLTR_H_ +#define _ICE_FLTR_H_ + +void ice_fltr_free_list(struct device *dev, struct list_head *h); +enum ice_status +ice_fltr_add_mac_to_list(struct ice_vsi *vsi, struct list_head *list, + const u8 *mac, enum ice_sw_fwd_act_type action); +enum ice_status +ice_fltr_add_mac(struct ice_vsi *vsi, const u8 *mac, + enum ice_sw_fwd_act_type action); +enum ice_status +ice_fltr_add_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac, + enum ice_sw_fwd_act_type action); +enum ice_status +ice_fltr_add_mac_list(struct ice_vsi *vsi, struct list_head *list); +enum ice_status +ice_fltr_remove_mac(struct ice_vsi *vsi, const u8 *mac, + enum ice_sw_fwd_act_type action); +enum ice_status +ice_fltr_remove_mac_list(struct ice_vsi *vsi, struct list_head *list); + +enum ice_status +ice_fltr_add_vlan(struct ice_vsi *vsi, u16 vid, + enum ice_sw_fwd_act_type action); +enum ice_status +ice_fltr_remove_vlan(struct ice_vsi *vsi, u16 vid, + enum ice_sw_fwd_act_type action); + +enum ice_status +ice_fltr_add_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag, + enum ice_sw_fwd_act_type action); +enum ice_status +ice_fltr_remove_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag, + enum ice_sw_fwd_act_type action); +void ice_fltr_remove_all(struct ice_vsi *vsi); +#endif diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h index 1d37a9f02c1c..1086c9f778b4 100644 --- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h +++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h @@ -6,9 +6,6 @@ #ifndef _ICE_HW_AUTOGEN_H_ #define _ICE_HW_AUTOGEN_H_ -#define PF0INT_ITR_0(_i) (0x03000004 + ((_i) * 4096)) -#define PF0INT_ITR_1(_i) (0x03000008 + ((_i) * 4096)) -#define PF0INT_ITR_2(_i) (0x0300000C + ((_i) * 4096)) #define QTX_COMM_DBELL(_DBQM) (0x002C0000 + ((_DBQM) * 4)) #define QTX_COMM_HEAD(_DBQM) (0x000E0000 + ((_DBQM) * 4)) #define QTX_COMM_HEAD_HEAD_S 0 @@ -42,6 +39,7 @@ #define PF_MBX_ARQH_ARQH_M ICE_M(0x3FF, 0) #define PF_MBX_ARQLEN 0x0022E480 #define PF_MBX_ARQLEN_ARQLEN_M ICE_M(0x3FF, 0) +#define PF_MBX_ARQLEN_ARQCRIT_M BIT(30) #define PF_MBX_ARQLEN_ARQENABLE_M BIT(31) #define PF_MBX_ARQT 0x0022E580 #define PF_MBX_ATQBAH 0x0022E180 @@ -50,6 +48,7 @@ #define PF_MBX_ATQH_ATQH_M ICE_M(0x3FF, 0) #define PF_MBX_ATQLEN 0x0022E200 #define PF_MBX_ATQLEN_ATQLEN_M ICE_M(0x3FF, 0) +#define PF_MBX_ATQLEN_ATQCRIT_M BIT(30) #define PF_MBX_ATQLEN_ATQENABLE_M BIT(31) #define PF_MBX_ATQT 0x0022E300 #define PRTDCB_GENC 0x00083000 @@ -58,6 +57,7 @@ #define PRTDCB_GENS 0x00083020 #define PRTDCB_GENS_DCBX_STATUS_S 0 #define PRTDCB_GENS_DCBX_STATUS_M ICE_M(0x7, 0) +#define PRTDCB_TUP2TC 0x001D26C0 /* Reset Source: CORER */ #define GL_PREEXT_L2_PMASK0(_i) (0x0020F0FC + ((_i) * 4)) #define GL_PREEXT_L2_PMASK1(_i) (0x0020F108 + ((_i) * 4)) #define GLFLXP_RXDID_FLX_WRD_0(_i) (0x0045c800 + ((_i) * 4)) @@ -218,6 +218,11 @@ #define VPLAN_TX_QBASE_VFNUMQ_M ICE_M(0xFF, 16) #define VPLAN_TXQ_MAPENA(_VF) (0x00073800 + ((_VF) * 4)) #define VPLAN_TXQ_MAPENA_TX_ENA_M BIT(0) +#define PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(_i) (0x001E36E0 + ((_i) * 32)) +#define PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX 8 +#define PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M ICE_M(0xFFFF, 0) +#define PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3800 + ((_i) * 32)) +#define PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M ICE_M(0xFFFF, 0) #define GL_MDCK_TX_TDPU 0x00049348 #define GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M BIT(1) #define GL_MDET_RX 0x00294C00 @@ -289,6 +294,20 @@ #define GL_PWR_MODE_CTL 
0x000B820C #define GL_PWR_MODE_CTL_CAR_MAX_BW_S 30 #define GL_PWR_MODE_CTL_CAR_MAX_BW_M ICE_M(0x3, 30) +#define GLQF_FD_CNT 0x00460018 +#define GLQF_FD_CNT_FD_BCNT_S 16 +#define GLQF_FD_CNT_FD_BCNT_M ICE_M(0x7FFF, 16) +#define GLQF_FD_SIZE 0x00460010 +#define GLQF_FD_SIZE_FD_GSIZE_S 0 +#define GLQF_FD_SIZE_FD_GSIZE_M ICE_M(0x7FFF, 0) +#define GLQF_FD_SIZE_FD_BSIZE_S 16 +#define GLQF_FD_SIZE_FD_BSIZE_M ICE_M(0x7FFF, 16) +#define GLQF_FDINSET(_i, _j) (0x00412000 + ((_i) * 4 + (_j) * 512)) +#define GLQF_FDMASK_SEL(_i) (0x00410400 + ((_i) * 4)) +#define GLQF_FDSWAP(_i, _j) (0x00413000 + ((_i) * 4 + (_j) * 512)) +#define PFQF_FD_ENA 0x0043A000 +#define PFQF_FD_ENA_FD_ENA_M BIT(0) +#define PFQF_FD_SIZE 0x00460100 #define GLDCB_RTCTQ_RXQNUM_S 0 #define GLDCB_RTCTQ_RXQNUM_M ICE_M(0x7FF, 0) #define GLPRT_BPRCL(_i) (0x00381380 + ((_i) * 8)) @@ -332,6 +351,7 @@ #define GLPRT_TDOLD(_i) (0x00381280 + ((_i) * 8)) #define GLPRT_UPRCL(_i) (0x00381300 + ((_i) * 8)) #define GLPRT_UPTCL(_i) (0x003811C0 + ((_i) * 8)) +#define GLSTAT_FD_CNT0L(_i) (0x003A0000 + ((_i) * 8)) #define GLV_BPRCL(_i) (0x003B6000 + ((_i) * 8)) #define GLV_BPTCL(_i) (0x0030E000 + ((_i) * 8)) #define GLV_GORCL(_i) (0x003B0000 + ((_i) * 8)) @@ -342,6 +362,9 @@ #define GLV_TEPC(_VSI) (0x00312000 + ((_VSI) * 4)) #define GLV_UPRCL(_i) (0x003B2000 + ((_i) * 8)) #define GLV_UPTCL(_i) (0x0030A000 + ((_i) * 8)) +#define VSIQF_FD_CNT(_VSI) (0x00464000 + ((_VSI) * 4)) +#define VSIQF_FD_CNT_FD_GCNT_S 0 +#define VSIQF_FD_CNT_FD_GCNT_M ICE_M(0x3FFF, 0) #define VSIQF_HKEY_MAX_INDEX 12 #define VSIQF_HLUT_MAX_INDEX 15 #define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4)) diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h index 878e125d8b42..14dfbbc1b2cf 100644 --- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h +++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h @@ -40,6 +40,104 @@ union ice_32byte_rx_desc { } wb; /* writeback */ }; +struct ice_fltr_desc { + __le64 qidx_compq_space_stat; + __le64 dtype_cmd_vsi_fdid; +}; + +#define ICE_FXD_FLTR_QW0_QINDEX_S 0 +#define ICE_FXD_FLTR_QW0_QINDEX_M (0x7FFULL << ICE_FXD_FLTR_QW0_QINDEX_S) +#define ICE_FXD_FLTR_QW0_COMP_Q_S 11 +#define ICE_FXD_FLTR_QW0_COMP_Q_M BIT_ULL(ICE_FXD_FLTR_QW0_COMP_Q_S) +#define ICE_FXD_FLTR_QW0_COMP_Q_ZERO 0x0ULL + +#define ICE_FXD_FLTR_QW0_COMP_REPORT_S 12 +#define ICE_FXD_FLTR_QW0_COMP_REPORT_M \ + (0x3ULL << ICE_FXD_FLTR_QW0_COMP_REPORT_S) +#define ICE_FXD_FLTR_QW0_COMP_REPORT_SW_FAIL 0x1ULL + +#define ICE_FXD_FLTR_QW0_FD_SPACE_S 14 +#define ICE_FXD_FLTR_QW0_FD_SPACE_M (0x3ULL << ICE_FXD_FLTR_QW0_FD_SPACE_S) +#define ICE_FXD_FLTR_QW0_FD_SPACE_GUAR_BEST 0x2ULL + +#define ICE_FXD_FLTR_QW0_STAT_CNT_S 16 +#define ICE_FXD_FLTR_QW0_STAT_CNT_M \ + (0x1FFFULL << ICE_FXD_FLTR_QW0_STAT_CNT_S) +#define ICE_FXD_FLTR_QW0_STAT_ENA_S 29 +#define ICE_FXD_FLTR_QW0_STAT_ENA_M (0x3ULL << ICE_FXD_FLTR_QW0_STAT_ENA_S) +#define ICE_FXD_FLTR_QW0_STAT_ENA_PKTS 0x1ULL + +#define ICE_FXD_FLTR_QW0_EVICT_ENA_S 31 +#define ICE_FXD_FLTR_QW0_EVICT_ENA_M BIT_ULL(ICE_FXD_FLTR_QW0_EVICT_ENA_S) +#define ICE_FXD_FLTR_QW0_EVICT_ENA_FALSE 0x0ULL +#define ICE_FXD_FLTR_QW0_EVICT_ENA_TRUE 0x1ULL + +#define ICE_FXD_FLTR_QW0_TO_Q_S 32 +#define ICE_FXD_FLTR_QW0_TO_Q_M (0x7ULL << ICE_FXD_FLTR_QW0_TO_Q_S) +#define ICE_FXD_FLTR_QW0_TO_Q_EQUALS_QINDEX 0x0ULL + +#define ICE_FXD_FLTR_QW0_TO_Q_PRI_S 35 +#define ICE_FXD_FLTR_QW0_TO_Q_PRI_M (0x7ULL << ICE_FXD_FLTR_QW0_TO_Q_PRI_S) +#define ICE_FXD_FLTR_QW0_TO_Q_PRIO1 0x1ULL + +#define ICE_FXD_FLTR_QW0_DPU_RECIPE_S 38 +#define 
ICE_FXD_FLTR_QW0_DPU_RECIPE_M \ + (0x3ULL << ICE_FXD_FLTR_QW0_DPU_RECIPE_S) +#define ICE_FXD_FLTR_QW0_DPU_RECIPE_DFLT 0x0ULL + +#define ICE_FXD_FLTR_QW0_DROP_S 40 +#define ICE_FXD_FLTR_QW0_DROP_M BIT_ULL(ICE_FXD_FLTR_QW0_DROP_S) +#define ICE_FXD_FLTR_QW0_DROP_NO 0x0ULL +#define ICE_FXD_FLTR_QW0_DROP_YES 0x1ULL + +#define ICE_FXD_FLTR_QW0_FLEX_PRI_S 41 +#define ICE_FXD_FLTR_QW0_FLEX_PRI_M (0x7ULL << ICE_FXD_FLTR_QW0_FLEX_PRI_S) +#define ICE_FXD_FLTR_QW0_FLEX_PRI_NONE 0x0ULL + +#define ICE_FXD_FLTR_QW0_FLEX_MDID_S 44 +#define ICE_FXD_FLTR_QW0_FLEX_MDID_M (0xFULL << ICE_FXD_FLTR_QW0_FLEX_MDID_S) +#define ICE_FXD_FLTR_QW0_FLEX_MDID0 0x0ULL + +#define ICE_FXD_FLTR_QW0_FLEX_VAL_S 48 +#define ICE_FXD_FLTR_QW0_FLEX_VAL_M \ + (0xFFFFULL << ICE_FXD_FLTR_QW0_FLEX_VAL_S) +#define ICE_FXD_FLTR_QW0_FLEX_VAL0 0x0ULL + +#define ICE_FXD_FLTR_QW1_DTYPE_S 0 +#define ICE_FXD_FLTR_QW1_DTYPE_M (0xFULL << ICE_FXD_FLTR_QW1_DTYPE_S) +#define ICE_FXD_FLTR_QW1_PCMD_S 4 +#define ICE_FXD_FLTR_QW1_PCMD_M BIT_ULL(ICE_FXD_FLTR_QW1_PCMD_S) +#define ICE_FXD_FLTR_QW1_PCMD_ADD 0x0ULL +#define ICE_FXD_FLTR_QW1_PCMD_REMOVE 0x1ULL + +#define ICE_FXD_FLTR_QW1_PROF_PRI_S 5 +#define ICE_FXD_FLTR_QW1_PROF_PRI_M (0x7ULL << ICE_FXD_FLTR_QW1_PROF_PRI_S) +#define ICE_FXD_FLTR_QW1_PROF_PRIO_ZERO 0x0ULL + +#define ICE_FXD_FLTR_QW1_PROF_S 8 +#define ICE_FXD_FLTR_QW1_PROF_M (0x3FULL << ICE_FXD_FLTR_QW1_PROF_S) +#define ICE_FXD_FLTR_QW1_PROF_ZERO 0x0ULL + +#define ICE_FXD_FLTR_QW1_FD_VSI_S 14 +#define ICE_FXD_FLTR_QW1_FD_VSI_M (0x3FFULL << ICE_FXD_FLTR_QW1_FD_VSI_S) +#define ICE_FXD_FLTR_QW1_SWAP_S 24 +#define ICE_FXD_FLTR_QW1_SWAP_M BIT_ULL(ICE_FXD_FLTR_QW1_SWAP_S) +#define ICE_FXD_FLTR_QW1_SWAP_NOT_SET 0x0ULL +#define ICE_FXD_FLTR_QW1_SWAP_SET 0x1ULL + +#define ICE_FXD_FLTR_QW1_FDID_PRI_S 25 +#define ICE_FXD_FLTR_QW1_FDID_PRI_M (0x7ULL << ICE_FXD_FLTR_QW1_FDID_PRI_S) +#define ICE_FXD_FLTR_QW1_FDID_PRI_ONE 0x1ULL + +#define ICE_FXD_FLTR_QW1_FDID_MDID_S 28 +#define ICE_FXD_FLTR_QW1_FDID_MDID_M (0xFULL << ICE_FXD_FLTR_QW1_FDID_MDID_S) +#define ICE_FXD_FLTR_QW1_FDID_MDID_FD 0x05ULL + +#define ICE_FXD_FLTR_QW1_FDID_S 32 +#define ICE_FXD_FLTR_QW1_FDID_M \ + (0xFFFFFFFFULL << ICE_FXD_FLTR_QW1_FDID_S) +#define ICE_FXD_FLTR_QW1_FDID_ZERO 0x0ULL + struct ice_rx_ptype_decoded { u32 ptype:10; u32 known:1; @@ -262,6 +360,12 @@ enum ice_rx_flex_desc_status_error_0_bits { ICE_RX_FLEX_DESC_STATUS0_LAST /* this entry must be last!!! */ }; +enum ice_rx_flex_desc_status_error_1_bits { + /* Note: These are predefined bit offsets */ + ICE_RX_FLEX_DESC_STATUS1_NAT_S = 4, + ICE_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! 
*/ +}; + #define ICE_RXQ_CTX_SIZE_DWORDS 8 #define ICE_RXQ_CTX_SZ (ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32)) #define ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS 22 @@ -340,6 +444,7 @@ struct ice_tx_desc { enum ice_tx_desc_dtype_value { ICE_TX_DESC_DTYPE_DATA = 0x0, ICE_TX_DESC_DTYPE_CTX = 0x1, + ICE_TX_DESC_DTYPE_FLTR_PROG = 0x8, /* DESC_DONE - HW has completed write-back of descriptor */ ICE_TX_DESC_DTYPE_DESC_DONE = 0xF, }; @@ -351,12 +456,14 @@ enum ice_tx_desc_cmd_bits { ICE_TX_DESC_CMD_EOP = 0x0001, ICE_TX_DESC_CMD_RS = 0x0002, ICE_TX_DESC_CMD_IL2TAG1 = 0x0008, + ICE_TX_DESC_CMD_DUMMY = 0x0010, ICE_TX_DESC_CMD_IIPT_IPV6 = 0x0020, ICE_TX_DESC_CMD_IIPT_IPV4 = 0x0040, ICE_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, ICE_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, ICE_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200, ICE_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, + ICE_TX_DESC_CMD_RE = 0x0400, }; #define ICE_TXD_QW1_OFFSET_S 16 @@ -413,6 +520,25 @@ enum ice_tx_ctx_desc_cmd_bits { ICE_TX_CTX_DESC_RESERVED = 0x40 }; +enum ice_tx_ctx_desc_eipt_offload { + ICE_TX_CTX_EIPT_NONE = 0x0, + ICE_TX_CTX_EIPT_IPV6 = 0x1, + ICE_TX_CTX_EIPT_IPV4_NO_CSUM = 0x2, + ICE_TX_CTX_EIPT_IPV4 = 0x3 +}; + +#define ICE_TXD_CTX_QW0_EIPLEN_S 2 + +#define ICE_TXD_CTX_QW0_L4TUNT_S 9 + +#define ICE_TXD_CTX_UDP_TUNNELING BIT_ULL(ICE_TXD_CTX_QW0_L4TUNT_S) +#define ICE_TXD_CTX_GRE_TUNNELING (0x2ULL << ICE_TXD_CTX_QW0_L4TUNT_S) + +#define ICE_TXD_CTX_QW0_NATLEN_S 12 + +#define ICE_TXD_CTX_QW0_L4T_CS_S 23 +#define ICE_TXD_CTX_QW0_L4T_CS_M BIT_ULL(ICE_TXD_CTX_QW0_L4T_CS_S) + #define ICE_LAN_TXQ_MAX_QGRPS 127 #define ICE_LAN_TXQ_MAX_QDIS 1023 @@ -455,7 +581,7 @@ struct ice_tlan_ctx { u8 drop_ena; u8 cache_prof_idx; u8 pkt_shaper_prof_idx; - u8 int_q_state; /* width not needed - internal do not write */ + u8 int_q_state; /* width not needed - internal - DO NOT WRITE!!! 
*/ }; /* macro to make the table lines short */ diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 2f256bf45efc..89e8e4f7f56f 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -5,6 +5,7 @@ #include "ice_base.h" #include "ice_flow.h" #include "ice_lib.h" +#include "ice_fltr.h" #include "ice_dcb_lib.h" /** @@ -18,6 +19,8 @@ const char *ice_vsi_type_str(enum ice_vsi_type vsi_type) return "ICE_VSI_PF"; case ICE_VSI_VF: return "ICE_VSI_VF"; + case ICE_VSI_CTRL: + return "ICE_VSI_CTRL"; case ICE_VSI_LB: return "ICE_VSI_LB"; default: @@ -37,7 +40,8 @@ const char *ice_vsi_type_str(enum ice_vsi_type vsi_type) */ static int ice_vsi_ctrl_all_rx_rings(struct ice_vsi *vsi, bool ena) { - int i, ret = 0; + int ret = 0; + u16 i; for (i = 0; i < vsi->num_rxq; i++) ice_vsi_ctrl_one_rx_ring(vsi, ena, i, false); @@ -121,6 +125,7 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi) { switch (vsi->type) { case ICE_VSI_PF: + case ICE_VSI_CTRL: case ICE_VSI_LB: vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC; vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC; @@ -185,6 +190,11 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id) */ vsi->num_q_vectors = pf->num_msix_per_vf - ICE_NONQ_VECS_VF; break; + case ICE_VSI_CTRL: + vsi->alloc_txq = 1; + vsi->alloc_rxq = 1; + vsi->num_q_vectors = 1; + break; case ICE_VSI_LB: vsi->alloc_txq = 1; vsi->alloc_rxq = 1; @@ -248,8 +258,8 @@ void ice_vsi_delete(struct ice_vsi *vsi) status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL); if (status) - dev_err(ice_pf_to_dev(pf), "Failed to delete VSI %i in FW - error: %d\n", - vsi->vsi_num, status); + dev_err(ice_pf_to_dev(pf), "Failed to delete VSI %i in FW - error: %s\n", + vsi->vsi_num, ice_stat_str(status)); kfree(ctxt); } @@ -320,7 +330,7 @@ int ice_vsi_clear(struct ice_vsi *vsi) /* updates the PF for this cleared VSI */ pf->vsi[vsi->idx] = NULL; - if (vsi->idx < pf->next_vsi) + if (vsi->idx < pf->next_vsi && vsi->type != ICE_VSI_CTRL) pf->next_vsi = vsi->idx; ice_vsi_free_arrays(vsi); @@ -331,6 +341,25 @@ int ice_vsi_clear(struct ice_vsi *vsi) } /** + * ice_msix_clean_ctrl_vsi - MSIX mode interrupt handler for ctrl VSI + * @irq: interrupt number + * @data: pointer to a q_vector + */ +static irqreturn_t ice_msix_clean_ctrl_vsi(int __always_unused irq, void *data) +{ + struct ice_q_vector *q_vector = (struct ice_q_vector *)data; + + if (!q_vector->tx.ring) + return IRQ_HANDLED; + +#define FDIR_RX_DESC_CLEAN_BUDGET 64 + ice_clean_rx_irq(q_vector->rx.ring, FDIR_RX_DESC_CLEAN_BUDGET); + ice_clean_ctrl_tx_irq(q_vector->tx.ring); + + return IRQ_HANDLED; +} + +/** * ice_msix_clean_rings - MSIX mode Interrupt Handler * @irq: interrupt number * @data: pointer to a q_vector @@ -381,8 +410,6 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, u16 vf_id) vsi->back = pf; set_bit(__ICE_DOWN, vsi->state); - vsi->idx = pf->next_vsi; - if (vsi_type == ICE_VSI_VF) ice_vsi_set_num_qs(vsi, vf_id); else @@ -396,6 +423,13 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, u16 vf_id) /* Setup default MSIX irq handler for VSI */ vsi->irq_handler = ice_msix_clean_rings; break; + case ICE_VSI_CTRL: + if (ice_vsi_alloc_arrays(vsi)) + goto err_rings; + + /* Setup ctrl VSI MSIX irq handler */ + vsi->irq_handler = ice_msix_clean_ctrl_vsi; + break; case ICE_VSI_VF: if (ice_vsi_alloc_arrays(vsi)) goto err_rings; @@ -409,12 +443,20 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, u16 vf_id) goto unlock_pf; } - /* 
fill VSI slot in the PF struct */ - pf->vsi[pf->next_vsi] = vsi; + if (vsi->type == ICE_VSI_CTRL) { + /* Use the last VSI slot as the index for the control VSI */ + vsi->idx = pf->num_alloc_vsi - 1; + pf->ctrl_vsi_idx = vsi->idx; + pf->vsi[vsi->idx] = vsi; + } else { + /* fill slot and make note of the index */ + vsi->idx = pf->next_vsi; + pf->vsi[pf->next_vsi] = vsi; - /* prepare pf->next_vsi for next use */ - pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi, - pf->next_vsi); + /* prepare pf->next_vsi for next use */ + pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi, + pf->next_vsi); + } goto unlock_pf; err_rings: @@ -426,6 +468,48 @@ unlock_pf: } /** + * ice_alloc_fd_res - Allocate FD resource for a VSI + * @vsi: pointer to the ice_vsi + * + * This allocates the FD resources + * + * Returns 0 on success, -EPERM on no-op or -EIO on failure + */ +static int ice_alloc_fd_res(struct ice_vsi *vsi) +{ + struct ice_pf *pf = vsi->back; + u32 g_val, b_val; + + /* Flow Director filters are only allocated/assigned to the PF VSI which + * passes the traffic. The CTRL VSI is only used to add/delete filters + * so we don't allocate resources to it + */ + + /* FD filters from guaranteed pool per VSI */ + g_val = pf->hw.func_caps.fd_fltr_guar; + if (!g_val) + return -EPERM; + + /* FD filters from best effort pool */ + b_val = pf->hw.func_caps.fd_fltr_best_effort; + if (!b_val) + return -EPERM; + + if (vsi->type != ICE_VSI_PF) + return -EPERM; + + if (!test_bit(ICE_FLAG_FD_ENA, pf->flags)) + return -EPERM; + + vsi->num_gfltr = g_val / pf->num_alloc_vsi; + + /* each VSI gets same "best_effort" quota */ + vsi->num_bfltr = b_val; + + return 0; +} + +/** * ice_vsi_get_qs - Assign queues from PF to VSI * @vsi: the VSI to assign queues to * @@ -521,8 +605,8 @@ static void ice_vsi_clean_rss_flow_fld(struct ice_vsi *vsi) status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx); if (status) - dev_dbg(ice_pf_to_dev(pf), "ice_rem_vsi_rss_cfg failed for vsi = %d, error = %d\n", - vsi->vsi_num, status); + dev_dbg(ice_pf_to_dev(pf), "ice_rem_vsi_rss_cfg failed for vsi = %d, error = %s\n", + vsi->vsi_num, ice_stat_str(status)); } /** @@ -565,8 +649,8 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi) switch (vsi->type) { case ICE_VSI_PF: /* PF VSI will inherit RSS instance of PF */ - vsi->rss_table_size = cap->rss_table_size; - vsi->rss_size = min_t(int, num_online_cpus(), + vsi->rss_table_size = (u16)cap->rss_table_size; + vsi->rss_size = min_t(u16, num_online_cpus(), BIT(cap->rss_table_entry_width)); vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF; break; @@ -581,8 +665,8 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi) case ICE_VSI_LB: break; default: - dev_warn(ice_pf_to_dev(pf), "Unknown VSI type %d\n", - vsi->type); + dev_dbg(ice_pf_to_dev(pf), "Unsupported VSI type %s\n", + ice_vsi_type_str(vsi->type)); break; } } @@ -684,15 +768,15 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) max_rss = ICE_MAX_LG_RSS_QS; else max_rss = ICE_MAX_RSS_QS_PER_VF; - qcount_rx = min_t(int, rx_numq_tc, max_rss); + qcount_rx = min_t(u16, rx_numq_tc, max_rss); if (!vsi->req_rxq) - qcount_rx = min_t(int, qcount_rx, + qcount_rx = min_t(u16, qcount_rx, vsi->rss_size); } } /* find the (rounded up) power-of-2 of qcount */ - pow = order_base_2(qcount_rx); + pow = (u16)order_base_2(qcount_rx); ice_for_each_traffic_class(i) { if (!(vsi->tc_cfg.ena_tc & BIT(i))) { @@ -752,6 +836,51 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) } /** + * 
ice_set_fd_vsi_ctx - Set FD VSI context before adding a VSI + * @ctxt: the VSI context being set + * @vsi: the VSI being configured + */ +static void ice_set_fd_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi) +{ + u8 dflt_q_group, dflt_q_prio; + u16 dflt_q, report_q, val; + + if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_CTRL) + return; + + val = ICE_AQ_VSI_PROP_FLOW_DIR_VALID; + ctxt->info.valid_sections |= cpu_to_le16(val); + dflt_q = 0; + dflt_q_group = 0; + report_q = 0; + dflt_q_prio = 0; + + /* enable flow director filtering/programming */ + val = ICE_AQ_VSI_FD_ENABLE | ICE_AQ_VSI_FD_PROG_ENABLE; + ctxt->info.fd_options = cpu_to_le16(val); + /* max of allocated flow director filters */ + ctxt->info.max_fd_fltr_dedicated = + cpu_to_le16(vsi->num_gfltr); + /* max of shared flow director filters any VSI may program */ + ctxt->info.max_fd_fltr_shared = + cpu_to_le16(vsi->num_bfltr); + /* default queue index within the VSI of the default FD */ + val = ((dflt_q << ICE_AQ_VSI_FD_DEF_Q_S) & + ICE_AQ_VSI_FD_DEF_Q_M); + /* target queue or queue group to the FD filter */ + val |= ((dflt_q_group << ICE_AQ_VSI_FD_DEF_GRP_S) & + ICE_AQ_VSI_FD_DEF_GRP_M); + ctxt->info.fd_def_q = cpu_to_le16(val); + /* queue index on which FD filter completion is reported */ + val = ((report_q << ICE_AQ_VSI_FD_REPORT_Q_S) & + ICE_AQ_VSI_FD_REPORT_Q_M); + /* priority of the default qindex action */ + val |= ((dflt_q_prio << ICE_AQ_VSI_FD_DEF_PRIORITY_S) & + ICE_AQ_VSI_FD_DEF_PRIORITY_M); + ctxt->info.fd_report_opt = cpu_to_le16(val); +} + +/** * ice_set_rss_vsi_ctx - Set RSS VSI context before adding a VSI * @ctxt: the VSI context being set * @vsi: the VSI being configured @@ -776,13 +905,10 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi) lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI; hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ; break; - case ICE_VSI_LB: + default: dev_dbg(dev, "Unsupported VSI type %s\n", ice_vsi_type_str(vsi->type)); return; - default: - dev_warn(dev, "Unknown VSI type %d\n", vsi->type); - return; } ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) & @@ -812,8 +938,8 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi) if (!ctxt) return -ENOMEM; - ctxt->info = vsi->info; switch (vsi->type) { + case ICE_VSI_CTRL: case ICE_VSI_LB: case ICE_VSI_PF: ctxt->flags = ICE_AQ_VSI_TYPE_PF; @@ -829,12 +955,15 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi) } ice_set_dflt_vsi_ctx(ctxt); + if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) + ice_set_fd_vsi_ctx(ctxt, vsi); /* if the switch is in VEB mode, allow VSI loopback */ if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB) ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB; /* Set LUT type and HASH type if RSS is enabled */ - if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { + if (test_bit(ICE_FLAG_RSS_ENA, pf->flags) && + vsi->type != ICE_VSI_CTRL) { ice_set_rss_vsi_ctx(ctxt, vsi); /* if updating VSI context, make sure to set valid_section: * to indicate which section of VSI context being updated @@ -941,7 +1070,7 @@ int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id) */ static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id) { - int start = 0, end = 0; + u16 start = 0, end = 0; if (needed > res->end) return -ENOMEM; @@ -1024,6 +1153,7 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi) struct ice_pf *pf = vsi->back; struct device *dev; u16 num_q_vectors; + int base; dev = ice_pf_to_dev(pf); /* SRIOV doesn't grab irq_tracker entries for each VSI */ @@ -1038,14 
+1168,15 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi) num_q_vectors = vsi->num_q_vectors; /* reserve slots from OS requested IRQs */ - vsi->base_vector = ice_get_res(pf, pf->irq_tracker, num_q_vectors, - vsi->idx); - if (vsi->base_vector < 0) { + base = ice_get_res(pf, pf->irq_tracker, num_q_vectors, vsi->idx); + + if (base < 0) { dev_err(dev, "%d MSI-X interrupts available. %s %d failed to get %d MSI-X vectors\n", ice_get_free_res_count(pf->irq_tracker), ice_vsi_type_str(vsi->type), vsi->idx, num_q_vectors); return -ENOENT; } + vsi->base_vector = (u16)base; pf->num_avail_sw_msix -= num_q_vectors; return 0; @@ -1085,7 +1216,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi) { struct ice_pf *pf = vsi->back; struct device *dev; - int i; + u16 i; dev = ice_pf_to_dev(pf); /* Allocate Tx rings */ @@ -1178,7 +1309,7 @@ static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi) u8 *lut; dev = ice_pf_to_dev(pf); - vsi->rss_size = min_t(int, vsi->rss_size, vsi->num_rxq); + vsi->rss_size = min_t(u16, vsi->rss_size, vsi->num_rxq); lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); if (!lut) @@ -1193,7 +1324,8 @@ static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi) vsi->rss_table_size); if (status) { - dev_err(dev, "set_rss_lut failed, error %d\n", status); + dev_err(dev, "set_rss_lut failed, error %s\n", + ice_stat_str(status)); err = -EIO; goto ice_vsi_cfg_rss_exit; } @@ -1215,7 +1347,8 @@ static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi) status = ice_aq_set_rss_key(&pf->hw, vsi->idx, key); if (status) { - dev_err(dev, "set_rss_key failed, error %d\n", status); + dev_err(dev, "set_rss_key failed, error %s\n", + ice_stat_str(status)); err = -EIO; } @@ -1248,8 +1381,8 @@ static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi) status = ice_add_avf_rss_cfg(&pf->hw, vsi->idx, ICE_DEFAULT_RSS_HENA); if (status) - dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %d\n", - vsi->vsi_num, status); + dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %s\n", + vsi->vsi_num, ice_stat_str(status)); } /** @@ -1281,91 +1414,57 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi) status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4, ICE_FLOW_SEG_HDR_IPV4); if (status) - dev_dbg(dev, "ice_add_rss_cfg failed for ipv4 flow, vsi = %d, error = %d\n", - vsi_num, status); + dev_dbg(dev, "ice_add_rss_cfg failed for ipv4 flow, vsi = %d, error = %s\n", + vsi_num, ice_stat_str(status)); /* configure RSS for IPv6 with input set IPv6 src/dst */ status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6, ICE_FLOW_SEG_HDR_IPV6); if (status) - dev_dbg(dev, "ice_add_rss_cfg failed for ipv6 flow, vsi = %d, error = %d\n", - vsi_num, status); + dev_dbg(dev, "ice_add_rss_cfg failed for ipv6 flow, vsi = %d, error = %s\n", + vsi_num, ice_stat_str(status)); /* configure RSS for tcp4 with input set IP src/dst, TCP src/dst */ status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV4, ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4); if (status) - dev_dbg(dev, "ice_add_rss_cfg failed for tcp4 flow, vsi = %d, error = %d\n", - vsi_num, status); + dev_dbg(dev, "ice_add_rss_cfg failed for tcp4 flow, vsi = %d, error = %s\n", + vsi_num, ice_stat_str(status)); /* configure RSS for udp4 with input set IP src/dst, UDP src/dst */ status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV4, ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4); if (status) - dev_dbg(dev, "ice_add_rss_cfg failed for udp4 flow, vsi = %d, error = %d\n", - vsi_num, status); + dev_dbg(dev, "ice_add_rss_cfg failed 
for udp4 flow, vsi = %d, error = %s\n", + vsi_num, ice_stat_str(status)); /* configure RSS for sctp4 with input set IP src/dst */ status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4, ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4); if (status) - dev_dbg(dev, "ice_add_rss_cfg failed for sctp4 flow, vsi = %d, error = %d\n", - vsi_num, status); + dev_dbg(dev, "ice_add_rss_cfg failed for sctp4 flow, vsi = %d, error = %s\n", + vsi_num, ice_stat_str(status)); /* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */ status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV6, ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6); if (status) - dev_dbg(dev, "ice_add_rss_cfg failed for tcp6 flow, vsi = %d, error = %d\n", - vsi_num, status); + dev_dbg(dev, "ice_add_rss_cfg failed for tcp6 flow, vsi = %d, error = %s\n", + vsi_num, ice_stat_str(status)); /* configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst */ status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV6, ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6); if (status) - dev_dbg(dev, "ice_add_rss_cfg failed for udp6 flow, vsi = %d, error = %d\n", - vsi_num, status); + dev_dbg(dev, "ice_add_rss_cfg failed for udp6 flow, vsi = %d, error = %s\n", + vsi_num, ice_stat_str(status)); /* configure RSS for sctp6 with input set IPv6 src/dst */ status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6, ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6); if (status) - dev_dbg(dev, "ice_add_rss_cfg failed for sctp6 flow, vsi = %d, error = %d\n", - vsi_num, status); -} - -/** - * ice_add_mac_to_list - Add a MAC address filter entry to the list - * @vsi: the VSI to be forwarded to - * @add_list: pointer to the list which contains MAC filter entries - * @macaddr: the MAC address to be added. - * - * Adds MAC address filter entry to the temp list - * - * Returns 0 on success or ENOMEM on failure. 
- */ -int -ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list, - const u8 *macaddr) -{ - struct ice_fltr_list_entry *tmp; - struct ice_pf *pf = vsi->back; - - tmp = devm_kzalloc(ice_pf_to_dev(pf), sizeof(*tmp), GFP_ATOMIC); - if (!tmp) - return -ENOMEM; - - tmp->fltr_info.flag = ICE_FLTR_TX; - tmp->fltr_info.src_id = ICE_SRC_ID_VSI; - tmp->fltr_info.lkup_type = ICE_SW_LKUP_MAC; - tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI; - tmp->fltr_info.vsi_handle = vsi->idx; - ether_addr_copy(tmp->fltr_info.l_data.mac.mac_addr, macaddr); - - INIT_LIST_HEAD(&tmp->list_entry); - list_add(&tmp->list_entry, add_list); - - return 0; + dev_dbg(dev, "ice_add_rss_cfg failed for sctp6 flow, vsi = %d, error = %s\n", + vsi_num, ice_stat_str(status)); } /** @@ -1415,54 +1514,21 @@ void ice_update_eth_stats(struct ice_vsi *vsi) } /** - * ice_free_fltr_list - free filter lists helper - * @dev: pointer to the device struct - * @h: pointer to the list head to be freed - * - * Helper function to free filter lists previously created using - * ice_add_mac_to_list - */ -void ice_free_fltr_list(struct device *dev, struct list_head *h) -{ - struct ice_fltr_list_entry *e, *tmp; - - list_for_each_entry_safe(e, tmp, h, list_entry) { - list_del(&e->list_entry); - devm_kfree(dev, e); - } -} - -/** * ice_vsi_add_vlan - Add VSI membership for given VLAN * @vsi: the VSI being configured * @vid: VLAN ID to be added + * @action: filter action to be performed on match */ -int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid) +int +ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid, enum ice_sw_fwd_act_type action) { - struct ice_fltr_list_entry *tmp; struct ice_pf *pf = vsi->back; - LIST_HEAD(tmp_add_list); - enum ice_status status; struct device *dev; int err = 0; dev = ice_pf_to_dev(pf); - tmp = devm_kzalloc(dev, sizeof(*tmp), GFP_KERNEL); - if (!tmp) - return -ENOMEM; - - tmp->fltr_info.lkup_type = ICE_SW_LKUP_VLAN; - tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI; - tmp->fltr_info.flag = ICE_FLTR_TX; - tmp->fltr_info.src_id = ICE_SRC_ID_VSI; - tmp->fltr_info.vsi_handle = vsi->idx; - tmp->fltr_info.l_data.vlan.vlan_id = vid; - - INIT_LIST_HEAD(&tmp->list_entry); - list_add(&tmp->list_entry, &tmp_add_list); - status = ice_add_vlan(&pf->hw, &tmp_add_list); - if (!status) { + if (!ice_fltr_add_vlan(vsi, vid, action)) { vsi->num_vlan++; } else { err = -ENODEV; @@ -1470,7 +1536,6 @@ int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid) vsi->vsi_num); } - ice_free_fltr_list(dev, &tmp_add_list); return err; } @@ -1483,41 +1548,25 @@ int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid) */ int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid) { - struct ice_fltr_list_entry *list; struct ice_pf *pf = vsi->back; - LIST_HEAD(tmp_add_list); enum ice_status status; struct device *dev; int err = 0; dev = ice_pf_to_dev(pf); - list = devm_kzalloc(dev, sizeof(*list), GFP_KERNEL); - if (!list) - return -ENOMEM; - list->fltr_info.lkup_type = ICE_SW_LKUP_VLAN; - list->fltr_info.vsi_handle = vsi->idx; - list->fltr_info.fltr_act = ICE_FWD_TO_VSI; - list->fltr_info.l_data.vlan.vlan_id = vid; - list->fltr_info.flag = ICE_FLTR_TX; - list->fltr_info.src_id = ICE_SRC_ID_VSI; - - INIT_LIST_HEAD(&list->list_entry); - list_add(&list->list_entry, &tmp_add_list); - - status = ice_remove_vlan(&pf->hw, &tmp_add_list); + status = ice_fltr_remove_vlan(vsi, vid, ICE_FWD_TO_VSI); if (!status) { vsi->num_vlan--; } else if (status == ICE_ERR_DOES_NOT_EXIST) { - dev_dbg(dev, "Failed to remove VLAN %d on VSI %i, it does not exist, status: %d\n", - vid, vsi->vsi_num, status); + 
dev_dbg(dev, "Failed to remove VLAN %d on VSI %i, it does not exist, status: %s\n", + vid, vsi->vsi_num, ice_stat_str(status)); } else { - dev_err(dev, "Error removing VLAN %d on vsi %i error: %d\n", - vid, vsi->vsi_num, status); + dev_err(dev, "Error removing VLAN %d on vsi %i error: %s\n", + vid, vsi->vsi_num, ice_stat_str(status)); err = -EIO; } - ice_free_fltr_list(dev, &tmp_add_list); return err; } @@ -1671,7 +1720,7 @@ void ice_vsi_cfg_msix(struct ice_vsi *vsi) { struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; - u32 txq = 0, rxq = 0; + u16 txq = 0, rxq = 0; int i, q; for (i = 0; i < vsi->num_q_vectors; i++) { @@ -1737,8 +1786,9 @@ int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi) status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); if (status) { - dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN insert failed, err %d aq_err %d\n", - status, hw->adminq.sq_last_status); + dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN insert failed, err %s aq_err %s\n", + ice_stat_str(status), + ice_aq_str(hw->adminq.sq_last_status)); ret = -EIO; goto out; } @@ -1761,6 +1811,12 @@ int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena) enum ice_status status; int ret = 0; + /* do not allow modifying VLAN stripping when a port VLAN is configured + * on this VSI + */ + if (vsi->info.pvid) + return 0; + ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); if (!ctxt) return -ENOMEM; @@ -1783,8 +1839,9 @@ int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena) status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); if (status) { - dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN strip failed, ena = %d err %d aq_err %d\n", - ena, status, hw->adminq.sq_last_status); + dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN strip failed, ena = %d err %s aq_err %s\n", + ena, ice_stat_str(status), + ice_aq_str(hw->adminq.sq_last_status)); ret = -EIO; goto out; } @@ -1922,9 +1979,10 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc) status = ice_update_vsi(&pf->hw, vsi->idx, ctxt, NULL); if (status) { - netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %d, aq_err = %d\n", - ena ? "En" : "Dis", vsi->idx, vsi->vsi_num, status, - pf->hw.adminq.sq_last_status); + netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %s, aq_err = %s\n", + ena ? 
"En" : "Dis", vsi->idx, vsi->vsi_num, + ice_stat_str(status), + ice_aq_str(pf->hw.adminq.sq_last_status)); goto err_out; } @@ -1991,47 +2049,6 @@ clear_reg_idx: } /** - * ice_vsi_add_rem_eth_mac - Program VSI ethertype based filter with rule - * @vsi: the VSI being configured - * @add_rule: boolean value to add or remove ethertype filter rule - */ -static void -ice_vsi_add_rem_eth_mac(struct ice_vsi *vsi, bool add_rule) -{ - struct ice_fltr_list_entry *list; - struct ice_pf *pf = vsi->back; - LIST_HEAD(tmp_add_list); - enum ice_status status; - struct device *dev; - - dev = ice_pf_to_dev(pf); - list = devm_kzalloc(dev, sizeof(*list), GFP_KERNEL); - if (!list) - return; - - list->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE; - list->fltr_info.fltr_act = ICE_DROP_PACKET; - list->fltr_info.flag = ICE_FLTR_TX; - list->fltr_info.src_id = ICE_SRC_ID_VSI; - list->fltr_info.vsi_handle = vsi->idx; - list->fltr_info.l_data.ethertype_mac.ethertype = vsi->ethtype; - - INIT_LIST_HEAD(&list->list_entry); - list_add(&list->list_entry, &tmp_add_list); - - if (add_rule) - status = ice_add_eth_mac(&pf->hw, &tmp_add_list); - else - status = ice_remove_eth_mac(&pf->hw, &tmp_add_list); - - if (status) - dev_err(dev, "Failure Adding or Removing Ethertype on VSI %i error: %d\n", - vsi->vsi_num, status); - - ice_free_fltr_list(dev, &tmp_add_list); -} - -/** * ice_cfg_sw_lldp - Config switch rules for LLDP packet handling * @vsi: the VSI being configured * @tx: bool to determine Tx or Rx rule @@ -2039,45 +2056,25 @@ ice_vsi_add_rem_eth_mac(struct ice_vsi *vsi, bool add_rule) */ void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create) { - struct ice_fltr_list_entry *list; + enum ice_status (*eth_fltr)(struct ice_vsi *v, u16 type, u16 flag, + enum ice_sw_fwd_act_type act); struct ice_pf *pf = vsi->back; - LIST_HEAD(tmp_add_list); enum ice_status status; struct device *dev; dev = ice_pf_to_dev(pf); - list = devm_kzalloc(dev, sizeof(*list), GFP_KERNEL); - if (!list) - return; - - list->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE; - list->fltr_info.vsi_handle = vsi->idx; - list->fltr_info.l_data.ethertype_mac.ethertype = ETH_P_LLDP; - - if (tx) { - list->fltr_info.fltr_act = ICE_DROP_PACKET; - list->fltr_info.flag = ICE_FLTR_TX; - list->fltr_info.src_id = ICE_SRC_ID_VSI; - } else { - list->fltr_info.fltr_act = ICE_FWD_TO_VSI; - list->fltr_info.flag = ICE_FLTR_RX; - list->fltr_info.src_id = ICE_SRC_ID_LPORT; - } - - INIT_LIST_HEAD(&list->list_entry); - list_add(&list->list_entry, &tmp_add_list); + eth_fltr = create ? ice_fltr_add_eth : ice_fltr_remove_eth; - if (create) - status = ice_add_eth_mac(&pf->hw, &tmp_add_list); + if (tx) + status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_TX, + ICE_DROP_PACKET); else - status = ice_remove_eth_mac(&pf->hw, &tmp_add_list); + status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_RX, ICE_FWD_TO_VSI); if (status) - dev_err(dev, "Fail %s %s LLDP rule on VSI %i error: %d\n", + dev_err(dev, "Fail %s %s LLDP rule on VSI %i error: %s\n", create ? "adding" : "removing", tx ? "TX" : "RX", - vsi->vsi_num, status); - - ice_free_fltr_list(dev, &tmp_add_list); + vsi->vsi_num, ice_stat_str(status)); } /** @@ -2122,10 +2119,12 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, if (vsi->type == ICE_VSI_VF) vsi->vf_id = vf_id; + ice_alloc_fd_res(vsi); + if (ice_vsi_get_qs(vsi)) { dev_err(dev, "Failed to allocate queues. 
vsi->idx = %d\n", vsi->idx); - goto unroll_get_qs; + goto unroll_vsi_alloc; } /* set RSS capabilities */ @@ -2140,6 +2139,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, goto unroll_get_qs; switch (vsi->type) { + case ICE_VSI_CTRL: case ICE_VSI_PF: ret = ice_vsi_alloc_q_vectors(vsi); if (ret) @@ -2164,20 +2164,23 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, * so this handles those cases (i.e. adding the PF to a bridge * without the 8021q module loaded). */ - ret = ice_vsi_add_vlan(vsi, 0); + ret = ice_vsi_add_vlan(vsi, 0, ICE_FWD_TO_VSI); if (ret) goto unroll_clear_rings; ice_vsi_map_rings_to_vectors(vsi); - /* Do not exit if configuring RSS had an issue, at least - * receive traffic on first queue. Hence no need to capture - * return value - */ - if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { - ice_vsi_cfg_rss_lut_key(vsi); - ice_vsi_set_rss_flow_fld(vsi); - } + /* ICE_VSI_CTRL does not need RSS so skip RSS processing */ + if (vsi->type != ICE_VSI_CTRL) + /* Do not exit if configuring RSS had an issue, at + * least receive traffic on first queue. Hence no + * need to capture return value + */ + if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { + ice_vsi_cfg_rss_lut_key(vsi); + ice_vsi_set_rss_flow_fld(vsi); + } + ice_init_arfs(vsi); break; case ICE_VSI_VF: /* VF driver will take care of creating netdev for this type and @@ -2223,8 +2226,8 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, max_txqs); if (status) { - dev_err(dev, "VSI %d failed lan queue config, error %d\n", - vsi->vsi_num, status); + dev_err(dev, "VSI %d failed lan queue config, error %s\n", + vsi->vsi_num, ice_stat_str(status)); goto unroll_vector_base; } @@ -2239,9 +2242,8 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, */ if (!ice_is_safe_mode(pf)) if (vsi->type == ICE_VSI_PF) { - ice_vsi_add_rem_eth_mac(vsi, true); - - /* Tx LLDP packets */ + ice_fltr_add_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX, + ICE_DROP_PACKET); ice_cfg_sw_lldp(vsi, true, true); } @@ -2259,6 +2261,7 @@ unroll_vsi_init: ice_vsi_delete(vsi); unroll_get_qs: ice_vsi_put_qs(vsi); +unroll_vsi_alloc: ice_vsi_clear(vsi); return NULL; @@ -2411,6 +2414,8 @@ int ice_ena_vsi(struct ice_vsi *vsi, bool locked) if (!locked) rtnl_unlock(); } + } else if (vsi->type == ICE_VSI_CTRL) { + err = ice_vsi_open_ctrl(vsi); } return err; @@ -2440,6 +2445,8 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked) } else { ice_vsi_close(vsi); } + } else if (vsi->type == ICE_VSI_CTRL) { + ice_vsi_close(vsi); } } @@ -2558,7 +2565,8 @@ int ice_vsi_release(struct ice_vsi *vsi) if (!ice_is_safe_mode(pf)) { if (vsi->type == ICE_VSI_PF) { - ice_vsi_add_rem_eth_mac(vsi, false); + ice_fltr_remove_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX, + ICE_DROP_PACKET); ice_cfg_sw_lldp(vsi, true, false); /* The Rx rule will only exist to remove if the LLDP FW * engine is currently stopped @@ -2568,7 +2576,7 @@ int ice_vsi_release(struct ice_vsi *vsi) } } - ice_remove_vsi_fltr(&pf->hw, vsi->idx); + ice_fltr_remove_all(vsi); ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx); ice_vsi_delete(vsi); ice_vsi_free_q_vectors(vsi); @@ -2746,6 +2754,8 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi) goto err_vsi; ice_vsi_get_qs(vsi); + + ice_alloc_fd_res(vsi); ice_vsi_set_tc_cfg(vsi); /* Initialize VSI struct elements and create VSI in FW */ @@ -2754,6 +2764,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi) goto err_vsi; switch (vsi->type) { + case ICE_VSI_CTRL: case ICE_VSI_PF: ret = 
ice_vsi_alloc_q_vectors(vsi); if (ret) @@ -2773,17 +2784,19 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi) ice_vsi_map_rings_to_vectors(vsi); if (ice_is_xdp_ena_vsi(vsi)) { - vsi->num_xdp_txq = vsi->alloc_txq; + vsi->num_xdp_txq = vsi->alloc_rxq; ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog); if (ret) goto err_vectors; } - /* Do not exit if configuring RSS had an issue, at least - * receive traffic on first queue. Hence no need to capture - * return value - */ - if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) - ice_vsi_cfg_rss_lut_key(vsi); + /* ICE_VSI_CTRL does not need RSS so skip RSS processing */ + if (vsi->type != ICE_VSI_CTRL) + /* Do not exit if configuring RSS had an issue, at + * least receive traffic on first queue. Hence no + * need to capture return value + */ + if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) + ice_vsi_cfg_rss_lut_key(vsi); break; case ICE_VSI_VF: ret = ice_vsi_alloc_q_vectors(vsi); @@ -2814,8 +2827,8 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi) status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, max_txqs); if (status) { - dev_err(ice_pf_to_dev(pf), "VSI %d failed lan queue config, error %d\n", - vsi->vsi_num, status); + dev_err(ice_pf_to_dev(pf), "VSI %d failed lan queue config, error %s\n", + vsi->vsi_num, ice_stat_str(status)); if (init_vsi) { ret = -EIO; goto err_vectors; @@ -2924,8 +2937,8 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc) max_txqs); if (status) { - dev_err(dev, "VSI %d failed TC config, error %d\n", - vsi->vsi_num, status); + dev_err(dev, "VSI %d failed TC config, error %s\n", + vsi->vsi_num, ice_stat_str(status)); ret = -EIO; goto out; } @@ -2985,33 +2998,27 @@ void ice_update_rx_ring_stats(struct ice_ring *rx_ring, u64 pkts, u64 bytes) } /** - * ice_vsi_cfg_mac_fltr - Add or remove a MAC address filter for a VSI - * @vsi: the VSI being configured MAC filter - * @macaddr: the MAC address to be added. 
- * @set: Add or delete a MAC filter - * - * Adds or removes MAC address filter entry for VF VSI + * ice_status_to_errno - convert from enum ice_status to Linux errno + * @err: ice_status value to convert */ -enum ice_status -ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set) +int ice_status_to_errno(enum ice_status err) { - LIST_HEAD(tmp_add_list); - enum ice_status status; - - /* Update MAC filter list to be added or removed for a VSI */ - if (ice_add_mac_to_list(vsi, &tmp_add_list, macaddr)) { - status = ICE_ERR_NO_MEMORY; - goto cfg_mac_fltr_exit; + switch (err) { + case ICE_SUCCESS: + return 0; + case ICE_ERR_DOES_NOT_EXIST: + return -ENOENT; + case ICE_ERR_OUT_OF_RANGE: + return -ENOTTY; + case ICE_ERR_PARAM: + return -EINVAL; + case ICE_ERR_NO_MEMORY: + return -ENOMEM; + case ICE_ERR_MAX_LIMIT: + return -EAGAIN; + default: + return -EINVAL; } - - if (set) - status = ice_add_mac(&vsi->back->hw, &tmp_add_list); - else - status = ice_remove_mac(&vsi->back->hw, &tmp_add_list); - -cfg_mac_fltr_exit: - ice_free_fltr_list(ice_pf_to_dev(vsi->back), &tmp_add_list); - return status; } /** @@ -3079,8 +3086,8 @@ int ice_set_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi) status = ice_cfg_dflt_vsi(&vsi->back->hw, vsi->idx, true, ICE_FLTR_RX); if (status) { - dev_err(dev, "Failed to set VSI %d as the default forwarding VSI, error %d\n", - vsi->vsi_num, status); + dev_err(dev, "Failed to set VSI %d as the default forwarding VSI, error %s\n", + vsi->vsi_num, ice_stat_str(status)); return -EIO; } @@ -3118,8 +3125,8 @@ int ice_clear_dflt_vsi(struct ice_sw *sw) status = ice_cfg_dflt_vsi(&dflt_vsi->back->hw, dflt_vsi->idx, false, ICE_FLTR_RX); if (status) { - dev_err(dev, "Failed to clear the default forwarding VSI %d, error %d\n", - dflt_vsi->vsi_num, status); + dev_err(dev, "Failed to clear the default forwarding VSI %d, error %s\n", + dflt_vsi->vsi_num, ice_stat_str(status)); return -EIO; } diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h index 04ca00799364..076e635e0c9f 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_lib.h @@ -8,12 +8,6 @@ const char *ice_vsi_type_str(enum ice_vsi_type vsi_type); -int -ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list, - const u8 *macaddr); - -void ice_free_fltr_list(struct device *dev, struct list_head *h); - void ice_update_eth_stats(struct ice_vsi *vsi); int ice_vsi_cfg_rxqs(struct ice_vsi *vsi); @@ -22,7 +16,8 @@ int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi); void ice_vsi_cfg_msix(struct ice_vsi *vsi); -int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid); +int +ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid, enum ice_sw_fwd_act_type action); int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid); @@ -97,6 +92,8 @@ void ice_update_rx_ring_stats(struct ice_ring *ring, u64 pkts, u64 bytes); void ice_vsi_cfg_frame_size(struct ice_vsi *vsi); +int ice_status_to_errno(enum ice_status err); + u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran); enum ice_status diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 5b190c257124..bbf92d2f1ac1 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -8,6 +8,7 @@ #include "ice.h" #include "ice_base.h" #include "ice_lib.h" +#include "ice_fltr.h" #include "ice_dcb_lib.h" #include "ice_dcb_nl.h" #include "ice_devlink.h" @@ -133,38 +134,24 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf) static int 
ice_init_mac_fltr(struct ice_pf *pf) { enum ice_status status; - u8 broadcast[ETH_ALEN]; struct ice_vsi *vsi; + u8 *perm_addr; vsi = ice_get_main_vsi(pf); if (!vsi) return -EINVAL; - /* To add a MAC filter, first add the MAC to a list and then - * pass the list to ice_add_mac. - */ - - /* Add a unicast MAC filter so the VSI can get its packets */ - status = ice_vsi_cfg_mac_fltr(vsi, vsi->port_info->mac.perm_addr, true); - if (status) - goto unregister; - - /* VSI needs to receive broadcast traffic, so add the broadcast - * MAC address to the list as well. - */ - eth_broadcast_addr(broadcast); - status = ice_vsi_cfg_mac_fltr(vsi, broadcast, true); - if (status) - goto unregister; + perm_addr = vsi->port_info->mac.perm_addr; + status = ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI); + if (!status) + return 0; - return 0; -unregister: /* We aren't useful with no MAC filters, so unregister if we * had an error */ - if (status && vsi->netdev->reg_state == NETREG_REGISTERED) { - dev_err(ice_pf_to_dev(pf), "Could not add MAC filters error %d. Unregistering device\n", - status); + if (vsi->netdev->reg_state == NETREG_REGISTERED) { + dev_err(ice_pf_to_dev(pf), "Could not add MAC filters error %s. Unregistering device\n", + ice_stat_str(status)); unregister_netdev(vsi->netdev); free_netdev(vsi->netdev); vsi->netdev = NULL; @@ -188,7 +175,8 @@ static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr) struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; - if (ice_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr)) + if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr, + ICE_FWD_TO_VSI)) return -EINVAL; return 0; @@ -209,7 +197,8 @@ static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr) struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; - if (ice_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr)) + if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr, + ICE_FWD_TO_VSI)) return -EINVAL; return 0; @@ -307,8 +296,8 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) } /* Remove MAC addresses in the unsync list */ - status = ice_remove_mac(hw, &vsi->tmp_unsync_list); - ice_free_fltr_list(dev, &vsi->tmp_unsync_list); + status = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list); + ice_fltr_free_list(dev, &vsi->tmp_unsync_list); if (status) { netdev_err(netdev, "Failed to delete MAC filters\n"); /* if we failed because of alloc failures, just bail */ @@ -319,8 +308,8 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) } /* Add MAC addresses in the sync list */ - status = ice_add_mac(hw, &vsi->tmp_sync_list); - ice_free_fltr_list(dev, &vsi->tmp_sync_list); + status = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list); + ice_fltr_free_list(dev, &vsi->tmp_sync_list); /* If filter is added successfully or already exists, do not go into * 'if' condition and report it as error. Instead continue processing * rest of the function. 
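The hunks above migrate the MAC filter paths from the old ice_add_mac()/ice_remove_mac() calls plus hand-rolled list setup to the new ice_fltr_* helpers, with ice_status_to_errno() giving callers a single place to translate enum ice_status into a Linux errno. A minimal sketch of the resulting calling pattern, using only the helper signatures visible in this diff (the wrapper name ice_sync_one_mac() is hypothetical, for illustration only):

static int ice_sync_one_mac(struct ice_vsi *vsi, const u8 *mac)
{
	enum ice_status status;
	LIST_HEAD(tmp_list);

	/* stage the address on a temporary list, forwarding to this VSI */
	if (ice_fltr_add_mac_to_list(vsi, &tmp_list, mac, ICE_FWD_TO_VSI))
		return -ENOMEM;

	/* program the staged list, then free it regardless of the outcome */
	status = ice_fltr_add_mac_list(vsi, &tmp_list);
	ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list);

	/* a filter that already exists is not a failure for this caller,
	 * mirroring the ice_set_mac_address() handling later in this patch
	 */
	if (status == ICE_ERR_ALREADY_EXISTS)
		return 0;

	return ice_status_to_errno(status);
}

Note that ice_status_to_errno() maps codes outside its switch, including ICE_ERR_ALREADY_EXISTS, to -EINVAL, which is why a caller that tolerates duplicate filters has to check for that status itself before converting.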
@@ -357,7 +346,8 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) vsi->current_netdev_flags &= ~IFF_ALLMULTI; goto out_promisc; } - } else if (!(vsi->current_netdev_flags & IFF_ALLMULTI)) { + } else { + /* !(vsi->current_netdev_flags & IFF_ALLMULTI) */ if (vsi->vlan_ena) promisc_m = ICE_MCAST_VLAN_PROMISC_BITS; else @@ -462,7 +452,7 @@ static void ice_prepare_for_reset(struct ice_pf *pf) { struct ice_hw *hw = &pf->hw; - int i; + unsigned int i; /* already prepared for reset */ if (test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) @@ -1017,8 +1007,8 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) if (ret == ICE_ERR_AQ_NO_WORK) break; if (ret) { - dev_err(dev, "%s Receive Queue event error %d\n", qtype, - ret); + dev_err(dev, "%s Receive Queue event error %s\n", qtype, + ice_stat_str(ret)); break; } @@ -1123,7 +1113,7 @@ static void ice_clean_mailboxq_subtask(struct ice_pf *pf) * * If not already scheduled, this puts the task into the work queue. */ -static void ice_service_task_schedule(struct ice_pf *pf) +void ice_service_task_schedule(struct ice_pf *pf) { if (!test_bit(__ICE_SERVICE_DIS, pf->state) && !test_and_set_bit(__ICE_SERVICE_SCHED, pf->state) && @@ -1198,8 +1188,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf) { struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; + unsigned int i; u32 reg; - int i; if (!test_and_clear_bit(__ICE_MDD_EVENT_PENDING, pf->state)) { /* Since the VF MDD event logging is rate limited, check if @@ -1332,8 +1322,13 @@ static void ice_handle_mdd_event(struct ice_pf *pf) * PF can be configured to reset the VF through ethtool * private flag mdd-auto-reset-vf. */ - if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) + if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) { + /* VF MDD event counters will be cleared by + * reset, so print the event prior to reset. 
+ */ + ice_print_vf_rx_mdd_event(vf); ice_reset_vf(&pf->vf[i], false); + } } } @@ -1493,7 +1488,7 @@ static void ice_service_task(struct work_struct *work) ice_process_vflr_event(pf); ice_clean_mailboxq_subtask(pf); - + ice_sync_arfs_fltrs(pf); /* Clear __ICE_SERVICE_SCHED flag to allow scheduling next event */ ice_service_task_complete(pf); @@ -1652,9 +1647,14 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename) } /* register for affinity change notifications */ - q_vector->affinity_notify.notify = ice_irq_affinity_notify; - q_vector->affinity_notify.release = ice_irq_affinity_release; - irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify); + if (!IS_ENABLED(CONFIG_RFS_ACCEL)) { + struct irq_affinity_notify *affinity_notify; + + affinity_notify = &q_vector->affinity_notify; + affinity_notify->notify = ice_irq_affinity_notify; + affinity_notify->release = ice_irq_affinity_release; + irq_set_affinity_notifier(irq_num, affinity_notify); + } /* assign the mask for this irq */ irq_set_affinity_hint(irq_num, &q_vector->affinity_mask); @@ -1666,8 +1666,9 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename) free_q_irqs: while (vector) { vector--; - irq_num = pf->msix_entries[base + vector].vector, - irq_set_affinity_notifier(irq_num, NULL); + irq_num = pf->msix_entries[base + vector].vector; + if (!IS_ENABLED(CONFIG_RFS_ACCEL)) + irq_set_affinity_notifier(irq_num, NULL); irq_set_affinity_hint(irq_num, NULL); devm_free_irq(dev, irq_num, &vsi->q_vectors[vector]); } @@ -1809,8 +1810,8 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog) status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, max_txqs); if (status) { - dev_err(dev, "Failed VSI LAN queue config for XDP, error:%d\n", - status); + dev_err(dev, "Failed VSI LAN queue config for XDP, error: %s\n", + ice_stat_str(status)); goto clear_xdp_rings; } ice_vsi_assign_bpf_prog(vsi, prog); @@ -1898,6 +1899,9 @@ free_qmap: for (i = 0; i < vsi->tc_cfg.numtc; i++) max_txqs[i] = vsi->num_txq; + /* change number of XDP Tx queues to 0 */ + vsi->num_xdp_txq = 0; + return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, max_txqs); } @@ -1931,7 +1935,7 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog, } if (!ice_is_xdp_ena_vsi(vsi) && prog) { - vsi->num_xdp_txq = vsi->alloc_txq; + vsi->num_xdp_txq = vsi->alloc_rxq; xdp_ring_err = ice_prepare_xdp_rings(vsi, prog); if (xdp_ring_err) NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed"); @@ -2137,10 +2141,8 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) } ret = IRQ_HANDLED; - if (!test_bit(__ICE_DOWN, pf->state)) { - ice_service_task_schedule(pf); - ice_irq_dynamic_ena(hw, NULL, NULL); - } + ice_service_task_schedule(pf); + ice_irq_dynamic_ena(hw, NULL, NULL); return ret; } @@ -2247,7 +2249,7 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf) return oicr_idx; pf->num_avail_sw_msix -= 1; - pf->oicr_idx = oicr_idx; + pf->oicr_idx = (u16)oicr_idx; err = devm_request_irq(dev, pf->msix_entries[pf->oicr_idx].vector, ice_misc_intr, 0, pf->int_name, pf); @@ -2331,6 +2333,7 @@ static void ice_set_netdev_features(struct net_device *netdev) dflt_features = NETIF_F_SG | NETIF_F_HIGHDMA | + NETIF_F_NTUPLE | NETIF_F_RXHASH; csumo_features = NETIF_F_RXCSUM | @@ -2342,13 +2345,27 @@ static void ice_set_netdev_features(struct net_device *netdev) NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; - tso_features = NETIF_F_TSO | + tso_features = NETIF_F_TSO | + NETIF_F_TSO_ECN | 
+ NETIF_F_TSO6 | + NETIF_F_GSO_GRE | + NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_GRE_CSUM | + NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_GSO_PARTIAL | + NETIF_F_GSO_IPXIP4 | + NETIF_F_GSO_IPXIP6 | NETIF_F_GSO_UDP_L4; + netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_GSO_GRE_CSUM; /* set features that user can change */ netdev->hw_features = dflt_features | csumo_features | vlano_features | tso_features; + /* add support for HW_CSUM on packets with MPLS header */ + netdev->mpls_features = NETIF_F_HW_CSUM; + /* enable features */ netdev->features |= netdev->hw_features; /* encap and VLAN devices inherit default, csumo and tso features */ @@ -2411,7 +2428,7 @@ static int ice_cfg_netdev(struct ice_vsi *vsi) err = register_netdev(vsi->netdev); if (err) - goto err_destroy_devlink_port; + goto err_free_netdev; devlink_port_type_eth_set(&pf->devlink_port, vsi->netdev); @@ -2422,9 +2439,11 @@ static int ice_cfg_netdev(struct ice_vsi *vsi) return 0; +err_free_netdev: + free_netdev(vsi->netdev); + vsi->netdev = NULL; err_destroy_devlink_port: ice_devlink_destroy_port(pf); - return err; } @@ -2457,6 +2476,20 @@ ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) } /** + * ice_ctrl_vsi_setup - Set up a control VSI + * @pf: board private structure + * @pi: pointer to the port_info instance + * + * Returns pointer to the successfully allocated VSI software struct + * on success, otherwise returns NULL on failure. + */ +static struct ice_vsi * +ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) +{ + return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, ICE_INVAL_VFID); +} + +/** * ice_lb_vsi_setup - Set up a loopback VSI * @pf: board private structure * @pi: pointer to the port_info instance @@ -2509,7 +2542,7 @@ ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto, /* Add a switch rule for this VLAN ID so its corresponding VLAN tagged * packets aren't pruned by the device's internal switch on Rx */ - ret = ice_vsi_add_vlan(vsi, vid); + ret = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI); if (!ret) { vsi->vlan_ena = true; set_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags); @@ -2594,12 +2627,22 @@ static int ice_setup_pf_sw(struct ice_pf *pf) */ ice_napi_add(vsi); + status = ice_set_cpu_rx_rmap(vsi); + if (status) { + dev_err(ice_pf_to_dev(pf), "Failed to set CPU Rx map VSI %d error %d\n", + vsi->vsi_num, status); + status = -EINVAL; + goto unroll_napi_add; + } status = ice_init_mac_fltr(pf); if (status) - goto unroll_napi_add; + goto free_cpu_rx_map; return status; +free_cpu_rx_map: + ice_free_cpu_rx_rmap(vsi); + unroll_napi_add: if (vsi) { ice_napi_del(vsi); @@ -2630,7 +2673,8 @@ unroll_vsi_setup: static u16 ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size) { - u16 count = 0, bit; + unsigned long bit; + u16 count = 0; mutex_lock(lock); for_each_clear_bit(bit, pf_qmap, size) @@ -2703,6 +2747,23 @@ static void ice_set_pf_caps(struct ice_pf *pf) if (func_caps->common_cap.rss_table_size) set_bit(ICE_FLAG_RSS_ENA, pf->flags); + clear_bit(ICE_FLAG_FD_ENA, pf->flags); + if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) { + u16 unused; + + /* ctrl_vsi_idx will be set to a valid value when flow director + * is setup by ice_init_fdir + */ + pf->ctrl_vsi_idx = ICE_NO_VSI; + set_bit(ICE_FLAG_FD_ENA, pf->flags); + /* force guaranteed filter pool for PF */ + ice_alloc_fd_guar_item(&pf->hw, &unused, + func_caps->fd_fltr_guar); + /* force shared filter pool for PF */ + ice_alloc_fd_shrd_item(&pf->hw, &unused, + 
func_caps->fd_fltr_best_effort); + } + pf->max_pf_txqs = func_caps->common_cap.num_txq; pf->max_pf_rxqs = func_caps->common_cap.num_rxq; } @@ -2769,6 +2830,15 @@ static int ice_ena_msix_range(struct ice_pf *pf) v_budget += needed; v_left -= needed; + /* reserve one vector for flow director */ + if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { + needed = ICE_FDIR_MSIX; + if (v_left < needed) + goto no_hw_vecs_left_err; + v_budget += needed; + v_left -= needed; + } + pf->msix_entries = devm_kcalloc(dev, v_budget, sizeof(*pf->msix_entries), GFP_KERNEL); @@ -2793,8 +2863,10 @@ static int ice_ena_msix_range(struct ice_pf *pf) if (v_actual < v_budget) { dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n", v_budget, v_actual); -/* 2 vectors for LAN (traffic + OICR) */ +/* 2 vectors each for LAN and RDMA (traffic + OICR), one for flow director */ #define ICE_MIN_LAN_VECS 2 +#define ICE_MIN_RDMA_VECS 2 +#define ICE_MIN_VECS (ICE_MIN_LAN_VECS + ICE_MIN_RDMA_VECS + 1) if (v_actual < ICE_MIN_LAN_VECS) { /* error if we can't get minimum vectors */ @@ -2869,8 +2941,8 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf) } /* populate SW interrupts pool with number of OS granted IRQs. */ - pf->num_avail_sw_msix = vectors; - pf->irq_tracker->num_entries = vectors; + pf->num_avail_sw_msix = (u16)vectors; + pf->irq_tracker->num_entries = (u16)vectors; pf->irq_tracker->end = pf->irq_tracker->num_entries; return 0; @@ -2902,9 +2974,9 @@ int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx) } if (new_tx) - vsi->req_txq = new_tx; + vsi->req_txq = (u16)new_tx; if (new_rx) - vsi->req_rxq = new_rx; + vsi->req_rxq = (u16)new_rx; /* set for the next time the netdev is started */ if (!netif_running(vsi->netdev)) { @@ -2985,6 +3057,9 @@ ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status) *status = ICE_ERR_NOT_SUPPORTED; } break; + case ICE_ERR_FW_DDP_MISMATCH: + dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package. Please update the device's NVM. Entering safe mode.\n"); + break; case ICE_ERR_BUF_TOO_SHORT: case ICE_ERR_CFG: dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n"); @@ -3013,6 +3088,9 @@ ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status) case ICE_AQ_RC_EBADMAN: case ICE_AQ_RC_EBADBUF: dev_err(dev, "An error occurred on the device while loading the DDP package. The device will be reset.\n"); + /* poll for reset to complete */ + if (ice_check_reset(hw)) + dev_err(dev, "Error resetting device. Please reload the driver\n"); return; default: break; @@ -3100,6 +3178,53 @@ static enum ice_status ice_send_version(struct ice_pf *pf) } /** + * ice_init_fdir - Initialize flow director VSI and configuration + * @pf: pointer to the PF instance + * + * returns 0 on success, negative on error + */ +static int ice_init_fdir(struct ice_pf *pf) +{ + struct device *dev = ice_pf_to_dev(pf); + struct ice_vsi *ctrl_vsi; + int err; + + /* Side Band Flow Director needs to have a control VSI. + * Allocate it and store it in the PF. 
+ */ + ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info); + if (!ctrl_vsi) { + dev_dbg(dev, "could not create control VSI\n"); + return -ENOMEM; + } + + err = ice_vsi_open_ctrl(ctrl_vsi); + if (err) { + dev_dbg(dev, "could not open control VSI\n"); + goto err_vsi_open; + } + + mutex_init(&pf->hw.fdir_fltr_lock); + + err = ice_fdir_create_dflt_rules(pf); + if (err) + goto err_fdir_rule; + + return 0; + +err_fdir_rule: + ice_fdir_release_flows(&pf->hw); + ice_vsi_close(ctrl_vsi); +err_vsi_open: + ice_vsi_release(ctrl_vsi); + if (pf->ctrl_vsi_idx != ICE_NO_VSI) { + pf->vsi[pf->ctrl_vsi_idx] = NULL; + pf->ctrl_vsi_idx = ICE_NO_VSI; + } + return err; +} + +/** * ice_get_opt_fw_name - return optional firmware file name or NULL * @pf: pointer to the PF instance */ @@ -3295,12 +3420,9 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) if (err) { dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err); err = -EIO; - goto err_init_interrupt_unroll; + goto err_init_vsi_unroll; } - /* Driver is mostly up */ - clear_bit(__ICE_DOWN, pf->state); - /* In case of MSIX we are going to setup the misc vector right here * to handle admin queue events etc. In case of legacy and MSI * the misc functionality and queue processing is combined in @@ -3356,12 +3478,16 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) ice_verify_cacheline_size(pf); - /* If no DDP driven features have to be setup, return here */ + /* If no DDP driven features have to be setup, we are done with probe */ if (ice_is_safe_mode(pf)) - return 0; + goto probe_done; /* initialize DDP driven features */ + /* Note: Flow director init failure is non-fatal to load */ + if (ice_init_fdir(pf)) + dev_err(dev, "could not initialize flow director\n"); + /* Note: DCB init failure is non-fatal to load */ if (ice_init_pf_dcb(pf, false)) { clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); @@ -3373,6 +3499,9 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) /* print PCI link speed and width */ pcie_print_link_status(pf->pdev); +probe_done: + /* ready to go, so clear down state bit */ + clear_bit(__ICE_DOWN, pf->state); return 0; err_alloc_sw_unroll: @@ -3384,6 +3513,7 @@ err_msix_misc_unroll: ice_free_irq_msix_misc(pf); err_init_interrupt_unroll: ice_clear_interrupt_scheme(pf); +err_init_vsi_unroll: devm_kfree(dev, pf->vsi); err_init_pf_unroll: ice_deinit_pf(pf); @@ -3421,6 +3551,9 @@ static void ice_remove(struct pci_dev *pdev) set_bit(__ICE_DOWN, pf->state); ice_service_task_stop(pf); + mutex_destroy(&(&pf->hw)->fdir_fltr_lock); + if (!ice_is_safe_mode(pf)) + ice_remove_arfs(pf); ice_devlink_destroy_port(pf); ice_vsi_release_all(pf); ice_free_irq_msix_misc(pf); @@ -3705,25 +3838,24 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi) return -EBUSY; } - /* When we change the MAC address we also have to change the MAC address - * based filter rules that were created previously for the old MAC - * address. So first, we remove the old filter rule using ice_remove_mac - * and then create a new filter rule using ice_add_mac via - * ice_vsi_cfg_mac_fltr function call for both add and/or remove - * filters. - */ - status = ice_vsi_cfg_mac_fltr(vsi, netdev->dev_addr, false); - if (status) { + /* Clean up old MAC filter. 
Not an error if old filter doesn't exist */ + status = ice_fltr_remove_mac(vsi, netdev->dev_addr, ICE_FWD_TO_VSI); + if (status && status != ICE_ERR_DOES_NOT_EXIST) { err = -EADDRNOTAVAIL; goto err_update_filters; } - status = ice_vsi_cfg_mac_fltr(vsi, mac, true); - if (status) { - err = -EADDRNOTAVAIL; - goto err_update_filters; + /* Add filter for new MAC. If filter exists, just return success */ + status = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI); + if (status == ICE_ERR_ALREADY_EXISTS) { + netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac); + return 0; } + /* error if the new filter addition failed */ + if (status) + err = -EADDRNOTAVAIL; + err_update_filters: if (err) { netdev_err(netdev, "can't set MAC %pM. filter update failed\n", @@ -3740,8 +3872,8 @@ err_update_filters: flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL; status = ice_aq_manage_mac_write(hw, mac, flags, NULL); if (status) { - netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %d\n", - mac, status); + netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %s\n", + mac, ice_stat_str(status)); } return 0; } @@ -3805,8 +3937,8 @@ ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate) status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc, q_handle, ICE_MAX_BW, maxrate * 1000); if (status) { - netdev_err(netdev, "Unable to set Tx max rate, error %d\n", - status); + netdev_err(netdev, "Unable to set Tx max rate, error %s\n", + ice_stat_str(status)); return -EIO; } @@ -3938,6 +4070,16 @@ ice_set_features(struct net_device *netdev, netdev_features_t features) (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) ret = ice_cfg_vlan_pruning(vsi, false, false); + if ((features & NETIF_F_NTUPLE) && + !(netdev->features & NETIF_F_NTUPLE)) { + ice_vsi_manage_fdir(vsi, true); + ice_init_arfs(vsi); + } else if (!(features & NETIF_F_NTUPLE) && + (netdev->features & NETIF_F_NTUPLE)) { + ice_vsi_manage_fdir(vsi, false); + ice_clear_arfs(vsi); + } + return ret; } @@ -4084,6 +4226,33 @@ ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, u64 *bytes) } /** + * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters + * @vsi: the VSI to be updated + * @rings: rings to work on + * @count: number of rings + */ +static void +ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, struct ice_ring **rings, + u16 count) +{ + struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats; + u16 i; + + for (i = 0; i < count; i++) { + struct ice_ring *ring; + u64 pkts, bytes; + + ring = READ_ONCE(rings[i]); + ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes); + vsi_stats->tx_packets += pkts; + vsi_stats->tx_bytes += bytes; + vsi->tx_restart += ring->tx_stats.restart_q; + vsi->tx_busy += ring->tx_stats.tx_busy; + vsi->tx_linearize += ring->tx_stats.tx_linearize; + } +} + +/** * ice_update_vsi_ring_stats - Update VSI stats counters * @vsi: the VSI to be updated */ @@ -4110,15 +4279,7 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi) rcu_read_lock(); /* update Tx rings counters */ - ice_for_each_txq(vsi, i) { - ring = READ_ONCE(vsi->tx_rings[i]); - ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes); - vsi_stats->tx_packets += pkts; - vsi_stats->tx_bytes += bytes; - vsi->tx_restart += ring->tx_stats.restart_q; - vsi->tx_busy += ring->tx_stats.tx_busy; - vsi->tx_linearize += ring->tx_stats.tx_linearize; - } + ice_update_vsi_tx_ring_stats(vsi, vsi->tx_rings, vsi->num_txq); /* update Rx rings counters */ ice_for_each_rxq(vsi, i) { @@ -4130,6 +4291,11 @@ static void 
ice_update_vsi_ring_stats(struct ice_vsi *vsi) vsi->rx_page_failed += ring->rx_stats.alloc_page_failed; } + /* update XDP Tx rings counters */ + if (ice_is_xdp_ena_vsi(vsi)) + ice_update_vsi_tx_ring_stats(vsi, vsi->xdp_rings, + vsi->num_xdp_txq); + rcu_read_unlock(); } @@ -4162,7 +4328,13 @@ void ice_update_vsi_stats(struct ice_vsi *vsi) if (vsi->type == ICE_VSI_PF) { cur_ns->rx_crc_errors = pf->stats.crc_errors; cur_ns->rx_errors = pf->stats.crc_errors + - pf->stats.illegal_bytes; + pf->stats.illegal_bytes + + pf->stats.rx_len_errors + + pf->stats.rx_undersize + + pf->hw_csum_rx_error + + pf->stats.rx_jabber + + pf->stats.rx_fragments + + pf->stats.rx_oversize; cur_ns->rx_length_errors = pf->stats.rx_len_errors; /* record drops from the port level */ cur_ns->rx_missed_errors = pf->stats.eth.rx_discards; @@ -4177,6 +4349,7 @@ void ice_update_pf_stats(struct ice_pf *pf) { struct ice_hw_port_stats *prev_ps, *cur_ps; struct ice_hw *hw = &pf->hw; + u16 fd_ctr_base; u8 port; port = hw->port_info->lport; @@ -4265,6 +4438,12 @@ void ice_update_pf_stats(struct ice_pf *pf) ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded, &prev_ps->tx_size_big, &cur_ps->tx_size_big); + fd_ctr_base = hw->fd_ctr_base; + + ice_stat_update40(hw, + GLSTAT_FD_CNT0L(ICE_FD_SB_STAT_IDX(fd_ctr_base)), + pf->stat_prev_loaded, &prev_ps->fd_sb_match, + &cur_ps->fd_sb_match); ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded, &prev_ps->link_xon_rx, &cur_ps->link_xon_rx); @@ -4308,6 +4487,8 @@ void ice_update_pf_stats(struct ice_pf *pf) ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded, &prev_ps->rx_jabber, &cur_ps->rx_jabber); + cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0; + pf->stat_prev_loaded = true; } @@ -4493,6 +4674,62 @@ int ice_vsi_setup_rx_rings(struct ice_vsi *vsi) } /** + * ice_vsi_open_ctrl - open control VSI for use + * @vsi: the VSI to open + * + * Initialization of the Control VSI + * + * Returns 0 on success, negative value on error + */ +int ice_vsi_open_ctrl(struct ice_vsi *vsi) +{ + char int_name[ICE_INT_NAME_STR_LEN]; + struct ice_pf *pf = vsi->back; + struct device *dev; + int err; + + dev = ice_pf_to_dev(pf); + /* allocate descriptors */ + err = ice_vsi_setup_tx_rings(vsi); + if (err) + goto err_setup_tx; + + err = ice_vsi_setup_rx_rings(vsi); + if (err) + goto err_setup_rx; + + err = ice_vsi_cfg(vsi); + if (err) + goto err_setup_rx; + + snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl", + dev_driver_string(dev), dev_name(dev)); + err = ice_vsi_req_irq_msix(vsi, int_name); + if (err) + goto err_setup_rx; + + ice_vsi_cfg_msix(vsi); + + err = ice_vsi_start_all_rx_rings(vsi); + if (err) + goto err_up_complete; + + clear_bit(__ICE_DOWN, vsi->state); + ice_vsi_ena_irq(vsi); + + return 0; + +err_up_complete: + ice_down(vsi); +err_setup_rx: + ice_vsi_free_rx_rings(vsi); +err_setup_tx: + ice_vsi_free_tx_rings(vsi); + + return err; +} + +/** * ice_vsi_open - Called when a network interface is made active * @vsi: the VSI to open * @@ -4604,8 +4841,9 @@ static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type) /* replay filters for the VSI */ status = ice_replay_vsi(&pf->hw, vsi->idx); if (status) { - dev_err(dev, "replay VSI failed, status %d, VSI index %d, type %s\n", - status, vsi->idx, ice_vsi_type_str(type)); + dev_err(dev, "replay VSI failed, status %s, VSI index %d, type %s\n", + ice_stat_str(status), vsi->idx, + ice_vsi_type_str(type)); return -EIO; } @@ -4659,6 +4897,11 @@ static void ice_update_pf_netdev_link(struct 
ice_pf *pf) * ice_rebuild - rebuild after reset * @pf: PF to rebuild * @reset_type: type of reset + * + * Do not rebuild VF VSI in this flow because that is already handled via + * ice_reset_all_vfs(). This is because requirements for resetting a VF after a + * PFR/CORER/GLOBER/etc. are different than the normal flow. Also, we don't want + * to reset/rebuild all the VF VSI twice. */ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) { @@ -4674,7 +4917,8 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) ret = ice_init_all_ctrlq(hw); if (ret) { - dev_err(dev, "control queues init failed %d\n", ret); + dev_err(dev, "control queues init failed %s\n", + ice_stat_str(ret)); goto err_init_ctrlq; } @@ -4690,7 +4934,8 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) ret = ice_clear_pf_cfg(hw); if (ret) { - dev_err(dev, "clear PF configuration failed %d\n", ret); + dev_err(dev, "clear PF configuration failed %s\n", + ice_stat_str(ret)); goto err_init_ctrlq; } @@ -4704,7 +4949,13 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) ret = ice_get_caps(hw); if (ret) { - dev_err(dev, "ice_get_caps failed %d\n", ret); + dev_err(dev, "ice_get_caps failed %s\n", ice_stat_str(ret)); + goto err_init_ctrlq; + } + + ret = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL); + if (ret) { + dev_err(dev, "set_mac_cfg failed %s\n", ice_stat_str(ret)); goto err_init_ctrlq; } @@ -4723,6 +4974,21 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) goto err_sched_init_port; } + if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { + wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M); + if (!rd32(hw, PFQF_FD_SIZE)) { + u16 unused, guar, b_effort; + + guar = hw->func_caps.fd_fltr_guar; + b_effort = hw->func_caps.fd_fltr_best_effort; + + /* force guaranteed filter pool for PF */ + ice_alloc_fd_guar_item(hw, &unused, guar); + /* force shared filter pool for PF */ + ice_alloc_fd_shrd_item(hw, &unused, b_effort); + } + } + if (test_bit(ICE_FLAG_DCB_ENA, pf->flags)) ice_dcb_rebuild(pf); @@ -4733,12 +4999,22 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) goto err_vsi_rebuild; } - if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) { - err = ice_vsi_rebuild_by_type(pf, ICE_VSI_VF); + /* If Flow Director is active */ + if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { + err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL); if (err) { - dev_err(dev, "VF VSI rebuild failed: %d\n", err); + dev_err(dev, "control VSI rebuild failed: %d\n", err); goto err_vsi_rebuild; } + + /* replay HW Flow Director recipes */ + if (hw->fdir_prof) + ice_fdir_replay_flows(hw); + + /* replay Flow Director filters */ + ice_fdir_replay_fltrs(pf); + + ice_rebuild_arfs(pf); } ice_update_pf_netdev_link(pf); @@ -4746,8 +5022,8 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) /* tell the firmware we are up */ ret = ice_send_version(pf); if (ret) { - dev_err(dev, "Rebuild failed due to error sending driver version: %d\n", - ret); + dev_err(dev, "Rebuild failed due to error sending driver version: %s\n", + ice_stat_str(ret)); goto err_vsi_rebuild; } @@ -4795,7 +5071,7 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu) struct ice_pf *pf = vsi->back; u8 count = 0; - if (new_mtu == netdev->mtu) { + if (new_mtu == (int)netdev->mtu) { netdev_warn(netdev, "MTU is already %u\n", netdev->mtu); return 0; } @@ -4810,11 +5086,11 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu) } 
} - if (new_mtu < netdev->min_mtu) { + if (new_mtu < (int)netdev->min_mtu) { netdev_err(netdev, "new MTU invalid. min_mtu is %d\n", netdev->min_mtu); return -EINVAL; - } else if (new_mtu > netdev->max_mtu) { + } else if (new_mtu > (int)netdev->max_mtu) { netdev_err(netdev, "new MTU invalid. max_mtu is %d\n", netdev->min_mtu); return -EINVAL; @@ -4835,7 +5111,7 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu) return -EBUSY; } - netdev->mtu = new_mtu; + netdev->mtu = (unsigned int)new_mtu; /* if VSI is up, bring it down and then back up */ if (!test_and_set_bit(__ICE_DOWN, vsi->state)) { @@ -4859,6 +5135,116 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu) } /** + * ice_aq_str - convert AQ err code to a string + * @aq_err: the AQ error code to convert + */ +const char *ice_aq_str(enum ice_aq_err aq_err) +{ + switch (aq_err) { + case ICE_AQ_RC_OK: + return "OK"; + case ICE_AQ_RC_EPERM: + return "ICE_AQ_RC_EPERM"; + case ICE_AQ_RC_ENOENT: + return "ICE_AQ_RC_ENOENT"; + case ICE_AQ_RC_ENOMEM: + return "ICE_AQ_RC_ENOMEM"; + case ICE_AQ_RC_EBUSY: + return "ICE_AQ_RC_EBUSY"; + case ICE_AQ_RC_EEXIST: + return "ICE_AQ_RC_EEXIST"; + case ICE_AQ_RC_EINVAL: + return "ICE_AQ_RC_EINVAL"; + case ICE_AQ_RC_ENOSPC: + return "ICE_AQ_RC_ENOSPC"; + case ICE_AQ_RC_ENOSYS: + return "ICE_AQ_RC_ENOSYS"; + case ICE_AQ_RC_ENOSEC: + return "ICE_AQ_RC_ENOSEC"; + case ICE_AQ_RC_EBADSIG: + return "ICE_AQ_RC_EBADSIG"; + case ICE_AQ_RC_ESVN: + return "ICE_AQ_RC_ESVN"; + case ICE_AQ_RC_EBADMAN: + return "ICE_AQ_RC_EBADMAN"; + case ICE_AQ_RC_EBADBUF: + return "ICE_AQ_RC_EBADBUF"; + } + + return "ICE_AQ_RC_UNKNOWN"; +} + +/** + * ice_stat_str - convert status err code to a string + * @stat_err: the status error code to convert + */ +const char *ice_stat_str(enum ice_status stat_err) +{ + switch (stat_err) { + case ICE_SUCCESS: + return "OK"; + case ICE_ERR_PARAM: + return "ICE_ERR_PARAM"; + case ICE_ERR_NOT_IMPL: + return "ICE_ERR_NOT_IMPL"; + case ICE_ERR_NOT_READY: + return "ICE_ERR_NOT_READY"; + case ICE_ERR_NOT_SUPPORTED: + return "ICE_ERR_NOT_SUPPORTED"; + case ICE_ERR_BAD_PTR: + return "ICE_ERR_BAD_PTR"; + case ICE_ERR_INVAL_SIZE: + return "ICE_ERR_INVAL_SIZE"; + case ICE_ERR_DEVICE_NOT_SUPPORTED: + return "ICE_ERR_DEVICE_NOT_SUPPORTED"; + case ICE_ERR_RESET_FAILED: + return "ICE_ERR_RESET_FAILED"; + case ICE_ERR_FW_API_VER: + return "ICE_ERR_FW_API_VER"; + case ICE_ERR_NO_MEMORY: + return "ICE_ERR_NO_MEMORY"; + case ICE_ERR_CFG: + return "ICE_ERR_CFG"; + case ICE_ERR_OUT_OF_RANGE: + return "ICE_ERR_OUT_OF_RANGE"; + case ICE_ERR_ALREADY_EXISTS: + return "ICE_ERR_ALREADY_EXISTS"; + case ICE_ERR_NVM_CHECKSUM: + return "ICE_ERR_NVM_CHECKSUM"; + case ICE_ERR_BUF_TOO_SHORT: + return "ICE_ERR_BUF_TOO_SHORT"; + case ICE_ERR_NVM_BLANK_MODE: + return "ICE_ERR_NVM_BLANK_MODE"; + case ICE_ERR_IN_USE: + return "ICE_ERR_IN_USE"; + case ICE_ERR_MAX_LIMIT: + return "ICE_ERR_MAX_LIMIT"; + case ICE_ERR_RESET_ONGOING: + return "ICE_ERR_RESET_ONGOING"; + case ICE_ERR_HW_TABLE: + return "ICE_ERR_HW_TABLE"; + case ICE_ERR_DOES_NOT_EXIST: + return "ICE_ERR_DOES_NOT_EXIST"; + case ICE_ERR_FW_DDP_MISMATCH: + return "ICE_ERR_FW_DDP_MISMATCH"; + case ICE_ERR_AQ_ERROR: + return "ICE_ERR_AQ_ERROR"; + case ICE_ERR_AQ_TIMEOUT: + return "ICE_ERR_AQ_TIMEOUT"; + case ICE_ERR_AQ_FULL: + return "ICE_ERR_AQ_FULL"; + case ICE_ERR_AQ_NO_WORK: + return "ICE_ERR_AQ_NO_WORK"; + case ICE_ERR_AQ_EMPTY: + return "ICE_ERR_AQ_EMPTY"; + case ICE_ERR_AQ_FW_CRITICAL: + return "ICE_ERR_AQ_FW_CRITICAL"; + } + + return 
"ICE_ERR_UNKNOWN"; +} + +/** * ice_set_rss - Set RSS keys and lut * @vsi: Pointer to VSI structure * @seed: RSS hash seed @@ -4882,8 +5268,9 @@ int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) status = ice_aq_set_rss_key(hw, vsi->idx, buf); if (status) { - dev_err(dev, "Cannot set RSS key, err %d aq_err %d\n", - status, hw->adminq.rq_last_status); + dev_err(dev, "Cannot set RSS key, err %s aq_err %s\n", + ice_stat_str(status), + ice_aq_str(hw->adminq.sq_last_status)); return -EIO; } } @@ -4892,8 +5279,9 @@ int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) status = ice_aq_set_rss_lut(hw, vsi->idx, vsi->rss_lut_type, lut, lut_size); if (status) { - dev_err(dev, "Cannot set RSS lut, err %d aq_err %d\n", - status, hw->adminq.rq_last_status); + dev_err(dev, "Cannot set RSS lut, err %s aq_err %s\n", + ice_stat_str(status), + ice_aq_str(hw->adminq.sq_last_status)); return -EIO; } } @@ -4924,8 +5312,9 @@ int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) status = ice_aq_get_rss_key(hw, vsi->idx, buf); if (status) { - dev_err(dev, "Cannot get RSS key, err %d aq_err %d\n", - status, hw->adminq.rq_last_status); + dev_err(dev, "Cannot get RSS key, err %s aq_err %s\n", + ice_stat_str(status), + ice_aq_str(hw->adminq.sq_last_status)); return -EIO; } } @@ -4934,8 +5323,9 @@ int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) status = ice_aq_get_rss_lut(hw, vsi->idx, vsi->rss_lut_type, lut, lut_size); if (status) { - dev_err(dev, "Cannot get RSS lut, err %d aq_err %d\n", - status, hw->adminq.rq_last_status); + dev_err(dev, "Cannot get RSS lut, err %s aq_err %s\n", + ice_stat_str(status), + ice_aq_str(hw->adminq.sq_last_status)); return -EIO; } } @@ -5002,8 +5392,9 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode) status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); if (status) { - dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %d\n", - bmode, status, hw->adminq.sq_last_status); + dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %s aq_err %s\n", + bmode, ice_stat_str(status), + ice_aq_str(hw->adminq.sq_last_status)); ret = -EIO; goto out; } @@ -5072,8 +5463,9 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, */ status = ice_update_sw_rule_bridge_mode(hw); if (status) { - netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %d\n", - mode, status, hw->adminq.sq_last_status); + netdev_err(dev, "switch rule update failed, mode = %d err %s aq_err %s\n", + mode, ice_stat_str(status), + ice_aq_str(hw->adminq.sq_last_status)); /* revert hw->evb_veb */ hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB); return -EIO; @@ -5100,6 +5492,16 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue) pf->tx_timeout_count++; + /* Check if PFC is enabled for the TC to which the queue belongs + * to. 
If yes then Tx timeout is not caused by a hung queue, no + * need to reset and rebuild + */ + if (ice_is_pfc_causing_hung_q(pf, txqueue)) { + dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n", + txqueue); + return; + } + /* now that we have an index, find the tx_ring struct */ for (i = 0; i < vsi->num_txq; i++) if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) @@ -5158,6 +5560,70 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue) } /** + * ice_udp_tunnel_add - Get notifications about UDP tunnel ports that come up + * @netdev: This physical port's netdev + * @ti: Tunnel endpoint information + */ +static void +ice_udp_tunnel_add(struct net_device *netdev, struct udp_tunnel_info *ti) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_vsi *vsi = np->vsi; + struct ice_pf *pf = vsi->back; + enum ice_tunnel_type tnl_type; + u16 port = ntohs(ti->port); + enum ice_status status; + + switch (ti->type) { + case UDP_TUNNEL_TYPE_VXLAN: + tnl_type = TNL_VXLAN; + break; + case UDP_TUNNEL_TYPE_GENEVE: + tnl_type = TNL_GENEVE; + break; + default: + netdev_err(netdev, "Unknown tunnel type\n"); + return; + } + + status = ice_create_tunnel(&pf->hw, tnl_type, port); + if (status == ICE_ERR_OUT_OF_RANGE) + netdev_info(netdev, "Max tunneled UDP ports reached, port %d not added\n", + port); + else if (status) + netdev_err(netdev, "Error adding UDP tunnel - %s\n", + ice_stat_str(status)); +} + +/** + * ice_udp_tunnel_del - Get notifications about UDP tunnel ports that go away + * @netdev: This physical port's netdev + * @ti: Tunnel endpoint information + */ +static void +ice_udp_tunnel_del(struct net_device *netdev, struct udp_tunnel_info *ti) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_vsi *vsi = np->vsi; + struct ice_pf *pf = vsi->back; + u16 port = ntohs(ti->port); + enum ice_status status; + bool retval; + + retval = ice_tunnel_port_in_use(&pf->hw, port, NULL); + if (!retval) { + netdev_info(netdev, "port %d not found in UDP tunnels list\n", + port); + return; + } + + status = ice_destroy_tunnel(&pf->hw, port, false); + if (status) + netdev_err(netdev, "error deleting port %d from UDP tunnels list\n", + port); +} + +/** * ice_open - Called when a network interface becomes active * @netdev: network interface device structure * @@ -5173,14 +5639,20 @@ int ice_open(struct net_device *netdev) { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; + struct ice_pf *pf = vsi->back; struct ice_port_info *pi; int err; - if (test_bit(__ICE_NEEDS_RESTART, vsi->back->state)) { + if (test_bit(__ICE_NEEDS_RESTART, pf->state)) { netdev_err(netdev, "driver needs to be unloaded and reloaded\n"); return -EIO; } + if (test_bit(__ICE_DOWN, pf->state)) { + netdev_err(netdev, "device is not ready yet\n"); + return -EBUSY; + } + netif_carrier_off(netdev); pi = vsi->port_info; @@ -5213,6 +5685,10 @@ int ice_open(struct net_device *netdev) if (err) netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n", vsi->vsi_num, vsi->vsw->sw_id); + + /* Update existing tunnels information */ + udp_tunnel_get_rx_info(netdev); + return err; } @@ -5263,21 +5739,21 @@ ice_features_check(struct sk_buff *skb, features &= ~NETIF_F_GSO_MASK; len = skb_network_header(skb) - skb->data; - if (len & ~(ICE_TXD_MACLEN_MAX)) + if (len > ICE_TXD_MACLEN_MAX || len & 0x1) goto out_rm_features; len = skb_transport_header(skb) - skb_network_header(skb); - if (len & ~(ICE_TXD_IPLEN_MAX)) + if (len > ICE_TXD_IPLEN_MAX 
|| len & 0x1) goto out_rm_features; if (skb->encapsulation) { len = skb_inner_network_header(skb) - skb_transport_header(skb); - if (len & ~(ICE_TXD_L4LEN_MAX)) + if (len > ICE_TXD_L4LEN_MAX || len & 0x1) goto out_rm_features; len = skb_inner_transport_header(skb) - skb_inner_network_header(skb); - if (len & ~(ICE_TXD_IPLEN_MAX)) + if (len > ICE_TXD_IPLEN_MAX || len & 0x1) goto out_rm_features; } @@ -5322,8 +5798,13 @@ static const struct net_device_ops ice_netdev_ops = { .ndo_bridge_setlink = ice_bridge_setlink, .ndo_fdb_add = ice_fdb_add, .ndo_fdb_del = ice_fdb_del, +#ifdef CONFIG_RFS_ACCEL + .ndo_rx_flow_steer = ice_rx_flow_steer, +#endif .ndo_tx_timeout = ice_tx_timeout, .ndo_bpf = ice_xdp, .ndo_xdp_xmit = ice_xdp_xmit, .ndo_xsk_wakeup = ice_xsk_wakeup, + .ndo_udp_tunnel_add = ice_udp_tunnel_add, + .ndo_udp_tunnel_del = ice_udp_tunnel_del, }; diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c index 8beb675d676b..b049c1c30c88 100644 --- a/drivers/net/ethernet/intel/ice/ice_nvm.c +++ b/drivers/net/ethernet/intel/ice/ice_nvm.c @@ -172,7 +172,8 @@ void ice_release_nvm(struct ice_hw *hw) * * Reads one 16 bit word from the Shadow RAM using the ice_read_sr_word_aq. */ -enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data) +static enum ice_status +ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data) { enum ice_status status; @@ -196,7 +197,7 @@ enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data) * Area (PFA) and returns the TLV pointer and length. The caller can * use these to read the variable length TLV value. */ -enum ice_status +static enum ice_status ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, u16 module_type) { @@ -367,6 +368,87 @@ static enum ice_status ice_get_orom_ver_info(struct ice_hw *hw) } /** + * ice_get_netlist_ver_info + * @hw: pointer to the HW struct + * + * Get the netlist version information + */ +static enum ice_status ice_get_netlist_ver_info(struct ice_hw *hw) +{ + struct ice_netlist_ver_info *ver = &hw->netlist_ver; + enum ice_status ret; + u32 id_blk_start; + __le16 raw_data; + u16 data, i; + u16 *buff; + + ret = ice_acquire_nvm(hw, ICE_RES_READ); + if (ret) + return ret; + buff = kcalloc(ICE_AQC_NVM_NETLIST_ID_BLK_LEN, sizeof(*buff), + GFP_KERNEL); + if (!buff) { + ret = ICE_ERR_NO_MEMORY; + goto exit_no_mem; + } + + /* read module length */ + ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID, + ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN_OFFSET * 2, + ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN, &raw_data, + false, false, NULL); + if (ret) + goto exit_error; + + data = le16_to_cpu(raw_data); + /* exit if length is = 0 */ + if (!data) + goto exit_error; + + /* read node count */ + ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID, + ICE_AQC_NVM_NETLIST_NODE_COUNT_OFFSET * 2, + ICE_AQC_NVM_NETLIST_NODE_COUNT_LEN, &raw_data, + false, false, NULL); + if (ret) + goto exit_error; + data = le16_to_cpu(raw_data) & ICE_AQC_NVM_NETLIST_NODE_COUNT_M; + + /* netlist ID block starts from offset 4 + node count * 2 */ + id_blk_start = ICE_AQC_NVM_NETLIST_ID_BLK_START_OFFSET + data * 2; + + /* read the entire netlist ID block */ + ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID, + id_blk_start * 2, + ICE_AQC_NVM_NETLIST_ID_BLK_LEN * 2, buff, false, + false, NULL); + if (ret) + goto exit_error; + + for (i = 0; i < ICE_AQC_NVM_NETLIST_ID_BLK_LEN; i++) + buff[i] = le16_to_cpu(((__force __le16 *)buff)[i]); + + ver->major = 
(buff[ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_HIGH] << 16) | + buff[ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_LOW]; + ver->minor = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_HIGH] << 16) | + buff[ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_LOW]; + ver->type = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_HIGH] << 16) | + buff[ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_LOW]; + ver->rev = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_REV_HIGH] << 16) | + buff[ICE_AQC_NVM_NETLIST_ID_BLK_REV_LOW]; + ver->cust_ver = buff[ICE_AQC_NVM_NETLIST_ID_BLK_CUST_VER]; + /* Read the left most 4 bytes of SHA */ + ver->hash = buff[ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH + 15] << 16 | + buff[ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH + 14]; + +exit_error: + kfree(buff); +exit_no_mem: + ice_release_nvm(hw); + return ret; +} + +/** * ice_discover_flash_size - Discover the available flash size. * @hw: pointer to the HW struct * @@ -515,6 +597,11 @@ enum ice_status ice_init_nvm(struct ice_hw *hw) return status; } + /* read the netlist version information */ + status = ice_get_netlist_ver_info(hw); + if (status) + ice_debug(hw, ICE_DBG_INIT, "Failed to read netlist info.\n"); + return 0; } diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.h b/drivers/net/ethernet/intel/ice/ice_nvm.h index 999f273ba6ad..165eda07b93d 100644 --- a/drivers/net/ethernet/intel/ice/ice_nvm.h +++ b/drivers/net/ethernet/intel/ice/ice_nvm.h @@ -11,10 +11,6 @@ enum ice_status ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data, bool read_shadow_ram); enum ice_status -ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, - u16 module_type); -enum ice_status ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size); enum ice_status ice_init_nvm(struct ice_hw *hw); -enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data); #endif /* _ICE_NVM_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_protocol_type.h b/drivers/net/ethernet/intel/ice/ice_protocol_type.h index 71647566964e..7f4c1ec1eff2 100644 --- a/drivers/net/ethernet/intel/ice/ice_protocol_type.h +++ b/drivers/net/ethernet/intel/ice/ice_protocol_type.h @@ -12,12 +12,15 @@ */ enum ice_prot_id { ICE_PROT_ID_INVAL = 0, + ICE_PROT_MAC_OF_OR_S = 1, ICE_PROT_IPV4_OF_OR_S = 32, ICE_PROT_IPV4_IL = 33, ICE_PROT_IPV6_OF_OR_S = 40, ICE_PROT_IPV6_IL = 41, ICE_PROT_TCP_IL = 49, + ICE_PROT_UDP_OF = 52, ICE_PROT_UDP_IL_OR_S = 53, + ICE_PROT_GRE_OF = 64, ICE_PROT_SCTP_IL = 96, ICE_PROT_META_ID = 255, /* when offset == metadata */ ICE_PROT_INVALID = 255 /* when offset == ICE_FV_OFFSET_INVAL */ diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c index eae707ddf8e8..d63acd2fcf79 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.c +++ b/drivers/net/ethernet/intel/ice/ice_sched.c @@ -1917,7 +1917,7 @@ ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node, */ static enum ice_status ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node, - enum ice_rl_type rl_type, u8 bw_alloc) + enum ice_rl_type rl_type, u16 bw_alloc) { struct ice_aqc_txsched_elem_data buf; struct ice_aqc_txsched_elem *data; diff --git a/drivers/net/ethernet/intel/ice/ice_status.h b/drivers/net/ethernet/intel/ice/ice_status.h index a9a8bc3aca42..4028c6365172 100644 --- a/drivers/net/ethernet/intel/ice/ice_status.h +++ b/drivers/net/ethernet/intel/ice/ice_status.h @@ -27,6 +27,8 @@ enum ice_status { ICE_ERR_MAX_LIMIT = -17, ICE_ERR_RESET_ONGOING = -18, ICE_ERR_HW_TABLE = -19, + ICE_ERR_FW_DDP_MISMATCH = -20, + ICE_ERR_NVM_CHECKSUM = 
-51, ICE_ERR_BUF_TOO_SHORT = -52, ICE_ERR_NVM_BLANK_MODE = -53, @@ -35,6 +37,7 @@ enum ice_status { ICE_ERR_AQ_FULL = -102, ICE_ERR_AQ_NO_WORK = -103, ICE_ERR_AQ_EMPTY = -104, + ICE_ERR_AQ_FW_CRITICAL = -105, }; #endif /* _ICE_STATUS_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index 51825a203e35..0156b73df1b1 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -593,8 +593,8 @@ enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw) ICE_AQC_GET_SW_CONF_RESP_IS_VF) is_vf = true; - res_type = le16_to_cpu(ele->vsi_port_num) >> - ICE_AQC_GET_SW_CONF_RESP_TYPE_S; + res_type = (u8)(le16_to_cpu(ele->vsi_port_num) >> + ICE_AQC_GET_SW_CONF_RESP_TYPE_S); if (res_type == ICE_AQC_GET_SW_CONF_RESP_VSI) { /* FW VSI is not needed. Just continue. */ @@ -1618,12 +1618,12 @@ ice_add_mac(struct ice_hw *hw, struct list_head *m_list) struct ice_aqc_sw_rules_elem *s_rule, *r_iter; struct ice_fltr_list_entry *m_list_itr; struct list_head *rule_head; - u16 elem_sent, total_elem_left; + u16 total_elem_left, s_rule_size; struct ice_switch_info *sw; struct mutex *rule_lock; /* Lock to protect filter rule list */ enum ice_status status = 0; u16 num_unicast = 0; - u16 s_rule_size; + u8 elem_sent; if (!m_list || !hw) return ICE_ERR_PARAM; @@ -1707,8 +1707,8 @@ ice_add_mac(struct ice_hw *hw, struct list_head *m_list) total_elem_left -= elem_sent) { struct ice_aqc_sw_rules_elem *entry = r_iter; - elem_sent = min(total_elem_left, - (u16)(ICE_AQ_MAX_BUF_LEN / s_rule_size)); + elem_sent = min_t(u8, total_elem_left, + (ICE_AQ_MAX_BUF_LEN / s_rule_size)); status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size, elem_sent, ice_aqc_opc_add_sw_rules, NULL); @@ -2678,6 +2678,81 @@ void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle) } /** + * ice_alloc_res_cntr - allocating resource counter + * @hw: pointer to the hardware structure + * @type: type of resource + * @alloc_shared: if set it is shared else dedicated + * @num_items: number of entries requested for FD resource type + * @counter_id: counter index returned by AQ call + */ +enum ice_status +ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items, + u16 *counter_id) +{ + struct ice_aqc_alloc_free_res_elem *buf; + enum ice_status status; + u16 buf_len; + + /* Allocate resource */ + buf_len = sizeof(*buf); + buf = kzalloc(buf_len, GFP_KERNEL); + if (!buf) + return ICE_ERR_NO_MEMORY; + + buf->num_elems = cpu_to_le16(num_items); + buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) & + ICE_AQC_RES_TYPE_M) | alloc_shared); + + status = ice_aq_alloc_free_res(hw, 1, buf, buf_len, + ice_aqc_opc_alloc_res, NULL); + if (status) + goto exit; + + *counter_id = le16_to_cpu(buf->elem[0].e.sw_resp); + +exit: + kfree(buf); + return status; +} + +/** + * ice_free_res_cntr - free resource counter + * @hw: pointer to the hardware structure + * @type: type of resource + * @alloc_shared: if set it is shared else dedicated + * @num_items: number of entries to be freed for FD resource type + * @counter_id: counter ID resource which needs to be freed + */ +enum ice_status +ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items, + u16 counter_id) +{ + struct ice_aqc_alloc_free_res_elem *buf; + enum ice_status status; + u16 buf_len; + + /* Free resource */ + buf_len = sizeof(*buf); + buf = kzalloc(buf_len, GFP_KERNEL); + if (!buf) + return ICE_ERR_NO_MEMORY; + + buf->num_elems = cpu_to_le16(num_items); + 
buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) & + ICE_AQC_RES_TYPE_M) | alloc_shared); + buf->elem[0].e.sw_resp = cpu_to_le16(counter_id); + + status = ice_aq_alloc_free_res(hw, 1, buf, buf_len, + ice_aqc_opc_free_res, NULL); + if (status) + ice_debug(hw, ICE_DBG_SW, + "counter resource could not be freed\n"); + + kfree(buf); + return status; +} + +/** * ice_replay_vsi_fltr - Replay filters for requested VSI * @hw: pointer to the hardware structure * @vsi_handle: driver VSI handle diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h index fa14b9545dab..8b4f9d35c860 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.h +++ b/drivers/net/ethernet/intel/ice/ice_switch.h @@ -208,6 +208,13 @@ void ice_clear_all_vsi_ctx(struct ice_hw *hw); /* Switch config */ enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw); +enum ice_status +ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items, + u16 *counter_id); +enum ice_status +ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items, + u16 counter_id); + /* Switch/bridge related commands */ enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw); enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_lst); diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index 69b21b436f9a..cda7e05bd8ae 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -15,6 +15,90 @@ #define ICE_RX_HDR_SIZE 256 +#define FDIR_DESC_RXDID 0x40 +#define ICE_FDIR_CLEAN_DELAY 10 + +/** + * ice_prgm_fdir_fltr - Program a Flow Director filter + * @vsi: VSI to send dummy packet + * @fdir_desc: flow director descriptor + * @raw_packet: allocated buffer for flow director + */ +int +ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc, + u8 *raw_packet) +{ + struct ice_tx_buf *tx_buf, *first; + struct ice_fltr_desc *f_desc; + struct ice_tx_desc *tx_desc; + struct ice_ring *tx_ring; + struct device *dev; + dma_addr_t dma; + u32 td_cmd; + u16 i; + + /* VSI and Tx ring */ + if (!vsi) + return -ENOENT; + tx_ring = vsi->tx_rings[0]; + if (!tx_ring || !tx_ring->desc) + return -ENOENT; + dev = tx_ring->dev; + + /* we are using two descriptors to add/del a filter and we can wait */ + for (i = ICE_FDIR_CLEAN_DELAY; ICE_DESC_UNUSED(tx_ring) < 2; i--) { + if (!i) + return -EAGAIN; + msleep_interruptible(1); + } + + dma = dma_map_single(dev, raw_packet, ICE_FDIR_MAX_RAW_PKT_SIZE, + DMA_TO_DEVICE); + + if (dma_mapping_error(dev, dma)) + return -EINVAL; + + /* grab the next descriptor */ + i = tx_ring->next_to_use; + first = &tx_ring->tx_buf[i]; + f_desc = ICE_TX_FDIRDESC(tx_ring, i); + memcpy(f_desc, fdir_desc, sizeof(*f_desc)); + + i++; + i = (i < tx_ring->count) ? i : 0; + tx_desc = ICE_TX_DESC(tx_ring, i); + tx_buf = &tx_ring->tx_buf[i]; + + i++; + tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; + + memset(tx_buf, 0, sizeof(*tx_buf)); + dma_unmap_len_set(tx_buf, len, ICE_FDIR_MAX_RAW_PKT_SIZE); + dma_unmap_addr_set(tx_buf, dma, dma); + + tx_desc->buf_addr = cpu_to_le64(dma); + td_cmd = ICE_TXD_LAST_DESC_CMD | ICE_TX_DESC_CMD_DUMMY | + ICE_TX_DESC_CMD_RE; + + tx_buf->tx_flags = ICE_TX_FLAGS_DUMMY_PKT; + tx_buf->raw_buf = raw_packet; + + tx_desc->cmd_type_offset_bsz = + ice_build_ctob(td_cmd, 0, ICE_FDIR_MAX_RAW_PKT_SIZE, 0); + + /* Force memory write to complete before letting h/w know + * there are new descriptors to fetch. 
+ */ + wmb(); + + /* mark the data descriptor to be watched */ + first->next_to_watch = tx_desc; + + writel(tx_ring->next_to_use, tx_ring->tail); + + return 0; +} + /** * ice_unmap_and_free_tx_buf - Release a Tx buffer * @ring: the ring that owns the buffer @@ -24,7 +108,9 @@ static void ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf) { if (tx_buf->skb) { - if (ice_ring_is_xdp(ring)) + if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT) + devm_kfree(ring->dev, tx_buf->raw_buf); + else if (ice_ring_is_xdp(ring)) page_frag_free(tx_buf->raw_buf); else dev_kfree_skb_any(tx_buf->skb); @@ -599,7 +685,8 @@ bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count) struct ice_rx_buf *bi; /* do nothing if no valid netdev defined */ - if (!rx_ring->netdev || !cleaned_count) + if ((!rx_ring->netdev && rx_ring->vsi->type != ICE_VSI_CTRL) || + !cleaned_count) return false; /* get the Rx descriptor and buffer based on next_to_use */ @@ -819,7 +906,7 @@ static struct sk_buff * ice_build_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf, struct xdp_buff *xdp) { - unsigned int metasize = xdp->data - xdp->data_meta; + u8 metasize = xdp->data - xdp->data_meta; #if (PAGE_SIZE < 8192) unsigned int truesize = ice_rx_pg_size(rx_ring) / 2; #else @@ -934,7 +1021,7 @@ ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf, */ static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf) { - u32 ntc = rx_ring->next_to_clean + 1; + u16 ntc = rx_ring->next_to_clean + 1; /* fetch, update, and store next to clean */ ntc = (ntc < rx_ring->count) ? ntc : 0; @@ -997,7 +1084,7 @@ ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc, * * Returns amount of work completed */ -static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) +int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) { unsigned int total_rx_bytes = 0, total_rx_pkts = 0; u16 cleaned_count = ICE_DESC_UNUSED(rx_ring); @@ -1040,6 +1127,12 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) */ dma_rmb(); + if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) { + ice_put_rx_buf(rx_ring, NULL); + cleaned_count++; + continue; + } + size = le16_to_cpu(rx_desc->wb.pkt_len) & ICE_RX_FLX_DESC_PKT_LEN_M; @@ -1544,7 +1637,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget) * don't allow the budget to go below 1 because that would exit * polling early. 
*/ - budget_per_ring = max(budget / q_vector->num_ring_rx, 1); + budget_per_ring = max_t(int, budget / q_vector->num_ring_rx, 1); else /* Max of 1 Rx ring in this q_vector so give it the budget */ budget_per_ring = budget; @@ -1680,7 +1773,8 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first, */ while (unlikely(size > ICE_MAX_DATA_PER_TXD)) { tx_desc->cmd_type_offset_bsz = - build_ctob(td_cmd, td_offset, max_data, td_tag); + ice_build_ctob(td_cmd, td_offset, max_data, + td_tag); tx_desc++; i++; @@ -1700,8 +1794,8 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first, if (likely(!data_len)) break; - tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, - size, td_tag); + tx_desc->cmd_type_offset_bsz = ice_build_ctob(td_cmd, td_offset, + size, td_tag); tx_desc++; i++; @@ -1732,8 +1826,8 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first, /* write last descriptor with RS and EOP bits */ td_cmd |= (u64)ICE_TXD_LAST_DESC_CMD; - tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, size, - td_tag); + tx_desc->cmd_type_offset_bsz = + ice_build_ctob(td_cmd, td_offset, size, td_tag); /* Force memory writes to complete before letting h/w know there * are new descriptors to fetch. @@ -1807,12 +1901,94 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off) l2_len = ip.hdr - skb->data; offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S; - if (skb->encapsulation) - return -1; + protocol = vlan_get_protocol(skb); + + if (protocol == htons(ETH_P_IP)) + first->tx_flags |= ICE_TX_FLAGS_IPV4; + else if (protocol == htons(ETH_P_IPV6)) + first->tx_flags |= ICE_TX_FLAGS_IPV6; + + if (skb->encapsulation) { + bool gso_ena = false; + u32 tunnel = 0; + + /* define outer network header type */ + if (first->tx_flags & ICE_TX_FLAGS_IPV4) { + tunnel |= (first->tx_flags & ICE_TX_FLAGS_TSO) ? 
+ ICE_TX_CTX_EIPT_IPV4 : + ICE_TX_CTX_EIPT_IPV4_NO_CSUM; + l4_proto = ip.v4->protocol; + } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) { + tunnel |= ICE_TX_CTX_EIPT_IPV6; + exthdr = ip.hdr + sizeof(*ip.v6); + l4_proto = ip.v6->nexthdr; + if (l4.hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_proto, &frag_off); + } + + /* define outer transport */ + switch (l4_proto) { + case IPPROTO_UDP: + tunnel |= ICE_TXD_CTX_UDP_TUNNELING; + first->tx_flags |= ICE_TX_FLAGS_TUNNEL; + break; + case IPPROTO_GRE: + tunnel |= ICE_TXD_CTX_GRE_TUNNELING; + first->tx_flags |= ICE_TX_FLAGS_TUNNEL; + break; + case IPPROTO_IPIP: + case IPPROTO_IPV6: + first->tx_flags |= ICE_TX_FLAGS_TUNNEL; + l4.hdr = skb_inner_network_header(skb); + break; + default: + if (first->tx_flags & ICE_TX_FLAGS_TSO) + return -1; + + skb_checksum_help(skb); + return 0; + } + + /* compute outer L3 header size */ + tunnel |= ((l4.hdr - ip.hdr) / 4) << + ICE_TXD_CTX_QW0_EIPLEN_S; + + /* switch IP header pointer from outer to inner header */ + ip.hdr = skb_inner_network_header(skb); + + /* compute tunnel header size */ + tunnel |= ((ip.hdr - l4.hdr) / 2) << + ICE_TXD_CTX_QW0_NATLEN_S; + + gso_ena = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL; + /* indicate if we need to offload outer UDP header */ + if ((first->tx_flags & ICE_TX_FLAGS_TSO) && !gso_ena && + (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) + tunnel |= ICE_TXD_CTX_QW0_L4T_CS_M; + + /* record tunnel offload values */ + off->cd_tunnel_params |= tunnel; + + /* set DTYP=1 to indicate that it's a Tx context descriptor + * in IPsec tunnel mode with Tx offloads in Quad word 1 + */ + off->cd_qw1 |= (u64)ICE_TX_DESC_DTYPE_CTX; + + /* switch L4 header pointer from outer to inner */ + l4.hdr = skb_inner_transport_header(skb); + l4_proto = 0; + + /* reset type as we transition from outer to inner headers */ + first->tx_flags &= ~(ICE_TX_FLAGS_IPV4 | ICE_TX_FLAGS_IPV6); + if (ip.v4->version == 4) + first->tx_flags |= ICE_TX_FLAGS_IPV4; + if (ip.v6->version == 6) + first->tx_flags |= ICE_TX_FLAGS_IPV6; + } /* Enable IP checksum offloads */ - protocol = vlan_get_protocol(skb); - if (protocol == htons(ETH_P_IP)) { + if (first->tx_flags & ICE_TX_FLAGS_IPV4) { l4_proto = ip.v4->protocol; /* the stack computes the IP header already, the only time we * need the hardware to recompute it is in the case of TSO. 
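The EIPLEN/NATLEN packing in the tunnel path above stores header lengths in hardware units: the outer L3 length in 4-byte words and the tunnel (NAT) length in 2-byte words. A worked example with an illustrative VXLAN-style layout (these offsets are assumptions, not values from the patch):

	/* Outer IPv4 at byte 14, outer UDP at 34, inner IPv4 at 64. */
	unsigned int eiplen = (34 - 14) / 4;	/* outer L3 header: 5 words */
	unsigned int natlen = (64 - 34) / 2;	/* UDP + VXLAN + inner MAC: 15 words */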
@@ -1822,7 +1998,7 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off) else cmd |= ICE_TX_DESC_CMD_IIPT_IPV4; - } else if (protocol == htons(ETH_P_IPV6)) { + } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) { cmd |= ICE_TX_DESC_CMD_IIPT_IPV6; exthdr = ip.hdr + sizeof(*ip.v6); l4_proto = ip.v6->nexthdr; @@ -1944,7 +2120,8 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off) unsigned char *hdr; } l4; u64 cd_mss, cd_tso_len; - u32 paylen, l4_start; + u32 paylen; + u8 l4_start; int err; if (skb->ip_summed != CHECKSUM_PARTIAL) @@ -1969,8 +2146,42 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off) ip.v6->payload_len = 0; } + if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | + SKB_GSO_GRE_CSUM | + SKB_GSO_IPXIP4 | + SKB_GSO_IPXIP6 | + SKB_GSO_UDP_TUNNEL | + SKB_GSO_UDP_TUNNEL_CSUM)) { + if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && + (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) { + l4.udp->len = 0; + + /* determine offset of outer transport header */ + l4_start = (u8)(l4.hdr - skb->data); + + /* remove payload length from outer checksum */ + paylen = skb->len - l4_start; + csum_replace_by_diff(&l4.udp->check, + (__force __wsum)htonl(paylen)); + } + + /* reset pointers to inner headers */ + + /* cppcheck-suppress unreadVariable */ + ip.hdr = skb_inner_network_header(skb); + l4.hdr = skb_inner_transport_header(skb); + + /* initialize inner IP header fields */ + if (ip.v4->version == 4) { + ip.v4->tot_len = 0; + ip.v4->check = 0; + } else { + ip.v6->payload_len = 0; + } + } + /* determine offset of transport header */ - l4_start = l4.hdr - skb->data; + l4_start = (u8)(l4.hdr - skb->data); /* remove payload length from checksum */ paylen = skb->len - l4_start; @@ -1979,12 +2190,12 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off) csum_replace_by_diff(&l4.udp->check, (__force __wsum)htonl(paylen)); /* compute length of UDP segmentation header */ - off->header_len = sizeof(l4.udp) + l4_start; + off->header_len = (u8)sizeof(l4.udp) + l4_start; } else { csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen)); /* compute length of TCP segmentation header */ - off->header_len = (l4.tcp->doff * 4) + l4_start; + off->header_len = (u8)((l4.tcp->doff * 4) + l4_start); } /* update gso_segs and bytecount */ @@ -2215,7 +2426,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring) if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) { struct ice_tx_ctx_desc *cdesc; - int i = tx_ring->next_to_use; + u16 i = tx_ring->next_to_use; /* grab the next descriptor */ cdesc = ICE_TX_CTX_DESC(tx_ring, i); @@ -2260,3 +2471,86 @@ netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev) return ice_xmit_frame_ring(skb, tx_ring); } + +/** + * ice_clean_ctrl_tx_irq - interrupt handler for flow director Tx queue + * @tx_ring: tx_ring to clean + */ +void ice_clean_ctrl_tx_irq(struct ice_ring *tx_ring) +{ + struct ice_vsi *vsi = tx_ring->vsi; + s16 i = tx_ring->next_to_clean; + int budget = ICE_DFLT_IRQ_WORK; + struct ice_tx_desc *tx_desc; + struct ice_tx_buf *tx_buf; + + tx_buf = &tx_ring->tx_buf[i]; + tx_desc = ICE_TX_DESC(tx_ring, i); + i -= tx_ring->count; + + do { + struct ice_tx_desc *eop_desc = tx_buf->next_to_watch; + + /* if next_to_watch is not set then there is no pending work */ + if (!eop_desc) + break; + + /* prevent any other reads prior to eop_desc */ + smp_rmb(); + + /* if the descriptor isn't done, no work to do */ + if (!(eop_desc->cmd_type_offset_bsz & + 
cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE))) + break; + + /* clear next_to_watch to prevent false hangs */ + tx_buf->next_to_watch = NULL; + tx_desc->buf_addr = 0; + tx_desc->cmd_type_offset_bsz = 0; + + /* move past filter desc */ + tx_buf++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buf = tx_ring->tx_buf; + tx_desc = ICE_TX_DESC(tx_ring, 0); + } + + /* unmap the data header */ + if (dma_unmap_len(tx_buf, len)) + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buf, dma), + dma_unmap_len(tx_buf, len), + DMA_TO_DEVICE); + if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT) + devm_kfree(tx_ring->dev, tx_buf->raw_buf); + + /* clear next_to_watch to prevent false hangs */ + tx_buf->raw_buf = NULL; + tx_buf->tx_flags = 0; + tx_buf->next_to_watch = NULL; + dma_unmap_len_set(tx_buf, len, 0); + tx_desc->buf_addr = 0; + tx_desc->cmd_type_offset_bsz = 0; + + /* move past eop_desc for start of next FD desc */ + tx_buf++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buf = tx_ring->tx_buf; + tx_desc = ICE_TX_DESC(tx_ring, 0); + } + + budget--; + } while (likely(budget)); + + i += tx_ring->count; + tx_ring->next_to_clean = i; + + /* re-enable interrupt if needed */ + ice_irq_dynamic_ena(&vsi->back->hw, vsi, vsi->q_vectors[0]); +} diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h index 7ee00a128663..e70c4619edc3 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx.h @@ -38,7 +38,8 @@ */ #if (PAGE_SIZE < 8192) #define ICE_2K_TOO_SMALL_WITH_PADDING \ -((NET_SKB_PAD + ICE_RXBUF_1536) > SKB_WITH_OVERHEAD(ICE_RXBUF_2048)) + ((unsigned int)(NET_SKB_PAD + ICE_RXBUF_1536) > \ + SKB_WITH_OVERHEAD(ICE_RXBUF_2048)) /** * ice_compute_pad - compute the padding @@ -107,12 +108,19 @@ static inline int ice_skb_pad(void) #define DESC_NEEDED (MAX_SKB_FRAGS + ICE_DESCS_FOR_CTX_DESC + \ ICE_DESCS_PER_CACHE_LINE + ICE_DESCS_FOR_SKB_DATA_PTR) #define ICE_DESC_UNUSED(R) \ - ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \ - (R)->next_to_clean - (R)->next_to_use - 1) + (u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \ + (R)->next_to_clean - (R)->next_to_use - 1) #define ICE_TX_FLAGS_TSO BIT(0) #define ICE_TX_FLAGS_HW_VLAN BIT(1) #define ICE_TX_FLAGS_SW_VLAN BIT(2) +/* ICE_TX_FLAGS_DUMMY_PKT is used to mark dummy packets that should be + * freed instead of returned like skb packets. 
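The ICE_DESC_UNUSED() macro a few lines up (now explicitly cast to u16) counts free descriptors with wraparound, always leaving one slot empty so next_to_use can never catch next_to_clean from behind. A quick standalone check of the arithmetic:

	/* count = 8, next_to_use = 5, next_to_clean = 2:
	 * ((2 > 5) ? 0 : 8) + 2 - 5 - 1 = 4 free descriptors.
	 */
	static unsigned short desc_unused(unsigned short ntc, unsigned short ntu,
					  unsigned short count)
	{
		return ((ntc > ntu) ? 0 : count) + ntc - ntu - 1;
	}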
+ */ +#define ICE_TX_FLAGS_DUMMY_PKT BIT(3) +#define ICE_TX_FLAGS_IPV4 BIT(5) +#define ICE_TX_FLAGS_IPV6 BIT(6) +#define ICE_TX_FLAGS_TUNNEL BIT(7) #define ICE_TX_FLAGS_VLAN_M 0xffff0000 #define ICE_TX_FLAGS_VLAN_PR_M 0xe0000000 #define ICE_TX_FLAGS_VLAN_PR_S 29 @@ -155,17 +163,16 @@ struct ice_tx_offload_params { }; struct ice_rx_buf { - struct sk_buff *skb; - dma_addr_t dma; union { struct { + struct sk_buff *skb; + dma_addr_t dma; struct page *page; unsigned int page_offset; u16 pagecnt_bias; }; struct { - void *addr; - u64 handle; + struct xdp_buff *xdp; }; }; }; @@ -289,7 +296,6 @@ struct ice_ring { struct rcu_head rcu; /* to avoid race on free */ struct bpf_prog *xdp_prog; struct xdp_umem *xsk_umem; - struct zero_copy_allocator zca; /* CL3 - 3rd cacheline starts here */ struct xdp_rxq_info xdp_rxq; /* CLX - the below items are only accessed infrequently and should be @@ -373,5 +379,9 @@ int ice_setup_rx_ring(struct ice_ring *rx_ring); void ice_free_tx_ring(struct ice_ring *tx_ring); void ice_free_rx_ring(struct ice_ring *rx_ring); int ice_napi_poll(struct napi_struct *napi, int budget); - +int +ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc, + u8 *raw_packet); +int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget); +void ice_clean_ctrl_tx_irq(struct ice_ring *tx_ring); #endif /* _ICE_TXRX_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c index 6da048a6ca7c..ab2031b1c635 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c @@ -8,7 +8,7 @@ * @rx_ring: ring to bump * @val: new head index */ -void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val) +void ice_release_rx_desc(struct ice_ring *rx_ring, u16 val) { u16 prev_ntu = rx_ring->next_to_use & ~0x7; @@ -84,11 +84,11 @@ ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb, union ice_32b_rx_flex_desc *rx_desc, u8 ptype) { struct ice_rx_ptype_decoded decoded; - u32 rx_error, rx_status; + u16 rx_status0, rx_status1; bool ipv4, ipv6; - rx_status = le16_to_cpu(rx_desc->wb.status_error0); - rx_error = rx_status; + rx_status0 = le16_to_cpu(rx_desc->wb.status_error0); + rx_status1 = le16_to_cpu(rx_desc->wb.status_error1); decoded = ice_decode_rx_desc_ptype(ptype); @@ -101,7 +101,7 @@ ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb, return; /* check if HW has decoded the packet and checksum */ - if (!(rx_status & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S))) + if (!(rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S))) return; if (!(decoded.known && decoded.outer_ip)) @@ -112,19 +112,31 @@ ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb, ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) && (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6); - if (ipv4 && (rx_error & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) | - BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))) + if (ipv4 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) | + BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))) goto checksum_fail; - else if (ipv6 && (rx_status & - (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S)))) + + if (ipv6 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S)))) goto checksum_fail; /* check for L4 errors and handle packets that were not able to be * checksummed due to arrival speed */ - if (rx_error & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)) + if (rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)) + goto checksum_fail; + + /* check for outer UDP checksum error in tunneled packets */ + if 
((rx_status1 & BIT(ICE_RX_FLEX_DESC_STATUS1_NAT_S)) && + (rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S))) goto checksum_fail; + /* If there is an outer header present that might contain a checksum + * we need to bump the checksum level by 1 to reflect the fact that + * we are indicating we validated the inner checksum. + */ + if (decoded.tunnel_type >= ICE_RX_PTYPE_TUNNEL_IP_GRENAT) + skb->csum_level = 1; + /* Only report checksum unnecessary for TCP, UDP, or SCTP */ switch (decoded.inner_prot) { case ICE_RX_PTYPE_INNER_PROT_TCP: @@ -215,8 +227,8 @@ int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring) tx_desc = ICE_TX_DESC(xdp_ring, i); tx_desc->buf_addr = cpu_to_le64(dma); - tx_desc->cmd_type_offset_bsz = build_ctob(ICE_TXD_LAST_DESC_CMD, 0, - size, 0); + tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TXD_LAST_DESC_CMD, 0, + size, 0); /* Make certain all of the status bits have been updated * before next_to_watch is written. diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h index ba9164dad9ae..58ff58f0f972 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h @@ -22,7 +22,7 @@ ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc, const u16 stat_err_bits) } static inline __le64 -build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag) +ice_build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag) { return cpu_to_le64(ICE_TX_DESC_DTYPE_DATA | (td_cmd << ICE_TXD_QW1_CMD_S) | @@ -49,7 +49,7 @@ static inline void ice_xdp_ring_update_tail(struct ice_ring *xdp_ring) void ice_finalize_xdp_rx(struct ice_ring *rx_ring, unsigned int xdp_res); int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_ring *xdp_ring); int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring); -void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val); +void ice_release_rx_desc(struct ice_ring *rx_ring, u16 val); void ice_process_skb_fields(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc, diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index 4ce5f92fca4a..c1ad8622e65c 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -118,7 +118,8 @@ enum ice_media_type { enum ice_vsi_type { ICE_VSI_PF = 0, - ICE_VSI_VF, + ICE_VSI_VF = 1, + ICE_VSI_CTRL = 3, /* equates to ICE_VSI_PF with 1 queue pair */ ICE_VSI_LB = 6, }; @@ -161,6 +162,38 @@ struct ice_phy_info { u8 get_link_info; }; +/* protocol enumeration for filters */ +enum ice_fltr_ptype { + /* NONE - used for undef/error */ + ICE_FLTR_PTYPE_NONF_NONE = 0, + ICE_FLTR_PTYPE_NONF_IPV4_UDP, + ICE_FLTR_PTYPE_NONF_IPV4_TCP, + ICE_FLTR_PTYPE_NONF_IPV4_SCTP, + ICE_FLTR_PTYPE_NONF_IPV4_OTHER, + ICE_FLTR_PTYPE_FRAG_IPV4, + ICE_FLTR_PTYPE_NONF_IPV6_UDP, + ICE_FLTR_PTYPE_NONF_IPV6_TCP, + ICE_FLTR_PTYPE_NONF_IPV6_SCTP, + ICE_FLTR_PTYPE_NONF_IPV6_OTHER, + ICE_FLTR_PTYPE_MAX, +}; + +enum ice_fd_hw_seg { + ICE_FD_HW_SEG_NON_TUN = 0, + ICE_FD_HW_SEG_TUN, + ICE_FD_HW_SEG_MAX, +}; + +/* 2 VSI = 1 ICE_VSI_PF + 1 ICE_VSI_CTRL */ +#define ICE_MAX_FDIR_VSI_PER_FILTER 2 + +struct ice_fd_hw_prof { + struct ice_flow_seg_info *fdir_seg[ICE_FD_HW_SEG_MAX]; + int cnt; + u64 entry_h[ICE_MAX_FDIR_VSI_PER_FILTER][ICE_FD_HW_SEG_MAX]; + u16 vsi_h[ICE_MAX_FDIR_VSI_PER_FILTER]; +}; + /* Common HW capabilities for SW use */ struct ice_hw_common_caps { u32 valid_functions; @@ -197,6 +230,8 @@ struct ice_hw_func_caps { 
u32 num_allocd_vfs; /* Number of allocated VFs */ u32 vf_base_id; /* Logical ID of the first VF */ u32 guar_num_vsi; + u32 fd_fltr_guar; /* Number of filters guaranteed */ + u32 fd_fltr_best_effort; /* Number of best effort filters */ }; /* Device wide capabilities */ @@ -204,6 +239,7 @@ struct ice_hw_dev_caps { struct ice_hw_common_caps common_cap; u32 num_vfs_exposed; /* Total number of VFs exposed */ u32 num_vsi_allocd_to_host; /* Excluding EMP VSI */ + u32 num_flow_director_fltr; /* Number of FD filters available */ u32 num_funcs; }; @@ -259,6 +295,16 @@ struct ice_nvm_info { #define ICE_NVM_VER_LEN 32 +/* netlist version information */ +struct ice_netlist_ver_info { + u32 major; /* major high/low */ + u32 minor; /* minor high/low */ + u32 type; /* type high/low */ + u32 rev; /* revision high/low */ + u32 hash; /* SHA-1 hash word */ + u16 cust_ver; /* customer version */ +}; + /* Max number of port to queue branches w.r.t topology */ #define ICE_MAX_TRAFFIC_CLASS 8 #define ICE_TXSCHED_MAX_BRANCHES ICE_MAX_TRAFFIC_CLASS @@ -479,6 +525,8 @@ struct ice_hw { u64 debug_mask; /* bitmap for debug mask */ enum ice_mac_type mac_type; + u16 fd_ctr_base; /* FD counter base index */ + /* pci info */ u16 device_id; u16 vendor_id; @@ -491,8 +539,8 @@ struct ice_hw { u16 max_burst_size; /* driver sets this value */ /* Tx Scheduler values */ - u16 num_tx_sched_layers; - u16 num_tx_sched_phys_layers; + u8 num_tx_sched_layers; + u8 num_tx_sched_phys_layers; u8 flattened_layers; u8 max_cgds; u8 sw_entry_point_layer; @@ -506,6 +554,7 @@ struct ice_hw { struct ice_nvm_info nvm; struct ice_hw_dev_caps dev_caps; /* device capabilities */ struct ice_hw_func_caps func_caps; /* function capabilities */ + struct ice_netlist_ver_info netlist_ver; /* netlist version info */ struct ice_switch_info *switch_info; /* switch filter lists */ @@ -548,6 +597,7 @@ struct ice_hw { /* Active package version (currently active) */ struct ice_pkg_ver active_pkg_ver; + u32 active_track_id; u8 active_pkg_name[ICE_PKG_NAME_SIZE]; u8 active_pkg_in_nvm; @@ -568,10 +618,29 @@ struct ice_hw { u8 *pkg_copy; u32 pkg_size; + /* tunneling info */ + struct mutex tnl_lock; + struct ice_tunnel_table tnl; + /* HW block tables */ struct ice_blk_info blk[ICE_BLK_COUNT]; struct mutex fl_profs_locks[ICE_BLK_COUNT]; /* lock fltr profiles */ struct list_head fl_profs[ICE_BLK_COUNT]; + + /* Flow Director filter info */ + int fdir_active_fltr; + + struct mutex fdir_fltr_lock; /* protect Flow Director */ + struct list_head fdir_list_head; + + /* Book-keeping of side-band filter count per flow-type. + * This is used to detect and handle input set changes for + * respective flow-type. 
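The per-flow-type counters described here let the driver notice when a new side-band filter would require a different input set than the filters already installed for that flow type. A hedged sketch of how such bookkeeping can be used (the conflict check is an assumption about usage, not code from this patch):

	static int fdir_account_filter(u16 *cnt, bool seg_matches_installed)
	{
		/* refuse an input-set change while filters of this flow
		 * type are still installed
		 */
		if (*cnt && !seg_matches_installed)
			return -EINVAL;
		(*cnt)++;
		return 0;
	}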
+ */ + u16 fdir_fltr_cnt[ICE_FLTR_PTYPE_MAX]; + + struct ice_fd_hw_prof **fdir_prof; + DECLARE_BITMAP(fdir_perfect_fltr, ICE_FLTR_PTYPE_MAX); struct mutex rss_locks; /* protect RSS configuration */ struct list_head rss_list_head; }; @@ -592,6 +661,8 @@ struct ice_eth_stats { u64 tx_errors; /* tepc */ }; +#define ICE_MAX_UP 8 + /* Statistics collected by the MAC */ struct ice_hw_port_stats { /* eth stats collected by the port */ @@ -631,6 +702,9 @@ struct ice_hw_port_stats { u64 tx_size_1023; /* ptc1023 */ u64 tx_size_1522; /* ptc1522 */ u64 tx_size_big; /* ptc9522 */ + /* flow director stats */ + u32 fd_sb_status; + u64 fd_sb_match; }; /* Checksum and Shadow RAM pointers */ diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index 15191a325918..a126e7c7663d 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -4,16 +4,18 @@ #include "ice.h" #include "ice_base.h" #include "ice_lib.h" +#include "ice_fltr.h" /** * ice_validate_vf_id - helper to check if VF ID is valid * @pf: pointer to the PF structure * @vf_id: the ID of the VF to check */ -static int ice_validate_vf_id(struct ice_pf *pf, int vf_id) +static int ice_validate_vf_id(struct ice_pf *pf, u16 vf_id) { + /* vf_id range is only valid for 0-255, and should always be unsigned */ if (vf_id >= pf->num_alloc_vfs) { - dev_err(ice_pf_to_dev(pf), "Invalid VF ID: %d\n", vf_id); + dev_err(ice_pf_to_dev(pf), "Invalid VF ID: %u\n", vf_id); return -EINVAL; } return 0; @@ -27,7 +29,7 @@ static int ice_validate_vf_id(struct ice_pf *pf, int vf_id) static int ice_check_vf_init(struct ice_pf *pf, struct ice_vf *vf) { if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) { - dev_err(ice_pf_to_dev(pf), "VF ID: %d in reset. Try again.\n", + dev_err(ice_pf_to_dev(pf), "VF ID: %u in reset. 
Try again.\n", vf->vf_id); return -EBUSY; } @@ -35,6 +37,37 @@ static int ice_check_vf_init(struct ice_pf *pf, struct ice_vf *vf) } /** + * ice_err_to_virt_err - translate errors for VF return code + * @ice_err: error return code + */ +static enum virtchnl_status_code ice_err_to_virt_err(enum ice_status ice_err) +{ + switch (ice_err) { + case ICE_SUCCESS: + return VIRTCHNL_STATUS_SUCCESS; + case ICE_ERR_BAD_PTR: + case ICE_ERR_INVAL_SIZE: + case ICE_ERR_DEVICE_NOT_SUPPORTED: + case ICE_ERR_PARAM: + case ICE_ERR_CFG: + return VIRTCHNL_STATUS_ERR_PARAM; + case ICE_ERR_NO_MEMORY: + return VIRTCHNL_STATUS_ERR_NO_MEMORY; + case ICE_ERR_NOT_READY: + case ICE_ERR_RESET_FAILED: + case ICE_ERR_FW_API_VER: + case ICE_ERR_AQ_ERROR: + case ICE_ERR_AQ_TIMEOUT: + case ICE_ERR_AQ_FULL: + case ICE_ERR_AQ_NO_WORK: + case ICE_ERR_AQ_EMPTY: + return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR; + default: + return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED; + } +} + +/** * ice_vc_vf_broadcast - Broadcast a message to all VFs on PF * @pf: pointer to the PF structure * @v_opcode: operation code @@ -47,7 +80,7 @@ ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode, enum virtchnl_status_code v_retval, u8 *msg, u16 msglen) { struct ice_hw *hw = &pf->hw; - int i; + unsigned int i; ice_for_each_vf(pf, i) { struct ice_vf *vf = &pf->vf[i]; @@ -149,6 +182,26 @@ static void ice_vc_notify_vf_link_state(struct ice_vf *vf) } /** + * ice_vf_invalidate_vsi - invalidate vsi_idx/vsi_num to remove VSI access + * @vf: VF to remove access to VSI for + */ +static void ice_vf_invalidate_vsi(struct ice_vf *vf) +{ + vf->lan_vsi_idx = ICE_NO_VSI; + vf->lan_vsi_num = ICE_NO_VSI; +} + +/** + * ice_vf_vsi_release - invalidate the VF's VSI after freeing it + * @vf: invalidate this VF's VSI after freeing it + */ +static void ice_vf_vsi_release(struct ice_vf *vf) +{ + ice_vsi_release(vf->pf->vsi[vf->lan_vsi_idx]); + ice_vf_invalidate_vsi(vf); +} + +/** * ice_free_vf_res - Free a VF's resources * @vf: pointer to the VF info */ @@ -163,10 +216,8 @@ static void ice_free_vf_res(struct ice_vf *vf) clear_bit(ICE_VF_STATE_INIT, vf->vf_states); /* free VSI and disconnect it from the parent uplink */ - if (vf->lan_vsi_idx) { - ice_vsi_release(pf->vsi[vf->lan_vsi_idx]); - vf->lan_vsi_idx = 0; - vf->lan_vsi_num = 0; + if (vf->lan_vsi_idx != ICE_NO_VSI) { + ice_vf_vsi_release(vf); vf->num_mac = 0; } @@ -292,7 +343,7 @@ void ice_free_vfs(struct ice_pf *pf) { struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; - int tmp, i; + unsigned int tmp, i; if (!pf->vf) return; @@ -337,7 +388,7 @@ void ice_free_vfs(struct ice_pf *pf) * before this function ever gets called. */ if (!pci_vfs_assigned(pf->pdev)) { - int vf_id; + unsigned int vf_id; /* Acknowledge VFLR for all VFs. Without this, VFs will fail to * work correctly when SR-IOV gets re-enabled. @@ -368,9 +419,9 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr) { struct ice_pf *pf = vf->pf; u32 reg, reg_idx, bit_idx; + unsigned int vf_abs_id, i; struct device *dev; struct ice_hw *hw; - int vf_abs_id, i; dev = ice_pf_to_dev(pf); hw = &pf->hw; @@ -380,10 +431,7 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr) clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states); /* Disable VF's configuration API during reset. The flag is re-enabled - * in ice_alloc_vf_res(), when it's safe again to access VF's VSI. 
- * It's normally disabled in ice_free_vf_res(), but it's safer - * to do it earlier to give some time to finish to any VF config - * functions that may still be running at this point. + * when it's safe again to access VF's VSI. */ clear_bit(ICE_VF_STATE_INIT, vf->vf_states); @@ -418,7 +466,7 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr) if ((reg & VF_TRANS_PENDING_M) == 0) break; - dev_err(dev, "VF %d PCI transactions stuck\n", vf->vf_id); + dev_err(dev, "VF %u PCI transactions stuck\n", vf->vf_id); udelay(ICE_PCI_CIAD_WAIT_DELAY_US); } } @@ -460,8 +508,9 @@ static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 pvid_info, bool enable) status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); if (status) { - dev_info(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %d aq_err %d\n", - status, hw->adminq.sq_last_status); + dev_info(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %s aq_err %s\n", + ice_stat_str(status), + ice_aq_str(hw->adminq.sq_last_status)); ret = -EIO; goto out; } @@ -475,18 +524,39 @@ out: } /** + * ice_vf_get_port_info - Get the VF's port info structure + * @vf: VF used to get the port info structure for + */ +static struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf) +{ + return vf->pf->hw.port_info; +} + +/** * ice_vf_vsi_setup - Set up a VF VSI - * @pf: board private structure - * @pi: pointer to the port_info instance - * @vf_id: defines VF ID to which this VSI connects. + * @vf: VF to setup VSI for * * Returns pointer to the successfully allocated VSI struct on success, * otherwise returns NULL on failure. */ -static struct ice_vsi * -ice_vf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, u16 vf_id) +static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf) { - return ice_vsi_setup(pf, pi, ICE_VSI_VF, vf_id); + struct ice_port_info *pi = ice_vf_get_port_info(vf); + struct ice_pf *pf = vf->pf; + struct ice_vsi *vsi; + + vsi = ice_vsi_setup(pf, pi, ICE_VSI_VF, vf->vf_id); + + if (!vsi) { + dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n"); + ice_vf_invalidate_vsi(vf); + return NULL; + } + + vf->lan_vsi_idx = vsi->idx; + vf->lan_vsi_num = vsi->vsi_num; + + return vsi; } /** @@ -507,170 +577,158 @@ static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf) } /** - * ice_alloc_vsi_res - Setup VF VSI and its resources - * @vf: pointer to the VF structure + * ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN + * @vf: VF to add MAC filters for * - * Returns 0 on success, negative value on failure + * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver + * always re-adds either a VLAN 0 or port VLAN based filter after reset. 
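The rebuild below restores exactly one VLAN filter: the port VLAN ID when one is configured, otherwise VLAN 0 so untagged broadcast/multicast traffic still reaches the VF. Condensed (0x0fff is the usual VLAN_VID_MASK from <linux/if_vlan.h>):

	/* Pick the VLAN ID to re-add after a VF reset. */
	static unsigned short vlan_to_restore(unsigned short port_vlan_info)
	{
		return port_vlan_info ? (port_vlan_info & 0x0fff) : 0;
	}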
*/ -static int ice_alloc_vsi_res(struct ice_vf *vf) +static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf) { - struct ice_pf *pf = vf->pf; - LIST_HEAD(tmp_add_list); - u8 broadcast[ETH_ALEN]; - struct ice_vsi *vsi; - struct device *dev; - int status = 0; + struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx]; + struct device *dev = ice_pf_to_dev(vf->pf); + u16 vlan_id = 0; + int err; - dev = ice_pf_to_dev(pf); - /* first vector index is the VFs OICR index */ - vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf); + if (vf->port_vlan_info) { + err = ice_vsi_manage_pvid(vsi, vf->port_vlan_info, true); + if (err) { + dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n", + vf->vf_id, err); + return err; + } - vsi = ice_vf_vsi_setup(pf, pf->hw.port_info, vf->vf_id); - if (!vsi) { - dev_err(dev, "Failed to create VF VSI\n"); - return -ENOMEM; + vlan_id = vf->port_vlan_info & VLAN_VID_MASK; } - vf->lan_vsi_idx = vsi->idx; - vf->lan_vsi_num = vsi->vsi_num; - - /* Check if port VLAN exist before, and restore it accordingly */ - if (vf->port_vlan_info) { - ice_vsi_manage_pvid(vsi, vf->port_vlan_info, true); - if (ice_vsi_add_vlan(vsi, vf->port_vlan_info & VLAN_VID_MASK)) - dev_warn(ice_pf_to_dev(pf), "Failed to add Port VLAN %d filter for VF %d\n", - vf->port_vlan_info & VLAN_VID_MASK, vf->vf_id); - } else { - /* set VLAN 0 filter by default when no port VLAN is - * enabled. If a port VLAN is enabled we don't want - * untagged broadcast/multicast traffic seen on the VF - * interface. - */ - if (ice_vsi_add_vlan(vsi, 0)) - dev_warn(ice_pf_to_dev(pf), "Failed to add VLAN 0 filter for VF %d, MDD events will trigger. Reset the VF, disable spoofchk, or enable 8021q module on the guest\n", - vf->vf_id); + /* vlan_id will either be 0 or the port VLAN number */ + err = ice_vsi_add_vlan(vsi, vlan_id, ICE_FWD_TO_VSI); + if (err) { + dev_err(dev, "failed to add %s VLAN %u filter for VF %u, error %d\n", + vf->port_vlan_info ? "port" : "", vlan_id, vf->vf_id, + err); + return err; } + return 0; +} + +/** + * ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's perm_addr/LAA + * @vf: VF to add MAC filters for + * + * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver + * always re-adds a broadcast filter and the VF's perm_addr/LAA after reset. 
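Note the accounting in the MAC rebuild below: num_mac is bumped once for the broadcast filter and once more when a valid perm_addr/LAA is re-added, so a freshly rebuilt VF reports one or two filters. As a simple model:

	/* Filters restored after reset: broadcast always, plus the LAA
	 * when the VF has a valid one.
	 */
	static int restored_mac_count(bool has_valid_perm_addr)
	{
		return has_valid_perm_addr ? 2 : 1;
	}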
+ */ +static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf) +{ + struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx]; + struct device *dev = ice_pf_to_dev(vf->pf); + enum ice_status status; + u8 broadcast[ETH_ALEN]; + eth_broadcast_addr(broadcast); + status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI); + if (status) { + dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %s\n", + vf->vf_id, ice_stat_str(status)); + return ice_status_to_errno(status); + } - status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast); - if (status) - goto ice_alloc_vsi_res_exit; + vf->num_mac++; if (is_valid_ether_addr(vf->dflt_lan_addr.addr)) { - status = ice_add_mac_to_list(vsi, &tmp_add_list, - vf->dflt_lan_addr.addr); - if (status) - goto ice_alloc_vsi_res_exit; + status = ice_fltr_add_mac(vsi, vf->dflt_lan_addr.addr, + ICE_FWD_TO_VSI); + if (status) { + dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %s\n", + &vf->dflt_lan_addr.addr[0], vf->vf_id, + ice_stat_str(status)); + return ice_status_to_errno(status); + } + vf->num_mac++; } - status = ice_add_mac(&pf->hw, &tmp_add_list); - if (status) - dev_err(dev, "could not add mac filters error %d\n", status); - else - vf->num_mac = 1; - - /* Clear this bit after VF initialization since we shouldn't reclaim - * and reassign interrupts for synchronous or asynchronous VFR events. - * We don't want to reconfigure interrupts since AVF driver doesn't - * expect vector assignment to be changed unless there is a request for - * more vectors. - */ -ice_alloc_vsi_res_exit: - ice_free_fltr_list(dev, &tmp_add_list); - return status; + return 0; } /** - * ice_alloc_vf_res - Allocate VF resources - * @vf: pointer to the VF structure + * ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value + * @vf: VF to configure trust setting for */ -static int ice_alloc_vf_res(struct ice_vf *vf) +static void ice_vf_set_host_trust_cfg(struct ice_vf *vf) { - struct ice_pf *pf = vf->pf; - int tx_rx_queue_left; - int status; - - /* Update number of VF queues, in case VF had requested for queue - * changes - */ - tx_rx_queue_left = min_t(int, ice_get_avail_txq_count(pf), - ice_get_avail_rxq_count(pf)); - tx_rx_queue_left += pf->num_qps_per_vf; - if (vf->num_req_qs && vf->num_req_qs <= tx_rx_queue_left && - vf->num_req_qs != vf->num_vf_qs) - vf->num_vf_qs = vf->num_req_qs; - - /* setup VF VSI and necessary resources */ - status = ice_alloc_vsi_res(vf); - if (status) - goto ice_alloc_vf_res_exit; - if (vf->trusted) set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); else clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); - - /* VF is now completely initialized */ - set_bit(ICE_VF_STATE_INIT, vf->vf_states); - - return status; - -ice_alloc_vf_res_exit: - ice_free_vf_res(vf); - return status; } /** - * ice_ena_vf_mappings - * @vf: pointer to the VF structure + * ice_ena_vf_msix_mappings - enable VF MSIX mappings in hardware + * @vf: VF to enable MSIX mappings for * - * Enable VF vectors and queues allocation by writing the details into - * respective registers. + * Some of the registers need to be indexed/configured using hardware global + * device values and other registers need 0-based values, which represent PF + * based values. 
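The split between device-based and PF-based indices that this comment describes is just an offset: VPINT_ALLOC-style registers want device-global MSI-X numbers, while GLINT_VECT2FUNC is indexed by the PF-relative vector. Worked numbers (illustrative): first_vector_idx 5, msix_vector_first_id 1 and 4 vectors per VF give a device-based range of 6..9.

	/* PF-relative -> device-global MSI-X index translation. */
	static int device_based_first(int pf_first, int msix_vector_first_id)
	{
		return pf_first + msix_vector_first_id;
	}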
*/ -static void ice_ena_vf_mappings(struct ice_vf *vf) +static void ice_ena_vf_msix_mappings(struct ice_vf *vf) { - int abs_vf_id, abs_first, abs_last; + int device_based_first_msix, device_based_last_msix; + int pf_based_first_msix, pf_based_last_msix, v; struct ice_pf *pf = vf->pf; - struct ice_vsi *vsi; - struct device *dev; - int first, last, v; + int device_based_vf_id; struct ice_hw *hw; u32 reg; - dev = ice_pf_to_dev(pf); hw = &pf->hw; - vsi = pf->vsi[vf->lan_vsi_idx]; - first = vf->first_vector_idx; - last = (first + pf->num_msix_per_vf) - 1; - abs_first = first + pf->hw.func_caps.common_cap.msix_vector_first_id; - abs_last = (abs_first + pf->num_msix_per_vf) - 1; - abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; - - /* VF Vector allocation */ - reg = (((abs_first << VPINT_ALLOC_FIRST_S) & VPINT_ALLOC_FIRST_M) | - ((abs_last << VPINT_ALLOC_LAST_S) & VPINT_ALLOC_LAST_M) | - VPINT_ALLOC_VALID_M); + pf_based_first_msix = vf->first_vector_idx; + pf_based_last_msix = (pf_based_first_msix + pf->num_msix_per_vf) - 1; + + device_based_first_msix = pf_based_first_msix + + pf->hw.func_caps.common_cap.msix_vector_first_id; + device_based_last_msix = + (device_based_first_msix + pf->num_msix_per_vf) - 1; + device_based_vf_id = vf->vf_id + hw->func_caps.vf_base_id; + + reg = (((device_based_first_msix << VPINT_ALLOC_FIRST_S) & + VPINT_ALLOC_FIRST_M) | + ((device_based_last_msix << VPINT_ALLOC_LAST_S) & + VPINT_ALLOC_LAST_M) | VPINT_ALLOC_VALID_M); wr32(hw, VPINT_ALLOC(vf->vf_id), reg); - reg = (((abs_first << VPINT_ALLOC_PCI_FIRST_S) + reg = (((device_based_first_msix << VPINT_ALLOC_PCI_FIRST_S) & VPINT_ALLOC_PCI_FIRST_M) | - ((abs_last << VPINT_ALLOC_PCI_LAST_S) & VPINT_ALLOC_PCI_LAST_M) | - VPINT_ALLOC_PCI_VALID_M); + ((device_based_last_msix << VPINT_ALLOC_PCI_LAST_S) & + VPINT_ALLOC_PCI_LAST_M) | VPINT_ALLOC_PCI_VALID_M); wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg); + /* map the interrupts to its functions */ - for (v = first; v <= last; v++) { - reg = (((abs_vf_id << GLINT_VECT2FUNC_VF_NUM_S) & + for (v = pf_based_first_msix; v <= pf_based_last_msix; v++) { + reg = (((device_based_vf_id << GLINT_VECT2FUNC_VF_NUM_S) & GLINT_VECT2FUNC_VF_NUM_M) | ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) & GLINT_VECT2FUNC_PF_NUM_M)); wr32(hw, GLINT_VECT2FUNC(v), reg); } - /* Map mailbox interrupt. We put an explicit 0 here to remind us that - * VF admin queue interrupts will go to VF MSI-X vector 0. 
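The loop just above routes every PF-relative vector in the VF's range to the VF function by packing the device-based VF number and the owning PF number into GLINT_VECT2FUNC. A sketch of the shift-and-mask pattern, using the same _S shift and _M mask register macros as the hunk:

	static u32 vect2func_val(u32 vf_num, u32 pf_num)
	{
		return ((vf_num << GLINT_VECT2FUNC_VF_NUM_S) &
			GLINT_VECT2FUNC_VF_NUM_M) |
		       ((pf_num << GLINT_VECT2FUNC_PF_NUM_S) &
			GLINT_VECT2FUNC_PF_NUM_M);
	}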
- */ - wr32(hw, VPINT_MBX_CTL(abs_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M | 0); + /* Map mailbox interrupt to VF MSI-X vector 0 */ + wr32(hw, VPINT_MBX_CTL(device_based_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M); +} + +/** + * ice_ena_vf_q_mappings - enable Rx/Tx queue mappings for a VF + * @vf: VF to enable the mappings for + * @max_txq: max Tx queues allowed on the VF's VSI + * @max_rxq: max Rx queues allowed on the VF's VSI + */ +static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq) +{ + struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx]; + struct device *dev = ice_pf_to_dev(vf->pf); + struct ice_hw *hw = &vf->pf->hw; + u32 reg; + /* set regardless of mapping mode */ wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M); @@ -682,7 +740,7 @@ static void ice_ena_vf_mappings(struct ice_vf *vf) */ reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) & VPLAN_TX_QBASE_VFFIRSTQ_M) | - (((vsi->alloc_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) & + (((max_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) & VPLAN_TX_QBASE_VFNUMQ_M)); wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg); } else { @@ -700,7 +758,7 @@ static void ice_ena_vf_mappings(struct ice_vf *vf) */ reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) & VPLAN_RX_QBASE_VFFIRSTQ_M) | - (((vsi->alloc_txq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) & + (((max_rxq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) & VPLAN_RX_QBASE_VFNUMQ_M)); wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg); } else { @@ -709,6 +767,18 @@ static void ice_ena_vf_mappings(struct ice_vf *vf) } /** + * ice_ena_vf_mappings - enable VF MSIX and queue mapping + * @vf: pointer to the VF structure + */ +static void ice_ena_vf_mappings(struct ice_vf *vf) +{ + struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx]; + + ice_ena_vf_msix_mappings(vf); + ice_ena_vf_q_mappings(vf, vsi->alloc_txq, vsi->alloc_rxq); +} + +/** * ice_determine_res * @pf: pointer to the PF structure * @avail_res: available resources in the PF structure @@ -906,51 +976,18 @@ static int ice_set_per_vf_res(struct ice_pf *pf) } /** - * ice_cleanup_and_realloc_vf - Clean up VF and reallocate resources after reset - * @vf: pointer to the VF structure - * - * Cleanup a VF after the hardware reset is finished. Expects the caller to - * have verified whether the reset is finished properly, and ensure the - * minimum amount of wait time has passed. Reallocate VF resources back to make - * VF state active + * ice_clear_vf_reset_trigger - enable VF to access hardware + * @vf: VF to enabled hardware access for */ -static void ice_cleanup_and_realloc_vf(struct ice_vf *vf) +static void ice_clear_vf_reset_trigger(struct ice_vf *vf) { - struct ice_pf *pf = vf->pf; - struct ice_hw *hw; + struct ice_hw *hw = &vf->pf->hw; u32 reg; - hw = &pf->hw; - - /* PF software completes the flow by notifying VF that reset flow is - * completed. This is done by enabling hardware by clearing the reset - * bit in the VPGEN_VFRTRIG reg and setting VFR_STATE in the VFGEN_RSTAT - * register to VFR completed (done at the end of this function) - * By doing this we allow HW to access VF memory at any point. If we - * did it any sooner, HW could access memory while it was being freed - * in ice_free_vf_res(), causing an IOMMU fault. - * - * On the other hand, this needs to be done ASAP, because the VF driver - * is waiting for this to happen and may report a timeout. It's - * harmless, but it gets logged into Guest OS kernel log, so best avoid - * it. 
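The removed comment above encodes an ordering requirement the refactor keeps: hardware only regains access to VF memory once VPGEN_VFRTRIG is cleared, and VFGEN_RSTAT is set to VFR_VFACTIVE only after the VF is fully reconfigured, because the VF driver may start sending requests the moment it sees that value. Condensed from the new helpers (order matters, code abridged):

	ice_clear_vf_reset_trigger(vf);	/* HW may touch VF memory again */
	ice_vf_rebuild_host_cfg(vf);	/* MAC/VLAN/trust restored */
	ice_ena_vf_mappings(vf);	/* vectors and queues mapped */
	wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE); /* last */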
- */ reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id)); reg &= ~VPGEN_VFRTRIG_VFSWR_M; wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg); - - /* reallocate VF resources to finish resetting the VSI state */ - if (!ice_alloc_vf_res(vf)) { - ice_ena_vf_mappings(vf); - set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states); - clear_bit(ICE_VF_STATE_DIS, vf->vf_states); - } - - /* Tell the VF driver the reset is done. This needs to be done only - * after VF has been fully initialized, because the VF driver may - * request resources immediately after setting this flag. - */ - wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE); + ice_flush(hw); } /** @@ -994,44 +1031,124 @@ ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m, return status; } +static void ice_vf_clear_counters(struct ice_vf *vf) +{ + struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx]; + + vf->num_mac = 0; + vsi->num_vlan = 0; + memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events)); + memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events)); +} + /** - * ice_config_res_vfs - Finalize allocation of VFs resources in one go - * @pf: pointer to the PF structure + * ice_vf_pre_vsi_rebuild - tasks to be done prior to VSI rebuild + * @vf: VF to perform pre VSI rebuild tasks * - * This function is being called as last part of resetting all VFs, or when - * configuring VFs for the first time, where there is no resource to be freed - * Returns true if resources were properly allocated for all VFs, and false - * otherwise. + * These tasks are items that don't need to be amortized since they are most + * likely called in a for loop with all VF(s) in the reset_all_vfs() case. */ -static bool ice_config_res_vfs(struct ice_pf *pf) +static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf) { - struct device *dev = ice_pf_to_dev(pf); - struct ice_hw *hw = &pf->hw; - int v; + ice_vf_clear_counters(vf); + ice_clear_vf_reset_trigger(vf); +} - if (ice_set_per_vf_res(pf)) { - dev_err(dev, "Cannot allocate VF resources, try with fewer number of VFs\n"); - return false; - } +/** + * ice_vf_rebuild_host_cfg - host admin configuration is persistent across reset + * @vf: VF to rebuild host configuration on + */ +static void ice_vf_rebuild_host_cfg(struct ice_vf *vf) +{ + struct device *dev = ice_pf_to_dev(vf->pf); - /* rearm global interrupts */ - if (test_and_clear_bit(__ICE_OICR_INTR_DIS, pf->state)) - ice_irq_dynamic_ena(hw, NULL, NULL); + ice_vf_set_host_trust_cfg(vf); - /* Finish resetting each VF and allocate resources */ - ice_for_each_vf(pf, v) { - struct ice_vf *vf = &pf->vf[v]; + if (ice_vf_rebuild_host_mac_cfg(vf)) + dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n", + vf->vf_id); - vf->num_vf_qs = pf->num_qps_per_vf; - dev_dbg(dev, "VF-id %d has %d queues configured\n", vf->vf_id, - vf->num_vf_qs); - ice_cleanup_and_realloc_vf(vf); + if (ice_vf_rebuild_host_vlan_cfg(vf)) + dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n", + vf->vf_id); +} + +/** + * ice_vf_rebuild_vsi_with_release - release and setup the VF's VSI + * @vf: VF to release and setup the VSI for + * + * This is only called when a single VF is being reset (i.e. VFR, VFLR, host VF + * configuration change, etc.). + */ +static int ice_vf_rebuild_vsi_with_release(struct ice_vf *vf) +{ + ice_vf_vsi_release(vf); + if (!ice_vf_vsi_setup(vf)) + return -ENOMEM; + + return 0; +} + +/** + * ice_vf_rebuild_vsi - rebuild the VF's VSI + * @vf: VF to rebuild the VSI for + * + * This is only called when all VF(s) are being reset (i.e. 
PCIe Reset on the + * host, PFR, CORER, etc.). + */ +static int ice_vf_rebuild_vsi(struct ice_vf *vf) +{ + struct ice_pf *pf = vf->pf; + struct ice_vsi *vsi; + + vsi = pf->vsi[vf->lan_vsi_idx]; + + if (ice_vsi_rebuild(vsi, true)) { + dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n", + vf->vf_id); + return -EIO; } + /* vsi->idx will remain the same in this case so don't update + * vf->lan_vsi_idx + */ + vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx); + vf->lan_vsi_num = vsi->vsi_num; - ice_flush(hw); - clear_bit(__ICE_VF_DIS, pf->state); + return 0; +} - return true; +/** + * ice_vf_set_initialized - VF is ready for VIRTCHNL communication + * @vf: VF to set in initialized state + * + * After this function the VF will be ready to receive/handle the + * VIRTCHNL_OP_GET_VF_RESOURCES message + */ +static void ice_vf_set_initialized(struct ice_vf *vf) +{ + ice_set_vf_state_qs_dis(vf); + clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states); + clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states); + clear_bit(ICE_VF_STATE_DIS, vf->vf_states); + set_bit(ICE_VF_STATE_INIT, vf->vf_states); +} + +/** + * ice_vf_post_vsi_rebuild - tasks to do after the VF's VSI have been rebuilt + * @vf: VF to perform tasks on + */ +static void ice_vf_post_vsi_rebuild(struct ice_vf *vf) +{ + struct ice_pf *pf = vf->pf; + struct ice_hw *hw; + + hw = &pf->hw; + + ice_vf_rebuild_host_cfg(vf); + + ice_vf_set_initialized(vf); + ice_ena_vf_mappings(vf); + wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE); } /** @@ -1065,17 +1182,6 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) ice_for_each_vf(pf, v) ice_trigger_vf_reset(&pf->vf[v], is_vflr, true); - ice_for_each_vf(pf, v) { - struct ice_vsi *vsi; - - vf = &pf->vf[v]; - vsi = pf->vsi[vf->lan_vsi_idx]; - if (test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) - ice_dis_vf_qs(vf); - ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL, - NULL, ICE_VF_RESET, vf->vf_id, NULL); - } - /* HW requires some time to make sure it can flush the FIFO for a VF * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in * sequence to make sure that it has completed. We'll keep track of @@ -1112,21 +1218,13 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) ice_for_each_vf(pf, v) { vf = &pf->vf[v]; - ice_free_vf_res(vf); - - /* Free VF queues as well, and reallocate later. - * If a given VF has different number of queues - * configured, the request for update will come - * via mailbox communication. 
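With the helpers above in place, resetting all VFs collapses into one three-step pipeline per VF, as the hunk just below shows: pre-rebuild (clear counters and the reset trigger), rebuild the VSI in place, then post-rebuild (host config, mappings, VFR_VFACTIVE). In loop form:

	ice_for_each_vf(pf, v) {
		struct ice_vf *vf = &pf->vf[v];

		ice_vf_pre_vsi_rebuild(vf);
		ice_vf_rebuild_vsi(vf);
		ice_vf_post_vsi_rebuild(vf);
	}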
- */ - vf->num_vf_qs = 0; + ice_vf_pre_vsi_rebuild(vf); + ice_vf_rebuild_vsi(vf); + ice_vf_post_vsi_rebuild(vf); } - if (ice_sriov_free_msix_res(pf)) - dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n"); - - if (!ice_config_res_vfs(pf)) - return false; + ice_flush(hw); + clear_bit(__ICE_VF_DIS, pf->state); return true; } @@ -1238,12 +1336,9 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) dev_err(dev, "disabling promiscuous mode failed\n"); } - /* free VF resources to begin resetting the VSI state */ - ice_free_vf_res(vf); - - ice_cleanup_and_realloc_vf(vf); - - ice_flush(hw); + ice_vf_pre_vsi_rebuild(vf); + ice_vf_rebuild_vsi_with_release(vf); + ice_vf_post_vsi_rebuild(vf); return true; } @@ -1311,16 +1406,144 @@ static void ice_vc_notify_vf_reset(struct ice_vf *vf) } /** - * ice_alloc_vfs - Allocate and set up VFs resources + * ice_init_vf_vsi_res - initialize/setup VF VSI resources + * @vf: VF to initialize/setup the VSI for + * + * This function creates a VSI for the VF, adds a VLAN 0 filter, and sets up the + * VF VSI's broadcast filter and is only used during initial VF creation. + */ +static int ice_init_vf_vsi_res(struct ice_vf *vf) +{ + struct ice_pf *pf = vf->pf; + u8 broadcast[ETH_ALEN]; + enum ice_status status; + struct ice_vsi *vsi; + struct device *dev; + int err; + + vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf); + + dev = ice_pf_to_dev(pf); + vsi = ice_vf_vsi_setup(vf); + if (!vsi) + return -ENOMEM; + + err = ice_vsi_add_vlan(vsi, 0, ICE_FWD_TO_VSI); + if (err) { + dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n", + vf->vf_id); + goto release_vsi; + } + + eth_broadcast_addr(broadcast); + status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI); + if (status) { + dev_err(dev, "Failed to add broadcast MAC filter for VF %d, status %s\n", + vf->vf_id, ice_stat_str(status)); + err = ice_status_to_errno(status); + goto release_vsi; + } + + vf->num_mac = 1; + + return 0; + +release_vsi: + ice_vf_vsi_release(vf); + return err; +} + +/** + * ice_start_vfs - start VFs so they are ready to be used by SR-IOV + * @pf: PF the VFs are associated with + */ +static int ice_start_vfs(struct ice_pf *pf) +{ + struct ice_hw *hw = &pf->hw; + int retval, i; + + ice_for_each_vf(pf, i) { + struct ice_vf *vf = &pf->vf[i]; + + ice_clear_vf_reset_trigger(vf); + + retval = ice_init_vf_vsi_res(vf); + if (retval) { + dev_err(ice_pf_to_dev(pf), "Failed to initialize VSI resources for VF %d, error %d\n", + vf->vf_id, retval); + goto teardown; + } + + set_bit(ICE_VF_STATE_INIT, vf->vf_states); + ice_ena_vf_mappings(vf); + wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE); + } + + ice_flush(hw); + return 0; + +teardown: + for (i = i - 1; i >= 0; i--) { + struct ice_vf *vf = &pf->vf[i]; + + ice_dis_vf_mappings(vf); + ice_vf_vsi_release(vf); + } + + return retval; +} + +/** + * ice_set_dflt_settings - set VF defaults during initialization/creation + * @pf: PF holding reference to all VFs for default configuration + */ +static void ice_set_dflt_settings_vfs(struct ice_pf *pf) +{ + int i; + + ice_for_each_vf(pf, i) { + struct ice_vf *vf = &pf->vf[i]; + + vf->pf = pf; + vf->vf_id = i; + vf->vf_sw_id = pf->first_sw; + /* assign default capabilities */ + set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vf->vf_caps); + vf->spoofchk = true; + vf->num_vf_qs = pf->num_qps_per_vf; + } +} + +/** + * ice_alloc_vfs - allocate num_vfs in the PF structure + * @pf: PF to store the allocated VFs in + * @num_vfs: number of VFs to allocate + */ +static int ice_alloc_vfs(struct ice_pf *pf, int 
num_vfs) +{ + struct ice_vf *vfs; + + vfs = devm_kcalloc(ice_pf_to_dev(pf), num_vfs, sizeof(*vfs), + GFP_KERNEL); + if (!vfs) + return -ENOMEM; + + pf->vf = vfs; + pf->num_alloc_vfs = num_vfs; + + return 0; +} + +/** + * ice_ena_vfs - enable VFs so they are ready to be used * @pf: pointer to the PF structure - * @num_alloc_vfs: number of VFs to allocate + * @num_vfs: number of VFs to enable */ -static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs) +static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs) { struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; - struct ice_vf *vfs; - int i, ret; + int ret; /* Disable global interrupt 0 so we don't try to handle the VFLR. */ wr32(hw, GLINT_DYN_CTL(pf->oicr_idx), @@ -1328,43 +1551,37 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs) set_bit(__ICE_OICR_INTR_DIS, pf->state); ice_flush(hw); - ret = pci_enable_sriov(pf->pdev, num_alloc_vfs); + ret = pci_enable_sriov(pf->pdev, num_vfs); if (ret) { pf->num_alloc_vfs = 0; goto err_unroll_intr; } - /* allocate memory */ - vfs = devm_kcalloc(dev, num_alloc_vfs, sizeof(*vfs), GFP_KERNEL); - if (!vfs) { - ret = -ENOMEM; - goto err_pci_disable_sriov; - } - pf->vf = vfs; - pf->num_alloc_vfs = num_alloc_vfs; - /* apply default profile */ - ice_for_each_vf(pf, i) { - vfs[i].pf = pf; - vfs[i].vf_sw_id = pf->first_sw; - vfs[i].vf_id = i; + ret = ice_alloc_vfs(pf, num_vfs); + if (ret) + goto err_pci_disable_sriov; - /* assign default capabilities */ - set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps); - vfs[i].spoofchk = true; + if (ice_set_per_vf_res(pf)) { + dev_err(dev, "Not enough resources for %d VFs, try with fewer number of VFs\n", + num_vfs); + ret = -ENOSPC; + goto err_unroll_sriov; } - /* VF resources get allocated with initialization */ - if (!ice_config_res_vfs(pf)) { - ret = -EIO; + ice_set_dflt_settings_vfs(pf); + + if (ice_start_vfs(pf)) { + dev_err(dev, "Failed to start VF(s)\n"); + ret = -EAGAIN; goto err_unroll_sriov; } - return ret; + clear_bit(__ICE_VF_DIS, pf->state); + return 0; err_unroll_sriov: + devm_kfree(dev, pf->vf); pf->vf = NULL; - devm_kfree(dev, vfs); - vfs = NULL; pf->num_alloc_vfs = 0; err_pci_disable_sriov: pci_disable_sriov(pf->pdev); @@ -1404,6 +1621,8 @@ static bool ice_pf_state_is_nominal(struct ice_pf *pf) * ice_pci_sriov_ena - Enable or change number of VFs * @pf: pointer to the PF structure * @num_vfs: number of VFs to allocate + * + * Returns 0 on success and negative on failure */ static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs) { @@ -1411,20 +1630,10 @@ static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs) struct device *dev = ice_pf_to_dev(pf); int err; - if (!ice_pf_state_is_nominal(pf)) { - dev_err(dev, "Cannot enable SR-IOV, device not ready\n"); - return -EBUSY; - } - - if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) { - dev_err(dev, "This device is not capable of SR-IOV\n"); - return -EOPNOTSUPP; - } - if (pre_existing_vfs && pre_existing_vfs != num_vfs) ice_free_vfs(pf); else if (pre_existing_vfs && pre_existing_vfs == num_vfs) - return num_vfs; + return 0; if (num_vfs > pf->num_vfs_supported) { dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n", @@ -1432,45 +1641,77 @@ static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs) return -EOPNOTSUPP; } - dev_info(dev, "Allocating %d VFs\n", num_vfs); - err = ice_alloc_vfs(pf, num_vfs); + dev_info(dev, "Enabling %d VFs\n", num_vfs); + err = ice_ena_vfs(pf, num_vfs); if (err) { dev_err(dev, "Failed to enable SR-IOV: %d\n", err); return err; } 
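One detail worth flagging in ice_start_vfs() above: the teardown loop "for (i = i - 1; i >= 0; i--)" is why i stays a signed int here even though most VF indices in this series move to unsigned types; with an unsigned counter, "i >= 0" is always true and the decrement would wrap at zero.

	/* Unwind only the VFs that were fully started: i-1 down to 0.
	 * Signed i is required for the >= 0 termination test.
	 */
	for (i = i - 1; i >= 0; i--) {
		ice_dis_vf_mappings(&pf->vf[i]);
		ice_vf_vsi_release(&pf->vf[i]);
	}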
set_bit(ICE_FLAG_SRIOV_ENA, pf->flags); - return num_vfs; + return 0; +} + +/** + * ice_check_sriov_allowed - check if SR-IOV is allowed based on various checks + * @pf: PF to enabled SR-IOV on + */ +static int ice_check_sriov_allowed(struct ice_pf *pf) +{ + struct device *dev = ice_pf_to_dev(pf); + + if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) { + dev_err(dev, "This device is not capable of SR-IOV\n"); + return -EOPNOTSUPP; + } + + if (ice_is_safe_mode(pf)) { + dev_err(dev, "SR-IOV cannot be configured - Device is in Safe Mode\n"); + return -EOPNOTSUPP; + } + + if (!ice_pf_state_is_nominal(pf)) { + dev_err(dev, "Cannot enable SR-IOV, device not ready\n"); + return -EBUSY; + } + + return 0; } /** * ice_sriov_configure - Enable or change number of VFs via sysfs * @pdev: pointer to a pci_dev structure - * @num_vfs: number of VFs to allocate + * @num_vfs: number of VFs to allocate or 0 to free VFs * - * This function is called when the user updates the number of VFs in sysfs. + * This function is called when the user updates the number of VFs in sysfs. On + * success return whatever num_vfs was set to by the caller. Return negative on + * failure. */ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs) { struct ice_pf *pf = pci_get_drvdata(pdev); struct device *dev = ice_pf_to_dev(pf); + int err; - if (ice_is_safe_mode(pf)) { - dev_err(dev, "SR-IOV cannot be configured - Device is in Safe Mode\n"); - return -EOPNOTSUPP; - } + err = ice_check_sriov_allowed(pf); + if (err) + return err; - if (num_vfs) - return ice_pci_sriov_ena(pf, num_vfs); + if (!num_vfs) { + if (!pci_vfs_assigned(pdev)) { + ice_free_vfs(pf); + return 0; + } - if (!pci_vfs_assigned(pdev)) { - ice_free_vfs(pf); - } else { dev_err(dev, "can't free VFs because some are assigned to VMs.\n"); return -EBUSY; } - return 0; + err = ice_pci_sriov_ena(pf, num_vfs); + if (err) + return err; + + return num_vfs; } /** @@ -1483,7 +1724,7 @@ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs) void ice_process_vflr_event(struct ice_pf *pf) { struct ice_hw *hw = &pf->hw; - int vf_id; + unsigned int vf_id; u32 reg; if (!test_and_clear_bit(__ICE_VFLR_EVENT_PENDING, pf->state) || @@ -1524,7 +1765,7 @@ static void ice_vc_reset_vf(struct ice_vf *vf) */ static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq) { - int vf_id; + unsigned int vf_id; ice_for_each_vf(pf, vf_id) { struct ice_vf *vf = &pf->vf[vf_id]; @@ -1628,8 +1869,9 @@ ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode, aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval, msg, msglen, NULL); if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) { - dev_info(dev, "Unable to send the message to VF %d ret %d aq_err %d\n", - vf->vf_id, aq_ret, pf->hw.mailboxq.sq_last_status); + dev_info(dev, "Unable to send the message to VF %d ret %s aq_err %s\n", + vf->vf_id, ice_stat_str(aq_ret), + ice_aq_str(pf->hw.mailboxq.sq_last_status)); return -EIO; } @@ -2044,8 +2286,9 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena) status = ice_update_vsi(&pf->hw, vf_vsi->idx, ctx, NULL); if (status) { - dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d\n error %d\n", - ena ? "en" : "dis", vf->vf_id, vf_vsi->vsi_num, status); + dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d\n error %s\n", + ena ? 
"en" : "dis", vf->vf_id, vf_vsi->vsi_num, + ice_stat_str(status)); ret = -EIO; goto out; } @@ -2060,6 +2303,174 @@ out: } /** + * ice_is_any_vf_in_promisc - check if any VF(s) are in promiscuous mode + * @pf: PF structure for accessing VF(s) + * + * Return false if no VF(s) are in unicast and/or multicast promiscuous mode, + * else return true + */ +bool ice_is_any_vf_in_promisc(struct ice_pf *pf) +{ + int vf_idx; + + ice_for_each_vf(pf, vf_idx) { + struct ice_vf *vf = &pf->vf[vf_idx]; + + /* found a VF that has promiscuous mode configured */ + if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) || + test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) + return true; + } + + return false; +} + +/** + * ice_vc_cfg_promiscuous_mode_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * + * called from the VF to configure VF VSIs promiscuous mode + */ +static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg) +{ + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + struct virtchnl_promisc_info *info = + (struct virtchnl_promisc_info *)msg; + struct ice_pf *pf = vf->pf; + struct ice_vsi *vsi; + struct device *dev; + bool rm_promisc; + int ret = 0; + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + if (!ice_vc_isvalid_vsi_id(vf, info->vsi_id)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + vsi = pf->vsi[vf->lan_vsi_idx]; + if (!vsi) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + dev = ice_pf_to_dev(pf); + if (!test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { + dev_err(dev, "Unprivileged VF %d is attempting to configure promiscuous mode\n", + vf->vf_id); + /* Leave v_ret alone, lie to the VF on purpose. */ + goto error_param; + } + + rm_promisc = !(info->flags & FLAG_VF_UNICAST_PROMISC) && + !(info->flags & FLAG_VF_MULTICAST_PROMISC); + + if (vsi->num_vlan || vf->port_vlan_info) { + struct ice_vsi *pf_vsi = ice_get_main_vsi(pf); + struct net_device *pf_netdev; + + if (!pf_vsi) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + pf_netdev = pf_vsi->netdev; + + ret = ice_set_vf_spoofchk(pf_netdev, vf->vf_id, rm_promisc); + if (ret) { + dev_err(dev, "Failed to update spoofchk to %s for VF %d VSI %d when setting promiscuous mode\n", + rm_promisc ? "ON" : "OFF", vf->vf_id, + vsi->vsi_num); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + } + + ret = ice_cfg_vlan_pruning(vsi, true, !rm_promisc); + if (ret) { + dev_err(dev, "Failed to configure VLAN pruning in promiscuous mode\n"); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + } + + if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) { + bool set_dflt_vsi = !!(info->flags & FLAG_VF_UNICAST_PROMISC); + + if (set_dflt_vsi && !ice_is_dflt_vsi_in_use(pf->first_sw)) + /* only attempt to set the default forwarding VSI if + * it's not currently set + */ + ret = ice_set_dflt_vsi(pf->first_sw, vsi); + else if (!set_dflt_vsi && + ice_is_vsi_dflt_vsi(pf->first_sw, vsi)) + /* only attempt to free the default forwarding VSI if we + * are the owner + */ + ret = ice_clear_dflt_vsi(pf->first_sw); + + if (ret) { + dev_err(dev, "%sable VF %d as the default VSI failed, error %d\n", + set_dflt_vsi ? 
"en" : "dis", vf->vf_id, ret); + v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR; + goto error_param; + } + } else { + enum ice_status status; + u8 promisc_m; + + if (info->flags & FLAG_VF_UNICAST_PROMISC) { + if (vf->port_vlan_info || vsi->num_vlan) + promisc_m = ICE_UCAST_VLAN_PROMISC_BITS; + else + promisc_m = ICE_UCAST_PROMISC_BITS; + } else if (info->flags & FLAG_VF_MULTICAST_PROMISC) { + if (vf->port_vlan_info || vsi->num_vlan) + promisc_m = ICE_MCAST_VLAN_PROMISC_BITS; + else + promisc_m = ICE_MCAST_PROMISC_BITS; + } else { + if (vf->port_vlan_info || vsi->num_vlan) + promisc_m = ICE_UCAST_VLAN_PROMISC_BITS; + else + promisc_m = ICE_UCAST_PROMISC_BITS; + } + + /* Configure multicast/unicast with or without VLAN promiscuous + * mode + */ + status = ice_vf_set_vsi_promisc(vf, vsi, promisc_m, rm_promisc); + if (status) { + dev_err(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d failed, error: %s\n", + rm_promisc ? "dis" : "en", vf->vf_id, + ice_stat_str(status)); + v_ret = ice_err_to_virt_err(status); + goto error_param; + } else { + dev_dbg(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d succeeded\n", + rm_promisc ? "dis" : "en", vf->vf_id); + } + } + + if (info->flags & FLAG_VF_MULTICAST_PROMISC) + set_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states); + else + clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states); + + if (info->flags & FLAG_VF_UNICAST_PROMISC) + set_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states); + else + clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states); + +error_param: + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, + v_ret, NULL, 0); +} + +/** * ice_vc_get_stats_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer @@ -2118,6 +2529,52 @@ static bool ice_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs) } /** + * ice_vf_ena_txq_interrupt - enable Tx queue interrupt via QINT_TQCTL + * @vsi: VSI of the VF to configure + * @q_idx: VF queue index used to determine the queue in the PF's space + */ +static void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx) +{ + struct ice_hw *hw = &vsi->back->hw; + u32 pfq = vsi->txq_map[q_idx]; + u32 reg; + + reg = rd32(hw, QINT_TQCTL(pfq)); + + /* MSI-X index 0 in the VF's space is always for the OICR, which means + * this is most likely a poll mode VF driver, so don't enable an + * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP + */ + if (!(reg & QINT_TQCTL_MSIX_INDX_M)) + return; + + wr32(hw, QINT_TQCTL(pfq), reg | QINT_TQCTL_CAUSE_ENA_M); +} + +/** + * ice_vf_ena_rxq_interrupt - enable Tx queue interrupt via QINT_RQCTL + * @vsi: VSI of the VF to configure + * @q_idx: VF queue index used to determine the queue in the PF's space + */ +static void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx) +{ + struct ice_hw *hw = &vsi->back->hw; + u32 pfq = vsi->rxq_map[q_idx]; + u32 reg; + + reg = rd32(hw, QINT_RQCTL(pfq)); + + /* MSI-X index 0 in the VF's space is always for the OICR, which means + * this is most likely a poll mode VF driver, so don't enable an + * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP + */ + if (!(reg & QINT_RQCTL_MSIX_INDX_M)) + return; + + wr32(hw, QINT_RQCTL(pfq), reg | QINT_RQCTL_CAUSE_ENA_M); +} + +/** * ice_vc_ena_qs_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer @@ -2177,6 +2634,7 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg) goto error_param; } + ice_vf_ena_rxq_interrupt(vsi, vf_q_id); set_bit(vf_q_id, vf->rxq_ena); } @@ -2192,6 +2650,7 @@ static int ice_vc_ena_qs_msg(struct 
ice_vf *vf, u8 *msg) if (test_bit(vf_q_id, vf->txq_ena)) continue; + ice_vf_ena_txq_interrupt(vsi, vf_q_id); set_bit(vf_q_id, vf->txq_ena); } @@ -2604,20 +3063,22 @@ ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr) return -EPERM; } - status = ice_vsi_cfg_mac_fltr(vsi, mac_addr, true); + status = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI); if (status == ICE_ERR_ALREADY_EXISTS) { dev_err(dev, "MAC %pM already exists for VF %d\n", mac_addr, vf->vf_id); return -EEXIST; } else if (status) { - dev_err(dev, "Failed to add MAC %pM for VF %d\n, error %d\n", - mac_addr, vf->vf_id, status); + dev_err(dev, "Failed to add MAC %pM for VF %d, error %s\n", + mac_addr, vf->vf_id, ice_stat_str(status)); return -EIO; } - /* only set dflt_lan_addr once */ - if (is_zero_ether_addr(vf->dflt_lan_addr.addr) && - is_unicast_ether_addr(mac_addr)) + /* Set the default LAN address to the latest unicast MAC address added + * by the VF. The default LAN address is reported by the PF via + * ndo_get_vf_config. + */ + if (is_unicast_ether_addr(mac_addr)) ether_addr_copy(vf->dflt_lan_addr.addr, mac_addr); vf->num_mac++; @@ -2641,14 +3102,14 @@ ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr) ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr)) return 0; - status = ice_vsi_cfg_mac_fltr(vsi, mac_addr, false); + status = ice_fltr_remove_mac(vsi, mac_addr, ICE_FWD_TO_VSI); if (status == ICE_ERR_DOES_NOT_EXIST) { dev_err(dev, "MAC %pM does not exist for VF %d\n", mac_addr, vf->vf_id); return -ENOENT; } else if (status) { - dev_err(dev, "Failed to delete MAC %pM for VF %d, error %d\n", - mac_addr, vf->vf_id, status); + dev_err(dev, "Failed to delete MAC %pM for VF %d, error %s\n", - mac_addr, vf->vf_id, ice_stat_str(status)); return -EIO; } @@ -2885,7 +3346,7 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos, /* add VLAN 0 filter back when transitioning from port VLAN to * no port VLAN. No change to old port VLAN on failure. 
*/ - ret = ice_vsi_add_vlan(vsi, 0); + ret = ice_vsi_add_vlan(vsi, 0, ICE_FWD_TO_VSI); if (ret) return ret; ret = ice_vsi_manage_pvid(vsi, 0, false); @@ -2898,7 +3359,7 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos, vlan_id, qos, vf_id); /* add VLAN filter for the port VLAN */ - ret = ice_vsi_add_vlan(vsi, vlan_id); + ret = ice_vsi_add_vlan(vsi, vlan_id, ICE_FWD_TO_VSI); if (ret) return ret; } @@ -2992,8 +3453,9 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v) goto error_param; } - if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) || - test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) + if ((test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) || + test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) && + test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) vlan_promisc = true; if (add_v) { @@ -3018,7 +3480,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v) if (!vid) continue; - status = ice_vsi_add_vlan(vsi, vid); + status = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI); if (status) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; @@ -3317,6 +3779,9 @@ error_handler: case VIRTCHNL_OP_GET_STATS: err = ice_vc_get_stats_msg(vf, msg); break; + case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: + err = ice_vc_cfg_promiscuous_mode_msg(vf, msg); + break; case VIRTCHNL_OP_ADD_VLAN: err = ice_vc_add_vlan_msg(vf, msg); break; @@ -3390,6 +3855,39 @@ ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi) } /** + * ice_unicast_mac_exists - check if the unicast MAC exists on the PF's switch + * @pf: PF used to reference the switch's rules + * @umac: unicast MAC to compare against existing switch rules + * + * Return true on the first/any match, else return false + */ +static bool ice_unicast_mac_exists(struct ice_pf *pf, u8 *umac) +{ + struct ice_sw_recipe *mac_recipe_list = + &pf->hw.switch_info->recp_list[ICE_SW_LKUP_MAC]; + struct ice_fltr_mgmt_list_entry *list_itr; + struct list_head *rule_head; + struct mutex *rule_lock; /* protect MAC filter list access */ + + rule_head = &mac_recipe_list->filt_rules; + rule_lock = &mac_recipe_list->filt_rule_lock; + + mutex_lock(rule_lock); + list_for_each_entry(list_itr, rule_head, list_entry) { + u8 *existing_mac = &list_itr->fltr_info.l_data.mac.mac_addr[0]; + + if (ether_addr_equal(existing_mac, umac)) { + mutex_unlock(rule_lock); + return true; + } + } + + mutex_unlock(rule_lock); + + return false; +} + +/** * ice_set_vf_mac * @netdev: network interface device structure * @vf_id: VF identifier @@ -3412,10 +3910,20 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) } vf = &pf->vf[vf_id]; + /* nothing left to do, unicast MAC already set */ + if (ether_addr_equal(vf->dflt_lan_addr.addr, mac)) + return 0; + ret = ice_check_vf_ready_for_cfg(vf); if (ret) return ret; + if (ice_unicast_mac_exists(pf, mac)) { + netdev_err(netdev, "Unicast MAC %pM already exists on this PF. Preventing setting VF %u unicast MAC address to %pM\n", + mac, vf_id, mac); + return -EINVAL; + } + /* copy MAC into dflt_lan_addr and trigger a VF reset. The reset * flow will use the updated dflt_lan_addr and add a MAC filter * using ice_add_mac. 
Also set pf_set_mac to indicate that the PF has @@ -3554,6 +4062,24 @@ int ice_get_vf_stats(struct net_device *netdev, int vf_id, } /** + * ice_print_vf_rx_mdd_event - print VF Rx malicious driver detect event + * @vf: pointer to the VF structure + */ +void ice_print_vf_rx_mdd_event(struct ice_vf *vf) +{ + struct ice_pf *pf = vf->pf; + struct device *dev; + + dev = ice_pf_to_dev(pf); + + dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n", + vf->mdd_rx_events.count, pf->hw.pf_id, vf->vf_id, + vf->dflt_lan_addr.addr, + test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags) + ? "on" : "off"); +} + +/** * ice_print_vfs_mdd_event - print VFs malicious driver detect event * @pf: pointer to the PF structure * @@ -3582,12 +4108,7 @@ void ice_print_vfs_mdd_events(struct ice_pf *pf) if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) { vf->mdd_rx_events.last_printed = vf->mdd_rx_events.count; - - dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n", - vf->mdd_rx_events.count, hw->pf_id, i, - vf->dflt_lan_addr.addr, - test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags) - ? "on" : "off"); + ice_print_vf_rx_mdd_event(vf); } /* only print Tx MDD event message if there are new events */ diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h index 3f9464269bd2..0adff89a6749 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h @@ -64,7 +64,7 @@ struct ice_mdd_vf_events { struct ice_vf { struct ice_pf *pf; - s16 vf_id; /* VF ID in the PF space */ + u16 vf_id; /* VF ID in the PF space */ u16 lan_vsi_idx; /* index into PF struct */ /* first vector index of this VF in the PF space */ int first_vector_idx; @@ -128,9 +128,11 @@ void ice_set_vf_state_qs_dis(struct ice_vf *vf); int ice_get_vf_stats(struct net_device *netdev, int vf_id, struct ifla_vf_stats *vf_stats); +bool ice_is_any_vf_in_promisc(struct ice_pf *pf); void ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event); void ice_print_vfs_mdd_events(struct ice_pf *pf); +void ice_print_vf_rx_mdd_event(struct ice_vf *vf); #else /* CONFIG_PCI_IOV */ #define ice_process_vflr_event(pf) do {} while (0) #define ice_free_vfs(pf) do {} while (0) @@ -140,6 +142,7 @@ void ice_print_vfs_mdd_events(struct ice_pf *pf); #define ice_set_vf_state_qs_dis(vf) do {} while (0) #define ice_vf_lan_overflow_event(pf, event) do {} while (0) #define ice_print_vfs_mdd_events(pf) do {} while (0) +#define ice_print_vf_rx_mdd_event(vf) do {} while (0) static inline bool ice_reset_all_vfs(struct ice_pf __always_unused *pf, @@ -219,5 +222,10 @@ ice_get_vf_stats(struct net_device __always_unused *netdev, { return -EOPNOTSUPP; } + +static inline bool ice_is_any_vf_in_promisc(struct ice_pf __always_unused *pf) +{ + return false; +} #endif /* CONFIG_PCI_IOV */ #endif /* _ICE_VIRTCHNL_PF_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index 23e5515d4527..b6f928c9e9c9 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -2,7 +2,7 @@ /* Copyright (c) 2019, Intel Corporation. 
*/ #include <linux/bpf_trace.h> -#include <net/xdp_sock.h> +#include <net/xdp_sock_drv.h> #include <net/xdp.h> #include "ice.h" #include "ice_base.h" @@ -280,28 +280,6 @@ static int ice_xsk_alloc_umems(struct ice_vsi *vsi) } /** - * ice_xsk_add_umem - add a UMEM region for XDP sockets - * @vsi: VSI to which the UMEM will be added - * @umem: pointer to a requested UMEM region - * @qid: queue ID - * - * Returns 0 on success, negative on error - */ -static int ice_xsk_add_umem(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid) -{ - int err; - - err = ice_xsk_alloc_umems(vsi); - if (err) - return err; - - vsi->xsk_umems[qid] = umem; - vsi->num_xsk_umems_used++; - - return 0; -} - -/** * ice_xsk_remove_umem - Remove an UMEM for a certain ring/qid * @vsi: VSI from which the UMEM will be removed * @qid: Ring/qid associated with the UMEM @@ -318,65 +296,6 @@ static void ice_xsk_remove_umem(struct ice_vsi *vsi, u16 qid) } } -/** - * ice_xsk_umem_dma_map - DMA map UMEM region for XDP sockets - * @vsi: VSI to map the UMEM region - * @umem: UMEM to map - * - * Returns 0 on success, negative on error - */ -static int ice_xsk_umem_dma_map(struct ice_vsi *vsi, struct xdp_umem *umem) -{ - struct ice_pf *pf = vsi->back; - struct device *dev; - unsigned int i; - - dev = ice_pf_to_dev(pf); - for (i = 0; i < umem->npgs; i++) { - dma_addr_t dma = dma_map_page_attrs(dev, umem->pgs[i], 0, - PAGE_SIZE, - DMA_BIDIRECTIONAL, - ICE_RX_DMA_ATTR); - if (dma_mapping_error(dev, dma)) { - dev_dbg(dev, "XSK UMEM DMA mapping error on page num %d\n", - i); - goto out_unmap; - } - - umem->pages[i].dma = dma; - } - - return 0; - -out_unmap: - for (; i > 0; i--) { - dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE, - DMA_BIDIRECTIONAL, ICE_RX_DMA_ATTR); - umem->pages[i].dma = 0; - } - - return -EFAULT; -} - -/** - * ice_xsk_umem_dma_unmap - DMA unmap UMEM region for XDP sockets - * @vsi: VSI from which the UMEM will be unmapped - * @umem: UMEM to unmap - */ -static void ice_xsk_umem_dma_unmap(struct ice_vsi *vsi, struct xdp_umem *umem) -{ - struct ice_pf *pf = vsi->back; - struct device *dev; - unsigned int i; - - dev = ice_pf_to_dev(pf); - for (i = 0; i < umem->npgs; i++) { - dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE, - DMA_BIDIRECTIONAL, ICE_RX_DMA_ATTR); - - umem->pages[i].dma = 0; - } -} /** * ice_xsk_umem_disable - disable a UMEM region @@ -391,7 +310,7 @@ static int ice_xsk_umem_disable(struct ice_vsi *vsi, u16 qid) !vsi->xsk_umems[qid]) return -EINVAL; - ice_xsk_umem_dma_unmap(vsi, vsi->xsk_umems[qid]); + xsk_buff_dma_unmap(vsi->xsk_umems[qid], ICE_RX_DMA_ATTR); ice_xsk_remove_umem(vsi, qid); return 0; @@ -408,7 +327,6 @@ static int ice_xsk_umem_disable(struct ice_vsi *vsi, u16 qid) static int ice_xsk_umem_enable(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid) { - struct xdp_umem_fq_reuse *reuseq; int err; if (vsi->type != ICE_VSI_PF) return -EINVAL; @@ -419,20 +337,18 @@ ice_xsk_umem_enable(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid) if (qid >= vsi->num_xsk_umems) return -EINVAL; + err = ice_xsk_alloc_umems(vsi); + if (err) + return err; + if (vsi->xsk_umems && vsi->xsk_umems[qid]) return -EBUSY; - reuseq = xsk_reuseq_prepare(vsi->rx_rings[0]->count); - if (!reuseq) - return -ENOMEM; - - xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq)); - - err = ice_xsk_umem_dma_map(vsi, umem); - if (err) - return err; + vsi->xsk_umems[qid] = umem; + vsi->num_xsk_umems_used++; - err = ice_xsk_add_umem(vsi, umem, qid); + err = xsk_buff_dma_map(vsi->xsk_umems[qid], ice_pf_to_dev(vsi->back), + ICE_RX_DMA_ATTR); + if (err) 
return err; @@ -484,137 +400,22 @@ xsk_umem_if_up: } /** - * ice_zca_free - Callback for MEM_TYPE_ZERO_COPY allocations - * @zca: zero-cpoy allocator - * @handle: Buffer handle - */ -void ice_zca_free(struct zero_copy_allocator *zca, unsigned long handle) -{ - struct ice_rx_buf *rx_buf; - struct ice_ring *rx_ring; - struct xdp_umem *umem; - u64 hr, mask; - u16 nta; - - rx_ring = container_of(zca, struct ice_ring, zca); - umem = rx_ring->xsk_umem; - hr = umem->headroom + XDP_PACKET_HEADROOM; - - mask = umem->chunk_mask; - - nta = rx_ring->next_to_alloc; - rx_buf = &rx_ring->rx_buf[nta]; - - nta++; - rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; - - handle &= mask; - - rx_buf->dma = xdp_umem_get_dma(umem, handle); - rx_buf->dma += hr; - - rx_buf->addr = xdp_umem_get_data(umem, handle); - rx_buf->addr += hr; - - rx_buf->handle = (u64)handle + umem->headroom; -} - -/** - * ice_alloc_buf_fast_zc - Retrieve buffer address from XDP umem - * @rx_ring: ring with an xdp_umem bound to it - * @rx_buf: buffer to which xsk page address will be assigned - * - * This function allocates an Rx buffer in the hot path. - * The buffer can come from fill queue or recycle queue. - * - * Returns true if an assignment was successful, false if not. - */ -static __always_inline bool -ice_alloc_buf_fast_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf) -{ - struct xdp_umem *umem = rx_ring->xsk_umem; - void *addr = rx_buf->addr; - u64 handle, hr; - - if (addr) { - rx_ring->rx_stats.page_reuse_count++; - return true; - } - - if (!xsk_umem_peek_addr(umem, &handle)) { - rx_ring->rx_stats.alloc_page_failed++; - return false; - } - - hr = umem->headroom + XDP_PACKET_HEADROOM; - - rx_buf->dma = xdp_umem_get_dma(umem, handle); - rx_buf->dma += hr; - - rx_buf->addr = xdp_umem_get_data(umem, handle); - rx_buf->addr += hr; - - rx_buf->handle = handle + umem->headroom; - - xsk_umem_release_addr(umem); - return true; -} - -/** - * ice_alloc_buf_slow_zc - Retrieve buffer address from XDP umem - * @rx_ring: ring with an xdp_umem bound to it - * @rx_buf: buffer to which xsk page address will be assigned - * - * This function allocates an Rx buffer in the slow path. - * The buffer can come from fill queue or recycle queue. - * - * Returns true if an assignment was successful, false if not. - */ -static __always_inline bool -ice_alloc_buf_slow_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf) -{ - struct xdp_umem *umem = rx_ring->xsk_umem; - u64 handle, headroom; - - if (!xsk_umem_peek_addr_rq(umem, &handle)) { - rx_ring->rx_stats.alloc_page_failed++; - return false; - } - - handle &= umem->chunk_mask; - headroom = umem->headroom + XDP_PACKET_HEADROOM; - - rx_buf->dma = xdp_umem_get_dma(umem, handle); - rx_buf->dma += headroom; - - rx_buf->addr = xdp_umem_get_data(umem, handle); - rx_buf->addr += headroom; - - rx_buf->handle = handle + umem->headroom; - - xsk_umem_release_addr_rq(umem); - return true; -} - -/** * ice_alloc_rx_bufs_zc - allocate a number of Rx buffers * @rx_ring: Rx ring * @count: The number of buffers to allocate - * @alloc: the function pointer to call for allocation * * This function allocates a number of Rx buffers from the fill ring * or the internal recycle mechanism and places them on the Rx ring. * * Returns false if all allocations were successful, true if any fail. 
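 * (As the new body below shows, each buffer now comes from xsk_buff_alloc()
 * and its DMA address from xsk_buff_xdp_get_dma(); the old driver-local
 * fast/slow allocators are gone.)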
*/ -static bool -ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, int count, - bool (*alloc)(struct ice_ring *, struct ice_rx_buf *)) +bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count) { union ice_32b_rx_flex_desc *rx_desc; u16 ntu = rx_ring->next_to_use; struct ice_rx_buf *rx_buf; bool ret = false; + dma_addr_t dma; if (!count) return false; @@ -623,16 +424,14 @@ ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, int count, rx_buf = &rx_ring->rx_buf[ntu]; do { - if (!alloc(rx_ring, rx_buf)) { + rx_buf->xdp = xsk_buff_alloc(rx_ring->xsk_umem); + if (!rx_buf->xdp) { ret = true; break; } - dma_sync_single_range_for_device(rx_ring->dev, rx_buf->dma, 0, - rx_ring->rx_buf_len, - DMA_BIDIRECTIONAL); - - rx_desc->read.pkt_addr = cpu_to_le64(rx_buf->dma); + dma = xsk_buff_xdp_get_dma(rx_buf->xdp); + rx_desc->read.pkt_addr = cpu_to_le64(dma); rx_desc->wb.status_error0 = 0; rx_desc++; @@ -653,32 +452,6 @@ ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, int count, } /** - * ice_alloc_rx_bufs_fast_zc - allocate zero copy bufs in the hot path - * @rx_ring: Rx ring - * @count: number of bufs to allocate - * - * Returns false on success, true on failure. - */ -static bool ice_alloc_rx_bufs_fast_zc(struct ice_ring *rx_ring, u16 count) -{ - return ice_alloc_rx_bufs_zc(rx_ring, count, - ice_alloc_buf_fast_zc); -} - -/** - * ice_alloc_rx_bufs_slow_zc - allocate zero copy bufs in the slow path - * @rx_ring: Rx ring - * @count: number of bufs to allocate - * - * Returns false on success, true on failure. - */ -bool ice_alloc_rx_bufs_slow_zc(struct ice_ring *rx_ring, u16 count) -{ - return ice_alloc_rx_bufs_zc(rx_ring, count, - ice_alloc_buf_slow_zc); -} - -/** * ice_bump_ntc - Bump the next_to_clean counter of an Rx ring * @rx_ring: Rx ring */ @@ -692,76 +465,21 @@ static void ice_bump_ntc(struct ice_ring *rx_ring) } /** - * ice_get_rx_buf_zc - Fetch the current Rx buffer - * @rx_ring: Rx ring - * @size: size of a buffer - * - * This function returns the current, received Rx buffer and does - * DMA synchronization. - * - * Returns a pointer to the received Rx buffer. - */ -static struct ice_rx_buf *ice_get_rx_buf_zc(struct ice_ring *rx_ring, int size) -{ - struct ice_rx_buf *rx_buf; - - rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean]; - - dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma, 0, - size, DMA_BIDIRECTIONAL); - - return rx_buf; -} - -/** - * ice_reuse_rx_buf_zc - reuse an Rx buffer - * @rx_ring: Rx ring - * @old_buf: The buffer to recycle - * - * This function recycles a finished Rx buffer, and places it on the recycle - * queue (next_to_alloc). - */ -static void -ice_reuse_rx_buf_zc(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf) -{ - unsigned long mask = (unsigned long)rx_ring->xsk_umem->chunk_mask; - u64 hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM; - u16 nta = rx_ring->next_to_alloc; - struct ice_rx_buf *new_buf; - - new_buf = &rx_ring->rx_buf[nta++]; - rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; - - new_buf->dma = old_buf->dma & mask; - new_buf->dma += hr; - - new_buf->addr = (void *)((unsigned long)old_buf->addr & mask); - new_buf->addr += hr; - - new_buf->handle = old_buf->handle & mask; - new_buf->handle += rx_ring->xsk_umem->headroom; - - old_buf->addr = NULL; -} - -/** * ice_construct_skb_zc - Create an sk_buff from zero-copy buffer * @rx_ring: Rx ring * @rx_buf: zero-copy Rx buffer - * @xdp: XDP buffer * * This function allocates a new skb from a zero-copy Rx buffer. * * Returns the skb on success, NULL on failure. 
*/ static struct sk_buff * -ice_construct_skb_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf, - struct xdp_buff *xdp) +ice_construct_skb_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf) { - unsigned int metasize = xdp->data - xdp->data_meta; - unsigned int datasize = xdp->data_end - xdp->data; - unsigned int datasize_hard = xdp->data_end - - xdp->data_hard_start; + unsigned int metasize = rx_buf->xdp->data - rx_buf->xdp->data_meta; + unsigned int datasize = rx_buf->xdp->data_end - rx_buf->xdp->data; + unsigned int datasize_hard = rx_buf->xdp->data_end - + rx_buf->xdp->data_hard_start; struct sk_buff *skb; skb = __napi_alloc_skb(&rx_ring->q_vector->napi, datasize_hard, @@ -769,13 +487,13 @@ ice_construct_skb_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf, if (unlikely(!skb)) return NULL; - skb_reserve(skb, xdp->data - xdp->data_hard_start); - memcpy(__skb_put(skb, datasize), xdp->data, datasize); + skb_reserve(skb, rx_buf->xdp->data - rx_buf->xdp->data_hard_start); + memcpy(__skb_put(skb, datasize), rx_buf->xdp->data, datasize); if (metasize) skb_metadata_set(skb, metasize); - ice_reuse_rx_buf_zc(rx_ring, rx_buf); - + xsk_buff_free(rx_buf->xdp); + rx_buf->xdp = NULL; return skb; } @@ -802,7 +520,6 @@ ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp) } act = bpf_prog_run_xdp(xdp_prog, xdp); - xdp->handle += xdp->data - xdp->data_hard_start; switch (act) { case XDP_PASS: break; @@ -840,13 +557,8 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget) { unsigned int total_rx_bytes = 0, total_rx_packets = 0; u16 cleaned_count = ICE_DESC_UNUSED(rx_ring); - struct xdp_umem *umem = rx_ring->xsk_umem; unsigned int xdp_xmit = 0; bool failure = false; - struct xdp_buff xdp; - - xdp.rxq = &rx_ring->xdp_rxq; - xdp.frame_sz = xsk_umem_xdp_frame_sz(umem); while (likely(total_rx_packets < (unsigned int)budget)) { union ice_32b_rx_flex_desc *rx_desc; @@ -858,8 +570,8 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget) u8 rx_ptype; if (cleaned_count >= ICE_RX_BUF_WRITE) { - failure |= ice_alloc_rx_bufs_fast_zc(rx_ring, - cleaned_count); + failure |= ice_alloc_rx_bufs_zc(rx_ring, + cleaned_count); cleaned_count = 0; } @@ -880,25 +592,19 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget) if (!size) break; - rx_buf = ice_get_rx_buf_zc(rx_ring, size); - if (!rx_buf->addr) - break; - xdp.data = rx_buf->addr; - xdp.data_meta = xdp.data; - xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM; - xdp.data_end = xdp.data + size; - xdp.handle = rx_buf->handle; + rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean]; + rx_buf->xdp->data_end = rx_buf->xdp->data + size; + xsk_buff_dma_sync_for_cpu(rx_buf->xdp); - xdp_res = ice_run_xdp_zc(rx_ring, &xdp); + xdp_res = ice_run_xdp_zc(rx_ring, rx_buf->xdp); if (xdp_res) { - if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) { + if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) xdp_xmit |= xdp_res; - rx_buf->addr = NULL; - } else { - ice_reuse_rx_buf_zc(rx_ring, rx_buf); - } + else + xsk_buff_free(rx_buf->xdp); + rx_buf->xdp = NULL; total_rx_bytes += size; total_rx_packets++; cleaned_count++; @@ -908,7 +614,7 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget) } /* XDP_PASS path */ - skb = ice_construct_skb_zc(rx_ring, rx_buf, &xdp); + skb = ice_construct_skb_zc(rx_ring, rx_buf); if (!skb) { rx_ring->rx_stats.alloc_buf_failed++; break; @@ -979,17 +685,16 @@ static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget) if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &desc)) break; - dma = 
xdp_umem_get_dma(xdp_ring->xsk_umem, desc.addr); - - dma_sync_single_for_device(xdp_ring->dev, dma, desc.len, - DMA_BIDIRECTIONAL); + dma = xsk_buff_raw_get_dma(xdp_ring->xsk_umem, desc.addr); + xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_umem, dma, + desc.len); tx_buf->bytecount = desc.len; tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_to_use); tx_desc->buf_addr = cpu_to_le64(dma); - tx_desc->cmd_type_offset_bsz = build_ctob(ICE_TXD_LAST_DESC_CMD, - 0, desc.len, 0); + tx_desc->cmd_type_offset_bsz = + ice_build_ctob(ICE_TXD_LAST_DESC_CMD, 0, desc.len, 0); xdp_ring->next_to_use++; if (xdp_ring->next_to_use == xdp_ring->count) @@ -1165,11 +870,10 @@ void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring) for (i = 0; i < rx_ring->count; i++) { struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i]; - if (!rx_buf->addr) + if (!rx_buf->xdp) continue; - xsk_umem_fq_reuse(rx_ring->xsk_umem, rx_buf->handle); - rx_buf->addr = NULL; + rx_buf->xdp = NULL; } } diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.h b/drivers/net/ethernet/intel/ice/ice_xsk.h index 8a4ba7c6d549..fc1a06b4df36 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.h +++ b/drivers/net/ethernet/intel/ice/ice_xsk.h @@ -10,11 +10,10 @@ struct ice_vsi; #ifdef CONFIG_XDP_SOCKETS int ice_xsk_umem_setup(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid); -void ice_zca_free(struct zero_copy_allocator *zca, unsigned long handle); int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget); bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget); int ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags); -bool ice_alloc_rx_bufs_slow_zc(struct ice_ring *rx_ring, u16 count); +bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count); bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi); void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring); void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring); @@ -27,12 +26,6 @@ ice_xsk_umem_setup(struct ice_vsi __always_unused *vsi, return -EOPNOTSUPP; } -static inline void -ice_zca_free(struct zero_copy_allocator __always_unused *zca, - unsigned long __always_unused handle) -{ -} - static inline int ice_clean_rx_irq_zc(struct ice_ring __always_unused *rx_ring, int __always_unused budget) @@ -48,8 +41,8 @@ ice_clean_tx_irq_zc(struct ice_ring __always_unused *xdp_ring, } static inline bool -ice_alloc_rx_bufs_slow_zc(struct ice_ring __always_unused *rx_ring, - u16 __always_unused count) +ice_alloc_rx_bufs_zc(struct ice_ring __always_unused *rx_ring, + u16 __always_unused count) { return false; } diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c index 79ee0a747260..3254737c07a3 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mac.c +++ b/drivers/net/ethernet/intel/igb/e1000_mac.c @@ -12,7 +12,7 @@ #include "igb.h" static s32 igb_set_default_fc(struct e1000_hw *hw); -static s32 igb_set_fc_watermarks(struct e1000_hw *hw); +static void igb_set_fc_watermarks(struct e1000_hw *hw); /** * igb_get_bus_info_pcie - Get PCIe bus information @@ -687,7 +687,7 @@ s32 igb_setup_link(struct e1000_hw *hw) wr32(E1000_FCTTV, hw->fc.pause_time); - ret_val = igb_set_fc_watermarks(hw); + igb_set_fc_watermarks(hw); out: @@ -723,9 +723,8 @@ void igb_config_collision_dist(struct e1000_hw *hw) * flow control XON frame transmission is enabled, then set XON frame * transmission as well. 
**/ -static s32 igb_set_fc_watermarks(struct e1000_hw *hw) +static void igb_set_fc_watermarks(struct e1000_hw *hw) { - s32 ret_val = 0; u32 fcrtl = 0, fcrth = 0; /* Set the flow control receive threshold registers. Normally, @@ -747,8 +746,6 @@ static s32 igb_set_fc_watermarks(struct e1000_hw *hw) } wr32(E1000_FCRTL, fcrtl); wr32(E1000_FCRTH, fcrth); - - return ret_val; } /** diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 39d3b76a6f5d..2cd003c5ad43 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -143,7 +143,8 @@ static int igb_get_link_ksettings(struct net_device *netdev, u32 speed; u32 supported, advertising; - status = rd32(E1000_STATUS); + status = pm_runtime_suspended(&adapter->pdev->dev) ? + 0 : rd32(E1000_STATUS); if (hw->phy.media_type == e1000_media_type_copper) { supported = (SUPPORTED_10baseT_Half | diff --git a/drivers/net/ethernet/intel/igc/Makefile b/drivers/net/ethernet/intel/igc/Makefile index 3652f211f351..1c3051db9085 100644 --- a/drivers/net/ethernet/intel/igc/Makefile +++ b/drivers/net/ethernet/intel/igc/Makefile @@ -8,4 +8,4 @@ obj-$(CONFIG_IGC) += igc.o igc-objs := igc_main.o igc_mac.o igc_i225.o igc_base.o igc_nvm.o igc_phy.o \ -igc_ethtool.o igc_ptp.o igc_dump.o igc_tsn.o +igc_diag.o igc_ethtool.o igc_ptp.o igc_dump.o igc_tsn.o diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h index 8ddc39482a8e..5dbc5a156626 100644 --- a/drivers/net/ethernet/intel/igc/igc.h +++ b/drivers/net/ethernet/intel/igc/igc.h @@ -16,8 +16,7 @@ #include "igc_hw.h" -/* forward declaration */ -void igc_set_ethtool_ops(struct net_device *); +void igc_ethtool_set_ops(struct net_device *); /* Transmit and receive queues */ #define IGC_MAX_RX_QUEUES 4 @@ -26,9 +25,14 @@ void igc_set_ethtool_ops(struct net_device *); #define MAX_Q_VECTORS 8 #define MAX_STD_JUMBO_FRAME_SIZE 9216 -#define MAX_ETYPE_FILTER (4 - 1) +#define MAX_ETYPE_FILTER 8 #define IGC_RETA_SIZE 128 +enum igc_mac_filter_type { + IGC_MAC_FILTER_TYPE_DST = 0, + IGC_MAC_FILTER_TYPE_SRC +}; + struct igc_tx_queue_stats { u64 packets; u64 bytes; @@ -183,21 +187,20 @@ struct igc_adapter { u32 rss_queues; u32 rss_indir_tbl_init; - /* RX network flow classification support */ - struct hlist_head nfc_filter_list; - unsigned int nfc_filter_count; - - /* lock for RX network flow classification filter */ - spinlock_t nfc_lock; - bool etype_bitmap[MAX_ETYPE_FILTER]; - - struct igc_mac_addr *mac_table; + /* Any access to elements in nfc_rule_list is protected by the + * nfc_rule_lock. 
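+ * A minimal usage sketch (illustrative only; rule is a struct igc_nfc_rule
+ * pointer and process() is a hypothetical per-rule helper):
+ *
+ *	mutex_lock(&adapter->nfc_rule_lock);
+ *	list_for_each_entry(rule, &adapter->nfc_rule_list, list)
+ *		process(rule);
+ *	mutex_unlock(&adapter->nfc_rule_lock);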
+ */ + struct mutex nfc_rule_lock; + struct list_head nfc_rule_list; + unsigned int nfc_rule_count; u8 rss_indir_tbl[IGC_RETA_SIZE]; unsigned long link_check_timeout; struct igc_info ei; + u32 test_icr; + struct ptp_clock *ptp_clock; struct ptp_clock_info ptp_caps; struct work_struct ptp_tx_work; @@ -215,6 +218,8 @@ struct igc_adapter { void igc_up(struct igc_adapter *adapter); void igc_down(struct igc_adapter *adapter); +int igc_open(struct net_device *netdev); +int igc_close(struct net_device *netdev); int igc_setup_tx_resources(struct igc_ring *ring); int igc_setup_rx_resources(struct igc_ring *ring); void igc_free_tx_resources(struct igc_ring *ring); @@ -227,10 +232,6 @@ void igc_write_rss_indir_tbl(struct igc_adapter *adapter); bool igc_has_link(struct igc_adapter *adapter); void igc_reset(struct igc_adapter *adapter); int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx); -int igc_add_mac_filter(struct igc_adapter *adapter, const u8 *addr, - const s8 queue, const u8 flags); -int igc_del_mac_filter(struct igc_adapter *adapter, const u8 *addr, - const u8 flags); void igc_update_stats(struct igc_adapter *adapter); /* igc_dump declarations */ @@ -441,40 +442,25 @@ enum igc_filter_match_flags { IGC_FILTER_FLAG_DST_MAC_ADDR = 0x8, }; -/* RX network flow classification data structure */ -struct igc_nfc_input { - /* Byte layout in order, all values with MSB first: - * match_flags - 1 byte - * etype - 2 bytes - * vlan_tci - 2 bytes - */ +struct igc_nfc_filter { u8 match_flags; - __be16 etype; - __be16 vlan_tci; + u16 etype; + u16 vlan_tci; u8 src_addr[ETH_ALEN]; u8 dst_addr[ETH_ALEN]; }; -struct igc_nfc_filter { - struct hlist_node nfc_node; - struct igc_nfc_input filter; - unsigned long cookie; - u16 etype_reg_index; - u16 sw_idx; +struct igc_nfc_rule { + struct list_head list; + struct igc_nfc_filter filter; + u32 location; u16 action; }; -struct igc_mac_addr { - u8 addr[ETH_ALEN]; - s8 queue; - u8 state; /* bitmask */ -}; - -#define IGC_MAC_STATE_DEFAULT 0x1 -#define IGC_MAC_STATE_IN_USE 0x2 -#define IGC_MAC_STATE_SRC_ADDR 0x4 - -#define IGC_MAX_RXNFC_FILTERS 16 +/* IGC supports a total of 32 NFC rules: 16 MAC address based, 8 VLAN priority + * based, and 8 ethertype based. 
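+ * (16 + 8 + 8 = 32, which is where the IGC_MAX_RXNFC_RULES value just
+ * below comes from.)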
+ */ +#define IGC_MAX_RXNFC_RULES 32 /* igc_desc_unused - calculate if we have unused descriptors */ static inline u16 igc_desc_unused(const struct igc_ring *ring) @@ -550,12 +536,11 @@ static inline s32 igc_read_phy_reg(struct igc_hw *hw, u32 offset, u16 *data) return 0; } -/* forward declaration */ void igc_reinit_locked(struct igc_adapter *); -int igc_add_filter(struct igc_adapter *adapter, - struct igc_nfc_filter *input); -int igc_erase_filter(struct igc_adapter *adapter, - struct igc_nfc_filter *input); +struct igc_nfc_rule *igc_get_nfc_rule(struct igc_adapter *adapter, + u32 location); +int igc_add_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule); +void igc_del_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule); void igc_ptp_init(struct igc_adapter *adapter); void igc_ptp_reset(struct igc_adapter *adapter); diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c index f7fb18d8d8f5..cc5a6cf531c7 100644 --- a/drivers/net/ethernet/intel/igc/igc_base.c +++ b/drivers/net/ethernet/intel/igc/igc_base.c @@ -26,7 +26,7 @@ static s32 igc_reset_hw_base(struct igc_hw *hw) */ ret_val = igc_disable_pcie_master(hw); if (ret_val) - hw_dbg("PCI-E Master disable polling has failed.\n"); + hw_dbg("PCI-E Master disable polling has failed\n"); hw_dbg("Masking off all interrupts\n"); wr32(IGC_IMC, 0xffffffff); @@ -177,7 +177,7 @@ static s32 igc_init_phy_params_base(struct igc_hw *hw) */ ret_val = hw->phy.ops.reset(hw); if (ret_val) { - hw_dbg("Error resetting the PHY.\n"); + hw_dbg("Error resetting the PHY\n"); goto out; } @@ -367,7 +367,7 @@ void igc_rx_fifo_flush_base(struct igc_hw *hw) } if (ms_wait == 10) - pr_debug("Queue disable timed out after 10ms\n"); + hw_dbg("Queue disable timed out after 10ms\n"); /* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all * incoming packets are rejected. Set enable and wait 2ms so that diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h index af0c03d77a39..186deb1d9375 100644 --- a/drivers/net/ethernet/intel/igc/igc_defines.h +++ b/drivers/net/ethernet/intel/igc/igc_defines.h @@ -47,7 +47,6 @@ /* Loop limit on how long we wait for auto-negotiation to complete */ #define COPPER_LINK_UP_LIMIT 10 #define PHY_AUTO_NEG_LIMIT 45 -#define PHY_FORCE_LIMIT 20 /* Number of 100 microseconds we wait for PCI Express master disable */ #define MASTER_DISABLE_TIMEOUT 800 @@ -63,6 +62,9 @@ * (RAR[15]) for our directed address used by controllers with * manageability enabled, allowing us room for 15 multicast addresses. */ +#define IGC_RAH_RAH_MASK 0x0000FFFF +#define IGC_RAH_ASEL_MASK 0x00030000 +#define IGC_RAH_ASEL_SRC_ADDR BIT(16) #define IGC_RAH_QSEL_MASK 0x000C0000 #define IGC_RAH_QSEL_SHIFT 18 #define IGC_RAH_QSEL_ENABLE BIT(28) @@ -164,11 +166,6 @@ /* For checksumming, the sum of all words in the NVM should equal 0xBABA. */ #define NVM_SUM 0xBABA - -#define NVM_PBA_OFFSET_0 8 -#define NVM_PBA_OFFSET_1 9 -#define NVM_RESERVED_WORD 0xFFFF -#define NVM_PBA_PTR_GUARD 0xFAFA #define NVM_WORD_SIZE_BASE_SHIFT 6 /* Collision related configuration parameters */ @@ -250,7 +247,6 @@ /* Interrupt Cause Set */ #define IGC_ICS_LSC IGC_ICR_LSC /* Link Status Change */ #define IGC_ICS_RXDMT0 IGC_ICR_RXDMT0 /* rx desc min. 
threshold */ -#define IGC_ICS_DRSTA IGC_ICR_DRSTA /* Device Reset Aserted */ #define IGC_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */ #define IGC_EITR_CNT_IGNR 0x80000000 /* Don't reset counters on write */ @@ -269,21 +265,13 @@ #define IGC_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ #define IGC_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ #define IGC_TXD_CMD_EOP 0x01000000 /* End of Packet */ -#define IGC_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ #define IGC_TXD_CMD_IC 0x04000000 /* Insert Checksum */ -#define IGC_TXD_CMD_RS 0x08000000 /* Report Status */ -#define IGC_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */ #define IGC_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */ #define IGC_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ -#define IGC_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */ #define IGC_TXD_STAT_DD 0x00000001 /* Descriptor Done */ -#define IGC_TXD_STAT_EC 0x00000002 /* Excess Collisions */ -#define IGC_TXD_STAT_LC 0x00000004 /* Late Collisions */ -#define IGC_TXD_STAT_TU 0x00000008 /* Transmit underrun */ #define IGC_TXD_CMD_TCP 0x01000000 /* TCP packet */ #define IGC_TXD_CMD_IP 0x02000000 /* IP packet */ #define IGC_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ -#define IGC_TXD_STAT_TC 0x00000004 /* Tx Underrun */ #define IGC_TXD_EXTCMD_TSTAMP 0x00000010 /* IEEE1588 Timestamp packet */ /* IPSec Encrypt Enable */ @@ -390,9 +378,6 @@ #define IGC_TSICR_INTERRUPTS IGC_TSICR_TXTS -/* PTP Queue Filter */ -#define IGC_ETQF_1588 BIT(30) - #define IGC_FTQF_VF_BP 0x00008000 #define IGC_FTQF_1588_TIME_STAMP 0x08000000 #define IGC_FTQF_MASK 0xF0000000 @@ -514,9 +499,9 @@ #define IGC_MAX_MAC_HDR_LEN 127 #define IGC_MAX_NETWORK_HDR_LEN 511 -#define IGC_VLAPQF_QUEUE_SEL(_n, q_idx) ((q_idx) << ((_n) * 4)) -#define IGC_VLAPQF_P_VALID(_n) (0x1 << (3 + (_n) * 4)) -#define IGC_VLAPQF_QUEUE_MASK 0x03 +#define IGC_VLANPQF_QSEL(_n, q_idx) ((q_idx) << ((_n) * 4)) +#define IGC_VLANPQF_VALID(_n) (0x1 << (3 + (_n) * 4)) +#define IGC_VLANPQF_QUEUE_MASK 0x03 #define IGC_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ #define IGC_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type:1=IPv4 */ diff --git a/drivers/net/ethernet/intel/igc/igc_diag.c b/drivers/net/ethernet/intel/igc/igc_diag.c new file mode 100644 index 000000000000..cc621970c0cd --- /dev/null +++ b/drivers/net/ethernet/intel/igc/igc_diag.c @@ -0,0 +1,186 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Intel Corporation */ + +#include "igc.h" +#include "igc_diag.h" + +static struct igc_reg_test reg_test[] = { + { IGC_FCAL, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IGC_FCAH, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { IGC_FCT, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { IGC_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IGC_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 }, + { IGC_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { IGC_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { IGC_FCRTH, 1, PATTERN_TEST, 0x0003FFF0, 0x0003FFF0 }, + { IGC_FCTTV, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { IGC_TIPG, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, + { IGC_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IGC_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 }, + { IGC_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { IGC_TDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { IGC_RCTL, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { IGC_RCTL, 1, SET_READ_TEST, 0x04CFB2FE, 0x003FFFFB }, + { IGC_RCTL, 1, SET_READ_TEST, 0x04CFB2FE, 0xFFFFFFFF }, + 
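+	/* The three RCTL entries above appear to probe the register in
+	 * stages: all zeros written under a full mask, then two different
+	 * write values checked against the writable-bit mask 0x04CFB2FE.
+	 */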
{ IGC_TCTL, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { IGC_RA, 16, TABLE64_TEST_LO, + 0xFFFFFFFF, 0xFFFFFFFF }, + { IGC_RA, 16, TABLE64_TEST_HI, + 0x900FFFFF, 0xFFFFFFFF }, + { IGC_MTA, 128, TABLE32_TEST, + 0xFFFFFFFF, 0xFFFFFFFF }, + { 0, 0, 0, 0} +}; + +static bool reg_pattern_test(struct igc_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + struct igc_hw *hw = &adapter->hw; + u32 pat, val, before; + static const u32 test_pattern[] = { + 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF + }; + + for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) { + before = rd32(reg); + wr32(reg, test_pattern[pat] & write); + val = rd32(reg); + if (val != (test_pattern[pat] & write & mask)) { + netdev_err(adapter->netdev, + "pattern test reg %04X failed: got 0x%08X expected 0x%08X", + reg, val, test_pattern[pat] & write & mask); + *data = reg; + wr32(reg, before); + return false; + } + wr32(reg, before); + } + return true; +} + +static bool reg_set_and_check(struct igc_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + struct igc_hw *hw = &adapter->hw; + u32 val, before; + + before = rd32(reg); + wr32(reg, write & mask); + val = rd32(reg); + if ((write & mask) != (val & mask)) { + netdev_err(adapter->netdev, + "set/check reg %04X test failed: got 0x%08X expected 0x%08X", + reg, (val & mask), (write & mask)); + *data = reg; + wr32(reg, before); + return false; + } + wr32(reg, before); + return true; +} + +bool igc_reg_test(struct igc_adapter *adapter, u64 *data) +{ + struct igc_reg_test *test = reg_test; + struct igc_hw *hw = &adapter->hw; + u32 value, before, after; + u32 i, toggle, b = false; + + /* Because the status register is such a special case, + * we handle it separately from the rest of the register + * tests. Some bits are read-only, some toggle, and some + * are writeable. + */ + toggle = 0x6800D3; + before = rd32(IGC_STATUS); + value = before & toggle; + wr32(IGC_STATUS, toggle); + after = rd32(IGC_STATUS) & toggle; + if (value != after) { + netdev_err(adapter->netdev, + "failed STATUS register test got: 0x%08X expected: 0x%08X", + after, value); + *data = 1; + return false; + } + /* restore previous status */ + wr32(IGC_STATUS, before); + + /* Perform the remainder of the register test, looping through + * the test table until we either fail or reach the null entry. 
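+ * The stride used for each array element follows the test type: 0x40
+ * bytes for PATTERN_TEST and SET_READ_TEST arrays, 8 bytes for the two
+ * halves of 64-bit tables (TABLE64_TEST_LO/HI, the HI half offset by 4),
+ * and 4 bytes for TABLE32_TEST.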
+ */ + while (test->reg) { + for (i = 0; i < test->array_len; i++) { + switch (test->test_type) { + case PATTERN_TEST: + b = reg_pattern_test(adapter, data, + test->reg + (i * 0x40), + test->mask, + test->write); + break; + case SET_READ_TEST: + b = reg_set_and_check(adapter, data, + test->reg + (i * 0x40), + test->mask, + test->write); + break; + case TABLE64_TEST_LO: + b = reg_pattern_test(adapter, data, + test->reg + (i * 8), + test->mask, + test->write); + break; + case TABLE64_TEST_HI: + b = reg_pattern_test(adapter, data, + test->reg + 4 + (i * 8), + test->mask, + test->write); + break; + case TABLE32_TEST: + b = reg_pattern_test(adapter, data, + test->reg + (i * 4), + test->mask, + test->write); + break; + } + if (!b) + return false; + } + test++; + } + *data = 0; + return true; +} + +bool igc_eeprom_test(struct igc_adapter *adapter, u64 *data) +{ + struct igc_hw *hw = &adapter->hw; + + *data = 0; + + if (hw->nvm.ops.validate(hw) != IGC_SUCCESS) { + *data = 1; + return false; + } + + return true; +} + +bool igc_link_test(struct igc_adapter *adapter, u64 *data) +{ + bool link_up; + + *data = 0; + + /* add delay to give enough time for autonegotiation to finish */ + if (adapter->hw.mac.autoneg) + ssleep(5); + + link_up = igc_has_link(adapter); + if (!link_up) { + *data = 1; + return false; + } + + return true; +} diff --git a/drivers/net/ethernet/intel/igc/igc_diag.h b/drivers/net/ethernet/intel/igc/igc_diag.h new file mode 100644 index 000000000000..600658e33bec --- /dev/null +++ b/drivers/net/ethernet/intel/igc/igc_diag.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2020 Intel Corporation */ + +bool igc_reg_test(struct igc_adapter *adapter, u64 *data); +bool igc_eeprom_test(struct igc_adapter *adapter, u64 *data); +bool igc_link_test(struct igc_adapter *adapter, u64 *data); + +struct igc_reg_test { + u16 reg; + u8 array_len; + u8 test_type; + u32 mask; + u32 write; +}; + +/* In the hardware, registers are laid out either singly, in arrays + * spaced 0x40 bytes apart, or in contiguous tables. We assume + * most tests take place on arrays or single registers (handled + * as a single-element array) and special-case the tables. + * Table tests are always pattern tests. + * + * We also make provision for some required setup steps by specifying + * registers to be written without any read-back testing. 
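+ * For example, the { IGC_RDBAL(0), 4, PATTERN_TEST, ... } entry in
+ * igc_diag.c walks RDBAL for queues 0-3 by stepping the register
+ * address in 0x40-byte increments.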
+ */ + +#define PATTERN_TEST 1 +#define SET_READ_TEST 2 +#define TABLE32_TEST 3 +#define TABLE64_TEST_LO 4 +#define TABLE64_TEST_HI 5 diff --git a/drivers/net/ethernet/intel/igc/igc_dump.c b/drivers/net/ethernet/intel/igc/igc_dump.c index 657ab50ae296..4b9ec7d0b727 100644 --- a/drivers/net/ethernet/intel/igc/igc_dump.c +++ b/drivers/net/ethernet/intel/igc/igc_dump.c @@ -35,10 +35,6 @@ static const struct igc_reg_info igc_reg_info_tbl[] = { {IGC_TDH(0), "TDH"}, {IGC_TDT(0), "TDT"}, {IGC_TXDCTL(0), "TXDCTL"}, - {IGC_TDFH, "TDFH"}, - {IGC_TDFT, "TDFT"}, - {IGC_TDFHS, "TDFHS"}, - {IGC_TDFPC, "TDFPC"}, /* List Terminator */ {} @@ -47,6 +43,7 @@ static const struct igc_reg_info igc_reg_info_tbl[] = { /* igc_regdump - register printout routine */ static void igc_regdump(struct igc_hw *hw, struct igc_reg_info *reginfo) { + struct net_device *dev = igc_get_hw_dev(hw); int n = 0; char rname[16]; u32 regs[8]; @@ -101,13 +98,14 @@ static void igc_regdump(struct igc_hw *hw, struct igc_reg_info *reginfo) regs[n] = rd32(IGC_TXDCTL(n)); break; default: - pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs)); + netdev_info(dev, "%-15s %08x\n", reginfo->name, + rd32(reginfo->ofs)); return; } snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]"); - pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1], - regs[2], regs[3]); + netdev_info(dev, "%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1], + regs[2], regs[3]); } /* igc_rings_dump - Tx-rings and Rx-rings */ @@ -125,39 +123,34 @@ void igc_rings_dump(struct igc_adapter *adapter) if (!netif_msg_hw(adapter)) return; - /* Print netdevice Info */ - if (netdev) { - dev_info(&adapter->pdev->dev, "Net device Info\n"); - pr_info("Device Name state trans_start\n"); - pr_info("%-15s %016lX %016lX\n", netdev->name, - netdev->state, dev_trans_start(netdev)); - } + netdev_info(netdev, "Device info: state %016lX trans_start %016lX\n", + netdev->state, dev_trans_start(netdev)); /* Print TX Ring Summary */ - if (!netdev || !netif_running(netdev)) + if (!netif_running(netdev)) goto exit; - dev_info(&adapter->pdev->dev, "TX Rings Summary\n"); - pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n"); + netdev_info(netdev, "TX Rings Summary\n"); + netdev_info(netdev, "Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n"); for (n = 0; n < adapter->num_tx_queues; n++) { struct igc_tx_buffer *buffer_info; tx_ring = adapter->tx_ring[n]; buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; - pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n", - n, tx_ring->next_to_use, tx_ring->next_to_clean, - (u64)dma_unmap_addr(buffer_info, dma), - dma_unmap_len(buffer_info, len), - buffer_info->next_to_watch, - (u64)buffer_info->time_stamp); + netdev_info(netdev, "%5d %5X %5X %016llX %04X %p %016llX\n", + n, tx_ring->next_to_use, tx_ring->next_to_clean, + (u64)dma_unmap_addr(buffer_info, dma), + dma_unmap_len(buffer_info, len), + buffer_info->next_to_watch, + (u64)buffer_info->time_stamp); } /* Print TX Rings */ if (!netif_msg_tx_done(adapter)) goto rx_ring_summary; - dev_info(&adapter->pdev->dev, "TX Rings Dump\n"); + netdev_info(netdev, "TX Rings Dump\n"); /* Transmit Descriptor Formats * @@ -172,10 +165,11 @@ void igc_rings_dump(struct igc_adapter *adapter) for (n = 0; n < adapter->num_tx_queues; n++) { tx_ring = adapter->tx_ring[n]; - pr_info("------------------------------------\n"); - pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index); - pr_info("------------------------------------\n"); - pr_info("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] 
[bi->dma ] leng ntw timestamp bi->skb\n"); + netdev_info(netdev, "------------------------------------\n"); + netdev_info(netdev, "TX QUEUE INDEX = %d\n", + tx_ring->queue_index); + netdev_info(netdev, "------------------------------------\n"); + netdev_info(netdev, "T [desc] [address 63:0 ] [PlPOCIStDDM Ln] [bi->dma ] leng ntw timestamp bi->skb\n"); for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { const char *next_desc; @@ -194,14 +188,14 @@ void igc_rings_dump(struct igc_adapter *adapter) else next_desc = ""; - pr_info("T [0x%03X] %016llX %016llX %016llX %04X %p %016llX %p%s\n", - i, le64_to_cpu(u0->a), - le64_to_cpu(u0->b), - (u64)dma_unmap_addr(buffer_info, dma), - dma_unmap_len(buffer_info, len), - buffer_info->next_to_watch, - (u64)buffer_info->time_stamp, - buffer_info->skb, next_desc); + netdev_info(netdev, "T [0x%03X] %016llX %016llX %016llX %04X %p %016llX %p%s\n", + i, le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + (u64)dma_unmap_addr(buffer_info, dma), + dma_unmap_len(buffer_info, len), + buffer_info->next_to_watch, + (u64)buffer_info->time_stamp, + buffer_info->skb, next_desc); if (netif_msg_pktdata(adapter) && buffer_info->skb) print_hex_dump(KERN_INFO, "", @@ -214,19 +208,19 @@ void igc_rings_dump(struct igc_adapter *adapter) /* Print RX Rings Summary */ rx_ring_summary: - dev_info(&adapter->pdev->dev, "RX Rings Summary\n"); - pr_info("Queue [NTU] [NTC]\n"); + netdev_info(netdev, "RX Rings Summary\n"); + netdev_info(netdev, "Queue [NTU] [NTC]\n"); for (n = 0; n < adapter->num_rx_queues; n++) { rx_ring = adapter->rx_ring[n]; - pr_info(" %5d %5X %5X\n", - n, rx_ring->next_to_use, rx_ring->next_to_clean); + netdev_info(netdev, "%5d %5X %5X\n", n, rx_ring->next_to_use, + rx_ring->next_to_clean); } /* Print RX Rings */ if (!netif_msg_rx_status(adapter)) goto exit; - dev_info(&adapter->pdev->dev, "RX Rings Dump\n"); + netdev_info(netdev, "RX Rings Dump\n"); /* Advanced Receive Descriptor (Read) Format * 63 1 0 @@ -251,11 +245,12 @@ rx_ring_summary: for (n = 0; n < adapter->num_rx_queues; n++) { rx_ring = adapter->rx_ring[n]; - pr_info("------------------------------------\n"); - pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index); - pr_info("------------------------------------\n"); - pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] [bi->dma ] [bi->skb] <-- Adv Rx Read format\n"); - pr_info("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n"); + netdev_info(netdev, "------------------------------------\n"); + netdev_info(netdev, "RX QUEUE INDEX = %d\n", + rx_ring->queue_index); + netdev_info(netdev, "------------------------------------\n"); + netdev_info(netdev, "R [desc] [ PktBuf A0] [ HeadBuf DD] [bi->dma ] [bi->skb] <-- Adv Rx Read format\n"); + netdev_info(netdev, "RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n"); for (i = 0; i < rx_ring->count; i++) { const char *next_desc; @@ -275,18 +270,18 @@ rx_ring_summary: if (staterr & IGC_RXD_STAT_DD) { /* Descriptor Done */ - pr_info("%s[0x%03X] %016llX %016llX ---------------- %s\n", - "RWB", i, - le64_to_cpu(u0->a), - le64_to_cpu(u0->b), - next_desc); + netdev_info(netdev, "%s[0x%03X] %016llX %016llX ---------------- %s\n", + "RWB", i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + next_desc); } else { - pr_info("%s[0x%03X] %016llX %016llX %016llX %s\n", - "R ", i, - le64_to_cpu(u0->a), - le64_to_cpu(u0->b), - (u64)buffer_info->dma, - next_desc); + netdev_info(netdev, "%s[0x%03X] %016llX %016llX %016llX %s\n", + "R ", i, 
+ le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + (u64)buffer_info->dma, + next_desc); if (netif_msg_pktdata(adapter) && buffer_info->dma && buffer_info->page) { @@ -314,8 +309,8 @@ void igc_regs_dump(struct igc_adapter *adapter) struct igc_reg_info *reginfo; /* Print Registers */ - dev_info(&adapter->pdev->dev, "Register Dump\n"); - pr_info(" Register Name Value\n"); + netdev_info(adapter->netdev, "Register Dump\n"); + netdev_info(adapter->netdev, "Register Name Value\n"); for (reginfo = (struct igc_reg_info *)igc_reg_info_tbl; reginfo->name; reginfo++) { igc_regdump(hw, reginfo); diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c index 0a8c4a7412a4..a938ec8db681 100644 --- a/drivers/net/ethernet/intel/igc/igc_ethtool.c +++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c @@ -6,6 +6,7 @@ #include <linux/pm_runtime.h> #include "igc.h" +#include "igc_diag.h" /* forward declaration */ struct igc_stats { @@ -123,8 +124,8 @@ static const char igc_priv_flags_strings[][ETH_GSTRING_LEN] = { #define IGC_PRIV_FLAGS_STR_LEN ARRAY_SIZE(igc_priv_flags_strings) -static void igc_get_drvinfo(struct net_device *netdev, - struct ethtool_drvinfo *drvinfo) +static void igc_ethtool_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) { struct igc_adapter *adapter = netdev_priv(netdev); @@ -138,13 +139,13 @@ static void igc_get_drvinfo(struct net_device *netdev, drvinfo->n_priv_flags = IGC_PRIV_FLAGS_STR_LEN; } -static int igc_get_regs_len(struct net_device *netdev) +static int igc_ethtool_get_regs_len(struct net_device *netdev) { return IGC_REGS_LEN * sizeof(u32); } -static void igc_get_regs(struct net_device *netdev, - struct ethtool_regs *regs, void *p) +static void igc_ethtool_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) { struct igc_adapter *adapter = netdev_priv(netdev); struct igc_hw *hw = &adapter->hw; @@ -315,9 +316,15 @@ static void igc_get_regs(struct net_device *netdev, regs_buff[172 + i] = rd32(IGC_RAL(i)); for (i = 0; i < 16; i++) regs_buff[188 + i] = rd32(IGC_RAH(i)); + + regs_buff[204] = rd32(IGC_VLANPQF); + + for (i = 0; i < 8; i++) + regs_buff[205 + i] = rd32(IGC_ETQF(i)); } -static void igc_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +static void igc_ethtool_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) { struct igc_adapter *adapter = netdev_priv(netdev); @@ -348,7 +355,8 @@ static void igc_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) wol->wolopts |= WAKE_PHY; } -static int igc_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +static int igc_ethtool_set_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) { struct igc_adapter *adapter = netdev_priv(netdev); @@ -376,21 +384,21 @@ static int igc_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) return 0; } -static u32 igc_get_msglevel(struct net_device *netdev) +static u32 igc_ethtool_get_msglevel(struct net_device *netdev) { struct igc_adapter *adapter = netdev_priv(netdev); return adapter->msg_enable; } -static void igc_set_msglevel(struct net_device *netdev, u32 data) +static void igc_ethtool_set_msglevel(struct net_device *netdev, u32 data) { struct igc_adapter *adapter = netdev_priv(netdev); adapter->msg_enable = data; } -static int igc_nway_reset(struct net_device *netdev) +static int igc_ethtool_nway_reset(struct net_device *netdev) { struct igc_adapter *adapter = netdev_priv(netdev); @@ -399,7 +407,7 @@ static int igc_nway_reset(struct 
net_device *netdev) return 0; } -static u32 igc_get_link(struct net_device *netdev) +static u32 igc_ethtool_get_link(struct net_device *netdev) { struct igc_adapter *adapter = netdev_priv(netdev); struct igc_mac_info *mac = &adapter->hw.mac; @@ -416,15 +424,15 @@ static u32 igc_get_link(struct net_device *netdev) return igc_has_link(adapter); } -static int igc_get_eeprom_len(struct net_device *netdev) +static int igc_ethtool_get_eeprom_len(struct net_device *netdev) { struct igc_adapter *adapter = netdev_priv(netdev); return adapter->hw.nvm.word_size * 2; } -static int igc_get_eeprom(struct net_device *netdev, - struct ethtool_eeprom *eeprom, u8 *bytes) +static int igc_ethtool_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) { struct igc_adapter *adapter = netdev_priv(netdev); struct igc_hw *hw = &adapter->hw; @@ -470,8 +478,8 @@ static int igc_get_eeprom(struct net_device *netdev, return ret_val; } -static int igc_set_eeprom(struct net_device *netdev, - struct ethtool_eeprom *eeprom, u8 *bytes) +static int igc_ethtool_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) { struct igc_adapter *adapter = netdev_priv(netdev); struct igc_hw *hw = &adapter->hw; @@ -538,8 +546,8 @@ static int igc_set_eeprom(struct net_device *netdev, return ret_val; } -static void igc_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) +static void igc_ethtool_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) { struct igc_adapter *adapter = netdev_priv(netdev); @@ -549,8 +557,8 @@ static void igc_get_ringparam(struct net_device *netdev, ring->tx_pending = adapter->tx_ring_count; } -static int igc_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) +static int igc_ethtool_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) { struct igc_adapter *adapter = netdev_priv(netdev); struct igc_ring *temp_ring; @@ -664,8 +672,8 @@ clear_reset: return err; } -static void igc_get_pauseparam(struct net_device *netdev, - struct ethtool_pauseparam *pause) +static void igc_ethtool_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) { struct igc_adapter *adapter = netdev_priv(netdev); struct igc_hw *hw = &adapter->hw; @@ -683,8 +691,8 @@ static void igc_get_pauseparam(struct net_device *netdev, } } -static int igc_set_pauseparam(struct net_device *netdev, - struct ethtool_pauseparam *pause) +static int igc_ethtool_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) { struct igc_adapter *adapter = netdev_priv(netdev); struct igc_hw *hw = &adapter->hw; @@ -723,7 +731,8 @@ static int igc_set_pauseparam(struct net_device *netdev, return retval; } -static void igc_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +static void igc_ethtool_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) { struct igc_adapter *adapter = netdev_priv(netdev); u8 *p = data; @@ -774,7 +783,7 @@ static void igc_get_strings(struct net_device *netdev, u32 stringset, u8 *data) } } -static int igc_get_sset_count(struct net_device *netdev, int sset) +static int igc_ethtool_get_sset_count(struct net_device *netdev, int sset) { switch (sset) { case ETH_SS_STATS: @@ -788,7 +797,7 @@ static int igc_get_sset_count(struct net_device *netdev, int sset) } } -static void igc_get_ethtool_stats(struct net_device *netdev, +static void igc_ethtool_get_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) { struct 
igc_adapter *adapter = netdev_priv(netdev); @@ -844,8 +853,8 @@ static void igc_get_ethtool_stats(struct net_device *netdev, spin_unlock(&adapter->stats64_lock); } -static int igc_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) +static int igc_ethtool_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) { struct igc_adapter *adapter = netdev_priv(netdev); @@ -864,8 +873,8 @@ static int igc_get_coalesce(struct net_device *netdev, return 0; } -static int igc_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) +static int igc_ethtool_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) { struct igc_adapter *adapter = netdev_priv(netdev); int i; @@ -922,81 +931,83 @@ static int igc_set_coalesce(struct net_device *netdev, } #define ETHER_TYPE_FULL_MASK ((__force __be16)~0) -static int igc_get_ethtool_nfc_entry(struct igc_adapter *adapter, - struct ethtool_rxnfc *cmd) +static int igc_ethtool_get_nfc_rule(struct igc_adapter *adapter, + struct ethtool_rxnfc *cmd) { struct ethtool_rx_flow_spec *fsp = &cmd->fs; - struct igc_nfc_filter *rule = NULL; + struct igc_nfc_rule *rule = NULL; - /* report total rule count */ - cmd->data = IGC_MAX_RXNFC_FILTERS; + cmd->data = IGC_MAX_RXNFC_RULES; - hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) { - if (fsp->location <= rule->sw_idx) - break; + mutex_lock(&adapter->nfc_rule_lock); + + rule = igc_get_nfc_rule(adapter, fsp->location); + if (!rule) + goto out; + + fsp->flow_type = ETHER_FLOW; + fsp->ring_cookie = rule->action; + + if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { + fsp->h_u.ether_spec.h_proto = htons(rule->filter.etype); + fsp->m_u.ether_spec.h_proto = ETHER_TYPE_FULL_MASK; } - if (!rule || fsp->location != rule->sw_idx) - return -EINVAL; + if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { + fsp->flow_type |= FLOW_EXT; + fsp->h_ext.vlan_tci = htons(rule->filter.vlan_tci); + fsp->m_ext.vlan_tci = htons(VLAN_PRIO_MASK); + } - if (rule->filter.match_flags) { - fsp->flow_type = ETHER_FLOW; - fsp->ring_cookie = rule->action; - if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { - fsp->h_u.ether_spec.h_proto = rule->filter.etype; - fsp->m_u.ether_spec.h_proto = ETHER_TYPE_FULL_MASK; - } - if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { - fsp->flow_type |= FLOW_EXT; - fsp->h_ext.vlan_tci = rule->filter.vlan_tci; - fsp->m_ext.vlan_tci = htons(VLAN_PRIO_MASK); - } - if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) { - ether_addr_copy(fsp->h_u.ether_spec.h_dest, - rule->filter.dst_addr); - /* As we only support matching by the full - * mask, return the mask to userspace - */ - eth_broadcast_addr(fsp->m_u.ether_spec.h_dest); - } - if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) { - ether_addr_copy(fsp->h_u.ether_spec.h_source, - rule->filter.src_addr); - /* As we only support matching by the full - * mask, return the mask to userspace - */ - eth_broadcast_addr(fsp->m_u.ether_spec.h_source); - } + if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) { + ether_addr_copy(fsp->h_u.ether_spec.h_dest, + rule->filter.dst_addr); + eth_broadcast_addr(fsp->m_u.ether_spec.h_dest); + } - return 0; + if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) { + ether_addr_copy(fsp->h_u.ether_spec.h_source, + rule->filter.src_addr); + eth_broadcast_addr(fsp->m_u.ether_spec.h_source); } + + mutex_unlock(&adapter->nfc_rule_lock); + return 0; + +out: + mutex_unlock(&adapter->nfc_rule_lock); return -EINVAL; 
} -static int igc_get_ethtool_nfc_all(struct igc_adapter *adapter, - struct ethtool_rxnfc *cmd, - u32 *rule_locs) +static int igc_ethtool_get_nfc_rules(struct igc_adapter *adapter, + struct ethtool_rxnfc *cmd, + u32 *rule_locs) { - struct igc_nfc_filter *rule; + struct igc_nfc_rule *rule; int cnt = 0; - /* report total rule count */ - cmd->data = IGC_MAX_RXNFC_FILTERS; + cmd->data = IGC_MAX_RXNFC_RULES; + + mutex_lock(&adapter->nfc_rule_lock); - hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) { - if (cnt == cmd->rule_cnt) + list_for_each_entry(rule, &adapter->nfc_rule_list, list) { + if (cnt == cmd->rule_cnt) { + mutex_unlock(&adapter->nfc_rule_lock); return -EMSGSIZE; - rule_locs[cnt] = rule->sw_idx; + } + rule_locs[cnt] = rule->location; cnt++; } + mutex_unlock(&adapter->nfc_rule_lock); + cmd->rule_cnt = cnt; return 0; } -static int igc_get_rss_hash_opts(struct igc_adapter *adapter, - struct ethtool_rxnfc *cmd) +static int igc_ethtool_get_rss_hash_opts(struct igc_adapter *adapter, + struct ethtool_rxnfc *cmd) { cmd->data = 0; @@ -1045,41 +1056,33 @@ static int igc_get_rss_hash_opts(struct igc_adapter *adapter, return 0; } -static int igc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, - u32 *rule_locs) +static int igc_ethtool_get_rxnfc(struct net_device *dev, + struct ethtool_rxnfc *cmd, u32 *rule_locs) { struct igc_adapter *adapter = netdev_priv(dev); - int ret = -EOPNOTSUPP; switch (cmd->cmd) { case ETHTOOL_GRXRINGS: cmd->data = adapter->num_rx_queues; - ret = 0; - break; + return 0; case ETHTOOL_GRXCLSRLCNT: - cmd->rule_cnt = adapter->nfc_filter_count; - ret = 0; - break; + cmd->rule_cnt = adapter->nfc_rule_count; + return 0; case ETHTOOL_GRXCLSRULE: - ret = igc_get_ethtool_nfc_entry(adapter, cmd); - break; + return igc_ethtool_get_nfc_rule(adapter, cmd); case ETHTOOL_GRXCLSRLALL: - ret = igc_get_ethtool_nfc_all(adapter, cmd, rule_locs); - break; + return igc_ethtool_get_nfc_rules(adapter, cmd, rule_locs); case ETHTOOL_GRXFH: - ret = igc_get_rss_hash_opts(adapter, cmd); - break; + return igc_ethtool_get_rss_hash_opts(adapter, cmd); default: - break; + return -EOPNOTSUPP; } - - return ret; } #define UDP_RSS_FLAGS (IGC_FLAG_RSS_FIELD_IPV4_UDP | \ IGC_FLAG_RSS_FIELD_IPV6_UDP) -static int igc_set_rss_hash_opt(struct igc_adapter *adapter, - struct ethtool_rxnfc *nfc) +static int igc_ethtool_set_rss_hash_opt(struct igc_adapter *adapter, + struct ethtool_rxnfc *nfc) { u32 flags = adapter->flags; @@ -1154,8 +1157,8 @@ static int igc_set_rss_hash_opt(struct igc_adapter *adapter, if ((flags & UDP_RSS_FLAGS) && !(adapter->flags & UDP_RSS_FLAGS)) - dev_err(&adapter->pdev->dev, - "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n"); + netdev_err(adapter->netdev, + "Enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n"); adapter->flags = flags; @@ -1180,338 +1183,184 @@ static int igc_set_rss_hash_opt(struct igc_adapter *adapter, return 0; } -static int igc_rxnfc_write_etype_filter(struct igc_adapter *adapter, - struct igc_nfc_filter *input) +static void igc_ethtool_init_nfc_rule(struct igc_nfc_rule *rule, + const struct ethtool_rx_flow_spec *fsp) { - struct igc_hw *hw = &adapter->hw; - u8 i; - u32 etqf; - u16 etype; - - /* find an empty etype filter register */ - for (i = 0; i < MAX_ETYPE_FILTER; ++i) { - if (!adapter->etype_bitmap[i]) - break; - } - if (i == MAX_ETYPE_FILTER) { - dev_err(&adapter->pdev->dev, "ethtool -N: etype filters are all used.\n"); - return -EINVAL; - } - - adapter->etype_bitmap[i] = 
true; - - etqf = rd32(IGC_ETQF(i)); - etype = ntohs(input->filter.etype & ETHER_TYPE_FULL_MASK); - - etqf |= IGC_ETQF_FILTER_ENABLE; - etqf &= ~IGC_ETQF_ETYPE_MASK; - etqf |= (etype & IGC_ETQF_ETYPE_MASK); + INIT_LIST_HEAD(&rule->list); - etqf &= ~IGC_ETQF_QUEUE_MASK; - etqf |= ((input->action << IGC_ETQF_QUEUE_SHIFT) - & IGC_ETQF_QUEUE_MASK); - etqf |= IGC_ETQF_QUEUE_ENABLE; + rule->action = fsp->ring_cookie; + rule->location = fsp->location; - wr32(IGC_ETQF(i), etqf); - - input->etype_reg_index = i; - - return 0; -} - -static int igc_rxnfc_write_vlan_prio_filter(struct igc_adapter *adapter, - struct igc_nfc_filter *input) -{ - struct igc_hw *hw = &adapter->hw; - u8 vlan_priority; - u16 queue_index; - u32 vlapqf; - - vlapqf = rd32(IGC_VLAPQF); - vlan_priority = (ntohs(input->filter.vlan_tci) & VLAN_PRIO_MASK) - >> VLAN_PRIO_SHIFT; - queue_index = (vlapqf >> (vlan_priority * 4)) & IGC_VLAPQF_QUEUE_MASK; - - /* check whether this vlan prio is already set */ - if (vlapqf & IGC_VLAPQF_P_VALID(vlan_priority) && - queue_index != input->action) { - dev_err(&adapter->pdev->dev, "ethtool rxnfc set vlan prio filter failed.\n"); - return -EEXIST; - } - - vlapqf |= IGC_VLAPQF_P_VALID(vlan_priority); - vlapqf |= IGC_VLAPQF_QUEUE_SEL(vlan_priority, input->action); - - wr32(IGC_VLAPQF, vlapqf); - - return 0; -} - -int igc_add_filter(struct igc_adapter *adapter, struct igc_nfc_filter *input) -{ - struct igc_hw *hw = &adapter->hw; - int err = -EINVAL; - - if (hw->mac.type == igc_i225 && - !(input->filter.match_flags & ~IGC_FILTER_FLAG_SRC_MAC_ADDR)) { - dev_err(&adapter->pdev->dev, - "i225 doesn't support flow classification rules specifying only source addresses.\n"); - return -EOPNOTSUPP; + if ((fsp->flow_type & FLOW_EXT) && fsp->m_ext.vlan_tci) { + rule->filter.vlan_tci = ntohs(fsp->h_ext.vlan_tci); + rule->filter.match_flags |= IGC_FILTER_FLAG_VLAN_TCI; } - if (input->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { - err = igc_rxnfc_write_etype_filter(adapter, input); - if (err) - return err; + if (fsp->m_u.ether_spec.h_proto == ETHER_TYPE_FULL_MASK) { + rule->filter.etype = ntohs(fsp->h_u.ether_spec.h_proto); + rule->filter.match_flags = IGC_FILTER_FLAG_ETHER_TYPE; } - if (input->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) { - err = igc_add_mac_filter(adapter, input->filter.dst_addr, - input->action, 0); - if (err) - return err; + /* Both source and destination address filters only support the full + * mask. 
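+ * A partially-masked address therefore does not set its match flag:
+ * the field is silently ignored here, and a rule left with no match
+ * flags at all is rejected later by igc_ethtool_check_nfc_rule()
+ * with -EINVAL.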
+ */ + if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_source)) { + rule->filter.match_flags |= IGC_FILTER_FLAG_SRC_MAC_ADDR; + ether_addr_copy(rule->filter.src_addr, + fsp->h_u.ether_spec.h_source); } - if (input->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) { - err = igc_add_mac_filter(adapter, input->filter.src_addr, - input->action, - IGC_MAC_STATE_SRC_ADDR); - if (err) - return err; + if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_dest)) { + rule->filter.match_flags |= IGC_FILTER_FLAG_DST_MAC_ADDR; + ether_addr_copy(rule->filter.dst_addr, + fsp->h_u.ether_spec.h_dest); } - - if (input->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) - err = igc_rxnfc_write_vlan_prio_filter(adapter, input); - - return err; -} - -static void igc_clear_etype_filter_regs(struct igc_adapter *adapter, - u16 reg_index) -{ - struct igc_hw *hw = &adapter->hw; - u32 etqf = rd32(IGC_ETQF(reg_index)); - - etqf &= ~IGC_ETQF_QUEUE_ENABLE; - etqf &= ~IGC_ETQF_QUEUE_MASK; - etqf &= ~IGC_ETQF_FILTER_ENABLE; - - wr32(IGC_ETQF(reg_index), etqf); - - adapter->etype_bitmap[reg_index] = false; -} - -static void igc_clear_vlan_prio_filter(struct igc_adapter *adapter, - u16 vlan_tci) -{ - struct igc_hw *hw = &adapter->hw; - u8 vlan_priority; - u32 vlapqf; - - vlan_priority = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; - - vlapqf = rd32(IGC_VLAPQF); - vlapqf &= ~IGC_VLAPQF_P_VALID(vlan_priority); - vlapqf &= ~IGC_VLAPQF_QUEUE_SEL(vlan_priority, - IGC_VLAPQF_QUEUE_MASK); - - wr32(IGC_VLAPQF, vlapqf); } -int igc_erase_filter(struct igc_adapter *adapter, struct igc_nfc_filter *input) +/** + * igc_ethtool_check_nfc_rule() - Check if NFC rule is valid + * @adapter: Pointer to adapter + * @rule: Rule under evaluation + * + * The driver doesn't support rules with multiple matches so if more than + * one bit in filter flags is set, @rule is considered invalid. + * + * Also, if there is already another rule with the same filter in a different + * location, @rule is considered invalid. + * + * Context: Expects adapter->nfc_rule_lock to be held by caller. + * + * Return: 0 in case of success, negative errno code otherwise. 
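+ *
+ * Example: the single-match restriction is enforced with a power-of-two
+ * test on the flags byte (a sketch using this driver's flag names):
+ *
+ *   u8 flags = IGC_FILTER_FLAG_ETHER_TYPE | IGC_FILTER_FLAG_VLAN_TCI;
+ *   flags & (flags - 1);   /* non-zero, so the rule gets -EOPNOTSUPP */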
+ */ +static int igc_ethtool_check_nfc_rule(struct igc_adapter *adapter, + struct igc_nfc_rule *rule) { - if (input->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) - igc_clear_etype_filter_regs(adapter, - input->etype_reg_index); + struct net_device *dev = adapter->netdev; + u8 flags = rule->filter.match_flags; + struct igc_nfc_rule *tmp; - if (input->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) - igc_clear_vlan_prio_filter(adapter, - ntohs(input->filter.vlan_tci)); - - if (input->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) - igc_del_mac_filter(adapter, input->filter.src_addr, - IGC_MAC_STATE_SRC_ADDR); - - if (input->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) - igc_del_mac_filter(adapter, input->filter.dst_addr, 0); - - return 0; -} - -static int igc_update_ethtool_nfc_entry(struct igc_adapter *adapter, - struct igc_nfc_filter *input, - u16 sw_idx) -{ - struct igc_nfc_filter *rule, *parent; - int err = -EINVAL; - - parent = NULL; - rule = NULL; - - hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) { - /* hash found, or no matching entry */ - if (rule->sw_idx >= sw_idx) - break; - parent = rule; + if (!flags) { + netdev_dbg(dev, "Rule with no match\n"); + return -EINVAL; } - /* if there is an old rule occupying our place remove it */ - if (rule && rule->sw_idx == sw_idx) { - if (!input) - err = igc_erase_filter(adapter, rule); - - hlist_del(&rule->nfc_node); - kfree(rule); - adapter->nfc_filter_count--; + if (flags & (flags - 1)) { + netdev_dbg(dev, "Rule with multiple matches not supported\n"); + return -EOPNOTSUPP; } - /* If no input this was a delete, err should be 0 if a rule was - * successfully found and removed from the list else -EINVAL - */ - if (!input) - return err; - - /* initialize node */ - INIT_HLIST_NODE(&input->nfc_node); - - /* add filter to the list */ - if (parent) - hlist_add_behind(&input->nfc_node, &parent->nfc_node); - else - hlist_add_head(&input->nfc_node, &adapter->nfc_filter_list); - - /* update counts */ - adapter->nfc_filter_count++; + list_for_each_entry(tmp, &adapter->nfc_rule_list, list) { + if (!memcmp(&rule->filter, &tmp->filter, + sizeof(rule->filter)) && + tmp->location != rule->location) { + netdev_dbg(dev, "Rule already exists\n"); + return -EEXIST; + } + } return 0; } -static int igc_add_ethtool_nfc_entry(struct igc_adapter *adapter, - struct ethtool_rxnfc *cmd) +static int igc_ethtool_add_nfc_rule(struct igc_adapter *adapter, + struct ethtool_rxnfc *cmd) { struct net_device *netdev = adapter->netdev; struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs; - struct igc_nfc_filter *input, *rule; - int err = 0; + struct igc_nfc_rule *rule, *old_rule; + int err; - if (!(netdev->hw_features & NETIF_F_NTUPLE)) + if (!(netdev->hw_features & NETIF_F_NTUPLE)) { + netdev_dbg(netdev, "N-tuple filters disabled\n"); return -EOPNOTSUPP; + } - /* Don't allow programming if the action is a queue greater than - * the number of online Rx queues. 
- */ - if (fsp->ring_cookie == RX_CLS_FLOW_DISC || - fsp->ring_cookie >= adapter->num_rx_queues) { - dev_err(&adapter->pdev->dev, "ethtool -N: The specified action is invalid\n"); - return -EINVAL; + if ((fsp->flow_type & ~FLOW_EXT) != ETHER_FLOW) { + netdev_dbg(netdev, "Only ethernet flow type is supported\n"); + return -EOPNOTSUPP; } - /* Don't allow indexes to exist outside of available space */ - if (fsp->location >= IGC_MAX_RXNFC_FILTERS) { - dev_err(&adapter->pdev->dev, "Location out of range\n"); - return -EINVAL; + if ((fsp->flow_type & FLOW_EXT) && + fsp->m_ext.vlan_tci != htons(VLAN_PRIO_MASK)) { + netdev_dbg(netdev, "VLAN mask not supported\n"); + return -EOPNOTSUPP; } - if ((fsp->flow_type & ~FLOW_EXT) != ETHER_FLOW) + if (fsp->ring_cookie >= adapter->num_rx_queues) { + netdev_dbg(netdev, "Invalid action\n"); return -EINVAL; - - input = kzalloc(sizeof(*input), GFP_KERNEL); - if (!input) - return -ENOMEM; - - if (fsp->m_u.ether_spec.h_proto == ETHER_TYPE_FULL_MASK) { - input->filter.etype = fsp->h_u.ether_spec.h_proto; - input->filter.match_flags = IGC_FILTER_FLAG_ETHER_TYPE; } - /* Only support matching addresses by the full mask */ - if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_source)) { - input->filter.match_flags |= IGC_FILTER_FLAG_SRC_MAC_ADDR; - ether_addr_copy(input->filter.src_addr, - fsp->h_u.ether_spec.h_source); + if (fsp->location >= IGC_MAX_RXNFC_RULES) { + netdev_dbg(netdev, "Invalid location\n"); + return -EINVAL; } - /* Only support matching addresses by the full mask */ - if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_dest)) { - input->filter.match_flags |= IGC_FILTER_FLAG_DST_MAC_ADDR; - ether_addr_copy(input->filter.dst_addr, - fsp->h_u.ether_spec.h_dest); - } + rule = kzalloc(sizeof(*rule), GFP_KERNEL); + if (!rule) + return -ENOMEM; - if ((fsp->flow_type & FLOW_EXT) && fsp->m_ext.vlan_tci) { - if (fsp->m_ext.vlan_tci != htons(VLAN_PRIO_MASK)) { - err = -EINVAL; - goto err_out; - } - input->filter.vlan_tci = fsp->h_ext.vlan_tci; - input->filter.match_flags |= IGC_FILTER_FLAG_VLAN_TCI; - } + igc_ethtool_init_nfc_rule(rule, fsp); - input->action = fsp->ring_cookie; - input->sw_idx = fsp->location; + mutex_lock(&adapter->nfc_rule_lock); - spin_lock(&adapter->nfc_lock); + err = igc_ethtool_check_nfc_rule(adapter, rule); + if (err) + goto err; - hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) { - if (!memcmp(&input->filter, &rule->filter, - sizeof(input->filter))) { - err = -EEXIST; - dev_err(&adapter->pdev->dev, - "ethtool: this filter is already set\n"); - goto err_out_w_lock; - } - } + old_rule = igc_get_nfc_rule(adapter, fsp->location); + if (old_rule) + igc_del_nfc_rule(adapter, old_rule); - err = igc_add_filter(adapter, input); + err = igc_add_nfc_rule(adapter, rule); if (err) - goto err_out_w_lock; - - igc_update_ethtool_nfc_entry(adapter, input, input->sw_idx); + goto err; - spin_unlock(&adapter->nfc_lock); + mutex_unlock(&adapter->nfc_rule_lock); return 0; -err_out_w_lock: - spin_unlock(&adapter->nfc_lock); -err_out: - kfree(input); +err: + mutex_unlock(&adapter->nfc_rule_lock); + kfree(rule); return err; } -static int igc_del_ethtool_nfc_entry(struct igc_adapter *adapter, - struct ethtool_rxnfc *cmd) +static int igc_ethtool_del_nfc_rule(struct igc_adapter *adapter, + struct ethtool_rxnfc *cmd) { struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs; - int err; + struct igc_nfc_rule *rule; - spin_lock(&adapter->nfc_lock); - err = igc_update_ethtool_nfc_entry(adapter, NULL, fsp->location); - 
spin_unlock(&adapter->nfc_lock); + mutex_lock(&adapter->nfc_rule_lock); - return err; + rule = igc_get_nfc_rule(adapter, fsp->location); + if (!rule) { + mutex_unlock(&adapter->nfc_rule_lock); + return -EINVAL; + } + + igc_del_nfc_rule(adapter, rule); + + mutex_unlock(&adapter->nfc_rule_lock); + return 0; } -static int igc_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +static int igc_ethtool_set_rxnfc(struct net_device *dev, + struct ethtool_rxnfc *cmd) { struct igc_adapter *adapter = netdev_priv(dev); - int ret = -EOPNOTSUPP; switch (cmd->cmd) { case ETHTOOL_SRXFH: - ret = igc_set_rss_hash_opt(adapter, cmd); - break; + return igc_ethtool_set_rss_hash_opt(adapter, cmd); case ETHTOOL_SRXCLSRLINS: - ret = igc_add_ethtool_nfc_entry(adapter, cmd); - break; + return igc_ethtool_add_nfc_rule(adapter, cmd); case ETHTOOL_SRXCLSRLDEL: - ret = igc_del_ethtool_nfc_entry(adapter, cmd); + return igc_ethtool_del_nfc_rule(adapter, cmd); default: - break; + return -EOPNOTSUPP; } - - return ret; } void igc_write_rss_indir_tbl(struct igc_adapter *adapter) @@ -1536,13 +1385,13 @@ void igc_write_rss_indir_tbl(struct igc_adapter *adapter) } } -static u32 igc_get_rxfh_indir_size(struct net_device *netdev) +static u32 igc_ethtool_get_rxfh_indir_size(struct net_device *netdev) { return IGC_RETA_SIZE; } -static int igc_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, - u8 *hfunc) +static int igc_ethtool_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) { struct igc_adapter *adapter = netdev_priv(netdev); int i; @@ -1557,8 +1406,8 @@ static int igc_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, return 0; } -static int igc_set_rxfh(struct net_device *netdev, const u32 *indir, - const u8 *key, const u8 hfunc) +static int igc_ethtool_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) { struct igc_adapter *adapter = netdev_priv(netdev); u32 num_queues; @@ -1586,18 +1435,13 @@ static int igc_set_rxfh(struct net_device *netdev, const u32 *indir, return 0; } -static unsigned int igc_max_channels(struct igc_adapter *adapter) -{ - return igc_get_max_rss_queues(adapter); -} - -static void igc_get_channels(struct net_device *netdev, - struct ethtool_channels *ch) +static void igc_ethtool_get_channels(struct net_device *netdev, + struct ethtool_channels *ch) { struct igc_adapter *adapter = netdev_priv(netdev); /* Report maximum channels */ - ch->max_combined = igc_max_channels(adapter); + ch->max_combined = igc_get_max_rss_queues(adapter); /* Report info for other vector */ if (adapter->flags & IGC_FLAG_HAS_MSIX) { @@ -1608,8 +1452,8 @@ static void igc_get_channels(struct net_device *netdev, ch->combined_count = adapter->rss_queues; } -static int igc_set_channels(struct net_device *netdev, - struct ethtool_channels *ch) +static int igc_ethtool_set_channels(struct net_device *netdev, + struct ethtool_channels *ch) { struct igc_adapter *adapter = netdev_priv(netdev); unsigned int count = ch->combined_count; @@ -1624,7 +1468,7 @@ static int igc_set_channels(struct net_device *netdev, return -EINVAL; /* Verify the number of channels doesn't exceed hw limits */ - max_combined = igc_max_channels(adapter); + max_combined = igc_get_max_rss_queues(adapter); if (count > max_combined) return -EINVAL; @@ -1641,8 +1485,8 @@ static int igc_set_channels(struct net_device *netdev, return 0; } -static int igc_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *info) +static int igc_ethtool_get_ts_info(struct net_device *dev, + struct 
ethtool_ts_info *info) { struct igc_adapter *adapter = netdev_priv(dev); @@ -1674,7 +1518,7 @@ static int igc_get_ts_info(struct net_device *dev, } } -static u32 igc_get_priv_flags(struct net_device *netdev) +static u32 igc_ethtool_get_priv_flags(struct net_device *netdev) { struct igc_adapter *adapter = netdev_priv(netdev); u32 priv_flags = 0; @@ -1685,7 +1529,7 @@ static u32 igc_get_priv_flags(struct net_device *netdev) return priv_flags; } -static int igc_set_priv_flags(struct net_device *netdev, u32 priv_flags) +static int igc_ethtool_set_priv_flags(struct net_device *netdev, u32 priv_flags) { struct igc_adapter *adapter = netdev_priv(netdev); unsigned int flags = adapter->flags; @@ -1720,8 +1564,8 @@ static void igc_ethtool_complete(struct net_device *netdev) pm_runtime_put(&adapter->pdev->dev); } -static int igc_get_link_ksettings(struct net_device *netdev, - struct ethtool_link_ksettings *cmd) +static int igc_ethtool_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) { struct igc_adapter *adapter = netdev_priv(netdev); struct igc_hw *hw = &adapter->hw; @@ -1827,10 +1671,12 @@ static int igc_get_link_ksettings(struct net_device *netdev, return 0; } -static int igc_set_link_ksettings(struct net_device *netdev, - const struct ethtool_link_ksettings *cmd) +static int +igc_ethtool_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) { struct igc_adapter *adapter = netdev_priv(netdev); + struct net_device *dev = adapter->netdev; struct igc_hw *hw = &adapter->hw; u32 advertising; @@ -1838,8 +1684,7 @@ static int igc_set_link_ksettings(struct net_device *netdev, * cannot be changed */ if (igc_check_reset_block(hw)) { - dev_err(&adapter->pdev->dev, - "Cannot change link characteristics when reset is active.\n"); + netdev_err(dev, "Cannot change link characteristics when reset is active\n"); return -EINVAL; } @@ -1850,7 +1695,7 @@ static int igc_set_link_ksettings(struct net_device *netdev, if (cmd->base.eth_tp_mdix_ctrl) { if (cmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO && cmd->base.autoneg != AUTONEG_ENABLE) { - dev_err(&adapter->pdev->dev, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n"); + netdev_err(dev, "Forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n"); return -EINVAL; } } @@ -1867,9 +1712,7 @@ static int igc_set_link_ksettings(struct net_device *netdev, if (adapter->fc_autoneg) hw->fc.requested_mode = igc_fc_default; } else { - /* calling this overrides forced MDI setting */ - dev_info(&adapter->pdev->dev, - "Force mode currently not supported\n"); + netdev_info(dev, "Force mode currently not supported\n"); } /* MDI-X => 2; MDI => 1; Auto => 3 */ @@ -1896,46 +1739,105 @@ static int igc_set_link_ksettings(struct net_device *netdev, return 0; } +static void igc_ethtool_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + bool if_running = netif_running(netdev); + + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + netdev_info(adapter->netdev, "Offline testing starting"); + set_bit(__IGC_TESTING, &adapter->state); + + /* Link test performed before hardware reset so autoneg doesn't + * interfere with test result + */ + if (!igc_link_test(adapter, &data[TEST_LINK])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (if_running) + igc_close(netdev); + else + igc_reset(adapter); + + netdev_info(adapter->netdev, "Register testing starting"); + if 
(!igc_reg_test(adapter, &data[TEST_REG])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + igc_reset(adapter); + + netdev_info(adapter->netdev, "EEPROM testing starting"); + if (!igc_eeprom_test(adapter, &data[TEST_EEP])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + igc_reset(adapter); + + /* loopback and interrupt tests + * will be implemented in the future + */ + data[TEST_LOOP] = 0; + data[TEST_IRQ] = 0; + + clear_bit(__IGC_TESTING, &adapter->state); + if (if_running) + igc_open(netdev); + } else { + netdev_info(adapter->netdev, "Online testing starting"); + + /* register, eeprom, intr and loopback tests not run online */ + data[TEST_REG] = 0; + data[TEST_EEP] = 0; + data[TEST_IRQ] = 0; + data[TEST_LOOP] = 0; + + if (!igc_link_test(adapter, &data[TEST_LINK])) + eth_test->flags |= ETH_TEST_FL_FAILED; + } + + msleep_interruptible(4 * 1000); +} + static const struct ethtool_ops igc_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS, - .get_drvinfo = igc_get_drvinfo, - .get_regs_len = igc_get_regs_len, - .get_regs = igc_get_regs, - .get_wol = igc_get_wol, - .set_wol = igc_set_wol, - .get_msglevel = igc_get_msglevel, - .set_msglevel = igc_set_msglevel, - .nway_reset = igc_nway_reset, - .get_link = igc_get_link, - .get_eeprom_len = igc_get_eeprom_len, - .get_eeprom = igc_get_eeprom, - .set_eeprom = igc_set_eeprom, - .get_ringparam = igc_get_ringparam, - .set_ringparam = igc_set_ringparam, - .get_pauseparam = igc_get_pauseparam, - .set_pauseparam = igc_set_pauseparam, - .get_strings = igc_get_strings, - .get_sset_count = igc_get_sset_count, - .get_ethtool_stats = igc_get_ethtool_stats, - .get_coalesce = igc_get_coalesce, - .set_coalesce = igc_set_coalesce, - .get_rxnfc = igc_get_rxnfc, - .set_rxnfc = igc_set_rxnfc, - .get_rxfh_indir_size = igc_get_rxfh_indir_size, - .get_rxfh = igc_get_rxfh, - .set_rxfh = igc_set_rxfh, - .get_ts_info = igc_get_ts_info, - .get_channels = igc_get_channels, - .set_channels = igc_set_channels, - .get_priv_flags = igc_get_priv_flags, - .set_priv_flags = igc_set_priv_flags, + .get_drvinfo = igc_ethtool_get_drvinfo, + .get_regs_len = igc_ethtool_get_regs_len, + .get_regs = igc_ethtool_get_regs, + .get_wol = igc_ethtool_get_wol, + .set_wol = igc_ethtool_set_wol, + .get_msglevel = igc_ethtool_get_msglevel, + .set_msglevel = igc_ethtool_set_msglevel, + .nway_reset = igc_ethtool_nway_reset, + .get_link = igc_ethtool_get_link, + .get_eeprom_len = igc_ethtool_get_eeprom_len, + .get_eeprom = igc_ethtool_get_eeprom, + .set_eeprom = igc_ethtool_set_eeprom, + .get_ringparam = igc_ethtool_get_ringparam, + .set_ringparam = igc_ethtool_set_ringparam, + .get_pauseparam = igc_ethtool_get_pauseparam, + .set_pauseparam = igc_ethtool_set_pauseparam, + .get_strings = igc_ethtool_get_strings, + .get_sset_count = igc_ethtool_get_sset_count, + .get_ethtool_stats = igc_ethtool_get_stats, + .get_coalesce = igc_ethtool_get_coalesce, + .set_coalesce = igc_ethtool_set_coalesce, + .get_rxnfc = igc_ethtool_get_rxnfc, + .set_rxnfc = igc_ethtool_set_rxnfc, + .get_rxfh_indir_size = igc_ethtool_get_rxfh_indir_size, + .get_rxfh = igc_ethtool_get_rxfh, + .set_rxfh = igc_ethtool_set_rxfh, + .get_ts_info = igc_ethtool_get_ts_info, + .get_channels = igc_ethtool_get_channels, + .set_channels = igc_ethtool_set_channels, + .get_priv_flags = igc_ethtool_get_priv_flags, + .set_priv_flags = igc_ethtool_set_priv_flags, .begin = igc_ethtool_begin, .complete = igc_ethtool_complete, - .get_link_ksettings = igc_get_link_ksettings, - .set_link_ksettings = igc_set_link_ksettings, + .get_link_ksettings = 
igc_ethtool_get_link_ksettings, + .set_link_ksettings = igc_ethtool_set_link_ksettings, + .self_test = igc_ethtool_diag_test, }; -void igc_set_ethtool_ops(struct net_device *netdev) +void igc_ethtool_set_ops(struct net_device *netdev) { netdev->ethtool_ops = &igc_ethtool_ops; } diff --git a/drivers/net/ethernet/intel/igc/igc_mac.c b/drivers/net/ethernet/intel/igc/igc_mac.c index 12aa6b5fcb5d..410aeb01de5c 100644 --- a/drivers/net/ethernet/intel/igc/igc_mac.c +++ b/drivers/net/ethernet/intel/igc/igc_mac.c @@ -235,15 +235,14 @@ out: void igc_clear_hw_cntrs_base(struct igc_hw *hw) { rd32(IGC_CRCERRS); - rd32(IGC_SYMERRS); rd32(IGC_MPC); rd32(IGC_SCC); rd32(IGC_ECOL); rd32(IGC_MCC); rd32(IGC_LATECOL); rd32(IGC_COLC); + rd32(IGC_RERC); rd32(IGC_DC); - rd32(IGC_SEC); rd32(IGC_RLEC); rd32(IGC_XONRXC); rd32(IGC_XONTXC); @@ -288,7 +287,7 @@ void igc_clear_hw_cntrs_base(struct igc_hw *hw) rd32(IGC_ALGNERRC); rd32(IGC_RXERRC); rd32(IGC_TNCRS); - rd32(IGC_CEXTERR); + rd32(IGC_HTDPMC); rd32(IGC_TSCTC); rd32(IGC_TSCTFC); @@ -307,12 +306,8 @@ void igc_clear_hw_cntrs_base(struct igc_hw *hw) rd32(IGC_ICTXQMTC); rd32(IGC_ICRXDMTC); - rd32(IGC_CBTMPC); - rd32(IGC_HTDPMC); - rd32(IGC_CBRMPC); rd32(IGC_RPTHC); rd32(IGC_HGPTC); - rd32(IGC_HTCBDPC); rd32(IGC_HGORCL); rd32(IGC_HGORCH); rd32(IGC_HGOTCL); diff --git a/drivers/net/ethernet/intel/igc/igc_mac.h b/drivers/net/ethernet/intel/igc/igc_mac.h index 832cccec87cd..b5963f86defb 100644 --- a/drivers/net/ethernet/intel/igc/igc_mac.h +++ b/drivers/net/ethernet/intel/igc/igc_mac.h @@ -8,10 +8,6 @@ #include "igc_phy.h" #include "igc_defines.h" -#ifndef IGC_REMOVED -#define IGC_REMOVED(a) (0) -#endif /* IGC_REMOVED */ - /* forward declaration */ s32 igc_disable_pcie_master(struct igc_hw *hw); s32 igc_check_for_copper_link(struct igc_hw *hw); diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 9d5f8287c704..43fcabb5c023 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -76,7 +76,7 @@ static void igc_power_down_link(struct igc_adapter *adapter) void igc_reset(struct igc_adapter *adapter) { - struct pci_dev *pdev = adapter->pdev; + struct net_device *dev = adapter->netdev; struct igc_hw *hw = &adapter->hw; struct igc_fc_info *fc = &hw->fc; u32 pba, hwm; @@ -103,7 +103,7 @@ void igc_reset(struct igc_adapter *adapter) hw->mac.ops.reset_hw(hw); if (hw->mac.ops.init_hw(hw)) - dev_err(&pdev->dev, "Hardware Error\n"); + netdev_err(dev, "Error on hardware initialization\n"); if (!netif_running(adapter->netdev)) igc_power_down_link(adapter); @@ -288,6 +288,7 @@ static void igc_clean_all_tx_rings(struct igc_adapter *adapter) */ int igc_setup_tx_resources(struct igc_ring *tx_ring) { + struct net_device *ndev = tx_ring->netdev; struct device *dev = tx_ring->dev; int size = 0; @@ -313,8 +314,7 @@ int igc_setup_tx_resources(struct igc_ring *tx_ring) err: vfree(tx_ring->tx_buffer_info); - dev_err(dev, - "Unable to allocate memory for the transmit descriptor ring\n"); + netdev_err(ndev, "Unable to allocate memory for Tx descriptor ring\n"); return -ENOMEM; } @@ -326,14 +326,13 @@ err: */ static int igc_setup_all_tx_resources(struct igc_adapter *adapter) { - struct pci_dev *pdev = adapter->pdev; + struct net_device *dev = adapter->netdev; int i, err = 0; for (i = 0; i < adapter->num_tx_queues; i++) { err = igc_setup_tx_resources(adapter->tx_ring[i]); if (err) { - dev_err(&pdev->dev, - "Allocation for Tx Queue %u failed\n", i); + netdev_err(dev, "Error on Tx queue %u setup\n", 
i); for (i--; i >= 0; i--) igc_free_tx_resources(adapter->tx_ring[i]); break; @@ -444,6 +443,7 @@ static void igc_free_all_rx_resources(struct igc_adapter *adapter) */ int igc_setup_rx_resources(struct igc_ring *rx_ring) { + struct net_device *ndev = rx_ring->netdev; struct device *dev = rx_ring->dev; int size, desc_len; @@ -473,8 +473,7 @@ int igc_setup_rx_resources(struct igc_ring *rx_ring) err: vfree(rx_ring->rx_buffer_info); rx_ring->rx_buffer_info = NULL; - dev_err(dev, - "Unable to allocate memory for the receive descriptor ring\n"); + netdev_err(ndev, "Unable to allocate memory for Rx descriptor ring\n"); return -ENOMEM; } @@ -487,14 +486,13 @@ err: */ static int igc_setup_all_rx_resources(struct igc_adapter *adapter) { - struct pci_dev *pdev = adapter->pdev; + struct net_device *dev = adapter->netdev; int i, err = 0; for (i = 0; i < adapter->num_rx_queues; i++) { err = igc_setup_rx_resources(adapter->rx_ring[i]); if (err) { - dev_err(&pdev->dev, - "Allocation for Rx Queue %u failed\n", i); + netdev_err(dev, "Error on Rx queue %u setup\n", i); for (i--; i >= 0; i--) igc_free_rx_resources(adapter->rx_ring[i]); break; @@ -768,12 +766,14 @@ static void igc_setup_tctl(struct igc_adapter *adapter) * igc_set_mac_filter_hw() - Set MAC address filter in hardware * @adapter: Pointer to adapter where the filter should be set * @index: Filter index - * @addr: Destination MAC address + * @type: MAC address filter type (source or destination) + * @addr: MAC address * @queue: If non-negative, queue assignment feature is enabled and frames * matching the filter are enqueued onto 'queue'. Otherwise, queue * assignment is disabled. */ static void igc_set_mac_filter_hw(struct igc_adapter *adapter, int index, + enum igc_mac_filter_type type, const u8 *addr, int queue) { struct net_device *dev = adapter->netdev; @@ -786,6 +786,11 @@ static void igc_set_mac_filter_hw(struct igc_adapter *adapter, int index, ral = le32_to_cpup((__le32 *)(addr)); rah = le16_to_cpup((__le16 *)(addr + 4)); + if (type == IGC_MAC_FILTER_TYPE_SRC) { + rah &= ~IGC_RAH_ASEL_MASK; + rah |= IGC_RAH_ASEL_SRC_ADDR; + } + if (queue >= 0) { rah &= ~IGC_RAH_QSEL_MASK; rah |= (queue << IGC_RAH_QSEL_SHIFT); @@ -822,17 +827,12 @@ static void igc_clear_mac_filter_hw(struct igc_adapter *adapter, int index) /* Set default MAC address for the PF in the first RAR entry */ static void igc_set_default_mac_filter(struct igc_adapter *adapter) { - struct igc_mac_addr *mac_table = &adapter->mac_table[0]; struct net_device *dev = adapter->netdev; u8 *addr = adapter->hw.mac.addr; netdev_dbg(dev, "Set default MAC address filter: address %pM", addr); - ether_addr_copy(mac_table->addr, addr); - mac_table->state = IGC_MAC_STATE_DEFAULT | IGC_MAC_STATE_IN_USE; - mac_table->queue = -1; - - igc_set_mac_filter_hw(adapter, 0, addr, mac_table->queue); + igc_set_mac_filter_hw(adapter, 0, IGC_MAC_FILTER_TYPE_DST, addr, -1); } /** @@ -1196,7 +1196,7 @@ static int igc_tx_map(struct igc_ring *tx_ring, return 0; dma_error: - dev_err(tx_ring->dev, "TX DMA map failed\n"); + netdev_err(tx_ring->netdev, "TX DMA map failed\n"); tx_buffer = &tx_ring->tx_buffer_info[i]; /* clear dma mappings for failed tx_buffer_info map */ @@ -1459,8 +1459,8 @@ static void igc_rx_checksum(struct igc_ring *ring, IGC_RXD_STAT_UDPCS)) skb->ip_summed = CHECKSUM_UNNECESSARY; - dev_dbg(ring->dev, "cksum success: bits %08X\n", - le32_to_cpu(rx_desc->wb.upper.status_error)); + netdev_dbg(ring->netdev, "cksum success: bits %08X\n", + le32_to_cpu(rx_desc->wb.upper.status_error)); } static inline void 
igc_rx_hash(struct igc_ring *ring, @@ -2122,27 +2122,27 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget) (adapter->tx_timeout_factor * HZ)) && !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF)) { /* detected Tx unit hang */ - dev_err(tx_ring->dev, - "Detected Tx Unit Hang\n" - " Tx Queue <%d>\n" - " TDH <%x>\n" - " TDT <%x>\n" - " next_to_use <%x>\n" - " next_to_clean <%x>\n" - "buffer_info[next_to_clean]\n" - " time_stamp <%lx>\n" - " next_to_watch <%p>\n" - " jiffies <%lx>\n" - " desc.status <%x>\n", - tx_ring->queue_index, - rd32(IGC_TDH(tx_ring->reg_idx)), - readl(tx_ring->tail), - tx_ring->next_to_use, - tx_ring->next_to_clean, - tx_buffer->time_stamp, - tx_buffer->next_to_watch, - jiffies, - tx_buffer->next_to_watch->wb.status); + netdev_err(tx_ring->netdev, + "Detected Tx Unit Hang\n" + " Tx Queue <%d>\n" + " TDH <%x>\n" + " TDT <%x>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n" + "buffer_info[next_to_clean]\n" + " time_stamp <%lx>\n" + " next_to_watch <%p>\n" + " jiffies <%lx>\n" + " desc.status <%x>\n", + tx_ring->queue_index, + rd32(IGC_TDH(tx_ring->reg_idx)), + readl(tx_ring->tail), + tx_ring->next_to_use, + tx_ring->next_to_clean, + tx_buffer->time_stamp, + tx_buffer->next_to_watch, + jiffies, + tx_buffer->next_to_watch->wb.status); netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); @@ -2174,34 +2174,26 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget) return !!budget; } -static void igc_nfc_filter_restore(struct igc_adapter *adapter) -{ - struct igc_nfc_filter *rule; - - spin_lock(&adapter->nfc_lock); - - hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) - igc_add_filter(adapter, rule); - - spin_unlock(&adapter->nfc_lock); -} - -static int igc_find_mac_filter(struct igc_adapter *adapter, const u8 *addr, - u8 flags) +static int igc_find_mac_filter(struct igc_adapter *adapter, + enum igc_mac_filter_type type, const u8 *addr) { - int max_entries = adapter->hw.mac.rar_entry_count; - struct igc_mac_addr *entry; + struct igc_hw *hw = &adapter->hw; + int max_entries = hw->mac.rar_entry_count; + u32 ral, rah; int i; for (i = 0; i < max_entries; i++) { - entry = &adapter->mac_table[i]; + ral = rd32(IGC_RAL(i)); + rah = rd32(IGC_RAH(i)); - if (!(entry->state & IGC_MAC_STATE_IN_USE)) + if (!(rah & IGC_RAH_AV)) + continue; + if (!!(rah & IGC_RAH_ASEL_SRC_ADDR) != type) continue; - if (!ether_addr_equal(addr, entry->addr)) + if ((rah & IGC_RAH_RAH_MASK) != + le16_to_cpup((__le16 *)(addr + 4))) continue; - if ((entry->state & IGC_MAC_STATE_SRC_ADDR) != - (flags & IGC_MAC_STATE_SRC_ADDR)) + if (ral != le32_to_cpup((__le32 *)(addr))) continue; return i; @@ -2212,14 +2204,15 @@ static int igc_find_mac_filter(struct igc_adapter *adapter, const u8 *addr, static int igc_get_avail_mac_filter_slot(struct igc_adapter *adapter) { - int max_entries = adapter->hw.mac.rar_entry_count; - struct igc_mac_addr *entry; + struct igc_hw *hw = &adapter->hw; + int max_entries = hw->mac.rar_entry_count; + u32 rah; int i; for (i = 0; i < max_entries; i++) { - entry = &adapter->mac_table[i]; + rah = rd32(IGC_RAH(i)); - if (!(entry->state & IGC_MAC_STATE_IN_USE)) + if (!(rah & IGC_RAH_AV)) return i; } @@ -2229,105 +2222,388 @@ static int igc_get_avail_mac_filter_slot(struct igc_adapter *adapter) /** * igc_add_mac_filter() - Add MAC address filter * @adapter: Pointer to adapter where the filter should be added + * @type: MAC address filter type (source or destination) * @addr: MAC address * @queue: If non-negative, queue assignment 
feature is enabled and frames * matching the filter are enqueued onto 'queue'. Otherwise, queue * assignment is disabled. - * @flags: Set IGC_MAC_STATE_SRC_ADDR bit to indicate @address is a source - * address * * Return: 0 in case of success, negative errno code otherwise. */ -int igc_add_mac_filter(struct igc_adapter *adapter, const u8 *addr, - const s8 queue, const u8 flags) +static int igc_add_mac_filter(struct igc_adapter *adapter, + enum igc_mac_filter_type type, const u8 *addr, + int queue) { struct net_device *dev = adapter->netdev; int index; - if (!is_valid_ether_addr(addr)) - return -EINVAL; - if (flags & IGC_MAC_STATE_SRC_ADDR) - return -ENOTSUPP; - - index = igc_find_mac_filter(adapter, addr, flags); + index = igc_find_mac_filter(adapter, type, addr); if (index >= 0) - goto update_queue_assignment; + goto update_filter; index = igc_get_avail_mac_filter_slot(adapter); if (index < 0) return -ENOSPC; - netdev_dbg(dev, "Add MAC address filter: index %d address %pM queue %d", - index, addr, queue); - - ether_addr_copy(adapter->mac_table[index].addr, addr); - adapter->mac_table[index].state |= IGC_MAC_STATE_IN_USE | flags; -update_queue_assignment: - adapter->mac_table[index].queue = queue; + netdev_dbg(dev, "Add MAC address filter: index %d type %s address %pM queue %d\n", + index, type == IGC_MAC_FILTER_TYPE_DST ? "dst" : "src", + addr, queue); - igc_set_mac_filter_hw(adapter, index, addr, queue); +update_filter: + igc_set_mac_filter_hw(adapter, index, type, addr, queue); return 0; } /** * igc_del_mac_filter() - Delete MAC address filter * @adapter: Pointer to adapter where the filter should be deleted from + * @type: MAC address filter type (source or destination) * @addr: MAC address - * @flags: Set IGC_MAC_STATE_SRC_ADDR bit to indicate @address is a source - * address - * - * Return: 0 in case of success, negative errno code otherwise. */ -int igc_del_mac_filter(struct igc_adapter *adapter, const u8 *addr, - const u8 flags) +static void igc_del_mac_filter(struct igc_adapter *adapter, + enum igc_mac_filter_type type, const u8 *addr) { struct net_device *dev = adapter->netdev; - struct igc_mac_addr *entry; int index; - if (!is_valid_ether_addr(addr)) - return -EINVAL; - - index = igc_find_mac_filter(adapter, addr, flags); + index = igc_find_mac_filter(adapter, type, addr); if (index < 0) - return -ENOENT; - - entry = &adapter->mac_table[index]; + return; - if (entry->state & IGC_MAC_STATE_DEFAULT) { + if (index == 0) { /* If this is the default filter, we don't actually delete it. * We just reset to its default value i.e. disable queue * assignment. */ netdev_dbg(dev, "Disable default MAC filter queue assignment"); - entry->queue = -1; - igc_set_mac_filter_hw(adapter, 0, addr, entry->queue); + igc_set_mac_filter_hw(adapter, 0, type, addr, -1); } else { - netdev_dbg(dev, "Delete MAC address filter: index %d address %pM", - index, addr); + netdev_dbg(dev, "Delete MAC address filter: index %d type %s address %pM\n", + index, + type == IGC_MAC_FILTER_TYPE_DST ? "dst" : "src", + addr); - entry->state = 0; - entry->queue = -1; - memset(entry->addr, 0, ETH_ALEN); igc_clear_mac_filter_hw(adapter, index); } +} + +/** + * igc_add_vlan_prio_filter() - Add VLAN priority filter + * @adapter: Pointer to adapter where the filter should be added + * @prio: VLAN priority value + * @queue: Queue number which matching frames are assigned to + * + * Return: 0 in case of success, negative errno code otherwise. 
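+ *
+ * Example: steering frames with VLAN priority (PCP) 5 to Rx queue 2, a
+ * sketch mirroring the caller in igc_enable_nfc_rule(); the queue
+ * number is an arbitrary illustration:
+ *
+ *   int prio = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+ *   err = igc_add_vlan_prio_filter(adapter, prio, 2);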
+ */ +static int igc_add_vlan_prio_filter(struct igc_adapter *adapter, int prio, + int queue) +{ + struct net_device *dev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + u32 vlanpqf; + + vlanpqf = rd32(IGC_VLANPQF); + if (vlanpqf & IGC_VLANPQF_VALID(prio)) { + netdev_dbg(dev, "VLAN priority filter already in use\n"); + return -EEXIST; + } + + vlanpqf |= IGC_VLANPQF_QSEL(prio, queue); + vlanpqf |= IGC_VLANPQF_VALID(prio); + + wr32(IGC_VLANPQF, vlanpqf); + + netdev_dbg(dev, "Add VLAN priority filter: prio %d queue %d\n", + prio, queue); return 0; } +/** + * igc_del_vlan_prio_filter() - Delete VLAN priority filter + * @adapter: Pointer to adapter where the filter should be deleted from + * @prio: VLAN priority value + */ +static void igc_del_vlan_prio_filter(struct igc_adapter *adapter, int prio) +{ + struct igc_hw *hw = &adapter->hw; + u32 vlanpqf; + + vlanpqf = rd32(IGC_VLANPQF); + + vlanpqf &= ~IGC_VLANPQF_VALID(prio); + vlanpqf &= ~IGC_VLANPQF_QSEL(prio, IGC_VLANPQF_QUEUE_MASK); + + wr32(IGC_VLANPQF, vlanpqf); + + netdev_dbg(adapter->netdev, "Delete VLAN priority filter: prio %d\n", + prio); +} + +static int igc_get_avail_etype_filter_slot(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + int i; + + for (i = 0; i < MAX_ETYPE_FILTER; i++) { + u32 etqf = rd32(IGC_ETQF(i)); + + if (!(etqf & IGC_ETQF_FILTER_ENABLE)) + return i; + } + + return -1; +} + +/** + * igc_add_etype_filter() - Add ethertype filter + * @adapter: Pointer to adapter where the filter should be added + * @etype: Ethertype value + * @queue: If non-negative, queue assignment feature is enabled and frames + * matching the filter are enqueued onto 'queue'. Otherwise, queue + * assignment is disabled. + * + * Return: 0 in case of success, negative errno code otherwise. 
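+ *
+ * Example: assigning PTP-over-Ethernet frames (ethertype 0x88F7,
+ * ETH_P_1588) to queue 0; note @etype is taken in host byte order,
+ * matching the ntohs() conversion in igc_ethtool_init_nfc_rule():
+ *
+ *   err = igc_add_etype_filter(adapter, 0x88F7, 0);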
+ */ +static int igc_add_etype_filter(struct igc_adapter *adapter, u16 etype, + int queue) +{ + struct igc_hw *hw = &adapter->hw; + int index; + u32 etqf; + + index = igc_get_avail_etype_filter_slot(adapter); + if (index < 0) + return -ENOSPC; + + etqf = rd32(IGC_ETQF(index)); + + etqf &= ~IGC_ETQF_ETYPE_MASK; + etqf |= etype; + + if (queue >= 0) { + etqf &= ~IGC_ETQF_QUEUE_MASK; + etqf |= (queue << IGC_ETQF_QUEUE_SHIFT); + etqf |= IGC_ETQF_QUEUE_ENABLE; + } + + etqf |= IGC_ETQF_FILTER_ENABLE; + + wr32(IGC_ETQF(index), etqf); + + netdev_dbg(adapter->netdev, "Add ethertype filter: etype %04x queue %d\n", + etype, queue); + return 0; +} + +static int igc_find_etype_filter(struct igc_adapter *adapter, u16 etype) +{ + struct igc_hw *hw = &adapter->hw; + int i; + + for (i = 0; i < MAX_ETYPE_FILTER; i++) { + u32 etqf = rd32(IGC_ETQF(i)); + + if ((etqf & IGC_ETQF_ETYPE_MASK) == etype) + return i; + } + + return -1; +} + +/** + * igc_del_etype_filter() - Delete ethertype filter + * @adapter: Pointer to adapter where the filter should be deleted from + * @etype: Ethertype value + */ +static void igc_del_etype_filter(struct igc_adapter *adapter, u16 etype) +{ + struct igc_hw *hw = &adapter->hw; + int index; + + index = igc_find_etype_filter(adapter, etype); + if (index < 0) + return; + + wr32(IGC_ETQF(index), 0); + + netdev_dbg(adapter->netdev, "Delete ethertype filter: etype %04x\n", + etype); +} + +static int igc_enable_nfc_rule(struct igc_adapter *adapter, + const struct igc_nfc_rule *rule) +{ + int err; + + if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { + err = igc_add_etype_filter(adapter, rule->filter.etype, + rule->action); + if (err) + return err; + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) { + err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC, + rule->filter.src_addr, rule->action); + if (err) + return err; + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) { + err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, + rule->filter.dst_addr, rule->action); + if (err) + return err; + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { + int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >> + VLAN_PRIO_SHIFT; + + err = igc_add_vlan_prio_filter(adapter, prio, rule->action); + if (err) + return err; + } + + return 0; +} + +static void igc_disable_nfc_rule(struct igc_adapter *adapter, + const struct igc_nfc_rule *rule) +{ + if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) + igc_del_etype_filter(adapter, rule->filter.etype); + + if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { + int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >> + VLAN_PRIO_SHIFT; + + igc_del_vlan_prio_filter(adapter, prio); + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) + igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC, + rule->filter.src_addr); + + if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) + igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, + rule->filter.dst_addr); +} + +/** + * igc_get_nfc_rule() - Get NFC rule + * @adapter: Pointer to adapter + * @location: Rule location + * + * Context: Expects adapter->nfc_rule_lock to be held by caller. + * + * Return: Pointer to NFC rule at @location. If not found, NULL. 
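+ *
+ * Example caller pattern, as used by the ethtool add/del hooks:
+ *
+ *   mutex_lock(&adapter->nfc_rule_lock);
+ *   rule = igc_get_nfc_rule(adapter, fsp->location);
+ *   if (rule)
+ *           igc_del_nfc_rule(adapter, rule);
+ *   mutex_unlock(&adapter->nfc_rule_lock);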
+ */ +struct igc_nfc_rule *igc_get_nfc_rule(struct igc_adapter *adapter, + u32 location) +{ + struct igc_nfc_rule *rule; + + list_for_each_entry(rule, &adapter->nfc_rule_list, list) { + if (rule->location == location) + return rule; + if (rule->location > location) + break; + } + + return NULL; +} + +/** + * igc_del_nfc_rule() - Delete NFC rule + * @adapter: Pointer to adapter + * @rule: Pointer to rule to be deleted + * + * Disable NFC rule in hardware and delete it from adapter. + * + * Context: Expects adapter->nfc_rule_lock to be held by caller. + */ +void igc_del_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule) +{ + igc_disable_nfc_rule(adapter, rule); + + list_del(&rule->list); + adapter->nfc_rule_count--; + + kfree(rule); +} + +static void igc_flush_nfc_rules(struct igc_adapter *adapter) +{ + struct igc_nfc_rule *rule, *tmp; + + mutex_lock(&adapter->nfc_rule_lock); + + list_for_each_entry_safe(rule, tmp, &adapter->nfc_rule_list, list) + igc_del_nfc_rule(adapter, rule); + + mutex_unlock(&adapter->nfc_rule_lock); +} + +/** + * igc_add_nfc_rule() - Add NFC rule + * @adapter: Pointer to adapter + * @rule: Pointer to rule to be added + * + * Enable NFC rule in hardware and add it to adapter. + * + * Context: Expects adapter->nfc_rule_lock to be held by caller. + * + * Return: 0 on success, negative errno on failure. + */ +int igc_add_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule) +{ + struct igc_nfc_rule *pred, *cur; + int err; + + err = igc_enable_nfc_rule(adapter, rule); + if (err) + return err; + + pred = NULL; + list_for_each_entry(cur, &adapter->nfc_rule_list, list) { + if (cur->location >= rule->location) + break; + pred = cur; + } + + list_add(&rule->list, pred ? &pred->list : &adapter->nfc_rule_list); + adapter->nfc_rule_count++; + return 0; +} + +static void igc_restore_nfc_rules(struct igc_adapter *adapter) +{ + struct igc_nfc_rule *rule; + + mutex_lock(&adapter->nfc_rule_lock); + + list_for_each_entry_reverse(rule, &adapter->nfc_rule_list, list) + igc_enable_nfc_rule(adapter, rule); + + mutex_unlock(&adapter->nfc_rule_lock); +} + static int igc_uc_sync(struct net_device *netdev, const unsigned char *addr) { struct igc_adapter *adapter = netdev_priv(netdev); - return igc_add_mac_filter(adapter, addr, -1, 0); + return igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr, -1); } static int igc_uc_unsync(struct net_device *netdev, const unsigned char *addr) { struct igc_adapter *adapter = netdev_priv(netdev); - return igc_del_mac_filter(adapter, addr, 0); + igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr); + return 0; } /** @@ -2398,7 +2674,7 @@ static void igc_configure(struct igc_adapter *adapter) igc_setup_rctl(adapter); igc_set_default_mac_filter(adapter); - igc_nfc_filter_restore(adapter); + igc_restore_nfc_rules(adapter); igc_configure_tx(adapter); igc_configure_rx(adapter); @@ -2592,12 +2868,7 @@ void igc_set_flag_queue_pairs(struct igc_adapter *adapter, unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter) { - unsigned int max_rss_queues; - - /* Determine the maximum number of RSS queues supported. 
*/ - max_rss_queues = IGC_MAX_RX_QUEUES; - - return max_rss_queues; + return IGC_MAX_RX_QUEUES; } static void igc_init_queue_configuration(struct igc_adapter *adapter) @@ -3238,14 +3509,14 @@ err_out: */ static int igc_init_interrupt_scheme(struct igc_adapter *adapter, bool msix) { - struct pci_dev *pdev = adapter->pdev; + struct net_device *dev = adapter->netdev; int err = 0; igc_set_interrupt_capability(adapter, msix); err = igc_alloc_q_vectors(adapter); if (err) { - dev_err(&pdev->dev, "Unable to allocate memory for vectors\n"); + netdev_err(dev, "Unable to allocate memory for vectors\n"); goto err_alloc_q_vectors; } @@ -3272,8 +3543,6 @@ static int igc_sw_init(struct igc_adapter *adapter) struct pci_dev *pdev = adapter->pdev; struct igc_hw *hw = &adapter->hw; - int size = sizeof(struct igc_mac_addr) * hw->mac.rar_entry_count; - pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word); /* set default ring sizes */ @@ -3292,20 +3561,19 @@ static int igc_sw_init(struct igc_adapter *adapter) VLAN_HLEN; adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; - spin_lock_init(&adapter->nfc_lock); + mutex_init(&adapter->nfc_rule_lock); + INIT_LIST_HEAD(&adapter->nfc_rule_list); + adapter->nfc_rule_count = 0; + spin_lock_init(&adapter->stats64_lock); /* Assume MSI-X interrupts, will be checked during IRQ allocation */ adapter->flags |= IGC_FLAG_HAS_MSIX; - adapter->mac_table = kzalloc(size, GFP_ATOMIC); - if (!adapter->mac_table) - return -ENOMEM; - igc_init_queue_configuration(adapter); /* This call may decrease the number of queues */ if (igc_init_interrupt_scheme(adapter, true)) { - dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); + netdev_err(netdev, "Unable to allocate memory for queues\n"); return -ENOMEM; } @@ -3433,8 +3701,6 @@ void igc_update_stats(struct igc_adapter *adapter) adapter->stats.prc511 += rd32(IGC_PRC511); adapter->stats.prc1023 += rd32(IGC_PRC1023); adapter->stats.prc1522 += rd32(IGC_PRC1522); - adapter->stats.symerrs += rd32(IGC_SYMERRS); - adapter->stats.sec += rd32(IGC_SEC); mpc = rd32(IGC_MPC); adapter->stats.mpc += mpc; @@ -3473,6 +3739,7 @@ void igc_update_stats(struct igc_adapter *adapter) adapter->stats.tpt += rd32(IGC_TPT); adapter->stats.colc += rd32(IGC_COLC); + adapter->stats.colc += rd32(IGC_RERC); adapter->stats.algnerrc += rd32(IGC_ALGNERRC); @@ -3523,18 +3790,6 @@ void igc_update_stats(struct igc_adapter *adapter) adapter->stats.mgpdc += rd32(IGC_MGTPDC); } -static void igc_nfc_filter_exit(struct igc_adapter *adapter) -{ - struct igc_nfc_filter *rule; - - spin_lock(&adapter->nfc_lock); - - hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) - igc_erase_filter(adapter, rule); - - spin_unlock(&adapter->nfc_lock); -} - /** * igc_down - Close the interface * @adapter: board private structure @@ -3553,8 +3808,6 @@ void igc_down(struct igc_adapter *adapter) wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN); /* flush and sleep below */ - igc_nfc_filter_exit(adapter); - /* set trans_start so we don't get spurious watchdogs during reset */ netif_trans_update(netdev); @@ -3648,8 +3901,7 @@ static int igc_change_mtu(struct net_device *netdev, int new_mtu) if (netif_running(netdev)) igc_down(adapter); - netdev_dbg(netdev, "changing MTU from %d to %d\n", - netdev->mtu, new_mtu); + netdev_dbg(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); netdev->mtu = new_mtu; if (netif_running(netdev)) @@ -3704,20 +3956,8 @@ static int igc_set_features(struct net_device *netdev, if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE))) return 0; - if (!(features 
& NETIF_F_NTUPLE)) { - struct hlist_node *node2; - struct igc_nfc_filter *rule; - - spin_lock(&adapter->nfc_lock); - hlist_for_each_entry_safe(rule, node2, - &adapter->nfc_filter_list, nfc_node) { - igc_erase_filter(adapter, rule); - hlist_del(&rule->nfc_node); - kfree(rule); - } - spin_unlock(&adapter->nfc_lock); - adapter->nfc_filter_count = 0; - } + if (!(features & NETIF_F_NTUPLE)) + igc_flush_nfc_rules(adapter); netdev->features = features; @@ -4006,8 +4246,7 @@ static void igc_watchdog_task(struct work_struct *work) ctrl = rd32(IGC_CTRL); /* Link status message must follow this format */ netdev_info(netdev, - "igc: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n", - netdev->name, + "NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n", adapter->link_speed, adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half", @@ -4045,10 +4284,10 @@ retry_read_status: retry_count--; goto retry_read_status; } else if (!retry_count) { - dev_err(&adapter->pdev->dev, "exceed max 2 second\n"); + netdev_err(netdev, "exceed max 2 second\n"); } } else { - dev_err(&adapter->pdev->dev, "read 1000Base-T Status Reg\n"); + netdev_err(netdev, "read 1000Base-T Status Reg\n"); } no_wait: netif_carrier_on(netdev); @@ -4064,8 +4303,7 @@ no_wait: adapter->link_duplex = 0; /* Links status message must follow this format */ - netdev_info(netdev, "igc: %s NIC Link is Down\n", - netdev->name); + netdev_info(netdev, "NIC Link is Down\n"); netif_carrier_off(netdev); /* link state has changed, schedule phy info update */ @@ -4283,8 +4521,7 @@ static int igc_request_irq(struct igc_adapter *adapter) netdev->name, adapter); if (err) - dev_err(&pdev->dev, "Error %d getting interrupt\n", - err); + netdev_err(netdev, "Error %d getting interrupt\n", err); request_done: return err; @@ -4386,7 +4623,7 @@ err_setup_tx: return err; } -static int igc_open(struct net_device *netdev) +int igc_open(struct net_device *netdev) { return __igc_open(netdev, false); } @@ -4428,7 +4665,7 @@ static int __igc_close(struct net_device *netdev, bool suspending) return 0; } -static int igc_close(struct net_device *netdev) +int igc_close(struct net_device *netdev) { if (netif_device_present(netdev) || netdev->dismantle) return __igc_close(netdev, false); @@ -4665,9 +4902,6 @@ u32 igc_rd32(struct igc_hw *hw, u32 reg) u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr); u32 value = 0; - if (IGC_REMOVED(hw_addr)) - return ~value; - value = readl(&hw_addr[reg]); /* reads should not return all F's */ @@ -4686,7 +4920,6 @@ u32 igc_rd32(struct igc_hw *hw, u32 reg) int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx) { - struct pci_dev *pdev = adapter->pdev; struct igc_mac_info *mac = &adapter->hw.mac; mac->autoneg = 0; @@ -4731,7 +4964,7 @@ int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx) return 0; err_inval: - dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n"); + netdev_err(adapter->netdev, "Unsupported Speed/Duplex configuration\n"); return -EINVAL; } @@ -4812,7 +5045,7 @@ static int igc_probe(struct pci_dev *pdev, hw->hw_addr = adapter->io_addr; netdev->netdev_ops = &igc_netdev_ops; - igc_set_ethtool_ops(netdev); + igc_ethtool_set_ops(netdev); netdev->watchdog_timeo = 5 * HZ; netdev->mem_start = pci_resource_start(pdev, 0); @@ -4838,6 +5071,7 @@ static int igc_probe(struct pci_dev *pdev, netdev->features |= NETIF_F_SG; netdev->features |= NETIF_F_TSO; netdev->features |= NETIF_F_TSO6; + netdev->features |= NETIF_F_TSO_ECN; netdev->features |= NETIF_F_RXCSUM; netdev->features |= NETIF_F_HW_CSUM; 
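+	/* NETIF_F_TSO_ECN, set above, advertises that TSO may be used for
+	 * TCP flows that negotiated ECN (SKB_GSO_TCP_ECN skbs). */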
netdev->features |= NETIF_F_SCTP_CRC; @@ -4876,8 +5110,7 @@ static int igc_probe(struct pci_dev *pdev, if (igc_get_flash_presence_i225(hw)) { if (hw->nvm.ops.validate(hw) < 0) { - dev_err(&pdev->dev, - "The NVM Checksum Is Not Valid\n"); + dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n"); err = -EIO; goto err_eeprom; } @@ -4991,6 +5224,8 @@ static void igc_remove(struct pci_dev *pdev) pm_runtime_get_noresume(&pdev->dev); + igc_flush_nfc_rules(adapter); + igc_ptp_stop(adapter); set_bit(__IGC_DOWN, &adapter->state); @@ -5011,7 +5246,6 @@ static void igc_remove(struct pci_dev *pdev) pci_iounmap(pdev, adapter->io_addr); pci_release_mem_regions(pdev); - kfree(adapter->mac_table); free_netdev(netdev); pci_disable_pcie_error_reporting(pdev); @@ -5140,8 +5374,7 @@ static int __maybe_unused igc_resume(struct device *dev) return -ENODEV; err = pci_enable_device_mem(pdev); if (err) { - dev_err(&pdev->dev, - "igc: Cannot enable PCI device from suspend\n"); + netdev_err(netdev, "Cannot enable PCI device from suspend\n"); return err; } pci_set_master(pdev); @@ -5150,7 +5383,7 @@ static int __maybe_unused igc_resume(struct device *dev) pci_enable_wake(pdev, PCI_D3cold, 0); if (igc_init_interrupt_scheme(adapter, true)) { - dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); + netdev_err(netdev, "Unable to allocate memory for queues\n"); return -ENOMEM; } @@ -5254,8 +5487,7 @@ static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev) pci_ers_result_t result; if (pci_enable_device_mem(pdev)) { - dev_err(&pdev->dev, - "Could not re-enable PCI device after reset.\n"); + netdev_err(netdev, "Could not re-enable PCI device after reset\n"); result = PCI_ERS_RESULT_DISCONNECT; } else { pci_set_master(pdev); @@ -5294,7 +5526,7 @@ static void igc_io_resume(struct pci_dev *pdev) rtnl_lock(); if (netif_running(netdev)) { if (igc_open(netdev)) { - dev_err(&pdev->dev, "igc_open failed after reset\n"); + netdev_err(netdev, "igc_open failed after reset\n"); return; } } @@ -5341,7 +5573,6 @@ static struct pci_driver igc_driver = { int igc_reinit_queues(struct igc_adapter *adapter) { struct net_device *netdev = adapter->netdev; - struct pci_dev *pdev = adapter->pdev; int err = 0; if (netif_running(netdev)) @@ -5350,7 +5581,7 @@ int igc_reinit_queues(struct igc_adapter *adapter) igc_reset_interrupt_capability(adapter); if (igc_init_interrupt_scheme(adapter, true)) { - dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); + netdev_err(netdev, "Unable to allocate memory for queues\n"); return -ENOMEM; } diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c index f99c514ad0f4..0d746f8588c8 100644 --- a/drivers/net/ethernet/intel/igc/igc_ptp.c +++ b/drivers/net/ethernet/intel/igc/igc_ptp.c @@ -305,7 +305,6 @@ static int igc_ptp_set_timestamp_mode(struct igc_adapter *adapter, struct igc_hw *hw = &adapter->hw; u32 tsync_rx_cfg = 0; bool is_l4 = false; - bool is_l2 = false; u32 regval; /* reserved for future extensions */ @@ -346,7 +345,6 @@ static int igc_ptp_set_timestamp_mode(struct igc_adapter *adapter, case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: tsync_rx_ctl |= IGC_TSYNCRXCTL_TYPE_EVENT_V2; config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; - is_l2 = true; is_l4 = true; break; case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: @@ -370,7 +368,6 @@ static int igc_ptp_set_timestamp_mode(struct igc_adapter *adapter, tsync_rx_ctl |= IGC_TSYNCRXCTL_TYPE_ALL; tsync_rx_ctl |= IGC_TSYNCRXCTL_RXSYNSIG; config->rx_filter = HWTSTAMP_FILTER_ALL; - is_l2 = true; is_l4 = true; if 
(hw->mac.type == igc_i225) { @@ -405,15 +402,6 @@ static int igc_ptp_set_timestamp_mode(struct igc_adapter *adapter, /* define which PTP packets are time stamped */ wr32(IGC_TSYNCRXCFG, tsync_rx_cfg); - /* define ethertype filter for timestamped packets */ - if (is_l2) - wr32(IGC_ETQF(3), - (IGC_ETQF_FILTER_ENABLE | /* enable filter */ - IGC_ETQF_1588 | /* enable timestamping */ - ETH_P_1588)); /* 1588 eth protocol type */ - else - wr32(IGC_ETQF(3), 0); - /* L4 Queue Filter[3]: filter by destination port and protocol */ if (is_l4) { u32 ftqf = (IPPROTO_UDP /* UDP */ @@ -466,7 +454,7 @@ void igc_ptp_tx_hang(struct igc_adapter *adapter) * interrupt */ rd32(IGC_TXSTMPH); - dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang\n"); + netdev_warn(adapter->netdev, "Clearing Tx timestamp hang\n"); } } @@ -529,7 +517,7 @@ static void igc_ptp_tx_work(struct work_struct *work) * interrupt */ rd32(IGC_TXSTMPH); - dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang\n"); + netdev_warn(adapter->netdev, "Clearing Tx timestamp hang\n"); return; } @@ -626,10 +614,9 @@ void igc_ptp_init(struct igc_adapter *adapter) &adapter->pdev->dev); if (IS_ERR(adapter->ptp_clock)) { adapter->ptp_clock = NULL; - dev_err(&adapter->pdev->dev, "ptp_clock_register failed\n"); + netdev_err(netdev, "ptp_clock_register failed\n"); } else if (adapter->ptp_clock) { - dev_info(&adapter->pdev->dev, "added PHC on %s\n", - adapter->netdev->name); + netdev_info(netdev, "PHC added\n"); adapter->ptp_flags |= IGC_PTP_ENABLED; } } @@ -666,8 +653,7 @@ void igc_ptp_stop(struct igc_adapter *adapter) if (adapter->ptp_clock) { ptp_clock_unregister(adapter->ptp_clock); - dev_info(&adapter->pdev->dev, "removed PHC on %s\n", - adapter->netdev->name); + netdev_info(adapter->netdev, "PHC removed\n"); adapter->ptp_flags &= ~IGC_PTP_ENABLED; } } diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h index 6093cde2351c..232e82dec62e 100644 --- a/drivers/net/ethernet/intel/igc/igc_regs.h +++ b/drivers/net/ethernet/intel/igc/igc_regs.h @@ -17,11 +17,6 @@ /* Internal Packet Buffer Size Registers */ #define IGC_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ #define IGC_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */ -#define IGC_TDFH 0x03410 /* Tx Data FIFO Head - RW */ -#define IGC_TDFT 0x03418 /* Tx Data FIFO Tail - RW */ -#define IGC_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */ -#define IGC_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */ -#define IGC_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */ /* NVM Register Descriptions */ #define IGC_EERD 0x12014 /* EEprom mode read - RW */ @@ -35,10 +30,6 @@ #define IGC_FCRTL 0x02160 /* FC Receive Threshold Low - RW */ #define IGC_FCRTH 0x02168 /* FC Receive Threshold High - RW */ #define IGC_FCRTV 0x02460 /* FC Refresh Timer Value - RW */ -#define IGC_FCSTS 0x02464 /* FC Status - RO */ - -/* PCIe Register Description */ -#define IGC_GCR 0x05B00 /* PCIe control- RW */ /* Semaphore registers */ #define IGC_SW_FW_SYNC 0x05B5C /* SW-FW Synchronization - RW */ @@ -49,6 +40,7 @@ #define IGC_FACTPS 0x05B30 /* Interrupt Register Description */ +#define IGC_EICR 0x01580 /* Ext. Interrupt Cause read - W0 */ #define IGC_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */ #define IGC_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */ #define IGC_EIMC 0x01528 /* Ext. 
Interrupt Mask Clear - WO */ @@ -76,13 +68,6 @@ #define IGC_ICRXDMTC 0x04120 /* Rx Descriptor Min Threshold Count */ #define IGC_ICRXOC 0x04124 /* Receiver Overrun Count */ -#define IGC_CBTMPC 0x0402C /* Circuit Breaker TX Packet Count */ -#define IGC_HTDPMC 0x0403C /* Host Transmit Discarded Packets */ -#define IGC_CBRMPC 0x040FC /* Circuit Breaker RX Packet Count */ -#define IGC_RPTHC 0x04104 /* Rx Packets To Host */ -#define IGC_HGPTC 0x04118 /* Host Good Packets TX Count */ -#define IGC_HTCBDPC 0x04124 /* Host TX Circ.Breaker Drop Count */ - /* MSI-X Table Register Descriptions */ #define IGC_PBACL 0x05B68 /* MSIx PBA Clear - R/W 1 to clear */ @@ -119,10 +104,11 @@ #define IGC_RLPML 0x05004 /* Rx Long Packet Max Length */ #define IGC_RFCTL 0x05008 /* Receive Filter Control*/ #define IGC_MTA 0x05200 /* Multicast Table Array - RW Array */ +#define IGC_RA 0x05400 /* Receive Address - RW Array */ #define IGC_UTA 0x0A000 /* Unicast Table Array - RW */ #define IGC_RAL(_n) (0x05400 + ((_n) * 0x08)) #define IGC_RAH(_n) (0x05404 + ((_n) * 0x08)) -#define IGC_VLAPQF 0x055B0 /* VLAN Priority Queue Filter VLAPQF */ +#define IGC_VLANPQF 0x055B0 /* VLAN Priority Queue Filter - RW */ /* Transmit Register Descriptions */ #define IGC_TCTL 0x00400 /* Tx Control - RW */ @@ -138,13 +124,9 @@ #define IGC_MMDAC 13 /* MMD Access Control */ #define IGC_MMDAAD 14 /* MMD Access Address/Data */ -/* Good transmitted packets counter registers */ -#define IGC_PQGPTC(_n) (0x010014 + (0x100 * (_n))) - /* Statistics Register Descriptions */ #define IGC_CRCERRS 0x04000 /* CRC Error Count - R/clr */ #define IGC_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ -#define IGC_SYMERRS 0x04008 /* Symbol Error Count - R/clr */ #define IGC_RXERRC 0x0400C /* Receive Error Count - R/clr */ #define IGC_MPC 0x04010 /* Missed Packet Count - R/clr */ #define IGC_SCC 0x04014 /* Single Collision Count - R/clr */ @@ -152,10 +134,10 @@ #define IGC_MCC 0x0401C /* Multiple Collision Count - R/clr */ #define IGC_LATECOL 0x04020 /* Late Collision Count - R/clr */ #define IGC_COLC 0x04028 /* Collision Count - R/clr */ +#define IGC_RERC 0x0402C /* Receive Error Count - R/clr */ #define IGC_DC 0x04030 /* Defer Count - R/clr */ #define IGC_TNCRS 0x04034 /* Tx-No CRS - R/clr */ -#define IGC_SEC 0x04038 /* Sequence Error Count - R/clr */ -#define IGC_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */ +#define IGC_HTDPMC 0x0403C /* Host Transmit Discarded by MAC - R/clr */ #define IGC_RLEC 0x04040 /* Receive Length Error Count - R/clr */ #define IGC_XONRXC 0x04048 /* XON Rx Count - R/clr */ #define IGC_XONTXC 0x0404C /* XON Tx Count - R/clr */ @@ -213,7 +195,6 @@ #define IGC_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */ #define IGC_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */ #define IGC_LENERRS 0x04138 /* Length Errors Count */ -#define IGC_HRMPC 0x0A018 /* Header Redirection Missed Packet Count */ /* Time sync registers */ #define IGC_TSICR 0x0B66C /* Time Sync Interrupt Cause */ @@ -229,8 +210,6 @@ #define IGC_FTQF(_n) (0x059E0 + (4 * (_n))) /* 5-tuple Queue Fltr */ -#define IGC_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ - /* Transmit Scheduling Registers */ #define IGC_TQAVCTRL 0x3570 #define IGC_TXQCTL(_n) (0x3344 + 0x4 * (_n)) @@ -277,8 +256,7 @@ u32 igc_rd32(struct igc_hw *hw, u32 reg); #define wr32(reg, val) \ do { \ u8 __iomem *hw_addr = READ_ONCE((hw)->hw_addr); \ - if (!IGC_REMOVED(hw_addr)) \ - writel((val), &hw_addr[(reg)]); \ + writel((val), &hw_addr[(reg)]); \ } while (0) #define rd32(reg) 
(igc_rd32(hw, reg)) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 2833e4f041ce..5ddfc83a1e46 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -224,17 +224,17 @@ struct ixgbe_tx_buffer { }; struct ixgbe_rx_buffer { - struct sk_buff *skb; - dma_addr_t dma; union { struct { + struct sk_buff *skb; + dma_addr_t dma; struct page *page; __u32 page_offset; __u16 pagecnt_bias; }; struct { - void *addr; - u64 handle; + bool discard; + struct xdp_buff *xdp; }; }; }; @@ -351,7 +351,6 @@ struct ixgbe_ring { }; struct xdp_rxq_info xdp_rxq; struct xdp_umem *xsk_umem; - struct zero_copy_allocator zca; /* ZC allocator anchor */ u16 ring_idx; /* {rx,tx,xdp}_ring back reference idx */ u16 rx_buf_len; } ____cacheline_internodealigned_in_smp; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 0bd1294ba517..17357a12cbdc 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -64,8 +64,7 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) hw->mac.ops.check_link(hw, &speed, &link_up, false); /* if link is down, assume supported */ if (link_up) - supported = speed == IXGBE_LINK_SPEED_1GB_FULL ? - true : false; + supported = speed == IXGBE_LINK_SPEED_1GB_FULL; else supported = true; } @@ -2243,7 +2242,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw) } /* Configure pause time (2 TCs per register) */ - reg = hw->fc.pause_time * 0x00010001; + reg = hw->fc.pause_time * 0x00010001U; for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index eab5934b04f5..a59c166f794f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -35,7 +35,7 @@ #include <net/tc_act/tc_mirred.h> #include <net/vxlan.h> #include <net/mpls.h> -#include <net/xdp_sock.h> +#include <net/xdp_sock_drv.h> #include <net/xfrm.h> #include "ixgbe.h" @@ -2973,35 +2973,6 @@ static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter, /* skip the flush */ } -static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter, - u64 qmask) -{ - u32 mask; - struct ixgbe_hw *hw = &adapter->hw; - - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - mask = (IXGBE_EIMS_RTX_QUEUE & qmask); - IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask); - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: - mask = (qmask & 0xFFFFFFFF); - if (mask) - IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask); - mask = (qmask >> 32); - if (mask) - IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask); - break; - default: - break; - } - /* skip the flush */ -} - /** * ixgbe_irq_enable - Enable default interrupt generation settings * @adapter: board private structure @@ -3745,8 +3716,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, /* configure the packet buffer length */ if (rx_ring->xsk_umem) { - u32 xsk_buf_len = rx_ring->xsk_umem->chunk_size_nohr - - XDP_PACKET_HEADROOM; + u32 xsk_buf_len = xsk_umem_get_rx_frame_size(rx_ring->xsk_umem); /* If the MAC support setting RXDCTL.RLPML, the * SRRCTL[n].BSIZEPKT is set to PAGE_SIZE and @@ -4093,11 +4063,10 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, 
xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); ring->xsk_umem = ixgbe_xsk_umem(adapter, ring); if (ring->xsk_umem) { - ring->zca.free = ixgbe_zca_free; WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, - MEM_TYPE_ZERO_COPY, - &ring->zca)); - + MEM_TYPE_XSK_BUFF_POOL, + NULL)); + xsk_buff_set_rxq_info(ring->xsk_umem, &ring->xdp_rxq); } else { WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, MEM_TYPE_PAGE_SHARED, NULL)); @@ -4153,8 +4122,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, } if (ring->xsk_umem && hw->mac.type != ixgbe_mac_82599EB) { - u32 xsk_buf_len = ring->xsk_umem->chunk_size_nohr - - XDP_PACKET_HEADROOM; + u32 xsk_buf_len = xsk_umem_get_rx_frame_size(ring->xsk_umem); rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK | IXGBE_RXDCTL_RLPML_EN); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 537dfff585e0..d05a5690e66b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -102,7 +102,7 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter, * indirection table and RSS hash key with PF therefore * we want to disable the querying by default. */ - adapter->vfinfo[i].rss_query_enabled = 0; + adapter->vfinfo[i].rss_query_enabled = false; /* Untrust all VFs */ adapter->vfinfo[i].trusted = false; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h index 6d01700b46bc..7887ae4aaf4f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h @@ -35,7 +35,7 @@ int ixgbe_xsk_umem_setup(struct ixgbe_adapter *adapter, struct xdp_umem *umem, void ixgbe_zca_free(struct zero_copy_allocator *alloc, unsigned long handle); -void ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 cleaned_count); +bool ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 cleaned_count); int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector, struct ixgbe_ring *rx_ring, const int budget); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c index a656ee9a1fae..86add9fbd36c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c @@ -2,7 +2,7 @@ /* Copyright(c) 2018 Intel Corporation. 
*/ #include <linux/bpf_trace.h> -#include <net/xdp_sock.h> +#include <net/xdp_sock_drv.h> #include <net/xdp.h> #include "ixgbe.h" @@ -20,54 +20,11 @@ struct xdp_umem *ixgbe_xsk_umem(struct ixgbe_adapter *adapter, return xdp_get_umem_from_qid(adapter->netdev, qid); } -static int ixgbe_xsk_umem_dma_map(struct ixgbe_adapter *adapter, - struct xdp_umem *umem) -{ - struct device *dev = &adapter->pdev->dev; - unsigned int i, j; - dma_addr_t dma; - - for (i = 0; i < umem->npgs; i++) { - dma = dma_map_page_attrs(dev, umem->pgs[i], 0, PAGE_SIZE, - DMA_BIDIRECTIONAL, IXGBE_RX_DMA_ATTR); - if (dma_mapping_error(dev, dma)) - goto out_unmap; - - umem->pages[i].dma = dma; - } - - return 0; - -out_unmap: - for (j = 0; j < i; j++) { - dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE, - DMA_BIDIRECTIONAL, IXGBE_RX_DMA_ATTR); - umem->pages[i].dma = 0; - } - - return -1; -} - -static void ixgbe_xsk_umem_dma_unmap(struct ixgbe_adapter *adapter, - struct xdp_umem *umem) -{ - struct device *dev = &adapter->pdev->dev; - unsigned int i; - - for (i = 0; i < umem->npgs; i++) { - dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE, - DMA_BIDIRECTIONAL, IXGBE_RX_DMA_ATTR); - - umem->pages[i].dma = 0; - } -} - static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter, struct xdp_umem *umem, u16 qid) { struct net_device *netdev = adapter->netdev; - struct xdp_umem_fq_reuse *reuseq; bool if_running; int err; @@ -78,13 +35,7 @@ static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter, qid >= netdev->real_num_tx_queues) return -EINVAL; - reuseq = xsk_reuseq_prepare(adapter->rx_ring[0]->count); - if (!reuseq) - return -ENOMEM; - - xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq)); - - err = ixgbe_xsk_umem_dma_map(adapter, umem); + err = xsk_buff_dma_map(umem, &adapter->pdev->dev, IXGBE_RX_DMA_ATTR); if (err) return err; @@ -124,7 +75,7 @@ static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid) ixgbe_txrx_ring_disable(adapter, qid); clear_bit(qid, adapter->af_xdp_zc_qps); - ixgbe_xsk_umem_dma_unmap(adapter, umem); + xsk_buff_dma_unmap(umem, IXGBE_RX_DMA_ATTR); if (if_running) ixgbe_txrx_ring_enable(adapter, qid); @@ -143,19 +94,14 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter, struct ixgbe_ring *rx_ring, struct xdp_buff *xdp) { - struct xdp_umem *umem = rx_ring->xsk_umem; int err, result = IXGBE_XDP_PASS; struct bpf_prog *xdp_prog; struct xdp_frame *xdpf; - u64 offset; u32 act; rcu_read_lock(); xdp_prog = READ_ONCE(rx_ring->xdp_prog); act = bpf_prog_run_xdp(xdp_prog, xdp); - offset = xdp->data - xdp->data_hard_start; - - xdp->handle = xsk_umem_adjust_offset(umem, xdp->handle, offset); switch (act) { case XDP_PASS: @@ -186,140 +132,16 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter, return result; } -static struct -ixgbe_rx_buffer *ixgbe_get_rx_buffer_zc(struct ixgbe_ring *rx_ring, - unsigned int size) -{ - struct ixgbe_rx_buffer *bi; - - bi = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; - - /* we are reusing so sync this buffer for CPU use */ - dma_sync_single_range_for_cpu(rx_ring->dev, - bi->dma, 0, - size, - DMA_BIDIRECTIONAL); - - return bi; -} - -static void ixgbe_reuse_rx_buffer_zc(struct ixgbe_ring *rx_ring, - struct ixgbe_rx_buffer *obi) -{ - u16 nta = rx_ring->next_to_alloc; - struct ixgbe_rx_buffer *nbi; - - nbi = &rx_ring->rx_buffer_info[rx_ring->next_to_alloc]; - /* update, and store next to alloc */ - nta++; - rx_ring->next_to_alloc = (nta < rx_ring->count) ? 
nta : 0; - - /* transfer page from old buffer to new buffer */ - nbi->dma = obi->dma; - nbi->addr = obi->addr; - nbi->handle = obi->handle; - - obi->addr = NULL; - obi->skb = NULL; -} - -void ixgbe_zca_free(struct zero_copy_allocator *alloc, unsigned long handle) -{ - struct ixgbe_rx_buffer *bi; - struct ixgbe_ring *rx_ring; - u64 hr, mask; - u16 nta; - - rx_ring = container_of(alloc, struct ixgbe_ring, zca); - hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM; - mask = rx_ring->xsk_umem->chunk_mask; - - nta = rx_ring->next_to_alloc; - bi = rx_ring->rx_buffer_info; - - nta++; - rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; - - handle &= mask; - - bi->dma = xdp_umem_get_dma(rx_ring->xsk_umem, handle); - bi->dma += hr; - - bi->addr = xdp_umem_get_data(rx_ring->xsk_umem, handle); - bi->addr += hr; - - bi->handle = xsk_umem_adjust_offset(rx_ring->xsk_umem, (u64)handle, - rx_ring->xsk_umem->headroom); -} - -static bool ixgbe_alloc_buffer_zc(struct ixgbe_ring *rx_ring, - struct ixgbe_rx_buffer *bi) -{ - struct xdp_umem *umem = rx_ring->xsk_umem; - void *addr = bi->addr; - u64 handle, hr; - - if (addr) - return true; - - if (!xsk_umem_peek_addr(umem, &handle)) { - rx_ring->rx_stats.alloc_rx_page_failed++; - return false; - } - - hr = umem->headroom + XDP_PACKET_HEADROOM; - - bi->dma = xdp_umem_get_dma(umem, handle); - bi->dma += hr; - - bi->addr = xdp_umem_get_data(umem, handle); - bi->addr += hr; - - bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom); - - xsk_umem_release_addr(umem); - return true; -} - -static bool ixgbe_alloc_buffer_slow_zc(struct ixgbe_ring *rx_ring, - struct ixgbe_rx_buffer *bi) -{ - struct xdp_umem *umem = rx_ring->xsk_umem; - u64 handle, hr; - - if (!xsk_umem_peek_addr_rq(umem, &handle)) { - rx_ring->rx_stats.alloc_rx_page_failed++; - return false; - } - - handle &= rx_ring->xsk_umem->chunk_mask; - - hr = umem->headroom + XDP_PACKET_HEADROOM; - - bi->dma = xdp_umem_get_dma(umem, handle); - bi->dma += hr; - - bi->addr = xdp_umem_get_data(umem, handle); - bi->addr += hr; - - bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom); - - xsk_umem_release_addr_rq(umem); - return true; -} - -static __always_inline bool -__ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 cleaned_count, - bool alloc(struct ixgbe_ring *rx_ring, - struct ixgbe_rx_buffer *bi)) +bool ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 count) { union ixgbe_adv_rx_desc *rx_desc; struct ixgbe_rx_buffer *bi; u16 i = rx_ring->next_to_use; + dma_addr_t dma; bool ok = true; /* nothing to do */ - if (!cleaned_count) + if (!count) return true; rx_desc = IXGBE_RX_DESC(rx_ring, i); @@ -327,21 +149,18 @@ __ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 cleaned_count, i -= rx_ring->count; do { - if (!alloc(rx_ring, bi)) { + bi->xdp = xsk_buff_alloc(rx_ring->xsk_umem); + if (!bi->xdp) { ok = false; break; } - /* sync the buffer for use by the device */ - dma_sync_single_range_for_device(rx_ring->dev, bi->dma, - bi->page_offset, - rx_ring->rx_buf_len, - DMA_BIDIRECTIONAL); + dma = xsk_buff_xdp_get_dma(bi->xdp); /* Refresh the desc even if buffer_addrs didn't change * because each write-back erases this info. 
*/ - rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); + rx_desc->read.pkt_addr = cpu_to_le64(dma); rx_desc++; bi++; @@ -355,17 +174,14 @@ __ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 cleaned_count, /* clear the length for the next_to_use descriptor */ rx_desc->wb.upper.length = 0; - cleaned_count--; - } while (cleaned_count); + count--; + } while (count); i += rx_ring->count; if (rx_ring->next_to_use != i) { rx_ring->next_to_use = i; - /* update next to alloc since we have filled the ring */ - rx_ring->next_to_alloc = i; - /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only * applicable for weak-ordered memory model archs, @@ -378,40 +194,27 @@ __ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 cleaned_count, return ok; } -void ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 count) -{ - __ixgbe_alloc_rx_buffers_zc(rx_ring, count, - ixgbe_alloc_buffer_slow_zc); -} - -static bool ixgbe_alloc_rx_buffers_fast_zc(struct ixgbe_ring *rx_ring, - u16 count) -{ - return __ixgbe_alloc_rx_buffers_zc(rx_ring, count, - ixgbe_alloc_buffer_zc); -} - static struct sk_buff *ixgbe_construct_skb_zc(struct ixgbe_ring *rx_ring, - struct ixgbe_rx_buffer *bi, - struct xdp_buff *xdp) + struct ixgbe_rx_buffer *bi) { - unsigned int metasize = xdp->data - xdp->data_meta; - unsigned int datasize = xdp->data_end - xdp->data; + unsigned int metasize = bi->xdp->data - bi->xdp->data_meta; + unsigned int datasize = bi->xdp->data_end - bi->xdp->data; struct sk_buff *skb; /* allocate a skb to store the frags */ skb = __napi_alloc_skb(&rx_ring->q_vector->napi, - xdp->data_end - xdp->data_hard_start, + bi->xdp->data_end - bi->xdp->data_hard_start, GFP_ATOMIC | __GFP_NOWARN); if (unlikely(!skb)) return NULL; - skb_reserve(skb, xdp->data - xdp->data_hard_start); - memcpy(__skb_put(skb, datasize), xdp->data, datasize); + skb_reserve(skb, bi->xdp->data - bi->xdp->data_hard_start); + memcpy(__skb_put(skb, datasize), bi->xdp->data, datasize); if (metasize) skb_metadata_set(skb, metasize); - ixgbe_reuse_rx_buffer_zc(rx_ring, bi); + xsk_buff_free(bi->xdp); + bi->xdp = NULL; return skb; } @@ -431,14 +234,9 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector, unsigned int total_rx_bytes = 0, total_rx_packets = 0; struct ixgbe_adapter *adapter = q_vector->adapter; u16 cleaned_count = ixgbe_desc_unused(rx_ring); - struct xdp_umem *umem = rx_ring->xsk_umem; unsigned int xdp_res, xdp_xmit = 0; bool failure = false; struct sk_buff *skb; - struct xdp_buff xdp; - - xdp.rxq = &rx_ring->xdp_rxq; - xdp.frame_sz = xsk_umem_xdp_frame_sz(umem); while (likely(total_rx_packets < budget)) { union ixgbe_adv_rx_desc *rx_desc; @@ -448,8 +246,8 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector, /* return some buffers to hardware, one at a time is too slow */ if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) { failure = failure || - !ixgbe_alloc_rx_buffers_fast_zc(rx_ring, - cleaned_count); + !ixgbe_alloc_rx_buffers_zc(rx_ring, + cleaned_count); cleaned_count = 0; } @@ -464,42 +262,40 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector, */ dma_rmb(); - bi = ixgbe_get_rx_buffer_zc(rx_ring, size); + bi = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; if (unlikely(!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))) { struct ixgbe_rx_buffer *next_bi; - ixgbe_reuse_rx_buffer_zc(rx_ring, bi); + xsk_buff_free(bi->xdp); + bi->xdp = NULL; ixgbe_inc_ntc(rx_ring); next_bi = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; - next_bi->skb = ERR_PTR(-EINVAL); + 
next_bi->discard = true; continue; } - if (unlikely(bi->skb)) { - ixgbe_reuse_rx_buffer_zc(rx_ring, bi); + if (unlikely(bi->discard)) { + xsk_buff_free(bi->xdp); + bi->xdp = NULL; + bi->discard = false; ixgbe_inc_ntc(rx_ring); continue; } - xdp.data = bi->addr; - xdp.data_meta = xdp.data; - xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM; - xdp.data_end = xdp.data + size; - xdp.handle = bi->handle; - - xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, &xdp); + bi->xdp->data_end = bi->xdp->data + size; + xsk_buff_dma_sync_for_cpu(bi->xdp); + xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, bi->xdp); if (xdp_res) { - if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) { + if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) xdp_xmit |= xdp_res; - bi->addr = NULL; - bi->skb = NULL; - } else { - ixgbe_reuse_rx_buffer_zc(rx_ring, bi); - } + else + xsk_buff_free(bi->xdp); + + bi->xdp = NULL; total_rx_packets++; total_rx_bytes += size; @@ -509,7 +305,7 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector, } /* XDP_PASS path */ - skb = ixgbe_construct_skb_zc(rx_ring, bi, &xdp); + skb = ixgbe_construct_skb_zc(rx_ring, bi); if (!skb) { rx_ring->rx_stats.alloc_rx_buff_failed++; break; @@ -561,17 +357,17 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector, void ixgbe_xsk_clean_rx_ring(struct ixgbe_ring *rx_ring) { - u16 i = rx_ring->next_to_clean; - struct ixgbe_rx_buffer *bi = &rx_ring->rx_buffer_info[i]; + struct ixgbe_rx_buffer *bi; + u16 i; - while (i != rx_ring->next_to_alloc) { - xsk_umem_fq_reuse(rx_ring->xsk_umem, bi->handle); - i++; - bi++; - if (i == rx_ring->count) { - i = 0; - bi = rx_ring->rx_buffer_info; - } + for (i = 0; i < rx_ring->count; i++) { + bi = &rx_ring->rx_buffer_info[i]; + + if (!bi->xdp) + continue; + + xsk_buff_free(bi->xdp); + bi->xdp = NULL; } } @@ -594,10 +390,9 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget) if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &desc)) break; - dma = xdp_umem_get_dma(xdp_ring->xsk_umem, desc.addr); - - dma_sync_single_for_device(xdp_ring->dev, dma, desc.len, - DMA_BIDIRECTIONAL); + dma = xsk_buff_raw_get_dma(xdp_ring->xsk_umem, desc.addr); + xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_umem, dma, + desc.len); tx_bi = &xdp_ring->tx_buffer_info[xdp_ring->next_to_use]; tx_bi->bytecount = desc.len; diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 41d2a0eac5fa..15e42a7f8a86 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -324,7 +324,8 @@ ETH_HLEN + ETH_FCS_LEN, \ cache_line_size()) -#define MVNETA_SKB_HEADROOM max(XDP_PACKET_HEADROOM, NET_SKB_PAD) +/* Driver assumes that the last 3 bits are 0 */ +#define MVNETA_SKB_HEADROOM (max(XDP_PACKET_HEADROOM, NET_SKB_PAD) & ~0x7) #define MVNETA_SKB_PAD (SKB_DATA_ALIGN(sizeof(struct skb_shared_info) + \ MVNETA_SKB_HEADROOM)) #define MVNETA_SKB_SIZE(len) (SKB_DATA_ALIGN(len) + MVNETA_SKB_PAD) @@ -3566,10 +3567,6 @@ static void mvneta_start_dev(struct mvneta_port *pp) MVNETA_CAUSE_LINK_CHANGE); phylink_start(pp->phylink); - - /* We may have called phy_speed_down before */ - phy_speed_up(pp->dev->phydev); - netif_tx_start_all_queues(pp->dev); } @@ -3577,9 +3574,6 @@ static void mvneta_stop_dev(struct mvneta_port *pp) { unsigned int cpu; - if (device_may_wakeup(&pp->dev->dev)) - phy_speed_down(pp->dev->phydev, false); - phylink_stop(pp->phylink); if (!pp->neta_armada3700) { @@ -4052,10 +4046,6 @@ static int mvneta_mdio_probe(struct mvneta_port *pp) 
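One note on the MVNETA_SKB_HEADROOM change above: the RX DMA needs buffer pointers with the low three bits clear, hence the new "& ~0x7". With the usual defaults the value is unchanged; the mask only guards against a future constant that is not already a multiple of 8. A worked illustration (the numbers are the common mainline defaults, not taken from this patch):

/* Illustration only: XDP_PACKET_HEADROOM is 256 and NET_SKB_PAD is
 * commonly 32 or 64, so max() yields 256 and 256 & ~0x7 == 256.
 * Were the max ever, say, 250, the macro would round down to 248,
 * keeping the low three bits zero as the hardware requires.
 */
#define MVNETA_SKB_HEADROOM	(max(XDP_PACKET_HEADROOM, NET_SKB_PAD) & ~0x7)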
phylink_ethtool_get_wol(pp->phylink, &wol); device_set_wakeup_capable(&pp->dev->dev, !!wol.supported); - /* PHY WoL may be enabled but device wakeup disabled */ - if (wol.supported) - device_set_wakeup_enable(&pp->dev->dev, !!wol.wolopts); - return err; } diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c index 7352244c5e68..d4a4e241333d 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c @@ -1070,7 +1070,7 @@ void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port) (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS)); val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG); - val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id); + val &= ~MVPP2_CLS_SWFWD_PCTRL_MASK(port->id); mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val); } diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index 7a0d785b826c..17243bb5ba91 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -1418,7 +1418,7 @@ static int pxa168_eth_probe(struct platform_device *pdev) pep->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(pep->base)) { - err = -ENOMEM; + err = PTR_ERR(pep->base); goto err_netdev; } diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig index 4968352ba188..500c15e7ea4a 100644 --- a/drivers/net/ethernet/mediatek/Kconfig +++ b/drivers/net/ethernet/mediatek/Kconfig @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only config NET_VENDOR_MEDIATEK - bool "MediaTek ethernet driver" + bool "MediaTek devices" depends on ARCH_MEDIATEK || SOC_MT7621 || SOC_MT7620 ---help--- If you have a Mediatek SoC with ethernet, say Y. @@ -14,4 +14,11 @@ config NET_MEDIATEK_SOC This driver supports the gigabit ethernet MACs in the MediaTek SoC family. +config NET_MEDIATEK_STAR_EMAC + tristate "MediaTek STAR Ethernet MAC support" + select PHYLIB + help + This driver supports the ethernet MAC IP first used on + MediaTek MT85** SoCs. 
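The pxa168 one-liner above is the standard error-propagation idiom for devm_platform_ioremap_resource(): the returned ERR_PTR already encodes the precise cause, so overwriting it with -ENOMEM loses information (-EBUSY for a conflicting region, for instance). A generic sketch of the shape, simplified from the driver's goto-based error path:

/* Generic sketch, not pxa168's code. */
static int example_probe(struct platform_device *pdev)
{
	void __iomem *base;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);	/* propagate the real error code */

	return 0;
}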
+ endif #NET_VENDOR_MEDIATEK diff --git a/drivers/net/ethernet/mediatek/Makefile b/drivers/net/ethernet/mediatek/Makefile index 2d8362f9341b..3a777b4a6cd3 100644 --- a/drivers/net/ethernet/mediatek/Makefile +++ b/drivers/net/ethernet/mediatek/Makefile @@ -3,5 +3,6 @@ # Makefile for the Mediatek SoCs built-in ethernet macs # -obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o +obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o +obj-$(CONFIG_NET_MEDIATEK_STAR_EMAC) += mtk_star_emac.o diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c new file mode 100644 index 000000000000..7df35872c107 --- /dev/null +++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c @@ -0,0 +1,1661 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2020 MediaTek Corporation + * Copyright (c) 2020 BayLibre SAS + * + * Author: Bartosz Golaszewski <bgolaszewski@baylibre.com> + */ + +#include <linux/bits.h> +#include <linux/clk.h> +#include <linux/compiler.h> +#include <linux/dma-mapping.h> +#include <linux/etherdevice.h> +#include <linux/kernel.h> +#include <linux/mfd/syscon.h> +#include <linux/mii.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/of.h> +#include <linux/of_mdio.h> +#include <linux/of_net.h> +#include <linux/platform_device.h> +#include <linux/pm.h> +#include <linux/regmap.h> +#include <linux/skbuff.h> +#include <linux/spinlock.h> +#include <linux/workqueue.h> + +#define MTK_STAR_DRVNAME "mtk_star_emac" + +#define MTK_STAR_WAIT_TIMEOUT 300 +#define MTK_STAR_MAX_FRAME_SIZE 1514 +#define MTK_STAR_SKB_ALIGNMENT 16 +#define MTK_STAR_NAPI_WEIGHT 64 +#define MTK_STAR_HASHTABLE_MC_LIMIT 256 +#define MTK_STAR_HASHTABLE_SIZE_MAX 512 + +/* Normally we'd use NET_IP_ALIGN but on arm64 its value is 0 and it doesn't + * work for this controller. 
+ */ +#define MTK_STAR_IP_ALIGN 2 + +static const char *const mtk_star_clk_names[] = { "core", "reg", "trans" }; +#define MTK_STAR_NCLKS ARRAY_SIZE(mtk_star_clk_names) + +/* PHY Control Register 0 */ +#define MTK_STAR_REG_PHY_CTRL0 0x0000 +#define MTK_STAR_BIT_PHY_CTRL0_WTCMD BIT(13) +#define MTK_STAR_BIT_PHY_CTRL0_RDCMD BIT(14) +#define MTK_STAR_BIT_PHY_CTRL0_RWOK BIT(15) +#define MTK_STAR_MSK_PHY_CTRL0_PREG GENMASK(12, 8) +#define MTK_STAR_OFF_PHY_CTRL0_PREG 8 +#define MTK_STAR_MSK_PHY_CTRL0_RWDATA GENMASK(31, 16) +#define MTK_STAR_OFF_PHY_CTRL0_RWDATA 16 + +/* PHY Control Register 1 */ +#define MTK_STAR_REG_PHY_CTRL1 0x0004 +#define MTK_STAR_BIT_PHY_CTRL1_LINK_ST BIT(0) +#define MTK_STAR_BIT_PHY_CTRL1_AN_EN BIT(8) +#define MTK_STAR_OFF_PHY_CTRL1_FORCE_SPD 9 +#define MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_10M 0x00 +#define MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_100M 0x01 +#define MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_1000M 0x02 +#define MTK_STAR_BIT_PHY_CTRL1_FORCE_DPX BIT(11) +#define MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_RX BIT(12) +#define MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_TX BIT(13) + +/* MAC Configuration Register */ +#define MTK_STAR_REG_MAC_CFG 0x0008 +#define MTK_STAR_OFF_MAC_CFG_IPG 10 +#define MTK_STAR_VAL_MAC_CFG_IPG_96BIT GENMASK(4, 0) +#define MTK_STAR_BIT_MAC_CFG_MAXLEN_1522 BIT(16) +#define MTK_STAR_BIT_MAC_CFG_AUTO_PAD BIT(19) +#define MTK_STAR_BIT_MAC_CFG_CRC_STRIP BIT(20) +#define MTK_STAR_BIT_MAC_CFG_VLAN_STRIP BIT(22) +#define MTK_STAR_BIT_MAC_CFG_NIC_PD BIT(31) + +/* Flow-Control Configuration Register */ +#define MTK_STAR_REG_FC_CFG 0x000c +#define MTK_STAR_BIT_FC_CFG_BP_EN BIT(7) +#define MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR BIT(8) +#define MTK_STAR_OFF_FC_CFG_SEND_PAUSE_TH 16 +#define MTK_STAR_MSK_FC_CFG_SEND_PAUSE_TH GENMASK(27, 16) +#define MTK_STAR_VAL_FC_CFG_SEND_PAUSE_TH_2K 0x800 + +/* ARL Configuration Register */ +#define MTK_STAR_REG_ARL_CFG 0x0010 +#define MTK_STAR_BIT_ARL_CFG_HASH_ALG BIT(0) +#define MTK_STAR_BIT_ARL_CFG_MISC_MODE BIT(4) + +/* MAC High and Low Bytes Registers */ +#define MTK_STAR_REG_MY_MAC_H 0x0014 +#define MTK_STAR_REG_MY_MAC_L 0x0018 + +/* Hash Table Control Register */ +#define MTK_STAR_REG_HASH_CTRL 0x001c +#define MTK_STAR_MSK_HASH_CTRL_HASH_BIT_ADDR GENMASK(8, 0) +#define MTK_STAR_BIT_HASH_CTRL_HASH_BIT_DATA BIT(12) +#define MTK_STAR_BIT_HASH_CTRL_ACC_CMD BIT(13) +#define MTK_STAR_BIT_HASH_CTRL_CMD_START BIT(14) +#define MTK_STAR_BIT_HASH_CTRL_BIST_OK BIT(16) +#define MTK_STAR_BIT_HASH_CTRL_BIST_DONE BIT(17) +#define MTK_STAR_BIT_HASH_CTRL_BIST_EN BIT(31) + +/* TX DMA Control Register */ +#define MTK_STAR_REG_TX_DMA_CTRL 0x0034 +#define MTK_STAR_BIT_TX_DMA_CTRL_START BIT(0) +#define MTK_STAR_BIT_TX_DMA_CTRL_STOP BIT(1) +#define MTK_STAR_BIT_TX_DMA_CTRL_RESUME BIT(2) + +/* RX DMA Control Register */ +#define MTK_STAR_REG_RX_DMA_CTRL 0x0038 +#define MTK_STAR_BIT_RX_DMA_CTRL_START BIT(0) +#define MTK_STAR_BIT_RX_DMA_CTRL_STOP BIT(1) +#define MTK_STAR_BIT_RX_DMA_CTRL_RESUME BIT(2) + +/* DMA Address Registers */ +#define MTK_STAR_REG_TX_DPTR 0x003c +#define MTK_STAR_REG_RX_DPTR 0x0040 +#define MTK_STAR_REG_TX_BASE_ADDR 0x0044 +#define MTK_STAR_REG_RX_BASE_ADDR 0x0048 + +/* Interrupt Status Register */ +#define MTK_STAR_REG_INT_STS 0x0050 +#define MTK_STAR_REG_INT_STS_PORT_STS_CHG BIT(2) +#define MTK_STAR_REG_INT_STS_MIB_CNT_TH BIT(3) +#define MTK_STAR_BIT_INT_STS_FNRC BIT(6) +#define MTK_STAR_BIT_INT_STS_TNTC BIT(8) + +/* Interrupt Mask Register */ +#define MTK_STAR_REG_INT_MASK 0x0054 +#define MTK_STAR_BIT_INT_MASK_FNRC BIT(6) + +/* Misc. 
Config Register */ +#define MTK_STAR_REG_TEST1 0x005c +#define MTK_STAR_BIT_TEST1_RST_HASH_MBIST BIT(31) + +/* Extended Configuration Register */ +#define MTK_STAR_REG_EXT_CFG 0x0060 +#define MTK_STAR_OFF_EXT_CFG_SND_PAUSE_RLS 16 +#define MTK_STAR_MSK_EXT_CFG_SND_PAUSE_RLS GENMASK(26, 16) +#define MTK_STAR_VAL_EXT_CFG_SND_PAUSE_RLS_1K 0x400 + +/* EthSys Configuration Register */ +#define MTK_STAR_REG_SYS_CONF 0x0094 +#define MTK_STAR_BIT_MII_PAD_OUT_ENABLE BIT(0) +#define MTK_STAR_BIT_EXT_MDC_MODE BIT(1) +#define MTK_STAR_BIT_SWC_MII_MODE BIT(2) + +/* MAC Clock Configuration Register */ +#define MTK_STAR_REG_MAC_CLK_CONF 0x00ac +#define MTK_STAR_MSK_MAC_CLK_CONF GENMASK(7, 0) +#define MTK_STAR_BIT_CLK_DIV_10 0x0a + +/* Counter registers. */ +#define MTK_STAR_REG_C_RXOKPKT 0x0100 +#define MTK_STAR_REG_C_RXOKBYTE 0x0104 +#define MTK_STAR_REG_C_RXRUNT 0x0108 +#define MTK_STAR_REG_C_RXLONG 0x010c +#define MTK_STAR_REG_C_RXDROP 0x0110 +#define MTK_STAR_REG_C_RXCRC 0x0114 +#define MTK_STAR_REG_C_RXARLDROP 0x0118 +#define MTK_STAR_REG_C_RXVLANDROP 0x011c +#define MTK_STAR_REG_C_RXCSERR 0x0120 +#define MTK_STAR_REG_C_RXPAUSE 0x0124 +#define MTK_STAR_REG_C_TXOKPKT 0x0128 +#define MTK_STAR_REG_C_TXOKBYTE 0x012c +#define MTK_STAR_REG_C_TXPAUSECOL 0x0130 +#define MTK_STAR_REG_C_TXRTY 0x0134 +#define MTK_STAR_REG_C_TXSKIP 0x0138 +#define MTK_STAR_REG_C_TX_ARP 0x013c +#define MTK_STAR_REG_C_RX_RERR 0x01d8 +#define MTK_STAR_REG_C_RX_UNI 0x01dc +#define MTK_STAR_REG_C_RX_MULTI 0x01e0 +#define MTK_STAR_REG_C_RX_BROAD 0x01e4 +#define MTK_STAR_REG_C_RX_ALIGNERR 0x01e8 +#define MTK_STAR_REG_C_TX_UNI 0x01ec +#define MTK_STAR_REG_C_TX_MULTI 0x01f0 +#define MTK_STAR_REG_C_TX_BROAD 0x01f4 +#define MTK_STAR_REG_C_TX_TIMEOUT 0x01f8 +#define MTK_STAR_REG_C_TX_LATECOL 0x01fc +#define MTK_STAR_REG_C_RX_LENGTHERR 0x0214 +#define MTK_STAR_REG_C_RX_TWIST 0x0218 + +/* Ethernet CFG Control */ +#define MTK_PERICFG_REG_NIC_CFG_CON 0x03c4 +#define MTK_PERICFG_MSK_NIC_CFG_CON_CFG_MII GENMASK(3, 0) +#define MTK_PERICFG_BIT_NIC_CFG_CON_RMII BIT(0) + +/* Represents the actual structure of descriptors used by the MAC. We can + * reuse the same structure for both TX and RX - the layout is the same, only + * the flags differ slightly. + */ +struct mtk_star_ring_desc { + /* Contains both the status flags as well as packet length. */ + u32 status; + u32 data_ptr; + u32 vtag; + u32 reserved; +}; + +#define MTK_STAR_DESC_MSK_LEN GENMASK(15, 0) +#define MTK_STAR_DESC_BIT_RX_CRCE BIT(24) +#define MTK_STAR_DESC_BIT_RX_OSIZE BIT(25) +#define MTK_STAR_DESC_BIT_INT BIT(27) +#define MTK_STAR_DESC_BIT_LS BIT(28) +#define MTK_STAR_DESC_BIT_FS BIT(29) +#define MTK_STAR_DESC_BIT_EOR BIT(30) +#define MTK_STAR_DESC_BIT_COWN BIT(31) + +/* Helper structure for storing data read from/written to descriptors in order + * to limit reads from/writes to DMA memory. 
+ */ +struct mtk_star_ring_desc_data { + unsigned int len; + unsigned int flags; + dma_addr_t dma_addr; + struct sk_buff *skb; +}; + +#define MTK_STAR_RING_NUM_DESCS 128 +#define MTK_STAR_NUM_TX_DESCS MTK_STAR_RING_NUM_DESCS +#define MTK_STAR_NUM_RX_DESCS MTK_STAR_RING_NUM_DESCS +#define MTK_STAR_NUM_DESCS_TOTAL (MTK_STAR_RING_NUM_DESCS * 2) +#define MTK_STAR_DMA_SIZE \ + (MTK_STAR_NUM_DESCS_TOTAL * sizeof(struct mtk_star_ring_desc)) + +struct mtk_star_ring { + struct mtk_star_ring_desc *descs; + struct sk_buff *skbs[MTK_STAR_RING_NUM_DESCS]; + dma_addr_t dma_addrs[MTK_STAR_RING_NUM_DESCS]; + unsigned int head; + unsigned int tail; +}; + +struct mtk_star_priv { + struct net_device *ndev; + + struct regmap *regs; + struct regmap *pericfg; + + struct clk_bulk_data clks[MTK_STAR_NCLKS]; + + void *ring_base; + struct mtk_star_ring_desc *descs_base; + dma_addr_t dma_addr; + struct mtk_star_ring tx_ring; + struct mtk_star_ring rx_ring; + + struct mii_bus *mii; + struct napi_struct napi; + + struct device_node *phy_node; + phy_interface_t phy_intf; + struct phy_device *phydev; + unsigned int link; + int speed; + int duplex; + int pause; + + /* Protects against concurrent descriptor access. */ + spinlock_t lock; + + struct rtnl_link_stats64 stats; + struct work_struct stats_work; +}; + +static struct device *mtk_star_get_dev(struct mtk_star_priv *priv) +{ + return priv->ndev->dev.parent; +} + +static const struct regmap_config mtk_star_regmap_config = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, + .disable_locking = true, +}; + +static void mtk_star_ring_init(struct mtk_star_ring *ring, + struct mtk_star_ring_desc *descs) +{ + memset(ring, 0, sizeof(*ring)); + ring->descs = descs; + ring->head = 0; + ring->tail = 0; +} + +static int mtk_star_ring_pop_tail(struct mtk_star_ring *ring, + struct mtk_star_ring_desc_data *desc_data) +{ + struct mtk_star_ring_desc *desc = &ring->descs[ring->tail]; + unsigned int status; + + status = READ_ONCE(desc->status); + dma_rmb(); /* Make sure we read the status bits before checking it. */ + + if (!(status & MTK_STAR_DESC_BIT_COWN)) + return -1; + + desc_data->len = status & MTK_STAR_DESC_MSK_LEN; + desc_data->flags = status & ~MTK_STAR_DESC_MSK_LEN; + desc_data->dma_addr = ring->dma_addrs[ring->tail]; + desc_data->skb = ring->skbs[ring->tail]; + + ring->dma_addrs[ring->tail] = 0; + ring->skbs[ring->tail] = NULL; + + status &= MTK_STAR_DESC_BIT_COWN | MTK_STAR_DESC_BIT_EOR; + + WRITE_ONCE(desc->data_ptr, 0); + WRITE_ONCE(desc->status, status); + + ring->tail = (ring->tail + 1) % MTK_STAR_RING_NUM_DESCS; + + return 0; +} + +static void mtk_star_ring_push_head(struct mtk_star_ring *ring, + struct mtk_star_ring_desc_data *desc_data, + unsigned int flags) +{ + struct mtk_star_ring_desc *desc = &ring->descs[ring->head]; + unsigned int status; + + status = READ_ONCE(desc->status); + + ring->skbs[ring->head] = desc_data->skb; + ring->dma_addrs[ring->head] = desc_data->dma_addr; + + status |= desc_data->len; + if (flags) + status |= flags; + + WRITE_ONCE(desc->data_ptr, desc_data->dma_addr); + WRITE_ONCE(desc->status, status); + status &= ~MTK_STAR_DESC_BIT_COWN; + /* Flush previous modifications before ownership change. 
*/ + dma_wmb(); + WRITE_ONCE(desc->status, status); + + ring->head = (ring->head + 1) % MTK_STAR_RING_NUM_DESCS; +} + +static void +mtk_star_ring_push_head_rx(struct mtk_star_ring *ring, + struct mtk_star_ring_desc_data *desc_data) +{ + mtk_star_ring_push_head(ring, desc_data, 0); +} + +static void +mtk_star_ring_push_head_tx(struct mtk_star_ring *ring, + struct mtk_star_ring_desc_data *desc_data) +{ + static const unsigned int flags = MTK_STAR_DESC_BIT_FS | + MTK_STAR_DESC_BIT_LS | + MTK_STAR_DESC_BIT_INT; + + mtk_star_ring_push_head(ring, desc_data, flags); +} + +static unsigned int mtk_star_ring_num_used_descs(struct mtk_star_ring *ring) +{ + return abs(ring->head - ring->tail); +} + +static bool mtk_star_ring_full(struct mtk_star_ring *ring) +{ + return mtk_star_ring_num_used_descs(ring) == MTK_STAR_RING_NUM_DESCS; +} + +static bool mtk_star_ring_descs_available(struct mtk_star_ring *ring) +{ + return mtk_star_ring_num_used_descs(ring) > 0; +} + +static dma_addr_t mtk_star_dma_map_rx(struct mtk_star_priv *priv, + struct sk_buff *skb) +{ + struct device *dev = mtk_star_get_dev(priv); + + /* Data pointer for the RX DMA descriptor must be aligned to 4N + 2. */ + return dma_map_single(dev, skb_tail_pointer(skb) - 2, + skb_tailroom(skb), DMA_FROM_DEVICE); +} + +static void mtk_star_dma_unmap_rx(struct mtk_star_priv *priv, + struct mtk_star_ring_desc_data *desc_data) +{ + struct device *dev = mtk_star_get_dev(priv); + + dma_unmap_single(dev, desc_data->dma_addr, + skb_tailroom(desc_data->skb), DMA_FROM_DEVICE); +} + +static dma_addr_t mtk_star_dma_map_tx(struct mtk_star_priv *priv, + struct sk_buff *skb) +{ + struct device *dev = mtk_star_get_dev(priv); + + return dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); +} + +static void mtk_star_dma_unmap_tx(struct mtk_star_priv *priv, + struct mtk_star_ring_desc_data *desc_data) +{ + struct device *dev = mtk_star_get_dev(priv); + + return dma_unmap_single(dev, desc_data->dma_addr, + skb_headlen(desc_data->skb), DMA_TO_DEVICE); +} + +static void mtk_star_nic_disable_pd(struct mtk_star_priv *priv) +{ + regmap_update_bits(priv->regs, MTK_STAR_REG_MAC_CFG, + MTK_STAR_BIT_MAC_CFG_NIC_PD, 0); +} + +/* Unmask the three interrupts we care about, mask all others. 
*/ +static void mtk_star_intr_enable(struct mtk_star_priv *priv) +{ + unsigned int val = MTK_STAR_BIT_INT_STS_TNTC | + MTK_STAR_BIT_INT_STS_FNRC | + MTK_STAR_REG_INT_STS_MIB_CNT_TH; + + regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, ~val); +} + +static void mtk_star_intr_disable(struct mtk_star_priv *priv) +{ + regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, ~0); +} + +static void mtk_star_intr_enable_tx(struct mtk_star_priv *priv) +{ + regmap_update_bits(priv->regs, MTK_STAR_REG_INT_MASK, + MTK_STAR_BIT_INT_STS_TNTC, 0); +} + +static void mtk_star_intr_enable_rx(struct mtk_star_priv *priv) +{ + regmap_update_bits(priv->regs, MTK_STAR_REG_INT_MASK, + MTK_STAR_BIT_INT_STS_FNRC, 0); +} + +static void mtk_star_intr_enable_stats(struct mtk_star_priv *priv) +{ + regmap_update_bits(priv->regs, MTK_STAR_REG_INT_MASK, + MTK_STAR_REG_INT_STS_MIB_CNT_TH, 0); +} + +static void mtk_star_intr_disable_tx(struct mtk_star_priv *priv) +{ + regmap_update_bits(priv->regs, MTK_STAR_REG_INT_MASK, + MTK_STAR_BIT_INT_STS_TNTC, + MTK_STAR_BIT_INT_STS_TNTC); +} + +static void mtk_star_intr_disable_rx(struct mtk_star_priv *priv) +{ + regmap_update_bits(priv->regs, MTK_STAR_REG_INT_MASK, + MTK_STAR_BIT_INT_STS_FNRC, + MTK_STAR_BIT_INT_STS_FNRC); +} + +static void mtk_star_intr_disable_stats(struct mtk_star_priv *priv) +{ + regmap_update_bits(priv->regs, MTK_STAR_REG_INT_MASK, + MTK_STAR_REG_INT_STS_MIB_CNT_TH, + MTK_STAR_REG_INT_STS_MIB_CNT_TH); +} + +static unsigned int mtk_star_intr_read(struct mtk_star_priv *priv) +{ + unsigned int val; + + regmap_read(priv->regs, MTK_STAR_REG_INT_STS, &val); + + return val; +} + +static unsigned int mtk_star_intr_ack_all(struct mtk_star_priv *priv) +{ + unsigned int val; + + val = mtk_star_intr_read(priv); + regmap_write(priv->regs, MTK_STAR_REG_INT_STS, val); + + return val; +} + +static void mtk_star_dma_init(struct mtk_star_priv *priv) +{ + struct mtk_star_ring_desc *desc; + unsigned int val; + int i; + + priv->descs_base = (struct mtk_star_ring_desc *)priv->ring_base; + + for (i = 0; i < MTK_STAR_NUM_DESCS_TOTAL; i++) { + desc = &priv->descs_base[i]; + + memset(desc, 0, sizeof(*desc)); + desc->status = MTK_STAR_DESC_BIT_COWN; + if ((i == MTK_STAR_NUM_TX_DESCS - 1) || + (i == MTK_STAR_NUM_DESCS_TOTAL - 1)) + desc->status |= MTK_STAR_DESC_BIT_EOR; + } + + mtk_star_ring_init(&priv->tx_ring, priv->descs_base); + mtk_star_ring_init(&priv->rx_ring, + priv->descs_base + MTK_STAR_NUM_TX_DESCS); + + /* Set DMA pointers. */ + val = (unsigned int)priv->dma_addr; + regmap_write(priv->regs, MTK_STAR_REG_TX_BASE_ADDR, val); + regmap_write(priv->regs, MTK_STAR_REG_TX_DPTR, val); + + val += sizeof(struct mtk_star_ring_desc) * MTK_STAR_NUM_TX_DESCS; + regmap_write(priv->regs, MTK_STAR_REG_RX_BASE_ADDR, val); + regmap_write(priv->regs, MTK_STAR_REG_RX_DPTR, val); +} + +static void mtk_star_dma_start(struct mtk_star_priv *priv) +{ + regmap_update_bits(priv->regs, MTK_STAR_REG_TX_DMA_CTRL, + MTK_STAR_BIT_TX_DMA_CTRL_START, + MTK_STAR_BIT_TX_DMA_CTRL_START); + regmap_update_bits(priv->regs, MTK_STAR_REG_RX_DMA_CTRL, + MTK_STAR_BIT_RX_DMA_CTRL_START, + MTK_STAR_BIT_RX_DMA_CTRL_START); +} + +static void mtk_star_dma_stop(struct mtk_star_priv *priv) +{ + regmap_write(priv->regs, MTK_STAR_REG_TX_DMA_CTRL, + MTK_STAR_BIT_TX_DMA_CTRL_STOP); + regmap_write(priv->regs, MTK_STAR_REG_RX_DMA_CTRL, + MTK_STAR_BIT_RX_DMA_CTRL_STOP); +} + +static void mtk_star_dma_disable(struct mtk_star_priv *priv) +{ + int i; + + mtk_star_dma_stop(priv); + + /* Take back all descriptors. 
*/ + for (i = 0; i < MTK_STAR_NUM_DESCS_TOTAL; i++) + priv->descs_base[i].status |= MTK_STAR_DESC_BIT_COWN; +} + +static void mtk_star_dma_resume_rx(struct mtk_star_priv *priv) +{ + regmap_update_bits(priv->regs, MTK_STAR_REG_RX_DMA_CTRL, + MTK_STAR_BIT_RX_DMA_CTRL_RESUME, + MTK_STAR_BIT_RX_DMA_CTRL_RESUME); +} + +static void mtk_star_dma_resume_tx(struct mtk_star_priv *priv) +{ + regmap_update_bits(priv->regs, MTK_STAR_REG_TX_DMA_CTRL, + MTK_STAR_BIT_TX_DMA_CTRL_RESUME, + MTK_STAR_BIT_TX_DMA_CTRL_RESUME); +} + +static void mtk_star_set_mac_addr(struct net_device *ndev) +{ + struct mtk_star_priv *priv = netdev_priv(ndev); + u8 *mac_addr = ndev->dev_addr; + unsigned int high, low; + + high = mac_addr[0] << 8 | mac_addr[1] << 0; + low = mac_addr[2] << 24 | mac_addr[3] << 16 | + mac_addr[4] << 8 | mac_addr[5]; + + regmap_write(priv->regs, MTK_STAR_REG_MY_MAC_H, high); + regmap_write(priv->regs, MTK_STAR_REG_MY_MAC_L, low); +} + +static void mtk_star_reset_counters(struct mtk_star_priv *priv) +{ + static const unsigned int counter_regs[] = { + MTK_STAR_REG_C_RXOKPKT, + MTK_STAR_REG_C_RXOKBYTE, + MTK_STAR_REG_C_RXRUNT, + MTK_STAR_REG_C_RXLONG, + MTK_STAR_REG_C_RXDROP, + MTK_STAR_REG_C_RXCRC, + MTK_STAR_REG_C_RXARLDROP, + MTK_STAR_REG_C_RXVLANDROP, + MTK_STAR_REG_C_RXCSERR, + MTK_STAR_REG_C_RXPAUSE, + MTK_STAR_REG_C_TXOKPKT, + MTK_STAR_REG_C_TXOKBYTE, + MTK_STAR_REG_C_TXPAUSECOL, + MTK_STAR_REG_C_TXRTY, + MTK_STAR_REG_C_TXSKIP, + MTK_STAR_REG_C_TX_ARP, + MTK_STAR_REG_C_RX_RERR, + MTK_STAR_REG_C_RX_UNI, + MTK_STAR_REG_C_RX_MULTI, + MTK_STAR_REG_C_RX_BROAD, + MTK_STAR_REG_C_RX_ALIGNERR, + MTK_STAR_REG_C_TX_UNI, + MTK_STAR_REG_C_TX_MULTI, + MTK_STAR_REG_C_TX_BROAD, + MTK_STAR_REG_C_TX_TIMEOUT, + MTK_STAR_REG_C_TX_LATECOL, + MTK_STAR_REG_C_RX_LENGTHERR, + MTK_STAR_REG_C_RX_TWIST, + }; + + unsigned int i, val; + + for (i = 0; i < ARRAY_SIZE(counter_regs); i++) + regmap_read(priv->regs, counter_regs[i], &val); +} + +static void mtk_star_update_stat(struct mtk_star_priv *priv, + unsigned int reg, u64 *stat) +{ + unsigned int val; + + regmap_read(priv->regs, reg, &val); + *stat += val; +} + +/* Try to get as many stats as possible from the internal registers instead + * of tracking them ourselves. + */ +static void mtk_star_update_stats(struct mtk_star_priv *priv) +{ + struct rtnl_link_stats64 *stats = &priv->stats; + + /* OK packets and bytes. */ + mtk_star_update_stat(priv, MTK_STAR_REG_C_RXOKPKT, &stats->rx_packets); + mtk_star_update_stat(priv, MTK_STAR_REG_C_TXOKPKT, &stats->tx_packets); + mtk_star_update_stat(priv, MTK_STAR_REG_C_RXOKBYTE, &stats->rx_bytes); + mtk_star_update_stat(priv, MTK_STAR_REG_C_TXOKBYTE, &stats->tx_bytes); + + /* RX & TX multicast. */ + mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_MULTI, &stats->multicast); + mtk_star_update_stat(priv, MTK_STAR_REG_C_TX_MULTI, &stats->multicast); + + /* Collisions. */ + mtk_star_update_stat(priv, MTK_STAR_REG_C_TXPAUSECOL, + &stats->collisions); + mtk_star_update_stat(priv, MTK_STAR_REG_C_TX_LATECOL, + &stats->collisions); + mtk_star_update_stat(priv, MTK_STAR_REG_C_RXRUNT, &stats->collisions); + + /* RX Errors. 
*/ + mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_LENGTHERR, + &stats->rx_length_errors); + mtk_star_update_stat(priv, MTK_STAR_REG_C_RXLONG, + &stats->rx_over_errors); + mtk_star_update_stat(priv, MTK_STAR_REG_C_RXCRC, &stats->rx_crc_errors); + mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_ALIGNERR, + &stats->rx_frame_errors); + mtk_star_update_stat(priv, MTK_STAR_REG_C_RXDROP, + &stats->rx_fifo_errors); + /* Sum of the general RX error counter + all of the above. */ + mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_RERR, &stats->rx_errors); + stats->rx_errors += stats->rx_length_errors; + stats->rx_errors += stats->rx_over_errors; + stats->rx_errors += stats->rx_crc_errors; + stats->rx_errors += stats->rx_frame_errors; + stats->rx_errors += stats->rx_fifo_errors; +} + +/* This runs in process context and parallel TX and RX paths executing in + * napi context may result in losing some stats data but this should happen + * seldom enough to be acceptable. + */ +static void mtk_star_update_stats_work(struct work_struct *work) +{ + struct mtk_star_priv *priv = container_of(work, struct mtk_star_priv, + stats_work); + + mtk_star_update_stats(priv); + mtk_star_reset_counters(priv); + mtk_star_intr_enable_stats(priv); +} + +static struct sk_buff *mtk_star_alloc_skb(struct net_device *ndev) +{ + uintptr_t tail, offset; + struct sk_buff *skb; + + skb = dev_alloc_skb(MTK_STAR_MAX_FRAME_SIZE); + if (!skb) + return NULL; + + /* Align to 16 bytes. */ + tail = (uintptr_t)skb_tail_pointer(skb); + if (tail & (MTK_STAR_SKB_ALIGNMENT - 1)) { + offset = tail & (MTK_STAR_SKB_ALIGNMENT - 1); + skb_reserve(skb, MTK_STAR_SKB_ALIGNMENT - offset); + } + + /* Ensure 16-byte alignment of the skb pointer: eth_type_trans() will + * extract the Ethernet header (14 bytes) so we need two more bytes. + */ + skb_reserve(skb, MTK_STAR_IP_ALIGN); + + return skb; +} + +static int mtk_star_prepare_rx_skbs(struct net_device *ndev) +{ + struct mtk_star_priv *priv = netdev_priv(ndev); + struct mtk_star_ring *ring = &priv->rx_ring; + struct device *dev = mtk_star_get_dev(priv); + struct mtk_star_ring_desc *desc; + struct sk_buff *skb; + dma_addr_t dma_addr; + int i; + + for (i = 0; i < MTK_STAR_NUM_RX_DESCS; i++) { + skb = mtk_star_alloc_skb(ndev); + if (!skb) + return -ENOMEM; + + dma_addr = mtk_star_dma_map_rx(priv, skb); + if (dma_mapping_error(dev, dma_addr)) { + dev_kfree_skb(skb); + return -ENOMEM; + } + + desc = &ring->descs[i]; + desc->data_ptr = dma_addr; + desc->status |= skb_tailroom(skb) & MTK_STAR_DESC_MSK_LEN; + desc->status &= ~MTK_STAR_DESC_BIT_COWN; + ring->skbs[i] = skb; + ring->dma_addrs[i] = dma_addr; + } + + return 0; +} + +static void +mtk_star_ring_free_skbs(struct mtk_star_priv *priv, struct mtk_star_ring *ring, + void (*unmap_func)(struct mtk_star_priv *, + struct mtk_star_ring_desc_data *)) +{ + struct mtk_star_ring_desc_data desc_data; + int i; + + for (i = 0; i < MTK_STAR_RING_NUM_DESCS; i++) { + if (!ring->dma_addrs[i]) + continue; + + desc_data.dma_addr = ring->dma_addrs[i]; + desc_data.skb = ring->skbs[i]; + + unmap_func(priv, &desc_data); + dev_kfree_skb(desc_data.skb); + } +} + +static void mtk_star_free_rx_skbs(struct mtk_star_priv *priv) +{ + struct mtk_star_ring *ring = &priv->rx_ring; + + mtk_star_ring_free_skbs(priv, ring, mtk_star_dma_unmap_rx); +} + +static void mtk_star_free_tx_skbs(struct mtk_star_priv *priv) +{ + struct mtk_star_ring *ring = &priv->tx_ring; + + mtk_star_ring_free_skbs(priv, ring, mtk_star_dma_unmap_tx); +} + +/* All processing for TX and RX happens in the napi poll callback. 
*/ +static irqreturn_t mtk_star_handle_irq(int irq, void *data) +{ + struct mtk_star_priv *priv; + struct net_device *ndev; + bool need_napi = false; + unsigned int status; + + ndev = data; + priv = netdev_priv(ndev); + + if (netif_running(ndev)) { + status = mtk_star_intr_read(priv); + + if (status & MTK_STAR_BIT_INT_STS_TNTC) { + mtk_star_intr_disable_tx(priv); + need_napi = true; + } + + if (status & MTK_STAR_BIT_INT_STS_FNRC) { + mtk_star_intr_disable_rx(priv); + need_napi = true; + } + + if (need_napi) + napi_schedule(&priv->napi); + + /* One of the counters reached 0x8000000 - update stats and + * reset all counters. + */ + if (unlikely(status & MTK_STAR_REG_INT_STS_MIB_CNT_TH)) { + mtk_star_intr_disable_stats(priv); + schedule_work(&priv->stats_work); + } + + mtk_star_intr_ack_all(priv); + } + + return IRQ_HANDLED; +} + +/* Wait for the completion of any previous command - CMD_START bit must be + * cleared by hardware. + */ +static int mtk_star_hash_wait_cmd_start(struct mtk_star_priv *priv) +{ + unsigned int val; + + return regmap_read_poll_timeout_atomic(priv->regs, + MTK_STAR_REG_HASH_CTRL, val, + !(val & MTK_STAR_BIT_HASH_CTRL_CMD_START), + 10, MTK_STAR_WAIT_TIMEOUT); +} + +static int mtk_star_hash_wait_ok(struct mtk_star_priv *priv) +{ + unsigned int val; + int ret; + + /* Wait for BIST_DONE bit. */ + ret = regmap_read_poll_timeout_atomic(priv->regs, + MTK_STAR_REG_HASH_CTRL, val, + val & MTK_STAR_BIT_HASH_CTRL_BIST_DONE, + 10, MTK_STAR_WAIT_TIMEOUT); + if (ret) + return ret; + + /* Check the BIST_OK bit. */ + regmap_read(priv->regs, MTK_STAR_REG_HASH_CTRL, &val); + if (!(val & MTK_STAR_BIT_HASH_CTRL_BIST_OK)) + return -EIO; + + return 0; +} + +static int mtk_star_set_hashbit(struct mtk_star_priv *priv, + unsigned int hash_addr) +{ + unsigned int val; + int ret; + + ret = mtk_star_hash_wait_cmd_start(priv); + if (ret) + return ret; + + val = hash_addr & MTK_STAR_MSK_HASH_CTRL_HASH_BIT_ADDR; + val |= MTK_STAR_BIT_HASH_CTRL_ACC_CMD; + val |= MTK_STAR_BIT_HASH_CTRL_CMD_START; + val |= MTK_STAR_BIT_HASH_CTRL_BIST_EN; + val |= MTK_STAR_BIT_HASH_CTRL_HASH_BIT_DATA; + regmap_write(priv->regs, MTK_STAR_REG_HASH_CTRL, val); + + return mtk_star_hash_wait_ok(priv); +} + +static int mtk_star_reset_hash_table(struct mtk_star_priv *priv) +{ + int ret; + + ret = mtk_star_hash_wait_cmd_start(priv); + if (ret) + return ret; + + regmap_update_bits(priv->regs, MTK_STAR_REG_HASH_CTRL, + MTK_STAR_BIT_HASH_CTRL_BIST_EN, + MTK_STAR_BIT_HASH_CTRL_BIST_EN); + regmap_update_bits(priv->regs, MTK_STAR_REG_TEST1, + MTK_STAR_BIT_TEST1_RST_HASH_MBIST, + MTK_STAR_BIT_TEST1_RST_HASH_MBIST); + + return mtk_star_hash_wait_ok(priv); +} + +static void mtk_star_phy_config(struct mtk_star_priv *priv) +{ + unsigned int val; + + if (priv->speed == SPEED_1000) + val = MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_1000M; + else if (priv->speed == SPEED_100) + val = MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_100M; + else + val = MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_10M; + val <<= MTK_STAR_OFF_PHY_CTRL1_FORCE_SPD; + + val |= MTK_STAR_BIT_PHY_CTRL1_AN_EN; + val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_RX; + val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_TX; + /* Only full-duplex supported for now. 
*/ + val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_DPX; + + regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL1, val); + + if (priv->pause) { + val = MTK_STAR_VAL_FC_CFG_SEND_PAUSE_TH_2K; + val <<= MTK_STAR_OFF_FC_CFG_SEND_PAUSE_TH; + val |= MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR; + } else { + val = 0; + } + + regmap_update_bits(priv->regs, MTK_STAR_REG_FC_CFG, + MTK_STAR_MSK_FC_CFG_SEND_PAUSE_TH | + MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR, val); + + if (priv->pause) { + val = MTK_STAR_VAL_EXT_CFG_SND_PAUSE_RLS_1K; + val <<= MTK_STAR_OFF_EXT_CFG_SND_PAUSE_RLS; + } else { + val = 0; + } + + regmap_update_bits(priv->regs, MTK_STAR_REG_EXT_CFG, + MTK_STAR_MSK_EXT_CFG_SND_PAUSE_RLS, val); +} + +static void mtk_star_adjust_link(struct net_device *ndev) +{ + struct mtk_star_priv *priv = netdev_priv(ndev); + struct phy_device *phydev = priv->phydev; + bool new_state = false; + + if (phydev->link) { + if (!priv->link) { + priv->link = phydev->link; + new_state = true; + } + + if (priv->speed != phydev->speed) { + priv->speed = phydev->speed; + new_state = true; + } + + if (priv->pause != phydev->pause) { + priv->pause = phydev->pause; + new_state = true; + } + } else { + if (priv->link) { + priv->link = phydev->link; + new_state = true; + } + } + + if (new_state) { + if (phydev->link) + mtk_star_phy_config(priv); + + phy_print_status(ndev->phydev); + } +} + +static void mtk_star_init_config(struct mtk_star_priv *priv) +{ + unsigned int val; + + val = (MTK_STAR_BIT_MII_PAD_OUT_ENABLE | + MTK_STAR_BIT_EXT_MDC_MODE | + MTK_STAR_BIT_SWC_MII_MODE); + + regmap_write(priv->regs, MTK_STAR_REG_SYS_CONF, val); + regmap_update_bits(priv->regs, MTK_STAR_REG_MAC_CLK_CONF, + MTK_STAR_MSK_MAC_CLK_CONF, + MTK_STAR_BIT_CLK_DIV_10); +} + +static void mtk_star_set_mode_rmii(struct mtk_star_priv *priv) +{ + regmap_update_bits(priv->pericfg, MTK_PERICFG_REG_NIC_CFG_CON, + MTK_PERICFG_MSK_NIC_CFG_CON_CFG_MII, + MTK_PERICFG_BIT_NIC_CFG_CON_RMII); +} + +static int mtk_star_enable(struct net_device *ndev) +{ + struct mtk_star_priv *priv = netdev_priv(ndev); + unsigned int val; + int ret; + + mtk_star_nic_disable_pd(priv); + mtk_star_intr_disable(priv); + mtk_star_dma_stop(priv); + + mtk_star_set_mac_addr(ndev); + + /* Configure the MAC */ + val = MTK_STAR_VAL_MAC_CFG_IPG_96BIT; + val <<= MTK_STAR_OFF_MAC_CFG_IPG; + val |= MTK_STAR_BIT_MAC_CFG_MAXLEN_1522; + val |= MTK_STAR_BIT_MAC_CFG_AUTO_PAD; + val |= MTK_STAR_BIT_MAC_CFG_CRC_STRIP; + regmap_write(priv->regs, MTK_STAR_REG_MAC_CFG, val); + + /* Enable Hash Table BIST and reset it */ + ret = mtk_star_reset_hash_table(priv); + if (ret) + return ret; + + /* Setup the hashing algorithm */ + regmap_update_bits(priv->regs, MTK_STAR_REG_ARL_CFG, + MTK_STAR_BIT_ARL_CFG_HASH_ALG | + MTK_STAR_BIT_ARL_CFG_MISC_MODE, 0); + + /* Don't strip VLAN tags */ + regmap_update_bits(priv->regs, MTK_STAR_REG_MAC_CFG, + MTK_STAR_BIT_MAC_CFG_VLAN_STRIP, 0); + + /* Setup DMA */ + mtk_star_dma_init(priv); + + ret = mtk_star_prepare_rx_skbs(ndev); + if (ret) + goto err_out; + + /* Request the interrupt */ + ret = request_irq(ndev->irq, mtk_star_handle_irq, + IRQF_TRIGGER_FALLING, ndev->name, ndev); + if (ret) + goto err_free_skbs; + + napi_enable(&priv->napi); + + mtk_star_intr_ack_all(priv); + mtk_star_intr_enable(priv); + + /* Connect to and start PHY */ + priv->phydev = of_phy_connect(ndev, priv->phy_node, + mtk_star_adjust_link, 0, priv->phy_intf); + if (!priv->phydev) { + netdev_err(ndev, "failed to connect to PHY\n"); + goto err_free_irq; + } + + mtk_star_dma_start(priv); + phy_start(priv->phydev); + 
netif_start_queue(ndev); + + return 0; + +err_free_irq: + free_irq(ndev->irq, ndev); +err_free_skbs: + mtk_star_free_rx_skbs(priv); +err_out: + return ret; +} + +static void mtk_star_disable(struct net_device *ndev) +{ + struct mtk_star_priv *priv = netdev_priv(ndev); + + netif_stop_queue(ndev); + napi_disable(&priv->napi); + mtk_star_intr_disable(priv); + mtk_star_dma_disable(priv); + mtk_star_intr_ack_all(priv); + phy_stop(priv->phydev); + phy_disconnect(priv->phydev); + free_irq(ndev->irq, ndev); + mtk_star_free_rx_skbs(priv); + mtk_star_free_tx_skbs(priv); +} + +static int mtk_star_netdev_open(struct net_device *ndev) +{ + return mtk_star_enable(ndev); +} + +static int mtk_star_netdev_stop(struct net_device *ndev) +{ + mtk_star_disable(ndev); + + return 0; +} + +static int mtk_star_netdev_ioctl(struct net_device *ndev, + struct ifreq *req, int cmd) +{ + if (!netif_running(ndev)) + return -EINVAL; + + return phy_mii_ioctl(ndev->phydev, req, cmd); +} + +static int mtk_star_netdev_start_xmit(struct sk_buff *skb, + struct net_device *ndev) +{ + struct mtk_star_priv *priv = netdev_priv(ndev); + struct mtk_star_ring *ring = &priv->tx_ring; + struct device *dev = mtk_star_get_dev(priv); + struct mtk_star_ring_desc_data desc_data; + + desc_data.dma_addr = mtk_star_dma_map_tx(priv, skb); + if (dma_mapping_error(dev, desc_data.dma_addr)) + goto err_drop_packet; + + desc_data.skb = skb; + desc_data.len = skb->len; + + spin_lock_bh(&priv->lock); + + mtk_star_ring_push_head_tx(ring, &desc_data); + + netdev_sent_queue(ndev, skb->len); + + if (mtk_star_ring_full(ring)) + netif_stop_queue(ndev); + + spin_unlock_bh(&priv->lock); + + mtk_star_dma_resume_tx(priv); + + return NETDEV_TX_OK; + +err_drop_packet: + dev_kfree_skb(skb); + ndev->stats.tx_dropped++; + return NETDEV_TX_BUSY; +} + +/* Returns the number of bytes sent or a negative number on the first + * descriptor owned by DMA. 
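[Editor's sketch] The TX path here revolves around a descriptor ownership bit (COWN, "CPU owns"): start_xmit hands descriptors to the DMA engine, and the completion loop reclaims them only once the hardware flips ownership back, which is why the completion helper can return a negative value mid-ring. A stand-alone model of that ring discipline (simplified; field and constant names are made up for illustration):

#include <stdbool.h>
#include <stdio.h>

#define NUM_DESCS    4
#define OWNED_BY_CPU 0x1	/* plays the role of MTK_STAR_DESC_BIT_COWN */

struct desc { unsigned int status; int len; };

struct ring {
	struct desc descs[NUM_DESCS];
	unsigned int head, tail, count;
};

/* Producer side: queue a packet and hand the descriptor to "DMA". */
static bool ring_push_head(struct ring *r, int len)
{
	if (r->count == NUM_DESCS)
		return false;				/* ring full */
	r->descs[r->head].len = len;
	r->descs[r->head].status &= ~OWNED_BY_CPU;	/* DMA owns it now */
	r->head = (r->head + 1) % NUM_DESCS;
	r->count++;
	return true;
}

/* Consumer side: reclaim the oldest descriptor once the hardware has set
 * the ownership bit back; mirrors the "negative number on the first
 * descriptor owned by DMA" contract above.
 */
static int ring_pop_tail(struct ring *r)
{
	if (!r->count || !(r->descs[r->tail].status & OWNED_BY_CPU))
		return -1;
	int len = r->descs[r->tail].len;
	r->tail = (r->tail + 1) % NUM_DESCS;
	r->count--;
	return len;
}

int main(void)
{
	struct ring r = { 0 };

	ring_push_head(&r, 64);
	ring_push_head(&r, 128);
	r.descs[0].status |= OWNED_BY_CPU;	/* pretend DMA finished desc 0 */
	printf("completed: %d bytes\n", ring_pop_tail(&r));	/* 64 */
	printf("completed: %d\n", ring_pop_tail(&r));	/* -1: desc 1 pending */
	return 0;
}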
+ */ +static int mtk_star_tx_complete_one(struct mtk_star_priv *priv) +{ + struct mtk_star_ring *ring = &priv->tx_ring; + struct mtk_star_ring_desc_data desc_data; + int ret; + + ret = mtk_star_ring_pop_tail(ring, &desc_data); + if (ret) + return ret; + + mtk_star_dma_unmap_tx(priv, &desc_data); + ret = desc_data.skb->len; + dev_kfree_skb_irq(desc_data.skb); + + return ret; +} + +static void mtk_star_tx_complete_all(struct mtk_star_priv *priv) +{ + struct mtk_star_ring *ring = &priv->tx_ring; + struct net_device *ndev = priv->ndev; + int ret, pkts_compl, bytes_compl; + bool wake = false; + + spin_lock(&priv->lock); + + for (pkts_compl = 0, bytes_compl = 0;; + pkts_compl++, bytes_compl += ret, wake = true) { + if (!mtk_star_ring_descs_available(ring)) + break; + + ret = mtk_star_tx_complete_one(priv); + if (ret < 0) + break; + } + + netdev_completed_queue(ndev, pkts_compl, bytes_compl); + + if (wake && netif_queue_stopped(ndev)) + netif_wake_queue(ndev); + + mtk_star_intr_enable_tx(priv); + + spin_unlock(&priv->lock); +} + +static void mtk_star_netdev_get_stats64(struct net_device *ndev, + struct rtnl_link_stats64 *stats) +{ + struct mtk_star_priv *priv = netdev_priv(ndev); + + mtk_star_update_stats(priv); + + memcpy(stats, &priv->stats, sizeof(*stats)); +} + +static void mtk_star_set_rx_mode(struct net_device *ndev) +{ + struct mtk_star_priv *priv = netdev_priv(ndev); + struct netdev_hw_addr *hw_addr; + unsigned int hash_addr, i; + int ret; + + if (ndev->flags & IFF_PROMISC) { + regmap_update_bits(priv->regs, MTK_STAR_REG_ARL_CFG, + MTK_STAR_BIT_ARL_CFG_MISC_MODE, + MTK_STAR_BIT_ARL_CFG_MISC_MODE); + } else if (netdev_mc_count(ndev) > MTK_STAR_HASHTABLE_MC_LIMIT || + ndev->flags & IFF_ALLMULTI) { + for (i = 0; i < MTK_STAR_HASHTABLE_SIZE_MAX; i++) { + ret = mtk_star_set_hashbit(priv, i); + if (ret) + goto hash_fail; + } + } else { + /* Clear previous settings. */ + ret = mtk_star_reset_hash_table(priv); + if (ret) + goto hash_fail; + + netdev_for_each_mc_addr(hw_addr, ndev) { + hash_addr = (hw_addr->addr[0] & 0x01) << 8; + hash_addr += hw_addr->addr[5]; + ret = mtk_star_set_hashbit(priv, hash_addr); + if (ret) + goto hash_fail; + } + } + + return; + +hash_fail: + if (ret == -ETIMEDOUT) + netdev_err(ndev, "setting hash bit timed out\n"); + else + /* Should be -EIO */ + netdev_err(ndev, "unable to set hash bit"); +} + +static const struct net_device_ops mtk_star_netdev_ops = { + .ndo_open = mtk_star_netdev_open, + .ndo_stop = mtk_star_netdev_stop, + .ndo_start_xmit = mtk_star_netdev_start_xmit, + .ndo_get_stats64 = mtk_star_netdev_get_stats64, + .ndo_set_rx_mode = mtk_star_set_rx_mode, + .ndo_do_ioctl = mtk_star_netdev_ioctl, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + +static void mtk_star_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, MTK_STAR_DRVNAME, sizeof(info->driver)); +} + +/* TODO Add ethtool stats. 
*/ +static const struct ethtool_ops mtk_star_ethtool_ops = { + .get_drvinfo = mtk_star_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, +}; + +static int mtk_star_receive_packet(struct mtk_star_priv *priv) +{ + struct mtk_star_ring *ring = &priv->rx_ring; + struct device *dev = mtk_star_get_dev(priv); + struct mtk_star_ring_desc_data desc_data; + struct net_device *ndev = priv->ndev; + struct sk_buff *curr_skb, *new_skb; + dma_addr_t new_dma_addr; + int ret; + + spin_lock(&priv->lock); + ret = mtk_star_ring_pop_tail(ring, &desc_data); + spin_unlock(&priv->lock); + if (ret) + return -1; + + curr_skb = desc_data.skb; + + if ((desc_data.flags & MTK_STAR_DESC_BIT_RX_CRCE) || + (desc_data.flags & MTK_STAR_DESC_BIT_RX_OSIZE)) { + /* Error packet -> drop and reuse skb. */ + new_skb = curr_skb; + goto push_new_skb; + } + + /* Prepare new skb before receiving the current one. Reuse the current + * skb if we fail at any point. + */ + new_skb = mtk_star_alloc_skb(ndev); + if (!new_skb) { + ndev->stats.rx_dropped++; + new_skb = curr_skb; + goto push_new_skb; + } + + new_dma_addr = mtk_star_dma_map_rx(priv, new_skb); + if (dma_mapping_error(dev, new_dma_addr)) { + ndev->stats.rx_dropped++; + dev_kfree_skb(new_skb); + new_skb = curr_skb; + netdev_err(ndev, "DMA mapping error of RX descriptor\n"); + goto push_new_skb; + } + + desc_data.dma_addr = new_dma_addr; + + /* We can't fail anymore at this point: it's safe to unmap the skb. */ + mtk_star_dma_unmap_rx(priv, &desc_data); + + skb_put(desc_data.skb, desc_data.len); + desc_data.skb->ip_summed = CHECKSUM_NONE; + desc_data.skb->protocol = eth_type_trans(desc_data.skb, ndev); + desc_data.skb->dev = ndev; + netif_receive_skb(desc_data.skb); + +push_new_skb: + desc_data.len = skb_tailroom(new_skb); + desc_data.skb = new_skb; + + spin_lock(&priv->lock); + mtk_star_ring_push_head_rx(ring, &desc_data); + spin_unlock(&priv->lock); + + return 0; +} + +static int mtk_star_process_rx(struct mtk_star_priv *priv, int budget) +{ + int received, ret; + + for (received = 0, ret = 0; received < budget && ret == 0; received++) + ret = mtk_star_receive_packet(priv); + + mtk_star_dma_resume_rx(priv); + + return received; +} + +static int mtk_star_poll(struct napi_struct *napi, int budget) +{ + struct mtk_star_priv *priv; + int received = 0; + + priv = container_of(napi, struct mtk_star_priv, napi); + + /* Clean-up all TX descriptors. */ + mtk_star_tx_complete_all(priv); + /* Receive up to $budget packets. 
*/ + received = mtk_star_process_rx(priv, budget); + + if (received < budget) { + napi_complete_done(napi, received); + mtk_star_intr_enable_rx(priv); + } + + return received; +} + +static void mtk_star_mdio_rwok_clear(struct mtk_star_priv *priv) +{ + regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL0, + MTK_STAR_BIT_PHY_CTRL0_RWOK); +} + +static int mtk_star_mdio_rwok_wait(struct mtk_star_priv *priv) +{ + unsigned int val; + + return regmap_read_poll_timeout(priv->regs, MTK_STAR_REG_PHY_CTRL0, + val, val & MTK_STAR_BIT_PHY_CTRL0_RWOK, + 10, MTK_STAR_WAIT_TIMEOUT); +} + +static int mtk_star_mdio_read(struct mii_bus *mii, int phy_id, int regnum) +{ + struct mtk_star_priv *priv = mii->priv; + unsigned int val, data; + int ret; + + if (regnum & MII_ADDR_C45) + return -EOPNOTSUPP; + + mtk_star_mdio_rwok_clear(priv); + + val = (regnum << MTK_STAR_OFF_PHY_CTRL0_PREG); + val &= MTK_STAR_MSK_PHY_CTRL0_PREG; + val |= MTK_STAR_BIT_PHY_CTRL0_RDCMD; + + regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL0, val); + + ret = mtk_star_mdio_rwok_wait(priv); + if (ret) + return ret; + + regmap_read(priv->regs, MTK_STAR_REG_PHY_CTRL0, &data); + + data &= MTK_STAR_MSK_PHY_CTRL0_RWDATA; + data >>= MTK_STAR_OFF_PHY_CTRL0_RWDATA; + + return data; +} + +static int mtk_star_mdio_write(struct mii_bus *mii, int phy_id, + int regnum, u16 data) +{ + struct mtk_star_priv *priv = mii->priv; + unsigned int val; + + if (regnum & MII_ADDR_C45) + return -EOPNOTSUPP; + + mtk_star_mdio_rwok_clear(priv); + + val = data; + val <<= MTK_STAR_OFF_PHY_CTRL0_RWDATA; + val &= MTK_STAR_MSK_PHY_CTRL0_RWDATA; + regnum <<= MTK_STAR_OFF_PHY_CTRL0_PREG; + regnum &= MTK_STAR_MSK_PHY_CTRL0_PREG; + val |= regnum; + val |= MTK_STAR_BIT_PHY_CTRL0_WTCMD; + + regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL0, val); + + return mtk_star_mdio_rwok_wait(priv); +} + +static int mtk_star_mdio_init(struct net_device *ndev) +{ + struct mtk_star_priv *priv = netdev_priv(ndev); + struct device *dev = mtk_star_get_dev(priv); + struct device_node *of_node, *mdio_node; + int ret; + + of_node = dev->of_node; + + mdio_node = of_get_child_by_name(of_node, "mdio"); + if (!mdio_node) + return -ENODEV; + + if (!of_device_is_available(mdio_node)) { + ret = -ENODEV; + goto out_put_node; + } + + priv->mii = devm_mdiobus_alloc(dev); + if (!priv->mii) { + ret = -ENOMEM; + goto out_put_node; + } + + snprintf(priv->mii->id, MII_BUS_ID_SIZE, "%s", dev_name(dev)); + priv->mii->name = "mtk-mac-mdio"; + priv->mii->parent = dev; + priv->mii->read = mtk_star_mdio_read; + priv->mii->write = mtk_star_mdio_write; + priv->mii->priv = priv; + + ret = of_mdiobus_register(priv->mii, mdio_node); + +out_put_node: + of_node_put(mdio_node); + return ret; +} + +static __maybe_unused int mtk_star_suspend(struct device *dev) +{ + struct mtk_star_priv *priv; + struct net_device *ndev; + + ndev = dev_get_drvdata(dev); + priv = netdev_priv(ndev); + + if (netif_running(ndev)) + mtk_star_disable(ndev); + + clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks); + + return 0; +} + +static __maybe_unused int mtk_star_resume(struct device *dev) +{ + struct mtk_star_priv *priv; + struct net_device *ndev; + int ret; + + ndev = dev_get_drvdata(dev); + priv = netdev_priv(ndev); + + ret = clk_bulk_prepare_enable(MTK_STAR_NCLKS, priv->clks); + if (ret) + return ret; + + if (netif_running(ndev)) { + ret = mtk_star_enable(ndev); + if (ret) + clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks); + } + + return ret; +} + +static void mtk_star_clk_disable_unprepare(void *data) +{ + struct mtk_star_priv *priv = data; + 
+ clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks); +} + +static void mtk_star_mdiobus_unregister(void *data) +{ + struct mtk_star_priv *priv = data; + + mdiobus_unregister(priv->mii); +} + +static int mtk_star_probe(struct platform_device *pdev) +{ + struct device_node *of_node; + struct mtk_star_priv *priv; + struct net_device *ndev; + struct device *dev; + void __iomem *base; + int ret, i; + + dev = &pdev->dev; + of_node = dev->of_node; + + ndev = devm_alloc_etherdev(dev, sizeof(*priv)); + if (!ndev) + return -ENOMEM; + + priv = netdev_priv(ndev); + priv->ndev = ndev; + SET_NETDEV_DEV(ndev, dev); + platform_set_drvdata(pdev, ndev); + + ndev->min_mtu = ETH_ZLEN; + ndev->max_mtu = MTK_STAR_MAX_FRAME_SIZE; + + spin_lock_init(&priv->lock); + INIT_WORK(&priv->stats_work, mtk_star_update_stats_work); + + base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(base)) + return PTR_ERR(base); + + /* We won't be checking the return values of regmap read & write + * functions. They can only fail for mmio if there's a clock attached + * to regmap which is not the case here. + */ + priv->regs = devm_regmap_init_mmio(dev, base, + &mtk_star_regmap_config); + if (IS_ERR(priv->regs)) + return PTR_ERR(priv->regs); + + priv->pericfg = syscon_regmap_lookup_by_phandle(of_node, + "mediatek,pericfg"); + if (IS_ERR(priv->pericfg)) { + dev_err(dev, "Failed to lookup the PERICFG syscon\n"); + return PTR_ERR(priv->pericfg); + } + + ndev->irq = platform_get_irq(pdev, 0); + if (ndev->irq < 0) + return ndev->irq; + + for (i = 0; i < MTK_STAR_NCLKS; i++) + priv->clks[i].id = mtk_star_clk_names[i]; + ret = devm_clk_bulk_get(dev, MTK_STAR_NCLKS, priv->clks); + if (ret) + return ret; + + ret = clk_bulk_prepare_enable(MTK_STAR_NCLKS, priv->clks); + if (ret) + return ret; + + ret = devm_add_action_or_reset(dev, + mtk_star_clk_disable_unprepare, priv); + if (ret) + return ret; + + ret = of_get_phy_mode(of_node, &priv->phy_intf); + if (ret) { + return ret; + } else if (priv->phy_intf != PHY_INTERFACE_MODE_RMII) { + dev_err(dev, "unsupported phy mode: %s\n", + phy_modes(priv->phy_intf)); + return -EINVAL; + } + + priv->phy_node = of_parse_phandle(of_node, "phy-handle", 0); + if (!priv->phy_node) { + dev_err(dev, "failed to retrieve the phy handle from device tree\n"); + return -ENODEV; + } + + mtk_star_set_mode_rmii(priv); + + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); + if (ret) { + dev_err(dev, "unsupported DMA mask\n"); + return ret; + } + + priv->ring_base = dmam_alloc_coherent(dev, MTK_STAR_DMA_SIZE, + &priv->dma_addr, + GFP_KERNEL | GFP_DMA); + if (!priv->ring_base) + return -ENOMEM; + + mtk_star_nic_disable_pd(priv); + mtk_star_init_config(priv); + + ret = mtk_star_mdio_init(ndev); + if (ret) + return ret; + + ret = devm_add_action_or_reset(dev, mtk_star_mdiobus_unregister, priv); + if (ret) + return ret; + + ret = eth_platform_get_mac_address(dev, ndev->dev_addr); + if (ret || !is_valid_ether_addr(ndev->dev_addr)) + eth_hw_addr_random(ndev); + + ndev->netdev_ops = &mtk_star_netdev_ops; + ndev->ethtool_ops = &mtk_star_ethtool_ops; + + netif_napi_add(ndev, &priv->napi, mtk_star_poll, MTK_STAR_NAPI_WEIGHT); + + return devm_register_netdev(dev, ndev); +} + +static const struct of_device_id mtk_star_of_match[] = { + { .compatible = "mediatek,mt8516-eth", }, + { .compatible = "mediatek,mt8518-eth", }, + { .compatible = "mediatek,mt8175-eth", }, + { } +}; +MODULE_DEVICE_TABLE(of, mtk_star_of_match); + +static SIMPLE_DEV_PM_OPS(mtk_star_pm_ops, + mtk_star_suspend, mtk_star_resume); + +static struct 
platform_driver mtk_star_driver = { + .driver = { + .name = MTK_STAR_DRVNAME, + .pm = &mtk_star_pm_ops, + .of_match_table = of_match_ptr(mtk_star_of_match), + }, + .probe = mtk_star_probe, +}; +module_platform_driver(mtk_star_driver); + +MODULE_AUTHOR("Bartosz Golaszewski <bgolaszewski@baylibre.com>"); +MODULE_DESCRIPTION("Mediatek STAR Ethernet MAC Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 6e501af0e532..f6ff9620a137 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -2734,7 +2734,7 @@ void mlx4_opreq_action(struct work_struct *work) if (err) { mlx4_err(dev, "Failed to retrieve required operation: %d\n", err); - return; + goto out; } MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET); MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index 7d69a3061f17..b6ffd1622cfd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -78,9 +78,24 @@ config MLX5_ESWITCH Legacy SRIOV mode (L2 mac vlan steering based). Switchdev mode (eswitch offloads). +config MLX5_CLS_ACT + bool "MLX5 TC classifier action support" + depends on MLX5_ESWITCH && NET_CLS_ACT + default y + help + mlx5 ConnectX offloads support for the TC classifier action (NET_CLS_ACT), + which works in both native NIC mode and Switchdev SRIOV mode. + Actions get attached to hardware-offloaded classifiers and are + invoked after a successful classification. Actions are used to + overwrite the classification result, instantly drop or redirect and/or + reformat packets at wire speed without involving the host CPU. + + If set to N, TC offloads in both NIC and switchdev modes will be disabled.
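[Editor's sketch] The new MLX5_CLS_ACT symbol is consumed the way optional kernel features usually are: headers added later in this series (see en/rep/neigh.h below) test it with IS_ENABLED() and provide static inline no-op stubs when it is off, so callers build without #ifdefs. A generic sketch of that header pattern, using a hypothetical CONFIG_MY_FEATURE and struct my_dev (kernel context assumed, not buildable on its own):

/* my_feature.h - optional-feature header pattern (illustration only) */
#ifndef __MY_FEATURE_H__
#define __MY_FEATURE_H__

#include <linux/kconfig.h>	/* IS_ENABLED() */

struct my_dev;

#if IS_ENABLED(CONFIG_MY_FEATURE)
int my_feature_init(struct my_dev *dev);
void my_feature_cleanup(struct my_dev *dev);
#else
/* Feature compiled out: calls collapse to no-ops at compile time. */
static inline int my_feature_init(struct my_dev *dev) { return 0; }
static inline void my_feature_cleanup(struct my_dev *dev) {}
#endif

#endif /* __MY_FEATURE_H__ */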
+ If unsure, set to Y + config MLX5_TC_CT bool "MLX5 TC connection tracking offload support" - depends on MLX5_CORE_EN && NET_SWITCHDEV && NF_FLOW_TABLE && NET_ACT_CT && NET_TC_SKB_EXT + depends on MLX5_CLS_ACT && NF_FLOW_TABLE && NET_ACT_CT && NET_TC_SKB_EXT default y help Say Y here if you want to support offloading connection tracking rules diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index d3c7dbd7f1d5..b61e47bc16e8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -33,17 +33,24 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \ mlx5_core-$(CONFIG_MLX5_EN_ARFS) += en_arfs.o mlx5_core-$(CONFIG_MLX5_EN_RXNFC) += en_fs_ethtool.o mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o en/port_buffer.o -mlx5_core-$(CONFIG_MLX5_ESWITCH) += en_rep.o en_tc.o en/tc_tun.o lib/port_tun.o lag_mp.o \ - lib/geneve.o en/mapping.o en/tc_tun_vxlan.o en/tc_tun_gre.o \ - en/tc_tun_geneve.o diag/en_tc_tracepoint.o mlx5_core-$(CONFIG_PCI_HYPERV_INTERFACE) += en/hv_vhca_stats.o +mlx5_core-$(CONFIG_MLX5_ESWITCH) += lag_mp.o lib/geneve.o lib/port_tun.o \ + en_rep.o en/rep/bond.o +mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en_tc.o en/rep/tc.o en/rep/neigh.o \ + en/mapping.o esw/chains.o en/tc_tun.o \ + en/tc_tun_vxlan.o en/tc_tun_gre.o en/tc_tun_geneve.o \ + en/tc_tun_mplsoudp.o diag/en_tc_tracepoint.o mlx5_core-$(CONFIG_MLX5_TC_CT) += en/tc_ct.o # # Core extra # mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o eswitch_offloads_termtbl.o \ - ecpf.o rdma.o esw/chains.o + ecpf.o rdma.o +mlx5_core-$(CONFIG_MLX5_ESWITCH) += esw/acl/helper.o \ + esw/acl/egress_lgcy.o esw/acl/egress_ofld.o \ + esw/acl/ingress_lgcy.o esw/acl/ingress_ofld.o + mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o mlx5_core-$(CONFIG_VXLAN) += lib/vxlan.o mlx5_core-$(CONFIG_PTP_1588_CLOCK) += lib/clock.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c index cab708af3422..cbf3d76c05a8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c @@ -56,8 +56,8 @@ void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid, mlx5_fpga_tls_del_flow(mdev, swid, GFP_KERNEL, direction_sx); } -int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq, - u64 rcd_sn) +int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle, + u32 seq, __be64 rcd_sn) { return mlx5_fpga_tls_resync_rx(mdev, handle, seq, rcd_sn); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h index e09bc3858d57..aefea467f7b3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h @@ -109,8 +109,8 @@ int mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow, bool direction_sx); void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid, bool direction_sx); -int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq, - u64 rcd_sn); +int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle, + u32 seq, __be64 rcd_sn); bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev); u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev); int mlx5_accel_tls_init(struct mlx5_core_dev *mdev); @@ -125,8 +125,8 @@ mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow, 
bool direction_sx) { return -ENOTSUPP; } static inline void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid, bool direction_sx) { } -static inline int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, - u32 seq, u64 rcd_sn) { return 0; } +static inline int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle, + u32 seq, __be64 rcd_sn) { return 0; } static inline bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev) { return mlx5_accel_is_ktls_device(mdev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index cede5bdfd598..1d91a0d0ab1d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -848,6 +848,14 @@ static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg); static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg); +static bool opcode_allowed(struct mlx5_cmd *cmd, u16 opcode) +{ + if (cmd->allowed_opcode == CMD_ALLOWED_OPCODE_ALL) + return true; + + return cmd->allowed_opcode == opcode; +} + static void cmd_work_handler(struct work_struct *work) { struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work); @@ -861,6 +869,7 @@ static void cmd_work_handler(struct work_struct *work) int alloc_ret; int cmd_mode; + complete(&ent->handling); sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem; down(sem); if (!ent->page_queue) { @@ -913,7 +922,9 @@ static void cmd_work_handler(struct work_struct *work) /* Skip sending command to fw if internal error */ if (pci_channel_offline(dev->pdev) || - dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { + dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR || + cmd->state != MLX5_CMDIF_STATE_UP || + !opcode_allowed(&dev->cmd, ent->op)) { u8 status = 0; u32 drv_synd; @@ -978,6 +989,11 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent) struct mlx5_cmd *cmd = &dev->cmd; int err; + if (!wait_for_completion_timeout(&ent->handling, timeout) && + cancel_work_sync(&ent->work)) { + ent->ret = -ECANCELED; + goto out_err; + } if (cmd->mode == CMD_MODE_POLLING || ent->polling) { wait_for_completion(&ent->done); } else if (!wait_for_completion_timeout(&ent->done, timeout)) { @@ -985,12 +1001,17 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent) mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true); } +out_err: err = ent->ret; if (err == -ETIMEDOUT) { mlx5_core_warn(dev, "%s(0x%x) timeout. 
Will cause a leak of a command resource\n", mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in)); + } else if (err == -ECANCELED) { + mlx5_core_warn(dev, "%s(0x%x) canceled on out of queue timeout.\n", + mlx5_command_str(msg_to_opcode(ent->in)), + msg_to_opcode(ent->in)); } mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n", err, deliv_status_to_str(ent->status), ent->status); @@ -1026,6 +1047,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, ent->token = token; ent->polling = force_polling; + init_completion(&ent->handling); if (!callback) init_completion(&ent->done); @@ -1045,10 +1067,12 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, err = wait_func(dev, ent); if (err == -ETIMEDOUT) goto out; + if (err == -ECANCELED) + goto out_free; ds = ent->ts2 - ent->ts1; op = MLX5_GET(mbox_in, in->first.data, opcode); - if (op < ARRAY_SIZE(cmd->stats)) { + if (op < MLX5_CMD_OP_MAX) { stats = &cmd->stats[op]; spin_lock_irq(&stats->lock); stats->sum += ds; @@ -1391,6 +1415,22 @@ static void create_debugfs_files(struct mlx5_core_dev *dev) mlx5_cmdif_debugfs_init(dev); } +void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode) +{ + struct mlx5_cmd *cmd = &dev->cmd; + int i; + + for (i = 0; i < cmd->max_reg_cmds; i++) + down(&cmd->sem); + down(&cmd->pages_sem); + + cmd->allowed_opcode = opcode; + + up(&cmd->pages_sem); + for (i = 0; i < cmd->max_reg_cmds; i++) + up(&cmd->sem); +} + static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode) { struct mlx5_cmd *cmd = &dev->cmd; @@ -1511,7 +1551,7 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force if (ent->callback) { ds = ent->ts2 - ent->ts1; - if (ent->op < ARRAY_SIZE(cmd->stats)) { + if (ent->op < MLX5_CMD_OP_MAX) { stats = &cmd->stats[ent->op]; spin_lock_irqsave(&stats->lock, flags); stats->sum += ds; @@ -1667,12 +1707,14 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int err; u8 status = 0; u32 drv_synd; + u16 opcode; u8 token; + opcode = MLX5_GET(mbox_in, in, opcode); if (pci_channel_offline(dev->pdev) || - dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { - u16 opcode = MLX5_GET(mbox_in, in, opcode); - + dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR || + dev->cmd.state != MLX5_CMDIF_STATE_UP || + !opcode_allowed(&dev->cmd, opcode)) { err = mlx5_internal_err_ret_value(dev, opcode, &drv_synd, &status); MLX5_SET(mbox_out, out, status, status); MLX5_SET(mbox_out, out, syndrome, drv_synd); @@ -1894,6 +1936,11 @@ static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd) cmd->alloc_dma); } +static u16 cmdif_rev(struct mlx5_core_dev *dev) +{ + return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16; +} + int mlx5_cmd_init(struct mlx5_core_dev *dev) { int size = sizeof(struct mlx5_cmd_prot_block); @@ -1913,10 +1960,16 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev) return -EINVAL; } - cmd->pool = dma_pool_create("mlx5_cmd", dev->device, size, align, 0); - if (!cmd->pool) + cmd->stats = kvzalloc(MLX5_CMD_OP_MAX * sizeof(*cmd->stats), GFP_KERNEL); + if (!cmd->stats) return -ENOMEM; + cmd->pool = dma_pool_create("mlx5_cmd", dev->device, size, align, 0); + if (!cmd->pool) { + err = -ENOMEM; + goto dma_pool_err; + } + err = alloc_cmd_page(dev, cmd); if (err) goto err_free_pool; @@ -1937,6 +1990,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev) goto err_free_page; } + cmd->state = MLX5_CMDIF_STATE_DOWN; cmd->checksum_disabled = 1; cmd->max_reg_cmds = (1 << cmd->log_sz) - 1; 
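[Editor's sketch] mlx5_cmd_allowed_opcode() above quiesces the command interface by acquiring every slot of the counting semaphore (so no command can be in flight), flipping the policy, then handing the slots back. The same shape in portable POSIX C, with a single semaphore standing in for both cmd->sem and cmd->pages_sem (user-space analogue, not the kernel code; the slot count is arbitrary):

#include <semaphore.h>
#include <stdio.h>

#define MAX_REG_CMDS 4	/* arbitrary slot count for the sketch */

static sem_t cmd_sem;		/* counting: one token per command slot */
static int allowed_opcode;	/* models cmd->allowed_opcode */

static void set_allowed_opcode(int opcode)
{
	int i;

	/* Drain: blocks until every outstanding command has returned. */
	for (i = 0; i < MAX_REG_CMDS; i++)
		sem_wait(&cmd_sem);

	allowed_opcode = opcode;	/* safe: interface is idle */

	/* Refill: let new (now filtered) commands proceed. */
	for (i = 0; i < MAX_REG_CMDS; i++)
		sem_post(&cmd_sem);
}

int main(void)
{
	sem_init(&cmd_sem, 0, MAX_REG_CMDS);
	set_allowed_opcode(0x107);	/* arbitrary opcode for the demo */
	printf("allowed opcode now 0x%x\n", allowed_opcode);
	sem_destroy(&cmd_sem);
	return 0;
}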
cmd->bitmask = (1UL << cmd->max_reg_cmds) - 1; @@ -1951,7 +2005,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev) spin_lock_init(&cmd->alloc_lock); spin_lock_init(&cmd->token_lock); - for (i = 0; i < ARRAY_SIZE(cmd->stats); i++) + for (i = 0; i < MLX5_CMD_OP_MAX; i++) spin_lock_init(&cmd->stats[i].lock); sema_init(&cmd->sem, cmd->max_reg_cmds); @@ -1974,6 +2028,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev) mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma)); cmd->mode = CMD_MODE_POLLING; + cmd->allowed_opcode = CMD_ALLOWED_OPCODE_ALL; create_msg_cache(dev); @@ -1997,7 +2052,8 @@ err_free_page: err_free_pool: dma_pool_destroy(cmd->pool); - +dma_pool_err: + kvfree(cmd->stats); return err; } EXPORT_SYMBOL(mlx5_cmd_init); @@ -2011,5 +2067,13 @@ void mlx5_cmd_cleanup(struct mlx5_core_dev *dev) destroy_msg_cache(dev); free_cmd_page(dev, cmd); dma_pool_destroy(cmd->pool); + kvfree(cmd->stats); } EXPORT_SYMBOL(mlx5_cmd_cleanup); + +void mlx5_cmd_set_state(struct mlx5_core_dev *dev, + enum mlx5_cmdif_state cmdif_state) +{ + dev->cmd.state = cmdif_state; +} +EXPORT_SYMBOL(mlx5_cmd_set_state); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c index 6409090b3ec5..07c8d9811bc8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c @@ -171,7 +171,7 @@ void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev) cmd = &dev->priv.cmdif_debugfs; *cmd = debugfs_create_dir("commands", dev->priv.dbg_root); - for (i = 0; i < ARRAY_SIZE(dev->cmd.stats); i++) { + for (i = 0; i < MLX5_CMD_OP_MAX; i++) { stats = &dev->cmd.stats[i]; namep = mlx5_command_str(i); if (strcmp(namep, "unknown command opcode")) { @@ -202,18 +202,23 @@ void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev) static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, int index, int *is_str) { - u32 out[MLX5_ST_SZ_BYTES(query_qp_out)] = {}; + int outlen = MLX5_ST_SZ_BYTES(query_qp_out); u32 in[MLX5_ST_SZ_DW(query_qp_in)] = {}; u64 param = 0; + u32 *out; int state; u32 *qpc; int err; + out = kzalloc(outlen, GFP_KERNEL); + if (!out) + return 0; + MLX5_SET(query_qp_in, in, opcode, MLX5_CMD_OP_QUERY_QP); MLX5_SET(query_qp_in, in, qpn, qp->qpn); err = mlx5_cmd_exec_inout(dev, query_qp, in, out); if (err) - return 0; + goto out; *is_str = 0; @@ -269,7 +274,8 @@ static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, param = MLX5_GET(qpc, qpc, remote_qpn); break; } - +out: + kfree(out); return param; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c index 8ecac81a385d..a700f3c86899 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c @@ -76,58 +76,59 @@ static void print_lyr_2_4_hdrs(struct trace_seq *p, .v = MLX5_GET(fte_match_set_lyr_2_4, value, dmac_47_16) << 16 | MLX5_GET(fte_match_set_lyr_2_4, value, dmac_15_0)}; MASK_VAL_L2(u16, ethertype, ethertype); + MASK_VAL_L2(u8, ip_version, ip_version); PRINT_MASKED_VALP(smac, u8 *, p, "%pM"); PRINT_MASKED_VALP(dmac, u8 *, p, "%pM"); PRINT_MASKED_VAL(ethertype, p, "%04x"); - if (ethertype.m == 0xffff) { - if (ethertype.v == ETH_P_IP) { + if ((ethertype.m == 0xffff && ethertype.v == ETH_P_IP) || + (ip_version.m == 0xf && ip_version.v == 4)) { #define MASK_VAL_L2_BE(type, name, fld) \ MASK_VAL_BE(type, fte_match_set_lyr_2_4, name, mask, 
value, fld) - MASK_VAL_L2_BE(u32, src_ipv4, - src_ipv4_src_ipv6.ipv4_layout.ipv4); - MASK_VAL_L2_BE(u32, dst_ipv4, - dst_ipv4_dst_ipv6.ipv4_layout.ipv4); + MASK_VAL_L2_BE(u32, src_ipv4, + src_ipv4_src_ipv6.ipv4_layout.ipv4); + MASK_VAL_L2_BE(u32, dst_ipv4, + dst_ipv4_dst_ipv6.ipv4_layout.ipv4); - PRINT_MASKED_VALP(src_ipv4, typeof(&src_ipv4.v), p, - "%pI4"); - PRINT_MASKED_VALP(dst_ipv4, typeof(&dst_ipv4.v), p, - "%pI4"); - } else if (ethertype.v == ETH_P_IPV6) { - static const struct in6_addr full_ones = { - .in6_u.u6_addr32 = {__constant_htonl(0xffffffff), - __constant_htonl(0xffffffff), - __constant_htonl(0xffffffff), - __constant_htonl(0xffffffff)}, - }; - DECLARE_MASK_VAL(struct in6_addr, src_ipv6); - DECLARE_MASK_VAL(struct in6_addr, dst_ipv6); + PRINT_MASKED_VALP(src_ipv4, typeof(&src_ipv4.v), p, + "%pI4"); + PRINT_MASKED_VALP(dst_ipv4, typeof(&dst_ipv4.v), p, + "%pI4"); + } else if ((ethertype.m == 0xffff && ethertype.v == ETH_P_IPV6) || + (ip_version.m == 0xf && ip_version.v == 6)) { + static const struct in6_addr full_ones = { + .in6_u.u6_addr32 = {__constant_htonl(0xffffffff), + __constant_htonl(0xffffffff), + __constant_htonl(0xffffffff), + __constant_htonl(0xffffffff)}, + }; + DECLARE_MASK_VAL(struct in6_addr, src_ipv6); + DECLARE_MASK_VAL(struct in6_addr, dst_ipv6); - memcpy(src_ipv6.m.in6_u.u6_addr8, - MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask, - src_ipv4_src_ipv6.ipv6_layout.ipv6), - sizeof(src_ipv6.m)); - memcpy(dst_ipv6.m.in6_u.u6_addr8, - MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask, - dst_ipv4_dst_ipv6.ipv6_layout.ipv6), - sizeof(dst_ipv6.m)); - memcpy(src_ipv6.v.in6_u.u6_addr8, - MLX5_ADDR_OF(fte_match_set_lyr_2_4, value, - src_ipv4_src_ipv6.ipv6_layout.ipv6), - sizeof(src_ipv6.v)); - memcpy(dst_ipv6.v.in6_u.u6_addr8, - MLX5_ADDR_OF(fte_match_set_lyr_2_4, value, - dst_ipv4_dst_ipv6.ipv6_layout.ipv6), - sizeof(dst_ipv6.v)); + memcpy(src_ipv6.m.in6_u.u6_addr8, + MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask, + src_ipv4_src_ipv6.ipv6_layout.ipv6), + sizeof(src_ipv6.m)); + memcpy(dst_ipv6.m.in6_u.u6_addr8, + MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask, + dst_ipv4_dst_ipv6.ipv6_layout.ipv6), + sizeof(dst_ipv6.m)); + memcpy(src_ipv6.v.in6_u.u6_addr8, + MLX5_ADDR_OF(fte_match_set_lyr_2_4, value, + src_ipv4_src_ipv6.ipv6_layout.ipv6), + sizeof(src_ipv6.v)); + memcpy(dst_ipv6.v.in6_u.u6_addr8, + MLX5_ADDR_OF(fte_match_set_lyr_2_4, value, + dst_ipv4_dst_ipv6.ipv6_layout.ipv6), + sizeof(dst_ipv6.v)); - if (!memcmp(&src_ipv6.m, &full_ones, sizeof(full_ones))) - trace_seq_printf(p, "src_ipv6=%pI6 ", - &src_ipv6.v); - if (!memcmp(&dst_ipv6.m, &full_ones, sizeof(full_ones))) - trace_seq_printf(p, "dst_ipv6=%pI6 ", - &dst_ipv6.v); - } + if (!memcmp(&src_ipv6.m, &full_ones, sizeof(full_ones))) + trace_seq_printf(p, "src_ipv6=%pI6 ", + &src_ipv6.v); + if (!memcmp(&dst_ipv6.m, &full_ones, sizeof(full_ones))) + trace_seq_printf(p, "dst_ipv6=%pI6 ", + &dst_ipv6.v); } #define PRINT_MASKED_VAL_L2(type, name, fld, p, format) {\ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 81fd53569463..842db20493df 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -365,10 +365,7 @@ struct mlx5e_dma_info { dma_addr_t addr; union { struct page *page; - struct { - u64 handle; - void *data; - } xsk; + struct xdp_buff *xsk; }; }; @@ -581,7 +578,6 @@ struct mlx5e_rq { } mpwqe; }; struct { - u16 umem_headroom; u16 headroom; u32 frame0_sz; u8 map_dir; /* dma map direction */ @@ -614,7 +610,6 @@ struct 
mlx5e_rq { struct page_pool *page_pool; /* AF_XDP zero-copy */ - struct zero_copy_allocator zca; struct xdp_umem *umem; struct work_struct recover_work; @@ -1000,10 +995,12 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv); void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len, int num_channels); -void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, - u8 cq_period_mode); -void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, - u8 cq_period_mode); + +void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode); +void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode); +void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode); +void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode); + void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params); void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params); @@ -1047,7 +1044,7 @@ void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq); int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv); int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc); -void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc); +void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv); int mlx5e_create_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs); void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c index eb2e1f2138e4..38e4f19d69f8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c @@ -12,15 +12,16 @@ static inline bool mlx5e_rx_is_xdp(struct mlx5e_params *params, u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk) { - u16 headroom = NET_IP_ALIGN; + u16 headroom; - if (mlx5e_rx_is_xdp(params, xsk)) { + if (xsk) + return xsk->headroom; + + headroom = NET_IP_ALIGN; + if (mlx5e_rx_is_xdp(params, xsk)) headroom += XDP_PACKET_HEADROOM; - if (xsk) - headroom += xsk->headroom; - } else { + else headroom += MLX5_RX_HEADROOM; - } return headroom; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c index 2c4a670c8ffd..2a8950b3056f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c @@ -369,17 +369,19 @@ enum mlx5e_fec_supported_link_mode { *_policy = MLX5_GET(pplm_reg, _buf, fec_override_admin_##link); \ } while (0) -#define MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(buf, policy, write, link) \ - do { \ - u16 *__policy = &(policy); \ - bool _write = (write); \ - \ - if (_write && *__policy) \ - *__policy = find_first_bit((u_long *)__policy, \ - sizeof(u16) * BITS_PER_BYTE);\ - MLX5E_FEC_OVERRIDE_ADMIN_POLICY(buf, *__policy, _write, link); \ - if (!_write && *__policy) \ - *__policy = 1 << *__policy; \ +#define MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(buf, policy, write, link) \ + do { \ + unsigned long policy_long; \ + u16 *__policy = &(policy); \ + bool _write = (write); \ + \ + policy_long = *__policy; \ + if (_write && *__policy) \ + *__policy = find_first_bit(&policy_long, \ + sizeof(policy_long) * BITS_PER_BYTE);\ + MLX5E_FEC_OVERRIDE_ADMIN_POLICY(buf, *__policy, _write, link); \ + if (!_write && *__policy) \ + *__policy = 1 << *__policy; \ } while (0) /* get/set FEC admin field 
for a given speed */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c new file mode 100644 index 000000000000..bdb71332cbf2 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c @@ -0,0 +1,350 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2020 Mellanox Technologies Inc. All rights reserved. */ + +#include <linux/netdevice.h> +#include <linux/list.h> +#include <net/lag.h> + +#include "mlx5_core.h" +#include "eswitch.h" +#include "esw/acl/ofld.h" +#include "en_rep.h" + +struct mlx5e_rep_bond { + struct notifier_block nb; + struct netdev_net_notifier nn; + struct list_head metadata_list; +}; + +struct mlx5e_rep_bond_slave_entry { + struct list_head list; + struct net_device *netdev; +}; + +struct mlx5e_rep_bond_metadata { + struct list_head list; /* link to global list of rep_bond_metadata */ + struct mlx5_eswitch *esw; + /* private of uplink holding rep bond metadata list */ + struct net_device *lag_dev; + u32 metadata_reg_c_0; + + struct list_head slaves_list; /* slaves list */ + int slaves; +}; + +static struct mlx5e_rep_bond_metadata * +mlx5e_lookup_rep_bond_metadata(struct mlx5_rep_uplink_priv *uplink_priv, + const struct net_device *lag_dev) +{ + struct mlx5e_rep_bond_metadata *found = NULL; + struct mlx5e_rep_bond_metadata *cur; + + list_for_each_entry(cur, &uplink_priv->bond->metadata_list, list) { + if (cur->lag_dev == lag_dev) { + found = cur; + break; + } + } + + return found; +} + +static struct mlx5e_rep_bond_slave_entry * +mlx5e_lookup_rep_bond_slave_entry(struct mlx5e_rep_bond_metadata *mdata, + const struct net_device *netdev) +{ + struct mlx5e_rep_bond_slave_entry *found = NULL; + struct mlx5e_rep_bond_slave_entry *cur; + + list_for_each_entry(cur, &mdata->slaves_list, list) { + if (cur->netdev == netdev) { + found = cur; + break; + } + } + + return found; +} + +static void mlx5e_rep_bond_metadata_release(struct mlx5e_rep_bond_metadata *mdata) +{ + netdev_dbg(mdata->lag_dev, "destroy rep_bond_metadata(%d)\n", + mdata->metadata_reg_c_0); + list_del(&mdata->list); + mlx5_esw_match_metadata_free(mdata->esw, mdata->metadata_reg_c_0); + WARN_ON(!list_empty(&mdata->slaves_list)); + kfree(mdata); +} + +/* This must be called under rtnl_lock */ +int mlx5e_rep_bond_enslave(struct mlx5_eswitch *esw, struct net_device *netdev, + struct net_device *lag_dev) +{ + struct mlx5e_rep_bond_slave_entry *s_entry; + struct mlx5e_rep_bond_metadata *mdata; + struct mlx5e_rep_priv *rpriv; + struct mlx5e_priv *priv; + int err; + + ASSERT_RTNL(); + + rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); + mdata = mlx5e_lookup_rep_bond_metadata(&rpriv->uplink_priv, lag_dev); + if (!mdata) { + /* First netdev becomes slave, no metadata presents the lag_dev. 
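[Editor's sketch] The rep-bond code introduced here keeps exactly one metadata object per bond device: allocated for the first slave representor, shared by later ones, and released when the last slave leaves (see mlx5e_rep_bond_enslave()/_unslave() nearby). A compact user-space model of that lookup-or-create lifetime, keyed by a name string instead of a net_device pointer (all names illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct bond_md {
	char lag[16];
	int slaves;
	struct bond_md *next;
};

static struct bond_md *head;

/* Look up the metadata for a bond; create it for the first slave. */
static struct bond_md *md_get(const char *lag)
{
	struct bond_md *m;

	for (m = head; m; m = m->next)
		if (!strcmp(m->lag, lag))
			break;
	if (!m) {
		m = calloc(1, sizeof(*m));
		if (!m)
			return NULL;
		snprintf(m->lag, sizeof(m->lag), "%s", lag);
		m->next = head;
		head = m;
	}
	m->slaves++;
	return m;
}

/* Drop one slave; unlink and free when the last one leaves. */
static void md_put(struct bond_md *m)
{
	struct bond_md **p;

	if (--m->slaves)
		return;
	for (p = &head; *p != m; p = &(*p)->next)
		;
	*p = m->next;
	free(m);
}

int main(void)
{
	struct bond_md *a = md_get("bond0");
	struct bond_md *b = md_get("bond0");	/* same object: a == b */

	if (!a || !b)
		return 1;
	printf("%s has %d slaves\n", a->lag, a->slaves);	/* 2 */
	md_put(b);
	md_put(a);	/* last put frees the metadata */
	return 0;
}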
Create one */ + mdata = kzalloc(sizeof(*mdata), GFP_KERNEL); + if (!mdata) + return -ENOMEM; + + mdata->lag_dev = lag_dev; + mdata->esw = esw; + INIT_LIST_HEAD(&mdata->slaves_list); + mdata->metadata_reg_c_0 = mlx5_esw_match_metadata_alloc(esw); + if (!mdata->metadata_reg_c_0) { + kfree(mdata); + return -ENOSPC; + } + list_add(&mdata->list, &rpriv->uplink_priv.bond->metadata_list); + + netdev_dbg(lag_dev, "create rep_bond_metadata(%d)\n", + mdata->metadata_reg_c_0); + } + + s_entry = kzalloc(sizeof(*s_entry), GFP_KERNEL); + if (!s_entry) { + err = -ENOMEM; + goto entry_alloc_err; + } + + s_entry->netdev = netdev; + priv = netdev_priv(netdev); + rpriv = priv->ppriv; + + err = mlx5_esw_acl_ingress_vport_bond_update(esw, rpriv->rep->vport, + mdata->metadata_reg_c_0); + if (err) + goto ingress_err; + + mdata->slaves++; + list_add_tail(&s_entry->list, &mdata->slaves_list); + netdev_dbg(netdev, "enslave rep vport(%d) lag_dev(%s) metadata(0x%x)\n", + rpriv->rep->vport, lag_dev->name, mdata->metadata_reg_c_0); + + return 0; + +ingress_err: + kfree(s_entry); +entry_alloc_err: + if (!mdata->slaves) + mlx5e_rep_bond_metadata_release(mdata); + return err; +} + +/* This must be called under rtnl_lock */ +void mlx5e_rep_bond_unslave(struct mlx5_eswitch *esw, + const struct net_device *netdev, + const struct net_device *lag_dev) +{ + struct mlx5e_rep_bond_slave_entry *s_entry; + struct mlx5e_rep_bond_metadata *mdata; + struct mlx5e_rep_priv *rpriv; + struct mlx5e_priv *priv; + + ASSERT_RTNL(); + + rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); + mdata = mlx5e_lookup_rep_bond_metadata(&rpriv->uplink_priv, lag_dev); + if (!mdata) + return; + + s_entry = mlx5e_lookup_rep_bond_slave_entry(mdata, netdev); + if (!s_entry) + return; + + priv = netdev_priv(netdev); + rpriv = priv->ppriv; + + /* Reset bond_metadata to zero first then reset all ingress/egress + * acls and rx rules of unslave representor's vport + */ + mlx5_esw_acl_ingress_vport_bond_update(esw, rpriv->rep->vport, 0); + mlx5_esw_acl_egress_vport_unbond(esw, rpriv->rep->vport); + mlx5e_rep_bond_update(priv, false); + + list_del(&s_entry->list); + + netdev_dbg(netdev, "unslave rep vport(%d) lag_dev(%s) metadata(0x%x)\n", + rpriv->rep->vport, lag_dev->name, mdata->metadata_reg_c_0); + + if (--mdata->slaves == 0) + mlx5e_rep_bond_metadata_release(mdata); + kfree(s_entry); +} + +static bool mlx5e_rep_is_lag_netdev(struct net_device *netdev) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5e_rep_priv *rpriv = priv->ppriv; + + /* A given netdev is not a representor or not a slave of LAG configuration */ + if (!mlx5e_eswitch_rep(netdev) || !bond_slave_get_rtnl(netdev)) + return false; + + /* Egress acl forward to vport is supported only non-uplink representor */ + return rpriv->rep->vport != MLX5_VPORT_UPLINK; +} + +static void mlx5e_rep_changelowerstate_event(struct net_device *netdev, void *ptr) +{ + struct netdev_notifier_changelowerstate_info *info; + struct netdev_lag_lower_state_info *lag_info; + struct mlx5e_rep_priv *rpriv; + struct net_device *lag_dev; + struct mlx5e_priv *priv; + struct list_head *iter; + struct net_device *dev; + u16 acl_vport_num; + u16 fwd_vport_num; + int err; + + if (!mlx5e_rep_is_lag_netdev(netdev)) + return; + + info = ptr; + lag_info = info->lower_state_info; + /* This is not an event of a representor becoming active slave */ + if (!lag_info->tx_enabled) + return; + + priv = netdev_priv(netdev); + rpriv = priv->ppriv; + fwd_vport_num = rpriv->rep->vport; + lag_dev = 
netdev_master_upper_dev_get(netdev); + + netdev_dbg(netdev, "lag_dev(%s)'s slave vport(%d) is txable(%d)\n", + lag_dev->name, fwd_vport_num, net_lag_port_dev_txable(netdev)); + + /* Point everyone's egress acl to the vport of the active representor */ + netdev_for_each_lower_dev(lag_dev, dev, iter) { + priv = netdev_priv(dev); + rpriv = priv->ppriv; + acl_vport_num = rpriv->rep->vport; + if (acl_vport_num != fwd_vport_num) { + /* Only a single rx_rule for each unique bond_metadata should + * be present; delete it if it's saved as a passive vport's + * rx_rule with the passive vport's root_ft as destination + */ + mlx5e_rep_bond_update(priv, true); + err = mlx5_esw_acl_egress_vport_bond(priv->mdev->priv.eswitch, + fwd_vport_num, + acl_vport_num); + if (err) + netdev_warn(dev, + "configure slave vport(%d) egress fwd, err(%d)", + acl_vport_num, err); + } + } + + /* Insert a new rx_rule for the unique bond_metadata and save it as the + * active vport's rx_rule, with the active vport's root_ft as destination + */ + err = mlx5e_rep_bond_update(netdev_priv(netdev), false); + if (err) + netdev_warn(netdev, "configure active slave vport(%d) rx_rule, err(%d)", + fwd_vport_num, err); +} + +static void mlx5e_rep_changeupper_event(struct net_device *netdev, void *ptr) +{ + struct netdev_notifier_changeupper_info *info = ptr; + struct mlx5e_rep_priv *rpriv; + struct net_device *lag_dev; + struct mlx5e_priv *priv; + + if (!mlx5e_rep_is_lag_netdev(netdev)) + return; + + priv = netdev_priv(netdev); + rpriv = priv->ppriv; + lag_dev = info->upper_dev; + + netdev_dbg(netdev, "%sslave vport(%d) lag(%s)\n", + info->linking ? "en" : "un", rpriv->rep->vport, lag_dev->name); + + if (info->linking) + mlx5e_rep_bond_enslave(priv->mdev->priv.eswitch, netdev, lag_dev); + else + mlx5e_rep_bond_unslave(priv->mdev->priv.eswitch, netdev, lag_dev); +} + +/* Bond devices of representors and netdev events are used here in a specific + * way to support eswitch vport bonding and to perform failover of an eswitch + * vport by modifying the egress acls of the lower dev representors. Thus this + * also changes the traditional behavior of lower devs under a bond device. + * Non-representor netdevs, and representors of other vendors, as lower devs + * of a bond device are not supported.
+ */ +static int mlx5e_rep_esw_bond_netevent(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + struct net_device *netdev = netdev_notifier_info_to_dev(ptr); + + switch (event) { + case NETDEV_CHANGELOWERSTATE: + mlx5e_rep_changelowerstate_event(netdev, ptr); + break; + case NETDEV_CHANGEUPPER: + mlx5e_rep_changeupper_event(netdev, ptr); + break; + } + return NOTIFY_DONE; +} + +/* If HW support eswitch vports bonding, register a specific notifier to + * handle it when two or more representors are bonded + */ +int mlx5e_rep_bond_init(struct mlx5e_rep_priv *rpriv) +{ + struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv; + struct net_device *netdev = rpriv->netdev; + struct mlx5e_priv *priv; + int ret = 0; + + priv = netdev_priv(netdev); + if (!mlx5_esw_acl_egress_fwd2vport_supported(priv->mdev->priv.eswitch)) + goto out; + + uplink_priv->bond = kvzalloc(sizeof(*uplink_priv->bond), GFP_KERNEL); + if (!uplink_priv->bond) { + ret = -ENOMEM; + goto out; + } + + INIT_LIST_HEAD(&uplink_priv->bond->metadata_list); + uplink_priv->bond->nb.notifier_call = mlx5e_rep_esw_bond_netevent; + ret = register_netdevice_notifier_dev_net(netdev, + &uplink_priv->bond->nb, + &uplink_priv->bond->nn); + if (ret) { + netdev_err(netdev, "register bonding netevent notifier, err(%d)\n", ret); + kvfree(uplink_priv->bond); + uplink_priv->bond = NULL; + } + +out: + return ret; +} + +void mlx5e_rep_bond_cleanup(struct mlx5e_rep_priv *rpriv) +{ + struct mlx5e_priv *priv = netdev_priv(rpriv->netdev); + + if (!mlx5_esw_acl_egress_fwd2vport_supported(priv->mdev->priv.eswitch) || + !rpriv->uplink_priv.bond) + return; + + unregister_netdevice_notifier_dev_net(rpriv->netdev, + &rpriv->uplink_priv.bond->nb, + &rpriv->uplink_priv.bond->nn); + kvfree(rpriv->uplink_priv.bond); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c new file mode 100644 index 000000000000..baa162432e75 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c @@ -0,0 +1,368 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2020 Mellanox Technologies. 
*/ + +#include <linux/refcount.h> +#include <linux/list.h> +#include <linux/rculist.h> +#include <linux/rtnetlink.h> +#include <linux/workqueue.h> +#include <linux/rwlock.h> +#include <linux/spinlock.h> +#include <linux/notifier.h> +#include <net/netevent.h> +#include "neigh.h" +#include "tc.h" +#include "en_rep.h" +#include "fs_core.h" +#include "diag/en_rep_tracepoint.h" + +static unsigned long mlx5e_rep_ipv6_interval(void) +{ + if (IS_ENABLED(CONFIG_IPV6) && ipv6_stub->nd_tbl) + return NEIGH_VAR(&ipv6_stub->nd_tbl->parms, DELAY_PROBE_TIME); + + return ~0UL; +} + +static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv) +{ + unsigned long ipv4_interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME); + unsigned long ipv6_interval = mlx5e_rep_ipv6_interval(); + struct net_device *netdev = rpriv->netdev; + struct mlx5e_priv *priv = netdev_priv(netdev); + + rpriv->neigh_update.min_interval = min_t(unsigned long, ipv6_interval, ipv4_interval); + mlx5_fc_update_sampling_interval(priv->mdev, rpriv->neigh_update.min_interval); +} + +void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv) +{ + struct mlx5e_rep_priv *rpriv = priv->ppriv; + struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update; + + mlx5_fc_queue_stats_work(priv->mdev, + &neigh_update->neigh_stats_work, + neigh_update->min_interval); +} + +static bool mlx5e_rep_neigh_entry_hold(struct mlx5e_neigh_hash_entry *nhe) +{ + return refcount_inc_not_zero(&nhe->refcnt); +} + +static void mlx5e_rep_neigh_entry_remove(struct mlx5e_neigh_hash_entry *nhe); + +void mlx5e_rep_neigh_entry_release(struct mlx5e_neigh_hash_entry *nhe) +{ + if (refcount_dec_and_test(&nhe->refcnt)) { + mlx5e_rep_neigh_entry_remove(nhe); + kfree_rcu(nhe, rcu); + } +} + +static struct mlx5e_neigh_hash_entry * +mlx5e_get_next_nhe(struct mlx5e_rep_priv *rpriv, + struct mlx5e_neigh_hash_entry *nhe) +{ + struct mlx5e_neigh_hash_entry *next = NULL; + + rcu_read_lock(); + + for (next = nhe ? + list_next_or_null_rcu(&rpriv->neigh_update.neigh_list, + &nhe->neigh_list, + struct mlx5e_neigh_hash_entry, + neigh_list) : + list_first_or_null_rcu(&rpriv->neigh_update.neigh_list, + struct mlx5e_neigh_hash_entry, + neigh_list); + next; + next = list_next_or_null_rcu(&rpriv->neigh_update.neigh_list, + &next->neigh_list, + struct mlx5e_neigh_hash_entry, + neigh_list)) + if (mlx5e_rep_neigh_entry_hold(next)) + break; + + rcu_read_unlock(); + + if (nhe) + mlx5e_rep_neigh_entry_release(nhe); + + return next; +} + +static void mlx5e_rep_neigh_stats_work(struct work_struct *work) +{ + struct mlx5e_rep_priv *rpriv = container_of(work, struct mlx5e_rep_priv, + neigh_update.neigh_stats_work.work); + struct net_device *netdev = rpriv->netdev; + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5e_neigh_hash_entry *nhe = NULL; + + rtnl_lock(); + if (!list_empty(&rpriv->neigh_update.neigh_list)) + mlx5e_rep_queue_neigh_stats_work(priv); + + while ((nhe = mlx5e_get_next_nhe(rpriv, nhe)) != NULL) + mlx5e_tc_update_neigh_used_value(nhe); + + rtnl_unlock(); +} + +static void mlx5e_rep_neigh_update(struct work_struct *work) +{ + struct mlx5e_neigh_hash_entry *nhe = + container_of(work, struct mlx5e_neigh_hash_entry, neigh_update_work); + struct neighbour *n = nhe->n; + struct mlx5e_encap_entry *e; + unsigned char ha[ETH_ALEN]; + struct mlx5e_priv *priv; + bool neigh_connected; + u8 nud_state, dead; + + rtnl_lock(); + + /* If these parameters are changed after we release the lock, + * we'll receive another event letting us know about it. 
+ * We use this lock to avoid inconsistency between the neigh validity + * and its hw address. + */ + read_lock_bh(&n->lock); + memcpy(ha, n->ha, ETH_ALEN); + nud_state = n->nud_state; + dead = n->dead; + read_unlock_bh(&n->lock); + + neigh_connected = (nud_state & NUD_VALID) && !dead; + + trace_mlx5e_rep_neigh_update(nhe, ha, neigh_connected); + + list_for_each_entry(e, &nhe->encap_list, encap_list) { + if (!mlx5e_encap_take(e)) + continue; + + priv = netdev_priv(e->out_dev); + mlx5e_rep_update_flows(priv, e, neigh_connected, ha); + mlx5e_encap_put(priv, e); + } + mlx5e_rep_neigh_entry_release(nhe); + rtnl_unlock(); + neigh_release(n); +} + +static void mlx5e_rep_queue_neigh_update_work(struct mlx5e_priv *priv, + struct mlx5e_neigh_hash_entry *nhe, + struct neighbour *n) +{ + /* Take a reference to ensure the neighbour and mlx5 encap + * entry won't be destructed until we drop the reference in + * delayed work. + */ + neigh_hold(n); + + /* This assignment is valid as long as the neigh reference + * is held + */ + nhe->n = n; + + if (!queue_work(priv->wq, &nhe->neigh_update_work)) { + mlx5e_rep_neigh_entry_release(nhe); + neigh_release(n); + } +} + +static int mlx5e_rep_netevent_event(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv, + neigh_update.netevent_nb); + struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update; + struct net_device *netdev = rpriv->netdev; + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5e_neigh_hash_entry *nhe = NULL; + struct mlx5e_neigh m_neigh = {}; + struct neigh_parms *p; + struct neighbour *n; + bool found = false; + + switch (event) { + case NETEVENT_NEIGH_UPDATE: + n = ptr; +#if IS_ENABLED(CONFIG_IPV6) + if (n->tbl != ipv6_stub->nd_tbl && n->tbl != &arp_tbl) +#else + if (n->tbl != &arp_tbl) +#endif + return NOTIFY_DONE; + + m_neigh.dev = n->dev; + m_neigh.family = n->ops->family; + memcpy(&m_neigh.dst_ip, n->primary_key, n->tbl->key_len); + + rcu_read_lock(); + nhe = mlx5e_rep_neigh_entry_lookup(priv, &m_neigh); + rcu_read_unlock(); + if (!nhe) + return NOTIFY_DONE; + + mlx5e_rep_queue_neigh_update_work(priv, nhe, n); + break; + + case NETEVENT_DELAY_PROBE_TIME_UPDATE: + p = ptr; + + /* We check the device is present since we don't care about + * changes in the default table; we only care about changes + * done to the per-device delay probe time parameter.
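[Editor's sketch] mlx5e_rep_neigh_update() above snapshots the neighbour's MAC address and validity under the read lock, drops the lock, and only then walks the encap list; a concurrent change simply fires another event, so acting on a slightly stale snapshot is safe. The same snapshot-under-rwlock idiom in user-space C (toy types, POSIX threads; not the kernel code):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

struct toy_neigh {
	pthread_rwlock_t lock;
	unsigned char ha[6];	/* hardware address */
	int valid;
};

static void handle_update(struct toy_neigh *n)
{
	unsigned char ha[6];
	int valid;

	/* Snapshot a consistent (address, validity) pair... */
	pthread_rwlock_rdlock(&n->lock);
	memcpy(ha, n->ha, sizeof(ha));
	valid = n->valid;
	pthread_rwlock_unlock(&n->lock);

	/* ...then do the slow work without holding the lock. */
	printf("neigh %02x:..:%02x is %s\n", ha[0], ha[5],
	       valid ? "connected" : "stale");
}

int main(void)
{
	struct toy_neigh n = { .valid = 1,
			       .ha = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x2a } };

	pthread_rwlock_init(&n.lock, NULL);
	handle_update(&n);
	pthread_rwlock_destroy(&n.lock);
	return 0;
}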
+ */ +#if IS_ENABLED(CONFIG_IPV6) + if (!p->dev || (p->tbl != ipv6_stub->nd_tbl && p->tbl != &arp_tbl)) +#else + if (!p->dev || p->tbl != &arp_tbl) +#endif + return NOTIFY_DONE; + + rcu_read_lock(); + list_for_each_entry_rcu(nhe, &neigh_update->neigh_list, + neigh_list) { + if (p->dev == nhe->m_neigh.dev) { + found = true; + break; + } + } + rcu_read_unlock(); + if (!found) + return NOTIFY_DONE; + + neigh_update->min_interval = min_t(unsigned long, + NEIGH_VAR(p, DELAY_PROBE_TIME), + neigh_update->min_interval); + mlx5_fc_update_sampling_interval(priv->mdev, + neigh_update->min_interval); + break; + } + return NOTIFY_DONE; +} + +static const struct rhashtable_params mlx5e_neigh_ht_params = { + .head_offset = offsetof(struct mlx5e_neigh_hash_entry, rhash_node), + .key_offset = offsetof(struct mlx5e_neigh_hash_entry, m_neigh), + .key_len = sizeof(struct mlx5e_neigh), + .automatic_shrinking = true, +}; + +int mlx5e_rep_neigh_init(struct mlx5e_rep_priv *rpriv) +{ + struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update; + int err; + + err = rhashtable_init(&neigh_update->neigh_ht, &mlx5e_neigh_ht_params); + if (err) + return err; + + INIT_LIST_HEAD(&neigh_update->neigh_list); + mutex_init(&neigh_update->encap_lock); + INIT_DELAYED_WORK(&neigh_update->neigh_stats_work, + mlx5e_rep_neigh_stats_work); + mlx5e_rep_neigh_update_init_interval(rpriv); + + rpriv->neigh_update.netevent_nb.notifier_call = mlx5e_rep_netevent_event; + err = register_netevent_notifier(&rpriv->neigh_update.netevent_nb); + if (err) + goto out_err; + return 0; + +out_err: + rhashtable_destroy(&neigh_update->neigh_ht); + return err; +} + +void mlx5e_rep_neigh_cleanup(struct mlx5e_rep_priv *rpriv) +{ + struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update; + struct mlx5e_priv *priv = netdev_priv(rpriv->netdev); + + unregister_netevent_notifier(&neigh_update->netevent_nb); + + flush_workqueue(priv->wq); /* flush neigh update works */ + + cancel_delayed_work_sync(&rpriv->neigh_update.neigh_stats_work); + + mutex_destroy(&neigh_update->encap_lock); + rhashtable_destroy(&neigh_update->neigh_ht); +} + +static int mlx5e_rep_neigh_entry_insert(struct mlx5e_priv *priv, + struct mlx5e_neigh_hash_entry *nhe) +{ + struct mlx5e_rep_priv *rpriv = priv->ppriv; + int err; + + err = rhashtable_insert_fast(&rpriv->neigh_update.neigh_ht, + &nhe->rhash_node, + mlx5e_neigh_ht_params); + if (err) + return err; + + list_add_rcu(&nhe->neigh_list, &rpriv->neigh_update.neigh_list); + + return err; +} + +static void mlx5e_rep_neigh_entry_remove(struct mlx5e_neigh_hash_entry *nhe) +{ + struct mlx5e_rep_priv *rpriv = nhe->priv->ppriv; + + mutex_lock(&rpriv->neigh_update.encap_lock); + + list_del_rcu(&nhe->neigh_list); + + rhashtable_remove_fast(&rpriv->neigh_update.neigh_ht, + &nhe->rhash_node, + mlx5e_neigh_ht_params); + mutex_unlock(&rpriv->neigh_update.encap_lock); +} + +/* This function must only be called under the representor's encap_lock or + * inside rcu read lock section. + */ +struct mlx5e_neigh_hash_entry * +mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv, + struct mlx5e_neigh *m_neigh) +{ + struct mlx5e_rep_priv *rpriv = priv->ppriv; + struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update; + struct mlx5e_neigh_hash_entry *nhe; + + nhe = rhashtable_lookup_fast(&neigh_update->neigh_ht, m_neigh, + mlx5e_neigh_ht_params); + return nhe && mlx5e_rep_neigh_entry_hold(nhe) ? 
nhe : NULL; +} + +int mlx5e_rep_neigh_entry_create(struct mlx5e_priv *priv, + struct mlx5e_encap_entry *e, + struct mlx5e_neigh_hash_entry **nhe) +{ + int err; + + *nhe = kzalloc(sizeof(**nhe), GFP_KERNEL); + if (!*nhe) + return -ENOMEM; + + (*nhe)->priv = priv; + memcpy(&(*nhe)->m_neigh, &e->m_neigh, sizeof(e->m_neigh)); + INIT_WORK(&(*nhe)->neigh_update_work, mlx5e_rep_neigh_update); + spin_lock_init(&(*nhe)->encap_list_lock); + INIT_LIST_HEAD(&(*nhe)->encap_list); + refcount_set(&(*nhe)->refcnt, 1); + + err = mlx5e_rep_neigh_entry_insert(priv, *nhe); + if (err) + goto out_free; + return 0; + +out_free: + kfree(*nhe); + return err; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.h new file mode 100644 index 000000000000..32b239189c95 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2020 Mellanox Technologies. */ + +#ifndef __MLX5_EN_REP_NEIGH__ +#define __MLX5_EN_REP_NEIGH__ + +#include "en.h" +#include "en_rep.h" + +#if IS_ENABLED(CONFIG_MLX5_CLS_ACT) + +int mlx5e_rep_neigh_init(struct mlx5e_rep_priv *rpriv); +void mlx5e_rep_neigh_cleanup(struct mlx5e_rep_priv *rpriv); + +struct mlx5e_neigh_hash_entry * +mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv, + struct mlx5e_neigh *m_neigh); +int mlx5e_rep_neigh_entry_create(struct mlx5e_priv *priv, + struct mlx5e_encap_entry *e, + struct mlx5e_neigh_hash_entry **nhe); +void mlx5e_rep_neigh_entry_release(struct mlx5e_neigh_hash_entry *nhe); + +void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv); + +#else /* CONFIG_MLX5_CLS_ACT */ + +static inline int +mlx5e_rep_neigh_init(struct mlx5e_rep_priv *rpriv) { return 0; } +static inline void +mlx5e_rep_neigh_cleanup(struct mlx5e_rep_priv *rpriv) {} + +#endif /* CONFIG_MLX5_CLS_ACT */ + +#endif /* __MLX5_EN_REP_NEIGH__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c new file mode 100644 index 000000000000..c609a5e50ebc --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c @@ -0,0 +1,711 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2020 Mellanox Technologies. 
*/ + +#include <net/dst_metadata.h> +#include <linux/netdevice.h> +#include <linux/list.h> +#include <linux/rculist.h> +#include <linux/rtnetlink.h> +#include <linux/workqueue.h> +#include <linux/spinlock.h> +#include "tc.h" +#include "neigh.h" +#include "en_rep.h" +#include "eswitch.h" +#include "esw/chains.h" +#include "en/tc_ct.h" +#include "en/mapping.h" +#include "en/tc_tun.h" +#include "lib/port_tun.h" + +struct mlx5e_rep_indr_block_priv { + struct net_device *netdev; + struct mlx5e_rep_priv *rpriv; + + struct list_head list; +}; + +int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv, + struct mlx5e_encap_entry *e) +{ + struct mlx5e_rep_priv *rpriv = priv->ppriv; + struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv; + struct mlx5_tun_entropy *tun_entropy = &uplink_priv->tun_entropy; + struct mlx5e_neigh_hash_entry *nhe; + int err; + + err = mlx5_tun_entropy_refcount_inc(tun_entropy, e->reformat_type); + if (err) + return err; + + mutex_lock(&rpriv->neigh_update.encap_lock); + nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh); + if (!nhe) { + err = mlx5e_rep_neigh_entry_create(priv, e, &nhe); + if (err) { + mutex_unlock(&rpriv->neigh_update.encap_lock); + mlx5_tun_entropy_refcount_dec(tun_entropy, + e->reformat_type); + return err; + } + } + + e->nhe = nhe; + spin_lock(&nhe->encap_list_lock); + list_add_rcu(&e->encap_list, &nhe->encap_list); + spin_unlock(&nhe->encap_list_lock); + + mutex_unlock(&rpriv->neigh_update.encap_lock); + + return 0; +} + +void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv, + struct mlx5e_encap_entry *e) +{ + struct mlx5e_rep_priv *rpriv = priv->ppriv; + struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv; + struct mlx5_tun_entropy *tun_entropy = &uplink_priv->tun_entropy; + + if (!e->nhe) + return; + + spin_lock(&e->nhe->encap_list_lock); + list_del_rcu(&e->encap_list); + spin_unlock(&e->nhe->encap_list_lock); + + mlx5e_rep_neigh_entry_release(e->nhe); + e->nhe = NULL; + mlx5_tun_entropy_refcount_dec(tun_entropy, e->reformat_type); +} + +void mlx5e_rep_update_flows(struct mlx5e_priv *priv, + struct mlx5e_encap_entry *e, + bool neigh_connected, + unsigned char ha[ETH_ALEN]) +{ + struct ethhdr *eth = (struct ethhdr *)e->encap_header; + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + bool encap_connected; + LIST_HEAD(flow_list); + + ASSERT_RTNL(); + + /* wait for encap to be fully initialized */ + wait_for_completion(&e->res_ready); + + mutex_lock(&esw->offloads.encap_tbl_lock); + encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID); + if (e->compl_result < 0 || (encap_connected == neigh_connected && + ether_addr_equal(e->h_dest, ha))) + goto unlock; + + mlx5e_take_all_encap_flows(e, &flow_list); + + if ((e->flags & MLX5_ENCAP_ENTRY_VALID) && + (!neigh_connected || !ether_addr_equal(e->h_dest, ha))) + mlx5e_tc_encap_flows_del(priv, e, &flow_list); + + if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) { + ether_addr_copy(e->h_dest, ha); + ether_addr_copy(eth->h_dest, ha); + /* Update the encap source mac, in case that we delete + * the flows when encap source mac changed. 
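+ * (the stale flows were removed above and are re-offloaded below).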
+ */ + ether_addr_copy(eth->h_source, e->route_dev->dev_addr); + + mlx5e_tc_encap_flows_add(priv, e, &flow_list); + } +unlock: + mutex_unlock(&esw->offloads.encap_tbl_lock); + mlx5e_put_encap_flow_list(priv, &flow_list); +} + +static int +mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv, + struct flow_cls_offload *cls_flower, int flags) +{ + switch (cls_flower->command) { + case FLOW_CLS_REPLACE: + return mlx5e_configure_flower(priv->netdev, priv, cls_flower, + flags); + case FLOW_CLS_DESTROY: + return mlx5e_delete_flower(priv->netdev, priv, cls_flower, + flags); + case FLOW_CLS_STATS: + return mlx5e_stats_flower(priv->netdev, priv, cls_flower, + flags); + default: + return -EOPNOTSUPP; + } +} + +static +int mlx5e_rep_setup_tc_cls_matchall(struct mlx5e_priv *priv, + struct tc_cls_matchall_offload *ma) +{ + switch (ma->command) { + case TC_CLSMATCHALL_REPLACE: + return mlx5e_tc_configure_matchall(priv, ma); + case TC_CLSMATCHALL_DESTROY: + return mlx5e_tc_delete_matchall(priv, ma); + case TC_CLSMATCHALL_STATS: + mlx5e_tc_stats_matchall(priv, ma); + return 0; + default: + return -EOPNOTSUPP; + } +} + +static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data, + void *cb_priv) +{ + unsigned long flags = MLX5_TC_FLAG(INGRESS) | MLX5_TC_FLAG(ESW_OFFLOAD); + struct mlx5e_priv *priv = cb_priv; + + switch (type) { + case TC_SETUP_CLSFLOWER: + return mlx5e_rep_setup_tc_cls_flower(priv, type_data, flags); + case TC_SETUP_CLSMATCHALL: + return mlx5e_rep_setup_tc_cls_matchall(priv, type_data); + default: + return -EOPNOTSUPP; + } +} + +static int mlx5e_rep_setup_ft_cb(enum tc_setup_type type, void *type_data, + void *cb_priv) +{ + struct flow_cls_offload tmp, *f = type_data; + struct mlx5e_priv *priv = cb_priv; + struct mlx5_eswitch *esw; + unsigned long flags; + int err; + + flags = MLX5_TC_FLAG(INGRESS) | + MLX5_TC_FLAG(ESW_OFFLOAD) | + MLX5_TC_FLAG(FT_OFFLOAD); + esw = priv->mdev->priv.eswitch; + + switch (type) { + case TC_SETUP_CLSFLOWER: + memcpy(&tmp, f, sizeof(*f)); + + if (!mlx5_esw_chains_prios_supported(esw)) + return -EOPNOTSUPP; + + /* Re-use tc offload path by moving the ft flow to the + * reserved ft chain. + * + * FT offload can use prio range [0, INT_MAX], so we normalize + * it to range [1, mlx5_esw_chains_get_prio_range(esw)] + * as with tc, where prio 0 isn't supported. + * + * We only support chain 0 of FT offload. 
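+ * E.g. an FT rule at prio 0 is installed in the reserved chain at prio 1.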
+ */ + if (tmp.common.prio >= mlx5_esw_chains_get_prio_range(esw)) + return -EOPNOTSUPP; + if (tmp.common.chain_index != 0) + return -EOPNOTSUPP; + + tmp.common.chain_index = mlx5_esw_chains_get_ft_chain(esw); + tmp.common.prio++; + err = mlx5e_rep_setup_tc_cls_flower(priv, &tmp, flags); + memcpy(&f->stats, &tmp.stats, sizeof(f->stats)); + return err; + default: + return -EOPNOTSUPP; + } +} + +static LIST_HEAD(mlx5e_rep_block_tc_cb_list); +static LIST_HEAD(mlx5e_rep_block_ft_cb_list); +int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct flow_block_offload *f = type_data; + + f->unlocked_driver_cb = true; + + switch (type) { + case TC_SETUP_BLOCK: + return flow_block_cb_setup_simple(type_data, + &mlx5e_rep_block_tc_cb_list, + mlx5e_rep_setup_tc_cb, + priv, priv, true); + case TC_SETUP_FT: + return flow_block_cb_setup_simple(type_data, + &mlx5e_rep_block_ft_cb_list, + mlx5e_rep_setup_ft_cb, + priv, priv, true); + default: + return -EOPNOTSUPP; + } +} + +int mlx5e_rep_tc_init(struct mlx5e_rep_priv *rpriv) +{ + struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv; + int err; + + mutex_init(&uplink_priv->unready_flows_lock); + INIT_LIST_HEAD(&uplink_priv->unready_flows); + + /* init shared tc flow table */ + err = mlx5e_tc_esw_init(&uplink_priv->tc_ht); + return err; +} + +void mlx5e_rep_tc_cleanup(struct mlx5e_rep_priv *rpriv) +{ + /* delete shared tc flow table */ + mlx5e_tc_esw_cleanup(&rpriv->uplink_priv.tc_ht); + mutex_destroy(&rpriv->uplink_priv.unready_flows_lock); +} + +void mlx5e_rep_tc_enable(struct mlx5e_priv *priv) +{ + struct mlx5e_rep_priv *rpriv = priv->ppriv; + + INIT_WORK(&rpriv->uplink_priv.reoffload_flows_work, + mlx5e_tc_reoffload_flows_work); +} + +void mlx5e_rep_tc_disable(struct mlx5e_priv *priv) +{ + struct mlx5e_rep_priv *rpriv = priv->ppriv; + + cancel_work_sync(&rpriv->uplink_priv.reoffload_flows_work); +} + +int mlx5e_rep_tc_event_port_affinity(struct mlx5e_priv *priv) +{ + struct mlx5e_rep_priv *rpriv = priv->ppriv; + + queue_work(priv->wq, &rpriv->uplink_priv.reoffload_flows_work); + + return NOTIFY_OK; +} + +static struct mlx5e_rep_indr_block_priv * +mlx5e_rep_indr_block_priv_lookup(struct mlx5e_rep_priv *rpriv, + struct net_device *netdev) +{ + struct mlx5e_rep_indr_block_priv *cb_priv; + + /* All callback list access should be protected by RTNL. 
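+ * (hence the ASSERT_RTNL() right below).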
*/ + ASSERT_RTNL(); + + list_for_each_entry(cb_priv, + &rpriv->uplink_priv.tc_indr_block_priv_list, + list) + if (cb_priv->netdev == netdev) + return cb_priv; + + return NULL; +} + +static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv, + struct net_device *netdev); + +void mlx5e_rep_indr_clean_block_privs(struct mlx5e_rep_priv *rpriv) +{ + struct mlx5e_rep_indr_block_priv *cb_priv, *temp; + struct list_head *head = &rpriv->uplink_priv.tc_indr_block_priv_list; + + list_for_each_entry_safe(cb_priv, temp, head, list) { + mlx5e_rep_indr_unregister_block(rpriv, cb_priv->netdev); + kfree(cb_priv); + } +} + +static int +mlx5e_rep_indr_offload(struct net_device *netdev, + struct flow_cls_offload *flower, + struct mlx5e_rep_indr_block_priv *indr_priv, + unsigned long flags) +{ + struct mlx5e_priv *priv = netdev_priv(indr_priv->rpriv->netdev); + int err = 0; + + switch (flower->command) { + case FLOW_CLS_REPLACE: + err = mlx5e_configure_flower(netdev, priv, flower, flags); + break; + case FLOW_CLS_DESTROY: + err = mlx5e_delete_flower(netdev, priv, flower, flags); + break; + case FLOW_CLS_STATS: + err = mlx5e_stats_flower(netdev, priv, flower, flags); + break; + default: + err = -EOPNOTSUPP; + } + + return err; +} + +static int mlx5e_rep_indr_setup_tc_cb(enum tc_setup_type type, + void *type_data, void *indr_priv) +{ + unsigned long flags = MLX5_TC_FLAG(EGRESS) | MLX5_TC_FLAG(ESW_OFFLOAD); + struct mlx5e_rep_indr_block_priv *priv = indr_priv; + + switch (type) { + case TC_SETUP_CLSFLOWER: + return mlx5e_rep_indr_offload(priv->netdev, type_data, priv, + flags); + default: + return -EOPNOTSUPP; + } +} + +static int mlx5e_rep_indr_setup_ft_cb(enum tc_setup_type type, + void *type_data, void *indr_priv) +{ + struct mlx5e_rep_indr_block_priv *priv = indr_priv; + struct flow_cls_offload *f = type_data; + struct flow_cls_offload tmp; + struct mlx5e_priv *mpriv; + struct mlx5_eswitch *esw; + unsigned long flags; + int err; + + mpriv = netdev_priv(priv->rpriv->netdev); + esw = mpriv->mdev->priv.eswitch; + + flags = MLX5_TC_FLAG(EGRESS) | + MLX5_TC_FLAG(ESW_OFFLOAD) | + MLX5_TC_FLAG(FT_OFFLOAD); + + switch (type) { + case TC_SETUP_CLSFLOWER: + memcpy(&tmp, f, sizeof(*f)); + + /* Re-use tc offload path by moving the ft flow to the + * reserved ft chain. + * + * FT offload can use prio range [0, INT_MAX], so we normalize + * it to range [1, mlx5_esw_chains_get_prio_range(esw)] + * as with tc, where prio 0 isn't supported. + * + * We only support chain 0 of FT offload. 
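+ * E.g. a prio 0 FT rule maps to prio 1 in the reserved ft chain.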
+ */ + if (!mlx5_esw_chains_prios_supported(esw) || + tmp.common.prio >= mlx5_esw_chains_get_prio_range(esw) || + tmp.common.chain_index) + return -EOPNOTSUPP; + + tmp.common.chain_index = mlx5_esw_chains_get_ft_chain(esw); + tmp.common.prio++; + err = mlx5e_rep_indr_offload(priv->netdev, &tmp, priv, flags); + memcpy(&f->stats, &tmp.stats, sizeof(f->stats)); + return err; + default: + return -EOPNOTSUPP; + } +} + +static void mlx5e_rep_indr_block_unbind(void *cb_priv) +{ + struct mlx5e_rep_indr_block_priv *indr_priv = cb_priv; + + list_del(&indr_priv->list); + kfree(indr_priv); +} + +static LIST_HEAD(mlx5e_block_cb_list); + +static int +mlx5e_rep_indr_setup_block(struct net_device *netdev, + struct mlx5e_rep_priv *rpriv, + struct flow_block_offload *f, + flow_setup_cb_t *setup_cb) +{ + struct mlx5e_rep_indr_block_priv *indr_priv; + struct flow_block_cb *block_cb; + + if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) + return -EOPNOTSUPP; + + f->unlocked_driver_cb = true; + f->driver_block_list = &mlx5e_block_cb_list; + + switch (f->command) { + case FLOW_BLOCK_BIND: + indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev); + if (indr_priv) + return -EEXIST; + + indr_priv = kmalloc(sizeof(*indr_priv), GFP_KERNEL); + if (!indr_priv) + return -ENOMEM; + + indr_priv->netdev = netdev; + indr_priv->rpriv = rpriv; + list_add(&indr_priv->list, + &rpriv->uplink_priv.tc_indr_block_priv_list); + + block_cb = flow_block_cb_alloc(setup_cb, indr_priv, indr_priv, + mlx5e_rep_indr_block_unbind); + if (IS_ERR(block_cb)) { + list_del(&indr_priv->list); + kfree(indr_priv); + return PTR_ERR(block_cb); + } + flow_block_cb_add(block_cb, f); + list_add_tail(&block_cb->driver_list, &mlx5e_block_cb_list); + + return 0; + case FLOW_BLOCK_UNBIND: + indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev); + if (!indr_priv) + return -ENOENT; + + block_cb = flow_block_cb_lookup(f->block, setup_cb, indr_priv); + if (!block_cb) + return -ENOENT; + + flow_block_cb_remove(block_cb, f); + list_del(&block_cb->driver_list); + return 0; + default: + return -EOPNOTSUPP; + } + return 0; +} + +static +int mlx5e_rep_indr_setup_cb(struct net_device *netdev, void *cb_priv, + enum tc_setup_type type, void *type_data) +{ + switch (type) { + case TC_SETUP_BLOCK: + return mlx5e_rep_indr_setup_block(netdev, cb_priv, type_data, + mlx5e_rep_indr_setup_tc_cb); + case TC_SETUP_FT: + return mlx5e_rep_indr_setup_block(netdev, cb_priv, type_data, + mlx5e_rep_indr_setup_ft_cb); + default: + return -EOPNOTSUPP; + } +} + +static int mlx5e_rep_indr_register_block(struct mlx5e_rep_priv *rpriv, + struct net_device *netdev) +{ + int err; + + err = __flow_indr_block_cb_register(netdev, rpriv, + mlx5e_rep_indr_setup_cb, + rpriv); + if (err) { + struct mlx5e_priv *priv = netdev_priv(rpriv->netdev); + + mlx5_core_err(priv->mdev, "Failed to register remote block notifier for %s err=%d\n", + netdev_name(netdev), err); + } + return err; +} + +static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv, + struct net_device *netdev) +{ + __flow_indr_block_cb_unregister(netdev, mlx5e_rep_indr_setup_cb, + rpriv); +} + +static int mlx5e_nic_rep_netdevice_event(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv, + uplink_priv.netdevice_nb); + struct mlx5e_priv *priv = netdev_priv(rpriv->netdev); + struct net_device *netdev = netdev_notifier_info_to_dev(ptr); + + if (!mlx5e_tc_tun_device_to_offload(priv, netdev) && + !(is_vlan_dev(netdev) && 
vlan_dev_real_dev(netdev) == rpriv->netdev)) + return NOTIFY_OK; + + switch (event) { + case NETDEV_REGISTER: + mlx5e_rep_indr_register_block(rpriv, netdev); + break; + case NETDEV_UNREGISTER: + mlx5e_rep_indr_unregister_block(rpriv, netdev); + break; + } + return NOTIFY_OK; +} + +int mlx5e_rep_tc_netdevice_event_register(struct mlx5e_rep_priv *rpriv) +{ + struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv; + int err; + + /* init indirect block notifications */ + INIT_LIST_HEAD(&uplink_priv->tc_indr_block_priv_list); + + uplink_priv->netdevice_nb.notifier_call = mlx5e_nic_rep_netdevice_event; + err = register_netdevice_notifier_dev_net(rpriv->netdev, + &uplink_priv->netdevice_nb, + &uplink_priv->netdevice_nn); + return err; +} + +void mlx5e_rep_tc_netdevice_event_unregister(struct mlx5e_rep_priv *rpriv) +{ + struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv; + + /* clean indirect TC block notifications */ + unregister_netdevice_notifier_dev_net(rpriv->netdev, + &uplink_priv->netdevice_nb, + &uplink_priv->netdevice_nn); +} + +#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) +static bool mlx5e_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb, + struct mlx5e_tc_update_priv *tc_priv, + u32 tunnel_id) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct tunnel_match_enc_opts enc_opts = {}; + struct mlx5_rep_uplink_priv *uplink_priv; + struct mlx5e_rep_priv *uplink_rpriv; + struct metadata_dst *tun_dst; + struct tunnel_match_key key; + u32 tun_id, enc_opts_id; + struct net_device *dev; + int err; + + enc_opts_id = tunnel_id & ENC_OPTS_BITS_MASK; + tun_id = tunnel_id >> ENC_OPTS_BITS; + + if (!tun_id) + return true; + + uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); + uplink_priv = &uplink_rpriv->uplink_priv; + + err = mapping_find(uplink_priv->tunnel_mapping, tun_id, &key); + if (err) { + WARN_ON_ONCE(true); + netdev_dbg(priv->netdev, + "Couldn't find tunnel for tun_id: %d, err: %d\n", + tun_id, err); + return false; + } + + if (enc_opts_id) { + err = mapping_find(uplink_priv->tunnel_enc_opts_mapping, + enc_opts_id, &enc_opts); + if (err) { + netdev_dbg(priv->netdev, + "Couldn't find tunnel (opts) for tun_id: %d, err: %d\n", + enc_opts_id, err); + return false; + } + } + + tun_dst = tun_rx_dst(enc_opts.key.len); + if (!tun_dst) { + WARN_ON_ONCE(true); + return false; + } + + ip_tunnel_key_init(&tun_dst->u.tun_info.key, + key.enc_ipv4.src, key.enc_ipv4.dst, + key.enc_ip.tos, key.enc_ip.ttl, + 0, /* label */ + key.enc_tp.src, key.enc_tp.dst, + key32_to_tunnel_id(key.enc_key_id.keyid), + TUNNEL_KEY); + + if (enc_opts.key.len) + ip_tunnel_info_opts_set(&tun_dst->u.tun_info, + enc_opts.key.data, + enc_opts.key.len, + enc_opts.key.dst_opt_type); + + skb_dst_set(skb, (struct dst_entry *)tun_dst); + dev = dev_get_by_index(&init_net, key.filter_ifindex); + if (!dev) { + netdev_dbg(priv->netdev, + "Couldn't find tunnel device with ifindex: %d\n", + key.filter_ifindex); + return false; + } + + /* Set tun_dev so we do dev_put() after datapath */ + tc_priv->tun_dev = dev; + + skb->dev = dev; + + return true; +} +#endif /* CONFIG_NET_TC_SKB_EXT */ + +bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe, + struct sk_buff *skb, + struct mlx5e_tc_update_priv *tc_priv) +{ +#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) + u32 chain = 0, reg_c0, reg_c1, tunnel_id, tuple_id; + struct mlx5_rep_uplink_priv *uplink_priv; + struct mlx5e_rep_priv *uplink_rpriv; + struct tc_skb_ext *tc_skb_ext; + struct mlx5_eswitch *esw; + struct mlx5e_priv *priv; + int tunnel_moffset; + int err; 
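+ /* reg_c0 holds the chain tag; reg_c1 packs the CT tuple id and the tunnel id */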
+ + reg_c0 = (be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK); + if (reg_c0 == MLX5_FS_DEFAULT_FLOW_TAG) + reg_c0 = 0; + reg_c1 = be32_to_cpu(cqe->ft_metadata); + + if (!reg_c0) + return true; + + priv = netdev_priv(skb->dev); + esw = priv->mdev->priv.eswitch; + + err = mlx5_eswitch_get_chain_for_tag(esw, reg_c0, &chain); + if (err) { + netdev_dbg(priv->netdev, + "Couldn't find chain for chain tag: %d, err: %d\n", + reg_c0, err); + return false; + } + + if (chain) { + tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT); + if (!tc_skb_ext) { + WARN_ON(1); + return false; + } + + tc_skb_ext->chain = chain; + + tuple_id = reg_c1 & TUPLE_ID_MAX; + + uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); + uplink_priv = &uplink_rpriv->uplink_priv; + if (!mlx5e_tc_ct_restore_flow(uplink_priv, skb, tuple_id)) + return false; + } + + tunnel_moffset = mlx5e_tc_attr_to_reg_mappings[TUNNEL_TO_REG].moffset; + tunnel_id = reg_c1 >> (8 * tunnel_moffset); + return mlx5e_restore_tunnel(priv, skb, tc_priv, tunnel_id); +#endif /* CONFIG_NET_TC_SKB_EXT */ + + return true; +} + +void mlx5_rep_tc_post_napi_receive(struct mlx5e_tc_update_priv *tc_priv) +{ + if (tc_priv->tun_dev) + dev_put(tc_priv->tun_dev); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h new file mode 100644 index 000000000000..86f92abf2fdd --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2020 Mellanox Technologies. */ + +#ifndef __MLX5_EN_REP_TC_H__ +#define __MLX5_EN_REP_TC_H__ + +#include <linux/skbuff.h> +#include "en_tc.h" +#include "en_rep.h" + +#if IS_ENABLED(CONFIG_MLX5_CLS_ACT) + +int mlx5e_rep_tc_init(struct mlx5e_rep_priv *rpriv); +void mlx5e_rep_tc_cleanup(struct mlx5e_rep_priv *rpriv); + +int mlx5e_rep_tc_netdevice_event_register(struct mlx5e_rep_priv *rpriv); +void mlx5e_rep_tc_netdevice_event_unregister(struct mlx5e_rep_priv *rpriv); + +void mlx5e_rep_tc_enable(struct mlx5e_priv *priv); +void mlx5e_rep_tc_disable(struct mlx5e_priv *priv); + +int mlx5e_rep_tc_event_port_affinity(struct mlx5e_priv *priv); + +void mlx5e_rep_update_flows(struct mlx5e_priv *priv, + struct mlx5e_encap_entry *e, + bool neigh_connected, + unsigned char ha[ETH_ALEN]); + +int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv, + struct mlx5e_encap_entry *e); +void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv, + struct mlx5e_encap_entry *e); + +int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data); +void mlx5e_rep_indr_clean_block_privs(struct mlx5e_rep_priv *rpriv); + +bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe, + struct sk_buff *skb, + struct mlx5e_tc_update_priv *tc_priv); +void mlx5_rep_tc_post_napi_receive(struct mlx5e_tc_update_priv *tc_priv); + +#else /* CONFIG_MLX5_CLS_ACT */ + +struct mlx5e_rep_priv; +static inline int +mlx5e_rep_tc_init(struct mlx5e_rep_priv *rpriv) { return 0; } +static inline void +mlx5e_rep_tc_cleanup(struct mlx5e_rep_priv *rpriv) {} + +static inline int +mlx5e_rep_tc_netdevice_event_register(struct mlx5e_rep_priv *rpriv) { return 0; } +static inline void +mlx5e_rep_tc_netdevice_event_unregister(struct mlx5e_rep_priv *rpriv) {} + +static inline void +mlx5e_rep_tc_enable(struct mlx5e_priv *priv) {} +static inline void +mlx5e_rep_tc_disable(struct mlx5e_priv *priv) {} + +static inline int +mlx5e_rep_tc_event_port_affinity(struct mlx5e_priv *priv) { return NOTIFY_DONE; } + +static inline 
int +mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) { return -EOPNOTSUPP; } + +static inline void +mlx5e_rep_indr_clean_block_privs(struct mlx5e_rep_priv *rpriv) {} + +struct mlx5e_tc_update_priv; +static inline bool +mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe, + struct sk_buff *skb, + struct mlx5e_tc_update_priv *tc_priv) { return true; } +static inline void +mlx5_rep_tc_post_napi_receive(struct mlx5e_tc_update_priv *tc_priv) {} + +#endif /* CONFIG_MLX5_CLS_ACT */ + +#endif /* __MLX5_EN_REP_TC_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c index 98263f00ee43..afc19dca1f5f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c @@ -119,7 +119,7 @@ mlx5_tc_ct_get_ct_priv(struct mlx5e_priv *priv) } static int -mlx5_tc_ct_set_tuple_match(struct mlx5_flow_spec *spec, +mlx5_tc_ct_set_tuple_match(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec, struct flow_rule *rule) { void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, @@ -134,10 +134,8 @@ mlx5_tc_ct_set_tuple_match(struct mlx5_flow_spec *spec, flow_rule_match_basic(rule, &match); - MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype, - ntohs(match.mask->n_proto)); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, - ntohs(match.key->n_proto)); + mlx5e_tc_set_ethertype(priv->mdev, &match, true, headers_c, + headers_v); MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol, match.mask->ip_proto); MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, @@ -533,7 +531,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv, attr->counter = entry->counter; attr->flags |= MLX5_ESW_ATTR_FLAG_NO_IN_PORT; - mlx5_tc_ct_set_tuple_match(spec, flow_rule); + mlx5_tc_ct_set_tuple_match(netdev_priv(ct_priv->netdev), spec, flow_rule); mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG, entry->zone & MLX5_CT_ZONE_MASK, MLX5_CT_ZONE_MASK); @@ -711,6 +709,7 @@ mlx5_tc_ct_parse_match(struct mlx5e_priv *priv, struct netlink_ext_ack *extack) { struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv); + struct flow_rule *rule = flow_cls_offload_flow_rule(f); struct flow_dissector_key_ct *mask, *key; bool trk, est, untrk, unest, new; u32 ctstate = 0, ctstate_mask = 0; @@ -718,7 +717,7 @@ mlx5_tc_ct_parse_match(struct mlx5e_priv *priv, u16 ct_state, ct_state_mask; struct flow_match_ct match; - if (!flow_rule_match_key(f->rule, FLOW_DISSECTOR_KEY_CT)) + if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CT)) return 0; if (!ct_priv) { @@ -727,7 +726,7 @@ mlx5_tc_ct_parse_match(struct mlx5e_priv *priv, return -EOPNOTSUPP; } - flow_rule_match_ct(f->rule, &match); + flow_rule_match_ct(rule, &match); key = match.key; mask = match.mask; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h index 091d305b633e..626f6c04882e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h @@ -130,7 +130,9 @@ mlx5_tc_ct_parse_match(struct mlx5e_priv *priv, struct flow_cls_offload *f, struct netlink_ext_ack *extack) { - if (!flow_rule_match_key(f->rule, FLOW_DISSECTOR_KEY_CT)) + struct flow_rule *rule = flow_cls_offload_flow_rule(f); + + if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CT)) return 0; NL_SET_ERR_MSG_MOD(extack, "mlx5 tc ct offload isn't enabled."); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 
b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c index b45c3f46570b..7cce85faa16f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c @@ -4,8 +4,11 @@ #include <net/vxlan.h> #include <net/gre.h> #include <net/geneve.h> +#include <net/bareudp.h> #include "en/tc_tun.h" #include "en_tc.h" +#include "rep/tc.h" +#include "rep/neigh.h" struct mlx5e_tc_tunnel *mlx5e_get_tc_tun(struct net_device *tunnel_dev) { @@ -16,6 +19,8 @@ struct mlx5e_tc_tunnel *mlx5e_get_tc_tun(struct net_device *tunnel_dev) else if (netif_is_gretap(tunnel_dev) || netif_is_ip6gretap(tunnel_dev)) return &gre_tunnel; + else if (netif_is_bareudp(tunnel_dev)) + return &mplsoudp_tunnel; else return NULL; } @@ -96,9 +101,8 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv, } rt = ip_route_output_key(dev_net(mirred_dev), fl4); - ret = PTR_ERR_OR_ZERO(rt); - if (ret) - return ret; + if (IS_ERR(rt)) + return PTR_ERR(rt); if (mlx5_lag_is_multipath(mdev) && rt->rt_gw_family != AF_INET) { ip_rt_put(rt); @@ -508,6 +512,13 @@ int mlx5e_tc_tun_parse(struct net_device *filter_dev, } if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) { + struct flow_dissector_key_basic key_basic = {}; + struct flow_dissector_key_basic mask_basic = { + .n_proto = htons(0xFFFF), + }; + struct flow_match_basic match_basic = { + .key = &key_basic, .mask = &mask_basic, + }; struct flow_match_control match; u16 addr_type; @@ -533,10 +544,9 @@ int mlx5e_tc_tun_parse(struct net_device *filter_dev, dst_ipv4_dst_ipv6.ipv4_layout.ipv4, ntohl(match.key->dst)); - MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, - ethertype); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, - ETH_P_IP); + key_basic.n_proto = htons(ETH_P_IP); + mlx5e_tc_set_ethertype(priv->mdev, &match_basic, true, + headers_c, headers_v); } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { struct flow_match_ipv6_addrs match; @@ -559,10 +569,9 @@ int mlx5e_tc_tun_parse(struct net_device *filter_dev, &match.key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6)); - MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, - ethertype); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, - ETH_P_IPV6); + key_basic.n_proto = htons(ETH_P_IPV6); + mlx5e_tc_set_ethertype(priv->mdev, &match_basic, true, + headers_c, headers_v); } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h index 1630f0ec3ad7..704359df6095 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h @@ -16,6 +16,7 @@ enum { MLX5E_TC_TUNNEL_TYPE_VXLAN, MLX5E_TC_TUNNEL_TYPE_GENEVE, MLX5E_TC_TUNNEL_TYPE_GRETAP, + MLX5E_TC_TUNNEL_TYPE_MPLSOUDP, }; struct mlx5e_tc_tunnel { @@ -46,6 +47,7 @@ struct mlx5e_tc_tunnel { extern struct mlx5e_tc_tunnel vxlan_tunnel; extern struct mlx5e_tc_tunnel geneve_tunnel; extern struct mlx5e_tc_tunnel gre_tunnel; +extern struct mlx5e_tc_tunnel mplsoudp_tunnel; struct mlx5e_tc_tunnel *mlx5e_get_tc_tun(struct net_device *tunnel_dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c new file mode 100644 index 000000000000..1f9526244222 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c @@ -0,0 +1,134 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2018 Mellanox Technologies. 
*/ + +#include <net/bareudp.h> +#include <net/mpls.h> +#include "en/tc_tun.h" + +static bool can_offload(struct mlx5e_priv *priv) +{ + return MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, reformat_l3_tunnel_to_l2); +} + +static int calc_hlen(struct mlx5e_encap_entry *e) +{ + return sizeof(struct udphdr) + MPLS_HLEN; +} + +static int init_encap_attr(struct net_device *tunnel_dev, + struct mlx5e_priv *priv, + struct mlx5e_encap_entry *e, + struct netlink_ext_ack *extack) +{ + e->tunnel = &mplsoudp_tunnel; + e->reformat_type = MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL; + return 0; +} + +static int generate_ip_tun_hdr(char buf[], + __u8 *ip_proto, + struct mlx5e_encap_entry *r) +{ + const struct ip_tunnel_key *tun_key = &r->tun_info->key; + struct udphdr *udp = (struct udphdr *)(buf); + struct mpls_shim_hdr *mpls; + u32 tun_id; + + tun_id = be32_to_cpu(tunnel_id_to_key32(tun_key->tun_id)); + mpls = (struct mpls_shim_hdr *)(udp + 1); + *ip_proto = IPPROTO_UDP; + + udp->dest = tun_key->tp_dst; + *mpls = mpls_entry_encode(tun_id, tun_key->ttl, tun_key->tos, true); + + return 0; +} + +static int parse_udp_ports(struct mlx5e_priv *priv, + struct mlx5_flow_spec *spec, + struct flow_cls_offload *f, + void *headers_c, + void *headers_v) +{ + return mlx5e_tc_tun_parse_udp_ports(priv, spec, f, headers_c, headers_v); +} + +static int parse_tunnel(struct mlx5e_priv *priv, + struct mlx5_flow_spec *spec, + struct flow_cls_offload *f, + void *headers_c, + void *headers_v) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(f); + struct flow_match_enc_keyid enc_keyid; + struct flow_match_mpls match; + void *misc2_c; + void *misc2_v; + + misc2_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, + misc_parameters_2); + misc2_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, + misc_parameters_2); + + if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) + return 0; + + if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) + return 0; + + flow_rule_match_enc_keyid(rule, &enc_keyid); + + if (!enc_keyid.mask->keyid) + return 0; + + if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) & + MLX5_FLEX_PROTO_CW_MPLS_UDP)) + return -EOPNOTSUPP; + + flow_rule_match_mpls(rule, &match); + + /* Only support matching the first LSE */ + if (match.mask->used_lses != 1) + return -EOPNOTSUPP; + + MLX5_SET(fte_match_set_misc2, misc2_c, + outer_first_mpls_over_udp.mpls_label, + match.mask->ls[0].mpls_label); + MLX5_SET(fte_match_set_misc2, misc2_v, + outer_first_mpls_over_udp.mpls_label, + match.key->ls[0].mpls_label); + + MLX5_SET(fte_match_set_misc2, misc2_c, + outer_first_mpls_over_udp.mpls_exp, + match.mask->ls[0].mpls_tc); + MLX5_SET(fte_match_set_misc2, misc2_v, + outer_first_mpls_over_udp.mpls_exp, match.key->ls[0].mpls_tc); + + MLX5_SET(fte_match_set_misc2, misc2_c, + outer_first_mpls_over_udp.mpls_s_bos, + match.mask->ls[0].mpls_bos); + MLX5_SET(fte_match_set_misc2, misc2_v, + outer_first_mpls_over_udp.mpls_s_bos, + match.key->ls[0].mpls_bos); + + MLX5_SET(fte_match_set_misc2, misc2_c, + outer_first_mpls_over_udp.mpls_ttl, + match.mask->ls[0].mpls_ttl); + MLX5_SET(fte_match_set_misc2, misc2_v, + outer_first_mpls_over_udp.mpls_ttl, + match.key->ls[0].mpls_ttl); + spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2; + + return 0; +} + +struct mlx5e_tc_tunnel mplsoudp_tunnel = { + .tunnel_type = MLX5E_TC_TUNNEL_TYPE_MPLSOUDP, + .match_level = MLX5_MATCH_L4, + .can_offload = can_offload, + .calc_hlen = calc_hlen, + .init_encap_attr = init_encap_attr, + .generate_ip_tun_hdr = generate_ip_tun_hdr, + 
.parse_udp_ports = parse_udp_ports, + .parse_tunnel = parse_tunnel, +}; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c index 42202d19245c..3bea1d4be53b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c @@ -31,7 +31,7 @@ */ #include <linux/bpf_trace.h> -#include <net/xdp_sock.h> +#include <net/xdp_sock_drv.h> #include "en/xdp.h" #include "en/params.h" @@ -71,7 +71,7 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq, xdptxd.data = xdpf->data; xdptxd.len = xdpf->len; - if (xdp->rxq->mem.type == MEM_TYPE_ZERO_COPY) { + if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL) { /* The xdp_buff was in the UMEM and was copied into a newly * allocated page. The UMEM page was returned via the ZCA, and * this new page has to be mapped at this point and has to be @@ -119,50 +119,33 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq, /* returns true if packet was consumed by xdp */ bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di, - void *va, u16 *rx_headroom, u32 *len, bool xsk) + u32 *len, struct xdp_buff *xdp) { struct bpf_prog *prog = READ_ONCE(rq->xdp_prog); - struct xdp_umem *umem = rq->umem; - struct xdp_buff xdp; u32 act; int err; if (!prog) return false; - xdp.data = va + *rx_headroom; - xdp_set_data_meta_invalid(&xdp); - xdp.data_end = xdp.data + *len; - xdp.data_hard_start = va; - if (xsk) - xdp.handle = di->xsk.handle; - xdp.rxq = &rq->xdp_rxq; - xdp.frame_sz = rq->buff.frame0_sz; - - act = bpf_prog_run_xdp(prog, &xdp); - if (xsk) { - u64 off = xdp.data - xdp.data_hard_start; - - xdp.handle = xsk_umem_adjust_offset(umem, xdp.handle, off); - } + act = bpf_prog_run_xdp(prog, xdp); switch (act) { case XDP_PASS: - *rx_headroom = xdp.data - xdp.data_hard_start; - *len = xdp.data_end - xdp.data; + *len = xdp->data_end - xdp->data; return false; case XDP_TX: - if (unlikely(!mlx5e_xmit_xdp_buff(rq->xdpsq, rq, di, &xdp))) + if (unlikely(!mlx5e_xmit_xdp_buff(rq->xdpsq, rq, di, xdp))) goto xdp_abort; __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */ return true; case XDP_REDIRECT: /* When XDP enabled then page-refcnt==1 here */ - err = xdp_do_redirect(rq->netdev, &xdp, prog); + err = xdp_do_redirect(rq->netdev, xdp, prog); if (unlikely(err)) goto xdp_abort; __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); __set_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags); - if (!xsk) + if (xdp->rxq->mem.type != MEM_TYPE_XSK_BUFF_POOL) mlx5e_page_dma_unmap(rq, di); rq->stats->xdp_redirect++; return true; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h index be64eb68f4e5..ca48c293151b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h @@ -61,7 +61,7 @@ struct mlx5e_xsk_param; int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk); bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di, - void *va, u16 *rx_headroom, u32 *len, bool xsk); + u32 *len, struct xdp_buff *xdp); void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq); bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq); void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c index 62fc8a128a8d..a33a1f762c70 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c @@ -3,71 +3,10 @@ #include "rx.h" #include "en/xdp.h" -#include <net/xdp_sock.h> +#include <net/xdp_sock_drv.h> /* RX data path */ -bool mlx5e_xsk_pages_enough_umem(struct mlx5e_rq *rq, int count) -{ - /* Check in advance that we have enough frames, instead of allocating - * one-by-one, failing and moving frames to the Reuse Ring. - */ - return xsk_umem_has_addrs_rq(rq->umem, count); -} - -int mlx5e_xsk_page_alloc_umem(struct mlx5e_rq *rq, - struct mlx5e_dma_info *dma_info) -{ - struct xdp_umem *umem = rq->umem; - u64 handle; - - if (!xsk_umem_peek_addr_rq(umem, &handle)) - return -ENOMEM; - - dma_info->xsk.handle = xsk_umem_adjust_offset(umem, handle, - rq->buff.umem_headroom); - dma_info->xsk.data = xdp_umem_get_data(umem, dma_info->xsk.handle); - - /* No need to add headroom to the DMA address. In striding RQ case, we - * just provide pages for UMR, and headroom is counted at the setup - * stage when creating a WQE. In non-striding RQ case, headroom is - * accounted in mlx5e_alloc_rx_wqe. - */ - dma_info->addr = xdp_umem_get_dma(umem, handle); - - xsk_umem_release_addr_rq(umem); - - dma_sync_single_for_device(rq->pdev, dma_info->addr, PAGE_SIZE, - DMA_BIDIRECTIONAL); - - return 0; -} - -static inline void mlx5e_xsk_recycle_frame(struct mlx5e_rq *rq, u64 handle) -{ - xsk_umem_fq_reuse(rq->umem, handle & rq->umem->chunk_mask); -} - -/* XSKRQ uses pages from UMEM, they must not be released. They are returned to - * the userspace if possible, and if not, this function is called to reuse them - * in the driver. - */ -void mlx5e_xsk_page_release(struct mlx5e_rq *rq, - struct mlx5e_dma_info *dma_info) -{ - mlx5e_xsk_recycle_frame(rq, dma_info->xsk.handle); -} - -/* Return a frame back to the hardware to fill in again. It is used by XDP when - * the XDP program returns XDP_TX or XDP_REDIRECT not to an XSKMAP. - */ -void mlx5e_xsk_zca_free(struct zero_copy_allocator *zca, unsigned long handle) -{ - struct mlx5e_rq *rq = container_of(zca, struct mlx5e_rq, zca); - - mlx5e_xsk_recycle_frame(rq, handle); -} - static struct sk_buff *mlx5e_xsk_construct_skb(struct mlx5e_rq *rq, void *data, u32 cqe_bcnt) { @@ -90,11 +29,8 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, u32 head_offset, u32 page_idx) { - struct mlx5e_dma_info *di = &wi->umr.dma_info[page_idx]; - u16 rx_headroom = rq->buff.headroom - rq->buff.umem_headroom; + struct xdp_buff *xdp = wi->umr.dma_info[page_idx].xsk; u32 cqe_bcnt32 = cqe_bcnt; - void *va, *data; - u32 frag_size; bool consumed; /* Check packet size. Note LRO doesn't use linear SKB */ @@ -103,22 +39,20 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, return NULL; } - /* head_offset is not used in this function, because di->xsk.data and - * di->addr point directly to the necessary place. Furthermore, in the - * current implementation, UMR pages are mapped to XSK frames, so + /* head_offset is not used in this function, because xdp->data and the + * DMA address point directly to the necessary place. Furthermore, in + * the current implementation, UMR pages are mapped to XSK frames, so * head_offset should always be 0. 
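+ * (the WARN_ON_ONCE below guards this assumption).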
*/ WARN_ON_ONCE(head_offset); - va = di->xsk.data; - data = va + rx_headroom; - frag_size = rq->buff.headroom + cqe_bcnt32; - - dma_sync_single_for_cpu(rq->pdev, di->addr, frag_size, DMA_BIDIRECTIONAL); - prefetch(data); + xdp->data_end = xdp->data + cqe_bcnt32; + xdp_set_data_meta_invalid(xdp); + xsk_buff_dma_sync_for_cpu(xdp); + prefetch(xdp->data); rcu_read_lock(); - consumed = mlx5e_xdp_handle(rq, di, va, &rx_headroom, &cqe_bcnt32, true); + consumed = mlx5e_xdp_handle(rq, NULL, &cqe_bcnt32, xdp); rcu_read_unlock(); /* Possible flows: @@ -145,7 +79,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, /* XDP_PASS: copy the data from the UMEM to a new SKB and reuse the * frame. On SKB allocation failure, NULL is returned. */ - return mlx5e_xsk_construct_skb(rq, data, cqe_bcnt32); + return mlx5e_xsk_construct_skb(rq, xdp->data, cqe_bcnt32); } struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq, @@ -153,25 +87,20 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt) { - struct mlx5e_dma_info *di = wi->di; - u16 rx_headroom = rq->buff.headroom - rq->buff.umem_headroom; - void *va, *data; + struct xdp_buff *xdp = wi->di->xsk; bool consumed; - u32 frag_size; - /* wi->offset is not used in this function, because di->xsk.data and - * di->addr point directly to the necessary place. Furthermore, in the - * current implementation, one page = one packet = one frame, so + /* wi->offset is not used in this function, because xdp->data and the + * DMA address point directly to the necessary place. Furthermore, the + * XSK allocator allocates frames per packet, instead of pages, so * wi->offset should always be 0. */ WARN_ON_ONCE(wi->offset); - va = di->xsk.data; - data = va + rx_headroom; - frag_size = rq->buff.headroom + cqe_bcnt; - - dma_sync_single_for_cpu(rq->pdev, di->addr, frag_size, DMA_BIDIRECTIONAL); - prefetch(data); + xdp->data_end = xdp->data + cqe_bcnt; + xdp_set_data_meta_invalid(xdp); + xsk_buff_dma_sync_for_cpu(xdp); + prefetch(xdp->data); if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)) { rq->stats->wqe_err++; @@ -179,7 +108,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq, } rcu_read_lock(); - consumed = mlx5e_xdp_handle(rq, di, va, &rx_headroom, &cqe_bcnt, true); + consumed = mlx5e_xdp_handle(rq, NULL, &cqe_bcnt, xdp); rcu_read_unlock(); if (likely(consumed)) @@ -189,5 +118,5 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq, * will be handled by mlx5e_put_rx_frag. * On SKB allocation failure, NULL is returned. 
*/ - return mlx5e_xsk_construct_skb(rq, data, cqe_bcnt); + return mlx5e_xsk_construct_skb(rq, xdp->data, cqe_bcnt); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h index cab0e93497ae..d147b2f13b54 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h @@ -5,16 +5,10 @@ #define __MLX5_EN_XSK_RX_H__ #include "en.h" -#include <net/xdp_sock.h> +#include <net/xdp_sock_drv.h> /* RX data path */ -bool mlx5e_xsk_pages_enough_umem(struct mlx5e_rq *rq, int count); -int mlx5e_xsk_page_alloc_umem(struct mlx5e_rq *rq, - struct mlx5e_dma_info *dma_info); -void mlx5e_xsk_page_release(struct mlx5e_rq *rq, - struct mlx5e_dma_info *dma_info); -void mlx5e_xsk_zca_free(struct zero_copy_allocator *zca, unsigned long handle); struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, u16 cqe_bcnt, @@ -25,6 +19,23 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt); +static inline int mlx5e_xsk_page_alloc_umem(struct mlx5e_rq *rq, + struct mlx5e_dma_info *dma_info) +{ + dma_info->xsk = xsk_buff_alloc(rq->umem); + if (!dma_info->xsk) + return -ENOMEM; + + /* Store the DMA address without headroom. In striding RQ case, we just + * provide pages for UMR, and headroom is counted at the setup stage + * when creating a WQE. In non-striding RQ case, headroom is accounted + * in mlx5e_alloc_rx_wqe. + */ + dma_info->addr = xsk_buff_xdp_get_frame_dma(dma_info->xsk); + + return 0; +} + static inline bool mlx5e_xsk_update_rx_wakeup(struct mlx5e_rq *rq, bool alloc_err) { if (!xsk_umem_uses_need_wakeup(rq->umem)) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c index 3bcdb5b2fc20..83dce9cdb8c2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c @@ -5,7 +5,7 @@ #include "umem.h" #include "en/xdp.h" #include "en/params.h" -#include <net/xdp_sock.h> +#include <net/xdp_sock_drv.h> int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags) { @@ -92,12 +92,11 @@ bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget) break; } - xdptxd.dma_addr = xdp_umem_get_dma(umem, desc.addr); - xdptxd.data = xdp_umem_get_data(umem, desc.addr); + xdptxd.dma_addr = xsk_buff_raw_get_dma(umem, desc.addr); + xdptxd.data = xsk_buff_raw_get_data(umem, desc.addr); xdptxd.len = desc.len; - dma_sync_single_for_device(sq->pdev, xdptxd.dma_addr, - xdptxd.len, DMA_BIDIRECTIONAL); + xsk_buff_raw_dma_sync_for_device(umem, xdptxd.dma_addr, xdptxd.len); if (unlikely(!sq->xmit_xdp_frame(sq, &xdptxd, &xdpi, check_result))) { if (sq->mpwqe.wqe) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h index 79b487d89757..39fa0a705856 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h @@ -5,7 +5,7 @@ #define __MLX5_EN_XSK_TX_H__ #include "en.h" -#include <net/xdp_sock.h> +#include <net/xdp_sock_drv.h> /* TX data path */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c index 4baaa5788320..7b17fcd0a56d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: 
GPL-2.0 OR Linux-OpenIB /* Copyright (c) 2019 Mellanox Technologies. */ -#include <net/xdp_sock.h> +#include <net/xdp_sock_drv.h> #include "umem.h" #include "setup.h" #include "en/params.h" @@ -10,40 +10,14 @@ static int mlx5e_xsk_map_umem(struct mlx5e_priv *priv, struct xdp_umem *umem) { struct device *dev = priv->mdev->device; - u32 i; - for (i = 0; i < umem->npgs; i++) { - dma_addr_t dma = dma_map_page(dev, umem->pgs[i], 0, PAGE_SIZE, - DMA_BIDIRECTIONAL); - - if (unlikely(dma_mapping_error(dev, dma))) - goto err_unmap; - umem->pages[i].dma = dma; - } - - return 0; - -err_unmap: - while (i--) { - dma_unmap_page(dev, umem->pages[i].dma, PAGE_SIZE, - DMA_BIDIRECTIONAL); - umem->pages[i].dma = 0; - } - - return -ENOMEM; + return xsk_buff_dma_map(umem, dev, 0); } static void mlx5e_xsk_unmap_umem(struct mlx5e_priv *priv, struct xdp_umem *umem) { - struct device *dev = priv->mdev->device; - u32 i; - - for (i = 0; i < umem->npgs; i++) { - dma_unmap_page(dev, umem->pages[i].dma, PAGE_SIZE, - DMA_BIDIRECTIONAL); - umem->pages[i].dma = 0; - } + return xsk_buff_dma_unmap(umem, 0); } static int mlx5e_xsk_get_umems(struct mlx5e_xsk *xsk) @@ -90,13 +64,14 @@ static void mlx5e_xsk_remove_umem(struct mlx5e_xsk *xsk, u16 ix) static bool mlx5e_xsk_is_umem_sane(struct xdp_umem *umem) { - return umem->headroom <= 0xffff && umem->chunk_size_nohr <= 0xffff; + return xsk_umem_get_headroom(umem) <= 0xffff && + xsk_umem_get_chunk_size(umem) <= 0xffff; } void mlx5e_build_xsk_param(struct xdp_umem *umem, struct mlx5e_xsk_param *xsk) { - xsk->headroom = umem->headroom; - xsk->chunk_size = umem->chunk_size_nohr + umem->headroom; + xsk->headroom = xsk_umem_get_headroom(umem); + xsk->chunk_size = xsk_umem_get_chunk_size(umem); } static int mlx5e_xsk_enable_locked(struct mlx5e_priv *priv, @@ -241,18 +216,6 @@ int mlx5e_xsk_setup_umem(struct net_device *dev, struct xdp_umem *umem, u16 qid) mlx5e_xsk_disable_umem(priv, ix); } -int mlx5e_xsk_resize_reuseq(struct xdp_umem *umem, u32 nentries) -{ - struct xdp_umem_fq_reuse *reuseq; - - reuseq = xsk_reuseq_prepare(nentries); - if (unlikely(!reuseq)) - return -ENOMEM; - xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq)); - - return 0; -} - u16 mlx5e_xsk_first_unused_channel(struct mlx5e_params *params, struct mlx5e_xsk *xsk) { u16 res = xsk->refcnt ? 
params->num_channels : 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c index 417a2d9dd248..452fcf59c36b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c @@ -82,8 +82,8 @@ static void mlx5e_ktls_del(struct net_device *netdev, struct mlx5e_ktls_offload_context_tx *tx_priv = mlx5e_get_ktls_tx_priv_ctx(tls_ctx); - mlx5_ktls_destroy_key(priv->mdev, tx_priv->key_id); mlx5e_destroy_tis(priv->mdev, tx_priv->tisn); + mlx5_ktls_destroy_key(priv->mdev, tx_priv->key_id); kvfree(tx_priv); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c index c27e9a609d51..1fbb5a90cb38 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c @@ -167,7 +167,7 @@ static int mlx5e_tls_resync(struct net_device *netdev, struct sock *sk, struct tls_context *tls_ctx = tls_get_ctx(sk); struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5e_tls_offload_context_rx *rx_ctx; - u64 rcd_sn = *(u64 *)rcd_sn_data; + __be64 rcd_sn = *(__be64 *)rcd_sn_data; if (WARN_ON_ONCE(direction != TLS_OFFLOAD_CTX_DIR_RX)) return -EINVAL; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c index ec7b332d74c2..bc102d094bbd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c @@ -985,7 +985,7 @@ static int mlx5e_dcbnl_setbuffer(struct net_device *dev, return err; } -const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops = { +static const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops = { .ieee_getets = mlx5e_dcbnl_ieee_getets, .ieee_setets = mlx5e_dcbnl_ieee_setets, .ieee_getmaxrate = mlx5e_dcbnl_ieee_getmaxrate, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 0279bb7246e1..3ef2525e8de9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -527,8 +527,8 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv, struct dim_cq_moder *rx_moder, *tx_moder; struct mlx5_core_dev *mdev = priv->mdev; struct mlx5e_channels new_channels = {}; + bool reset_rx, reset_tx; int err = 0; - bool reset; if (!MLX5_CAP_GEN(mdev, cq_moderation)) return -EOPNOTSUPP; @@ -566,15 +566,28 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv, } /* we are opened */ - reset = (!!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled) || - (!!coal->use_adaptive_tx_coalesce != priv->channels.params.tx_dim_enabled); + reset_rx = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled; + reset_tx = !!coal->use_adaptive_tx_coalesce != priv->channels.params.tx_dim_enabled; - if (!reset) { + if (!reset_rx && !reset_tx) { mlx5e_set_priv_channels_coalesce(priv, coal); priv->channels.params = new_channels.params; goto out; } + if (reset_rx) { + u8 mode = MLX5E_GET_PFLAG(&new_channels.params, + MLX5E_PFLAG_RX_CQE_BASED_MODER); + + mlx5e_reset_rx_moderation(&new_channels.params, mode); + } + if (reset_tx) { + u8 mode = MLX5E_GET_PFLAG(&new_channels.params, + MLX5E_PFLAG_TX_CQE_BASED_MODER); + + mlx5e_reset_tx_moderation(&new_channels.params, mode); + } + err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL); out: @@ -665,11 +678,12 @@ static const u32 
pplm_fec_2_ethtool_linkmodes[] = { static int get_fec_supported_advertised(struct mlx5_core_dev *dev, struct ethtool_link_ksettings *link_ksettings) { - u_long active_fec = 0; + unsigned long active_fec_long; + u32 active_fec; u32 bitn; int err; - err = mlx5e_get_fec_mode(dev, (u32 *)&active_fec, NULL); + err = mlx5e_get_fec_mode(dev, &active_fec, NULL); if (err) return (err == -EOPNOTSUPP) ? 0 : err; @@ -682,10 +696,11 @@ static int get_fec_supported_advertised(struct mlx5_core_dev *dev, MLX5E_ADVERTISE_SUPPORTED_FEC(MLX5E_FEC_LLRS_272_257_1, ETHTOOL_LINK_MODE_FEC_LLRS_BIT); + active_fec_long = active_fec; /* active fec is a bit set, find out which bit is set and * advertise the corresponding ethtool bit */ - bitn = find_first_bit(&active_fec, sizeof(u32) * BITS_PER_BYTE); + bitn = find_first_bit(&active_fec_long, sizeof(active_fec_long) * BITS_PER_BYTE); if (bitn < ARRAY_SIZE(pplm_fec_2_ethtool_linkmodes)) __set_bit(pplm_fec_2_ethtool_linkmodes[bitn], link_ksettings->link_modes.advertising); @@ -1517,8 +1532,8 @@ static int mlx5e_get_fecparam(struct net_device *netdev, { struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; - u16 fec_configured = 0; - u32 fec_active = 0; + u16 fec_configured; + u32 fec_active; int err; err = mlx5e_get_fec_mode(mdev, &fec_active, &fec_configured); @@ -1526,14 +1541,14 @@ static int mlx5e_get_fecparam(struct net_device *netdev, if (err) return err; - fecparam->active_fec = pplm2ethtool_fec((u_long)fec_active, - sizeof(u32) * BITS_PER_BYTE); + fecparam->active_fec = pplm2ethtool_fec((unsigned long)fec_active, + sizeof(unsigned long) * BITS_PER_BYTE); if (!fecparam->active_fec) return -EOPNOTSUPP; - fecparam->fec = pplm2ethtool_fec((u_long)fec_configured, - sizeof(u16) * BITS_PER_BYTE); + fecparam->fec = pplm2ethtool_fec((unsigned long)fec_configured, + sizeof(unsigned long) * BITS_PER_BYTE); return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 07823abe5557..a836a02a2116 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -38,7 +38,7 @@ #include <linux/bpf.h> #include <linux/if_bridge.h> #include <net/page_pool.h> -#include <net/xdp_sock.h> +#include <net/xdp_sock_drv.h> #include "eswitch.h" #include "en.h" #include "en/txrx.h" @@ -373,7 +373,6 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, struct mlx5_core_dev *mdev = c->mdev; void *rqc = rqp->rqc; void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq); - u32 num_xsk_frames = 0; u32 rq_xdp_ix; u32 pool_size; int wq_sz; @@ -413,7 +412,6 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params, xsk); - rq->buff.umem_headroom = xsk ? 
xsk->headroom : 0; pool_size = 1 << params->log_rq_mtu_frames; switch (rq->wq_type) { @@ -427,10 +425,6 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq); - if (xsk) - num_xsk_frames = wq_sz << - mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk); - pool_size = MLX5_MPWRQ_PAGES_PER_WQE << mlx5e_mpwqe_get_log_rq_size(params, xsk); @@ -482,9 +476,6 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, wq_sz = mlx5_wq_cyc_get_size(&rq->wqe.wq); - if (xsk) - num_xsk_frames = wq_sz << rq->wqe.info.log_num_frags; - rq->wqe.info = rqp->frags_info; rq->buff.frame0_sz = rq->wqe.info.arr[0].frag_stride; @@ -525,19 +516,9 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, } if (xsk) { - rq->buff.frame0_sz = xsk_umem_xdp_frame_sz(umem); - - err = mlx5e_xsk_resize_reuseq(umem, num_xsk_frames); - if (unlikely(err)) { - mlx5_core_err(mdev, "Unable to allocate the Reuse Ring for %u frames\n", - num_xsk_frames); - goto err_free; - } - - rq->zca.free = mlx5e_xsk_zca_free; err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq, - MEM_TYPE_ZERO_COPY, - &rq->zca); + MEM_TYPE_XSK_BUFF_POOL, NULL); + xsk_buff_set_rxq_info(rq->umem, &rq->xdp_rxq); } else { /* Create a page_pool and register it with rxq */ pp_params.order = 0; @@ -2734,7 +2715,8 @@ void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in) mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in); } - if (!mlx5e_tunnel_inner_ft_supported(priv->mdev)) + /* Verify inner tirs resources allocated */ + if (!priv->inner_indir_tir[0].tirn) return; for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { @@ -3455,14 +3437,15 @@ out: return err; } -void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc) +void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv) { int i; for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]); - if (!inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev)) + /* Verify inner tirs resources allocated */ + if (!priv->inner_indir_tir[0].tirn) return; for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) @@ -3539,41 +3522,6 @@ out: return err; } -#ifdef CONFIG_MLX5_ESWITCH -static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv, - struct flow_cls_offload *cls_flower, - unsigned long flags) -{ - switch (cls_flower->command) { - case FLOW_CLS_REPLACE: - return mlx5e_configure_flower(priv->netdev, priv, cls_flower, - flags); - case FLOW_CLS_DESTROY: - return mlx5e_delete_flower(priv->netdev, priv, cls_flower, - flags); - case FLOW_CLS_STATS: - return mlx5e_stats_flower(priv->netdev, priv, cls_flower, - flags); - default: - return -EOPNOTSUPP; - } -} - -static int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, - void *cb_priv) -{ - unsigned long flags = MLX5_TC_FLAG(INGRESS) | MLX5_TC_FLAG(NIC_OFFLOAD); - struct mlx5e_priv *priv = cb_priv; - - switch (type) { - case TC_SETUP_CLSFLOWER: - return mlx5e_setup_tc_cls_flower(priv, type_data, flags); - default: - return -EOPNOTSUPP; - } -} -#endif - static LIST_HEAD(mlx5e_block_cb_list); static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type, @@ -3582,7 +3530,6 @@ static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type, struct mlx5e_priv *priv = netdev_priv(dev); switch (type) { -#ifdef CONFIG_MLX5_ESWITCH case TC_SETUP_BLOCK: { struct flow_block_offload *f = type_data; @@ -3592,7 +3539,6 @@ static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type, mlx5e_setup_tc_block_cb, priv, priv, true); } -#endif case TC_SETUP_QDISC_MQPRIO: 
return mlx5e_setup_tc_mqprio(priv, type_data); default: @@ -3765,7 +3711,7 @@ static int set_feature_cvlan_filter(struct net_device *netdev, bool enable) return 0; } -#ifdef CONFIG_MLX5_ESWITCH +#if IS_ENABLED(CONFIG_MLX5_CLS_ACT) static int set_feature_tc_num_filters(struct net_device *netdev, bool enable) { struct mlx5e_priv *priv = netdev_priv(netdev); @@ -3876,7 +3822,7 @@ int mlx5e_set_features(struct net_device *netdev, netdev_features_t features) err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro); err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER, set_feature_cvlan_filter); -#ifdef CONFIG_MLX5_ESWITCH +#if IS_ENABLED(CONFIG_MLX5_CLS_ACT) err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_tc_num_filters); #endif err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all); @@ -4761,7 +4707,7 @@ static u8 mlx5_to_net_dim_cq_period_mode(u8 cq_period_mode) DIM_CQ_PERIOD_MODE_START_FROM_EQE; } -void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) +void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode) { if (params->tx_dim_enabled) { u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode); @@ -4770,13 +4716,9 @@ void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) } else { params->tx_cq_moderation = mlx5e_get_def_tx_moderation(cq_period_mode); } - - MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER, - params->tx_cq_moderation.cq_period_mode == - MLX5_CQ_PERIOD_MODE_START_FROM_CQE); } -void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) +void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode) { if (params->rx_dim_enabled) { u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode); @@ -4785,7 +4727,19 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) } else { params->rx_cq_moderation = mlx5e_get_def_rx_moderation(cq_period_mode); } +} + +void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) +{ + mlx5e_reset_tx_moderation(params, cq_period_mode); + MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER, + params->tx_cq_moderation.cq_period_mode == + MLX5_CQ_PERIOD_MODE_START_FROM_CQE); +} +void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) +{ + mlx5e_reset_rx_moderation(params, cq_period_mode); MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER, params->rx_cq_moderation.cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE); @@ -5178,7 +5132,7 @@ err_destroy_xsk_rqts: err_destroy_direct_tirs: mlx5e_destroy_direct_tirs(priv, priv->direct_tir); err_destroy_indirect_tirs: - mlx5e_destroy_indirect_tirs(priv, true); + mlx5e_destroy_indirect_tirs(priv); err_destroy_direct_rqts: mlx5e_destroy_direct_rqts(priv, priv->direct_tir); err_destroy_indirect_rqts: @@ -5197,7 +5151,7 @@ static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv) mlx5e_destroy_direct_tirs(priv, priv->xsk_tir); mlx5e_destroy_direct_rqts(priv, priv->xsk_tir); mlx5e_destroy_direct_tirs(priv, priv->direct_tir); - mlx5e_destroy_indirect_tirs(priv, true); + mlx5e_destroy_indirect_tirs(priv); mlx5e_destroy_direct_rqts(priv, priv->direct_tir); mlx5e_destroy_rqt(priv, &priv->indir_rqt); mlx5e_close_drop_rq(&priv->drop_rq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 52351c105627..af89a4803c7d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -35,7 +35,6 @@ #include <net/switchdev.h> #include <net/pkt_cls.h> #include <net/act_api.h> -#include <net/netevent.h> #include <net/arp.h> #include <net/devlink.h> #include <net/ipv6_stubs.h> @@ -45,9 +44,9 @@ #include "en.h" #include "en_rep.h" #include "en_tc.h" -#include "en/tc_tun.h" +#include "en/rep/tc.h" +#include "en/rep/neigh.h" #include "fs_core.h" -#include "lib/port_tun.h" #include "lib/mlx5.h" #define CREATE_TRACE_POINTS #include "diag/en_rep_tracepoint.h" @@ -58,16 +57,6 @@ static const char mlx5e_rep_driver_name[] = "mlx5e_rep"; -struct mlx5e_rep_indr_block_priv { - struct net_device *netdev; - struct mlx5e_rep_priv *rpriv; - - struct list_head list; -}; - -static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv, - struct net_device *netdev); - static void mlx5e_rep_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) { @@ -485,706 +474,6 @@ void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv) mlx5e_sqs2vport_stop(esw, rep); } -static unsigned long mlx5e_rep_ipv6_interval(void) -{ - if (IS_ENABLED(CONFIG_IPV6) && ipv6_stub->nd_tbl) - return NEIGH_VAR(&ipv6_stub->nd_tbl->parms, DELAY_PROBE_TIME); - - return ~0UL; -} - -static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv) -{ - unsigned long ipv4_interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME); - unsigned long ipv6_interval = mlx5e_rep_ipv6_interval(); - struct net_device *netdev = rpriv->netdev; - struct mlx5e_priv *priv = netdev_priv(netdev); - - rpriv->neigh_update.min_interval = min_t(unsigned long, ipv6_interval, ipv4_interval); - mlx5_fc_update_sampling_interval(priv->mdev, rpriv->neigh_update.min_interval); -} - -void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv) -{ - struct mlx5e_rep_priv *rpriv = priv->ppriv; - struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update; - - mlx5_fc_queue_stats_work(priv->mdev, - &neigh_update->neigh_stats_work, - neigh_update->min_interval); -} - -static bool mlx5e_rep_neigh_entry_hold(struct mlx5e_neigh_hash_entry *nhe) -{ - return refcount_inc_not_zero(&nhe->refcnt); -} - -static void mlx5e_rep_neigh_entry_remove(struct mlx5e_neigh_hash_entry *nhe); - -static void mlx5e_rep_neigh_entry_release(struct mlx5e_neigh_hash_entry *nhe) -{ - if (refcount_dec_and_test(&nhe->refcnt)) { - mlx5e_rep_neigh_entry_remove(nhe); - kfree_rcu(nhe, rcu); - } -} - -static struct mlx5e_neigh_hash_entry * -mlx5e_get_next_nhe(struct mlx5e_rep_priv *rpriv, - struct mlx5e_neigh_hash_entry *nhe) -{ - struct mlx5e_neigh_hash_entry *next = NULL; - - rcu_read_lock(); - - for (next = nhe ? 
- list_next_or_null_rcu(&rpriv->neigh_update.neigh_list, - &nhe->neigh_list, - struct mlx5e_neigh_hash_entry, - neigh_list) : - list_first_or_null_rcu(&rpriv->neigh_update.neigh_list, - struct mlx5e_neigh_hash_entry, - neigh_list); - next; - next = list_next_or_null_rcu(&rpriv->neigh_update.neigh_list, - &next->neigh_list, - struct mlx5e_neigh_hash_entry, - neigh_list)) - if (mlx5e_rep_neigh_entry_hold(next)) - break; - - rcu_read_unlock(); - - if (nhe) - mlx5e_rep_neigh_entry_release(nhe); - - return next; -} - -static void mlx5e_rep_neigh_stats_work(struct work_struct *work) -{ - struct mlx5e_rep_priv *rpriv = container_of(work, struct mlx5e_rep_priv, - neigh_update.neigh_stats_work.work); - struct net_device *netdev = rpriv->netdev; - struct mlx5e_priv *priv = netdev_priv(netdev); - struct mlx5e_neigh_hash_entry *nhe = NULL; - - rtnl_lock(); - if (!list_empty(&rpriv->neigh_update.neigh_list)) - mlx5e_rep_queue_neigh_stats_work(priv); - - while ((nhe = mlx5e_get_next_nhe(rpriv, nhe)) != NULL) - mlx5e_tc_update_neigh_used_value(nhe); - - rtnl_unlock(); -} - -static void mlx5e_rep_update_flows(struct mlx5e_priv *priv, - struct mlx5e_encap_entry *e, - bool neigh_connected, - unsigned char ha[ETH_ALEN]) -{ - struct ethhdr *eth = (struct ethhdr *)e->encap_header; - struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - bool encap_connected; - LIST_HEAD(flow_list); - - ASSERT_RTNL(); - - /* wait for encap to be fully initialized */ - wait_for_completion(&e->res_ready); - - mutex_lock(&esw->offloads.encap_tbl_lock); - encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID); - if (e->compl_result < 0 || (encap_connected == neigh_connected && - ether_addr_equal(e->h_dest, ha))) - goto unlock; - - mlx5e_take_all_encap_flows(e, &flow_list); - - if ((e->flags & MLX5_ENCAP_ENTRY_VALID) && - (!neigh_connected || !ether_addr_equal(e->h_dest, ha))) - mlx5e_tc_encap_flows_del(priv, e, &flow_list); - - if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) { - ether_addr_copy(e->h_dest, ha); - ether_addr_copy(eth->h_dest, ha); - /* Update the encap source mac, in case we delete - * the flows when the encap source mac changed. - */ - ether_addr_copy(eth->h_source, e->route_dev->dev_addr); - - mlx5e_tc_encap_flows_add(priv, e, &flow_list); - } -unlock: - mutex_unlock(&esw->offloads.encap_tbl_lock); - mlx5e_put_encap_flow_list(priv, &flow_list); -} - -static void mlx5e_rep_neigh_update(struct work_struct *work) -{ - struct mlx5e_neigh_hash_entry *nhe = - container_of(work, struct mlx5e_neigh_hash_entry, neigh_update_work); - struct neighbour *n = nhe->n; - struct mlx5e_encap_entry *e; - unsigned char ha[ETH_ALEN]; - struct mlx5e_priv *priv; - bool neigh_connected; - u8 nud_state, dead; - - rtnl_lock(); - - /* If these parameters are changed after we release the lock, - * we'll receive another event letting us know about it. - * We use this lock to avoid inconsistency between the neigh validity - * and its hw address.
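The helper removed above, mlx5e_get_next_nhe(), walks the neigh list under RCU and only returns entries whose refcount can still be raised. A minimal, self-contained sketch of that traversal pattern (generic names, not the driver's types):

        #include <linux/list.h>
        #include <linux/rculist.h>
        #include <linux/refcount.h>

        struct item {
                struct list_head node;
                refcount_t refcnt;
        };

        /* Return the first entry after @pos whose refcount could be raised,
         * or NULL. The caller owns the returned reference; dropping the
         * reference held on @pos is left to the caller here.
         */
        static struct item *next_live_item(struct list_head *head, struct item *pos)
        {
                struct item *it;

                rcu_read_lock();
                it = pos ? list_next_or_null_rcu(head, &pos->node, struct item, node) :
                           list_first_or_null_rcu(head, struct item, node);
                while (it && !refcount_inc_not_zero(&it->refcnt))
                        it = list_next_or_null_rcu(head, &it->node, struct item, node);
                rcu_read_unlock();

                return it;
        }

RCU alone only guarantees the entries are not freed while the loop runs; refcount_inc_not_zero() is what makes it safe to keep using the returned entry after rcu_read_unlock().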
- */ - read_lock_bh(&n->lock); - memcpy(ha, n->ha, ETH_ALEN); - nud_state = n->nud_state; - dead = n->dead; - read_unlock_bh(&n->lock); - - neigh_connected = (nud_state & NUD_VALID) && !dead; - - trace_mlx5e_rep_neigh_update(nhe, ha, neigh_connected); - - list_for_each_entry(e, &nhe->encap_list, encap_list) { - if (!mlx5e_encap_take(e)) - continue; - - priv = netdev_priv(e->out_dev); - mlx5e_rep_update_flows(priv, e, neigh_connected, ha); - mlx5e_encap_put(priv, e); - } - mlx5e_rep_neigh_entry_release(nhe); - rtnl_unlock(); - neigh_release(n); -} - -static struct mlx5e_rep_indr_block_priv * -mlx5e_rep_indr_block_priv_lookup(struct mlx5e_rep_priv *rpriv, - struct net_device *netdev) -{ - struct mlx5e_rep_indr_block_priv *cb_priv; - - /* All callback list access should be protected by RTNL. */ - ASSERT_RTNL(); - - list_for_each_entry(cb_priv, - &rpriv->uplink_priv.tc_indr_block_priv_list, - list) - if (cb_priv->netdev == netdev) - return cb_priv; - - return NULL; -} - -static void mlx5e_rep_indr_clean_block_privs(struct mlx5e_rep_priv *rpriv) -{ - struct mlx5e_rep_indr_block_priv *cb_priv, *temp; - struct list_head *head = &rpriv->uplink_priv.tc_indr_block_priv_list; - - list_for_each_entry_safe(cb_priv, temp, head, list) { - mlx5e_rep_indr_unregister_block(rpriv, cb_priv->netdev); - kfree(cb_priv); - } -} - -static int -mlx5e_rep_indr_offload(struct net_device *netdev, - struct flow_cls_offload *flower, - struct mlx5e_rep_indr_block_priv *indr_priv, - unsigned long flags) -{ - struct mlx5e_priv *priv = netdev_priv(indr_priv->rpriv->netdev); - int err = 0; - - switch (flower->command) { - case FLOW_CLS_REPLACE: - err = mlx5e_configure_flower(netdev, priv, flower, flags); - break; - case FLOW_CLS_DESTROY: - err = mlx5e_delete_flower(netdev, priv, flower, flags); - break; - case FLOW_CLS_STATS: - err = mlx5e_stats_flower(netdev, priv, flower, flags); - break; - default: - err = -EOPNOTSUPP; - } - - return err; -} - -static int mlx5e_rep_indr_setup_tc_cb(enum tc_setup_type type, - void *type_data, void *indr_priv) -{ - unsigned long flags = MLX5_TC_FLAG(EGRESS) | MLX5_TC_FLAG(ESW_OFFLOAD); - struct mlx5e_rep_indr_block_priv *priv = indr_priv; - - switch (type) { - case TC_SETUP_CLSFLOWER: - return mlx5e_rep_indr_offload(priv->netdev, type_data, priv, - flags); - default: - return -EOPNOTSUPP; - } -} - -static int mlx5e_rep_indr_setup_ft_cb(enum tc_setup_type type, - void *type_data, void *indr_priv) -{ - struct mlx5e_rep_indr_block_priv *priv = indr_priv; - struct flow_cls_offload *f = type_data; - struct flow_cls_offload tmp; - struct mlx5e_priv *mpriv; - struct mlx5_eswitch *esw; - unsigned long flags; - int err; - - mpriv = netdev_priv(priv->rpriv->netdev); - esw = mpriv->mdev->priv.eswitch; - - flags = MLX5_TC_FLAG(EGRESS) | - MLX5_TC_FLAG(ESW_OFFLOAD) | - MLX5_TC_FLAG(FT_OFFLOAD); - - switch (type) { - case TC_SETUP_CLSFLOWER: - memcpy(&tmp, f, sizeof(*f)); - - /* Re-use tc offload path by moving the ft flow to the - * reserved ft chain. - * - * FT offload can use prio range [0, INT_MAX], so we normalize - * it to range [1, mlx5_esw_chains_get_prio_range(esw)] - * as with tc, where prio 0 isn't supported. - * - * We only support chain 0 of FT offload. 
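To make the normalization in this comment concrete: assuming, for illustration, that mlx5_esw_chains_get_prio_range() returns 16, FT rules at prios 0..15 are replayed through the tc offload path at prios 1..16 on the chain returned by mlx5_esw_chains_get_ft_chain(), and an FT rule at prio 16 or higher fails with -EOPNOTSUPP, because the prio is range-checked before the increment that lifts it past prio 0, which tc does not support.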
- */ - if (!mlx5_esw_chains_prios_supported(esw) || - tmp.common.prio >= mlx5_esw_chains_get_prio_range(esw) || - tmp.common.chain_index) - return -EOPNOTSUPP; - - tmp.common.chain_index = mlx5_esw_chains_get_ft_chain(esw); - tmp.common.prio++; - err = mlx5e_rep_indr_offload(priv->netdev, &tmp, priv, flags); - memcpy(&f->stats, &tmp.stats, sizeof(f->stats)); - return err; - default: - return -EOPNOTSUPP; - } -} - -static void mlx5e_rep_indr_block_unbind(void *cb_priv) -{ - struct mlx5e_rep_indr_block_priv *indr_priv = cb_priv; - - list_del(&indr_priv->list); - kfree(indr_priv); -} - -static LIST_HEAD(mlx5e_block_cb_list); - -static int -mlx5e_rep_indr_setup_block(struct net_device *netdev, - struct mlx5e_rep_priv *rpriv, - struct flow_block_offload *f, - flow_setup_cb_t *setup_cb) -{ - struct mlx5e_rep_indr_block_priv *indr_priv; - struct flow_block_cb *block_cb; - - if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) - return -EOPNOTSUPP; - - f->unlocked_driver_cb = true; - f->driver_block_list = &mlx5e_block_cb_list; - - switch (f->command) { - case FLOW_BLOCK_BIND: - indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev); - if (indr_priv) - return -EEXIST; - - indr_priv = kmalloc(sizeof(*indr_priv), GFP_KERNEL); - if (!indr_priv) - return -ENOMEM; - - indr_priv->netdev = netdev; - indr_priv->rpriv = rpriv; - list_add(&indr_priv->list, - &rpriv->uplink_priv.tc_indr_block_priv_list); - - block_cb = flow_block_cb_alloc(setup_cb, indr_priv, indr_priv, - mlx5e_rep_indr_block_unbind); - if (IS_ERR(block_cb)) { - list_del(&indr_priv->list); - kfree(indr_priv); - return PTR_ERR(block_cb); - } - flow_block_cb_add(block_cb, f); - list_add_tail(&block_cb->driver_list, &mlx5e_block_cb_list); - - return 0; - case FLOW_BLOCK_UNBIND: - indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev); - if (!indr_priv) - return -ENOENT; - - block_cb = flow_block_cb_lookup(f->block, setup_cb, indr_priv); - if (!block_cb) - return -ENOENT; - - flow_block_cb_remove(block_cb, f); - list_del(&block_cb->driver_list); - return 0; - default: - return -EOPNOTSUPP; - } - return 0; -} - -static -int mlx5e_rep_indr_setup_cb(struct net_device *netdev, void *cb_priv, - enum tc_setup_type type, void *type_data) -{ - switch (type) { - case TC_SETUP_BLOCK: - return mlx5e_rep_indr_setup_block(netdev, cb_priv, type_data, - mlx5e_rep_indr_setup_tc_cb); - case TC_SETUP_FT: - return mlx5e_rep_indr_setup_block(netdev, cb_priv, type_data, - mlx5e_rep_indr_setup_ft_cb); - default: - return -EOPNOTSUPP; - } -} - -static int mlx5e_rep_indr_register_block(struct mlx5e_rep_priv *rpriv, - struct net_device *netdev) -{ - int err; - - err = __flow_indr_block_cb_register(netdev, rpriv, - mlx5e_rep_indr_setup_cb, - rpriv); - if (err) { - struct mlx5e_priv *priv = netdev_priv(rpriv->netdev); - - mlx5_core_err(priv->mdev, "Failed to register remote block notifier for %s err=%d\n", - netdev_name(netdev), err); - } - return err; -} - -static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv, - struct net_device *netdev) -{ - __flow_indr_block_cb_unregister(netdev, mlx5e_rep_indr_setup_cb, - rpriv); -} - -static int mlx5e_nic_rep_netdevice_event(struct notifier_block *nb, - unsigned long event, void *ptr) -{ - struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv, - uplink_priv.netdevice_nb); - struct mlx5e_priv *priv = netdev_priv(rpriv->netdev); - struct net_device *netdev = netdev_notifier_info_to_dev(ptr); - - if (!mlx5e_tc_tun_device_to_offload(priv, netdev) && - !(is_vlan_dev(netdev) && 
vlan_dev_real_dev(netdev) == rpriv->netdev) - return NOTIFY_OK; - - switch (event) { - case NETDEV_REGISTER: - mlx5e_rep_indr_register_block(rpriv, netdev); - break; - case NETDEV_UNREGISTER: - mlx5e_rep_indr_unregister_block(rpriv, netdev); - break; - } - return NOTIFY_OK; -} - -static void -mlx5e_rep_queue_neigh_update_work(struct mlx5e_priv *priv, - struct mlx5e_neigh_hash_entry *nhe, - struct neighbour *n) -{ - /* Take a reference to ensure the neighbour and mlx5 encap - * entry won't be destroyed until we drop the reference in - * delayed work. - */ - neigh_hold(n); - - /* This assignment is valid as long as the neigh reference - * is taken. - */ - nhe->n = n; - - if (!queue_work(priv->wq, &nhe->neigh_update_work)) { - mlx5e_rep_neigh_entry_release(nhe); - neigh_release(n); - } -} - -static struct mlx5e_neigh_hash_entry * -mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv, - struct mlx5e_neigh *m_neigh); - -static int mlx5e_rep_netevent_event(struct notifier_block *nb, - unsigned long event, void *ptr) -{ - struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv, - neigh_update.netevent_nb); - struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update; - struct net_device *netdev = rpriv->netdev; - struct mlx5e_priv *priv = netdev_priv(netdev); - struct mlx5e_neigh_hash_entry *nhe = NULL; - struct mlx5e_neigh m_neigh = {}; - struct neigh_parms *p; - struct neighbour *n; - bool found = false; - - switch (event) { - case NETEVENT_NEIGH_UPDATE: - n = ptr; -#if IS_ENABLED(CONFIG_IPV6) - if (n->tbl != ipv6_stub->nd_tbl && n->tbl != &arp_tbl) -#else - if (n->tbl != &arp_tbl) -#endif - return NOTIFY_DONE; - - m_neigh.dev = n->dev; - m_neigh.family = n->ops->family; - memcpy(&m_neigh.dst_ip, n->primary_key, n->tbl->key_len); - - rcu_read_lock(); - nhe = mlx5e_rep_neigh_entry_lookup(priv, &m_neigh); - rcu_read_unlock(); - if (!nhe) - return NOTIFY_DONE; - - mlx5e_rep_queue_neigh_update_work(priv, nhe, n); - break; - - case NETEVENT_DELAY_PROBE_TIME_UPDATE: - p = ptr; - - /* We check the device is present since we don't care about - * changes in the default table, we only care about changes - * done to the per-device delay probe time parameter.
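The hand-off in mlx5e_rep_queue_neigh_update_work() above leans on queue_work() returning false when the work item is already pending: references are taken up front and dropped again on that false return, so the queued work always owns exactly one reference to each object. A condensed sketch of the shape, with a hypothetical entry type and release helper (entry_release() is not a real driver symbol):

        #include <linux/workqueue.h>
        #include <net/neighbour.h>

        struct entry {
                struct neighbour *n;
                struct work_struct update_work;
        };

        static void entry_release(struct entry *e);     /* hypothetical */

        static void queue_entry_update(struct workqueue_struct *wq,
                                       struct entry *e, struct neighbour *n)
        {
                neigh_hold(n);          /* ref the work item will drop */
                e->n = n;               /* valid while that ref is held */

                if (!queue_work(wq, &e->update_work)) {
                        /* Already queued: the pending work owns its own refs. */
                        entry_release(e);
                        neigh_release(n);
                }
        }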
- */ -#if IS_ENABLED(CONFIG_IPV6) - if (!p->dev || (p->tbl != ipv6_stub->nd_tbl && p->tbl != &arp_tbl)) -#else - if (!p->dev || p->tbl != &arp_tbl) -#endif - return NOTIFY_DONE; - - rcu_read_lock(); - list_for_each_entry_rcu(nhe, &neigh_update->neigh_list, - neigh_list) { - if (p->dev == nhe->m_neigh.dev) { - found = true; - break; - } - } - rcu_read_unlock(); - if (!found) - return NOTIFY_DONE; - - neigh_update->min_interval = min_t(unsigned long, - NEIGH_VAR(p, DELAY_PROBE_TIME), - neigh_update->min_interval); - mlx5_fc_update_sampling_interval(priv->mdev, - neigh_update->min_interval); - break; - } - return NOTIFY_DONE; -} - -static const struct rhashtable_params mlx5e_neigh_ht_params = { - .head_offset = offsetof(struct mlx5e_neigh_hash_entry, rhash_node), - .key_offset = offsetof(struct mlx5e_neigh_hash_entry, m_neigh), - .key_len = sizeof(struct mlx5e_neigh), - .automatic_shrinking = true, -}; - -static int mlx5e_rep_neigh_init(struct mlx5e_rep_priv *rpriv) -{ - struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update; - int err; - - err = rhashtable_init(&neigh_update->neigh_ht, &mlx5e_neigh_ht_params); - if (err) - return err; - - INIT_LIST_HEAD(&neigh_update->neigh_list); - mutex_init(&neigh_update->encap_lock); - INIT_DELAYED_WORK(&neigh_update->neigh_stats_work, - mlx5e_rep_neigh_stats_work); - mlx5e_rep_neigh_update_init_interval(rpriv); - - rpriv->neigh_update.netevent_nb.notifier_call = mlx5e_rep_netevent_event; - err = register_netevent_notifier(&rpriv->neigh_update.netevent_nb); - if (err) - goto out_err; - return 0; - -out_err: - rhashtable_destroy(&neigh_update->neigh_ht); - return err; -} - -static void mlx5e_rep_neigh_cleanup(struct mlx5e_rep_priv *rpriv) -{ - struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update; - struct mlx5e_priv *priv = netdev_priv(rpriv->netdev); - - unregister_netevent_notifier(&neigh_update->netevent_nb); - - flush_workqueue(priv->wq); /* flush neigh update works */ - - cancel_delayed_work_sync(&rpriv->neigh_update.neigh_stats_work); - - mutex_destroy(&neigh_update->encap_lock); - rhashtable_destroy(&neigh_update->neigh_ht); -} - -static int mlx5e_rep_neigh_entry_insert(struct mlx5e_priv *priv, - struct mlx5e_neigh_hash_entry *nhe) -{ - struct mlx5e_rep_priv *rpriv = priv->ppriv; - int err; - - err = rhashtable_insert_fast(&rpriv->neigh_update.neigh_ht, - &nhe->rhash_node, - mlx5e_neigh_ht_params); - if (err) - return err; - - list_add_rcu(&nhe->neigh_list, &rpriv->neigh_update.neigh_list); - - return err; -} - -static void mlx5e_rep_neigh_entry_remove(struct mlx5e_neigh_hash_entry *nhe) -{ - struct mlx5e_rep_priv *rpriv = nhe->priv->ppriv; - - mutex_lock(&rpriv->neigh_update.encap_lock); - - list_del_rcu(&nhe->neigh_list); - - rhashtable_remove_fast(&rpriv->neigh_update.neigh_ht, - &nhe->rhash_node, - mlx5e_neigh_ht_params); - mutex_unlock(&rpriv->neigh_update.encap_lock); -} - -/* This function must only be called under the representor's encap_lock or - * inside rcu read lock section. - */ -static struct mlx5e_neigh_hash_entry * -mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv, - struct mlx5e_neigh *m_neigh) -{ - struct mlx5e_rep_priv *rpriv = priv->ppriv; - struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update; - struct mlx5e_neigh_hash_entry *nhe; - - nhe = rhashtable_lookup_fast(&neigh_update->neigh_ht, m_neigh, - mlx5e_neigh_ht_params); - return nhe && mlx5e_rep_neigh_entry_hold(nhe) ? 
nhe : NULL; -} - -static int mlx5e_rep_neigh_entry_create(struct mlx5e_priv *priv, - struct mlx5e_encap_entry *e, - struct mlx5e_neigh_hash_entry **nhe) -{ - int err; - - *nhe = kzalloc(sizeof(**nhe), GFP_KERNEL); - if (!*nhe) - return -ENOMEM; - - (*nhe)->priv = priv; - memcpy(&(*nhe)->m_neigh, &e->m_neigh, sizeof(e->m_neigh)); - INIT_WORK(&(*nhe)->neigh_update_work, mlx5e_rep_neigh_update); - spin_lock_init(&(*nhe)->encap_list_lock); - INIT_LIST_HEAD(&(*nhe)->encap_list); - refcount_set(&(*nhe)->refcnt, 1); - - err = mlx5e_rep_neigh_entry_insert(priv, *nhe); - if (err) - goto out_free; - return 0; - -out_free: - kfree(*nhe); - return err; -} - -int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv, - struct mlx5e_encap_entry *e) -{ - struct mlx5e_rep_priv *rpriv = priv->ppriv; - struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv; - struct mlx5_tun_entropy *tun_entropy = &uplink_priv->tun_entropy; - struct mlx5e_neigh_hash_entry *nhe; - int err; - - err = mlx5_tun_entropy_refcount_inc(tun_entropy, e->reformat_type); - if (err) - return err; - - mutex_lock(&rpriv->neigh_update.encap_lock); - nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh); - if (!nhe) { - err = mlx5e_rep_neigh_entry_create(priv, e, &nhe); - if (err) { - mutex_unlock(&rpriv->neigh_update.encap_lock); - mlx5_tun_entropy_refcount_dec(tun_entropy, - e->reformat_type); - return err; - } - } - - e->nhe = nhe; - spin_lock(&nhe->encap_list_lock); - list_add_rcu(&e->encap_list, &nhe->encap_list); - spin_unlock(&nhe->encap_list_lock); - - mutex_unlock(&rpriv->neigh_update.encap_lock); - - return 0; -} - -void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv, - struct mlx5e_encap_entry *e) -{ - struct mlx5e_rep_priv *rpriv = priv->ppriv; - struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv; - struct mlx5_tun_entropy *tun_entropy = &uplink_priv->tun_entropy; - - if (!e->nhe) - return; - - spin_lock(&e->nhe->encap_list_lock); - list_del_rcu(&e->encap_list); - spin_unlock(&e->nhe->encap_list_lock); - - mlx5e_rep_neigh_entry_release(e->nhe); - e->nhe = NULL; - mlx5_tun_entropy_refcount_dec(tun_entropy, e->reformat_type); -} - static int mlx5e_rep_open(struct net_device *dev) { struct mlx5e_priv *priv = netdev_priv(dev); @@ -1225,129 +514,6 @@ static int mlx5e_rep_close(struct net_device *dev) return ret; } -static int -mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv, - struct flow_cls_offload *cls_flower, int flags) -{ - switch (cls_flower->command) { - case FLOW_CLS_REPLACE: - return mlx5e_configure_flower(priv->netdev, priv, cls_flower, - flags); - case FLOW_CLS_DESTROY: - return mlx5e_delete_flower(priv->netdev, priv, cls_flower, - flags); - case FLOW_CLS_STATS: - return mlx5e_stats_flower(priv->netdev, priv, cls_flower, - flags); - default: - return -EOPNOTSUPP; - } -} - -static -int mlx5e_rep_setup_tc_cls_matchall(struct mlx5e_priv *priv, - struct tc_cls_matchall_offload *ma) -{ - switch (ma->command) { - case TC_CLSMATCHALL_REPLACE: - return mlx5e_tc_configure_matchall(priv, ma); - case TC_CLSMATCHALL_DESTROY: - return mlx5e_tc_delete_matchall(priv, ma); - case TC_CLSMATCHALL_STATS: - mlx5e_tc_stats_matchall(priv, ma); - return 0; - default: - return -EOPNOTSUPP; - } -} - -static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data, - void *cb_priv) -{ - unsigned long flags = MLX5_TC_FLAG(INGRESS) | MLX5_TC_FLAG(ESW_OFFLOAD); - struct mlx5e_priv *priv = cb_priv; - - switch (type) { - case TC_SETUP_CLSFLOWER: - return mlx5e_rep_setup_tc_cls_flower(priv, type_data, flags); - 
case TC_SETUP_CLSMATCHALL: - return mlx5e_rep_setup_tc_cls_matchall(priv, type_data); - default: - return -EOPNOTSUPP; - } -} - -static int mlx5e_rep_setup_ft_cb(enum tc_setup_type type, void *type_data, - void *cb_priv) -{ - struct flow_cls_offload tmp, *f = type_data; - struct mlx5e_priv *priv = cb_priv; - struct mlx5_eswitch *esw; - unsigned long flags; - int err; - - flags = MLX5_TC_FLAG(INGRESS) | - MLX5_TC_FLAG(ESW_OFFLOAD) | - MLX5_TC_FLAG(FT_OFFLOAD); - esw = priv->mdev->priv.eswitch; - - switch (type) { - case TC_SETUP_CLSFLOWER: - memcpy(&tmp, f, sizeof(*f)); - - if (!mlx5_esw_chains_prios_supported(esw)) - return -EOPNOTSUPP; - - /* Re-use tc offload path by moving the ft flow to the - * reserved ft chain. - * - * FT offload can use prio range [0, INT_MAX], so we normalize - * it to range [1, mlx5_esw_chains_get_prio_range(esw)] - * as with tc, where prio 0 isn't supported. - * - * We only support chain 0 of FT offload. - */ - if (tmp.common.prio >= mlx5_esw_chains_get_prio_range(esw)) - return -EOPNOTSUPP; - if (tmp.common.chain_index != 0) - return -EOPNOTSUPP; - - tmp.common.chain_index = mlx5_esw_chains_get_ft_chain(esw); - tmp.common.prio++; - err = mlx5e_rep_setup_tc_cls_flower(priv, &tmp, flags); - memcpy(&f->stats, &tmp.stats, sizeof(f->stats)); - return err; - default: - return -EOPNOTSUPP; - } -} - -static LIST_HEAD(mlx5e_rep_block_tc_cb_list); -static LIST_HEAD(mlx5e_rep_block_ft_cb_list); -static int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type, - void *type_data) -{ - struct mlx5e_priv *priv = netdev_priv(dev); - struct flow_block_offload *f = type_data; - - f->unlocked_driver_cb = true; - - switch (type) { - case TC_SETUP_BLOCK: - return flow_block_cb_setup_simple(type_data, - &mlx5e_rep_block_tc_cb_list, - mlx5e_rep_setup_tc_cb, - priv, priv, true); - case TC_SETUP_FT: - return flow_block_cb_setup_simple(type_data, - &mlx5e_rep_block_ft_cb_list, - mlx5e_rep_setup_ft_cb, - priv, priv, true); - default: - return -EOPNOTSUPP; - } -} - bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv) { struct mlx5e_rep_priv *rpriv = priv->ppriv; @@ -1484,13 +650,9 @@ bool mlx5e_eswitch_uplink_rep(struct net_device *netdev) return netdev->netdev_ops == &mlx5e_netdev_ops_uplink_rep; } -bool mlx5e_eswitch_rep(struct net_device *netdev) +bool mlx5e_eswitch_vf_rep(struct net_device *netdev) { - if (netdev->netdev_ops == &mlx5e_netdev_ops_rep || - netdev->netdev_ops == &mlx5e_netdev_ops_uplink_rep) - return true; - - return false; + return netdev->netdev_ops == &mlx5e_netdev_ops_rep; } static void mlx5e_build_rep_params(struct net_device *netdev) @@ -1692,6 +854,24 @@ static int mlx5e_create_rep_vport_rx_rule(struct mlx5e_priv *priv) return 0; } +static void rep_vport_rx_rule_destroy(struct mlx5e_priv *priv) +{ + struct mlx5e_rep_priv *rpriv = priv->ppriv; + + if (!rpriv->vport_rx_rule) + return; + + mlx5_del_flow_rules(rpriv->vport_rx_rule); + rpriv->vport_rx_rule = NULL; +} + +int mlx5e_rep_bond_update(struct mlx5e_priv *priv, bool cleanup) +{ + rep_vport_rx_rule_destroy(priv); + + return cleanup ? 
0 : mlx5e_create_rep_vport_rx_rule(priv); +} + static int mlx5e_init_rep_rx(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; @@ -1744,7 +924,7 @@ err_destroy_ttc_table: err_destroy_direct_tirs: mlx5e_destroy_direct_tirs(priv, priv->direct_tir); err_destroy_indirect_tirs: - mlx5e_destroy_indirect_tirs(priv, false); + mlx5e_destroy_indirect_tirs(priv); err_destroy_direct_rqts: mlx5e_destroy_direct_rqts(priv, priv->direct_tir); err_destroy_indirect_rqts: @@ -1756,13 +936,11 @@ err_close_drop_rq: static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv) { - struct mlx5e_rep_priv *rpriv = priv->ppriv; - - mlx5_del_flow_rules(rpriv->vport_rx_rule); + rep_vport_rx_rule_destroy(priv); mlx5e_destroy_rep_root_ft(priv); mlx5e_destroy_ttc_table(priv, &priv->fs.ttc); mlx5e_destroy_direct_tirs(priv, priv->direct_tir); - mlx5e_destroy_indirect_tirs(priv, false); + mlx5e_destroy_indirect_tirs(priv); mlx5e_destroy_direct_rqts(priv, priv->direct_tir); mlx5e_destroy_rqt(priv, &priv->indir_rqt); mlx5e_close_drop_rq(&priv->drop_rq); @@ -1791,31 +969,25 @@ static int mlx5e_init_uplink_rep_tx(struct mlx5e_rep_priv *rpriv) priv = netdev_priv(netdev); uplink_priv = &rpriv->uplink_priv; - mutex_init(&uplink_priv->unready_flows_lock); - INIT_LIST_HEAD(&uplink_priv->unready_flows); - - /* init shared tc flow table */ - err = mlx5e_tc_esw_init(&uplink_priv->tc_ht); + err = mlx5e_rep_tc_init(rpriv); if (err) return err; mlx5_init_port_tun_entropy(&uplink_priv->tun_entropy, priv->mdev); - /* init indirect block notifications */ - INIT_LIST_HEAD(&uplink_priv->tc_indr_block_priv_list); - uplink_priv->netdevice_nb.notifier_call = mlx5e_nic_rep_netdevice_event; - err = register_netdevice_notifier_dev_net(rpriv->netdev, - &uplink_priv->netdevice_nb, - &uplink_priv->netdevice_nn); + mlx5e_rep_bond_init(rpriv); + err = mlx5e_rep_tc_netdevice_event_register(rpriv); if (err) { - mlx5_core_err(priv->mdev, "Failed to register netdev notifier\n"); - goto tc_esw_cleanup; + mlx5_core_err(priv->mdev, "Failed to register netdev notifier, err: %d\n", + err); + goto err_event_reg; } return 0; -tc_esw_cleanup: - mlx5e_tc_esw_cleanup(&uplink_priv->tc_ht); +err_event_reg: + mlx5e_rep_bond_cleanup(rpriv); + mlx5e_rep_tc_cleanup(rpriv); return err; } @@ -1845,17 +1017,10 @@ destroy_tises: static void mlx5e_cleanup_uplink_rep_tx(struct mlx5e_rep_priv *rpriv) { - struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv; - - /* clean indirect TC block notifications */ - unregister_netdevice_notifier_dev_net(rpriv->netdev, - &uplink_priv->netdevice_nb, - &uplink_priv->netdevice_nn); + mlx5e_rep_tc_netdevice_event_unregister(rpriv); mlx5e_rep_indr_clean_block_privs(rpriv); - - /* delete shared tc flow table */ - mlx5e_tc_esw_cleanup(&rpriv->uplink_priv.tc_ht); - mutex_destroy(&rpriv->uplink_priv.unready_flows_lock); + mlx5e_rep_bond_cleanup(rpriv); + mlx5e_rep_tc_cleanup(rpriv); } static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv) @@ -1897,13 +1062,8 @@ static int uplink_rep_async_event(struct notifier_block *nb, unsigned long event return NOTIFY_OK; } - if (event == MLX5_DEV_EVENT_PORT_AFFINITY) { - struct mlx5e_rep_priv *rpriv = priv->ppriv; - - queue_work(priv->wq, &rpriv->uplink_priv.reoffload_flows_work); - - return NOTIFY_OK; - } + if (event == MLX5_DEV_EVENT_PORT_AFFINITY) + return mlx5e_rep_tc_event_port_affinity(priv); return NOTIFY_DONE; } @@ -1912,7 +1072,6 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv) { struct net_device *netdev = priv->netdev; struct mlx5_core_dev *mdev = priv->mdev; - 
struct mlx5e_rep_priv *rpriv = priv->ppriv; u16 max_mtu; netdev->min_mtu = ETH_MIN_MTU; @@ -1920,8 +1079,7 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv) netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu); mlx5e_set_dev_port_mtu(priv); - INIT_WORK(&rpriv->uplink_priv.reoffload_flows_work, - mlx5e_tc_reoffload_flows_work); + mlx5e_rep_tc_enable(priv); mlx5_lag_add(mdev, netdev); priv->events_nb.notifier_call = uplink_rep_async_event; @@ -1933,11 +1091,10 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv) static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; - struct mlx5e_rep_priv *rpriv = priv->ppriv; mlx5e_dcbnl_delete_app(priv); mlx5_notifier_unregister(mdev, &priv->events_nb); - cancel_work_sync(&rpriv->uplink_priv.reoffload_flows_work); + mlx5e_rep_tc_disable(priv); mlx5_lag_remove(mdev); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h index 6a2337900420..da9f1686d525 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h @@ -56,6 +56,7 @@ struct mlx5e_neigh_update_table { }; struct mlx5_tc_ct_priv; +struct mlx5e_rep_bond; struct mlx5_rep_uplink_priv { /* Filters DB - instantiated by the uplink representor and shared by * the uplink's VFs @@ -89,6 +90,9 @@ struct mlx5_rep_uplink_priv { struct mapping_ctx *tunnel_enc_opts_mapping; struct mlx5_tc_ct_priv *ct_priv; + + /* support eswitch vports bonding */ + struct mlx5e_rep_bond *bond; }; struct mlx5e_rep_priv { @@ -158,6 +162,22 @@ struct mlx5e_neigh_hash_entry { enum { /* set when the encap entry is successfully offloaded into HW */ MLX5_ENCAP_ENTRY_VALID = BIT(0), + MLX5_REFORMAT_DECAP = BIT(1), +}; + +struct mlx5e_decap_key { + struct ethhdr key; +}; + +struct mlx5e_decap_entry { + struct mlx5e_decap_key key; + struct list_head flows; + struct hlist_node hlist; + refcount_t refcnt; + struct completion res_ready; + int compl_result; + struct mlx5_pkt_reformat *pkt_reformat; + struct rcu_head rcu; }; struct mlx5e_encap_entry { @@ -195,6 +215,15 @@ struct mlx5e_rep_sq { void mlx5e_rep_register_vport_reps(struct mlx5_core_dev *mdev); void mlx5e_rep_unregister_vport_reps(struct mlx5_core_dev *mdev); +int mlx5e_rep_bond_init(struct mlx5e_rep_priv *rpriv); +void mlx5e_rep_bond_cleanup(struct mlx5e_rep_priv *rpriv); +int mlx5e_rep_bond_enslave(struct mlx5_eswitch *esw, struct net_device *netdev, + struct net_device *lag_dev); +void mlx5e_rep_bond_unslave(struct mlx5_eswitch *esw, + const struct net_device *netdev, + const struct net_device *lag_dev); +int mlx5e_rep_bond_update(struct mlx5e_priv *priv, bool cleanup); + bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv); int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv); void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv); @@ -203,15 +232,15 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); -int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv, - struct mlx5e_encap_entry *e); -void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv, - struct mlx5e_encap_entry *e); - void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv); -bool mlx5e_eswitch_rep(struct net_device *netdev); +bool mlx5e_eswitch_vf_rep(struct net_device *netdev); bool mlx5e_eswitch_uplink_rep(struct net_device *netdev); +static inline bool mlx5e_eswitch_rep(struct net_device 
*netdev) +{ + return mlx5e_eswitch_vf_rep(netdev) || + mlx5e_eswitch_uplink_rep(netdev); +} #else /* CONFIG_MLX5_ESWITCH */ static inline bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv) { return false; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index a514685fb560..dbb1c6323967 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -42,6 +42,7 @@ #include "en_tc.h" #include "eswitch.h" #include "en_rep.h" +#include "en/rep/tc.h" #include "ipoib/ipoib.h" #include "en_accel/ipsec_rxtx.h" #include "en_accel/tls_rxtx.h" @@ -300,7 +301,7 @@ static inline void mlx5e_page_release(struct mlx5e_rq *rq, * put into the Reuse Ring, because there is no way to return * the page to the userspace when the interface goes down. */ - mlx5e_xsk_page_release(rq, dma_info); + xsk_buff_free(dma_info->xsk); else mlx5e_page_release_dynamic(rq, dma_info, recycle); } @@ -385,7 +386,11 @@ static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, u8 wqe_bulk) if (rq->umem) { int pages_desired = wqe_bulk << rq->wqe.info.log_num_frags; - if (unlikely(!mlx5e_xsk_pages_enough_umem(rq, pages_desired))) + /* Check in advance that we have enough frames, instead of + * allocating one-by-one, failing and moving frames to the + * Reuse Ring. + */ + if (unlikely(!xsk_buff_can_alloc(rq->umem, pages_desired))) return -ENOMEM; } @@ -480,8 +485,11 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) int err; int i; + /* Check in advance that we have enough frames, instead of allocating + * one-by-one, failing and moving frames to the Reuse Ring. + */ if (rq->umem && - unlikely(!mlx5e_xsk_pages_enough_umem(rq, MLX5_MPWRQ_PAGES_PER_WQE))) { + unlikely(!xsk_buff_can_alloc(rq->umem, MLX5_MPWRQ_PAGES_PER_WQE))) { err = -ENOMEM; goto err; } @@ -1044,12 +1052,24 @@ struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va, return skb; } +static void mlx5e_fill_xdp_buff(struct mlx5e_rq *rq, void *va, u16 headroom, + u32 len, struct xdp_buff *xdp) +{ + xdp->data_hard_start = va; + xdp->data = va + headroom; + xdp_set_data_meta_invalid(xdp); + xdp->data_end = xdp->data + len; + xdp->rxq = &rq->xdp_rxq; + xdp->frame_sz = rq->buff.frame0_sz; +} + struct sk_buff * mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt) { struct mlx5e_dma_info *di = wi->di; u16 rx_headroom = rq->buff.headroom; + struct xdp_buff xdp; struct sk_buff *skb; void *va, *data; bool consumed; @@ -1065,11 +1085,13 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, prefetch(data); rcu_read_lock(); - consumed = mlx5e_xdp_handle(rq, di, va, &rx_headroom, &cqe_bcnt, false); + mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp); + consumed = mlx5e_xdp_handle(rq, di, &cqe_bcnt, &xdp); rcu_read_unlock(); if (consumed) return NULL; /* page/packet was consumed by XDP */ + rx_headroom = xdp.data - xdp.data_hard_start; frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt); skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt); if (unlikely(!skb)) @@ -1216,12 +1238,12 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) if (rep->vlan && skb_vlan_tag_present(skb)) skb_vlan_pop(skb); - if (!mlx5e_tc_rep_update_skb(cqe, skb, &tc_priv)) + if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv)) goto free_wqe; napi_gro_receive(rq->cq.napi, skb); - mlx5_tc_rep_post_napi_receive(&tc_priv); + 
mlx5_rep_tc_post_napi_receive(&tc_priv); free_wqe: mlx5e_free_rx_wqe(rq, wi, true); @@ -1272,12 +1294,12 @@ void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); - if (!mlx5e_tc_rep_update_skb(cqe, skb, &tc_priv)) + if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv)) goto mpwrq_cqe_out; napi_gro_receive(rq->cq.napi, skb); - mlx5_tc_rep_post_napi_receive(&tc_priv); + mlx5_rep_tc_post_napi_receive(&tc_priv); mpwrq_cqe_out: if (likely(wi->consumed_strides < rq->mpwqe.num_strides)) @@ -1343,6 +1365,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, struct mlx5e_dma_info *di = &wi->umr.dma_info[page_idx]; u16 rx_headroom = rq->buff.headroom; u32 cqe_bcnt32 = cqe_bcnt; + struct xdp_buff xdp; struct sk_buff *skb; void *va, *data; u32 frag_size; @@ -1364,7 +1387,8 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, prefetch(data); rcu_read_lock(); - consumed = mlx5e_xdp_handle(rq, di, va, &rx_headroom, &cqe_bcnt32, false); + mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt32, &xdp); + consumed = mlx5e_xdp_handle(rq, di, &cqe_bcnt32, &xdp); rcu_read_unlock(); if (consumed) { if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) @@ -1372,6 +1396,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, return NULL; /* page/packet was consumed by XDP */ } + rx_headroom = xdp.data - xdp.data_hard_start; frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32); skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt32); if (unlikely(!skb)) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index a050808f2128..7fc84f58e28a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -31,6 +31,7 @@ */ #include <net/flow_dissector.h> +#include <net/flow_offload.h> #include <net/sch_generic.h> #include <net/pkt_cls.h> #include <net/tc_act/tc_gact.h> @@ -45,10 +46,15 @@ #include <net/tc_act/tc_tunnel_key.h> #include <net/tc_act/tc_pedit.h> #include <net/tc_act/tc_csum.h> +#include <net/tc_act/tc_mpls.h> #include <net/arp.h> #include <net/ipv6_stubs.h> +#include <net/bareudp.h> +#include <net/bonding.h> #include "en.h" #include "en_rep.h" +#include "en/rep/tc.h" +#include "en/rep/neigh.h" #include "en_tc.h" #include "eswitch.h" #include "esw/chains.h" @@ -89,6 +95,7 @@ enum { MLX5E_TC_FLOW_FLAG_NOT_READY = MLX5E_TC_FLOW_BASE + 5, MLX5E_TC_FLOW_FLAG_DELETED = MLX5E_TC_FLOW_BASE + 6, MLX5E_TC_FLOW_FLAG_CT = MLX5E_TC_FLOW_BASE + 7, + MLX5E_TC_FLOW_FLAG_L3_TO_L2_DECAP = MLX5E_TC_FLOW_BASE + 8, }; #define MLX5E_TC_MAX_SPLITS 1 @@ -122,6 +129,11 @@ struct mlx5e_tc_flow { u64 cookie; unsigned long flags; struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1]; + + /* flows sharing the same reformat object - currently mpls decap */ + struct list_head l3_to_l2_reformat; + struct mlx5e_decap_entry *decap_reformat; + /* Flow can be associated with multiple encap IDs. * The number of encaps is bounded by the number of supported * destinations. 
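The decap entries introduced in this file are shared between flows through a lookup-or-create scheme: entries live in a hash table keyed by the rebuilt Ethernet header, are refcounted by the flows that use them, and publish readiness through a completion so concurrent attachers can wait for the first creator. The driver's real version is mlx5e_attach_decap(), further down; a condensed, hypothetical sketch of the flow (decap_get(), tbl, and tbl_lock are illustrative names, not driver symbols, and error paths are elided):

        mutex_lock(&tbl_lock);
        e = decap_get(tbl, &key);       /* raises e->refcnt on success */
        if (e) {
                mutex_unlock(&tbl_lock);
                wait_for_completion(&e->res_ready);
                return e->compl_result ? ERR_PTR(e->compl_result) : e;
        }

        e = kzalloc(sizeof(*e), GFP_KERNEL);    /* error handling elided */
        refcount_set(&e->refcnt, 1);
        init_completion(&e->res_ready);
        hash_add_rcu(tbl, &e->hlist, hash_key);
        mutex_unlock(&tbl_lock);

        /* Allocate the HW reformat object outside the table lock. */
        e->pkt_reformat = mlx5_packet_reformat_alloc(mdev,
                                MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2,
                                sizeof(key.key), &key.key,
                                MLX5_FLOW_NAMESPACE_FDB);
        e->compl_result = IS_ERR(e->pkt_reformat) ?
                          PTR_ERR(e->pkt_reformat) : 0;
        complete_all(&e->res_ready);

Doing the firmware allocation outside the table lock keeps other attachers from blocking on it; they find the published entry, sleep on res_ready, and read compl_result to learn whether the creator succeeded.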
@@ -134,6 +146,7 @@ struct mlx5e_tc_flow { struct list_head hairpin; /* flows sharing the same hairpin */ struct list_head peer; /* flows with peer flow */ struct list_head unready; /* flows not ready to be offloaded (e.g due to missing route) */ + struct net_device *orig_dev; /* netdev adding flow first */ int tmp_efi_index; struct list_head tmp_list; /* temporary flow list used by neigh update */ refcount_t refcnt; @@ -153,40 +166,12 @@ struct mlx5e_tc_flow_parse_attr { struct mlx5_flow_spec spec; struct mlx5e_tc_mod_hdr_acts mod_hdr_acts; int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS]; + struct ethhdr eth; }; #define MLX5E_TC_TABLE_NUM_GROUPS 4 #define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(16) -struct tunnel_match_key { - struct flow_dissector_key_control enc_control; - struct flow_dissector_key_keyid enc_key_id; - struct flow_dissector_key_ports enc_tp; - struct flow_dissector_key_ip enc_ip; - union { - struct flow_dissector_key_ipv4_addrs enc_ipv4; - struct flow_dissector_key_ipv6_addrs enc_ipv6; - }; - - int filter_ifindex; -}; - -struct tunnel_match_enc_opts { - struct flow_dissector_key_enc_opts key; - struct flow_dissector_key_enc_opts mask; -}; - -/* Tunnel_id mapping is TUNNEL_INFO_BITS + ENC_OPTS_BITS. - * Upper TUNNEL_INFO_BITS for general tunnel info. - * Lower ENC_OPTS_BITS bits for enc_opts. - */ -#define TUNNEL_INFO_BITS 6 -#define TUNNEL_INFO_BITS_MASK GENMASK(TUNNEL_INFO_BITS - 1, 0) -#define ENC_OPTS_BITS 2 -#define ENC_OPTS_BITS_MASK GENMASK(ENC_OPTS_BITS - 1, 0) -#define TUNNEL_ID_BITS (TUNNEL_INFO_BITS + ENC_OPTS_BITS) -#define TUNNEL_ID_MASK GENMASK(TUNNEL_ID_BITS - 1, 0) - struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = { [CHAIN_TO_REG] = { .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0, @@ -225,8 +210,8 @@ mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec, fmask = headers_c + soffset; fval = headers_v + soffset; - mask = cpu_to_be32(mask) >> (32 - (match_len * 8)); - data = cpu_to_be32(data) >> (32 - (match_len * 8)); + mask = (__force u32)(cpu_to_be32(mask)) >> (32 - (match_len * 8)); + data = (__force u32)(cpu_to_be32(data)) >> (32 - (match_len * 8)); memcpy(fmask, &mask, match_len); memcpy(fval, &data, match_len); @@ -1149,6 +1134,11 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv, struct netlink_ext_ack *extack, struct net_device **encap_dev, bool *encap_valid); +static int mlx5e_attach_decap(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow, + struct netlink_ext_ack *extack); +static void mlx5e_detach_decap(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow); static struct mlx5_flow_handle * mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw, @@ -1324,6 +1314,12 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, return -EOPNOTSUPP; } + if (flow_flag_test(flow, L3_TO_L2_DECAP)) { + err = mlx5e_attach_decap(priv, flow, extack); + if (err) + return err; + } + for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) { int mirred_ifindex; @@ -1433,6 +1429,9 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv, if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) mlx5_fc_destroy(attr->counter_dev, attr->counter); + + if (flow_flag_test(flow, L3_TO_L2_DECAP)) + mlx5e_detach_decap(priv, flow); } void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv, @@ -1709,6 +1708,17 @@ static void mlx5e_encap_dealloc(struct mlx5e_priv *priv, struct mlx5e_encap_entr kfree_rcu(e, rcu); } +static void mlx5e_decap_dealloc(struct mlx5e_priv *priv, + struct mlx5e_decap_entry *d) +{ + WARN_ON(!list_empty(&d->flows)); + + if 
(!d->compl_result) + mlx5_packet_reformat_dealloc(priv->mdev, d->pkt_reformat); + + kfree_rcu(d, rcu); +} + void mlx5e_encap_put(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; @@ -1721,6 +1731,18 @@ void mlx5e_encap_put(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e) mlx5e_encap_dealloc(priv, e); } +static void mlx5e_decap_put(struct mlx5e_priv *priv, struct mlx5e_decap_entry *d) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + + if (!refcount_dec_and_mutex_lock(&d->refcnt, &esw->offloads.decap_tbl_lock)) + return; + hash_del_rcu(&d->hlist); + mutex_unlock(&esw->offloads.decap_tbl_lock); + + mlx5e_decap_dealloc(priv, d); +} + static void mlx5e_detach_encap(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow, int out_index) { @@ -1744,6 +1766,29 @@ static void mlx5e_detach_encap(struct mlx5e_priv *priv, mlx5e_encap_dealloc(priv, e); } +static void mlx5e_detach_decap(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5e_decap_entry *d = flow->decap_reformat; + + if (!d) + return; + + mutex_lock(&esw->offloads.decap_tbl_lock); + list_del(&flow->l3_to_l2_reformat); + flow->decap_reformat = NULL; + + if (!refcount_dec_and_test(&d->refcnt)) { + mutex_unlock(&esw->offloads.decap_tbl_lock); + return; + } + hash_del_rcu(&d->hlist); + mutex_unlock(&esw->offloads.decap_tbl_lock); + + mlx5e_decap_dealloc(priv, d); +} + static void __mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow) { struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch; @@ -1828,7 +1873,7 @@ enc_opts_is_dont_care_or_full_match(struct mlx5e_priv *priv, memchr_inv(opt->opt_data, 0, opt->length * 4)) { *dont_care = false; - if (opt->opt_class != U16_MAX || + if (opt->opt_class != htons(U16_MAX) || opt->type != U8_MAX) { NL_SET_ERR_MSG(extack, "Partial match of tunnel options in chain > 0 isn't supported"); @@ -1975,6 +2020,32 @@ u32 mlx5e_tc_get_flow_tun_id(struct mlx5e_tc_flow *flow) return flow->tunnel_id; } +void mlx5e_tc_set_ethertype(struct mlx5_core_dev *mdev, + struct flow_match_basic *match, bool outer, + void *headers_c, void *headers_v) +{ + bool ip_version_cap; + + ip_version_cap = outer ? + MLX5_CAP_FLOWTABLE_NIC_RX(mdev, + ft_field_support.outer_ip_version) : + MLX5_CAP_FLOWTABLE_NIC_RX(mdev, + ft_field_support.inner_ip_version); + + if (ip_version_cap && match->mask->n_proto == htons(0xFFFF) && + (match->key->n_proto == htons(ETH_P_IP) || + match->key->n_proto == htons(ETH_P_IPV6))) { + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_version); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, + match->key->n_proto == htons(ETH_P_IP) ? 
4 : 6); + } else { + MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype, + ntohs(match->mask->n_proto)); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, + ntohs(match->key->n_proto)); + } +} + static int parse_tunnel_attr(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow, struct mlx5_flow_spec *spec, @@ -2015,7 +2086,11 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv, return err; } - flow->esw_attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP; + /* With mpls over udp we decapsulate using packet reformat + * object + */ + if (!netif_is_bareudp(filter_dev)) + flow->esw_attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP; } if (!needs_mapping && !sets_mapping) @@ -2078,7 +2153,7 @@ static int mlx5e_flower_parse_meta(struct net_device *filter_dev, flow_rule_match_meta(rule, &match); if (match.mask->ingress_ifindex != 0xFFFFFFFF) { NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask"); - return -EINVAL; + return -EOPNOTSUPP; } ingress_dev = __dev_get_by_index(dev_net(filter_dev), @@ -2086,18 +2161,32 @@ static int mlx5e_flower_parse_meta(struct net_device *filter_dev, if (!ingress_dev) { NL_SET_ERR_MSG_MOD(extack, "Can't find the ingress port to match on"); - return -EINVAL; + return -ENOENT; } if (ingress_dev != filter_dev) { NL_SET_ERR_MSG_MOD(extack, "Can't match on the ingress filter port"); - return -EINVAL; + return -EOPNOTSUPP; } return 0; } +static bool skip_key_basic(struct net_device *filter_dev, + struct flow_cls_offload *f) +{ + /* When doing mpls over udp decap, the user needs to provide + * MPLS_UC as the protocol in order to be able to match on mpls + * label fields. However, the actual ethertype is IP so we want to + * avoid matching on this, otherwise we'll fail the match. + */ + if (netif_is_bareudp(filter_dev) && f->common.chain_index == 0) + return true; + + return false; +} + static int __parse_cls_flower(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow, struct mlx5_flow_spec *spec, @@ -2142,7 +2231,8 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, BIT(FLOW_DISSECTOR_KEY_IP) | BIT(FLOW_DISSECTOR_KEY_CT) | BIT(FLOW_DISSECTOR_KEY_ENC_IP) | - BIT(FLOW_DISSECTOR_KEY_ENC_OPTS))) { + BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | + BIT(FLOW_DISSECTOR_KEY_MPLS))) { NL_SET_ERR_MSG_MOD(extack, "Unsupported key"); netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n", dissector->used_keys); @@ -2172,14 +2262,14 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, if (err) return err; - if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC) && + !skip_key_basic(filter_dev, f)) { struct flow_match_basic match; flow_rule_match_basic(rule, &match); - MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype, - ntohs(match.mask->n_proto)); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, - ntohs(match.key->n_proto)); + mlx5e_tc_set_ethertype(priv->mdev, &match, + match_level == outer_match_level, + headers_c, headers_v); if (match.mask->n_proto) *match_level = MLX5_MATCH_L2; @@ -2725,10 +2815,10 @@ static int offload_pedit_fields(struct mlx5e_priv *priv, continue; if (f->field_bsize == 32) { - mask_be32 = (__be32)mask; + mask_be32 = (__force __be32)(mask); mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32)); } else if (f->field_bsize == 16) { - mask_be32 = (__be32)mask; + mask_be32 = (__force __be32)(mask); mask_be16 = *(__be16 *)&mask_be32; mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16)); } @@ -2837,10 +2927,12 @@ void dealloc_mod_hdr_actions(struct 
mlx5e_tc_mod_hdr_acts *mod_hdr_acts) static const struct pedit_headers zero_masks = {}; -static int parse_tc_pedit_action(struct mlx5e_priv *priv, - const struct flow_action_entry *act, int namespace, - struct pedit_headers_action *hdrs, - struct netlink_ext_ack *extack) +static int +parse_pedit_to_modify_hdr(struct mlx5e_priv *priv, + const struct flow_action_entry *act, int namespace, + struct mlx5e_tc_flow_parse_attr *parse_attr, + struct pedit_headers_action *hdrs, + struct netlink_ext_ack *extack) { u8 cmd = (act->id == FLOW_ACTION_MANGLE) ? 0 : 1; int err = -EOPNOTSUPP; @@ -2876,6 +2968,46 @@ out_err: return err; } +static int +parse_pedit_to_reformat(struct mlx5e_priv *priv, + const struct flow_action_entry *act, + struct mlx5e_tc_flow_parse_attr *parse_attr, + struct netlink_ext_ack *extack) +{ + u32 mask, val, offset; + u32 *p; + + if (act->id != FLOW_ACTION_MANGLE) + return -EOPNOTSUPP; + + if (act->mangle.htype != FLOW_ACT_MANGLE_HDR_TYPE_ETH) { + NL_SET_ERR_MSG_MOD(extack, "Only Ethernet modification is supported"); + return -EOPNOTSUPP; + } + + mask = ~act->mangle.mask; + val = act->mangle.val; + offset = act->mangle.offset; + p = (u32 *)&parse_attr->eth; + *(p + (offset >> 2)) |= (val & mask); + + return 0; +} + +static int parse_tc_pedit_action(struct mlx5e_priv *priv, + const struct flow_action_entry *act, int namespace, + struct mlx5e_tc_flow_parse_attr *parse_attr, + struct pedit_headers_action *hdrs, + struct mlx5e_tc_flow *flow, + struct netlink_ext_ack *extack) +{ + if (flow && flow_flag_test(flow, L3_TO_L2_DECAP)) + return parse_pedit_to_reformat(priv, act, parse_attr, extack); + + return parse_pedit_to_modify_hdr(priv, act, namespace, + parse_attr, hdrs, extack); +} + static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace, struct mlx5e_tc_flow_parse_attr *parse_attr, struct pedit_headers_action *hdrs, @@ -3013,16 +3145,19 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec, { const struct flow_action_entry *act; bool modify_ip_header; + void *headers_c; void *headers_v; u16 ethertype; u8 ip_proto; int i, err; + headers_c = get_match_headers_criteria(actions, spec); headers_v = get_match_headers_value(actions, spec); ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype); /* for non-IP we only re-write MACs, so we're okay */ - if (ethertype != ETH_P_IP && ethertype != ETH_P_IPV6) + if (MLX5_GET(fte_match_set_lyr_2_4, headers_c, ip_version) == 0 && + ethertype != ETH_P_IP && ethertype != ETH_P_IPV6) goto out_ok; modify_ip_header = false; @@ -3083,6 +3218,11 @@ static bool actions_match_supported(struct mlx5e_priv *priv, return true; } +static bool same_port_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv) +{ + return priv->mdev == peer_priv->mdev; +} + static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv) { struct mlx5_core_dev *fmdev, *pmdev; @@ -3134,7 +3274,7 @@ static int add_vlan_rewrite_action(struct mlx5e_priv *priv, int namespace, return -EOPNOTSUPP; } - err = parse_tc_pedit_action(priv, &pedit_act, namespace, hdrs, NULL); + err = parse_tc_pedit_action(priv, &pedit_act, namespace, parse_attr, hdrs, NULL, extack); *action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; return err; @@ -3200,7 +3340,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, case FLOW_ACTION_MANGLE: case FLOW_ACTION_ADD: err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_KERNEL, - hdrs, extack); + parse_attr, hdrs, NULL, extack); if (err) return err; @@ -3294,14 +3434,24 @@ static inline int 
cmp_encap_info(struct encap_key *a, a->tc_tunnel->tunnel_type != b->tc_tunnel->tunnel_type; } +static inline int cmp_decap_info(struct mlx5e_decap_key *a, + struct mlx5e_decap_key *b) +{ + return memcmp(&a->key, &b->key, sizeof(b->key)); +} + static inline int hash_encap_info(struct encap_key *key) { return jhash(key->ip_tun_key, sizeof(*key->ip_tun_key), key->tc_tunnel->tunnel_type); } +static inline int hash_decap_info(struct mlx5e_decap_key *key) +{ + return jhash(&key->key, sizeof(key->key), 0); +} -static bool is_merged_eswitch_dev(struct mlx5e_priv *priv, +static bool is_merged_eswitch_vfs(struct mlx5e_priv *priv, struct net_device *peer_netdev) { struct mlx5e_priv *peer_priv; @@ -3309,18 +3459,21 @@ static bool is_merged_eswitch_dev(struct mlx5e_priv *priv, peer_priv = netdev_priv(peer_netdev); return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) && - mlx5e_eswitch_rep(priv->netdev) && - mlx5e_eswitch_rep(peer_netdev) && + mlx5e_eswitch_vf_rep(priv->netdev) && + mlx5e_eswitch_vf_rep(peer_netdev) && same_hw_devs(priv, peer_priv)); } - - bool mlx5e_encap_take(struct mlx5e_encap_entry *e) { return refcount_inc_not_zero(&e->refcnt); } +static bool mlx5e_decap_take(struct mlx5e_decap_entry *e) +{ + return refcount_inc_not_zero(&e->refcnt); +} + static struct mlx5e_encap_entry * mlx5e_encap_get(struct mlx5e_priv *priv, struct encap_key *key, uintptr_t hash_key) @@ -3341,6 +3494,24 @@ mlx5e_encap_get(struct mlx5e_priv *priv, struct encap_key *key, return NULL; } +static struct mlx5e_decap_entry * +mlx5e_decap_get(struct mlx5e_priv *priv, struct mlx5e_decap_key *key, + uintptr_t hash_key) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5e_decap_key r_key; + struct mlx5e_decap_entry *e; + + hash_for_each_possible_rcu(esw->offloads.decap_tbl, e, + hlist, hash_key) { + r_key = e->key; + if (!cmp_decap_info(&r_key, key) && + mlx5e_decap_take(e)) + return e; + } + return NULL; +} + static struct ip_tunnel_info *dup_tun_info(const struct ip_tunnel_info *tun_info) { size_t tun_size = sizeof(*tun_info) + tun_info->options_len; @@ -3486,6 +3657,84 @@ out_err_init: return err; } +static int mlx5e_attach_decap(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow, + struct netlink_ext_ack *extack) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5_esw_flow_attr *attr = flow->esw_attr; + struct mlx5e_tc_flow_parse_attr *parse_attr; + struct mlx5e_decap_entry *d; + struct mlx5e_decap_key key; + uintptr_t hash_key; + int err = 0; + + parse_attr = attr->parse_attr; + if (sizeof(parse_attr->eth) > MLX5_CAP_ESW(priv->mdev, max_encap_header_size)) { + NL_SET_ERR_MSG_MOD(extack, + "encap header larger than max supported"); + return -EOPNOTSUPP; + } + + key.key = parse_attr->eth; + hash_key = hash_decap_info(&key); + mutex_lock(&esw->offloads.decap_tbl_lock); + d = mlx5e_decap_get(priv, &key, hash_key); + if (d) { + mutex_unlock(&esw->offloads.decap_tbl_lock); + wait_for_completion(&d->res_ready); + mutex_lock(&esw->offloads.decap_tbl_lock); + if (d->compl_result) { + err = -EREMOTEIO; + goto out_free; + } + goto found; + } + + d = kzalloc(sizeof(*d), GFP_KERNEL); + if (!d) { + err = -ENOMEM; + goto out_err; + } + + d->key = key; + refcount_set(&d->refcnt, 1); + init_completion(&d->res_ready); + INIT_LIST_HEAD(&d->flows); + hash_add_rcu(esw->offloads.decap_tbl, &d->hlist, hash_key); + mutex_unlock(&esw->offloads.decap_tbl_lock); + + d->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, + MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2, + sizeof(parse_attr->eth), + &parse_attr->eth, 
+ MLX5_FLOW_NAMESPACE_FDB); + if (IS_ERR(d->pkt_reformat)) { + err = PTR_ERR(d->pkt_reformat); + d->compl_result = err; + } + mutex_lock(&esw->offloads.decap_tbl_lock); + complete_all(&d->res_ready); + if (err) + goto out_free; + +found: + flow->decap_reformat = d; + attr->decap_pkt_reformat = d->pkt_reformat; + list_add(&flow->l3_to_l2_reformat, &d->flows); + mutex_unlock(&esw->offloads.decap_tbl_lock); + return 0; + +out_free: + mutex_unlock(&esw->offloads.decap_tbl_lock); + mlx5e_decap_put(priv, d); + return err; + +out_err: + mutex_unlock(&esw->offloads.decap_tbl_lock); + return err; +} + static int parse_tc_vlan_action(struct mlx5e_priv *priv, const struct flow_action_entry *act, struct mlx5_esw_flow_attr *attr, @@ -3539,6 +3788,28 @@ static int parse_tc_vlan_action(struct mlx5e_priv *priv, return 0; } +static struct net_device *get_fdb_out_dev(struct net_device *uplink_dev, + struct net_device *out_dev) +{ + struct net_device *fdb_out_dev = out_dev; + struct net_device *uplink_upper; + + rcu_read_lock(); + uplink_upper = netdev_master_upper_dev_get_rcu(uplink_dev); + if (uplink_upper && netif_is_lag_master(uplink_upper) && + uplink_upper == out_dev) { + fdb_out_dev = uplink_dev; + } else if (netif_is_lag_master(out_dev)) { + fdb_out_dev = bond_option_active_slave_get_rcu(netdev_priv(out_dev)); + if (fdb_out_dev && + (!mlx5e_eswitch_rep(fdb_out_dev) || + !netdev_port_same_parent_id(fdb_out_dev, uplink_dev))) + fdb_out_dev = NULL; + } + rcu_read_unlock(); + return fdb_out_dev; +} + static int add_vlan_push_action(struct mlx5e_priv *priv, struct mlx5_esw_flow_attr *attr, struct net_device **out_dev, @@ -3585,14 +3856,37 @@ static int add_vlan_pop_action(struct mlx5e_priv *priv, return err; } +static bool same_hw_reps(struct mlx5e_priv *priv, + struct net_device *peer_netdev) +{ + struct mlx5e_priv *peer_priv; + + peer_priv = netdev_priv(peer_netdev); + + return mlx5e_eswitch_rep(priv->netdev) && + mlx5e_eswitch_rep(peer_netdev) && + same_hw_devs(priv, peer_priv); +} + +static bool is_lag_dev(struct mlx5e_priv *priv, + struct net_device *peer_netdev) +{ + return ((mlx5_lag_is_sriov(priv->mdev) || + mlx5_lag_is_multipath(priv->mdev)) && + same_hw_reps(priv, peer_netdev)); +} + bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv, struct net_device *out_dev) { - if (is_merged_eswitch_dev(priv, out_dev)) + if (is_merged_eswitch_vfs(priv, out_dev)) + return true; + + if (is_lag_dev(priv, out_dev)) return true; return mlx5e_eswitch_rep(out_dev) && - same_hw_devs(priv, netdev_priv(out_dev)); + same_port_devs(priv, netdev_priv(out_dev)); } static bool is_duplicated_output_device(struct net_device *dev, @@ -3697,7 +3991,8 @@ static int verify_uplink_forwarding(struct mlx5e_priv *priv, static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct flow_action *flow_action, struct mlx5e_tc_flow *flow, - struct netlink_ext_ack *extack) + struct netlink_ext_ack *extack, + struct net_device *filter_dev) { struct pedit_headers_action hdrs[2] = {}; struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; @@ -3711,6 +4006,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, bool encap = false, decap = false; u32 action = attr->action; int err, i, if_count = 0; + bool mpls_push = false; if (!flow_action_has_entries(flow_action)) return -EINVAL; @@ -3725,15 +4021,48 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, action |= MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT; break; + case FLOW_ACTION_MPLS_PUSH: + if (!MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, + 
reformat_l2_to_l3_tunnel) || + act->mpls_push.proto != htons(ETH_P_MPLS_UC)) { + NL_SET_ERR_MSG_MOD(extack, + "mpls push is supported only for mpls_uc protocol"); + return -EOPNOTSUPP; + } + mpls_push = true; + break; + case FLOW_ACTION_MPLS_POP: + /* we only support mpls pop if it is the first action + * and the filter net device is bareudp. Subsequent + * actions can be pedit and the last can be mirred + * egress redirect. + */ + if (i) { + NL_SET_ERR_MSG_MOD(extack, + "mpls pop supported only as first action"); + return -EOPNOTSUPP; + } + if (!netif_is_bareudp(filter_dev)) { + NL_SET_ERR_MSG_MOD(extack, + "mpls pop supported only on bareudp devices"); + return -EOPNOTSUPP; + } + + parse_attr->eth.h_proto = act->mpls_pop.proto; + action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT; + flow_flag_set(flow, L3_TO_L2_DECAP); + break; case FLOW_ACTION_MANGLE: case FLOW_ACTION_ADD: err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_FDB, - hdrs, extack); + parse_attr, hdrs, flow, extack); if (err) return err; - action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; - attr->split_count = attr->out_count; + if (!flow_flag_test(flow, L3_TO_L2_DECAP)) { + action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; + attr->split_count = attr->out_count; + } break; case FLOW_ACTION_CSUM: if (csum_offload_supported(priv, action, @@ -3755,6 +4084,12 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, return -EINVAL; } + if (mpls_push && !netif_is_bareudp(out_dev)) { + NL_SET_ERR_MSG_MOD(extack, + "mpls is supported only through a bareudp device"); + return -EOPNOTSUPP; + } + if (ft_flow && out_dev == priv->netdev) { /* Ignore forward to self rules generated * by adding both mlx5 devs to the flow table @@ -3790,7 +4125,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, } else if (netdev_port_same_parent_id(priv->netdev, out_dev)) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH); - struct net_device *uplink_upper; if (is_duplicated_output_device(priv->netdev, out_dev, @@ -3802,14 +4136,9 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, ifindexes[if_count] = out_dev->ifindex; if_count++; - rcu_read_lock(); - uplink_upper = - netdev_master_upper_dev_get_rcu(uplink_dev); - if (uplink_upper && - netif_is_lag_master(uplink_upper) && - uplink_upper == out_dev) - out_dev = uplink_dev; - rcu_read_unlock(); + out_dev = get_fdb_out_dev(uplink_dev, out_dev); + if (!out_dev) + return -ENODEV; if (is_vlan_dev(out_dev)) { err = add_vlan_push_action(priv, attr, @@ -3833,10 +4162,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, if (!mlx5e_is_valid_eswitch_fwd_dev(priv, out_dev)) { NL_SET_ERR_MSG_MOD(extack, "devices are not on same switch HW, can't offload forwarding"); - netdev_warn(priv->netdev, - "devices %s %s not on same switch HW, can't offload forwarding\n", - priv->netdev->name, - out_dev->name); return -EOPNOTSUPP; } @@ -4085,6 +4410,7 @@ mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size, INIT_LIST_HEAD(&flow->encaps[out_index].list); INIT_LIST_HEAD(&flow->mod_hdr); INIT_LIST_HEAD(&flow->hairpin); + INIT_LIST_HEAD(&flow->l3_to_l2_reformat); refcount_set(&flow->refcnt, 1); init_completion(&flow->init_done); @@ -4154,7 +4480,7 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv, if (err) goto err_free; - err = parse_tc_fdb_actions(priv, &rule->action, flow, extack); + err = parse_tc_fdb_actions(priv, &rule->action, flow, extack, filter_dev); if (err) goto err_free; @@ -4340,11 +4666,21 @@ 
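
The MPLS-pop branch above encodes an ordering constraint: pop is only offloadable as the very first action of the chain, and only when the classifier is attached to a bareudp device. A minimal user-space model of that check follows; the enum and the boolean parameter are illustrative stand-ins for the kernel's flow_action_entry array and netif_is_bareudp(), not driver API.

#include <stdbool.h>
#include <stdio.h>

enum act_id { ACT_MPLS_POP, ACT_PEDIT, ACT_MIRRED_REDIRECT };

/* pop must exist, be the first action, and the filter device must be bareudp */
static bool mpls_pop_offloadable(const enum act_id *acts, int n,
				 bool filter_dev_is_bareudp)
{
	return n > 0 && acts[0] == ACT_MPLS_POP && filter_dev_is_bareudp;
}

int main(void)
{
	enum act_id ok[]  = { ACT_MPLS_POP, ACT_PEDIT, ACT_MIRRED_REDIRECT };
	enum act_id bad[] = { ACT_PEDIT, ACT_MPLS_POP };

	printf("%d %d\n", mpls_pop_offloadable(ok, 3, true),
	       mpls_pop_offloadable(bad, 2, true)); /* prints: 1 0 */
	return 0;
}

Subsequent actions may then be pedit (accumulated into parse_attr->eth for the L3-to-L2 reformat) and finally a mirred egress redirect, as the comment in the parser states.
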
mlx5e_tc_add_flow(struct mlx5e_priv *priv, return err; } +static bool is_flow_rule_duplicate_allowed(struct net_device *dev, + struct mlx5e_rep_priv *rpriv) +{ + /* Offloaded flow rule is allowed to duplicate on non-uplink representor + * sharing tc block with other slaves of a lag device. + */ + return netif_is_lag_port(dev) && rpriv->rep->vport != MLX5_VPORT_UPLINK; +} + int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv, struct flow_cls_offload *f, unsigned long flags) { struct netlink_ext_ack *extack = f->common.extack; struct rhashtable *tc_ht = get_tc_ht(priv, flags); + struct mlx5e_rep_priv *rpriv = priv->ppriv; struct mlx5e_tc_flow *flow; int err = 0; @@ -4352,6 +4688,12 @@ int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv, flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params); rcu_read_unlock(); if (flow) { + /* Same flow rule offloaded to non-uplink representor sharing tc block, + * just return 0. + */ + if (is_flow_rule_duplicate_allowed(dev, rpriv) && flow->orig_dev != dev) + goto out; + NL_SET_ERR_MSG_MOD(extack, "flow cookie already exists, ignoring"); netdev_warn_once(priv->netdev, @@ -4366,6 +4708,12 @@ int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv, if (err) goto out; + /* Flow rule offloaded to non-uplink representor sharing tc block, + * set the flow's owner dev. + */ + if (is_flow_rule_duplicate_allowed(dev, rpriv)) + flow->orig_dev = dev; + err = rhashtable_lookup_insert_fast(tc_ht, &flow->node, tc_ht_params); if (err) goto err_free; @@ -4598,7 +4946,7 @@ void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv, dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets; dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes; rpriv->prev_vf_vport_stats = cur_stats; - flow_stats_update(&ma->stats, dpkts, dbytes, jiffies, + flow_stats_update(&ma->stats, dbytes, dpkts, jiffies, FLOW_ACTION_HW_STATS_DELAYED); } @@ -4806,148 +5154,35 @@ void mlx5e_tc_reoffload_flows_work(struct work_struct *work) mutex_unlock(&rpriv->unready_flows_lock); } -#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) -static bool mlx5e_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb, - struct mlx5e_tc_update_priv *tc_priv, - u32 tunnel_id) -{ - struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - struct tunnel_match_enc_opts enc_opts = {}; - struct mlx5_rep_uplink_priv *uplink_priv; - struct mlx5e_rep_priv *uplink_rpriv; - struct metadata_dst *tun_dst; - struct tunnel_match_key key; - u32 tun_id, enc_opts_id; - struct net_device *dev; - int err; - - enc_opts_id = tunnel_id & ENC_OPTS_BITS_MASK; - tun_id = tunnel_id >> ENC_OPTS_BITS; - - if (!tun_id) - return true; - - uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); - uplink_priv = &uplink_rpriv->uplink_priv; - - err = mapping_find(uplink_priv->tunnel_mapping, tun_id, &key); - if (err) { - WARN_ON_ONCE(true); - netdev_dbg(priv->netdev, - "Couldn't find tunnel for tun_id: %d, err: %d\n", - tun_id, err); - return false; - } - - if (enc_opts_id) { - err = mapping_find(uplink_priv->tunnel_enc_opts_mapping, - enc_opts_id, &enc_opts); - if (err) { - netdev_dbg(priv->netdev, - "Couldn't find tunnel (opts) for tun_id: %d, err: %d\n", - enc_opts_id, err); - return false; - } - } - - tun_dst = tun_rx_dst(enc_opts.key.len); - if (!tun_dst) { - WARN_ON_ONCE(true); - return false; - } - - ip_tunnel_key_init(&tun_dst->u.tun_info.key, - key.enc_ipv4.src, key.enc_ipv4.dst, - key.enc_ip.tos, key.enc_ip.ttl, - 0, /* label */ - key.enc_tp.src, key.enc_tp.dst, - 
key32_to_tunnel_id(key.enc_key_id.keyid), - TUNNEL_KEY); - - if (enc_opts.key.len) - ip_tunnel_info_opts_set(&tun_dst->u.tun_info, - enc_opts.key.data, - enc_opts.key.len, - enc_opts.key.dst_opt_type); - - skb_dst_set(skb, (struct dst_entry *)tun_dst); - dev = dev_get_by_index(&init_net, key.filter_ifindex); - if (!dev) { - netdev_dbg(priv->netdev, - "Couldn't find tunnel device with ifindex: %d\n", - key.filter_ifindex); - return false; +static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv, + struct flow_cls_offload *cls_flower, + unsigned long flags) +{ + switch (cls_flower->command) { + case FLOW_CLS_REPLACE: + return mlx5e_configure_flower(priv->netdev, priv, cls_flower, + flags); + case FLOW_CLS_DESTROY: + return mlx5e_delete_flower(priv->netdev, priv, cls_flower, + flags); + case FLOW_CLS_STATS: + return mlx5e_stats_flower(priv->netdev, priv, cls_flower, + flags); + default: + return -EOPNOTSUPP; } - - /* Set tun_dev so we do dev_put() after datapath */ - tc_priv->tun_dev = dev; - - skb->dev = dev; - - return true; } -#endif /* CONFIG_NET_TC_SKB_EXT */ -bool mlx5e_tc_rep_update_skb(struct mlx5_cqe64 *cqe, - struct sk_buff *skb, - struct mlx5e_tc_update_priv *tc_priv) +int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, + void *cb_priv) { -#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) - u32 chain = 0, reg_c0, reg_c1, tunnel_id, tuple_id; - struct mlx5_rep_uplink_priv *uplink_priv; - struct mlx5e_rep_priv *uplink_rpriv; - struct tc_skb_ext *tc_skb_ext; - struct mlx5_eswitch *esw; - struct mlx5e_priv *priv; - int tunnel_moffset; - int err; - - reg_c0 = (be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK); - if (reg_c0 == MLX5_FS_DEFAULT_FLOW_TAG) - reg_c0 = 0; - reg_c1 = be32_to_cpu(cqe->ft_metadata); - - if (!reg_c0) - return true; - - priv = netdev_priv(skb->dev); - esw = priv->mdev->priv.eswitch; - - err = mlx5_eswitch_get_chain_for_tag(esw, reg_c0, &chain); - if (err) { - netdev_dbg(priv->netdev, - "Couldn't find chain for chain tag: %d, err: %d\n", - reg_c0, err); - return false; - } - - if (chain) { - tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT); - if (!tc_skb_ext) { - WARN_ON(1); - return false; - } + unsigned long flags = MLX5_TC_FLAG(INGRESS) | MLX5_TC_FLAG(NIC_OFFLOAD); + struct mlx5e_priv *priv = cb_priv; - tc_skb_ext->chain = chain; - - tuple_id = reg_c1 & TUPLE_ID_MAX; - - uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); - uplink_priv = &uplink_rpriv->uplink_priv; - if (!mlx5e_tc_ct_restore_flow(uplink_priv, skb, tuple_id)) - return false; + switch (type) { + case TC_SETUP_CLSFLOWER: + return mlx5e_setup_tc_cls_flower(priv, type_data, flags); + default: + return -EOPNOTSUPP; } - - tunnel_moffset = mlx5e_tc_attr_to_reg_mappings[TUNNEL_TO_REG].moffset; - tunnel_id = reg_c1 >> (8 * tunnel_moffset); - return mlx5e_restore_tunnel(priv, skb, tc_priv, tunnel_id); -#endif /* CONFIG_NET_TC_SKB_EXT */ - - return true; -} - -void mlx5_tc_rep_post_napi_receive(struct mlx5e_tc_update_priv *tc_priv) -{ - if (tc_priv->tun_dev) - dev_put(tc_priv->tun_dev); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h index abdcfa4c4e0e..5c330b0cae21 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h @@ -34,11 +34,41 @@ #define __MLX5_EN_TC_H__ #include <net/pkt_cls.h> +#include "en.h" #define MLX5E_TC_FLOW_ID_MASK 0x0000ffff #ifdef CONFIG_MLX5_ESWITCH +struct tunnel_match_key { + struct flow_dissector_key_control enc_control; + struct 
flow_dissector_key_keyid enc_key_id; + struct flow_dissector_key_ports enc_tp; + struct flow_dissector_key_ip enc_ip; + union { + struct flow_dissector_key_ipv4_addrs enc_ipv4; + struct flow_dissector_key_ipv6_addrs enc_ipv6; + }; + + int filter_ifindex; +}; + +struct tunnel_match_enc_opts { + struct flow_dissector_key_enc_opts key; + struct flow_dissector_key_enc_opts mask; +}; + +/* Tunnel_id mapping is TUNNEL_INFO_BITS + ENC_OPTS_BITS. + * Upper TUNNEL_INFO_BITS for general tunnel info. + * Lower ENC_OPTS_BITS bits for enc_opts. + */ +#define TUNNEL_INFO_BITS 6 +#define TUNNEL_INFO_BITS_MASK GENMASK(TUNNEL_INFO_BITS - 1, 0) +#define ENC_OPTS_BITS 2 +#define ENC_OPTS_BITS_MASK GENMASK(ENC_OPTS_BITS - 1, 0) +#define TUNNEL_ID_BITS (TUNNEL_INFO_BITS + ENC_OPTS_BITS) +#define TUNNEL_ID_MASK GENMASK(TUNNEL_ID_BITS - 1, 0) + enum { MLX5E_TC_FLAG_INGRESS_BIT, MLX5E_TC_FLAG_EGRESS_BIT, @@ -50,9 +80,6 @@ enum { #define MLX5_TC_FLAG(flag) BIT(MLX5E_TC_FLAG_##flag##_BIT) -int mlx5e_tc_nic_init(struct mlx5e_priv *priv); -void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv); - int mlx5e_tc_esw_init(struct rhashtable *tc_ht); void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht); @@ -119,11 +146,6 @@ struct mlx5e_tc_update_priv { struct net_device *tun_dev; }; -bool mlx5e_tc_rep_update_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb, - struct mlx5e_tc_update_priv *tc_priv); - -void mlx5_tc_rep_post_napi_receive(struct mlx5e_tc_update_priv *tc_priv); - struct mlx5e_tc_mod_hdr_acts { int num_actions; int max_actions; @@ -148,6 +170,26 @@ void dealloc_mod_hdr_actions(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts); struct mlx5e_tc_flow; u32 mlx5e_tc_get_flow_tun_id(struct mlx5e_tc_flow *flow); +void mlx5e_tc_set_ethertype(struct mlx5_core_dev *mdev, + struct flow_match_basic *match, bool outer, + void *headers_c, void *headers_v); + +#if IS_ENABLED(CONFIG_MLX5_CLS_ACT) + +int mlx5e_tc_nic_init(struct mlx5e_priv *priv); +void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv); + +int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, + void *cb_priv); + +#else /* CONFIG_MLX5_CLS_ACT */ +static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; } +static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {} +static inline int +mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv) +{ return -EOPNOTSUPP; } +#endif /* CONFIG_MLX5_CLS_ACT */ + #else /* CONFIG_MLX5_ESWITCH */ static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; } static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {} @@ -156,6 +198,10 @@ static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv, { return 0; } + +static inline int +mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv) +{ return -EOPNOTSUPP; } #endif #endif /* __MLX5_EN_TC_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index f79454746d0d..6d406063aca4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -529,10 +529,9 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq) { struct mlx5e_tx_wqe_info *wi; + u32 dma_fifo_cc, nbytes = 0; + u16 ci, sqcc, npkts = 0; struct sk_buff *skb; - u32 dma_fifo_cc; - u16 sqcc; - u16 ci; int i; sqcc = sq->cc; @@ -557,11 +556,15 @@ void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq) } dev_kfree_skb_any(skb); + npkts++; + nbytes += 
wi->num_bytes; sqcc += wi->num_wqebbs; } sq->dma_fifo_cc = dma_fifo_cc; sq->cc = sqcc; + + netdev_tx_completed_queue(sq->txq, npkts, nbytes); } #ifdef CONFIG_MLX5_CORE_IPOIB diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 4d974b5405b5..31ef9f8420c8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -609,11 +609,13 @@ static int create_async_eqs(struct mlx5_core_dev *dev) .nent = MLX5_NUM_CMD_EQE, .mask[0] = 1ull << MLX5_EVENT_TYPE_CMD, }; + mlx5_cmd_allowed_opcode(dev, MLX5_CMD_OP_CREATE_EQ); err = setup_async_eq(dev, &table->cmd_eq, ¶m, "cmd"); if (err) goto err1; mlx5_cmd_use_events(dev); + mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL); param = (struct mlx5_eq_param) { .irq_index = 0, @@ -643,6 +645,7 @@ err2: mlx5_cmd_use_polling(dev); cleanup_async_eq(dev, &table->cmd_eq, "cmd"); err1: + mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL); mlx5_eq_notifier_unregister(dev, &table->cq_err_nb); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c new file mode 100644 index 000000000000..d46f8b225ebe --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c @@ -0,0 +1,170 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2020 Mellanox Technologies Inc. All rights reserved. */ + +#include "mlx5_core.h" +#include "eswitch.h" +#include "helper.h" +#include "lgcy.h" + +static void esw_acl_egress_lgcy_rules_destroy(struct mlx5_vport *vport) +{ + esw_acl_egress_vlan_destroy(vport); + if (!IS_ERR_OR_NULL(vport->egress.legacy.drop_rule)) { + mlx5_del_flow_rules(vport->egress.legacy.drop_rule); + vport->egress.legacy.drop_rule = NULL; + } +} + +static int esw_acl_egress_lgcy_groups_create(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + struct mlx5_core_dev *dev = esw->dev; + struct mlx5_flow_group *drop_grp; + u32 *flow_group_in; + int err = 0; + + err = esw_acl_egress_vlan_grp_create(esw, vport); + if (err) + return err; + + flow_group_in = kvzalloc(inlen, GFP_KERNEL); + if (!flow_group_in) { + err = -ENOMEM; + goto alloc_err; + } + + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1); + drop_grp = mlx5_create_flow_group(vport->egress.acl, flow_group_in); + if (IS_ERR(drop_grp)) { + err = PTR_ERR(drop_grp); + esw_warn(dev, "Failed to create E-Switch vport[%d] egress drop flow group, err(%d)\n", + vport->vport, err); + goto drop_grp_err; + } + + vport->egress.legacy.drop_grp = drop_grp; + kvfree(flow_group_in); + return 0; + +drop_grp_err: + kvfree(flow_group_in); +alloc_err: + esw_acl_egress_vlan_grp_destroy(vport); + return err; +} + +static void esw_acl_egress_lgcy_groups_destroy(struct mlx5_vport *vport) +{ + if (!IS_ERR_OR_NULL(vport->egress.legacy.drop_grp)) { + mlx5_destroy_flow_group(vport->egress.legacy.drop_grp); + vport->egress.legacy.drop_grp = NULL; + } + esw_acl_egress_vlan_grp_destroy(vport); +} + +int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) +{ + struct mlx5_flow_destination drop_ctr_dst = {}; + struct mlx5_flow_destination *dst = NULL; + struct mlx5_fc *drop_counter = NULL; + struct mlx5_flow_act flow_act = {}; + /* The egress acl table contains 2 rules: + * 1)Allow traffic with vlan_tag=vst_vlan_id + * 2)Drop all other 
traffic. + */ + int table_size = 2; + int dest_num = 0; + int err = 0; + + if (MLX5_CAP_ESW_EGRESS_ACL(esw->dev, flow_counter)) { + drop_counter = mlx5_fc_create(esw->dev, false); + if (IS_ERR(drop_counter)) + esw_warn(esw->dev, + "vport[%d] configure egress drop rule counter err(%ld)\n", + vport->vport, PTR_ERR(drop_counter)); + vport->egress.legacy.drop_counter = drop_counter; + } + + esw_acl_egress_lgcy_rules_destroy(vport); + + if (!vport->info.vlan && !vport->info.qos) { + esw_acl_egress_lgcy_cleanup(esw, vport); + return 0; + } + + if (!IS_ERR_OR_NULL(vport->egress.acl)) + return 0; + + vport->egress.acl = esw_acl_table_create(esw, vport->vport, + MLX5_FLOW_NAMESPACE_ESW_EGRESS, + table_size); + if (IS_ERR_OR_NULL(vport->egress.acl)) { + err = PTR_ERR(vport->egress.acl); + vport->egress.acl = NULL; + goto out; + } + + err = esw_acl_egress_lgcy_groups_create(esw, vport); + if (err) + goto out; + + esw_debug(esw->dev, + "vport[%d] configure egress rules, vlan(%d) qos(%d)\n", + vport->vport, vport->info.vlan, vport->info.qos); + + /* Allowed vlan rule */ + err = esw_egress_acl_vlan_create(esw, vport, NULL, vport->info.vlan, + MLX5_FLOW_CONTEXT_ACTION_ALLOW); + if (err) + goto out; + + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP; + + /* Attach egress drop flow counter */ + if (!IS_ERR_OR_NULL(drop_counter)) { + flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; + drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; + drop_ctr_dst.counter_id = mlx5_fc_id(drop_counter); + dst = &drop_ctr_dst; + dest_num++; + } + vport->egress.legacy.drop_rule = + mlx5_add_flow_rules(vport->egress.acl, NULL, + &flow_act, dst, dest_num); + if (IS_ERR(vport->egress.legacy.drop_rule)) { + err = PTR_ERR(vport->egress.legacy.drop_rule); + esw_warn(esw->dev, + "vport[%d] configure egress drop rule failed, err(%d)\n", + vport->vport, err); + vport->egress.legacy.drop_rule = NULL; + goto out; + } + + return err; + +out: + esw_acl_egress_lgcy_cleanup(esw, vport); + return err; +} + +void esw_acl_egress_lgcy_cleanup(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) +{ + if (IS_ERR_OR_NULL(vport->egress.acl)) + goto clean_drop_counter; + + esw_debug(esw->dev, "Destroy vport[%d] E-Switch egress ACL\n", vport->vport); + + esw_acl_egress_lgcy_rules_destroy(vport); + esw_acl_egress_lgcy_groups_destroy(vport); + esw_acl_egress_table_destroy(vport); + +clean_drop_counter: + if (!IS_ERR_OR_NULL(vport->egress.legacy.drop_counter)) { + mlx5_fc_destroy(esw->dev, vport->egress.legacy.drop_counter); + vport->egress.legacy.drop_counter = NULL; + } +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c new file mode 100644 index 000000000000..07b2acd7e6b3 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c @@ -0,0 +1,235 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2020 Mellanox Technologies Inc. All rights reserved. 
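
The two-FTE legacy egress table built by esw_acl_egress_lgcy_setup() above implements VST (VLAN Switch Tagging) admission: rule 1 allows traffic tagged with the configured VST VLAN, rule 2 drops and optionally counts everything else. A user-space sketch of the resulting packet predicate, with plain C types standing in for the hardware match:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Models the two-rule table: allow iff the packet carries the VST VLAN. */
static bool egress_vst_allows(bool has_cvlan, uint16_t vid, uint16_t vst_vid)
{
	return has_cvlan && vid == vst_vid;
}

int main(void)
{
	printf("%d %d\n", egress_vst_allows(true, 100, 100),
	       egress_vst_allows(false, 0, 100)); /* prints: 1 0 */
	return 0;
}
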
*/ + +#include "mlx5_core.h" +#include "eswitch.h" +#include "helper.h" +#include "ofld.h" + +static void esw_acl_egress_ofld_fwd2vport_destroy(struct mlx5_vport *vport) +{ + if (!vport->egress.offloads.fwd_rule) + return; + + mlx5_del_flow_rules(vport->egress.offloads.fwd_rule); + vport->egress.offloads.fwd_rule = NULL; +} + +static int esw_acl_egress_ofld_fwd2vport_create(struct mlx5_eswitch *esw, + struct mlx5_vport *vport, + struct mlx5_flow_destination *fwd_dest) +{ + struct mlx5_flow_act flow_act = {}; + int err = 0; + + esw_debug(esw->dev, "vport(%d) configure egress acl rule fwd2vport(%d)\n", + vport->vport, fwd_dest->vport.num); + + /* Delete the old egress forward-to-vport rule if any */ + esw_acl_egress_ofld_fwd2vport_destroy(vport); + + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + + vport->egress.offloads.fwd_rule = + mlx5_add_flow_rules(vport->egress.acl, NULL, + &flow_act, fwd_dest, 1); + if (IS_ERR(vport->egress.offloads.fwd_rule)) { + err = PTR_ERR(vport->egress.offloads.fwd_rule); + esw_warn(esw->dev, + "vport(%d) failed to add fwd2vport acl rule err(%d)\n", + vport->vport, err); + vport->egress.offloads.fwd_rule = NULL; + } + + return err; +} + +static int esw_acl_egress_ofld_rules_create(struct mlx5_eswitch *esw, + struct mlx5_vport *vport, + struct mlx5_flow_destination *fwd_dest) +{ + int err = 0; + int action; + + if (MLX5_CAP_GEN(esw->dev, prio_tag_required)) { + /* For prio tag mode, there is only 1 FTEs: + * 1) prio tag packets - pop the prio tag VLAN, allow + * Unmatched traffic is allowed by default + */ + esw_debug(esw->dev, + "vport[%d] configure prio tag egress rules\n", vport->vport); + + action = MLX5_FLOW_CONTEXT_ACTION_VLAN_POP; + action |= fwd_dest ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST : + MLX5_FLOW_CONTEXT_ACTION_ALLOW; + + /* prio tag vlan rule - pop it so vport receives untagged packets */ + err = esw_egress_acl_vlan_create(esw, vport, fwd_dest, 0, action); + if (err) + goto prio_err; + } + + if (fwd_dest) { + err = esw_acl_egress_ofld_fwd2vport_create(esw, vport, fwd_dest); + if (err) + goto fwd_err; + } + + return 0; + +fwd_err: + esw_acl_egress_vlan_destroy(vport); +prio_err: + return err; +} + +static void esw_acl_egress_ofld_rules_destroy(struct mlx5_vport *vport) +{ + esw_acl_egress_vlan_destroy(vport); + esw_acl_egress_ofld_fwd2vport_destroy(vport); +} + +static int esw_acl_egress_ofld_groups_create(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + struct mlx5_flow_group *fwd_grp; + u32 *flow_group_in; + u32 flow_index = 0; + int ret = 0; + + if (MLX5_CAP_GEN(esw->dev, prio_tag_required)) { + ret = esw_acl_egress_vlan_grp_create(esw, vport); + if (ret) + return ret; + + flow_index++; + } + + if (!mlx5_esw_acl_egress_fwd2vport_supported(esw)) + goto out; + + flow_group_in = kvzalloc(inlen, GFP_KERNEL); + if (!flow_group_in) { + ret = -ENOMEM; + goto fwd_grp_err; + } + + /* This group holds 1 FTE to forward all packets to other vport + * when bond vports is supported. 
+ */ + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index); + fwd_grp = mlx5_create_flow_group(vport->egress.acl, flow_group_in); + if (IS_ERR(fwd_grp)) { + ret = PTR_ERR(fwd_grp); + esw_warn(esw->dev, + "Failed to create vport[%d] egress fwd2vport flow group, err(%d)\n", + vport->vport, ret); + kvfree(flow_group_in); + goto fwd_grp_err; + } + vport->egress.offloads.fwd_grp = fwd_grp; + kvfree(flow_group_in); + return 0; + +fwd_grp_err: + esw_acl_egress_vlan_grp_destroy(vport); +out: + return ret; +} + +static void esw_acl_egress_ofld_groups_destroy(struct mlx5_vport *vport) +{ + if (!IS_ERR_OR_NULL(vport->egress.offloads.fwd_grp)) { + mlx5_destroy_flow_group(vport->egress.offloads.fwd_grp); + vport->egress.offloads.fwd_grp = NULL; + } + esw_acl_egress_vlan_grp_destroy(vport); +} + +int esw_acl_egress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport) +{ + int table_size = 0; + int err; + + if (!mlx5_esw_acl_egress_fwd2vport_supported(esw) && + !MLX5_CAP_GEN(esw->dev, prio_tag_required)) + return 0; + + esw_acl_egress_ofld_rules_destroy(vport); + + if (mlx5_esw_acl_egress_fwd2vport_supported(esw)) + table_size++; + if (MLX5_CAP_GEN(esw->dev, prio_tag_required)) + table_size++; + vport->egress.acl = esw_acl_table_create(esw, vport->vport, + MLX5_FLOW_NAMESPACE_ESW_EGRESS, table_size); + if (IS_ERR_OR_NULL(vport->egress.acl)) { + err = PTR_ERR(vport->egress.acl); + vport->egress.acl = NULL; + return err; + } + + err = esw_acl_egress_ofld_groups_create(esw, vport); + if (err) + goto group_err; + + esw_debug(esw->dev, "vport[%d] configure egress rules\n", vport->vport); + + err = esw_acl_egress_ofld_rules_create(esw, vport, NULL); + if (err) + goto rules_err; + + return 0; + +rules_err: + esw_acl_egress_ofld_groups_destroy(vport); +group_err: + esw_acl_egress_table_destroy(vport); + return err; +} + +void esw_acl_egress_ofld_cleanup(struct mlx5_vport *vport) +{ + esw_acl_egress_ofld_rules_destroy(vport); + esw_acl_egress_ofld_groups_destroy(vport); + esw_acl_egress_table_destroy(vport); +} + +int mlx5_esw_acl_egress_vport_bond(struct mlx5_eswitch *esw, u16 active_vport_num, + u16 passive_vport_num) +{ + struct mlx5_vport *passive_vport = mlx5_eswitch_get_vport(esw, passive_vport_num); + struct mlx5_vport *active_vport = mlx5_eswitch_get_vport(esw, active_vport_num); + struct mlx5_flow_destination fwd_dest = {}; + + if (IS_ERR(active_vport)) + return PTR_ERR(active_vport); + if (IS_ERR(passive_vport)) + return PTR_ERR(passive_vport); + + /* Cleanup and recreate rules WITHOUT fwd2vport of active vport */ + esw_acl_egress_ofld_rules_destroy(active_vport); + esw_acl_egress_ofld_rules_create(esw, active_vport, NULL); + + /* Cleanup and recreate all rules + fwd2vport rule of passive vport to forward */ + esw_acl_egress_ofld_rules_destroy(passive_vport); + fwd_dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; + fwd_dest.vport.num = active_vport_num; + fwd_dest.vport.vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id); + fwd_dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID; + + return esw_acl_egress_ofld_rules_create(esw, passive_vport, &fwd_dest); +} + +int mlx5_esw_acl_egress_vport_unbond(struct mlx5_eswitch *esw, u16 vport_num) +{ + struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num); + + if (IS_ERR(vport)) + return PTR_ERR(vport); + + esw_acl_egress_ofld_rules_destroy(vport); + return esw_acl_egress_ofld_rules_create(esw, vport, NULL); +} diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c new file mode 100644 index 000000000000..22f4c1c28006 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c @@ -0,0 +1,160 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2020 Mellanox Technologies Inc. All rights reserved. */ + +#include "mlx5_core.h" +#include "eswitch.h" +#include "helper.h" + +struct mlx5_flow_table * +esw_acl_table_create(struct mlx5_eswitch *esw, u16 vport_num, int ns, int size) +{ + struct mlx5_core_dev *dev = esw->dev; + struct mlx5_flow_namespace *root_ns; + struct mlx5_flow_table *acl; + int acl_supported; + int vport_index; + int err; + + acl_supported = (ns == MLX5_FLOW_NAMESPACE_ESW_INGRESS) ? + MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support) : + MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support); + + if (!acl_supported) + return ERR_PTR(-EOPNOTSUPP); + + esw_debug(dev, "Create vport[%d] %s ACL table\n", vport_num, + ns == MLX5_FLOW_NAMESPACE_ESW_INGRESS ? "ingress" : "egress"); + + vport_index = mlx5_eswitch_vport_num_to_index(esw, vport_num); + root_ns = mlx5_get_flow_vport_acl_namespace(dev, ns, vport_index); + if (!root_ns) { + esw_warn(dev, "Failed to get E-Switch root namespace for vport (%d)\n", + vport_num); + return ERR_PTR(-EOPNOTSUPP); + } + + acl = mlx5_create_vport_flow_table(root_ns, 0, size, 0, vport_num); + if (IS_ERR(acl)) { + err = PTR_ERR(acl); + esw_warn(dev, "vport[%d] create %s ACL table, err(%d)\n", vport_num, + ns == MLX5_FLOW_NAMESPACE_ESW_INGRESS ? "ingress" : "egress", err); + } + return acl; +} + +int esw_egress_acl_vlan_create(struct mlx5_eswitch *esw, + struct mlx5_vport *vport, + struct mlx5_flow_destination *fwd_dest, + u16 vlan_id, u32 flow_action) +{ + struct mlx5_flow_act flow_act = {}; + struct mlx5_flow_spec *spec; + int err = 0; + + if (vport->egress.allowed_vlan) + return -EEXIST; + + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) + return -ENOMEM; + + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag); + MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag); + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vlan_id); + + spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + flow_act.action = flow_action; + vport->egress.allowed_vlan = + mlx5_add_flow_rules(vport->egress.acl, spec, + &flow_act, fwd_dest, 0); + if (IS_ERR(vport->egress.allowed_vlan)) { + err = PTR_ERR(vport->egress.allowed_vlan); + esw_warn(esw->dev, + "vport[%d] configure egress vlan rule failed, err(%d)\n", + vport->vport, err); + vport->egress.allowed_vlan = NULL; + } + + kvfree(spec); + return err; +} + +void esw_acl_egress_vlan_destroy(struct mlx5_vport *vport) +{ + if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan)) { + mlx5_del_flow_rules(vport->egress.allowed_vlan); + vport->egress.allowed_vlan = NULL; + } +} + +int esw_acl_egress_vlan_grp_create(struct mlx5_eswitch *esw, struct mlx5_vport *vport) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + struct mlx5_flow_group *vlan_grp; + void *match_criteria; + u32 *flow_group_in; + int ret = 0; + + flow_group_in = kvzalloc(inlen, GFP_KERNEL); + if (!flow_group_in) + return -ENOMEM; + + MLX5_SET(create_flow_group_in, flow_group_in, + match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); + match_criteria = MLX5_ADDR_OF(create_flow_group_in, + flow_group_in, match_criteria); + 
MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag); + MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.first_vid); + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0); + + vlan_grp = mlx5_create_flow_group(vport->egress.acl, flow_group_in); + if (IS_ERR(vlan_grp)) { + ret = PTR_ERR(vlan_grp); + esw_warn(esw->dev, + "Failed to create E-Switch vport[%d] egress pop vlans flow group, err(%d)\n", + vport->vport, ret); + goto out; + } + vport->egress.vlan_grp = vlan_grp; + +out: + kvfree(flow_group_in); + return ret; +} + +void esw_acl_egress_vlan_grp_destroy(struct mlx5_vport *vport) +{ + if (!IS_ERR_OR_NULL(vport->egress.vlan_grp)) { + mlx5_destroy_flow_group(vport->egress.vlan_grp); + vport->egress.vlan_grp = NULL; + } +} + +void esw_acl_egress_table_destroy(struct mlx5_vport *vport) +{ + if (IS_ERR_OR_NULL(vport->egress.acl)) + return; + + mlx5_destroy_flow_table(vport->egress.acl); + vport->egress.acl = NULL; +} + +void esw_acl_ingress_table_destroy(struct mlx5_vport *vport) +{ + if (!vport->ingress.acl) + return; + + mlx5_destroy_flow_table(vport->ingress.acl); + vport->ingress.acl = NULL; +} + +void esw_acl_ingress_allow_rule_destroy(struct mlx5_vport *vport) +{ + if (!vport->ingress.allow_rule) + return; + + mlx5_del_flow_rules(vport->ingress.allow_rule); + vport->ingress.allow_rule = NULL; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.h new file mode 100644 index 000000000000..8dc4cab66a71 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2020 Mellanox Technologies Inc. All rights reserved. */ + +#ifndef __MLX5_ESWITCH_ACL_HELPER_H__ +#define __MLX5_ESWITCH_ACL_HELPER_H__ + +#include "eswitch.h" + +/* General acl helper functions */ +struct mlx5_flow_table * +esw_acl_table_create(struct mlx5_eswitch *esw, u16 vport_num, int ns, int size); + +/* Egress acl helper functions */ +void esw_acl_egress_table_destroy(struct mlx5_vport *vport); +int esw_egress_acl_vlan_create(struct mlx5_eswitch *esw, struct mlx5_vport *vport, + struct mlx5_flow_destination *fwd_dest, + u16 vlan_id, u32 flow_action); +void esw_acl_egress_vlan_destroy(struct mlx5_vport *vport); +int esw_acl_egress_vlan_grp_create(struct mlx5_eswitch *esw, struct mlx5_vport *vport); +void esw_acl_egress_vlan_grp_destroy(struct mlx5_vport *vport); + +/* Ingress acl helper functions */ +void esw_acl_ingress_table_destroy(struct mlx5_vport *vport); +void esw_acl_ingress_allow_rule_destroy(struct mlx5_vport *vport); + +#endif /* __MLX5_ESWITCH_ACL_HELPER_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c new file mode 100644 index 000000000000..9bda4fe2eafa --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c @@ -0,0 +1,279 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2020 Mellanox Technologies Inc. All rights reserved. 
*/ + +#include "mlx5_core.h" +#include "eswitch.h" +#include "helper.h" +#include "lgcy.h" + +static void esw_acl_ingress_lgcy_rules_destroy(struct mlx5_vport *vport) +{ + if (vport->ingress.legacy.drop_rule) { + mlx5_del_flow_rules(vport->ingress.legacy.drop_rule); + vport->ingress.legacy.drop_rule = NULL; + } + esw_acl_ingress_allow_rule_destroy(vport); +} + +static int esw_acl_ingress_lgcy_groups_create(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + struct mlx5_core_dev *dev = esw->dev; + struct mlx5_flow_group *g; + void *match_criteria; + u32 *flow_group_in; + int err; + + flow_group_in = kvzalloc(inlen, GFP_KERNEL); + if (!flow_group_in) + return -ENOMEM; + + match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria); + + MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, + MLX5_MATCH_OUTER_HEADERS); + MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag); + MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16); + MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0); + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0); + + g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in); + if (IS_ERR(g)) { + err = PTR_ERR(g); + esw_warn(dev, "vport[%d] ingress create untagged spoofchk flow group, err(%d)\n", + vport->vport, err); + goto spoof_err; + } + vport->ingress.legacy.allow_untagged_spoofchk_grp = g; + + memset(flow_group_in, 0, inlen); + MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, + MLX5_MATCH_OUTER_HEADERS); + MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag); + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1); + + g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in); + if (IS_ERR(g)) { + err = PTR_ERR(g); + esw_warn(dev, "vport[%d] ingress create untagged flow group, err(%d)\n", + vport->vport, err); + goto untagged_err; + } + vport->ingress.legacy.allow_untagged_only_grp = g; + + memset(flow_group_in, 0, inlen); + MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, + MLX5_MATCH_OUTER_HEADERS); + MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16); + MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0); + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 2); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2); + + g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in); + if (IS_ERR(g)) { + err = PTR_ERR(g); + esw_warn(dev, "vport[%d] ingress create spoofchk flow group, err(%d)\n", + vport->vport, err); + goto allow_spoof_err; + } + vport->ingress.legacy.allow_spoofchk_only_grp = g; + + memset(flow_group_in, 0, inlen); + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 3); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3); + + g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in); + if (IS_ERR(g)) { + err = PTR_ERR(g); + esw_warn(dev, "vport[%d] ingress create drop flow group, err(%d)\n", + vport->vport, err); + goto drop_err; + } + vport->ingress.legacy.drop_grp = g; + kvfree(flow_group_in); + return 0; + +drop_err: + if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_spoofchk_only_grp)) { + 
mlx5_destroy_flow_group(vport->ingress.legacy.allow_spoofchk_only_grp); + vport->ingress.legacy.allow_spoofchk_only_grp = NULL; + } +allow_spoof_err: + if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_untagged_only_grp)) { + mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_only_grp); + vport->ingress.legacy.allow_untagged_only_grp = NULL; + } +untagged_err: + if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_untagged_spoofchk_grp)) { + mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_spoofchk_grp); + vport->ingress.legacy.allow_untagged_spoofchk_grp = NULL; + } +spoof_err: + kvfree(flow_group_in); + return err; +} + +static void esw_acl_ingress_lgcy_groups_destroy(struct mlx5_vport *vport) +{ + if (vport->ingress.legacy.allow_spoofchk_only_grp) { + mlx5_destroy_flow_group(vport->ingress.legacy.allow_spoofchk_only_grp); + vport->ingress.legacy.allow_spoofchk_only_grp = NULL; + } + if (vport->ingress.legacy.allow_untagged_only_grp) { + mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_only_grp); + vport->ingress.legacy.allow_untagged_only_grp = NULL; + } + if (vport->ingress.legacy.allow_untagged_spoofchk_grp) { + mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_spoofchk_grp); + vport->ingress.legacy.allow_untagged_spoofchk_grp = NULL; + } + if (vport->ingress.legacy.drop_grp) { + mlx5_destroy_flow_group(vport->ingress.legacy.drop_grp); + vport->ingress.legacy.drop_grp = NULL; + } +} + +int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) +{ + struct mlx5_flow_destination drop_ctr_dst = {}; + struct mlx5_flow_destination *dst = NULL; + struct mlx5_flow_act flow_act = {}; + struct mlx5_flow_spec *spec = NULL; + struct mlx5_fc *counter = NULL; + /* The ingress acl table contains 4 groups + * (2 active rules at the same time - + * 1 allow rule from one of the first 3 groups. + * 1 drop rule from the last group): + * 1)Allow untagged traffic with smac=original mac. + * 2)Allow untagged traffic. + * 3)Allow traffic with smac=original mac. + * 4)Drop all other traffic. 
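
The four ingress groups enumerated in the comment above reduce to one admission predicate: when VLAN/QoS is configured the packet must arrive untagged, and when spoof checking is configured its source MAC must equal the MAC administratively assigned to the vport. A self-contained sketch of that logic, using plain C types in place of the fte_match_param criteria:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

static bool ingress_lgcy_allows(bool vlan_or_qos, bool spoofchk,
				bool pkt_tagged, const uint8_t smac[6],
				const uint8_t vport_mac[6])
{
	if (vlan_or_qos && pkt_tagged)
		return false;			/* must be untagged */
	if (spoofchk && memcmp(smac, vport_mac, 6) != 0)
		return false;			/* spoofed source MAC */
	return true;				/* else: allow rule hits */
}

int main(void)
{
	uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint8_t bad[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x66 };

	printf("%d %d\n", ingress_lgcy_allows(true, true, false, mac, mac),
	       ingress_lgcy_allows(true, true, false, bad, mac)); /* 1 0 */
	return 0;
}
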
+ */ + int table_size = 4; + int dest_num = 0; + int err = 0; + u8 *smac_v; + + esw_acl_ingress_lgcy_rules_destroy(vport); + + if (MLX5_CAP_ESW_INGRESS_ACL(esw->dev, flow_counter)) { + counter = mlx5_fc_create(esw->dev, false); + if (IS_ERR(counter)) + esw_warn(esw->dev, + "vport[%d] configure ingress drop rule counter failed\n", + vport->vport); + vport->ingress.legacy.drop_counter = counter; + } + + if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) { + esw_acl_ingress_lgcy_cleanup(esw, vport); + return 0; + } + + if (!vport->ingress.acl) { + vport->ingress.acl = esw_acl_table_create(esw, vport->vport, + MLX5_FLOW_NAMESPACE_ESW_INGRESS, + table_size); + if (IS_ERR_OR_NULL(vport->ingress.acl)) { + err = PTR_ERR(vport->ingress.acl); + vport->ingress.acl = NULL; + return err; + } + + err = esw_acl_ingress_lgcy_groups_create(esw, vport); + if (err) + goto out; + } + + esw_debug(esw->dev, + "vport[%d] configure ingress rules, vlan(%d) qos(%d)\n", + vport->vport, vport->info.vlan, vport->info.qos); + + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) { + err = -ENOMEM; + goto out; + } + + if (vport->info.vlan || vport->info.qos) + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, + outer_headers.cvlan_tag); + + if (vport->info.spoofchk) { + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, + outer_headers.smac_47_16); + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, + outer_headers.smac_15_0); + smac_v = MLX5_ADDR_OF(fte_match_param, + spec->match_value, + outer_headers.smac_47_16); + ether_addr_copy(smac_v, vport->info.mac); + } + + /* Create ingress allow rule */ + memset(spec, 0, sizeof(*spec)); + spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW; + vport->ingress.allow_rule = mlx5_add_flow_rules(vport->ingress.acl, spec, + &flow_act, NULL, 0); + if (IS_ERR(vport->ingress.allow_rule)) { + err = PTR_ERR(vport->ingress.allow_rule); + esw_warn(esw->dev, + "vport[%d] configure ingress allow rule, err(%d)\n", + vport->vport, err); + vport->ingress.allow_rule = NULL; + goto out; + } + + memset(&flow_act, 0, sizeof(flow_act)); + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP; + /* Attach drop flow counter */ + if (counter) { + flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; + drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; + drop_ctr_dst.counter_id = mlx5_fc_id(counter); + dst = &drop_ctr_dst; + dest_num++; + } + vport->ingress.legacy.drop_rule = + mlx5_add_flow_rules(vport->ingress.acl, NULL, + &flow_act, dst, dest_num); + if (IS_ERR(vport->ingress.legacy.drop_rule)) { + err = PTR_ERR(vport->ingress.legacy.drop_rule); + esw_warn(esw->dev, + "vport[%d] configure ingress drop rule, err(%d)\n", + vport->vport, err); + vport->ingress.legacy.drop_rule = NULL; + goto out; + } + kvfree(spec); + return 0; + +out: + esw_acl_ingress_lgcy_cleanup(esw, vport); + kvfree(spec); + return err; +} + +void esw_acl_ingress_lgcy_cleanup(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) +{ + if (IS_ERR_OR_NULL(vport->ingress.acl)) + goto clean_drop_counter; + + esw_debug(esw->dev, "Destroy vport[%d] E-Switch ingress ACL\n", vport->vport); + + esw_acl_ingress_lgcy_rules_destroy(vport); + esw_acl_ingress_lgcy_groups_destroy(vport); + esw_acl_ingress_table_destroy(vport); + +clean_drop_counter: + if (!IS_ERR_OR_NULL(vport->ingress.legacy.drop_counter)) { + mlx5_fc_destroy(esw->dev, vport->ingress.legacy.drop_counter); + vport->ingress.legacy.drop_counter = NULL; + } +} diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c new file mode 100644 index 000000000000..4e55d7225a26 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c @@ -0,0 +1,322 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2020 Mellanox Technologies Inc. All rights reserved. */ + +#include "mlx5_core.h" +#include "eswitch.h" +#include "helper.h" +#include "ofld.h" + +static bool +esw_acl_ingress_prio_tag_enabled(const struct mlx5_eswitch *esw, + const struct mlx5_vport *vport) +{ + return (MLX5_CAP_GEN(esw->dev, prio_tag_required) && + mlx5_eswitch_is_vf_vport(esw, vport->vport)); +} + +static int esw_acl_ingress_prio_tag_create(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) +{ + struct mlx5_flow_act flow_act = {}; + struct mlx5_flow_spec *spec; + int err = 0; + + /* For prio tag mode, there is only 1 FTEs: + * 1) Untagged packets - push prio tag VLAN and modify metadata if + * required, allow + * Unmatched traffic is allowed by default + */ + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) + return -ENOMEM; + + /* Untagged packets - push prio tag VLAN, allow */ + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 0); + spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH | + MLX5_FLOW_CONTEXT_ACTION_ALLOW; + flow_act.vlan[0].ethtype = ETH_P_8021Q; + flow_act.vlan[0].vid = 0; + flow_act.vlan[0].prio = 0; + + if (vport->ingress.offloads.modify_metadata_rule) { + flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; + flow_act.modify_hdr = vport->ingress.offloads.modify_metadata; + } + + vport->ingress.allow_rule = mlx5_add_flow_rules(vport->ingress.acl, spec, + &flow_act, NULL, 0); + if (IS_ERR(vport->ingress.allow_rule)) { + err = PTR_ERR(vport->ingress.allow_rule); + esw_warn(esw->dev, + "vport[%d] configure ingress untagged allow rule, err(%d)\n", + vport->vport, err); + vport->ingress.allow_rule = NULL; + } + + kvfree(spec); + return err; +} + +static int esw_acl_ingress_mod_metadata_create(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) +{ + u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {}; + struct mlx5_flow_act flow_act = {}; + int err = 0; + u32 key; + + key = mlx5_eswitch_get_vport_metadata_for_match(esw, vport->vport); + key >>= ESW_SOURCE_PORT_METADATA_OFFSET; + + MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET); + MLX5_SET(set_action_in, action, field, + MLX5_ACTION_IN_FIELD_METADATA_REG_C_0); + MLX5_SET(set_action_in, action, data, key); + MLX5_SET(set_action_in, action, offset, + ESW_SOURCE_PORT_METADATA_OFFSET); + MLX5_SET(set_action_in, action, length, + ESW_SOURCE_PORT_METADATA_BITS); + + vport->ingress.offloads.modify_metadata = + mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS, + 1, action); + if (IS_ERR(vport->ingress.offloads.modify_metadata)) { + err = PTR_ERR(vport->ingress.offloads.modify_metadata); + esw_warn(esw->dev, + "failed to alloc modify header for vport %d ingress acl (%d)\n", + vport->vport, err); + return err; + } + + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_ALLOW; + flow_act.modify_hdr = vport->ingress.offloads.modify_metadata; + vport->ingress.offloads.modify_metadata_rule = + mlx5_add_flow_rules(vport->ingress.acl, + NULL, &flow_act, NULL, 0); + if 
(IS_ERR(vport->ingress.offloads.modify_metadata_rule)) { + err = PTR_ERR(vport->ingress.offloads.modify_metadata_rule); + esw_warn(esw->dev, + "failed to add setting metadata rule for vport %d ingress acl, err(%d)\n", + vport->vport, err); + mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata); + vport->ingress.offloads.modify_metadata_rule = NULL; + } + return err; +} + +static void esw_acl_ingress_mod_metadata_destroy(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) +{ + if (!vport->ingress.offloads.modify_metadata_rule) + return; + + mlx5_del_flow_rules(vport->ingress.offloads.modify_metadata_rule); + mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata); + vport->ingress.offloads.modify_metadata_rule = NULL; +} + +static int esw_acl_ingress_ofld_rules_create(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) +{ + int err; + + if (mlx5_eswitch_vport_match_metadata_enabled(esw)) { + err = esw_acl_ingress_mod_metadata_create(esw, vport); + if (err) { + esw_warn(esw->dev, + "vport(%d) create ingress modify metadata, err(%d)\n", + vport->vport, err); + return err; + } + } + + if (esw_acl_ingress_prio_tag_enabled(esw, vport)) { + err = esw_acl_ingress_prio_tag_create(esw, vport); + if (err) { + esw_warn(esw->dev, + "vport(%d) create ingress prio tag rule, err(%d)\n", + vport->vport, err); + goto prio_tag_err; + } + } + + return 0; + +prio_tag_err: + esw_acl_ingress_mod_metadata_destroy(esw, vport); + return err; +} + +static void esw_acl_ingress_ofld_rules_destroy(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) +{ + esw_acl_ingress_allow_rule_destroy(vport); + esw_acl_ingress_mod_metadata_destroy(esw, vport); +} + +static int esw_acl_ingress_ofld_groups_create(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + struct mlx5_flow_group *g; + void *match_criteria; + u32 *flow_group_in; + u32 flow_index = 0; + int ret = 0; + + flow_group_in = kvzalloc(inlen, GFP_KERNEL); + if (!flow_group_in) + return -ENOMEM; + + if (esw_acl_ingress_prio_tag_enabled(esw, vport)) { + /* This group is to hold FTE to match untagged packets when prio_tag + * is enabled. + */ + match_criteria = MLX5_ADDR_OF(create_flow_group_in, + flow_group_in, match_criteria); + MLX5_SET(create_flow_group_in, flow_group_in, + match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); + MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag); + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index); + + g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in); + if (IS_ERR(g)) { + ret = PTR_ERR(g); + esw_warn(esw->dev, "vport[%d] ingress create untagged flow group, err(%d)\n", + vport->vport, ret); + goto prio_tag_err; + } + vport->ingress.offloads.metadata_prio_tag_grp = g; + flow_index++; + } + + if (mlx5_eswitch_vport_match_metadata_enabled(esw)) { + /* This group holds an FTE with no match to add metadata for + * tagged packets if prio-tag is enabled, or for all untagged + * traffic in case prio-tag is disabled. 
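
The MLX5_ACTION_TYPE_SET modify-header action built in esw_acl_ingress_mod_metadata_create() above asks the hardware to write ESW_SOURCE_PORT_METADATA_BITS bits of the per-vport key into metadata register C0 at ESW_SOURCE_PORT_METADATA_OFFSET. A user-space model of that offset/length field insert; the 16/16 values in main() are illustrative, not the driver's actual constants.

#include <stdint.h>
#include <stdio.h>

/* Insert 'val' into 'reg' as a field of 'len' bits starting at bit 'off',
 * mirroring the set_action_in {field, offset, length} semantics. */
static uint32_t set_field(uint32_t reg, uint32_t val, unsigned int off,
			  unsigned int len)
{
	uint32_t mask = (len < 32 ? (1u << len) - 1u : ~0u) << off;

	return (reg & ~mask) | ((val << off) & mask);
}

int main(void)
{
	/* e.g. a 16-bit source-port field placed at bit 16 of REG_C_0 */
	printf("0x%08x\n", set_field(0, 0xabcd, 16, 16)); /* 0xabcd0000 */
	return 0;
}

This is also why the kernel code shifts the match key right by the offset before programming it: mlx5_eswitch_get_vport_metadata_for_match() returns the value already positioned for matching, while the set action re-applies the offset itself.
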
+ */ + memset(flow_group_in, 0, inlen); + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index); + + g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in); + if (IS_ERR(g)) { + ret = PTR_ERR(g); + esw_warn(esw->dev, "vport[%d] ingress create drop flow group, err(%d)\n", + vport->vport, ret); + goto metadata_err; + } + vport->ingress.offloads.metadata_allmatch_grp = g; + } + + kvfree(flow_group_in); + return 0; + +metadata_err: + if (!IS_ERR_OR_NULL(vport->ingress.offloads.metadata_prio_tag_grp)) { + mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp); + vport->ingress.offloads.metadata_prio_tag_grp = NULL; + } +prio_tag_err: + kvfree(flow_group_in); + return ret; +} + +static void esw_acl_ingress_ofld_groups_destroy(struct mlx5_vport *vport) +{ + if (vport->ingress.offloads.metadata_allmatch_grp) { + mlx5_destroy_flow_group(vport->ingress.offloads.metadata_allmatch_grp); + vport->ingress.offloads.metadata_allmatch_grp = NULL; + } + + if (vport->ingress.offloads.metadata_prio_tag_grp) { + mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp); + vport->ingress.offloads.metadata_prio_tag_grp = NULL; + } +} + +int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) +{ + int num_ftes = 0; + int err; + + if (!mlx5_eswitch_vport_match_metadata_enabled(esw) && + !esw_acl_ingress_prio_tag_enabled(esw, vport)) + return 0; + + esw_acl_ingress_allow_rule_destroy(vport); + + if (mlx5_eswitch_vport_match_metadata_enabled(esw)) + num_ftes++; + if (esw_acl_ingress_prio_tag_enabled(esw, vport)) + num_ftes++; + + vport->ingress.acl = esw_acl_table_create(esw, vport->vport, + MLX5_FLOW_NAMESPACE_ESW_INGRESS, + num_ftes); + if (IS_ERR_OR_NULL(vport->ingress.acl)) { + err = PTR_ERR(vport->ingress.acl); + vport->ingress.acl = NULL; + return err; + } + + err = esw_acl_ingress_ofld_groups_create(esw, vport); + if (err) + goto group_err; + + esw_debug(esw->dev, + "vport[%d] configure ingress rules\n", vport->vport); + + err = esw_acl_ingress_ofld_rules_create(esw, vport); + if (err) + goto rules_err; + + return 0; + +rules_err: + esw_acl_ingress_ofld_groups_destroy(vport); +group_err: + esw_acl_ingress_table_destroy(vport); + return err; +} + +void esw_acl_ingress_ofld_cleanup(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) +{ + esw_acl_ingress_ofld_rules_destroy(esw, vport); + esw_acl_ingress_ofld_groups_destroy(vport); + esw_acl_ingress_table_destroy(vport); +} + +/* Caller must hold rtnl_lock */ +int mlx5_esw_acl_ingress_vport_bond_update(struct mlx5_eswitch *esw, u16 vport_num, + u32 metadata) +{ + struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num); + int err; + + if (WARN_ON_ONCE(IS_ERR(vport))) { + esw_warn(esw->dev, "vport(%d) invalid!\n", vport_num); + err = PTR_ERR(vport); + goto out; + } + + esw_acl_ingress_ofld_rules_destroy(esw, vport); + + vport->metadata = metadata ? 
metadata : vport->default_metadata; + + /* Recreate ingress acl rules with vport->metadata */ + err = esw_acl_ingress_ofld_rules_create(esw, vport); + if (err) + goto out; + + return 0; + +out: + vport->metadata = vport->default_metadata; + return err; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/lgcy.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/lgcy.h new file mode 100644 index 000000000000..44c152da3d83 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/lgcy.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2020 Mellanox Technologies Inc. All rights reserved. */ + +#ifndef __MLX5_ESWITCH_ACL_LGCY_H__ +#define __MLX5_ESWITCH_ACL_LGCY_H__ + +#include "eswitch.h" + +/* Eswitch acl egress external APIs */ +int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport); +void esw_acl_egress_lgcy_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport); + +/* Eswitch acl ingress external APIs */ +int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport); +void esw_acl_ingress_lgcy_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport); + +#endif /* __MLX5_ESWITCH_ACL_LGCY_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ofld.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ofld.h new file mode 100644 index 000000000000..c57869b93d60 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ofld.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2020 Mellanox Technologies Inc. All rights reserved. */ + +#ifndef __MLX5_ESWITCH_ACL_OFLD_H__ +#define __MLX5_ESWITCH_ACL_OFLD_H__ + +#include "eswitch.h" + +/* Eswitch acl egress external APIs */ +int esw_acl_egress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport); +void esw_acl_egress_ofld_cleanup(struct mlx5_vport *vport); +int mlx5_esw_acl_egress_vport_bond(struct mlx5_eswitch *esw, u16 active_vport_num, + u16 passive_vport_num); +int mlx5_esw_acl_egress_vport_unbond(struct mlx5_eswitch *esw, u16 vport_num); + +static inline bool mlx5_esw_acl_egress_fwd2vport_supported(struct mlx5_eswitch *esw) +{ + return esw && esw->mode == MLX5_ESWITCH_OFFLOADS && + mlx5_eswitch_vport_match_metadata_enabled(esw) && + MLX5_CAP_ESW_FLOWTABLE(esw->dev, egress_acl_forward_to_vport); +} + +/* Eswitch acl ingress external APIs */ +int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport); +void esw_acl_ingress_ofld_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport); +int mlx5_esw_acl_ingress_vport_bond_update(struct mlx5_eswitch *esw, u16 vport_num, + u32 metadata); + +#endif /* __MLX5_ESWITCH_ACL_OFLD_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/chains.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/chains.h index f8c4239846ea..7679ac359e31 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/chains.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/chains.h @@ -6,6 +6,8 @@ #include "eswitch.h" +#if IS_ENABLED(CONFIG_MLX5_CLS_ACT) + bool mlx5_esw_chains_prios_supported(struct mlx5_eswitch *esw); bool @@ -46,4 +48,21 @@ void mlx5_esw_chains_destroy(struct mlx5_eswitch *esw); int mlx5_eswitch_get_chain_for_tag(struct mlx5_eswitch *esw, u32 tag, u32 *chain); +#else /* CONFIG_MLX5_CLS_ACT */ + +static inline struct mlx5_flow_table * +mlx5_esw_chains_get_table(struct mlx5_eswitch *esw, u32 chain, u32 prio, + u32 level) { return ERR_PTR(-EOPNOTSUPP); } +static inline void 
+mlx5_esw_chains_put_table(struct mlx5_eswitch *esw, u32 chain, u32 prio, + u32 level) {} + +static inline struct mlx5_flow_table * +mlx5_esw_chains_get_tc_end_ft(struct mlx5_eswitch *esw) { return ERR_PTR(-EOPNOTSUPP); } + +static inline int mlx5_esw_chains_create(struct mlx5_eswitch *esw) { return 0; } +static inline void mlx5_esw_chains_destroy(struct mlx5_eswitch *esw) {} + +#endif /* CONFIG_MLX5_CLS_ACT */ + #endif /* __ML5_ESW_CHAINS_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index c5eb4e7754a9..1116ab9bea6c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -35,6 +35,7 @@ #include <linux/mlx5/mlx5_ifc.h> #include <linux/mlx5/vport.h> #include <linux/mlx5/fs.h> +#include "esw/acl/lgcy.h" #include "mlx5_core.h" #include "lib/eq.h" #include "eswitch.h" @@ -936,512 +937,6 @@ static void esw_vport_change_handler(struct work_struct *work) mutex_unlock(&esw->state_lock); } -int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw, - struct mlx5_vport *vport) -{ - int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); - struct mlx5_flow_group *vlan_grp = NULL; - struct mlx5_flow_group *drop_grp = NULL; - struct mlx5_core_dev *dev = esw->dev; - struct mlx5_flow_namespace *root_ns; - struct mlx5_flow_table *acl; - void *match_criteria; - u32 *flow_group_in; - /* The egress acl table contains 2 rules: - * 1)Allow traffic with vlan_tag=vst_vlan_id - * 2)Drop all other traffic. - */ - int table_size = 2; - int err = 0; - - if (!MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) - return -EOPNOTSUPP; - - if (!IS_ERR_OR_NULL(vport->egress.acl)) - return 0; - - esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n", - vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size)); - - root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS, - mlx5_eswitch_vport_num_to_index(esw, vport->vport)); - if (!root_ns) { - esw_warn(dev, "Failed to get E-Switch egress flow namespace for vport (%d)\n", vport->vport); - return -EOPNOTSUPP; - } - - flow_group_in = kvzalloc(inlen, GFP_KERNEL); - if (!flow_group_in) - return -ENOMEM; - - acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport); - if (IS_ERR(acl)) { - err = PTR_ERR(acl); - esw_warn(dev, "Failed to create E-Switch vport[%d] egress flow Table, err(%d)\n", - vport->vport, err); - goto out; - } - - MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); - match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria); - MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag); - MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.first_vid); - MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); - MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0); - - vlan_grp = mlx5_create_flow_group(acl, flow_group_in); - if (IS_ERR(vlan_grp)) { - err = PTR_ERR(vlan_grp); - esw_warn(dev, "Failed to create E-Switch vport[%d] egress allowed vlans flow group, err(%d)\n", - vport->vport, err); - goto out; - } - - memset(flow_group_in, 0, inlen); - MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1); - MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1); - drop_grp = mlx5_create_flow_group(acl, flow_group_in); - if (IS_ERR(drop_grp)) { - err = PTR_ERR(drop_grp); - esw_warn(dev, "Failed to create E-Switch vport[%d] egress drop flow group, 
err(%d)\n", - vport->vport, err); - goto out; - } - - vport->egress.acl = acl; - vport->egress.drop_grp = drop_grp; - vport->egress.allowed_vlans_grp = vlan_grp; -out: - kvfree(flow_group_in); - if (err && !IS_ERR_OR_NULL(vlan_grp)) - mlx5_destroy_flow_group(vlan_grp); - if (err && !IS_ERR_OR_NULL(acl)) - mlx5_destroy_flow_table(acl); - return err; -} - -void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw, - struct mlx5_vport *vport) -{ - if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan)) { - mlx5_del_flow_rules(vport->egress.allowed_vlan); - vport->egress.allowed_vlan = NULL; - } - - if (!IS_ERR_OR_NULL(vport->egress.legacy.drop_rule)) { - mlx5_del_flow_rules(vport->egress.legacy.drop_rule); - vport->egress.legacy.drop_rule = NULL; - } -} - -void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw, - struct mlx5_vport *vport) -{ - if (IS_ERR_OR_NULL(vport->egress.acl)) - return; - - esw_debug(esw->dev, "Destroy vport[%d] E-Switch egress ACL\n", vport->vport); - - esw_vport_cleanup_egress_rules(esw, vport); - mlx5_destroy_flow_group(vport->egress.allowed_vlans_grp); - mlx5_destroy_flow_group(vport->egress.drop_grp); - mlx5_destroy_flow_table(vport->egress.acl); - vport->egress.allowed_vlans_grp = NULL; - vport->egress.drop_grp = NULL; - vport->egress.acl = NULL; -} - -static int -esw_vport_create_legacy_ingress_acl_groups(struct mlx5_eswitch *esw, - struct mlx5_vport *vport) -{ - int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); - struct mlx5_core_dev *dev = esw->dev; - struct mlx5_flow_group *g; - void *match_criteria; - u32 *flow_group_in; - int err; - - flow_group_in = kvzalloc(inlen, GFP_KERNEL); - if (!flow_group_in) - return -ENOMEM; - - match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria); - - MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); - MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag); - MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16); - MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0); - MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); - MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0); - - g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in); - if (IS_ERR(g)) { - err = PTR_ERR(g); - esw_warn(dev, "vport[%d] ingress create untagged spoofchk flow group, err(%d)\n", - vport->vport, err); - goto spoof_err; - } - vport->ingress.legacy.allow_untagged_spoofchk_grp = g; - - memset(flow_group_in, 0, inlen); - MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); - MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag); - MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1); - MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1); - - g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in); - if (IS_ERR(g)) { - err = PTR_ERR(g); - esw_warn(dev, "vport[%d] ingress create untagged flow group, err(%d)\n", - vport->vport, err); - goto untagged_err; - } - vport->ingress.legacy.allow_untagged_only_grp = g; - - memset(flow_group_in, 0, inlen); - MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); - MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16); - MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0); - MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 2); - 
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2); - - g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in); - if (IS_ERR(g)) { - err = PTR_ERR(g); - esw_warn(dev, "vport[%d] ingress create spoofchk flow group, err(%d)\n", - vport->vport, err); - goto allow_spoof_err; - } - vport->ingress.legacy.allow_spoofchk_only_grp = g; - - memset(flow_group_in, 0, inlen); - MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 3); - MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3); - - g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in); - if (IS_ERR(g)) { - err = PTR_ERR(g); - esw_warn(dev, "vport[%d] ingress create drop flow group, err(%d)\n", - vport->vport, err); - goto drop_err; - } - vport->ingress.legacy.drop_grp = g; - kvfree(flow_group_in); - return 0; - -drop_err: - if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_spoofchk_only_grp)) { - mlx5_destroy_flow_group(vport->ingress.legacy.allow_spoofchk_only_grp); - vport->ingress.legacy.allow_spoofchk_only_grp = NULL; - } -allow_spoof_err: - if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_untagged_only_grp)) { - mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_only_grp); - vport->ingress.legacy.allow_untagged_only_grp = NULL; - } -untagged_err: - if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_untagged_spoofchk_grp)) { - mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_spoofchk_grp); - vport->ingress.legacy.allow_untagged_spoofchk_grp = NULL; - } -spoof_err: - kvfree(flow_group_in); - return err; -} - -int esw_vport_create_ingress_acl_table(struct mlx5_eswitch *esw, - struct mlx5_vport *vport, int table_size) -{ - struct mlx5_core_dev *dev = esw->dev; - struct mlx5_flow_namespace *root_ns; - struct mlx5_flow_table *acl; - int vport_index; - int err; - - if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) - return -EOPNOTSUPP; - - esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n", - vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size)); - - vport_index = mlx5_eswitch_vport_num_to_index(esw, vport->vport); - root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS, - vport_index); - if (!root_ns) { - esw_warn(dev, "Failed to get E-Switch ingress flow namespace for vport (%d)\n", - vport->vport); - return -EOPNOTSUPP; - } - - acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport); - if (IS_ERR(acl)) { - err = PTR_ERR(acl); - esw_warn(dev, "vport[%d] ingress create flow Table, err(%d)\n", - vport->vport, err); - return err; - } - vport->ingress.acl = acl; - return 0; -} - -void esw_vport_destroy_ingress_acl_table(struct mlx5_vport *vport) -{ - if (!vport->ingress.acl) - return; - - mlx5_destroy_flow_table(vport->ingress.acl); - vport->ingress.acl = NULL; -} - -void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw, - struct mlx5_vport *vport) -{ - if (vport->ingress.legacy.drop_rule) { - mlx5_del_flow_rules(vport->ingress.legacy.drop_rule); - vport->ingress.legacy.drop_rule = NULL; - } - - if (vport->ingress.allow_rule) { - mlx5_del_flow_rules(vport->ingress.allow_rule); - vport->ingress.allow_rule = NULL; - } -} - -static void esw_vport_disable_legacy_ingress_acl(struct mlx5_eswitch *esw, - struct mlx5_vport *vport) -{ - if (!vport->ingress.acl) - return; - - esw_debug(esw->dev, "Destroy vport[%d] E-Switch ingress ACL\n", vport->vport); - - esw_vport_cleanup_ingress_rules(esw, vport); - if (vport->ingress.legacy.allow_spoofchk_only_grp) { - 
mlx5_destroy_flow_group(vport->ingress.legacy.allow_spoofchk_only_grp); - vport->ingress.legacy.allow_spoofchk_only_grp = NULL; - } - if (vport->ingress.legacy.allow_untagged_only_grp) { - mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_only_grp); - vport->ingress.legacy.allow_untagged_only_grp = NULL; - } - if (vport->ingress.legacy.allow_untagged_spoofchk_grp) { - mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_spoofchk_grp); - vport->ingress.legacy.allow_untagged_spoofchk_grp = NULL; - } - if (vport->ingress.legacy.drop_grp) { - mlx5_destroy_flow_group(vport->ingress.legacy.drop_grp); - vport->ingress.legacy.drop_grp = NULL; - } - esw_vport_destroy_ingress_acl_table(vport); -} - -static int esw_vport_ingress_config(struct mlx5_eswitch *esw, - struct mlx5_vport *vport) -{ - struct mlx5_fc *counter = vport->ingress.legacy.drop_counter; - struct mlx5_flow_destination drop_ctr_dst = {0}; - struct mlx5_flow_destination *dst = NULL; - struct mlx5_flow_act flow_act = {0}; - struct mlx5_flow_spec *spec = NULL; - int dest_num = 0; - int err = 0; - u8 *smac_v; - - /* The ingress acl table contains 4 groups - * (2 active rules at the same time - - * 1 allow rule from one of the first 3 groups. - * 1 drop rule from the last group): - * 1)Allow untagged traffic with smac=original mac. - * 2)Allow untagged traffic. - * 3)Allow traffic with smac=original mac. - * 4)Drop all other traffic. - */ - int table_size = 4; - - esw_vport_cleanup_ingress_rules(esw, vport); - - if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) { - esw_vport_disable_legacy_ingress_acl(esw, vport); - return 0; - } - - if (!vport->ingress.acl) { - err = esw_vport_create_ingress_acl_table(esw, vport, table_size); - if (err) { - esw_warn(esw->dev, - "vport[%d] enable ingress acl err (%d)\n", - err, vport->vport); - return err; - } - - err = esw_vport_create_legacy_ingress_acl_groups(esw, vport); - if (err) - goto out; - } - - esw_debug(esw->dev, - "vport[%d] configure ingress rules, vlan(%d) qos(%d)\n", - vport->vport, vport->info.vlan, vport->info.qos); - - spec = kvzalloc(sizeof(*spec), GFP_KERNEL); - if (!spec) { - err = -ENOMEM; - goto out; - } - - if (vport->info.vlan || vport->info.qos) - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag); - - if (vport->info.spoofchk) { - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_47_16); - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_15_0); - smac_v = MLX5_ADDR_OF(fte_match_param, - spec->match_value, - outer_headers.smac_47_16); - ether_addr_copy(smac_v, vport->info.mac); - } - - spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW; - vport->ingress.allow_rule = - mlx5_add_flow_rules(vport->ingress.acl, spec, - &flow_act, NULL, 0); - if (IS_ERR(vport->ingress.allow_rule)) { - err = PTR_ERR(vport->ingress.allow_rule); - esw_warn(esw->dev, - "vport[%d] configure ingress allow rule, err(%d)\n", - vport->vport, err); - vport->ingress.allow_rule = NULL; - goto out; - } - - flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP; - - /* Attach drop flow counter */ - if (counter) { - flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; - drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; - drop_ctr_dst.counter_id = mlx5_fc_id(counter); - dst = &drop_ctr_dst; - dest_num++; - } - vport->ingress.legacy.drop_rule = - mlx5_add_flow_rules(vport->ingress.acl, NULL, - &flow_act, dst, dest_num); - if 
(IS_ERR(vport->ingress.legacy.drop_rule)) { - err = PTR_ERR(vport->ingress.legacy.drop_rule); - esw_warn(esw->dev, - "vport[%d] configure ingress drop rule, err(%d)\n", - vport->vport, err); - vport->ingress.legacy.drop_rule = NULL; - goto out; - } - kvfree(spec); - return 0; - -out: - esw_vport_disable_legacy_ingress_acl(esw, vport); - kvfree(spec); - return err; -} - -int mlx5_esw_create_vport_egress_acl_vlan(struct mlx5_eswitch *esw, - struct mlx5_vport *vport, - u16 vlan_id, u32 flow_action) -{ - struct mlx5_flow_act flow_act = {}; - struct mlx5_flow_spec *spec; - int err = 0; - - if (vport->egress.allowed_vlan) - return -EEXIST; - - spec = kvzalloc(sizeof(*spec), GFP_KERNEL); - if (!spec) - return -ENOMEM; - - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag); - MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag); - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid); - MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vlan_id); - - spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - flow_act.action = flow_action; - vport->egress.allowed_vlan = - mlx5_add_flow_rules(vport->egress.acl, spec, - &flow_act, NULL, 0); - if (IS_ERR(vport->egress.allowed_vlan)) { - err = PTR_ERR(vport->egress.allowed_vlan); - esw_warn(esw->dev, - "vport[%d] configure egress vlan rule failed, err(%d)\n", - vport->vport, err); - vport->egress.allowed_vlan = NULL; - } - - kvfree(spec); - return err; -} - -static int esw_vport_egress_config(struct mlx5_eswitch *esw, - struct mlx5_vport *vport) -{ - struct mlx5_fc *counter = vport->egress.legacy.drop_counter; - struct mlx5_flow_destination drop_ctr_dst = {0}; - struct mlx5_flow_destination *dst = NULL; - struct mlx5_flow_act flow_act = {0}; - int dest_num = 0; - int err = 0; - - esw_vport_cleanup_egress_rules(esw, vport); - - if (!vport->info.vlan && !vport->info.qos) { - esw_vport_disable_egress_acl(esw, vport); - return 0; - } - - err = esw_vport_enable_egress_acl(esw, vport); - if (err) { - mlx5_core_warn(esw->dev, - "failed to enable egress acl (%d) on vport[%d]\n", - err, vport->vport); - return err; - } - - esw_debug(esw->dev, - "vport[%d] configure egress rules, vlan(%d) qos(%d)\n", - vport->vport, vport->info.vlan, vport->info.qos); - - /* Allowed vlan rule */ - err = mlx5_esw_create_vport_egress_acl_vlan(esw, vport, vport->info.vlan, - MLX5_FLOW_CONTEXT_ACTION_ALLOW); - if (err) - return err; - - flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP; - - /* Attach egress drop flow counter */ - if (counter) { - flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; - drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; - drop_ctr_dst.counter_id = mlx5_fc_id(counter); - dst = &drop_ctr_dst; - dest_num++; - } - vport->egress.legacy.drop_rule = - mlx5_add_flow_rules(vport->egress.acl, NULL, - &flow_act, dst, dest_num); - if (IS_ERR(vport->egress.legacy.drop_rule)) { - err = PTR_ERR(vport->egress.legacy.drop_rule); - esw_warn(esw->dev, - "vport[%d] configure egress drop rule failed, err(%d)\n", - vport->vport, err); - vport->egress.legacy.drop_rule = NULL; - } - - return err; -} - static bool element_type_supported(struct mlx5_eswitch *esw, int type) { const struct mlx5_core_dev *dev = esw->dev; @@ -1653,44 +1148,19 @@ static int esw_vport_create_legacy_acl_tables(struct mlx5_eswitch *esw, if (mlx5_esw_is_manager_vport(esw, vport->vport)) return 0; - if (MLX5_CAP_ESW_INGRESS_ACL(esw->dev, flow_counter)) { - vport->ingress.legacy.drop_counter = 
mlx5_fc_create(esw->dev, false); - if (IS_ERR(vport->ingress.legacy.drop_counter)) { - esw_warn(esw->dev, - "vport[%d] configure ingress drop rule counter failed\n", - vport->vport); - vport->ingress.legacy.drop_counter = NULL; - } - } - - ret = esw_vport_ingress_config(esw, vport); + ret = esw_acl_ingress_lgcy_setup(esw, vport); if (ret) goto ingress_err; - if (MLX5_CAP_ESW_EGRESS_ACL(esw->dev, flow_counter)) { - vport->egress.legacy.drop_counter = mlx5_fc_create(esw->dev, false); - if (IS_ERR(vport->egress.legacy.drop_counter)) { - esw_warn(esw->dev, - "vport[%d] configure egress drop rule counter failed\n", - vport->vport); - vport->egress.legacy.drop_counter = NULL; - } - } - - ret = esw_vport_egress_config(esw, vport); + ret = esw_acl_egress_lgcy_setup(esw, vport); if (ret) goto egress_err; return 0; egress_err: - esw_vport_disable_legacy_ingress_acl(esw, vport); - mlx5_fc_destroy(esw->dev, vport->egress.legacy.drop_counter); - vport->egress.legacy.drop_counter = NULL; - + esw_acl_ingress_lgcy_cleanup(esw, vport); ingress_err: - mlx5_fc_destroy(esw->dev, vport->ingress.legacy.drop_counter); - vport->ingress.legacy.drop_counter = NULL; return ret; } @@ -1710,13 +1180,8 @@ static void esw_vport_destroy_legacy_acl_tables(struct mlx5_eswitch *esw, if (mlx5_esw_is_manager_vport(esw, vport->vport)) return; - esw_vport_disable_egress_acl(esw, vport); - mlx5_fc_destroy(esw->dev, vport->egress.legacy.drop_counter); - vport->egress.legacy.drop_counter = NULL; - - esw_vport_disable_legacy_ingress_acl(esw, vport); - mlx5_fc_destroy(esw->dev, vport->ingress.legacy.drop_counter); - vport->ingress.legacy.drop_counter = NULL; + esw_acl_egress_lgcy_cleanup(esw, vport); + esw_acl_ingress_lgcy_cleanup(esw, vport); } static void esw_vport_cleanup_acl(struct mlx5_eswitch *esw, @@ -2262,7 +1727,10 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) hash_init(esw->offloads.encap_tbl); mutex_init(&esw->offloads.mod_hdr.lock); hash_init(esw->offloads.mod_hdr.hlist); + mutex_init(&esw->offloads.decap_tbl_lock); + hash_init(esw->offloads.decap_tbl); atomic64_set(&esw->offloads.num_flows, 0); + ida_init(&esw->offloads.vport_metadata_ida); mutex_init(&esw->state_lock); mutex_init(&esw->mode_lock); @@ -2301,8 +1769,10 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) esw_offloads_cleanup_reps(esw); mutex_destroy(&esw->mode_lock); mutex_destroy(&esw->state_lock); + ida_destroy(&esw->offloads.vport_metadata_ida); mutex_destroy(&esw->offloads.mod_hdr.lock); mutex_destroy(&esw->offloads.encap_tbl_lock); + mutex_destroy(&esw->offloads.decap_tbl_lock); kfree(esw->vports); kfree(esw); } @@ -2345,7 +1815,7 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, ether_addr_copy(evport->info.mac, mac); evport->info.node_guid = node_guid; if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY) - err = esw_vport_ingress_config(esw, evport); + err = esw_acl_ingress_lgcy_setup(esw, evport); unlock: mutex_unlock(&esw->state_lock); @@ -2427,10 +1897,10 @@ int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, evport->info.vlan = vlan; evport->info.qos = qos; if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY) { - err = esw_vport_ingress_config(esw, evport); + err = esw_acl_ingress_lgcy_setup(esw, evport); if (err) return err; - err = esw_vport_egress_config(esw, evport); + err = esw_acl_egress_lgcy_setup(esw, evport); } return err; @@ -2472,7 +1942,7 @@ int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw, "Spoofchk in set while MAC is invalid, vport(%d)\n", evport->vport); if (evport->enabled && 
esw->mode == MLX5_ESWITCH_LEGACY) - err = esw_vport_ingress_config(esw, evport); + err = esw_acl_ingress_lgcy_setup(esw, evport); if (err) evport->info.spoofchk = pschk; mutex_unlock(&esw->state_lock); @@ -2731,7 +2201,7 @@ static int mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev, if (!vport->enabled) goto unlock; - if (vport->egress.legacy.drop_counter) + if (!IS_ERR_OR_NULL(vport->egress.legacy.drop_counter)) mlx5_fc_query(dev, vport->egress.legacy.drop_counter, &stats->rx_dropped, &bytes); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 4a1c6c78bb14..a5175e98c0b3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -99,13 +99,19 @@ struct vport_ingress { struct vport_egress { struct mlx5_flow_table *acl; - struct mlx5_flow_group *allowed_vlans_grp; - struct mlx5_flow_group *drop_grp; struct mlx5_flow_handle *allowed_vlan; - struct { - struct mlx5_flow_handle *drop_rule; - struct mlx5_fc *drop_counter; - } legacy; + struct mlx5_flow_group *vlan_grp; + union { + struct { + struct mlx5_flow_group *drop_grp; + struct mlx5_flow_handle *drop_rule; + struct mlx5_fc *drop_counter; + } legacy; + struct { + struct mlx5_flow_group *fwd_grp; + struct mlx5_flow_handle *fwd_rule; + } offloads; + }; }; struct mlx5_vport_drop_stats { @@ -143,6 +149,8 @@ struct mlx5_vport { struct vport_ingress ingress; struct vport_egress egress; + u32 default_metadata; + u32 metadata; struct mlx5_vport_info info; @@ -209,6 +217,8 @@ struct mlx5_esw_offload { struct mutex peer_mutex; struct mutex encap_tbl_lock; /* protects encap_tbl */ DECLARE_HASHTABLE(encap_tbl, 8); + struct mutex decap_tbl_lock; /* protects decap_tbl */ + DECLARE_HASHTABLE(decap_tbl, 8); struct mod_hdr_tbl mod_hdr; DECLARE_HASHTABLE(termtbl_tbl, 8); struct mutex termtbl_mutex; /* protects termtbl hash */ @@ -216,6 +226,7 @@ struct mlx5_esw_offload { u8 inline_mode; atomic64_t num_flows; enum devlink_eswitch_encap_mode encap; + struct ida vport_metadata_ida; }; /* E-Switch MC FDB table hash node */ @@ -283,18 +294,10 @@ void esw_offloads_disable(struct mlx5_eswitch *esw); int esw_offloads_enable(struct mlx5_eswitch *esw); void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw); int esw_offloads_init_reps(struct mlx5_eswitch *esw); -void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw, - struct mlx5_vport *vport); -int esw_vport_create_ingress_acl_table(struct mlx5_eswitch *esw, - struct mlx5_vport *vport, - int table_size); -void esw_vport_destroy_ingress_acl_table(struct mlx5_vport *vport); -void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw, - struct mlx5_vport *vport); -int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw, - struct mlx5_vport *vport); -void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw, - struct mlx5_vport *vport); + +u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw); +void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata); + int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 rate_mbps); @@ -432,6 +435,7 @@ struct mlx5_esw_flow_attr { struct mlx5_flow_table *fdb; struct mlx5_flow_table *dest_ft; struct mlx5_ct_attr ct_attr; + struct mlx5_pkt_reformat *decap_pkt_reformat; struct mlx5e_tc_flow_parse_attr *parse_attr; }; @@ -455,10 +459,6 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw, int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, u16 vport, u16 vlan, u8 qos, u8 
set_flags); -int mlx5_esw_create_vport_egress_acl_vlan(struct mlx5_eswitch *esw, - struct mlx5_vport *vport, - u16 vlan_id, u32 flow_action); - static inline bool mlx5_esw_qos_enabled(struct mlx5_eswitch *esw) { return esw->qos.enabled; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 57ac2ef52e80..060354bb211a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -31,12 +31,14 @@ */ #include <linux/etherdevice.h> +#include <linux/idr.h> #include <linux/mlx5/driver.h> #include <linux/mlx5/mlx5_ifc.h> #include <linux/mlx5/vport.h> #include <linux/mlx5/fs.h> #include "mlx5_core.h" #include "eswitch.h" +#include "esw/acl/ofld.h" #include "esw/chains.h" #include "rdma.h" #include "en.h" @@ -234,13 +236,6 @@ static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw, return &esw->offloads.vport_reps[idx]; } -static bool -esw_check_ingress_prio_tag_enabled(const struct mlx5_eswitch *esw, - const struct mlx5_vport *vport) -{ - return (MLX5_CAP_GEN(esw->dev, prio_tag_required) && - mlx5_eswitch_is_vf_vport(esw, vport->vport)); -} static void mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw, @@ -366,6 +361,10 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, } } } + + if (attr->decap_pkt_reformat) + flow_act.pkt_reformat = attr->decap_pkt_reformat; + if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; dest[i].counter_id = mlx5_fc_id(attr->counter); @@ -1727,7 +1726,9 @@ static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw, static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw) { +#if IS_ENABLED(CONFIG_MLX5_CLS_ACT) mlx5e_tc_clean_fdb_peer_flows(esw); +#endif esw_del_fdb_peer_miss_rules(esw); } @@ -1845,279 +1846,6 @@ static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw) mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS); } -static int esw_vport_ingress_prio_tag_config(struct mlx5_eswitch *esw, - struct mlx5_vport *vport) -{ - struct mlx5_flow_act flow_act = {0}; - struct mlx5_flow_spec *spec; - int err = 0; - - /* For prio tag mode, there is only 1 FTEs: - * 1) Untagged packets - push prio tag VLAN and modify metadata if - * required, allow - * Unmatched traffic is allowed by default - */ - spec = kvzalloc(sizeof(*spec), GFP_KERNEL); - if (!spec) - return -ENOMEM; - - /* Untagged packets - push prio tag VLAN, allow */ - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag); - MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 0); - spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH | - MLX5_FLOW_CONTEXT_ACTION_ALLOW; - flow_act.vlan[0].ethtype = ETH_P_8021Q; - flow_act.vlan[0].vid = 0; - flow_act.vlan[0].prio = 0; - - if (vport->ingress.offloads.modify_metadata_rule) { - flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; - flow_act.modify_hdr = vport->ingress.offloads.modify_metadata; - } - - vport->ingress.allow_rule = - mlx5_add_flow_rules(vport->ingress.acl, spec, - &flow_act, NULL, 0); - if (IS_ERR(vport->ingress.allow_rule)) { - err = PTR_ERR(vport->ingress.allow_rule); - esw_warn(esw->dev, - "vport[%d] configure ingress untagged allow rule, err(%d)\n", - vport->vport, err); - vport->ingress.allow_rule = NULL; - } - - kvfree(spec); - return err; -} - -static int 
esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw, - struct mlx5_vport *vport) -{ - u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {}; - struct mlx5_flow_act flow_act = {}; - int err = 0; - u32 key; - - key = mlx5_eswitch_get_vport_metadata_for_match(esw, vport->vport); - key >>= ESW_SOURCE_PORT_METADATA_OFFSET; - - MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET); - MLX5_SET(set_action_in, action, field, - MLX5_ACTION_IN_FIELD_METADATA_REG_C_0); - MLX5_SET(set_action_in, action, data, key); - MLX5_SET(set_action_in, action, offset, - ESW_SOURCE_PORT_METADATA_OFFSET); - MLX5_SET(set_action_in, action, length, - ESW_SOURCE_PORT_METADATA_BITS); - - vport->ingress.offloads.modify_metadata = - mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS, - 1, action); - if (IS_ERR(vport->ingress.offloads.modify_metadata)) { - err = PTR_ERR(vport->ingress.offloads.modify_metadata); - esw_warn(esw->dev, - "failed to alloc modify header for vport %d ingress acl (%d)\n", - vport->vport, err); - return err; - } - - flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_ALLOW; - flow_act.modify_hdr = vport->ingress.offloads.modify_metadata; - vport->ingress.offloads.modify_metadata_rule = - mlx5_add_flow_rules(vport->ingress.acl, - NULL, &flow_act, NULL, 0); - if (IS_ERR(vport->ingress.offloads.modify_metadata_rule)) { - err = PTR_ERR(vport->ingress.offloads.modify_metadata_rule); - esw_warn(esw->dev, - "failed to add setting metadata rule for vport %d ingress acl, err(%d)\n", - vport->vport, err); - mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata); - vport->ingress.offloads.modify_metadata_rule = NULL; - } - return err; -} - -static void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw, - struct mlx5_vport *vport) -{ - if (vport->ingress.offloads.modify_metadata_rule) { - mlx5_del_flow_rules(vport->ingress.offloads.modify_metadata_rule); - mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata); - - vport->ingress.offloads.modify_metadata_rule = NULL; - } -} - -static int esw_vport_create_ingress_acl_group(struct mlx5_eswitch *esw, - struct mlx5_vport *vport) -{ - int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); - struct mlx5_flow_group *g; - void *match_criteria; - u32 *flow_group_in; - u32 flow_index = 0; - int ret = 0; - - flow_group_in = kvzalloc(inlen, GFP_KERNEL); - if (!flow_group_in) - return -ENOMEM; - - if (esw_check_ingress_prio_tag_enabled(esw, vport)) { - /* This group is to hold FTE to match untagged packets when prio_tag - * is enabled. 
- */ - memset(flow_group_in, 0, inlen); - - match_criteria = MLX5_ADDR_OF(create_flow_group_in, - flow_group_in, match_criteria); - MLX5_SET(create_flow_group_in, flow_group_in, - match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); - MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag); - MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index); - MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index); - - g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in); - if (IS_ERR(g)) { - ret = PTR_ERR(g); - esw_warn(esw->dev, "vport[%d] ingress create untagged flow group, err(%d)\n", - vport->vport, ret); - goto prio_tag_err; - } - vport->ingress.offloads.metadata_prio_tag_grp = g; - flow_index++; - } - - if (mlx5_eswitch_vport_match_metadata_enabled(esw)) { - /* This group holds an FTE with no matches for add metadata for - * tagged packets, if prio-tag is enabled (as a fallthrough), - * or all traffic in case prio-tag is disabled. - */ - memset(flow_group_in, 0, inlen); - MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index); - MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index); - - g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in); - if (IS_ERR(g)) { - ret = PTR_ERR(g); - esw_warn(esw->dev, "vport[%d] ingress create drop flow group, err(%d)\n", - vport->vport, ret); - goto metadata_err; - } - vport->ingress.offloads.metadata_allmatch_grp = g; - } - - kvfree(flow_group_in); - return 0; - -metadata_err: - if (!IS_ERR_OR_NULL(vport->ingress.offloads.metadata_prio_tag_grp)) { - mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp); - vport->ingress.offloads.metadata_prio_tag_grp = NULL; - } -prio_tag_err: - kvfree(flow_group_in); - return ret; -} - -static void esw_vport_destroy_ingress_acl_group(struct mlx5_vport *vport) -{ - if (vport->ingress.offloads.metadata_allmatch_grp) { - mlx5_destroy_flow_group(vport->ingress.offloads.metadata_allmatch_grp); - vport->ingress.offloads.metadata_allmatch_grp = NULL; - } - - if (vport->ingress.offloads.metadata_prio_tag_grp) { - mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp); - vport->ingress.offloads.metadata_prio_tag_grp = NULL; - } -} - -static int esw_vport_ingress_config(struct mlx5_eswitch *esw, - struct mlx5_vport *vport) -{ - int num_ftes = 0; - int err; - - if (!mlx5_eswitch_vport_match_metadata_enabled(esw) && - !esw_check_ingress_prio_tag_enabled(esw, vport)) - return 0; - - esw_vport_cleanup_ingress_rules(esw, vport); - - if (mlx5_eswitch_vport_match_metadata_enabled(esw)) - num_ftes++; - if (esw_check_ingress_prio_tag_enabled(esw, vport)) - num_ftes++; - - err = esw_vport_create_ingress_acl_table(esw, vport, num_ftes); - if (err) { - esw_warn(esw->dev, - "failed to enable ingress acl (%d) on vport[%d]\n", - err, vport->vport); - return err; - } - - err = esw_vport_create_ingress_acl_group(esw, vport); - if (err) - goto group_err; - - esw_debug(esw->dev, - "vport[%d] configure ingress rules\n", vport->vport); - - if (mlx5_eswitch_vport_match_metadata_enabled(esw)) { - err = esw_vport_add_ingress_acl_modify_metadata(esw, vport); - if (err) - goto metadata_err; - } - - if (esw_check_ingress_prio_tag_enabled(esw, vport)) { - err = esw_vport_ingress_prio_tag_config(esw, vport); - if (err) - goto prio_tag_err; - } - return 0; - -prio_tag_err: - esw_vport_del_ingress_acl_modify_metadata(esw, vport); -metadata_err: - esw_vport_destroy_ingress_acl_group(vport); -group_err: - 
esw_vport_destroy_ingress_acl_table(vport); - return err; -} - -static int esw_vport_egress_config(struct mlx5_eswitch *esw, - struct mlx5_vport *vport) -{ - int err; - - if (!MLX5_CAP_GEN(esw->dev, prio_tag_required)) - return 0; - - esw_vport_cleanup_egress_rules(esw, vport); - - err = esw_vport_enable_egress_acl(esw, vport); - if (err) - return err; - - /* For prio tag mode, there is only 1 FTEs: - * 1) prio tag packets - pop the prio tag VLAN, allow - * Unmatched traffic is allowed by default - */ - esw_debug(esw->dev, - "vport[%d] configure prio tag egress rules\n", vport->vport); - - /* prio tag vlan rule - pop it so VF receives untagged packets */ - err = mlx5_esw_create_vport_egress_acl_vlan(esw, vport, 0, - MLX5_FLOW_CONTEXT_ACTION_VLAN_POP | - MLX5_FLOW_CONTEXT_ACTION_ALLOW); - if (err) - esw_vport_disable_egress_acl(esw, vport); - - return err; -} - static bool esw_check_vport_match_metadata_supported(const struct mlx5_eswitch *esw) { @@ -2150,25 +1878,83 @@ static bool esw_use_vport_metadata(const struct mlx5_eswitch *esw) esw_check_vport_match_metadata_supported(esw); } +u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw) +{ + u32 num_vports = GENMASK(ESW_VPORT_BITS - 1, 0) - 1; + u32 vhca_id_mask = GENMASK(ESW_VHCA_ID_BITS - 1, 0); + u32 vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id); + u32 start; + u32 end; + int id; + + /* Make sure the vhca_id fits the ESW_VHCA_ID_BITS */ + WARN_ON_ONCE(vhca_id >= BIT(ESW_VHCA_ID_BITS)); + + /* Trim vhca_id to ESW_VHCA_ID_BITS */ + vhca_id &= vhca_id_mask; + + start = (vhca_id << ESW_VPORT_BITS); + end = start + num_vports; + if (!vhca_id) + start += 1; /* zero is reserved/invalid metadata */ + id = ida_alloc_range(&esw->offloads.vport_metadata_ida, start, end, GFP_KERNEL); + + return (id < 0) ? 0 : id; +} + +void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata) +{ + ida_free(&esw->offloads.vport_metadata_ida, metadata); +} + +static int esw_offloads_vport_metadata_setup(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) +{ + if (vport->vport == MLX5_VPORT_UPLINK) + return 0; + + vport->default_metadata = mlx5_esw_match_metadata_alloc(esw); + vport->metadata = vport->default_metadata; + return vport->metadata ? 
0 : -ENOSPC; +} + +static void esw_offloads_vport_metadata_cleanup(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) +{ + if (vport->vport == MLX5_VPORT_UPLINK || !vport->default_metadata) + return; + + WARN_ON(vport->metadata != vport->default_metadata); + mlx5_esw_match_metadata_free(esw, vport->default_metadata); +} + int esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw, struct mlx5_vport *vport) { int err; - err = esw_vport_ingress_config(esw, vport); + err = esw_offloads_vport_metadata_setup(esw, vport); if (err) - return err; + goto metadata_err; + + err = esw_acl_ingress_ofld_setup(esw, vport); + if (err) + goto ingress_err; if (mlx5_eswitch_is_vf_vport(esw, vport->vport)) { - err = esw_vport_egress_config(esw, vport); - if (err) { - esw_vport_cleanup_ingress_rules(esw, vport); - esw_vport_del_ingress_acl_modify_metadata(esw, vport); - esw_vport_destroy_ingress_acl_group(vport); - esw_vport_destroy_ingress_acl_table(vport); - } + err = esw_acl_egress_ofld_setup(esw, vport); + if (err) + goto egress_err; } + + return 0; + +egress_err: + esw_acl_ingress_ofld_cleanup(esw, vport); +ingress_err: + esw_offloads_vport_metadata_cleanup(esw, vport); +metadata_err: return err; } @@ -2176,11 +1962,9 @@ void esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw, struct mlx5_vport *vport) { - esw_vport_disable_egress_acl(esw, vport); - esw_vport_cleanup_ingress_rules(esw, vport); - esw_vport_del_ingress_acl_modify_metadata(esw, vport); - esw_vport_destroy_ingress_acl_group(vport); - esw_vport_destroy_ingress_acl_table(vport); + esw_acl_egress_ofld_cleanup(vport); + esw_acl_ingress_ofld_cleanup(esw, vport); + esw_offloads_vport_metadata_cleanup(esw, vport); } static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw) @@ -2846,38 +2630,11 @@ EXPORT_SYMBOL(mlx5_eswitch_vport_match_metadata_enabled); u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw, u16 vport_num) { - u32 vport_num_mask = GENMASK(ESW_VPORT_BITS - 1, 0); - u32 vhca_id_mask = GENMASK(ESW_VHCA_ID_BITS - 1, 0); - u32 vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id); - u32 val; + struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num); - /* Make sure the vhca_id fits the ESW_VHCA_ID_BITS */ - WARN_ON_ONCE(vhca_id >= BIT(ESW_VHCA_ID_BITS)); - - /* Trim vhca_id to ESW_VHCA_ID_BITS */ - vhca_id &= vhca_id_mask; - - /* Make sure pf and ecpf map to end of ESW_VPORT_BITS range so they - * don't overlap with VF numbers, and themselves, after trimming. - */ - WARN_ON_ONCE((MLX5_VPORT_UPLINK & vport_num_mask) < - vport_num_mask - 1); - WARN_ON_ONCE((MLX5_VPORT_ECPF & vport_num_mask) < - vport_num_mask - 1); - WARN_ON_ONCE((MLX5_VPORT_UPLINK & vport_num_mask) == - (MLX5_VPORT_ECPF & vport_num_mask)); - - /* Make sure that the VF vport_num fits ESW_VPORT_BITS and don't - * overlap with pf and ecpf. 
- */ - if (vport_num != MLX5_VPORT_UPLINK && - vport_num != MLX5_VPORT_ECPF) - WARN_ON_ONCE(vport_num >= vport_num_mask - 1); - - /* We can now trim vport_num to ESW_VPORT_BITS */ - vport_num &= vport_num_mask; + if (WARN_ON_ONCE(IS_ERR(vport))) + return 0; - val = (vhca_id << ESW_VPORT_BITS) | vport_num; - return val << (32 - ESW_SOURCE_PORT_METADATA_BITS); + return vport->metadata << (32 - ESW_SOURCE_PORT_METADATA_BITS); } EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/events.c b/drivers/net/ethernet/mellanox/mlx5/core/events.c index 8bcf3426b9c6..3ce17c3d7a00 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/events.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/events.c @@ -346,8 +346,10 @@ int mlx5_events_init(struct mlx5_core_dev *dev) events->dev = dev; dev->priv.events = events; events->wq = create_singlethread_workqueue("mlx5_events"); - if (!events->wq) + if (!events->wq) { + kfree(events); return -ENOMEM; + } INIT_WORK(&events->pcie_core_work, mlx5_pcie_event); return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c index 22a2ef111514..29b7339ebfa3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c @@ -194,8 +194,8 @@ static void mlx5_fpga_tls_flow_to_cmd(void *flow, void *cmd) MLX5_GET(tls_flow, flow, direction_sx)); } -int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq, - u64 rcd_sn) +int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle, + u32 seq, __be64 rcd_sn) { struct mlx5_fpga_dma_buf *buf; int size = sizeof(*buf) + MLX5_TLS_COMMAND_SIZE; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h index 3b2e37bf76fe..5714cf391d1b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h @@ -68,7 +68,7 @@ static inline u32 mlx5_fpga_tls_device_caps(struct mlx5_core_dev *mdev) return mdev->fpga->tls->caps; } -int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq, - u64 rcd_sn); +int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle, + u32 seq, __be64 rcd_sn); #endif /* __MLX5_FPGA_TLS_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index 1a8e826ac86b..465a1076a477 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -781,6 +781,10 @@ static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns, max_actions = MLX5_CAP_ESW_INGRESS_ACL(dev, max_modify_header_actions); table_type = FS_FT_ESW_INGRESS_ACL; break; + case MLX5_FLOW_NAMESPACE_RDMA_TX: + max_actions = MLX5_CAP_FLOWTABLE_RDMA_TX(dev, max_modify_header_actions); + table_type = FS_FT_RDMA_TX; + break; default: return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 52af6023a4b4..13e2fb79c21a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -254,7 +254,7 @@ static void del_sw_flow_group(struct fs_node *node); static void del_sw_fte(struct fs_node *node); static void del_sw_prio(struct fs_node *node); static void del_sw_ns(struct fs_node *node); -/* Delete rule (destination) is special case that +/* Delete rule 
(destination) is special case that * requires to lock the FTE for all the deletion process. */ static void del_sw_hw_rule(struct fs_node *node); @@ -344,17 +344,12 @@ static void tree_put_node(struct fs_node *node, bool locked) if (node->del_hw_func) node->del_hw_func(node); if (parent_node) { - /* Only root namespace doesn't have parent and we just - * need to free its node. - */ down_write_ref_node(parent_node, locked); list_del_init(&node->list); - if (node->del_sw_func) - node->del_sw_func(node); - up_write_ref_node(parent_node, locked); - } else { - kfree(node); } + node->del_sw_func(node); + if (parent_node) + up_write_ref_node(parent_node, locked); node = NULL; } if (!node && parent_node) @@ -384,6 +379,12 @@ static struct fs_prio *find_prio(struct mlx5_flow_namespace *ns, return NULL; } +static bool is_fwd_next_action(u32 action) +{ + return action & (MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO | + MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS); +} + static bool check_valid_spec(const struct mlx5_flow_spec *spec) { int i; @@ -468,8 +469,10 @@ static void del_sw_flow_table(struct fs_node *node) fs_get_obj(ft, node); rhltable_destroy(&ft->fgs_hash); - fs_get_obj(prio, ft->node.parent); - prio->num_ft--; + if (ft->node.parent) { + fs_get_obj(prio, ft->node.parent); + prio->num_ft--; + } kfree(ft); } @@ -502,7 +505,7 @@ static void del_sw_hw_rule(struct fs_node *node) fs_get_obj(rule, node); fs_get_obj(fte, rule->node.parent); trace_mlx5_fs_del_rule(rule); - if (rule->sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) { + if (is_fwd_next_action(rule->sw_action)) { mutex_lock(&rule->dest_attr.ft->lock); list_del(&rule->next_ft); mutex_unlock(&rule->dest_attr.ft->lock); @@ -826,6 +829,36 @@ static struct mlx5_flow_table *find_prev_chained_ft(struct fs_prio *prio) return find_closest_ft(prio, true); } +static struct fs_prio *find_fwd_ns_prio(struct mlx5_flow_root_namespace *root, + struct mlx5_flow_namespace *ns) +{ + struct mlx5_flow_namespace *root_ns = &root->ns; + struct fs_prio *iter_prio; + struct fs_prio *prio; + + fs_get_obj(prio, ns->node.parent); + list_for_each_entry(iter_prio, &root_ns->node.children, node.list) { + if (iter_prio == prio && + !list_is_last(&prio->node.children, &iter_prio->node.list)) + return list_next_entry(iter_prio, node.list); + } + return NULL; +} + +static struct mlx5_flow_table *find_next_fwd_ft(struct mlx5_flow_table *ft, + struct mlx5_flow_act *flow_act) +{ + struct mlx5_flow_root_namespace *root = find_root(&ft->node); + struct fs_prio *prio; + + if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS) + prio = find_fwd_ns_prio(root, ft->ns); + else + fs_get_obj(prio, ft->node.parent); + + return (prio) ? find_next_chained_ft(prio) : NULL; +} + static int connect_fts_in_prio(struct mlx5_core_dev *dev, struct fs_prio *prio, struct mlx5_flow_table *ft) @@ -976,6 +1009,10 @@ static int connect_fwd_rules(struct mlx5_core_dev *dev, list_splice_init(&old_next_ft->fwd_rules, &new_next_ft->fwd_rules); mutex_unlock(&old_next_ft->lock); list_for_each_entry(iter, &new_next_ft->fwd_rules, next_ft) { + if ((iter->sw_action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS) && + iter->ft->ns == new_next_ft->ns) + continue; + err = _mlx5_modify_rule_destination(iter, &dest); if (err) pr_err("mlx5_core: failed to modify rule to point on flow table %d\n", @@ -1077,6 +1114,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa next_ft = unmanaged ? 
ft_attr->next_ft : find_next_chained_ft(fs_prio); ft->def_miss_action = ns->def_miss_action; + ft->ns = ns; err = root->cmds->create_flow_table(root, ft, log_table_sz, next_ft); if (err) goto free_ft; @@ -1901,48 +1939,59 @@ mlx5_add_flow_rules(struct mlx5_flow_table *ft, { struct mlx5_flow_root_namespace *root = find_root(&ft->node); static const struct mlx5_flow_spec zero_spec = {}; - struct mlx5_flow_destination gen_dest = {}; + struct mlx5_flow_destination *gen_dest = NULL; struct mlx5_flow_table *next_ft = NULL; struct mlx5_flow_handle *handle = NULL; u32 sw_action = flow_act->action; - struct fs_prio *prio; + int i; if (!spec) spec = &zero_spec; - fs_get_obj(prio, ft->node.parent); - if (flow_act->action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) { - if (!fwd_next_prio_supported(ft)) - return ERR_PTR(-EOPNOTSUPP); - if (num_dest) - return ERR_PTR(-EINVAL); - mutex_lock(&root->chain_lock); - next_ft = find_next_chained_ft(prio); - if (next_ft) { - gen_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; - gen_dest.ft = next_ft; - dest = &gen_dest; - num_dest = 1; - flow_act->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; - } else { - mutex_unlock(&root->chain_lock); - return ERR_PTR(-EOPNOTSUPP); - } - } + if (!is_fwd_next_action(sw_action)) + return _mlx5_add_flow_rules(ft, spec, flow_act, dest, num_dest); - handle = _mlx5_add_flow_rules(ft, spec, flow_act, dest, num_dest); + if (!fwd_next_prio_supported(ft)) + return ERR_PTR(-EOPNOTSUPP); - if (sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) { - if (!IS_ERR_OR_NULL(handle) && - (list_empty(&handle->rule[0]->next_ft))) { - mutex_lock(&next_ft->lock); - list_add(&handle->rule[0]->next_ft, - &next_ft->fwd_rules); - mutex_unlock(&next_ft->lock); - handle->rule[0]->sw_action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO; - } - mutex_unlock(&root->chain_lock); - } + mutex_lock(&root->chain_lock); + next_ft = find_next_fwd_ft(ft, flow_act); + if (!next_ft) { + handle = ERR_PTR(-EOPNOTSUPP); + goto unlock; + } + + gen_dest = kcalloc(num_dest + 1, sizeof(*dest), + GFP_KERNEL); + if (!gen_dest) { + handle = ERR_PTR(-ENOMEM); + goto unlock; + } + for (i = 0; i < num_dest; i++) + gen_dest[i] = dest[i]; + gen_dest[i].type = + MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + gen_dest[i].ft = next_ft; + dest = gen_dest; + num_dest++; + flow_act->action &= ~(MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO | + MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS); + flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + handle = _mlx5_add_flow_rules(ft, spec, flow_act, dest, num_dest); + if (IS_ERR(handle)) + goto unlock; + + if (list_empty(&handle->rule[num_dest - 1]->next_ft)) { + mutex_lock(&next_ft->lock); + list_add(&handle->rule[num_dest - 1]->next_ft, + &next_ft->fwd_rules); + mutex_unlock(&next_ft->lock); + handle->rule[num_dest - 1]->sw_action = sw_action; + handle->rule[num_dest - 1]->ft = ft; + } +unlock: + mutex_unlock(&root->chain_lock); + kfree(gen_dest); return handle; } EXPORT_SYMBOL(mlx5_add_flow_rules); @@ -2353,6 +2402,17 @@ static int init_root_tree(struct mlx5_flow_steering *steering, return 0; } +static void del_sw_root_ns(struct fs_node *node) +{ + struct mlx5_flow_root_namespace *root_ns; + struct mlx5_flow_namespace *ns; + + fs_get_obj(ns, node); + root_ns = container_of(ns, struct mlx5_flow_root_namespace, ns); + mutex_destroy(&root_ns->chain_lock); + kfree(node); +} + static struct mlx5_flow_root_namespace *create_root_ns(struct mlx5_flow_steering *steering, enum fs_flow_table_type table_type) @@ -2379,7 +2439,7 @@ static struct mlx5_flow_root_namespace ns = 
&root_ns->ns; fs_init_namespace(ns); mutex_init(&root_ns->chain_lock); - tree_init_node(&ns->node, NULL, NULL); + tree_init_node(&ns->node, NULL, del_sw_root_ns); tree_add_node(&ns->node, NULL); return root_ns; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index 508108c58dae..825b662f809b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -138,6 +138,7 @@ struct fs_node { struct mlx5_flow_rule { struct fs_node node; + struct mlx5_flow_table *ft; struct mlx5_flow_destination dest_attr; /* next_ft should be accessed under chain_lock and only of * destination type is FWD_NEXT_fT. @@ -175,6 +176,7 @@ struct mlx5_flow_table { u32 flags; struct rhltable fgs_hash; enum mlx5_flow_table_miss_action def_miss_action; + struct mlx5_flow_namespace *ns; }; struct mlx5_ft_underlay_qp { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index 7db70b6ccc07..690b822c6152 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -407,7 +407,7 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv) err_destroy_direct_tirs: mlx5e_destroy_direct_tirs(priv, priv->direct_tir); err_destroy_indirect_tirs: - mlx5e_destroy_indirect_tirs(priv, true); + mlx5e_destroy_indirect_tirs(priv); err_destroy_direct_rqts: mlx5e_destroy_direct_rqts(priv, priv->direct_tir); err_destroy_indirect_rqts: @@ -423,7 +423,7 @@ static void mlx5i_cleanup_rx(struct mlx5e_priv *priv) { mlx5i_destroy_flow_steering(priv); mlx5e_destroy_direct_tirs(priv, priv->direct_tir); - mlx5e_destroy_indirect_tirs(priv, true); + mlx5e_destroy_indirect_tirs(priv); mlx5e_destroy_direct_rqts(priv, priv->direct_tir); mlx5e_destroy_rqt(priv, &priv->indir_rqt); mlx5e_close_drop_rq(&priv->drop_rq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c index 8809a65ecefb..e042e0924079 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c @@ -144,11 +144,11 @@ static int mlx5_set_entropy(struct mlx5_tun_entropy *tun_entropy, int mlx5_tun_entropy_refcount_inc(struct mlx5_tun_entropy *tun_entropy, int reformat_type) { - /* the default is error for unknown (non VXLAN/GRE tunnel types) */ int err = -EOPNOTSUPP; mutex_lock(&tun_entropy->lock); - if (reformat_type == MLX5_REFORMAT_TYPE_L2_TO_VXLAN && + if ((reformat_type == MLX5_REFORMAT_TYPE_L2_TO_VXLAN || + reformat_type == MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL) && tun_entropy->enabled) { /* in case entropy calculation is enabled for all tunneling * types, it is ok for VXLAN, so approve. 
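A note on the mlx5_add_flow_rules() rework in fs_core.c above: MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO and the new MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS are now served by one path. find_next_fwd_ft() resolves the next chained table (the next priority, or the first priority of the following namespace for FWD_NEXT_NS), and the core appends that table as an extra FWD_DEST destination before calling _mlx5_add_flow_rules(). A minimal caller-side sketch is below; the function name is illustrative (not from the patch), and it assumes the table's namespace actually has a follower under the same root and that fwd_next_prio_supported() holds for the table:

#include <linux/mlx5/fs.h>

static struct mlx5_flow_handle *
example_fwd_to_next_ns(struct mlx5_flow_table *ft,
		       struct mlx5_flow_spec *spec)
{
	struct mlx5_flow_act flow_act = {};

	/* No explicit destination is passed; mlx5_add_flow_rules()
	 * rewrites the action to FWD_DEST and appends the table that
	 * find_next_fwd_ft() resolves as the final destination.
	 */
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS;
	return mlx5_add_flow_rules(ft, spec, &flow_act, NULL, 0);
}

This is also why struct mlx5_flow_rule gained the ft back-pointer: connect_fwd_rules() uses it to skip re-pointing a FWD_NEXT_NS rule when the replacement next table sits in the rule's own namespace, which would defeat the forward-to-next-namespace semantics.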
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 4d2e1e982460..df46b1fce3a7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -177,6 +177,11 @@ static struct mlx5_profile profile[] = { #define FW_PRE_INIT_TIMEOUT_MILI 120000 #define FW_INIT_WARN_MESSAGE_INTERVAL 20000 +static int fw_initializing(struct mlx5_core_dev *dev) +{ + return ioread32be(&dev->iseg->initializing) >> 31; +} + static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili, u32 warn_time_mili) { @@ -946,6 +951,8 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot) goto err_cmd_cleanup; } + mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_UP); + err = mlx5_core_enable_hca(dev, 0); if (err) { mlx5_core_err(dev, "enable hca failed\n"); @@ -1007,6 +1014,7 @@ reclaim_boot_pages: err_disable_hca: mlx5_core_disable_hca(dev, 0); err_cmd_cleanup: + mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN); mlx5_cmd_cleanup(dev); return err; @@ -1024,6 +1032,7 @@ static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot) } mlx5_reclaim_startup_pages(dev); mlx5_core_disable_hca(dev, 0); + mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN); mlx5_cmd_cleanup(dev); return 0; @@ -1171,7 +1180,7 @@ int mlx5_load_one(struct mlx5_core_dev *dev, bool boot) err = mlx5_function_setup(dev, boot); if (err) - goto out; + goto err_function; if (boot) { err = mlx5_init_once(dev); @@ -1208,6 +1217,7 @@ err_load: mlx5_cleanup_once(dev); function_teardown: mlx5_function_teardown(dev, boot); +err_function: dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; out: mutex_unlock(&dev->intf_state_mutex); @@ -1267,7 +1277,7 @@ static int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx) mlx5_debugfs_root); if (!priv->dbg_root) { dev_err(dev->device, "mlx5_core: error, Cannot create debugfs dir, aborting\n"); - return -ENOMEM; + goto err_dbg_root; } err = mlx5_health_init(dev); @@ -1284,15 +1294,27 @@ err_pagealloc_init: mlx5_health_cleanup(dev); err_health_init: debugfs_remove(dev->priv.dbg_root); - +err_dbg_root: + mutex_destroy(&priv->pgdir_mutex); + mutex_destroy(&priv->alloc_mutex); + mutex_destroy(&priv->bfregs.wc_head.lock); + mutex_destroy(&priv->bfregs.reg_head.lock); + mutex_destroy(&dev->intf_state_mutex); return err; } static void mlx5_mdev_uninit(struct mlx5_core_dev *dev) { + struct mlx5_priv *priv = &dev->priv; + mlx5_pagealloc_cleanup(dev); mlx5_health_cleanup(dev); debugfs_remove_recursive(dev->priv.dbg_root); + mutex_destroy(&priv->pgdir_mutex); + mutex_destroy(&priv->alloc_mutex); + mutex_destroy(&priv->bfregs.wc_head.lock); + mutex_destroy(&priv->bfregs.reg_head.lock); + mutex_destroy(&dev->intf_state_mutex); } #define MLX5_IB_MOD "mlx5_ib" @@ -1522,6 +1544,22 @@ static void shutdown(struct pci_dev *pdev) mlx5_pci_disable_device(dev); } +static int mlx5_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct mlx5_core_dev *dev = pci_get_drvdata(pdev); + + mlx5_unload_one(dev, false); + + return 0; +} + +static int mlx5_resume(struct pci_dev *pdev) +{ + struct mlx5_core_dev *dev = pci_get_drvdata(pdev); + + return mlx5_load_one(dev, false); +} + static const struct pci_device_id mlx5_core_pci_table[] = { { PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_CONNECTIB) }, { PCI_VDEVICE(MELLANOX, 0x1012), MLX5_PCI_DEV_IS_VF}, /* Connect-IB VF */ @@ -1565,6 +1603,8 @@ static struct pci_driver mlx5_core_driver = { .id_table = mlx5_core_pci_table, .probe = init_one, .remove = remove_one, + 
.suspend = mlx5_suspend, + .resume = mlx5_resume, .shutdown = shutdown, .err_handler = &mlx5_err_handler, .sriov_configure = mlx5_core_sriov_configure, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c index 554811de4c9d..df1363a34a42 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c @@ -1662,7 +1662,7 @@ dr_action_modify_check_field_limitation(struct mlx5dr_action *action, } static bool -dr_action_modify_check_is_ttl_modify(const u64 *sw_action) +dr_action_modify_check_is_ttl_modify(const void *sw_action) { u16 sw_field = MLX5_GET(set_action_in, sw_action, field); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c index 48b6358b6845..890767a2a7cb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c @@ -297,7 +297,8 @@ mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type) dmn->mdev = mdev; dmn->type = type; refcount_set(&dmn->refcount, 1); - mutex_init(&dmn->mutex); + mutex_init(&dmn->info.rx.mutex); + mutex_init(&dmn->info.tx.mutex); if (dr_domain_caps_init(mdev, dmn)) { mlx5dr_err(dmn, "Failed init domain, no caps\n"); @@ -345,9 +346,9 @@ int mlx5dr_domain_sync(struct mlx5dr_domain *dmn, u32 flags) int ret = 0; if (flags & MLX5DR_DOMAIN_SYNC_FLAGS_SW) { - mutex_lock(&dmn->mutex); + mlx5dr_domain_lock(dmn); ret = mlx5dr_send_ring_force_drain(dmn); - mutex_unlock(&dmn->mutex); + mlx5dr_domain_unlock(dmn); if (ret) { mlx5dr_err(dmn, "Force drain failed flags: %d, ret: %d\n", flags, ret); @@ -371,7 +372,8 @@ int mlx5dr_domain_destroy(struct mlx5dr_domain *dmn) dr_domain_uninit_cache(dmn); dr_domain_uninit_resources(dmn); dr_domain_caps_uninit(dmn); - mutex_destroy(&dmn->mutex); + mutex_destroy(&dmn->info.tx.mutex); + mutex_destroy(&dmn->info.rx.mutex); kfree(dmn); return 0; } @@ -379,7 +381,7 @@ int mlx5dr_domain_destroy(struct mlx5dr_domain *dmn) void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn, struct mlx5dr_domain *peer_dmn) { - mutex_lock(&dmn->mutex); + mlx5dr_domain_lock(dmn); if (dmn->peer_dmn) refcount_dec(&dmn->peer_dmn->refcount); @@ -389,5 +391,5 @@ void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn, if (dmn->peer_dmn) refcount_inc(&dmn->peer_dmn->refcount); - mutex_unlock(&dmn->mutex); + mlx5dr_domain_unlock(dmn); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c index a95938874798..31abcbb95ca2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c @@ -690,7 +690,7 @@ mlx5dr_matcher_create(struct mlx5dr_table *tbl, refcount_set(&matcher->refcount, 1); INIT_LIST_HEAD(&matcher->matcher_list); - mutex_lock(&tbl->dmn->mutex); + mlx5dr_domain_lock(tbl->dmn); ret = dr_matcher_init(matcher, mask); if (ret) @@ -700,14 +700,14 @@ mlx5dr_matcher_create(struct mlx5dr_table *tbl, if (ret) goto matcher_uninit; - mutex_unlock(&tbl->dmn->mutex); + mlx5dr_domain_unlock(tbl->dmn); return matcher; matcher_uninit: dr_matcher_uninit(matcher); free_matcher: - mutex_unlock(&tbl->dmn->mutex); + mlx5dr_domain_unlock(tbl->dmn); kfree(matcher); dec_ref: refcount_dec(&tbl->refcount); @@ -791,13 +791,13 @@ int mlx5dr_matcher_destroy(struct 
mlx5dr_matcher *matcher) if (refcount_read(&matcher->refcount) > 1) return -EBUSY; - mutex_lock(&tbl->dmn->mutex); + mlx5dr_domain_lock(tbl->dmn); dr_matcher_remove_from_tbl(matcher); dr_matcher_uninit(matcher); refcount_dec(&matcher->tbl->refcount); - mutex_unlock(&tbl->dmn->mutex); + mlx5dr_domain_unlock(tbl->dmn); kfree(matcher); return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c index cce3ee7a6614..cd708dcc2e3a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c @@ -938,7 +938,10 @@ static bool dr_rule_verify(struct mlx5dr_matcher *matcher, static int dr_rule_destroy_rule_nic(struct mlx5dr_rule *rule, struct mlx5dr_rule_rx_tx *nic_rule) { + mlx5dr_domain_nic_lock(nic_rule->nic_matcher->nic_tbl->nic_dmn); dr_rule_clean_rule_members(rule, nic_rule); + mlx5dr_domain_nic_unlock(nic_rule->nic_matcher->nic_tbl->nic_dmn); + return 0; } @@ -1039,18 +1042,18 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule, if (dr_rule_skip(dmn->type, nic_dmn->ste_type, &matcher->mask, param)) return 0; + hw_ste_arr = kzalloc(DR_RULE_MAX_STE_CHAIN * DR_STE_SIZE, GFP_KERNEL); + if (!hw_ste_arr) + return -ENOMEM; + + mlx5dr_domain_nic_lock(nic_dmn); + ret = mlx5dr_matcher_select_builders(matcher, nic_matcher, dr_rule_get_ipv(¶m->outer), dr_rule_get_ipv(¶m->inner)); if (ret) - goto out_err; - - hw_ste_arr = kzalloc(DR_RULE_MAX_STE_CHAIN * DR_STE_SIZE, GFP_KERNEL); - if (!hw_ste_arr) { - ret = -ENOMEM; - goto out_err; - } + goto free_hw_ste; /* Set the tag values inside the ste array */ ret = mlx5dr_ste_build_ste_arr(matcher, nic_matcher, param, hw_ste_arr); @@ -1115,6 +1118,8 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule, if (htbl) mlx5dr_htbl_put(htbl); + mlx5dr_domain_nic_unlock(nic_dmn); + kfree(hw_ste_arr); return 0; @@ -1129,8 +1134,8 @@ free_rule: kfree(ste_info); } free_hw_ste: + mlx5dr_domain_nic_unlock(nic_dmn); kfree(hw_ste_arr); -out_err: return ret; } @@ -1232,31 +1237,23 @@ struct mlx5dr_rule *mlx5dr_rule_create(struct mlx5dr_matcher *matcher, { struct mlx5dr_rule *rule; - mutex_lock(&matcher->tbl->dmn->mutex); refcount_inc(&matcher->refcount); rule = dr_rule_create_rule(matcher, value, num_actions, actions); if (!rule) refcount_dec(&matcher->refcount); - mutex_unlock(&matcher->tbl->dmn->mutex); - return rule; } int mlx5dr_rule_destroy(struct mlx5dr_rule *rule) { struct mlx5dr_matcher *matcher = rule->matcher; - struct mlx5dr_table *tbl = rule->matcher->tbl; int ret; - mutex_lock(&tbl->dmn->mutex); - ret = dr_rule_destroy_rule(rule); - - mutex_unlock(&tbl->dmn->mutex); - if (!ret) refcount_dec(&matcher->refcount); + return ret; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c index b8d97d44be7b..f421013b0b54 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c @@ -357,9 +357,11 @@ static int dr_postsend_icm_data(struct mlx5dr_domain *dmn, u32 buff_offset; int ret; + spin_lock(&send_ring->lock); + ret = dr_handle_pending_wc(dmn, send_ring); if (ret) - return ret; + goto out_unlock; if (send_info->write.length > dmn->info.max_inline_size) { buff_offset = (send_ring->tx_head & @@ -377,7 +379,9 @@ static int dr_postsend_icm_data(struct mlx5dr_domain *dmn, dr_fill_data_segs(send_ring, send_info); dr_post_send(send_ring->qp, send_info); - return 0; 
+out_unlock: + spin_unlock(&send_ring->lock); + return ret; } static int dr_get_tbl_copy_details(struct mlx5dr_domain *dmn, @@ -563,9 +567,7 @@ int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn, send_info.remote_addr = action->rewrite.chunk->mr_addr; send_info.rkey = action->rewrite.chunk->rkey; - mutex_lock(&dmn->mutex); ret = dr_postsend_icm_data(dmn, &send_info); - mutex_unlock(&dmn->mutex); return ret; } @@ -886,6 +888,7 @@ int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn) init_attr.pdn = dmn->pdn; init_attr.uar = dmn->uar; init_attr.max_send_wr = QUEUE_SIZE; + spin_lock_init(&dmn->send_ring->lock); dmn->send_ring->qp = dr_create_rc_qp(dmn->mdev, &init_attr); if (!dmn->send_ring->qp) { @@ -990,7 +993,9 @@ int mlx5dr_send_ring_force_drain(struct mlx5dr_domain *dmn) return ret; } + spin_lock(&send_ring->lock); ret = dr_handle_pending_wc(dmn, send_ring); + spin_unlock(&send_ring->lock); return ret; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c index c0e3a1e7389d..00c2f598f034 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c @@ -112,7 +112,7 @@ static u32 dr_ste_crc32_calc(const void *input_data, size_t length) { u32 crc = crc32(0, input_data, length); - return htonl(crc); + return (__force u32)htonl(crc); } u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl) @@ -869,7 +869,7 @@ static void dr_ste_copy_mask_misc(char *mask, struct mlx5dr_match_misc *spec) static void dr_ste_copy_mask_spec(char *mask, struct mlx5dr_match_spec *spec) { - u32 raw_ip[4]; + __be32 raw_ip[4]; spec->smac_47_16 = MLX5_GET(fte_match_set_lyr_2_4, mask, smac_47_16); @@ -961,7 +961,6 @@ static void dr_ste_copy_mask_misc2(char *mask, struct mlx5dr_match_misc2 *spec) spec->metadata_reg_c_1 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_1); spec->metadata_reg_c_0 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_0); spec->metadata_reg_a = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_a); - spec->metadata_reg_b = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_b); } static void dr_ste_copy_mask_misc3(char *mask, struct mlx5dr_match_misc3 *spec) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c index c2fe48d7b75a..b599b6beb5b9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c @@ -14,7 +14,7 @@ int mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl, if (action && action->action_type != DR_ACTION_TYP_FT) return -EOPNOTSUPP; - mutex_lock(&tbl->dmn->mutex); + mlx5dr_domain_lock(tbl->dmn); if (!list_empty(&tbl->matcher_list)) last_matcher = list_last_entry(&tbl->matcher_list, @@ -78,7 +78,7 @@ int mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl, refcount_inc(&action->refcount); out: - mutex_unlock(&tbl->dmn->mutex); + mlx5dr_domain_unlock(tbl->dmn); return ret; } @@ -95,7 +95,7 @@ static void dr_table_uninit_fdb(struct mlx5dr_table *tbl) static void dr_table_uninit(struct mlx5dr_table *tbl) { - mutex_lock(&tbl->dmn->mutex); + mlx5dr_domain_lock(tbl->dmn); switch (tbl->dmn->type) { case MLX5DR_DOMAIN_TYPE_NIC_RX: @@ -112,7 +112,7 @@ static void dr_table_uninit(struct mlx5dr_table *tbl) break; } - mutex_unlock(&tbl->dmn->mutex); + mlx5dr_domain_unlock(tbl->dmn); } static int dr_table_init_nic(struct mlx5dr_domain *dmn, @@ -177,7 
+177,7 @@ static int dr_table_init(struct mlx5dr_table *tbl) INIT_LIST_HEAD(&tbl->matcher_list); - mutex_lock(&tbl->dmn->mutex); + mlx5dr_domain_lock(tbl->dmn); switch (tbl->dmn->type) { case MLX5DR_DOMAIN_TYPE_NIC_RX: @@ -201,7 +201,7 @@ static int dr_table_init(struct mlx5dr_table *tbl) break; } - mutex_unlock(&tbl->dmn->mutex); + mlx5dr_domain_unlock(tbl->dmn); return ret; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h index 984783238baa..0883956c58c0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h @@ -554,8 +554,7 @@ struct mlx5dr_match_misc2 { u32 metadata_reg_c_1; /* metadata_reg_c_1 */ u32 metadata_reg_c_0; /* metadata_reg_c_0 */ u32 metadata_reg_a; /* metadata_reg_a */ - u32 metadata_reg_b; /* metadata_reg_b */ - u8 reserved_auto2[8]; + u8 reserved_auto2[12]; }; struct mlx5dr_match_misc3 { @@ -636,6 +635,7 @@ struct mlx5dr_domain_rx_tx { u64 drop_icm_addr; u64 default_icm_addr; enum mlx5dr_ste_entry_type ste_type; + struct mutex mutex; /* protect rx/tx domain */ }; struct mlx5dr_domain_info { @@ -660,7 +660,6 @@ struct mlx5dr_domain { struct mlx5_uars_page *uar; enum mlx5dr_domain_type type; refcount_t refcount; - struct mutex mutex; /* protect domain */ struct mlx5dr_icm_pool *ste_icm_pool; struct mlx5dr_icm_pool *action_icm_pool; struct mlx5dr_send_ring *send_ring; @@ -814,6 +813,28 @@ struct mlx5dr_icm_chunk { struct list_head *miss_list; }; +static inline void mlx5dr_domain_nic_lock(struct mlx5dr_domain_rx_tx *nic_dmn) +{ + mutex_lock(&nic_dmn->mutex); +} + +static inline void mlx5dr_domain_nic_unlock(struct mlx5dr_domain_rx_tx *nic_dmn) +{ + mutex_unlock(&nic_dmn->mutex); +} + +static inline void mlx5dr_domain_lock(struct mlx5dr_domain *dmn) +{ + mlx5dr_domain_nic_lock(&dmn->info.rx); + mlx5dr_domain_nic_lock(&dmn->info.tx); +} + +static inline void mlx5dr_domain_unlock(struct mlx5dr_domain *dmn) +{ + mlx5dr_domain_nic_unlock(&dmn->info.tx); + mlx5dr_domain_nic_unlock(&dmn->info.rx); +} + static inline int mlx5dr_matcher_supp_flex_parser_icmp_v4(struct mlx5dr_cmd_caps *caps) { @@ -1043,6 +1064,7 @@ struct mlx5dr_send_ring { struct ib_wc wc[MAX_SEND_CQE]; u8 sync_buff[MIN_READ_SYNC]; struct mlx5dr_mr *sync_mr; + spinlock_t lock; /* Protect the data path of the send ring */ }; int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn); diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 3c3db1c874b6..38fa7304af0c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -5526,40 +5526,37 @@ MLXSW_ITEM32(reg, htgt, type, 0x00, 8, 4); enum mlxsw_reg_htgt_trap_group { MLXSW_REG_HTGT_TRAP_GROUP_EMAD, - MLXSW_REG_HTGT_TRAP_GROUP_SX2_RX, - MLXSW_REG_HTGT_TRAP_GROUP_SX2_CTRL, MLXSW_REG_HTGT_TRAP_GROUP_SP_STP, MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP, MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP, - MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP, + MLXSW_REG_HTGT_TRAP_GROUP_SP_MC_SNOOPING, MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP, MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF, MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM, MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST, - MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP, + MLXSW_REG_HTGT_TRAP_GROUP_SP_NEIGH_DISCOVERY, MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP, MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE, MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME, MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP, MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT, - MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD, - 
MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND, + MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6, MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR, MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0, MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1, MLXSW_REG_HTGT_TRAP_GROUP_SP_VRRP, - - __MLXSW_REG_HTGT_TRAP_GROUP_MAX, - MLXSW_REG_HTGT_TRAP_GROUP_MAX = __MLXSW_REG_HTGT_TRAP_GROUP_MAX - 1 -}; - -enum mlxsw_reg_htgt_discard_trap_group { - MLXSW_REG_HTGT_DISCARD_TRAP_GROUP_BASE = MLXSW_REG_HTGT_TRAP_GROUP_MAX, + MLXSW_REG_HTGT_TRAP_GROUP_SP_PKT_SAMPLE, + MLXSW_REG_HTGT_TRAP_GROUP_SP_FLOW_LOGGING, + MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS, + MLXSW_REG_HTGT_TRAP_GROUP_SP_BFD, MLXSW_REG_HTGT_TRAP_GROUP_SP_DUMMY, MLXSW_REG_HTGT_TRAP_GROUP_SP_L2_DISCARDS, MLXSW_REG_HTGT_TRAP_GROUP_SP_L3_DISCARDS, MLXSW_REG_HTGT_TRAP_GROUP_SP_TUNNEL_DISCARDS, MLXSW_REG_HTGT_TRAP_GROUP_SP_ACL_DISCARDS, + + __MLXSW_REG_HTGT_TRAP_GROUP_MAX, + MLXSW_REG_HTGT_TRAP_GROUP_MAX = __MLXSW_REG_HTGT_TRAP_GROUP_MAX - 1 }; /* reg_htgt_trap_group diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index f78bde8bc16e..c598ae9ed106 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -3555,6 +3555,7 @@ static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp) mlxsw_sp_port_remove(mlxsw_sp, i); mlxsw_sp_cpu_port_remove(mlxsw_sp); kfree(mlxsw_sp->ports); + mlxsw_sp->ports = NULL; } static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) @@ -3591,6 +3592,7 @@ err_port_create: mlxsw_sp_cpu_port_remove(mlxsw_sp); err_cpu_port_create: kfree(mlxsw_sp->ports); + mlxsw_sp->ports = NULL; return err; } @@ -3712,6 +3714,14 @@ static int mlxsw_sp_local_ports_offset(struct mlxsw_core *mlxsw_core, return mlxsw_core_res_get(mlxsw_core, local_ports_in_x_res_id); } +static struct mlxsw_sp_port * +mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u8 local_port) +{ + if (mlxsw_sp->ports && mlxsw_sp->ports[local_port]) + return mlxsw_sp->ports[local_port]; + return NULL; +} + static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, unsigned int count, struct netlink_ext_ack *extack) @@ -3725,7 +3735,7 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, int i; int err; - mlxsw_sp_port = mlxsw_sp->ports[local_port]; + mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port); if (!mlxsw_sp_port) { dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", local_port); @@ -3820,7 +3830,7 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port, int offset; int i; - mlxsw_sp_port = mlxsw_sp->ports[local_port]; + mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port); if (!mlxsw_sp_port) { dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", local_port); @@ -4035,54 +4045,56 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = { MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true), MLXSW_RXL(mlxsw_sp_rx_listener_ptp, LLDP, TRAP_TO_CPU, false, SP_LLDP, DISCARD), - MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false), - MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false), - MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false), - MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false), - MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false), - MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false), - MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false), - MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false), - 
MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, IP2ME, false), - MLXSW_SP_RXL_MARK(IPV6_MLDV12_LISTENER_QUERY, MIRROR_TO_CPU, IPV6_MLD, - false), - MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD, - false), - MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_DONE, TRAP_TO_CPU, IPV6_MLD, - false), - MLXSW_SP_RXL_NO_MARK(IPV6_MLDV2_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD, + MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, MC_SNOOPING, false), + MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, MC_SNOOPING, false), + MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, MC_SNOOPING, false), + MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, MC_SNOOPING, false), + MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, MC_SNOOPING, false), + MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, NEIGH_DISCOVERY, false), + MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, NEIGH_DISCOVERY, false), + MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, FID_MISS, false), + MLXSW_SP_RXL_MARK(IPV6_MLDV12_LISTENER_QUERY, MIRROR_TO_CPU, + MC_SNOOPING, false), + MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_REPORT, TRAP_TO_CPU, + MC_SNOOPING, false), + MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_DONE, TRAP_TO_CPU, MC_SNOOPING, false), + MLXSW_SP_RXL_NO_MARK(IPV6_MLDV2_LISTENER_REPORT, TRAP_TO_CPU, + MC_SNOOPING, false), /* L3 traps */ MLXSW_SP_RXL_L3_MARK(LBERROR, MIRROR_TO_CPU, LBERROR, false), MLXSW_SP_RXL_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false), MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP, false), - MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, false), + MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_DEST, TRAP_TO_CPU, IP2ME, false), MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false), - MLXSW_SP_RXL_MARK(IPV6_ALL_NODES_LINK, TRAP_TO_CPU, ROUTER_EXP, false), - MLXSW_SP_RXL_MARK(IPV6_ALL_ROUTERS_LINK, TRAP_TO_CPU, ROUTER_EXP, + MLXSW_SP_RXL_MARK(IPV6_ALL_NODES_LINK, TRAP_TO_CPU, IPV6, false), + MLXSW_SP_RXL_MARK(IPV6_ALL_ROUTERS_LINK, TRAP_TO_CPU, IPV6, false), MLXSW_SP_RXL_MARK(IPV4_OSPF, TRAP_TO_CPU, OSPF, false), MLXSW_SP_RXL_MARK(IPV6_OSPF, TRAP_TO_CPU, OSPF, false), + MLXSW_SP_RXL_MARK(IPV4_DHCP, TRAP_TO_CPU, DHCP, false), MLXSW_SP_RXL_MARK(IPV6_DHCP, TRAP_TO_CPU, DHCP, false), MLXSW_SP_RXL_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false), MLXSW_SP_RXL_MARK(IPV4_BGP, TRAP_TO_CPU, BGP, false), MLXSW_SP_RXL_MARK(IPV6_BGP, TRAP_TO_CPU, BGP, false), - MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_SOLICITATION, TRAP_TO_CPU, IPV6_ND, - false), - MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND, + MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_SOLICITATION, TRAP_TO_CPU, IPV6, false), - MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_SOLICITATION, TRAP_TO_CPU, IPV6_ND, + MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_ADVERTISEMENT, TRAP_TO_CPU, IPV6, false), - MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND, - false), - MLXSW_SP_RXL_MARK(L3_IPV6_REDIRECTION, TRAP_TO_CPU, IPV6_ND, false), + MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_SOLICITATION, TRAP_TO_CPU, + NEIGH_DISCOVERY, false), + MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_ADVERTISEMENT, TRAP_TO_CPU, + NEIGH_DISCOVERY, false), + MLXSW_SP_RXL_MARK(L3_IPV6_REDIRECTION, TRAP_TO_CPU, IPV6, false), MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, false), - MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false), - MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false), + MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, IP2ME, false), + MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, IP2ME, false), MLXSW_SP_RXL_MARK(IPV4_VRRP, 
TRAP_TO_CPU, VRRP, false), MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, VRRP, false), + MLXSW_SP_RXL_MARK(IPV4_BFD, TRAP_TO_CPU, BFD, false), + MLXSW_SP_RXL_MARK(IPV6_BFD, TRAP_TO_CPU, BFD, false), MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_CLASS_E, FORWARD, ROUTER_EXP, false), MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_MC_DMAC, FORWARD, @@ -4093,17 +4105,18 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = { ROUTER_EXP, false), /* PKT Sample trap */ MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU, - false, SP_IP2ME, DISCARD), + false, SP_PKT_SAMPLE, DISCARD), /* ACL trap */ - MLXSW_SP_RXL_NO_MARK(ACL0, TRAP_TO_CPU, IP2ME, false), + MLXSW_SP_RXL_NO_MARK(ACL0, TRAP_TO_CPU, FLOW_LOGGING, false), /* Multicast Router Traps */ MLXSW_SP_RXL_MARK(IPV4_PIM, TRAP_TO_CPU, PIM, false), MLXSW_SP_RXL_MARK(IPV6_PIM, TRAP_TO_CPU, PIM, false), MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false), MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false), /* NVE traps */ - MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, ARP, false), - MLXSW_SP_RXL_NO_MARK(NVE_DECAP_ARP, TRAP_TO_CPU, ARP, false), + MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, NEIGH_DISCOVERY, false), + MLXSW_SP_RXL_NO_MARK(NVE_DECAP_ARP, TRAP_TO_CPU, NEIGH_DISCOVERY, + false), /* PTP traps */ MLXSW_RXL(mlxsw_sp_rx_listener_ptp, PTP0, TRAP_TO_CPU, false, SP_PTP0, DISCARD), @@ -4142,25 +4155,23 @@ static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF: case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM: case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP: rate = 128; burst_size = 7; break; - case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP: - case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_MC_SNOOPING: rate = 16 * 1024; burst_size = 10; break; case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP: - case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP: - case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_NEIGH_DISCOVERY: case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE: - case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6: case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST: - rate = 1024; - burst_size = 7; - break; + case MLXSW_REG_HTGT_TRAP_GROUP_SP_FLOW_LOGGING: case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS: rate = 1024; burst_size = 7; break; @@ -4176,6 +4187,10 @@ static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) rate = 360; burst_size = 7; break; + case MLXSW_REG_HTGT_TRAP_GROUP_SP_BFD: + rate = 20 * 1024; + burst_size = 10; + break; default: continue; } @@ -4217,35 +4232,38 @@ static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core) case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM: case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0: case MLXSW_REG_HTGT_TRAP_GROUP_SP_VRRP: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_BFD: priority = 5; tc = 5; break; + case MLXSW_REG_HTGT_TRAP_GROUP_SP_FLOW_LOGGING: case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP: - case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP: priority = 4; tc = 4; break; - case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP: - case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME: - case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_MC_SNOOPING: priority = 3; tc = 3; break; - case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP: - case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_NEIGH_DISCOVERY: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME: + case 
MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6: case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP: priority = 2; tc = 2; break; case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE: case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS: priority = 1; tc = 1; break; + case MLXSW_REG_HTGT_TRAP_GROUP_SP_PKT_SAMPLE: case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR: priority = 0; - tc = 1; + tc = 0; break; case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT: priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c index 968f0902e4fe..21bfb2f6a6f0 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c @@ -614,7 +614,7 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_egress[] = { #define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(0, 0, MLXSW_SP_SB_POOL_EGR_CPU) static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = { - MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU), MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU), MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU), MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU), diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c index b286fe158820..51e1b3930c56 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c @@ -30,14 +30,14 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, return -EOPNOTSUPP; act = flow_action_first_entry_get(flow_action); - if (act->hw_stats == FLOW_ACTION_HW_STATS_ANY || - act->hw_stats == FLOW_ACTION_HW_STATS_IMMEDIATE) { + if (act->hw_stats & FLOW_ACTION_HW_STATS_DISABLED) { + /* Nothing to do */ + } else if (act->hw_stats & FLOW_ACTION_HW_STATS_IMMEDIATE) { /* Count action is inserted first */ err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei, extack); if (err) return err; - } else if (act->hw_stats != FLOW_ACTION_HW_STATS_DISABLED && - act->hw_stats != FLOW_ACTION_HW_STATS_DONT_CARE) { + } else { NL_SET_ERR_MSG_MOD(extack, "Unsupported action HW stats type"); return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 71aee4914619..770de0222e7b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -5003,9 +5003,11 @@ static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt) { /* Packets with link-local destination IP arriving to the router * are trapped to the CPU, so no need to program specific routes - * for them. + * for them. Only allow prefix routes (usually one fe80::/64) so + * that packets are trapped for the right reason. */ - if (ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_LINKLOCAL) + if ((ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_LINKLOCAL) && + (rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST))) return true; /* Multicast routes aren't supported, so ignore them. 
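The spectrum_router hunk just above narrows mlxsw_sp_fib6_rt_should_ignore(): instead of skipping every link-local destination, it now skips only local/anycast entries, so the fe80::/64 prefix route is still programmed and packets are trapped for the right reason. A standalone sketch of the tightened predicate, using locally defined stand-in constants (ADDR_LINKLOCAL, RT_LOCAL and RT_ANYCAST approximate IPV6_ADDR_LINKLOCAL, RTF_LOCAL and RTF_ANYCAST; rt6_entry is a reduced stand-in for the kernel's fib6_info):

#include <stdbool.h>
#include <stdio.h>

#define ADDR_LINKLOCAL  0x0020  /* stand-in for IPV6_ADDR_LINKLOCAL */
#define RT_LOCAL        0x8000  /* stand-in for RTF_LOCAL */
#define RT_ANYCAST      0x0100  /* stand-in for RTF_ANYCAST */

struct rt6_entry {
        unsigned int addr_type; /* classification of the destination */
        unsigned int flags;     /* route flags */
};

/* Skip only link-local host/anycast entries; a plain link-local
 * prefix route (usually one fe80::/64) is kept and offloaded. */
static bool rt6_should_ignore(const struct rt6_entry *rt)
{
        return (rt->addr_type & ADDR_LINKLOCAL) &&
               (rt->flags & (RT_LOCAL | RT_ANYCAST));
}

int main(void)
{
        struct rt6_entry host = { ADDR_LINKLOCAL, RT_LOCAL };
        struct rt6_entry prefix = { ADDR_LINKLOCAL, 0 };

        printf("host ignored: %d, prefix ignored: %d\n",
               rt6_should_ignore(&host), rt6_should_ignore(&prefix));
        return 0;
}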
Neighbour @@ -7572,7 +7574,7 @@ static struct mlxsw_sp_fid * mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif, struct netlink_ext_ack *extack) { - struct net_device *br_dev = rif->dev; + struct net_device *br_dev; u16 vid; int err; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c index 3a13b17cd1b8..f4b812276a5a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c @@ -21,7 +21,6 @@ struct mlxsw_sp_trap_group_item { struct devlink_trap_group group; u16 hw_group_id; u8 priority; - u8 tc; }; #define MLXSW_SP_TRAP_LISTENERS_MAX 3 @@ -207,25 +206,21 @@ static const struct mlxsw_sp_trap_group_item mlxsw_sp_trap_group_items_arr[] = { .group = DEVLINK_TRAP_GROUP_GENERIC(L2_DROPS, 1), .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_L2_DISCARDS, .priority = 0, - .tc = 1, }, { .group = DEVLINK_TRAP_GROUP_GENERIC(L3_DROPS, 1), .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_L3_DISCARDS, .priority = 0, - .tc = 1, }, { .group = DEVLINK_TRAP_GROUP_GENERIC(TUNNEL_DROPS, 1), .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_TUNNEL_DISCARDS, .priority = 0, - .tc = 1, }, { .group = DEVLINK_TRAP_GROUP_GENERIC(ACL_DROPS, 1), .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_ACL_DISCARDS, .priority = 0, - .tc = 1, }, }; @@ -446,8 +441,6 @@ static const struct mlxsw_sp_trap_item mlxsw_sp_trap_items_arr[] = { }, }; -#define MLXSW_SP_THIN_POLICER_ID (MLXSW_REG_HTGT_TRAP_GROUP_MAX + 1) - static struct mlxsw_sp_trap_policer_item * mlxsw_sp_trap_policer_item_lookup(struct mlxsw_sp *mlxsw_sp, u32 id) { @@ -492,14 +485,21 @@ mlxsw_sp_trap_item_lookup(struct mlxsw_sp *mlxsw_sp, u16 id) static int mlxsw_sp_trap_cpu_policers_set(struct mlxsw_sp *mlxsw_sp) { + struct mlxsw_sp_trap *trap = mlxsw_sp->trap; char qpcr_pl[MLXSW_REG_QPCR_LEN]; + u16 hw_id; /* The purpose of "thin" policer is to drop as many packets * as possible. The dummy group is using it. 
*/ - __set_bit(MLXSW_SP_THIN_POLICER_ID, mlxsw_sp->trap->policers_usage); - mlxsw_reg_qpcr_pack(qpcr_pl, MLXSW_SP_THIN_POLICER_ID, - MLXSW_REG_QPCR_IR_UNITS_M, false, 1, 4); + hw_id = find_first_zero_bit(trap->policers_usage, trap->max_policers); + if (WARN_ON(hw_id == trap->max_policers)) + return -ENOBUFS; + + __set_bit(hw_id, trap->policers_usage); + trap->thin_policer_hw_id = hw_id; + mlxsw_reg_qpcr_pack(qpcr_pl, hw_id, MLXSW_REG_QPCR_IR_UNITS_M, + false, 1, 4); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qpcr), qpcr_pl); } @@ -508,7 +508,7 @@ static int mlxsw_sp_trap_dummy_group_init(struct mlxsw_sp *mlxsw_sp) char htgt_pl[MLXSW_REG_HTGT_LEN]; mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_SP_DUMMY, - MLXSW_SP_THIN_POLICER_ID, 0, 1); + mlxsw_sp->trap->thin_policer_hw_id, 0, 1); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl); } @@ -865,7 +865,7 @@ __mlxsw_sp_trap_group_init(struct mlxsw_core *mlxsw_core, } mlxsw_reg_htgt_pack(htgt_pl, group_item->hw_group_id, hw_policer_id, - group_item->priority, group_item->tc); + group_item->priority, group_item->priority); return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.h index 759146897b3a..13ac412f4d53 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.h @@ -17,6 +17,8 @@ struct mlxsw_sp_trap { struct mlxsw_sp_trap_item *trap_items_arr; u64 traps_count; /* Number of registered traps */ + u16 thin_policer_hw_id; + u64 max_policers; unsigned long policers_usage[]; /* Usage bitmap */ }; diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c index 90535820b559..b438f5576e18 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c +++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c @@ -1259,6 +1259,7 @@ static void mlxsw_sx_ports_remove(struct mlxsw_sx *mlxsw_sx) if (mlxsw_sx_port_created(mlxsw_sx, i)) mlxsw_sx_port_remove(mlxsw_sx, i); kfree(mlxsw_sx->ports); + mlxsw_sx->ports = NULL; } static int mlxsw_sx_ports_create(struct mlxsw_sx *mlxsw_sx) @@ -1293,6 +1294,7 @@ err_port_module_info_get: if (mlxsw_sx_port_created(mlxsw_sx, i)) mlxsw_sx_port_remove(mlxsw_sx, i); kfree(mlxsw_sx->ports); + mlxsw_sx->ports = NULL; return err; } @@ -1376,6 +1378,12 @@ static int mlxsw_sx_port_type_set(struct mlxsw_core *mlxsw_core, u8 local_port, u8 module, width; int err; + if (!mlxsw_sx->ports || !mlxsw_sx->ports[local_port]) { + dev_err(mlxsw_sx->bus_info->dev, "Port number \"%d\" does not exist\n", + local_port); + return -EINVAL; + } + if (new_type == DEVLINK_PORT_TYPE_AUTO) return -EOPNOTSUPP; @@ -1396,6 +1404,11 @@ err_port_module_info_get: return err; } +enum { + MLXSW_REG_HTGT_TRAP_GROUP_SX2_RX = 1, + MLXSW_REG_HTGT_TRAP_GROUP_SX2_CTRL = 2, +}; + #define MLXSW_SX_RXL(_trap_id) \ MLXSW_RXL(mlxsw_sx_rx_listener_func, _trap_id, TRAP_TO_CPU, \ false, SX2_RX, FORWARD) diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h index eaa521b7561b..28e60697d14e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/trap.h +++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h @@ -55,16 +55,19 @@ enum { MLXSW_TRAP_ID_IPV4_BGP = 0x88, MLXSW_TRAP_ID_IPV6_BGP = 0x89, MLXSW_TRAP_ID_L3_IPV6_ROUTER_SOLICITATION = 0x8A, - MLXSW_TRAP_ID_L3_IPV6_ROUTER_ADVERTISMENT = 0x8B, + MLXSW_TRAP_ID_L3_IPV6_ROUTER_ADVERTISEMENT = 0x8B, 
MLXSW_TRAP_ID_L3_IPV6_NEIGHBOR_SOLICITATION = 0x8C, - MLXSW_TRAP_ID_L3_IPV6_NEIGHBOR_ADVERTISMENT = 0x8D, + MLXSW_TRAP_ID_L3_IPV6_NEIGHBOR_ADVERTISEMENT = 0x8D, MLXSW_TRAP_ID_L3_IPV6_REDIRECTION = 0x8E, + MLXSW_TRAP_ID_IPV4_DHCP = 0x8F, MLXSW_TRAP_ID_HOST_MISS_IPV4 = 0x90, MLXSW_TRAP_ID_IPV6_MC_LINK_LOCAL_DEST = 0x91, MLXSW_TRAP_ID_HOST_MISS_IPV6 = 0x92, MLXSW_TRAP_ID_IPIP_DECAP_ERROR = 0xB1, MLXSW_TRAP_ID_NVE_DECAP_ARP = 0xB8, MLXSW_TRAP_ID_NVE_ENCAP_ARP = 0xBD, + MLXSW_TRAP_ID_IPV4_BFD = 0xD0, + MLXSW_TRAP_ID_IPV6_BFD = 0xD1, MLXSW_TRAP_ID_ROUTER_ALERT_IPV4 = 0xD6, MLXSW_TRAP_ID_ROUTER_ALERT_IPV6 = 0xD7, MLXSW_TRAP_ID_DISCARD_NON_ROUTABLE = 0x11A, diff --git a/drivers/net/ethernet/micrel/Kconfig b/drivers/net/ethernet/micrel/Kconfig index b9c4d48e28e4..09f35209d43d 100644 --- a/drivers/net/ethernet/micrel/Kconfig +++ b/drivers/net/ethernet/micrel/Kconfig @@ -38,6 +38,8 @@ config KS8851_MLL tristate "Micrel KS8851 MLL" depends on HAS_IOMEM select MII + select CRC32 + select EEPROM_93CX6 ---help--- This platform driver is for the Micrel KS8851 Address/data bus multiplexed network chip. diff --git a/drivers/net/ethernet/micrel/Makefile b/drivers/net/ethernet/micrel/Makefile index 6d8ac5527aef..5cc00d22c708 100644 --- a/drivers/net/ethernet/micrel/Makefile +++ b/drivers/net/ethernet/micrel/Makefile @@ -5,5 +5,7 @@ obj-$(CONFIG_KS8842) += ks8842.o obj-$(CONFIG_KS8851) += ks8851.o +ks8851-objs = ks8851_common.o ks8851_spi.o obj-$(CONFIG_KS8851_MLL) += ks8851_mll.o +ks8851_mll-objs = ks8851_common.o ks8851_par.o obj-$(CONFIG_KSZ884X_PCI) += ksz884x.o diff --git a/drivers/net/ethernet/micrel/ks8851.h b/drivers/net/ethernet/micrel/ks8851.h index 8f834aef8e32..2b319e451121 100644 --- a/drivers/net/ethernet/micrel/ks8851.h +++ b/drivers/net/ethernet/micrel/ks8851.h @@ -7,6 +7,11 @@ * KS8851 register definitions */ +#ifndef __KS8851_H__ +#define __KS8851_H__ + +#include <linux/eeprom_93cx6.h> + #define KS_CCR 0x08 #define CCR_LE (1 << 10) /* KSZ8851-16MLL */ #define CCR_EEPROM (1 << 9) @@ -19,7 +24,7 @@ #define CCR_32PIN (1 << 0) /* KSZ8851SNL */ /* MAC address registers */ -#define KS_MAR(_m) (0x15 - (_m)) +#define KS_MAR(_m) (0x14 - (_m)) #define KS_MARL 0x10 #define KS_MARM 0x12 #define KS_MARH 0x14 @@ -300,3 +305,147 @@ #define TXFR_TXIC (1 << 15) #define TXFR_TXFID_MASK (0x3f << 0) #define TXFR_TXFID_SHIFT (0) + +/** + * struct ks8851_rxctrl - KS8851 driver rx control + * @mchash: Multicast hash-table data. + * @rxcr1: KS_RXCR1 register setting + * @rxcr2: KS_RXCR2 register setting + * + * Representation of the settings needed to control the receive filtering, + * such as the multicast hash-filter and the receive register settings. This + * is used to work out whether the receive settings have changed, and then + * to issue the new settings to the worker that will send the necessary + * commands. + */ +struct ks8851_rxctrl { + u16 mchash[4]; + u16 rxcr1; + u16 rxcr2; +}; + +/** + * union ks8851_tx_hdr - tx header data + * @txb: The header as bytes + * @txw: The header as 16bit, little-endian words + * + * A dual representation of the tx header data to allow + * access to individual bytes, and to allow 16bit accesses + * with 16bit alignment. + */ +union ks8851_tx_hdr { + u8 txb[6]; + __le16 txw[3]; +}; + +/** + * struct ks8851_net - KS8851 driver private data + * @netdev: The network device we're bound to + * @statelock: Lock on this structure for tx list. + * @mii: The MII state information for the mii calls. + * @rxctrl: RX settings for @rxctrl_work.
+ * @rxctrl_work: Work queue for updating RX mode and multicast lists + * @txq: Queue of packets for transmission. + * @txh: Space for generating packet TX header in DMA-able data + * @rxd: Space for receiving SPI data, in DMA-able space. + * @txd: Space for transmitting SPI data, in DMA-able space. + * @msg_enable: The message flags controlling driver output (see ethtool). + * @fid: Incrementing frame id tag. + * @rc_ier: Cached copy of KS_IER. + * @rc_ccr: Cached copy of KS_CCR. + * @rc_rxqcr: Cached copy of KS_RXQCR. + * @eeprom: 93CX6 EEPROM state for accessing on-board EEPROM. + * @vdd_reg: Optional regulator supplying the chip + * @vdd_io: Optional digital power supply for IO + * @gpio: Optional reset_n gpio + * @lock: Bus access lock callback + * @unlock: Bus access unlock callback + * @rdreg16: 16bit register read callback + * @wrreg16: 16bit register write callback + * @rdfifo: FIFO read callback + * @wrfifo: FIFO write callback + * @start_xmit: start_xmit() implementation callback + * @rx_skb: rx_skb() implementation callback + * @flush_tx_work: flush_tx_work() implementation callback + * + * The @statelock is used to protect information in the structure which may + * need to be accessed via several sources, such as the network driver layer + * or one of the work queues. + * + * We align the buffers we may use for rx/tx to ensure that if the SPI driver + * wants to DMA map them, it will not have any problems with data the driver + * modifies. + */ +struct ks8851_net { + struct net_device *netdev; + spinlock_t statelock; + + union ks8851_tx_hdr txh ____cacheline_aligned; + u8 rxd[8]; + u8 txd[8]; + + u32 msg_enable ____cacheline_aligned; + u16 tx_space; + u8 fid; + + u16 rc_ier; + u16 rc_rxqcr; + u16 rc_ccr; + + struct mii_if_info mii; + struct ks8851_rxctrl rxctrl; + + struct work_struct rxctrl_work; + + struct sk_buff_head txq; + + struct eeprom_93cx6 eeprom; + struct regulator *vdd_reg; + struct regulator *vdd_io; + int gpio; + + void (*lock)(struct ks8851_net *ks, + unsigned long *flags); + void (*unlock)(struct ks8851_net *ks, + unsigned long *flags); + unsigned int (*rdreg16)(struct ks8851_net *ks, + unsigned int reg); + void (*wrreg16)(struct ks8851_net *ks, + unsigned int reg, unsigned int val); + void (*rdfifo)(struct ks8851_net *ks, u8 *buff, + unsigned int len); + void (*wrfifo)(struct ks8851_net *ks, + struct sk_buff *txp, bool irq); + netdev_tx_t (*start_xmit)(struct sk_buff *skb, + struct net_device *dev); + void (*rx_skb)(struct ks8851_net *ks, + struct sk_buff *skb); + void (*flush_tx_work)(struct ks8851_net *ks); +}; + +int ks8851_probe_common(struct net_device *netdev, struct device *dev, + int msg_en); +int ks8851_remove_common(struct device *dev); +int ks8851_suspend(struct device *dev); +int ks8851_resume(struct device *dev); + +static __maybe_unused SIMPLE_DEV_PM_OPS(ks8851_pm_ops, + ks8851_suspend, ks8851_resume); + +/** + * ks8851_done_tx - update and then free skbuff after transmitting + * @ks: The device state + * @txb: The buffer transmitted + */ +static void __maybe_unused ks8851_done_tx(struct ks8851_net *ks, + struct sk_buff *txb) +{ + struct net_device *dev = ks->netdev; + + dev->stats.tx_bytes += txb->len; + dev->stats.tx_packets++; + + dev_kfree_skb(txb); +} + +#endif /* __KS8851_H__ */ diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851_common.c index 33305c9c5a62..d65872172229 100644 --- a/drivers/net/ethernet/micrel/ks8851.c +++ b/drivers/net/ethernet/micrel/ks8851_common.c @@ -19,10 +19,8 @@ #include 
<linux/cache.h> #include <linux/crc32.h> #include <linux/mii.h> -#include <linux/eeprom_93cx6.h> #include <linux/regulator/consumer.h> -#include <linux/spi/spi.h> #include <linux/gpio.h> #include <linux/of_gpio.h> #include <linux/of_net.h> @@ -30,255 +28,41 @@ #include "ks8851.h" /** - * struct ks8851_rxctrl - KS8851 driver rx control - * @mchash: Multicast hash-table data. - * @rxcr1: KS_RXCR1 register setting - * @rxcr2: KS_RXCR2 register setting - * - * Representation of the settings needs to control the receive filtering - * such as the multicast hash-filter and the receive register settings. This - * is used to make the job of working out if the receive settings change and - * then issuing the new settings to the worker that will send the necessary - * commands. - */ -struct ks8851_rxctrl { - u16 mchash[4]; - u16 rxcr1; - u16 rxcr2; -}; - -/** - * union ks8851_tx_hdr - tx header data - * @txb: The header as bytes - * @txw: The header as 16bit, little-endian words - * - * A dual representation of the tx header data to allow - * access to individual bytes, and to allow 16bit accesses - * with 16bit alignment. - */ -union ks8851_tx_hdr { - u8 txb[6]; - __le16 txw[3]; -}; - -/** - * struct ks8851_net - KS8851 driver private data - * @netdev: The network device we're bound to - * @spidev: The spi device we're bound to. - * @lock: Lock to ensure that the device is not accessed when busy. - * @statelock: Lock on this structure for tx list. - * @mii: The MII state information for the mii calls. - * @rxctrl: RX settings for @rxctrl_work. - * @tx_work: Work queue for tx packets - * @rxctrl_work: Work queue for updating RX mode and multicast lists - * @txq: Queue of packets for transmission. - * @spi_msg1: pre-setup SPI transfer with one message, @spi_xfer1. - * @spi_msg2: pre-setup SPI transfer with two messages, @spi_xfer2. - * @txh: Space for generating packet TX header in DMA-able data - * @rxd: Space for receiving SPI data, in DMA-able space. - * @txd: Space for transmitting SPI data, in DMA-able space. - * @msg_enable: The message flags controlling driver output (see ethtool). - * @fid: Incrementing frame id tag. - * @rc_ier: Cached copy of KS_IER. - * @rc_ccr: Cached copy of KS_CCR. - * @rc_rxqcr: Cached copy of KS_RXQCR. - * @eeprom: 93CX6 EEPROM state for accessing on-board EEPROM. - * @vdd_reg: Optional regulator supplying the chip - * @vdd_io: Optional digital power supply for IO - * @gpio: Optional reset_n gpio - * - * The @lock ensures that the chip is protected when certain operations are - * in progress. When the read or write packet transfer is in progress, most - * of the chip registers are not ccessible until the transfer is finished and - * the DMA has been de-asserted. - * - * The @statelock is used to protect information in the structure which may - * need to be accessed via several sources, such as the network driver layer - * or one of the work queues. - * - * We align the buffers we may use for rx/tx to ensure that if the SPI driver - * wants to DMA map them, it will not have any problems with data the driver - * modifies. 
- */ -struct ks8851_net { - struct net_device *netdev; - struct spi_device *spidev; - struct mutex lock; - spinlock_t statelock; - - union ks8851_tx_hdr txh ____cacheline_aligned; - u8 rxd[8]; - u8 txd[8]; - - u32 msg_enable ____cacheline_aligned; - u16 tx_space; - u8 fid; - - u16 rc_ier; - u16 rc_rxqcr; - u16 rc_ccr; - - struct mii_if_info mii; - struct ks8851_rxctrl rxctrl; - - struct work_struct tx_work; - struct work_struct rxctrl_work; - - struct sk_buff_head txq; - - struct spi_message spi_msg1; - struct spi_message spi_msg2; - struct spi_transfer spi_xfer1; - struct spi_transfer spi_xfer2[2]; - - struct eeprom_93cx6 eeprom; - struct regulator *vdd_reg; - struct regulator *vdd_io; - int gpio; -}; - -static int msg_enable; - -/* SPI frame opcodes */ -#define KS_SPIOP_RD (0x00) -#define KS_SPIOP_WR (0x40) -#define KS_SPIOP_RXFIFO (0x80) -#define KS_SPIOP_TXFIFO (0xC0) - -/* shift for byte-enable data */ -#define BYTE_EN(_x) ((_x) << 2) - -/* turn register number and byte-enable mask into data for start of packet */ -#define MK_OP(_byteen, _reg) (BYTE_EN(_byteen) | (_reg) << (8+2) | (_reg) >> 6) - -/* SPI register read/write calls. + * ks8851_lock - register access lock + * @ks: The chip state + * @flags: Spinlock flags * - * All these calls issue SPI transactions to access the chip's registers. They - * all require that the necessary lock is held to prevent accesses when the - * chip is busy transferring packet data (RX/TX FIFO accesses). + * Claim chip register access lock */ +static void ks8851_lock(struct ks8851_net *ks, unsigned long *flags) +{ + ks->lock(ks, flags); +} /** - * ks8851_wrreg16 - write 16bit register value to chip + * ks8851_unlock - register access unlock * @ks: The chip state - * @reg: The register address - * @val: The value to write + * @flags: Spinlock flags * - * Issue a write to put the value @val into the register specified in @reg. + * Release chip register access lock */ -static void ks8851_wrreg16(struct ks8851_net *ks, unsigned reg, unsigned val) +static void ks8851_unlock(struct ks8851_net *ks, unsigned long *flags) { - struct spi_transfer *xfer = &ks->spi_xfer1; - struct spi_message *msg = &ks->spi_msg1; - __le16 txb[2]; - int ret; - - txb[0] = cpu_to_le16(MK_OP(reg & 2 ? 0xC : 0x03, reg) | KS_SPIOP_WR); - txb[1] = cpu_to_le16(val); - - xfer->tx_buf = txb; - xfer->rx_buf = NULL; - xfer->len = 4; - - ret = spi_sync(ks->spidev, msg); - if (ret < 0) - netdev_err(ks->netdev, "spi_sync() failed\n"); + ks->unlock(ks, flags); } /** - * ks8851_wrreg8 - write 8bit register value to chip + * ks8851_wrreg16 - write 16bit register value to chip * @ks: The chip state * @reg: The register address * @val: The value to write * * Issue a write to put the value @val into the register specified in @reg. */ -static void ks8851_wrreg8(struct ks8851_net *ks, unsigned reg, unsigned val) -{ - struct spi_transfer *xfer = &ks->spi_xfer1; - struct spi_message *msg = &ks->spi_msg1; - __le16 txb[2]; - int ret; - int bit; - - bit = 1 << (reg & 3); - - txb[0] = cpu_to_le16(MK_OP(bit, reg) | KS_SPIOP_WR); - txb[1] = val; - - xfer->tx_buf = txb; - xfer->rx_buf = NULL; - xfer->len = 3; - - ret = spi_sync(ks->spidev, msg); - if (ret < 0) - netdev_err(ks->netdev, "spi_sync() failed\n"); -} - -/** - * ks8851_rdreg - issue read register command and return the data - * @ks: The device state - * @op: The register address and byte enables in message format. - * @rxb: The RX buffer to return the result into - * @rxl: The length of data expected. 
- * - * This is the low level read call that issues the necessary spi message(s) - * to read data from the register specified in @op. - */ -static void ks8851_rdreg(struct ks8851_net *ks, unsigned op, - u8 *rxb, unsigned rxl) -{ - struct spi_transfer *xfer; - struct spi_message *msg; - __le16 *txb = (__le16 *)ks->txd; - u8 *trx = ks->rxd; - int ret; - - txb[0] = cpu_to_le16(op | KS_SPIOP_RD); - - if (ks->spidev->master->flags & SPI_MASTER_HALF_DUPLEX) { - msg = &ks->spi_msg2; - xfer = ks->spi_xfer2; - - xfer->tx_buf = txb; - xfer->rx_buf = NULL; - xfer->len = 2; - - xfer++; - xfer->tx_buf = NULL; - xfer->rx_buf = trx; - xfer->len = rxl; - } else { - msg = &ks->spi_msg1; - xfer = &ks->spi_xfer1; - - xfer->tx_buf = txb; - xfer->rx_buf = trx; - xfer->len = rxl + 2; - } - - ret = spi_sync(ks->spidev, msg); - if (ret < 0) - netdev_err(ks->netdev, "read: spi_sync() failed\n"); - else if (ks->spidev->master->flags & SPI_MASTER_HALF_DUPLEX) - memcpy(rxb, trx, rxl); - else - memcpy(rxb, trx + 2, rxl); -} - -/** - * ks8851_rdreg8 - read 8 bit register from device - * @ks: The chip information - * @reg: The register address - * - * Read a 8bit register from the chip, returning the result -*/ -static unsigned ks8851_rdreg8(struct ks8851_net *ks, unsigned reg) +static void ks8851_wrreg16(struct ks8851_net *ks, unsigned int reg, + unsigned int val) { - u8 rxb[1]; - - ks8851_rdreg(ks, MK_OP(1 << (reg & 3), reg), rxb, 1); - return rxb[0]; + ks->wrreg16(ks, reg, val); } /** @@ -287,32 +71,11 @@ static unsigned ks8851_rdreg8(struct ks8851_net *ks, unsigned reg) * @reg: The register address * * Read a 16bit register from the chip, returning the result -*/ -static unsigned ks8851_rdreg16(struct ks8851_net *ks, unsigned reg) -{ - __le16 rx = 0; - - ks8851_rdreg(ks, MK_OP(reg & 2 ? 0xC : 0x3, reg), (u8 *)&rx, 2); - return le16_to_cpu(rx); -} - -/** - * ks8851_rdreg32 - read 32 bit register from device - * @ks: The chip information - * @reg: The register address - * - * Read a 32bit register from the chip. - * - * Note, this read requires the address be aligned to 4 bytes. -*/ -static unsigned ks8851_rdreg32(struct ks8851_net *ks, unsigned reg) + */ +static unsigned int ks8851_rdreg16(struct ks8851_net *ks, + unsigned int reg) { - __le32 rx = 0; - - WARN_ON(reg & 3); - - ks8851_rdreg(ks, MK_OP(0xf, reg), (u8 *)&rx, 4); - return le32_to_cpu(rx); + return ks->rdreg16(ks, reg); } /** @@ -368,21 +131,27 @@ static void ks8851_set_powermode(struct ks8851_net *ks, unsigned pwrmode) static int ks8851_write_mac_addr(struct net_device *dev) { struct ks8851_net *ks = netdev_priv(dev); + unsigned long flags; + u16 val; int i; - mutex_lock(&ks->lock); + ks8851_lock(ks, &flags); /* * Wake up chip in case it was powered off when stopped; otherwise, * the first write to the MAC address does not take effect. 
*/ ks8851_set_powermode(ks, PMECR_PM_NORMAL); - for (i = 0; i < ETH_ALEN; i++) - ks8851_wrreg8(ks, KS_MAR(i), dev->dev_addr[i]); + + for (i = 0; i < ETH_ALEN; i += 2) { + val = (dev->dev_addr[i] << 8) | dev->dev_addr[i + 1]; + ks8851_wrreg16(ks, KS_MAR(i), val); + } + if (!netif_running(dev)) ks8851_set_powermode(ks, PMECR_PM_SOFTDOWN); - mutex_unlock(&ks->lock); + ks8851_unlock(ks, &flags); return 0; } @@ -396,19 +165,25 @@ static int ks8851_write_mac_addr(struct net_device *dev) static void ks8851_read_mac_addr(struct net_device *dev) { struct ks8851_net *ks = netdev_priv(dev); + unsigned long flags; + u16 reg; int i; - mutex_lock(&ks->lock); + ks8851_lock(ks, &flags); - for (i = 0; i < ETH_ALEN; i++) - dev->dev_addr[i] = ks8851_rdreg8(ks, KS_MAR(i)); + for (i = 0; i < ETH_ALEN; i += 2) { + reg = ks8851_rdreg16(ks, KS_MAR(i)); + dev->dev_addr[i] = reg >> 8; + dev->dev_addr[i + 1] = reg & 0xff; + } - mutex_unlock(&ks->lock); + ks8851_unlock(ks, &flags); } /** * ks8851_init_mac - initialise the mac address * @ks: The device structure + * @np: The device node pointer * * Get or create the initial mac address for the device and then set that * into the station address register. A mac address supplied in the device @@ -416,12 +191,12 @@ static void ks8851_read_mac_addr(struct net_device *dev) * we try that. If no valid mac address is found we use eth_random_addr() * to create a new one. */ -static void ks8851_init_mac(struct ks8851_net *ks) +static void ks8851_init_mac(struct ks8851_net *ks, struct device_node *np) { struct net_device *dev = ks->netdev; const u8 *mac_addr; - mac_addr = of_get_mac_address(ks->spidev->dev.of_node); + mac_addr = of_get_mac_address(np); if (!IS_ERR(mac_addr)) { ether_addr_copy(dev->dev_addr, mac_addr); ks8851_write_mac_addr(dev); @@ -442,48 +217,12 @@ static void ks8851_init_mac(struct ks8851_net *ks) } /** - * ks8851_rdfifo - read data from the receive fifo - * @ks: The device state. - * @buff: The buffer address - * @len: The length of the data to read - * - * Issue an RXQ FIFO read command and read the @len amount of data from - * the FIFO into the buffer specified by @buff. - */ -static void ks8851_rdfifo(struct ks8851_net *ks, u8 *buff, unsigned len) -{ - struct spi_transfer *xfer = ks->spi_xfer2; - struct spi_message *msg = &ks->spi_msg2; - u8 txb[1]; - int ret; - - netif_dbg(ks, rx_status, ks->netdev, - "%s: %d@%p\n", __func__, len, buff); - - /* set the operation we're issuing */ - txb[0] = KS_SPIOP_RXFIFO; - - xfer->tx_buf = txb; - xfer->rx_buf = NULL; - xfer->len = 1; - - xfer++; - xfer->rx_buf = buff; - xfer->tx_buf = NULL; - xfer->len = len; - - ret = spi_sync(ks->spidev, msg); - if (ret < 0) - netdev_err(ks->netdev, "%s: spi_sync() failed\n", __func__); -} - -/** * ks8851_dbg_dumpkkt - dump initial packet contents to debug * @ks: The device state * @rxpkt: The data for the received packet * * Dump the initial data from the packet to dev_dbg(). -*/ + */ static void ks8851_dbg_dumpkkt(struct ks8851_net *ks, u8 *rxpkt) { netdev_dbg(ks->netdev, @@ -494,6 +233,16 @@ static void ks8851_dbg_dumpkkt(struct ks8851_net *ks, u8 *rxpkt) } /** + * ks8851_rx_skb - receive skbuff + * @ks: The device state. + * @skb: The skbuff + */ +static void ks8851_rx_skb(struct ks8851_net *ks, struct sk_buff *skb) +{ + ks->rx_skb(ks, skb); +} + +/** * ks8851_rx_pkts - receive packets from the host * @ks: The device information. 
* @@ -507,10 +256,9 @@ static void ks8851_rx_pkts(struct ks8851_net *ks) unsigned rxfc; unsigned rxlen; unsigned rxstat; - u32 rxh; u8 *rxpkt; - rxfc = ks8851_rdreg8(ks, KS_RXFC); + rxfc = (ks8851_rdreg16(ks, KS_RXFCTR) >> 8) & 0xff; netif_dbg(ks, rx_status, ks->netdev, "%s: %d packets\n", __func__, rxfc); @@ -526,9 +274,8 @@ static void ks8851_rx_pkts(struct ks8851_net *ks) */ for (; rxfc != 0; rxfc--) { - rxh = ks8851_rdreg32(ks, KS_RXFHSR); - rxstat = rxh & 0xffff; - rxlen = (rxh >> 16) & 0xfff; + rxstat = ks8851_rdreg16(ks, KS_RXFHSR); + rxlen = ks8851_rdreg16(ks, KS_RXFHBCR) & RXFHBCR_CNT_MASK; netif_dbg(ks, rx_status, ks->netdev, "rx: stat 0x%04x, len 0x%04x\n", rxstat, rxlen); @@ -557,13 +304,13 @@ static void ks8851_rx_pkts(struct ks8851_net *ks) rxpkt = skb_put(skb, rxlen) - 8; - ks8851_rdfifo(ks, rxpkt, rxalign + 8); + ks->rdfifo(ks, rxpkt, rxalign + 8); if (netif_msg_pktdata(ks)) ks8851_dbg_dumpkkt(ks, rxpkt); skb->protocol = eth_type_trans(skb, ks->netdev); - netif_rx_ni(skb); + ks8851_rx_skb(ks, skb); ks->netdev->stats.rx_packets++; ks->netdev->stats.rx_bytes += rxlen; @@ -590,10 +337,11 @@ static void ks8851_rx_pkts(struct ks8851_net *ks) static irqreturn_t ks8851_irq(int irq, void *_ks) { struct ks8851_net *ks = _ks; - unsigned status; unsigned handled = 0; + unsigned long flags; + unsigned int status; - mutex_lock(&ks->lock); + ks8851_lock(ks, &flags); status = ks8851_rdreg16(ks, KS_ISR); @@ -631,7 +379,7 @@ static irqreturn_t ks8851_irq(int irq, void *_ks) handled |= IRQ_RXI; if (status & IRQ_SPIBEI) { - dev_err(&ks->spidev->dev, "%s: spi bus error\n", __func__); + netdev_err(ks->netdev, "%s: spi bus error\n", __func__); handled |= IRQ_SPIBEI; } @@ -662,7 +410,7 @@ static irqreturn_t ks8851_irq(int irq, void *_ks) ks8851_wrreg16(ks, KS_RXCR1, rxc->rxcr1); } - mutex_unlock(&ks->lock); + ks8851_unlock(ks, &flags); if (status & IRQ_LCI) mii_check_link(&ks->mii); @@ -674,108 +422,13 @@ static irqreturn_t ks8851_irq(int irq, void *_ks) } /** - * calc_txlen - calculate size of message to send packet - * @len: Length of data - * - * Returns the size of the TXFIFO message needed to send - * this packet. - */ -static inline unsigned calc_txlen(unsigned len) -{ - return ALIGN(len + 4, 4); -} - -/** - * ks8851_wrpkt - write packet to TX FIFO - * @ks: The device state. - * @txp: The sk_buff to transmit. - * @irq: IRQ on completion of the packet. - * - * Send the @txp to the chip. This means creating the relevant packet header - * specifying the length of the packet and the other information the chip - * needs, such as IRQ on completion. Send the header and the packet data to - * the device. 
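The RX path above now derives everything from 16-bit accesses: the pending-frame count is the high byte of RXFCTR, and status/length come from separate RXFHSR and RXFHBCR reads instead of one 32-bit RXFHSR read. A standalone model of the decode (the 12-bit length mask matches the old `(rxh >> 16) & 0xfff` extraction; register words are passed in directly):

```c
#include <stdint.h>

#define RXFHBCR_CNT_MASK 0x0fff		/* byte-count field, 12 bits */

struct rx_hdr_model {
	unsigned int frames;	/* frames pending in the RXQ */
	unsigned int stat;	/* frame header status */
	unsigned int len;	/* frame byte count */
};

static struct rx_hdr_model decode(uint16_t rxfctr, uint16_t rxfhsr,
				  uint16_t rxfhbcr)
{
	struct rx_hdr_model h = {
		.frames = (rxfctr >> 8) & 0xff,	/* count lives in the high byte */
		.stat = rxfhsr,
		.len = rxfhbcr & RXFHBCR_CNT_MASK,
	};
	return h;
}
```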
- */ -static void ks8851_wrpkt(struct ks8851_net *ks, struct sk_buff *txp, bool irq) -{ - struct spi_transfer *xfer = ks->spi_xfer2; - struct spi_message *msg = &ks->spi_msg2; - unsigned fid = 0; - int ret; - - netif_dbg(ks, tx_queued, ks->netdev, "%s: skb %p, %d@%p, irq %d\n", - __func__, txp, txp->len, txp->data, irq); - - fid = ks->fid++; - fid &= TXFR_TXFID_MASK; - - if (irq) - fid |= TXFR_TXIC; /* irq on completion */ - - /* start header at txb[1] to align txw entries */ - ks->txh.txb[1] = KS_SPIOP_TXFIFO; - ks->txh.txw[1] = cpu_to_le16(fid); - ks->txh.txw[2] = cpu_to_le16(txp->len); - - xfer->tx_buf = &ks->txh.txb[1]; - xfer->rx_buf = NULL; - xfer->len = 5; - - xfer++; - xfer->tx_buf = txp->data; - xfer->rx_buf = NULL; - xfer->len = ALIGN(txp->len, 4); - - ret = spi_sync(ks->spidev, msg); - if (ret < 0) - netdev_err(ks->netdev, "%s: spi_sync() failed\n", __func__); -} - -/** - * ks8851_done_tx - update and then free skbuff after transmitting + * ks8851_flush_tx_work - flush outstanding TX work * @ks: The device state - * @txb: The buffer transmitted - */ -static void ks8851_done_tx(struct ks8851_net *ks, struct sk_buff *txb) -{ - struct net_device *dev = ks->netdev; - - dev->stats.tx_bytes += txb->len; - dev->stats.tx_packets++; - - dev_kfree_skb(txb); -} - -/** - * ks8851_tx_work - process tx packet(s) - * @work: The work strucutre what was scheduled. - * - * This is called when a number of packets have been scheduled for - * transmission and need to be sent to the device. */ -static void ks8851_tx_work(struct work_struct *work) +static void ks8851_flush_tx_work(struct ks8851_net *ks) { - struct ks8851_net *ks = container_of(work, struct ks8851_net, tx_work); - struct sk_buff *txb; - bool last = skb_queue_empty(&ks->txq); - - mutex_lock(&ks->lock); - - while (!last) { - txb = skb_dequeue(&ks->txq); - last = skb_queue_empty(&ks->txq); - - if (txb != NULL) { - ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA); - ks8851_wrpkt(ks, txb, last); - ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr); - ks8851_wrreg16(ks, KS_TXQCR, TXQCR_METFE); - - ks8851_done_tx(ks, txb); - } - } - - mutex_unlock(&ks->lock); + if (ks->flush_tx_work) + ks->flush_tx_work(ks); } /** @@ -788,6 +441,7 @@ static void ks8851_tx_work(struct work_struct *work) static int ks8851_net_open(struct net_device *dev) { struct ks8851_net *ks = netdev_priv(dev); + unsigned long flags; int ret; ret = request_threaded_irq(dev->irq, NULL, ks8851_irq, @@ -800,7 +454,7 @@ static int ks8851_net_open(struct net_device *dev) /* lock the card, even if we may not actually be doing anything * else at the moment */ - mutex_lock(&ks->lock); + ks8851_lock(ks, &flags); netif_dbg(ks, ifup, ks->netdev, "opening\n"); @@ -844,23 +498,14 @@ static int ks8851_net_open(struct net_device *dev) ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr); /* clear then enable interrupts */ - -#define STD_IRQ (IRQ_LCI | /* Link Change */ \ - IRQ_TXI | /* TX done */ \ - IRQ_RXI | /* RX done */ \ - IRQ_SPIBEI | /* SPI bus error */ \ - IRQ_TXPSI | /* TX process stop */ \ - IRQ_RXPSI) /* RX process stop */ - - ks->rc_ier = STD_IRQ; - ks8851_wrreg16(ks, KS_ISR, STD_IRQ); - ks8851_wrreg16(ks, KS_IER, STD_IRQ); + ks8851_wrreg16(ks, KS_ISR, ks->rc_ier); + ks8851_wrreg16(ks, KS_IER, ks->rc_ier); netif_start_queue(ks->netdev); netif_dbg(ks, ifup, ks->netdev, "network device up\n"); - mutex_unlock(&ks->lock); + ks8851_unlock(ks, &flags); mii_check_link(&ks->mii); return 0; } @@ -876,22 +521,23 @@ static int ks8851_net_open(struct net_device *dev) static int ks8851_net_stop(struct 
net_device *dev) { struct ks8851_net *ks = netdev_priv(dev); + unsigned long flags; netif_info(ks, ifdown, dev, "shutting down\n"); netif_stop_queue(dev); - mutex_lock(&ks->lock); + ks8851_lock(ks, &flags); /* turn off the IRQs and ack any outstanding */ ks8851_wrreg16(ks, KS_IER, 0x0000); ks8851_wrreg16(ks, KS_ISR, 0xffff); - mutex_unlock(&ks->lock); + ks8851_unlock(ks, &flags); /* stop any outstanding work */ - flush_work(&ks->tx_work); + ks8851_flush_tx_work(ks); flush_work(&ks->rxctrl_work); - mutex_lock(&ks->lock); + ks8851_lock(ks, &flags); /* shutdown RX process */ ks8851_wrreg16(ks, KS_RXCR1, 0x0000); @@ -900,7 +546,7 @@ static int ks8851_net_stop(struct net_device *dev) /* set powermode to soft power down to save power */ ks8851_set_powermode(ks, PMECR_PM_SOFTDOWN); - mutex_unlock(&ks->lock); + ks8851_unlock(ks, &flags); /* ensure any queued tx buffers are dumped */ while (!skb_queue_empty(&ks->txq)) { @@ -934,26 +580,8 @@ static netdev_tx_t ks8851_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct ks8851_net *ks = netdev_priv(dev); - unsigned needed = calc_txlen(skb->len); - netdev_tx_t ret = NETDEV_TX_OK; - - netif_dbg(ks, tx_queued, ks->netdev, - "%s: skb %p, %d@%p\n", __func__, skb, skb->len, skb->data); - spin_lock(&ks->statelock); - - if (needed > ks->tx_space) { - netif_stop_queue(dev); - ret = NETDEV_TX_BUSY; - } else { - ks->tx_space -= needed; - skb_queue_tail(&ks->txq, skb); - } - - spin_unlock(&ks->statelock); - schedule_work(&ks->tx_work); - - return ret; + return ks->start_xmit(skb, dev); } /** @@ -972,13 +600,14 @@ static netdev_tx_t ks8851_start_xmit(struct sk_buff *skb, static void ks8851_rxctrl_work(struct work_struct *work) { struct ks8851_net *ks = container_of(work, struct ks8851_net, rxctrl_work); + unsigned long flags; - mutex_lock(&ks->lock); + ks8851_lock(ks, &flags); /* need to shutdown RXQ before modifying filter parameters */ ks8851_wrreg16(ks, KS_RXCR1, 0x00); - mutex_unlock(&ks->lock); + ks8851_unlock(ks, &flags); } static void ks8851_set_rx_mode(struct net_device *dev) @@ -1160,11 +789,6 @@ static void ks8851_eeprom_regwrite(struct eeprom_93cx6 *ee) */ static int ks8851_eeprom_claim(struct ks8851_net *ks) { - if (!(ks->rc_ccr & CCR_EEPROM)) - return -ENOENT; - - mutex_lock(&ks->lock); - /* start with clock low, cs high */ ks8851_wrreg16(ks, KS_EEPCR, EEPCR_EESA | EEPCR_EECS); return 0; @@ -1181,7 +805,6 @@ static void ks8851_eeprom_release(struct ks8851_net *ks) unsigned val = ks8851_rdreg16(ks, KS_EEPCR); ks8851_wrreg16(ks, KS_EEPCR, val & ~EEPCR_EESA); - mutex_unlock(&ks->lock); } #define KS_EEPROM_MAGIC (0x00008851) @@ -1191,6 +814,7 @@ static int ks8851_set_eeprom(struct net_device *dev, { struct ks8851_net *ks = netdev_priv(dev); int offset = ee->offset; + unsigned long flags; int len = ee->len; u16 tmp; @@ -1201,9 +825,13 @@ static int ks8851_set_eeprom(struct net_device *dev, if (ee->magic != KS_EEPROM_MAGIC) return -EINVAL; - if (ks8851_eeprom_claim(ks)) + if (!(ks->rc_ccr & CCR_EEPROM)) return -ENOENT; + ks8851_lock(ks, &flags); + + ks8851_eeprom_claim(ks); + eeprom_93cx6_wren(&ks->eeprom, true); /* ethtool currently only supports writing bytes, which means @@ -1223,6 +851,7 @@ static int ks8851_set_eeprom(struct net_device *dev, eeprom_93cx6_wren(&ks->eeprom, false); ks8851_eeprom_release(ks); + ks8851_unlock(ks, &flags); return 0; } @@ -1232,19 +861,25 @@ static int ks8851_get_eeprom(struct net_device *dev, { struct ks8851_net *ks = netdev_priv(dev); int offset = ee->offset; + unsigned long flags; int len = ee->len; /* must 
be 2 byte aligned */ if (len & 1 || offset & 1) return -EINVAL; - if (ks8851_eeprom_claim(ks)) + if (!(ks->rc_ccr & CCR_EEPROM)) return -ENOENT; + ks8851_lock(ks, &flags); + + ks8851_eeprom_claim(ks); + ee->magic = KS_EEPROM_MAGIC; eeprom_93cx6_multiread(&ks->eeprom, offset/2, (__le16 *)data, len/2); ks8851_eeprom_release(ks); + ks8851_unlock(ks, &flags); return 0; } @@ -1318,6 +953,7 @@ static int ks8851_phy_reg(int reg) static int ks8851_phy_read(struct net_device *dev, int phy_addr, int reg) { struct ks8851_net *ks = netdev_priv(dev); + unsigned long flags; int ksreg; int result; @@ -1325,9 +961,9 @@ static int ks8851_phy_read(struct net_device *dev, int phy_addr, int reg) if (!ksreg) return 0x0; /* no error return allowed, so use zero */ - mutex_lock(&ks->lock); + ks8851_lock(ks, &flags); result = ks8851_rdreg16(ks, ksreg); - mutex_unlock(&ks->lock); + ks8851_unlock(ks, &flags); return result; } @@ -1336,13 +972,14 @@ static void ks8851_phy_write(struct net_device *dev, int phy, int reg, int value) { struct ks8851_net *ks = netdev_priv(dev); + unsigned long flags; int ksreg; ksreg = ks8851_phy_reg(reg); if (ksreg) { - mutex_lock(&ks->lock); + ks8851_lock(ks, &flags); ks8851_wrreg16(ks, ksreg, value); - mutex_unlock(&ks->lock); + ks8851_unlock(ks, &flags); } } @@ -1382,7 +1019,7 @@ static int ks8851_read_selftest(struct ks8851_net *ks) #ifdef CONFIG_PM_SLEEP -static int ks8851_suspend(struct device *dev) +int ks8851_suspend(struct device *dev) { struct ks8851_net *ks = dev_get_drvdata(dev); struct net_device *netdev = ks->netdev; @@ -1395,7 +1032,7 @@ static int ks8851_suspend(struct device *dev) return 0; } -static int ks8851_resume(struct device *dev) +int ks8851_resume(struct device *dev) { struct ks8851_net *ks = dev_get_drvdata(dev); struct net_device *netdev = ks->netdev; @@ -1409,46 +1046,32 @@ static int ks8851_resume(struct device *dev) } #endif -static SIMPLE_DEV_PM_OPS(ks8851_pm_ops, ks8851_suspend, ks8851_resume); - -static int ks8851_probe(struct spi_device *spi) +int ks8851_probe_common(struct net_device *netdev, struct device *dev, + int msg_en) { - struct net_device *ndev; - struct ks8851_net *ks; - int ret; + struct ks8851_net *ks = netdev_priv(netdev); unsigned cider; int gpio; + int ret; - ndev = alloc_etherdev(sizeof(struct ks8851_net)); - if (!ndev) - return -ENOMEM; - - spi->bits_per_word = 8; - - ks = netdev_priv(ndev); - - ks->netdev = ndev; - ks->spidev = spi; + ks->netdev = netdev; ks->tx_space = 6144; - gpio = of_get_named_gpio_flags(spi->dev.of_node, "reset-gpios", - 0, NULL); - if (gpio == -EPROBE_DEFER) { - ret = gpio; - goto err_gpio; - } + gpio = of_get_named_gpio_flags(dev->of_node, "reset-gpios", 0, NULL); + if (gpio == -EPROBE_DEFER) + return gpio; ks->gpio = gpio; if (gpio_is_valid(gpio)) { - ret = devm_gpio_request_one(&spi->dev, gpio, + ret = devm_gpio_request_one(dev, gpio, GPIOF_OUT_INIT_LOW, "ks8851_rst_n"); if (ret) { - dev_err(&spi->dev, "reset gpio request failed\n"); - goto err_gpio; + dev_err(dev, "reset gpio request failed\n"); + return ret; } } - ks->vdd_io = devm_regulator_get(&spi->dev, "vdd-io"); + ks->vdd_io = devm_regulator_get(dev, "vdd-io"); if (IS_ERR(ks->vdd_io)) { ret = PTR_ERR(ks->vdd_io); goto err_reg_io; @@ -1456,12 +1079,11 @@ static int ks8851_probe(struct spi_device *spi) ret = regulator_enable(ks->vdd_io); if (ret) { - dev_err(&spi->dev, "regulator vdd_io enable fail: %d\n", - ret); + dev_err(dev, "regulator vdd_io enable fail: %d\n", ret); goto err_reg_io; } - ks->vdd_reg = devm_regulator_get(&spi->dev, "vdd"); + 
ks->vdd_reg = devm_regulator_get(dev, "vdd"); if (IS_ERR(ks->vdd_reg)) { ret = PTR_ERR(ks->vdd_reg); goto err_reg; @@ -1469,8 +1091,7 @@ static int ks8851_probe(struct spi_device *spi) ret = regulator_enable(ks->vdd_reg); if (ret) { - dev_err(&spi->dev, "regulator vdd enable fail: %d\n", - ret); + dev_err(dev, "regulator vdd enable fail: %d\n", ret); goto err_reg; } @@ -1479,54 +1100,41 @@ static int ks8851_probe(struct spi_device *spi) gpio_set_value(gpio, 1); } - mutex_init(&ks->lock); spin_lock_init(&ks->statelock); - INIT_WORK(&ks->tx_work, ks8851_tx_work); INIT_WORK(&ks->rxctrl_work, ks8851_rxctrl_work); - /* initialise pre-made spi transfer messages */ - - spi_message_init(&ks->spi_msg1); - spi_message_add_tail(&ks->spi_xfer1, &ks->spi_msg1); - - spi_message_init(&ks->spi_msg2); - spi_message_add_tail(&ks->spi_xfer2[0], &ks->spi_msg2); - spi_message_add_tail(&ks->spi_xfer2[1], &ks->spi_msg2); - /* setup EEPROM state */ - ks->eeprom.data = ks; ks->eeprom.width = PCI_EEPROM_WIDTH_93C46; ks->eeprom.register_read = ks8851_eeprom_regread; ks->eeprom.register_write = ks8851_eeprom_regwrite; /* setup mii state */ - ks->mii.dev = ndev; + ks->mii.dev = netdev; ks->mii.phy_id = 1, ks->mii.phy_id_mask = 1; ks->mii.reg_num_mask = 0xf; ks->mii.mdio_read = ks8851_phy_read; ks->mii.mdio_write = ks8851_phy_write; - dev_info(&spi->dev, "message enable is %d\n", msg_enable); + dev_info(dev, "message enable is %d\n", msg_en); /* set the default message enable */ - ks->msg_enable = netif_msg_init(msg_enable, (NETIF_MSG_DRV | - NETIF_MSG_PROBE | - NETIF_MSG_LINK)); + ks->msg_enable = netif_msg_init(msg_en, NETIF_MSG_DRV | + NETIF_MSG_PROBE | + NETIF_MSG_LINK); skb_queue_head_init(&ks->txq); - ndev->ethtool_ops = &ks8851_ethtool_ops; - SET_NETDEV_DEV(ndev, &spi->dev); + netdev->ethtool_ops = &ks8851_ethtool_ops; + SET_NETDEV_DEV(netdev, dev); - spi_set_drvdata(spi, ks); + dev_set_drvdata(dev, ks); netif_carrier_off(ks->netdev); - ndev->if_port = IF_PORT_100BASET; - ndev->netdev_ops = &ks8851_netdev_ops; - ndev->irq = spi->irq; + netdev->if_port = IF_PORT_100BASET; + netdev->netdev_ops = &ks8851_netdev_ops; /* issue a global soft reset to reset the device. */ ks8851_soft_reset(ks, GRR_GSR); @@ -1534,7 +1142,7 @@ static int ks8851_probe(struct spi_device *spi) /* simple check for a valid chip being connected to the bus */ cider = ks8851_rdreg16(ks, KS_CIDER); if ((cider & ~CIDER_REV_MASK) != CIDER_ID) { - dev_err(&spi->dev, "failed to read device ID\n"); + dev_err(dev, "failed to read device ID\n"); ret = -ENODEV; goto err_id; } @@ -1543,16 +1151,16 @@ static int ks8851_probe(struct spi_device *spi) ks->rc_ccr = ks8851_rdreg16(ks, KS_CCR); ks8851_read_selftest(ks); - ks8851_init_mac(ks); + ks8851_init_mac(ks, dev->of_node); - ret = register_netdev(ndev); + ret = register_netdev(netdev); if (ret) { - dev_err(&spi->dev, "failed to register network device\n"); + dev_err(dev, "failed to register network device\n"); goto err_netdev; } - netdev_info(ndev, "revision %d, MAC %pM, IRQ %d, %s EEPROM\n", - CIDER_REV_GET(cider), ndev->dev_addr, ndev->irq, + netdev_info(netdev, "revision %d, MAC %pM, IRQ %d, %s EEPROM\n", + CIDER_REV_GET(cider), netdev->dev_addr, netdev->irq, ks->rc_ccr & CCR_EEPROM ? 
"has" : "no"); return 0; @@ -1565,49 +1173,21 @@ err_id: err_reg: regulator_disable(ks->vdd_io); err_reg_io: -err_gpio: - free_netdev(ndev); return ret; } -static int ks8851_remove(struct spi_device *spi) +int ks8851_remove_common(struct device *dev) { - struct ks8851_net *priv = spi_get_drvdata(spi); + struct ks8851_net *priv = dev_get_drvdata(dev); if (netif_msg_drv(priv)) - dev_info(&spi->dev, "remove\n"); + dev_info(dev, "remove\n"); unregister_netdev(priv->netdev); if (gpio_is_valid(priv->gpio)) gpio_set_value(priv->gpio, 0); regulator_disable(priv->vdd_reg); regulator_disable(priv->vdd_io); - free_netdev(priv->netdev); return 0; } - -static const struct of_device_id ks8851_match_table[] = { - { .compatible = "micrel,ks8851" }, - { } -}; -MODULE_DEVICE_TABLE(of, ks8851_match_table); - -static struct spi_driver ks8851_driver = { - .driver = { - .name = "ks8851", - .of_match_table = ks8851_match_table, - .pm = &ks8851_pm_ops, - }, - .probe = ks8851_probe, - .remove = ks8851_remove, -}; -module_spi_driver(ks8851_driver); - -MODULE_DESCRIPTION("KS8851 Network driver"); -MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>"); -MODULE_LICENSE("GPL"); - -module_param_named(message, msg_enable, int, 0); -MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)"); -MODULE_ALIAS("spi:ks8851"); diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c deleted file mode 100644 index 45cc840d8e2e..000000000000 --- a/drivers/net/ethernet/micrel/ks8851_mll.c +++ /dev/null @@ -1,1393 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/** - * drivers/net/ethernet/micrel/ks8851_mll.c - * Copyright (c) 2009 Micrel Inc. - */ - -/* Supports: - * KS8851 16bit MLL chip from Micrel Inc. - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/interrupt.h> -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/ethtool.h> -#include <linux/cache.h> -#include <linux/crc32.h> -#include <linux/crc32poly.h> -#include <linux/mii.h> -#include <linux/platform_device.h> -#include <linux/delay.h> -#include <linux/slab.h> -#include <linux/ks8851_mll.h> -#include <linux/of.h> -#include <linux/of_device.h> -#include <linux/of_net.h> - -#include "ks8851.h" - -#define DRV_NAME "ks8851_mll" - -static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 }; -#define MAX_RECV_FRAMES 255 -#define MAX_BUF_SIZE 2048 -#define TX_BUF_SIZE 2000 -#define RX_BUF_SIZE 2000 - -#define RXCR1_FILTER_MASK (RXCR1_RXINVF | RXCR1_RXAE | \ - RXCR1_RXMAFMA | RXCR1_RXPAFMA) -#define RXQCR_CMD_CNTL (RXQCR_RXFCTE|RXQCR_ADRFE) - -#define ENUM_BUS_NONE 0 -#define ENUM_BUS_8BIT 1 -#define ENUM_BUS_16BIT 2 -#define ENUM_BUS_32BIT 3 - -#define MAX_MCAST_LST 32 -#define HW_MCAST_SIZE 8 - -/** - * union ks_tx_hdr - tx header data - * @txb: The header as bytes - * @txw: The header as 16bit, little-endian words - * - * A dual representation of the tx header data to allow - * access to individual bytes, and to allow 16bit accesses - * with 16bit alignment. - */ -union ks_tx_hdr { - u8 txb[4]; - __le16 txw[2]; -}; - -/** - * struct ks_net - KS8851 driver private data - * @net_device : The network device we're bound to - * @hw_addr : start address of data register. - * @hw_addr_cmd : start address of command register. - * @txh : temporaly buffer to save status/length. - * @lock : Lock to ensure that the device is not accessed when busy. - * @pdev : Pointer to platform device. 
- * @mii : The MII state information for the mii calls. - * @frame_head_info : frame header information for multi-pkt rx. - * @statelock : Lock on this structure for tx list. - * @msg_enable : The message flags controlling driver output (see ethtool). - * @frame_cnt : number of frames received. - * @bus_width : i/o bus width. - * @rc_rxqcr : Cached copy of KS_RXQCR. - * @rc_txcr : Cached copy of KS_TXCR. - * @rc_ier : Cached copy of KS_IER. - * @sharedbus : Multipex(addr and data bus) mode indicator. - * @cmd_reg_cache : command register cached. - * @cmd_reg_cache_int : command register cached. Used in the irq handler. - * @promiscuous : promiscuous mode indicator. - * @all_mcast : mutlicast indicator. - * @mcast_lst_size : size of multicast list. - * @mcast_lst : multicast list. - * @mcast_bits : multicast enabed. - * @mac_addr : MAC address assigned to this device. - * @fid : frame id. - * @extra_byte : number of extra byte prepended rx pkt. - * @enabled : indicator this device works. - * - * The @lock ensures that the chip is protected when certain operations are - * in progress. When the read or write packet transfer is in progress, most - * of the chip registers are not accessible until the transfer is finished and - * the DMA has been de-asserted. - * - * The @statelock is used to protect information in the structure which may - * need to be accessed via several sources, such as the network driver layer - * or one of the work queues. - * - */ - -/* Receive multiplex framer header info */ -struct type_frame_head { - u16 sts; /* Frame status */ - u16 len; /* Byte count */ -}; - -struct ks_net { - struct net_device *netdev; - void __iomem *hw_addr; - void __iomem *hw_addr_cmd; - union ks_tx_hdr txh ____cacheline_aligned; - struct mutex lock; /* spinlock to be interrupt safe */ - struct platform_device *pdev; - struct mii_if_info mii; - struct type_frame_head *frame_head_info; - spinlock_t statelock; - u32 msg_enable; - u32 frame_cnt; - int bus_width; - - u16 rc_rxqcr; - u16 rc_txcr; - u16 rc_ier; - u16 sharedbus; - u16 cmd_reg_cache; - u16 cmd_reg_cache_int; - u16 promiscuous; - u16 all_mcast; - u16 mcast_lst_size; - u8 mcast_lst[MAX_MCAST_LST][ETH_ALEN]; - u8 mcast_bits[HW_MCAST_SIZE]; - u8 mac_addr[6]; - u8 fid; - u8 extra_byte; - u8 enabled; -}; - -static int msg_enable; - -#define BE3 0x8000 /* Byte Enable 3 */ -#define BE2 0x4000 /* Byte Enable 2 */ -#define BE1 0x2000 /* Byte Enable 1 */ -#define BE0 0x1000 /* Byte Enable 0 */ - -/* register read/write calls. - * - * All these calls issue transactions to access the chip's registers. They - * all require that the necessary lock is held to prevent accesses when the - * chip is busy transferring packet data (RX/TX FIFO accesses). - */ - -/** - * ks_check_endian - Check whether endianness of the bus is correct - * @ks : The chip information - * - * The KS8851-16MLL EESK pin allows selecting the endianness of the 16bit - * bus. To maintain optimum performance, the bus endianness should be set - * such that it matches the endianness of the CPU. - */ - -static int ks_check_endian(struct ks_net *ks) -{ - u16 cider; - - /* - * Read CIDER register first, however read it the "wrong" way around. 
- * If the endian strap on the KS8851-16MLL in incorrect and the chip - * is operating in different endianness than the CPU, then the meaning - * of BE[3:0] byte-enable bits is also swapped such that: - * BE[3,2,1,0] becomes BE[1,0,3,2] - * - * Luckily for us, the byte-enable bits are the top four MSbits of - * the address register and the CIDER register is at offset 0xc0. - * Hence, by reading address 0xc0c0, which is not impacted by endian - * swapping, we assert either BE[3:2] or BE[1:0] while reading the - * CIDER register. - * - * If the bus configuration is correct, reading 0xc0c0 asserts - * BE[3:2] and this read returns 0x0000, because to read register - * with bottom two LSbits of address set to 0, BE[1:0] must be - * asserted. - * - * If the bus configuration is NOT correct, reading 0xc0c0 asserts - * BE[1:0] and this read returns non-zero 0x8872 value. - */ - iowrite16(BE3 | BE2 | KS_CIDER, ks->hw_addr_cmd); - cider = ioread16(ks->hw_addr); - if (!cider) - return 0; - - netdev_err(ks->netdev, "incorrect EESK endian strap setting\n"); - - return -EINVAL; -} - -/** - * ks_rdreg16 - read 16 bit register from device - * @ks : The chip information - * @offset: The register address - * - * Read a 16bit register from the chip, returning the result - */ - -static u16 ks_rdreg16(struct ks_net *ks, int offset) -{ - ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02)); - iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd); - return ioread16(ks->hw_addr); -} - -/** - * ks_wrreg16 - write 16bit register value to chip - * @ks: The chip information - * @offset: The register address - * @value: The value to write - * - */ - -static void ks_wrreg16(struct ks_net *ks, int offset, u16 value) -{ - ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02)); - iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd); - iowrite16(value, ks->hw_addr); -} - -/** - * ks_inblk - read a block of data from QMU. This is called after sudo DMA mode enabled. - * @ks: The chip state - * @wptr: buffer address to save data - * @len: length in byte to read - * - */ -static inline void ks_inblk(struct ks_net *ks, u16 *wptr, u32 len) -{ - len >>= 1; - while (len--) - *wptr++ = (u16)ioread16(ks->hw_addr); -} - -/** - * ks_outblk - write data to QMU. This is called after sudo DMA mode enabled. - * @ks: The chip information - * @wptr: buffer address - * @len: length in byte to write - * - */ -static inline void ks_outblk(struct ks_net *ks, u16 *wptr, u32 len) -{ - len >>= 1; - while (len--) - iowrite16(*wptr++, ks->hw_addr); -} - -static void ks_disable_int(struct ks_net *ks) -{ - ks_wrreg16(ks, KS_IER, 0x0000); -} /* ks_disable_int */ - -static void ks_enable_int(struct ks_net *ks) -{ - ks_wrreg16(ks, KS_IER, ks->rc_ier); -} /* ks_enable_int */ - -/** - * ks_tx_fifo_space - return the available hardware buffer size. - * @ks: The chip information - * - */ -static inline u16 ks_tx_fifo_space(struct ks_net *ks) -{ - return ks_rdreg16(ks, KS_TXMIR) & 0x1fff; -} - -/** - * ks_save_cmd_reg - save the command register from the cache. - * @ks: The chip information - * - */ -static inline void ks_save_cmd_reg(struct ks_net *ks) -{ - /*ks8851 MLL has a bug to read back the command register. - * So rely on software to save the content of command register. - */ - ks->cmd_reg_cache_int = ks->cmd_reg_cache; -} - -/** - * ks_restore_cmd_reg - restore the command register from the cache and - * write to hardware register. 
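ks_inblk()/ks_outblk() above stream the QMU FIFO one 16-bit word at a time, halving the byte count up front. A userspace model of the same copy loop, with the repeated ioread16() of one register replaced by a plain array cursor for the sake of a runnable example:

```c
#include <assert.h>
#include <stdint.h>

/* model: the "FIFO" is just successive reads of one array */
static void inblk_model(const uint16_t *fifo, uint16_t *wptr, uint32_t len)
{
	len >>= 1;			/* byte count -> word count */
	while (len--)
		*wptr++ = *fifo++;
}

int main(void)
{
	const uint16_t fifo[4] = { 1, 2, 3, 4 };
	uint16_t buf[4] = { 0 };

	inblk_model(fifo, buf, sizeof(fifo));	/* length given in bytes */
	assert(buf[3] == 4);
	return 0;
}
```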
- * @ks: The chip information - * - */ -static inline void ks_restore_cmd_reg(struct ks_net *ks) -{ - ks->cmd_reg_cache = ks->cmd_reg_cache_int; - iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd); -} - -/** - * ks_set_powermode - set power mode of the device - * @ks: The chip information - * @pwrmode: The power mode value to write to KS_PMECR. - * - * Change the power mode of the chip. - */ -static void ks_set_powermode(struct ks_net *ks, unsigned pwrmode) -{ - unsigned pmecr; - - netif_dbg(ks, hw, ks->netdev, "setting power mode %d\n", pwrmode); - - ks_rdreg16(ks, KS_GRR); - pmecr = ks_rdreg16(ks, KS_PMECR); - pmecr &= ~PMECR_PM_MASK; - pmecr |= pwrmode; - - ks_wrreg16(ks, KS_PMECR, pmecr); -} - -/** - * ks_read_config - read chip configuration of bus width. - * @ks: The chip information - * - */ -static void ks_read_config(struct ks_net *ks) -{ - u16 reg_data = 0; - - /* Regardless of bus width, 8 bit read should always work.*/ - reg_data = ks_rdreg16(ks, KS_CCR); - - /* addr/data bus are multiplexed */ - ks->sharedbus = (reg_data & CCR_SHARED) == CCR_SHARED; - - /* There are garbage data when reading data from QMU, - depending on bus-width. - */ - - if (reg_data & CCR_8BIT) { - ks->bus_width = ENUM_BUS_8BIT; - ks->extra_byte = 1; - } else if (reg_data & CCR_16BIT) { - ks->bus_width = ENUM_BUS_16BIT; - ks->extra_byte = 2; - } else { - ks->bus_width = ENUM_BUS_32BIT; - ks->extra_byte = 4; - } -} - -/** - * ks_soft_reset - issue one of the soft reset to the device - * @ks: The device state. - * @op: The bit(s) to set in the GRR - * - * Issue the relevant soft-reset command to the device's GRR register - * specified by @op. - * - * Note, the delays are in there as a caution to ensure that the reset - * has time to take effect and then complete. Since the datasheet does - * not currently specify the exact sequence, we have chosen something - * that seems to work with our device. - */ -static void ks_soft_reset(struct ks_net *ks, unsigned op) -{ - /* Disable interrupt first */ - ks_wrreg16(ks, KS_IER, 0x0000); - ks_wrreg16(ks, KS_GRR, op); - mdelay(10); /* wait a short time to effect reset */ - ks_wrreg16(ks, KS_GRR, 0); - mdelay(1); /* wait for condition to clear */ -} - - -static void ks_enable_qmu(struct ks_net *ks) -{ - u16 w; - - w = ks_rdreg16(ks, KS_TXCR); - /* Enables QMU Transmit (TXCR). */ - ks_wrreg16(ks, KS_TXCR, w | TXCR_TXE); - - /* - * RX Frame Count Threshold Enable and Auto-Dequeue RXQ Frame - * Enable - */ - - w = ks_rdreg16(ks, KS_RXQCR); - ks_wrreg16(ks, KS_RXQCR, w | RXQCR_RXFCTE); - - /* Enables QMU Receive (RXCR1). */ - w = ks_rdreg16(ks, KS_RXCR1); - ks_wrreg16(ks, KS_RXCR1, w | RXCR1_RXE); - ks->enabled = true; -} /* ks_enable_qmu */ - -static void ks_disable_qmu(struct ks_net *ks) -{ - u16 w; - - w = ks_rdreg16(ks, KS_TXCR); - - /* Disables QMU Transmit (TXCR). */ - w &= ~TXCR_TXE; - ks_wrreg16(ks, KS_TXCR, w); - - /* Disables QMU Receive (RXCR1). */ - w = ks_rdreg16(ks, KS_RXCR1); - w &= ~RXCR1_RXE ; - ks_wrreg16(ks, KS_RXCR1, w); - - ks->enabled = false; - -} /* ks_disable_qmu */ - -/** - * ks_read_qmu - read 1 pkt data from the QMU. - * @ks: The chip information - * @buf: buffer address to save 1 pkt - * @len: Pkt length - * Here is the sequence to read 1 pkt: - * 1. set sudo DMA mode - * 2. read prepend data - * 3. read pkt data - * 4. reset sudo DMA Mode - */ -static inline void ks_read_qmu(struct ks_net *ks, u16 *buf, u32 len) -{ - u32 r = ks->extra_byte & 0x1 ; - u32 w = ks->extra_byte - r; - - /* 1. 
set sudo DMA mode */ - ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI); - ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA); - - /* 2. read prepend data */ - /** - * read 4 + extra bytes and discard them. - * extra bytes for dummy, 2 for status, 2 for len - */ - - /* use likely(r) for 8 bit access for performance */ - if (unlikely(r)) - ioread8(ks->hw_addr); - ks_inblk(ks, buf, w + 2 + 2); - - /* 3. read pkt data */ - ks_inblk(ks, buf, ALIGN(len, 4)); - - /* 4. reset sudo DMA Mode */ - ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr); -} - -/** - * ks_rcv - read multiple pkts data from the QMU. - * @ks: The chip information - * @netdev: The network device being opened. - * - * Read all of header information before reading pkt content. - * It is not allowed only port of pkts in QMU after issuing - * interrupt ack. - */ -static void ks_rcv(struct ks_net *ks, struct net_device *netdev) -{ - u32 i; - struct type_frame_head *frame_hdr = ks->frame_head_info; - struct sk_buff *skb; - - ks->frame_cnt = ks_rdreg16(ks, KS_RXFCTR) >> 8; - - /* read all header information */ - for (i = 0; i < ks->frame_cnt; i++) { - /* Checking Received packet status */ - frame_hdr->sts = ks_rdreg16(ks, KS_RXFHSR); - /* Get packet len from hardware */ - frame_hdr->len = ks_rdreg16(ks, KS_RXFHBCR); - frame_hdr++; - } - - frame_hdr = ks->frame_head_info; - while (ks->frame_cnt--) { - if (unlikely(!(frame_hdr->sts & RXFSHR_RXFV) || - frame_hdr->len >= RX_BUF_SIZE || - frame_hdr->len <= 0)) { - - /* discard an invalid packet */ - ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF)); - netdev->stats.rx_dropped++; - if (!(frame_hdr->sts & RXFSHR_RXFV)) - netdev->stats.rx_frame_errors++; - else - netdev->stats.rx_length_errors++; - frame_hdr++; - continue; - } - - skb = netdev_alloc_skb(netdev, frame_hdr->len + 16); - if (likely(skb)) { - skb_reserve(skb, 2); - /* read data block including CRC 4 bytes */ - ks_read_qmu(ks, (u16 *)skb->data, frame_hdr->len); - skb_put(skb, frame_hdr->len - 4); - skb->protocol = eth_type_trans(skb, netdev); - netif_rx(skb); - /* exclude CRC size */ - netdev->stats.rx_bytes += frame_hdr->len - 4; - netdev->stats.rx_packets++; - } else { - ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF)); - netdev->stats.rx_dropped++; - } - frame_hdr++; - } -} - -/** - * ks_update_link_status - link status update. - * @netdev: The network device being opened. - * @ks: The chip information - * - */ - -static void ks_update_link_status(struct net_device *netdev, struct ks_net *ks) -{ - /* check the status of the link */ - u32 link_up_status; - if (ks_rdreg16(ks, KS_P1SR) & P1SR_LINK_GOOD) { - netif_carrier_on(netdev); - link_up_status = true; - } else { - netif_carrier_off(netdev); - link_up_status = false; - } - netif_dbg(ks, link, ks->netdev, - "%s: %s\n", __func__, link_up_status ? "UP" : "DOWN"); -} - -/** - * ks_irq - device interrupt handler - * @irq: Interrupt number passed from the IRQ handler. - * @pw: The private word passed to register_irq(), our struct ks_net. - * - * This is the handler invoked to find out what happened - * - * Read the interrupt status, work out what needs to be done and then clear - * any of the interrupts that are not needed. 
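ks_rcv() above reads every frame header before touching any payload, because the QMU does not allow dequeuing only part of the pending frames once the interrupt has been acknowledged. A compact model of the second pass over the captured headers (RXFSHR_RXFV is the frame-valid bit; the value used here is a placeholder, not taken from ks8851.h):

```c
#include <stdint.h>
#include <stdio.h>

#define RXFSHR_RXFV	(1 << 15)	/* frame-valid; placeholder value */
#define RX_BUF_SIZE	2000

struct frame_head_model { uint16_t sts, len; };

static void rcv_model(const struct frame_head_model *hdr, unsigned int cnt)
{
	/* consume every queued frame, valid or not */
	for (; cnt; cnt--, hdr++) {
		if (!(hdr->sts & RXFSHR_RXFV) ||
		    hdr->len >= RX_BUF_SIZE || hdr->len == 0) {
			/* release the slot and account the error */
			printf("drop: sts=%#x len=%u\n", hdr->sts, hdr->len);
			continue;
		}
		printf("deliver %u bytes\n", hdr->len);
	}
}
```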
- */ - -static irqreturn_t ks_irq(int irq, void *pw) -{ - struct net_device *netdev = pw; - struct ks_net *ks = netdev_priv(netdev); - unsigned long flags; - u16 status; - - spin_lock_irqsave(&ks->statelock, flags); - /*this should be the first in IRQ handler */ - ks_save_cmd_reg(ks); - - status = ks_rdreg16(ks, KS_ISR); - if (unlikely(!status)) { - ks_restore_cmd_reg(ks); - spin_unlock_irqrestore(&ks->statelock, flags); - return IRQ_NONE; - } - - ks_wrreg16(ks, KS_ISR, status); - - if (likely(status & IRQ_RXI)) - ks_rcv(ks, netdev); - - if (unlikely(status & IRQ_LCI)) - ks_update_link_status(netdev, ks); - - if (unlikely(status & IRQ_TXI)) - netif_wake_queue(netdev); - - if (unlikely(status & IRQ_LDI)) { - - u16 pmecr = ks_rdreg16(ks, KS_PMECR); - pmecr &= ~PMECR_WKEVT_MASK; - ks_wrreg16(ks, KS_PMECR, pmecr | PMECR_WKEVT_LINK); - } - - if (unlikely(status & IRQ_RXOI)) - ks->netdev->stats.rx_over_errors++; - /* this should be the last in IRQ handler*/ - ks_restore_cmd_reg(ks); - spin_unlock_irqrestore(&ks->statelock, flags); - return IRQ_HANDLED; -} - - -/** - * ks_net_open - open network device - * @netdev: The network device being opened. - * - * Called when the network device is marked active, such as a user executing - * 'ifconfig up' on the device. - */ -static int ks_net_open(struct net_device *netdev) -{ - struct ks_net *ks = netdev_priv(netdev); - int err; - -#define KS_INT_FLAGS IRQF_TRIGGER_LOW - /* lock the card, even if we may not actually do anything - * else at the moment. - */ - - netif_dbg(ks, ifup, ks->netdev, "%s - entry\n", __func__); - - /* reset the HW */ - err = request_irq(netdev->irq, ks_irq, KS_INT_FLAGS, DRV_NAME, netdev); - - if (err) { - pr_err("Failed to request IRQ: %d: %d\n", netdev->irq, err); - return err; - } - - /* wake up powermode to normal mode */ - ks_set_powermode(ks, PMECR_PM_NORMAL); - mdelay(1); /* wait for normal mode to take effect */ - - ks_wrreg16(ks, KS_ISR, 0xffff); - ks_enable_int(ks); - ks_enable_qmu(ks); - netif_start_queue(ks->netdev); - - netif_dbg(ks, ifup, ks->netdev, "network device up\n"); - - return 0; -} - -/** - * ks_net_stop - close network device - * @netdev: The device being closed. - * - * Called to close down a network device which has been active. Cancell any - * work, shutdown the RX and TX process and then place the chip into a low - * power state whilst it is not being used. - */ -static int ks_net_stop(struct net_device *netdev) -{ - struct ks_net *ks = netdev_priv(netdev); - - netif_info(ks, ifdown, netdev, "shutting down\n"); - - netif_stop_queue(netdev); - - mutex_lock(&ks->lock); - - /* turn off the IRQs and ack any outstanding */ - ks_wrreg16(ks, KS_IER, 0x0000); - ks_wrreg16(ks, KS_ISR, 0xffff); - - /* shutdown RX/TX QMU */ - ks_disable_qmu(ks); - ks_disable_int(ks); - - /* set powermode to soft power down to save power */ - ks_set_powermode(ks, PMECR_PM_SOFTDOWN); - free_irq(netdev->irq, netdev); - mutex_unlock(&ks->lock); - return 0; -} - - -/** - * ks_write_qmu - write 1 pkt data to the QMU. - * @ks: The chip information - * @pdata: buffer address to save 1 pkt - * @len: Pkt length in byte - * Here is the sequence to write 1 pkt: - * 1. set sudo DMA mode - * 2. write status/length - * 3. write pkt data - * 4. reset sudo DMA Mode - * 5. reset sudo DMA mode - * 6. Wait until pkt is out - */ -static void ks_write_qmu(struct ks_net *ks, u8 *pdata, u16 len) -{ - /* start header at txb[0] to align txw entries */ - ks->txh.txw[0] = 0; - ks->txh.txw[1] = cpu_to_le16(len); - - /* 1. 
set sudo-DMA mode */ - ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA); - /* 2. write status/lenth info */ - ks_outblk(ks, ks->txh.txw, 4); - /* 3. write pkt data */ - ks_outblk(ks, (u16 *)pdata, ALIGN(len, 4)); - /* 4. reset sudo-DMA mode */ - ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr); - /* 5. Enqueue Tx(move the pkt from TX buffer into TXQ) */ - ks_wrreg16(ks, KS_TXQCR, TXQCR_METFE); - /* 6. wait until TXQCR_METFE is auto-cleared */ - while (ks_rdreg16(ks, KS_TXQCR) & TXQCR_METFE) - ; -} - -/** - * ks_start_xmit - transmit packet - * @skb : The buffer to transmit - * @netdev : The device used to transmit the packet. - * - * Called by the network layer to transmit the @skb. - * spin_lock_irqsave is required because tx and rx should be mutual exclusive. - * So while tx is in-progress, prevent IRQ interrupt from happenning. - */ -static netdev_tx_t ks_start_xmit(struct sk_buff *skb, struct net_device *netdev) -{ - netdev_tx_t retv = NETDEV_TX_OK; - struct ks_net *ks = netdev_priv(netdev); - unsigned long flags; - - spin_lock_irqsave(&ks->statelock, flags); - - /* Extra space are required: - * 4 byte for alignment, 4 for status/length, 4 for CRC - */ - - if (likely(ks_tx_fifo_space(ks) >= skb->len + 12)) { - ks_write_qmu(ks, skb->data, skb->len); - /* add tx statistics */ - netdev->stats.tx_bytes += skb->len; - netdev->stats.tx_packets++; - dev_kfree_skb(skb); - } else - retv = NETDEV_TX_BUSY; - spin_unlock_irqrestore(&ks->statelock, flags); - return retv; -} - -/** - * ks_start_rx - ready to serve pkts - * @ks : The chip information - * - */ -static void ks_start_rx(struct ks_net *ks) -{ - u16 cntl; - - /* Enables QMU Receive (RXCR1). */ - cntl = ks_rdreg16(ks, KS_RXCR1); - cntl |= RXCR1_RXE ; - ks_wrreg16(ks, KS_RXCR1, cntl); -} /* ks_start_rx */ - -/** - * ks_stop_rx - stop to serve pkts - * @ks : The chip information - * - */ -static void ks_stop_rx(struct ks_net *ks) -{ - u16 cntl; - - /* Disables QMU Receive (RXCR1). */ - cntl = ks_rdreg16(ks, KS_RXCR1); - cntl &= ~RXCR1_RXE ; - ks_wrreg16(ks, KS_RXCR1, cntl); - -} /* ks_stop_rx */ - -static unsigned long const ethernet_polynomial = CRC32_POLY_BE; - -static unsigned long ether_gen_crc(int length, u8 *data) -{ - long crc = -1; - while (--length >= 0) { - u8 current_octet = *data++; - int bit; - - for (bit = 0; bit < 8; bit++, current_octet >>= 1) { - crc = (crc << 1) ^ - ((crc < 0) ^ (current_octet & 1) ? - ethernet_polynomial : 0); - } - } - return (unsigned long)crc; -} /* ether_gen_crc */ - -/** -* ks_set_grpaddr - set multicast information -* @ks : The chip information -*/ - -static void ks_set_grpaddr(struct ks_net *ks) -{ - u8 i; - u32 index, position, value; - - memset(ks->mcast_bits, 0, sizeof(u8) * HW_MCAST_SIZE); - - for (i = 0; i < ks->mcast_lst_size; i++) { - position = (ether_gen_crc(6, ks->mcast_lst[i]) >> 26) & 0x3f; - index = position >> 3; - value = 1 << (position & 7); - ks->mcast_bits[index] |= (u8)value; - } - - for (i = 0; i < HW_MCAST_SIZE; i++) { - if (i & 1) { - ks_wrreg16(ks, (u16)((KS_MAHTR0 + i) & ~1), - (ks->mcast_bits[i] << 8) | - ks->mcast_bits[i - 1]); - } - } -} /* ks_set_grpaddr */ - -/** -* ks_clear_mcast - clear multicast information -* -* @ks : The chip information -* This routine removes all mcast addresses set in the hardware. 
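ks_set_grpaddr() above maps each multicast address to one of 64 hash-filter bits using the top six bits of the big-endian CRC-32 of the address. The bit bookkeeping in isolation (set_hash_bit() is an illustrative name):

```c
#include <stdint.h>

#define HW_MCAST_SIZE 8		/* 64 filter bits stored as 8 bytes */

static void set_hash_bit(uint8_t bits[HW_MCAST_SIZE], uint32_t crc)
{
	unsigned int pos = (crc >> 26) & 0x3f;	/* top 6 CRC bits: 0..63 */

	bits[pos >> 3] |= 1u << (pos & 7);	/* byte index, bit-in-byte */
}
```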
-*/ - -static void ks_clear_mcast(struct ks_net *ks) -{ - u16 i, mcast_size; - for (i = 0; i < HW_MCAST_SIZE; i++) - ks->mcast_bits[i] = 0; - - mcast_size = HW_MCAST_SIZE >> 2; - for (i = 0; i < mcast_size; i++) - ks_wrreg16(ks, KS_MAHTR0 + (2*i), 0); -} - -static void ks_set_promis(struct ks_net *ks, u16 promiscuous_mode) -{ - u16 cntl; - ks->promiscuous = promiscuous_mode; - ks_stop_rx(ks); /* Stop receiving for reconfiguration */ - cntl = ks_rdreg16(ks, KS_RXCR1); - - cntl &= ~RXCR1_FILTER_MASK; - if (promiscuous_mode) - /* Enable Promiscuous mode */ - cntl |= RXCR1_RXAE | RXCR1_RXINVF; - else - /* Disable Promiscuous mode (default normal mode) */ - cntl |= RXCR1_RXPAFMA; - - ks_wrreg16(ks, KS_RXCR1, cntl); - - if (ks->enabled) - ks_start_rx(ks); - -} /* ks_set_promis */ - -static void ks_set_mcast(struct ks_net *ks, u16 mcast) -{ - u16 cntl; - - ks->all_mcast = mcast; - ks_stop_rx(ks); /* Stop receiving for reconfiguration */ - cntl = ks_rdreg16(ks, KS_RXCR1); - cntl &= ~RXCR1_FILTER_MASK; - if (mcast) - /* Enable "Perfect with Multicast address passed mode" */ - cntl |= (RXCR1_RXAE | RXCR1_RXMAFMA | RXCR1_RXPAFMA); - else - /** - * Disable "Perfect with Multicast address passed - * mode" (normal mode). - */ - cntl |= RXCR1_RXPAFMA; - - ks_wrreg16(ks, KS_RXCR1, cntl); - - if (ks->enabled) - ks_start_rx(ks); -} /* ks_set_mcast */ - -static void ks_set_rx_mode(struct net_device *netdev) -{ - struct ks_net *ks = netdev_priv(netdev); - struct netdev_hw_addr *ha; - - /* Turn on/off promiscuous mode. */ - if ((netdev->flags & IFF_PROMISC) == IFF_PROMISC) - ks_set_promis(ks, - (u16)((netdev->flags & IFF_PROMISC) == IFF_PROMISC)); - /* Turn on/off all mcast mode. */ - else if ((netdev->flags & IFF_ALLMULTI) == IFF_ALLMULTI) - ks_set_mcast(ks, - (u16)((netdev->flags & IFF_ALLMULTI) == IFF_ALLMULTI)); - else - ks_set_promis(ks, false); - - if ((netdev->flags & IFF_MULTICAST) && netdev_mc_count(netdev)) { - if (netdev_mc_count(netdev) <= MAX_MCAST_LST) { - int i = 0; - - netdev_for_each_mc_addr(ha, netdev) { - if (i >= MAX_MCAST_LST) - break; - memcpy(ks->mcast_lst[i++], ha->addr, ETH_ALEN); - } - ks->mcast_lst_size = (u8)i; - ks_set_grpaddr(ks); - } else { - /** - * List too big to support so - * turn on all mcast mode. 
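The three receive-filter configurations toggled above differ only in which RXCR1 bits accompany the base mask. A small model of the selection (the bit values below are placeholders; the real definitions live in ks8851.h):

```c
#include <stdint.h>

/* placeholder bit values; see ks8851.h for the real definitions */
#define RXCR1_RXINVF	(1 << 1)	/* receive inverse filtering */
#define RXCR1_RXAE	(1 << 4)	/* receive all enable */
#define RXCR1_RXMAFMA	(1 << 7)	/* multicast address filtering */
#define RXCR1_RXPAFMA	(1 << 11)	/* perfect address filtering */

static uint16_t rx_filter_bits(int promiscuous, int all_mcast)
{
	if (promiscuous)		/* accept everything, even bad frames */
		return RXCR1_RXAE | RXCR1_RXINVF;
	if (all_mcast)			/* perfect match plus all multicast */
		return RXCR1_RXAE | RXCR1_RXMAFMA | RXCR1_RXPAFMA;
	return RXCR1_RXPAFMA;		/* normal: perfect MAC match only */
}
```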
- */ - ks->mcast_lst_size = MAX_MCAST_LST; - ks_set_mcast(ks, true); - } - } else { - ks->mcast_lst_size = 0; - ks_clear_mcast(ks); - } -} /* ks_set_rx_mode */ - -static void ks_set_mac(struct ks_net *ks, u8 *data) -{ - u16 *pw = (u16 *)data; - u16 w, u; - - ks_stop_rx(ks); /* Stop receiving for reconfiguration */ - - u = *pw++; - w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF); - ks_wrreg16(ks, KS_MARH, w); - - u = *pw++; - w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF); - ks_wrreg16(ks, KS_MARM, w); - - u = *pw; - w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF); - ks_wrreg16(ks, KS_MARL, w); - - memcpy(ks->mac_addr, data, ETH_ALEN); - - if (ks->enabled) - ks_start_rx(ks); -} - -static int ks_set_mac_address(struct net_device *netdev, void *paddr) -{ - struct ks_net *ks = netdev_priv(netdev); - struct sockaddr *addr = paddr; - u8 *da; - - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); - - da = (u8 *)netdev->dev_addr; - - ks_set_mac(ks, da); - return 0; -} - -static int ks_net_ioctl(struct net_device *netdev, struct ifreq *req, int cmd) -{ - struct ks_net *ks = netdev_priv(netdev); - - if (!netif_running(netdev)) - return -EINVAL; - - return generic_mii_ioctl(&ks->mii, if_mii(req), cmd, NULL); -} - -static const struct net_device_ops ks_netdev_ops = { - .ndo_open = ks_net_open, - .ndo_stop = ks_net_stop, - .ndo_do_ioctl = ks_net_ioctl, - .ndo_start_xmit = ks_start_xmit, - .ndo_set_mac_address = ks_set_mac_address, - .ndo_set_rx_mode = ks_set_rx_mode, - .ndo_validate_addr = eth_validate_addr, -}; - -/* ethtool support */ - -static void ks_get_drvinfo(struct net_device *netdev, - struct ethtool_drvinfo *di) -{ - strlcpy(di->driver, DRV_NAME, sizeof(di->driver)); - strlcpy(di->version, "1.00", sizeof(di->version)); - strlcpy(di->bus_info, dev_name(netdev->dev.parent), - sizeof(di->bus_info)); -} - -static u32 ks_get_msglevel(struct net_device *netdev) -{ - struct ks_net *ks = netdev_priv(netdev); - return ks->msg_enable; -} - -static void ks_set_msglevel(struct net_device *netdev, u32 to) -{ - struct ks_net *ks = netdev_priv(netdev); - ks->msg_enable = to; -} - -static int ks_get_link_ksettings(struct net_device *netdev, - struct ethtool_link_ksettings *cmd) -{ - struct ks_net *ks = netdev_priv(netdev); - - mii_ethtool_get_link_ksettings(&ks->mii, cmd); - - return 0; -} - -static int ks_set_link_ksettings(struct net_device *netdev, - const struct ethtool_link_ksettings *cmd) -{ - struct ks_net *ks = netdev_priv(netdev); - return mii_ethtool_set_link_ksettings(&ks->mii, cmd); -} - -static u32 ks_get_link(struct net_device *netdev) -{ - struct ks_net *ks = netdev_priv(netdev); - return mii_link_ok(&ks->mii); -} - -static int ks_nway_reset(struct net_device *netdev) -{ - struct ks_net *ks = netdev_priv(netdev); - return mii_nway_restart(&ks->mii); -} - -static const struct ethtool_ops ks_ethtool_ops = { - .get_drvinfo = ks_get_drvinfo, - .get_msglevel = ks_get_msglevel, - .set_msglevel = ks_set_msglevel, - .get_link = ks_get_link, - .nway_reset = ks_nway_reset, - .get_link_ksettings = ks_get_link_ksettings, - .set_link_ksettings = ks_set_link_ksettings, -}; - -/* MII interface controls */ - -/** - * ks_phy_reg - convert MII register into a KS8851 register - * @reg: MII register number. - * - * Return the KS8851 register number for the corresponding MII PHY register - * if possible. Return zero if the MII register has no direct mapping to the - * KS8851 register set. 
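ks_set_mac() above loads the address into MARH/MARM/MARL after swapping each 16-bit half; the open-coded `((u & 0xFF) << 8) | ((u >> 8) & 0xFF)` is just a byte swap, i.e. what the kernel's swab16() computes. A quick exhaustive check of that equivalence:

```c
#include <assert.h>
#include <stdint.h>

static uint16_t swap_open_coded(uint16_t u)
{
	return ((u & 0xff) << 8) | ((u >> 8) & 0xff);
}

static uint16_t swab16_model(uint16_t u)	/* what swab16() computes */
{
	return (uint16_t)(u << 8 | u >> 8);
}

int main(void)
{
	for (uint32_t u = 0; u <= 0xffff; u++)
		assert(swap_open_coded(u) == swab16_model(u));
	return 0;
}
```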
- */ -static int ks_phy_reg(int reg) -{ - switch (reg) { - case MII_BMCR: - return KS_P1MBCR; - case MII_BMSR: - return KS_P1MBSR; - case MII_PHYSID1: - return KS_PHY1ILR; - case MII_PHYSID2: - return KS_PHY1IHR; - case MII_ADVERTISE: - return KS_P1ANAR; - case MII_LPA: - return KS_P1ANLPR; - } - - return 0x0; -} - -/** - * ks_phy_read - MII interface PHY register read. - * @netdev: The network device the PHY is on. - * @phy_addr: Address of PHY (ignored as we only have one) - * @reg: The register to read. - * - * This call reads data from the PHY register specified in @reg. Since the - * device does not support all the MII registers, the non-existent values - * are always returned as zero. - * - * We return zero for unsupported registers as the MII code does not check - * the value returned for any error status, and simply returns it to the - * caller. The mii-tool that the driver was tested with takes any -ve error - * as real PHY capabilities, thus displaying incorrect data to the user. - */ -static int ks_phy_read(struct net_device *netdev, int phy_addr, int reg) -{ - struct ks_net *ks = netdev_priv(netdev); - int ksreg; - int result; - - ksreg = ks_phy_reg(reg); - if (!ksreg) - return 0x0; /* no error return allowed, so use zero */ - - mutex_lock(&ks->lock); - result = ks_rdreg16(ks, ksreg); - mutex_unlock(&ks->lock); - - return result; -} - -static void ks_phy_write(struct net_device *netdev, - int phy, int reg, int value) -{ - struct ks_net *ks = netdev_priv(netdev); - int ksreg; - - ksreg = ks_phy_reg(reg); - if (ksreg) { - mutex_lock(&ks->lock); - ks_wrreg16(ks, ksreg, value); - mutex_unlock(&ks->lock); - } -} - -/** - * ks_read_selftest - read the selftest memory info. - * @ks: The device state - * - * Read and check the TX/RX memory selftest information. - */ -static int ks_read_selftest(struct ks_net *ks) -{ - unsigned both_done = MBIR_TXMBF | MBIR_RXMBF; - int ret = 0; - unsigned rd; - - rd = ks_rdreg16(ks, KS_MBIR); - - if ((rd & both_done) != both_done) { - netdev_warn(ks->netdev, "Memory selftest not finished\n"); - return 0; - } - - if (rd & MBIR_TXMBFA) { - netdev_err(ks->netdev, "TX memory selftest fails\n"); - ret |= 1; - } - - if (rd & MBIR_RXMBFA) { - netdev_err(ks->netdev, "RX memory selftest fails\n"); - ret |= 2; - } - - netdev_info(ks->netdev, "the selftest passes\n"); - return ret; -} - -static void ks_setup(struct ks_net *ks) -{ - u16 w; - - /** - * Configure QMU Transmit - */ - - /* Setup Transmit Frame Data Pointer Auto-Increment (TXFDPR) */ - ks_wrreg16(ks, KS_TXFDPR, TXFDPR_TXFPAI); - - /* Setup Receive Frame Data Pointer Auto-Increment */ - ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI); - - /* Setup Receive Frame Threshold - 1 frame (RXFCTFC) */ - ks_wrreg16(ks, KS_RXFCTR, 1 & RXFCTR_RXFCT_MASK); - - /* Setup RxQ Command Control (RXQCR) */ - ks->rc_rxqcr = RXQCR_CMD_CNTL; - ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr); - - /** - * set the force mode to half duplex, default is full duplex - * because if the auto-negotiation fails, most switch uses - * half-duplex. 
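ks_read_selftest() above folds the MBIR bits into a small bitmask result: 0 when both memories pass (or when the test never finished), 1 for a TX memory fault, 2 for RX, 3 for both. The decode on its own — treat the bit positions below as assumptions rather than quotations from ks8851.h:

```c
/* assumed bit positions; the authoritative ones are in ks8851.h */
#define MBIR_TXMBF	(1 << 12)	/* TX selftest finished */
#define MBIR_TXMBFA	(1 << 11)	/* TX selftest fault */
#define MBIR_RXMBF	(1 << 4)	/* RX selftest finished */
#define MBIR_RXMBFA	(1 << 3)	/* RX selftest fault */

static int selftest_decode(unsigned int mbir)
{
	int ret = 0;

	if ((mbir & (MBIR_TXMBF | MBIR_RXMBF)) != (MBIR_TXMBF | MBIR_RXMBF))
		return 0;		/* not finished: nothing to report */
	if (mbir & MBIR_TXMBFA)
		ret |= 1;
	if (mbir & MBIR_RXMBFA)
		ret |= 2;
	return ret;
}
```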
- */ - - w = ks_rdreg16(ks, KS_P1MBCR); - w &= ~BMCR_FULLDPLX; - ks_wrreg16(ks, KS_P1MBCR, w); - - w = TXCR_TXFCE | TXCR_TXPE | TXCR_TXCRC | TXCR_TCGIP; - ks_wrreg16(ks, KS_TXCR, w); - - w = RXCR1_RXFCE | RXCR1_RXBE | RXCR1_RXUE | RXCR1_RXME | RXCR1_RXIPFCC; - - if (ks->promiscuous) /* bPromiscuous */ - w |= (RXCR1_RXAE | RXCR1_RXINVF); - else if (ks->all_mcast) /* Multicast address passed mode */ - w |= (RXCR1_RXAE | RXCR1_RXMAFMA | RXCR1_RXPAFMA); - else /* Normal mode */ - w |= RXCR1_RXPAFMA; - - ks_wrreg16(ks, KS_RXCR1, w); -} /*ks_setup */ - - -static void ks_setup_int(struct ks_net *ks) -{ - ks->rc_ier = 0x00; - /* Clear the interrupts status of the hardware. */ - ks_wrreg16(ks, KS_ISR, 0xffff); - - /* Enables the interrupts of the hardware. */ - ks->rc_ier = (IRQ_LCI | IRQ_TXI | IRQ_RXI); -} /* ks_setup_int */ - -static int ks_hw_init(struct ks_net *ks) -{ -#define MHEADER_SIZE (sizeof(struct type_frame_head) * MAX_RECV_FRAMES) - ks->promiscuous = 0; - ks->all_mcast = 0; - ks->mcast_lst_size = 0; - - ks->frame_head_info = devm_kmalloc(&ks->pdev->dev, MHEADER_SIZE, - GFP_KERNEL); - if (!ks->frame_head_info) - return false; - - ks_set_mac(ks, KS_DEFAULT_MAC_ADDRESS); - return true; -} - -#if defined(CONFIG_OF) -static const struct of_device_id ks8851_ml_dt_ids[] = { - { .compatible = "micrel,ks8851-mll" }, - { /* sentinel */ } -}; -MODULE_DEVICE_TABLE(of, ks8851_ml_dt_ids); -#endif - -static int ks8851_probe(struct platform_device *pdev) -{ - int err; - struct net_device *netdev; - struct ks_net *ks; - u16 id, data; - const char *mac; - - netdev = alloc_etherdev(sizeof(struct ks_net)); - if (!netdev) - return -ENOMEM; - - SET_NETDEV_DEV(netdev, &pdev->dev); - - ks = netdev_priv(netdev); - ks->netdev = netdev; - - ks->hw_addr = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(ks->hw_addr)) { - err = PTR_ERR(ks->hw_addr); - goto err_free; - } - - ks->hw_addr_cmd = devm_platform_ioremap_resource(pdev, 1); - if (IS_ERR(ks->hw_addr_cmd)) { - err = PTR_ERR(ks->hw_addr_cmd); - goto err_free; - } - - err = ks_check_endian(ks); - if (err) - goto err_free; - - netdev->irq = platform_get_irq(pdev, 0); - - if ((int)netdev->irq < 0) { - err = netdev->irq; - goto err_free; - } - - ks->pdev = pdev; - - mutex_init(&ks->lock); - spin_lock_init(&ks->statelock); - - netdev->netdev_ops = &ks_netdev_ops; - netdev->ethtool_ops = &ks_ethtool_ops; - - /* setup mii state */ - ks->mii.dev = netdev; - ks->mii.phy_id = 1, - ks->mii.phy_id_mask = 1; - ks->mii.reg_num_mask = 0xf; - ks->mii.mdio_read = ks_phy_read; - ks->mii.mdio_write = ks_phy_write; - - netdev_info(netdev, "message enable is %d\n", msg_enable); - /* set the default message enable */ - ks->msg_enable = netif_msg_init(msg_enable, (NETIF_MSG_DRV | - NETIF_MSG_PROBE | - NETIF_MSG_LINK)); - ks_read_config(ks); - - /* simple check for a valid chip being connected to the bus */ - if ((ks_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) { - netdev_err(netdev, "failed to read device ID\n"); - err = -ENODEV; - goto err_free; - } - - if (ks_read_selftest(ks)) { - netdev_err(netdev, "failed to read device ID\n"); - err = -ENODEV; - goto err_free; - } - - err = register_netdev(netdev); - if (err) - goto err_free; - - platform_set_drvdata(pdev, netdev); - - ks_soft_reset(ks, GRR_GSR); - ks_hw_init(ks); - ks_disable_qmu(ks); - ks_setup(ks); - ks_setup_int(ks); - - data = ks_rdreg16(ks, KS_OBCR); - ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16mA); - - /* overwriting the default MAC address */ - if (pdev->dev.of_node) { - mac = 
of_get_mac_address(pdev->dev.of_node); - if (!IS_ERR(mac)) - ether_addr_copy(ks->mac_addr, mac); - } else { - struct ks8851_mll_platform_data *pdata; - - pdata = dev_get_platdata(&pdev->dev); - if (!pdata) { - netdev_err(netdev, "No platform data\n"); - err = -ENODEV; - goto err_pdata; - } - memcpy(ks->mac_addr, pdata->mac_addr, ETH_ALEN); - } - if (!is_valid_ether_addr(ks->mac_addr)) { - /* Use random MAC address if none passed */ - eth_random_addr(ks->mac_addr); - netdev_info(netdev, "Using random mac address\n"); - } - netdev_info(netdev, "Mac address is: %pM\n", ks->mac_addr); - - memcpy(netdev->dev_addr, ks->mac_addr, ETH_ALEN); - - ks_set_mac(ks, netdev->dev_addr); - - id = ks_rdreg16(ks, KS_CIDER); - - netdev_info(netdev, "Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n", - (id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7); - return 0; - -err_pdata: - unregister_netdev(netdev); -err_free: - free_netdev(netdev); - return err; -} - -static int ks8851_remove(struct platform_device *pdev) -{ - struct net_device *netdev = platform_get_drvdata(pdev); - - unregister_netdev(netdev); - free_netdev(netdev); - return 0; - -} - -static struct platform_driver ks8851_platform_driver = { - .driver = { - .name = DRV_NAME, - .of_match_table = of_match_ptr(ks8851_ml_dt_ids), - }, - .probe = ks8851_probe, - .remove = ks8851_remove, -}; - -module_platform_driver(ks8851_platform_driver); - -MODULE_DESCRIPTION("KS8851 MLL Network driver"); -MODULE_AUTHOR("David Choi <david.choi@micrel.com>"); -MODULE_LICENSE("GPL"); -module_param_named(message, msg_enable, int, 0); -MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)"); - diff --git a/drivers/net/ethernet/micrel/ks8851_par.c b/drivers/net/ethernet/micrel/ks8851_par.c new file mode 100644 index 000000000000..3bab0cb2b1a5 --- /dev/null +++ b/drivers/net/ethernet/micrel/ks8851_par.c @@ -0,0 +1,357 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* drivers/net/ethernet/micrel/ks8851.c + * + * Copyright 2009 Simtec Electronics + * http://www.simtec.co.uk/ + * Ben Dooks <ben@simtec.co.uk> + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#define DEBUG + +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/ethtool.h> +#include <linux/iopoll.h> +#include <linux/mii.h> + +#include <linux/platform_device.h> +#include <linux/of_net.h> + +#include "ks8851.h" + +static int msg_enable; + +#define BE3 0x8000 /* Byte Enable 3 */ +#define BE2 0x4000 /* Byte Enable 2 */ +#define BE1 0x2000 /* Byte Enable 1 */ +#define BE0 0x1000 /* Byte Enable 0 */ + +/** + * struct ks8851_net_par - KS8851 Parallel driver private data + * @ks8851: KS8851 driver common private data + * @lock: Lock to ensure that the device is not accessed when busy. + * @hw_addr : start address of data register. + * @hw_addr_cmd : start address of command register. + * @cmd_reg_cache : command register cached. + * + * The @lock ensures that the chip is protected when certain operations are + * in progress. When the read or write packet transfer is in progress, most + * of the chip registers are not accessible until the transfer is finished + * and the DMA has been de-asserted. 
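The new parallel front-end embeds the common state as its first member and recovers its own struct with container_of(), as the to_ks8851_par() macro just below does. A freestanding model of that recovery (all names here are illustrative):

```c
#include <assert.h>
#include <stddef.h>

#define container_of_model(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct common_model { int msg_enable; };

struct par_model {
	struct common_model common;	/* shared state, embedded first */
	int cmd_reg_cache;		/* bus-private state follows */
};

int main(void)
{
	struct par_model par = { .cmd_reg_cache = 42 };
	struct common_model *ks = &par.common;	/* what netdev_priv() hands out */

	assert(container_of_model(ks, struct par_model, common) == &par);
	return 0;
}
```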
+ */ +struct ks8851_net_par { + struct ks8851_net ks8851; + spinlock_t lock; + void __iomem *hw_addr; + void __iomem *hw_addr_cmd; + u16 cmd_reg_cache; +}; + +#define to_ks8851_par(ks) container_of((ks), struct ks8851_net_par, ks8851) + +/** + * ks8851_lock_par - register access lock + * @ks: The chip state + * @flags: Spinlock flags + * + * Claim chip register access lock + */ +static void ks8851_lock_par(struct ks8851_net *ks, unsigned long *flags) +{ + struct ks8851_net_par *ksp = to_ks8851_par(ks); + + spin_lock_irqsave(&ksp->lock, *flags); +} + +/** + * ks8851_unlock_par - register access unlock + * @ks: The chip state + * @flags: Spinlock flags + * + * Release chip register access lock + */ +static void ks8851_unlock_par(struct ks8851_net *ks, unsigned long *flags) +{ + struct ks8851_net_par *ksp = to_ks8851_par(ks); + + spin_unlock_irqrestore(&ksp->lock, *flags); +} + +/** + * ks_check_endian - Check whether endianness of the bus is correct + * @ks : The chip information + * + * The KS8851-16MLL EESK pin allows selecting the endianness of the 16bit + * bus. To maintain optimum performance, the bus endianness should be set + * such that it matches the endianness of the CPU. + */ +static int ks_check_endian(struct ks8851_net *ks) +{ + struct ks8851_net_par *ksp = to_ks8851_par(ks); + u16 cider; + + /* + * Read CIDER register first, however read it the "wrong" way around. + * If the endian strap on the KS8851-16MLL in incorrect and the chip + * is operating in different endianness than the CPU, then the meaning + * of BE[3:0] byte-enable bits is also swapped such that: + * BE[3,2,1,0] becomes BE[1,0,3,2] + * + * Luckily for us, the byte-enable bits are the top four MSbits of + * the address register and the CIDER register is at offset 0xc0. + * Hence, by reading address 0xc0c0, which is not impacted by endian + * swapping, we assert either BE[3:2] or BE[1:0] while reading the + * CIDER register. + * + * If the bus configuration is correct, reading 0xc0c0 asserts + * BE[3:2] and this read returns 0x0000, because to read register + * with bottom two LSbits of address set to 0, BE[1:0] must be + * asserted. + * + * If the bus configuration is NOT correct, reading 0xc0c0 asserts + * BE[1:0] and this read returns non-zero 0x8872 value. + */ + iowrite16(BE3 | BE2 | KS_CIDER, ksp->hw_addr_cmd); + cider = ioread16(ksp->hw_addr); + if (!cider) + return 0; + + netdev_err(ks->netdev, "incorrect EESK endian strap setting\n"); + + return -EINVAL; +} + +/** + * ks8851_wrreg16_par - write 16bit register value to chip + * @ks: The chip state + * @reg: The register address + * @val: The value to write + * + * Issue a write to put the value @val into the register specified in @reg. 
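The endian probe in ks_check_endian() above works because the address it reads, 0xc0c0, is unchanged by a 16-bit byte swap, so the command write reaches the chip intact whichever way the EESK strap is set; only the byte-enable interpretation differs. A one-line demonstration of that property:

```c
/* 0xc0c0 is palindromic under a 16-bit byte swap */
_Static_assert(((0xc0c0u >> 8) | ((0xc0c0u & 0xffu) << 8)) == 0xc0c0u,
	       "probe address must be endian-invariant");
```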
+ */ +static void ks8851_wrreg16_par(struct ks8851_net *ks, unsigned int reg, + unsigned int val) +{ + struct ks8851_net_par *ksp = to_ks8851_par(ks); + + ksp->cmd_reg_cache = (u16)reg | ((BE1 | BE0) << (reg & 0x02)); + iowrite16(ksp->cmd_reg_cache, ksp->hw_addr_cmd); + iowrite16(val, ksp->hw_addr); +} + +/** + * ks8851_rdreg16_par - read 16 bit register from chip + * @ks: The chip information + * @reg: The register address + * + * Read a 16bit register from the chip, returning the result + */ +static unsigned int ks8851_rdreg16_par(struct ks8851_net *ks, unsigned int reg) +{ + struct ks8851_net_par *ksp = to_ks8851_par(ks); + + ksp->cmd_reg_cache = (u16)reg | ((BE1 | BE0) << (reg & 0x02)); + iowrite16(ksp->cmd_reg_cache, ksp->hw_addr_cmd); + return ioread16(ksp->hw_addr); +} + +/** + * ks8851_rdfifo_par - read data from the receive fifo + * @ks: The device state. + * @buff: The buffer address + * @len: The length of the data to read + * + * Issue an RXQ FIFO read command and read the @len amount of data from + * the FIFO into the buffer specified by @buff. + */ +static void ks8851_rdfifo_par(struct ks8851_net *ks, u8 *buff, unsigned int len) +{ + struct ks8851_net_par *ksp = to_ks8851_par(ks); + + netif_dbg(ks, rx_status, ks->netdev, + "%s: %d@%p\n", __func__, len, buff); + + ioread16_rep(ksp->hw_addr, (u16 *)buff + 1, len / 2); +} + +/** + * ks8851_wrfifo_par - write packet to TX FIFO + * @ks: The device state. + * @txp: The sk_buff to transmit. + * @irq: IRQ on completion of the packet. + * + * Send the @txp to the chip. This means creating the relevant packet header + * specifying the length of the packet and the other information the chip + * needs, such as IRQ on completion. Send the header and the packet data to + * the device. + */ +static void ks8851_wrfifo_par(struct ks8851_net *ks, struct sk_buff *txp, + bool irq) +{ + struct ks8851_net_par *ksp = to_ks8851_par(ks); + unsigned int len = ALIGN(txp->len, 4); + unsigned int fid = 0; + + netif_dbg(ks, tx_queued, ks->netdev, "%s: skb %p, %d@%p, irq %d\n", + __func__, txp, txp->len, txp->data, irq); + + fid = ks->fid++; + fid &= TXFR_TXFID_MASK; + + if (irq) + fid |= TXFR_TXIC; /* irq on completion */ + + iowrite16(fid, ksp->hw_addr); + iowrite16(txp->len, ksp->hw_addr); + + iowrite16_rep(ksp->hw_addr, txp->data, len / 2); +} + +/** + * ks8851_rx_skb_par - receive skbuff + * @ks: The device state. + * @skb: The skbuff + */ +static void ks8851_rx_skb_par(struct ks8851_net *ks, struct sk_buff *skb) +{ + netif_rx(skb); +} + +static unsigned int ks8851_rdreg16_par_txqcr(struct ks8851_net *ks) +{ + return ks8851_rdreg16_par(ks, KS_TXQCR); +} + +/** + * ks8851_start_xmit_par - transmit packet + * @skb: The buffer to transmit + * @dev: The device used to transmit the packet. + * + * Called by the network layer to transmit the @skb. Queue the packet for + * the device and schedule the necessary work to transmit the packet when + * it is free. + * + * We do this to firstly avoid sleeping with the network device locked, + * and secondly so we can round up more than one packet to transmit which + * means we can try and avoid generating too many transmit done interrupts. 
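+ *
+ * Note that unlike the SPI variant, this parallel implementation does not
+ * actually defer to a workqueue: when the TX memory counter (TXMIR) shows
+ * enough room, the frame is written inline under the register lock and
+ * TXQCR is polled until METFE clears; otherwise NETDEV_TX_BUSY is returned.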
+ */ +static netdev_tx_t ks8851_start_xmit_par(struct sk_buff *skb, + struct net_device *dev) +{ + struct ks8851_net *ks = netdev_priv(dev); + netdev_tx_t ret = NETDEV_TX_OK; + unsigned long flags; + unsigned int txqcr; + u16 txmir; + int err; + + netif_dbg(ks, tx_queued, ks->netdev, + "%s: skb %p, %d@%p\n", __func__, skb, skb->len, skb->data); + + ks8851_lock_par(ks, &flags); + + txmir = ks8851_rdreg16_par(ks, KS_TXMIR) & 0x1fff; + + if (likely(txmir >= skb->len + 12)) { + ks8851_wrreg16_par(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA); + ks8851_wrfifo_par(ks, skb, false); + ks8851_wrreg16_par(ks, KS_RXQCR, ks->rc_rxqcr); + ks8851_wrreg16_par(ks, KS_TXQCR, TXQCR_METFE); + + err = readx_poll_timeout_atomic(ks8851_rdreg16_par_txqcr, ks, + txqcr, !(txqcr & TXQCR_METFE), + 5, 1000000); + if (err) + ret = NETDEV_TX_BUSY; + + ks8851_done_tx(ks, skb); + } else { + ret = NETDEV_TX_BUSY; + } + + ks8851_unlock_par(ks, &flags); + + return ret; +} + +static int ks8851_probe_par(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct ks8851_net_par *ksp; + struct net_device *netdev; + struct ks8851_net *ks; + int ret; + + netdev = devm_alloc_etherdev(dev, sizeof(struct ks8851_net_par)); + if (!netdev) + return -ENOMEM; + + ks = netdev_priv(netdev); + + ks->lock = ks8851_lock_par; + ks->unlock = ks8851_unlock_par; + ks->rdreg16 = ks8851_rdreg16_par; + ks->wrreg16 = ks8851_wrreg16_par; + ks->rdfifo = ks8851_rdfifo_par; + ks->wrfifo = ks8851_wrfifo_par; + ks->start_xmit = ks8851_start_xmit_par; + ks->rx_skb = ks8851_rx_skb_par; + +#define STD_IRQ (IRQ_LCI | /* Link Change */ \ + IRQ_RXI | /* RX done */ \ + IRQ_RXPSI) /* RX process stop */ + ks->rc_ier = STD_IRQ; + + ksp = to_ks8851_par(ks); + spin_lock_init(&ksp->lock); + + ksp->hw_addr = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(ksp->hw_addr)) + return PTR_ERR(ksp->hw_addr); + + ksp->hw_addr_cmd = devm_platform_ioremap_resource(pdev, 1); + if (IS_ERR(ksp->hw_addr_cmd)) + return PTR_ERR(ksp->hw_addr_cmd); + + ret = ks_check_endian(ks); + if (ret) + return ret; + + netdev->irq = platform_get_irq(pdev, 0); + + return ks8851_probe_common(netdev, dev, msg_enable); +} + +static int ks8851_remove_par(struct platform_device *pdev) +{ + return ks8851_remove_common(&pdev->dev); +} + +static const struct of_device_id ks8851_match_table[] = { + { .compatible = "micrel,ks8851-mll" }, + { } +}; +MODULE_DEVICE_TABLE(of, ks8851_match_table); + +static struct platform_driver ks8851_driver = { + .driver = { + .name = "ks8851", + .of_match_table = ks8851_match_table, + .pm = &ks8851_pm_ops, + }, + .probe = ks8851_probe_par, + .remove = ks8851_remove_par, +}; +module_platform_driver(ks8851_driver); + +MODULE_DESCRIPTION("KS8851 Network driver"); +MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>"); +MODULE_LICENSE("GPL"); + +module_param_named(message, msg_enable, int, 0); +MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)"); diff --git a/drivers/net/ethernet/micrel/ks8851_spi.c b/drivers/net/ethernet/micrel/ks8851_spi.c new file mode 100644 index 000000000000..4ec7f1615977 --- /dev/null +++ b/drivers/net/ethernet/micrel/ks8851_spi.c @@ -0,0 +1,485 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* drivers/net/ethernet/micrel/ks8851.c + * + * Copyright 2009 Simtec Electronics + * http://www.simtec.co.uk/ + * Ben Dooks <ben@simtec.co.uk> + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#define DEBUG + +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/netdevice.h> +#include 
<linux/etherdevice.h> +#include <linux/ethtool.h> +#include <linux/cache.h> +#include <linux/crc32.h> +#include <linux/mii.h> +#include <linux/regulator/consumer.h> + +#include <linux/spi/spi.h> +#include <linux/gpio.h> +#include <linux/of_gpio.h> +#include <linux/of_net.h> + +#include "ks8851.h" + +static int msg_enable; + +/** + * struct ks8851_net_spi - KS8851 SPI driver private data + * @lock: Lock to ensure that the device is not accessed when busy. + * @tx_work: Work queue for tx packets + * @ks8851: KS8851 driver common private data + * @spidev: The spi device we're bound to. + * @spi_msg1: pre-setup SPI message with one transfer, @spi_xfer1. + * @spi_msg2: pre-setup SPI message with two transfers, @spi_xfer2. + * @spi_xfer1: @spi_msg1 SPI transfer structure + * @spi_xfer2: @spi_msg2 SPI transfer structure + * + * The @lock ensures that the chip is protected when certain operations are + * in progress. When the read or write packet transfer is in progress, most + * of the chip registers are not accessible until the transfer is finished and + * the DMA has been de-asserted. + */ +struct ks8851_net_spi { + struct ks8851_net ks8851; + struct mutex lock; + struct work_struct tx_work; + struct spi_device *spidev; + struct spi_message spi_msg1; + struct spi_message spi_msg2; + struct spi_transfer spi_xfer1; + struct spi_transfer spi_xfer2[2]; +}; + +#define to_ks8851_spi(ks) container_of((ks), struct ks8851_net_spi, ks8851) + +/* SPI frame opcodes */ +#define KS_SPIOP_RD 0x00 +#define KS_SPIOP_WR 0x40 +#define KS_SPIOP_RXFIFO 0x80 +#define KS_SPIOP_TXFIFO 0xC0 + +/* shift for byte-enable data */ +#define BYTE_EN(_x) ((_x) << 2) + +/* turn register number and byte-enable mask into data for start of packet */ +#define MK_OP(_byteen, _reg) \ + (BYTE_EN(_byteen) | (_reg) << (8 + 2) | (_reg) >> 6) + +/** + * ks8851_lock_spi - register access lock + * @ks: The chip state + * @flags: Spinlock flags + * + * Claim chip register access lock + */ +static void ks8851_lock_spi(struct ks8851_net *ks, unsigned long *flags) +{ + struct ks8851_net_spi *kss = to_ks8851_spi(ks); + + mutex_lock(&kss->lock); +} + +/** + * ks8851_unlock_spi - register access unlock + * @ks: The chip state + * @flags: Spinlock flags + * + * Release chip register access lock + */ +static void ks8851_unlock_spi(struct ks8851_net *ks, unsigned long *flags) +{ + struct ks8851_net_spi *kss = to_ks8851_spi(ks); + + mutex_unlock(&kss->lock); +} + +/* SPI register read/write calls. + * + * All these calls issue SPI transactions to access the chip's registers. They + * all require that the necessary lock is held to prevent accesses when the + * chip is busy transferring packet data (RX/TX FIFO accesses). + */ + +/** + * ks8851_wrreg16_spi - write 16bit register value to chip via SPI + * @ks: The chip state + * @reg: The register address + * @val: The value to write + * + * Issue a write to put the value @val into the register specified in @reg. + */ +static void ks8851_wrreg16_spi(struct ks8851_net *ks, unsigned int reg, + unsigned int val) +{ + struct ks8851_net_spi *kss = to_ks8851_spi(ks); + struct spi_transfer *xfer = &kss->spi_xfer1; + struct spi_message *msg = &kss->spi_msg1; + __le16 txb[2]; + int ret; + + txb[0] = cpu_to_le16(MK_OP(reg & 2 ?
0xC : 0x03, reg) | KS_SPIOP_WR); + txb[1] = cpu_to_le16(val); + + xfer->tx_buf = txb; + xfer->rx_buf = NULL; + xfer->len = 4; + + ret = spi_sync(kss->spidev, msg); + if (ret < 0) + netdev_err(ks->netdev, "spi_sync() failed\n"); +} + +/** + * ks8851_rdreg - issue read register command and return the data + * @ks: The device state + * @op: The register address and byte enables in message format. + * @rxb: The RX buffer to return the result into + * @rxl: The length of data expected. + * + * This is the low level read call that issues the necessary spi message(s) + * to read data from the register specified in @op. + */ +static void ks8851_rdreg(struct ks8851_net *ks, unsigned int op, + u8 *rxb, unsigned int rxl) +{ + struct ks8851_net_spi *kss = to_ks8851_spi(ks); + struct spi_transfer *xfer; + struct spi_message *msg; + __le16 *txb = (__le16 *)ks->txd; + u8 *trx = ks->rxd; + int ret; + + txb[0] = cpu_to_le16(op | KS_SPIOP_RD); + + if (kss->spidev->master->flags & SPI_MASTER_HALF_DUPLEX) { + msg = &kss->spi_msg2; + xfer = kss->spi_xfer2; + + xfer->tx_buf = txb; + xfer->rx_buf = NULL; + xfer->len = 2; + + xfer++; + xfer->tx_buf = NULL; + xfer->rx_buf = trx; + xfer->len = rxl; + } else { + msg = &kss->spi_msg1; + xfer = &kss->spi_xfer1; + + xfer->tx_buf = txb; + xfer->rx_buf = trx; + xfer->len = rxl + 2; + } + + ret = spi_sync(kss->spidev, msg); + if (ret < 0) + netdev_err(ks->netdev, "read: spi_sync() failed\n"); + else if (kss->spidev->master->flags & SPI_MASTER_HALF_DUPLEX) + memcpy(rxb, trx, rxl); + else + memcpy(rxb, trx + 2, rxl); +} + +/** + * ks8851_rdreg16_spi - read 16 bit register from device via SPI + * @ks: The chip information + * @reg: The register address + * + * Read a 16bit register from the chip, returning the result + */ +static unsigned int ks8851_rdreg16_spi(struct ks8851_net *ks, unsigned int reg) +{ + __le16 rx = 0; + + ks8851_rdreg(ks, MK_OP(reg & 2 ? 0xC : 0x3, reg), (u8 *)&rx, 2); + return le16_to_cpu(rx); +} + +/** + * ks8851_rdfifo_spi - read data from the receive fifo via SPI + * @ks: The device state. + * @buff: The buffer address + * @len: The length of the data to read + * + * Issue an RXQ FIFO read command and read the @len amount of data from + * the FIFO into the buffer specified by @buff. + */ +static void ks8851_rdfifo_spi(struct ks8851_net *ks, u8 *buff, unsigned int len) +{ + struct ks8851_net_spi *kss = to_ks8851_spi(ks); + struct spi_transfer *xfer = kss->spi_xfer2; + struct spi_message *msg = &kss->spi_msg2; + u8 txb[1]; + int ret; + + netif_dbg(ks, rx_status, ks->netdev, + "%s: %d@%p\n", __func__, len, buff); + + /* set the operation we're issuing */ + txb[0] = KS_SPIOP_RXFIFO; + + xfer->tx_buf = txb; + xfer->rx_buf = NULL; + xfer->len = 1; + + xfer++; + xfer->rx_buf = buff; + xfer->tx_buf = NULL; + xfer->len = len; + + ret = spi_sync(kss->spidev, msg); + if (ret < 0) + netdev_err(ks->netdev, "%s: spi_sync() failed\n", __func__); +} + +/** + * ks8851_wrfifo_spi - write packet to TX FIFO via SPI + * @ks: The device state. + * @txp: The sk_buff to transmit. + * @irq: IRQ on completion of the packet. + * + * Send the @txp to the chip. This means creating the relevant packet header + * specifying the length of the packet and the other information the chip + * needs, such as IRQ on completion. Send the header and the packet data to + * the device. 
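+ *
+ * Concretely, the first 5-byte transfer carries the KS_SPIOP_TXFIFO opcode,
+ * the 16-bit frame ID (with TXFR_TXIC set when @irq is true) and the 16-bit
+ * frame length; the second transfer carries the payload, padded to a
+ * multiple of 4 bytes.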
+ */ +static void ks8851_wrfifo_spi(struct ks8851_net *ks, struct sk_buff *txp, + bool irq) +{ + struct ks8851_net_spi *kss = to_ks8851_spi(ks); + struct spi_transfer *xfer = kss->spi_xfer2; + struct spi_message *msg = &kss->spi_msg2; + unsigned int fid = 0; + int ret; + + netif_dbg(ks, tx_queued, ks->netdev, "%s: skb %p, %d@%p, irq %d\n", + __func__, txp, txp->len, txp->data, irq); + + fid = ks->fid++; + fid &= TXFR_TXFID_MASK; + + if (irq) + fid |= TXFR_TXIC; /* irq on completion */ + + /* start header at txb[1] to align txw entries */ + ks->txh.txb[1] = KS_SPIOP_TXFIFO; + ks->txh.txw[1] = cpu_to_le16(fid); + ks->txh.txw[2] = cpu_to_le16(txp->len); + + xfer->tx_buf = &ks->txh.txb[1]; + xfer->rx_buf = NULL; + xfer->len = 5; + + xfer++; + xfer->tx_buf = txp->data; + xfer->rx_buf = NULL; + xfer->len = ALIGN(txp->len, 4); + + ret = spi_sync(kss->spidev, msg); + if (ret < 0) + netdev_err(ks->netdev, "%s: spi_sync() failed\n", __func__); +} + +/** + * ks8851_rx_skb_spi - receive skbuff + * @ks: The device state + * @skb: The skbuff + */ +static void ks8851_rx_skb_spi(struct ks8851_net *ks, struct sk_buff *skb) +{ + netif_rx_ni(skb); +} + +/** + * ks8851_tx_work - process tx packet(s) + * @work: The work structure that was scheduled. + * + * This is called when a number of packets have been scheduled for + * transmission and need to be sent to the device. + */ +static void ks8851_tx_work(struct work_struct *work) +{ + struct ks8851_net_spi *kss; + struct ks8851_net *ks; + unsigned long flags; + struct sk_buff *txb; + bool last; + + kss = container_of(work, struct ks8851_net_spi, tx_work); + ks = &kss->ks8851; + last = skb_queue_empty(&ks->txq); + + ks8851_lock_spi(ks, &flags); + + while (!last) { + txb = skb_dequeue(&ks->txq); + last = skb_queue_empty(&ks->txq); + + if (txb) { + ks8851_wrreg16_spi(ks, KS_RXQCR, + ks->rc_rxqcr | RXQCR_SDA); + ks8851_wrfifo_spi(ks, txb, last); + ks8851_wrreg16_spi(ks, KS_RXQCR, ks->rc_rxqcr); + ks8851_wrreg16_spi(ks, KS_TXQCR, TXQCR_METFE); + + ks8851_done_tx(ks, txb); + } + } + + ks8851_unlock_spi(ks, &flags); +} + +/** + * ks8851_flush_tx_work_spi - flush outstanding TX work + * @ks: The device state + */ +static void ks8851_flush_tx_work_spi(struct ks8851_net *ks) +{ + struct ks8851_net_spi *kss = to_ks8851_spi(ks); + + flush_work(&kss->tx_work); +} + +/** + * calc_txlen - calculate size of message to send packet + * @len: Length of data + * + * Returns the size of the TXFIFO message needed to send + * this packet. + */ +static unsigned int calc_txlen(unsigned int len) +{ + return ALIGN(len + 4, 4); +} + +/** + * ks8851_start_xmit_spi - transmit packet using SPI + * @skb: The buffer to transmit + * @dev: The device used to transmit the packet. + * + * Called by the network layer to transmit the @skb. Queue the packet for + * the device and schedule the necessary work to transmit the packet when + * it is free. + * + * We do this to firstly avoid sleeping with the network device locked, + * and secondly so we can round up more than one packet to transmit which + * means we can try and avoid generating too many transmit done interrupts.
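+ *
+ * Here the deferral is real: the skb is appended to ks->txq and tx_work is
+ * scheduled, with ks->tx_space pre-decremented by the calc_txlen() cost so
+ * the queue can be stopped before the chip's TX FIFO would overflow.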
+ */ +static netdev_tx_t ks8851_start_xmit_spi(struct sk_buff *skb, + struct net_device *dev) +{ + unsigned int needed = calc_txlen(skb->len); + struct ks8851_net *ks = netdev_priv(dev); + netdev_tx_t ret = NETDEV_TX_OK; + struct ks8851_net_spi *kss; + + kss = to_ks8851_spi(ks); + + netif_dbg(ks, tx_queued, ks->netdev, + "%s: skb %p, %d@%p\n", __func__, skb, skb->len, skb->data); + + spin_lock(&ks->statelock); + + if (needed > ks->tx_space) { + netif_stop_queue(dev); + ret = NETDEV_TX_BUSY; + } else { + ks->tx_space -= needed; + skb_queue_tail(&ks->txq, skb); + } + + spin_unlock(&ks->statelock); + schedule_work(&kss->tx_work); + + return ret; +} + +static int ks8851_probe_spi(struct spi_device *spi) +{ + struct device *dev = &spi->dev; + struct ks8851_net_spi *kss; + struct net_device *netdev; + struct ks8851_net *ks; + + netdev = devm_alloc_etherdev(dev, sizeof(struct ks8851_net_spi)); + if (!netdev) + return -ENOMEM; + + spi->bits_per_word = 8; + + ks = netdev_priv(netdev); + + ks->lock = ks8851_lock_spi; + ks->unlock = ks8851_unlock_spi; + ks->rdreg16 = ks8851_rdreg16_spi; + ks->wrreg16 = ks8851_wrreg16_spi; + ks->rdfifo = ks8851_rdfifo_spi; + ks->wrfifo = ks8851_wrfifo_spi; + ks->start_xmit = ks8851_start_xmit_spi; + ks->rx_skb = ks8851_rx_skb_spi; + ks->flush_tx_work = ks8851_flush_tx_work_spi; + +#define STD_IRQ (IRQ_LCI | /* Link Change */ \ + IRQ_TXI | /* TX done */ \ + IRQ_RXI | /* RX done */ \ + IRQ_SPIBEI | /* SPI bus error */ \ + IRQ_TXPSI | /* TX process stop */ \ + IRQ_RXPSI) /* RX process stop */ + ks->rc_ier = STD_IRQ; + + kss = to_ks8851_spi(ks); + + kss->spidev = spi; + mutex_init(&kss->lock); + INIT_WORK(&kss->tx_work, ks8851_tx_work); + + /* initialise pre-made spi transfer messages */ + spi_message_init(&kss->spi_msg1); + spi_message_add_tail(&kss->spi_xfer1, &kss->spi_msg1); + + spi_message_init(&kss->spi_msg2); + spi_message_add_tail(&kss->spi_xfer2[0], &kss->spi_msg2); + spi_message_add_tail(&kss->spi_xfer2[1], &kss->spi_msg2); + + netdev->irq = spi->irq; + + return ks8851_probe_common(netdev, dev, msg_enable); +} + +static int ks8851_remove_spi(struct spi_device *spi) +{ + return ks8851_remove_common(&spi->dev); +} + +static const struct of_device_id ks8851_match_table[] = { + { .compatible = "micrel,ks8851" }, + { } +}; +MODULE_DEVICE_TABLE(of, ks8851_match_table); + +static struct spi_driver ks8851_driver = { + .driver = { + .name = "ks8851", + .of_match_table = ks8851_match_table, + .pm = &ks8851_pm_ops, + }, + .probe = ks8851_probe_spi, + .remove = ks8851_remove_spi, +}; +module_spi_driver(ks8851_driver); + +MODULE_DESCRIPTION("KS8851 Network driver"); +MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>"); +MODULE_LICENSE("GPL"); + +module_param_named(message, msg_enable, int, 0); +MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)"); +MODULE_ALIAS("spi:ks8851"); diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index c798431ce2a1..9cfe1fd98c30 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -1204,18 +1204,19 @@ static int ocelot_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) struct ocelot *ocelot = priv->port.ocelot; int port = priv->chip_port; - /* The function is only used for PTP operations for now */ - if (!ocelot->ptp) - return -EOPNOTSUPP; - - switch (cmd) { - case SIOCSHWTSTAMP: - return ocelot_hwstamp_set(ocelot, port, ifr); - case SIOCGHWTSTAMP: - return ocelot_hwstamp_get(ocelot, port, ifr); - default: - return -EOPNOTSUPP; + /* If the attached PHY 
device isn't capable of timestamping operations, + * use our own (when possible). + */ + if (!phy_has_hwtstamp(dev->phydev) && ocelot->ptp) { + switch (cmd) { + case SIOCSHWTSTAMP: + return ocelot_hwstamp_set(ocelot, port, ifr); + case SIOCGHWTSTAMP: + return ocelot_hwstamp_get(ocelot, port, ifr); + } } + + return phy_mii_ioctl(dev->phydev, ifr, cmd); } static const struct net_device_ops ocelot_port_netdev_ops = { @@ -1472,7 +1473,7 @@ static void ocelot_port_attr_ageing_set(struct ocelot *ocelot, int port, unsigned long ageing_clock_t) { unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t); - u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000; + u32 ageing_time = jiffies_to_msecs(ageing_jiffies); ocelot_set_ageing_time(ocelot, ageing_time); } diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c index 67a8d61c926a..4a15d2ff8b70 100644 --- a/drivers/net/ethernet/mscc/ocelot_board.c +++ b/drivers/net/ethernet/mscc/ocelot_board.c @@ -189,7 +189,8 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg) skb->offload_fwd_mark = 1; skb->protocol = eth_type_trans(skb, dev); - netif_rx(skb); + if (!skb_defer_rx_timestamp(skb)) + netif_rx(skb); dev->stats.rx_bytes += len; dev->stats.rx_packets++; } while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)); diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c index 1c76e1592ca2..ff844e5cc41f 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/action.c +++ b/drivers/net/ethernet/netronome/nfp/flower/action.c @@ -209,7 +209,7 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output, NFP_FL_OUT_FLAGS_USE_TUN); output->port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type); } else if (netif_is_lag_master(out_dev) && - priv->flower_ext_feats & NFP_FL_FEATS_LAG) { + priv->flower_en_feats & NFP_FL_ENABLE_LAG) { int gid; output->flags = cpu_to_be16(tmp_flags); @@ -956,7 +956,7 @@ nfp_flower_output_action(struct nfp_app *app, *a_len += sizeof(struct nfp_fl_output); - if (priv->flower_ext_feats & NFP_FL_FEATS_LAG) { + if (priv->flower_en_feats & NFP_FL_ENABLE_LAG) { /* nfp_fl_pre_lag returns -err or size of prelag action added. * This will be 0 if it is not egressing to a lag dev. 
*/ diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c index a595ddb92bff..a050cb898782 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c @@ -264,7 +264,7 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb) nfp_flower_cmsg_portmod_rx(app, skb); break; case NFP_FLOWER_CMSG_TYPE_MERGE_HINT: - if (app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MERGE) { + if (app_priv->flower_en_feats & NFP_FL_ENABLE_FLOW_MERGE) { nfp_flower_cmsg_merge_hint_rx(app, skb); break; } @@ -285,7 +285,7 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb) nfp_flower_stats_rlim_reply(app, skb); break; case NFP_FLOWER_CMSG_TYPE_LAG_CONFIG: - if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) { + if (app_priv->flower_en_feats & NFP_FL_ENABLE_LAG) { skb_stored = nfp_flower_lag_unprocessed_msg(app, skb); break; } diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c index d8ad9346a26a..ca7032d22196 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.c +++ b/drivers/net/ethernet/netronome/nfp/flower/main.c @@ -665,6 +665,77 @@ err_clear_nn: return err; } +static void nfp_flower_wait_host_bit(struct nfp_app *app) +{ + unsigned long err_at; + u64 feat; + int err; + + /* Wait for HOST_ACK flag bit to propagate */ + err_at = jiffies + msecs_to_jiffies(100); + do { + feat = nfp_rtsym_read_le(app->pf->rtbl, + "_abi_flower_combined_features_global", + &err); + if (time_is_before_eq_jiffies(err_at)) { + nfp_warn(app->cpp, + "HOST_ACK bit not propagated in FW.\n"); + break; + } + usleep_range(1000, 2000); + } while (!err && !(feat & NFP_FL_FEATS_HOST_ACK)); + + if (err) + nfp_warn(app->cpp, + "Could not read global features entry from FW\n"); +} + +static int nfp_flower_sync_feature_bits(struct nfp_app *app) +{ + struct nfp_flower_priv *app_priv = app->priv; + int err; + + /* Tell the firmware of the host supported features. */ + err = nfp_rtsym_write_le(app->pf->rtbl, "_abi_flower_host_mask", + app_priv->flower_ext_feats | + NFP_FL_FEATS_HOST_ACK); + if (!err) + nfp_flower_wait_host_bit(app); + else if (err != -ENOENT) + return err; + + /* Tell the firmware that the driver supports lag. */ + err = nfp_rtsym_write_le(app->pf->rtbl, + "_abi_flower_balance_sync_enable", 1); + if (!err) { + app_priv->flower_en_feats |= NFP_FL_ENABLE_LAG; + nfp_flower_lag_init(&app_priv->nfp_lag); + } else if (err == -ENOENT) { + nfp_warn(app->cpp, "LAG not supported by FW.\n"); + } else { + return err; + } + + if (app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MOD) { + /* Tell the firmware that the driver supports flow merging. */ + err = nfp_rtsym_write_le(app->pf->rtbl, + "_abi_flower_merge_hint_enable", 1); + if (!err) { + app_priv->flower_en_feats |= NFP_FL_ENABLE_FLOW_MERGE; + nfp_flower_internal_port_init(app_priv); + } else if (err == -ENOENT) { + nfp_warn(app->cpp, + "Flow merge not supported by FW.\n"); + } else { + return err; + } + } else { + nfp_warn(app->cpp, "Flow mod/merge not supported by FW.\n"); + } + + return 0; +} + static int nfp_flower_init(struct nfp_app *app) { u64 version, features, ctx_count, num_mems; @@ -753,35 +824,11 @@ static int nfp_flower_init(struct nfp_app *app) if (err) app_priv->flower_ext_feats = 0; else - app_priv->flower_ext_feats = features; + app_priv->flower_ext_feats = features & NFP_FL_FEATS_HOST; - /* Tell the firmware that the driver supports lag. 
*/ - err = nfp_rtsym_write_le(app->pf->rtbl, - "_abi_flower_balance_sync_enable", 1); - if (!err) { - app_priv->flower_ext_feats |= NFP_FL_FEATS_LAG; - nfp_flower_lag_init(&app_priv->nfp_lag); - } else if (err == -ENOENT) { - nfp_warn(app->cpp, "LAG not supported by FW.\n"); - } else { - goto err_cleanup_metadata; - } - - if (app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MOD) { - /* Tell the firmware that the driver supports flow merging. */ - err = nfp_rtsym_write_le(app->pf->rtbl, - "_abi_flower_merge_hint_enable", 1); - if (!err) { - app_priv->flower_ext_feats |= NFP_FL_FEATS_FLOW_MERGE; - nfp_flower_internal_port_init(app_priv); - } else if (err == -ENOENT) { - nfp_warn(app->cpp, "Flow merge not supported by FW.\n"); - } else { - goto err_lag_clean; - } - } else { - nfp_warn(app->cpp, "Flow mod/merge not supported by FW.\n"); - } + err = nfp_flower_sync_feature_bits(app); + if (err) + goto err_cleanup; if (app_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM) nfp_flower_qos_init(app); @@ -792,10 +839,9 @@ static int nfp_flower_init(struct nfp_app *app) return 0; -err_lag_clean: - if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) +err_cleanup: + if (app_priv->flower_en_feats & NFP_FL_ENABLE_LAG) nfp_flower_lag_cleanup(&app_priv->nfp_lag); -err_cleanup_metadata: nfp_flower_metadata_cleanup(app); err_free_app_priv: vfree(app->priv); @@ -813,10 +859,10 @@ static void nfp_flower_clean(struct nfp_app *app) if (app_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM) nfp_flower_qos_cleanup(app); - if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) + if (app_priv->flower_en_feats & NFP_FL_ENABLE_LAG) nfp_flower_lag_cleanup(&app_priv->nfp_lag); - if (app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MERGE) + if (app_priv->flower_en_feats & NFP_FL_ENABLE_FLOW_MERGE) nfp_flower_internal_port_cleanup(app_priv); nfp_flower_metadata_cleanup(app); @@ -886,7 +932,7 @@ static int nfp_flower_start(struct nfp_app *app) struct nfp_flower_priv *app_priv = app->priv; int err; - if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) { + if (app_priv->flower_en_feats & NFP_FL_ENABLE_LAG) { err = nfp_flower_lag_reset(&app_priv->nfp_lag); if (err) return err; @@ -907,7 +953,7 @@ nfp_flower_netdev_event(struct nfp_app *app, struct net_device *netdev, struct nfp_flower_priv *app_priv = app->priv; int ret; - if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) { + if (app_priv->flower_en_feats & NFP_FL_ENABLE_LAG) { ret = nfp_flower_lag_netdev_event(app_priv, netdev, event, ptr); if (ret & NOTIFY_STOP_MASK) return ret; diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index d55d0d33bc45..59abea2a39ad 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -44,8 +44,20 @@ struct nfp_app; #define NFP_FL_FEATS_FLOW_MOD BIT(5) #define NFP_FL_FEATS_PRE_TUN_RULES BIT(6) #define NFP_FL_FEATS_IPV6_TUN BIT(7) -#define NFP_FL_FEATS_FLOW_MERGE BIT(30) -#define NFP_FL_FEATS_LAG BIT(31) +#define NFP_FL_FEATS_HOST_ACK BIT(31) + +#define NFP_FL_ENABLE_FLOW_MERGE BIT(0) +#define NFP_FL_ENABLE_LAG BIT(1) + +#define NFP_FL_FEATS_HOST \ + (NFP_FL_FEATS_GENEVE | \ + NFP_FL_NBI_MTU_SETTING | \ + NFP_FL_FEATS_GENEVE_OPT | \ + NFP_FL_FEATS_VLAN_PCP | \ + NFP_FL_FEATS_VF_RLIM | \ + NFP_FL_FEATS_FLOW_MOD | \ + NFP_FL_FEATS_PRE_TUN_RULES | \ + NFP_FL_FEATS_IPV6_TUN) struct nfp_fl_mask_id { struct circ_buf mask_id_free_list; @@ -145,6 +157,7 @@ struct nfp_fl_internal_ports { * @mask_id_seed: Seed used for mask hash table * 
@flower_version: HW version of flower * @flower_ext_feats: Bitmap of extra features the HW supports + * @flower_en_feats: Bitmap of features enabled by HW * @stats_ids: List of free stats ids * @mask_ids: List of free mask ids * @mask_table: Hash table used to store masks @@ -180,6 +193,7 @@ struct nfp_flower_priv { u32 mask_id_seed; u64 flower_version; u64 flower_ext_feats; + u8 flower_en_feats; struct nfp_fl_stats_id stats_ids; struct nfp_fl_mask_id mask_ids; DECLARE_HASHTABLE(mask_table, NFP_FLOWER_MASK_HASH_BITS); @@ -346,7 +360,7 @@ nfp_flower_internal_port_can_offload(struct nfp_app *app, { struct nfp_flower_priv *app_priv = app->priv; - if (!(app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MERGE)) + if (!(app_priv->flower_en_feats & NFP_FL_ENABLE_FLOW_MERGE)) return false; if (!netdev->rtnl_link_ops) return false; diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c index 546bc01d507d..f7f01e2e3dce 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/match.c +++ b/drivers/net/ethernet/netronome/nfp/flower/match.c @@ -74,9 +74,10 @@ nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port, return 0; } -static void +static int nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext, - struct nfp_flower_mac_mpls *msk, struct flow_rule *rule) + struct nfp_flower_mac_mpls *msk, struct flow_rule *rule, + struct netlink_ext_ack *extack) { memset(ext, 0, sizeof(struct nfp_flower_mac_mpls)); memset(msk, 0, sizeof(struct nfp_flower_mac_mpls)); @@ -97,14 +98,28 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext, u32 t_mpls; flow_rule_match_mpls(rule, &match); - t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, match.key->mpls_label) | - FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, match.key->mpls_tc) | - FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, match.key->mpls_bos) | + + /* Only support matching the first LSE */ + if (match.mask->used_lses != 1) { + NL_SET_ERR_MSG_MOD(extack, + "unsupported offload: invalid LSE depth for MPLS match offload"); + return -EOPNOTSUPP; + } + + t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, + match.key->ls[0].mpls_label) | + FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, + match.key->ls[0].mpls_tc) | + FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, + match.key->ls[0].mpls_bos) | NFP_FLOWER_MASK_MPLS_Q; ext->mpls_lse = cpu_to_be32(t_mpls); - t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, match.mask->mpls_label) | - FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, match.mask->mpls_tc) | - FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, match.mask->mpls_bos) | + t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, + match.mask->ls[0].mpls_label) | + FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, + match.mask->ls[0].mpls_tc) | + FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, + match.mask->ls[0].mpls_bos) | NFP_FLOWER_MASK_MPLS_Q; msk->mpls_lse = cpu_to_be32(t_mpls); } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { @@ -121,6 +136,8 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext, msk->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q); } } + + return 0; } static void @@ -461,9 +478,12 @@ int nfp_flower_compile_flow_match(struct nfp_app *app, msk += sizeof(struct nfp_flower_in_port); if (NFP_FLOWER_LAYER_MAC & key_ls->key_layer) { - nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)ext, - (struct nfp_flower_mac_mpls *)msk, - rule); + err = nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)ext, + (struct nfp_flower_mac_mpls *)msk, + rule, extack); + if (err) + return err; + ext += sizeof(struct nfp_flower_mac_mpls); msk += sizeof(struct 
nfp_flower_mac_mpls); } diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index c694dbc239d0..6b60771ccb19 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -1440,7 +1440,8 @@ __nfp_flower_update_merge_stats(struct nfp_app *app, ctx_id = be32_to_cpu(sub_flow->meta.host_ctx_id); priv->stats[ctx_id].pkts += pkts; priv->stats[ctx_id].bytes += bytes; - max_t(u64, priv->stats[ctx_id].used, used); + priv->stats[ctx_id].used = max_t(u64, used, + priv->stats[ctx_id].used); } } diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index 1a636bad717d..7b76667acaba 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -110,6 +110,7 @@ struct src_ent { ALIGNED_TYPE_SIZE(union conn_context, p_hwfn) #define SRQ_CXT_SIZE (sizeof(struct rdma_srq_context)) +#define XRC_SRQ_CXT_SIZE (sizeof(struct rdma_xrc_srq_context)) #define TYPE0_TASK_CXT_SIZE(p_hwfn) \ ALIGNED_TYPE_SIZE(union type0_task_context, p_hwfn) @@ -293,18 +294,40 @@ static struct qed_tid_seg *qed_cxt_tid_seg_info(struct qed_hwfn *p_hwfn, return NULL; } -static void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn, u32 num_srqs) +static void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn, + u32 num_srqs, u32 num_xrc_srqs) { struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr; p_mgr->srq_count = num_srqs; + p_mgr->xrc_srq_count = num_xrc_srqs; } -u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn) +u32 qed_cxt_get_ilt_page_size(struct qed_hwfn *p_hwfn, + enum ilt_clients ilt_client) +{ + struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + struct qed_ilt_client_cfg *p_cli = &p_mngr->clients[ilt_client]; + + return ILT_PAGE_IN_BYTES(p_cli->p_size.val); +} + +static u32 qed_cxt_xrc_srqs_per_page(struct qed_hwfn *p_hwfn) +{ + u32 page_size; + + page_size = qed_cxt_get_ilt_page_size(p_hwfn, ILT_CLI_TSDM); + return page_size / XRC_SRQ_CXT_SIZE; +} + +u32 qed_cxt_get_total_srq_count(struct qed_hwfn *p_hwfn) { struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr; + u32 total_srqs; + + total_srqs = p_mgr->srq_count + p_mgr->xrc_srq_count; - return p_mgr->srq_count; + return total_srqs; } /* set the iids count per protocol */ @@ -692,7 +715,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *line_count) } /* TSDM (SRQ CONTEXT) */ - total = qed_cxt_get_srq_count(p_hwfn); + total = qed_cxt_get_total_srq_count(p_hwfn); if (total) { p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_TSDM]); @@ -1962,11 +1985,9 @@ static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn, struct qed_rdma_pf_params *p_params, u32 num_tasks) { - u32 num_cons, num_qps, num_srqs; + u32 num_cons, num_qps; enum protocol_type proto; - num_srqs = min_t(u32, QED_RDMA_MAX_SRQS, p_params->num_srqs); - if (p_hwfn->mcp_info->func_info.protocol == QED_PCI_ETH_RDMA) { DP_NOTICE(p_hwfn, "Current day drivers don't support RoCE & iWARP simultaneously on the same PF. Default to RoCE-only\n"); @@ -1989,6 +2010,8 @@ static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn, } if (num_cons && num_tasks) { + u32 num_srqs, num_xrc_srqs; + qed_cxt_set_proto_cid_count(p_hwfn, proto, num_cons, 0); /* Deliberately passing ROCE for tasks id.
This is because @@ -1997,7 +2020,13 @@ static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn, qed_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_ROCE, QED_CXT_ROCE_TID_SEG, 1, num_tasks, false); - qed_cxt_set_srq_count(p_hwfn, num_srqs); + + num_srqs = min_t(u32, QED_RDMA_MAX_SRQS, p_params->num_srqs); + + /* XRC SRQs populate a single ILT page */ + num_xrc_srqs = qed_cxt_xrc_srqs_per_page(p_hwfn); + + qed_cxt_set_srq_count(p_hwfn, num_srqs, num_xrc_srqs); } else { DP_INFO(p_hwfn->cdev, "RDMA personality used without setting params!\n"); @@ -2163,10 +2192,17 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn, p_blk = &p_cli->pf_blks[CDUC_BLK]; break; case QED_ELEM_SRQ: + /* The first ILT page is not used for regular SRQs. Skip it. */ + iid += p_hwfn->p_cxt_mngr->xrc_srq_count; p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM]; elem_size = SRQ_CXT_SIZE; p_blk = &p_cli->pf_blks[SRQ_BLK]; break; + case QED_ELEM_XRC_SRQ: + p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM]; + elem_size = XRC_SRQ_CXT_SIZE; + p_blk = &p_cli->pf_blks[SRQ_BLK]; + break; case QED_ELEM_TASK: p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT]; elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn); @@ -2386,8 +2422,12 @@ int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto) return rc; /* Free TSDM CXT */ - rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_SRQ, 0, - qed_cxt_get_srq_count(p_hwfn)); + rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_XRC_SRQ, 0, + p_hwfn->p_cxt_mngr->xrc_srq_count); + + rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_SRQ, + p_hwfn->p_cxt_mngr->xrc_srq_count, + p_hwfn->p_cxt_mngr->srq_count); return rc; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h index c4e815f6cabd..ce08ae8d8498 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h @@ -82,7 +82,8 @@ int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn, enum qed_cxt_elem_type { QED_ELEM_CXT, QED_ELEM_SRQ, - QED_ELEM_TASK + QED_ELEM_TASK, + QED_ELEM_XRC_SRQ, }; u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn, @@ -235,7 +236,6 @@ u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn, enum protocol_type type); u32 qed_cxt_get_proto_cid_start(struct qed_hwfn *p_hwfn, enum protocol_type type); -u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn); int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto); #define QED_CTX_WORKING_MEM 0 @@ -358,6 +358,7 @@ struct qed_cxt_mngr { /* total number of SRQ's for this hwfn */ u32 srq_count; + u32 xrc_srq_count; /* Maximal number of L2 steering filters */ u32 arfs_count; @@ -372,4 +373,9 @@ u16 qed_get_cdut_num_vf_init_pages(struct qed_hwfn *p_hwfn); u16 qed_get_cdut_num_pf_work_pages(struct qed_hwfn *p_hwfn); u16 qed_get_cdut_num_vf_work_pages(struct qed_hwfn *p_hwfn); +u32 qed_cxt_get_ilt_page_size(struct qed_hwfn *p_hwfn, + enum ilt_clients ilt_client); + +u32 qed_cxt_get_total_srq_count(struct qed_hwfn *p_hwfn); + #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 6e857468e993..1eebf30fa798 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -2269,6 +2269,7 @@ int qed_resc_alloc(struct qed_dev *cdev) /* EQ */ n_eqes = qed_chain_get_capacity(&p_hwfn->p_spq->chain); if (QED_IS_RDMA_PERSONALITY(p_hwfn)) { + u32 n_srq = qed_cxt_get_total_srq_count(p_hwfn); enum protocol_type rdma_proto; if (QED_IS_ROCE_PERSONALITY(p_hwfn)) @@ -2279,7 +2280,10 @@ int 
qed_resc_alloc(struct qed_dev *cdev) num_cons = qed_cxt_get_proto_cid_count(p_hwfn, rdma_proto, NULL) * 2; - n_eqes += num_cons + 2 * MAX_NUM_VFS_BB; + /* EQ should be able to get events from all SRQs + * at the same time + */ + n_eqes += num_cons + 2 * MAX_NUM_VFS_BB + n_srq; } else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) { num_cons = qed_cxt_get_proto_cid_count(p_hwfn, diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c index 38b1f402f7ed..98455f698f53 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c @@ -212,13 +212,22 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn) goto free_rdma_port; } + /* Allocate bit map for XRC Domains */ + rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->xrcd_map, + QED_RDMA_MAX_XRCDS, "XRCD"); + if (rc) { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "Failed to allocate xrcd_map, rc = %d\n", rc); + goto free_pd_map; + } + /* Allocate DPI bitmap */ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->dpi_map, p_hwfn->dpi_count, "DPI"); if (rc) { DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to allocate DPI bitmap, rc = %d\n", rc); - goto free_pd_map; + goto free_xrcd_map; } /* Allocate bitmap for cq's. The maximum number of CQs is bound to @@ -271,14 +280,27 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn) goto free_cid_map; } + /* The first SRQ follows the last XRC SRQ. This means that the + * SRQ IDs start from an offset equal to max_xrc_srqs. + */ + p_rdma_info->srq_id_offset = p_hwfn->p_cxt_mngr->xrc_srq_count; + rc = qed_rdma_bmap_alloc(p_hwfn, + &p_rdma_info->xrc_srq_map, + p_hwfn->p_cxt_mngr->xrc_srq_count, "XRC SRQ"); + if (rc) { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "Failed to allocate xrc srq bitmap, rc = %d\n", rc); + goto free_real_cid_map; + } + /* Allocate bitmap for srqs */ - p_rdma_info->num_srqs = qed_cxt_get_srq_count(p_hwfn); + p_rdma_info->num_srqs = p_hwfn->p_cxt_mngr->srq_count; rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->srq_map, p_rdma_info->num_srqs, "SRQ"); if (rc) { DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to allocate srq bitmap, rc = %d\n", rc); - goto free_real_cid_map; + goto free_xrc_srq_map; } if (QED_IS_IWARP_PERSONALITY(p_hwfn)) @@ -292,6 +314,8 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn) free_srq_map: kfree(p_rdma_info->srq_map.bitmap); +free_xrc_srq_map: + kfree(p_rdma_info->xrc_srq_map.bitmap); free_real_cid_map: kfree(p_rdma_info->real_cid_map.bitmap); free_cid_map: @@ -304,6 +328,8 @@ free_cq_map: kfree(p_rdma_info->cq_map.bitmap); free_dpi_map: kfree(p_rdma_info->dpi_map.bitmap); +free_xrcd_map: + kfree(p_rdma_info->xrcd_map.bitmap); free_pd_map: kfree(p_rdma_info->pd_map.bitmap); free_rdma_port: @@ -377,6 +403,7 @@ static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn) qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tid_map, 1); qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->srq_map, 1); qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->real_cid_map, 1); + qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->xrc_srq_map, 1); kfree(p_rdma_info->port); kfree(p_rdma_info->dev); @@ -612,7 +639,10 @@ static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn, p_params_header->cnq_start_offset = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM); p_params_header->num_cnqs = params->desired_cnq; - + p_params_header->first_reg_srq_id = + cpu_to_le16(p_hwfn->p_rdma_info->srq_id_offset); + p_params_header->reg_srq_base_addr = + cpu_to_le32(qed_cxt_get_ilt_page_size(p_hwfn, ILT_CLI_TSDM)); if (params->cq_mode ==
QED_RDMA_CQ_MODE_16_BITS) p_params_header->cq_ring_mode = 1; else @@ -983,6 +1013,41 @@ static void qed_rdma_free_pd(void *rdma_cxt, u16 pd) spin_unlock_bh(&p_hwfn->p_rdma_info->lock); } +static int qed_rdma_alloc_xrcd(void *rdma_cxt, u16 *xrcd_id) +{ + struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; + u32 returned_id; + int rc; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc XRCD\n"); + + spin_lock_bh(&p_hwfn->p_rdma_info->lock); + rc = qed_rdma_bmap_alloc_id(p_hwfn, + &p_hwfn->p_rdma_info->xrcd_map, + &returned_id); + spin_unlock_bh(&p_hwfn->p_rdma_info->lock); + if (rc) { + DP_NOTICE(p_hwfn, "Failed in allocating xrcd id\n"); + return rc; + } + + *xrcd_id = (u16)returned_id; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc XRCD - done, rc = %d\n", rc); + return rc; +} + +static void qed_rdma_free_xrcd(void *rdma_cxt, u16 xrcd_id) +{ + struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "xrcd_id = %08x\n", xrcd_id); + + spin_lock_bh(&p_hwfn->p_rdma_info->lock); + qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->xrcd_map, xrcd_id); + spin_unlock_bh(&p_hwfn->p_rdma_info->lock); +} + static enum qed_rdma_toggle_bit qed_rdma_toggle_bit_create_resize_cq(struct qed_hwfn *p_hwfn, u16 icid) { @@ -1306,11 +1371,14 @@ qed_rdma_create_qp(void *rdma_cxt, qp->resp_offloaded = false; qp->e2e_flow_control_en = qp->use_srq ? false : true; qp->stats_queue = in_params->stats_queue; + qp->qp_type = in_params->qp_type; + qp->xrcd_id = in_params->xrcd_id; if (QED_IS_IWARP_PERSONALITY(p_hwfn)) { rc = qed_iwarp_create_qp(p_hwfn, qp, out_params); qp->qpid = qp->icid; } else { + qp->edpm_mode = GET_FIELD(in_params->flags, QED_ROCE_EDPM_MODE); rc = qed_roce_alloc_cid(p_hwfn, &qp->icid); qp->qpid = ((0xFF << 16) | qp->icid); } @@ -1418,6 +1486,18 @@ static int qed_rdma_modify_qp(void *rdma_cxt, qp->cur_state); } + switch (qp->qp_type) { + case QED_RDMA_QP_TYPE_XRC_INI: + qp->has_req = 1; + break; + case QED_RDMA_QP_TYPE_XRC_TGT: + qp->has_resp = 1; + break; + default: + qp->has_req = 1; + qp->has_resp = 1; + } + if (QED_IS_IWARP_PERSONALITY(p_hwfn)) { enum qed_iwarp_qp_state new_state = qed_roce2iwarp_state(qp->cur_state); @@ -1657,6 +1737,15 @@ static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev) return QED_AFFIN_HWFN(cdev); } +static struct qed_bmap *qed_rdma_get_srq_bmap(struct qed_hwfn *p_hwfn, + bool is_xrc) +{ + if (is_xrc) + return &p_hwfn->p_rdma_info->xrc_srq_map; + + return &p_hwfn->p_rdma_info->srq_map; +} + static int qed_rdma_modify_srq(void *rdma_cxt, struct qed_rdma_modify_srq_in_params *in_params) { @@ -1686,8 +1775,8 @@ static int qed_rdma_modify_srq(void *rdma_cxt, if (rc) return rc; - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "modified SRQ id = %x", - in_params->srq_id); + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "modified SRQ id = %x, is_xrc=%u\n", + in_params->srq_id, in_params->is_xrc); return rc; } @@ -1702,6 +1791,7 @@ qed_rdma_destroy_srq(void *rdma_cxt, struct qed_spq_entry *p_ent; struct qed_bmap *bmap; u16 opaque_fid; + u16 offset; int rc; opaque_fid = p_hwfn->hw_info.opaque_fid; @@ -1723,14 +1813,16 @@ qed_rdma_destroy_srq(void *rdma_cxt, if (rc) return rc; - bmap = &p_hwfn->p_rdma_info->srq_map; + bmap = qed_rdma_get_srq_bmap(p_hwfn, in_params->is_xrc); + offset = (in_params->is_xrc) ? 
0 : p_hwfn->p_rdma_info->srq_id_offset; spin_lock_bh(&p_hwfn->p_rdma_info->lock); - qed_bmap_release_id(p_hwfn, bmap, in_params->srq_id); + qed_bmap_release_id(p_hwfn, bmap, in_params->srq_id - offset); spin_unlock_bh(&p_hwfn->p_rdma_info->lock); - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "SRQ destroyed Id = %x", - in_params->srq_id); + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "XRC/SRQ destroyed Id = %x, is_xrc=%u\n", + in_params->srq_id, in_params->is_xrc); return rc; } @@ -1748,24 +1840,26 @@ qed_rdma_create_srq(void *rdma_cxt, u16 opaque_fid, srq_id; struct qed_bmap *bmap; u32 returned_id; + u16 offset; int rc; - bmap = &p_hwfn->p_rdma_info->srq_map; + bmap = qed_rdma_get_srq_bmap(p_hwfn, in_params->is_xrc); spin_lock_bh(&p_hwfn->p_rdma_info->lock); rc = qed_rdma_bmap_alloc_id(p_hwfn, bmap, &returned_id); spin_unlock_bh(&p_hwfn->p_rdma_info->lock); if (rc) { - DP_NOTICE(p_hwfn, "failed to allocate srq id\n"); + DP_NOTICE(p_hwfn, + "failed to allocate xrc/srq id (is_xrc=%u)\n", + in_params->is_xrc); return rc; } - elem_type = QED_ELEM_SRQ; + elem_type = (in_params->is_xrc) ? (QED_ELEM_XRC_SRQ) : (QED_ELEM_SRQ); rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, elem_type, returned_id); if (rc) goto err; - /* returned id is no greater than u16 */ - srq_id = (u16)returned_id; + opaque_fid = p_hwfn->hw_info.opaque_fid; @@ -1782,20 +1876,34 @@ qed_rdma_create_srq(void *rdma_cxt, DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, in_params->pbl_base_addr); p_ramrod->pages_in_srq_pbl = cpu_to_le16(in_params->num_pages); p_ramrod->pd_id = cpu_to_le16(in_params->pd_id); - p_ramrod->srq_id.srq_idx = cpu_to_le16(srq_id); p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid); p_ramrod->page_size = cpu_to_le16(in_params->page_size); DMA_REGPAIR_LE(p_ramrod->producers_addr, in_params->prod_pair_addr); + offset = (in_params->is_xrc) ?
0 : p_hwfn->p_rdma_info->srq_id_offset; + srq_id = (u16)returned_id + offset; + p_ramrod->srq_id.srq_idx = cpu_to_le16(srq_id); + if (in_params->is_xrc) { + SET_FIELD(p_ramrod->flags, + RDMA_SRQ_CREATE_RAMROD_DATA_XRC_FLAG, 1); + SET_FIELD(p_ramrod->flags, + RDMA_SRQ_CREATE_RAMROD_DATA_RESERVED_KEY_EN, + in_params->reserved_key_en); + p_ramrod->xrc_srq_cq_cid = + cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | + in_params->cq_cid); + p_ramrod->xrc_domain = cpu_to_le16(in_params->xrcd_id); + } rc = qed_spq_post(p_hwfn, p_ent, NULL); if (rc) goto err; out_params->srq_id = srq_id; - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, - "SRQ created Id = %x\n", out_params->srq_id); - + DP_VERBOSE(p_hwfn, + QED_MSG_RDMA, + "XRC/SRQ created Id = %x (is_xrc=%u)\n", + out_params->srq_id, in_params->is_xrc); return rc; err: @@ -1961,6 +2069,8 @@ static const struct qed_rdma_ops qed_rdma_ops_pass = { .rdma_cnq_prod_update = &qed_rdma_cnq_prod_update, .rdma_alloc_pd = &qed_rdma_alloc_pd, .rdma_dealloc_pd = &qed_rdma_free_pd, + .rdma_alloc_xrcd = &qed_rdma_alloc_xrcd, + .rdma_dealloc_xrcd = &qed_rdma_free_xrcd, .rdma_create_cq = &qed_rdma_create_cq, .rdma_destroy_cq = &qed_rdma_destroy_cq, .rdma_create_qp = &qed_rdma_create_qp, diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.h b/drivers/net/ethernet/qlogic/qed/qed_rdma.h index 3689fe3e5935..3898cae61e7a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.h +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.h @@ -63,6 +63,11 @@ #define QED_RDMA_MAX_CQE_32_BIT (0x7FFFFFFF - 1) #define QED_RDMA_MAX_CQE_16_BIT (0x7FFF - 1) +/* Up to 2^16 XRC Domains are supported, but the actual number of supported XRC + * SRQs is much smaller so there's no need to have that many domains. + */ +#define QED_RDMA_MAX_XRCDS (roundup_pow_of_two(RDMA_MAX_XRC_SRQS)) + enum qed_rdma_toggle_bit { QED_RDMA_TOGGLE_BIT_CLEAR = 0, QED_RDMA_TOGGLE_BIT_SET = 1 @@ -81,9 +86,11 @@ struct qed_rdma_info { struct qed_bmap cq_map; struct qed_bmap pd_map; + struct qed_bmap xrcd_map; struct qed_bmap tid_map; struct qed_bmap qp_map; struct qed_bmap srq_map; + struct qed_bmap xrc_srq_map; struct qed_bmap cid_map; struct qed_bmap tcp_cid_map; struct qed_bmap real_cid_map; @@ -111,6 +118,7 @@ struct qed_rdma_qp { u32 qpid; u16 icid; enum qed_roce_qp_state cur_state; + enum qed_rdma_qp_type qp_type; enum qed_iwarp_qp_state iwarp_state; bool use_srq; bool signal_all; @@ -153,18 +161,21 @@ struct qed_rdma_qp { dma_addr_t orq_phys_addr; u8 orq_num_pages; bool req_offloaded; + bool has_req; /* responder */ u8 max_rd_atomic_resp; u32 rq_psn; u16 rq_cq_id; u16 rq_num_pages; + u16 xrcd_id; dma_addr_t rq_pbl_ptr; void *irq; dma_addr_t irq_phys_addr; u8 irq_num_pages; bool resp_offloaded; u32 cq_prod; + bool has_resp; u8 remote_mac_addr[6]; u8 local_mac_addr[6]; @@ -172,8 +183,17 @@ struct qed_rdma_qp { void *shared_queue; dma_addr_t shared_queue_phys_addr; struct qed_iwarp_ep *ep; + u8 edpm_mode; }; +static inline bool qed_rdma_is_xrc_qp(struct qed_rdma_qp *qp) +{ + if (qp->qp_type == QED_RDMA_QP_TYPE_XRC_TGT || + qp->qp_type == QED_RDMA_QP_TYPE_XRC_INI) + return true; + + return false; +} #if IS_ENABLED(CONFIG_QED_RDMA) void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c index 475b89903f46..4566815f7b87 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c @@ -254,6 +254,9 @@ 
static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn, int rc; u8 tc; + if (!qp->has_resp) + return 0; + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); /* Allocate DMA-able memory for IRQ */ @@ -315,6 +318,10 @@ static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn, ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER, qp->min_rnr_nak_timer); + SET_FIELD(p_ramrod->flags, + ROCE_CREATE_QP_RESP_RAMROD_DATA_XRC_FLAG, + qed_rdma_is_xrc_qp(qp)); + p_ramrod->max_ird = qp->max_rd_atomic_resp; p_ramrod->traffic_class = qp->traffic_class_tos; p_ramrod->hop_limit = qp->hop_limit_ttl; @@ -335,6 +342,7 @@ static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn, p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo); p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->rq_cq_id); + p_ramrod->xrc_domain = cpu_to_le16(qp->xrcd_id); tc = qed_roce_get_qp_tc(p_hwfn, qp); regular_latency_queue = qed_get_cm_pq_idx_ofld_mtc(p_hwfn, tc); @@ -395,6 +403,9 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn, int rc; u8 tc; + if (!qp->has_req) + return 0; + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); /* Allocate DMA-able memory for ORQ */ @@ -444,6 +455,13 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn, ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT, qp->rnr_retry_cnt); + SET_FIELD(p_ramrod->flags, + ROCE_CREATE_QP_REQ_RAMROD_DATA_XRC_FLAG, + qed_rdma_is_xrc_qp(qp)); + + SET_FIELD(p_ramrod->flags2, + ROCE_CREATE_QP_REQ_RAMROD_DATA_EDPM_MODE, qp->edpm_mode); + p_ramrod->max_ord = qp->max_rd_atomic_req; p_ramrod->traffic_class = qp->traffic_class_tos; p_ramrod->hop_limit = qp->hop_limit_ttl; @@ -517,6 +535,9 @@ static int qed_roce_sp_modify_responder(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent; int rc; + if (!qp->has_resp) + return 0; + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); if (move_to_err && !qp->resp_offloaded) @@ -611,6 +632,9 @@ static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent; int rc; + if (!qp->has_req) + return 0; + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); if (move_to_err && !(qp->req_offloaded)) @@ -705,6 +729,11 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn, dma_addr_t ramrod_res_phys; int rc; + if (!qp->has_resp) { + *cq_prod = 0; + return 0; + } + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); *cq_prod = qp->cq_prod; @@ -785,6 +814,9 @@ static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn, dma_addr_t ramrod_res_phys; int rc = -ENOMEM; + if (!qp->has_req) + return 0; + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); if (!qp->req_offloaded) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index 2a533280b124..29b9c728a65e 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -3651,7 +3651,7 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev) ahw->diag_cnt = 0; ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INTRPT_TEST); if (ret) - goto fail_diag_irq; + goto fail_mbx_args; if (adapter->flags & QLCNIC_MSIX_ENABLED) intrpt_id = ahw->intr_tbl[0].id; @@ -3681,6 +3681,8 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev) done: qlcnic_free_mbx_args(&cmd); + +fail_mbx_args: qlcnic_83xx_diag_free_res(netdev, drv_sds_rings); fail_diag_irq: diff --git 
a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c index 60d342f82fb3..e291e6ac40cb 100644 --- a/drivers/net/ethernet/realtek/8139cp.c +++ b/drivers/net/ethernet/realtek/8139cp.c @@ -2054,10 +2054,9 @@ static void cp_remove_one (struct pci_dev *pdev) free_netdev(dev); } -#ifdef CONFIG_PM -static int cp_suspend (struct pci_dev *pdev, pm_message_t state) +static int __maybe_unused cp_suspend(struct device *device) { - struct net_device *dev = pci_get_drvdata(pdev); + struct net_device *dev = dev_get_drvdata(device); struct cp_private *cp = netdev_priv(dev); unsigned long flags; @@ -2075,16 +2074,14 @@ static int cp_suspend (struct pci_dev *pdev, pm_message_t state) spin_unlock_irqrestore (&cp->lock, flags); - pci_save_state(pdev); - pci_enable_wake(pdev, pci_choose_state(pdev, state), cp->wol_enabled); - pci_set_power_state(pdev, pci_choose_state(pdev, state)); + device_set_wakeup_enable(device, cp->wol_enabled); return 0; } -static int cp_resume (struct pci_dev *pdev) +static int __maybe_unused cp_resume(struct device *device) { - struct net_device *dev = pci_get_drvdata (pdev); + struct net_device *dev = dev_get_drvdata(device); struct cp_private *cp = netdev_priv(dev); unsigned long flags; @@ -2093,10 +2090,6 @@ static int cp_resume (struct pci_dev *pdev) netif_device_attach (dev); - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - pci_enable_wake(pdev, PCI_D0, 0); - /* FIXME: sh*t may happen if the Rx ring buffer is depleted */ cp_init_rings_index (cp); cp_init_hw (cp); @@ -2111,7 +2104,6 @@ static int cp_resume (struct pci_dev *pdev) return 0; } -#endif /* CONFIG_PM */ static const struct pci_device_id cp_pci_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139), }, @@ -2120,15 +2112,14 @@ static const struct pci_device_id cp_pci_tbl[] = { }; MODULE_DEVICE_TABLE(pci, cp_pci_tbl); +static SIMPLE_DEV_PM_OPS(cp_pm_ops, cp_suspend, cp_resume); + static struct pci_driver cp_driver = { .name = DRV_NAME, .id_table = cp_pci_tbl, .probe = cp_init_one, .remove = cp_remove_one, -#ifdef CONFIG_PM - .resume = cp_resume, - .suspend = cp_suspend, -#endif + .driver.pm = &cp_pm_ops, }; module_pci_driver(cp_driver); diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c index 5caeb8368eab..227139d42227 100644 --- a/drivers/net/ethernet/realtek/8139too.c +++ b/drivers/net/ethernet/realtek/8139too.c @@ -2603,17 +2603,13 @@ static void rtl8139_set_rx_mode (struct net_device *dev) spin_unlock_irqrestore (&tp->lock, flags); } -#ifdef CONFIG_PM - -static int rtl8139_suspend (struct pci_dev *pdev, pm_message_t state) +static int __maybe_unused rtl8139_suspend(struct device *device) { - struct net_device *dev = pci_get_drvdata (pdev); + struct net_device *dev = dev_get_drvdata(device); struct rtl8139_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; unsigned long flags; - pci_save_state (pdev); - if (!netif_running (dev)) return 0; @@ -2631,38 +2627,30 @@ static int rtl8139_suspend (struct pci_dev *pdev, pm_message_t state) spin_unlock_irqrestore (&tp->lock, flags); - pci_set_power_state (pdev, PCI_D3hot); - return 0; } - -static int rtl8139_resume (struct pci_dev *pdev) +static int __maybe_unused rtl8139_resume(struct device *device) { - struct net_device *dev = pci_get_drvdata (pdev); + struct net_device *dev = dev_get_drvdata(device); - pci_restore_state (pdev); if (!netif_running (dev)) return 0; - pci_set_power_state (pdev, PCI_D0); + rtl8139_init_ring (dev); rtl8139_hw_start (dev); 
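	/* rings and chip registers are reprogrammed; mark the device usable again */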
netif_device_attach (dev); return 0; } -#endif /* CONFIG_PM */ - +static SIMPLE_DEV_PM_OPS(rtl8139_pm_ops, rtl8139_suspend, rtl8139_resume); static struct pci_driver rtl8139_pci_driver = { .name = DRV_NAME, .id_table = rtl8139_pci_tbl, .probe = rtl8139_init_one, .remove = rtl8139_remove_one, -#ifdef CONFIG_PM - .suspend = rtl8139_suspend, - .resume = rtl8139_resume, -#endif /* CONFIG_PM */ + .driver.pm = &rtl8139_pm_ops, }; diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index e35820c72264..4d2ec9742cee 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -1027,6 +1027,13 @@ static u16 rtl_ephy_read(struct rtl8169_private *tp, int reg_addr) RTL_R32(tp, EPHYAR) & EPHYAR_DATA_MASK : ~0; } +static void r8168fp_adjust_ocp_cmd(struct rtl8169_private *tp, u32 *cmd, int type) +{ + /* based on RTL8168FP_OOBMAC_BASE in vendor driver */ + if (tp->mac_version == RTL_GIGA_MAC_VER_52 && type == ERIAR_OOB) + *cmd |= 0x7f0 << 18; +} + DECLARE_RTL_COND(rtl_eriar_cond) { return RTL_R32(tp, ERIAR) & ERIAR_FLAG; @@ -1035,9 +1042,12 @@ DECLARE_RTL_COND(rtl_eriar_cond) static void _rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask, u32 val, int type) { + u32 cmd = ERIAR_WRITE_CMD | type | mask | addr; + BUG_ON((addr & 3) || (mask == 0)); RTL_W32(tp, ERIDR, val); - RTL_W32(tp, ERIAR, ERIAR_WRITE_CMD | type | mask | addr); + r8168fp_adjust_ocp_cmd(tp, &cmd, type); + RTL_W32(tp, ERIAR, cmd); rtl_loop_wait_low(tp, &rtl_eriar_cond, 100, 100); } @@ -1050,7 +1060,10 @@ static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask, static u32 _rtl_eri_read(struct rtl8169_private *tp, int addr, int type) { - RTL_W32(tp, ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr); + u32 cmd = ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr; + + r8168fp_adjust_ocp_cmd(tp, &cmd, type); + RTL_W32(tp, ERIAR, cmd); return rtl_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ? RTL_R32(tp, ERIDR) : ~0; @@ -1061,35 +1074,31 @@ static u32 rtl_eri_read(struct rtl8169_private *tp, int addr) return _rtl_eri_read(tp, addr, ERIAR_EXGMAC); } -static void rtl_w0w1_eri(struct rtl8169_private *tp, int addr, u32 mask, u32 p, - u32 m) +static void rtl_w0w1_eri(struct rtl8169_private *tp, int addr, u32 p, u32 m) { - u32 val; + u32 val = rtl_eri_read(tp, addr); - val = rtl_eri_read(tp, addr); - rtl_eri_write(tp, addr, mask, (val & ~m) | p); + rtl_eri_write(tp, addr, ERIAR_MASK_1111, (val & ~m) | p); } -static void rtl_eri_set_bits(struct rtl8169_private *tp, int addr, u32 mask, - u32 p) +static void rtl_eri_set_bits(struct rtl8169_private *tp, int addr, u32 p) { - rtl_w0w1_eri(tp, addr, mask, p, 0); + rtl_w0w1_eri(tp, addr, p, 0); } -static void rtl_eri_clear_bits(struct rtl8169_private *tp, int addr, u32 mask, - u32 m) +static void rtl_eri_clear_bits(struct rtl8169_private *tp, int addr, u32 m) { - rtl_w0w1_eri(tp, addr, mask, 0, m); + rtl_w0w1_eri(tp, addr, 0, m); } -static u32 r8168dp_ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg) +static u32 r8168dp_ocp_read(struct rtl8169_private *tp, u16 reg) { - RTL_W32(tp, OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff)); + RTL_W32(tp, OCPAR, 0x0fu << 12 | (reg & 0x0fff)); return rtl_loop_wait_high(tp, &rtl_ocpar_cond, 100, 20) ? 
RTL_R32(tp, OCPDR) : ~0; } -static u32 r8168ep_ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg) +static u32 r8168ep_ocp_read(struct rtl8169_private *tp, u16 reg) { return _rtl_eri_read(tp, reg, ERIAR_OOB); } @@ -1131,12 +1140,12 @@ DECLARE_RTL_COND(rtl_dp_ocp_read_cond) reg = rtl8168_get_ocp_reg(tp); - return r8168dp_ocp_read(tp, 0x0f, reg) & 0x00000800; + return r8168dp_ocp_read(tp, reg) & 0x00000800; } DECLARE_RTL_COND(rtl_ep_ocp_read_cond) { - return r8168ep_ocp_read(tp, 0x0f, 0x124) & 0x00000001; + return r8168ep_ocp_read(tp, 0x124) & 0x00000001; } DECLARE_RTL_COND(rtl_ocp_tx_cond) @@ -1161,8 +1170,7 @@ static void rtl8168dp_driver_start(struct rtl8169_private *tp) static void rtl8168ep_driver_start(struct rtl8169_private *tp) { r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_START); - r8168ep_ocp_write(tp, 0x01, 0x30, - r8168ep_ocp_read(tp, 0x01, 0x30) | 0x01); + r8168ep_ocp_write(tp, 0x01, 0x30, r8168ep_ocp_read(tp, 0x30) | 0x01); rtl_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10000, 10); } @@ -1193,8 +1201,7 @@ static void rtl8168ep_driver_stop(struct rtl8169_private *tp) { rtl8168ep_stop_cmac(tp); r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_STOP); - r8168ep_ocp_write(tp, 0x01, 0x30, - r8168ep_ocp_read(tp, 0x01, 0x30) | 0x01); + r8168ep_ocp_write(tp, 0x01, 0x30, r8168ep_ocp_read(tp, 0x30) | 0x01); rtl_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10000, 10); } @@ -1219,12 +1226,12 @@ static bool r8168dp_check_dash(struct rtl8169_private *tp) { u16 reg = rtl8168_get_ocp_reg(tp); - return !!(r8168dp_ocp_read(tp, 0x0f, reg) & 0x00008000); + return !!(r8168dp_ocp_read(tp, reg) & 0x00008000); } static bool r8168ep_check_dash(struct rtl8169_private *tp) { - return !!(r8168ep_ocp_read(tp, 0x0f, 0x128) & 0x00000001); + return r8168ep_ocp_read(tp, 0x128) & 0x00000001; } static bool r8168_check_dash(struct rtl8169_private *tp) @@ -1243,8 +1250,8 @@ static bool r8168_check_dash(struct rtl8169_private *tp) static void rtl_reset_packet_filter(struct rtl8169_private *tp) { - rtl_eri_clear_bits(tp, 0xdc, ERIAR_MASK_0001, BIT(0)); - rtl_eri_set_bits(tp, 0xdc, ERIAR_MASK_0001, BIT(0)); + rtl_eri_clear_bits(tp, 0xdc, BIT(0)); + rtl_eri_set_bits(tp, 0xdc, BIT(0)); } DECLARE_RTL_COND(rtl_efusear_cond) @@ -1371,11 +1378,9 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) if (rtl_is_8168evl_up(tp)) { tmp--; if (wolopts & WAKE_MAGIC) - rtl_eri_set_bits(tp, 0x0dc, ERIAR_MASK_0100, - MagicPacket_v2); + rtl_eri_set_bits(tp, 0x0dc, MagicPacket_v2); else - rtl_eri_clear_bits(tp, 0x0dc, ERIAR_MASK_0100, - MagicPacket_v2); + rtl_eri_clear_bits(tp, 0x0dc, MagicPacket_v2); } else if (rtl_is_8125(tp)) { tmp--; if (wolopts & WAKE_MAGIC) @@ -1871,6 +1876,15 @@ static int rtl_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) RTL_W16(tp, IntrMitigate, w); + /* Meaning of PktCntrDisable bit changed from RTL8168e-vl */ + if (rtl_is_8168evl_up(tp)) { + if (!rx_fr && !tx_fr) + /* disable packet counter */ + tp->cp_cmd |= PktCntrDisable; + else + tp->cp_cmd &= ~PktCntrDisable; + } + tp->cp_cmd = (tp->cp_cmd & ~INTT_MASK) | cp01; RTL_W16(tp, CPlusCmd, tp->cp_cmd); rtl_pci_commit(tp); @@ -2110,7 +2124,7 @@ static void rtl8168_config_eee_mac(struct rtl8169_private *tp) if (tp->mac_version != RTL_GIGA_MAC_VER_38) RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07); - rtl_eri_set_bits(tp, 0x1b0, ERIAR_MASK_1111, 0x0003); + rtl_eri_set_bits(tp, 0x1b0, 0x0003); } static void rtl8125_config_eee_mac(struct rtl8169_private *tp) @@ -2274,7 +2288,7 @@ static void rtl_pll_power_down(struct 
rtl8169_private *tp) case RTL_GIGA_MAC_VER_40: case RTL_GIGA_MAC_VER_41: case RTL_GIGA_MAC_VER_49: - rtl_eri_clear_bits(tp, 0x1a8, ERIAR_MASK_1111, 0xfc000000); + rtl_eri_clear_bits(tp, 0x1a8, 0xfc000000); RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80); break; default: @@ -2307,7 +2321,7 @@ static void rtl_pll_power_up(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_41: case RTL_GIGA_MAC_VER_49: RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0); - rtl_eri_set_bits(tp, 0x1a8, ERIAR_MASK_1111, 0xfc000000); + rtl_eri_set_bits(tp, 0x1a8, 0xfc000000); break; default: break; @@ -2516,36 +2530,6 @@ static void rtl_enable_rxdvgate(struct rtl8169_private *tp) rtl_wait_txrx_fifo_empty(tp); } -static void rtl8169_hw_reset(struct rtl8169_private *tp) -{ - /* Disable interrupts */ - rtl8169_irq_mask_and_ack(tp); - - rtl_rx_close(tp); - - switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_27: - case RTL_GIGA_MAC_VER_28: - case RTL_GIGA_MAC_VER_31: - rtl_loop_wait_low(tp, &rtl_npq_cond, 20, 2000); - break; - case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38: - RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq); - rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666); - break; - case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_61: - rtl_enable_rxdvgate(tp); - fsleep(2000); - break; - default: - RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq); - udelay(100); - break; - } - - rtl_hw_reset(tp); -} - static void rtl_set_tx_config_registers(struct rtl8169_private *tp) { u32 val = TX_DMA_BURST << TxDMAShift | @@ -2912,12 +2896,14 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp) rtl_ephy_init(tp, e_info_8168e_2); rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); - rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_1111, 0x0000); rtl_set_fifo_size(tp, 0x10, 0x10, 0x02, 0x06); + rtl_eri_set_bits(tp, 0x0d4, 0x1f00); + rtl_eri_set_bits(tp, 0x1d0, BIT(1)); + rtl_reset_packet_filter(tp); + rtl_eri_set_bits(tp, 0x1b0, BIT(4)); rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050); rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x07ff0060); - rtl_eri_set_bits(tp, 0x1b0, ERIAR_MASK_0001, BIT(4)); - rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00); rtl_disable_clock_request(tp); @@ -2937,11 +2923,11 @@ static void rtl_hw_start_8168f(struct rtl8169_private *tp) rtl_set_def_aspm_entry_latency(tp); rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); - rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_1111, 0x0000); rtl_set_fifo_size(tp, 0x10, 0x10, 0x02, 0x06); rtl_reset_packet_filter(tp); - rtl_eri_set_bits(tp, 0x1b0, ERIAR_MASK_0001, BIT(4)); - rtl_eri_set_bits(tp, 0x1d0, ERIAR_MASK_0001, BIT(4)); + rtl_eri_set_bits(tp, 0x1b0, BIT(4)); + rtl_eri_set_bits(tp, 0x1d0, BIT(4) | BIT(1)); rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050); rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060); @@ -2970,7 +2956,7 @@ static void rtl_hw_start_8168f_1(struct rtl8169_private *tp) rtl_ephy_init(tp, e_info_8168f_1); - rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00); + rtl_eri_set_bits(tp, 0x0d4, 0x1f00); } static void rtl_hw_start_8411(struct rtl8169_private *tp) @@ -2988,7 +2974,7 @@ static void rtl_hw_start_8411(struct rtl8169_private *tp) rtl_ephy_init(tp, e_info_8168f_1); - rtl_eri_set_bits(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00); + rtl_eri_set_bits(tp, 0x0d4, 0x0c00); } static void rtl_hw_start_8168g(struct rtl8169_private *tp) @@ -3005,11 +2991,12 @@ static void rtl_hw_start_8168g(struct rtl8169_private *tp) rtl_eri_write(tp, 0xc0, 
ERIAR_MASK_0011, 0x0000); rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); + rtl_eri_set_bits(tp, 0x0d4, 0x1f80); rtl8168_config_eee_mac(tp); - rtl_w0w1_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06); - rtl_eri_clear_bits(tp, 0x1b0, ERIAR_MASK_0011, BIT(12)); + rtl_w0w1_eri(tp, 0x2fc, 0x01, 0x06); + rtl_eri_clear_bits(tp, 0x1b0, BIT(12)); rtl_pcie_state_l2l3_disable(tp); } @@ -3235,9 +3222,8 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) rtl_reset_packet_filter(tp); - rtl_eri_set_bits(tp, 0xdc, ERIAR_MASK_1111, BIT(4)); - - rtl_eri_set_bits(tp, 0xd4, ERIAR_MASK_1111, 0x1f00); + rtl_eri_set_bits(tp, 0xd4, 0x1f00); + rtl_eri_set_bits(tp, 0xdc, 0x001c); rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87); @@ -3253,7 +3239,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN); - rtl_eri_clear_bits(tp, 0x1b0, ERIAR_MASK_0011, BIT(12)); + rtl_eri_clear_bits(tp, 0x1b0, BIT(12)); rtl_pcie_state_l2l3_disable(tp); @@ -3290,7 +3276,7 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp) rtl_reset_packet_filter(tp); - rtl_eri_set_bits(tp, 0xd4, ERIAR_MASK_1111, 0x1f80); + rtl_eri_set_bits(tp, 0xd4, 0x1f80); rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87); @@ -3301,7 +3287,7 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp) rtl8168_config_eee_mac(tp); - rtl_w0w1_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06); + rtl_w0w1_eri(tp, 0x2fc, 0x01, 0x06); RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN); @@ -3393,7 +3379,7 @@ static void rtl_hw_start_8117(struct rtl8169_private *tp) rtl_reset_packet_filter(tp); - rtl_eri_set_bits(tp, 0xd4, ERIAR_MASK_1111, 0x1f90); + rtl_eri_set_bits(tp, 0xd4, 0x1f90); rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87); @@ -3409,7 +3395,7 @@ static void rtl_hw_start_8117(struct rtl8169_private *tp) RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN); - rtl_eri_clear_bits(tp, 0x1b0, ERIAR_MASK_0011, BIT(12)); + rtl_eri_clear_bits(tp, 0x1b0, BIT(12)); rtl_pcie_state_l2l3_disable(tp); @@ -3534,7 +3520,7 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp) rtl_reset_packet_filter(tp); rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); - rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00); + rtl_w0w1_eri(tp, 0x0d4, 0x0e00, 0xff00); /* disable EEE */ rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000); @@ -3824,15 +3810,14 @@ static int rtl8169_change_mtu(struct net_device *dev, int new_mtu) return 0; } -static inline void rtl8169_mark_to_asic(struct RxDesc *desc) +static void rtl8169_mark_to_asic(struct RxDesc *desc) { u32 eor = le32_to_cpu(desc->opts1) & RingEnd; desc->opts2 = 0; /* Force memory writes to complete before releasing descriptor */ dma_wmb(); - - desc->opts1 = cpu_to_le32(DescOwn | eor | R8169_RX_BUF_SIZE); + WRITE_ONCE(desc->opts1, cpu_to_le32(DescOwn | eor | R8169_RX_BUF_SIZE)); } static struct page *rtl8169_alloc_rx_data(struct rtl8169_private *tp, @@ -3940,10 +3925,45 @@ static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start, static void rtl8169_tx_clear(struct rtl8169_private *tp) { rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC); - tp->cur_tx = tp->dirty_tx = 0; netdev_reset_queue(tp->dev); } +static void rtl8169_hw_reset(struct rtl8169_private *tp) +{ + /* Give a racing hard_start_xmit a few cycles to complete. 
*/ + synchronize_rcu(); + + /* Disable interrupts */ + rtl8169_irq_mask_and_ack(tp); + + rtl_rx_close(tp); + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + rtl_loop_wait_low(tp, &rtl_npq_cond, 20, 2000); + break; + case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38: + RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq); + rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666); + break; + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_61: + rtl_enable_rxdvgate(tp); + fsleep(2000); + break; + default: + RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq); + fsleep(100); + break; + } + + rtl_hw_reset(tp); + + rtl8169_tx_clear(tp); + rtl8169_init_ring_indexes(tp); +} + static void rtl_reset_work(struct rtl8169_private *tp) { struct net_device *dev = tp->dev; @@ -3951,16 +3971,12 @@ static void rtl_reset_work(struct rtl8169_private *tp) napi_disable(&tp->napi); netif_stop_queue(dev); - synchronize_rcu(); rtl8169_hw_reset(tp); for (i = 0; i < NUM_RX_DESC; i++) rtl8169_mark_to_asic(tp->RxDescArray + i); - rtl8169_tx_clear(tp); - rtl8169_init_ring_indexes(tp); - napi_enable(&tp->napi); rtl_hw_start(tp); netif_wake_queue(dev); @@ -4413,15 +4429,17 @@ static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1) static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget) { - unsigned int cur_rx, rx_left; - unsigned int count; + unsigned int cur_rx, rx_left, count; + struct device *d = tp_to_dev(tp); cur_rx = tp->cur_rx; for (rx_left = min(budget, NUM_RX_DESC); rx_left > 0; rx_left--, cur_rx++) { - unsigned int entry = cur_rx % NUM_RX_DESC; - const void *rx_buf = page_address(tp->Rx_databuff[entry]); + unsigned int pkt_size, entry = cur_rx % NUM_RX_DESC; struct RxDesc *desc = tp->RxDescArray + entry; + struct sk_buff *skb; + const void *rx_buf; + dma_addr_t addr; u32 status; status = le32_to_cpu(desc->opts1); @@ -4443,62 +4461,57 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget dev->stats.rx_length_errors++; if (status & RxCRC) dev->stats.rx_crc_errors++; - if (status & (RxRUNT | RxCRC) && !(status & RxRWT) && - dev->features & NETIF_F_RXALL) { - goto process_pkt; - } - } else { - unsigned int pkt_size; - struct sk_buff *skb; - -process_pkt: - pkt_size = status & GENMASK(13, 0); - if (likely(!(dev->features & NETIF_F_RXFCS))) - pkt_size -= ETH_FCS_LEN; - /* - * The driver does not support incoming fragmented - * frames. They are seen as a symptom of over-mtu - * sized frames. - */ - if (unlikely(rtl8169_fragmented_frame(status))) { - dev->stats.rx_dropped++; - dev->stats.rx_length_errors++; - goto release_descriptor; - } - skb = napi_alloc_skb(&tp->napi, pkt_size); - if (unlikely(!skb)) { - dev->stats.rx_dropped++; + if (!(dev->features & NETIF_F_RXALL)) goto release_descriptor; - } + else if (status & RxRWT || !(status & (RxRUNT | RxCRC))) + goto release_descriptor; + } + + pkt_size = status & GENMASK(13, 0); + if (likely(!(dev->features & NETIF_F_RXFCS))) + pkt_size -= ETH_FCS_LEN; + + /* The driver does not support incoming fragmented frames. + * They are seen as a symptom of over-mtu sized frames. 
+ */ + if (unlikely(rtl8169_fragmented_frame(status))) { + dev->stats.rx_dropped++; + dev->stats.rx_length_errors++; + goto release_descriptor; + } - dma_sync_single_for_cpu(tp_to_dev(tp), - le64_to_cpu(desc->addr), - pkt_size, DMA_FROM_DEVICE); - prefetch(rx_buf); - skb_copy_to_linear_data(skb, rx_buf, pkt_size); - skb->tail += pkt_size; - skb->len = pkt_size; + skb = napi_alloc_skb(&tp->napi, pkt_size); + if (unlikely(!skb)) { + dev->stats.rx_dropped++; + goto release_descriptor; + } - dma_sync_single_for_device(tp_to_dev(tp), - le64_to_cpu(desc->addr), - pkt_size, DMA_FROM_DEVICE); + addr = le64_to_cpu(desc->addr); + rx_buf = page_address(tp->Rx_databuff[entry]); - rtl8169_rx_csum(skb, status); - skb->protocol = eth_type_trans(skb, dev); + dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE); + prefetch(rx_buf); + skb_copy_to_linear_data(skb, rx_buf, pkt_size); + skb->tail += pkt_size; + skb->len = pkt_size; + dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE); - rtl8169_rx_vlan_tag(desc, skb); + rtl8169_rx_csum(skb, status); + skb->protocol = eth_type_trans(skb, dev); - if (skb->pkt_type == PACKET_MULTICAST) - dev->stats.multicast++; + rtl8169_rx_vlan_tag(desc, skb); - napi_gro_receive(&tp->napi, skb); + if (skb->pkt_type == PACKET_MULTICAST) + dev->stats.multicast++; + + napi_gro_receive(&tp->napi, skb); + + u64_stats_update_begin(&tp->rx_stats.syncp); + tp->rx_stats.packets++; + tp->rx_stats.bytes += pkt_size; + u64_stats_update_end(&tp->rx_stats.syncp); - u64_stats_update_begin(&tp->rx_stats.syncp); - tp->rx_stats.packets++; - tp->rx_stats.bytes += pkt_size; - u64_stats_update_end(&tp->rx_stats.syncp); - } release_descriptor: rtl8169_mark_to_asic(desc); } @@ -4614,25 +4627,21 @@ static int r8169_phy_connect(struct rtl8169_private *tp) return 0; } -static void rtl8169_down(struct net_device *dev) +static void rtl8169_down(struct rtl8169_private *tp) { - struct rtl8169_private *tp = netdev_priv(dev); + rtl_lock_work(tp); - phy_stop(tp->phydev); + /* Clear all task flags */ + bitmap_zero(tp->wk.flags, RTL_FLAG_MAX); + phy_stop(tp->phydev); napi_disable(&tp->napi); - netif_stop_queue(dev); rtl8169_hw_reset(tp); - /* Give a racing hard_start_xmit a few cycles to complete. 
*/ - synchronize_rcu(); - - rtl8169_tx_clear(tp); - - rtl8169_rx_clear(tp); - rtl_pll_power_down(tp); + + rtl_unlock_work(tp); } static int rtl8169_close(struct net_device *dev) @@ -4645,12 +4654,9 @@ static int rtl8169_close(struct net_device *dev) /* Update counters before going down */ rtl8169_update_counters(tp); - rtl_lock_work(tp); - /* Clear all task flags */ - bitmap_zero(tp->wk.flags, RTL_FLAG_MAX); - - rtl8169_down(dev); - rtl_unlock_work(tp); + netif_stop_queue(dev); + rtl8169_down(tp); + rtl8169_rx_clear(tp); cancel_work_sync(&tp->wk.work); @@ -4804,44 +4810,30 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) pm_runtime_put_noidle(&pdev->dev); } -static void rtl8169_net_suspend(struct net_device *dev) +static void rtl8169_net_suspend(struct rtl8169_private *tp) { - struct rtl8169_private *tp = netdev_priv(dev); - - if (!netif_running(dev)) + if (!netif_running(tp->dev)) return; - phy_stop(tp->phydev); - netif_device_detach(dev); - - rtl_lock_work(tp); - napi_disable(&tp->napi); - /* Clear all task flags */ - bitmap_zero(tp->wk.flags, RTL_FLAG_MAX); - - rtl_unlock_work(tp); - - rtl_pll_power_down(tp); + netif_device_detach(tp->dev); + rtl8169_down(tp); } #ifdef CONFIG_PM -static int rtl8169_suspend(struct device *device) +static int __maybe_unused rtl8169_suspend(struct device *device) { - struct net_device *dev = dev_get_drvdata(device); - struct rtl8169_private *tp = netdev_priv(dev); + struct rtl8169_private *tp = dev_get_drvdata(device); - rtl8169_net_suspend(dev); + rtl8169_net_suspend(tp); clk_disable_unprepare(tp->clk); return 0; } -static void __rtl8169_resume(struct net_device *dev) +static void __rtl8169_resume(struct rtl8169_private *tp) { - struct rtl8169_private *tp = netdev_priv(dev); - - netif_device_attach(dev); + netif_device_attach(tp->dev); rtl_pll_power_up(tp); rtl8169_init_phy(tp); @@ -4855,34 +4847,32 @@ static void __rtl8169_resume(struct net_device *dev) rtl_unlock_work(tp); } -static int rtl8169_resume(struct device *device) +static int __maybe_unused rtl8169_resume(struct device *device) { - struct net_device *dev = dev_get_drvdata(device); - struct rtl8169_private *tp = netdev_priv(dev); + struct rtl8169_private *tp = dev_get_drvdata(device); - rtl_rar_set(tp, dev->dev_addr); + rtl_rar_set(tp, tp->dev->dev_addr); clk_prepare_enable(tp->clk); - if (netif_running(dev)) - __rtl8169_resume(dev); + if (netif_running(tp->dev)) + __rtl8169_resume(tp); return 0; } static int rtl8169_runtime_suspend(struct device *device) { - struct net_device *dev = dev_get_drvdata(device); - struct rtl8169_private *tp = netdev_priv(dev); + struct rtl8169_private *tp = dev_get_drvdata(device); if (!tp->TxDescArray) return 0; rtl_lock_work(tp); - __rtl8169_set_wol(tp, WAKE_ANY); + __rtl8169_set_wol(tp, WAKE_PHY); rtl_unlock_work(tp); - rtl8169_net_suspend(dev); + rtl8169_net_suspend(tp); /* Update counters before going runtime suspend */ rtl8169_update_counters(tp); @@ -4892,10 +4882,9 @@ static int rtl8169_runtime_suspend(struct device *device) static int rtl8169_runtime_resume(struct device *device) { - struct net_device *dev = dev_get_drvdata(device); - struct rtl8169_private *tp = netdev_priv(dev); + struct rtl8169_private *tp = dev_get_drvdata(device); - rtl_rar_set(tp, dev->dev_addr); + rtl_rar_set(tp, tp->dev->dev_addr); if (!tp->TxDescArray) return 0; @@ -4904,40 +4893,28 @@ static int rtl8169_runtime_resume(struct device *device) __rtl8169_set_wol(tp, tp->saved_wolopts); rtl_unlock_work(tp); - __rtl8169_resume(dev); + 
__rtl8169_resume(tp); return 0; } static int rtl8169_runtime_idle(struct device *device) { - struct net_device *dev = dev_get_drvdata(device); + struct rtl8169_private *tp = dev_get_drvdata(device); - if (!netif_running(dev) || !netif_carrier_ok(dev)) + if (!netif_running(tp->dev) || !netif_carrier_ok(tp->dev)) pm_schedule_suspend(device, 10000); return -EBUSY; } static const struct dev_pm_ops rtl8169_pm_ops = { - .suspend = rtl8169_suspend, - .resume = rtl8169_resume, - .freeze = rtl8169_suspend, - .thaw = rtl8169_resume, - .poweroff = rtl8169_suspend, - .restore = rtl8169_resume, - .runtime_suspend = rtl8169_runtime_suspend, - .runtime_resume = rtl8169_runtime_resume, - .runtime_idle = rtl8169_runtime_idle, + SET_SYSTEM_SLEEP_PM_OPS(rtl8169_suspend, rtl8169_resume) + SET_RUNTIME_PM_OPS(rtl8169_runtime_suspend, rtl8169_runtime_resume, + rtl8169_runtime_idle) }; -#define RTL8169_PM_OPS (&rtl8169_pm_ops) - -#else /* !CONFIG_PM */ - -#define RTL8169_PM_OPS NULL - -#endif /* !CONFIG_PM */ +#endif /* CONFIG_PM */ static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp) { @@ -4958,13 +4935,12 @@ static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp) static void rtl_shutdown(struct pci_dev *pdev) { - struct net_device *dev = pci_get_drvdata(pdev); - struct rtl8169_private *tp = netdev_priv(dev); + struct rtl8169_private *tp = pci_get_drvdata(pdev); - rtl8169_net_suspend(dev); + rtl8169_net_suspend(tp); /* Restore original MAC address */ - rtl_rar_set(tp, dev->perm_addr); + rtl_rar_set(tp, tp->dev->perm_addr); rtl8169_hw_reset(tp); @@ -4981,23 +4957,20 @@ static void rtl_shutdown(struct pci_dev *pdev) static void rtl_remove_one(struct pci_dev *pdev) { - struct net_device *dev = pci_get_drvdata(pdev); - struct rtl8169_private *tp = netdev_priv(dev); + struct rtl8169_private *tp = pci_get_drvdata(pdev); - if (r8168_check_dash(tp)) - rtl8168_driver_stop(tp); + if (pci_dev_run_wake(pdev)) + pm_runtime_get_noresume(&pdev->dev); - netif_napi_del(&tp->napi); + unregister_netdev(tp->dev); - unregister_netdev(dev); + if (r8168_check_dash(tp)) + rtl8168_driver_stop(tp); rtl_release_firmware(tp); - if (pci_dev_run_wake(pdev)) - pm_runtime_get_noresume(&pdev->dev); - /* restore original MAC address */ - rtl_rar_set(tp, dev->perm_addr); + rtl_rar_set(tp, tp->dev->perm_addr); } static const struct net_device_ops rtl_netdev_ops = { @@ -5436,7 +5409,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (!tp->counters) return -ENOMEM; - pci_set_drvdata(pdev, dev); + pci_set_drvdata(pdev, tp); rc = r8169_mdio_register(tp); if (rc) @@ -5473,7 +5446,9 @@ static struct pci_driver rtl8169_pci_driver = { .probe = rtl_init_one, .remove = rtl_remove_one, .shutdown = rtl_shutdown, - .driver.pm = RTL8169_PM_OPS, +#ifdef CONFIG_PM + .driver.pm = &rtl8169_pm_ops, +#endif }; module_pci_driver(rtl8169_pci_driver); diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 067ad25553b9..a442bcf64b9c 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -1014,6 +1014,7 @@ static int ravb_phy_init(struct net_device *ndev) struct ravb_private *priv = netdev_priv(ndev); struct phy_device *phydev; struct device_node *pn; + phy_interface_t iface; int err; priv->link = 0; @@ -1032,8 +1033,13 @@ static int ravb_phy_init(struct net_device *ndev) } pn = of_node_get(np); } - phydev = of_phy_connect(ndev, pn, ravb_adjust_link, 0, - priv->phy_interface); + + iface = priv->phy_interface; + if 
(priv->chip_id != RCAR_GEN2 && phy_interface_mode_is_rgmii(iface)) { + /* ravb_set_delay_mode() takes care of internal delay mode */ + iface = PHY_INTERFACE_MODE_RGMII; + } + phydev = of_phy_connect(ndev, pn, ravb_adjust_link, 0, iface); of_node_put(pn); if (!phydev) { netdev_err(ndev, "failed to connect PHY\n"); diff --git a/drivers/net/ethernet/seeq/ether3.c b/drivers/net/ethernet/seeq/ether3.c index 128ee7cda1ed..65c98837ec45 100644 --- a/drivers/net/ethernet/seeq/ether3.c +++ b/drivers/net/ethernet/seeq/ether3.c @@ -610,12 +610,9 @@ static int ether3_rx(struct net_device *dev, unsigned int maxcnt) ether3_readbuffer(dev, addrs+2, 12); if (next_ptr < RX_START || next_ptr >= RX_END) { - int i; printk("%s: bad next pointer @%04X: ", dev->name, priv(dev)->rx_head); printk("%02X %02X %02X %02X ", next_ptr >> 8, next_ptr & 255, status & 255, status >> 8); - for (i = 2; i < 14; i++) - printk("%02X ", addrs[i]); - printk("\n"); + printk("%pM %pM\n", addrs + 2, addrs + 8); next_ptr = priv(dev)->rx_head; break; } diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index e634e8110585..964c5e842cec 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -3228,7 +3228,6 @@ reset_nic: static int efx_ef10_set_mac_address(struct efx_nic *efx) { MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN); - struct efx_ef10_nic_data *nic_data = efx->nic_data; bool was_enabled = efx->port_enabled; int rc; @@ -3256,6 +3255,7 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx) #ifdef CONFIG_SFC_SRIOV if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) { + struct efx_ef10_nic_data *nic_data = efx->nic_data; struct pci_dev *pci_dev_pf = efx->pci_dev->physfn; if (rc == -EPERM) { diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c index 7305e8e86c51..6646eba9f57f 100644 --- a/drivers/net/ethernet/sgi/ioc3-eth.c +++ b/drivers/net/ethernet/sgi/ioc3-eth.c @@ -848,14 +848,14 @@ static int ioc3eth_probe(struct platform_device *pdev) ip = netdev_priv(dev); ip->dma_dev = pdev->dev.parent; ip->regs = devm_platform_ioremap_resource(pdev, 0); - if (!ip->regs) { - err = -ENOMEM; + if (IS_ERR(ip->regs)) { + err = PTR_ERR(ip->regs); goto out_free; } ip->ssram = devm_platform_ioremap_resource(pdev, 1); - if (!ip->ssram) { - err = -ENOMEM; + if (IS_ERR(ip->ssram)) { + err = PTR_ERR(ip->ssram); goto out_free; } diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index 49a6a9167af4..fc168f85e7af 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -2493,20 +2493,20 @@ static int smsc911x_drv_probe(struct platform_device *pdev) retval = smsc911x_init(dev); if (retval < 0) - goto out_disable_resources; + goto out_init_fail; netif_carrier_off(dev); retval = smsc911x_mii_init(pdev, dev); if (retval) { SMSC_WARN(pdata, probe, "Error %i initialising mii", retval); - goto out_disable_resources; + goto out_init_fail; } retval = register_netdev(dev); if (retval) { SMSC_WARN(pdata, probe, "Error %i registering device", retval); - goto out_disable_resources; + goto out_init_fail; } else { SMSC_TRACE(pdata, probe, "Network interface: \"%s\"", dev->name); @@ -2547,9 +2547,10 @@ static int smsc911x_drv_probe(struct platform_device *pdev) return 0; -out_disable_resources: +out_init_fail: pm_runtime_put(&pdev->dev); pm_runtime_disable(&pdev->dev); +out_disable_resources: (void)smsc911x_disable_resources(pdev); out_enable_resources_fail: smsc911x_free_resources(pdev); 
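The 8139cp, 8139too and (further down) r8169 hunks above all apply the same mechanical conversion: the legacy pci_driver .suspend/.resume hooks and their #ifdef CONFIG_PM guards are replaced by a dev_pm_ops table wired up through .driver.pm, with __maybe_unused callbacks that take a struct device *. The PCI core then performs pci_save_state()/pci_set_power_state() itself, and wake-on-LAN is requested generically through device_set_wakeup_enable(). A minimal sketch of the pattern, using a hypothetical "foo" driver rather than any of the drivers above:

#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/pm.h>

static int __maybe_unused foo_suspend(struct device *device)
{
	struct net_device *ndev = dev_get_drvdata(device);

	netif_device_detach(ndev);		/* driver-specific quiescing only */
	device_set_wakeup_enable(device, true);	/* replaces pci_enable_wake() */
	return 0;
}

static int __maybe_unused foo_resume(struct device *device)
{
	struct net_device *ndev = dev_get_drvdata(device);

	/* the PCI core has already restored config space and power state */
	netif_device_attach(ndev);
	return 0;
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct pci_driver foo_driver = {
	.name		= "foo",
	.driver.pm	= &foo_pm_ops,	/* no #ifdef CONFIG_PM needed */
};

When CONFIG_PM_SLEEP is off, SIMPLE_DEV_PM_OPS() compiles to an empty table, and the __maybe_unused annotation keeps the then-unreferenced callbacks from triggering warnings without any preprocessor guards.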
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index b46f8d2ae6d7..36bd2e18f23b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig @@ -196,6 +196,19 @@ config DWMAC_SUN8I This selects Allwinner SoC glue layer support for the stmmac device driver. This driver is used for H3/A83T/A64 EMAC ethernet controller. + +config DWMAC_IMX8 + tristate "NXP IMX8 DWMAC support" + default ARCH_MXC + depends on OF && (ARCH_MXC || COMPILE_TEST) + select MFD_SYSCON + ---help--- + Support for ethernet controller on NXP i.MX8 SOCs. + + This selects NXP SoC glue layer support for the stmmac + device driver. This driver is used for i.MX8 series like + iMX8MP/iMX8DXL GMAC ethernet controller. + endif config DWMAC_INTEL diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile index f9d024d6b69b..295615ab36a7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Makefile +++ b/drivers/net/ethernet/stmicro/stmmac/Makefile @@ -27,6 +27,7 @@ obj-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o obj-$(CONFIG_DWMAC_SUN8I) += dwmac-sun8i.o obj-$(CONFIG_DWMAC_DWC_QOS_ETH) += dwmac-dwc-qos-eth.o obj-$(CONFIG_DWMAC_GENERIC) += dwmac-generic.o +obj-$(CONFIG_DWMAC_IMX8) += dwmac-imx.o stmmac-platform-objs:= stmmac_platform.o dwmac-altr-socfpga-objs := altr_tse_pcs.o dwmac-socfpga.o diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c new file mode 100644 index 000000000000..5010af7dab4a --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c @@ -0,0 +1,315 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * dwmac-imx.c - DWMAC Specific Glue layer for NXP imx8 + * + * Copyright 2020 NXP + * + */ + +#include <linux/clk.h> +#include <linux/gpio/consumer.h> +#include <linux/kernel.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_net.h> +#include <linux/phy.h> +#include <linux/platform_device.h> +#include <linux/pm_wakeirq.h> +#include <linux/regmap.h> +#include <linux/slab.h> +#include <linux/stmmac.h> + +#include "stmmac_platform.h" + +#define GPR_ENET_QOS_INTF_MODE_MASK GENMASK(21, 16) +#define GPR_ENET_QOS_INTF_SEL_MII (0x0 << 16) +#define GPR_ENET_QOS_INTF_SEL_RMII (0x4 << 16) +#define GPR_ENET_QOS_INTF_SEL_RGMII (0x1 << 16) +#define GPR_ENET_QOS_CLK_GEN_EN (0x1 << 19) +#define GPR_ENET_QOS_CLK_TX_CLK_SEL (0x1 << 20) +#define GPR_ENET_QOS_RGMII_EN (0x1 << 21) + +struct imx_dwmac_ops { + u32 addr_width; + bool mac_rgmii_txclk_auto_adj; + + int (*set_intf_mode)(struct plat_stmmacenet_data *plat_dat); +}; + +struct imx_priv_data { + struct device *dev; + struct clk *clk_tx; + struct clk *clk_mem; + struct regmap *intf_regmap; + u32 intf_reg_off; + bool rmii_refclk_ext; + + const struct imx_dwmac_ops *ops; + struct plat_stmmacenet_data *plat_dat; +}; + +static int imx8mp_set_intf_mode(struct plat_stmmacenet_data *plat_dat) +{ + struct imx_priv_data *dwmac = plat_dat->bsp_priv; + int val; + + switch (plat_dat->interface) { + case PHY_INTERFACE_MODE_MII: + val = GPR_ENET_QOS_INTF_SEL_MII; + break; + case PHY_INTERFACE_MODE_RMII: + val = GPR_ENET_QOS_INTF_SEL_RMII; + val |= (dwmac->rmii_refclk_ext ? 
0 : GPR_ENET_QOS_CLK_TX_CLK_SEL); + break; + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: + val = GPR_ENET_QOS_INTF_SEL_RGMII | + GPR_ENET_QOS_RGMII_EN; + break; + default: + pr_debug("imx dwmac doesn't support %d interface\n", + plat_dat->interface); + return -EINVAL; + } + + val |= GPR_ENET_QOS_CLK_GEN_EN; + return regmap_update_bits(dwmac->intf_regmap, dwmac->intf_reg_off, + GPR_ENET_QOS_INTF_MODE_MASK, val); +}; + +static int +imx8dxl_set_intf_mode(struct plat_stmmacenet_data *plat_dat) +{ + int ret = 0; + + /* TBD: depends on imx8dxl scu interfaces to be upstreamed */ + return ret; +} + +static int imx_dwmac_init(struct platform_device *pdev, void *priv) +{ + struct plat_stmmacenet_data *plat_dat; + struct imx_priv_data *dwmac = priv; + int ret; + + plat_dat = dwmac->plat_dat; + + ret = clk_prepare_enable(dwmac->clk_mem); + if (ret) { + dev_err(&pdev->dev, "mem clock enable failed\n"); + return ret; + } + + ret = clk_prepare_enable(dwmac->clk_tx); + if (ret) { + dev_err(&pdev->dev, "tx clock enable failed\n"); + goto clk_tx_en_failed; + } + + if (dwmac->ops->set_intf_mode) { + ret = dwmac->ops->set_intf_mode(plat_dat); + if (ret) + goto intf_mode_failed; + } + + return 0; + +intf_mode_failed: + clk_disable_unprepare(dwmac->clk_tx); +clk_tx_en_failed: + clk_disable_unprepare(dwmac->clk_mem); + return ret; +} + +static void imx_dwmac_exit(struct platform_device *pdev, void *priv) +{ + struct imx_priv_data *dwmac = priv; + + if (dwmac->clk_tx) + clk_disable_unprepare(dwmac->clk_tx); + clk_disable_unprepare(dwmac->clk_mem); +} + +static void imx_dwmac_fix_speed(void *priv, unsigned int speed) +{ + struct plat_stmmacenet_data *plat_dat; + struct imx_priv_data *dwmac = priv; + unsigned long rate; + int err; + + plat_dat = dwmac->plat_dat; + + if (dwmac->ops->mac_rgmii_txclk_auto_adj || + (plat_dat->interface == PHY_INTERFACE_MODE_RMII) || + (plat_dat->interface == PHY_INTERFACE_MODE_MII)) + return; + + switch (speed) { + case SPEED_1000: + rate = 125000000; + break; + case SPEED_100: + rate = 25000000; + break; + case SPEED_10: + rate = 2500000; + break; + default: + dev_err(dwmac->dev, "invalid speed %u\n", speed); + return; + } + + err = clk_set_rate(dwmac->clk_tx, rate); + if (err < 0) + dev_err(dwmac->dev, "failed to set tx rate %lu\n", rate); +} + +static int +imx_dwmac_parse_dt(struct imx_priv_data *dwmac, struct device *dev) +{ + struct device_node *np = dev->of_node; + int err = 0; + + if (of_get_property(np, "snps,rmii_refclk_ext", NULL)) + dwmac->rmii_refclk_ext = true; + + dwmac->clk_tx = devm_clk_get(dev, "tx"); + if (IS_ERR(dwmac->clk_tx)) { + dev_err(dev, "failed to get tx clock\n"); + return PTR_ERR(dwmac->clk_tx); + } + + dwmac->clk_mem = NULL; + if (of_machine_is_compatible("fsl,imx8dxl")) { + dwmac->clk_mem = devm_clk_get(dev, "mem"); + if (IS_ERR(dwmac->clk_mem)) { + dev_err(dev, "failed to get mem clock\n"); + return PTR_ERR(dwmac->clk_mem); + } + } + + if (of_machine_is_compatible("fsl,imx8mp")) { + /* Binding doc describes the property: + is required by i.MX8MP. + is optional for i.MX8DXL. 
+ */ + dwmac->intf_regmap = syscon_regmap_lookup_by_phandle(np, "intf_mode"); + if (IS_ERR(dwmac->intf_regmap)) + return PTR_ERR(dwmac->intf_regmap); + + err = of_property_read_u32_index(np, "intf_mode", 1, &dwmac->intf_reg_off); + if (err) { + dev_err(dev, "Can't get intf mode reg offset (%d)\n", err); + return err; + } + } + + return err; +} + +static int imx_dwmac_probe(struct platform_device *pdev) +{ + struct plat_stmmacenet_data *plat_dat; + struct stmmac_resources stmmac_res; + struct imx_priv_data *dwmac; + const struct imx_dwmac_ops *data; + int ret; + + ret = stmmac_get_platform_resources(pdev, &stmmac_res); + if (ret) + return ret; + + dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL); + if (!dwmac) + return -ENOMEM; + + plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + if (IS_ERR(plat_dat)) + return PTR_ERR(plat_dat); + + data = of_device_get_match_data(&pdev->dev); + if (!data) { + dev_err(&pdev->dev, "failed to get match data\n"); + ret = -EINVAL; + goto err_match_data; + } + + dwmac->ops = data; + dwmac->dev = &pdev->dev; + + ret = imx_dwmac_parse_dt(dwmac, &pdev->dev); + if (ret) { + dev_err(&pdev->dev, "failed to parse OF data\n"); + goto err_parse_dt; + } + + ret = dma_set_mask_and_coherent(&pdev->dev, + DMA_BIT_MASK(dwmac->ops->addr_width)); + if (ret) { + dev_err(&pdev->dev, "DMA mask set failed\n"); + goto err_dma_mask; + } + + plat_dat->init = imx_dwmac_init; + plat_dat->exit = imx_dwmac_exit; + plat_dat->fix_mac_speed = imx_dwmac_fix_speed; + plat_dat->bsp_priv = dwmac; + dwmac->plat_dat = plat_dat; + + ret = imx_dwmac_init(pdev, dwmac); + if (ret) + goto err_dwmac_init; + + ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); + if (ret) + goto err_drv_probe; + + return 0; + +err_dwmac_init: +err_drv_probe: + imx_dwmac_exit(pdev, plat_dat->bsp_priv); +err_dma_mask: +err_parse_dt: +err_match_data: + stmmac_remove_config_dt(pdev, plat_dat); + return ret; +} + +static struct imx_dwmac_ops imx8mp_dwmac_data = { + .addr_width = 34, + .mac_rgmii_txclk_auto_adj = false, + .set_intf_mode = imx8mp_set_intf_mode, +}; + +static struct imx_dwmac_ops imx8dxl_dwmac_data = { + .addr_width = 32, + .mac_rgmii_txclk_auto_adj = true, + .set_intf_mode = imx8dxl_set_intf_mode, +}; + +static const struct of_device_id imx_dwmac_match[] = { + { .compatible = "nxp,imx8mp-dwmac-eqos", .data = &imx8mp_dwmac_data }, + { .compatible = "nxp,imx8dxl-dwmac-eqos", .data = &imx8dxl_dwmac_data }, + { } +}; +MODULE_DEVICE_TABLE(of, imx_dwmac_match); + +static struct platform_driver imx_dwmac_driver = { + .probe = imx_dwmac_probe, + .remove = stmmac_pltfr_remove, + .driver = { + .name = "imx-dwmac", + .pm = &stmmac_pltfr_pm_ops, + .of_match_table = imx_dwmac_match, + }, +}; +module_platform_driver(imx_dwmac_driver); + +MODULE_AUTHOR("NXP"); +MODULE_DESCRIPTION("NXP imx8 DWMAC Specific Glue layer"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c index 6ae13dc19510..02102c781a8c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c @@ -319,6 +319,19 @@ static int ipq806x_gmac_probe(struct platform_device *pdev) /* Enable PTP clock */ regmap_read(gmac->nss_common, NSS_COMMON_CLK_GATE, &val); val |= NSS_COMMON_CLK_GATE_PTP_EN(gmac->id); + switch (gmac->phy_mode) { + case PHY_INTERFACE_MODE_RGMII: + val |= NSS_COMMON_CLK_GATE_RGMII_RX_EN(gmac->id) | + NSS_COMMON_CLK_GATE_RGMII_TX_EN(gmac->id); + break; + case 
PHY_INTERFACE_MODE_SGMII: + val |= NSS_COMMON_CLK_GATE_GMII_RX_EN(gmac->id) | + NSS_COMMON_CLK_GATE_GMII_TX_EN(gmac->id); + break; + default: + /* We don't get here; the switch above will have errored out */ + unreachable(); + } regmap_write(gmac->nss_common, NSS_COMMON_CLK_GATE, val); if (gmac->phy_mode == PHY_INTERFACE_MODE_SGMII) { diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 8f08393f25dd..73677c3b33b6 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -630,7 +630,8 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; ptp_v2 = PTP_TCR_TSVER2ENA; snap_type_sel = PTP_TCR_SNAPTYPSEL_1; - ts_event_en = PTP_TCR_TSEVNTENA; + if (priv->synopsys_id != DWMAC_CORE_5_10) + ts_event_en = PTP_TCR_TSEVNTENA; ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; ptp_over_ethernet = PTP_TCR_TSIPENA; @@ -5181,8 +5182,6 @@ int stmmac_resume(struct device *dev) return ret; } - netif_device_attach(ndev); - mutex_lock(&priv->lock); stmmac_reset_queues_param(priv); @@ -5209,6 +5208,8 @@ int stmmac_resume(struct device *dev) phylink_mac_change(priv->phylink, true); + netif_device_attach(ndev); + return 0; } EXPORT_SYMBOL_GPL(stmmac_resume); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index bcda49dcf619..f32317fa75c8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -507,7 +507,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) if (of_device_is_compatible(np, "snps,dwmac-4.00") || of_device_is_compatible(np, "snps,dwmac-4.10a") || - of_device_is_compatible(np, "snps,dwmac-4.20a")) { + of_device_is_compatible(np, "snps,dwmac-4.20a") || + of_device_is_compatible(np, "snps,dwmac-5.10a")) { plat->has_gmac4 = 1; plat->has_gmac = 0; plat->pmt = 1; diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c index e6e25960da4f..debd3c3fa6fb 100644 --- a/drivers/net/ethernet/sun/cassini.c +++ b/drivers/net/ethernet/sun/cassini.c @@ -4951,7 +4951,7 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) cas_cacheline_size)) { dev_err(&pdev->dev, "Could not set PCI cache " "line size\n"); - goto err_write_cacheline; + goto err_out_free_res; } } #endif @@ -5124,7 +5124,6 @@ err_out_iounmap: err_out_free_res: pci_release_regions(pdev); -err_write_cacheline: /* Try to restore it in case the error occurred after we * set it. 
*/ diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index 4a8229864ae4..87a4775ed53a 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -2083,8 +2083,9 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev) ale_params.nu_switch_ale = true; common->ale = cpsw_ale_create(&ale_params); - if (!common->ale) { + if (IS_ERR(common->ale)) { dev_err(dev, "error initializing ale engine\n"); + ret = PTR_ERR(common->ale); goto err_of_clear; } diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index ce0645ada6e7..9b17bbbe102f 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -1775,11 +1775,15 @@ static int cpsw_suspend(struct device *dev) struct cpsw_common *cpsw = dev_get_drvdata(dev); int i; + rtnl_lock(); + for (i = 0; i < cpsw->data.slaves; i++) if (cpsw->slaves[i].ndev) if (netif_running(cpsw->slaves[i].ndev)) cpsw_ndo_stop(cpsw->slaves[i].ndev); + rtnl_unlock(); + /* Select sleep pin state */ pinctrl_pm_select_sleep_state(dev); diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c index 0374e6936091..8dc6be11b2ff 100644 --- a/drivers/net/ethernet/ti/cpsw_ale.c +++ b/drivers/net/ethernet/ti/cpsw_ale.c @@ -955,7 +955,7 @@ struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params) ale = devm_kzalloc(params->dev, sizeof(*ale), GFP_KERNEL); if (!ale) - return NULL; + return ERR_PTR(-ENOMEM); ale->p0_untag_vid_mask = devm_kmalloc_array(params->dev, BITS_TO_LONGS(VLAN_N_VID), diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c index 9d098c802c6d..d940628bff8d 100644 --- a/drivers/net/ethernet/ti/cpsw_priv.c +++ b/drivers/net/ethernet/ti/cpsw_priv.c @@ -504,9 +504,9 @@ int cpsw_init_common(struct cpsw_common *cpsw, void __iomem *ss_regs, ale_params.ale_ports = CPSW_ALE_PORTS_NUM; cpsw->ale = cpsw_ale_create(&ale_params); - if (!cpsw->ale) { + if (IS_ERR(cpsw->ale)) { dev_err(dev, "error initializing ale engine\n"); - return -ENODEV; + return PTR_ERR(cpsw->ale); } dma_params.dev = dev; diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index 9d6e27fb710e..28093923a7fb 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ b/drivers/net/ethernet/ti/netcp_ethss.c @@ -3704,9 +3704,9 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev, ale_params.nu_switch_ale = true; } gbe_dev->ale = cpsw_ale_create(&ale_params); - if (!gbe_dev->ale) { + if (IS_ERR(gbe_dev->ale)) { dev_err(gbe_dev->dev, "error initializing ale engine\n"); - ret = -ENODEV; + ret = PTR_ERR(gbe_dev->ale); goto free_sec_ports; } else { dev_dbg(gbe_dev->dev, "Created a gbe ale engine\n"); diff --git a/drivers/net/hyperv/netvsc_bpf.c b/drivers/net/hyperv/netvsc_bpf.c index 1e0c024b0a93..8e4141552423 100644 --- a/drivers/net/hyperv/netvsc_bpf.c +++ b/drivers/net/hyperv/netvsc_bpf.c @@ -50,7 +50,6 @@ u32 netvsc_run_xdp(struct net_device *ndev, struct netvsc_channel *nvchan, xdp->data_end = xdp->data + len; xdp->rxq = &nvchan->xdp_rxq; xdp->frame_sz = PAGE_SIZE; - xdp->handle = 0; memcpy(xdp->data, data, len); diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c index d343dc94cb48..55226b264e3c 100644 --- a/drivers/net/ipa/gsi.c +++ b/drivers/net/ipa/gsi.c @@ -1358,7 +1358,7 @@ static void gsi_channel_update(struct gsi_channel *channel) * gsi_channel_poll_one() - Return a single completed transaction on a channel * @channel: Channel to be 
polled * - * @Return: Transaction pointer, or null if none are available + * @Return: Transaction pointer, or null if none are available * * This function returns the first entry on a channel's completed transaction * list. If that list is empty, the hardware is consulted to determine @@ -1405,6 +1405,7 @@ static int gsi_channel_poll(struct napi_struct *napi, int budget) while (count < budget) { struct gsi_trans *trans; + count++; trans = gsi_channel_poll_one(channel); if (!trans) break; diff --git a/drivers/net/ipa/ipa_clock.c b/drivers/net/ipa/ipa_clock.c index 374491ea11cf..c5204fd58ac4 100644 --- a/drivers/net/ipa/ipa_clock.c +++ b/drivers/net/ipa/ipa_clock.c @@ -66,8 +66,8 @@ ipa_interconnect_init_one(struct device *dev, const char *name) path = of_icc_get(dev, name); if (IS_ERR(path)) - dev_err(dev, "error %ld getting memory interconnect\n", - PTR_ERR(path)); + dev_err(dev, "error %ld getting %s interconnect\n", + PTR_ERR(path), name); return path; } diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c index 82066a223a67..66649a806dd1 100644 --- a/drivers/net/ipa/ipa_endpoint.c +++ b/drivers/net/ipa/ipa_endpoint.c @@ -364,7 +364,7 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa) /* We need one command per modem TX endpoint. We can get an upper * bound on that by assuming all initialized endpoints are modem->IPA. * That won't happen, and we could be more precise, but this is fine - * for now. We need to end the transactio with a "tag process." + * for now. We need to end the transaction with a "tag process." */ count = hweight32(initialized) + ipa_cmd_tag_process_count(); trans = ipa_cmd_trans_alloc(ipa, count); diff --git a/drivers/net/ipa/ipa_endpoint.h b/drivers/net/ipa/ipa_endpoint.h index 3b297d65828e..58a245de488e 100644 --- a/drivers/net/ipa/ipa_endpoint.h +++ b/drivers/net/ipa/ipa_endpoint.h @@ -41,7 +41,6 @@ enum ipa_endpoint_name { /** * struct ipa_endpoint - IPA endpoint information - * @client: Client associated with the endpoint * @channel_id: EP's GSI channel * @evt_ring_id: EP's GSI channel event ring */ diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 9a419d5102ce..563aed5b3d9f 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -448,6 +448,10 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb) int ret; rx_handler_result_t handle_res; + /* Packets from dev_loopback_xmit() do not have L2 header, bail out */ + if (unlikely(skb->pkt_type == PACKET_LOOPBACK)) + return RX_HANDLER_PASS; + port = macvlan_port_get_rcu(skb->dev); if (is_multicast_ether_addr(eth->h_dest)) { unsigned int hash; diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c index 68668a22b9dd..dc3ff0e20944 100644 --- a/drivers/net/netdevsim/dev.c +++ b/drivers/net/netdevsim/dev.c @@ -858,8 +858,7 @@ nsim_dev_devlink_trap_policer_counter_get(struct devlink *devlink, return -EINVAL; cnt = &nsim_dev->trap_data->trap_policers_cnt_arr[policer->id - 1]; - *p_drops = *cnt; - *cnt += jiffies % 64; + *p_drops = (*cnt)++; return 0; } diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 2a32f26ead0b..047c27087b10 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -184,7 +184,8 @@ config MDIO_MSCC_MIIM depends on HAS_IOMEM help This driver supports the MIIM (MDIO) interface found in the network - switches of the Microsemi SoCs + switches of the Microsemi SoCs; it is recommended to switch on + CONFIG_HIGH_RES_TIMERS config MDIO_MVUSB tristate "Marvell USB to MDIO Adapter" diff 
--git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c index acd51b29a476..97cbe593f0ea 100644 --- a/drivers/net/phy/at803x.c +++ b/drivers/net/phy/at803x.c @@ -126,7 +126,7 @@ #define ATH8031_PHY_ID 0x004dd074 #define ATH8032_PHY_ID 0x004dd023 #define ATH8035_PHY_ID 0x004dd072 -#define AT803X_PHY_ID_MASK 0xffffffef +#define AT8030_PHY_ID_MASK 0xffffffef MODULE_DESCRIPTION("Qualcomm Atheros AR803x PHY driver"); MODULE_AUTHOR("Matus Ujhelyi"); @@ -920,10 +920,16 @@ static int at803x_cable_test_one_pair(struct phy_device *phydev, int pair) static int at803x_cable_test_get_status(struct phy_device *phydev, bool *finished) { - unsigned long pair_mask = 0xf; + unsigned long pair_mask; int retries = 20; int pair, ret; + if (phydev->phy_id == ATH9331_PHY_ID || + phydev->phy_id == ATH8032_PHY_ID) + pair_mask = 0x3; + else + pair_mask = 0xf; + *finished = false; /* According to the datasheet the CDT can be performed when @@ -958,7 +964,9 @@ static int at803x_cable_test_start(struct phy_device *phydev) */ phy_write(phydev, MII_BMCR, BMCR_ANENABLE); phy_write(phydev, MII_ADVERTISE, ADVERTISE_CSMA); - phy_write(phydev, MII_CTRL1000, 0); + if (phydev->phy_id != ATH9331_PHY_ID && + phydev->phy_id != ATH8032_PHY_ID) + phy_write(phydev, MII_CTRL1000, 0); /* we do all the (time consuming) work later */ return 0; @@ -967,9 +975,8 @@ static int at803x_cable_test_start(struct phy_device *phydev) static struct phy_driver at803x_driver[] = { { /* Qualcomm Atheros AR8035 */ - .phy_id = ATH8035_PHY_ID, + PHY_ID_MATCH_EXACT(ATH8035_PHY_ID), .name = "Qualcomm Atheros AR8035", - .phy_id_mask = AT803X_PHY_ID_MASK, .flags = PHY_POLL_CABLE_TEST, .probe = at803x_probe, .remove = at803x_remove, @@ -991,7 +998,7 @@ static struct phy_driver at803x_driver[] = { /* Qualcomm Atheros AR8030 */ .phy_id = ATH8030_PHY_ID, .name = "Qualcomm Atheros AR8030", - .phy_id_mask = AT803X_PHY_ID_MASK, + .phy_id_mask = AT8030_PHY_ID_MASK, .probe = at803x_probe, .remove = at803x_remove, .config_init = at803x_config_init, @@ -1005,9 +1012,8 @@ static struct phy_driver at803x_driver[] = { .config_intr = at803x_config_intr, }, { /* Qualcomm Atheros AR8031/AR8033 */ - .phy_id = ATH8031_PHY_ID, + PHY_ID_MATCH_EXACT(ATH8031_PHY_ID), .name = "Qualcomm Atheros AR8031/AR8033", - .phy_id_mask = AT803X_PHY_ID_MASK, .flags = PHY_POLL_CABLE_TEST, .probe = at803x_probe, .remove = at803x_remove, @@ -1032,6 +1038,7 @@ static struct phy_driver at803x_driver[] = { .name = "Qualcomm Atheros AR8032", .probe = at803x_probe, .remove = at803x_remove, + .flags = PHY_POLL_CABLE_TEST, .config_init = at803x_config_init, .link_change_notify = at803x_link_change_notify, .set_wol = at803x_set_wol, @@ -1041,24 +1048,29 @@ static struct phy_driver at803x_driver[] = { /* PHY_BASIC_FEATURES */ .ack_interrupt = at803x_ack_interrupt, .config_intr = at803x_config_intr, + .cable_test_start = at803x_cable_test_start, + .cable_test_get_status = at803x_cable_test_get_status, }, { /* ATHEROS AR9331 */ PHY_ID_MATCH_EXACT(ATH9331_PHY_ID), .name = "Qualcomm Atheros AR9331 built-in PHY", .suspend = at803x_suspend, .resume = at803x_resume, + .flags = PHY_POLL_CABLE_TEST, /* PHY_BASIC_FEATURES */ .ack_interrupt = &at803x_ack_interrupt, .config_intr = &at803x_config_intr, + .cable_test_start = at803x_cable_test_start, + .cable_test_get_status = at803x_cable_test_get_status, } }; module_phy_driver(at803x_driver); static struct mdio_device_id __maybe_unused atheros_tbl[] = { - { ATH8030_PHY_ID, AT803X_PHY_ID_MASK }, - { ATH8031_PHY_ID, AT803X_PHY_ID_MASK }, + { ATH8030_PHY_ID, 
AT8030_PHY_ID_MASK }, + { PHY_ID_MATCH_EXACT(ATH8031_PHY_ID) }, { PHY_ID_MATCH_EXACT(ATH8032_PHY_ID) }, - { ATH8035_PHY_ID, AT803X_PHY_ID_MASK }, + { PHY_ID_MATCH_EXACT(ATH8035_PHY_ID) }, { PHY_ID_MATCH_EXACT(ATH9331_PHY_ID) }, { } }; diff --git a/drivers/net/phy/bcm87xx.c b/drivers/net/phy/bcm87xx.c index f6dce6850850..df360e1c5069 100644 --- a/drivers/net/phy/bcm87xx.c +++ b/drivers/net/phy/bcm87xx.c @@ -55,7 +55,7 @@ static int bcm87xx_of_reg_init(struct phy_device *phydev) u16 mask = be32_to_cpup(paddr++); u16 val_bits = be32_to_cpup(paddr++); int val; - u32 regnum = MII_ADDR_C45 | (devid << 16) | reg; + u32 regnum = mdiobus_c45_addr(devid, reg); val = 0; if (mask) { val = phy_read(phydev, regnum); diff --git a/drivers/net/phy/cortina.c b/drivers/net/phy/cortina.c index aac51362c0fe..40514a94e6ff 100644 --- a/drivers/net/phy/cortina.c +++ b/drivers/net/phy/cortina.c @@ -17,8 +17,7 @@ static int cortina_read_reg(struct phy_device *phydev, u16 regnum) { - return mdiobus_read(phydev->mdio.bus, phydev->mdio.addr, - MII_ADDR_C45 | regnum); + return mdiobus_c45_read(phydev->mdio.bus, phydev->mdio.addr, 0, regnum); } static int cortina_read_status(struct phy_device *phydev) diff --git a/drivers/net/phy/dp83869.c b/drivers/net/phy/dp83869.c index 7996a4aea8d2..cfb22a21a2e6 100644 --- a/drivers/net/phy/dp83869.c +++ b/drivers/net/phy/dp83869.c @@ -65,7 +65,9 @@ #define DP83869_RGMII_RX_CLK_DELAY_EN BIT(0) /* STRAP_STS1 bits */ +#define DP83869_STRAP_OP_MODE_MASK GENMASK(2, 0) #define DP83869_STRAP_STS1_RESERVED BIT(11) +#define DP83869_STRAP_MIRROR_ENABLED BIT(12) /* PHYCTRL bits */ #define DP83869_RX_FIFO_SHIFT 12 @@ -160,6 +162,20 @@ static int dp83869_config_port_mirroring(struct phy_device *phydev) DP83869_CFG3_PORT_MIRROR_EN); } +static int dp83869_set_strapped_mode(struct phy_device *phydev) +{ + struct dp83869_private *dp83869 = phydev->priv; + int val; + + val = phy_read_mmd(phydev, DP83869_DEVADDR, DP83869_STRAP_STS1); + if (val < 0) + return val; + + dp83869->mode = val & DP83869_STRAP_OP_MODE_MASK; + + return 0; +} + #ifdef CONFIG_OF_MDIO static int dp83869_of_init(struct phy_device *phydev) { @@ -184,6 +200,10 @@ static int dp83869_of_init(struct phy_device *phydev) if (dp83869->mode < DP83869_RGMII_COPPER_ETHERNET || dp83869->mode > DP83869_SGMII_COPPER_ETHERNET) return -EINVAL; + } else { + ret = dp83869_set_strapped_mode(phydev); + if (ret) + return ret; } if (of_property_read_bool(of_node, "ti,max-output-impedance")) @@ -191,10 +211,18 @@ static int dp83869_of_init(struct phy_device *phydev) else if (of_property_read_bool(of_node, "ti,min-output-impedance")) dp83869->io_impedance = DP83869_IO_MUX_CFG_IO_IMPEDANCE_MIN; - if (of_property_read_bool(of_node, "enet-phy-lane-swap")) + if (of_property_read_bool(of_node, "enet-phy-lane-swap")) { dp83869->port_mirroring = DP83869_PORT_MIRRORING_EN; - else - dp83869->port_mirroring = DP83869_PORT_MIRRORING_DIS; + } else { + /* If the lane swap is not in the DT then check the straps */ + ret = phy_read_mmd(phydev, DP83869_DEVADDR, DP83869_STRAP_STS1); + if (ret < 0) + return ret; + if (ret & DP83869_STRAP_MIRROR_ENABLED) + dp83869->port_mirroring = DP83869_PORT_MIRRORING_EN; + else + dp83869->port_mirroring = DP83869_PORT_MIRRORING_DIS; + } if (of_property_read_u32(of_node, "rx-fifo-depth", &dp83869->rx_fifo_depth)) @@ -209,7 +237,7 @@ static int dp83869_of_init(struct phy_device *phydev) #else static int dp83869_of_init(struct phy_device *phydev) { - return 0; + return dp83869_set_strapped_mode(phydev); } #endif /* CONFIG_OF_MDIO */ diff 
--git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 4bc7febf9248..4ea226566cec 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -42,6 +42,7 @@ #define MII_MARVELL_FIBER_PAGE 0x01 #define MII_MARVELL_MSCR_PAGE 0x02 #define MII_MARVELL_LED_PAGE 0x03 +#define MII_MARVELL_VCT5_PAGE 0x05 #define MII_MARVELL_MISC_TEST_PAGE 0x06 #define MII_MARVELL_VCT7_PAGE 0x07 #define MII_MARVELL_WOL_PAGE 0x11 @@ -164,6 +165,60 @@ #define MII_88E1510_GEN_CTRL_REG_1_MODE_SGMII 0x1 /* SGMII to copper */ #define MII_88E1510_GEN_CTRL_REG_1_RESET 0x8000 /* Soft reset */ +#define MII_VCT5_TX_RX_MDI0_COUPLING 0x10 +#define MII_VCT5_TX_RX_MDI1_COUPLING 0x11 +#define MII_VCT5_TX_RX_MDI2_COUPLING 0x12 +#define MII_VCT5_TX_RX_MDI3_COUPLING 0x13 +#define MII_VCT5_TX_RX_AMPLITUDE_MASK 0x7f00 +#define MII_VCT5_TX_RX_AMPLITUDE_SHIFT 8 +#define MII_VCT5_TX_RX_COUPLING_POSITIVE_REFLECTION BIT(15) + +#define MII_VCT5_CTRL 0x17 +#define MII_VCT5_CTRL_ENABLE BIT(15) +#define MII_VCT5_CTRL_COMPLETE BIT(14) +#define MII_VCT5_CTRL_TX_SAME_CHANNEL (0x0 << 11) +#define MII_VCT5_CTRL_TX0_CHANNEL (0x4 << 11) +#define MII_VCT5_CTRL_TX1_CHANNEL (0x5 << 11) +#define MII_VCT5_CTRL_TX2_CHANNEL (0x6 << 11) +#define MII_VCT5_CTRL_TX3_CHANNEL (0x7 << 11) +#define MII_VCT5_CTRL_SAMPLES_2 (0x0 << 8) +#define MII_VCT5_CTRL_SAMPLES_4 (0x1 << 8) +#define MII_VCT5_CTRL_SAMPLES_8 (0x2 << 8) +#define MII_VCT5_CTRL_SAMPLES_16 (0x3 << 8) +#define MII_VCT5_CTRL_SAMPLES_32 (0x4 << 8) +#define MII_VCT5_CTRL_SAMPLES_64 (0x5 << 8) +#define MII_VCT5_CTRL_SAMPLES_128 (0x6 << 8) +#define MII_VCT5_CTRL_SAMPLES_DEFAULT (0x6 << 8) +#define MII_VCT5_CTRL_SAMPLES_256 (0x7 << 8) +#define MII_VCT5_CTRL_SAMPLES_SHIFT 8 +#define MII_VCT5_CTRL_MODE_MAXIMUM_PEEK (0x0 << 6) +#define MII_VCT5_CTRL_MODE_FIRST_LAST_PEEK (0x1 << 6) +#define MII_VCT5_CTRL_MODE_OFFSET (0x2 << 6) +#define MII_VCT5_CTRL_SAMPLE_POINT (0x3 << 6) +#define MII_VCT5_CTRL_PEEK_HYST_DEFAULT 3 + +#define MII_VCT5_SAMPLE_POINT_DISTANCE 0x18 +#define MII_VCT5_SAMPLE_POINT_DISTANCE_MAX 511 +#define MII_VCT5_TX_PULSE_CTRL 0x1c +#define MII_VCT5_TX_PULSE_CTRL_DONT_WAIT_LINK_DOWN BIT(12) +#define MII_VCT5_TX_PULSE_CTRL_PULSE_WIDTH_128nS (0x0 << 10) +#define MII_VCT5_TX_PULSE_CTRL_PULSE_WIDTH_96nS (0x1 << 10) +#define MII_VCT5_TX_PULSE_CTRL_PULSE_WIDTH_64nS (0x2 << 10) +#define MII_VCT5_TX_PULSE_CTRL_PULSE_WIDTH_32nS (0x3 << 10) +#define MII_VCT5_TX_PULSE_CTRL_PULSE_WIDTH_SHIFT 10 +#define MII_VCT5_TX_PULSE_CTRL_PULSE_AMPLITUDE_1000mV (0x0 << 8) +#define MII_VCT5_TX_PULSE_CTRL_PULSE_AMPLITUDE_750mV (0x1 << 8) +#define MII_VCT5_TX_PULSE_CTRL_PULSE_AMPLITUDE_500mV (0x2 << 8) +#define MII_VCT5_TX_PULSE_CTRL_PULSE_AMPLITUDE_250mV (0x3 << 8) +#define MII_VCT5_TX_PULSE_CTRL_PULSE_AMPLITUDE_SHIFT 8 +#define MII_VCT5_TX_PULSE_CTRL_MAX_AMP BIT(7) +#define MII_VCT5_TX_PULSE_CTRL_GT_140m_46_86mV (0x6 << 0) + +/* For TDR measurements less than 11 meters, a short pulse should be + * used. 
+ */ +#define TDR_SHORT_CABLE_LENGTH 11 + #define MII_VCT7_PAIR_0_DISTANCE 0x10 #define MII_VCT7_PAIR_1_DISTANCE 0x11 #define MII_VCT7_PAIR_2_DISTANCE 0x12 @@ -220,6 +275,11 @@ struct marvell_priv { u64 stats[ARRAY_SIZE(marvell_hw_stats)]; char *hwmon_name; struct device *hwmon_dev; + bool cable_test_tdr; + u32 first; + u32 last; + u32 step; + s8 pair; }; static int marvell_read_page(struct phy_device *phydev) @@ -1690,7 +1750,150 @@ static void marvell_get_stats(struct phy_device *phydev, data[i] = marvell_get_stat(phydev, i); } -static int marvell_vct7_cable_test_start(struct phy_device *phydev) +static int marvell_vct5_wait_complete(struct phy_device *phydev) +{ + int i; + int val; + + for (i = 0; i < 32; i++) { + val = __phy_read(phydev, MII_VCT5_CTRL); + if (val < 0) + return val; + + if (val & MII_VCT5_CTRL_COMPLETE) + return 0; + } + + phydev_err(phydev, "Timeout while waiting for cable test to finish\n"); + return -ETIMEDOUT; +} + +static int marvell_vct5_amplitude(struct phy_device *phydev, int pair) +{ + int amplitude; + int val; + int reg; + + reg = MII_VCT5_TX_RX_MDI0_COUPLING + pair; + val = __phy_read(phydev, reg); + + if (val < 0) + return 0; + + amplitude = (val & MII_VCT5_TX_RX_AMPLITUDE_MASK) >> + MII_VCT5_TX_RX_AMPLITUDE_SHIFT; + + if (!(val & MII_VCT5_TX_RX_COUPLING_POSITIVE_REFLECTION)) + amplitude = -amplitude; + + return 1000 * amplitude / 128; +} + +static u32 marvell_vct5_distance2cm(int distance) +{ + return distance * 805 / 10; +} + +static u32 marvell_vct5_cm2distance(int cm) +{ + return cm * 10 / 805; +} + +static int marvell_vct5_amplitude_distance(struct phy_device *phydev, + int distance, int pair) +{ + u16 reg; + int err; + int mV; + int i; + + err = __phy_write(phydev, MII_VCT5_SAMPLE_POINT_DISTANCE, + distance); + if (err) + return err; + + reg = MII_VCT5_CTRL_ENABLE | + MII_VCT5_CTRL_TX_SAME_CHANNEL | + MII_VCT5_CTRL_SAMPLES_DEFAULT | + MII_VCT5_CTRL_SAMPLE_POINT | + MII_VCT5_CTRL_PEEK_HYST_DEFAULT; + err = __phy_write(phydev, MII_VCT5_CTRL, reg); + if (err) + return err; + + err = marvell_vct5_wait_complete(phydev); + if (err) + return err; + + for (i = 0; i < 4; i++) { + if (pair != PHY_PAIR_ALL && i != pair) + continue; + + mV = marvell_vct5_amplitude(phydev, i); + ethnl_cable_test_amplitude(phydev, i, mV); + } + + return 0; +} + +static int marvell_vct5_amplitude_graph(struct phy_device *phydev) +{ + struct marvell_priv *priv = phydev->priv; + int distance; + u16 width; + int page; + int err; + u16 reg; + + if (priv->first <= TDR_SHORT_CABLE_LENGTH) + width = MII_VCT5_TX_PULSE_CTRL_PULSE_WIDTH_32nS; + else + width = MII_VCT5_TX_PULSE_CTRL_PULSE_WIDTH_128nS; + + reg = MII_VCT5_TX_PULSE_CTRL_GT_140m_46_86mV | + MII_VCT5_TX_PULSE_CTRL_DONT_WAIT_LINK_DOWN | + MII_VCT5_TX_PULSE_CTRL_MAX_AMP | width; + + err = phy_write_paged(phydev, MII_MARVELL_VCT5_PAGE, + MII_VCT5_TX_PULSE_CTRL, reg); + if (err) + return err; + + /* Reading the TDR data is very MDIO heavy. We need to optimize + * access to keep the time to a minimum. So lock the bus once, + * and don't release it until complete. We can then avoid having + * to change the page for every access, greatly speeding things + * up. 
+ */ + page = phy_select_page(phydev, MII_MARVELL_VCT5_PAGE); + if (page < 0) + goto restore_page; + + for (distance = priv->first; + distance <= priv->last; + distance += priv->step) { + err = marvell_vct5_amplitude_distance(phydev, distance, + priv->pair); + if (err) + goto restore_page; + + if (distance > TDR_SHORT_CABLE_LENGTH && + width == MII_VCT5_TX_PULSE_CTRL_PULSE_WIDTH_32nS) { + width = MII_VCT5_TX_PULSE_CTRL_PULSE_WIDTH_128nS; + reg = MII_VCT5_TX_PULSE_CTRL_GT_140m_46_86mV | + MII_VCT5_TX_PULSE_CTRL_DONT_WAIT_LINK_DOWN | + MII_VCT5_TX_PULSE_CTRL_MAX_AMP | width; + err = __phy_write(phydev, MII_VCT5_TX_PULSE_CTRL, reg); + if (err) + goto restore_page; + } + } + +restore_page: + return phy_restore_page(phydev, page, err); +} + +static int marvell_cable_test_start_common(struct phy_device *phydev) { int bmcr, bmsr, ret; @@ -1719,12 +1922,81 @@ static int marvell_vct7_cable_test_start(struct phy_device *phydev) if (bmsr & BMSR_LSTATUS) msleep(1500); + return 0; +} + +static int marvell_vct7_cable_test_start(struct phy_device *phydev) +{ + struct marvell_priv *priv = phydev->priv; + int ret; + + ret = marvell_cable_test_start_common(phydev); + if (ret) + return ret; + + priv->cable_test_tdr = false; + + /* Reset the VCT5 API control to defaults, otherwise + * VCT7 does not work correctly. + */ + ret = phy_write_paged(phydev, MII_MARVELL_VCT5_PAGE, + MII_VCT5_CTRL, + MII_VCT5_CTRL_TX_SAME_CHANNEL | + MII_VCT5_CTRL_SAMPLES_DEFAULT | + MII_VCT5_CTRL_MODE_MAXIMUM_PEEK | + MII_VCT5_CTRL_PEEK_HYST_DEFAULT); + if (ret) + return ret; + + ret = phy_write_paged(phydev, MII_MARVELL_VCT5_PAGE, + MII_VCT5_SAMPLE_POINT_DISTANCE, 0); + if (ret) + return ret; + return phy_write_paged(phydev, MII_MARVELL_VCT7_PAGE, MII_VCT7_CTRL, MII_VCT7_CTRL_RUN_NOW | MII_VCT7_CTRL_CENTIMETERS); } +static int marvell_vct5_cable_test_tdr_start(struct phy_device *phydev, + const struct phy_tdr_config *cfg) +{ + struct marvell_priv *priv = phydev->priv; + int ret; + + priv->cable_test_tdr = true; + priv->first = marvell_vct5_cm2distance(cfg->first); + priv->last = marvell_vct5_cm2distance(cfg->last); + priv->step = marvell_vct5_cm2distance(cfg->step); + priv->pair = cfg->pair; + + if (priv->first > MII_VCT5_SAMPLE_POINT_DISTANCE_MAX) + return -EINVAL; + + if (priv->last > MII_VCT5_SAMPLE_POINT_DISTANCE_MAX) + return -EINVAL; + + /* Disable VCT7 */ + ret = phy_write_paged(phydev, MII_MARVELL_VCT7_PAGE, + MII_VCT7_CTRL, 0); + if (ret) + return ret; + + ret = marvell_cable_test_start_common(phydev); + if (ret) + return ret; + + ret = ethnl_cable_test_pulse(phydev, 1000); + if (ret) + return ret; + + return ethnl_cable_test_step(phydev, + marvell_vct5_distance2cm(priv->first), + marvell_vct5_distance2cm(priv->last), + marvell_vct5_distance2cm(priv->step)); +} + static int marvell_vct7_distance_to_length(int distance, bool meter) { if (meter) @@ -1828,8 +2100,15 @@ static int marvell_vct7_cable_test_report(struct phy_device *phydev) static int marvell_vct7_cable_test_get_status(struct phy_device *phydev, bool *finished) { + struct marvell_priv *priv = phydev->priv; int ret; + if (priv->cable_test_tdr) { + ret = marvell_vct5_amplitude_graph(phydev); + *finished = true; + return ret; + } + *finished = false; ret = phy_read_paged(phydev, MII_MARVELL_VCT7_PAGE, @@ -2563,6 +2842,7 @@ static struct phy_driver marvell_drivers[] = { .get_tunable = m88e1011_get_tunable, .set_tunable = m88e1011_set_tunable, .cable_test_start = marvell_vct7_cable_test_start, + .cable_test_tdr_start = marvell_vct5_cable_test_tdr_start, 
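The TDR sweep above steps MII_VCT5_SAMPLE_POINT_DISTANCE through the requested range, so the ethtool-facing centimetre values have to be converted to register units and back. The 805/10 scaling means one register step is treated as 80.5 cm of cable, so the register maximum of 511 steps covers roughly 411 m. A standalone illustration of the conversions and of the signed amplitude decode used per pair (the register value in main() is made up for the demo):

        #include <stdio.h>
        #include <stdint.h>

        /* Mirrors marvell_vct5_distance2cm()/_cm2distance(): one register
         * step corresponds to 80.5 cm of cable.
         */
        static uint32_t vct5_distance2cm(int distance)
        {
                return distance * 805 / 10;
        }

        static uint32_t vct5_cm2distance(int cm)
        {
                return cm * 10 / 805;
        }

        /* Mirrors marvell_vct5_amplitude(): bits 14:8 hold the magnitude,
         * bit 15 the reflection polarity; the result is scaled to mV.
         */
        static int vct5_amplitude_mv(uint16_t reg)
        {
                int amplitude = (reg & 0x7f00) >> 8;

                if (!(reg & (1 << 15)))
                        amplitude = -amplitude;
                return 1000 * amplitude / 128;
        }

        int main(void)
        {
                printf("100 m -> %u register steps\n", vct5_cm2distance(10000));
                printf("124 steps -> %u cm\n", vct5_distance2cm(124));
                printf("reg 0x8a00 -> %d mV\n", vct5_amplitude_mv(0x8a00));
                return 0;
        }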
.cable_test_get_status = marvell_vct7_cable_test_get_status, }, { @@ -2588,6 +2868,7 @@ static struct phy_driver marvell_drivers[] = { .get_tunable = m88e1540_get_tunable, .set_tunable = m88e1540_set_tunable, .cable_test_start = marvell_vct7_cable_test_start, + .cable_test_tdr_start = marvell_vct5_cable_test_tdr_start, .cable_test_get_status = marvell_vct7_cable_test_get_status, }, { @@ -2613,6 +2894,7 @@ static struct phy_driver marvell_drivers[] = { .get_tunable = m88e1540_get_tunable, .set_tunable = m88e1540_set_tunable, .cable_test_start = marvell_vct7_cable_test_start, + .cable_test_tdr_start = marvell_vct5_cable_test_tdr_start, .cable_test_get_status = marvell_vct7_cable_test_get_status, }, { @@ -2658,6 +2940,7 @@ static struct phy_driver marvell_drivers[] = { .get_tunable = m88e1540_get_tunable, .set_tunable = m88e1540_set_tunable, .cable_test_start = marvell_vct7_cable_test_start, + .cable_test_tdr_start = marvell_vct5_cable_test_tdr_start, .cable_test_get_status = marvell_vct7_cable_test_get_status, }, }; diff --git a/drivers/net/phy/mdio-mscc-miim.c b/drivers/net/phy/mdio-mscc-miim.c index badbc99bedd3..11f583fd4611 100644 --- a/drivers/net/phy/mdio-mscc-miim.c +++ b/drivers/net/phy/mdio-mscc-miim.c @@ -16,6 +16,7 @@ #include <linux/of_mdio.h> #define MSCC_MIIM_REG_STATUS 0x0 +#define MSCC_MIIM_STATUS_STAT_PENDING BIT(2) #define MSCC_MIIM_STATUS_STAT_BUSY BIT(3) #define MSCC_MIIM_REG_CMD 0x8 #define MSCC_MIIM_CMD_OPR_WRITE BIT(1) @@ -38,17 +39,35 @@ struct mscc_miim_dev { void __iomem *phy_regs; }; +/* When high resolution timers aren't built-in: we can't use usleep_range() as + * we would sleep way too long. Use udelay() instead. + */ +#define mscc_readl_poll_timeout(addr, val, cond, delay_us, timeout_us) \ +({ \ + if (!IS_ENABLED(CONFIG_HIGH_RES_TIMERS)) \ + readl_poll_timeout_atomic(addr, val, cond, delay_us, \ + timeout_us); \ + readl_poll_timeout(addr, val, cond, delay_us, timeout_us); \ +}) + static int mscc_miim_wait_ready(struct mii_bus *bus) { struct mscc_miim_dev *miim = bus->priv; u32 val; - readl_poll_timeout(miim->regs + MSCC_MIIM_REG_STATUS, val, - !(val & MSCC_MIIM_STATUS_STAT_BUSY), 100, 250000); - if (val & MSCC_MIIM_STATUS_STAT_BUSY) - return -ETIMEDOUT; + return mscc_readl_poll_timeout(miim->regs + MSCC_MIIM_REG_STATUS, val, + !(val & MSCC_MIIM_STATUS_STAT_BUSY), 50, + 10000); +} - return 0; +static int mscc_miim_wait_pending(struct mii_bus *bus) +{ + struct mscc_miim_dev *miim = bus->priv; + u32 val; + + return mscc_readl_poll_timeout(miim->regs + MSCC_MIIM_REG_STATUS, val, + !(val & MSCC_MIIM_STATUS_STAT_PENDING), + 50, 10000); } static int mscc_miim_read(struct mii_bus *bus, int mii_id, int regnum) @@ -57,7 +76,7 @@ static int mscc_miim_read(struct mii_bus *bus, int mii_id, int regnum) u32 val; int ret; - ret = mscc_miim_wait_ready(bus); + ret = mscc_miim_wait_pending(bus); if (ret) goto out; @@ -86,7 +105,7 @@ static int mscc_miim_write(struct mii_bus *bus, int mii_id, struct mscc_miim_dev *miim = bus->priv; int ret; - ret = mscc_miim_wait_ready(bus); + ret = mscc_miim_wait_pending(bus); if (ret < 0) goto out; diff --git a/drivers/net/phy/mscc/mscc.h b/drivers/net/phy/mscc/mscc.h index acdd8ee61a39..f828c917b9f7 100644 --- a/drivers/net/phy/mscc/mscc.h +++ b/drivers/net/phy/mscc/mscc.h @@ -353,6 +353,8 @@ struct vsc8531_private { const struct vsc85xx_hw_stat *hw_stats; u64 *stats; int nstats; + /* PHY address within the package. */ + u8 addr; /* For multiple port PHYs; the MDIO address of the base PHY in the * package. 
*/ diff --git a/drivers/net/phy/mscc/mscc_mac.h b/drivers/net/phy/mscc/mscc_mac.h index fcb5ba5e5d03..59b6837c60b3 100644 --- a/drivers/net/phy/mscc/mscc_mac.h +++ b/drivers/net/phy/mscc/mscc_mac.h @@ -152,8 +152,8 @@ #define MSCC_MAC_PAUSE_CFG_STATE_PAUSE_STATE BIT(0) #define MSCC_MAC_PAUSE_CFG_STATE_MAC_TX_PAUSE_GEN BIT(4) -#define MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL 0x2 -#define MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE(x) (x) -#define MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE_M GENMASK(2, 0) +#define MSCC_PROC_IP_1588_TOP_CFG_STAT_MODE_CTL 0x2 +#define MSCC_PROC_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE(x) (x) +#define MSCC_PROC_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE_M GENMASK(2, 0) #endif /* _MSCC_PHY_LINE_MAC_H_ */ diff --git a/drivers/net/phy/mscc/mscc_macsec.c b/drivers/net/phy/mscc/mscc_macsec.c index e99e2cd72a0c..b4d3dc4068e2 100644 --- a/drivers/net/phy/mscc/mscc_macsec.c +++ b/drivers/net/phy/mscc/mscc_macsec.c @@ -316,6 +316,8 @@ static void vsc8584_macsec_mac_init(struct phy_device *phydev, /* Must be called with mdio_lock taken */ static int __vsc8584_macsec_init(struct phy_device *phydev) { + struct vsc8531_private *priv = phydev->priv; + enum macsec_bank proc_bank; u32 val; vsc8584_macsec_block_init(phydev, MACSEC_INGR); @@ -351,12 +353,14 @@ static int __vsc8584_macsec_init(struct phy_device *phydev) val |= MSCC_FCBUF_ENA_CFG_TX_ENA | MSCC_FCBUF_ENA_CFG_RX_ENA; vsc8584_macsec_phy_write(phydev, FC_BUFFER, MSCC_FCBUF_ENA_CFG, val); - val = vsc8584_macsec_phy_read(phydev, IP_1588, - MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL); - val &= ~MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE_M; - val |= MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE(4); - vsc8584_macsec_phy_write(phydev, IP_1588, - MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL, val); + proc_bank = (priv->addr < 2) ? 
PROC_0 : PROC_2; + + val = vsc8584_macsec_phy_read(phydev, proc_bank, + MSCC_PROC_IP_1588_TOP_CFG_STAT_MODE_CTL); + val &= ~MSCC_PROC_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE_M; + val |= MSCC_PROC_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE(4); + vsc8584_macsec_phy_write(phydev, proc_bank, + MSCC_PROC_IP_1588_TOP_CFG_STAT_MODE_CTL, val); return 0; } diff --git a/drivers/net/phy/mscc/mscc_macsec.h b/drivers/net/phy/mscc/mscc_macsec.h index d0783944d106..d751f2946b79 100644 --- a/drivers/net/phy/mscc/mscc_macsec.h +++ b/drivers/net/phy/mscc/mscc_macsec.h @@ -64,7 +64,8 @@ enum macsec_bank { FC_BUFFER = 0x04, HOST_MAC = 0x05, LINE_MAC = 0x06, - IP_1588 = 0x0e, + PROC_0 = 0x0e, + PROC_2 = 0x0f, MACSEC_INGR = 0x38, MACSEC_EGR = 0x3c, }; diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c index 6508d6536134..7ed0285206d0 100644 --- a/drivers/net/phy/mscc/mscc_main.c +++ b/drivers/net/phy/mscc/mscc_main.c @@ -1303,6 +1303,8 @@ static void vsc8584_get_base_addr(struct phy_device *phydev) vsc8531->base_addr = phydev->mdio.addr + addr; else vsc8531->base_addr = phydev->mdio.addr - addr; + + vsc8531->addr = addr; } static int vsc8584_config_init(struct phy_device *phydev) @@ -1975,6 +1977,10 @@ static int vsc8574_probe(struct phy_device *phydev) phydev->priv = vsc8531; + vsc8584_get_base_addr(phydev); + devm_phy_package_join(&phydev->mdio.dev, phydev, + vsc8531->base_addr, 0); + vsc8531->nleds = 4; vsc8531->supp_led_modes = VSC8584_SUPP_LED_MODES; vsc8531->hw_stats = vsc8584_hw_stats; diff --git a/drivers/net/phy/nxp-tja11xx.c b/drivers/net/phy/nxp-tja11xx.c index 0d4f9067ca71..a72fa0d2e7c7 100644 --- a/drivers/net/phy/nxp-tja11xx.c +++ b/drivers/net/phy/nxp-tja11xx.c @@ -53,6 +53,8 @@ #define MII_COMMSTAT 23 #define MII_COMMSTAT_LINK_UP BIT(15) +#define MII_COMMSTAT_SQI_STATE GENMASK(7, 5) +#define MII_COMMSTAT_SQI_MAX 7 #define MII_GENSTAT 24 #define MII_GENSTAT_PLL_LOCKED BIT(14) @@ -192,7 +194,7 @@ static int tja11xx_config_aneg_cable_test(struct phy_device *phydev) !phydev->drv->cable_test_get_status) return 0; - ret = ethnl_cable_test_alloc(phydev); + ret = ethnl_cable_test_alloc(phydev, ETHTOOL_MSG_CABLE_TEST_NTF); if (ret) return ret; @@ -329,6 +331,22 @@ static int tja11xx_read_status(struct phy_device *phydev) return 0; } +static int tja11xx_get_sqi(struct phy_device *phydev) +{ + int ret; + + ret = phy_read(phydev, MII_COMMSTAT); + if (ret < 0) + return ret; + + return FIELD_GET(MII_COMMSTAT_SQI_STATE, ret); +} + +static int tja11xx_get_sqi_max(struct phy_device *phydev) +{ + return MII_COMMSTAT_SQI_MAX; +} + static int tja11xx_get_sset_count(struct phy_device *phydev) { return ARRAY_SIZE(tja11xx_hw_stats); @@ -683,6 +701,8 @@ static struct phy_driver tja11xx_driver[] = { .config_aneg = tja11xx_config_aneg, .config_init = tja11xx_config_init, .read_status = tja11xx_read_status, + .get_sqi = tja11xx_get_sqi, + .get_sqi_max = tja11xx_get_sqi_max, .suspend = genphy_suspend, .resume = genphy_resume, .set_loopback = genphy_loopback, @@ -699,6 +719,8 @@ static struct phy_driver tja11xx_driver[] = { .config_aneg = tja11xx_config_aneg, .config_init = tja11xx_config_init, .read_status = tja11xx_read_status, + .get_sqi = tja11xx_get_sqi, + .get_sqi_max = tja11xx_get_sqi_max, .suspend = genphy_suspend, .resume = genphy_resume, .set_loopback = genphy_loopback, @@ -715,6 +737,8 @@ static struct phy_driver tja11xx_driver[] = { .config_aneg = tja11xx_config_aneg, .config_init = tja11xx_config_init, .read_status = tja11xx_read_status, + .get_sqi = tja11xx_get_sqi, + 
.get_sqi_max = tja11xx_get_sqi_max, .match_phy_device = tja1102_p0_match_phy_device, .suspend = genphy_suspend, .resume = genphy_resume, @@ -736,6 +760,8 @@ static struct phy_driver tja11xx_driver[] = { .config_aneg = tja11xx_config_aneg, .config_init = tja11xx_config_init, .read_status = tja11xx_read_status, + .get_sqi = tja11xx_get_sqi, + .get_sqi_max = tja11xx_get_sqi_max, .match_phy_device = tja1102_p1_match_phy_device, .suspend = genphy_suspend, .resume = genphy_resume, diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c index 66b8c61ca74c..46bd68e9ecfa 100644 --- a/drivers/net/phy/phy-core.c +++ b/drivers/net/phy/phy-core.c @@ -428,9 +428,8 @@ int __phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum) if (phydev->drv && phydev->drv->read_mmd) { val = phydev->drv->read_mmd(phydev, devad, regnum); } else if (phydev->is_c45) { - u32 addr = MII_ADDR_C45 | (devad << 16) | (regnum & 0xffff); - - val = __mdiobus_read(phydev->mdio.bus, phydev->mdio.addr, addr); + val = __mdiobus_c45_read(phydev->mdio.bus, phydev->mdio.addr, + devad, regnum); } else { struct mii_bus *bus = phydev->mdio.bus; int phy_addr = phydev->mdio.addr; @@ -485,10 +484,8 @@ int __phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val) if (phydev->drv && phydev->drv->write_mmd) { ret = phydev->drv->write_mmd(phydev, devad, regnum, val); } else if (phydev->is_c45) { - u32 addr = MII_ADDR_C45 | (devad << 16) | (regnum & 0xffff); - - ret = __mdiobus_write(phydev->mdio.bus, phydev->mdio.addr, - addr, val); + ret = __mdiobus_c45_write(phydev->mdio.bus, phydev->mdio.addr, + devad, regnum, val); } else { struct mii_bus *bus = phydev->mdio.bus; int phy_addr = phydev->mdio.addr; diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index d4bbf79dab6c..1de3938628f4 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -58,13 +58,13 @@ static const char *phy_state_to_str(enum phy_state st) static void phy_link_up(struct phy_device *phydev) { - phydev->phy_link_change(phydev, true, true); + phydev->phy_link_change(phydev, true); phy_led_trigger_change_speed(phydev); } -static void phy_link_down(struct phy_device *phydev, bool do_carrier) +static void phy_link_down(struct phy_device *phydev) { - phydev->phy_link_change(phydev, false, do_carrier); + phydev->phy_link_change(phydev, false); phy_led_trigger_change_speed(phydev); } @@ -361,7 +361,7 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd) if (mdio_phy_id_is_c45(mii_data->phy_id)) { prtad = mdio_phy_id_prtad(mii_data->phy_id); devad = mdio_phy_id_devad(mii_data->phy_id); - devad = MII_ADDR_C45 | devad << 16 | mii_data->reg_num; + devad = mdiobus_c45_addr(devad, mii_data->reg_num); } else { prtad = mii_data->phy_id; devad = mii_data->reg_num; @@ -374,7 +374,7 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd) if (mdio_phy_id_is_c45(mii_data->phy_id)) { prtad = mdio_phy_id_prtad(mii_data->phy_id); devad = mdio_phy_id_devad(mii_data->phy_id); - devad = MII_ADDR_C45 | devad << 16 | mii_data->reg_num; + devad = mdiobus_c45_addr(devad, mii_data->reg_num); } else { prtad = mii_data->phy_id; devad = mii_data->reg_num; @@ -519,12 +519,12 @@ int phy_start_cable_test(struct phy_device *phydev, goto out; } - err = ethnl_cable_test_alloc(phydev); + err = ethnl_cable_test_alloc(phydev, ETHTOOL_MSG_CABLE_TEST_NTF); if (err) goto out; /* Mark the carrier down until the test is complete */ - phy_link_down(phydev, true); + phy_link_down(phydev); netif_testing_on(dev); err = 
phydev->drv->cable_test_start(phydev); @@ -552,6 +552,70 @@ out: } EXPORT_SYMBOL(phy_start_cable_test); +int phy_start_cable_test_tdr(struct phy_device *phydev, + struct netlink_ext_ack *extack, + const struct phy_tdr_config *config) +{ + struct net_device *dev = phydev->attached_dev; + int err = -ENOMEM; + + if (!(phydev->drv && + phydev->drv->cable_test_tdr_start && + phydev->drv->cable_test_get_status)) { + NL_SET_ERR_MSG(extack, + "PHY driver does not support cable test TDR"); + return -EOPNOTSUPP; + } + + mutex_lock(&phydev->lock); + if (phydev->state == PHY_CABLETEST) { + NL_SET_ERR_MSG(extack, + "PHY already performing a test"); + err = -EBUSY; + goto out; + } + + if (phydev->state < PHY_UP || + phydev->state > PHY_CABLETEST) { + NL_SET_ERR_MSG(extack, + "PHY not configured. Try setting interface up"); + err = -EBUSY; + goto out; + } + + err = ethnl_cable_test_alloc(phydev, ETHTOOL_MSG_CABLE_TEST_TDR_NTF); + if (err) + goto out; + + /* Mark the carrier down until the test is complete */ + phy_link_down(phydev); + + netif_testing_on(dev); + err = phydev->drv->cable_test_tdr_start(phydev, config); + if (err) { + netif_testing_off(dev); + phy_link_up(phydev); + goto out_free; + } + + phydev->state = PHY_CABLETEST; + + if (phy_polling_mode(phydev)) + phy_trigger_machine(phydev); + + mutex_unlock(&phydev->lock); + + return 0; + +out_free: + ethnl_cable_test_free(phydev); +out: + mutex_unlock(&phydev->lock); + + return err; +} +EXPORT_SYMBOL(phy_start_cable_test_tdr); + static int phy_config_aneg(struct phy_device *phydev) { if (phydev->drv->config_aneg) @@ -595,7 +659,7 @@ static int phy_check_link_status(struct phy_device *phydev) phy_link_up(phydev); } else if (!phydev->link && phydev->state != PHY_NOLINK) { phydev->state = PHY_NOLINK; - phy_link_down(phydev, true); + phy_link_down(phydev); } return 0; @@ -999,7 +1063,7 @@ void phy_state_machine(struct work_struct *work) case PHY_HALTED: if (phydev->link) { phydev->link = 0; - phy_link_down(phydev, true); + phy_link_down(phydev); } do_suspend = true; break; diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index c3a107cf578e..04946de74fa0 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -675,16 +675,14 @@ EXPORT_SYMBOL(phy_device_create); static int get_phy_c45_devs_in_pkg(struct mii_bus *bus, int addr, int dev_addr, u32 *devices_in_package) { - int phy_reg, reg_addr; + int phy_reg; - reg_addr = MII_ADDR_C45 | dev_addr << 16 | MDIO_DEVS2; - phy_reg = mdiobus_read(bus, addr, reg_addr); + phy_reg = mdiobus_c45_read(bus, addr, dev_addr, MDIO_DEVS2); if (phy_reg < 0) return -EIO; *devices_in_package = phy_reg << 16; - reg_addr = MII_ADDR_C45 | dev_addr << 16 | MDIO_DEVS1; - phy_reg = mdiobus_read(bus, addr, reg_addr); + phy_reg = mdiobus_c45_read(bus, addr, dev_addr, MDIO_DEVS1); if (phy_reg < 0) return -EIO; *devices_in_package |= phy_reg; @@ -709,11 +707,11 @@ static int get_phy_c45_devs_in_pkg(struct mii_bus *bus, int addr, int dev_addr, * */ static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id, - struct phy_c45_device_ids *c45_ids) { - int phy_reg; - int i, reg_addr; + struct phy_c45_device_ids *c45_ids) +{ const int num_ids = ARRAY_SIZE(c45_ids->device_ids); u32 *devs = &c45_ids->devices_in_package; + int i, phy_reg; /* Find first non-zero Devices In package. Device zero is reserved * for 802.3 c45 complied PHYs, so don't probe it at first. 
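Several hunks in this series (bcm87xx, cortina, phy-core, phy.c, phylink, phy_device) stop open-coding MII_ADDR_C45 | devad << 16 | regnum and call the new mdiobus_c45_addr() and mdiobus_c45_read() helpers instead. The flat address format itself is unchanged; only the packing is centralized. A standalone illustration of the encoding, assuming the kernel's MII_ADDR_C45 flag value of bit 30:

        #include <stdio.h>
        #include <stdint.h>

        #define MII_ADDR_C45    (1 << 30)       /* flag: clause-45 flat address */

        /* Same packing as the kernel helper the hunks above switch to:
         * the 5-bit MMD device address lands in bits 20:16, the 16-bit
         * register number in the low word.
         */
        static uint32_t mdiobus_c45_addr(int devad, uint16_t regnum)
        {
                return MII_ADDR_C45 | (uint32_t)devad << 16 | regnum;
        }

        int main(void)
        {
                /* MDIO_MMD_PCS is device 3, MDIO_STAT1 is register 1 */
                printf("0x%08x\n", mdiobus_c45_addr(3, 1)); /* 0x40030001 */
                return 0;
        }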
@@ -747,14 +745,12 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id, if (!(c45_ids->devices_in_package & (1 << i))) continue; - reg_addr = MII_ADDR_C45 | i << 16 | MII_PHYSID1; - phy_reg = mdiobus_read(bus, addr, reg_addr); + phy_reg = mdiobus_c45_read(bus, addr, i, MII_PHYSID1); if (phy_reg < 0) return -EIO; c45_ids->device_ids[i] = phy_reg << 16; - reg_addr = MII_ADDR_C45 | i << 16 | MII_PHYSID2; - phy_reg = mdiobus_read(bus, addr, reg_addr); + phy_reg = mdiobus_c45_read(bus, addr, i, MII_PHYSID2); if (phy_reg < 0) return -EIO; c45_ids->device_ids[i] |= phy_reg; @@ -916,16 +912,14 @@ struct phy_device *phy_find_first(struct mii_bus *bus) } EXPORT_SYMBOL(phy_find_first); -static void phy_link_change(struct phy_device *phydev, bool up, bool do_carrier) +static void phy_link_change(struct phy_device *phydev, bool up) { struct net_device *netdev = phydev->attached_dev; - if (do_carrier) { - if (up) - netif_carrier_on(netdev); - else - netif_carrier_off(netdev); - } + if (up) + netif_carrier_on(netdev); + else + netif_carrier_off(netdev); phydev->adjust_link(netdev); if (phydev->mii_ts && phydev->mii_ts->link_state) phydev->mii_ts->link_state(phydev->mii_ts, phydev); @@ -1237,7 +1231,7 @@ int phy_sfp_probe(struct phy_device *phydev, const struct sfp_upstream_ops *ops) { struct sfp_bus *bus; - int ret; + int ret = 0; if (phydev->mdio.dev.fwnode) { bus = sfp_bus_find_fwnode(phydev->mdio.dev.fwnode); @@ -1249,7 +1243,7 @@ int phy_sfp_probe(struct phy_device *phydev, ret = sfp_bus_add_upstream(bus, phydev, ops); sfp_bus_put(bus); } - return 0; + return ret; } EXPORT_SYMBOL(phy_sfp_probe); diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 0f23bec431c1..0ab65fb75258 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -803,8 +803,7 @@ void phylink_destroy(struct phylink *pl) } EXPORT_SYMBOL_GPL(phylink_destroy); -static void phylink_phy_change(struct phy_device *phydev, bool up, - bool do_carrier) +static void phylink_phy_change(struct phy_device *phydev, bool up) { struct phylink *pl = phydev->phylink; bool tx_pause, rx_pause; @@ -1632,7 +1631,7 @@ static int phylink_phy_read(struct phylink *pl, unsigned int phy_id, if (mdio_phy_id_is_c45(phy_id)) { prtad = mdio_phy_id_prtad(phy_id); devad = mdio_phy_id_devad(phy_id); - devad = MII_ADDR_C45 | devad << 16 | reg; + devad = mdiobus_c45_addr(devad, reg); } else if (phydev->is_c45) { switch (reg) { case MII_BMCR: @@ -1655,7 +1654,7 @@ static int phylink_phy_read(struct phylink *pl, unsigned int phy_id, return -EINVAL; } prtad = phy_id; - devad = MII_ADDR_C45 | devad << 16 | reg; + devad = mdiobus_c45_addr(devad, reg); } else { prtad = phy_id; devad = reg; @@ -1672,7 +1671,7 @@ static int phylink_phy_write(struct phylink *pl, unsigned int phy_id, if (mdio_phy_id_is_c45(phy_id)) { prtad = mdio_phy_id_prtad(phy_id); devad = mdio_phy_id_devad(phy_id); - devad = MII_ADDR_C45 | devad << 16 | reg; + devad = mdiobus_c45_addr(devad, reg); } else if (phydev->is_c45) { switch (reg) { case MII_BMCR: @@ -1695,7 +1694,7 @@ static int phylink_phy_write(struct phylink *pl, unsigned int phy_id, return -EINVAL; } prtad = phy_id; - devad = MII_ADDR_C45 | devad << 16 | reg; + devad = mdiobus_c45_addr(devad, reg); } else { prtad = phy_id; devad = reg; @@ -2293,7 +2292,6 @@ void phylink_mii_c22_pcs_an_restart(struct mdio_device *pcs) } EXPORT_SYMBOL_GPL(phylink_mii_c22_pcs_an_restart); -#define C45_ADDR(d,a) (MII_ADDR_C45 | (d) << 16 | (a)) void phylink_mii_c45_pcs_get_state(struct mdio_device *pcs, struct 
phylink_link_state *state) { @@ -2301,7 +2299,7 @@ void phylink_mii_c45_pcs_get_state(struct mdio_device *pcs, int addr = pcs->addr; int stat; - stat = mdiobus_read(bus, addr, C45_ADDR(MDIO_MMD_PCS, MDIO_STAT1)); + stat = mdiobus_c45_read(bus, addr, MDIO_MMD_PCS, MDIO_STAT1); if (stat < 0) { state->link = false; return; diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 0cdb2ce47645..a657943c9f01 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -815,14 +815,21 @@ static const struct usb_device_id products[] = { .driver_info = 0, }, -/* Microsoft Surface 3 dock (based on Realtek RTL8153) */ +/* Microsoft Surface Ethernet Adapter (based on Realtek RTL8153) */ { USB_DEVICE_AND_INTERFACE_INFO(MICROSOFT_VENDOR_ID, 0x07c6, USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), .driver_info = 0, }, - /* TP-LINK UE300 USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */ +/* Microsoft Surface Ethernet Adapter (based on Realtek RTL8153B) */ +{ + USB_DEVICE_AND_INTERFACE_INFO(MICROSOFT_VENDOR_ID, 0x0927, USB_CLASS_COMM, + USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), + .driver_info = 0, +}, + +/* TP-LINK UE300 USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */ { USB_DEVICE_AND_INTERFACE_INFO(TPLINK_VENDOR_ID, 0x0601, USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index b0eab6e5279d..31b1d4b959f6 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1324,6 +1324,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */ {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1031, 3)}, /* Telit LE910C1-EUX */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */ {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */ diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 1af72ec284ca..7d39f998535d 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -6884,6 +6884,7 @@ static const struct usb_device_id rtl8152_table[] = { {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)}, {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07ab)}, {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07c6)}, + {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0927)}, {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)}, {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f)}, {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3062)}, diff --git a/drivers/net/vmxnet3/Makefile b/drivers/net/vmxnet3/Makefile index 8cdbb63d1bb0..c5a167a1c85c 100644 --- a/drivers/net/vmxnet3/Makefile +++ b/drivers/net/vmxnet3/Makefile @@ -2,7 +2,7 @@ # # Linux driver for VMware's vmxnet3 ethernet NIC. # -# Copyright (C) 2007-2016, VMware, Inc. All Rights Reserved. +# Copyright (C) 2007-2020, VMware, Inc. All Rights Reserved. # # This program is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the diff --git a/drivers/net/vmxnet3/upt1_defs.h b/drivers/net/vmxnet3/upt1_defs.h index db9f1fde3aac..8c014c98471c 100644 --- a/drivers/net/vmxnet3/upt1_defs.h +++ b/drivers/net/vmxnet3/upt1_defs.h @@ -1,7 +1,7 @@ /* * Linux driver for VMware's vmxnet3 ethernet NIC. * - * Copyright (C) 2008-2016, VMware, Inc. All Rights Reserved. + * Copyright (C) 2008-2020, VMware, Inc. All Rights Reserved. 
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -92,5 +92,8 @@ enum { UPT1_F_RSS = cpu_to_le64(0x0002), UPT1_F_RXVLAN = cpu_to_le64(0x0004), /* VLAN tag stripping */ UPT1_F_LRO = cpu_to_le64(0x0008), + UPT1_F_RXINNEROFLD = cpu_to_le64(0x00010), /* Geneve/Vxlan rx csum + * offloading + */ }; #endif diff --git a/drivers/net/vmxnet3/vmxnet3_defs.h b/drivers/net/vmxnet3/vmxnet3_defs.h index c3a31646189f..a8d5ebd47c71 100644 --- a/drivers/net/vmxnet3/vmxnet3_defs.h +++ b/drivers/net/vmxnet3/vmxnet3_defs.h @@ -1,7 +1,7 @@ /* * Linux driver for VMware's vmxnet3 ethernet NIC. * - * Copyright (C) 2008-2016, VMware, Inc. All Rights Reserved. + * Copyright (C) 2008-2020, VMware, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -82,6 +82,7 @@ enum { VMXNET3_CMD_RESERVED3, VMXNET3_CMD_SET_COALESCE, VMXNET3_CMD_REGISTER_MEMREGS, + VMXNET3_CMD_SET_RSS_FIELDS, VMXNET3_CMD_FIRST_GET = 0xF00D0000, VMXNET3_CMD_GET_QUEUE_STATUS = VMXNET3_CMD_FIRST_GET, @@ -96,19 +97,20 @@ enum { VMXNET3_CMD_GET_RESERVED1, VMXNET3_CMD_GET_TXDATA_DESC_SIZE, VMXNET3_CMD_GET_COALESCE, + VMXNET3_CMD_GET_RSS_FIELDS, }; /* * Little Endian layout of bitfields - * Byte 0 : 7.....len.....0 - * Byte 1 : rsvd gen 13.len.8 + * Byte 1 : oco gen 13.len.8 * Byte 2 : 5.msscof.0 ext1 dtype * Byte 3 : 13...msscof...6 * * Big Endian layout of bitfields - * Byte 0: 13...msscof...6 * Byte 1 : 5.msscof.0 ext1 dtype - * Byte 2 : rsvd gen 13.len.8 + * Byte 2 : oco gen 13.len.8 * Byte 3 : 7.....len.....0 * * Thus, le32_to_cpu on the dword will allow the big endian driver to read @@ -123,13 +125,13 @@ struct Vmxnet3_TxDesc { u32 msscof:14; /* MSS, checksum offset, flags */ u32 ext1:1; u32 dtype:1; /* descriptor type */ - u32 rsvd:1; + u32 oco:1; u32 gen:1; /* generation bit */ u32 len:14; #else u32 len:14; u32 gen:1; /* generation bit */ - u32 rsvd:1; + u32 oco:1; u32 dtype:1; /* descriptor type */ u32 ext1:1; u32 msscof:14; /* MSS, checksum offset, flags */ @@ -155,9 +157,10 @@ struct Vmxnet3_TxDesc { }; /* TxDesc.OM values */ -#define VMXNET3_OM_NONE 0 -#define VMXNET3_OM_CSUM 2 -#define VMXNET3_OM_TSO 3 +#define VMXNET3_OM_NONE 0 +#define VMXNET3_OM_ENCAP 1 +#define VMXNET3_OM_CSUM 2 +#define VMXNET3_OM_TSO 3 /* fields in TxDesc we access w/o using bit fields */ #define VMXNET3_TXD_EOP_SHIFT 12 @@ -224,6 +227,8 @@ struct Vmxnet3_RxDesc { #define VMXNET3_RXD_BTYPE_SHIFT 14 #define VMXNET3_RXD_GEN_SHIFT 31 +#define VMXNET3_RCD_HDR_INNER_SHIFT 13 + struct Vmxnet3_RxCompDesc { #ifdef __BIG_ENDIAN_BITFIELD u32 ext2:1; @@ -685,12 +690,22 @@ struct Vmxnet3_MemRegs { struct Vmxnet3_MemoryRegion memRegs[1]; }; +enum Vmxnet3_RSSField { + VMXNET3_RSS_FIELDS_TCPIP4 = 0x0001, + VMXNET3_RSS_FIELDS_TCPIP6 = 0x0002, + VMXNET3_RSS_FIELDS_UDPIP4 = 0x0004, + VMXNET3_RSS_FIELDS_UDPIP6 = 0x0008, + VMXNET3_RSS_FIELDS_ESPIP4 = 0x0010, + VMXNET3_RSS_FIELDS_ESPIP6 = 0x0020, +}; + /* If the command data <= 16 bytes, use the shared memory directly. * otherwise, use variable length configuration descriptor. 
*/ union Vmxnet3_CmdInfo { struct Vmxnet3_VariableLenConfDesc varConf; struct Vmxnet3_SetPolling setPolling; + enum Vmxnet3_RSSField setRssFields; __le64 data[2]; }; diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 722cb054a5cd..ca395f9679d0 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -1,7 +1,7 @@ /* * Linux driver for VMware's vmxnet3 ethernet NIC. * - * Copyright (C) 2008-2016, VMware, Inc. All Rights Reserved. + * Copyright (C) 2008-2020, VMware, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -842,21 +842,46 @@ vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, u8 protocol = 0; if (ctx->mss) { /* TSO */ - ctx->eth_ip_hdr_size = skb_transport_offset(skb); - ctx->l4_hdr_size = tcp_hdrlen(skb); - ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size; + if (VMXNET3_VERSION_GE_4(adapter) && skb->encapsulation) { + ctx->l4_offset = skb_inner_transport_offset(skb); + ctx->l4_hdr_size = inner_tcp_hdrlen(skb); + ctx->copy_size = ctx->l4_offset + ctx->l4_hdr_size; + } else { + ctx->l4_offset = skb_transport_offset(skb); + ctx->l4_hdr_size = tcp_hdrlen(skb); + ctx->copy_size = ctx->l4_offset + ctx->l4_hdr_size; + } } else { if (skb->ip_summed == CHECKSUM_PARTIAL) { - ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb); + /* For encap packets, skb_checksum_start_offset refers + * to inner L4 offset. Thus, below works for encap as + * well as non-encap case + */ + ctx->l4_offset = skb_checksum_start_offset(skb); + + if (VMXNET3_VERSION_GE_4(adapter) && + skb->encapsulation) { + struct iphdr *iph = inner_ip_hdr(skb); - if (ctx->ipv4) { - const struct iphdr *iph = ip_hdr(skb); + if (iph->version == 4) { + protocol = iph->protocol; + } else { + const struct ipv6hdr *ipv6h; + + ipv6h = inner_ipv6_hdr(skb); + protocol = ipv6h->nexthdr; + } + } else { + if (ctx->ipv4) { + const struct iphdr *iph = ip_hdr(skb); - protocol = iph->protocol; - } else if (ctx->ipv6) { - const struct ipv6hdr *ipv6h = ipv6_hdr(skb); + protocol = iph->protocol; + } else if (ctx->ipv6) { + const struct ipv6hdr *ipv6h; - protocol = ipv6h->nexthdr; + ipv6h = ipv6_hdr(skb); + protocol = ipv6h->nexthdr; + } } switch (protocol) { @@ -871,10 +896,10 @@ vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, break; } - ctx->copy_size = min(ctx->eth_ip_hdr_size + + ctx->copy_size = min(ctx->l4_offset + ctx->l4_hdr_size, skb->len); } else { - ctx->eth_ip_hdr_size = 0; + ctx->l4_offset = 0; ctx->l4_hdr_size = 0; /* copy as much as allowed */ ctx->copy_size = min_t(unsigned int, @@ -930,6 +955,25 @@ vmxnet3_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, static void +vmxnet3_prepare_inner_tso(struct sk_buff *skb, + struct vmxnet3_tx_ctx *ctx) +{ + struct tcphdr *tcph = inner_tcp_hdr(skb); + struct iphdr *iph = inner_ip_hdr(skb); + + if (iph->version == 4) { + iph->check = 0; + tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0, + IPPROTO_TCP, 0); + } else { + struct ipv6hdr *iph = inner_ipv6_hdr(skb); + + tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0, + IPPROTO_TCP, 0); + } +} + +static void vmxnet3_prepare_tso(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx) { @@ -987,6 +1031,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, /* Use temporary descriptor to avoid touching bits multiple times */ union Vmxnet3_GenericDesc tempTxDesc; #endif + 
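vmxnet3_prepare_inner_tso() above mirrors the existing vmxnet3_prepare_tso() for encapsulated frames: it clears the inner IPv4 header checksum and seeds the inner TCP checksum with the one's-complement pseudo-header sum at length zero, which is what ~csum_tcpudp_magic(..., 0, IPPROTO_TCP, 0) yields, so the device only has to fold the segmented payload into it. A standalone sketch of that IPv4 seed computation (the addresses are examples, and the folding shown is assumed to match the kernel helper):

        #include <stdio.h>
        #include <stdint.h>
        #include <arpa/inet.h>

        /* One's-complement sum of the IPv4 pseudo header (saddr, daddr,
         * protocol) with a zero length field, as in the TSO path above.
         * The result is not inverted, because the device keeps
         * accumulating payload words into it during segmentation.
         */
        static uint16_t pseudo_hdr_seed(uint32_t saddr, uint32_t daddr,
                                        uint8_t proto)
        {
                uint64_t sum = 0;

                sum += (saddr >> 16) + (saddr & 0xffff);
                sum += (daddr >> 16) + (daddr & 0xffff);
                sum += proto;                   /* length field is 0 for TSO */

                while (sum >> 16)               /* fold carries back in */
                        sum = (sum & 0xffff) + (sum >> 16);
                return (uint16_t)sum;
        }

        int main(void)
        {
                uint32_t s = ntohl(inet_addr("192.168.0.1"));
                uint32_t d = ntohl(inet_addr("192.168.0.2"));

                printf("seed = 0x%04x\n",
                       pseudo_hdr_seed(s, d, 6 /* IPPROTO_TCP */));
                return 0;
        }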
struct udphdr *udph; count = txd_estimate(skb); @@ -1003,7 +1048,11 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, } tq->stats.copy_skb_header++; } - vmxnet3_prepare_tso(skb, &ctx); + if (skb->encapsulation) { + vmxnet3_prepare_inner_tso(skb, &ctx); + } else { + vmxnet3_prepare_tso(skb, &ctx); + } } else { if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) { @@ -1026,14 +1075,14 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, BUG_ON(ret <= 0 && ctx.copy_size != 0); /* hdrs parsed, check against other limits */ if (ctx.mss) { - if (unlikely(ctx.eth_ip_hdr_size + ctx.l4_hdr_size > + if (unlikely(ctx.l4_offset + ctx.l4_hdr_size > VMXNET3_MAX_TX_BUF_SIZE)) { tq->stats.drop_oversized_hdr++; goto drop_pkt; } } else { if (skb->ip_summed == CHECKSUM_PARTIAL) { - if (unlikely(ctx.eth_ip_hdr_size + + if (unlikely(ctx.l4_offset + skb->csum_offset > VMXNET3_MAX_CSUM_OFFSET)) { tq->stats.drop_oversized_hdr++; @@ -1080,16 +1129,34 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, #endif tx_num_deferred = le32_to_cpu(tq->shared->txNumDeferred); if (ctx.mss) { - gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size; - gdesc->txd.om = VMXNET3_OM_TSO; - gdesc->txd.msscof = ctx.mss; + if (VMXNET3_VERSION_GE_4(adapter) && skb->encapsulation) { + gdesc->txd.hlen = ctx.l4_offset + ctx.l4_hdr_size; + gdesc->txd.om = VMXNET3_OM_ENCAP; + gdesc->txd.msscof = ctx.mss; + + udph = udp_hdr(skb); + if (udph->check) + gdesc->txd.oco = 1; + } else { + gdesc->txd.hlen = ctx.l4_offset + ctx.l4_hdr_size; + gdesc->txd.om = VMXNET3_OM_TSO; + gdesc->txd.msscof = ctx.mss; + } num_pkts = (skb->len - gdesc->txd.hlen + ctx.mss - 1) / ctx.mss; } else { if (skb->ip_summed == CHECKSUM_PARTIAL) { - gdesc->txd.hlen = ctx.eth_ip_hdr_size; - gdesc->txd.om = VMXNET3_OM_CSUM; - gdesc->txd.msscof = ctx.eth_ip_hdr_size + - skb->csum_offset; + if (VMXNET3_VERSION_GE_4(adapter) && + skb->encapsulation) { + gdesc->txd.hlen = ctx.l4_offset + + ctx.l4_hdr_size; + gdesc->txd.om = VMXNET3_OM_ENCAP; + gdesc->txd.msscof = 0; /* Reserved */ + } else { + gdesc->txd.hlen = ctx.l4_offset; + gdesc->txd.om = VMXNET3_OM_CSUM; + gdesc->txd.msscof = ctx.l4_offset + + skb->csum_offset; + } } else { gdesc->txd.om = 0; gdesc->txd.msscof = 0; @@ -1168,13 +1235,21 @@ vmxnet3_rx_csum(struct vmxnet3_adapter *adapter, (le32_to_cpu(gdesc->dword[3]) & VMXNET3_RCD_CSUM_OK) == VMXNET3_RCD_CSUM_OK) { skb->ip_summed = CHECKSUM_UNNECESSARY; - BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp)); - BUG_ON(gdesc->rcd.frg); + WARN_ON_ONCE(!(gdesc->rcd.tcp || gdesc->rcd.udp) && + !(le32_to_cpu(gdesc->dword[0]) & + (1UL << VMXNET3_RCD_HDR_INNER_SHIFT))); + WARN_ON_ONCE(gdesc->rcd.frg && + !(le32_to_cpu(gdesc->dword[0]) & + (1UL << VMXNET3_RCD_HDR_INNER_SHIFT))); } else if (gdesc->rcd.v6 && (le32_to_cpu(gdesc->dword[3]) & (1 << VMXNET3_RCD_TUC_SHIFT))) { skb->ip_summed = CHECKSUM_UNNECESSARY; - BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp)); - BUG_ON(gdesc->rcd.frg); + WARN_ON_ONCE(!(gdesc->rcd.tcp || gdesc->rcd.udp) && + !(le32_to_cpu(gdesc->dword[0]) & + (1UL << VMXNET3_RCD_HDR_INNER_SHIFT))); + WARN_ON_ONCE(gdesc->rcd.frg && + !(le32_to_cpu(gdesc->dword[0]) & + (1UL << VMXNET3_RCD_HDR_INNER_SHIFT))); } else { if (gdesc->rcd.csum) { skb->csum = htons(gdesc->rcd.csum); @@ -2429,6 +2504,10 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter) if (adapter->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) devRead->misc.uptFeatures |= UPT1_F_RXVLAN; + if (adapter->netdev->features & (NETIF_F_GSO_UDP_TUNNEL | + 
NETIF_F_GSO_UDP_TUNNEL_CSUM)) + devRead->misc.uptFeatures |= UPT1_F_RXINNEROFLD; + devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu); devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa); devRead->misc.queueDescLen = cpu_to_le32( @@ -2554,6 +2633,39 @@ vmxnet3_init_coalesce(struct vmxnet3_adapter *adapter) spin_unlock_irqrestore(&adapter->cmd_lock, flags); } +static void +vmxnet3_init_rssfields(struct vmxnet3_adapter *adapter) +{ + struct Vmxnet3_DriverShared *shared = adapter->shared; + union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo; + unsigned long flags; + + if (!VMXNET3_VERSION_GE_4(adapter)) + return; + + spin_lock_irqsave(&adapter->cmd_lock, flags); + + if (adapter->default_rss_fields) { + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, + VMXNET3_CMD_GET_RSS_FIELDS); + adapter->rss_fields = + VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); + } else { + cmdInfo->setRssFields = adapter->rss_fields; + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, + VMXNET3_CMD_SET_RSS_FIELDS); + /* Not all requested RSS may get applied, so get and + * cache what was actually applied. + */ + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, + VMXNET3_CMD_GET_RSS_FIELDS); + adapter->rss_fields = + VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); + } + + spin_unlock_irqrestore(&adapter->cmd_lock, flags); +} + int vmxnet3_activate_dev(struct vmxnet3_adapter *adapter) { @@ -2603,6 +2715,7 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter) } vmxnet3_init_coalesce(adapter); + vmxnet3_init_rssfields(adapter); for (i = 0; i < adapter->num_rx_queues; i++) { VMXNET3_WRITE_BAR0_REG(adapter, @@ -3039,6 +3152,18 @@ vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64) NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_LRO; + + if (VMXNET3_VERSION_GE_4(adapter)) { + netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM; + + netdev->hw_enc_features = NETIF_F_SG | NETIF_F_RXCSUM | + NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 | + NETIF_F_LRO | NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM; + } + if (dma64) netdev->hw_features |= NETIF_F_HIGHDMA; netdev->vlan_features = netdev->hw_features & @@ -3382,7 +3507,12 @@ vmxnet3_probe_device(struct pci_dev *pdev, goto err_alloc_pci; ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS); - if (ver & (1 << VMXNET3_REV_3)) { + if (ver & (1 << VMXNET3_REV_4)) { + VMXNET3_WRITE_BAR1_REG(adapter, + VMXNET3_REG_VRRS, + 1 << VMXNET3_REV_4); + adapter->version = VMXNET3_REV_4 + 1; + } else if (ver & (1 << VMXNET3_REV_3)) { VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 1 << VMXNET3_REV_3); @@ -3430,6 +3560,11 @@ vmxnet3_probe_device(struct pci_dev *pdev, adapter->default_coal_mode = true; } + if (VMXNET3_VERSION_GE_4(adapter)) { + adapter->default_rss_fields = true; + adapter->rss_fields = VMXNET3_RSS_FIELDS_DEFAULT; + } + SET_NETDEV_DEV(netdev, &pdev->dev); vmxnet3_declare_features(adapter, dma64); diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c index 6528940ce5f3..bfdda0f34b97 100644 --- a/drivers/net/vmxnet3/vmxnet3_ethtool.c +++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c @@ -1,7 +1,7 @@ /* * Linux driver for VMware's vmxnet3 ethernet NIC. * - * Copyright (C) 2008-2016, VMware, Inc. All Rights Reserved. + * Copyright (C) 2008-2020, VMware, Inc. All Rights Reserved. 
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -267,14 +267,43 @@ netdev_features_t vmxnet3_fix_features(struct net_device *netdev, return features; } +static void vmxnet3_enable_encap_offloads(struct net_device *netdev) +{ + struct vmxnet3_adapter *adapter = netdev_priv(netdev); + + if (VMXNET3_VERSION_GE_4(adapter)) { + netdev->hw_enc_features |= NETIF_F_SG | NETIF_F_RXCSUM | + NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 | + NETIF_F_LRO | NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM; + } +} + +static void vmxnet3_disable_encap_offloads(struct net_device *netdev) +{ + struct vmxnet3_adapter *adapter = netdev_priv(netdev); + + if (VMXNET3_VERSION_GE_4(adapter)) { + netdev->hw_enc_features &= ~(NETIF_F_SG | NETIF_F_RXCSUM | + NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 | + NETIF_F_LRO | NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM); + } +} + int vmxnet3_set_features(struct net_device *netdev, netdev_features_t features) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); unsigned long flags; netdev_features_t changed = features ^ netdev->features; + netdev_features_t tun_offload_mask = NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM; + u8 udp_tun_enabled = (netdev->features & tun_offload_mask) != 0; if (changed & (NETIF_F_RXCSUM | NETIF_F_LRO | - NETIF_F_HW_VLAN_CTAG_RX)) { + NETIF_F_HW_VLAN_CTAG_RX | tun_offload_mask)) { if (features & NETIF_F_RXCSUM) adapter->shared->devRead.misc.uptFeatures |= UPT1_F_RXCSUM; @@ -297,6 +326,17 @@ int vmxnet3_set_features(struct net_device *netdev, netdev_features_t features) adapter->shared->devRead.misc.uptFeatures &= ~UPT1_F_RXVLAN; + if ((features & tun_offload_mask) != 0 && !udp_tun_enabled) { + vmxnet3_enable_encap_offloads(netdev); + adapter->shared->devRead.misc.uptFeatures |= + UPT1_F_RXINNEROFLD; + } else if ((features & tun_offload_mask) == 0 && + udp_tun_enabled) { + vmxnet3_disable_encap_offloads(netdev); + adapter->shared->devRead.misc.uptFeatures &= + ~UPT1_F_RXINNEROFLD; + } + spin_lock_irqsave(&adapter->cmd_lock, flags); VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_UPDATE_FEATURE); @@ -665,18 +705,232 @@ out: return err; } +static int +vmxnet3_get_rss_hash_opts(struct vmxnet3_adapter *adapter, + struct ethtool_rxnfc *info) +{ + enum Vmxnet3_RSSField rss_fields; + + if (netif_running(adapter->netdev)) { + unsigned long flags; + + spin_lock_irqsave(&adapter->cmd_lock, flags); + + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, + VMXNET3_CMD_GET_RSS_FIELDS); + rss_fields = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); + spin_unlock_irqrestore(&adapter->cmd_lock, flags); + } else { + rss_fields = adapter->rss_fields; + } + + info->data = 0; + + /* Report default options for RSS on vmxnet3 */ + switch (info->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3 | + RXH_IP_SRC | RXH_IP_DST; + break; + case UDP_V4_FLOW: + if (rss_fields & VMXNET3_RSS_FIELDS_UDPIP4) + info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + info->data |= RXH_IP_SRC | RXH_IP_DST; + break; + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + if (rss_fields & VMXNET3_RSS_FIELDS_ESPIP4) + info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* fallthrough */ + case SCTP_V4_FLOW: + case IPV4_FLOW: + info->data |= RXH_IP_SRC | RXH_IP_DST; + break; + case UDP_V6_FLOW: + if 
(rss_fields & VMXNET3_RSS_FIELDS_UDPIP6) + info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + info->data |= RXH_IP_SRC | RXH_IP_DST; + break; + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case SCTP_V6_FLOW: + case IPV6_FLOW: + info->data |= RXH_IP_SRC | RXH_IP_DST; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int +vmxnet3_set_rss_hash_opt(struct net_device *netdev, + struct vmxnet3_adapter *adapter, + struct ethtool_rxnfc *nfc) +{ + enum Vmxnet3_RSSField rss_fields = adapter->rss_fields; + + /* RSS does not support anything other than hashing + * to queues on src and dst IPs and ports + */ + if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3)) + return -EINVAL; + + switch (nfc->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + !(nfc->data & RXH_L4_B_0_1) || + !(nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + case UDP_V4_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + rss_fields &= ~VMXNET3_RSS_FIELDS_UDPIP4; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + rss_fields |= VMXNET3_RSS_FIELDS_UDPIP4; + break; + default: + return -EINVAL; + } + break; + case UDP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + rss_fields &= ~VMXNET3_RSS_FIELDS_UDPIP6; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + rss_fields |= VMXNET3_RSS_FIELDS_UDPIP6; + break; + default: + return -EINVAL; + } + break; + case ESP_V4_FLOW: + case AH_V4_FLOW: + case AH_ESP_V4_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + rss_fields &= ~VMXNET3_RSS_FIELDS_ESPIP4; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + rss_fields |= VMXNET3_RSS_FIELDS_ESPIP4; + break; + default: + return -EINVAL; + } + break; + case ESP_V6_FLOW: + case AH_V6_FLOW: + case AH_ESP_V6_FLOW: + case SCTP_V4_FLOW: + case SCTP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + (nfc->data & RXH_L4_B_0_1) || + (nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + default: + return -EINVAL; + } + + /* if we changed something we need to update flags */ + if (rss_fields != adapter->rss_fields) { + adapter->default_rss_fields = false; + if (netif_running(netdev)) { + struct Vmxnet3_DriverShared *shared = adapter->shared; + union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo; + unsigned long flags; + + spin_lock_irqsave(&adapter->cmd_lock, flags); + cmdInfo->setRssFields = rss_fields; + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, + VMXNET3_CMD_SET_RSS_FIELDS); + + /* Not all requested RSS may get applied, so get and + * cache what was actually applied. + */ + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, + VMXNET3_CMD_GET_RSS_FIELDS); + adapter->rss_fields = + VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); + spin_unlock_irqrestore(&adapter->cmd_lock, flags); + } else { + /* When the device is activated, we will try to apply + * these rules and cache the applied value later. 
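The ETHTOOL_GRXFH/SRXFH paths above reduce to a translation between ethtool's RXH_* hash-field bits and the device's Vmxnet3_RSSField bits: TCP flows are fixed at 4-tuple hashing, UDP and ESP flows may toggle their two L4 port words on or off only as a pair, and any half-specified selection is rejected with -EINVAL. A standalone sketch of that per-flow toggle (constants are simplified; RXH_L4 stands in for RXH_L4_B_0_1 | RXH_L4_B_2_3):

        #include <stdio.h>
        #include <stdint.h>
        #include <stdbool.h>

        #define RXH_L4 0x3      /* stand-in: both L4 port words requested */

        enum rss_field {        /* mirrors part of enum Vmxnet3_RSSField */
                RSS_FIELDS_UDPIP4 = 0x0004,
                RSS_FIELDS_UDPIP6 = 0x0008,
                RSS_FIELDS_ESPIP4 = 0x0010,
        };

        /* Toggle one device RSS bit according to the requested L4 hashing.
         * Returns false for a half-specified port selection, which the
         * real handler rejects with -EINVAL.
         */
        static bool apply_l4_hash(uint32_t data, enum rss_field bit,
                                  uint32_t *fields)
        {
                switch (data & RXH_L4) {
                case 0:
                        *fields &= ~bit;
                        return true;
                case RXH_L4:
                        *fields |= bit;
                        return true;
                default:
                        return false;   /* only one of the two words set */
                }
        }

        int main(void)
        {
                uint32_t fields = 0;

                apply_l4_hash(RXH_L4, RSS_FIELDS_UDPIP4, &fields);
                printf("rss fields = 0x%04x\n", fields); /* 0x0004 */
                return 0;
        }

As the comment in the hunk notes, the device may not honor every requested field, so the driver re-reads and caches what was actually applied after each SET.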
+ */ + adapter->rss_fields = rss_fields; + } + } + return 0; +} static int vmxnet3_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info, u32 *rules) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); + int err = 0; + switch (info->cmd) { case ETHTOOL_GRXRINGS: info->data = adapter->num_rx_queues; - return 0; + break; + case ETHTOOL_GRXFH: + if (!VMXNET3_VERSION_GE_4(adapter)) { + err = -EOPNOTSUPP; + break; + } + err = vmxnet3_get_rss_hash_opts(adapter, info); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static int +vmxnet3_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info) +{ + struct vmxnet3_adapter *adapter = netdev_priv(netdev); + int err = 0; + + if (!VMXNET3_VERSION_GE_4(adapter)) { + err = -EOPNOTSUPP; + goto done; + } + + switch (info->cmd) { + case ETHTOOL_SRXFH: + err = vmxnet3_set_rss_hash_opt(netdev, adapter, info); + break; + default: + err = -EOPNOTSUPP; + break; } - return -EOPNOTSUPP; + +done: + return err; } #ifdef VMXNET3_RSS @@ -887,6 +1141,7 @@ static const struct ethtool_ops vmxnet3_ethtool_ops = { .get_ringparam = vmxnet3_get_ringparam, .set_ringparam = vmxnet3_set_ringparam, .get_rxnfc = vmxnet3_get_rxnfc, + .set_rxnfc = vmxnet3_set_rxnfc, #ifdef VMXNET3_RSS .get_rxfh_indir_size = vmxnet3_get_rss_indir_size, .get_rxfh = vmxnet3_get_rss, diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h index 1cc1cd4aaa59..5d2b062215a2 100644 --- a/drivers/net/vmxnet3/vmxnet3_int.h +++ b/drivers/net/vmxnet3/vmxnet3_int.h @@ -1,7 +1,7 @@ /* * Linux driver for VMware's vmxnet3 ethernet NIC. * - * Copyright (C) 2008-2016, VMware, Inc. All Rights Reserved. + * Copyright (C) 2008-2020, VMware, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -69,18 +69,19 @@ /* * Version numbers */ -#define VMXNET3_DRIVER_VERSION_STRING "1.4.17.0-k" +#define VMXNET3_DRIVER_VERSION_STRING "1.5.0.0-k" /* Each byte of this 32-bit integer encodes a version number in * VMXNET3_DRIVER_VERSION_STRING. */ -#define VMXNET3_DRIVER_VERSION_NUM 0x01041100 +#define VMXNET3_DRIVER_VERSION_NUM 0x01050000 #if defined(CONFIG_PCI_MSI) /* RSS only makes sense if MSI-X is supported. */ #define VMXNET3_RSS #endif +#define VMXNET3_REV_4 3 /* Vmxnet3 Rev. 4 */ #define VMXNET3_REV_3 2 /* Vmxnet3 Rev. 3 */ #define VMXNET3_REV_2 1 /* Vmxnet3 Rev. 2 */ #define VMXNET3_REV_1 0 /* Vmxnet3 Rev. 1 */ @@ -218,10 +219,16 @@ struct vmxnet3_tx_ctx { bool ipv4; bool ipv6; u16 mss; - u32 eth_ip_hdr_size; /* only valid for pkts requesting tso or csum - * offloading + u32 l4_offset; /* only valid for pkts requesting tso or csum + * offloading. For encap offload, it refers to + * inner L4 offset i.e. 
it includes outer header + * encap header and inner eth and ip header size + */ + + u32 l4_hdr_size; /* only valid if mss != 0 + * Refers to inner L4 hdr size for encap + * offload */ - u32 l4_hdr_size; /* only valid if mss != 0 */ u32 copy_size; /* # of bytes copied into the data ring */ union Vmxnet3_GenericDesc *sop_txd; union Vmxnet3_GenericDesc *eop_txd; @@ -376,6 +383,8 @@ struct vmxnet3_adapter { u16 rxdata_desc_size; bool rxdataring_enabled; + bool default_rss_fields; + enum Vmxnet3_RSSField rss_fields; struct work_struct work; @@ -412,6 +421,8 @@ struct vmxnet3_adapter { (adapter->version >= VMXNET3_REV_2 + 1) #define VMXNET3_VERSION_GE_3(adapter) \ (adapter->version >= VMXNET3_REV_3 + 1) +#define VMXNET3_VERSION_GE_4(adapter) \ + (adapter->version >= VMXNET3_REV_4 + 1) /* must be a multiple of VMXNET3_RING_SIZE_ALIGN */ #define VMXNET3_DEF_TX_RING_SIZE 512 @@ -435,6 +446,8 @@ struct vmxnet3_adapter { #define VMXNET3_COAL_RBC_RATE(usecs) (1000000 / usecs) #define VMXNET3_COAL_RBC_USECS(rbc_rate) (1000000 / rbc_rate) +#define VMXNET3_RSS_FIELDS_DEFAULT (VMXNET3_RSS_FIELDS_TCPIP4 | \ + VMXNET3_RSS_FIELDS_TCPIP6) int vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter); diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index a5b415fed11e..39bc10a7fd2e 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -26,6 +26,7 @@ #include <net/netns/generic.h> #include <net/tun_proto.h> #include <net/vxlan.h> +#include <net/nexthop.h> #if IS_ENABLED(CONFIG_IPV6) #include <net/ip6_tunnel.h> @@ -78,6 +79,9 @@ struct vxlan_fdb { u16 state; /* see ndm_state */ __be32 vni; u16 flags; /* see ndm_flags and below */ + struct list_head nh_list; + struct nexthop __rcu *nh; + struct vxlan_dev __rcu *vdev; }; #define NTF_VXLAN_ADDED_BY_USER 0x100 @@ -174,11 +178,15 @@ static inline struct hlist_head *vs_head(struct net *net, __be16 port) */ static inline struct vxlan_rdst *first_remote_rcu(struct vxlan_fdb *fdb) { + if (rcu_access_pointer(fdb->nh)) + return NULL; return list_entry_rcu(fdb->remotes.next, struct vxlan_rdst, list); } static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb) { + if (rcu_access_pointer(fdb->nh)) + return NULL; return list_first_entry(&fdb->remotes, struct vxlan_rdst, list); } @@ -251,9 +259,12 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan, { unsigned long now = jiffies; struct nda_cacheinfo ci; + bool send_ip, send_eth; struct nlmsghdr *nlh; + struct nexthop *nh; struct ndmsg *ndm; - bool send_ip, send_eth; + int nh_family; + u32 nh_id; nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags); if (nlh == NULL) @@ -264,16 +275,28 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan, send_eth = send_ip = true; + rcu_read_lock(); + nh = rcu_dereference(fdb->nh); + if (nh) { + nh_family = nexthop_get_family(nh); + nh_id = nh->id; + } + rcu_read_unlock(); + if (type == RTM_GETNEIGH) { - send_ip = !vxlan_addr_any(&rdst->remote_ip); + if (rdst) { + send_ip = !vxlan_addr_any(&rdst->remote_ip); + ndm->ndm_family = send_ip ? rdst->remote_ip.sa.sa_family : AF_INET; + } else if (nh) { + ndm->ndm_family = nh_family; + } send_eth = !is_zero_ether_addr(fdb->eth_addr); - ndm->ndm_family = send_ip ? 
rdst->remote_ip.sa.sa_family : AF_INET; } else ndm->ndm_family = AF_BRIDGE; ndm->ndm_state = fdb->state; ndm->ndm_ifindex = vxlan->dev->ifindex; ndm->ndm_flags = fdb->flags; - if (rdst->offloaded) + if (rdst && rdst->offloaded) ndm->ndm_flags |= NTF_OFFLOADED; ndm->ndm_type = RTN_UNICAST; @@ -284,23 +307,30 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan, if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr)) goto nla_put_failure; + if (nh) { + if (nla_put_u32(skb, NDA_NH_ID, nh_id)) + goto nla_put_failure; + } else if (rdst) { + if (send_ip && vxlan_nla_put_addr(skb, NDA_DST, + &rdst->remote_ip)) + goto nla_put_failure; + + if (rdst->remote_port && + rdst->remote_port != vxlan->cfg.dst_port && + nla_put_be16(skb, NDA_PORT, rdst->remote_port)) + goto nla_put_failure; + if (rdst->remote_vni != vxlan->default_dst.remote_vni && + nla_put_u32(skb, NDA_VNI, be32_to_cpu(rdst->remote_vni))) + goto nla_put_failure; + if (rdst->remote_ifindex && + nla_put_u32(skb, NDA_IFINDEX, rdst->remote_ifindex)) + goto nla_put_failure; + } - if (send_ip && vxlan_nla_put_addr(skb, NDA_DST, &rdst->remote_ip)) - goto nla_put_failure; - - if (rdst->remote_port && rdst->remote_port != vxlan->cfg.dst_port && - nla_put_be16(skb, NDA_PORT, rdst->remote_port)) - goto nla_put_failure; - if (rdst->remote_vni != vxlan->default_dst.remote_vni && - nla_put_u32(skb, NDA_VNI, be32_to_cpu(rdst->remote_vni))) - goto nla_put_failure; if ((vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) && fdb->vni && nla_put_u32(skb, NDA_SRC_VNI, be32_to_cpu(fdb->vni))) goto nla_put_failure; - if (rdst->remote_ifindex && - nla_put_u32(skb, NDA_IFINDEX, rdst->remote_ifindex)) - goto nla_put_failure; ci.ndm_used = jiffies_to_clock_t(now - fdb->used); ci.ndm_confirmed = 0; @@ -401,7 +431,7 @@ static int vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb, { int err; - if (swdev_notify) { + if (swdev_notify && rd) { switch (type) { case RTM_NEWNEIGH: err = vxlan_fdb_switchdev_call_notifiers(vxlan, fdb, rd, @@ -793,8 +823,9 @@ static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff) return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr)); } -static struct vxlan_fdb *vxlan_fdb_alloc(const u8 *mac, __u16 state, - __be32 src_vni, __u16 ndm_flags) +static struct vxlan_fdb *vxlan_fdb_alloc(struct vxlan_dev *vxlan, const u8 *mac, + __u16 state, __be32 src_vni, + __u16 ndm_flags) { struct vxlan_fdb *f; @@ -805,6 +836,9 @@ static struct vxlan_fdb *vxlan_fdb_alloc(const u8 *mac, __u16 state, f->flags = ndm_flags; f->updated = f->used = jiffies; f->vni = src_vni; + f->nh = NULL; + RCU_INIT_POINTER(f->vdev, vxlan); + INIT_LIST_HEAD(&f->nh_list); INIT_LIST_HEAD(&f->remotes); memcpy(f->eth_addr, mac, ETH_ALEN); @@ -819,11 +853,78 @@ static void vxlan_fdb_insert(struct vxlan_dev *vxlan, const u8 *mac, vxlan_fdb_head(vxlan, mac, src_vni)); } +static int vxlan_fdb_nh_update(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb, + u32 nhid, struct netlink_ext_ack *extack) +{ + struct nexthop *old_nh = rtnl_dereference(fdb->nh); + struct nh_group *nhg; + struct nexthop *nh; + int err = -EINVAL; + + if (old_nh && old_nh->id == nhid) + return 0; + + nh = nexthop_find_by_id(vxlan->net, nhid); + if (!nh) { + NL_SET_ERR_MSG(extack, "Nexthop id does not exist"); + goto err_inval; + } + + if (nh) { + if (!nexthop_get(nh)) { + NL_SET_ERR_MSG(extack, "Nexthop has been deleted"); + nh = NULL; + goto err_inval; + } + if (!nh->is_fdb_nh) { + NL_SET_ERR_MSG(extack, "Nexthop is not a fdb nexthop"); + goto 
err_inval; + } + + if (!nh->is_group || !nh->nh_grp->mpath) { + NL_SET_ERR_MSG(extack, "Nexthop is not a multipath group"); + goto err_inval; + } + + /* check nexthop group family */ + nhg = rtnl_dereference(nh->nh_grp); + switch (vxlan->default_dst.remote_ip.sa.sa_family) { + case AF_INET: + if (!nhg->has_v4) { + err = -EAFNOSUPPORT; + NL_SET_ERR_MSG(extack, "Nexthop group family not supported"); + goto err_inval; + } + break; + case AF_INET6: + if (nhg->has_v4) { + err = -EAFNOSUPPORT; + NL_SET_ERR_MSG(extack, "Nexthop group family not supported"); + goto err_inval; + } + } + } + + if (old_nh) { + list_del_rcu(&fdb->nh_list); + nexthop_put(old_nh); + } + rcu_assign_pointer(fdb->nh, nh); + list_add_tail_rcu(&fdb->nh_list, &nh->fdb_list); + return 1; + +err_inval: + if (nh) + nexthop_put(nh); + return err; +} + static int vxlan_fdb_create(struct vxlan_dev *vxlan, const u8 *mac, union vxlan_addr *ip, __u16 state, __be16 port, __be32 src_vni, __be32 vni, __u32 ifindex, __u16 ndm_flags, - struct vxlan_fdb **fdb) + u32 nhid, struct vxlan_fdb **fdb, + struct netlink_ext_ack *extack) { struct vxlan_rdst *rd = NULL; struct vxlan_fdb *f; @@ -834,24 +935,37 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan, return -ENOSPC; netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip); - f = vxlan_fdb_alloc(mac, state, src_vni, ndm_flags); + f = vxlan_fdb_alloc(vxlan, mac, state, src_vni, ndm_flags); if (!f) return -ENOMEM; - rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd); - if (rc < 0) { - kfree(f); - return rc; - } + if (nhid) + rc = vxlan_fdb_nh_update(vxlan, f, nhid, extack); + else + rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd); + if (rc < 0) + goto errout; *fdb = f; return 0; + +errout: + kfree(f); + return rc; } static void __vxlan_fdb_free(struct vxlan_fdb *f) { struct vxlan_rdst *rd, *nd; + struct nexthop *nh; + + nh = rcu_dereference_raw(f->nh); + if (nh) { + rcu_assign_pointer(f->nh, NULL); + rcu_assign_pointer(f->vdev, NULL); + nexthop_put(nh); + } list_for_each_entry_safe(rd, nd, &f->remotes, list) { dst_cache_destroy(&rd->dst_cache); @@ -875,12 +989,18 @@ static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f, netdev_dbg(vxlan->dev, "delete %pM\n", f->eth_addr); --vxlan->addrcnt; - if (do_notify) - list_for_each_entry(rd, &f->remotes, list) - vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH, + if (do_notify) { + if (rcu_access_pointer(f->nh)) + vxlan_fdb_notify(vxlan, f, NULL, RTM_DELNEIGH, swdev_notify, NULL); + else + list_for_each_entry(rd, &f->remotes, list) + vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH, + swdev_notify, NULL); + } hlist_del_rcu(&f->hlist); + list_del_rcu(&f->nh_list); call_rcu(&f->rcu, vxlan_fdb_free); } @@ -897,7 +1017,7 @@ static int vxlan_fdb_update_existing(struct vxlan_dev *vxlan, __u16 state, __u16 flags, __be16 port, __be32 vni, __u32 ifindex, __u16 ndm_flags, - struct vxlan_fdb *f, + struct vxlan_fdb *f, u32 nhid, bool swdev_notify, struct netlink_ext_ack *extack) { @@ -908,6 +1028,18 @@ static int vxlan_fdb_update_existing(struct vxlan_dev *vxlan, int rc = 0; int err; + if (nhid && !rcu_access_pointer(f->nh)) { + NL_SET_ERR_MSG(extack, + "Cannot replace an existing non nexthop fdb with a nexthop"); + return -EOPNOTSUPP; + } + + if (nhid && (flags & NLM_F_APPEND)) { + NL_SET_ERR_MSG(extack, + "Cannot append to a nexthop fdb"); + return -EOPNOTSUPP; + } + /* Do not allow an externally learned entry to take over an entry added * by the user. 
*/ @@ -929,10 +1061,17 @@ static int vxlan_fdb_update_existing(struct vxlan_dev *vxlan, /* Only change unicasts */ if (!(is_multicast_ether_addr(f->eth_addr) || is_zero_ether_addr(f->eth_addr))) { - rc = vxlan_fdb_replace(f, ip, port, vni, - ifindex, &oldrd); + if (nhid) { + rc = vxlan_fdb_nh_update(vxlan, f, nhid, extack); + if (rc < 0) + return rc; + } else { + rc = vxlan_fdb_replace(f, ip, port, vni, + ifindex, &oldrd); + } notify |= rc; } else { + NL_SET_ERR_MSG(extack, "Cannot replace non-unicast fdb entries"); return -EOPNOTSUPP; } } @@ -962,6 +1101,8 @@ static int vxlan_fdb_update_existing(struct vxlan_dev *vxlan, return 0; err_notify: + if (nhid) + return err; if ((flags & NLM_F_REPLACE) && rc) *rd = oldrd; else if ((flags & NLM_F_APPEND) && rc) { @@ -975,7 +1116,7 @@ static int vxlan_fdb_update_create(struct vxlan_dev *vxlan, const u8 *mac, union vxlan_addr *ip, __u16 state, __u16 flags, __be16 port, __be32 src_vni, __be32 vni, - __u32 ifindex, __u16 ndm_flags, + __u32 ifindex, __u16 ndm_flags, u32 nhid, bool swdev_notify, struct netlink_ext_ack *extack) { @@ -990,7 +1131,7 @@ static int vxlan_fdb_update_create(struct vxlan_dev *vxlan, netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip); rc = vxlan_fdb_create(vxlan, mac, ip, state, port, src_vni, - vni, ifindex, fdb_flags, &f); + vni, ifindex, fdb_flags, nhid, &f, extack); if (rc < 0) return rc; @@ -1012,7 +1153,7 @@ static int vxlan_fdb_update(struct vxlan_dev *vxlan, const u8 *mac, union vxlan_addr *ip, __u16 state, __u16 flags, __be16 port, __be32 src_vni, __be32 vni, - __u32 ifindex, __u16 ndm_flags, + __u32 ifindex, __u16 ndm_flags, u32 nhid, bool swdev_notify, struct netlink_ext_ack *extack) { @@ -1028,14 +1169,15 @@ static int vxlan_fdb_update(struct vxlan_dev *vxlan, return vxlan_fdb_update_existing(vxlan, ip, state, flags, port, vni, ifindex, ndm_flags, f, - swdev_notify, extack); + nhid, swdev_notify, extack); } else { if (!(flags & NLM_F_CREATE)) return -ENOENT; return vxlan_fdb_update_create(vxlan, mac, ip, state, flags, port, src_vni, vni, ifindex, - ndm_flags, swdev_notify, extack); + ndm_flags, nhid, swdev_notify, + extack); } } @@ -1049,11 +1191,15 @@ static void vxlan_fdb_dst_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f, static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan, union vxlan_addr *ip, __be16 *port, __be32 *src_vni, - __be32 *vni, u32 *ifindex) + __be32 *vni, u32 *ifindex, u32 *nhid) { struct net *net = dev_net(vxlan->dev); int err; + if (tb[NDA_NH_ID] && (tb[NDA_DST] || tb[NDA_VNI] || tb[NDA_IFINDEX] || + tb[NDA_PORT])) + return -EINVAL; + if (tb[NDA_DST]) { err = vxlan_nla_get_addr(ip, tb[NDA_DST]); if (err) @@ -1109,6 +1255,11 @@ static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan, *ifindex = 0; } + if (tb[NDA_NH_ID]) + *nhid = nla_get_u32(tb[NDA_NH_ID]); + else + *nhid = 0; + return 0; } @@ -1123,7 +1274,7 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], union vxlan_addr ip; __be16 port; __be32 src_vni, vni; - u32 ifindex; + u32 ifindex, nhid; u32 hash_index; int err; @@ -1133,10 +1284,11 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], return -EINVAL; } - if (tb[NDA_DST] == NULL) + if (!tb || (!tb[NDA_DST] && !tb[NDA_NH_ID])) return -EINVAL; - err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &src_vni, &vni, &ifindex); + err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &src_vni, &vni, &ifindex, + &nhid); if (err) return err; @@ -1148,7 +1300,7 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], err = 
vxlan_fdb_update(vxlan, addr, &ip, ndm->ndm_state, flags, port, src_vni, vni, ifindex, ndm->ndm_flags | NTF_VXLAN_ADDED_BY_USER, - true, extack); + nhid, true, extack); spin_unlock_bh(&vxlan->hash_lock[hash_index]); return err; @@ -1159,8 +1311,8 @@ static int __vxlan_fdb_delete(struct vxlan_dev *vxlan, __be16 port, __be32 src_vni, __be32 vni, u32 ifindex, bool swdev_notify) { - struct vxlan_fdb *f; struct vxlan_rdst *rd = NULL; + struct vxlan_fdb *f; int err = -ENOENT; f = vxlan_find_mac(vxlan, addr, src_vni); @@ -1195,12 +1347,13 @@ static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[], struct vxlan_dev *vxlan = netdev_priv(dev); union vxlan_addr ip; __be32 src_vni, vni; - __be16 port; - u32 ifindex; + u32 ifindex, nhid; u32 hash_index; + __be16 port; int err; - err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &src_vni, &vni, &ifindex); + err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &src_vni, &vni, &ifindex, + &nhid); if (err) return err; @@ -1228,6 +1381,17 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) { struct vxlan_rdst *rd; + if (rcu_access_pointer(f->nh)) { + err = vxlan_fdb_info(skb, vxlan, f, + NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, + RTM_NEWNEIGH, + NLM_F_MULTI, NULL); + if (err < 0) + goto out; + continue; + } + list_for_each_entry_rcu(rd, &f->remotes, list) { if (*idx < cb->args[2]) goto skip; @@ -1311,6 +1475,10 @@ static bool vxlan_snoop(struct net_device *dev, if (f->state & (NUD_PERMANENT | NUD_NOARP)) return true; + /* Don't override an fdb with nexthop with a learnt entry */ + if (rcu_access_pointer(f->nh)) + return true; + if (net_ratelimit()) netdev_info(dev, "%pM migrated from %pIS to %pIS\n", @@ -1333,7 +1501,7 @@ static bool vxlan_snoop(struct net_device *dev, vxlan->cfg.dst_port, vni, vxlan->default_dst.remote_vni, - ifindex, NTF_SELF, true, NULL); + ifindex, NTF_SELF, 0, true, NULL); spin_unlock(&vxlan->hash_lock[hash_index]); } @@ -2616,6 +2784,38 @@ tx_error: kfree_skb(skb); } +static void vxlan_xmit_nh(struct sk_buff *skb, struct net_device *dev, + struct vxlan_fdb *f, __be32 vni, bool did_rsc) +{ + struct vxlan_rdst nh_rdst; + struct nexthop *nh; + bool do_xmit; + u32 hash; + + memset(&nh_rdst, 0, sizeof(struct vxlan_rdst)); + hash = skb_get_hash(skb); + + rcu_read_lock(); + nh = rcu_dereference(f->nh); + if (!nh) { + rcu_read_unlock(); + goto drop; + } + do_xmit = vxlan_fdb_nh_path_select(nh, hash, &nh_rdst); + rcu_read_unlock(); + + if (likely(do_xmit)) + vxlan_xmit_one(skb, dev, vni, &nh_rdst, did_rsc); + else + goto drop; + + return; + +drop: + dev->stats.tx_dropped++; + dev_kfree_skb(skb); +} + /* Transmit local packets over Vxlan * * Outer IP header inherits ECN and DF from inner header. @@ -2692,22 +2892,27 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) } } - list_for_each_entry_rcu(rdst, &f->remotes, list) { - struct sk_buff *skb1; + if (rcu_access_pointer(f->nh)) { + vxlan_xmit_nh(skb, dev, f, + (vni ? 
: vxlan->default_dst.remote_vni), did_rsc); + } else { + list_for_each_entry_rcu(rdst, &f->remotes, list) { + struct sk_buff *skb1; - if (!fdst) { - fdst = rdst; - continue; + if (!fdst) { + fdst = rdst; + continue; + } + skb1 = skb_clone(skb, GFP_ATOMIC); + if (skb1) + vxlan_xmit_one(skb1, dev, vni, rdst, did_rsc); } - skb1 = skb_clone(skb, GFP_ATOMIC); - if (skb1) - vxlan_xmit_one(skb1, dev, vni, rdst, did_rsc); + if (fdst) + vxlan_xmit_one(skb, dev, vni, fdst, did_rsc); + else + kfree_skb(skb); } - if (fdst) - vxlan_xmit_one(skb, dev, vni, fdst, did_rsc); - else - kfree_skb(skb); return NETDEV_TX_OK; } @@ -3615,7 +3820,7 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev, dst->remote_vni, dst->remote_vni, dst->remote_ifindex, - NTF_SELF, &f); + NTF_SELF, 0, &f, extack); if (err) return err; } @@ -4013,7 +4218,7 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[], vxlan->cfg.dst_port, conf.vni, conf.vni, conf.remote_ifindex, - NTF_SELF, true, extack); + NTF_SELF, 0, true, extack); if (err) { spin_unlock_bh(&vxlan->hash_lock[hash_index]); netdev_adjacent_change_abort(dst->remote_dev, @@ -4335,7 +4540,7 @@ vxlan_fdb_external_learn_add(struct net_device *dev, fdb_info->remote_vni, fdb_info->remote_ifindex, NTF_USE | NTF_SELF | NTF_EXT_LEARNED, - false, extack); + 0, false, extack); spin_unlock_bh(&vxlan->hash_lock[hash_index]); return err; @@ -4410,6 +4615,43 @@ static struct notifier_block vxlan_switchdev_notifier_block __read_mostly = { .notifier_call = vxlan_switchdev_event, }; +static void vxlan_fdb_nh_flush(struct nexthop *nh) +{ + struct vxlan_fdb *fdb; + struct vxlan_dev *vxlan; + u32 hash_index; + + rcu_read_lock(); + list_for_each_entry_rcu(fdb, &nh->fdb_list, nh_list) { + vxlan = rcu_dereference(fdb->vdev); + WARN_ON(!vxlan); + hash_index = fdb_head_index(vxlan, fdb->eth_addr, + vxlan->default_dst.remote_vni); + spin_lock_bh(&vxlan->hash_lock[hash_index]); + if (!hlist_unhashed(&fdb->hlist)) + vxlan_fdb_destroy(vxlan, fdb, false, false); + spin_unlock_bh(&vxlan->hash_lock[hash_index]); + } + rcu_read_unlock(); +} + +static int vxlan_nexthop_event(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + struct nexthop *nh = ptr; + + if (!nh || event != NEXTHOP_EVENT_DEL) + return NOTIFY_DONE; + + vxlan_fdb_nh_flush(nh); + + return NOTIFY_DONE; +} + +static struct notifier_block vxlan_nexthop_notifier_block __read_mostly = { + .notifier_call = vxlan_nexthop_event, +}; + static __net_init int vxlan_init_net(struct net *net) { struct vxlan_net *vn = net_generic(net, vxlan_net_id); @@ -4421,7 +4663,7 @@ static __net_init int vxlan_init_net(struct net *net) for (h = 0; h < PORT_HASH_SIZE; ++h) INIT_HLIST_HEAD(&vn->sock_list[h]); - return 0; + return register_nexthop_notifier(net, &vxlan_nexthop_notifier_block); } static void vxlan_destroy_tunnels(struct net *net, struct list_head *head) @@ -4454,6 +4696,8 @@ static void __net_exit vxlan_exit_batch_net(struct list_head *net_list) rtnl_lock(); list_for_each_entry(net, net_list, exit_list) + unregister_nexthop_notifier(net, &vxlan_nexthop_notifier_block); + list_for_each_entry(net, net_list, exit_list) vxlan_destroy_tunnels(net, &list); unregister_netdevice_many(&list); diff --git a/drivers/net/wireguard/messages.h b/drivers/net/wireguard/messages.h index b8a7b9ce32ba..208da72673fc 100644 --- a/drivers/net/wireguard/messages.h +++ b/drivers/net/wireguard/messages.h @@ -32,7 +32,7 @@ enum cookie_values { }; enum counter_values { - COUNTER_BITS_TOTAL = 2048, + COUNTER_BITS_TOTAL = 8192, 
COUNTER_REDUNDANT_BITS = BITS_PER_LONG, COUNTER_WINDOW_SIZE = COUNTER_BITS_TOTAL - COUNTER_REDUNDANT_BITS }; diff --git a/drivers/net/wireguard/noise.c b/drivers/net/wireguard/noise.c index 708dc61c974f..626433690abb 100644 --- a/drivers/net/wireguard/noise.c +++ b/drivers/net/wireguard/noise.c @@ -104,6 +104,7 @@ static struct noise_keypair *keypair_create(struct wg_peer *peer) if (unlikely(!keypair)) return NULL; + spin_lock_init(&keypair->receiving_counter.lock); keypair->internal_id = atomic64_inc_return(&keypair_counter); keypair->entry.type = INDEX_HASHTABLE_KEYPAIR; keypair->entry.peer = peer; @@ -358,25 +359,16 @@ out: memzero_explicit(output, BLAKE2S_HASH_SIZE + 1); } -static void symmetric_key_init(struct noise_symmetric_key *key) -{ - spin_lock_init(&key->counter.receive.lock); - atomic64_set(&key->counter.counter, 0); - memset(key->counter.receive.backtrack, 0, - sizeof(key->counter.receive.backtrack)); - key->birthdate = ktime_get_coarse_boottime_ns(); - key->is_valid = true; -} - static void derive_keys(struct noise_symmetric_key *first_dst, struct noise_symmetric_key *second_dst, const u8 chaining_key[NOISE_HASH_LEN]) { + u64 birthdate = ktime_get_coarse_boottime_ns(); kdf(first_dst->key, second_dst->key, NULL, NULL, NOISE_SYMMETRIC_KEY_LEN, NOISE_SYMMETRIC_KEY_LEN, 0, 0, chaining_key); - symmetric_key_init(first_dst); - symmetric_key_init(second_dst); + first_dst->birthdate = second_dst->birthdate = birthdate; + first_dst->is_valid = second_dst->is_valid = true; } static bool __must_check mix_dh(u8 chaining_key[NOISE_HASH_LEN], @@ -715,6 +707,7 @@ wg_noise_handshake_consume_response(struct message_handshake_response *src, u8 e[NOISE_PUBLIC_KEY_LEN]; u8 ephemeral_private[NOISE_PUBLIC_KEY_LEN]; u8 static_private[NOISE_PUBLIC_KEY_LEN]; + u8 preshared_key[NOISE_SYMMETRIC_KEY_LEN]; down_read(&wg->static_identity.lock); @@ -733,6 +726,8 @@ wg_noise_handshake_consume_response(struct message_handshake_response *src, memcpy(chaining_key, handshake->chaining_key, NOISE_HASH_LEN); memcpy(ephemeral_private, handshake->ephemeral_private, NOISE_PUBLIC_KEY_LEN); + memcpy(preshared_key, handshake->preshared_key, + NOISE_SYMMETRIC_KEY_LEN); up_read(&handshake->lock); if (state != HANDSHAKE_CREATED_INITIATION) @@ -750,7 +745,7 @@ wg_noise_handshake_consume_response(struct message_handshake_response *src, goto fail; /* psk */ - mix_psk(chaining_key, hash, key, handshake->preshared_key); + mix_psk(chaining_key, hash, key, preshared_key); /* {} */ if (!message_decrypt(NULL, src->encrypted_nothing, @@ -783,6 +778,7 @@ out: memzero_explicit(chaining_key, NOISE_HASH_LEN); memzero_explicit(ephemeral_private, NOISE_PUBLIC_KEY_LEN); memzero_explicit(static_private, NOISE_PUBLIC_KEY_LEN); + memzero_explicit(preshared_key, NOISE_SYMMETRIC_KEY_LEN); up_read(&wg->static_identity.lock); return ret_peer; } diff --git a/drivers/net/wireguard/noise.h b/drivers/net/wireguard/noise.h index f532d59d3f19..c527253dba80 100644 --- a/drivers/net/wireguard/noise.h +++ b/drivers/net/wireguard/noise.h @@ -15,18 +15,14 @@ #include <linux/mutex.h> #include <linux/kref.h> -union noise_counter { - struct { - u64 counter; - unsigned long backtrack[COUNTER_BITS_TOTAL / BITS_PER_LONG]; - spinlock_t lock; - } receive; - atomic64_t counter; +struct noise_replay_counter { + u64 counter; + spinlock_t lock; + unsigned long backtrack[COUNTER_BITS_TOTAL / BITS_PER_LONG]; }; struct noise_symmetric_key { u8 key[NOISE_SYMMETRIC_KEY_LEN]; - union noise_counter counter; u64 birthdate; bool is_valid; }; @@ -34,7 +30,9 @@ struct 
noise_symmetric_key { struct noise_keypair { struct index_hashtable_entry entry; struct noise_symmetric_key sending; + atomic64_t sending_counter; struct noise_symmetric_key receiving; + struct noise_replay_counter receiving_counter; __le32 remote_index; bool i_am_the_initiator; struct kref refcount; diff --git a/drivers/net/wireguard/queueing.h b/drivers/net/wireguard/queueing.h index 3432232afe06..c58df439dbbe 100644 --- a/drivers/net/wireguard/queueing.h +++ b/drivers/net/wireguard/queueing.h @@ -87,12 +87,20 @@ static inline bool wg_check_packet_protocol(struct sk_buff *skb) return real_protocol && skb->protocol == real_protocol; } -static inline void wg_reset_packet(struct sk_buff *skb) +static inline void wg_reset_packet(struct sk_buff *skb, bool encapsulating) { + u8 l4_hash = skb->l4_hash; + u8 sw_hash = skb->sw_hash; + u32 hash = skb->hash; skb_scrub_packet(skb, true); memset(&skb->headers_start, 0, offsetof(struct sk_buff, headers_end) - offsetof(struct sk_buff, headers_start)); + if (encapsulating) { + skb->l4_hash = l4_hash; + skb->sw_hash = sw_hash; + skb->hash = hash; + } skb->queue_mapping = 0; skb->nohdr = 0; skb->peeked = 0; diff --git a/drivers/net/wireguard/receive.c b/drivers/net/wireguard/receive.c index 3bb5b9ae7cd1..91438144e4f7 100644 --- a/drivers/net/wireguard/receive.c +++ b/drivers/net/wireguard/receive.c @@ -245,20 +245,20 @@ static void keep_key_fresh(struct wg_peer *peer) } } -static bool decrypt_packet(struct sk_buff *skb, struct noise_symmetric_key *key) +static bool decrypt_packet(struct sk_buff *skb, struct noise_keypair *keypair) { struct scatterlist sg[MAX_SKB_FRAGS + 8]; struct sk_buff *trailer; unsigned int offset; int num_frags; - if (unlikely(!key)) + if (unlikely(!keypair)) return false; - if (unlikely(!READ_ONCE(key->is_valid) || - wg_birthdate_has_expired(key->birthdate, REJECT_AFTER_TIME) || - key->counter.receive.counter >= REJECT_AFTER_MESSAGES)) { - WRITE_ONCE(key->is_valid, false); + if (unlikely(!READ_ONCE(keypair->receiving.is_valid) || + wg_birthdate_has_expired(keypair->receiving.birthdate, REJECT_AFTER_TIME) || + keypair->receiving_counter.counter >= REJECT_AFTER_MESSAGES)) { + WRITE_ONCE(keypair->receiving.is_valid, false); return false; } @@ -283,7 +283,7 @@ static bool decrypt_packet(struct sk_buff *skb, struct noise_symmetric_key *key) if (!chacha20poly1305_decrypt_sg_inplace(sg, skb->len, NULL, 0, PACKET_CB(skb)->nonce, - key->key)) + keypair->receiving.key)) return false; /* Another ugly situation of pushing and pulling the header so as to @@ -298,41 +298,41 @@ static bool decrypt_packet(struct sk_buff *skb, struct noise_symmetric_key *key) } /* This is RFC6479, a replay detection bitmap algorithm that avoids bitshifts */ -static bool counter_validate(union noise_counter *counter, u64 their_counter) +static bool counter_validate(struct noise_replay_counter *counter, u64 their_counter) { unsigned long index, index_current, top, i; bool ret = false; - spin_lock_bh(&counter->receive.lock); + spin_lock_bh(&counter->lock); - if (unlikely(counter->receive.counter >= REJECT_AFTER_MESSAGES + 1 || + if (unlikely(counter->counter >= REJECT_AFTER_MESSAGES + 1 || their_counter >= REJECT_AFTER_MESSAGES)) goto out; ++their_counter; if (unlikely((COUNTER_WINDOW_SIZE + their_counter) < - counter->receive.counter)) + counter->counter)) goto out; index = their_counter >> ilog2(BITS_PER_LONG); - if (likely(their_counter > counter->receive.counter)) { - index_current = counter->receive.counter >> ilog2(BITS_PER_LONG); + if (likely(their_counter > 
counter->counter)) { + index_current = counter->counter >> ilog2(BITS_PER_LONG); top = min_t(unsigned long, index - index_current, COUNTER_BITS_TOTAL / BITS_PER_LONG); for (i = 1; i <= top; ++i) - counter->receive.backtrack[(i + index_current) & + counter->backtrack[(i + index_current) & ((COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1)] = 0; - counter->receive.counter = their_counter; + counter->counter = their_counter; } index &= (COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1; ret = !test_and_set_bit(their_counter & (BITS_PER_LONG - 1), - &counter->receive.backtrack[index]); + &counter->backtrack[index]); out: - spin_unlock_bh(&counter->receive.lock); + spin_unlock_bh(&counter->lock); return ret; } @@ -472,19 +472,19 @@ int wg_packet_rx_poll(struct napi_struct *napi, int budget) if (unlikely(state != PACKET_STATE_CRYPTED)) goto next; - if (unlikely(!counter_validate(&keypair->receiving.counter, + if (unlikely(!counter_validate(&keypair->receiving_counter, PACKET_CB(skb)->nonce))) { net_dbg_ratelimited("%s: Packet has invalid nonce %llu (max %llu)\n", peer->device->dev->name, PACKET_CB(skb)->nonce, - keypair->receiving.counter.receive.counter); + keypair->receiving_counter.counter); goto next; } if (unlikely(wg_socket_endpoint_from_skb(&endpoint, skb))) goto next; - wg_reset_packet(skb); + wg_reset_packet(skb, false); wg_packet_consume_data_done(peer, skb, &endpoint); free = false; @@ -511,8 +511,8 @@ void wg_packet_decrypt_worker(struct work_struct *work) struct sk_buff *skb; while ((skb = ptr_ring_consume_bh(&queue->ring)) != NULL) { - enum packet_state state = likely(decrypt_packet(skb, - &PACKET_CB(skb)->keypair->receiving)) ? + enum packet_state state = + likely(decrypt_packet(skb, PACKET_CB(skb)->keypair)) ? PACKET_STATE_CRYPTED : PACKET_STATE_DEAD; wg_queue_enqueue_per_peer_napi(skb, state); if (need_resched()) diff --git a/drivers/net/wireguard/selftest/counter.c b/drivers/net/wireguard/selftest/counter.c index f4fbb9072ed7..ec3c156bf91b 100644 --- a/drivers/net/wireguard/selftest/counter.c +++ b/drivers/net/wireguard/selftest/counter.c @@ -6,18 +6,24 @@ #ifdef DEBUG bool __init wg_packet_counter_selftest(void) { + struct noise_replay_counter *counter; unsigned int test_num = 0, i; - union noise_counter counter; bool success = true; -#define T_INIT do { \ - memset(&counter, 0, sizeof(union noise_counter)); \ - spin_lock_init(&counter.receive.lock); \ + counter = kmalloc(sizeof(*counter), GFP_KERNEL); + if (unlikely(!counter)) { + pr_err("nonce counter self-test malloc: FAIL\n"); + return false; + } + +#define T_INIT do { \ + memset(counter, 0, sizeof(*counter)); \ + spin_lock_init(&counter->lock); \ } while (0) #define T_LIM (COUNTER_WINDOW_SIZE + 1) #define T(n, v) do { \ ++test_num; \ - if (counter_validate(&counter, n) != (v)) { \ + if (counter_validate(counter, n) != (v)) { \ pr_err("nonce counter self-test %u: FAIL\n", \ test_num); \ success = false; \ @@ -99,6 +105,7 @@ bool __init wg_packet_counter_selftest(void) if (success) pr_info("nonce counter self-tests: pass\n"); + kfree(counter); return success; } #endif diff --git a/drivers/net/wireguard/send.c b/drivers/net/wireguard/send.c index 6687db699803..f74b9341ab0f 100644 --- a/drivers/net/wireguard/send.c +++ b/drivers/net/wireguard/send.c @@ -129,7 +129,7 @@ static void keep_key_fresh(struct wg_peer *peer) rcu_read_lock_bh(); keypair = rcu_dereference_bh(peer->keypairs.current_keypair); send = keypair && READ_ONCE(keypair->sending.is_valid) && - (atomic64_read(&keypair->sending.counter.counter) > REKEY_AFTER_MESSAGES || + 
(atomic64_read(&keypair->sending_counter) > REKEY_AFTER_MESSAGES || (keypair->i_am_the_initiator && wg_birthdate_has_expired(keypair->sending.birthdate, REKEY_AFTER_TIME))); rcu_read_unlock_bh(); @@ -167,6 +167,11 @@ static bool encrypt_packet(struct sk_buff *skb, struct noise_keypair *keypair) struct sk_buff *trailer; int num_frags; + /* Force hash calculation before encryption so that flow analysis is + * consistent over the inner packet. + */ + skb_get_hash(skb); + /* Calculate lengths. */ padding_len = calculate_skb_padding(skb); trailer_len = padding_len + noise_encrypted_len(0); @@ -295,7 +300,7 @@ void wg_packet_encrypt_worker(struct work_struct *work) skb_list_walk_safe(first, skb, next) { if (likely(encrypt_packet(skb, PACKET_CB(first)->keypair))) { - wg_reset_packet(skb); + wg_reset_packet(skb, true); } else { state = PACKET_STATE_DEAD; break; @@ -344,7 +349,6 @@ void wg_packet_purge_staged_packets(struct wg_peer *peer) void wg_packet_send_staged_packets(struct wg_peer *peer) { - struct noise_symmetric_key *key; struct noise_keypair *keypair; struct sk_buff_head packets; struct sk_buff *skb; @@ -364,10 +368,9 @@ void wg_packet_send_staged_packets(struct wg_peer *peer) rcu_read_unlock_bh(); if (unlikely(!keypair)) goto out_nokey; - key = &keypair->sending; - if (unlikely(!READ_ONCE(key->is_valid))) + if (unlikely(!READ_ONCE(keypair->sending.is_valid))) goto out_nokey; - if (unlikely(wg_birthdate_has_expired(key->birthdate, + if (unlikely(wg_birthdate_has_expired(keypair->sending.birthdate, REJECT_AFTER_TIME))) goto out_invalid; @@ -382,7 +385,7 @@ void wg_packet_send_staged_packets(struct wg_peer *peer) */ PACKET_CB(skb)->ds = ip_tunnel_ecn_encap(0, ip_hdr(skb), skb); PACKET_CB(skb)->nonce = - atomic64_inc_return(&key->counter.counter) - 1; + atomic64_inc_return(&keypair->sending_counter) - 1; if (unlikely(PACKET_CB(skb)->nonce >= REJECT_AFTER_MESSAGES)) goto out_invalid; } @@ -394,7 +397,7 @@ void wg_packet_send_staged_packets(struct wg_peer *peer) return; out_invalid: - WRITE_ONCE(key->is_valid, false); + WRITE_ONCE(keypair->sending.is_valid, false); out_nokey: wg_noise_keypair_put(keypair, false); diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig index 6b3ff02a373d..b99fd0eff994 100644 --- a/drivers/net/wireless/ath/ath10k/Kconfig +++ b/drivers/net/wireless/ath/ath10k/Kconfig @@ -28,11 +28,10 @@ config ATH10K_AHB This module adds support for AHB bus config ATH10K_SDIO - tristate "Atheros ath10k SDIO support (EXPERIMENTAL)" + tristate "Atheros ath10k SDIO support" depends on ATH10K && MMC ---help--- - This module adds experimental support for SDIO/MMC bus. Currently - work in progress and will not fully work. + This module adds support for SDIO/MMC bus. config ATH10K_USB tristate "Atheros ath10k USB support (EXPERIMENTAL)" @@ -42,7 +41,7 @@ config ATH10K_USB work in progress and will not fully work. 
config ATH10K_SNOC - tristate "Qualcomm ath10k SNOC support (EXPERIMENTAL)" + tristate "Qualcomm ath10k SNOC support" depends on ATH10K depends on ARCH_QCOM || COMPILE_TEST select QCOM_QMI_HELPERS diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h index 9711f0eb9117..75df79d43120 100644 --- a/drivers/net/wireless/ath/ath10k/ce.h +++ b/drivers/net/wireless/ath/ath10k/ce.h @@ -110,7 +110,7 @@ struct ath10k_ce_ring { struct ce_desc_64 *shadow_base; /* keep last */ - void *per_transfer_context[0]; + void *per_transfer_context[]; }; struct ath10k_ce_pipe { diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index ceac76553b8f..5c18f6c20462 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -1262,7 +1262,7 @@ struct ath10k { int coex_gpio_pin; /* must be last */ - u8 drv_priv[0] __aligned(sizeof(void *)); + u8 drv_priv[] __aligned(sizeof(void *)); }; static inline bool ath10k_peer_stats_enabled(struct ath10k *ar) diff --git a/drivers/net/wireless/ath/ath10k/coredump.h b/drivers/net/wireless/ath/ath10k/coredump.h index 8bf03e8c1d3a..e760ce1a5f1e 100644 --- a/drivers/net/wireless/ath/ath10k/coredump.h +++ b/drivers/net/wireless/ath/ath10k/coredump.h @@ -88,7 +88,7 @@ struct ath10k_dump_file_data { u8 unused[128]; /* struct ath10k_tlv_dump_data + more */ - u8 data[0]; + u8 data[]; } __packed; struct ath10k_dump_ram_data_hdr { @@ -100,7 +100,7 @@ struct ath10k_dump_ram_data_hdr { /* length of payload data, not including this header */ __le32 length; - u8 data[0]; + u8 data[]; }; /* magic number to fill the holes not copied due to sections in regions */ diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h index 4cbfd9279d6f..997c1c80aba7 100644 --- a/drivers/net/wireless/ath/ath10k/debug.h +++ b/drivers/net/wireless/ath/ath10k/debug.h @@ -65,7 +65,7 @@ struct ath10k_pktlog_hdr { __le16 log_type; /* Type of log information foll this header */ __le16 size; /* Size of variable length log information in bytes */ __le32 timestamp; - u8 payload[0]; + u8 payload[]; } __packed; /* FIXME: How to calculate the buffer size sanely? */ diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h index 8f3710cf28f4..cad59494f175 100644 --- a/drivers/net/wireless/ath/ath10k/htt.h +++ b/drivers/net/wireless/ath/ath10k/htt.h @@ -289,12 +289,12 @@ struct htt_rx_ring_setup_hdr { struct htt_rx_ring_setup_32 { struct htt_rx_ring_setup_hdr hdr; - struct htt_rx_ring_setup_ring32 rings[0]; + struct htt_rx_ring_setup_ring32 rings[]; } __packed; struct htt_rx_ring_setup_64 { struct htt_rx_ring_setup_hdr hdr; - struct htt_rx_ring_setup_ring64 rings[0]; + struct htt_rx_ring_setup_ring64 rings[]; } __packed; /* @@ -732,7 +732,7 @@ struct htt_rx_indication { * %mpdu_ranges starts after &%prefix + roundup(%fw_rx_desc_bytes, 4) * and has %num_mpdu_ranges elements. 
*/ - struct htt_rx_indication_mpdu_range mpdu_ranges[0]; + struct htt_rx_indication_mpdu_range mpdu_ranges[]; } __packed; /* High latency version of the RX indication */ @@ -741,7 +741,7 @@ struct htt_rx_indication_hl { struct htt_rx_indication_ppdu ppdu; struct htt_rx_indication_prefix prefix; struct fw_rx_desc_hl fw_desc; - struct htt_rx_indication_mpdu_range mpdu_ranges[0]; + struct htt_rx_indication_mpdu_range mpdu_ranges[]; } __packed; struct htt_hl_rx_desc { @@ -908,7 +908,7 @@ struct htt_append_retries { struct htt_data_tx_completion_ext { struct htt_append_retries a_retries; __le32 t_stamp; - __le16 msdus_rssi[0]; + __le16 msdus_rssi[]; } __packed; /** @@ -992,7 +992,7 @@ struct htt_data_tx_completion { } __packed; u8 num_msdus; u8 flags2; /* HTT_TX_CMPL_FLAG_DATA_RSSI */ - __le16 msdus[0]; /* variable length based on %num_msdus */ + __le16 msdus[]; /* variable length based on %num_msdus */ } __packed; #define HTT_TX_PPDU_DUR_INFO0_PEER_ID_MASK GENMASK(15, 0) @@ -1007,7 +1007,7 @@ struct htt_data_tx_ppdu_dur { struct htt_data_tx_compl_ppdu_dur { __le32 info0; /* HTT_TX_COMPL_PPDU_DUR_INFO0_ */ - struct htt_data_tx_ppdu_dur ppdu_dur[0]; + struct htt_data_tx_ppdu_dur ppdu_dur[]; } __packed; struct htt_tx_compl_ind_base { @@ -1033,7 +1033,7 @@ struct htt_rc_update { u8 addr[6]; u8 num_elems; u8 rsvd0; - struct htt_rc_tx_done_params params[0]; /* variable length %num_elems */ + struct htt_rc_tx_done_params params[]; /* variable length %num_elems */ } __packed; /* see htt_rx_indication for similar fields and descriptions */ @@ -1050,7 +1050,7 @@ struct htt_rx_fragment_indication { __le16 fw_rx_desc_bytes; __le16 rsvd0; - u8 fw_msdu_rx_desc[0]; + u8 fw_msdu_rx_desc[]; } __packed; #define ATH10K_IEEE80211_EXTIV BIT(5) @@ -1075,7 +1075,7 @@ struct htt_rx_pn_ind { u8 seqno_end; u8 pn_ie_count; u8 reserved; - u8 pn_ies[0]; + u8 pn_ies[]; } __packed; struct htt_rx_offload_msdu { @@ -1084,7 +1084,7 @@ struct htt_rx_offload_msdu { u8 vdev_id; u8 tid; u8 fw_desc; - u8 payload[0]; + u8 payload[]; } __packed; struct htt_rx_offload_ind { @@ -1167,7 +1167,7 @@ struct htt_rx_test { * a) num_ints * sizeof(__le32) * b) num_chars * sizeof(u8) aligned to 4bytes */ - u8 payload[0]; + u8 payload[]; } __packed; static inline __le32 *htt_rx_test_get_ints(struct htt_rx_test *rx_test) @@ -1201,7 +1201,7 @@ static inline u8 *htt_rx_test_get_chars(struct htt_rx_test *rx_test) */ struct htt_pktlog_msg { u8 pad[3]; - u8 payload[0]; + u8 payload[]; } __packed; struct htt_dbg_stats_rx_reorder_stats { @@ -1490,7 +1490,7 @@ struct htt_stats_conf_item { } __packed; u8 pad; __le16 length; - u8 payload[0]; /* roundup(length, 4) long */ + u8 payload[]; /* roundup(length, 4) long */ } __packed; struct htt_stats_conf { @@ -1499,7 +1499,7 @@ struct htt_stats_conf { __le32 cookie_msb; /* each item has variable length! 
*/ - struct htt_stats_conf_item items[0]; + struct htt_stats_conf_item items[]; } __packed; static inline struct htt_stats_conf_item *htt_stats_conf_next_item( @@ -1673,8 +1673,8 @@ struct htt_tx_fetch_ind { __le32 token; __le16 num_resp_ids; __le16 num_records; - struct htt_tx_fetch_record records[0]; __le32 resp_ids[0]; /* ath10k_htt_get_tx_fetch_ind_resp_ids() */ + struct htt_tx_fetch_record records[]; } __packed; static inline void * @@ -1689,13 +1689,13 @@ struct htt_tx_fetch_resp { __le16 fetch_seq_num; __le16 num_records; __le32 token; - struct htt_tx_fetch_record records[0]; + struct htt_tx_fetch_record records[]; } __packed; struct htt_tx_fetch_confirm { u8 pad0; __le16 num_resp_ids; - __le32 resp_ids[0]; + __le32 resp_ids[]; } __packed; enum htt_tx_mode_switch_mode { @@ -1727,7 +1727,7 @@ struct htt_tx_mode_switch_ind { __le16 info0; /* HTT_TX_MODE_SWITCH_IND_INFO0_ */ __le16 info1; /* HTT_TX_MODE_SWITCH_IND_INFO1_ */ u8 pad1[2]; - struct htt_tx_mode_switch_record records[0]; + struct htt_tx_mode_switch_record records[]; } __packed; struct htt_channel_change { @@ -1757,7 +1757,7 @@ struct htt_peer_tx_stats { u8 num_ppdu; u8 ppdu_len; u8 version; - u8 payload[0]; + u8 payload[]; } __packed; #define ATH10K_10_2_TX_STATS_OFFSET 136 @@ -2206,7 +2206,7 @@ struct htt_rx_desc { struct rx_ppdu_end ppdu_end; } __packed; u8 rx_hdr_status[RX_HTT_HDR_STATUS_LEN]; - u8 msdu_payload[0]; + u8 msdu_payload[]; }; #define HTT_RX_DESC_HL_INFO_SEQ_NUM_MASK 0x00000fff diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index d9907a4648a8..f16edcb9f326 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -165,7 +165,7 @@ enum qca9377_chip_id_rev { struct ath10k_fw_ie { __le32 id; __le32 len; - u8 data[0]; + u8 data[]; }; enum ath10k_fw_ie_type { diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 91f5444ecedb..919d15584d4a 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -3967,6 +3967,9 @@ void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work) if (ret) { ath10k_warn(ar, "failed to transmit management frame by ref via WMI: %d\n", ret); + /* remove this msdu from idr tracking */ + ath10k_wmi_cleanup_mgmt_tx_send(ar, skb); + dma_unmap_single(ar->dev, paddr, skb->len, DMA_TO_DEVICE); ieee80211_free_txskb(ar->hw, skb); diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h index e3cbd259a2dc..862d0901c5b8 100644 --- a/drivers/net/wireless/ath/ath10k/pci.h +++ b/drivers/net/wireless/ath/ath10k/pci.h @@ -178,15 +178,16 @@ struct ath10k_pci { */ u32 (*targ_cpu_to_ce_addr)(struct ath10k *ar, u32 addr); + struct ce_attr *attr; + struct ce_pipe_config *pipe_config; + struct ce_service_to_pipe *serv_to_pipe; + /* Keep this entry in the last, memory for struct ath10k_ahb is * allocated (ahb support enabled case) in the continuation of * this struct. 
*/ - struct ath10k_ahb ahb[0]; + struct ath10k_ahb ahb[]; - struct ce_attr *attr; - struct ce_pipe_config *pipe_config; - struct ce_service_to_pipe *serv_to_pipe; }; static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar) diff --git a/drivers/net/wireless/ath/ath10k/qmi.c b/drivers/net/wireless/ath/ath10k/qmi.c index 5ae829b46c3d..5468a41e928e 100644 --- a/drivers/net/wireless/ath/ath10k/qmi.c +++ b/drivers/net/wireless/ath/ath10k/qmi.c @@ -961,7 +961,16 @@ static void ath10k_qmi_del_server(struct qmi_handle *qmi_hdl, container_of(qmi_hdl, struct ath10k_qmi, qmi_hdl); qmi->fw_ready = false; - ath10k_qmi_driver_event_post(qmi, ATH10K_QMI_EVENT_SERVER_EXIT, NULL); + + /* + * The del_server event is to be processed only if coming from + * the qmi server. The qmi infrastructure sends del_server, when + * any client releases the qmi handle. In this case do not process + * this del_server event. + */ + if (qmi->state == ATH10K_QMI_STATE_INIT_DONE) + ath10k_qmi_driver_event_post(qmi, ATH10K_QMI_EVENT_SERVER_EXIT, + NULL); } static struct qmi_ops ath10k_qmi_ops = { @@ -1046,6 +1055,7 @@ int ath10k_qmi_init(struct ath10k *ar, u32 msa_size) if (ret) goto err_qmi_lookup; + qmi->state = ATH10K_QMI_STATE_INIT_DONE; return 0; err_qmi_lookup: @@ -1064,6 +1074,7 @@ int ath10k_qmi_deinit(struct ath10k *ar) struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); struct ath10k_qmi *qmi = ar_snoc->qmi; + qmi->state = ATH10K_QMI_STATE_DEINIT; qmi_handle_release(&qmi->qmi_hdl); cancel_work_sync(&qmi->event_work); destroy_workqueue(qmi->event_wq); diff --git a/drivers/net/wireless/ath/ath10k/qmi.h b/drivers/net/wireless/ath/ath10k/qmi.h index 450be18b60ad..89464239fe96 100644 --- a/drivers/net/wireless/ath/ath10k/qmi.h +++ b/drivers/net/wireless/ath/ath10k/qmi.h @@ -83,6 +83,11 @@ struct ath10k_qmi_driver_event { void *data; }; +enum ath10k_qmi_state { + ATH10K_QMI_STATE_INIT_DONE, + ATH10K_QMI_STATE_DEINIT, +}; + struct ath10k_qmi { struct ath10k *ar; struct qmi_handle qmi_hdl; @@ -102,6 +107,7 @@ struct ath10k_qmi { char fw_build_timestamp[MAX_TIMESTAMP_LEN + 1]; struct ath10k_qmi_cal_data cal_data[MAX_NUM_CAL_V01]; bool msa_fixed_perm; + enum ath10k_qmi_state state; }; int ath10k_qmi_wlan_enable(struct ath10k *ar, @@ -109,7 +115,6 @@ int ath10k_qmi_wlan_enable(struct ath10k *ar, enum wlfw_driver_mode_enum_v01 mode, const char *version); int ath10k_qmi_wlan_disable(struct ath10k *ar); -int ath10k_qmi_register_service_notifier(struct notifier_block *nb); int ath10k_qmi_init(struct ath10k *ar, u32 msa_size); int ath10k_qmi_deinit(struct ath10k *ar); int ath10k_qmi_set_fw_log_mode(struct ath10k *ar, u8 fw_log_mode); diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h index 6b730f59fd5b..0dd484f85082 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-ops.h +++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h @@ -140,6 +140,7 @@ struct wmi_ops { struct sk_buff *(*gen_mgmt_tx_send)(struct ath10k *ar, struct sk_buff *skb, dma_addr_t paddr); + int (*cleanup_mgmt_tx_send)(struct ath10k *ar, struct sk_buff *msdu); struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u64 module_enable, u32 log_level); struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter); @@ -449,6 +450,15 @@ ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar) } static inline int +ath10k_wmi_cleanup_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu) +{ + if (!ar->wmi.ops->cleanup_mgmt_tx_send) + return -EOPNOTSUPP; + + return ar->wmi.ops->cleanup_mgmt_tx_send(ar, msdu); +} + +static inline 
int ath10k_wmi_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu, dma_addr_t paddr) { diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c index 9187b62b331c..932266d1111b 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c @@ -3010,12 +3010,24 @@ ath10k_wmi_tlv_op_gen_request_peer_stats_info(struct ath10k *ar, if (type == WMI_REQUEST_ONE_PEER_STATS_INFO) ether_addr_copy(cmd->peer_macaddr.addr, addr); - cmd->reset_after_request = reset; + cmd->reset_after_request = __cpu_to_le32(reset); ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv request peer stats info\n"); return skb; } static int +ath10k_wmi_tlv_op_cleanup_mgmt_tx_send(struct ath10k *ar, + struct sk_buff *msdu) +{ + struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu); + struct ath10k_wmi *wmi = &ar->wmi; + + idr_remove(&wmi->mgmt_pending_tx, cb->msdu_id); + + return 0; +} + +static int ath10k_wmi_mgmt_tx_alloc_msdu_id(struct ath10k *ar, struct sk_buff *skb, dma_addr_t paddr) { @@ -3089,6 +3101,8 @@ ath10k_wmi_tlv_op_gen_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu, if (desc_id < 0) goto err_free_skb; + cb->msdu_id = desc_id; + ptr = (void *)skb->data; tlv = ptr; tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_MGMT_TX_CMD); @@ -4540,6 +4554,7 @@ static const struct wmi_ops wmi_tlv_ops = { .gen_force_fw_hang = ath10k_wmi_tlv_op_gen_force_fw_hang, /* .gen_mgmt_tx = not implemented; HTT is used */ .gen_mgmt_tx_send = ath10k_wmi_tlv_op_gen_mgmt_tx_send, + .cleanup_mgmt_tx_send = ath10k_wmi_tlv_op_cleanup_mgmt_tx_send, .gen_dbglog_cfg = ath10k_wmi_tlv_op_gen_dbglog_cfg, .gen_pktlog_enable = ath10k_wmi_tlv_op_gen_pktlog_enable, .gen_pktlog_disable = ath10k_wmi_tlv_op_gen_pktlog_disable, diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h index 6e0537dabd1d..e77b97ca5c7f 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h @@ -1637,7 +1637,7 @@ wmi_tlv_svc_map_ext(const __le32 *in, unsigned long *out, size_t len) struct wmi_tlv { __le16 len; __le16 tag; - u8 value[0]; + u8 value[]; } __packed; struct ath10k_mgmt_tx_pkt_addr { @@ -2037,7 +2037,7 @@ struct wmi_tlv_bcn_tx_status_ev { struct wmi_tlv_bcn_prb_info { __le32 caps; __le32 erp; - u8 ies[0]; + u8 ies[]; } __packed; struct wmi_tlv_bcn_tmpl_cmd { @@ -2068,7 +2068,7 @@ struct wmi_tlv_diag_item { __le16 len; __le32 timestamp; __le32 code; - u8 payload[0]; + u8 payload[]; } __packed; struct wmi_tlv_diag_data_ev { diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index 0f05405bebc0..511144b36231 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -2292,7 +2292,7 @@ struct wmi_service_ready_event { * where FW can access this memory directly (or) by DMA. 
*/ __le32 num_mem_reqs; - struct wlan_host_mem_req mem_reqs[0]; + struct wlan_host_mem_req mem_reqs[]; } __packed; /* This is the definition from 10.X firmware branch */ @@ -2331,7 +2331,7 @@ struct wmi_10x_service_ready_event { */ __le32 num_mem_reqs; - struct wlan_host_mem_req mem_reqs[0]; + struct wlan_host_mem_req mem_reqs[]; } __packed; #define WMI_SERVICE_READY_TIMEOUT_HZ (5 * HZ) @@ -3086,19 +3086,19 @@ struct wmi_chan_list_entry { struct wmi_chan_list { __le32 tag; /* WMI_CHAN_LIST_TAG */ __le32 num_chan; - struct wmi_chan_list_entry channel_list[0]; + struct wmi_chan_list_entry channel_list[]; } __packed; struct wmi_bssid_list { __le32 tag; /* WMI_BSSID_LIST_TAG */ __le32 num_bssid; - struct wmi_mac_addr bssid_list[0]; + struct wmi_mac_addr bssid_list[]; } __packed; struct wmi_ie_data { __le32 tag; /* WMI_IE_TAG */ __le32 ie_len; - u8 ie_data[0]; + u8 ie_data[]; } __packed; struct wmi_ssid { @@ -3109,7 +3109,7 @@ struct wmi_ssid { struct wmi_ssid_list { __le32 tag; /* WMI_SSID_LIST_TAG */ __le32 num_ssids; - struct wmi_ssid ssids[0]; + struct wmi_ssid ssids[]; } __packed; /* prefix used by scan requestor ids on the host */ @@ -3311,7 +3311,7 @@ struct wmi_stop_scan_arg { struct wmi_scan_chan_list_cmd { __le32 num_scan_chans; - struct wmi_channel chan_info[0]; + struct wmi_channel chan_info[]; } __packed; struct wmi_scan_chan_list_arg { @@ -3395,12 +3395,12 @@ struct wmi_mgmt_rx_hdr_v2 { struct wmi_mgmt_rx_event_v1 { struct wmi_mgmt_rx_hdr_v1 hdr; - u8 buf[0]; + u8 buf[]; } __packed; struct wmi_mgmt_rx_event_v2 { struct wmi_mgmt_rx_hdr_v2 hdr; - u8 buf[0]; + u8 buf[]; } __packed; struct wmi_10_4_mgmt_rx_hdr { @@ -3415,7 +3415,7 @@ struct wmi_10_4_mgmt_rx_hdr { struct wmi_10_4_mgmt_rx_event { struct wmi_10_4_mgmt_rx_hdr hdr; - u8 buf[0]; + u8 buf[]; } __packed; struct wmi_mgmt_rx_ext_info { @@ -3455,14 +3455,14 @@ struct wmi_phyerr { __le32 rssi_chains[4]; __le16 nf_chains[4]; __le32 buf_len; - u8 buf[0]; + u8 buf[]; } __packed; struct wmi_phyerr_event { __le32 num_phyerrs; __le32 tsf_l32; __le32 tsf_u32; - struct wmi_phyerr phyerrs[0]; + struct wmi_phyerr phyerrs[]; } __packed; struct wmi_10_4_phyerr_event { @@ -3479,7 +3479,7 @@ struct wmi_10_4_phyerr_event { __le32 phy_err_mask[2]; __le32 tsf_timestamp; __le32 buf_len; - u8 buf[0]; + u8 buf[]; } __packed; struct wmi_radar_found_info { @@ -3592,7 +3592,7 @@ struct wmi_mgmt_tx_hdr { struct wmi_mgmt_tx_cmd { struct wmi_mgmt_tx_hdr hdr; - u8 buf[0]; + u8 buf[]; } __packed; struct wmi_echo_event { @@ -4628,7 +4628,7 @@ struct wmi_stats_event { * By having a zero sized array, the pointer to data area * becomes available without increasing the struct size */ - u8 data[0]; + u8 data[]; } __packed; struct wmi_10_2_stats_event { @@ -4638,7 +4638,7 @@ struct wmi_10_2_stats_event { __le32 num_vdev_stats; __le32 num_peer_stats; __le32 num_bcnflt_stats; - u8 data[0]; + u8 data[]; } __packed; /* @@ -5033,7 +5033,7 @@ struct wmi_vdev_install_key_cmd { __le32 key_rxmic_len; /* contains key followed by tx mic followed by rx mic */ - u8 key_data[0]; + u8 key_data[]; } __packed; struct wmi_vdev_install_key_arg { @@ -5703,7 +5703,7 @@ struct wmi_bcn_tx_hdr { struct wmi_bcn_tx_cmd { struct wmi_bcn_tx_hdr hdr; - u8 *bcn[0]; + u8 *bcn[]; } __packed; struct wmi_bcn_tx_arg { @@ -6120,7 +6120,7 @@ struct wmi_bcn_info { struct wmi_host_swba_event { __le32 vdev_map; - struct wmi_bcn_info bcn_info[0]; + struct wmi_bcn_info bcn_info[]; } __packed; struct wmi_10_2_4_bcn_info { @@ -6130,7 +6130,7 @@ struct wmi_10_2_4_bcn_info { struct wmi_10_2_4_host_swba_event 
{ __le32 vdev_map; - struct wmi_10_2_4_bcn_info bcn_info[0]; + struct wmi_10_2_4_bcn_info bcn_info[]; } __packed; /* 16 words = 512 client + 1 word = for guard */ @@ -6171,7 +6171,7 @@ struct wmi_10_4_bcn_info { struct wmi_10_4_host_swba_event { __le32 vdev_map; - struct wmi_10_4_bcn_info bcn_info[0]; + struct wmi_10_4_bcn_info bcn_info[]; } __packed; #define WMI_MAX_AP_VDEV 16 diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c index 3b2b76d602f2..30092841ac46 100644 --- a/drivers/net/wireless/ath/ath11k/ahb.c +++ b/drivers/net/wireless/ath/ath11k/ahb.c @@ -10,6 +10,7 @@ #include <linux/dma-mapping.h> #include "ahb.h" #include "debug.h" +#include "hif.h" #include <linux/remoteproc.h> static const struct of_device_id ath11k_ahb_of_match[] = { @@ -434,6 +435,16 @@ enum ext_irq_num { tcl2host_status_ring, }; +static inline u32 ath11k_ahb_read32(struct ath11k_base *ab, u32 offset) +{ + return ioread32(ab->mem + offset); +} + +static inline void ath11k_ahb_write32(struct ath11k_base *ab, u32 offset, u32 value) +{ + iowrite32(value, ab->mem + offset); +} + static void ath11k_ahb_kill_tasklets(struct ath11k_base *ab) { int i; @@ -575,7 +586,7 @@ static void ath11k_ahb_ce_irqs_disable(struct ath11k_base *ab) } } -int ath11k_ahb_start(struct ath11k_base *ab) +static int ath11k_ahb_start(struct ath11k_base *ab) { ath11k_ahb_ce_irqs_enable(ab); ath11k_ce_rx_post_buf(ab); @@ -583,7 +594,7 @@ int ath11k_ahb_start(struct ath11k_base *ab) return 0; } -void ath11k_ahb_ext_irq_enable(struct ath11k_base *ab) +static void ath11k_ahb_ext_irq_enable(struct ath11k_base *ab) { int i; @@ -595,13 +606,13 @@ void ath11k_ahb_ext_irq_enable(struct ath11k_base *ab) } } -void ath11k_ahb_ext_irq_disable(struct ath11k_base *ab) +static void ath11k_ahb_ext_irq_disable(struct ath11k_base *ab) { __ath11k_ahb_ext_irq_disable(ab); ath11k_ahb_sync_ext_irqs(ab); } -void ath11k_ahb_stop(struct ath11k_base *ab) +static void ath11k_ahb_stop(struct ath11k_base *ab) { if (!test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags)) ath11k_ahb_ce_irqs_disable(ab); @@ -611,7 +622,7 @@ void ath11k_ahb_stop(struct ath11k_base *ab) ath11k_ce_cleanup_pipes(ab); } -int ath11k_ahb_power_up(struct ath11k_base *ab) +static int ath11k_ahb_power_up(struct ath11k_base *ab) { int ret; @@ -622,7 +633,7 @@ int ath11k_ahb_power_up(struct ath11k_base *ab) return ret; } -void ath11k_ahb_power_down(struct ath11k_base *ab) +static void ath11k_ahb_power_down(struct ath11k_base *ab) { rproc_shutdown(ab->tgt_rproc); } @@ -834,8 +845,8 @@ static int ath11k_ahb_config_irq(struct ath11k_base *ab) return ret; } -int ath11k_ahb_map_service_to_pipe(struct ath11k_base *ab, u16 service_id, - u8 *ul_pipe, u8 *dl_pipe) +static int ath11k_ahb_map_service_to_pipe(struct ath11k_base *ab, u16 service_id, + u8 *ul_pipe, u8 *dl_pipe) { const struct service_to_pipe *entry; bool ul_set = false, dl_set = false; @@ -877,6 +888,18 @@ int ath11k_ahb_map_service_to_pipe(struct ath11k_base *ab, u16 service_id, return 0; } +static const struct ath11k_hif_ops ath11k_ahb_hif_ops = { + .start = ath11k_ahb_start, + .stop = ath11k_ahb_stop, + .read32 = ath11k_ahb_read32, + .write32 = ath11k_ahb_write32, + .irq_enable = ath11k_ahb_ext_irq_enable, + .irq_disable = ath11k_ahb_ext_irq_disable, + .map_service_to_pipe = ath11k_ahb_map_service_to_pipe, + .power_down = ath11k_ahb_power_down, + .power_up = ath11k_ahb_power_up, +}; + static int ath11k_ahb_probe(struct platform_device *pdev) { struct ath11k_base *ab; @@ -891,13 +914,7 @@ static int 
ath11k_ahb_probe(struct platform_device *pdev) return -EINVAL; } - mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!mem_res) { - dev_err(&pdev->dev, "failed to get IO memory resource\n"); - return -ENXIO; - } - - mem = devm_ioremap_resource(&pdev->dev, mem_res); + mem = devm_platform_get_and_ioremap_resource(pdev, 0, &mem_res); if (IS_ERR(mem)) { dev_err(&pdev->dev, "ioremap error\n"); return PTR_ERR(mem); @@ -909,12 +926,13 @@ static int ath11k_ahb_probe(struct platform_device *pdev) return ret; } - ab = ath11k_core_alloc(&pdev->dev); + ab = ath11k_core_alloc(&pdev->dev, 0, ATH11K_BUS_AHB); if (!ab) { dev_err(&pdev->dev, "failed to allocate ath11k base\n"); return -ENOMEM; } + ab->hif.ops = &ath11k_ahb_hif_ops; ab->pdev = pdev; ab->hw_rev = (enum ath11k_hw_rev)of_id->data; ab->mem = mem; @@ -993,12 +1011,17 @@ static struct platform_driver ath11k_ahb_driver = { .remove = ath11k_ahb_remove, }; -int ath11k_ahb_init(void) +static int ath11k_ahb_init(void) { return platform_driver_register(&ath11k_ahb_driver); } +module_init(ath11k_ahb_init); -void ath11k_ahb_exit(void) +static void ath11k_ahb_exit(void) { platform_driver_unregister(&ath11k_ahb_driver); } +module_exit(ath11k_ahb_exit); + +MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11ax wireless chip"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/ath/ath11k/ahb.h b/drivers/net/wireless/ath/ath11k/ahb.h index 93f46dfe22df..6c7b26ac6545 100644 --- a/drivers/net/wireless/ath/ath11k/ahb.h +++ b/drivers/net/wireless/ath/ath11k/ahb.h @@ -10,26 +10,4 @@ #define ATH11K_AHB_RECOVERY_TIMEOUT (3 * HZ) struct ath11k_base; -static inline u32 ath11k_ahb_read32(struct ath11k_base *ab, u32 offset) -{ - return ioread32(ab->mem + offset); -} - -static inline void ath11k_ahb_write32(struct ath11k_base *ab, u32 offset, u32 value) -{ - iowrite32(value, ab->mem + offset); -} - -void ath11k_ahb_ext_irq_enable(struct ath11k_base *ab); -void ath11k_ahb_ext_irq_disable(struct ath11k_base *ab); -int ath11k_ahb_start(struct ath11k_base *ab); -void ath11k_ahb_stop(struct ath11k_base *ab); -int ath11k_ahb_power_up(struct ath11k_base *ab); -void ath11k_ahb_power_down(struct ath11k_base *ab); -int ath11k_ahb_map_service_to_pipe(struct ath11k_base *ab, u16 service_id, - u8 *ul_pipe, u8 *dl_pipe); - -int ath11k_ahb_init(void); -void ath11k_ahb_exit(void); - #endif diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c index bf5657d2ae18..02501cc154fe 100644 --- a/drivers/net/wireless/ath/ath11k/core.c +++ b/drivers/net/wireless/ath/ath11k/core.c @@ -7,11 +7,11 @@ #include <linux/slab.h> #include <linux/remoteproc.h> #include <linux/firmware.h> -#include "ahb.h" #include "core.h" #include "dp_tx.h" #include "dp_rx.h" #include "debug.h" +#include "hif.h" unsigned int ath11k_debug_mask; module_param_named(debug_mask, ath11k_debug_mask, uint, 0644); @@ -41,6 +41,7 @@ u8 ath11k_core_get_hw_mac_id(struct ath11k_base *ab, int pdev_idx) return ATH11K_INVALID_HW_MAC_ID; } } +EXPORT_SYMBOL(ath11k_core_get_hw_mac_id); static int ath11k_core_create_board_name(struct ath11k_base *ab, char *name, size_t name_len) @@ -324,7 +325,7 @@ static void ath11k_core_stop(struct ath11k_base *ab) { if (!test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags)) ath11k_qmi_firmware_stop(ab); - ath11k_ahb_stop(ab); + ath11k_hif_stop(ab); ath11k_wmi_detach(ab); ath11k_dp_pdev_reo_cleanup(ab); @@ -347,7 +348,7 @@ static int ath11k_core_soc_create(struct ath11k_base *ab) goto err_qmi_deinit; } - ret = 
ath11k_ahb_power_up(ab); + ret = ath11k_hif_power_up(ab); if (ret) { ath11k_err(ab, "failed to power up :%d\n", ret); goto err_debugfs_reg; @@ -415,7 +416,7 @@ static void ath11k_core_pdev_destroy(struct ath11k_base *ab) { ath11k_thermal_unregister(ab); ath11k_mac_unregister(ab); - ath11k_ahb_ext_irq_disable(ab); + ath11k_hif_irq_disable(ab); ath11k_dp_pdev_free(ab); ath11k_debug_pdev_destroy(ab); } @@ -443,7 +444,7 @@ static int ath11k_core_start(struct ath11k_base *ab, goto err_wmi_detach; } - ret = ath11k_ahb_start(ab); + ret = ath11k_hif_start(ab); if (ret) { ath11k_err(ab, "failed to start HIF: %d\n", ret); goto err_wmi_detach; @@ -522,7 +523,7 @@ err_reo_cleanup: err_mac_destroy: ath11k_mac_destroy(ab); err_hif_stop: - ath11k_ahb_stop(ab); + ath11k_hif_stop(ab); err_wmi_detach: ath11k_wmi_detach(ab); err_firmware_stop: @@ -559,7 +560,7 @@ int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab) ath11k_err(ab, "failed to create pdev core: %d\n", ret); goto err_core_stop; } - ath11k_ahb_ext_irq_enable(ab); + ath11k_hif_irq_enable(ab); mutex_unlock(&ab->core_lock); return 0; @@ -579,9 +580,9 @@ static int ath11k_core_reconfigure_on_crash(struct ath11k_base *ab) mutex_lock(&ab->core_lock); ath11k_thermal_unregister(ab); - ath11k_ahb_ext_irq_disable(ab); + ath11k_hif_irq_disable(ab); ath11k_dp_pdev_free(ab); - ath11k_ahb_stop(ab); + ath11k_hif_stop(ab); ath11k_wmi_detach(ab); ath11k_dp_pdev_reo_cleanup(ab); mutex_unlock(&ab->core_lock); @@ -744,7 +745,7 @@ void ath11k_core_deinit(struct ath11k_base *ab) mutex_unlock(&ab->core_lock); - ath11k_ahb_power_down(ab); + ath11k_hif_power_down(ab); ath11k_mac_destroy(ab); ath11k_core_soc_destroy(ab); } @@ -754,11 +755,12 @@ void ath11k_core_free(struct ath11k_base *ab) kfree(ab); } -struct ath11k_base *ath11k_core_alloc(struct device *dev) +struct ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size, + enum ath11k_bus bus) { struct ath11k_base *ab; - ab = kzalloc(sizeof(*ab), GFP_KERNEL); + ab = kzalloc(sizeof(*ab) + priv_size, GFP_KERNEL); if (!ab) return NULL; @@ -784,24 +786,3 @@ err_sc_free: kfree(ab); return NULL; } - -static int __init ath11k_init(void) -{ - int ret; - - ret = ath11k_ahb_init(); - if (ret) - printk(KERN_ERR "failed to register ath11k ahb driver: %d\n", - ret); - return ret; -} -module_init(ath11k_init); - -static void __exit ath11k_exit(void) -{ - ath11k_ahb_exit(); -} -module_exit(ath11k_exit); - -MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11ax wireless chip"); -MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h index 70ec544eee67..e04f0e711779 100644 --- a/drivers/net/wireless/ath/ath11k/core.h +++ b/drivers/net/wireless/ath/ath11k/core.h @@ -607,7 +607,9 @@ struct ath11k_base { void __iomem *mem; unsigned long mem_len; - const struct ath11k_hif_ops *hif_ops; + struct { + const struct ath11k_hif_ops *ops; + } hif; struct ath11k_ce ce; struct timer_list rx_replenish_retry; @@ -665,6 +667,9 @@ struct ath11k_base { /* Round robbin based TCL ring selector */ atomic_t tcl_ring_selector; + + /* must be last */ + u8 drv_priv[0] __aligned(sizeof(void *)); }; struct ath11k_fw_stats_pdev { @@ -801,7 +806,8 @@ struct ath11k_peer *ath11k_peer_find_by_id(struct ath11k_base *ab, int peer_id); int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab); int ath11k_core_init(struct ath11k_base *ath11k); void ath11k_core_deinit(struct ath11k_base *ath11k); -struct ath11k_base *ath11k_core_alloc(struct device *dev); +struct 
ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size, + enum ath11k_bus bus); void ath11k_core_free(struct ath11k_base *ath11k); int ath11k_core_fetch_bdf(struct ath11k_base *ath11k, struct ath11k_board_data *bd); diff --git a/drivers/net/wireless/ath/ath11k/debug_htt_stats.c b/drivers/net/wireless/ath/ath11k/debug_htt_stats.c index 5db0c27de475..6b532dc99c98 100644 --- a/drivers/net/wireless/ath/ath11k/debug_htt_stats.c +++ b/drivers/net/wireless/ath/ath11k/debug_htt_stats.c @@ -4306,6 +4306,7 @@ void ath11k_dbg_htt_ext_stats_handler(struct ath11k_base *ab, u32 len; u64 cookie; int ret; + bool send_completion = false; u8 pdev_id; msg = (struct ath11k_htt_extd_stats_msg *)skb->data; @@ -4330,11 +4331,11 @@ void ath11k_dbg_htt_ext_stats_handler(struct ath11k_base *ab, return; spin_lock_bh(&ar->debug.htt_stats.lock); - if (stats_req->done) { - spin_unlock_bh(&ar->debug.htt_stats.lock); - return; - } - stats_req->done = true; + + stats_req->done = FIELD_GET(HTT_T2H_EXT_STATS_INFO1_DONE, msg->info1); + if (stats_req->done) + send_completion = true; + spin_unlock_bh(&ar->debug.htt_stats.lock); len = FIELD_GET(HTT_T2H_EXT_STATS_INFO1_LENGTH, msg->info1); @@ -4344,7 +4345,8 @@ void ath11k_dbg_htt_ext_stats_handler(struct ath11k_base *ab, if (ret) ath11k_warn(ab, "Failed to parse tlv %d\n", ret); - complete(&stats_req->cmpln); + if (send_completion) + complete(&stats_req->cmpln); } static ssize_t ath11k_read_htt_stats_type(struct file *file, @@ -4497,28 +4499,54 @@ static int ath11k_open_htt_stats(struct inode *inode, struct file *file) if (type == ATH11K_DBG_HTT_EXT_STATS_RESET) return -EPERM; + mutex_lock(&ar->conf_mutex); + + if (ar->state != ATH11K_STATE_ON) { + ret = -ENETDOWN; + goto err_unlock; + } + + if (ar->debug.htt_stats.stats_req) { + ret = -EAGAIN; + goto err_unlock; + } + stats_req = vzalloc(sizeof(*stats_req) + ATH11K_HTT_STATS_BUF_SIZE); - if (!stats_req) - return -ENOMEM; + if (!stats_req) { + ret = -ENOMEM; + goto err_unlock; + } - mutex_lock(&ar->conf_mutex); ar->debug.htt_stats.stats_req = stats_req; stats_req->type = type; + ret = ath11k_dbg_htt_stats_req(ar); - mutex_unlock(&ar->conf_mutex); if (ret < 0) goto out; file->private_data = stats_req; + + mutex_unlock(&ar->conf_mutex); + return 0; out: vfree(stats_req); + ar->debug.htt_stats.stats_req = NULL; +err_unlock: + mutex_unlock(&ar->conf_mutex); + return ret; } static int ath11k_release_htt_stats(struct inode *inode, struct file *file) { + struct ath11k *ar = inode->i_private; + + mutex_lock(&ar->conf_mutex); vfree(file->private_data); + ar->debug.htt_stats.stats_req = NULL; + mutex_unlock(&ar->conf_mutex); + return 0; } diff --git a/drivers/net/wireless/ath/ath11k/dp.c b/drivers/net/wireless/ath/ath11k/dp.c index 145015d2f49c..9ae743e528af 100644 --- a/drivers/net/wireless/ath/ath11k/dp.c +++ b/drivers/net/wireless/ath/ath11k/dp.c @@ -701,6 +701,7 @@ int ath11k_dp_service_srng(struct ath11k_base *ab, done: return tot_work_done; } +EXPORT_SYMBOL(ath11k_dp_service_srng); void ath11k_dp_pdev_free(struct ath11k_base *ab) { diff --git a/drivers/net/wireless/ath/ath11k/dp.h b/drivers/net/wireless/ath/ath11k/dp.h index 222de10e4b93..058a5c1d86ff 100644 --- a/drivers/net/wireless/ath/ath11k/dp.h +++ b/drivers/net/wireless/ath/ath11k/dp.h @@ -1517,6 +1517,7 @@ struct htt_ext_stats_cfg_params { * 4 bytes. 
*/ +#define HTT_T2H_EXT_STATS_INFO1_DONE BIT(11) #define HTT_T2H_EXT_STATS_INFO1_LENGTH GENMASK(31, 16) struct ath11k_htt_extd_stats_msg { diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c index 85670608c3e2..a54610d75c40 100644 --- a/drivers/net/wireless/ath/ath11k/dp_rx.c +++ b/drivers/net/wireless/ath/ath11k/dp_rx.c @@ -2728,7 +2728,7 @@ static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id, ath11k_warn(ab, "rx monitor status with invalid buf_id %d\n", buf_id); spin_unlock_bh(&rx_ring->idr_lock); - continue; + goto move_next; } idr_remove(&rx_ring->bufs_idr, buf_id); @@ -2747,13 +2747,16 @@ static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id, tlv = (struct hal_tlv_hdr *)skb->data; if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) != HAL_RX_STATUS_BUFFER_DONE) { - ath11k_hal_srng_src_get_next_entry(ab, srng); - continue; + ath11k_warn(ab, "mon status DONE not set %lx\n", + FIELD_GET(HAL_TLV_HDR_TAG, + tlv->tl)); + dev_kfree_skb_any(skb); + goto move_next; } __skb_queue_tail(skb_list, skb); } - +move_next: skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring, &buf_id, GFP_ATOMIC); diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c index 9e40c4bdd674..d63785178afa 100644 --- a/drivers/net/wireless/ath/ath11k/hal.c +++ b/drivers/net/wireless/ath/ath11k/hal.c @@ -3,10 +3,10 @@ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. */ #include <linux/dma-mapping.h> -#include "ahb.h" #include "hal_tx.h" #include "debug.h" #include "hal_desc.h" +#include "hif.h" static const struct hal_srng_config hw_srng_config[] = { /* TODO: max_rings can populated by querying HW capabilities */ @@ -351,11 +351,12 @@ static void ath11k_hal_ce_dst_setup(struct ath11k_base *ab, addr = HAL_CE_DST_RING_CTRL + srng_config->reg_start[HAL_SRNG_REG_GRP_R0] + ring_num * srng_config->reg_size[HAL_SRNG_REG_GRP_R0]; - val = ath11k_ahb_read32(ab, addr); + + val = ath11k_hif_read32(ab, addr); val &= ~HAL_CE_DST_R0_DEST_CTRL_MAX_LEN; val |= FIELD_PREP(HAL_CE_DST_R0_DEST_CTRL_MAX_LEN, srng->u.dst_ring.max_buffer_length); - ath11k_ahb_write32(ab, addr, val); + ath11k_hif_write32(ab, addr, val); } static void ath11k_hal_srng_dst_hw_init(struct ath11k_base *ab, @@ -369,34 +370,34 @@ static void ath11k_hal_srng_dst_hw_init(struct ath11k_base *ab, reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0]; if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) { - ath11k_ahb_write32(ab, reg_base + - HAL_REO1_RING_MSI1_BASE_LSB_OFFSET, + ath11k_hif_write32(ab, reg_base + + HAL_REO1_RING_MSI1_BASE_LSB_OFFSET, (u32)srng->msi_addr); val = FIELD_PREP(HAL_REO1_RING_MSI1_BASE_MSB_ADDR, ((u64)srng->msi_addr >> HAL_ADDR_MSB_REG_SHIFT)) | HAL_REO1_RING_MSI1_BASE_MSB_MSI1_ENABLE; - ath11k_ahb_write32(ab, reg_base + + ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_MSI1_BASE_MSB_OFFSET, val); - ath11k_ahb_write32(ab, + ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_MSI1_DATA_OFFSET, srng->msi_data); } - ath11k_ahb_write32(ab, reg_base, (u32)srng->ring_base_paddr); + ath11k_hif_write32(ab, reg_base, (u32)srng->ring_base_paddr); val = FIELD_PREP(HAL_REO1_RING_BASE_MSB_RING_BASE_ADDR_MSB, ((u64)srng->ring_base_paddr >> HAL_ADDR_MSB_REG_SHIFT)) | FIELD_PREP(HAL_REO1_RING_BASE_MSB_RING_SIZE, (srng->entry_size * srng->num_entries)); - ath11k_ahb_write32(ab, reg_base + HAL_REO1_RING_BASE_MSB_OFFSET, val); + ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_BASE_MSB_OFFSET, val); val = FIELD_PREP(HAL_REO1_RING_ID_RING_ID, 
srng->ring_id) | FIELD_PREP(HAL_REO1_RING_ID_ENTRY_SIZE, srng->entry_size); - ath11k_ahb_write32(ab, reg_base + HAL_REO1_RING_ID_OFFSET, val); + ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_ID_OFFSET, val); /* interrupt setup */ val = FIELD_PREP(HAL_REO1_RING_PRDR_INT_SETUP_INTR_TMR_THOLD, @@ -406,22 +407,22 @@ static void ath11k_hal_srng_dst_hw_init(struct ath11k_base *ab, (srng->intr_batch_cntr_thres_entries * srng->entry_size)); - ath11k_ahb_write32(ab, + ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_PRODUCER_INT_SETUP_OFFSET, val); hp_addr = hal->rdp.paddr + ((unsigned long)srng->u.dst_ring.hp_addr - (unsigned long)hal->rdp.vaddr); - ath11k_ahb_write32(ab, reg_base + HAL_REO1_RING_HP_ADDR_LSB_OFFSET, + ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_HP_ADDR_LSB_OFFSET, hp_addr & HAL_ADDR_LSB_REG_MASK); - ath11k_ahb_write32(ab, reg_base + HAL_REO1_RING_HP_ADDR_MSB_OFFSET, + ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_HP_ADDR_MSB_OFFSET, hp_addr >> HAL_ADDR_MSB_REG_SHIFT); /* Initialize head and tail pointers to indicate ring is empty */ reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2]; - ath11k_ahb_write32(ab, reg_base, 0); - ath11k_ahb_write32(ab, reg_base + HAL_REO1_RING_TP_OFFSET, 0); + ath11k_hif_write32(ab, reg_base, 0); + ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_TP_OFFSET, 0); *srng->u.dst_ring.hp_addr = 0; reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0]; @@ -434,7 +435,7 @@ static void ath11k_hal_srng_dst_hw_init(struct ath11k_base *ab, val |= HAL_REO1_RING_MISC_MSI_SWAP; val |= HAL_REO1_RING_MISC_SRNG_ENABLE; - ath11k_ahb_write32(ab, reg_base + HAL_REO1_RING_MISC_OFFSET, val); + ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_MISC_OFFSET, val); } static void ath11k_hal_srng_src_hw_init(struct ath11k_base *ab, @@ -448,34 +449,34 @@ static void ath11k_hal_srng_src_hw_init(struct ath11k_base *ab, reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0]; if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) { - ath11k_ahb_write32(ab, reg_base + - HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET, + ath11k_hif_write32(ab, reg_base + + HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET, (u32)srng->msi_addr); val = FIELD_PREP(HAL_TCL1_RING_MSI1_BASE_MSB_ADDR, ((u64)srng->msi_addr >> HAL_ADDR_MSB_REG_SHIFT)) | HAL_TCL1_RING_MSI1_BASE_MSB_MSI1_ENABLE; - ath11k_ahb_write32(ab, reg_base + + ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_MSI1_BASE_MSB_OFFSET, val); - ath11k_ahb_write32(ab, reg_base + + ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_MSI1_DATA_OFFSET, srng->msi_data); } - ath11k_ahb_write32(ab, reg_base, (u32)srng->ring_base_paddr); + ath11k_hif_write32(ab, reg_base, (u32)srng->ring_base_paddr); val = FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB, ((u64)srng->ring_base_paddr >> HAL_ADDR_MSB_REG_SHIFT)) | FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_SIZE, (srng->entry_size * srng->num_entries)); - ath11k_ahb_write32(ab, reg_base + HAL_TCL1_RING_BASE_MSB_OFFSET, val); + ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_BASE_MSB_OFFSET, val); val = FIELD_PREP(HAL_REO1_RING_ID_ENTRY_SIZE, srng->entry_size); - ath11k_ahb_write32(ab, reg_base + HAL_TCL1_RING_ID_OFFSET, val); + ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_ID_OFFSET, val); /* interrupt setup */ /* NOTE: IPQ8074 v2 requires the interrupt timer threshold in the @@ -488,7 +489,7 @@ static void ath11k_hal_srng_src_hw_init(struct ath11k_base *ab, (srng->intr_batch_cntr_thres_entries * srng->entry_size)); - ath11k_ahb_write32(ab, + ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX0_OFFSET, val); @@ -497,7 +498,7 @@ static void 
ath11k_hal_srng_src_hw_init(struct ath11k_base *ab, val |= FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX1_LOW_THOLD, srng->u.src_ring.low_threshold); } - ath11k_ahb_write32(ab, + ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX1_OFFSET, val); @@ -505,18 +506,18 @@ static void ath11k_hal_srng_src_hw_init(struct ath11k_base *ab, tp_addr = hal->rdp.paddr + ((unsigned long)srng->u.src_ring.tp_addr - (unsigned long)hal->rdp.vaddr); - ath11k_ahb_write32(ab, + ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_TP_ADDR_LSB_OFFSET, tp_addr & HAL_ADDR_LSB_REG_MASK); - ath11k_ahb_write32(ab, + ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_TP_ADDR_MSB_OFFSET, tp_addr >> HAL_ADDR_MSB_REG_SHIFT); } /* Initialize head and tail pointers to indicate ring is empty */ reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2]; - ath11k_ahb_write32(ab, reg_base, 0); - ath11k_ahb_write32(ab, reg_base + HAL_TCL1_RING_TP_OFFSET, 0); + ath11k_hif_write32(ab, reg_base, 0); + ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_TP_OFFSET, 0); *srng->u.src_ring.tp_addr = 0; reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0]; @@ -533,7 +534,7 @@ static void ath11k_hal_srng_src_hw_init(struct ath11k_base *ab, val |= HAL_TCL1_RING_MISC_SRNG_ENABLE; - ath11k_ahb_write32(ab, reg_base + HAL_TCL1_RING_MISC_OFFSET, val); + ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_MISC_OFFSET, val); } static void ath11k_hal_srng_hw_init(struct ath11k_base *ab, @@ -889,13 +890,13 @@ void ath11k_hal_srng_access_end(struct ath11k_base *ab, struct hal_srng *srng) if (srng->ring_dir == HAL_SRNG_DIR_SRC) { srng->u.src_ring.last_tp = *(volatile u32 *)srng->u.src_ring.tp_addr; - ath11k_ahb_write32(ab, + ath11k_hif_write32(ab, (unsigned long)srng->u.src_ring.hp_addr - (unsigned long)ab->mem, srng->u.src_ring.hp); } else { srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr; - ath11k_ahb_write32(ab, + ath11k_hif_write32(ab, (unsigned long)srng->u.dst_ring.tp_addr - (unsigned long)ab->mem, srng->u.dst_ring.tp); @@ -929,20 +930,20 @@ void ath11k_hal_setup_link_idle_list(struct ath11k_base *ab, HAL_WBM_IDLE_SCATTER_BUF_SIZE; } - ath11k_ahb_write32(ab, + ath11k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_R0_IDLE_LIST_CONTROL_ADDR, FIELD_PREP(HAL_WBM_SCATTER_BUFFER_SIZE, reg_scatter_buf_sz) | FIELD_PREP(HAL_WBM_LINK_DESC_IDLE_LIST_MODE, 0x1)); - ath11k_ahb_write32(ab, + ath11k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_R0_IDLE_LIST_SIZE_ADDR, FIELD_PREP(HAL_WBM_SCATTER_RING_SIZE_OF_IDLE_LINK_DESC_LIST, reg_scatter_buf_sz * nsbufs)); - ath11k_ahb_write32(ab, + ath11k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SCATTERED_RING_BASE_LSB, FIELD_PREP(BUFFER_ADDR_INFO0_ADDR, sbuf[0].paddr & HAL_ADDR_LSB_REG_MASK)); - ath11k_ahb_write32(ab, + ath11k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SCATTERED_RING_BASE_MSB, FIELD_PREP( @@ -953,12 +954,12 @@ void ath11k_hal_setup_link_idle_list(struct ath11k_base *ab, BASE_ADDR_MATCH_TAG_VAL)); /* Setup head and tail pointers for the idle list */ - ath11k_ahb_write32(ab, + ath11k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0, FIELD_PREP(BUFFER_ADDR_INFO0_ADDR, sbuf[nsbufs - 1].paddr)); - ath11k_ahb_write32(ab, + ath11k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX1, FIELD_PREP( @@ -967,18 +968,18 @@ void ath11k_hal_setup_link_idle_list(struct ath11k_base *ab, HAL_ADDR_MSB_REG_SHIFT)) | FIELD_PREP(HAL_WBM_SCATTERED_DESC_HEAD_P_OFFSET_IX1, (end_offset >> 2))); - ath11k_ahb_write32(ab, + ath11k_hif_write32(ab, 
HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0, FIELD_PREP(BUFFER_ADDR_INFO0_ADDR, sbuf[0].paddr)); - ath11k_ahb_write32(ab, + ath11k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX0, FIELD_PREP(BUFFER_ADDR_INFO0_ADDR, sbuf[0].paddr)); - ath11k_ahb_write32(ab, + ath11k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX1, FIELD_PREP( @@ -986,13 +987,13 @@ void ath11k_hal_setup_link_idle_list(struct ath11k_base *ab, ((u64)sbuf[0].paddr >> HAL_ADDR_MSB_REG_SHIFT)) | FIELD_PREP(HAL_WBM_SCATTERED_DESC_TAIL_P_OFFSET_IX1, 0)); - ath11k_ahb_write32(ab, + ath11k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SCATTERED_DESC_PTR_HP_ADDR, 2 * tot_link_desc); /* Enable the SRNG */ - ath11k_ahb_write32(ab, + ath11k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_MISC_ADDR, 0x40); } diff --git a/drivers/net/wireless/ath/ath11k/hal_desc.h b/drivers/net/wireless/ath/ath11k/hal_desc.h index a1f747c1c44d..8a592814efa0 100644 --- a/drivers/net/wireless/ath/ath11k/hal_desc.h +++ b/drivers/net/wireless/ath/ath11k/hal_desc.h @@ -2,6 +2,8 @@ /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. */ +#include "core.h" + #ifndef ATH11K_HAL_DESC_H #define ATH11K_HAL_DESC_H diff --git a/drivers/net/wireless/ath/ath11k/hal_rx.c b/drivers/net/wireless/ath/ath11k/hal_rx.c index f277c9434a25..129c9e1efeb9 100644 --- a/drivers/net/wireless/ath/ath11k/hal_rx.c +++ b/drivers/net/wireless/ath/ath11k/hal_rx.c @@ -3,12 +3,12 @@ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. */ -#include "ahb.h" #include "debug.h" #include "hal.h" #include "hal_tx.h" #include "hal_rx.h" #include "hal_desc.h" +#include "hif.h" static void ath11k_hal_reo_set_desc_hdr(struct hal_desc_header *hdr, u8 owner, u8 buffer_type, u32 magic) @@ -804,34 +804,34 @@ void ath11k_hal_reo_hw_setup(struct ath11k_base *ab, u32 ring_hash_map) u32 reo_base = HAL_SEQ_WCSS_UMAC_REO_REG; u32 val; - val = ath11k_ahb_read32(ab, reo_base + HAL_REO1_GEN_ENABLE); + val = ath11k_hif_read32(ab, reo_base + HAL_REO1_GEN_ENABLE); val &= ~HAL_REO1_GEN_ENABLE_FRAG_DST_RING; val |= FIELD_PREP(HAL_REO1_GEN_ENABLE_FRAG_DST_RING, HAL_SRNG_RING_ID_REO2SW1) | FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_LIST_ENABLE, 1) | FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE, 1); - ath11k_ahb_write32(ab, reo_base + HAL_REO1_GEN_ENABLE, val); + ath11k_hif_write32(ab, reo_base + HAL_REO1_GEN_ENABLE, val); - ath11k_ahb_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_0, + ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_0, HAL_DEFAULT_REO_TIMEOUT_USEC); - ath11k_ahb_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_1, + ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_1, HAL_DEFAULT_REO_TIMEOUT_USEC); - ath11k_ahb_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_2, + ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_2, HAL_DEFAULT_REO_TIMEOUT_USEC); - ath11k_ahb_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_3, + ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_3, HAL_DEFAULT_REO_TIMEOUT_USEC); - ath11k_ahb_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_0, + ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_0, FIELD_PREP(HAL_REO_DEST_RING_CTRL_HASH_RING_MAP, ring_hash_map)); - ath11k_ahb_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_1, + ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_1, FIELD_PREP(HAL_REO_DEST_RING_CTRL_HASH_RING_MAP, ring_hash_map)); - ath11k_ahb_write32(ab, reo_base 
+ HAL_REO1_DEST_RING_CTRL_IX_2, + ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_2, FIELD_PREP(HAL_REO_DEST_RING_CTRL_HASH_RING_MAP, ring_hash_map)); - ath11k_ahb_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_3, + ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_3, FIELD_PREP(HAL_REO_DEST_RING_CTRL_HASH_RING_MAP, ring_hash_map)); } diff --git a/drivers/net/wireless/ath/ath11k/hal_tx.c b/drivers/net/wireless/ath/ath11k/hal_tx.c index e4aa7e8a1284..81937c29ffca 100644 --- a/drivers/net/wireless/ath/ath11k/hal_tx.c +++ b/drivers/net/wireless/ath/ath11k/hal_tx.c @@ -3,9 +3,10 @@ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. */ -#include "ahb.h" +#include "hal_desc.h" #include "hal.h" #include "hal_tx.h" +#include "hif.h" #define DSCP_TID_MAP_TBL_ENTRY_SIZE 64 @@ -83,11 +84,11 @@ void ath11k_hal_tx_set_dscp_tid_map(struct ath11k_base *ab, int id) u32 value; int cnt = 0; - ctrl_reg_val = ath11k_ahb_read32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG + + ctrl_reg_val = ath11k_hif_read32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_CMN_CTRL_REG); /* Enable read/write access */ ctrl_reg_val |= HAL_TCL1_RING_CMN_CTRL_DSCP_TID_MAP_PROG_EN; - ath11k_ahb_write32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG + + ath11k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_CMN_CTRL_REG, ctrl_reg_val); addr = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_DSCP_TID_MAP + @@ -118,15 +119,15 @@ void ath11k_hal_tx_set_dscp_tid_map(struct ath11k_base *ab, int id) } for (i = 0; i < HAL_DSCP_TID_TBL_SIZE; i += 4) { - ath11k_ahb_write32(ab, addr, *(u32 *)&hw_map_val[i]); + ath11k_hif_write32(ab, addr, *(u32 *)&hw_map_val[i]); addr += 4; } /* Disable read/write access */ - ctrl_reg_val = ath11k_ahb_read32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG + + ctrl_reg_val = ath11k_hif_read32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_CMN_CTRL_REG); ctrl_reg_val &= ~HAL_TCL1_RING_CMN_CTRL_DSCP_TID_MAP_PROG_EN; - ath11k_ahb_write32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG + + ath11k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_CMN_CTRL_REG, ctrl_reg_val); } diff --git a/drivers/net/wireless/ath/ath11k/hal_tx.h b/drivers/net/wireless/ath/ath11k/hal_tx.h index ce48a61bfb66..d4760a20fdac 100644 --- a/drivers/net/wireless/ath/ath11k/hal_tx.h +++ b/drivers/net/wireless/ath/ath11k/hal_tx.h @@ -7,6 +7,7 @@ #define ATH11K_HAL_TX_H #include "hal_desc.h" +#include "core.h" #define HAL_TX_ADDRX_EN 1 #define HAL_TX_ADDRY_EN 2 diff --git a/drivers/net/wireless/ath/ath11k/hif.h b/drivers/net/wireless/ath/ath11k/hif.h new file mode 100644 index 000000000000..165f7e51c238 --- /dev/null +++ b/drivers/net/wireless/ath/ath11k/hif.h @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: BSD-3-Clause-Clear */ +/* + * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. 
+ */ + +#include "core.h" + +struct ath11k_hif_ops { + u32 (*read32)(struct ath11k_base *sc, u32 address); + void (*write32)(struct ath11k_base *sc, u32 address, u32 data); + void (*irq_enable)(struct ath11k_base *sc); + void (*irq_disable)(struct ath11k_base *sc); + int (*start)(struct ath11k_base *sc); + void (*stop)(struct ath11k_base *sc); + int (*power_up)(struct ath11k_base *sc); + void (*power_down)(struct ath11k_base *sc); + int (*map_service_to_pipe)(struct ath11k_base *sc, u16 service_id, + u8 *ul_pipe, u8 *dl_pipe); +}; + +static inline int ath11k_hif_start(struct ath11k_base *sc) +{ + return sc->hif.ops->start(sc); +} + +static inline void ath11k_hif_stop(struct ath11k_base *sc) +{ + sc->hif.ops->stop(sc); +} + +static inline void ath11k_hif_irq_enable(struct ath11k_base *sc) +{ + sc->hif.ops->irq_enable(sc); +} + +static inline void ath11k_hif_irq_disable(struct ath11k_base *sc) +{ + sc->hif.ops->irq_disable(sc); +} + +static inline int ath11k_hif_power_up(struct ath11k_base *sc) +{ + return sc->hif.ops->power_up(sc); +} + +static inline void ath11k_hif_power_down(struct ath11k_base *sc) +{ + sc->hif.ops->power_down(sc); +} + +static inline u32 ath11k_hif_read32(struct ath11k_base *sc, u32 address) +{ + return sc->hif.ops->read32(sc, address); +} + +static inline void ath11k_hif_write32(struct ath11k_base *sc, u32 address, u32 data) +{ + sc->hif.ops->write32(sc, address, data); +} + +static inline int ath11k_hif_map_service_to_pipe(struct ath11k_base *sc, u16 service_id, + u8 *ul_pipe, u8 *dl_pipe) +{ + return sc->hif.ops->map_service_to_pipe(sc, service_id, ul_pipe, dl_pipe); +} diff --git a/drivers/net/wireless/ath/ath11k/htc.c b/drivers/net/wireless/ath/ath11k/htc.c index 8f54f58b83e6..ad13c648b679 100644 --- a/drivers/net/wireless/ath/ath11k/htc.c +++ b/drivers/net/wireless/ath/ath11k/htc.c @@ -5,8 +5,8 @@ #include <linux/skbuff.h> #include <linux/ctype.h> -#include "ahb.h" #include "debug.h" +#include "hif.h" struct sk_buff *ath11k_htc_alloc_skb(struct ath11k_base *ab, int size) { @@ -672,7 +672,7 @@ setup: /* copy all the callbacks */ ep->ep_ops = conn_req->ep_ops; - status = ath11k_ahb_map_service_to_pipe(htc->ab, + status = ath11k_hif_map_service_to_pipe(htc->ab, ep->service_id, &ep->ul_pipe_id, &ep->dl_pipe_id); diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h index cdec95644758..dc4434aefbbe 100644 --- a/drivers/net/wireless/ath/ath11k/hw.h +++ b/drivers/net/wireless/ath/ath11k/hw.h @@ -99,6 +99,11 @@ enum ath11k_hw_rate_ofdm { ATH11K_HW_RATE_OFDM_9M, }; +enum ath11k_bus { + ATH11K_BUS_AHB, + ATH11K_BUS_PCI, +}; + struct ath11k_hw_params { const char *name; struct { diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index 5ffe55801ca4..2836a0f197ab 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -1178,8 +1178,7 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar, sizeof(arg->peer_he_cap_macinfo)); memcpy(&arg->peer_he_cap_phyinfo, he_cap->he_cap_elem.phy_cap_info, sizeof(arg->peer_he_cap_phyinfo)); - memcpy(&arg->peer_he_ops, &vif->bss_conf.he_operation, - sizeof(arg->peer_he_ops)); + arg->peer_he_ops = vif->bss_conf.he_oper.params; /* the top most byte is used to indicate BSS color info */ arg->peer_he_ops &= 0xffffff; @@ -3563,7 +3562,7 @@ static int ath11k_mac_copy_he_cap(struct ath11k *ar, memcpy(he_cap_elem->phy_cap_info, band_cap->he_cap_phy_info, sizeof(he_cap_elem->phy_cap_info)); - he_cap_elem->mac_cap_info[1] |= + 
he_cap_elem->mac_cap_info[1] &= IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_MASK; he_cap_elem->phy_cap_info[4] &= ~IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_MASK; @@ -3579,6 +3578,8 @@ static int ath11k_mac_copy_he_cap(struct ath11k *ar, switch (i) { case NL80211_IFTYPE_AP: + he_cap_elem->phy_cap_info[3] &= + ~IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK; he_cap_elem->phy_cap_info[9] |= IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU; break; @@ -3693,7 +3694,7 @@ static int __ath11k_set_antenna(struct ath11k *ar, u32 tx_ant, u32 rx_ant) int ath11k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx) { struct sk_buff *msdu = skb; - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu); + struct ieee80211_tx_info *info; struct ath11k *ar = ctx; struct ath11k_base *ab = ar->ab; diff --git a/drivers/net/wireless/ath/ath11k/thermal.c b/drivers/net/wireless/ath/ath11k/thermal.c index 259dddbda2c7..5a7e150c621b 100644 --- a/drivers/net/wireless/ath/ath11k/thermal.c +++ b/drivers/net/wireless/ath/ath11k/thermal.c @@ -174,9 +174,12 @@ int ath11k_thermal_register(struct ath11k_base *sc) if (IS_ERR(cdev)) { ath11k_err(sc, "failed to setup thermal device result: %ld\n", PTR_ERR(cdev)); - return -EINVAL; + ret = -EINVAL; + goto err_thermal_destroy; } + ar->thermal.cdev = cdev; + ret = sysfs_create_link(&ar->hw->wiphy->dev.kobj, &cdev->device.kobj, "cooling_device"); if (ret) { @@ -184,7 +187,6 @@ int ath11k_thermal_register(struct ath11k_base *sc) goto err_thermal_destroy; } - ar->thermal.cdev = cdev; if (!IS_REACHABLE(CONFIG_HWMON)) return 0; diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c index 37cf602d8adf..67f8f2aa7a53 100644 --- a/drivers/net/wireless/ath/ath6kl/cfg80211.c +++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c @@ -3249,22 +3249,19 @@ static int ath6kl_get_antenna(struct wiphy *wiphy, return 0; } -static void ath6kl_mgmt_frame_register(struct wiphy *wiphy, - struct wireless_dev *wdev, - u16 frame_type, bool reg) +static void ath6kl_update_mgmt_frame_registrations(struct wiphy *wiphy, + struct wireless_dev *wdev, + struct mgmt_frame_regs *upd) { struct ath6kl_vif *vif = ath6kl_vif_from_wdev(wdev); - ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: frame_type=0x%x reg=%d\n", - __func__, frame_type, reg); - if (frame_type == IEEE80211_STYPE_PROBE_REQ) { - /* - * Note: This notification callback is not allowed to sleep, so - * we cannot send WMI_PROBE_REQ_REPORT_CMD here. Instead, we - * hardcode target to report Probe Request frames all the time. - */ - vif->probe_req_report = reg; - } + /* + * FIXME: send WMI_PROBE_REQ_REPORT_CMD here instead of hardcoding + * the reporting in the target all the time, this callback + * *is* allowed to sleep after all. 
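The assignment just below reads one bit out of upd->interface_stypes: under the new update_mgmt_frame_registrations() API, a management-frame subtype is registered as bit (stype >> 4) of that mask rather than through per-frame-type callbacks. A small sketch of the bit arithmetic (the subtype constant matches linux/ieee80211.h; the registration mask is invented):

#include <stdint.h>
#include <stdio.h>

#define BIT(n)			(1u << (n))
#define IEEE80211_STYPE_PROBE_REQ	0x0040	/* from linux/ieee80211.h */

int main(void)
{
	/* Hypothetical mask with only Probe Request registered; the bit
	 * index is stype >> 4, the same math the driver uses. */
	uint16_t interface_stypes = BIT(IEEE80211_STYPE_PROBE_REQ >> 4);
	int probe_req_report =
		!!(interface_stypes & BIT(IEEE80211_STYPE_PROBE_REQ >> 4));

	printf("probe_req_report=%d (bit %d)\n",
	       probe_req_report, IEEE80211_STYPE_PROBE_REQ >> 4);
	return 0;
}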
+ */ + vif->probe_req_report = + upd->interface_stypes & BIT(IEEE80211_STYPE_PROBE_REQ >> 4); } static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy, @@ -3464,7 +3461,8 @@ static struct cfg80211_ops ath6kl_cfg80211_ops = { .remain_on_channel = ath6kl_remain_on_channel, .cancel_remain_on_channel = ath6kl_cancel_remain_on_channel, .mgmt_tx = ath6kl_mgmt_tx, - .mgmt_frame_register = ath6kl_mgmt_frame_register, + .update_mgmt_frame_registrations = + ath6kl_update_mgmt_frame_registrations, .get_antenna = ath6kl_get_antenna, .sched_scan_start = ath6kl_cfg80211_sscan_start, .sched_scan_stop = ath6kl_cfg80211_sscan_stop, diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c index 40a065028ebe..1d6ad8d46607 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c @@ -780,6 +780,8 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv, SET_IEEE80211_PERM_ADDR(hw, common->macaddr); wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); + wiphy_ext_feature_set(hw->wiphy, + NL80211_EXT_FEATURE_MULTICAST_REGISTRATIONS); } static int ath9k_init_firmware_version(struct ath9k_htc_priv *priv) diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c index 791f6633667c..2b7832b1c800 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c @@ -1251,6 +1251,7 @@ out: FIF_OTHER_BSS | \ FIF_BCN_PRBRESP_PROMISC | \ FIF_PROBE_REQ | \ + FIF_MCAST_ACTION | \ FIF_FCSFAIL) static void ath9k_htc_configure_filter(struct ieee80211_hw *hw, diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c index 118e5550b10c..b353995bdd45 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c @@ -893,7 +893,8 @@ u32 ath9k_htc_calcrxfilter(struct ath9k_htc_priv *priv) if (priv->rxfilter & FIF_PSPOLL) rfilt |= ATH9K_RX_FILTER_PSPOLL; - if (priv->nvifs > 1 || priv->rxfilter & FIF_OTHER_BSS) + if (priv->nvifs > 1 || + priv->rxfilter & (FIF_OTHER_BSS | FIF_MCAST_ACTION)) rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL; return rfilt; diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index 17c318902cb8..289a2444d534 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -1012,6 +1012,8 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS); + wiphy_ext_feature_set(hw->wiphy, + NL80211_EXT_FEATURE_MULTICAST_REGISTRATIONS); } int ath9k_init_device(u16 devid, struct ath_softc *sc, diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 457e9b0d21ca..a47f6e978095 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -1476,6 +1476,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed) FIF_OTHER_BSS | \ FIF_BCN_PRBRESP_PROMISC | \ FIF_PROBE_REQ | \ + FIF_MCAST_ACTION | \ FIF_FCSFAIL) /* FIXME: sc->sc_full_reset ? 
*/ diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index 06e660858766..0c0624a3b40d 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c @@ -413,7 +413,8 @@ u32 ath_calcrxfilter(struct ath_softc *sc) if (sc->cur_chandef.width != NL80211_CHAN_WIDTH_20_NOHT) rfilt |= ATH9K_RX_FILTER_COMP_BAR; - if (sc->cur_chan->nvifs > 1 || (sc->cur_chan->rxfilter & FIF_OTHER_BSS)) { + if (sc->cur_chan->nvifs > 1 || + (sc->cur_chan->rxfilter & (FIF_OTHER_BSS | FIF_MCAST_ACTION))) { /* This is needed for older chips */ if (sc->sc_ah->hw_version.macVersion <= AR_SREV_VERSION_9160) rfilt |= ATH9K_RX_FILTER_PROM; diff --git a/drivers/net/wireless/ath/carl9170/fwcmd.h b/drivers/net/wireless/ath/carl9170/fwcmd.h index ea1d80f9a50e..56999a3b9d3b 100644 --- a/drivers/net/wireless/ath/carl9170/fwcmd.h +++ b/drivers/net/wireless/ath/carl9170/fwcmd.h @@ -127,7 +127,7 @@ struct carl9170_write_reg { struct carl9170_write_reg_byte { __le32 addr; __le32 count; - u8 val[0]; + u8 val[]; } __packed; #define CARL9170FW_PHY_HT_ENABLE 0x4 diff --git a/drivers/net/wireless/ath/carl9170/hw.h b/drivers/net/wireless/ath/carl9170/hw.h index 08e0ae9c5836..555ad4975970 100644 --- a/drivers/net/wireless/ath/carl9170/hw.h +++ b/drivers/net/wireless/ath/carl9170/hw.h @@ -851,7 +851,7 @@ struct ar9170_stream { __le16 length; __le16 tag; - u8 payload[0]; + u8 payload[]; } __packed __aligned(4); #define AR9170_STREAM_LEN 4 diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h index 6ba0fd57c951..aab5a58616fc 100644 --- a/drivers/net/wireless/ath/wcn36xx/hal.h +++ b/drivers/net/wireless/ath/wcn36xx/hal.h @@ -2240,7 +2240,7 @@ struct wcn36xx_hal_process_ptt_msg_req_msg { struct wcn36xx_hal_msg_header header; /* Actual FTM Command body */ - u8 ptt_msg[0]; + u8 ptt_msg[]; } __packed; struct wcn36xx_hal_process_ptt_msg_rsp_msg { @@ -2249,7 +2249,7 @@ struct wcn36xx_hal_process_ptt_msg_rsp_msg { /* FTM Command response status */ u32 ptt_msg_resp_status; /* Actual FTM Command body */ - u8 ptt_msg[0]; + u8 ptt_msg[]; } __packed; struct update_edca_params_req_msg { diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index e49c306e0eef..702b689c06df 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -1339,7 +1339,7 @@ static int wcn36xx_probe(struct platform_device *pdev) if (addr && ret != ETH_ALEN) { wcn36xx_err("invalid local-mac-address\n"); ret = -EINVAL; - goto out_wq; + goto out_destroy_ept; } else if (addr) { wcn36xx_info("mac address: %pM\n", addr); SET_IEEE80211_PERM_ADDR(wcn->hw, addr); @@ -1347,7 +1347,7 @@ static int wcn36xx_probe(struct platform_device *pdev) ret = wcn36xx_platform_get_resources(wcn, pdev); if (ret) - goto out_wq; + goto out_destroy_ept; wcn36xx_init_ieee80211(wcn); ret = ieee80211_register_hw(wcn->hw); @@ -1359,6 +1359,8 @@ static int wcn36xx_probe(struct platform_device *pdev) out_unmap: iounmap(wcn->ccu_base); iounmap(wcn->dxe_base); +out_destroy_ept: + rpmsg_destroy_ept(wcn->smd_channel); out_wq: ieee80211_free_hw(hw); out_err: diff --git a/drivers/net/wireless/ath/wcn36xx/testmode.h b/drivers/net/wireless/ath/wcn36xx/testmode.h index 4c6cfdb46580..09d68fab9add 100644 --- a/drivers/net/wireless/ath/wcn36xx/testmode.h +++ b/drivers/net/wireless/ath/wcn36xx/testmode.h @@ -20,7 +20,7 @@ struct ftm_rsp_msg { u16 msg_id; u16 msg_body_length; u32 resp_status; - u8 msg_response[0]; + u8 msg_response[]; } 
__packed; /* The request buffer of FTM which contains a byte of command and the request */ diff --git a/drivers/net/wireless/ath/wil6210/fw.h b/drivers/net/wireless/ath/wil6210/fw.h index 540fa1607794..440614d61156 100644 --- a/drivers/net/wireless/ath/wil6210/fw.h +++ b/drivers/net/wireless/ath/wil6210/fw.h @@ -33,7 +33,7 @@ struct wil_fw_record_head { */ struct wil_fw_record_data { /* type == wil_fw_type_data */ __le32 addr; - __le32 data[0]; /* [data_size], see above */ + __le32 data[]; /* [data_size], see above */ } __packed; /* fill with constant @value, @size bytes starting from @addr */ @@ -61,7 +61,7 @@ struct wil_fw_record_capabilities { /* type == wil_fw_type_comment */ /* identifies capabilities record */ struct wil_fw_record_comment_hdr hdr; /* capabilities (variable size), see enum wmi_fw_capability */ - u8 capabilities[0]; + u8 capabilities[]; } __packed; /* FW VIF concurrency encoded inside a comment record @@ -80,7 +80,7 @@ struct wil_fw_concurrency_combo { u8 n_diff_channels; /* total number of different channels allowed */ u8 same_bi; /* for APs, 1 if all APs must have same BI */ /* keep last - concurrency limits, variable size by n_limits */ - struct wil_fw_concurrency_limit limits[0]; + struct wil_fw_concurrency_limit limits[]; } __packed; struct wil_fw_record_concurrency { /* type == wil_fw_type_comment */ @@ -93,7 +93,7 @@ struct wil_fw_record_concurrency { /* type == wil_fw_type_comment */ /* number of concurrency combinations that follow */ __le16 n_combos; /* keep last - combinations, variable size by n_combos */ - struct wil_fw_concurrency_combo combos[0]; + struct wil_fw_concurrency_combo combos[]; } __packed; /* brd file info encoded inside a comment record */ @@ -108,7 +108,7 @@ struct wil_fw_record_brd_file { /* type == wil_fw_type_comment */ /* identifies brd file record */ struct wil_fw_record_comment_hdr hdr; __le32 version; - struct brd_info brd_info[0]; + struct brd_info brd_info[]; } __packed; /* perform action @@ -116,7 +116,7 @@ struct wil_fw_record_brd_file { /* type == wil_fw_type_comment */ */ struct wil_fw_record_action { /* type == wil_fw_type_action */ __le32 action; /* action to perform: reset, wait for fw ready etc. 
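The [0] to [] churn running through these wil6210 structs replaces the old GNU zero-length-array idiom with C99 flexible array members: the member must remain last, contributes nothing to sizeof, and the trailing storage is provided at allocation time - the same trailing-storage trick the new ath11k drv_priv member relies on. A standalone sketch (the __packed attribute is omitted here):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* C99 flexible array member, as in the wil_fw/wmi structs above. */
struct record {
	unsigned int len;
	unsigned char data[];	/* was "data[0]" in the old GNU idiom */
};

int main(void)
{
	const char payload[] = "firmware blob";
	struct record *r = malloc(sizeof(*r) + sizeof(payload));

	if (!r)
		return 1;
	r->len = sizeof(payload);
	memcpy(r->data, payload, r->len);
	printf("len=%u data=%s\n", r->len, r->data);
	free(r);
	return 0;
}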
*/ - __le32 data[0]; /* action specific, [data_size], see above */ + __le32 data[]; /* action specific, [data_size], see above */ } __packed; /* data block for struct wil_fw_record_direct_write */ @@ -179,7 +179,7 @@ struct wil_fw_record_gateway_data { /* type == wil_fw_type_gateway_data */ #define WIL_FW_GW_CTL_BUSY BIT(29) /* gateway busy performing operation */ #define WIL_FW_GW_CTL_RUN BIT(30) /* start gateway operation */ __le32 command; - struct wil_fw_data_gw data[0]; /* total size [data_size], see above */ + struct wil_fw_data_gw data[]; /* total size [data_size], see above */ } __packed; /* 4-dword gateway */ @@ -201,7 +201,7 @@ struct wil_fw_record_gateway_data4 { /* type == wil_fw_type_gateway_data4 */ __le32 gateway_cmd_addr; __le32 gateway_ctrl_address; /* same logic as for 1-dword gw */ __le32 command; - struct wil_fw_data_gw4 data[0]; /* total size [data_size], see above */ + struct wil_fw_data_gw4 data[]; /* total size [data_size], see above */ } __packed; #endif /* __WIL_FW_H__ */ diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index 23e1ed6a9d6d..c7136ce567ee 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -222,7 +222,7 @@ struct auth_no_hdr { __le16 auth_transaction; __le16 status_code; /* possibly followed by Challenge text */ - u8 variable[0]; + u8 variable[]; } __packed; u8 led_polarity = LED_POLARITY_LOW_ACTIVE; diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h index e3558136e0c4..9affa4525609 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.h +++ b/drivers/net/wireless/ath/wil6210/wmi.h @@ -474,7 +474,7 @@ struct wmi_start_scan_cmd { struct { u8 channel; u8 reserved; - } channel_list[0]; + } channel_list[]; } __packed; #define WMI_MAX_PNO_SSID_NUM (16) @@ -530,7 +530,7 @@ struct wmi_update_ft_ies_cmd { /* Length of the FT IEs */ __le16 ie_len; u8 reserved[2]; - u8 ie_info[0]; + u8 ie_info[]; } __packed; /* WMI_SET_PROBED_SSID_CMDID */ @@ -575,7 +575,7 @@ struct wmi_set_appie_cmd { u8 reserved; /* Length of the IE to be added to MGMT frame */ __le16 ie_len; - u8 ie_info[0]; + u8 ie_info[]; } __packed; /* WMI_PXMT_RANGE_CFG_CMDID */ @@ -850,7 +850,7 @@ struct wmi_pcp_start_cmd { struct wmi_sw_tx_req_cmd { u8 dst_mac[WMI_MAC_LEN]; __le16 len; - u8 payload[0]; + u8 payload[]; } __packed; /* WMI_SW_TX_REQ_EXT_CMDID */ @@ -861,7 +861,7 @@ struct wmi_sw_tx_req_ext_cmd { /* Channel to use, 0xFF for currently active channel */ u8 channel; u8 reserved[5]; - u8 payload[0]; + u8 payload[]; } __packed; /* WMI_VRING_SWITCH_TIMING_CONFIG_CMDID */ @@ -1423,7 +1423,7 @@ struct wmi_rf_xpm_write_cmd { u8 verify; u8 reserved1[3]; /* actual size=num_bytes */ - u8 data_bytes[0]; + u8 data_bytes[]; } __packed; /* Possible modes for temperature measurement */ @@ -1572,7 +1572,7 @@ struct wmi_tof_session_start_cmd { u8 aoa_type; __le16 num_of_dest; u8 reserved[4]; - struct wmi_ftm_dest_info ftm_dest_info[0]; + struct wmi_ftm_dest_info ftm_dest_info[]; } __packed; /* WMI_TOF_CFG_RESPONDER_CMDID */ @@ -1766,7 +1766,7 @@ struct wmi_internal_fw_ioctl_cmd { /* payload max size is WMI_MAX_IOCTL_PAYLOAD_SIZE * Must be the last member of the struct */ - __le32 payload[0]; + __le32 payload[]; } __packed; /* WMI_INTERNAL_FW_IOCTL_EVENTID */ @@ -1778,7 +1778,7 @@ struct wmi_internal_fw_ioctl_event { /* payload max size is WMI_MAX_IOCTL_REPLY_PAYLOAD_SIZE * Must be the last member of the struct */ - __le32 payload[0]; + __le32 payload[]; } __packed; /* 
WMI_INTERNAL_FW_EVENT_EVENTID */ @@ -1788,7 +1788,7 @@ struct wmi_internal_fw_event_event { /* payload max size is WMI_MAX_INTERNAL_EVENT_PAYLOAD_SIZE * Must be the last member of the struct */ - __le32 payload[0]; + __le32 payload[]; } __packed; /* WMI_SET_VRING_PRIORITY_WEIGHT_CMDID */ @@ -1818,7 +1818,7 @@ struct wmi_set_vring_priority_cmd { */ u8 num_of_vrings; u8 reserved[3]; - struct wmi_vring_priority vring_priority[0]; + struct wmi_vring_priority vring_priority[]; } __packed; /* WMI_BF_CONTROL_CMDID - deprecated */ @@ -1910,7 +1910,7 @@ struct wmi_bf_control_ex_cmd { u8 each_mcs_cfg_size; u8 reserved1; /* Configuration for each MCS */ - struct wmi_bf_control_ex_mcs each_mcs_cfg[0]; + struct wmi_bf_control_ex_mcs each_mcs_cfg[]; } __packed; /* WMI_LINK_STATS_CMD */ @@ -2192,7 +2192,7 @@ struct wmi_fw_ver_event { /* FW capabilities info * Must be the last member of the struct */ - __le32 fw_capabilities[0]; + __le32 fw_capabilities[]; } __packed; /* WMI_GET_RF_STATUS_EVENTID */ @@ -2270,7 +2270,7 @@ struct wmi_mac_addr_resp_event { struct wmi_eapol_rx_event { u8 src_mac[WMI_MAC_LEN]; __le16 eapol_len; - u8 eapol[0]; + u8 eapol[]; } __packed; /* WMI_READY_EVENTID */ @@ -2343,7 +2343,7 @@ struct wmi_connect_event { u8 aid; u8 reserved2[2]; /* not in use */ - u8 assoc_info[0]; + u8 assoc_info[]; } __packed; /* disconnect_reason */ @@ -2376,7 +2376,7 @@ struct wmi_disconnect_event { /* last assoc req may passed to host - not in used */ u8 assoc_resp_len; /* last assoc req may passed to host - not in used */ - u8 assoc_info[0]; + u8 assoc_info[]; } __packed; /* WMI_SCAN_COMPLETE_EVENTID */ @@ -2400,7 +2400,7 @@ struct wmi_ft_auth_status_event { u8 reserved[3]; u8 mac_addr[WMI_MAC_LEN]; __le16 ie_len; - u8 ie_info[0]; + u8 ie_info[]; } __packed; /* WMI_FT_REASSOC_STATUS_EVENTID */ @@ -2418,7 +2418,7 @@ struct wmi_ft_reassoc_status_event { __le16 reassoc_req_ie_len; __le16 reassoc_resp_ie_len; u8 reserved[4]; - u8 ie_info[0]; + u8 ie_info[]; } __packed; /* wmi_rx_mgmt_info */ @@ -2461,7 +2461,7 @@ struct wmi_stop_sched_scan_event { struct wmi_sched_scan_result_event { struct wmi_rx_mgmt_info info; - u8 payload[0]; + u8 payload[]; } __packed; /* WMI_ACS_PASSIVE_SCAN_COMPLETE_EVENT */ @@ -2492,7 +2492,7 @@ struct wmi_acs_passive_scan_complete_event { __le16 filled; u8 num_scanned_channels; u8 reserved; - struct scan_acs_info scan_info_list[0]; + struct scan_acs_info scan_info_list[]; } __packed; /* WMI_BA_STATUS_EVENTID */ @@ -2751,7 +2751,7 @@ struct wmi_rf_xpm_read_result_event { u8 status; u8 reserved[3]; /* requested num_bytes of data */ - u8 data_bytes[0]; + u8 data_bytes[]; } __packed; /* EVENT: WMI_RF_XPM_WRITE_RESULT_EVENTID */ @@ -2769,7 +2769,7 @@ struct wmi_tx_mgmt_packet_event { /* WMI_RX_MGMT_PACKET_EVENTID */ struct wmi_rx_mgmt_packet_event { struct wmi_rx_mgmt_info info; - u8 payload[0]; + u8 payload[]; } __packed; /* WMI_ECHO_RSP_EVENTID */ @@ -2969,7 +2969,7 @@ struct wmi_rs_cfg_ex_cmd { u8 each_mcs_cfg_size; u8 reserved[3]; /* Configuration for each MCS */ - struct wmi_rs_cfg_ex_mcs each_mcs_cfg[0]; + struct wmi_rs_cfg_ex_mcs each_mcs_cfg[]; } __packed; /* WMI_RS_CFG_EX_EVENTID */ @@ -3178,7 +3178,7 @@ struct wmi_get_detailed_rs_res_ex_event { u8 each_mcs_results_size; u8 reserved1[3]; /* Results for each MCS */ - struct wmi_rs_results_ex_mcs each_mcs_results[0]; + struct wmi_rs_results_ex_mcs each_mcs_results[]; } __packed; /* BRP antenna limit mode */ @@ -3320,7 +3320,7 @@ struct wmi_set_link_monitor_cmd { u8 rssi_hyst; u8 reserved[12]; u8 rssi_thresholds_list_size; - s8 
rssi_thresholds_list[0]; + s8 rssi_thresholds_list[]; } __packed; /* wmi_link_monitor_event_type */ @@ -3637,7 +3637,7 @@ struct wmi_tof_ftm_per_dest_res_event { /* Measurments are from RFs, defined by the mask */ __le32 meas_rf_mask; u8 reserved0[3]; - struct wmi_responder_ftm_res responder_ftm_res[0]; + struct wmi_responder_ftm_res responder_ftm_res[]; } __packed; /* WMI_TOF_CFG_RESPONDER_EVENTID */ @@ -3669,7 +3669,7 @@ struct wmi_tof_channel_info_event { /* data report length */ u8 len; /* data report payload */ - u8 report[0]; + u8 report[]; } __packed; /* WMI_TOF_SET_TX_RX_OFFSET_EVENTID */ @@ -4085,7 +4085,7 @@ struct wmi_link_stats_event { u8 has_next; u8 reserved[5]; /* a stream of wmi_link_stats_record_s */ - u8 payload[0]; + u8 payload[]; } __packed; /* WMI_LINK_STATS_EVENT */ @@ -4094,7 +4094,7 @@ struct wmi_link_stats_record { u8 record_type_id; u8 reserved; __le16 record_size; - u8 record[0]; + u8 record[]; } __packed; /* WMI_LINK_STATS_TYPE_BASIC */ diff --git a/drivers/net/wireless/atmel/atmel.c b/drivers/net/wireless/atmel/atmel.c index 74538085cfb7..d5875836068c 100644 --- a/drivers/net/wireless/atmel/atmel.c +++ b/drivers/net/wireless/atmel/atmel.c @@ -798,7 +798,6 @@ static void tx_update_descriptor(struct atmel_private *priv, int is_bcast, static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev) { - static const u8 SNAP_RFC1024[6] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 }; struct atmel_private *priv = netdev_priv(dev); struct ieee80211_hdr header; unsigned long flags; @@ -853,7 +852,7 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev) } if (priv->use_wpa) - memcpy(&header.addr4, SNAP_RFC1024, ETH_ALEN); + memcpy(&header.addr4, rfc1042_header, ETH_ALEN); header.frame_control = cpu_to_le16(frame_ctl); /* Copy the wireless header into the card */ diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c index 39da1a4c30ac..3ad94dad2d89 100644 --- a/drivers/net/wireless/broadcom/b43/main.c +++ b/drivers/net/wireless/broadcom/b43/main.c @@ -5569,7 +5569,7 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev) /* fill hw info */ ieee80211_hw_set(hw, RX_INCLUDES_FCS); ieee80211_hw_set(hw, SIGNAL_DBM); - + ieee80211_hw_set(hw, MFP_CAPABLE); hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_MESH_POINT) | diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c index 8b6b657c4b85..5208a39fd6f7 100644 --- a/drivers/net/wireless/broadcom/b43legacy/main.c +++ b/drivers/net/wireless/broadcom/b43legacy/main.c @@ -3801,6 +3801,7 @@ static int b43legacy_wireless_init(struct ssb_device *dev) /* fill hw info */ ieee80211_hw_set(hw, RX_INCLUDES_FCS); ieee80211_hw_set(hw, SIGNAL_DBM); + ieee80211_hw_set(hw, MFP_CAPABLE); /* Allow WPA3 in software */ hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_AP) | diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c index 22a17ae09e94..b1a66320ba54 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c @@ -43,7 +43,8 @@ #define SDIO_FUNC1_BLOCKSIZE 64 #define SDIO_FUNC2_BLOCKSIZE 512 -#define SDIO_4359_FUNC2_BLOCKSIZE 256 +#define SDIO_4373_FUNC2_BLOCKSIZE 256 +#define SDIO_435X_FUNC2_BLOCKSIZE 256 /* Maximum milliseconds to wait for F2 to come up */ #define SDIO_WAIT_F2RDY 3000 @@ -910,13 +911,28 @@ static int 
brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev) sdio_release_host(sdiodev->func1); goto out; } - if (sdiodev->func2->device == SDIO_DEVICE_ID_BROADCOM_4359) - f2_blksz = SDIO_4359_FUNC2_BLOCKSIZE; + switch (sdiodev->func2->device) { + case SDIO_DEVICE_ID_CYPRESS_4373: + f2_blksz = SDIO_4373_FUNC2_BLOCKSIZE; + break; + case SDIO_DEVICE_ID_BROADCOM_4359: + /* fallthrough */ + case SDIO_DEVICE_ID_BROADCOM_4354: + /* fallthrough */ + case SDIO_DEVICE_ID_BROADCOM_4356: + f2_blksz = SDIO_435X_FUNC2_BLOCKSIZE; + break; + default: + break; + } + ret = sdio_set_block_size(sdiodev->func2, f2_blksz); if (ret) { brcmf_err("Failed to set F2 blocksize\n"); sdio_release_host(sdiodev->func1); goto out; + } else { + brcmf_dbg(SDIO, "set F2 blocksize to %d\n", f2_blksz); } /* increase F2 timeout */ diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index f2f84af923a9..a757abd7a599 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -23,6 +23,7 @@ #include "p2p.h" #include "btcoex.h" #include "pno.h" +#include "fwsignal.h" #include "cfg80211.h" #include "feature.h" #include "fwil.h" @@ -1819,6 +1820,10 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme) switch (sme->crypto.akm_suites[0]) { case WLAN_AKM_SUITE_SAE: val = WPA3_AUTH_SAE_PSK; + if (sme->crypto.sae_pwd) { + brcmf_dbg(INFO, "using SAE offload\n"); + profile->use_fwsup = BRCMF_PROFILE_FWSUP_SAE; + } break; default: bphy_err(drvr, "invalid cipher group (%d)\n", @@ -2104,11 +2109,6 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev, goto done; } - if (sme->crypto.sae_pwd) { - brcmf_dbg(INFO, "using SAE offload\n"); - profile->use_fwsup = BRCMF_PROFILE_FWSUP_SAE; - } - if (sme->crypto.psk && profile->use_fwsup != BRCMF_PROFILE_FWSUP_SAE) { if (WARN_ON(profile->use_fwsup != BRCMF_PROFILE_FWSUP_NONE)) { @@ -2468,6 +2468,17 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev, if (!ext_key) key->flags = BRCMF_PRIMARY_KEY; + if (params->seq && params->seq_len == 6) { + /* rx iv */ + u8 *ivptr; + + ivptr = (u8 *)params->seq; + key->rxiv.hi = (ivptr[5] << 24) | (ivptr[4] << 16) | + (ivptr[3] << 8) | ivptr[2]; + key->rxiv.lo = (ivptr[1] << 8) | ivptr[0]; + key->iv_initialized = true; + } + switch (params->cipher) { case WLAN_CIPHER_SUITE_WEP40: key->algo = CRYPTO_ALGO_WEP1; @@ -4613,6 +4624,48 @@ brcmf_config_ap_mgmt_ie(struct brcmf_cfg80211_vif *vif, } static s32 +brcmf_parse_configure_security(struct brcmf_if *ifp, + struct cfg80211_ap_settings *settings, + enum nl80211_iftype dev_role) +{ + const struct brcmf_tlv *rsn_ie; + const struct brcmf_vs_tlv *wpa_ie; + s32 err = 0; + + /* find the RSN_IE */ + rsn_ie = brcmf_parse_tlvs((u8 *)settings->beacon.tail, + settings->beacon.tail_len, WLAN_EID_RSN); + + /* find the WPA_IE */ + wpa_ie = brcmf_find_wpaie((u8 *)settings->beacon.tail, + settings->beacon.tail_len); + + if (wpa_ie || rsn_ie) { + brcmf_dbg(TRACE, "WPA(2) IE is found\n"); + if (wpa_ie) { + /* WPA IE */ + err = brcmf_configure_wpaie(ifp, wpa_ie, false); + if (err < 0) + return err; + } else { + struct brcmf_vs_tlv *tmp_ie; + + tmp_ie = (struct brcmf_vs_tlv *)rsn_ie; + + /* RSN IE */ + err = brcmf_configure_wpaie(ifp, tmp_ie, true); + if (err < 0) + return err; + } + } else { + brcmf_dbg(TRACE, "No WPA(2) IEs found\n"); + brcmf_configure_opensecurity(ifp); + } + + return err; +} + +static s32 
brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, struct cfg80211_ap_settings *settings) { @@ -4624,8 +4677,6 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, const struct brcmf_tlv *country_ie; struct brcmf_ssid_le ssid_le; s32 err = -EPERM; - const struct brcmf_tlv *rsn_ie; - const struct brcmf_vs_tlv *wpa_ie; struct brcmf_join_params join_params; enum nl80211_iftype dev_role; struct brcmf_fil_bss_enable_le bss_enable; @@ -4679,36 +4730,6 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, brcmf_configure_arp_nd_offload(ifp, false); } - /* find the RSN_IE */ - rsn_ie = brcmf_parse_tlvs((u8 *)settings->beacon.tail, - settings->beacon.tail_len, WLAN_EID_RSN); - - /* find the WPA_IE */ - wpa_ie = brcmf_find_wpaie((u8 *)settings->beacon.tail, - settings->beacon.tail_len); - - if ((wpa_ie != NULL || rsn_ie != NULL)) { - brcmf_dbg(TRACE, "WPA(2) IE is found\n"); - if (wpa_ie != NULL) { - /* WPA IE */ - err = brcmf_configure_wpaie(ifp, wpa_ie, false); - if (err < 0) - goto exit; - } else { - struct brcmf_vs_tlv *tmp_ie; - - tmp_ie = (struct brcmf_vs_tlv *)rsn_ie; - - /* RSN IE */ - err = brcmf_configure_wpaie(ifp, tmp_ie, true); - if (err < 0) - goto exit; - } - } else { - brcmf_dbg(TRACE, "No WPA(2) IEs found\n"); - brcmf_configure_opensecurity(ifp); - } - /* Parameters shared by all radio interfaces */ if (!mbss) { if ((supports_11d) && (is_11d != ifp->vif->is_11d)) { @@ -4790,6 +4811,14 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, bphy_err(drvr, "BRCMF_C_UP error (%d)\n", err); goto exit; } + + err = brcmf_parse_configure_security(ifp, settings, + NL80211_IFTYPE_AP); + if (err < 0) { + bphy_err(drvr, "brcmf_parse_configure_security error\n"); + goto exit; + } + /* On DOWN the firmware removes the WEP keys, reconfigure * them if they were set. 
*/ @@ -4822,6 +4851,14 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, chanspec, err); goto exit; } + + err = brcmf_parse_configure_security(ifp, settings, + NL80211_IFTYPE_P2P_GO); + if (err < 0) { + brcmf_err("brcmf_parse_configure_security error\n"); + goto exit; + } + err = brcmf_fil_bsscfg_data_set(ifp, "ssid", &ssid_le, sizeof(ssid_le)); if (err < 0) { @@ -4994,21 +5031,15 @@ brcmf_cfg80211_change_station(struct wiphy *wiphy, struct net_device *ndev, } static void -brcmf_cfg80211_mgmt_frame_register(struct wiphy *wiphy, - struct wireless_dev *wdev, - u16 frame_type, bool reg) +brcmf_cfg80211_update_mgmt_frame_registrations(struct wiphy *wiphy, + struct wireless_dev *wdev, + struct mgmt_frame_regs *upd) { struct brcmf_cfg80211_vif *vif; - u16 mgmt_type; - - brcmf_dbg(TRACE, "Enter, frame_type %04x, reg=%d\n", frame_type, reg); - mgmt_type = (frame_type & IEEE80211_FCTL_STYPE) >> 4; vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev); - if (reg) - vif->mgmt_rx_reg |= BIT(mgmt_type); - else - vif->mgmt_rx_reg &= ~BIT(mgmt_type); + + vif->mgmt_rx_reg = upd->interface_stypes; } @@ -5423,7 +5454,8 @@ static struct cfg80211_ops brcmf_cfg80211_ops = { .change_station = brcmf_cfg80211_change_station, .sched_scan_start = brcmf_cfg80211_sched_scan_start, .sched_scan_stop = brcmf_cfg80211_sched_scan_stop, - .mgmt_frame_register = brcmf_cfg80211_mgmt_frame_register, + .update_mgmt_frame_registrations = + brcmf_cfg80211_update_mgmt_frame_registrations, .mgmt_tx = brcmf_cfg80211_mgmt_tx, .remain_on_channel = brcmf_p2p_remain_on_channel, .cancel_remain_on_channel = brcmf_cfg80211_cancel_remain_on_channel, @@ -5510,7 +5542,8 @@ static bool brcmf_is_linkup(struct brcmf_cfg80211_vif *vif, u32 event = e->event_code; u32 status = e->status; - if (vif->profile.use_fwsup == BRCMF_PROFILE_FWSUP_PSK && + if ((vif->profile.use_fwsup == BRCMF_PROFILE_FWSUP_PSK || + vif->profile.use_fwsup == BRCMF_PROFILE_FWSUP_SAE) && event == BRCMF_E_PSK_SUP && status == BRCMF_E_STATUS_FWSUP_COMPLETED) set_bit(BRCMF_VIF_STATUS_EAP_SUCCESS, &vif->sme_state); @@ -5586,12 +5619,151 @@ static void brcmf_clear_assoc_ies(struct brcmf_cfg80211_info *cfg) conn_info->resp_ie_len = 0; } +u8 brcmf_map_prio_to_prec(void *config, u8 prio) +{ + struct brcmf_cfg80211_info *cfg = (struct brcmf_cfg80211_info *)config; + + if (!cfg) + return (prio == PRIO_8021D_NONE || prio == PRIO_8021D_BE) ? + (prio ^ 2) : prio; + + /* For those AC(s) with ACM flag set to 1, convert its 4-level priority + * to an 8-level precedence which is the same as BE's + */ + if (prio > PRIO_8021D_EE && + cfg->ac_priority[prio] == cfg->ac_priority[PRIO_8021D_BE]) + return cfg->ac_priority[prio] * 2; + + /* Conversion of 4-level priority to 8-level precedence */ + if (prio == PRIO_8021D_BE || prio == PRIO_8021D_BK || + prio == PRIO_8021D_CL || prio == PRIO_8021D_VO) + return cfg->ac_priority[prio] * 2; + else + return cfg->ac_priority[prio] * 2 + 1; +} + +u8 brcmf_map_prio_to_aci(void *config, u8 prio) +{ + /* Prio here refers to the 802.1d priority in range of 0 to 7. + * ACI here refers to the WLAN AC Index in range of 0 to 3. + * This function will return ACI corresponding to input prio. 
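For a concrete picture of the prio-to-ACI lookup, here is the default table that brcmf_init_wmm_prio() (just below) installs, exercised as a standalone program; the fifo numbering follows enum brcmf_fws_fifo (BK=0, BE=1, VI=2, VO=3), which this series moves into fwsignal.h:

#include <stdio.h>

/* Firmware fifo indices, per enum brcmf_fws_fifo. */
enum { FIFO_AC_BK = 0, FIFO_AC_BE, FIFO_AC_VI, FIFO_AC_VO };

int main(void)
{
	/* Default table, mirroring brcmf_init_wmm_prio():
	 * 802.1d prio 0,3 -> BE; 1,2 -> BK; 4,5 -> VI; 6,7 -> VO. */
	int ac_priority[8] = {
		FIFO_AC_BE, FIFO_AC_BK, FIFO_AC_BK, FIFO_AC_BE,
		FIFO_AC_VI, FIFO_AC_VI, FIFO_AC_VO, FIFO_AC_VO,
	};
	int prio;

	/* brcmf_map_prio_to_aci() reduces to this lookup once cfg is set. */
	for (prio = 0; prio < 8; prio++)
		printf("802.1d prio %d -> AC fifo %d\n", prio,
		       ac_priority[prio]);
	return 0;
}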
+ */ + struct brcmf_cfg80211_info *cfg = (struct brcmf_cfg80211_info *)config; + + if (cfg) + return cfg->ac_priority[prio]; + + return prio; +} + +static void brcmf_init_wmm_prio(u8 *priority) +{ + /* Initialize AC priority array to default + * 802.1d priority as per following table: + * 802.1d prio 0,3 maps to BE + * 802.1d prio 1,2 maps to BK + * 802.1d prio 4,5 maps to VI + * 802.1d prio 6,7 maps to VO + */ + priority[0] = BRCMF_FWS_FIFO_AC_BE; + priority[3] = BRCMF_FWS_FIFO_AC_BE; + priority[1] = BRCMF_FWS_FIFO_AC_BK; + priority[2] = BRCMF_FWS_FIFO_AC_BK; + priority[4] = BRCMF_FWS_FIFO_AC_VI; + priority[5] = BRCMF_FWS_FIFO_AC_VI; + priority[6] = BRCMF_FWS_FIFO_AC_VO; + priority[7] = BRCMF_FWS_FIFO_AC_VO; +} + +static void brcmf_wifi_prioritize_acparams(const + struct brcmf_cfg80211_edcf_acparam *acp, u8 *priority) +{ + u8 aci; + u8 aifsn; + u8 ecwmin; + u8 ecwmax; + u8 acm; + u8 ranking_basis[EDCF_AC_COUNT]; + u8 aci_prio[EDCF_AC_COUNT]; /* AC_BE, AC_BK, AC_VI, AC_VO */ + u8 index; + + for (aci = 0; aci < EDCF_AC_COUNT; aci++, acp++) { + aifsn = acp->ACI & EDCF_AIFSN_MASK; + acm = (acp->ACI & EDCF_ACM_MASK) ? 1 : 0; + ecwmin = acp->ECW & EDCF_ECWMIN_MASK; + ecwmax = (acp->ECW & EDCF_ECWMAX_MASK) >> EDCF_ECWMAX_SHIFT; + brcmf_dbg(CONN, "ACI %d aifsn %d acm %d ecwmin %d ecwmax %d\n", + aci, aifsn, acm, ecwmin, ecwmax); + /* Default AC_VO will be the lowest ranking value */ + ranking_basis[aci] = aifsn + ecwmin + ecwmax; + /* Initialise priority starting at 0 (AC_BE) */ + aci_prio[aci] = 0; + + /* If ACM is set, STA can't use this AC as per 802.11. + * Change the ranking to BE + */ + if (aci != AC_BE && aci != AC_BK && acm == 1) + ranking_basis[aci] = ranking_basis[AC_BE]; + } + + /* Ranking method which works for AC priority + * swapping when values for cwmin, cwmax and aifsn are varied + * Compare each aci_prio against each other aci_prio + */ + for (aci = 0; aci < EDCF_AC_COUNT; aci++) { + for (index = 0; index < EDCF_AC_COUNT; index++) { + if (index != aci) { + /* Smaller ranking value has higher priority, + * so increment priority for each ACI which has + * a higher ranking value + */ + if (ranking_basis[aci] < ranking_basis[index]) + aci_prio[aci]++; + } + } + } + + /* By now, aci_prio[] will be in range of 0 to 3. + * Use ACI prio to get the new priority value for + * each 802.1d traffic type, in this range. 
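The ranking pass described in the comments above reduces to a few lines: each AC's basis is AIFSN + ECWmin + ECWmax, and an AC's resulting priority is the number of other ACs with a larger basis (smaller basis means higher priority). A standalone rendering with invented sample numbers:

#include <stdio.h>

#define EDCF_AC_COUNT 4

int main(void)
{
	/* Hypothetical AIFSN + ECWmin + ECWmax sums for BE, BK, VI, VO. */
	int ranking_basis[EDCF_AC_COUNT] = { 17, 21, 12, 9 };
	int aci_prio[EDCF_AC_COUNT] = { 0, 0, 0, 0 };
	int aci, other;

	/* Same pairwise comparison as brcmf_wifi_prioritize_acparams():
	 * bump an AC once for every AC whose basis it undercuts. */
	for (aci = 0; aci < EDCF_AC_COUNT; aci++)
		for (other = 0; other < EDCF_AC_COUNT; other++)
			if (other != aci &&
			    ranking_basis[aci] < ranking_basis[other])
				aci_prio[aci]++;

	for (aci = 0; aci < EDCF_AC_COUNT; aci++)
		printf("AC %d: basis %d -> prio %d\n",
		       aci, ranking_basis[aci], aci_prio[aci]);
	return 0;
}

With these inputs VO ends up at priority 3 (highest) and BK at 0, matching the intuition that shorter contention parameters should win.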
+ */ + if (!(aci_prio[AC_BE] == aci_prio[AC_BK] && + aci_prio[AC_BK] == aci_prio[AC_VI] && + aci_prio[AC_VI] == aci_prio[AC_VO])) { + /* 802.1d 0,3 maps to BE */ + priority[0] = aci_prio[AC_BE]; + priority[3] = aci_prio[AC_BE]; + + /* 802.1d 1,2 maps to BK */ + priority[1] = aci_prio[AC_BK]; + priority[2] = aci_prio[AC_BK]; + + /* 802.1d 4,5 maps to VI */ + priority[4] = aci_prio[AC_VI]; + priority[5] = aci_prio[AC_VI]; + + /* 802.1d 6,7 maps to VO */ + priority[6] = aci_prio[AC_VO]; + priority[7] = aci_prio[AC_VO]; + } else { + /* Initialize to default priority */ + brcmf_init_wmm_prio(priority); + } + + brcmf_dbg(CONN, "Adj prio BE 0->%d, BK 1->%d, BK 2->%d, BE 3->%d\n", + priority[0], priority[1], priority[2], priority[3]); + + brcmf_dbg(CONN, "Adj prio VI 4->%d, VI 5->%d, VO 6->%d, VO 7->%d\n", + priority[4], priority[5], priority[6], priority[7]); +} + static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg, struct brcmf_if *ifp) { struct brcmf_pub *drvr = cfg->pub; struct brcmf_cfg80211_assoc_ielen_le *assoc_info; struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg); + struct brcmf_cfg80211_edcf_acparam edcf_acparam_info[EDCF_AC_COUNT]; u32 req_len; u32 resp_len; s32 err = 0; @@ -5640,6 +5812,17 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg, GFP_KERNEL); if (!conn_info->resp_ie) conn_info->resp_ie_len = 0; + + err = brcmf_fil_iovar_data_get(ifp, "wme_ac_sta", + edcf_acparam_info, + sizeof(edcf_acparam_info)); + if (err) { + brcmf_err("could not get wme_ac_sta (%d)\n", err); + return err; + } + + brcmf_wifi_prioritize_acparams(edcf_acparam_info, + cfg->ac_priority); } else { conn_info->resp_ie_len = 0; conn_info->resp_ie = NULL; @@ -6056,6 +6239,7 @@ static s32 wl_init_priv(struct brcmf_cfg80211_info *cfg) mutex_init(&cfg->usr_sync); brcmf_init_escan(cfg); brcmf_init_conf(cfg->conf); + brcmf_init_wmm_prio(cfg->ac_priority); init_completion(&cfg->vif_disabled); return err; } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h index 3ca8c07d6370..333fdf394f95 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h @@ -23,6 +23,23 @@ #define WL_ROAM_TRIGGER_LEVEL -75 #define WL_ROAM_DELTA 20 +/* WME Access Category Indices (ACIs) */ +#define AC_BE 0 /* Best Effort */ +#define AC_BK 1 /* Background */ +#define AC_VI 2 /* Video */ +#define AC_VO 3 /* Voice */ +#define EDCF_AC_COUNT 4 +#define MAX_8021D_PRIO 8 + +#define EDCF_ACI_MASK 0x60 +#define EDCF_ACI_SHIFT 5 +#define EDCF_ACM_MASK 0x10 +#define EDCF_ECWMIN_MASK 0x0f +#define EDCF_ECWMAX_SHIFT 4 +#define EDCF_AIFSN_MASK 0x0f +#define EDCF_AIFSN_MAX 15 +#define EDCF_ECWMAX_MASK 0xf0 + /* Keep BRCMF_ESCAN_BUF_SIZE below 64K (65536). Allocating over 64K can be * problematic on some systems and should be avoided.
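The EDCF_* masks added above unpack the raw ACI/ECW octets that the wme_ac_sta iovar returns (the struct brcmf_cfg80211_edcf_acparam layout follows below). A sketch decoding one hypothetical AC record with those masks:

#include <stdio.h>

/* Mask values as added to cfg80211.h above. */
#define EDCF_ACI_MASK		0x60
#define EDCF_ACI_SHIFT		5
#define EDCF_ACM_MASK		0x10
#define EDCF_AIFSN_MASK		0x0f
#define EDCF_ECWMIN_MASK	0x0f
#define EDCF_ECWMAX_MASK	0xf0
#define EDCF_ECWMAX_SHIFT	4

int main(void)
{
	/* Hypothetical AC record: ACI=2 (video), ACM set, AIFSN=3,
	 * ECWmin=4, ECWmax=10; the field layout follows the WMM IE. */
	unsigned char ACI = (2 << EDCF_ACI_SHIFT) | EDCF_ACM_MASK | 3;
	unsigned char ECW = (10 << EDCF_ECWMAX_SHIFT) | 4;

	printf("aci=%u acm=%u aifsn=%u ecwmin=%u ecwmax=%u\n",
	       (ACI & EDCF_ACI_MASK) >> EDCF_ACI_SHIFT,
	       !!(ACI & EDCF_ACM_MASK),
	       ACI & EDCF_AIFSN_MASK,
	       ECW & EDCF_ECWMIN_MASK,
	       (ECW & EDCF_ECWMAX_MASK) >> EDCF_ECWMAX_SHIFT);
	return 0;
}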
*/ @@ -209,6 +226,12 @@ struct brcmf_cfg80211_assoc_ielen_le { __le32 resp_len; }; +struct brcmf_cfg80211_edcf_acparam { + u8 ACI; + u8 ECW; + u16 TXOP; /* stored in network order (ls octet first) */ +}; + /* dongle escan state */ enum wl_escan_state { WL_ESCAN_STATE_IDLE, @@ -327,6 +350,7 @@ struct brcmf_cfg80211_info { struct brcmf_assoclist_le assoclist; struct brcmf_cfg80211_wowl wowl; struct brcmf_pno_info *pno; + u8 ac_priority[MAX_8021D_PRIO]; }; /** diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h index 144cf4570bc3..8b5f49997c8b 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h @@ -72,4 +72,8 @@ static inline void brcmf_dmi_probe(struct brcmf_mp_device *settings, u32 chip, u32 chiprev) {} #endif +u8 brcmf_map_prio_to_prec(void *cfg, u8 prio); + +u8 brcmf_map_prio_to_aci(void *cfg, u8 prio); + #endif /* BRCMFMAC_COMMON_H */ diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c index 8e9d067bdfed..096f6b969dd8 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c @@ -26,10 +26,10 @@ #define BRCMF_FLOWRING_HASH_STA(fifo, ifidx) (fifo + ifidx * 16) static const u8 brcmf_flowring_prio2fifo[] = { - 1, - 0, 0, 1, + 1, + 0, 2, 2, 3, diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c index 2b7837887c0b..09701262330d 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c @@ -311,28 +311,6 @@ struct brcmf_skbuff_cb { /* How long to defer borrowing in jiffies */ #define BRCMF_FWS_BORROW_DEFER_PERIOD (HZ / 10) -/** - * enum brcmf_fws_fifo - fifo indices used by dongle firmware. - * - * @BRCMF_FWS_FIFO_FIRST: first fifo, ie. background. - * @BRCMF_FWS_FIFO_AC_BK: fifo for background traffic. - * @BRCMF_FWS_FIFO_AC_BE: fifo for best-effort traffic. - * @BRCMF_FWS_FIFO_AC_VI: fifo for video traffic. - * @BRCMF_FWS_FIFO_AC_VO: fifo for voice traffic. - * @BRCMF_FWS_FIFO_BCMC: fifo for broadcast/multicast (AP only). - * @BRCMF_FWS_FIFO_ATIM: fifo for ATIM (AP only). - * @BRCMF_FWS_FIFO_COUNT: number of fifos. - */ -enum brcmf_fws_fifo { - BRCMF_FWS_FIFO_FIRST, - BRCMF_FWS_FIFO_AC_BK = BRCMF_FWS_FIFO_FIRST, - BRCMF_FWS_FIFO_AC_BE, - BRCMF_FWS_FIFO_AC_VI, - BRCMF_FWS_FIFO_AC_VO, - BRCMF_FWS_FIFO_BCMC, - BRCMF_FWS_FIFO_ATIM, - BRCMF_FWS_FIFO_COUNT -}; /** * enum brcmf_fws_txstatus - txstatus flag values. 
@@ -2130,8 +2108,10 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb) skcb->if_flags = 0; skcb->state = BRCMF_FWS_SKBSTATE_NEW; brcmf_skb_if_flags_set_field(skb, INDEX, ifp->ifidx); + + /* mapping from 802.1d priority to firmware fifo index */ if (!multicast) - fifo = brcmf_fws_prio2fifo[skb->priority]; + fifo = brcmf_map_prio_to_aci(drvr->config, skb->priority); brcmf_fws_lock(fws); if (fifo != BRCMF_FWS_FIFO_AC_BE && fifo < BRCMF_FWS_FIFO_BCMC) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h index b486d578ec96..b16a9d1c0508 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h @@ -6,6 +6,29 @@ #ifndef FWSIGNAL_H_ #define FWSIGNAL_H_ +/** + * enum brcmf_fws_fifo - fifo indices used by dongle firmware. + * + * @BRCMF_FWS_FIFO_FIRST: first fifo, ie. background. + * @BRCMF_FWS_FIFO_AC_BK: fifo for background traffic. + * @BRCMF_FWS_FIFO_AC_BE: fifo for best-effort traffic. + * @BRCMF_FWS_FIFO_AC_VI: fifo for video traffic. + * @BRCMF_FWS_FIFO_AC_VO: fifo for voice traffic. + * @BRCMF_FWS_FIFO_BCMC: fifo for broadcast/multicast (AP only). + * @BRCMF_FWS_FIFO_ATIM: fifo for ATIM (AP only). + * @BRCMF_FWS_FIFO_COUNT: number of fifos. + */ +enum brcmf_fws_fifo { + BRCMF_FWS_FIFO_FIRST, + BRCMF_FWS_FIFO_AC_BK = BRCMF_FWS_FIFO_FIRST, + BRCMF_FWS_FIFO_AC_BE, + BRCMF_FWS_FIFO_AC_VI, + BRCMF_FWS_FIFO_AC_VO, + BRCMF_FWS_FIFO_BCMC, + BRCMF_FWS_FIFO_ATIM, + BRCMF_FWS_FIFO_COUNT +}; + struct brcmf_fws_info *brcmf_fws_attach(struct brcmf_pub *drvr); void brcmf_fws_detach(struct brcmf_fws_info *fws); void brcmf_fws_debugfs_create(struct brcmf_pub *drvr); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c index e32c24a2670d..d2795dc17c46 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c @@ -1268,6 +1268,30 @@ bool brcmf_p2p_scan_finding_common_channel(struct brcmf_cfg80211_info *cfg, } /** + * brcmf_p2p_abort_action_frame() - abort action frame. + * + * @cfg: common configuration struct. + * + */ +static s32 brcmf_p2p_abort_action_frame(struct brcmf_cfg80211_info *cfg) +{ + struct brcmf_p2p_info *p2p = &cfg->p2p; + struct brcmf_cfg80211_vif *vif; + s32 err; + s32 int_val = 1; + + brcmf_dbg(TRACE, "Enter\n"); + + vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif; + err = brcmf_fil_bsscfg_data_set(vif->ifp, "actframe_abort", &int_val, + sizeof(s32)); + if (err) + brcmf_err(" aborting action frame has failed (%d)\n", err); + + return err; +} + +/** * brcmf_p2p_stop_wait_next_action_frame() - finish scan if af tx complete. * * @cfg: common configuration struct. @@ -1278,6 +1302,7 @@ brcmf_p2p_stop_wait_next_action_frame(struct brcmf_cfg80211_info *cfg) { struct brcmf_p2p_info *p2p = &cfg->p2p; struct brcmf_if *ifp = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp; + s32 err; if (test_bit(BRCMF_P2P_STATUS_SENDING_ACT_FRAME, &p2p->status) && (test_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED, &p2p->status) || @@ -1286,8 +1311,13 @@ brcmf_p2p_stop_wait_next_action_frame(struct brcmf_cfg80211_info *cfg) /* if channel is not zero, "actfame" uses off channel scan. * So abort scan for off channel completion. 
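brcmf_map_prio_to_aci() pairs with brcmf_map_prio_to_prec(): each access category owns two of the eight precedence levels, so the AC/fifo index falls out of the precedence by halving. A hedged sketch of that relationship (the real body lives in common.c and also covers the ACM corner cases):

/* 802.1d priority -> AC/fifo index (0-3), derived from the 8-level
 * precedence; assumes each AC owns two consecutive precedence slots
 */
u8 brcmf_map_prio_to_aci(void *config, u8 prio)
{
	return brcmf_map_prio_to_prec(config, prio) / 2;
}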
*/ - if (p2p->af_sent_channel) - brcmf_notify_escan_complete(cfg, ifp, true, true); + if (p2p->af_sent_channel) { + /* abort actframe using actframe_abort or abort scan */ + err = brcmf_p2p_abort_action_frame(cfg); + if (err) + brcmf_notify_escan_complete(cfg, ifp, true, + true); + } } else if (test_bit(BRCMF_P2P_STATUS_WAITING_NEXT_AF_LISTEN, &p2p->status)) { brcmf_dbg(TRACE, "*** Wake UP ** abort listen for next af frame\n"); @@ -1836,7 +1866,7 @@ bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg, dwell_overflow = brcmf_p2p_check_dwell_overflow(requested_dwell, dwell_jiffies); } - if (ack == false) { + if (!ack) { bphy_err(drvr, "Failed to send Action Frame(retry %d)\n", tx_retry); clear_bit(BRCMF_P2P_STATUS_GO_NEG_PHASE, &p2p->status); @@ -2203,7 +2233,7 @@ fail: return ERR_PTR(err); } -int brcmf_p2p_get_conn_idx(struct brcmf_cfg80211_info *cfg) +static int brcmf_p2p_get_conn_idx(struct brcmf_cfg80211_info *cfg) { int i; struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg)); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index 3a08252f1a53..760b7737e745 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -41,9 +41,22 @@ /* watermark expressed in number of words */ #define DEFAULT_F2_WATERMARK 0x8 #define CY_4373_F2_WATERMARK 0x40 +#define CY_4373_F1_MESBUSYCTRL (CY_4373_F2_WATERMARK | SBSDIO_MESBUSYCTRL_ENAB) #define CY_43012_F2_WATERMARK 0x60 -#define CY_4359_F2_WATERMARK 0x40 -#define CY_4359_F1_MESBUSYCTRL (CY_4359_F2_WATERMARK | SBSDIO_MESBUSYCTRL_ENAB) +#define CY_43012_MES_WATERMARK 0x50 +#define CY_43012_MESBUSYCTRL (CY_43012_MES_WATERMARK | \ + SBSDIO_MESBUSYCTRL_ENAB) +#define CY_4339_F2_WATERMARK 48 +#define CY_4339_MES_WATERMARK 80 +#define CY_4339_MESBUSYCTRL (CY_4339_MES_WATERMARK | \ + SBSDIO_MESBUSYCTRL_ENAB) +#define CY_43455_F2_WATERMARK 0x60 +#define CY_43455_MES_WATERMARK 0x50 +#define CY_43455_MESBUSYCTRL (CY_43455_MES_WATERMARK | \ + SBSDIO_MESBUSYCTRL_ENAB) +#define CY_435X_F2_WATERMARK 0x40 +#define CY_435X_F1_MESBUSYCTRL (CY_435X_F2_WATERMARK | \ + SBSDIO_MESBUSYCTRL_ENAB) #ifdef DEBUG @@ -315,15 +328,6 @@ struct rte_console { #define MAX_KSO_ATTEMPTS (PMU_MAX_TRANSITION_DLY/KSO_WAIT_US) #define BRCMF_SDIO_MAX_ACCESS_ERRORS 5 -/* - * Conversion of 802.1D priority to precedence level - */ -static uint prio2prec(u32 prio) -{ - return (prio == PRIO_8021D_NONE || prio == PRIO_8021D_BE) ? - (prio^2) : prio; -} - #ifdef DEBUG /* Device console log buffer state */ struct brcmf_console { @@ -2774,7 +2778,13 @@ static int brcmf_sdio_bus_txdata(struct device *dev, struct sk_buff *pkt) skb_push(pkt, bus->tx_hdrlen); /* precondition: IS_ALIGNED((unsigned long)(pkt->data), 2) */ - prec = prio2prec((pkt->priority & PRIOMASK)); + /* In WLAN, priority is always set by the AP using WMM parameters + * and this need not always follow the standard 802.1d priority. + * Based on AP WMM config, map from 802.1d priority to corresponding + * precedence level. 
+ */ + prec = brcmf_map_prio_to_prec(bus_if->drvr->config, + (pkt->priority & PRIOMASK)); /* Check for existing queue, current flow-control, pending event, or pending clock */ @@ -4198,8 +4208,7 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err, brcmf_sdiod_writeb(sdiod, SBSDIO_DEVICE_CTL, devctl, &err); brcmf_sdiod_writeb(sdiod, SBSDIO_FUNC1_MESBUSYCTRL, - CY_4373_F2_WATERMARK | - SBSDIO_MESBUSYCTRL_ENAB, &err); + CY_4373_F1_MESBUSYCTRL, &err); break; case SDIO_DEVICE_ID_CYPRESS_43012: brcmf_dbg(INFO, "set F2 watermark to 0x%x*4 bytes\n", @@ -4211,19 +4220,51 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err, devctl |= SBSDIO_DEVCTL_F2WM_ENAB; brcmf_sdiod_writeb(sdiod, SBSDIO_DEVICE_CTL, devctl, &err); + brcmf_sdiod_writeb(sdiod, SBSDIO_FUNC1_MESBUSYCTRL, + CY_43012_MESBUSYCTRL, &err); + break; + case SDIO_DEVICE_ID_BROADCOM_4339: + brcmf_dbg(INFO, "set F2 watermark to 0x%x*4 bytes for 4339\n", + CY_4339_F2_WATERMARK); + brcmf_sdiod_writeb(sdiod, SBSDIO_WATERMARK, + CY_4339_F2_WATERMARK, &err); + devctl = brcmf_sdiod_readb(sdiod, SBSDIO_DEVICE_CTL, + &err); + devctl |= SBSDIO_DEVCTL_F2WM_ENAB; + brcmf_sdiod_writeb(sdiod, SBSDIO_DEVICE_CTL, devctl, + &err); + brcmf_sdiod_writeb(sdiod, SBSDIO_FUNC1_MESBUSYCTRL, + CY_4339_MESBUSYCTRL, &err); + break; + case SDIO_DEVICE_ID_BROADCOM_43455: + brcmf_dbg(INFO, "set F2 watermark to 0x%x*4 bytes for 43455\n", + CY_43455_F2_WATERMARK); + brcmf_sdiod_writeb(sdiod, SBSDIO_WATERMARK, + CY_43455_F2_WATERMARK, &err); + devctl = brcmf_sdiod_readb(sdiod, SBSDIO_DEVICE_CTL, + &err); + devctl |= SBSDIO_DEVCTL_F2WM_ENAB; + brcmf_sdiod_writeb(sdiod, SBSDIO_DEVICE_CTL, devctl, + &err); + brcmf_sdiod_writeb(sdiod, SBSDIO_FUNC1_MESBUSYCTRL, + CY_43455_MESBUSYCTRL, &err); break; case SDIO_DEVICE_ID_BROADCOM_4359: + /* fallthrough */ + case SDIO_DEVICE_ID_BROADCOM_4354: + /* fallthrough */ + case SDIO_DEVICE_ID_BROADCOM_4356: brcmf_dbg(INFO, "set F2 watermark to 0x%x*4 bytes\n", - CY_4359_F2_WATERMARK); + CY_435X_F2_WATERMARK); brcmf_sdiod_writeb(sdiod, SBSDIO_WATERMARK, - CY_4359_F2_WATERMARK, &err); + CY_435X_F2_WATERMARK, &err); devctl = brcmf_sdiod_readb(sdiod, SBSDIO_DEVICE_CTL, &err); devctl |= SBSDIO_DEVCTL_F2WM_ENAB; brcmf_sdiod_writeb(sdiod, SBSDIO_DEVICE_CTL, devctl, &err); brcmf_sdiod_writeb(sdiod, SBSDIO_FUNC1_MESBUSYCTRL, - CY_4359_F1_MESBUSYCTRL, &err); + CY_435X_F1_MESBUSYCTRL, &err); break; default: brcmf_sdiod_writeb(sdiod, SBSDIO_WATERMARK, diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c index 8363f91df7ea..827bb6d74815 100644 --- a/drivers/net/wireless/cisco/airo.c +++ b/drivers/net/wireless/cisco/airo.c @@ -1925,6 +1925,10 @@ static netdev_tx_t mpi_start_xmit(struct sk_buff *skb, airo_print_err(dev->name, "%s: skb == NULL!",__func__); return NETDEV_TX_OK; } + if (skb_padto(skb, ETH_ZLEN)) { + dev->stats.tx_dropped++; + return NETDEV_TX_OK; + } npacks = skb_queue_len (&ai->txq); if (npacks >= MAXTXQ - 1) { @@ -2127,6 +2131,10 @@ static netdev_tx_t airo_start_xmit(struct sk_buff *skb, airo_print_err(dev->name, "%s: skb == NULL!", __func__); return NETDEV_TX_OK; } + if (skb_padto(skb, ETH_ZLEN)) { + dev->stats.tx_dropped++; + return NETDEV_TX_OK; + } /* Find a vacant FID */ for( i = 0; i < MAX_FIDS / 2 && (fids[i] & 0xffff0000); i++ ); @@ -2201,6 +2209,10 @@ static netdev_tx_t airo_start_xmit11(struct sk_buff *skb, airo_print_err(dev->name, "%s: skb == NULL!", __func__); return NETDEV_TX_OK; } + if (skb_padto(skb, ETH_ZLEN)) { + dev->stats.tx_dropped++; + return 
NETDEV_TX_OK; + } /* Find a vacant FID */ for( i = MAX_FIDS / 2; i < MAX_FIDS && (fids[i] & 0xffff0000); i++ ); diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c index e9e686ad57b1..661e63bfc892 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c @@ -3386,7 +3386,7 @@ struct ipw_fw { __le32 boot_size; __le32 ucode_size; __le32 fw_size; - u8 data[0]; + u8 data[]; }; static int ipw_get_fw(struct ipw_priv *priv, diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.h b/drivers/net/wireless/intel/ipw2x00/ipw2200.h index 4346520545c4..e1ec1c96dcd8 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2200.h +++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.h @@ -448,7 +448,7 @@ struct tfd_command { u8 index; u8 length; __le16 reserved; - u8 payload[0]; + u8 payload[]; } __packed; struct tfd_data { @@ -675,7 +675,7 @@ struct ipw_rx_frame { // is identical) u8 rtscts_seen; // 0x1 RTS seen ; 0x2 CTS seen __le16 length; - u8 data[0]; + u8 data[]; } __packed; struct ipw_rx_header { @@ -1002,7 +1002,7 @@ struct ipw_cmd { /* XXX */ * Incoming parameters listed 1-st, followed by outcoming params. * nParams=(len+3)/4+status_len */ - u32 param[0]; + u32 param[]; } __packed; #define STATUS_HCMD_ACTIVE (1<<0) /**< host command in progress */ @@ -1108,7 +1108,7 @@ struct ipw_fw_error { /* XXX */ u32 log_len; struct ipw_error_elem *elem; struct ipw_event *log; - u8 payload[0]; + u8 payload[]; } __packed; #ifdef CONFIG_IPW2200_PROMISCUOUS @@ -1153,7 +1153,7 @@ struct ipw_rt_hdr { s8 rt_dbmsignal; /* signal in dbM, kluged to signed */ s8 rt_dbmnoise; u8 rt_antenna; /* antenna number */ - u8 payload[0]; /* payload... */ + u8 payload[]; /* payload... */ } __packed; #endif @@ -1329,7 +1329,7 @@ struct ipw_priv { s8 tx_power; - /* Track time in suspend using CLOCK_BOOTIME */ + /* Track time in suspend using CLOCK_BOOTTIME */ time64_t suspend_at; time64_t suspend_time; diff --git a/drivers/net/wireless/intel/ipw2x00/libipw.h b/drivers/net/wireless/intel/ipw2x00/libipw.h index e4a6ab4e8391..e87538a8b88b 100644 --- a/drivers/net/wireless/intel/ipw2x00/libipw.h +++ b/drivers/net/wireless/intel/ipw2x00/libipw.h @@ -334,7 +334,7 @@ struct libipw_hdr_1addr { __le16 frame_ctl; __le16 duration_id; u8 addr1[ETH_ALEN]; - u8 payload[0]; + u8 payload[]; } __packed; struct libipw_hdr_2addr { @@ -342,7 +342,7 @@ struct libipw_hdr_2addr { __le16 duration_id; u8 addr1[ETH_ALEN]; u8 addr2[ETH_ALEN]; - u8 payload[0]; + u8 payload[]; } __packed; struct libipw_hdr_3addr { @@ -352,7 +352,7 @@ struct libipw_hdr_3addr { u8 addr2[ETH_ALEN]; u8 addr3[ETH_ALEN]; __le16 seq_ctl; - u8 payload[0]; + u8 payload[]; } __packed; struct libipw_hdr_4addr { @@ -363,7 +363,7 @@ struct libipw_hdr_4addr { u8 addr3[ETH_ALEN]; __le16 seq_ctl; u8 addr4[ETH_ALEN]; - u8 payload[0]; + u8 payload[]; } __packed; struct libipw_hdr_3addrqos { @@ -380,7 +380,7 @@ struct libipw_hdr_3addrqos { struct libipw_info_element { u8 id; u8 len; - u8 data[0]; + u8 data[]; } __packed; /* @@ -406,7 +406,7 @@ struct libipw_auth { __le16 transaction; __le16 status; /* challenge */ - struct libipw_info_element info_element[0]; + struct libipw_info_element info_element[]; } __packed; struct libipw_channel_switch { @@ -442,7 +442,7 @@ struct libipw_disassoc { struct libipw_probe_request { struct libipw_hdr_3addr header; /* SSID, supported rates */ - struct libipw_info_element info_element[0]; + struct libipw_info_element info_element[]; } __packed; struct 
libipw_probe_response { @@ -452,7 +452,7 @@ struct libipw_probe_response { __le16 capability; /* SSID, supported rates, FH params, DS params, * CF params, IBSS params, TIM (if beacon), RSN */ - struct libipw_info_element info_element[0]; + struct libipw_info_element info_element[]; } __packed; /* Alias beacon for probe_response */ @@ -463,7 +463,7 @@ struct libipw_assoc_request { __le16 capability; __le16 listen_interval; /* SSID, supported rates, RSN */ - struct libipw_info_element info_element[0]; + struct libipw_info_element info_element[]; } __packed; struct libipw_reassoc_request { @@ -471,7 +471,7 @@ struct libipw_reassoc_request { __le16 capability; __le16 listen_interval; u8 current_ap[ETH_ALEN]; - struct libipw_info_element info_element[0]; + struct libipw_info_element info_element[]; } __packed; struct libipw_assoc_response { @@ -480,7 +480,7 @@ struct libipw_assoc_response { __le16 status; __le16 aid; /* supported rates */ - struct libipw_info_element info_element[0]; + struct libipw_info_element info_element[]; } __packed; struct libipw_txb { @@ -490,7 +490,7 @@ struct libipw_txb { u8 reserved; u16 frag_size; u16 payload_size; - struct sk_buff *fragments[0]; + struct sk_buff *fragments[]; }; /* SWEEP TABLE ENTRIES NUMBER */ @@ -594,7 +594,7 @@ struct libipw_ibss_dfs { struct libipw_info_element ie; u8 owner[ETH_ALEN]; u8 recovery_interval; - struct libipw_channel_map channel_map[0]; + struct libipw_channel_map channel_map[]; }; struct libipw_csa { @@ -830,7 +830,7 @@ struct libipw_device { /* This must be the last item so that it points to the data * allocated beyond this structure by alloc_libipw */ - u8 priv[0]; + u8 priv[]; }; #define IEEE_A (1<<0) diff --git a/drivers/net/wireless/intel/iwlegacy/commands.h b/drivers/net/wireless/intel/iwlegacy/commands.h index dd744135c956..89c6671b32bc 100644 --- a/drivers/net/wireless/intel/iwlegacy/commands.h +++ b/drivers/net/wireless/intel/iwlegacy/commands.h @@ -203,7 +203,7 @@ struct il_cmd_header { __le16 sequence; /* command or response/notification data follows immediately */ - u8 data[0]; + u8 data[]; } __packed; /** @@ -1112,7 +1112,7 @@ struct il_wep_cmd { u8 global_key_type; u8 flags; u8 reserved; - struct il_wep_key key[0]; + struct il_wep_key key[]; } __packed; #define WEP_KEY_WEP_TYPE 1 @@ -1166,7 +1166,7 @@ struct il3945_rx_frame_stats { u8 agc; __le16 sig_avg; __le16 noise_diff; - u8 payload[0]; + u8 payload[]; } __packed; struct il3945_rx_frame_hdr { @@ -1175,7 +1175,7 @@ struct il3945_rx_frame_hdr { u8 reserved1; u8 rate; __le16 len; - u8 payload[0]; + u8 payload[]; } __packed; struct il3945_rx_frame_end { @@ -1211,7 +1211,7 @@ struct il4965_rx_non_cfg_phy { __le16 ant_selection; /* ant A bit 4, ant B bit 5, ant C bit 6 */ __le16 agc_info; /* agc code 0:6, agc dB 7:13, reserved 14:15 */ u8 rssi_info[6]; /* we use even entries, 0/2/4 for A/B/C rssi */ - u8 pad[0]; + u8 pad[]; } __packed; /* @@ -1409,7 +1409,7 @@ struct il3945_tx_cmd { * length is 26 or 30 bytes, followed by payload data */ u8 payload[0]; - struct ieee80211_hdr hdr[0]; + struct ieee80211_hdr hdr[]; } __packed; /* @@ -1511,7 +1511,7 @@ struct il_tx_cmd { * length is 26 or 30 bytes, followed by payload data */ u8 payload[0]; - struct ieee80211_hdr hdr[0]; + struct ieee80211_hdr hdr[]; } __packed; /* TX command response is sent after *3945* transmission attempts. @@ -2520,7 +2520,7 @@ struct il3945_scan_cmd { * for one scan to complete (i.e. receive N_SCAN_COMPLETE) * before requesting another scan. 
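The [0]-to-[] changes in these headers adopt C99 flexible array members, which, unlike the old GNU zero-length-array idiom, let the compiler and fortify checks see that the trailing array has no declared bound. A sketch of the allocation pattern such structs pair with, using struct_size() from <linux/overflow.h> and the il_scan_cmd conversion just below (the sizing shown is illustrative):

#include <linux/overflow.h>

/* allocate a scan command with `datalen` trailing payload bytes */
struct il_scan_cmd *scan = kzalloc(struct_size(scan, data, datalen),
				   GFP_KERNEL);
if (!scan)
	return -ENOMEM;
scan->len = cpu_to_le16(struct_size(scan, data, datalen));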
*/ - u8 data[0]; + u8 data[]; } __packed; struct il_scan_cmd { @@ -2564,7 +2564,7 @@ struct il_scan_cmd { * for one scan to complete (i.e. receive N_SCAN_COMPLETE) * before requesting another scan. */ - u8 data[0]; + u8 data[]; } __packed; /* Can abort will notify by complete notification with abort status. */ @@ -2664,7 +2664,7 @@ struct il3945_tx_beacon_cmd { __le16 tim_idx; u8 tim_size; u8 reserved1; - struct ieee80211_hdr frame[0]; /* beacon frame */ + struct ieee80211_hdr frame[]; /* beacon frame */ } __packed; struct il_tx_beacon_cmd { @@ -2672,7 +2672,7 @@ struct il_tx_beacon_cmd { __le16 tim_idx; u8 tim_size; u8 reserved1; - struct ieee80211_hdr frame[0]; /* beacon frame */ + struct ieee80211_hdr frame[]; /* beacon frame */ } __packed; /****************************************************************************** diff --git a/drivers/net/wireless/intel/iwlegacy/iwl-spectrum.h b/drivers/net/wireless/intel/iwlegacy/iwl-spectrum.h index a3b490501a70..1e8ab704dbfb 100644 --- a/drivers/net/wireless/intel/iwlegacy/iwl-spectrum.h +++ b/drivers/net/wireless/intel/iwlegacy/iwl-spectrum.h @@ -53,7 +53,7 @@ struct ieee80211_measurement_params { struct ieee80211_info_element { u8 id; u8 len; - u8 data[0]; + u8 data[]; } __packed; struct ieee80211_measurement_request { @@ -61,7 +61,7 @@ struct ieee80211_measurement_request { u8 token; u8 mode; u8 type; - struct ieee80211_measurement_params params[0]; + struct ieee80211_measurement_params params[]; } __packed; struct ieee80211_measurement_report { diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c index 2f741f5e3a7d..efe427049a6e 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c @@ -6,7 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2019 Intel Corporation + * Copyright (C) 2018-2020 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -20,7 +20,7 @@ * BSD LICENSE * * Copyright(c) 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2019 Intel Corporation + * Copyright (C) 2018-2020 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -57,7 +57,7 @@ #include "iwl-prph.h" /* Highest firmware API version supported */ -#define IWL_22000_UCODE_API_MAX 53 +#define IWL_22000_UCODE_API_MAX 56 /* Lowest firmware API version supported */ #define IWL_22000_UCODE_API_MIN 39 @@ -73,11 +73,8 @@ #define IWL_22000_SMEM_OFFSET 0x400000 #define IWL_22000_SMEM_LEN 0xD0000 -#define IWL_22000_JF_FW_PRE "iwlwifi-Qu-a0-jf-b0-" -#define IWL_22000_HR_FW_PRE "iwlwifi-Qu-a0-hr-a0-" -#define IWL_22000_HR_CDB_FW_PRE "iwlwifi-QuIcp-z0-hrcdb-a0-" -#define IWL_22000_QU_B_HR_B_FW_PRE "iwlwifi-Qu-b0-hr-b0-" -#define IWL_22000_HR_B_FW_PRE "iwlwifi-QuQnj-b0-hr-b0-" +#define IWL_QU_B_HR_B_FW_PRE "iwlwifi-Qu-b0-hr-b0-" +#define IWL_QNJ_B_HR_B_FW_PRE "iwlwifi-QuQnj-b0-hr-b0-" #define IWL_QU_C_HR_B_FW_PRE "iwlwifi-Qu-c0-hr-b0-" #define IWL_QU_B_JF_B_FW_PRE "iwlwifi-Qu-b0-jf-b0-" #define IWL_QU_C_JF_B_FW_PRE "iwlwifi-Qu-c0-jf-b0-" @@ -85,22 +82,18 @@ #define IWL_QUZ_A_JF_B_FW_PRE "iwlwifi-QuZ-a0-jf-b0-" #define IWL_QNJ_B_JF_B_FW_PRE "iwlwifi-QuQnj-b0-jf-b0-" #define IWL_CC_A_FW_PRE "iwlwifi-cc-a0-" -#define IWL_22000_SO_A_JF_B_FW_PRE "iwlwifi-so-a0-jf-b0-" -#define IWL_22000_SO_A_HR_B_FW_PRE "iwlwifi-so-a0-hr-b0-" -#define IWL_22000_SO_A_GF_A_FW_PRE "iwlwifi-so-a0-gf-a0-" -#define IWL_22000_TY_A_GF_A_FW_PRE "iwlwifi-ty-a0-gf-a0-" -#define IWL_22000_SO_A_GF4_A_FW_PRE "iwlwifi-so-a0-gf4-a0-" +#define IWL_SO_A_JF_B_FW_PRE "iwlwifi-so-a0-jf-b0-" +#define IWL_SO_A_HR_B_FW_PRE "iwlwifi-so-a0-hr-b0-" +#define IWL_SO_A_GF_A_FW_PRE "iwlwifi-so-a0-gf-a0-" +#define IWL_TY_A_GF_A_FW_PRE "iwlwifi-ty-a0-gf-a0-" +#define IWL_SO_A_GF4_A_FW_PRE "iwlwifi-so-a0-gf4-a0-" #define IWL_SNJ_A_GF4_A_FW_PRE "iwlwifi-SoSnj-a0-gf4-a0-" #define IWL_SNJ_A_GF_A_FW_PRE "iwlwifi-SoSnj-a0-gf-a0-" -#define IWL_22000_HR_MODULE_FIRMWARE(api) \ - IWL_22000_HR_FW_PRE __stringify(api) ".ucode" -#define IWL_22000_JF_MODULE_FIRMWARE(api) \ - IWL_22000_JF_FW_PRE __stringify(api) ".ucode" -#define IWL_22000_QU_B_HR_B_MODULE_FIRMWARE(api) \ - IWL_22000_QU_B_HR_B_FW_PRE __stringify(api) ".ucode" -#define IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(api) \ - IWL_22000_HR_B_FW_PRE __stringify(api) ".ucode" +#define IWL_QU_B_HR_B_MODULE_FIRMWARE(api) \ + IWL_QU_B_HR_B_FW_PRE __stringify(api) ".ucode" +#define IWL_QNJ_B_HR_B_MODULE_FIRMWARE(api) \ + IWL_QNJ_B_HR_B_FW_PRE __stringify(api) ".ucode" #define IWL_QUZ_A_HR_B_MODULE_FIRMWARE(api) \ IWL_QUZ_A_HR_B_FW_PRE __stringify(api) ".ucode" #define IWL_QUZ_A_JF_B_MODULE_FIRMWARE(api) \ @@ -113,14 +106,14 @@ IWL_QNJ_B_JF_B_FW_PRE __stringify(api) ".ucode" #define IWL_CC_A_MODULE_FIRMWARE(api) \ IWL_CC_A_FW_PRE __stringify(api) ".ucode" -#define IWL_22000_SO_A_JF_B_MODULE_FIRMWARE(api) \ - IWL_22000_SO_A_JF_B_FW_PRE __stringify(api) ".ucode" -#define IWL_22000_SO_A_HR_B_MODULE_FIRMWARE(api) \ - IWL_22000_SO_A_HR_B_FW_PRE __stringify(api) ".ucode" -#define IWL_22000_SO_A_GF_A_MODULE_FIRMWARE(api) \ - IWL_22000_SO_A_GF_A_FW_PRE __stringify(api) ".ucode" -#define IWL_22000_TY_A_GF_A_MODULE_FIRMWARE(api) \ - IWL_22000_TY_A_GF_A_FW_PRE __stringify(api) ".ucode" +#define IWL_SO_A_JF_B_MODULE_FIRMWARE(api) \ + IWL_SO_A_JF_B_FW_PRE __stringify(api) ".ucode" +#define IWL_SO_A_HR_B_MODULE_FIRMWARE(api) \ + IWL_SO_A_HR_B_FW_PRE __stringify(api) ".ucode" +#define IWL_SO_A_GF_A_MODULE_FIRMWARE(api) \ + IWL_SO_A_GF_A_FW_PRE __stringify(api) ".ucode" +#define IWL_TY_A_GF_A_MODULE_FIRMWARE(api) \ + IWL_TY_A_GF_A_FW_PRE __stringify(api) ".ucode" #define IWL_SNJ_A_GF4_A_MODULE_FIRMWARE(api) \ IWL_SNJ_A_GF4_A_FW_PRE 
__stringify(api) ".ucode" #define IWL_SNJ_A_GF_A_MODULE_FIRMWARE(api) \ @@ -218,7 +211,7 @@ static const struct iwl_ht_params iwl_22000_ht_params = { .trans.base_params = &iwl_ax210_base_params, \ .min_txq_size = 128, \ .gp2_reg_addr = 0xd02c68, \ - .min_256_ba_txq_size = 512, \ + .min_256_ba_txq_size = 1024, \ .mon_dram_regs = { \ .write_ptr = { \ .addr = DBGC_CUR_DBGBUF_STATUS, \ @@ -343,15 +336,16 @@ const struct iwl_cfg_trans_params iwl_ax200_trans_cfg = { }; const char iwl_ax200_name[] = "Intel(R) Wi-Fi 6 AX200 160MHz"; +const char iwl_ax201_name[] = "Intel(R) Wi-Fi 6 AX201 160MHz"; +const char iwl_ax101_name[] = "Intel(R) Wi-Fi 6 AX101"; const char iwl_ax200_killer_1650w_name[] = "Killer(R) Wi-Fi 6 AX1650w 160MHz Wireless Network Adapter (200D2W)"; const char iwl_ax200_killer_1650x_name[] = "Killer(R) Wi-Fi 6 AX1650x 160MHz Wireless Network Adapter (200NGW)"; -const struct iwl_cfg iwl_ax101_cfg_qu_hr = { - .name = "Intel(R) Wi-Fi 6 AX101", - .fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE, +const struct iwl_cfg iwl_qu_b0_hr1_b0 = { + .fw_name_pre = IWL_QU_B_HR_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap @@ -365,7 +359,7 @@ const struct iwl_cfg iwl_ax101_cfg_qu_hr = { const struct iwl_cfg iwl_ax201_cfg_qu_hr = { .name = "Intel(R) Wi-Fi 6 AX201 160MHz", - .fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE, + .fw_name_pre = IWL_QU_B_HR_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap @@ -376,8 +370,7 @@ const struct iwl_cfg iwl_ax201_cfg_qu_hr = { .num_rbds = IWL_NUM_RBDS_22000_HE, }; -const struct iwl_cfg iwl_ax101_cfg_qu_c0_hr_b0 = { - .name = "Intel(R) Wi-Fi 6 AX101", +const struct iwl_cfg iwl_qu_c0_hr1_b0 = { .fw_name_pre = IWL_QU_C_HR_B_FW_PRE, IWL_DEVICE_22500, /* @@ -403,8 +396,7 @@ const struct iwl_cfg iwl_ax201_cfg_qu_c0_hr_b0 = { .num_rbds = IWL_NUM_RBDS_22000_HE, }; -const struct iwl_cfg iwl_ax101_cfg_quz_hr = { - .name = "Intel(R) Wi-Fi 6 AX101", +const struct iwl_cfg iwl_quz_a0_hr1_b0 = { .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE, IWL_DEVICE_22500, /* @@ -470,7 +462,7 @@ const struct iwl_cfg iwl_ax200_cfg_cc = { const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0 = { .name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)", - .fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE, + .fw_name_pre = IWL_QU_B_HR_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap @@ -483,7 +475,7 @@ const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0 = { const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0 = { .name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201D2W)", - .fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE, + .fw_name_pre = IWL_QU_B_HR_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap @@ -520,9 +512,8 @@ const struct iwl_cfg killer1650i_2ax_cfg_qu_c0_hr_b0 = { .num_rbds = IWL_NUM_RBDS_22000_HE, }; -const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0 = { - .name = "Intel(R) Dual Band Wireless AX 22000", - .fw_name_pre = IWL_22000_HR_B_FW_PRE, +const struct iwl_cfg iwl_qnj_b0_hr_b0_cfg = { + .fw_name_pre = IWL_QNJ_B_HR_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap @@ -535,21 +526,21 @@ const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0 = { const struct iwl_cfg iwlax210_2ax_cfg_so_jf_a0 = { .name = "Intel(R) Wireless-AC 9560 160MHz", - .fw_name_pre = IWL_22000_SO_A_JF_B_FW_PRE, + .fw_name_pre = IWL_SO_A_JF_B_FW_PRE, IWL_DEVICE_AX210, 
.num_rbds = IWL_NUM_RBDS_NON_HE, }; const struct iwl_cfg iwlax210_2ax_cfg_so_hr_a0 = { .name = "Intel(R) Wi-Fi 6 AX210 160MHz", - .fw_name_pre = IWL_22000_SO_A_HR_B_FW_PRE, + .fw_name_pre = IWL_SO_A_HR_B_FW_PRE, IWL_DEVICE_AX210, .num_rbds = IWL_NUM_RBDS_AX210_HE, }; const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0 = { .name = "Intel(R) Wi-Fi 6 AX211 160MHz", - .fw_name_pre = IWL_22000_SO_A_GF_A_FW_PRE, + .fw_name_pre = IWL_SO_A_GF_A_FW_PRE, .uhb_supported = true, IWL_DEVICE_AX210, .num_rbds = IWL_NUM_RBDS_AX210_HE, @@ -557,7 +548,7 @@ const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0 = { const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0_long = { .name = "Intel(R) Wi-Fi 6 AX211 160MHz", - .fw_name_pre = IWL_22000_SO_A_GF_A_FW_PRE, + .fw_name_pre = IWL_SO_A_GF_A_FW_PRE, .uhb_supported = true, IWL_DEVICE_AX210, .num_rbds = IWL_NUM_RBDS_AX210_HE, @@ -567,7 +558,7 @@ const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0_long = { const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0 = { .name = "Intel(R) Wi-Fi 6 AX210 160MHz", - .fw_name_pre = IWL_22000_TY_A_GF_A_FW_PRE, + .fw_name_pre = IWL_TY_A_GF_A_FW_PRE, .uhb_supported = true, IWL_DEVICE_AX210, .num_rbds = IWL_NUM_RBDS_AX210_HE, @@ -575,7 +566,7 @@ const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0 = { const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0 = { .name = "Intel(R) Wi-Fi 6 AX411 160MHz", - .fw_name_pre = IWL_22000_SO_A_GF4_A_FW_PRE, + .fw_name_pre = IWL_SO_A_GF4_A_FW_PRE, .uhb_supported = true, IWL_DEVICE_AX210, .num_rbds = IWL_NUM_RBDS_AX210_HE, @@ -583,7 +574,7 @@ const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0 = { const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0_long = { .name = "Intel(R) Wi-Fi 6 AX411 160MHz", - .fw_name_pre = IWL_22000_SO_A_GF4_A_FW_PRE, + .fw_name_pre = IWL_SO_A_GF4_A_FW_PRE, .uhb_supported = true, IWL_DEVICE_AX210, .num_rbds = IWL_NUM_RBDS_AX210_HE, @@ -607,18 +598,17 @@ const struct iwl_cfg iwlax211_cfg_snj_gf_a0 = { .num_rbds = IWL_NUM_RBDS_AX210_HE, }; -MODULE_FIRMWARE(IWL_22000_HR_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_22000_JF_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_QU_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_QNJ_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_QU_C_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_QU_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_QUZ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_QUZ_A_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_QNJ_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_CC_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_22000_SO_A_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_22000_SO_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_22000_SO_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_22000_TY_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_SO_A_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_SO_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_SO_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_TY_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_SNJ_A_GF4_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_SNJ_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); diff --git 
a/drivers/net/wireless/intel/iwlwifi/dvm/dev.h b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h index 8d8380026180..4bd792c06ff6 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/dev.h +++ b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /****************************************************************************** * - * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2003 - 2014, 2020 Intel Corporation. All rights reserved. * * Contact Information: * Intel Linux Wireless <linuxwifi@intel.com> @@ -810,7 +810,6 @@ struct iwl_priv { u8 bt_traffic_load, last_bt_traffic_load; bool bt_ch_announce; bool bt_full_concurrent; - bool bt_ant_couple_ok; __le32 kill_ack_mask; __le32 kill_cts_mask; __le16 bt_valid; diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c index 598ee7315558..b882705ff66d 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c @@ -1,9 +1,8 @@ // SPDX-License-Identifier: GPL-2.0-only /****************************************************************************** * - * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2003 - 2014, 2018 - 2020 Intel Corporation. All rights reserved. * Copyright(c) 2015 Intel Deutschland GmbH - * Copyright (C) 2018 - 2019 Intel Corporation * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. @@ -53,7 +52,7 @@ #define DRV_DESCRIPTION "Intel(R) Wireless WiFi Link AGN driver for Linux" MODULE_DESCRIPTION(DRV_DESCRIPTION); -MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); +MODULE_AUTHOR(DRV_AUTHOR); MODULE_LICENSE("GPL"); /* Please keep this array *SORTED* by hex value. @@ -1370,12 +1369,6 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans, IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n"); - /* is antenna coupling more than 35dB ? */ - priv->bt_ant_couple_ok = - (iwlwifi_mod_params.antenna_coupling > - IWL_BT_ANTENNA_COUPLING_THRESHOLD) ? - true : false; - /* bt channel inhibition enabled*/ priv->bt_ch_announce = true; IWL_DEBUG_INFO(priv, "BT channel inhibition is %s\n", diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c index dac809df7f1d..4fa4eab2d7f3 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c @@ -2,6 +2,7 @@ /****************************************************************************** * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright (C) 2019 - 2020 Intel Corporation * * Contact Information: * Intel Linux Wireless <linuxwifi@intel.com> @@ -846,16 +847,6 @@ static void rs_bt_update_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx, struct iwl_scale_tbl_info *tbl; bool full_concurrent = priv->bt_full_concurrent; - if (priv->bt_ant_couple_ok) { - /* - * Is there a need to switch between - * full concurrency and 3-wire? 
- */ - if (priv->bt_ci_compliance) - full_concurrent = true; - else - full_concurrent = false; - } if ((priv->bt_traffic_load != priv->last_bt_traffic_load) || (priv->bt_full_concurrent != full_concurrent)) { priv->bt_full_concurrent = full_concurrent; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c index e2184ba4d8b5..dc769b580431 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c @@ -58,44 +58,121 @@ * *****************************************************************************/ +#include <linux/uuid.h> #include "iwl-drv.h" #include "iwl-debug.h" #include "acpi.h" #include "fw/runtime.h" -void *iwl_acpi_get_object(struct device *dev, acpi_string method) +static const guid_t intel_wifi_guid = GUID_INIT(0xF21202BF, 0x8F78, 0x4DC6, + 0xA5, 0xB3, 0x1F, 0x73, + 0x8E, 0x28, 0x5A, 0xDE); + +static int iwl_acpi_get_handle(struct device *dev, acpi_string method, + acpi_handle *ret_handle) { acpi_handle root_handle; - acpi_handle handle; - struct acpi_buffer buf = {ACPI_ALLOCATE_BUFFER, NULL}; acpi_status status; root_handle = ACPI_HANDLE(dev); if (!root_handle) { IWL_DEBUG_DEV_RADIO(dev, - "Could not retrieve root port ACPI handle\n"); - return ERR_PTR(-ENOENT); + "ACPI: Could not retrieve root port handle\n"); + return -ENOENT; } - /* Get the method's handle */ - status = acpi_get_handle(root_handle, method, &handle); + status = acpi_get_handle(root_handle, method, ret_handle); if (ACPI_FAILURE(status)) { - IWL_DEBUG_DEV_RADIO(dev, "%s method not found\n", method); - return ERR_PTR(-ENOENT); + IWL_DEBUG_DEV_RADIO(dev, + "ACPI: %s method not found\n", method); + return -ENOENT; } + return 0; +} + +void *iwl_acpi_get_object(struct device *dev, acpi_string method) +{ + struct acpi_buffer buf = {ACPI_ALLOCATE_BUFFER, NULL}; + acpi_handle handle; + acpi_status status; + int ret; + + ret = iwl_acpi_get_handle(dev, method, &handle); + if (ret) + return ERR_PTR(-ENOENT); /* Call the method with no arguments */ status = acpi_evaluate_object(handle, NULL, NULL, &buf); if (ACPI_FAILURE(status)) { - IWL_DEBUG_DEV_RADIO(dev, "%s invocation failed (0x%x)\n", + IWL_DEBUG_DEV_RADIO(dev, + "ACPI: %s method invocation failed (status: 0x%x)\n", method, status); return ERR_PTR(-ENOENT); } - return buf.pointer; } IWL_EXPORT_SYMBOL(iwl_acpi_get_object); +/** +* Generic function for evaluating a method defined in the device specific +* method (DSM) interface. The returned acpi object must be freed by calling +* function. +*/ +void *iwl_acpi_get_dsm_object(struct device *dev, int rev, int func, + union acpi_object *args) +{ + union acpi_object *obj; + + obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &intel_wifi_guid, rev, func, + args); + if (!obj) { + IWL_DEBUG_DEV_RADIO(dev, + "ACPI: DSM method invocation failed (rev: %d, func:%d)\n", + rev, func); + return ERR_PTR(-ENOENT); + } + return obj; +} + +/** + * Evaluate a DSM with no arguments and a single u8 return value (inside a + * buffer object), verify and return that value. 
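A short usage sketch of the accessor being added here, with the function index taken from the iwl_dsm_funcs_rev_0 enum that acpi.h gains below; the wiring into the LARI config command is abbreviated and illustrative:

/* ask the platform _DSM (rev 0) whether the OEM enabled the
 * 5.15-5.35 GHz band for Indonesia, and forward that to firmware
 */
int ret = iwl_acpi_get_dsm_u8(fwrt->trans->dev, 0,
			      DSM_FUNC_ENABLE_INDONESIA_5G2);
if (ret == 1)
	cmd.config_bitmap |=
		cpu_to_le32(LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK);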
+ */ +int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func) +{ + union acpi_object *obj; + int ret; + + obj = iwl_acpi_get_dsm_object(dev, rev, func, NULL); + if (IS_ERR(obj)) + return -ENOENT; + + if (obj->type != ACPI_TYPE_BUFFER) { + IWL_DEBUG_DEV_RADIO(dev, + "ACPI: DSM method did not return a valid object, type=%d\n", + obj->type); + ret = -EINVAL; + goto out; + } + + if (obj->buffer.length != sizeof(u8)) { + IWL_DEBUG_DEV_RADIO(dev, + "ACPI: DSM method returned invalid buffer, length=%d\n", + obj->buffer.length); + ret = -EINVAL; + goto out; + } + + ret = obj->buffer.pointer[0]; + IWL_DEBUG_DEV_RADIO(dev, + "ACPI: DSM method evaluated: func=%d, ret=%d\n", + func, ret); +out: + ACPI_FREE(obj); + return ret; +} +IWL_EXPORT_SYMBOL(iwl_acpi_get_dsm_u8); + union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev, union acpi_object *data, int data_size, int *tbl_rev) diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h index 6a646dc524e1..0ada9eddb8b1 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h @@ -127,12 +127,23 @@ struct iwl_geo_profile { u8 values[ACPI_GEO_TABLE_SIZE]; }; +enum iwl_dsm_funcs_rev_0 { + DSM_FUNC_QUERY = 0, + DSM_FUNC_DISABLE_SRD = 1, + DSM_FUNC_ENABLE_INDONESIA_5G2 = 2, +}; + #ifdef CONFIG_ACPI struct iwl_fw_runtime; void *iwl_acpi_get_object(struct device *dev, acpi_string method); +void *iwl_acpi_get_dsm_object(struct device *dev, int rev, int func, + union acpi_object *args); + +int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func); + union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev, union acpi_object *data, int data_size, int *tbl_rev); @@ -192,6 +203,17 @@ static inline void *iwl_acpi_get_object(struct device *dev, acpi_string method) return ERR_PTR(-ENOENT); } +static inline void *iwl_acpi_get_dsm_object(struct device *dev, int rev, + int func, union acpi_object *args) +{ + return ERR_PTR(-ENOENT); +} + +static inline int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func) +{ + return -ENOENT; +} + static inline union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev, union acpi_object *data, int data_size, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h index b9d7ed93311c..74ac65bd545a 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h @@ -5,7 +5,7 @@ * * GPL LICENSE SUMMARY * - * Copyright (C) 2018 - 2019 Intel Corporation + * Copyright (C) 2018 - 2020 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -25,7 +25,7 @@ * * BSD LICENSE * - * Copyright (C) 2018 - 2019 Intel Corporation + * Copyright (C) 2018 - 2020 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -304,6 +304,7 @@ enum iwl_fw_ini_buffer_location { IWL_FW_INI_LOCATION_SRAM_PATH, IWL_FW_INI_LOCATION_DRAM_PATH, IWL_FW_INI_LOCATION_NPK_PATH, + IWL_FW_INI_LOCATION_NUM, }; /* FW_DEBUG_TLV_BUFFER_LOCATION_E_VER_1 */ /** diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h index 98e957ecbeed..94b1a1268476 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h @@ -5,10 +5,9 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright (C) 2018 - 2019 Intel Corporation + * Copyright(c) 2007 - 2014, 2018 - 2020 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -28,10 +27,9 @@ * * BSD LICENSE * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright (C) 2018 - 2019 Intel Corporation + * Copyright(c) 2005 - 2014, 2018 - 2020 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -218,6 +216,8 @@ struct iwl_shared_mem_lmac_cfg { * @page_buff_size: size of %page_buff_addr * @lmac_num: number of LMACs (1 or 2) * @lmac_smem: per - LMAC smem data + * @rxfifo2_control_addr: start addr of RXF2C + * @rxfifo2_control_size: size of RXF2C */ struct iwl_shared_mem_cfg { __le32 shared_mem_addr; @@ -229,8 +229,10 @@ struct iwl_shared_mem_cfg { __le32 page_buff_addr; __le32 page_buff_size; __le32 lmac_num; - struct iwl_shared_mem_lmac_cfg lmac_smem[2]; -} __packed; /* SHARED_MEM_ALLOC_API_S_VER_3 */ + struct iwl_shared_mem_lmac_cfg lmac_smem[3]; + __le32 rxfifo2_control_addr; + __le32 rxfifo2_control_size; +} __packed; /* SHARED_MEM_ALLOC_API_S_VER_4 */ /** * struct iwl_mfuart_load_notif - mfuart image version & status diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h index 0214e553d5ae..1df2e497fabf 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h @@ -6,8 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * Copyright (C) 2018 Intel Corporation - * Copyright (C) 2019 Intel Corporation + * Copyright (C) 2018 - 2020 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -28,8 +27,7 @@ * BSD LICENSE * * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * Copyright (C) 2018 Intel Corporation - * Copyright (C) 2019 Intel Corporation + * Copyright (C) 2018 - 2020 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -147,6 +145,7 @@ struct iwl_tof_config_cmd { * @IWL_TOF_BW_40: 40 MHz * @IWL_TOF_BW_80: 80 MHz * @IWL_TOF_BW_160: 160 MHz + * @IWL_TOF_BW_NUM: number of tof bandwidths */ enum iwl_tof_bandwidth { IWL_TOF_BW_20_LEGACY, @@ -154,6 +153,7 @@ enum iwl_tof_bandwidth { IWL_TOF_BW_40, IWL_TOF_BW_80, IWL_TOF_BW_160, + IWL_TOF_BW_NUM, }; /* LOCAT_BW_TYPE_E */ /* @@ -430,6 +430,9 @@ struct iwl_tof_range_req_ap_entry_v2 { * @IWL_INITIATOR_AP_FLAGS_NON_TB: Use non trigger based flow * @IWL_INITIATOR_AP_FLAGS_TB: Use trigger based flow * @IWL_INITIATOR_AP_FLAGS_SECURED: request secured measurement + * @IWL_INITIATOR_AP_FLAGS_LMR_FEEDBACK: Send LMR feedback + * @IWL_INITIATOR_AP_FLAGS_USE_CALIB: Use calibration values from the request + * instead of fw internal values. */ enum iwl_initiator_ap_flags { IWL_INITIATOR_AP_FLAGS_ASAP = BIT(1), @@ -442,6 +445,8 @@ enum iwl_initiator_ap_flags { IWL_INITIATOR_AP_FLAGS_NON_TB = BIT(9), IWL_INITIATOR_AP_FLAGS_TB = BIT(10), IWL_INITIATOR_AP_FLAGS_SECURED = BIT(11), + IWL_INITIATOR_AP_FLAGS_LMR_FEEDBACK = BIT(12), + IWL_INITIATOR_AP_FLAGS_USE_CALIB = BIT(13), }; /** @@ -508,7 +513,7 @@ enum iwl_location_bw { #define LOCATION_BW_POS 4 /** - * struct iwl_tof_range_req_ap_entry - AP configuration parameters + * struct iwl_tof_range_req_ap_entry_v4 - AP configuration parameters * @initiator_ap_flags: see &enum iwl_initiator_ap_flags. * @channel_num: AP Channel number * @format_bw: bits 0 - 3: &enum iwl_location_frame_format. @@ -527,7 +532,7 @@ enum iwl_location_bw { * @hltk: HLTK to be used for secured 11az measurement * @tk: TK to be used for secured 11az measurement */ -struct iwl_tof_range_req_ap_entry { +struct iwl_tof_range_req_ap_entry_v4 { __le32 initiator_ap_flags; u8 channel_num; u8 format_bw; @@ -543,6 +548,65 @@ struct iwl_tof_range_req_ap_entry { } __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_4 */ /** + * enum iwl_location_cipher - location cipher selection + * @IWL_LOCATION_CIPHER_CCMP_128: CCMP 128 + * @IWL_LOCATION_CIPHER_GCMP_128: GCMP 128 + * @IWL_LOCATION_CIPHER_GCMP_256: GCMP 256 + */ +enum iwl_location_cipher { + IWL_LOCATION_CIPHER_CCMP_128, + IWL_LOCATION_CIPHER_GCMP_128, + IWL_LOCATION_CIPHER_GCMP_256, +}; + +/** + * struct iwl_tof_range_req_ap_entry - AP configuration parameters + * @initiator_ap_flags: see &enum iwl_initiator_ap_flags. + * @channel_num: AP Channel number + * @format_bw: bits 0 - 3: &enum iwl_location_frame_format. + * bits 4 - 7: &enum iwl_location_bw. + * @ctrl_ch_position: Coding of the control channel position relative to the + * center frequency, see iwl_mvm_get_ctrl_pos(). + * @ftmr_max_retries: Max number of retries to send the FTMR in case of no + * reply from the AP. + * @bssid: AP's BSSID + * @burst_period: Recommended value to be sent to the AP. Measurement + * periodicity In units of 100ms. ignored if num_of_bursts_exp = 0 + * @samples_per_burst: the number of FTMs pairs in single Burst (1-31); + * @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of + * the number of measurement iterations (min 2^0 = 1, max 2^14) + * @sta_id: the station id of the AP. Only relevant when associated to the AP, + * otherwise should be set to &IWL_MVM_INVALID_STA. + * @cipher: pairwise cipher suite for secured measurement. + * &enum iwl_location_cipher. + * @hltk: HLTK to be used for secured 11az measurement + * @tk: TK to be used for secured 11az measurement + * @calib: An array of calibration values per FTM rx bandwidth. 
+ * If &IWL_INITIATOR_AP_FLAGS_USE_CALIB is set, the fw will use the + * calibration value that corresponds to the rx bandwidth of the FTM + * frame. + * @beacon_interval: beacon interval of the AP in TUs. Only required if + * &IWL_INITIATOR_AP_FLAGS_TB is set. + */ +struct iwl_tof_range_req_ap_entry { + __le32 initiator_ap_flags; + u8 channel_num; + u8 format_bw; + u8 ctrl_ch_position; + u8 ftmr_max_retries; + u8 bssid[ETH_ALEN]; + __le16 burst_period; + u8 samples_per_burst; + u8 num_of_bursts; + u8 sta_id; + u8 cipher; + u8 hltk[HLTK_11AZ_LEN]; + u8 tk[TK_11AZ_LEN]; + __le16 calib[IWL_TOF_BW_NUM]; + __le16 beacon_interval; +} __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_6 */ + +/** * enum iwl_tof_response_mode * @IWL_MVM_TOF_RESPONSE_ASAP: report each AP measurement separately as soon as * possible (not supported for this release) @@ -676,7 +740,7 @@ struct iwl_tof_range_req_cmd_v7 { } __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_7 */ /** - * struct iwl_tof_range_req_cmd - start measurement cmd + * struct iwl_tof_range_req_cmd_v8 - start measurement cmd * @initiator_flags: see flags @ iwl_tof_initiator_flags * @request_id: A Token incremented per request. The same Token will be * sent back in the range response @@ -693,7 +757,7 @@ struct iwl_tof_range_req_cmd_v7 { * @specific_calib: The specific calib value to inject to this measurement calc * @ap: per-AP request data, see &struct iwl_tof_range_req_ap_entry_v2. */ -struct iwl_tof_range_req_cmd { +struct iwl_tof_range_req_cmd_v8 { __le32 initiator_flags; u8 request_id; u8 num_of_ap; @@ -704,9 +768,37 @@ struct iwl_tof_range_req_cmd { __le32 tsf_mac_id; __le16 common_calib; __le16 specific_calib; - struct iwl_tof_range_req_ap_entry ap[IWL_MVM_TOF_MAX_APS]; + struct iwl_tof_range_req_ap_entry_v4 ap[IWL_MVM_TOF_MAX_APS]; } __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_8 */ +/** + * struct iwl_tof_range_req_cmd - start measurement cmd + * @initiator_flags: see flags @ iwl_tof_initiator_flags + * @request_id: A Token incremented per request. The same Token will be + * sent back in the range response + * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS) + * @range_req_bssid: ranging request BSSID + * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template. + * Bits set to 1 shall be randomized by the UMAC + * @macaddr_template: MAC address template to use for non-randomized bits + * @req_timeout_ms: Requested timeout of the response in units of milliseconds. + * This is the session time for completing the measurement. + * @tsf_mac_id: report the measurement start time for each ap in terms of the + * TSF of this mac id. 0xff to disable TSF reporting. + * @ap: per-AP request data, see &struct iwl_tof_range_req_ap_entry_v2. 
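With both layouts kept in the driver, the sender sizes the host command by the version the firmware advertises. A hedged sketch of that dispatch using iwl_fw_lookup_cmd_ver(); the two branch helpers are hypothetical names for filling the _v8 and _VER_9 layouts:

u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
				   TOF_RANGE_REQ_CMD);

if (cmd_ver >= 9)	/* LOCATION_RANGE_REQ_CMD_API_S_VER_9 */
	err = iwl_mvm_ftm_start_v9(mvm, vif, req);
else			/* older firmware: use the _v8 layout */
	err = iwl_mvm_ftm_start_v8(mvm, vif, req);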
+ */ +struct iwl_tof_range_req_cmd { + __le32 initiator_flags; + u8 request_id; + u8 num_of_ap; + u8 range_req_bssid[ETH_ALEN]; + u8 macaddr_mask[ETH_ALEN]; + u8 macaddr_template[ETH_ALEN]; + __le32 req_timeout_ms; + __le32 tsf_mac_id; + struct iwl_tof_range_req_ap_entry ap[IWL_MVM_TOF_MAX_APS]; +} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_9 */ + /* * enum iwl_tof_range_request_status - status of the sent request * @IWL_TOF_RANGE_REQUEST_STATUS_SUCCESSFUL - FW successfully received the diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h index 2d230a7893c2..fd719c37428c 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h @@ -8,7 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(C) 2018 - 2019 Intel Corporation + * Copyright(C) 2018 - 2020 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,7 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(C) 2018 - 2019 Intel Corporation + * Copyright(C) 2018 - 2020 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -75,6 +75,11 @@ enum iwl_regulatory_and_nvm_subcmd_ids { NVM_ACCESS_COMPLETE = 0x0, /** + * @LARI_CONFIG_CHANGE: &struct iwl_lari_config_change_cmd + */ + LARI_CONFIG_CHANGE = 0x1, + + /** * @NVM_GET_INFO: * Command is &struct iwl_nvm_get_info, * response is &struct iwl_nvm_get_info_rsp @@ -446,4 +451,29 @@ struct iwl_tas_config_cmd { __le32 black_list_size; __le32 black_list_array[IWL_TAS_BLACK_LIST_MAX]; } __packed; /* TAS_CONFIG_CMD_API_S_VER_2 */ + +/** + * enum iwl_lari_configs - bit masks for the various LARI config operations + * @LARI_CONFIG_DISABLE_11AC_UKRAINE_MSK: disable 11ac in ukraine + * @LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK: ETSI 5.8GHz SRD passive scan + * @LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK: ETSI 5.8GHz SRD disabled + * @LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK: enable 5.15/5.35GHz bands in + * Indonesia + */ +enum iwl_lari_config_masks { + LARI_CONFIG_DISABLE_11AC_UKRAINE_MSK = BIT(0), + LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK = BIT(1), + LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK = BIT(2), + LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK = BIT(3), +}; + +/** + * struct iwl_lari_config_change_cmd - change LARI configuration + * @config_bitmap: bit map of the config commands. each bit will trigger a + * different predefined FW config operation + */ +struct iwl_lari_config_change_cmd { + __le32 config_bitmap; +} __packed; /* LARI_CHANGE_CONF_CMD_S_VER_1 */ + #endif /* __iwl_fw_api_nvm_reg_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h index 88bc7733065f..b8b36a4f9eb9 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h @@ -5,10 +5,9 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -28,10 +27,9 @@ * * BSD LICENSE * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -535,9 +533,9 @@ struct iwl_rx_mpdu_desc_v3 { __le32 filter_match; /** - * @phy_data2: depends on info type (see @phy_data1) + * @phy_data3: depends on info type (see @phy_data1) */ - __le32 phy_data2; + __le32 phy_data3; }; /* DW8 - carries rss_hash only when rpa_en == 1 */ @@ -548,9 +546,9 @@ struct iwl_rx_mpdu_desc_v3 { __le32 rss_hash; /** - * @phy_data3: depends on info type (see @phy_data1) + * @phy_data2: depends on info type (see @phy_data1) */ - __le32 phy_data3; + __le32 phy_data2; }; /* DW9 */ /** diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c index 39c8332be3ac..4d3687cc83a4 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c @@ -1367,33 +1367,57 @@ static void iwl_ini_get_rxf_data(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; u32 fid1 = le32_to_cpu(reg->fifos.fid[0]); u32 fid2 = le32_to_cpu(reg->fifos.fid[1]); - u32 fifo_idx; + u8 fifo_idx; if (!data) return; + /* make sure only one bit is set in only one fid */ + if (WARN_ONCE(hweight_long(fid1) + hweight_long(fid2) != 1, + "fid1=%x, fid2=%x\n", fid1, fid2)) + return; + memset(data, 0, sizeof(*data)); - if (WARN_ON_ONCE((fid1 && fid2) || (!fid1 && !fid2))) - return; + if (fid1) { + fifo_idx = ffs(fid1) - 1; + if (WARN_ONCE(fifo_idx >= MAX_NUM_LMAC, "fifo_idx=%d\n", + fifo_idx)) + return; - fifo_idx = ffs(fid1) - 1; - if (fid1 && !WARN_ON_ONCE((~BIT(fifo_idx) & fid1) || - fifo_idx >= MAX_NUM_LMAC)) { data->size = fwrt->smem_cfg.lmac[fifo_idx].rxfifo1_size; data->fifo_num = fifo_idx; - return; - } + } else { + u8 max_idx; + + fifo_idx = ffs(fid2) - 1; + if (iwl_fw_lookup_notif_ver(fwrt->fw, SYSTEM_GROUP, + SHARED_MEM_CFG_CMD, 0) <= 3) + max_idx = 0; + else + max_idx = 1; + + if (WARN_ONCE(fifo_idx > max_idx, + "invalid umac fifo idx %d", fifo_idx)) + return; - fifo_idx = ffs(fid2) - 1; - if (fid2 && !WARN_ON_ONCE(fifo_idx != 0)) { - data->size = fwrt->smem_cfg.rxfifo2_size; - data->offset = RXF_DIFF_FROM_PREV; /* use bit 31 to distinguish between umac and lmac rxf while * parsing the dump */ data->fifo_num = fifo_idx | IWL_RXF_UMAC_BIT; - return; + + switch (fifo_idx) { + case 0: + data->size = fwrt->smem_cfg.rxfifo2_size; + data->offset = iwl_umac_prph(fwrt->trans, + RXF_DIFF_FROM_PREV); + break; + case 1: + data->size = fwrt->smem_cfg.rxfifo2_control_size; + data->offset = iwl_umac_prph(fwrt->trans, + RXF2C_DIFF_FROM_PREV); + break; + } } } @@ -1934,6 +1958,7 @@ static u32 iwl_dump_ini_info(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_dump_cfg_name *cfg_name; u32 size = sizeof(*tlv) + sizeof(*dump); u32 num_of_cfg_names = 0; + u32 hw_type; list_for_each_entry(node, &fwrt->trans->dbg.debug_info_tlv_list, list) { size += 
sizeof(*cfg_name); @@ -1962,7 +1987,26 @@ static u32 iwl_dump_ini_info(struct iwl_fw_runtime *fwrt, dump->ver_subtype = cpu_to_le32(fwrt->dump.fw_ver.subtype); dump->hw_step = cpu_to_le32(CSR_HW_REV_STEP(fwrt->trans->hw_rev)); - dump->hw_type = cpu_to_le32(CSR_HW_REV_TYPE(fwrt->trans->hw_rev)); + + /* + * Several HWs all have type == 0x42, so we'll override this value + * according to the detected HW + */ + hw_type = CSR_HW_REV_TYPE(fwrt->trans->hw_rev); + if (hw_type == IWL_AX210_HW_TYPE) { + u32 prph_val = iwl_read_prph(fwrt->trans, WFPM_OTP_CFG1_ADDR); + u32 is_jacket = !!(prph_val & WFPM_OTP_CFG1_IS_JACKET_BIT); + u32 is_cdb = !!(prph_val & WFPM_OTP_CFG1_IS_CDB_BIT); + u32 masked_bits = is_jacket | (is_cdb << 1); + + /* + * The HW type depends on certain bits in this case, so add + * these bits to the HW type. We won't have collisions since we + * add these bits after the highest possible bit in the mask. + */ + hw_type |= masked_bits << IWL_AX210_HW_TYPE_ADDITION_SHIFT; + } + dump->hw_type = cpu_to_le32(hw_type); dump->rf_id_flavor = cpu_to_le32(CSR_HW_RFID_FLAVOR(fwrt->trans->hw_rf_id)); @@ -2095,7 +2139,11 @@ static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt, u32 size = 0; u64 regions_mask = le64_to_cpu(trigger->regions_mask); - for (i = 0; i < 64; i++) { + BUILD_BUG_ON(sizeof(trigger->regions_mask) != sizeof(regions_mask)); + BUILD_BUG_ON((sizeof(trigger->regions_mask) * BITS_PER_BYTE) < + ARRAY_SIZE(fwrt->trans->dbg.active_regions)); + + for (i = 0; i < ARRAY_SIZE(fwrt->trans->dbg.active_regions); i++) { u32 reg_type; struct iwl_fw_ini_region_tlv *reg; @@ -2174,12 +2222,11 @@ static u32 iwl_dump_ini_file_gen(struct iwl_fw_runtime *fwrt, } static inline void iwl_fw_free_dump_desc(struct iwl_fw_runtime *fwrt, - const struct iwl_fw_dump_desc **desc) + const struct iwl_fw_dump_desc *desc) { - if (desc && *desc != &iwl_dump_desc_assert) - kfree(*desc); + if (desc && desc != &iwl_dump_desc_assert) + kfree(desc); - *desc = NULL; fwrt->dump.lmac_err_id[0] = 0; if (fwrt->smem_cfg.num_lmacs > 1) fwrt->dump.lmac_err_id[1] = 0; @@ -2291,7 +2338,7 @@ int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt, unsigned long idx; if (iwl_trans_dbg_ini_valid(fwrt->trans)) { - iwl_fw_free_dump_desc(fwrt, &desc); + iwl_fw_free_dump_desc(fwrt, desc); return 0; } @@ -2312,7 +2359,7 @@ int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt, wk_data = &fwrt->dump.wks[idx]; if (WARN_ON(wk_data->dump_data.desc)) - iwl_fw_free_dump_desc(fwrt, &wk_data->dump_data.desc); + iwl_fw_free_dump_desc(fwrt, wk_data->dump_data.desc); wk_data->dump_data.desc = desc; wk_data->dump_data.monitor_only = monitor_only; @@ -2569,10 +2616,12 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx) iwl_fw_dbg_stop_restart_recording(fwrt, ¶ms, false); out: - if (iwl_trans_dbg_ini_valid(fwrt->trans)) + if (iwl_trans_dbg_ini_valid(fwrt->trans)) { iwl_fw_error_dump_data_free(dump_data); - else - iwl_fw_free_dump_desc(fwrt, &dump_data->desc); + } else { + iwl_fw_free_dump_desc(fwrt, dump_data->desc); + dump_data->desc = NULL; + } clear_bit(wk_idx, &fwrt->dump.active_wks); } @@ -2731,7 +2780,7 @@ void iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt, struct iwl_fw_dbg_params *params, bool stop) { - int ret = 0; + int ret __maybe_unused = 0; if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) return; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c index 89f74116569d..6e72c27f527b 100644 --- 
a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c @@ -5,10 +5,9 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright (C) 2018 Intel Corporation + * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -28,10 +27,9 @@ * * BSD LICENSE * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright (C) 2018 Intel Corporation + * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -64,6 +62,7 @@ #include "api/commands.h" #include "debugfs.h" #include "dbg.h" +#include <linux/seq_file.h> #define FWRT_DEBUGFS_OPEN_WRAPPER(name, buflen, argtype) \ struct dbgfs_##name##_data { \ @@ -329,11 +328,108 @@ static ssize_t iwl_dbgfs_fw_dbg_domain_read(struct iwl_fw_runtime *fwrt, FWRT_DEBUGFS_READ_FILE_OPS(fw_dbg_domain, 20); +struct iwl_dbgfs_fw_info_priv { + struct iwl_fw_runtime *fwrt; +}; + +struct iwl_dbgfs_fw_info_state { + loff_t pos; +}; + +static void *iwl_dbgfs_fw_info_seq_next(struct seq_file *seq, + void *v, loff_t *pos) +{ + struct iwl_dbgfs_fw_info_state *state = v; + struct iwl_dbgfs_fw_info_priv *priv = seq->private; + const struct iwl_fw *fw = priv->fwrt->fw; + + *pos = ++state->pos; + if (*pos >= fw->ucode_capa.n_cmd_versions) + return NULL; + + return state; +} + +static void iwl_dbgfs_fw_info_seq_stop(struct seq_file *seq, + void *v) +{ + kfree(v); +} + +static void *iwl_dbgfs_fw_info_seq_start(struct seq_file *seq, loff_t *pos) +{ + struct iwl_dbgfs_fw_info_priv *priv = seq->private; + const struct iwl_fw *fw = priv->fwrt->fw; + struct iwl_dbgfs_fw_info_state *state; + + if (*pos >= fw->ucode_capa.n_cmd_versions) + return NULL; + + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (!state) + return NULL; + state->pos = *pos; + return state; +}; + +static int iwl_dbgfs_fw_info_seq_show(struct seq_file *seq, void *v) +{ + struct iwl_dbgfs_fw_info_state *state = v; + struct iwl_dbgfs_fw_info_priv *priv = seq->private; + const struct iwl_fw *fw = priv->fwrt->fw; + const struct iwl_fw_cmd_version *ver; + u32 cmd_id; + + if (!state->pos) + seq_puts(seq, "fw_api_ver:\n"); + + ver = &fw->ucode_capa.cmd_versions[state->pos]; + + cmd_id = iwl_cmd_id(ver->cmd, ver->group, 0); + + seq_printf(seq, " 0x%04x:\n", cmd_id); + seq_printf(seq, " name: %s\n", + iwl_get_cmd_string(priv->fwrt->trans, cmd_id)); + seq_printf(seq, " cmd_ver: %d\n", ver->cmd_ver); + seq_printf(seq, " notif_ver: %d\n", ver->notif_ver); + return 0; +} + +static const struct seq_operations iwl_dbgfs_info_seq_ops = { + .start = iwl_dbgfs_fw_info_seq_start, + .next = iwl_dbgfs_fw_info_seq_next, + .stop = iwl_dbgfs_fw_info_seq_stop, + .show = iwl_dbgfs_fw_info_seq_show, +}; + +static int iwl_dbgfs_fw_info_open(struct inode *inode, struct file *filp) +{ + struct iwl_dbgfs_fw_info_priv *priv; + + priv = __seq_open_private(filp, &iwl_dbgfs_info_seq_ops, + sizeof(*priv)); + + if (!priv) + return -ENOMEM; + + priv->fwrt = inode->i_private; + return 0; +} + +static const struct file_operations iwl_dbgfs_fw_info_ops = { + .owner = THIS_MODULE, + 
.open = iwl_dbgfs_fw_info_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release_private, +}; + void iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt, struct dentry *dbgfs_dir) { INIT_DELAYED_WORK(&fwrt->timestamp.wk, iwl_fw_timestamp_marker_wk); FWRT_DEBUGFS_ADD_FILE(timestamp_marker, dbgfs_dir, 0200); + FWRT_DEBUGFS_ADD_FILE(fw_info, dbgfs_dir, 0400); FWRT_DEBUGFS_ADD_FILE(send_hcmd, dbgfs_dir, 0200); FWRT_DEBUGFS_ADD_FILE(fw_dbg_domain, dbgfs_dir, 0400); } diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h index f008e1bbfdf4..72bfc64580ab 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h @@ -8,7 +8,7 @@ * Copyright(c) 2014 Intel Corporation. All rights reserved. * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright (C) 2018 - 2019 Intel Corporation + * Copyright (C) 2018 - 2020 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,7 +31,7 @@ * Copyright(c) 2014 Intel Corporation. All rights reserved. * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright (C) 2018 - 2019 Intel Corporation + * Copyright (C) 2018 - 2020 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -394,6 +394,15 @@ struct iwl_fw_ini_dump_cfg_name { u8 cfg_name[IWL_FW_INI_MAX_CFG_NAME]; } __packed; +/* AX210's HW type */ +#define IWL_AX210_HW_TYPE 0x42 +/* How many bits to roll when adding to the HW type of AX210 HW */ +#define IWL_AX210_HW_TYPE_ADDITION_SHIFT 12 +/* This prph is used to tell apart HW_TYPE == 0x42 NICs */ +#define WFPM_OTP_CFG1_ADDR 0xd03098 +#define WFPM_OTP_CFG1_IS_JACKET_BIT BIT(4) +#define WFPM_OTP_CFG1_IS_CDB_BIT BIT(5) + /* struct iwl_fw_ini_dump_info - ini dump information * @version: dump version * @time_point: time point that caused the dump collection diff --git a/drivers/net/wireless/intel/iwlwifi/fw/init.c b/drivers/net/wireless/intel/iwlwifi/fw/init.c index ba00d162ce72..b373606e1241 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/init.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/init.c @@ -6,7 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2019 Intel Corporation + * Copyright(c) 2019 - 2020 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -27,7 +27,7 @@ * BSD LICENSE * * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2019 Intel Corporation + * Copyright(c) 2019 - 2020 Intel Corporation * All rights reserved.
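The fw_info hooks above follow the seq_file iterator contract: start() allocates a cursor positioned at *pos (or returns NULL past the end), next() advances it, stop() frees it, and show() emits one record, printing a header only before the first entry. Below is a minimal userspace analogue of that contract; seq_file itself is kernel-only, so the record array and names here are invented stand-ins for fw->ucode_capa.cmd_versions.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative records standing in for fw->ucode_capa.cmd_versions. */
static const char *records[] = { "cmd 0x0000", "cmd 0x0104", "cmd 0x0c01" };
#define N_RECORDS (sizeof(records) / sizeof(records[0]))

struct cursor { size_t pos; };

/* start(): allocate the iteration state, or NULL past the end */
static struct cursor *it_start(size_t pos)
{
        struct cursor *c;

        if (pos >= N_RECORDS)
                return NULL;
        c = calloc(1, sizeof(*c));
        if (c)
                c->pos = pos;
        return c;
}

/* next(): advance in place; NULL signals the end of the sequence */
static struct cursor *it_next(struct cursor *c)
{
        if (++c->pos >= N_RECORDS)
                return NULL;
        return c;
}

/* show(): print one record, with a header before the first one */
static void it_show(const struct cursor *c)
{
        if (!c->pos)
                puts("fw_api_ver:");
        printf("  %s\n", records[c->pos]);
}

int main(void)
{
        struct cursor *c = it_start(0), *state = c;

        for (; c; c = it_next(c))
                it_show(c);
        free(state);    /* stop(): release the iteration state */
        return 0;
}

Returning the same cursor object from next(), or NULL at the end, mirrors what iwl_dbgfs_fw_info_seq_next() does with its state structure.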
* * Redistribution and use in source and binary forms, with or without @@ -62,6 +62,9 @@ #include "dbg.h" #include "debugfs.h" +#include "fw/api/soc.h" +#include "fw/api/commands.h" + void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans, const struct iwl_fw *fw, const struct iwl_fw_runtime_ops *ops, void *ops_ctx, @@ -95,3 +98,51 @@ void iwl_fw_runtime_resume(struct iwl_fw_runtime *fwrt) iwl_fw_resume_timestamp(fwrt); } IWL_EXPORT_SYMBOL(iwl_fw_runtime_resume); + +/* set device type and latency */ +int iwl_set_soc_latency(struct iwl_fw_runtime *fwrt) +{ + struct iwl_soc_configuration_cmd cmd = {}; + struct iwl_host_cmd hcmd = { + .id = iwl_cmd_id(SOC_CONFIGURATION_CMD, SYSTEM_GROUP, 0), + .data[0] = &cmd, + .len[0] = sizeof(cmd), + }; + int ret; + + /* + * In VER_1 of this command, the discrete value is considered + * an integer; In VER_2, it's a bitmask. Since we have only 2 + * values in VER_1, this is backwards-compatible with VER_2, + * as long as we don't set any other bits. + */ + if (!fwrt->trans->trans_cfg->integrated) + cmd.flags = cpu_to_le32(SOC_CONFIG_CMD_FLAGS_DISCRETE); + + BUILD_BUG_ON(IWL_CFG_TRANS_LTR_DELAY_NONE != + SOC_FLAGS_LTR_APPLY_DELAY_NONE); + BUILD_BUG_ON(IWL_CFG_TRANS_LTR_DELAY_200US != + SOC_FLAGS_LTR_APPLY_DELAY_200); + BUILD_BUG_ON(IWL_CFG_TRANS_LTR_DELAY_2500US != + SOC_FLAGS_LTR_APPLY_DELAY_2500); + BUILD_BUG_ON(IWL_CFG_TRANS_LTR_DELAY_1820US != + SOC_FLAGS_LTR_APPLY_DELAY_1820); + + if (fwrt->trans->trans_cfg->ltr_delay != IWL_CFG_TRANS_LTR_DELAY_NONE && + !WARN_ON(!fwrt->trans->trans_cfg->integrated)) + cmd.flags |= le32_encode_bits(fwrt->trans->trans_cfg->ltr_delay, + SOC_FLAGS_LTR_APPLY_DELAY_MASK); + + if (iwl_fw_lookup_cmd_ver(fwrt->fw, IWL_ALWAYS_LONG_GROUP, + SCAN_REQ_UMAC) >= 2 && + fwrt->trans->trans_cfg->low_latency_xtal) + cmd.flags |= cpu_to_le32(SOC_CONFIG_CMD_FLAGS_LOW_LATENCY); + + cmd.latency = cpu_to_le32(fwrt->trans->trans_cfg->xtal_latency); + + ret = iwl_trans_send_cmd(fwrt->trans, &hcmd); + if (ret) + IWL_ERR(fwrt, "Failed to set soc latency: %d\n", ret); + return ret; +} +IWL_EXPORT_SYMBOL(iwl_set_soc_latency); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h index 9906d9b9bdd5..b5e5e32b6152 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h @@ -6,7 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright (C) 2018-2019 Intel Corporation + * Copyright (C) 2018-2020 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -27,7 +27,7 @@ * BSD LICENSE * * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright (C) 2018-2019 Intel Corporation + * Copyright (C) 2018-2020 Intel Corporation * All rights reserved. 
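Two generic idioms carry iwl_set_soc_latency() above: BUILD_BUG_ON() pins the driver's config constants to the firmware ABI values at compile time, and le32_encode_bits() shifts a value into a contiguous bitfield mask before the endianness conversion. Here is a compile-time-checked userspace sketch of both; the mask value is invented (the real SOC_FLAGS_LTR_APPLY_DELAY_MASK layout is firmware-defined) and static_assert/FIELD_PREP stand in for their kernel counterparts.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define LTR_DELAY_MASK 0x0000c000u      /* illustrative two-bit field */

#define CFG_LTR_DELAY_2500US 2u
#define FW_LTR_APPLY_DELAY_2500 2u
/* like BUILD_BUG_ON(): refuse to build if driver and FW ABI diverge */
static_assert(CFG_LTR_DELAY_2500US == FW_LTR_APPLY_DELAY_2500,
              "driver constant must match firmware ABI");

static uint32_t field_prep(uint32_t mask, uint32_t val)
{
        /* shift val to the mask's lowest set bit, like FIELD_PREP() */
        return (val << __builtin_ctz(mask)) & mask;
}

int main(void)
{
        uint32_t flags = 0;

        flags |= field_prep(LTR_DELAY_MASK, CFG_LTR_DELAY_2500US);
        printf("flags = 0x%08x\n", flags);      /* 0x00008000 */
        return 0;
}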
* * Redistribution and use in source and binary forms, with or without @@ -86,6 +86,7 @@ struct iwl_fwrt_shared_mem_cfg { u32 rxfifo1_size; } lmac[MAX_NUM_LMAC]; u32 rxfifo2_size; + u32 rxfifo2_control_size; u32 internal_txfifo_addr; u32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM]; }; @@ -241,5 +242,6 @@ int iwl_init_paging(struct iwl_fw_runtime *fwrt, enum iwl_ucode_type type); void iwl_free_fw_paging(struct iwl_fw_runtime *fwrt); void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt); +int iwl_set_soc_latency(struct iwl_fw_runtime *fwrt); #endif /* __iwl_fw_runtime_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/smem.c b/drivers/net/wireless/intel/iwlwifi/fw/smem.c index 409b2dd854ac..700fdab14209 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/smem.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/smem.c @@ -5,10 +5,9 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -28,10 +27,9 @@ * * BSD LICENSE * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -71,6 +69,8 @@ static void iwl_parse_shared_mem_22000(struct iwl_fw_runtime *fwrt, struct iwl_shared_mem_cfg *mem_cfg = (void *)pkt->data; int i, lmac; int lmac_num = le32_to_cpu(mem_cfg->lmac_num); + u8 api_ver = iwl_fw_lookup_notif_ver(fwrt->fw, SYSTEM_GROUP, + SHARED_MEM_CFG_CMD, 0); if (WARN_ON(lmac_num > ARRAY_SIZE(mem_cfg->lmac_smem))) return; @@ -80,6 +80,12 @@ static void iwl_parse_shared_mem_22000(struct iwl_fw_runtime *fwrt, ARRAY_SIZE(mem_cfg->lmac_smem[0].txfifo_size); fwrt->smem_cfg.rxfifo2_size = le32_to_cpu(mem_cfg->rxfifo2_size); + if (api_ver >= 4 && + !WARN_ON_ONCE(iwl_rx_packet_payload_len(pkt) < sizeof(*mem_cfg))) { + fwrt->smem_cfg.rxfifo2_control_size = + le32_to_cpu(mem_cfg->rxfifo2_control_size); + } + for (lmac = 0; lmac < lmac_num; lmac++) { struct iwl_shared_mem_lmac_cfg *lmac_cfg = &mem_cfg->lmac_smem[lmac]; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index 3a9a33851793..244899f3f3bf 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -477,12 +477,16 @@ struct iwl_cfg { #define IWL_CFG_RF_TYPE_TH1 0x108 #define IWL_CFG_RF_TYPE_JF2 0x105 #define IWL_CFG_RF_TYPE_JF1 0x108 +#define IWL_CFG_RF_TYPE_HR2 0x10A +#define IWL_CFG_RF_TYPE_HR1 0x10C #define IWL_CFG_RF_ID_TH 0x1 #define IWL_CFG_RF_ID_TH1 0x1 #define IWL_CFG_RF_ID_JF 0x3 #define IWL_CFG_RF_ID_JF1 0x6 #define IWL_CFG_RF_ID_JF1_DIV 0xA +#define IWL_CFG_RF_ID_HR 0x7 +#define IWL_CFG_RF_ID_HR1 0x4 #define IWL_CFG_NO_160 0x0 #define IWL_CFG_160 0x1 @@ -535,6 +539,8 @@ extern const char iwl9260_killer_1550_name[]; extern const char iwl9560_killer_1550i_name[]; extern const char iwl9560_killer_1550s_name[]; extern const char iwl_ax200_name[]; +extern const char iwl_ax201_name[]; +extern 
const char iwl_ax101_name[]; extern const char iwl_ax200_killer_1650w_name[]; extern const char iwl_ax200_killer_1650x_name[]; @@ -609,9 +615,9 @@ extern const struct iwl_cfg iwl9560_qu_c0_jf_b0_cfg; extern const struct iwl_cfg iwl9560_quz_a0_jf_b0_cfg; extern const struct iwl_cfg iwl9560_qnj_b0_jf_b0_cfg; extern const struct iwl_cfg iwl9560_2ac_cfg_soc; -extern const struct iwl_cfg iwl_ax101_cfg_qu_hr; -extern const struct iwl_cfg iwl_ax101_cfg_qu_c0_hr_b0; -extern const struct iwl_cfg iwl_ax101_cfg_quz_hr; +extern const struct iwl_cfg iwl_qu_b0_hr1_b0; +extern const struct iwl_cfg iwl_qu_c0_hr1_b0; +extern const struct iwl_cfg iwl_quz_a0_hr1_b0; extern const struct iwl_cfg iwl_ax200_cfg_cc; extern const struct iwl_cfg iwl_ax201_cfg_qu_hr; extern const struct iwl_cfg iwl_ax201_cfg_qu_hr; @@ -625,8 +631,7 @@ extern const struct iwl_cfg killer1650s_2ax_cfg_qu_c0_hr_b0; extern const struct iwl_cfg killer1650i_2ax_cfg_qu_c0_hr_b0; extern const struct iwl_cfg killer1650x_2ax_cfg; extern const struct iwl_cfg killer1650w_2ax_cfg; -extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0_f0; -extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0; +extern const struct iwl_cfg iwl_qnj_b0_hr_b0_cfg; extern const struct iwl_cfg iwlax210_2ax_cfg_so_jf_a0; extern const struct iwl_cfg iwlax210_2ax_cfg_so_hr_a0; extern const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h index ebea99189ca9..9d7a04833cd0 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h @@ -5,7 +5,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018, 2020 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -18,7 +18,7 @@ * * BSD LICENSE * - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018, 2020 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -93,6 +93,11 @@ enum iwl_prph_scratch_mtr_format { * @IWL_PRPH_SCRATCH_MTR_FORMAT: a mask for the size of the tfd. * There are 4 optional values: 0: 16 bit, 1: 32 bit, 2: 64 bit, * 3: 256 bit. + * @IWL_PRPH_SCRATCH_RB_SIZE_EXT_MASK: RB size full information, ignored + * by older firmware versions, so set IWL_PRPH_SCRATCH_RB_SIZE_4K + * appropriately; use the below values for this. 
+ * @IWL_PRPH_SCRATCH_RB_SIZE_EXT_8K: 8kB RB size + * @IWL_PRPH_SCRATCH_RB_SIZE_EXT_12K: 12kB RB size */ enum iwl_prph_scratch_flags { IWL_PRPH_SCRATCH_EARLY_DEBUG_EN = BIT(4), @@ -103,6 +108,9 @@ enum iwl_prph_scratch_flags { IWL_PRPH_SCRATCH_RB_SIZE_4K = BIT(16), IWL_PRPH_SCRATCH_MTR_MODE = BIT(17), IWL_PRPH_SCRATCH_MTR_FORMAT = BIT(18) | BIT(19), + IWL_PRPH_SCRATCH_RB_SIZE_EXT_MASK = 0xf << 20, + IWL_PRPH_SCRATCH_RB_SIZE_EXT_8K = 8 << 20, + IWL_PRPH_SCRATCH_RB_SIZE_EXT_12K = 9 << 20, }; /* diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c index 9eb8fbfaa2a2..7987a288917b 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c @@ -165,38 +165,36 @@ static int iwl_dbg_tlv_alloc_buf_alloc(struct iwl_trans *trans, struct iwl_ucode_tlv *tlv) { struct iwl_fw_ini_allocation_tlv *alloc = (void *)tlv->data; - u32 buf_location = le32_to_cpu(alloc->buf_location); - u32 alloc_id = le32_to_cpu(alloc->alloc_id); + u32 buf_location; + u32 alloc_id; - if (le32_to_cpu(tlv->length) != sizeof(*alloc) || - (buf_location != IWL_FW_INI_LOCATION_SRAM_PATH && - buf_location != IWL_FW_INI_LOCATION_DRAM_PATH && - buf_location != IWL_FW_INI_LOCATION_NPK_PATH)) { - IWL_ERR(trans, - "WRT: Invalid allocation TLV\n"); + if (le32_to_cpu(tlv->length) != sizeof(*alloc)) return -EINVAL; - } - if ((buf_location == IWL_FW_INI_LOCATION_SRAM_PATH || - buf_location == IWL_FW_INI_LOCATION_NPK_PATH) && - alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1) { - IWL_ERR(trans, - "WRT: Allocation TLV for SMEM/NPK path must have id %u (current: %u)\n", - IWL_FW_INI_ALLOCATION_ID_DBGC1, alloc_id); - return -EINVAL; - } + buf_location = le32_to_cpu(alloc->buf_location); + alloc_id = le32_to_cpu(alloc->alloc_id); + + if (buf_location == IWL_FW_INI_LOCATION_INVALID || + buf_location >= IWL_FW_INI_LOCATION_NUM) + goto err; if (alloc_id == IWL_FW_INI_ALLOCATION_INVALID || - alloc_id >= IWL_FW_INI_ALLOCATION_NUM) { - IWL_ERR(trans, - "WRT: Invalid allocation id %u for allocation TLV\n", - alloc_id); - return -EINVAL; - } + alloc_id >= IWL_FW_INI_ALLOCATION_NUM) + goto err; + + if ((buf_location == IWL_FW_INI_LOCATION_SRAM_PATH || + buf_location == IWL_FW_INI_LOCATION_NPK_PATH) && + alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1) + goto err; trans->dbg.fw_mon_cfg[alloc_id] = *alloc; return 0; +err: + IWL_ERR(trans, + "WRT: Invalid allocation id %u and/or location id %u for allocation TLV\n", + alloc_id, buf_location); + return -EINVAL; } static int iwl_dbg_tlv_alloc_hcmd(struct iwl_trans *trans, diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index a483d389d9c2..04f14bfdd091 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -5,10 +5,9 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2007 - 2014, 2018 - 2020 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -28,10 +27,9 @@ * * BSD LICENSE * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2005 - 2014, 2018 - 2020 Intel Corporation. All rights reserved. 
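The reworked iwl_dbg_tlv_alloc_buf_alloc() above replaces three separate error messages with range checks that funnel into a single labelled error path reporting both fields at once. A standalone sketch of that shape, with illustrative enum values in place of the IWL_FW_INI_* constants:

#include <stdio.h>

enum { LOC_INVALID = 0, LOC_SRAM, LOC_DRAM, LOC_NPK, LOC_NUM };
enum { ALLOC_INVALID = 0, ALLOC_DBGC1, ALLOC_DBGC2, ALLOC_NUM };

static int check_alloc_tlv(unsigned int buf_location, unsigned int alloc_id)
{
        if (buf_location == LOC_INVALID || buf_location >= LOC_NUM)
                goto err;

        if (alloc_id == ALLOC_INVALID || alloc_id >= ALLOC_NUM)
                goto err;

        /* SRAM/NPK buffers may only use the first debug allocation id */
        if ((buf_location == LOC_SRAM || buf_location == LOC_NPK) &&
            alloc_id != ALLOC_DBGC1)
                goto err;

        return 0;
err:
        fprintf(stderr,
                "invalid allocation id %u and/or location id %u\n",
                alloc_id, buf_location);
        return -1;
}

int main(void)
{
        check_alloc_tlv(LOC_DRAM, ALLOC_DBGC2); /* accepted */
        check_alloc_tlv(LOC_NPK, ALLOC_DBGC2);  /* rejected */
        return 0;
}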
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -87,7 +85,7 @@ #define DRV_DESCRIPTION "Intel(R) Wireless WiFi driver for Linux" MODULE_DESCRIPTION(DRV_DESCRIPTION); -MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); +MODULE_AUTHOR(DRV_AUTHOR); MODULE_LICENSE("GPL"); #ifdef CONFIG_IWLWIFI_DEBUGFS @@ -1776,7 +1774,6 @@ static int __init iwl_drv_init(void) INIT_LIST_HEAD(&iwlwifi_opmode_table[i].drv); pr_info(DRV_DESCRIPTION "\n"); - pr_info(DRV_COPYRIGHT "\n"); #ifdef CONFIG_IWLWIFI_DEBUGFS /* Create the root of iwlwifi debugfs subsystem. */ @@ -1824,11 +1821,6 @@ MODULE_PARM_DESC(amsdu_size, module_param_named(fw_restart, iwlwifi_mod_params.fw_restart, bool, 0444); MODULE_PARM_DESC(fw_restart, "restart firmware in case of error (default true)"); -module_param_named(antenna_coupling, iwlwifi_mod_params.antenna_coupling, - int, 0444); -MODULE_PARM_DESC(antenna_coupling, - "specify antenna coupling in dB (default: 0 dB)"); - module_param_named(nvm_file, iwlwifi_mod_params.nvm_file, charp, 0444); MODULE_PARM_DESC(nvm_file, "NVM file name"); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h index 2be30af7bdc3..8938a6467996 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h @@ -5,7 +5,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2008 - 2014, 2020 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify @@ -26,7 +26,7 @@ * * BSD LICENSE * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2005 - 2014, 2020 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * @@ -63,8 +63,7 @@ /* for all modules */ #define DRV_NAME "iwlwifi" -#define DRV_COPYRIGHT "Copyright(c) 2003- 2015 Intel Corporation" -#define DRV_AUTHOR "<linuxwifi@intel.com>" +#define DRV_AUTHOR "Intel Corporation <linuxwifi@intel.com>" /* radio config bits (actual values from NVM definition) */ #define NVM_RF_CFG_DASH_MSK(x) (x & 0x3) /* bits 0-1 */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h index bf673ce5f183..e77d8d13cb51 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h @@ -7,7 +7,7 @@ * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2018 - 2020 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -29,7 +29,7 @@ * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2018 - 2020 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -646,8 +646,7 @@ struct iwl_rb_status { #define TFD_QUEUE_CB_SIZE(x) (ilog2(x) - 3) #define TFD_QUEUE_SIZE_BC_DUP (64) #define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP) -#define TFD_QUEUE_BC_SIZE_GEN3 (TFD_QUEUE_SIZE_MAX_GEN3 + \ - TFD_QUEUE_SIZE_BC_DUP) +#define TFD_QUEUE_BC_SIZE_GEN3 1024 #define IWL_TX_DMA_MASK DMA_BIT_MASK(36) #define IWL_NUM_OF_TBS 20 #define IWL_TFH_NUM_TBS 25 diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h index b094cc1e9be0..e8ce3a300857 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h @@ -5,8 +5,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2007 - 2014, 2018 - 2020 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -26,8 +25,7 @@ * * BSD LICENSE * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2005 - 2014, 2018 - 2020 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -111,7 +109,6 @@ enum iwl_uapsd_disable { * @power_save: enable power save, default = false * @power_level: power level, default = 1 * @debug_level: levels are IWL_DL_* - * @antenna_coupling: antenna coupling in dB, default = 0 * @nvm_file: specifies a external NVM file * @uapsd_disable: disable U-APSD, see &enum iwl_uapsd_disable, default = * IWL_DISABLE_UAPSD_BSS | IWL_DISABLE_UAPSD_P2P_CLIENT @@ -131,7 +128,6 @@ struct iwl_mod_params { #ifdef CONFIG_IWLWIFI_DEBUG u32 debug_level; #endif - int antenna_coupling; char *nvm_file; u32 uapsd_disable; bool disable_11ac; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index d91a8e8349e6..ee410417761d 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -240,6 +240,7 @@ enum iwl_nvm_channel_flags { * @REG_CAPA_40MHZ_FORBIDDEN: 11n channel with a width of 40Mhz is forbidden * for this regulatory domain (valid only in 5Ghz). * @REG_CAPA_DC_HIGH_ENABLED: DC HIGH allowed. + * @REG_CAPA_11AX_DISABLED: 11ax is forbidden for this regulatory domain. */ enum iwl_reg_capa_flags { REG_CAPA_BF_CCD_LOW_BAND = BIT(0), @@ -250,6 +251,7 @@ enum iwl_reg_capa_flags { REG_CAPA_MCS_9_ALLOWED = BIT(5), REG_CAPA_40MHZ_FORBIDDEN = BIT(7), REG_CAPA_DC_HIGH_ENABLED = BIT(9), + REG_CAPA_11AX_DISABLED = BIT(10), }; static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level, @@ -1115,6 +1117,9 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan, flags |= NL80211_RRF_NO_160MHZ; } + if (cap_flags & REG_CAPA_11AX_DISABLED) + flags |= NL80211_RRF_NO_HE; + return flags; } diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h index 1136d9784f9d..8e254c0eda13 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h @@ -5,10 +5,9 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. 
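The nvm-parse hunk above extends a straightforward translation table: each firmware regulatory-capability bit that forbids a feature sets the matching nl80211 "no-<feature>" rule flag. A minimal sketch of that mapping; the REG_CAPA_* bit positions follow the hunk, while the RRF_* values here are stand-ins for the real NL80211_RRF_* constants:

#include <stdint.h>
#include <stdio.h>

#define REG_CAPA_40MHZ_FORBIDDEN (1u << 7)
#define REG_CAPA_11AX_DISABLED   (1u << 10)

#define RRF_NO_HT40 (1u << 0)   /* stand-in for NL80211_RRF_NO_HT40* */
#define RRF_NO_HE   (1u << 1)   /* stand-in for NL80211_RRF_NO_HE */

static uint32_t regdom_flags(uint32_t cap_flags)
{
        uint32_t flags = 0;

        /* each "forbidden" capa bit becomes a "no-<feature>" rule flag */
        if (cap_flags & REG_CAPA_40MHZ_FORBIDDEN)
                flags |= RRF_NO_HT40;
        if (cap_flags & REG_CAPA_11AX_DISABLED)
                flags |= RRF_NO_HE;
        return flags;
}

int main(void)
{
        printf("flags=0x%x\n", regdom_flags(REG_CAPA_11AX_DISABLED));
        return 0;
}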
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 Intel Deutschland GmbH - * Copyright (C) 2018 - 2019 Intel Corporation + * Copyright(c) 2005 - 2014, 2018 - 2020 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -28,10 +27,9 @@ * * BSD LICENSE * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 Intel Deutschland GmbH - * Copyright (C) 2018 - 2019 Intel Corporation + * Copyright(c) 2005 - 2014, 2018 - 2020 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -326,6 +324,7 @@ #define RXF_SIZE_BYTE_CND_POS (7) #define RXF_SIZE_BYTE_CNT_MSK (0x3ff << RXF_SIZE_BYTE_CND_POS) #define RXF_DIFF_FROM_PREV (0x200) +#define RXF2C_DIFF_FROM_PREV (0x4e00) #define RXF_LD_FENCE_OFFSET_ADDR (0xa00c10) #define RXF_FIFO_RD_FENCE_ADDR (0xa00c0c) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index bba527b339b5..a301e2484cdb 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -795,6 +795,132 @@ struct iwl_trans_debug { u32 domains_bitmap; }; +struct iwl_dma_ptr { + dma_addr_t dma; + void *addr; + size_t size; +}; + +struct iwl_cmd_meta { + /* only for SYNC commands, iff the reply skb is wanted */ + struct iwl_host_cmd *source; + u32 flags; + u32 tbs; +}; + +/* + * The FH will write back to the first TB only, so we need to copy some data + * into the buffer regardless of whether it should be mapped or not. + * This indicates how big the first TB must be to include the scratch buffer + * and the assigned PN. + * Since PN location is 8 bytes at offset 12, it's 20 now. + * If we make it bigger then allocations will be bigger and copy slower, so + * that's probably not useful. 
+ */ +#define IWL_FIRST_TB_SIZE 20 +#define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64) + +struct iwl_pcie_txq_entry { + void *cmd; + struct sk_buff *skb; + /* buffer to free after command completes */ + const void *free_buf; + struct iwl_cmd_meta meta; +}; + +struct iwl_pcie_first_tb_buf { + u8 buf[IWL_FIRST_TB_SIZE_ALIGN]; +}; + +/** + * struct iwl_txq - Tx Queue for DMA + * @q: generic Rx/Tx queue descriptor + * @tfds: transmit frame descriptors (DMA memory) + * @first_tb_bufs: start of command headers, including scratch buffers, for + * the writeback -- this is DMA memory and an array holding one buffer + * for each command on the queue + * @first_tb_dma: DMA address for the first_tb_bufs start + * @entries: transmit entries (driver state) + * @lock: queue lock + * @stuck_timer: timer that fires if queue gets stuck + * @trans: pointer back to transport (for timer) + * @need_update: indicates need to update read/write index + * @ampdu: true if this queue is an ampdu queue for a specific RA/TID + * @wd_timeout: queue watchdog timeout (jiffies) - per queue + * @frozen: tx stuck queue timer is frozen + * @frozen_expiry_remainder: remember how long until the timer fires + * @bc_tbl: byte count table of the queue (relevant only for gen2 transport) + * @write_ptr: first empty entry (index) host_w + * @read_ptr: last used entry (index) host_r + * @dma_addr: physical addr for BD's + * @n_window: safe queue window + * @id: queue id + * @low_mark: low watermark, resume queue if free space more than this + * @high_mark: high watermark, stop queue if free space less than this + * + * A Tx queue consists of a circular buffer of BDs (a.k.a. TFDs, transmit frame + * descriptors) and required locking structures. + * + * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware + * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless + * there might be HW changes in the future). For the normal TX + * queues, n_window, which is the size of the software queue data + * is also 256; however, for the command queue, n_window is only + * 32 since we don't need so many commands pending. Since the HW + * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256. + * This means that we end up with the following: + * HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 | + * SW entries: | 0 | ... | 31 | + * where N is a number between 0 and 7. This means that the SW + * data is a window overlaid over the HW queue.
+ */ +struct iwl_txq { + void *tfds; + struct iwl_pcie_first_tb_buf *first_tb_bufs; + dma_addr_t first_tb_dma; + struct iwl_pcie_txq_entry *entries; + /* lock for syncing changes on the queue */ + spinlock_t lock; + unsigned long frozen_expiry_remainder; + struct timer_list stuck_timer; + struct iwl_trans *trans; + bool need_update; + bool frozen; + bool ampdu; + int block; + unsigned long wd_timeout; + struct sk_buff_head overflow_q; + struct iwl_dma_ptr bc_tbl; + + int write_ptr; + int read_ptr; + dma_addr_t dma_addr; + int n_window; + u32 id; + int low_mark; + int high_mark; + + bool overflow_tx; +}; + +/** + * struct iwl_trans_txqs - transport tx queues data + * + * @queue_used: bit mask of used queues + * @queue_stopped: bit mask of stopped queues + */ +struct iwl_trans_txqs { + unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)]; + unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)]; + struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES]; + struct { + u8 fifo; + u8 q_id; + unsigned int wdg_timeout; + } cmd; + +}; + /** * struct iwl_trans - transport common data * @@ -828,6 +954,7 @@ struct iwl_trans_debug { * @system_pm_mode: the system-wide power management mode in use. * This mode is set dynamically, depending on the WoWLAN values * configured from the userspace at runtime. + * @txqs: transport tx queues data. */ struct iwl_trans { const struct iwl_trans_ops *ops; @@ -875,6 +1002,7 @@ struct iwl_trans { enum iwl_plat_pm_mode system_pm_mode; const char *name; + struct iwl_trans_txqs txqs; /* pointer to trans specific struct */ /*Ensure that this pointer will always be aligned to sizeof pointer */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c index 3d2abbc5c76c..5ae22cd7ecdb 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c @@ -5,7 +5,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014, 2018 - 2020 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify @@ -26,7 +26,7 @@ * * BSD LICENSE * - * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014, 2018 - 2020 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * All rights reserved. * @@ -216,8 +216,7 @@ int iwl_mvm_send_bt_init_conf(struct iwl_mvm *mvm) goto send_cmd; } - mode = iwlwifi_mod_params.bt_coex_active ? BT_COEX_NW : BT_COEX_DISABLE; - bt_cmd.mode = cpu_to_le32(mode); + bt_cmd.mode = cpu_to_le32(BT_COEX_NW); if (IWL_MVM_BT_COEX_SYNC2SCO) bt_cmd.enabled_modules |= diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index 222775714859..2a94545d737f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -5,10 +5,9 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation. All rights reserved.
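The iwl_txq comment above describes the command queue's 32-entry software window overlaid on the 256-slot hardware ring. Numerically, a hardware index is reduced to a window entry with a power-of-two mask, as the PCIe code does in its command-index helper; a small demonstration, with the sizes hardcoded to the values from the comment:

#include <stdio.h>

#define HW_QUEUE_SIZE 256       /* TFD slots the HW always walks */
#define CMD_N_WINDOW   32       /* SW entries kept for the cmd queue */

static int sw_index(int hw_index)
{
        /* n_window is a power of two, so modulo reduces to a mask */
        return hw_index & (CMD_N_WINDOW - 1);
}

int main(void)
{
        int hw;

        /* HW slots 5, 37, 69, ... all land on SW entry 5 */
        for (hw = 5; hw < HW_QUEUE_SIZE; hw += CMD_N_WINDOW)
                printf("hw %3d -> sw %2d\n", hw, sw_index(hw));
        return 0;
}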
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -28,10 +27,9 @@ * * BSD LICENSE * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -80,9 +78,6 @@ void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw, struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - if (iwlwifi_mod_params.swcrypto) - return; - mutex_lock(&mvm->mutex); memcpy(mvmvif->rekey_data.kek, data->kek, NL80211_KEK_LEN); @@ -843,18 +838,16 @@ iwl_mvm_wowlan_config(struct iwl_mvm *mvm, return ret; } - if (!iwlwifi_mod_params.swcrypto) { - /* - * This needs to be unlocked due to lock ordering - * constraints. Since we're in the suspend path - * that isn't really a problem though. - */ - mutex_unlock(&mvm->mutex); - ret = iwl_mvm_wowlan_config_key_params(mvm, vif, CMD_ASYNC); - mutex_lock(&mvm->mutex); - if (ret) - return ret; - } + /* + * This needs to be unlocked due to lock ordering + * constraints. Since we're in the suspend path + * that isn't really a problem though. + */ + mutex_unlock(&mvm->mutex); + ret = iwl_mvm_wowlan_config_key_params(mvm, vif, CMD_ASYNC); + mutex_lock(&mvm->mutex); + if (ret) + return ret; ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0, sizeof(*wowlan_config_cmd), @@ -1993,6 +1986,9 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) goto err; } + iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_HOST_D3_END, + NULL); + ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test, !unified_image); if (ret) goto err; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index 3beef8d077b8..8fae7e707374 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -5,10 +5,9 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -28,10 +27,9 @@ * * BSD LICENSE * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation * All rights reserved. 
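The d3.c change above keeps the existing pattern of dropping mvm->mutex around iwl_mvm_wowlan_config_key_params(), because that helper must take a lock that may never nest under mvm->mutex; the suspend path guarantees nothing else races in the gap. A pthread sketch of the same shape, with invented lock names:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;

/* must take B, and B must never be acquired while A is held */
static int helper_needing_b(void)
{
        pthread_mutex_lock(&lock_b);
        puts("doing work under B");
        pthread_mutex_unlock(&lock_b);
        return 0;
}

static int suspend_path(void)
{
        int ret;

        pthread_mutex_lock(&lock_a);
        /* ... work under A ... */

        pthread_mutex_unlock(&lock_a);  /* avoid the A -> B ordering */
        ret = helper_needing_b();
        pthread_mutex_lock(&lock_a);

        /* safe only because nothing else mutates A's state here */
        pthread_mutex_unlock(&lock_a);
        return ret;
}

int main(void)
{
        return suspend_path();
}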
* * Redistribution and use in source and binary forms, with or without @@ -481,6 +479,10 @@ static ssize_t iwl_dbgfs_amsdu_len_write(struct ieee80211_sta *sta, if (kstrtou16(buf, 0, &amsdu_len)) return -EINVAL; + /* only change from debug set <-> debug unset */ + if (amsdu_len && mvmsta->orig_amsdu_len) + return -EBUSY; + if (amsdu_len) { mvmsta->orig_amsdu_len = sta->max_amsdu_len; sta->max_amsdu_len = amsdu_len; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c index cdb87139100d..5ca45915cf7c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c @@ -164,9 +164,10 @@ static void iwl_mvm_ftm_cmd_v5(struct iwl_mvm *mvm, struct ieee80211_vif *vif, eth_broadcast_addr(cmd->range_req_bssid); } -static void iwl_mvm_ftm_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct iwl_tof_range_req_cmd *cmd, - struct cfg80211_pmsr_request *req) +static void iwl_mvm_ftm_cmd_common(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct iwl_tof_range_req_cmd *cmd, + struct cfg80211_pmsr_request *req) { int i; @@ -210,6 +211,13 @@ static void iwl_mvm_ftm_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, cmd->tsf_mac_id = cpu_to_le32(0xff); } +static void iwl_mvm_ftm_cmd_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct iwl_tof_range_req_cmd_v8 *cmd, + struct cfg80211_pmsr_request *req) +{ + iwl_mvm_ftm_cmd_common(mvm, vif, (void *)cmd, req); +} + static int iwl_mvm_ftm_target_chandef_v1(struct iwl_mvm *mvm, struct cfg80211_pmsr_request_peer *peer, @@ -382,9 +390,10 @@ iwl_mvm_ftm_put_target_v3(struct iwl_mvm *mvm, return 0; } -static int iwl_mvm_ftm_put_target_v4(struct iwl_mvm *mvm, - struct cfg80211_pmsr_request_peer *peer, - struct iwl_tof_range_req_ap_entry *target) +static int +iwl_mvm_ftm_put_target_v4(struct iwl_mvm *mvm, + struct cfg80211_pmsr_request_peer *peer, + struct iwl_tof_range_req_ap_entry_v4 *target) { int ret; @@ -394,11 +403,43 @@ static int iwl_mvm_ftm_put_target_v4(struct iwl_mvm *mvm, if (ret) return ret; - iwl_mvm_ftm_put_target_common(mvm, peer, target); + iwl_mvm_ftm_put_target_common(mvm, peer, (void *)target); return 0; } +static int +iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct cfg80211_pmsr_request_peer *peer, + struct iwl_tof_range_req_ap_entry *target) +{ + int ret; + + ret = iwl_mvm_ftm_target_chandef_v2(mvm, peer, &target->channel_num, + &target->format_bw, + &target->ctrl_ch_position); + if (ret) + return ret; + + iwl_mvm_ftm_put_target_common(mvm, peer, (void *)target); + + if (vif->bss_conf.assoc && + !memcmp(peer->addr, vif->bss_conf.bssid, ETH_ALEN)) { + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + target->sta_id = mvmvif->ap_sta_id; + } else { + target->sta_id = IWL_MVM_INVALID_STA; + } + + /* + * TODO: Beacon interval is currently unknown, so use the common value + * of 100 TUs. + */ + target->beacon_interval = cpu_to_le16(100); + return 0; +} + static int iwl_mvm_ftm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *hcmd) { u32 status; @@ -456,7 +497,7 @@ static int iwl_mvm_ftm_start_v7(struct iwl_mvm *mvm, struct ieee80211_vif *vif, * Versions 7 and 8 have the same structure except for the responders * list, so iwl_mvm_ftm_cmd_v8() can be used for version 7 too.
*/ - iwl_mvm_ftm_cmd(mvm, vif, (void *)&cmd_v7, req); + iwl_mvm_ftm_cmd_v8(mvm, vif, (void *)&cmd_v7, req); for (i = 0; i < cmd_v7.num_of_ap; i++) { struct cfg80211_pmsr_request_peer *peer = &req->peers[i]; @@ -472,7 +513,7 @@ static int iwl_mvm_ftm_start_v7(struct iwl_mvm *mvm, struct ieee80211_vif *vif, static int iwl_mvm_ftm_start_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_pmsr_request *req) { - struct iwl_tof_range_req_cmd cmd; + struct iwl_tof_range_req_cmd_v8 cmd; struct iwl_host_cmd hcmd = { .id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0), .dataflags[0] = IWL_HCMD_DFL_DUP, @@ -482,7 +523,7 @@ static int iwl_mvm_ftm_start_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u8 i; int err; - iwl_mvm_ftm_cmd(mvm, vif, &cmd, req); + iwl_mvm_ftm_cmd_v8(mvm, vif, (void *)&cmd, req); for (i = 0; i < cmd.num_of_ap; i++) { struct cfg80211_pmsr_request_peer *peer = &req->peers[i]; @@ -495,6 +536,33 @@ static int iwl_mvm_ftm_start_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif, return iwl_mvm_ftm_send_cmd(mvm, &hcmd); } +static int iwl_mvm_ftm_start_v9(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct cfg80211_pmsr_request *req) +{ + struct iwl_tof_range_req_cmd cmd; + struct iwl_host_cmd hcmd = { + .id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0), + .dataflags[0] = IWL_HCMD_DFL_DUP, + .data[0] = &cmd, + .len[0] = sizeof(cmd), + }; + u8 i; + int err; + + iwl_mvm_ftm_cmd_common(mvm, vif, &cmd, req); + + for (i = 0; i < cmd.num_of_ap; i++) { + struct cfg80211_pmsr_request_peer *peer = &req->peers[i]; + struct iwl_tof_range_req_ap_entry *target = &cmd.ap[i]; + + err = iwl_mvm_ftm_put_target(mvm, vif, peer, target); + if (err) + return err; + } + + return iwl_mvm_ftm_send_cmd(mvm, &hcmd); +} + int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_pmsr_request *req) { @@ -511,11 +579,18 @@ int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP, TOF_RANGE_REQ_CMD); - if (cmd_ver == 8) + switch (cmd_ver) { + case 9: + case 10: + err = iwl_mvm_ftm_start_v9(mvm, vif, req); + break; + case 8: err = iwl_mvm_ftm_start_v8(mvm, vif, req); - else + break; + default: err = iwl_mvm_ftm_start_v7(mvm, vif, req); - + break; + } } else { err = iwl_mvm_ftm_start_v5(mvm, vif, req); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 164fc9e98c86..95a613537047 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -87,50 +87,6 @@ struct iwl_mvm_alive_data { u32 scd_base_addr; }; -/* set device type and latency */ -static int iwl_set_soc_latency(struct iwl_mvm *mvm) -{ - struct iwl_soc_configuration_cmd cmd = {}; - int ret; - - /* - * In VER_1 of this command, the discrete value is considered - * an integer; In VER_2, it's a bitmask. Since we have only 2 - * values in VER_1, this is backwards-compatible with VER_2, - * as long as we don't set any other bits. 
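The TOF_RANGE_REQ_CMD dispatch above is a common firmware-versioning shape: a switch on the advertised command version routes to the newest matching builder (versions 9 and 10 share the v9 layout in the hunk) and falls back to the oldest supported one. A stub sketch of that dispatch; the handlers here are placeholders, not the driver's builders:

#include <stdio.h>

static int start_v7(void) { puts("v7 layout"); return 0; }
static int start_v8(void) { puts("v8 layout"); return 0; }
static int start_v9(void) { puts("v9 layout"); return 0; }

static int ftm_start(int cmd_ver)
{
        switch (cmd_ver) {
        case 9:
        case 10:
                return start_v9();      /* v10 reuses the v9 layout */
        case 8:
                return start_v8();
        default:
                return start_v7();      /* oldest supported fallback */
        }
}

int main(void)
{
        return ftm_start(10);
}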
- */ - if (!mvm->trans->trans_cfg->integrated) - cmd.flags = cpu_to_le32(SOC_CONFIG_CMD_FLAGS_DISCRETE); - - BUILD_BUG_ON(IWL_CFG_TRANS_LTR_DELAY_NONE != - SOC_FLAGS_LTR_APPLY_DELAY_NONE); - BUILD_BUG_ON(IWL_CFG_TRANS_LTR_DELAY_200US != - SOC_FLAGS_LTR_APPLY_DELAY_200); - BUILD_BUG_ON(IWL_CFG_TRANS_LTR_DELAY_2500US != - SOC_FLAGS_LTR_APPLY_DELAY_2500); - BUILD_BUG_ON(IWL_CFG_TRANS_LTR_DELAY_1820US != - SOC_FLAGS_LTR_APPLY_DELAY_1820); - - if (mvm->trans->trans_cfg->ltr_delay != IWL_CFG_TRANS_LTR_DELAY_NONE && - !WARN_ON(!mvm->trans->trans_cfg->integrated)) - cmd.flags |= le32_encode_bits(mvm->trans->trans_cfg->ltr_delay, - SOC_FLAGS_LTR_APPLY_DELAY_MASK); - - if (iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP, - SCAN_REQ_UMAC) >= 2 && - mvm->trans->trans_cfg->low_latency_xtal) - cmd.flags |= cpu_to_le32(SOC_CONFIG_CMD_FLAGS_LOW_LATENCY); - - cmd.latency = cpu_to_le32(mvm->trans->trans_cfg->xtal_latency); - - ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SOC_CONFIGURATION_CMD, - SYSTEM_GROUP, 0), 0, - sizeof(cmd), &cmd); - if (ret) - IWL_ERR(mvm, "Failed to set soc latency: %d\n", ret); - return ret; -} - static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant) { struct iwl_tx_ant_cfg_cmd tx_ant_cmd = { @@ -787,13 +743,12 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b) union { struct iwl_dev_tx_power_cmd v5; struct iwl_dev_tx_power_cmd_v4 v4; - } cmd; - + } cmd = { + .v5.v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS), + }; int ret; u16 len = 0; - cmd.v5.v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS); - if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_REDUCE_TX_POWER)) len = sizeof(cmd.v5); @@ -1033,6 +988,44 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm) if (ret < 0) IWL_DEBUG_RADIO(mvm, "failed to send TAS_CONFIG (%d)\n", ret); } + +static bool iwl_mvm_eval_dsm_indonesia_5g2(struct iwl_mvm *mvm) +{ + int ret = iwl_acpi_get_dsm_u8((&mvm->fwrt)->dev, 0, + DSM_FUNC_ENABLE_INDONESIA_5G2); + + IWL_DEBUG_RADIO(mvm, + "Evaluated DSM function ENABLE_INDONESIA_5G2, ret=%d\n", + ret); + + return ret == 1; +} + +static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm) +{ + int ret; + struct iwl_lari_config_change_cmd cmd = {}; + + if (iwl_mvm_eval_dsm_indonesia_5g2(mvm)) + cmd.config_bitmap |= + cpu_to_le32(LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK); + + /* apply more config masks here */ + + if (cmd.config_bitmap) { + IWL_DEBUG_RADIO(mvm, + "sending LARI_CONFIG_CHANGE, config_bitmap=0x%x\n", + le32_to_cpu(cmd.config_bitmap)); + ret = iwl_mvm_send_cmd_pdu(mvm, + WIDE_ID(REGULATORY_AND_NVM_GROUP, + LARI_CONFIG_CHANGE), + 0, sizeof(cmd), &cmd); + if (ret < 0) + IWL_DEBUG_RADIO(mvm, + "Failed to send LARI_CONFIG_CHANGE (%d)\n", + ret); + } +} #else /* CONFIG_ACPI */ inline int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, @@ -1064,6 +1057,10 @@ static int iwl_mvm_ppag_init(struct iwl_mvm *mvm) static void iwl_mvm_tas_init(struct iwl_mvm *mvm) { } + +static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm) +{ +} #endif /* CONFIG_ACPI */ void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags) @@ -1238,7 +1235,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm) if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SOC_LATENCY_SUPPORT)) { - ret = iwl_set_soc_latency(mvm); + ret = iwl_set_soc_latency(&mvm->fwrt); if (ret) goto error; } @@ -1338,6 +1335,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm) if (ret) goto error; + iwl_mvm_lari_cfg(mvm); /* * RTNL is not taken during Ct-kill, but we don't need to scan/Tx * anyway, so don't init MCC. 
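iwl_mvm_lari_cfg() above accumulates per-region configuration bits and sends LARI_CONFIG_CHANGE only when at least one bit is set, so platforms without any override skip the command entirely. A standalone model of that accumulate-then-maybe-send pattern; the probe function and bit value are stand-ins for the ACPI DSM evaluation and LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK:

#include <stdint.h>
#include <stdio.h>

#define ENABLE_5G2_IN_INDONESIA (1u << 0)   /* illustrative bit */

static int platform_allows_indonesia_5g2(void)
{
        return 1;       /* pretend the ACPI DSM evaluated to "enabled" */
}

static void send_config_change(uint32_t bitmap)
{
        printf("sending LARI_CONFIG_CHANGE, config_bitmap=0x%x\n", bitmap);
}

int main(void)
{
        uint32_t bitmap = 0;

        if (platform_allows_indonesia_5g2())
                bitmap |= ENABLE_5G2_IN_INDONESIA;

        /* further config bits would be OR-ed in here */

        if (bitmap) /* skip the command entirely when nothing is set */
                send_config_change(bitmap);
        return 0;
}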
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 853ba7b8bf3f..77916231ff7d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -475,23 +475,23 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) hw->wiphy->n_cipher_suites++; } - /* Enable 11w if software crypto is not enabled (as the - * firmware will interpret some mgmt packets, so enabling it - * with software crypto isn't safe). - */ - if (!iwlwifi_mod_params.swcrypto) { - ieee80211_hw_set(hw, MFP_CAPABLE); + if (iwlwifi_mod_params.swcrypto) + IWL_ERR(mvm, + "iwlmvm doesn't allow to disable HW crypto, check swcrypto module parameter\n"); + if (!iwlwifi_mod_params.bt_coex_active) + IWL_ERR(mvm, + "iwlmvm doesn't allow to disable BT Coex, check bt_coex_active module parameter\n"); + + ieee80211_hw_set(hw, MFP_CAPABLE); + mvm->ciphers[hw->wiphy->n_cipher_suites] = WLAN_CIPHER_SUITE_AES_CMAC; + hw->wiphy->n_cipher_suites++; + if (iwl_mvm_has_new_rx_api(mvm)) { mvm->ciphers[hw->wiphy->n_cipher_suites] = - WLAN_CIPHER_SUITE_AES_CMAC; + WLAN_CIPHER_SUITE_BIP_GMAC_128; + hw->wiphy->n_cipher_suites++; + mvm->ciphers[hw->wiphy->n_cipher_suites] = + WLAN_CIPHER_SUITE_BIP_GMAC_256; hw->wiphy->n_cipher_suites++; - if (iwl_mvm_has_new_rx_api(mvm)) { - mvm->ciphers[hw->wiphy->n_cipher_suites] = - WLAN_CIPHER_SUITE_BIP_GMAC_128; - hw->wiphy->n_cipher_suites++; - mvm->ciphers[hw->wiphy->n_cipher_suites] = - WLAN_CIPHER_SUITE_BIP_GMAC_256; - hw->wiphy->n_cipher_suites++; - } } /* currently FW API supports only one optional cipher scheme */ @@ -697,10 +697,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) WIPHY_WOWLAN_EAP_IDENTITY_REQ | WIPHY_WOWLAN_RFKILL_RELEASE | WIPHY_WOWLAN_NET_DETECT; - if (!iwlwifi_mod_params.swcrypto) - mvm->wowlan.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | - WIPHY_WOWLAN_GTK_REKEY_FAILURE | - WIPHY_WOWLAN_4WAY_HANDSHAKE; + mvm->wowlan.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | + WIPHY_WOWLAN_GTK_REKEY_FAILURE | + WIPHY_WOWLAN_4WAY_HANDSHAKE; mvm->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS; mvm->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN; @@ -1209,14 +1208,13 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm) */ flush_work(&mvm->roc_done_wk); + iwl_mvm_rm_aux_sta(mvm); + iwl_mvm_stop_device(mvm); iwl_mvm_async_handlers_purge(mvm); /* async_handlers_list is empty and will stay empty: HW is stopped */ - /* the fw is stopped, the aux sta is dead: clean up driver state */ - iwl_mvm_del_aux_sta(mvm); - /* * Clear IN_HW_RESTART and HW_RESTART_REQUESTED flag when stopping the * hw (as restart_complete() won't be called in this case) and mac80211 @@ -2180,6 +2178,15 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm, flags |= STA_CTXT_HE_PACKET_EXT; } } + + if (sta->he_cap.he_cap_elem.mac_cap_info[2] & + IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP) + flags |= STA_CTXT_HE_32BIT_BA_BITMAP; + + if (sta->he_cap.he_cap_elem.mac_cap_info[2] & + IEEE80211_HE_MAC_CAP2_ACK_EN) + flags |= STA_CTXT_HE_ACK_ENABLED; + rcu_read_unlock(); /* Mark MU EDCA as enabled, unless none detected on some AC */ @@ -2204,11 +2211,6 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm, cpu_to_le16(mu_edca->mu_edca_timer); } - if (vif->bss_conf.multi_sta_back_32bit) - flags |= STA_CTXT_HE_32BIT_BA_BITMAP; - - if (vif->bss_conf.ack_enabled) - flags |= STA_CTXT_HE_ACK_ENABLED; if (vif->bss_conf.uora_exists) { flags |= STA_CTXT_HE_TRIG_RND_ALLOC; @@ -3366,11 +3368,6 @@ static int __iwl_mvm_mac_set_key(struct 
ieee80211_hw *hw, int ret, i; u8 key_offset; - if (iwlwifi_mod_params.swcrypto) { - IWL_DEBUG_MAC80211(mvm, "leave - hwcrypto disabled\n"); - return -EOPNOTSUPP; - } - switch (key->cipher) { case WLAN_CIPHER_SUITE_TKIP: if (!mvm->trans->trans_cfg->gen2) { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 9e2a0858108c..e2f7f6ec711e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -5,10 +5,9 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -28,10 +27,9 @@ * * BSD LICENSE * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -134,12 +132,10 @@ extern const struct ieee80211_ops iwl_mvm_hw_ops; * We will register to mac80211 to have testmode working. The NIC must not * be up'ed after the INIT fw asserted. This is useful to be able to use * proprietary tools over testmode to debug the INIT fw. - * @tfd_q_hang_detect: enabled the detection of hung transmit queues * @power_scheme: one of enum iwl_power_scheme */ struct iwl_mvm_mod_params { bool init_dbg; - bool tfd_q_hang_detect; int power_scheme; }; extern struct iwl_mvm_mod_params iwlmvm_mod_params; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index d0afc806706d..d095ff847be9 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -5,10 +5,9 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -28,10 +27,9 @@ * * BSD LICENSE * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -86,7 +84,7 @@ #define DRV_DESCRIPTION "The new Intel(R) wireless AGN driver for Linux" MODULE_DESCRIPTION(DRV_DESCRIPTION); -MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); +MODULE_AUTHOR(DRV_AUTHOR); MODULE_LICENSE("GPL"); static const struct iwl_op_mode_ops iwl_mvm_ops; @@ -94,7 +92,6 @@ static const struct iwl_op_mode_ops iwl_mvm_ops_mq; struct iwl_mvm_mod_params iwlmvm_mod_params = { .power_scheme = IWL_POWER_SCHEME_BPS, - .tfd_q_hang_detect = true /* rest of fields are 0 by default */ }; @@ -104,10 +101,6 @@ MODULE_PARM_DESC(init_dbg, module_param_named(power_scheme, iwlmvm_mod_params.power_scheme, int, 0444); MODULE_PARM_DESC(power_scheme, "power management scheme: 1-active, 2-balanced, 3-low power, default: 2"); -module_param_named(tfd_q_hang_detect, iwlmvm_mod_params.tfd_q_hang_detect, - bool, 0444); -MODULE_PARM_DESC(tfd_q_hang_detect, - "TFD queues hang detection (default: true"); /* * module init and exit functions diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c index 15d11fb72aca..6f4d241d47e9 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c @@ -369,14 +369,15 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, u16 size = le32_to_cpu(notif->amsdu_size); int i; - /* - * In debug sta->max_amsdu_len < size - * so also check with orig_amsdu_len which holds the original - * data before debugfs changed the value - */ - if (WARN_ON(sta->max_amsdu_len < size && - mvmsta->orig_amsdu_len < size)) + if (sta->max_amsdu_len < size) { + /* + * In debug sta->max_amsdu_len < size + * so also check with orig_amsdu_len which holds the + * original data before debugfs changed the value + */ + WARN_ON(mvmsta->orig_amsdu_len < size); goto out; + } mvmsta->amsdu_enabled = le32_to_cpu(notif->amsdu_enabled); mvmsta->max_amsdu_len = size; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index 246265904b98..a7264b282d79 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c @@ -1,10 +1,9 @@ // SPDX-License-Identifier: GPL-2.0-only /****************************************************************************** * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2005 - 2014, 2018 - 2020 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation * * Contact Information: * Intel Linux Wireless <linuxwifi@intel.com> @@ -1430,7 +1429,8 @@ static u32 rs_bw_from_sta_bw(struct ieee80211_sta *sta) */ if (ieee80211_get_vht_max_nss(&vht_cap, IEEE80211_VHT_CHANWIDTH_160MHZ, - 0, true) < sta->rx_nss) + 0, true, + sta->rx_nss) < sta->rx_nss) return RATE_MCS_CHAN_WIDTH_80; return RATE_MCS_CHAN_WIDTH_160; case IEEE80211_STA_RX_BW_80: diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 56ae72debb96..fee01cbbd3ac 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -5,10 +5,9 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved. + * Copyright(c) 2012 - 2015, 2018 - 2020 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -28,10 +27,9 @@ * * BSD LICENSE * - * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved. + * Copyright(c) 2012 - 2015, 2018 - 2020 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -751,16 +749,23 @@ static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, size = max_t(u32, IWL_MGMT_QUEUE_SIZE, mvm->trans->cfg->min_txq_size); } - queue = iwl_trans_txq_alloc(mvm->trans, - cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE), - sta_id, tid, SCD_QUEUE_CFG, size, timeout); - if (queue < 0) { - IWL_DEBUG_TX_QUEUES(mvm, - "Failed allocating TXQ for sta %d tid %d, ret: %d\n", - sta_id, tid, queue); + do { + __le16 enable = cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE); + + queue = iwl_trans_txq_alloc(mvm->trans, enable, + sta_id, tid, SCD_QUEUE_CFG, + size, timeout); + + if (queue < 0) + IWL_DEBUG_TX_QUEUES(mvm, + "Failed allocating TXQ of size %d for sta %d tid %d, ret: %d\n", + size, sta_id, tid, queue); + size /= 2; + } while (queue < 0 && size >= 16); + + if (queue < 0) return queue; - } IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n", queue, sta_id, tid); @@ -1395,7 +1400,17 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk) if (tid == IEEE80211_NUM_TIDS) tid = IWL_MAX_TID_COUNT; - iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid); + /* + * We can't really do much here, but if this fails we can't + * transmit anyway - so just don't transmit the frame etc. + * and let them back up ... we've tried our best to allocate + * a queue in the function itself. + */ + if (iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid)) { + list_del_init(&mvmtxq->list); + continue; + } + list_del_init(&mvmtxq->list); local_bh_disable(); iwl_mvm_mac_itxq_xmit(mvm->hw, txq); @@ -1965,9 +1980,8 @@ void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta) static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 queue, u8 sta_id, u8 fifo) { - unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ? - mvm->trans->trans_cfg->base_params->wd_timeout : - IWL_WATCHDOG_DISABLED; + unsigned int wdg_timeout = + mvm->trans->trans_cfg->base_params->wd_timeout; struct iwl_trans_txq_scd_cfg cfg = { .fifo = fifo, .sta_id = sta_id, @@ -1983,9 +1997,8 @@ static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 queue, static int iwl_mvm_enable_aux_snif_queue_tvqm(struct iwl_mvm *mvm, u8 sta_id) { - unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ? 
- mvm->trans->trans_cfg->base_params->wd_timeout : - IWL_WATCHDOG_DISABLED; + unsigned int wdg_timeout = + mvm->trans->trans_cfg->base_params->wd_timeout; WARN_ON(!iwl_mvm_has_new_tx_api(mvm)); @@ -2080,16 +2093,24 @@ int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) return ret; } -void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm) +int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm) { - iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta); -} + int ret; -void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm) -{ lockdep_assert_held(&mvm->mutex); + iwl_mvm_disable_txq(mvm, NULL, mvm->aux_queue, IWL_MAX_TID_COUNT, 0); + ret = iwl_mvm_rm_sta_common(mvm, mvm->aux_sta.sta_id); + if (ret) + IWL_WARN(mvm, "Failed sending remove station\n"); iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta); + + return ret; +} + +void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm) +{ + iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta); } /* diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index 8d70093847cb..da2d1ac01229 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h @@ -8,7 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2016 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2018 - 2020 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,7 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2016 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2018 - 2020 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -541,7 +541,7 @@ int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, int tid, u8 queue, bool start); int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm); -void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm); +int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm); int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index a8d0d17f79fd..2f6484e0d726 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -8,7 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2018 - 2020 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,7 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2018 - 2020 Intel Corporation * All rights reserved. 
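[Editor's note] The iwl_mvm_tvqm_enable_txq() hunk above replaces a single allocation attempt with a back-off loop: on failure the requested queue size is halved until the allocation succeeds or drops below the hardware minimum. A self-contained sketch of that loop, with a hypothetical allocator standing in for iwl_trans_txq_alloc():

#include <stdio.h>

/* Hypothetical allocator: fails (returns negative) for sizes above 64. */
static int txq_alloc(int size)
{
	return size > 64 ? -12 /* -ENOMEM */ : 7 /* queue id */;
}

int main(void)
{
	int size = 512, queue;

	/* Halve the requested size until the allocation succeeds or the
	 * size falls below the minimum the hardware supports (16). */
	do {
		queue = txq_alloc(size);
		if (queue < 0)
			fprintf(stderr, "alloc of size %d failed: %d\n",
				size, queue);
		size /= 2;
	} while (queue < 0 && size >= 16);

	if (queue < 0)
		return 1;
	printf("got queue %d\n", queue);
	return 0;
}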
* * Redistribution and use in source and binary forms, with or without @@ -920,11 +920,8 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, * No need to lock amsdu_in_ampdu_allowed since it can't be modified * during an BA session. */ - if (info->flags & IEEE80211_TX_CTL_AMPDU && - !mvmsta->tid_data[tid].amsdu_in_ampdu_allowed) - return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb); - - if (iwl_mvm_vif_low_latency(iwl_mvm_vif_from_mac80211(mvmsta->vif)) || + if ((info->flags & IEEE80211_TX_CTL_AMPDU && + !mvmsta->tid_data[tid].amsdu_in_ampdu_allowed) || !(mvmsta->amsdu_enabled & BIT(tid))) return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index 6096276cb0d0..be57b8391850 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -5,10 +5,9 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright (C) 2015 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -28,10 +27,9 @@ * * BSD LICENSE * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright (C) 2015 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -588,6 +586,23 @@ static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u8 lmac_num) IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler); } +static void iwl_mvm_dump_iml_error_log(struct iwl_mvm *mvm) +{ + struct iwl_trans *trans = mvm->trans; + u32 error; + + error = iwl_read_umac_prph(trans, UMAG_SB_CPU_2_STATUS); + + IWL_ERR(trans, "IML/ROM dump:\n"); + + if (error & 0xFFFF0000) + IWL_ERR(trans, "IML/ROM SYSASSERT:\n"); + + IWL_ERR(mvm, "0x%08X | IML/ROM error/state\n", error); + IWL_ERR(mvm, "0x%08X | IML/ROM data1\n", + iwl_read_umac_prph(trans, UMAG_SB_CPU_1_STATUS)); +} + void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm) { if (!test_bit(STATUS_DEVICE_ENABLED, &mvm->trans->status)) { @@ -603,6 +618,9 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm) iwl_mvm_dump_umac_error_log(mvm); + if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) + iwl_mvm_dump_iml_error_log(mvm); + iwl_fw_error_print_fseq_regs(&mvm->fwrt); } @@ -952,8 +970,7 @@ unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm, IWL_UCODE_TLV_CAPA_STA_PM_NOTIF) && vif && vif->type == NL80211_IFTYPE_AP) return IWL_WATCHDOG_DISABLED; - return iwlmvm_mod_params.tfd_q_hang_detect ? 
- default_timeout : IWL_WATCHDOG_DISABLED; + return default_timeout; } trigger = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c index b6a5921a63c3..1ab136600415 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c @@ -138,9 +138,17 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, case IWL_AMSDU_2K: break; case IWL_AMSDU_4K: + control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_4K; + break; case IWL_AMSDU_8K: + control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_4K; + /* if firmware supports the ext size, tell it */ + control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_EXT_8K; + break; case IWL_AMSDU_12K: control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_4K; + /* if firmware supports the ext size, tell it */ + control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_EXT_12K; break; } @@ -213,7 +221,7 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, ctxt_info_gen3->tr_idx_arr_size = cpu_to_le16(IWL_NUM_OF_TRANSFER_RINGS); ctxt_info_gen3->mtr_base_addr = - cpu_to_le64(trans_pcie->txq[trans_pcie->cmd_queue]->dma_addr); + cpu_to_le64(trans->txqs.txq[trans->txqs.cmd.q_id]->dma_addr); ctxt_info_gen3->mcr_base_addr = cpu_to_le64(trans_pcie->rxq->used_bd_dma); ctxt_info_gen3->mtr_size = diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c index b65405009d02..23abfbd096b0 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c @@ -6,7 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2018 - 2020 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -20,7 +20,7 @@ * BSD LICENSE * * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2018 - 2020 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -263,7 +263,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, /* initialize TX command queue */ ctxt_info->hcmd_cfg.cmd_queue_addr = - cpu_to_le64(trans_pcie->txq[trans_pcie->cmd_queue]->dma_addr); + cpu_to_le64(trans->txqs.txq[trans->txqs.cmd.q_id]->dma_addr); ctxt_info->hcmd_cfg.cmd_queue_size = TFD_QUEUE_CB_SIZE(IWL_CMD_QUEUE_SIZE); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 2083eb4f2f15..65d65c6baf4c 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -5,10 +5,9 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016-2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2007 - 2014, 2018 - 2020 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -28,11 +27,10 @@ * * BSD LICENSE * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * All rights reserved. 
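[Editor's note] The ctxt-info-gen3.c hunk above stops letting IWL_AMSDU_8K and IWL_AMSDU_12K fall through to the plain 4K case: every size now sets the base 4K flag, and the larger sizes additionally advertise an extension flag to firmware that supports it. A minimal bit-flag sketch of that shape; the enum and flag values are made up:

#include <stdint.h>
#include <stdio.h>

enum rb_size { RB_2K, RB_4K, RB_8K, RB_12K };

#define FLAG_RB_4K      (1u << 0)  /* base receive-buffer size   */
#define FLAG_RB_EXT_8K  (1u << 1)  /* firmware may extend to 8k  */
#define FLAG_RB_EXT_12K (1u << 2)  /* firmware may extend to 12k */

static uint32_t rb_flags(enum rb_size size)
{
	uint32_t flags = 0;

	switch (size) {
	case RB_2K:
		break;
	case RB_4K:
		flags |= FLAG_RB_4K;
		break;
	case RB_8K:
		/* base size plus the 8k extension, if firmware supports it */
		flags |= FLAG_RB_4K | FLAG_RB_EXT_8K;
		break;
	case RB_12K:
		flags |= FLAG_RB_4K | FLAG_RB_EXT_12K;
		break;
	}
	return flags;
}

int main(void)
{
	printf("8K -> 0x%x\n", rb_flags(RB_8K)); /* prints 0x3 */
	return 0;
}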
* Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2005 - 2014, 2018 - 2020 Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -592,107 +590,72 @@ static const struct iwl_dev_info iwl_dev_info_table[] = { IWL_DEV_INFO(0x2723, 0x1654, iwl_ax200_cfg_cc, iwl_ax200_killer_1650x_name), IWL_DEV_INFO(0x2723, IWL_CFG_ANY, iwl_ax200_cfg_cc, iwl_ax200_name), -/* Qu with Hr */ - IWL_DEV_INFO(0x43F0, 0x0044, iwl_ax101_cfg_qu_hr, NULL), + /* QnJ with Hr */ + IWL_DEV_INFO(0x2720, IWL_CFG_ANY, iwl_qnj_b0_hr_b0_cfg, iwl_ax201_name), + + /* Qu with Hr */ IWL_DEV_INFO(0x43F0, 0x0070, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x43F0, 0x0074, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x43F0, 0x0078, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x43F0, 0x007C, iwl_ax201_cfg_qu_hr, NULL), - IWL_DEV_INFO(0x43F0, 0x0244, iwl_ax101_cfg_qu_hr, NULL), - IWL_DEV_INFO(0x43F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, NULL), - IWL_DEV_INFO(0x43F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL), IWL_DEV_INFO(0x43F0, 0x2074, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x43F0, 0x4070, iwl_ax201_cfg_qu_hr, NULL), - IWL_DEV_INFO(0x43F0, 0x4244, iwl_ax101_cfg_qu_hr, NULL), - IWL_DEV_INFO(0xA0F0, 0x0044, iwl_ax101_cfg_qu_hr, NULL), IWL_DEV_INFO(0xA0F0, 0x0070, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0xA0F0, 0x0074, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0xA0F0, 0x0078, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0xA0F0, 0x007C, iwl_ax201_cfg_qu_hr, NULL), - IWL_DEV_INFO(0xA0F0, 0x0244, iwl_ax101_cfg_qu_hr, NULL), IWL_DEV_INFO(0xA0F0, 0x0A10, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0xA0F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, NULL), IWL_DEV_INFO(0xA0F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL), IWL_DEV_INFO(0xA0F0, 0x2074, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0xA0F0, 0x4070, iwl_ax201_cfg_qu_hr, NULL), - IWL_DEV_INFO(0xA0F0, 0x4244, iwl_ax101_cfg_qu_hr, NULL), IWL_DEV_INFO(0x02F0, 0x0070, iwl_ax201_cfg_quz_hr, NULL), IWL_DEV_INFO(0x02F0, 0x0074, iwl_ax201_cfg_quz_hr, NULL), IWL_DEV_INFO(0x02F0, 0x0078, iwl_ax201_cfg_quz_hr, NULL), IWL_DEV_INFO(0x02F0, 0x007C, iwl_ax201_cfg_quz_hr, NULL), - IWL_DEV_INFO(0x02F0, 0x0244, iwl_ax101_cfg_quz_hr, NULL), IWL_DEV_INFO(0x02F0, 0x0310, iwl_ax201_cfg_quz_hr, NULL), IWL_DEV_INFO(0x02F0, 0x1651, iwl_ax1650s_cfg_quz_hr, NULL), IWL_DEV_INFO(0x02F0, 0x1652, iwl_ax1650i_cfg_quz_hr, NULL), IWL_DEV_INFO(0x02F0, 0x2074, iwl_ax201_cfg_quz_hr, NULL), IWL_DEV_INFO(0x02F0, 0x4070, iwl_ax201_cfg_quz_hr, NULL), - IWL_DEV_INFO(0x02F0, 0x4244, iwl_ax101_cfg_quz_hr, NULL), IWL_DEV_INFO(0x06F0, 0x0070, iwl_ax201_cfg_quz_hr, NULL), IWL_DEV_INFO(0x06F0, 0x0074, iwl_ax201_cfg_quz_hr, NULL), IWL_DEV_INFO(0x06F0, 0x0078, iwl_ax201_cfg_quz_hr, NULL), IWL_DEV_INFO(0x06F0, 0x007C, iwl_ax201_cfg_quz_hr, NULL), - IWL_DEV_INFO(0x06F0, 0x0244, iwl_ax101_cfg_quz_hr, NULL), IWL_DEV_INFO(0x06F0, 0x0310, iwl_ax201_cfg_quz_hr, NULL), IWL_DEV_INFO(0x06F0, 0x1651, iwl_ax1650s_cfg_quz_hr, NULL), IWL_DEV_INFO(0x06F0, 0x1652, iwl_ax1650i_cfg_quz_hr, NULL), IWL_DEV_INFO(0x06F0, 0x2074, iwl_ax201_cfg_quz_hr, NULL), IWL_DEV_INFO(0x06F0, 0x4070, iwl_ax201_cfg_quz_hr, NULL), - IWL_DEV_INFO(0x06F0, 0x4244, iwl_ax101_cfg_quz_hr, NULL), - IWL_DEV_INFO(0x34F0, 0x0044, iwl_ax101_cfg_qu_hr, NULL), IWL_DEV_INFO(0x34F0, 0x0070, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x34F0, 0x0074, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x34F0, 0x0078, iwl_ax201_cfg_qu_hr, NULL), 
IWL_DEV_INFO(0x34F0, 0x007C, iwl_ax201_cfg_qu_hr, NULL), - IWL_DEV_INFO(0x34F0, 0x0244, iwl_ax101_cfg_qu_hr, NULL), IWL_DEV_INFO(0x34F0, 0x0310, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x34F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, NULL), IWL_DEV_INFO(0x34F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL), IWL_DEV_INFO(0x34F0, 0x2074, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x34F0, 0x4070, iwl_ax201_cfg_qu_hr, NULL), - IWL_DEV_INFO(0x34F0, 0x4244, iwl_ax101_cfg_qu_hr, NULL), - IWL_DEV_INFO(0x3DF0, 0x0044, iwl_ax101_cfg_qu_hr, NULL), IWL_DEV_INFO(0x3DF0, 0x0070, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x3DF0, 0x0074, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x3DF0, 0x0078, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x3DF0, 0x007C, iwl_ax201_cfg_qu_hr, NULL), - IWL_DEV_INFO(0x3DF0, 0x0244, iwl_ax101_cfg_qu_hr, NULL), IWL_DEV_INFO(0x3DF0, 0x0310, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x3DF0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, NULL), IWL_DEV_INFO(0x3DF0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL), IWL_DEV_INFO(0x3DF0, 0x2074, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x3DF0, 0x4070, iwl_ax201_cfg_qu_hr, NULL), - IWL_DEV_INFO(0x3DF0, 0x4244, iwl_ax101_cfg_qu_hr, NULL), - IWL_DEV_INFO(0x4DF0, 0x0044, iwl_ax101_cfg_qu_hr, NULL), IWL_DEV_INFO(0x4DF0, 0x0070, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x4DF0, 0x0074, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x4DF0, 0x0078, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x4DF0, 0x007C, iwl_ax201_cfg_qu_hr, NULL), - IWL_DEV_INFO(0x4DF0, 0x0244, iwl_ax101_cfg_qu_hr, NULL), IWL_DEV_INFO(0x4DF0, 0x0310, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x4DF0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, NULL), IWL_DEV_INFO(0x4DF0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL), IWL_DEV_INFO(0x4DF0, 0x2074, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x4DF0, 0x4070, iwl_ax201_cfg_qu_hr, NULL), - IWL_DEV_INFO(0x4DF0, 0x4244, iwl_ax101_cfg_qu_hr, NULL), - - IWL_DEV_INFO(0x2720, 0x0000, iwl22000_2ax_cfg_qnj_hr_b0, NULL), - IWL_DEV_INFO(0x2720, 0x0040, iwl22000_2ax_cfg_qnj_hr_b0, NULL), - IWL_DEV_INFO(0x2720, 0x0044, iwl22000_2ax_cfg_qnj_hr_b0, NULL), - IWL_DEV_INFO(0x2720, 0x0070, iwl22000_2ax_cfg_qnj_hr_b0, NULL), - IWL_DEV_INFO(0x2720, 0x0074, iwl22000_2ax_cfg_qnj_hr_b0, NULL), - IWL_DEV_INFO(0x2720, 0x0078, iwl22000_2ax_cfg_qnj_hr_b0, NULL), - IWL_DEV_INFO(0x2720, 0x007C, iwl22000_2ax_cfg_qnj_hr_b0, NULL), - IWL_DEV_INFO(0x2720, 0x0244, iwl22000_2ax_cfg_qnj_hr_b0, NULL), - IWL_DEV_INFO(0x2720, 0x0310, iwl22000_2ax_cfg_qnj_hr_b0, NULL), - IWL_DEV_INFO(0x2720, 0x0A10, iwl22000_2ax_cfg_qnj_hr_b0, NULL), - IWL_DEV_INFO(0x2720, 0x1080, iwl22000_2ax_cfg_qnj_hr_b0, NULL), - IWL_DEV_INFO(0x2720, 0x1651, iwl22000_2ax_cfg_qnj_hr_b0, NULL), - IWL_DEV_INFO(0x2720, 0x1652, iwl22000_2ax_cfg_qnj_hr_b0, NULL), - IWL_DEV_INFO(0x2720, 0x2074, iwl22000_2ax_cfg_qnj_hr_b0, NULL), - IWL_DEV_INFO(0x2720, 0x4070, iwl22000_2ax_cfg_qnj_hr_b0, NULL), - IWL_DEV_INFO(0x2720, 0x4244, iwl22000_2ax_cfg_qnj_hr_b0, NULL), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, @@ -791,7 +754,7 @@ static const struct iwl_dev_info iwl_dev_info_table[] = { IWL_CFG_NO_160, IWL_CFG_CORES_BT, iwl9260_2ac_cfg, iwl9260_name), - /* Qu with Jf */ +/* Qu with Jf */ /* Qu B step */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, @@ -967,6 +930,29 @@ static const struct iwl_dev_info iwl_dev_info_table[] = { IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_NO_160, IWL_CFG_CORES_BT, iwl9560_qnj_b0_jf_b0_cfg, iwl9560_killer_1550i_name), + +/* Qu with Hr */ + /* Qu B 
step */ + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, + IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, + iwl_qu_b0_hr1_b0, iwl_ax101_name), + + /* Qu C step */ + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, + IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, + iwl_qu_c0_hr1_b0, iwl_ax101_name), + + /* QuZ */ + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, + iwl_quz_a0_hr1_b0, iwl_ax101_name), + #endif /* CONFIG_IWLMVM */ }; @@ -1064,29 +1050,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF4)) { iwl_trans->cfg = &iwlax411_2ax_cfg_so_gf4_a0; } - } else if (cfg == &iwl_ax101_cfg_qu_hr) { - if ((CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) == - CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) && - iwl_trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) || - (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) == - CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR1))) { - iwl_trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0; - } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) == - CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) && - iwl_trans->hw_rev == CSR_HW_REV_TYPE_QUZ) { - iwl_trans->cfg = &iwl_ax101_cfg_quz_hr; - } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) == - CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) { - iwl_trans->cfg = &iwl_ax101_cfg_qu_hr; - } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) == - CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HRCDB)) { - IWL_ERR(iwl_trans, "RF ID HRCDB is not supported\n"); - return -EINVAL; - } else { - IWL_ERR(iwl_trans, "Unrecognized RF ID 0x%08x\n", - CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id)); - return -EINVAL; - } } /* @@ -1096,9 +1059,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) * rest must be removed once we convert Qu with Hr as well. */ if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QU_C0) { - if (iwl_trans->cfg == &iwl_ax101_cfg_qu_hr) - iwl_trans->cfg = &iwl_ax101_cfg_qu_c0_hr_b0; - else if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr) + if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr) iwl_trans->cfg = &iwl_ax201_cfg_qu_c0_hr_b0; else if (iwl_trans->cfg == &killer1650s_2ax_cfg_qu_b0_hr_b0) iwl_trans->cfg = &killer1650s_2ax_cfg_qu_c0_hr_b0; @@ -1108,10 +1069,12 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* same thing for QuZ... */ if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QUZ) { - if (iwl_trans->cfg == &iwl_ax101_cfg_qu_hr) - iwl_trans->cfg = &iwl_ax101_cfg_quz_hr; - else if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr) + if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr) iwl_trans->cfg = &iwl_ax201_cfg_quz_hr; + else if (iwl_trans->cfg == &killer1650s_2ax_cfg_qu_b0_hr_b0) + iwl_trans->cfg = &iwl_ax1650s_cfg_quz_hr; + else if (iwl_trans->cfg == &killer1650i_2ax_cfg_qu_b0_hr_b0) + iwl_trans->cfg = &iwl_ax1650i_cfg_quz_hr; } #endif @@ -1162,12 +1125,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* register transport layer debugfs here */ iwl_trans_pcie_dbgfs_register(iwl_trans); - /* The PCI device starts with a reference taken and we are - * supposed to release it here. But to simplify the - * interaction with the opmode, we don't do it now, but let - * the opmode release it when it's ready. 
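[Editor's note] The drv.c churn above trades long per-subsystem-ID entry lists and an if/else RF-ID chain in iwl_pci_probe() for wildcard (_IWL_DEV_INFO) rows matched on MAC type, silicon step and RF type. A toy version of that table-driven lookup, with invented fields and values:

#include <stdio.h>

#define ANY 0xffff

struct dev_info {
	unsigned int subsys;   /* PCI subsystem id, or ANY */
	unsigned int mac_type; /* MAC silicon type, or ANY */
	unsigned int rf_type;  /* RF silicon type, or ANY  */
	const char *cfg;       /* configuration to use     */
};

/* Ordered table: specific rows first, wildcard rows last. */
static const struct dev_info table[] = {
	{ 0x1651, ANY,    ANY,    "killer1650s" },
	{ ANY,    0x0035, 0x0010, "ax101"       },
	{ ANY,    0x0035, ANY,    "ax201"       },
};

static const char *lookup(unsigned int subsys, unsigned int mac,
			  unsigned int rf)
{
	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		const struct dev_info *d = &table[i];

		if ((d->subsys == ANY || d->subsys == subsys) &&
		    (d->mac_type == ANY || d->mac_type == mac) &&
		    (d->rf_type == ANY || d->rf_type == rf))
			return d->cfg;
	}
	return NULL;
}

int main(void)
{
	printf("%s\n", lookup(0x0070, 0x0035, 0x0010)); /* prints ax101 */
	return 0;
}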
- */ - return 0; out_free_trans: diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index abe649af689c..55808ba10d27 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -189,6 +189,8 @@ struct iwl_rx_completion_desc { * @rb_stts_dma: bus address of receive buffer status * @lock: * @queue: actual rx queue. Not used for multi-rx queue. + * @next_rb_is_fragment: indicates that the previous RB that we handled set + * the fragmented flag, so the next one is still another fragment * * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers */ @@ -214,7 +216,7 @@ struct iwl_rxq { u32 queue_size; struct list_head rx_free; struct list_head rx_used; - bool need_update; + bool need_update, next_rb_is_fragment; void *rb_stts; dma_addr_t rb_stts_dma; spinlock_t lock; @@ -244,12 +246,6 @@ struct iwl_rb_allocator { struct work_struct rx_alloc; }; -struct iwl_dma_ptr { - dma_addr_t dma; - void *addr; - size_t size; -}; - /** * iwl_queue_inc_wrap - increment queue index, wrap back to beginning * @index -- current index @@ -288,107 +284,6 @@ static inline int iwl_queue_dec_wrap(struct iwl_trans *trans, int index) (trans->trans_cfg->base_params->max_tfd_queue_size - 1); } -struct iwl_cmd_meta { - /* only for SYNC commands, iff the reply skb is wanted */ - struct iwl_host_cmd *source; - u32 flags; - u32 tbs; -}; - -/* - * The FH will write back to the first TB only, so we need to copy some data - * into the buffer regardless of whether it should be mapped or not. - * This indicates how big the first TB must be to include the scratch buffer - * and the assigned PN. - * Since PN location is 8 bytes at offset 12, it's 20 now. - * If we make it bigger then allocations will be bigger and copy slower, so - * that's probably not useful. 
- */ -#define IWL_FIRST_TB_SIZE 20 -#define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64) - -struct iwl_pcie_txq_entry { - void *cmd; - struct sk_buff *skb; - /* buffer to free after command completes */ - const void *free_buf; - struct iwl_cmd_meta meta; -}; - -struct iwl_pcie_first_tb_buf { - u8 buf[IWL_FIRST_TB_SIZE_ALIGN]; -}; - -/** - * struct iwl_txq - Tx Queue for DMA - * @q: generic Rx/Tx queue descriptor - * @tfds: transmit frame descriptors (DMA memory) - * @first_tb_bufs: start of command headers, including scratch buffers, for - * the writeback -- this is DMA memory and an array holding one buffer - * for each command on the queue - * @first_tb_dma: DMA address for the first_tb_bufs start - * @entries: transmit entries (driver state) - * @lock: queue lock - * @stuck_timer: timer that fires if queue gets stuck - * @trans_pcie: pointer back to transport (for timer) - * @need_update: indicates need to update read/write index - * @ampdu: true if this queue is an ampdu queue for an specific RA/TID - * @wd_timeout: queue watchdog timeout (jiffies) - per queue - * @frozen: tx stuck queue timer is frozen - * @frozen_expiry_remainder: remember how long until the timer fires - * @bc_tbl: byte count table of the queue (relevant only for gen2 transport) - * @write_ptr: 1-st empty entry (index) host_w - * @read_ptr: last used entry (index) host_r - * @dma_addr: physical addr for BD's - * @n_window: safe queue window - * @id: queue id - * @low_mark: low watermark, resume queue if free space more than this - * @high_mark: high watermark, stop queue if free space less than this - * - * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame - * descriptors) and required locking structures. - * - * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware - * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless - * there might be HW changes in the future). For the normal TX - * queues, n_window, which is the size of the software queue data - * is also 256; however, for the command queue, n_window is only - * 32 since we don't need so many commands pending. Since the HW - * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256. - * This means that we end up with the following: - * HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 | - * SW entries: | 0 | ... | 31 | - * where N is a number between 0 and 7. This means that the SW - * data is a window overlayed over the HW queue. 
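[Editor's note] The iwl_txq kerneldoc removed above describes the command queue as a 32-entry software window overlaid on a 256-entry hardware ring. A small sketch of the index arithmetic that makes the overlay work; the sizes follow the comment but are hard-coded here for illustration:

#include <stdio.h>

#define HW_ENTRIES 256 /* the hardware always assumes this many TFDs */
#define SW_WINDOW   32 /* software only tracks this many entries     */

/* Map a hardware ring index to its slot in the software window. */
static int sw_index(int hw_index)
{
	return hw_index & (SW_WINDOW - 1);
}

/* Advance a hardware index, wrapping at the ring size. */
static int hw_inc_wrap(int hw_index)
{
	return (hw_index + 1) & (HW_ENTRIES - 1);
}

int main(void)
{
	/* HW indices 0..255 reuse SW entries 0..31 cyclically, so e.g.
	 * HW slots 5, 37, 69, ... all share SW entry 5. */
	printf("hw 37  -> sw %d\n", sw_index(37));          /* 5 */
	printf("hw 255 -> next hw %d\n", hw_inc_wrap(255)); /* 0 */
	return 0;
}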
- */ -struct iwl_txq { - void *tfds; - struct iwl_pcie_first_tb_buf *first_tb_bufs; - dma_addr_t first_tb_dma; - struct iwl_pcie_txq_entry *entries; - spinlock_t lock; - unsigned long frozen_expiry_remainder; - struct timer_list stuck_timer; - struct iwl_trans_pcie *trans_pcie; - bool need_update; - bool frozen; - bool ampdu; - int block; - unsigned long wd_timeout; - struct sk_buff_head overflow_q; - struct iwl_dma_ptr bc_tbl; - - int write_ptr; - int read_ptr; - dma_addr_t dma_addr; - int n_window; - u32 id; - int low_mark; - int high_mark; - - bool overflow_tx; -}; - static inline dma_addr_t iwl_pcie_get_first_tb_dma(struct iwl_txq *txq, int idx) { @@ -556,11 +451,9 @@ struct iwl_trans_pcie { u32 scd_base_addr; struct iwl_dma_ptr scd_bc_tbls; struct iwl_dma_ptr kw; + struct dma_pool *bc_pool; struct iwl_txq *txq_memory; - struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES]; - unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)]; - unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)]; /* PCI bus related data */ struct pci_dev *pci_dev; @@ -574,10 +467,7 @@ struct iwl_trans_pcie { u8 page_offs, dev_cmd_offs; - u8 cmd_queue; u8 def_rx_queue; - u8 cmd_fifo; - unsigned int cmd_q_wdg_timeout; u8 n_no_reclaim_cmds; u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS]; u8 max_tbs; @@ -980,9 +870,7 @@ void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans); static inline void iwl_wake_queue(struct iwl_trans *trans, struct iwl_txq *txq) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - - if (test_and_clear_bit(txq->id, trans_pcie->queue_stopped)) { + if (test_and_clear_bit(txq->id, trans->txqs.queue_stopped)) { IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id); iwl_op_mode_queue_not_full(trans->op_mode, txq->id); } @@ -991,9 +879,7 @@ static inline void iwl_wake_queue(struct iwl_trans *trans, static inline void iwl_stop_queue(struct iwl_trans *trans, struct iwl_txq *txq) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - - if (!test_and_set_bit(txq->id, trans_pcie->queue_stopped)) { + if (!test_and_set_bit(txq->id, trans->txqs.queue_stopped)) { iwl_op_mode_queue_full(trans->op_mode, txq->id); IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id); } else diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index 8c29071cb415..24cb1b1f21f0 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -1284,7 +1284,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, int i) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue]; + struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id]; bool page_stolen = false; int max_len = trans_pcie->rx_buf_bytes; u32 offset = 0; @@ -1427,7 +1427,8 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, } static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans, - struct iwl_rxq *rxq, int i) + struct iwl_rxq *rxq, int i, + bool *join) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rx_mem_buffer *rxb; @@ -1441,10 +1442,12 @@ static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans, return rxb; } - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { vid = le16_to_cpu(rxq->cd[i].rbid); - else + *join = rxq->cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED; + } else { vid = 
le32_to_cpu(rxq->bd_32[i]) & 0x0FFF; /* 12-bit VID */ + } if (!vid || vid > RX_POOL_SIZE(trans_pcie->num_rx_bufs)) goto out_err; @@ -1502,6 +1505,7 @@ restart: u32 rb_pending_alloc = atomic_read(&trans_pcie->rba.req_pending) * RX_CLAIM_REQ_ALLOC; + bool join = false; if (unlikely(rb_pending_alloc >= rxq->queue_size / 2 && !emergency)) { @@ -1514,11 +1518,29 @@ restart: IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i); - rxb = iwl_pcie_get_rxb(trans, rxq, i); + rxb = iwl_pcie_get_rxb(trans, rxq, i, &join); if (!rxb) goto out; - iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency, i); + if (unlikely(join || rxq->next_rb_is_fragment)) { + rxq->next_rb_is_fragment = join; + /* + * We can only get a multi-RB in the following cases: + * - firmware issue, sending a too big notification + * - sniffer mode with a large A-MSDU + * - large MTU frames (>2k) + * since the multi-RB functionality is limited to newer + * hardware that cannot put multiple entries into a + * single RB. + * + * Right now, the higher layers aren't set up to deal + * with that, so discard all of these. + */ + list_add_tail(&rxb->list, &rxq->rx_free); + rxq->free_count++; + } else { + iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency, i); + } i = (i + 1) & (rxq->queue_size - 1); @@ -1649,9 +1671,9 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans) } for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) { - if (!trans_pcie->txq[i]) + if (!trans->txqs.txq[i]) continue; - del_timer(&trans_pcie->txq[i]->stuck_timer); + del_timer(&trans->txqs.txq[i]->stuck_timer); } /* The STATUS_FW_ERROR bit is set in this function. This must happen diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c index 19a2c72081ab..97c9e9c87436 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c @@ -6,7 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2018 - 2020 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -20,7 +20,7 @@ * BSD LICENSE * * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2018 - 2020 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -245,7 +245,7 @@ static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans) return -ENOMEM; /* Allocate or reset and init all Tx and Command queues */ - if (iwl_pcie_gen2_tx_init(trans, trans_pcie->cmd_queue, queue_size)) + if (iwl_pcie_gen2_tx_init(trans, trans->txqs.cmd.q_id, queue_size)) return -ENOMEM; /* enable shadow regs in HW */ @@ -262,8 +262,9 @@ void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr) iwl_pcie_reset_ict(trans); /* make sure all queue are not stopped/used */ - memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped)); - memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used)); + memset(trans->txqs.queue_stopped, 0, + sizeof(trans->txqs.queue_stopped)); + memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used)); /* now that we got alive we can free the fw image & the context info. 
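[Editor's note] The rx.c hunk above threads a "join" flag out of iwl_pcie_get_rxb() so that a frame spanning several receive buffers is discarded rather than mis-parsed: the flagged RB, and every following RB until the flag clears, goes straight back to the free list. A stripped-down model of that carry-over state; the types and data are invented:

#include <stdbool.h>
#include <stdio.h>

struct rb {
	bool fragmented; /* HW set the "continues in next RB" flag */
	int payload;
};

int main(void)
{
	/* One fragmented pair (indices 1 and 2) among normal buffers. */
	struct rb ring[] = {
		{ false, 10 }, { true, 20 }, { false, 21 }, { false, 30 },
	};
	bool next_is_fragment = false;

	for (size_t i = 0; i < sizeof(ring) / sizeof(ring[0]); i++) {
		bool join = ring[i].fragmented;

		if (join || next_is_fragment) {
			/* part of a multi-RB frame: recycle, don't parse */
			next_is_fragment = join;
			printf("skip rb %zu\n", i);
			continue;
		}
		printf("handle rb %zu (payload %d)\n", i, ring[i].payload);
	}
	return 0;
}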
* paging memory cannot be freed included since FW will still use it diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index a0daae058c1c..e5160d620868 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -5,10 +5,9 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2007 - 2015 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2007 - 2015, 2018 - 2020 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -28,10 +27,9 @@ * * BSD LICENSE * - * Copyright(c) 2005 - 2015 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2007 - 2015, 2018 - 2020 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -1495,14 +1493,10 @@ static int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, int ret; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - /* - * Family IWL_DEVICE_FAMILY_AX210 and above persist mode is set by FW. - */ - if (!reset && trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) { + if (!reset) /* Enable persistence mode to avoid reset */ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_PERSIST_MODE); - } if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6, @@ -1910,9 +1904,9 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans, { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - trans_pcie->cmd_queue = trans_cfg->cmd_queue; - trans_pcie->cmd_fifo = trans_cfg->cmd_fifo; - trans_pcie->cmd_q_wdg_timeout = trans_cfg->cmd_q_wdg_timeout; + trans->txqs.cmd.q_id = trans_cfg->cmd_queue; + trans->txqs.cmd.fifo = trans_cfg->cmd_fifo; + trans->txqs.cmd.wdg_timeout = trans_cfg->cmd_q_wdg_timeout; if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS)) trans_pcie->n_no_reclaim_cmds = 0; else @@ -2205,11 +2199,10 @@ static void iwl_trans_pcie_freeze_txq_timer(struct iwl_trans *trans, unsigned long txqs, bool freeze) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int queue; for_each_set_bit(queue, &txqs, BITS_PER_LONG) { - struct iwl_txq *txq = trans_pcie->txq[queue]; + struct iwl_txq *txq = trans->txqs.txq[queue]; unsigned long now; spin_lock_bh(&txq->lock); @@ -2257,13 +2250,12 @@ next_queue: static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int i; for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) { - struct iwl_txq *txq = trans_pcie->txq[i]; + struct iwl_txq *txq = trans->txqs.txq[i]; - if (i == trans_pcie->cmd_queue) + if (i == trans->txqs.cmd.q_id) continue; spin_lock_bh(&txq->lock); @@ -2332,7 +2324,6 @@ static int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue, static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_txq *txq; unsigned long now = jiffies; bool overflow_tx; @@ 
-2342,11 +2333,11 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx) if (test_bit(STATUS_TRANS_DEAD, &trans->status)) return -ENODEV; - if (!test_bit(txq_idx, trans_pcie->queue_used)) + if (!test_bit(txq_idx, trans->txqs.queue_used)) return -EINVAL; IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", txq_idx); - txq = trans_pcie->txq[txq_idx]; + txq = trans->txqs.txq[txq_idx]; spin_lock_bh(&txq->lock); overflow_tx = txq->overflow_tx || @@ -2394,7 +2385,6 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx) static int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int cnt; int ret = 0; @@ -2403,9 +2393,9 @@ static int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm) cnt < trans->trans_cfg->base_params->num_of_queues; cnt++) { - if (cnt == trans_pcie->cmd_queue) + if (cnt == trans->txqs.cmd.q_id) continue; - if (!test_bit(cnt, trans_pcie->queue_used)) + if (!test_bit(cnt, trans->txqs.queue_used)) continue; if (!(BIT(cnt) & txq_bm)) continue; @@ -2579,13 +2569,12 @@ static int iwl_dbgfs_tx_queue_seq_show(struct seq_file *seq, void *v) struct iwl_dbgfs_tx_queue_priv *priv = seq->private; struct iwl_dbgfs_tx_queue_state *state = v; struct iwl_trans *trans = priv->trans; - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq = trans_pcie->txq[state->pos]; + struct iwl_txq *txq = trans->txqs.txq[state->pos]; seq_printf(seq, "hwq %.3u: used=%d stopped=%d ", (unsigned int)state->pos, - !!test_bit(state->pos, trans_pcie->queue_used), - !!test_bit(state->pos, trans_pcie->queue_stopped)); + !!test_bit(state->pos, trans->txqs.queue_used), + !!test_bit(state->pos, trans->txqs.queue_stopped)); if (txq) seq_printf(seq, "read=%u write=%u need_update=%d frozen=%d n_window=%d ampdu=%d", @@ -2595,7 +2584,7 @@ static int iwl_dbgfs_tx_queue_seq_show(struct seq_file *seq, void *v) else seq_puts(seq, "(unallocated)"); - if (state->pos == trans_pcie->cmd_queue) + if (state->pos == trans->txqs.cmd.q_id) seq_puts(seq, " (HCMD)"); seq_puts(seq, "\n"); @@ -3271,7 +3260,7 @@ static struct iwl_trans_dump_data { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_fw_error_dump_data *data; - struct iwl_txq *cmdq = trans_pcie->txq[trans_pcie->cmd_queue]; + struct iwl_txq *cmdq = trans->txqs.txq[trans->txqs.cmd.q_id]; struct iwl_fw_error_dump_txcmd *txcmd; struct iwl_trans_dump_data *dump_data; u32 len, num_rbs = 0, monitor_len = 0; @@ -3672,6 +3661,25 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, init_waitqueue_head(&trans_pcie->sx_waitq); + /* + * For gen2 devices, we use a single allocation for each byte-count + * table, but they're pretty small (1k) so use a DMA pool that we + * allocate here. 
+ */ + if (cfg_trans->gen2) { + size_t bc_tbl_size; + + if (cfg_trans->device_family >= IWL_DEVICE_FAMILY_AX210) + bc_tbl_size = sizeof(struct iwl_gen3_bc_tbl); + else + bc_tbl_size = sizeof(struct iwlagn_scd_bc_tbl); + + trans_pcie->bc_pool = dmam_pool_create("iwlwifi:bc", &pdev->dev, + bc_tbl_size, 256, 0); + if (!trans_pcie->bc_pool) + goto out_no_pci; + } + if (trans_pcie->msix_enabled) { ret = iwl_pcie_init_msix_handler(pdev, trans_pcie); if (ret) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c index 53747ac945b8..7fc7542535d8 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c @@ -6,7 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2018 - 2020 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -20,7 +20,7 @@ * BSD LICENSE * * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2018 - 2020 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -64,7 +64,6 @@ */ void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int txq_id; /* @@ -72,12 +71,13 @@ void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans) * queues. This happens when we have an rfkill interrupt. * Since we stop Tx altogether - mark the queues as stopped. */ - memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped)); - memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used)); + memset(trans->txqs.queue_stopped, 0, + sizeof(trans->txqs.queue_stopped)); + memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used)); /* Unmap DMA from host system and free skb's */ - for (txq_id = 0; txq_id < ARRAY_SIZE(trans_pcie->txq); txq_id++) { - if (!trans_pcie->txq[txq_id]) + for (txq_id = 0; txq_id < ARRAY_SIZE(trans->txqs.txq); txq_id++) { + if (!trans->txqs.txq[txq_id]) continue; iwl_pcie_gen2_txq_unmap(trans, txq_id); } @@ -716,7 +716,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_cmd_meta *out_meta; - struct iwl_txq *txq = trans_pcie->txq[txq_id]; + struct iwl_txq *txq = trans->txqs.txq[txq_id]; u16 cmd_len; int idx; void *tfd; @@ -725,7 +725,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, "queue %d out of range", txq_id)) return -EINVAL; - if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used), + if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used), "TX on unused queue %d\n", txq_id)) return -EINVAL; @@ -819,7 +819,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue]; + struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id]; struct iwl_device_cmd *out_cmd; struct iwl_cmd_meta *out_meta; unsigned long flags; @@ -931,7 +931,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans, cpu_to_le16(cmd_size - sizeof(struct iwl_cmd_header_wide)); out_cmd->hdr_wide.reserved = 0; out_cmd->hdr_wide.sequence = - cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) | + 
cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) | INDEX_TO_SEQ(txq->write_ptr)); cmd_pos = sizeof(struct iwl_cmd_header_wide); @@ -979,7 +979,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans, "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n", iwl_get_cmd_string(trans, cmd->id), group_id, out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence), - cmd_size, txq->write_ptr, idx, trans_pcie->cmd_queue); + cmd_size, txq->write_ptr, idx, trans->txqs.cmd.q_id); /* start the TFD with the minimum copy bytes */ tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE); @@ -1056,7 +1056,7 @@ static int iwl_pcie_gen2_send_hcmd_sync(struct iwl_trans *trans, { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); const char *cmd_str = iwl_get_cmd_string(trans, cmd->id); - struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue]; + struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id]; int cmd_idx; int ret; @@ -1175,14 +1175,14 @@ int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans, void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq = trans_pcie->txq[txq_id]; + struct iwl_txq *txq = trans->txqs.txq[txq_id]; spin_lock_bh(&txq->lock); while (txq->write_ptr != txq->read_ptr) { IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n", txq_id, txq->read_ptr); - if (txq_id != trans_pcie->cmd_queue) { + if (txq_id != trans->txqs.cmd.q_id) { int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr); struct sk_buff *skb = txq->entries[idx].skb; @@ -1224,7 +1224,9 @@ void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans, } kfree(txq->entries); - iwl_pcie_free_dma_ptr(trans, &txq->bc_tbl); + if (txq->bc_tbl.addr) + dma_pool_free(trans_pcie->bc_pool, txq->bc_tbl.addr, + txq->bc_tbl.dma); kfree(txq); } @@ -1238,7 +1240,6 @@ void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans, */ static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_txq *txq; int i; @@ -1246,7 +1247,7 @@ static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id) "queue %d out of range", txq_id)) return; - txq = trans_pcie->txq[txq_id]; + txq = trans->txqs.txq[txq_id]; if (WARN_ON(!txq)) return; @@ -1254,7 +1255,7 @@ static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id) iwl_pcie_gen2_txq_unmap(trans, txq_id); /* De-alloc array of command/tx buffers */ - if (txq_id == trans_pcie->cmd_queue) + if (txq_id == trans->txqs.cmd.q_id) for (i = 0; i < txq->n_window; i++) { kzfree(txq->entries[i].cmd); kzfree(txq->entries[i].free_buf); @@ -1263,27 +1264,38 @@ static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id) iwl_pcie_gen2_txq_free_memory(trans, txq); - trans_pcie->txq[txq_id] = NULL; + trans->txqs.txq[txq_id] = NULL; - clear_bit(txq_id, trans_pcie->queue_used); + clear_bit(txq_id, trans->txqs.queue_used); } int iwl_trans_pcie_dyn_txq_alloc_dma(struct iwl_trans *trans, struct iwl_txq **intxq, int size, unsigned int timeout) { + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + size_t bc_tbl_size, bc_tbl_entries; + struct iwl_txq *txq; int ret; - struct iwl_txq *txq; + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { + bc_tbl_size = sizeof(struct iwl_gen3_bc_tbl); + bc_tbl_entries = bc_tbl_size / sizeof(u16); + } else { + bc_tbl_size = sizeof(struct iwlagn_scd_bc_tbl); + bc_tbl_entries = bc_tbl_size / sizeof(u16); + 
} + + if (WARN_ON(size > bc_tbl_entries)) + return -EINVAL; + txq = kzalloc(sizeof(*txq), GFP_KERNEL); if (!txq) return -ENOMEM; - ret = iwl_pcie_alloc_dma_ptr(trans, &txq->bc_tbl, - (trans->trans_cfg->device_family >= - IWL_DEVICE_FAMILY_AX210) ? - sizeof(struct iwl_gen3_bc_tbl) : - sizeof(struct iwlagn_scd_bc_tbl)); - if (ret) { + + txq->bc_tbl.addr = dma_pool_alloc(trans_pcie->bc_pool, GFP_KERNEL, + &txq->bc_tbl.dma); + if (!txq->bc_tbl.addr) { IWL_ERR(trans, "Scheduler BC Table allocation failed\n"); kfree(txq); return -ENOMEM; @@ -1314,7 +1326,6 @@ int iwl_trans_pcie_txq_alloc_response(struct iwl_trans *trans, struct iwl_txq *txq, struct iwl_host_cmd *hcmd) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_tx_queue_cfg_rsp *rsp; int ret, qid; u32 wr_ptr; @@ -1329,20 +1340,20 @@ int iwl_trans_pcie_txq_alloc_response(struct iwl_trans *trans, qid = le16_to_cpu(rsp->queue_number); wr_ptr = le16_to_cpu(rsp->write_pointer); - if (qid >= ARRAY_SIZE(trans_pcie->txq)) { + if (qid >= ARRAY_SIZE(trans->txqs.txq)) { WARN_ONCE(1, "queue index %d unsupported", qid); ret = -EIO; goto error_free_resp; } - if (test_and_set_bit(qid, trans_pcie->queue_used)) { + if (test_and_set_bit(qid, trans->txqs.queue_used)) { WARN_ONCE(1, "queue %d already used", qid); ret = -EIO; goto error_free_resp; } txq->id = qid; - trans_pcie->txq[qid] = txq; + trans->txqs.txq[qid] = txq; wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1); /* Place first TFD at index corresponding to start sequence number */ @@ -1400,8 +1411,6 @@ error: void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - if (WARN(queue >= IWL_MAX_TVQM_QUEUES, "queue %d out of range", queue)) return; @@ -1412,7 +1421,7 @@ void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue) * allow the op_mode to call txq_disable after it already called * stop_device. 
*/ - if (!test_and_clear_bit(queue, trans_pcie->queue_used)) { + if (!test_and_clear_bit(queue, trans->txqs.queue_used)) { WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status), "queue %d not used", queue); return; @@ -1420,22 +1429,21 @@ void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue) iwl_pcie_gen2_txq_unmap(trans, queue); - iwl_pcie_gen2_txq_free_memory(trans, trans_pcie->txq[queue]); - trans_pcie->txq[queue] = NULL; + iwl_pcie_gen2_txq_free_memory(trans, trans->txqs.txq[queue]); + trans->txqs.txq[queue] = NULL; IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue); } void iwl_pcie_gen2_tx_free(struct iwl_trans *trans) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int i; - memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used)); + memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used)); /* Free all TX queues */ - for (i = 0; i < ARRAY_SIZE(trans_pcie->txq); i++) { - if (!trans_pcie->txq[i]) + for (i = 0; i < ARRAY_SIZE(trans->txqs.txq); i++) { + if (!trans->txqs.txq[i]) continue; iwl_pcie_gen2_txq_free(trans, i); @@ -1444,35 +1452,34 @@ void iwl_pcie_gen2_tx_free(struct iwl_trans *trans) int iwl_pcie_gen2_tx_init(struct iwl_trans *trans, int txq_id, int queue_size) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_txq *queue; int ret; /* alloc and init the tx queue */ - if (!trans_pcie->txq[txq_id]) { + if (!trans->txqs.txq[txq_id]) { queue = kzalloc(sizeof(*queue), GFP_KERNEL); if (!queue) { IWL_ERR(trans, "Not enough memory for tx queue\n"); return -ENOMEM; } - trans_pcie->txq[txq_id] = queue; + trans->txqs.txq[txq_id] = queue; ret = iwl_pcie_txq_alloc(trans, queue, queue_size, true); if (ret) { IWL_ERR(trans, "Tx %d queue init failed\n", txq_id); goto error; } } else { - queue = trans_pcie->txq[txq_id]; + queue = trans->txqs.txq[txq_id]; } ret = iwl_pcie_txq_init(trans, queue, queue_size, - (txq_id == trans_pcie->cmd_queue)); + (txq_id == trans->txqs.cmd.q_id)); if (ret) { IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id); goto error; } - trans_pcie->txq[txq_id]->id = txq_id; - set_bit(txq_id, trans_pcie->queue_used); + trans->txqs.txq[txq_id]->id = txq_id; + set_bit(txq_id, trans->txqs.queue_used); return 0; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index 4582d418ba4d..5c6c3fa0d29f 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -8,7 +8,7 @@ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2018 - 2020 Intel Corporation * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as @@ -31,7 +31,7 @@ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2018 - 2020 Intel Corporation * All rights reserved. 
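[Editor's note] Several hunks above (trans.c and tx-gen2.c) move the per-queue byte-count tables from individual coherent allocations to a single dma_pool created at probe time, which suits many small, identically sized buffers. As a reminder of the pool API's shape, here is a hedged kernel-style fragment; the device pointer, sizes and names are placeholders, not the driver's real values:

#include <linux/dmapool.h>
#include <linux/errno.h>
#include <linux/pci.h>

/*
 * Sketch only: create a managed pool of small, fixed-size DMA buffers
 * (released automatically on driver detach thanks to dmam_*), then
 * carve one buffer out of it and hand it back.
 */
static int demo_bc_pool(struct pci_dev *pdev)
{
	struct dma_pool *pool;
	dma_addr_t dma;
	void *vaddr;

	/* 1 KiB buffers, 256-byte aligned, no boundary constraint */
	pool = dmam_pool_create("demo:bc", &pdev->dev, 1024, 256, 0);
	if (!pool)
		return -ENOMEM;

	vaddr = dma_pool_alloc(pool, GFP_KERNEL, &dma);
	if (!vaddr)
		return -ENOMEM;

	/* ... program 'dma' into the device, use 'vaddr' from the CPU ... */

	dma_pool_free(pool, vaddr, dma);
	return 0;
}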
* * Redistribution and use in source and binary forms, with or without @@ -183,8 +183,7 @@ void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr) static void iwl_pcie_txq_stuck_timer(struct timer_list *t) { struct iwl_txq *txq = from_timer(txq, t, stuck_timer); - struct iwl_trans_pcie *trans_pcie = txq->trans_pcie; - struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie); + struct iwl_trans *trans = txq->trans; spin_lock(&txq->lock); /* check if triggered erroneously */ @@ -262,7 +261,7 @@ static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans, WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX); - if (txq_id != trans_pcie->cmd_queue) + if (txq_id != trans->txqs.cmd.q_id) sta_id = tx_cmd->sta_id; bc_ent = cpu_to_le16(1 | (sta_id << 12)); @@ -280,7 +279,6 @@ static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans, static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u32 reg = 0; int txq_id = txq->id; @@ -293,7 +291,7 @@ static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, * 3. there is a chance that the NIC is asleep */ if (!trans->trans_cfg->base_params->shadow_reg_enable && - txq_id != trans_pcie->cmd_queue && + txq_id != trans->txqs.cmd.q_id && test_bit(STATUS_TPOWER_PMI, &trans->status)) { /* * wake up nic if it's powered down ... @@ -324,13 +322,12 @@ static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int i; for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) { - struct iwl_txq *txq = trans_pcie->txq[i]; + struct iwl_txq *txq = trans->txqs.txq[i]; - if (!test_bit(i, trans_pcie->queue_used)) + if (!test_bit(i, trans->txqs.queue_used)) continue; spin_lock_bh(&txq->lock); @@ -535,7 +532,7 @@ int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, tfd_sz = trans_pcie->tfd_size * slots_num; timer_setup(&txq->stuck_timer, iwl_pcie_txq_stuck_timer, 0); - txq->trans_pcie = trans_pcie; + txq->trans = trans; txq->n_window = slots_num; @@ -661,14 +658,14 @@ static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans) static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq = trans_pcie->txq[txq_id]; + struct iwl_txq *txq = trans->txqs.txq[txq_id]; spin_lock_bh(&txq->lock); while (txq->write_ptr != txq->read_ptr) { IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n", txq_id, txq->read_ptr); - if (txq_id != trans_pcie->cmd_queue) { + if (txq_id != trans->txqs.cmd.q_id) { struct sk_buff *skb = txq->entries[txq->read_ptr].skb; if (WARN_ON_ONCE(!skb)) @@ -683,7 +680,7 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) unsigned long flags; spin_lock_irqsave(&trans_pcie->reg_lock, flags); - if (txq_id == trans_pcie->cmd_queue) + if (txq_id == trans->txqs.cmd.q_id) iwl_pcie_clear_cmd_in_flight(trans); spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); } @@ -712,7 +709,7 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq = trans_pcie->txq[txq_id]; + struct iwl_txq *txq = trans->txqs.txq[txq_id]; struct device *dev = trans->dev; int i; @@ -722,7 +719,7 @@ static void iwl_pcie_txq_free(struct 
iwl_trans *trans, int txq_id) iwl_pcie_txq_unmap(trans, txq_id); /* De-alloc array of command/tx buffers */ - if (txq_id == trans_pcie->cmd_queue) + if (txq_id == trans->txqs.cmd.q_id) for (i = 0; i < txq->n_window; i++) { kzfree(txq->entries[i].cmd); kzfree(txq->entries[i].free_buf); @@ -761,8 +758,9 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr) SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32); /* make sure all queue are not stopped/used */ - memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped)); - memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used)); + memset(trans->txqs.queue_stopped, 0, + sizeof(trans->txqs.queue_stopped)); + memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used)); trans_pcie->scd_base_addr = iwl_read_prph(trans, SCD_SRAM_BASE_ADDR); @@ -784,9 +782,9 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr) if (trans->trans_cfg->base_params->scd_chain_ext_wa) iwl_write_prph(trans, SCD_CHAINEXT_EN, 0); - iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue, - trans_pcie->cmd_fifo, - trans_pcie->cmd_q_wdg_timeout); + iwl_trans_ac_txq_enable(trans, trans->txqs.cmd.q_id, + trans->txqs.cmd.fifo, + trans->txqs.cmd.wdg_timeout); /* Activate all Tx DMA/FIFO channels */ iwl_scd_activate_fifos(trans); @@ -822,7 +820,7 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans) for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues; txq_id++) { - struct iwl_txq *txq = trans_pcie->txq[txq_id]; + struct iwl_txq *txq = trans->txqs.txq[txq_id]; if (trans->trans_cfg->use_tfh) iwl_write_direct64(trans, FH_MEM_CBBC_QUEUE(trans, txq_id), @@ -898,8 +896,9 @@ int iwl_pcie_tx_stop(struct iwl_trans *trans) * queues. This happens when we have an rfkill interrupt. * Since we stop Tx altogether - mark the queues as stopped. */ - memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped)); - memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used)); + memset(trans->txqs.queue_stopped, 0, + sizeof(trans->txqs.queue_stopped)); + memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used)); /* This can happen: start_hw, stop_device */ if (!trans_pcie->txq_memory) @@ -923,7 +922,7 @@ void iwl_pcie_tx_free(struct iwl_trans *trans) int txq_id; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used)); + memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used)); /* Tx queues */ if (trans_pcie->txq_memory) { @@ -931,7 +930,7 @@ void iwl_pcie_tx_free(struct iwl_trans *trans) txq_id < trans->trans_cfg->base_params->num_of_queues; txq_id++) { iwl_pcie_txq_free(trans, txq_id); - trans_pcie->txq[txq_id] = NULL; + trans->txqs.txq[txq_id] = NULL; } } @@ -954,10 +953,10 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans) struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u16 bc_tbls_size = trans->trans_cfg->base_params->num_of_queues; - bc_tbls_size *= (trans->trans_cfg->device_family >= - IWL_DEVICE_FAMILY_AX210) ? - sizeof(struct iwl_gen3_bc_tbl) : - sizeof(struct iwlagn_scd_bc_tbl); + if (WARN_ON(trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)) + return -EINVAL; + + bc_tbls_size *= sizeof(struct iwlagn_scd_bc_tbl); /*It is not allowed to alloc twice, so warn when this happens. 
* We cannot rely on the previous allocation, so free and fail */ @@ -992,7 +991,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans) /* Alloc and init all Tx queues, including the command queue (#4/#9) */ for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues; txq_id++) { - bool cmd_queue = (txq_id == trans_pcie->cmd_queue); + bool cmd_queue = (txq_id == trans->txqs.cmd.q_id); if (cmd_queue) slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE, @@ -1000,14 +999,14 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans) else slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE, trans->cfg->min_256_ba_txq_size); - trans_pcie->txq[txq_id] = &trans_pcie->txq_memory[txq_id]; - ret = iwl_pcie_txq_alloc(trans, trans_pcie->txq[txq_id], + trans->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id]; + ret = iwl_pcie_txq_alloc(trans, trans->txqs.txq[txq_id], slots_num, cmd_queue); if (ret) { IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id); goto error; } - trans_pcie->txq[txq_id]->id = txq_id; + trans->txqs.txq[txq_id]->id = txq_id; } return 0; @@ -1046,7 +1045,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans) /* Alloc and init all Tx queues, including the command queue (#4/#9) */ for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues; txq_id++) { - bool cmd_queue = (txq_id == trans_pcie->cmd_queue); + bool cmd_queue = (txq_id == trans->txqs.cmd.q_id); if (cmd_queue) slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE, @@ -1054,7 +1053,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans) else slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE, trans->cfg->min_256_ba_txq_size); - ret = iwl_pcie_txq_init(trans, trans_pcie->txq[txq_id], + ret = iwl_pcie_txq_init(trans, trans->txqs.txq[txq_id], slots_num, cmd_queue); if (ret) { IWL_ERR(trans, "Tx %d queue init failed\n", txq_id); @@ -1068,7 +1067,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans) * Circular buffer (TFD queue in DRAM) physical base address */ iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id), - trans_pcie->txq[txq_id]->dma_addr >> 8); + trans->txqs.txq[txq_id]->dma_addr >> 8); } iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE); @@ -1113,18 +1112,18 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, struct sk_buff_head *skbs) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq = trans_pcie->txq[txq_id]; + struct iwl_txq *txq = trans->txqs.txq[txq_id]; int tfd_num = iwl_pcie_get_cmd_index(txq, ssn); int read_ptr = iwl_pcie_get_cmd_index(txq, txq->read_ptr); int last_to_free; /* This function is not meant to release cmd queue*/ - if (WARN_ON(txq_id == trans_pcie->cmd_queue)) + if (WARN_ON(txq_id == trans->txqs.cmd.q_id)) return; spin_lock_bh(&txq->lock); - if (!test_bit(txq_id, trans_pcie->queue_used)) { + if (!test_bit(txq_id, trans->txqs.queue_used)) { IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n", txq_id, ssn); goto out; @@ -1176,7 +1175,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, iwl_pcie_txq_progress(txq); if (iwl_queue_space(trans, txq) > txq->low_mark && - test_bit(txq_id, trans_pcie->queue_stopped)) { + test_bit(txq_id, trans->txqs.queue_stopped)) { struct sk_buff_head overflow_skbs; __skb_queue_head_init(&overflow_skbs); @@ -1229,8 +1228,7 @@ out: /* Set wr_ptr of specific device and txq */ void iwl_trans_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq = 
trans_pcie->txq[txq_id]; + struct iwl_txq *txq = trans->txqs.txq[txq_id]; spin_lock_bh(&txq->lock); @@ -1290,7 +1288,7 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans, static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq = trans_pcie->txq[txq_id]; + struct iwl_txq *txq = trans->txqs.txq[txq_id]; unsigned long flags; int nfreed = 0; u16 r; @@ -1302,7 +1300,7 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx) if (idx >= trans->trans_cfg->base_params->max_tfd_queue_size || (!iwl_queue_used(txq, idx))) { - WARN_ONCE(test_bit(txq_id, trans_pcie->queue_used), + WARN_ONCE(test_bit(txq_id, trans->txqs.queue_used), "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n", __func__, txq_id, idx, trans->trans_cfg->base_params->max_tfd_queue_size, @@ -1364,11 +1362,11 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn, unsigned int wdg_timeout) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq = trans_pcie->txq[txq_id]; + struct iwl_txq *txq = trans->txqs.txq[txq_id]; int fifo = -1; bool scd_bug = false; - if (test_and_set_bit(txq_id, trans_pcie->queue_used)) + if (test_and_set_bit(txq_id, trans->txqs.queue_used)) WARN_ONCE(1, "queue %d already used - expect issues", txq_id); txq->wd_timeout = msecs_to_jiffies(wdg_timeout); @@ -1377,7 +1375,7 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn, fifo = cfg->fifo; /* Disable the scheduler prior configuring the cmd queue */ - if (txq_id == trans_pcie->cmd_queue && + if (txq_id == trans->txqs.cmd.q_id && trans_pcie->scd_set_active) iwl_scd_enable_set_active(trans, 0); @@ -1385,7 +1383,7 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn, iwl_scd_txq_set_inactive(trans, txq_id); /* Set this queue as a chain-building queue unless it is CMD */ - if (txq_id != trans_pcie->cmd_queue) + if (txq_id != trans->txqs.cmd.q_id) iwl_scd_txq_set_chain(trans, txq_id); if (cfg->aggregate) { @@ -1455,7 +1453,7 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn, SCD_QUEUE_STTS_REG_MSK); /* enable the scheduler for this queue (only) */ - if (txq_id == trans_pcie->cmd_queue && + if (txq_id == trans->txqs.cmd.q_id && trans_pcie->scd_set_active) iwl_scd_enable_set_active(trans, BIT(txq_id)); @@ -1474,8 +1472,7 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn, void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id, bool shared_mode) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq = trans_pcie->txq[txq_id]; + struct iwl_txq *txq = trans->txqs.txq[txq_id]; txq->ampdu = !shared_mode; } @@ -1488,8 +1485,8 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id, SCD_TX_STTS_QUEUE_OFFSET(txq_id); static const u32 zero_val[4] = {}; - trans_pcie->txq[txq_id]->frozen_expiry_remainder = 0; - trans_pcie->txq[txq_id]->frozen = false; + trans->txqs.txq[txq_id]->frozen_expiry_remainder = 0; + trans->txqs.txq[txq_id]->frozen = false; /* * Upon HW Rfkill - we stop the device, and then stop the queues @@ -1497,7 +1494,7 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id, * allow the op_mode to call txq_disable after it already called * stop_device. 
*/ - if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) { + if (!test_and_clear_bit(txq_id, trans->txqs.queue_used)) { WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status), "queue %d not used", txq_id); return; @@ -1511,7 +1508,7 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id, } iwl_pcie_txq_unmap(trans, txq_id); - trans_pcie->txq[txq_id]->ampdu = false; + trans->txqs.txq[txq_id]->ampdu = false; IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id); } @@ -1531,7 +1528,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue]; + struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id]; struct iwl_device_cmd *out_cmd; struct iwl_cmd_meta *out_meta; unsigned long flags; @@ -1657,7 +1654,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, sizeof(struct iwl_cmd_header_wide)); out_cmd->hdr_wide.reserved = 0; out_cmd->hdr_wide.sequence = - cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) | + cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) | INDEX_TO_SEQ(txq->write_ptr)); cmd_pos = sizeof(struct iwl_cmd_header_wide); @@ -1665,7 +1662,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, } else { out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id); out_cmd->hdr.sequence = - cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) | + cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) | INDEX_TO_SEQ(txq->write_ptr)); out_cmd->hdr.group_id = 0; @@ -1716,7 +1713,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, iwl_get_cmd_string(trans, cmd->id), group_id, out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence), - cmd_size, txq->write_ptr, idx, trans_pcie->cmd_queue); + cmd_size, txq->write_ptr, idx, trans->txqs.cmd.q_id); /* start the TFD with the minimum copy bytes */ tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE); @@ -1816,14 +1813,14 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans, struct iwl_device_cmd *cmd; struct iwl_cmd_meta *meta; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue]; + struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id]; /* If a Tx command is being handled and it isn't in the actual * command queue then there a command routing bug has been introduced * in the queue management code. 
*/ - if (WARN(txq_id != trans_pcie->cmd_queue, + if (WARN(txq_id != trans->txqs.cmd.q_id, "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n", - txq_id, trans_pcie->cmd_queue, sequence, txq->read_ptr, + txq_id, trans->txqs.cmd.q_id, sequence, txq->read_ptr, txq->write_ptr)) { iwl_print_hex_error(trans, pkt, 32); return; @@ -1895,7 +1892,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue]; + struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id]; int cmd_idx; int ret; @@ -2129,7 +2126,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, u16 tb1_len) { struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; - struct iwl_trans_pcie *trans_pcie = txq->trans_pcie; + struct iwl_trans_pcie *trans_pcie = + IWL_TRANS_GET_PCIE_TRANS(txq->trans); struct ieee80211_hdr *hdr = (void *)skb->data; unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room; unsigned int mss = skb_shinfo(skb)->gso_size; @@ -2332,9 +2330,9 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, u16 wifi_seq; bool amsdu; - txq = trans_pcie->txq[txq_id]; + txq = trans->txqs.txq[txq_id]; - if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used), + if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used), "TX on unused queue %d\n", txq_id)) return -EINVAL; diff --git a/drivers/net/wireless/intersil/prism54/isl_oid.h b/drivers/net/wireless/intersil/prism54/isl_oid.h index 5441c1f9f2fc..1afc2ccf94ca 100644 --- a/drivers/net/wireless/intersil/prism54/isl_oid.h +++ b/drivers/net/wireless/intersil/prism54/isl_oid.h @@ -37,7 +37,7 @@ struct obj_mlmeex { u16 state; u16 code; u16 size; - u8 data[0]; + u8 data[]; } __packed; struct obj_buffer { @@ -68,12 +68,12 @@ struct obj_bss { struct obj_bsslist { u32 nr; - struct obj_bss bsslist[0]; + struct obj_bss bsslist[]; } __packed; struct obj_frequencies { u16 nr; - u16 mhz[0]; + u16 mhz[]; } __packed; struct obj_attachment { @@ -81,7 +81,7 @@ struct obj_attachment { char reserved; short id; short size; - char data[0]; + char data[]; } __packed; /* diff --git a/drivers/net/wireless/intersil/prism54/islpci_mgt.h b/drivers/net/wireless/intersil/prism54/islpci_mgt.h index d6bbbac46b4a..1f87d0aea60c 100644 --- a/drivers/net/wireless/intersil/prism54/islpci_mgt.h +++ b/drivers/net/wireless/intersil/prism54/islpci_mgt.h @@ -99,7 +99,7 @@ struct islpci_mgmtframe { pimfor_header_t *header; /* payload header, points into buf */ void *data; /* payload ex header, points into buf */ struct work_struct ws; /* argument for schedule_work() */ - char buf[0]; /* fragment buffer */ + char buf[]; /* fragment buffer */ }; int diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 0528d4cb4d37..1356e8cbe617 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -1068,6 +1068,47 @@ static int hwsim_unicast_netgroup(struct mac80211_hwsim_data *data, return res; } +static void mac80211_hwsim_config_mac_nl(struct ieee80211_hw *hw, + const u8 *addr, bool add) +{ + struct mac80211_hwsim_data *data = hw->priv; + u32 _portid = READ_ONCE(data->wmediumd); + struct sk_buff *skb; + void *msg_head; + + if (!_portid && !hwsim_virtio_enabled) + return; + + skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_ATOMIC); + if (!skb) + return; + + msg_head = genlmsg_put(skb, 0, 0, &hwsim_genl_family, 0, + add ? 
HWSIM_CMD_ADD_MAC_ADDR : + HWSIM_CMD_DEL_MAC_ADDR); + if (!msg_head) { + pr_debug("mac80211_hwsim: problem with msg_head\n"); + goto nla_put_failure; + } + + if (nla_put(skb, HWSIM_ATTR_ADDR_TRANSMITTER, + ETH_ALEN, data->addresses[1].addr)) + goto nla_put_failure; + + if (nla_put(skb, HWSIM_ATTR_ADDR_RECEIVER, ETH_ALEN, addr)) + goto nla_put_failure; + + genlmsg_end(skb, msg_head); + + if (hwsim_virtio_enabled) + hwsim_tx_virtio(data, skb); + else + hwsim_unicast_netgroup(data, skb, _portid); + return; +nla_put_failure: + nlmsg_free(skb); +} + static inline u16 trans_tx_rate_flags_ieee2hwsim(struct ieee80211_tx_rate *rate) { u16 result = 0; @@ -1545,6 +1586,9 @@ static int mac80211_hwsim_add_interface(struct ieee80211_hw *hw, vif->addr); hwsim_set_magic(vif); + if (vif->type != NL80211_IFTYPE_MONITOR) + mac80211_hwsim_config_mac_nl(hw, vif->addr, true); + vif->cab_queue = 0; vif->hw_queue[IEEE80211_AC_VO] = 0; vif->hw_queue[IEEE80211_AC_VI] = 1; @@ -1584,6 +1628,8 @@ static void mac80211_hwsim_remove_interface( vif->addr); hwsim_check_magic(vif); hwsim_clear_magic(vif); + if (vif->type != NL80211_IFTYPE_MONITOR) + mac80211_hwsim_config_mac_nl(hw, vif->addr, false); } static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw, @@ -1781,6 +1827,8 @@ static void mac80211_hwsim_configure_filter(struct ieee80211_hw *hw, data->rx_filter = 0; if (*total_flags & FIF_ALLMULTI) data->rx_filter |= FIF_ALLMULTI; + if (*total_flags & FIF_MCAST_ACTION) + data->rx_filter |= FIF_MCAST_ACTION; *total_flags = data->rx_filter; } @@ -2104,6 +2152,8 @@ static void hw_scan_work(struct work_struct *work) hwsim->hw_scan_vif = NULL; hwsim->tmp_chan = NULL; mutex_unlock(&hwsim->mutex); + mac80211_hwsim_config_mac_nl(hwsim->hw, hwsim->scan_addr, + false); return; } @@ -2177,6 +2227,7 @@ static int mac80211_hwsim_hw_scan(struct ieee80211_hw *hw, memset(hwsim->survey_data, 0, sizeof(hwsim->survey_data)); mutex_unlock(&hwsim->mutex); + mac80211_hwsim_config_mac_nl(hw, hwsim->scan_addr, true); wiphy_dbg(hw->wiphy, "hwsim hw_scan request\n"); ieee80211_queue_delayed_work(hwsim->hw, &hwsim->hw_scan, 0); @@ -2220,6 +2271,7 @@ static void mac80211_hwsim_sw_scan(struct ieee80211_hw *hw, pr_debug("hwsim sw_scan request, prepping stuff\n"); memcpy(hwsim->scan_addr, mac_addr, ETH_ALEN); + mac80211_hwsim_config_mac_nl(hw, hwsim->scan_addr, true); hwsim->scanning = true; memset(hwsim->survey_data, 0, sizeof(hwsim->survey_data)); @@ -2236,6 +2288,7 @@ static void mac80211_hwsim_sw_scan_complete(struct ieee80211_hw *hw, pr_debug("hwsim sw_scan_complete\n"); hwsim->scanning = false; + mac80211_hwsim_config_mac_nl(hw, hwsim->scan_addr, false); eth_zero_addr(hwsim->scan_addr); mutex_unlock(&hwsim->mutex); @@ -2413,6 +2466,11 @@ static void mac80211_hwsim_get_et_stats(struct ieee80211_hw *hw, WARN_ON(i != MAC80211_HWSIM_SSTATS_LEN); } +static int mac80211_hwsim_tx_last_beacon(struct ieee80211_hw *hw) +{ + return 1; +} + #define HWSIM_COMMON_OPS \ .tx = mac80211_hwsim_tx, \ .start = mac80211_hwsim_start, \ @@ -2423,6 +2481,7 @@ static void mac80211_hwsim_get_et_stats(struct ieee80211_hw *hw, .config = mac80211_hwsim_config, \ .configure_filter = mac80211_hwsim_configure_filter, \ .bss_info_changed = mac80211_hwsim_bss_info_changed, \ + .tx_last_beacon = mac80211_hwsim_tx_last_beacon, \ .sta_add = mac80211_hwsim_sta_add, \ .sta_remove = mac80211_hwsim_sta_remove, \ .sta_notify = mac80211_hwsim_sta_notify, \ @@ -2995,6 +3054,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS | 
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL | WIPHY_FLAG_AP_UAPSD | + WIPHY_FLAG_SUPPORTS_5_10_MHZ | WIPHY_FLAG_HAS_CHANNEL_SWITCH; hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR | NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE | @@ -3003,6 +3063,10 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR; wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS); wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_BEACON_PROTECTION); + wiphy_ext_feature_set(hw->wiphy, + NL80211_EXT_FEATURE_MULTICAST_REGISTRATIONS); + wiphy_ext_feature_set(hw->wiphy, + NL80211_EXT_FEATURE_BEACON_RATE_LEGACY); hw->wiphy->interface_modes = param->iftypes; diff --git a/drivers/net/wireless/mac80211_hwsim.h b/drivers/net/wireless/mac80211_hwsim.h index 28ade92adcb4..9dceed77c5d6 100644 --- a/drivers/net/wireless/mac80211_hwsim.h +++ b/drivers/net/wireless/mac80211_hwsim.h @@ -75,6 +75,12 @@ enum hwsim_tx_control_flags { * @HWSIM_CMD_DEL_RADIO: destroy a radio, reply is multicasted * @HWSIM_CMD_GET_RADIO: fetch information about existing radios, uses: * %HWSIM_ATTR_RADIO_ID + * @HWSIM_CMD_ADD_MAC_ADDR: add a receive MAC address (given in the + * %HWSIM_ATTR_ADDR_RECEIVER attribute) to a device identified by + * %HWSIM_ATTR_ADDR_TRANSMITTER. This lets wmediumd forward frames + * to this receiver address for a given station. + * @HWSIM_CMD_DEL_MAC_ADDR: remove the MAC address again, the attributes + * are the same as to @HWSIM_CMD_ADD_MAC_ADDR. * @__HWSIM_CMD_MAX: enum limit */ enum { @@ -85,6 +91,8 @@ enum { HWSIM_CMD_NEW_RADIO, HWSIM_CMD_DEL_RADIO, HWSIM_CMD_GET_RADIO, + HWSIM_CMD_ADD_MAC_ADDR, + HWSIM_CMD_DEL_MAC_ADDR, __HWSIM_CMD_MAX, }; #define HWSIM_CMD_MAX (_HWSIM_CMD_MAX - 1) diff --git a/drivers/net/wireless/marvell/libertas/rx.c b/drivers/net/wireless/marvell/libertas/rx.c index 58a1fc433b73..f28aa09d1f9e 100644 --- a/drivers/net/wireless/marvell/libertas/rx.c +++ b/drivers/net/wireless/marvell/libertas/rx.c @@ -62,9 +62,6 @@ int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb) struct rxpd *p_rx_pd; int hdrchop; struct ethhdr *p_ethhdr; - static const u8 rfc1042_eth_hdr[] = { - 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 - }; BUG_ON(!skb); @@ -102,7 +99,7 @@ int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb) sizeof(p_rx_pkt->eth803_hdr.src_addr)); if (memcmp(&p_rx_pkt->rfc1042_hdr, - rfc1042_eth_hdr, sizeof(rfc1042_eth_hdr)) == 0) { + rfc1042_header, sizeof(rfc1042_header)) == 0) { /* * Replace the 803 header and rfc1042 header (llc/snap) with an * EthernetII header, keep the src/dst and snap_type (ethertype) diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index 1566d2197906..4e4f59c17ded 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -269,17 +269,12 @@ mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, * CFG802.11 operation handler to register a mgmt frame. 
*/ static void -mwifiex_cfg80211_mgmt_frame_register(struct wiphy *wiphy, - struct wireless_dev *wdev, - u16 frame_type, bool reg) +mwifiex_cfg80211_update_mgmt_frame_registrations(struct wiphy *wiphy, + struct wireless_dev *wdev, + struct mgmt_frame_regs *upd) { struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev); - u32 mask; - - if (reg) - mask = priv->mgmt_frame_mask | BIT(frame_type >> 4); - else - mask = priv->mgmt_frame_mask & ~BIT(frame_type >> 4); + u32 mask = upd->interface_stypes; if (mask != priv->mgmt_frame_mask) { priv->mgmt_frame_mask = mask; @@ -1496,7 +1491,8 @@ mwifiex_cfg80211_dump_station(struct wiphy *wiphy, struct net_device *dev, int idx, u8 *mac, struct station_info *sinfo) { struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); - static struct mwifiex_sta_node *node; + struct mwifiex_sta_node *node; + int i; if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) && priv->media_connected && idx == 0) { @@ -1506,13 +1502,10 @@ mwifiex_cfg80211_dump_station(struct wiphy *wiphy, struct net_device *dev, mwifiex_send_cmd(priv, HOST_CMD_APCMD_STA_LIST, HostCmd_ACT_GEN_GET, 0, NULL, true); - if (node && (&node->list == &priv->sta_list)) { - node = NULL; - return -ENOENT; - } - - node = list_prepare_entry(node, &priv->sta_list, list); - list_for_each_entry_continue(node, &priv->sta_list, list) { + i = 0; + list_for_each_entry(node, &priv->sta_list, list) { + if (i++ != idx) + continue; ether_addr_copy(mac, node->mac_addr); return mwifiex_dump_station_info(priv, node, sinfo); } @@ -4189,7 +4182,8 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = { .del_key = mwifiex_cfg80211_del_key, .set_default_mgmt_key = mwifiex_cfg80211_set_default_mgmt_key, .mgmt_tx = mwifiex_cfg80211_mgmt_tx, - .mgmt_frame_register = mwifiex_cfg80211_mgmt_frame_register, + .update_mgmt_frame_registrations = + mwifiex_cfg80211_update_mgmt_frame_registrations, .remain_on_channel = mwifiex_cfg80211_remain_on_channel, .cancel_remain_on_channel = mwifiex_cfg80211_cancel_remain_on_channel, .set_default_key = mwifiex_cfg80211_set_default_key, @@ -4341,6 +4335,11 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter) wiphy->iface_combinations = &mwifiex_iface_comb_ap_sta; wiphy->n_iface_combinations = 1; + if (adapter->max_sta_conn > adapter->max_p2p_conn) + wiphy->max_ap_assoc_sta = adapter->max_sta_conn; + else + wiphy->max_ap_assoc_sta = adapter->max_p2p_conn; + /* Initialize cipher suits */ wiphy->cipher_suites = mwifiex_cipher_suites; wiphy->n_cipher_suites = ARRAY_SIZE(mwifiex_cipher_suites); diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c index 7e4b8cd52605..d068b9075c32 100644 --- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c +++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c @@ -1495,6 +1495,7 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv, struct mwifiex_adapter *adapter = priv->adapter; struct mwifiex_ie_types_header *tlv; struct hw_spec_api_rev *api_rev; + struct hw_spec_max_conn *max_conn; u16 resp_size, api_id; int i, left_len, parsed_len = 0; @@ -1581,8 +1582,21 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv, adapter->fw_api_ver = api_rev->major_ver; mwifiex_dbg(adapter, INFO, - "Firmware api version %d\n", - adapter->fw_api_ver); + "Firmware api version %d.%d\n", + adapter->fw_api_ver, + api_rev->minor_ver); + break; + case UAP_FW_API_VER_ID: + mwifiex_dbg(adapter, INFO, + "uAP api version %d.%d\n", + api_rev->major_ver, + api_rev->minor_ver); + break; + case 
CHANRPT_API_VER_ID: + mwifiex_dbg(adapter, INFO, + "channel report api version %d.%d\n", + api_rev->major_ver, + api_rev->minor_ver); break; default: mwifiex_dbg(adapter, FATAL, @@ -1591,6 +1605,17 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv, break; } break; + case TLV_TYPE_MAX_CONN: + max_conn = (struct hw_spec_max_conn *)tlv; + adapter->max_p2p_conn = max_conn->max_p2p_conn; + adapter->max_sta_conn = max_conn->max_sta_conn; + mwifiex_dbg(adapter, INFO, + "max p2p connections: %u\n", + adapter->max_p2p_conn); + mwifiex_dbg(adapter, INFO, + "max sta connections: %u\n", + adapter->max_sta_conn); + break; default: mwifiex_dbg(adapter, FATAL, "Unknown GET_HW_SPEC TLV type: %#x\n", diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h index a415d73a73e6..8047e307892e 100644 --- a/drivers/net/wireless/marvell/mwifiex/fw.h +++ b/drivers/net/wireless/marvell/mwifiex/fw.h @@ -220,6 +220,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER { #define TLV_TYPE_BSS_MODE (PROPRIETARY_TLV_BASE_ID + 206) #define TLV_TYPE_RANDOM_MAC (PROPRIETARY_TLV_BASE_ID + 236) #define TLV_TYPE_CHAN_ATTR_CFG (PROPRIETARY_TLV_BASE_ID + 237) +#define TLV_TYPE_MAX_CONN (PROPRIETARY_TLV_BASE_ID + 279) #define MWIFIEX_TX_DATA_BUF_SIZE_2K 2048 @@ -1052,6 +1053,8 @@ struct host_cmd_ds_802_11_ps_mode_enh { enum API_VER_ID { KEY_API_VER_ID = 1, FW_API_VER_ID = 2, + UAP_FW_API_VER_ID = 3, + CHANRPT_API_VER_ID = 4, }; struct hw_spec_api_rev { @@ -2386,4 +2389,11 @@ struct mwifiex_opt_sleep_confirm { __le16 action; __le16 resp_ctrl; } __packed; + +struct hw_spec_max_conn { + struct mwifiex_ie_types_header header; + u8 max_p2p_conn; + u8 max_sta_conn; +} __packed; + #endif /* !_MWIFIEX_FW_H_ */ diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h index afaffc325452..5923c5c14c8d 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.h +++ b/drivers/net/wireless/marvell/mwifiex/main.h @@ -1022,6 +1022,7 @@ struct mwifiex_adapter { bool ext_scan; u8 fw_api_ver; u8 key_api_major_ver, key_api_minor_ver; + u8 max_p2p_conn, max_sta_conn; struct memory_type_mapping *mem_type_mapping_tbl; u8 num_mem_types; bool scan_chan_gap_enabled; diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c index 47fb4b3ea004..97f23f93f6e7 100644 --- a/drivers/net/wireless/marvell/mwl8k.c +++ b/drivers/net/wireless/marvell/mwl8k.c @@ -2668,7 +2668,7 @@ struct mwl8k_cmd_mac_multicast_adr { struct mwl8k_cmd_pkt header; __le16 action; __le16 numaddr; - __u8 addr[0][ETH_ALEN]; + __u8 addr[][ETH_ALEN]; }; #define MWL8K_ENABLE_RX_DIRECTED 0x0001 diff --git a/drivers/net/wireless/mediatek/mt76/Kconfig b/drivers/net/wireless/mediatek/mt76/Kconfig index cbc2d8a5d354..41533a0e1720 100644 --- a/drivers/net/wireless/mediatek/mt76/Kconfig +++ b/drivers/net/wireless/mediatek/mt76/Kconfig @@ -24,3 +24,4 @@ source "drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig" source "drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig" source "drivers/net/wireless/mediatek/mt76/mt7603/Kconfig" source "drivers/net/wireless/mediatek/mt76/mt7615/Kconfig" +source "drivers/net/wireless/mediatek/mt76/mt7915/Kconfig" diff --git a/drivers/net/wireless/mediatek/mt76/Makefile b/drivers/net/wireless/mediatek/mt76/Makefile index d7a1ddc9e407..ef663b873b0b 100644 --- a/drivers/net/wireless/mediatek/mt76/Makefile +++ b/drivers/net/wireless/mediatek/mt76/Makefile @@ -26,4 +26,5 @@ mt76x02-usb-y := mt76x02_usb_mcu.o mt76x02_usb_core.o obj-$(CONFIG_MT76x0_COMMON) += mt76x0/ 
obj-$(CONFIG_MT76x2_COMMON) += mt76x2/ obj-$(CONFIG_MT7603E) += mt7603/ -obj-$(CONFIG_MT7615E) += mt7615/ +obj-$(CONFIG_MT7615_COMMON) += mt7615/ +obj-$(CONFIG_MT7915E) += mt7915/ diff --git a/drivers/net/wireless/mediatek/mt76/agg-rx.c b/drivers/net/wireless/mediatek/mt76/agg-rx.c index f77f03530259..df25c00d9e06 100644 --- a/drivers/net/wireless/mediatek/mt76/agg-rx.c +++ b/drivers/net/wireless/mediatek/mt76/agg-rx.c @@ -119,7 +119,7 @@ static void mt76_rx_aggr_check_ctl(struct sk_buff *skb, struct sk_buff_head *frames) { struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; - struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data; + struct ieee80211_bar *bar = mt76_skb_get_hdr(skb); struct mt76_wcid *wcid = status->wcid; struct mt76_rx_tid *tid; u16 seqno; @@ -147,13 +147,13 @@ mt76_rx_aggr_check_ctl(struct sk_buff *skb, struct sk_buff_head *frames) void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames) { struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb); struct mt76_wcid *wcid = status->wcid; struct ieee80211_sta *sta; struct mt76_rx_tid *tid; bool sn_less; - u16 seqno, head, size; - u8 ackp, idx; + u16 seqno, head, size, idx; + u8 ackp; __skb_queue_tail(frames, skb); @@ -239,7 +239,7 @@ out: } int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno, - u16 ssn, u8 size) + u16 ssn, u16 size) { struct mt76_rx_tid *tid; @@ -264,7 +264,7 @@ EXPORT_SYMBOL_GPL(mt76_rx_aggr_start); static void mt76_rx_aggr_shutdown(struct mt76_dev *dev, struct mt76_rx_tid *tid) { - u8 size = tid->size; + u16 size = tid->size; int i; spin_lock_bh(&tid->lock); diff --git a/drivers/net/wireless/mediatek/mt76/debugfs.c b/drivers/net/wireless/mediatek/mt76/debugfs.c index d2202acb8dc6..3a5de1d1b121 100644 --- a/drivers/net/wireless/mediatek/mt76/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/debugfs.c @@ -46,6 +46,25 @@ int mt76_queues_read(struct seq_file *s, void *data) } EXPORT_SYMBOL_GPL(mt76_queues_read); +static int mt76_rx_queues_read(struct seq_file *s, void *data) +{ + struct mt76_dev *dev = dev_get_drvdata(s->private); + int i, queued; + + mt76_for_each_q_rx(dev, i) { + struct mt76_queue *q = &dev->q_rx[i]; + + if (!q->ndesc) + continue; + + queued = mt76_is_usb(dev) ? 
q->ndesc - q->queued : q->queued; + seq_printf(s, "%d: queued=%d head=%d tail=%d\n", + i, queued, q->head, q->tail); + } + + return 0; +} + void mt76_seq_puts_array(struct seq_file *file, const char *str, s8 *val, int len) { @@ -92,6 +111,8 @@ struct dentry *mt76_register_debugfs(struct mt76_dev *dev) debugfs_create_blob("otp", 0400, dir, &dev->otp); debugfs_create_devm_seqfile(dev->dev, "rate_txpower", dir, mt76_read_rate_txpower); + debugfs_create_devm_seqfile(dev->dev, "rx-queues", dir, + mt76_rx_queues_read); return dir; } diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c index 75e659774e07..f4d6074fe32a 100644 --- a/drivers/net/wireless/mediatek/mt76/dma.c +++ b/drivers/net/wireless/mediatek/mt76/dma.c @@ -576,7 +576,7 @@ mt76_dma_init(struct mt76_dev *dev) init_dummy_netdev(&dev->napi_dev); - for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) { + mt76_for_each_q_rx(dev, i) { netif_napi_add(&dev->napi_dev, &dev->napi[i], mt76_dma_rx_poll, 64); mt76_dma_rx_fill(dev, &dev->q_rx[i]); @@ -610,7 +610,7 @@ void mt76_dma_cleanup(struct mt76_dev *dev) for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++) mt76_dma_tx_cleanup(dev, i, true); - for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) { + mt76_for_each_q_rx(dev, i) { netif_napi_del(&dev->napi[i]); mt76_dma_rx_cleanup(dev, &dev->q_rx[i]); } diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c index f44f99184c10..907098101898 100644 --- a/drivers/net/wireless/mediatek/mt76/mac80211.c +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c @@ -116,12 +116,12 @@ static void mt76_led_cleanup(struct mt76_dev *dev) led_classdev_unregister(&dev->led_cdev); } -static void mt76_init_stream_cap(struct mt76_dev *dev, +static void mt76_init_stream_cap(struct mt76_phy *phy, struct ieee80211_supported_band *sband, bool vht) { struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap; - int i, nstream = hweight8(dev->phy.antenna_mask); + int i, nstream = hweight8(phy->antenna_mask); struct ieee80211_sta_vht_cap *vht_cap; u16 mcs_map = 0; @@ -153,12 +153,12 @@ static void mt76_init_stream_cap(struct mt76_dev *dev, vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map); } -void mt76_set_stream_caps(struct mt76_dev *dev, bool vht) +void mt76_set_stream_caps(struct mt76_phy *phy, bool vht) { - if (dev->cap.has_2ghz) - mt76_init_stream_cap(dev, &dev->phy.sband_2g.sband, false); - if (dev->cap.has_5ghz) - mt76_init_stream_cap(dev, &dev->phy.sband_5g.sband, vht); + if (phy->dev->cap.has_2ghz) + mt76_init_stream_cap(phy, &phy->sband_2g.sband, false); + if (phy->dev->cap.has_5ghz) + mt76_init_stream_cap(phy, &phy->sband_5g.sband, vht); } EXPORT_SYMBOL_GPL(mt76_set_stream_caps); @@ -198,9 +198,8 @@ mt76_init_sband(struct mt76_dev *dev, struct mt76_sband *msband, ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; - ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_4; - mt76_init_stream_cap(dev, sband, vht); + mt76_init_stream_cap(&dev->phy, sband, vht); if (!vht) return 0; @@ -279,7 +278,8 @@ mt76_phy_init(struct mt76_dev *dev, struct ieee80211_hw *hw) SET_IEEE80211_PERM_ADDR(hw, dev->macaddr); wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR; - wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH; + wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH | + WIPHY_FLAG_SUPPORTS_TDLS; wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS); @@ -294,7 +294,6 @@ mt76_phy_init(struct mt76_dev 
*dev, struct ieee80211_hw *hw) hw->max_tx_fragments = 16; ieee80211_hw_set(hw, SIGNAL_DBM); - ieee80211_hw_set(hw, PS_NULLFUNC_STACK); ieee80211_hw_set(hw, AMPDU_AGGREGATION); ieee80211_hw_set(hw, SUPPORTS_RC_TABLE); ieee80211_hw_set(hw, SUPPORT_FAST_XMIT); @@ -314,6 +313,8 @@ mt76_phy_init(struct mt76_dev *dev, struct ieee80211_hw *hw) #ifdef CONFIG_MAC80211_MESH BIT(NL80211_IFTYPE_MESH_POINT) | #endif + BIT(NL80211_IFTYPE_P2P_CLIENT) | + BIT(NL80211_IFTYPE_P2P_GO) | BIT(NL80211_IFTYPE_ADHOC); } @@ -677,7 +678,6 @@ mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb, struct ieee80211_hw **hw, struct ieee80211_sta **sta) { - struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); struct mt76_rx_status mstat; @@ -689,6 +689,9 @@ mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb, status->enc_flags = mstat.enc_flags; status->encoding = mstat.encoding; status->bw = mstat.bw; + status->he_ru = mstat.he_ru; + status->he_gi = mstat.he_gi; + status->he_dcm = mstat.he_dcm; status->rate_idx = mstat.rate_idx; status->nss = mstat.nss; status->band = mstat.band; @@ -725,7 +728,7 @@ mt76_check_ccmp_pn(struct sk_buff *skb) * Validate the first fragment both here and in mac80211 * All further fragments will be validated by mac80211 only. */ - hdr = (struct ieee80211_hdr *)skb->data; + hdr = mt76_skb_get_hdr(skb); if (ieee80211_is_frag(hdr) && !ieee80211_is_first_frag(hdr->frame_control)) return 0; @@ -798,7 +801,7 @@ mt76_airtime_flush_ampdu(struct mt76_dev *dev) static void mt76_airtime_check(struct mt76_dev *dev, struct sk_buff *skb) { - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb); struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; struct mt76_wcid *wcid = status->wcid; @@ -835,7 +838,7 @@ static void mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb) { struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb); struct ieee80211_sta *sta; struct ieee80211_hw *hw; struct mt76_wcid *wcid = status->wcid; diff --git a/drivers/net/wireless/mediatek/mt76/mcu.c b/drivers/net/wireless/mediatek/mt76/mcu.c index 4048f446e3ee..ade61a5334c6 100644 --- a/drivers/net/wireless/mediatek/mt76/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mcu.c @@ -6,10 +6,11 @@ #include "mt76.h" struct sk_buff * -mt76_mcu_msg_alloc(const void *data, int head_len, - int data_len, int tail_len) +mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data, + int data_len) { - int length = head_len + data_len + tail_len; + const struct mt76_mcu_ops *ops = dev->mcu_ops; + int length = ops->headroom + data_len + ops->tailroom; struct sk_buff *skb; skb = alloc_skb(length, GFP_KERNEL); @@ -17,7 +18,7 @@ mt76_mcu_msg_alloc(const void *data, int head_len, return NULL; memset(skb->head, 0, length); - skb_reserve(skb, head_len); + skb_reserve(skb, ops->headroom); if (data && data_len) skb_put_data(skb, data, data_len); diff --git a/drivers/net/wireless/mediatek/mt76/mmio.c b/drivers/net/wireless/mediatek/mt76/mmio.c index 7ead6620bb8b..26353b6bce97 100644 --- a/drivers/net/wireless/mediatek/mt76/mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mmio.c @@ -73,7 +73,8 @@ void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr, spin_lock_irqsave(&dev->mmio.irq_lock, flags); dev->mmio.irqmask &= ~clear; dev->mmio.irqmask |= set; - mt76_mmio_wr(dev, addr, dev->mmio.irqmask); + if (addr) + mt76_mmio_wr(dev, addr, 
dev->mmio.irqmask); spin_unlock_irqrestore(&dev->mmio.irq_lock, flags); } EXPORT_SYMBOL_GPL(mt76_set_irq_mask); diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index 8e4759bc8f59..dfe625a53c63 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -60,6 +60,7 @@ enum mt76_txq_id { MT_TXQ_BK = IEEE80211_AC_BK, MT_TXQ_PSD, MT_TXQ_MCU, + MT_TXQ_MCU_WA, MT_TXQ_BEACON, MT_TXQ_CAB, MT_TXQ_FWDL, @@ -69,6 +70,7 @@ enum mt76_txq_id { enum mt76_rxq_id { MT_RXQ_MAIN, MT_RXQ_MCU, + MT_RXQ_MCU_WA, __MT_RXQ_MAX }; @@ -137,6 +139,9 @@ struct mt76_sw_queue { }; struct mt76_mcu_ops { + u32 headroom; + u32 tailroom; + int (*mcu_send_msg)(struct mt76_dev *dev, int cmd, const void *data, int len, bool wait_resp); int (*mcu_skb_send_msg)(struct mt76_dev *dev, struct sk_buff *skb, @@ -178,7 +183,7 @@ enum mt76_wcid_flags { MT_WCID_FLAG_PS, }; -#define MT76_N_WCIDS 128 +#define MT76_N_WCIDS 288 /* stored in ieee80211_tx_info::hw_queue */ #define MT_TX_HW_QUEUE_EXT_PHY BIT(3) @@ -198,7 +203,7 @@ struct mt76_wcid { struct ewma_signal rssi; int inactive_count; - u8 idx; + u16 idx; u8 hw_key_idx; u8 sta:1; @@ -241,8 +246,8 @@ struct mt76_rx_tid { struct delayed_work reorder_work; u16 head; - u8 size; - u8 nframes; + u16 size; + u16 nframes; u8 num; @@ -265,7 +270,7 @@ struct mt76_rx_tid { struct mt76_tx_cb { unsigned long jiffies; - u8 wcid; + u16 wcid; u8 pktid; u8 flags; }; @@ -275,10 +280,16 @@ enum { MT76_STATE_RUNNING, MT76_STATE_MCU_RUNNING, MT76_SCANNING, + MT76_HW_SCANNING, + MT76_HW_SCHED_SCANNING, + MT76_RESTART, MT76_RESET, MT76_MCU_RESET, MT76_REMOVED, MT76_READING_STATS, + MT76_STATE_POWER_OFF, + MT76_STATE_SUSPEND, + MT76_STATE_ROC, }; struct mt76_hw_cap { @@ -372,6 +383,7 @@ enum mt_vendor_req { MT_VEND_READ_CFG = 0x47, MT_VEND_READ_EXT = 0x63, MT_VEND_WRITE_EXT = 0x66, + MT_VEND_FEATURE_SET = 0x91, }; enum mt76u_in_ep { @@ -435,7 +447,7 @@ struct mt76_mmio { struct mt76_rx_status { union { struct mt76_wcid *wcid; - u8 wcid_idx; + u16 wcid_idx; }; unsigned long reorder_time; @@ -452,7 +464,8 @@ struct mt76_rx_status { u16 freq; u32 flag; u8 enc_flags; - u8 encoding:2, bw:3; + u8 encoding:2, bw:3, he_ru:3; + u8 he_gi:2, he_dcm:1; u8 rate_idx; u8 nss; u8 band; @@ -524,8 +537,8 @@ struct mt76_dev { wait_queue_head_t tx_wait; struct sk_buff_head status_list; - unsigned long wcid_mask[MT76_N_WCIDS / BITS_PER_LONG]; - unsigned long wcid_phy_mask[MT76_N_WCIDS / BITS_PER_LONG]; + u32 wcid_mask[DIV_ROUND_UP(MT76_N_WCIDS, 32)]; + u32 wcid_phy_mask[DIV_ROUND_UP(MT76_N_WCIDS, 32)]; struct mt76_wcid global_wcid; struct mt76_wcid __rcu *wcid[MT76_N_WCIDS]; @@ -570,6 +583,10 @@ enum mt76_phy_type { MT_PHY_TYPE_HT, MT_PHY_TYPE_HT_GF, MT_PHY_TYPE_VHT, + MT_PHY_TYPE_HE_SU = 8, + MT_PHY_TYPE_HE_EXT_SU, + MT_PHY_TYPE_HE_TB, + MT_PHY_TYPE_HE_MU, }; #define __mt76_rr(dev, ...) (dev)->bus->rr((dev), __VA_ARGS__) @@ -611,7 +628,7 @@ enum mt76_phy_type { #define mt76_hw(dev) (dev)->mphy.hw static inline struct ieee80211_hw * -mt76_wcid_hw(struct mt76_dev *dev, u8 wcid) +mt76_wcid_hw(struct mt76_dev *dev, u16 wcid) { if (wcid <= MT76_N_WCIDS && mt76_wcid_mask_test(dev->wcid_phy_mask, wcid)) @@ -654,6 +671,10 @@ static inline u16 mt76_rev(struct mt76_dev *dev) #define mt76_queue_tx_cleanup(dev, ...) (dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__) #define mt76_queue_kick(dev, ...) 
(dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__) +#define mt76_for_each_q_rx(dev, i) \ + for (i = 0; i < ARRAY_SIZE((dev)->q_rx) && \ + (dev)->q_rx[i].ndesc; i++) + struct mt76_dev *mt76_alloc_device(struct device *pdev, unsigned int size, const struct ieee80211_ops *ops, const struct mt76_driver_ops *drv_ops); @@ -735,6 +756,25 @@ static inline struct mt76_tx_cb *mt76_tx_skb_cb(struct sk_buff *skb) return ((void *)IEEE80211_SKB_CB(skb)->status.status_driver_data); } +static inline void *mt76_skb_get_hdr(struct sk_buff *skb) +{ + struct mt76_rx_status mstat; + u8 *data = skb->data; + + /* Alignment concerns */ + BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he) % 4); + BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he_mu) % 4); + + mstat = *((struct mt76_rx_status *)skb->cb); + + if (mstat.flag & RX_FLAG_RADIOTAP_HE) + data += sizeof(struct ieee80211_radiotap_he); + if (mstat.flag & RX_FLAG_RADIOTAP_HE_MU) + data += sizeof(struct ieee80211_radiotap_he_mu); + + return data; +} + static inline void mt76_insert_hdr_pad(struct sk_buff *skb) { int len = ieee80211_get_hdrlen_from_skb(skb); @@ -785,10 +825,10 @@ void mt76_set_channel(struct mt76_phy *phy); void mt76_update_survey(struct mt76_dev *dev); int mt76_get_survey(struct ieee80211_hw *hw, int idx, struct survey_info *survey); -void mt76_set_stream_caps(struct mt76_dev *dev, bool vht); +void mt76_set_stream_caps(struct mt76_phy *phy, bool vht); int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid, - u16 ssn, u8 size); + u16 ssn, u16 size); void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid); void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid, @@ -911,8 +951,8 @@ int mt76u_resume_rx(struct mt76_dev *dev); void mt76u_queues_deinit(struct mt76_dev *dev); struct sk_buff * -mt76_mcu_msg_alloc(const void *data, int head_len, - int data_len, int tail_len); +mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data, + int data_len); void mt76_mcu_rx_event(struct mt76_dev *dev, struct sk_buff *skb); struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev, unsigned long expires); diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c index cc7c788abedd..8ce6880b2bb8 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c @@ -113,7 +113,7 @@ void mt7603_init_debugfs(struct mt7603_dev *dev) return; debugfs_create_file("ampdu_stat", 0400, dir, dev, &fops_ampdu_stat); - debugfs_create_devm_seqfile(dev->mt76.dev, "queues", dir, + debugfs_create_devm_seqfile(dev->mt76.dev, "xmit-queues", dir, mt76_queues_read); debugfs_create_file("edcca", 0600, dir, dev, &fops_edcca); debugfs_create_u32("reset_test", 0600, dir, &dev->reset_test); diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c index 2b6a4d8a8dc7..3ee06e2577b8 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: ISC +#include <linux/of.h> #include "mt7603.h" #include "eeprom.h" @@ -100,10 +101,14 @@ mt7603_apply_cal_free_data(struct mt7603_dev *dev, u8 *efuse) MT_EE_TX_POWER_1_START_2G, MT_EE_TX_POWER_1_START_2G + 1, }; + struct device_node *np = dev->mt76.dev->of_node; u8 *eeprom = dev->mt76.eeprom.data; int n = ARRAY_SIZE(cal_free_bytes); int i; + if (!np || !of_property_read_bool(np, "mediatek,eeprom-merge-otp")) + 
return; + if (!mt7603_has_cal_free_data(dev, efuse)) return; diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/init.c b/drivers/net/wireless/mediatek/mt76/mt7603/init.c index f641a8b56b39..94196599797e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/init.c @@ -342,6 +342,8 @@ static const struct ieee80211_iface_limit if_limits[] = { #ifdef CONFIG_MAC80211_MESH BIT(NL80211_IFTYPE_MESH_POINT) | #endif + BIT(NL80211_IFTYPE_P2P_CLIENT) | + BIT(NL80211_IFTYPE_P2P_GO) | BIT(NL80211_IFTYPE_AP) }, }; diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c index 39b7c5d6e6cd..8060c1514396 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c @@ -51,10 +51,11 @@ void mt7603_mac_set_timing(struct mt7603_dev *dev) int offset = 3 * dev->coverage_class; u32 reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) | FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset); + bool is_5ghz = dev->mphy.chandef.chan->band == NL80211_BAND_5GHZ; int sifs; u32 val; - if (dev->mphy.chandef.chan->band == NL80211_BAND_5GHZ) + if (is_5ghz) sifs = 16; else sifs = 10; @@ -71,7 +72,7 @@ void mt7603_mac_set_timing(struct mt7603_dev *dev) FIELD_PREP(MT_IFS_SIFS, sifs) | FIELD_PREP(MT_IFS_SLOT, dev->slottime)); - if (dev->slottime < 20) + if (dev->slottime < 20 || is_5ghz) val = MT7603_CFEND_RATE_DEFAULT; else val = MT7603_CFEND_RATE_11B; @@ -318,11 +319,16 @@ void mt7603_wtbl_update_cap(struct mt7603_dev *dev, struct ieee80211_sta *sta) { struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv; int idx = msta->wcid.idx; + u8 ampdu_density; u32 addr; u32 val; addr = mt7603_wtbl1_addr(idx); + ampdu_density = sta->ht_cap.ampdu_density; + if (ampdu_density < IEEE80211_HT_MPDU_DENSITY_4) + ampdu_density = IEEE80211_HT_MPDU_DENSITY_4; + val = mt76_rr(dev, addr + 2 * 4); val &= MT_WTBL1_W2_KEY_TYPE | MT_WTBL1_W2_ADMISSION_CONTROL; val |= FIELD_PREP(MT_WTBL1_W2_AMPDU_FACTOR, sta->ht_cap.ampdu_factor) | @@ -467,7 +473,7 @@ mt7603_rx_get_wcid(struct mt7603_dev *dev, u8 idx, bool unicast) struct mt7603_sta *sta; struct mt76_wcid *wcid; - if (idx >= ARRAY_SIZE(dev->mt76.wcid)) + if (idx >= MT7603_WTBL_SIZE) return NULL; wcid = rcu_dereference(dev->mt76.wcid[idx]); @@ -1097,7 +1103,7 @@ mt7603_fill_txs(struct mt7603_dev *dev, struct mt7603_sta *sta, if (ampdu || (info->flags & IEEE80211_TX_CTL_AMPDU)) info->flags |= IEEE80211_TX_STAT_AMPDU | IEEE80211_TX_CTL_AMPDU; - first_idx = max_t(int, 0, last_idx - (count + 1) / MT7603_RATE_RETRY); + first_idx = max_t(int, 0, last_idx - (count - 1) / MT7603_RATE_RETRY); if (fixed_rate && !probe) { info->status.rates[0].count = count; @@ -1232,7 +1238,7 @@ void mt7603_mac_add_txs(struct mt7603_dev *dev, void *data) if (pid == MT_PACKET_ID_NO_ACK) return; - if (wcidx >= ARRAY_SIZE(dev->mt76.wcid)) + if (wcidx >= MT7603_WTBL_SIZE) return; rcu_read_lock(); @@ -1432,8 +1438,9 @@ static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev) for (i = 0; i < __MT_TXQ_MAX; i++) mt76_queue_tx_cleanup(dev, i, true); - for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++) + mt76_for_each_q_rx(&dev->mt76, i) { mt76_queue_rx_reset(dev, i); + } mt7603_dma_sched_reset(dev); diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c index 77985d81c447..a47a3a644ecc 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c @@ -62,7 +62,7 @@ 
mt7603_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data, struct sk_buff *skb; int ret, seq; - skb = mt7603_mcu_msg_alloc(data, len); + skb = mt76_mcu_msg_alloc(mdev, data, len); if (!skb) return -ENOMEM; @@ -265,6 +265,7 @@ out: int mt7603_mcu_init(struct mt7603_dev *dev) { static const struct mt76_mcu_ops mt7603_mcu_ops = { + .headroom = sizeof(struct mt7603_mcu_txd), .mcu_send_msg = mt7603_mcu_msg_send, .mcu_restart = mt7603_mcu_restart, }; diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7603/mcu.h index 1bba369d5c8a..30df8a3fd11a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt7603/mcu.h @@ -100,11 +100,4 @@ enum { MCU_EXT_EVENT_BCN_UPDATE = 0x31, }; -static inline struct sk_buff * -mt7603_mcu_msg_alloc(const void *data, int len) -{ - return mt76_mcu_msg_alloc(data, sizeof(struct mt7603_mcu_txd), - len, 0); -} - #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/soc.c b/drivers/net/wireless/mediatek/mt76/mt7603/soc.c index 68efb300c0d8..de170765e938 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/soc.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/soc.c @@ -20,10 +20,8 @@ mt76_wmac_probe(struct platform_device *pdev) return irq; mem_base = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(mem_base)) { - dev_err(&pdev->dev, "Failed to get memory resource\n"); + if (IS_ERR(mem_base)) return PTR_ERR(mem_base); - } mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt7603_ops, &mt7603_drv_ops); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig b/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig index 6afd4aea67ed..e25db1135eda 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig +++ b/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig @@ -1,7 +1,12 @@ # SPDX-License-Identifier: GPL-2.0-only -config MT7615E - tristate "MediaTek MT7615E (PCIe) support" + +config MT7615_COMMON + tristate select MT76_CORE + +config MT7615E + tristate "MediaTek MT7615E and MT7663E (PCIe) support" + select MT7615_COMMON depends on MAC80211 depends on PCI help @@ -22,3 +27,14 @@ config MT7622_WMAC This adds support for the built-in WMAC on MT7622 SoC devices which has the same feature set as a MT7615, but limited to 2.4 GHz only. + +config MT7663U + tristate "MediaTek MT7663U (USB) support" + select MT76_USB + select MT7615_COMMON + depends on MAC80211 + depends on USB + help + This adds support for MT7663U 802.11ax 2x2:2 wireless devices. + + To compile this driver as a module, choose M here. 
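The Kconfig hunk above splits the mt7615 driver into a bus-agnostic core (MT7615_COMMON) that the PCIe (MT7615E) and USB (MT7663U) back ends pull in via "select". Since the core now builds as its own module (see the Makefile change that follows, which adds mt7615-common.o), every routine the back ends call across the module boundary has to be exported, which is why later hunks in this series add EXPORT_SYMBOL_GPL() to functions such as mt7615_init_debugfs() and mt7615_eeprom_init(). Below is a minimal sketch of that common-core/bus-glue pattern; the foo_* names are invented for illustration, and only the export mechanism mirrors what the mt7615 hunks actually do.

/* foo_core.c - built as foo-common.ko (hypothetical example) */
#include <linux/module.h>

int foo_core_init(void)
{
	/* setup shared by every bus back end would go here */
	return 0;
}
/* Make the symbol resolvable from foo-pci.ko / foo-usb.ko */
EXPORT_SYMBOL_GPL(foo_core_init);

MODULE_LICENSE("Dual BSD/GPL");

/* foo_pci.c - built as foo-pci.ko, links against foo-common.ko */
#include <linux/module.h>

int foo_core_init(void);	/* normally declared in a shared header */

static int __init foo_pci_load(void)
{
	/* this call is resolved against foo-common.ko at load time */
	return foo_core_init();
}
module_init(foo_pci_load);

static void __exit foo_pci_unload(void)
{
}
module_exit(foo_pci_unload);

MODULE_LICENSE("Dual BSD/GPL");

With this layout, loading either back end pulls in the common module automatically through its symbol dependencies, and "select MT7615_COMMON" in Kconfig guarantees the core is built whenever a back end is enabled.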
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/Makefile b/drivers/net/wireless/mediatek/mt76/mt7615/Makefile index 5c6a220ed7e3..99f353b8b9aa 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/Makefile +++ b/drivers/net/wireless/mediatek/mt76/mt7615/Makefile @@ -1,9 +1,15 @@ #SPDX-License-Identifier: ISC +obj-$(CONFIG_MT7615_COMMON) += mt7615-common.o obj-$(CONFIG_MT7615E) += mt7615e.o +obj-$(CONFIG_MT7663U) += mt7663u.o CFLAGS_trace.o := -I$(src) -mt7615e-y := pci.o init.o dma.o eeprom.o main.o mcu.o mac.o mmio.o \ - debugfs.o trace.o +mt7615-common-y := main.o init.o mcu.o eeprom.o mac.o \ + debugfs.o trace.o + +mt7615e-y := pci.o pci_init.o dma.o pci_mac.o mmio.o mt7615e-$(CONFIG_MT7622_WMAC) += soc.o + +mt7663u-y := usb.o usb_mcu.o usb_init.o diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c index b4d0795154e3..fd3ef483a87c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c @@ -20,11 +20,15 @@ static int mt7615_scs_set(void *data, u64 val) { struct mt7615_dev *dev = data; + struct mt7615_phy *ext_phy; if (!mt7615_wait_for_mcu_init(dev)) return 0; - mt7615_mac_set_scs(dev, val); + mt7615_mac_set_scs(&dev->phy, val); + ext_phy = mt7615_ext_phy(dev); + if (ext_phy) + mt7615_mac_set_scs(ext_phy, val); return 0; } @@ -34,7 +38,7 @@ mt7615_scs_get(void *data, u64 *val) { struct mt7615_dev *dev = data; - *val = dev->scs_en; + *val = dev->phy.scs_en; return 0; } @@ -120,28 +124,52 @@ mt7615_reset_test_set(void *data, u64 val) DEFINE_DEBUGFS_ATTRIBUTE(fops_reset_test, NULL, mt7615_reset_test_set, "%lld\n"); -static int -mt7615_ampdu_stat_read(struct seq_file *file, void *data) +static void +mt7615_ampdu_stat_read_phy(struct mt7615_phy *phy, + struct seq_file *file) { struct mt7615_dev *dev = file->private; + u32 reg = is_mt7663(&dev->mt76) ? MT_MIB_ARNG(0) : MT_AGG_ASRCR0; + bool ext_phy = phy != &dev->phy; int bound[7], i, range; - range = mt76_rr(dev, MT_AGG_ASRCR0); + if (!phy) + return; + + range = mt76_rr(dev, reg); for (i = 0; i < 4; i++) bound[i] = MT_AGG_ASRCR_RANGE(range, i) + 1; - range = mt76_rr(dev, MT_AGG_ASRCR1); + + range = mt76_rr(dev, reg + 4); for (i = 0; i < 3; i++) bound[i + 4] = MT_AGG_ASRCR_RANGE(range, i) + 1; + seq_printf(file, "\nPhy %d\n", ext_phy); + seq_printf(file, "Length: %8d | ", bound[0]); for (i = 0; i < ARRAY_SIZE(bound) - 1; i++) seq_printf(file, "%3d -%3d | ", bound[i], bound[i + 1]); seq_puts(file, "\nCount: "); + + range = ext_phy ? 
ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0; for (i = 0; i < ARRAY_SIZE(bound); i++) - seq_printf(file, "%8d | ", dev->mt76.aggr_stats[i]); + seq_printf(file, "%8d | ", dev->mt76.aggr_stats[i + range]); seq_puts(file, "\n"); + seq_printf(file, "BA miss count: %d\n", phy->mib.ba_miss_cnt); + seq_printf(file, "PER: %ld.%1ld%%\n", + phy->mib.aggr_per / 10, phy->mib.aggr_per % 10); +} + +static int +mt7615_ampdu_stat_read(struct seq_file *file, void *data) +{ + struct mt7615_dev *dev = file->private; + + mt7615_ampdu_stat_read_phy(&dev->phy, file); + mt7615_ampdu_stat_read_phy(mt7615_ext_phy(dev), file); + return 0; } @@ -265,10 +293,10 @@ int mt7615_init_debugfs(struct mt7615_dev *dev) return -ENOMEM; if (is_mt7615(&dev->mt76)) - debugfs_create_devm_seqfile(dev->mt76.dev, "queues", dir, + debugfs_create_devm_seqfile(dev->mt76.dev, "xmit-queues", dir, mt7615_queues_read); else - debugfs_create_devm_seqfile(dev->mt76.dev, "queues", dir, + debugfs_create_devm_seqfile(dev->mt76.dev, "xmit-queues", dir, mt76_queues_read); debugfs_create_devm_seqfile(dev->mt76.dev, "acq", dir, mt7615_queues_acq); @@ -297,3 +325,4 @@ int mt7615_init_debugfs(struct mt7615_dev *dev) return 0; } +EXPORT_SYMBOL_GPL(mt7615_init_debugfs); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c index b19f208e3d54..5a124610d4af 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c @@ -94,45 +94,6 @@ mt7615_init_tx_queues(struct mt7615_dev *dev) return 0; } -void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, - struct sk_buff *skb) -{ - struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); - __le32 *rxd = (__le32 *)skb->data; - __le32 *end = (__le32 *)&skb->data[skb->len]; - enum rx_pkt_type type; - u16 flag; - - type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0])); - flag = FIELD_GET(MT_RXD0_PKT_FLAG, le32_to_cpu(rxd[0])); - if (type == PKT_TYPE_RX_EVENT && flag == 0x1) - type = PKT_TYPE_NORMAL_MCU; - - switch (type) { - case PKT_TYPE_TXS: - for (rxd++; rxd + 7 <= end; rxd += 7) - mt7615_mac_add_txs(dev, rxd); - dev_kfree_skb(skb); - break; - case PKT_TYPE_TXRX_NOTIFY: - mt7615_mac_tx_free(dev, skb); - break; - case PKT_TYPE_RX_EVENT: - mt7615_mcu_rx_event(dev, skb); - break; - case PKT_TYPE_NORMAL_MCU: - case PKT_TYPE_NORMAL: - if (!mt7615_mac_fill_rx(dev, skb)) { - mt76_rx(&dev->mt76, q, skb); - return; - } - /* fall through */ - default: - dev_kfree_skb(skb); - break; - } -} - static void mt7615_tx_cleanup(struct mt7615_dev *dev) { @@ -160,13 +121,52 @@ static int mt7615_poll_tx(struct napi_struct *napi, int budget) mt7615_tx_cleanup(dev); + rcu_read_lock(); mt7615_mac_sta_poll(dev); + rcu_read_unlock(); tasklet_schedule(&dev->mt76.tx_tasklet); return 0; } +int mt7615_wait_pdma_busy(struct mt7615_dev *dev) +{ + struct mt76_dev *mdev = &dev->mt76; + + if (!is_mt7663(mdev)) { + u32 mask = MT_PDMA_TX_BUSY | MT_PDMA_RX_BUSY; + u32 reg = mt7615_reg_map(dev, MT_PDMA_BUSY); + + if (!mt76_poll_msec(dev, reg, mask, 0, 1000)) { + dev_err(mdev->dev, "PDMA engine busy\n"); + return -EIO; + } + + return 0; + } + + if (!mt76_poll_msec(dev, MT_PDMA_BUSY_STATUS, + MT_PDMA_TX_IDX_BUSY, 0, 1000)) { + dev_err(mdev->dev, "PDMA engine tx busy\n"); + return -EIO; + } + + if (!mt76_poll_msec(dev, MT_PSE_PG_INFO, + MT_PSE_SRC_CNT, 0, 1000)) { + dev_err(mdev->dev, "PSE engine busy\n"); + return -EIO; + } + + if (!mt76_poll_msec(dev, MT_PDMA_BUSY_STATUS, + MT_PDMA_BUSY_IDX, 0, 1000)) { + 
dev_err(mdev->dev, "PDMA engine busy\n"); + return -EIO; + } + + return 0; +} + static void mt7622_dma_sched_init(struct mt7615_dev *dev) { u32 reg = mt7615_reg_map(dev, MT_DMASHDL_BASE); @@ -229,8 +229,13 @@ static void mt7663_dma_sched_init(struct mt7615_dev *dev) int mt7615_dma_init(struct mt7615_dev *dev) { int rx_ring_size = MT7615_RX_RING_SIZE; + int rx_buf_size = MT_RX_BUF_SIZE; int ret; + /* Increase buffer size to receive large VHT MPDUs */ + if (dev->mt76.cap.has_5ghz) + rx_buf_size *= 2; + mt76_dma_attach(&dev->mt76); mt76_wr(dev, MT_WPDMA_GLO_CFG, @@ -271,7 +276,7 @@ int mt7615_dma_init(struct mt7615_dev *dev) /* init rx queues */ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1, - MT7615_RX_MCU_RING_SIZE, MT_RX_BUF_SIZE, + MT7615_RX_MCU_RING_SIZE, rx_buf_size, MT_RX_RING_BASE); if (ret) return ret; @@ -280,7 +285,7 @@ int mt7615_dma_init(struct mt7615_dev *dev) rx_ring_size /= 2; ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN], 0, - rx_ring_size, MT_RX_BUF_SIZE, MT_RX_RING_BASE); + rx_ring_size, rx_buf_size, MT_RX_RING_BASE); if (ret) return ret; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c index dfa9a08b896d..edac37e7847b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c @@ -5,6 +5,7 @@ * Felix Fietkau <nbd@nbd.name> */ +#include <linux/of.h> #include "mt7615.h" #include "eeprom.h" @@ -40,11 +41,11 @@ static int mt7615_efuse_read(struct mt7615_dev *dev, u32 base, return 0; } -static int mt7615_efuse_init(struct mt7615_dev *dev) +static int mt7615_efuse_init(struct mt7615_dev *dev, u32 base) { - u32 val, base = mt7615_reg_map(dev, MT_EFUSE_BASE); int i, len = MT7615_EEPROM_SIZE; void *buf; + u32 val; val = mt76_rr(dev, base + MT_EFUSE_BASE_CTRL); if (val & MT_EFUSE_BASE_CTRL_EMPTY) @@ -67,15 +68,16 @@ static int mt7615_efuse_init(struct mt7615_dev *dev) return 0; } -static int mt7615_eeprom_load(struct mt7615_dev *dev) +static int mt7615_eeprom_load(struct mt7615_dev *dev, u32 addr) { int ret; - ret = mt76_eeprom_init(&dev->mt76, MT7615_EEPROM_SIZE); + ret = mt76_eeprom_init(&dev->mt76, MT7615_EEPROM_SIZE + + MT7615_EEPROM_EXTRA_DATA); if (ret < 0) return ret; - return mt7615_efuse_init(dev); + return mt7615_efuse_init(dev, addr); } static int mt7615_check_eeprom(struct mt76_dev *dev) @@ -109,6 +111,12 @@ mt7615_eeprom_parse_hw_band_cap(struct mt7615_dev *dev) return; } + if (is_mt7611(&dev->mt76)) { + /* 5GHz only */ + dev->mt76.cap.has_5ghz = true; + return; + } + val = FIELD_GET(MT_EE_NIC_WIFI_CONF_BAND_SEL, eeprom[MT_EE_WIFI_CONF]); switch (val) { @@ -128,14 +136,15 @@ mt7615_eeprom_parse_hw_band_cap(struct mt7615_dev *dev) static void mt7615_eeprom_parse_hw_cap(struct mt7615_dev *dev) { u8 *eeprom = dev->mt76.eeprom.data; - u8 tx_mask; + u8 tx_mask, max_nss; mt7615_eeprom_parse_hw_band_cap(dev); if (is_mt7663(&dev->mt76)) { - tx_mask = 2; + max_nss = 2; + tx_mask = FIELD_GET(MT_EE_HW_CONF1_TX_MASK, + eeprom[MT7663_EE_HW_CONF1]); } else { - u8 max_nss; u32 val; /* read tx-rx mask from eeprom */ @@ -144,21 +153,46 @@ static void mt7615_eeprom_parse_hw_cap(struct mt7615_dev *dev) tx_mask = FIELD_GET(MT_EE_NIC_CONF_TX_MASK, eeprom[MT_EE_NIC_CONF_0]); - if (!tx_mask || tx_mask > max_nss) - tx_mask = max_nss; } + if (!tx_mask || tx_mask > max_nss) + tx_mask = max_nss; dev->chainmask = BIT(tx_mask) - 1; dev->mphy.antenna_mask = dev->chainmask; dev->phy.chainmask = dev->chainmask; } -int 
mt7615_eeprom_get_power_index(struct mt7615_dev *dev, - struct ieee80211_channel *chan, - u8 chain_idx) +static int mt7663_eeprom_get_target_power_index(struct mt7615_dev *dev, + struct ieee80211_channel *chan, + u8 chain_idx) +{ + int index, group; + + if (chain_idx > 1) + return -EINVAL; + + if (chan->band == NL80211_BAND_2GHZ) + return MT7663_EE_TX0_2G_TARGET_POWER + (chain_idx << 4); + + group = mt7615_get_channel_group(chan->hw_value); + if (chain_idx == 1) + index = MT7663_EE_TX1_5G_G0_TARGET_POWER; + else + index = MT7663_EE_TX0_5G_G0_TARGET_POWER; + + return index + group * 3; +} + +int mt7615_eeprom_get_target_power_index(struct mt7615_dev *dev, + struct ieee80211_channel *chan, + u8 chain_idx) { int index; + if (is_mt7663(&dev->mt76)) + return mt7663_eeprom_get_target_power_index(dev, chan, + chain_idx); + if (chain_idx > 3) return -EINVAL; @@ -197,6 +231,23 @@ int mt7615_eeprom_get_power_index(struct mt7615_dev *dev, return index; } +int mt7615_eeprom_get_power_delta_index(struct mt7615_dev *dev, + enum nl80211_band band) +{ + /* assume the first rate has the highest power offset */ + if (is_mt7663(&dev->mt76)) { + if (band == NL80211_BAND_2GHZ) + return MT_EE_TX0_5G_G0_TARGET_POWER; + else + return MT7663_EE_5G_RATE_POWER; + } + + if (band == NL80211_BAND_2GHZ) + return MT_EE_2G_RATE_POWER; + else + return MT_EE_5G_RATE_POWER; +} + static void mt7615_apply_cal_free_data(struct mt7615_dev *dev) { static const u16 ical[] = { @@ -255,30 +306,38 @@ static void mt7622_apply_cal_free_data(struct mt7615_dev *dev) static void mt7615_cal_free_data(struct mt7615_dev *dev) { + struct device_node *np = dev->mt76.dev->of_node; + + if (!np || !of_property_read_bool(np, "mediatek,eeprom-merge-otp")) + return; + switch (mt76_chip(&dev->mt76)) { case 0x7622: mt7622_apply_cal_free_data(dev); break; case 0x7615: + case 0x7611: mt7615_apply_cal_free_data(dev); break; } } -int mt7615_eeprom_init(struct mt7615_dev *dev) +int mt7615_eeprom_init(struct mt7615_dev *dev, u32 addr) { int ret; - ret = mt7615_eeprom_load(dev); + ret = mt7615_eeprom_load(dev, addr); if (ret < 0) return ret; ret = mt7615_check_eeprom(&dev->mt76); - if (ret && dev->mt76.otp.data) + if (ret && dev->mt76.otp.data) { memcpy(dev->mt76.eeprom.data, dev->mt76.otp.data, MT7615_EEPROM_SIZE); - else + } else { + dev->flash_eeprom = true; mt7615_cal_free_data(dev); + } mt7615_eeprom_parse_hw_cap(dev); memcpy(dev->mt76.macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR, @@ -288,3 +347,4 @@ int mt7615_eeprom_init(struct mt7615_dev *dev) return 0; } +EXPORT_SYMBOL_GPL(mt7615_eeprom_init); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h index 8a2a64b7fcd3..40fed7adc58a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h @@ -6,6 +6,21 @@ #include "mt7615.h" + +#define MT7615_EEPROM_DCOC_OFFSET MT7615_EEPROM_SIZE +#define MT7615_EEPROM_DCOC_SIZE 256 +#define MT7615_EEPROM_DCOC_COUNT 34 + +#define MT7615_EEPROM_TXDPD_OFFSET (MT7615_EEPROM_SIZE + \ + MT7615_EEPROM_DCOC_COUNT * \ + MT7615_EEPROM_DCOC_SIZE) +#define MT7615_EEPROM_TXDPD_SIZE 216 +#define MT7615_EEPROM_TXDPD_COUNT (44 + 3) + +#define MT7615_EEPROM_EXTRA_DATA (MT7615_EEPROM_TXDPD_OFFSET + \ + MT7615_EEPROM_TXDPD_COUNT * \ + MT7615_EEPROM_TXDPD_SIZE) + enum mt7615_eeprom_field { MT_EE_CHIP_ID = 0x000, MT_EE_VERSION = 0x002, @@ -13,23 +28,39 @@ enum mt7615_eeprom_field { MT_EE_NIC_CONF_0 = 0x034, MT_EE_NIC_CONF_1 = 0x036, MT_EE_WIFI_CONF = 0x03e, + 
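The MT7615_EEPROM_DCOC_* / MT7615_EEPROM_TXDPD_* constants defined just above place the flash-only calibration blobs directly behind the base EEPROM image. A quick arithmetic check of that layout; the 1024-byte base size (MT7615_EEPROM_SIZE from mt7615.h) is an assumption here, everything else comes from the hunk.

    #include <stdio.h>

    #define EEPROM_SIZE   1024                    /* MT7615_EEPROM_SIZE */
    #define DCOC_OFFSET   EEPROM_SIZE
    #define DCOC_TOTAL    (34 * 256)              /* DCOC_COUNT * DCOC_SIZE */
    #define TXDPD_OFFSET  (DCOC_OFFSET + DCOC_TOTAL)
    #define TXDPD_TOTAL   ((44 + 3) * 216)        /* TXDPD_COUNT * TXDPD_SIZE */
    #define EXTRA_END     (TXDPD_OFFSET + TXDPD_TOTAL)

    int main(void)
    {
        /* DCOC at 1024, TXDPD at 9728, TXDPD region ends at 19880 */
        printf("dcoc @%d, txdpd @%d, end @%d\n",
               DCOC_OFFSET, TXDPD_OFFSET, EXTRA_END);
        return 0;
    }

EXTRA_END corresponds to MT7615_EEPROM_EXTRA_DATA, which is why mt7615_eeprom_load() above enlarges the mt76_eeprom_init() allocation by exactly that amount.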
MT_EE_CALDATA_FLASH = 0x052, MT_EE_TX0_2G_TARGET_POWER = 0x058, MT_EE_TX0_5G_G0_TARGET_POWER = 0x070, + MT7663_EE_5G_RATE_POWER = 0x089, MT_EE_TX1_5G_G0_TARGET_POWER = 0x098, + MT_EE_2G_RATE_POWER = 0x0be, + MT_EE_5G_RATE_POWER = 0x0d5, + MT7663_EE_TX0_2G_TARGET_POWER = 0x0e3, MT_EE_EXT_PA_2G_TARGET_POWER = 0x0f2, MT_EE_EXT_PA_5G_TARGET_POWER = 0x0f3, - MT7663_EE_TX0_2G_TARGET_POWER = 0x123, MT_EE_TX2_5G_G0_TARGET_POWER = 0x142, MT_EE_TX3_5G_G0_TARGET_POWER = 0x16a, + MT7663_EE_HW_CONF1 = 0x1b0, + MT7663_EE_TX0_5G_G0_TARGET_POWER = 0x245, + MT7663_EE_TX1_5G_G0_TARGET_POWER = 0x2b5, MT7615_EE_MAX = 0x3bf, MT7622_EE_MAX = 0x3db, MT7663_EE_MAX = 0x400, }; +#define MT_EE_RATE_POWER_MASK GENMASK(5, 0) +#define MT_EE_RATE_POWER_SIGN BIT(6) +#define MT_EE_RATE_POWER_EN BIT(7) + +#define MT_EE_CALDATA_FLASH_TX_DPD BIT(0) +#define MT_EE_CALDATA_FLASH_RX_CAL BIT(1) + #define MT_EE_NIC_CONF_TX_MASK GENMASK(7, 4) #define MT_EE_NIC_CONF_RX_MASK GENMASK(3, 0) +#define MT_EE_HW_CONF1_TX_MASK GENMASK(2, 0) + #define MT_EE_NIC_CONF_TSSI_2G BIT(5) #define MT_EE_NIC_CONF_TSSI_5G BIT(6) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/init.c b/drivers/net/wireless/mediatek/mt76/mt7615/init.c index 03b1e56534d6..e2d80518e5af 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/init.c @@ -12,17 +12,18 @@ #include "mac.h" #include "eeprom.h" -static void mt7615_phy_init(struct mt7615_dev *dev) +void mt7615_phy_init(struct mt7615_dev *dev) { /* disable rf low power beacon mode */ mt76_set(dev, MT_WF_PHY_WF2_RFCTRL0(0), MT_WF_PHY_WF2_RFCTRL0_LPBCN_EN); mt76_set(dev, MT_WF_PHY_WF2_RFCTRL0(1), MT_WF_PHY_WF2_RFCTRL0_LPBCN_EN); } +EXPORT_SYMBOL_GPL(mt7615_phy_init); static void mt7615_init_mac_chain(struct mt7615_dev *dev, int chain) { - u32 val, mask, set; + u32 val; if (!chain) val = MT_CFG_CCR_MAC_D0_1X_GC_EN | MT_CFG_CCR_MAC_D0_2X_GC_EN; @@ -62,18 +63,23 @@ mt7615_init_mac_chain(struct mt7615_dev *dev, int chain) FIELD_PREP(MT_AGG_ARxCR_LIMIT(6), MT7615_RATE_RETRY - 1) | FIELD_PREP(MT_AGG_ARxCR_LIMIT(7), MT7615_RATE_RETRY - 1)); - mask = MT_DMA_RCFR0_MCU_RX_MGMT | - MT_DMA_RCFR0_MCU_RX_CTL_NON_BAR | - MT_DMA_RCFR0_MCU_RX_CTL_BAR | - MT_DMA_RCFR0_MCU_RX_BYPASS | - MT_DMA_RCFR0_RX_DROPPED_UCAST | - MT_DMA_RCFR0_RX_DROPPED_MCAST; - set = FIELD_PREP(MT_DMA_RCFR0_RX_DROPPED_UCAST, 2) | - FIELD_PREP(MT_DMA_RCFR0_RX_DROPPED_MCAST, 2); - mt76_rmw(dev, MT_DMA_RCFR0(chain), mask, set); + mt76_clear(dev, MT_DMA_RCFR0(chain), MT_DMA_RCFR0_MCU_RX_TDLS); + if (!mt7615_firmware_offload(dev)) { + u32 mask, set; + + mask = MT_DMA_RCFR0_MCU_RX_MGMT | + MT_DMA_RCFR0_MCU_RX_CTL_NON_BAR | + MT_DMA_RCFR0_MCU_RX_CTL_BAR | + MT_DMA_RCFR0_MCU_RX_BYPASS | + MT_DMA_RCFR0_RX_DROPPED_UCAST | + MT_DMA_RCFR0_RX_DROPPED_MCAST; + set = FIELD_PREP(MT_DMA_RCFR0_RX_DROPPED_UCAST, 2) | + FIELD_PREP(MT_DMA_RCFR0_RX_DROPPED_MCAST, 2); + mt76_rmw(dev, MT_DMA_RCFR0(chain), mask, set); + } } -static void mt7615_mac_init(struct mt7615_dev *dev) +void mt7615_mac_init(struct mt7615_dev *dev) { int i; @@ -90,7 +96,7 @@ static void mt7615_mac_init(struct mt7615_dev *dev) MT_TMAC_CTCR0_INS_DDLMT_EN); mt7615_mcu_set_rts_thresh(&dev->phy, 0x92b); - mt7615_mac_set_scs(dev, true); + mt7615_mac_set_scs(&dev->phy, true); mt76_rmw(dev, MT_AGG_SCR, MT_AGG_SCR_NLNAV_MID_PTEC_DIS, MT_AGG_SCR_NLNAV_MID_PTEC_DIS); @@ -112,67 +118,59 @@ static void mt7615_mac_init(struct mt7615_dev *dev) mt76_wr(dev, MT_DMA_DCR0, FIELD_PREP(MT_DMA_DCR0_MAX_RX_LEN, 3072) | MT_DMA_DCR0_RX_VEC_DROP); + /* disable TDLS filtering */ 
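Returning to the mt7615_init_mac_chain() hunk above: when firmware offload is absent, the RX filter fields are rewritten with a single read-modify-write, clearing every field named in mask and applying the FIELD_PREP'd values in set. A generic sketch of that pattern; the field positions below are invented for the demo and do not match the real MT_DMA_RCFR0 layout.

    #include <stdint.h>

    #define DEMO_DROP_UCAST  (0x3u << 4)            /* 2-bit field, bits 5:4 */
    #define DEMO_DROP_MCAST  (0x3u << 6)            /* 2-bit field, bits 7:6 */

    static uint32_t rmw(uint32_t reg, uint32_t mask, uint32_t set)
    {
        return (reg & ~mask) | set;                 /* mt76_rmw() semantics */
    }

    static uint32_t demo(uint32_t reg)
    {
        uint32_t mask = DEMO_DROP_UCAST | DEMO_DROP_MCAST;
        uint32_t set  = (2u << 4) | (2u << 6);      /* FIELD_PREP(..., 2) */

        return rmw(reg, mask, set);
    }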
+ mt76_clear(dev, MT_WF_PFCR, MT_WF_PFCR_TDLS_EN); + mt76_set(dev, MT_WF_MIB_SCR0, MT_MIB_SCR0_AGG_CNT_RANGE_EN); if (is_mt7663(&dev->mt76)) { - mt76_wr(dev, MT_CSR(0x010), 0x8208); - mt76_wr(dev, 0x44064, 0x2000000); mt76_wr(dev, MT_WF_AGG(0x160), 0x5c341c02); mt76_wr(dev, MT_WF_AGG(0x164), 0x70708040); } else { mt7615_init_mac_chain(dev, 1); } } +EXPORT_SYMBOL_GPL(mt7615_mac_init); -bool mt7615_wait_for_mcu_init(struct mt7615_dev *dev) +void mt7615_check_offload_capability(struct mt7615_dev *dev) { - flush_work(&dev->mcu_work); - - return test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state); -} - -static void mt7615_init_work(struct work_struct *work) -{ - struct mt7615_dev *dev = container_of(work, struct mt7615_dev, mcu_work); + struct ieee80211_hw *hw = mt76_hw(dev); + struct wiphy *wiphy = hw->wiphy; - if (mt7615_mcu_init(dev)) - return; + if (mt7615_firmware_offload(dev)) { + ieee80211_hw_set(hw, SUPPORTS_PS); + ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS); - mt7615_mcu_set_eeprom(dev); - mt7615_mac_init(dev); - mt7615_phy_init(dev); - mt7615_mcu_del_wtbl_all(dev); + wiphy->max_remain_on_channel_duration = 5000; + wiphy->features |= NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR | + NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR | + WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL | + NL80211_FEATURE_P2P_GO_CTWIN | + NL80211_FEATURE_P2P_GO_OPPPS; + } else { + dev->ops->hw_scan = NULL; + dev->ops->cancel_hw_scan = NULL; + dev->ops->sched_scan_start = NULL; + dev->ops->sched_scan_stop = NULL; + dev->ops->set_rekey_data = NULL; + dev->ops->remain_on_channel = NULL; + dev->ops->cancel_remain_on_channel = NULL; + + wiphy->max_sched_scan_plan_interval = 0; + wiphy->max_sched_scan_ie_len = 0; + wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN; + wiphy->max_sched_scan_ssids = 0; + wiphy->max_match_sets = 0; + wiphy->max_sched_scan_reqs = 0; + } } +EXPORT_SYMBOL_GPL(mt7615_check_offload_capability); -static int mt7615_init_hardware(struct mt7615_dev *dev) +bool mt7615_wait_for_mcu_init(struct mt7615_dev *dev) { - int ret, idx; - - mt76_wr(dev, MT_INT_SOURCE_CSR, ~0); - - INIT_WORK(&dev->mcu_work, mt7615_init_work); - spin_lock_init(&dev->token_lock); - idr_init(&dev->token); - - ret = mt7615_eeprom_init(dev); - if (ret < 0) - return ret; - - ret = mt7615_dma_init(dev); - if (ret) - return ret; - - set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state); - - /* Beacon and mgmt frames should occupy wcid 0 */ - idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7615_WTBL_STA - 1); - if (idx) - return -ENOSPC; - - dev->mt76.global_wcid.idx = idx; - dev->mt76.global_wcid.hw_key_idx = -1; - rcu_assign_pointer(dev->mt76.wcid[idx], &dev->mt76.global_wcid); + flush_work(&dev->mcu_work); - return 0; + return test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state); } +EXPORT_SYMBOL_GPL(mt7615_wait_for_mcu_init); #define CCK_RATE(_idx, _rate) { \ .bitrate = _rate, \ @@ -187,7 +185,7 @@ static int mt7615_init_hardware(struct mt7615_dev *dev) .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | (_idx), \ } -static struct ieee80211_rate mt7615_rates[] = { +struct ieee80211_rate mt7615_rates[] = { CCK_RATE(0, 10), CCK_RATE(1, 20), CCK_RATE(2, 55), @@ -201,6 +199,7 @@ static struct ieee80211_rate mt7615_rates[] = { OFDM_RATE(8, 480), OFDM_RATE(12, 540), }; +EXPORT_SYMBOL_GPL(mt7615_rates); static const struct ieee80211_iface_limit if_limits[] = { { @@ -212,6 +211,8 @@ static const struct ieee80211_iface_limit if_limits[] = { #ifdef CONFIG_MAC80211_MESH BIT(NL80211_IFTYPE_MESH_POINT) | #endif + BIT(NL80211_IFTYPE_P2P_CLIENT) | + BIT(NL80211_IFTYPE_P2P_GO) | 
BIT(NL80211_IFTYPE_STATION) } }; @@ -226,68 +227,26 @@ static const struct ieee80211_iface_combination if_comb[] = { } }; -static void -mt7615_led_set_config(struct led_classdev *led_cdev, - u8 delay_on, u8 delay_off) -{ - struct mt7615_dev *dev; - struct mt76_dev *mt76; - u32 val, addr; - - mt76 = container_of(led_cdev, struct mt76_dev, led_cdev); - dev = container_of(mt76, struct mt7615_dev, mt76); - val = FIELD_PREP(MT_LED_STATUS_DURATION, 0xffff) | - FIELD_PREP(MT_LED_STATUS_OFF, delay_off) | - FIELD_PREP(MT_LED_STATUS_ON, delay_on); - - addr = mt7615_reg_map(dev, MT_LED_STATUS_0(mt76->led_pin)); - mt76_wr(dev, addr, val); - addr = mt7615_reg_map(dev, MT_LED_STATUS_1(mt76->led_pin)); - mt76_wr(dev, addr, val); - - val = MT_LED_CTRL_REPLAY(mt76->led_pin) | - MT_LED_CTRL_KICK(mt76->led_pin); - if (mt76->led_al) - val |= MT_LED_CTRL_POLARITY(mt76->led_pin); - addr = mt7615_reg_map(dev, MT_LED_CTRL); - mt76_wr(dev, addr, val); -} - -static int -mt7615_led_set_blink(struct led_classdev *led_cdev, - unsigned long *delay_on, - unsigned long *delay_off) -{ - u8 delta_on, delta_off; - - delta_off = max_t(u8, *delay_off / 10, 1); - delta_on = max_t(u8, *delay_on / 10, 1); - - mt7615_led_set_config(led_cdev, delta_on, delta_off); - - return 0; -} - -static void -mt7615_led_set_brightness(struct led_classdev *led_cdev, - enum led_brightness brightness) -{ - if (!brightness) - mt7615_led_set_config(led_cdev, 0, 0xff); - else - mt7615_led_set_config(led_cdev, 0xff, 0); -} - -static void -mt7615_init_txpower(struct mt7615_dev *dev, - struct ieee80211_supported_band *sband) +void mt7615_init_txpower(struct mt7615_dev *dev, + struct ieee80211_supported_band *sband) { int i, n_chains = hweight8(dev->mphy.antenna_mask), target_chains; + int delta_idx, delta = mt76_tx_power_nss_delta(n_chains); u8 *eep = (u8 *)dev->mt76.eeprom.data; enum nl80211_band band = sband->band; - int delta = mt76_tx_power_nss_delta(n_chains); + u8 rate_val; + + delta_idx = mt7615_eeprom_get_power_delta_index(dev, band); + rate_val = eep[delta_idx]; + if ((rate_val & ~MT_EE_RATE_POWER_MASK) == + (MT_EE_RATE_POWER_EN | MT_EE_RATE_POWER_SIGN)) + delta += rate_val & MT_EE_RATE_POWER_MASK; + + if (!is_mt7663(&dev->mt76) && mt7615_ext_pa_enabled(dev, band)) + target_chains = 1; + else + target_chains = n_chains; - target_chains = mt7615_ext_pa_enabled(dev, band) ? 
1 : n_chains; for (i = 0; i < sband->n_channels; i++) { struct ieee80211_channel *chan = &sband->channels[i]; u8 target_power = 0; @@ -296,7 +255,10 @@ mt7615_init_txpower(struct mt7615_dev *dev, for (j = 0; j < target_chains; j++) { int index; - index = mt7615_eeprom_get_power_index(dev, chan, j); + index = mt7615_eeprom_get_target_power_index(dev, chan, j); + if (index < 0) + continue; + target_power = max(target_power, eep[index]); } @@ -306,6 +268,7 @@ mt7615_init_txpower(struct mt7615_dev *dev, chan->orig_mpwr = target_power; } } +EXPORT_SYMBOL_GPL(mt7615_init_txpower); static void mt7615_regd_notifier(struct wiphy *wiphy, @@ -345,8 +308,18 @@ mt7615_init_wiphy(struct ieee80211_hw *hw) wiphy->n_iface_combinations = ARRAY_SIZE(if_comb); wiphy->reg_notifier = mt7615_regd_notifier; + wiphy->max_sched_scan_plan_interval = MT7615_MAX_SCHED_SCAN_INTERVAL; + wiphy->max_sched_scan_ie_len = IEEE80211_MAX_DATA_LEN; + wiphy->max_scan_ie_len = MT7615_SCAN_IE_LEN; + wiphy->max_sched_scan_ssids = MT7615_MAX_SCHED_SCAN_SSID; + wiphy->max_match_sets = MT7615_MAX_SCAN_MATCH; + wiphy->max_sched_scan_reqs = 1; + wiphy->max_scan_ssids = 4; + + wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_SET_SCAN_DWELL); wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS); + ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS); ieee80211_hw_set(hw, TX_STATUS_NO_AMPDU_LEN); if (is_mt7615(&phy->dev->mt76)) @@ -368,7 +341,7 @@ mt7615_cap_dbdc_enable(struct mt7615_dev *dev) dev->phy.chainmask = dev->mphy.antenna_mask; dev->mphy.hw->wiphy->available_antennas_rx = dev->phy.chainmask; dev->mphy.hw->wiphy->available_antennas_tx = dev->phy.chainmask; - mt76_set_stream_caps(&dev->mt76, true); + mt76_set_stream_caps(&dev->mphy, true); } static void @@ -381,7 +354,7 @@ mt7615_cap_dbdc_disable(struct mt7615_dev *dev) dev->phy.chainmask = dev->chainmask; dev->mphy.hw->wiphy->available_antennas_rx = dev->chainmask; dev->mphy.hw->wiphy->available_antennas_tx = dev->chainmask; - mt76_set_stream_caps(&dev->mt76, true); + mt76_set_stream_caps(&dev->mphy, true); } int mt7615_register_ext_phy(struct mt7615_dev *dev) @@ -411,6 +384,16 @@ int mt7615_register_ext_phy(struct mt7615_dev *dev) mphy->antenna_mask = BIT(hweight8(phy->chainmask)) - 1; mt7615_init_wiphy(mphy->hw); + INIT_DELAYED_WORK(&phy->mac_work, mt7615_mac_work); + INIT_DELAYED_WORK(&phy->scan_work, mt7615_scan_work); + skb_queue_head_init(&phy->scan_event_list); + + INIT_WORK(&phy->roc_work, mt7615_roc_work); + timer_setup(&phy->roc_timer, mt7615_roc_timer, 0); + init_waitqueue_head(&phy->roc_wait); + + mt7615_mac_set_scs(phy, true); + /* * Make the secondary PHY MAC address local without overlapping with * the usual MAC address allocation scheme on multiple virtual interfaces @@ -431,6 +414,7 @@ int mt7615_register_ext_phy(struct mt7615_dev *dev) return ret; } +EXPORT_SYMBOL_GPL(mt7615_register_ext_phy); void mt7615_unregister_ext_phy(struct mt7615_dev *dev) { @@ -444,6 +428,7 @@ void mt7615_unregister_ext_phy(struct mt7615_dev *dev) mt76_unregister_phy(mphy); ieee80211_free_hw(mphy->hw); } +EXPORT_SYMBOL_GPL(mt7615_unregister_ext_phy); void mt7615_init_device(struct mt7615_dev *dev) { @@ -452,11 +437,17 @@ void mt7615_init_device(struct mt7615_dev *dev) dev->phy.dev = dev; dev->phy.mt76 = &dev->mt76.phy; dev->mt76.phy.priv = &dev->phy; - INIT_DELAYED_WORK(&dev->mt76.mac_work, mt7615_mac_work); + INIT_DELAYED_WORK(&dev->phy.mac_work, mt7615_mac_work); + INIT_DELAYED_WORK(&dev->phy.scan_work, mt7615_scan_work); + skb_queue_head_init(&dev->phy.scan_event_list); 
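On the MT_EE_RATE_POWER_* byte consumed by mt7615_init_txpower() above: bit 7 flags the entry valid, bit 6 carries the sign, and bits 5:0 the magnitude, and the hunk only applies the offset when the entry is both valid and positive. A standalone decoder under those assumptions; the unit of the magnitude is not visible in the diff.

    #include <stdio.h>
    #include <stdint.h>

    #define RATE_POWER_MASK 0x3f   /* GENMASK(5, 0) */
    #define RATE_POWER_SIGN 0x40   /* BIT(6) */
    #define RATE_POWER_EN   0x80   /* BIT(7) */

    static int rate_power_delta(uint8_t val)
    {
        /* only a valid, positive delta is applied in the hunk above */
        if ((val & ~RATE_POWER_MASK) != (RATE_POWER_EN | RATE_POWER_SIGN))
            return 0;

        return val & RATE_POWER_MASK;
    }

    int main(void)
    {
        printf("delta: %d\n", rate_power_delta(0xc3));  /* -> 3 */
        return 0;
    }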
INIT_LIST_HEAD(&dev->sta_poll_list); spin_lock_init(&dev->sta_poll_lock); init_waitqueue_head(&dev->reset_wait); + init_waitqueue_head(&dev->phy.roc_wait); + INIT_WORK(&dev->reset_work, mt7615_mac_reset_work); + INIT_WORK(&dev->phy.roc_work, mt7615_roc_work); + timer_setup(&dev->phy.roc_timer, mt7615_roc_timer, 0); mt7615_init_wiphy(hw); dev->mphy.sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING; @@ -467,62 +458,4 @@ void mt7615_init_device(struct mt7615_dev *dev) mt7615_cap_dbdc_disable(dev); dev->phy.dfs_state = -1; } - -int mt7615_register_device(struct mt7615_dev *dev) -{ - int ret; - - mt7615_init_device(dev); - - /* init led callbacks */ - if (IS_ENABLED(CONFIG_MT76_LEDS)) { - dev->mt76.led_cdev.brightness_set = mt7615_led_set_brightness; - dev->mt76.led_cdev.blink_set = mt7615_led_set_blink; - } - - ret = mt7622_wmac_init(dev); - if (ret) - return ret; - - ret = mt7615_init_hardware(dev); - if (ret) - return ret; - - ret = mt76_register_device(&dev->mt76, true, mt7615_rates, - ARRAY_SIZE(mt7615_rates)); - if (ret) - return ret; - - ieee80211_queue_work(mt76_hw(dev), &dev->mcu_work); - mt7615_init_txpower(dev, &dev->mphy.sband_2g.sband); - mt7615_init_txpower(dev, &dev->mphy.sband_5g.sband); - - return mt7615_init_debugfs(dev); -} - -void mt7615_unregister_device(struct mt7615_dev *dev) -{ - struct mt76_txwi_cache *txwi; - bool mcu_running; - int id; - - mcu_running = mt7615_wait_for_mcu_init(dev); - - mt7615_unregister_ext_phy(dev); - mt76_unregister_device(&dev->mt76); - if (mcu_running) - mt7615_mcu_exit(dev); - mt7615_dma_cleanup(dev); - - spin_lock_bh(&dev->token_lock); - idr_for_each_entry(&dev->token, txwi, id) { - mt7615_txp_skb_unmap(&dev->mt76, txwi); - if (txwi->skb) - dev_kfree_skb_any(txwi->skb); - mt76_put_txwi(&dev->mt76, txwi); - } - spin_unlock_bh(&dev->token_lock); - idr_destroy(&dev->token); - - mt76_free_device(&dev->mt76); -} +EXPORT_SYMBOL_GPL(mt7615_init_device); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c index a27a6d164009..9f1c6ca7a665 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c @@ -61,7 +61,7 @@ static struct mt76_wcid *mt7615_rx_get_wcid(struct mt7615_dev *dev, struct mt7615_sta *sta; struct mt76_wcid *wcid; - if (idx >= ARRAY_SIZE(dev->mt76.wcid)) + if (idx >= MT7615_WTBL_SIZE) return NULL; wcid = rcu_dereference(dev->mt76.wcid[idx]); @@ -82,8 +82,10 @@ void mt7615_mac_reset_counters(struct mt7615_dev *dev) { int i; - for (i = 0; i < 4; i++) - mt76_rr(dev, MT_TX_AGG_CNT(i)); + for (i = 0; i < 4; i++) { + mt76_rr(dev, MT_TX_AGG_CNT(0, i)); + mt76_rr(dev, MT_TX_AGG_CNT(1, i)); + } memset(dev->mt76.aggr_stats, 0, sizeof(dev->mt76.aggr_stats)); dev->mt76.phy.survey_time = ktime_get_boottime(); @@ -113,10 +115,14 @@ void mt7615_mac_set_timing(struct mt7615_phy *phy) u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) | FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48); u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) | - FIELD_PREP(MT_TIMEOUT_VAL_CCA, 24); + FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28); int sifs, offset; + bool is_5ghz = phy->mt76->chandef.chan->band == NL80211_BAND_5GHZ; + + if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state)) + return; - if (phy->mt76->chandef.chan->band == NL80211_BAND_5GHZ) + if (is_5ghz) sifs = 16; else sifs = 10; @@ -149,7 +155,7 @@ void mt7615_mac_set_timing(struct mt7615_phy *phy) FIELD_PREP(MT_IFS_SIFS, sifs) | FIELD_PREP(MT_IFS_SLOT, phy->slottime)); - if (phy->slottime < 20) + if (phy->slottime < 20 || 
is_5ghz) val = MT7615_CFEND_RATE_DEFAULT; else val = MT7615_CFEND_RATE_11B; @@ -164,7 +170,23 @@ void mt7615_mac_set_timing(struct mt7615_phy *phy) } -int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb) +static void +mt7615_get_status_freq_info(struct mt7615_dev *dev, struct mt76_phy *mphy, + struct mt76_rx_status *status, u8 chfreq) +{ + if (!test_bit(MT76_HW_SCANNING, &mphy->state) && + !test_bit(MT76_HW_SCHED_SCANNING, &mphy->state) && + !test_bit(MT76_STATE_ROC, &mphy->state)) { + status->freq = mphy->chandef.chan->center_freq; + status->band = mphy->chandef.chan->band; + return; + } + + status->band = chfreq <= 14 ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ; + status->freq = ieee80211_channel_to_frequency(chfreq, status->band); +} + +static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb) { struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; struct mt76_phy *mphy = &dev->mt76.phy; @@ -282,11 +304,10 @@ int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb) status->ext_phy = true; } - if (chfreq != phy->chfreq) + if (!mt7615_firmware_offload(dev) && chfreq != phy->chfreq) return -EINVAL; - status->freq = mphy->chandef.chan->center_freq; - status->band = mphy->chandef.chan->band; + mt7615_get_status_freq_info(dev, mphy, status, chfreq); if (status->band == NL80211_BAND_5GHZ) sband = &mphy->sband_5g.sband; else @@ -408,40 +429,7 @@ int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb) void mt7615_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps) { } - -void mt7615_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid, - struct mt76_queue_entry *e) -{ - if (!e->txwi) { - dev_kfree_skb_any(e->skb); - return; - } - - /* error path */ - if (e->skb == DMA_DUMMY_DATA) { - struct mt76_txwi_cache *t; - struct mt7615_dev *dev; - struct mt7615_txp_common *txp; - u16 token; - - dev = container_of(mdev, struct mt7615_dev, mt76); - txp = mt7615_txwi_to_txp(mdev, e->txwi); - - if (is_mt7615(&dev->mt76)) - token = le16_to_cpu(txp->fw.token); - else - token = le16_to_cpu(txp->hw.msdu_id[0]) & - ~MT_MSDU_ID_VALID; - - spin_lock_bh(&dev->token_lock); - t = idr_remove(&dev->token, token); - spin_unlock_bh(&dev->token_lock); - e->skb = t ? t->skb : NULL; - } - - if (e->skb) - mt76_tx_complete_skb(mdev, e->skb); -} +EXPORT_SYMBOL_GPL(mt7615_sta_ps); static u16 mt7615_mac_tx_rate_val(struct mt7615_dev *dev, @@ -512,11 +500,12 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi, struct ieee80211_vif *vif = info->control.vif; struct mt76_phy *mphy = &dev->mphy; bool ext_phy = info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY; + bool is_usb = mt76_is_usb(&dev->mt76); int tx_count = 8; u8 fc_type, fc_stype, p_fmt, q_idx, omac_idx = 0, wmm_idx = 0; __le16 fc = hdr->frame_control; + u32 val, sz_txd = is_usb ? MT_USB_TXD_SIZE : MT_TXD_SIZE; u16 seqno = 0; - u32 val; if (vif) { struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; @@ -540,7 +529,7 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi, if (ieee80211_is_data(fc) || ieee80211_is_bufferable_mmpdu(fc)) { q_idx = wmm_idx * MT7615_MAX_WMM_SETS + skb_get_queue_mapping(skb); - p_fmt = MT_TX_TYPE_CT; + p_fmt = is_usb ? MT_TX_TYPE_SF : MT_TX_TYPE_CT; } else if (beacon) { if (ext_phy) q_idx = MT_LMAC_BCN1; @@ -552,10 +541,10 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi, q_idx = MT_LMAC_ALTX1; else q_idx = MT_LMAC_ALTX0; - p_fmt = MT_TX_TYPE_CT; + p_fmt = is_usb ? 
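mt7615_get_status_freq_info() above falls back to the RXD channel number whenever the radio may be off-channel (scan, scheduled scan, remain-on-channel), since the configured chandef is then meaningless for the received frame. A simplified user-space restatement of that channel-to-frequency mapping; the in-kernel ieee80211_channel_to_frequency() covers more bands and edge cases.

    #include <stdio.h>

    static int chan_to_freq_mhz(int ch)
    {
        if (ch <= 0)
            return -1;
        if (ch <= 14)                       /* 2.4 GHz band, as in the hunk */
            return ch == 14 ? 2484 : 2407 + ch * 5;
        return 5000 + ch * 5;               /* 5 GHz band */
    }

    int main(void)
    {
        printf("ch 6 -> %d MHz, ch 36 -> %d MHz\n",
               chan_to_freq_mhz(6), chan_to_freq_mhz(36));
        return 0;
    }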
MT_TX_TYPE_SF : MT_TX_TYPE_CT; } - val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) | + val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + sz_txd) | FIELD_PREP(MT_TXD0_P_IDX, MT_TX_PORT_IDX_LMAC) | FIELD_PREP(MT_TXD0_Q_IDX, q_idx); txwi[0] = cpu_to_le32(val); @@ -621,8 +610,11 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi, } if (!ieee80211_is_beacon(fc)) { - val = MT_TXD5_TX_STATUS_HOST | MT_TXD5_SW_POWER_MGMT | - FIELD_PREP(MT_TXD5_PID, pid); + struct ieee80211_hw *hw = mt76_hw(dev); + + val = MT_TXD5_TX_STATUS_HOST | FIELD_PREP(MT_TXD5_PID, pid); + if (!ieee80211_hw_check(hw, SUPPORTS_PS)) + val |= MT_TXD5_SW_POWER_MGMT; txwi[5] = cpu_to_le32(val); } else { txwi[5] = 0; @@ -648,10 +640,15 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi, txwi[3] |= cpu_to_le32(MT_TXD3_NO_ACK); txwi[7] = FIELD_PREP(MT_TXD7_TYPE, fc_type) | - FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype); + FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype) | + FIELD_PREP(MT_TXD7_SPE_IDX, 0x18); + if (is_usb) + txwi[8] = FIELD_PREP(MT_TXD8_L_TYPE, fc_type) | + FIELD_PREP(MT_TXD8_L_SUB_TYPE, fc_stype); return 0; } +EXPORT_SYMBOL_GPL(mt7615_mac_write_txwi); static void mt7615_txp_skb_unmap_fw(struct mt76_dev *dev, struct mt7615_fw_txp *txp) @@ -666,24 +663,27 @@ mt7615_txp_skb_unmap_fw(struct mt76_dev *dev, struct mt7615_fw_txp *txp) static void mt7615_txp_skb_unmap_hw(struct mt76_dev *dev, struct mt7615_hw_txp *txp) { + u32 last_mask; int i; + last_mask = is_mt7663(dev) ? MT_TXD_LEN_LAST : MT_TXD_LEN_MSDU_LAST; + for (i = 0; i < ARRAY_SIZE(txp->ptr); i++) { struct mt7615_txp_ptr *ptr = &txp->ptr[i]; bool last; u16 len; len = le16_to_cpu(ptr->len0); - last = len & MT_TXD_LEN_MSDU_LAST; - len &= ~MT_TXD_LEN_MSDU_LAST; + last = len & last_mask; + len &= MT_TXD_LEN_MASK; dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf0), len, DMA_TO_DEVICE); if (last) break; len = le16_to_cpu(ptr->len1); - last = len & MT_TXD_LEN_MSDU_LAST; - len &= ~MT_TXD_LEN_MSDU_LAST; + last = len & last_mask; + len &= MT_TXD_LEN_MASK; dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf1), len, DMA_TO_DEVICE); if (last) @@ -702,11 +702,7 @@ void mt7615_txp_skb_unmap(struct mt76_dev *dev, else mt7615_txp_skb_unmap_hw(dev, &txp->hw); } - -static u32 mt7615_mac_wtbl_addr(struct mt7615_dev *dev, int wcid) -{ - return MT_WTBL_BASE(dev) + wcid * MT_WTBL_ENTRY_SIZE; -} +EXPORT_SYMBOL_GPL(mt7615_txp_skb_unmap); bool mt7615_mac_wtbl_update(struct mt7615_dev *dev, int idx, u32 mask) { @@ -734,22 +730,20 @@ void mt7615_mac_sta_poll(struct mt7615_dev *dev) struct ieee80211_sta *sta; struct mt7615_sta *msta; u32 addr, tx_time[4], rx_time[4]; + struct list_head sta_poll_list; int i; - rcu_read_lock(); + INIT_LIST_HEAD(&sta_poll_list); + spin_lock_bh(&dev->sta_poll_lock); + list_splice_init(&dev->sta_poll_list, &sta_poll_list); + spin_unlock_bh(&dev->sta_poll_lock); - while (true) { + while (!list_empty(&sta_poll_list)) { bool clear = false; - spin_lock_bh(&dev->sta_poll_lock); - if (list_empty(&dev->sta_poll_list)) { - spin_unlock_bh(&dev->sta_poll_lock); - break; - } - msta = list_first_entry(&dev->sta_poll_list, - struct mt7615_sta, poll_list); + msta = list_first_entry(&sta_poll_list, struct mt7615_sta, + poll_list); list_del_init(&msta->poll_list); - spin_unlock_bh(&dev->sta_poll_lock); addr = mt7615_mac_wtbl_addr(dev, msta->wcid.idx) + 19 * 4; @@ -789,30 +783,22 @@ void mt7615_mac_sta_poll(struct mt7615_dev *dev) rx_cur); } } - - rcu_read_unlock(); } +EXPORT_SYMBOL_GPL(mt7615_mac_sta_poll); -void mt7615_mac_set_rates(struct mt7615_phy *phy, struct 
mt7615_sta *sta, - struct ieee80211_tx_rate *probe_rate, - struct ieee80211_tx_rate *rates) +static void +mt7615_mac_update_rate_desc(struct mt7615_phy *phy, struct mt7615_sta *sta, + struct ieee80211_tx_rate *probe_rate, + struct ieee80211_tx_rate *rates, + struct mt7615_rate_desc *rd) { struct mt7615_dev *dev = phy->dev; struct mt76_phy *mphy = phy->mt76; struct ieee80211_tx_rate *ref; - int wcid = sta->wcid.idx; - u32 addr = mt7615_mac_wtbl_addr(dev, wcid); - bool stbc = false; + bool rateset, stbc = false; int n_rates = sta->n_rates; - u8 bw, bw_prev, bw_idx = 0; - u16 val[4]; - u16 probe_val; - u32 w5, w27; - bool rateset; - int i, k; - - if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000)) - return; + u8 bw, bw_prev; + int i, j; for (i = n_rates; i < 4; i++) rates[i] = rates[n_rates - 1]; @@ -840,10 +826,10 @@ void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta, if ((ref->flags ^ rates[i].flags) & IEEE80211_TX_RC_SHORT_GI) rates[i].flags ^= IEEE80211_TX_RC_SHORT_GI; - for (k = 0; k < i; k++) { - if (rates[i].idx != rates[k].idx) + for (j = 0; j < i; j++) { + if (rates[i].idx != rates[j].idx) continue; - if ((rates[i].flags ^ rates[k].flags) & + if ((rates[i].flags ^ rates[j].flags) & (IEEE80211_TX_RC_40_MHZ_WIDTH | IEEE80211_TX_RC_80_MHZ_WIDTH | IEEE80211_TX_RC_160_MHZ_WIDTH)) @@ -856,65 +842,114 @@ void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta, } } - val[0] = mt7615_mac_tx_rate_val(dev, mphy, &rates[0], stbc, &bw); + rd->val[0] = mt7615_mac_tx_rate_val(dev, mphy, &rates[0], stbc, &bw); bw_prev = bw; if (probe_rate) { - probe_val = mt7615_mac_tx_rate_val(dev, mphy, probe_rate, - stbc, &bw); + rd->probe_val = mt7615_mac_tx_rate_val(dev, mphy, probe_rate, + stbc, &bw); if (bw) - bw_idx = 1; + rd->bw_idx = 1; else bw_prev = 0; } else { - probe_val = val[0]; + rd->probe_val = rd->val[0]; } - val[1] = mt7615_mac_tx_rate_val(dev, mphy, &rates[1], stbc, &bw); + rd->val[1] = mt7615_mac_tx_rate_val(dev, mphy, &rates[1], stbc, &bw); if (bw_prev) { - bw_idx = 3; + rd->bw_idx = 3; bw_prev = bw; } - val[2] = mt7615_mac_tx_rate_val(dev, mphy, &rates[2], stbc, &bw); + rd->val[2] = mt7615_mac_tx_rate_val(dev, mphy, &rates[2], stbc, &bw); if (bw_prev) { - bw_idx = 5; + rd->bw_idx = 5; bw_prev = bw; } - val[3] = mt7615_mac_tx_rate_val(dev, mphy, &rates[3], stbc, &bw); + rd->val[3] = mt7615_mac_tx_rate_val(dev, mphy, &rates[3], stbc, &bw); if (bw_prev) - bw_idx = 7; + rd->bw_idx = 7; + + rd->rateset = rateset; + rd->bw = bw; +} + +static int +mt7615_mac_queue_rate_update(struct mt7615_phy *phy, struct mt7615_sta *sta, + struct ieee80211_tx_rate *probe_rate, + struct ieee80211_tx_rate *rates) +{ + struct mt7615_dev *dev = phy->dev; + struct mt7615_wtbl_desc *wd; + + wd = kzalloc(sizeof(*wd), GFP_ATOMIC); + if (!wd) + return -ENOMEM; + + wd->type = MT7615_WTBL_RATE_DESC; + wd->sta = sta; + + mt7615_mac_update_rate_desc(phy, sta, probe_rate, rates, + &wd->rate); + list_add_tail(&wd->node, &dev->wd_head); + queue_work(dev->mt76.usb.wq, &dev->wtbl_work); + + return 0; +} + +void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta, + struct ieee80211_tx_rate *probe_rate, + struct ieee80211_tx_rate *rates) +{ + int wcid = sta->wcid.idx, n_rates = sta->n_rates; + struct mt7615_dev *dev = phy->dev; + struct mt7615_rate_desc rd; + u32 w5, w27, addr; + + if (mt76_is_usb(&dev->mt76)) { + mt7615_mac_queue_rate_update(phy, sta, probe_rate, rates); + return; + } + + if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000)) + 
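The USB branch of mt7615_mac_set_rates() above cannot program the WTBL directly: USB register access sleeps, while the rate update may run in atomic context (hence the GFP_ATOMIC allocation). It therefore snapshots a mt7615_rate_desc and hands it to the wtbl worker. A minimal user-space sketch of that producer/consumer handoff; all names below are invented for the demo.

    #include <stdlib.h>
    #include <string.h>

    struct rate_desc { unsigned short val[4]; unsigned char bw, bw_idx; };

    struct wtbl_desc {
        struct wtbl_desc *next;
        struct rate_desc rate;
    };

    static struct wtbl_desc *wd_head;

    /* atomic context: snapshot only, no blocking I/O */
    static int queue_rate_update(const struct rate_desc *rd)
    {
        struct wtbl_desc *wd = calloc(1, sizeof(*wd));

        if (!wd)
            return -1;
        memcpy(&wd->rate, rd, sizeof(*rd));
        wd->next = wd_head;
        wd_head = wd;       /* the driver uses list_add_tail + queue_work */
        return 0;
    }

    /* process context: here the real driver issues the blocking
     * WTBL register writes for each queued descriptor */
    static void wtbl_work(void)
    {
        while (wd_head) {
            struct wtbl_desc *wd = wd_head;

            wd_head = wd->next;
            free(wd);
        }
    }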
return; + memset(&rd, 0, sizeof(struct mt7615_rate_desc)); + mt7615_mac_update_rate_desc(phy, sta, probe_rate, rates, &rd); + + addr = mt7615_mac_wtbl_addr(dev, wcid); w27 = mt76_rr(dev, addr + 27 * 4); w27 &= ~MT_WTBL_W27_CC_BW_SEL; - w27 |= FIELD_PREP(MT_WTBL_W27_CC_BW_SEL, bw); + w27 |= FIELD_PREP(MT_WTBL_W27_CC_BW_SEL, rd.bw); w5 = mt76_rr(dev, addr + 5 * 4); w5 &= ~(MT_WTBL_W5_BW_CAP | MT_WTBL_W5_CHANGE_BW_RATE | MT_WTBL_W5_MPDU_OK_COUNT | MT_WTBL_W5_MPDU_FAIL_COUNT | MT_WTBL_W5_RATE_IDX); - w5 |= FIELD_PREP(MT_WTBL_W5_BW_CAP, bw) | - FIELD_PREP(MT_WTBL_W5_CHANGE_BW_RATE, bw_idx ? bw_idx - 1 : 7); + w5 |= FIELD_PREP(MT_WTBL_W5_BW_CAP, rd.bw) | + FIELD_PREP(MT_WTBL_W5_CHANGE_BW_RATE, + rd.bw_idx ? rd.bw_idx - 1 : 7); mt76_wr(dev, MT_WTBL_RIUCR0, w5); mt76_wr(dev, MT_WTBL_RIUCR1, - FIELD_PREP(MT_WTBL_RIUCR1_RATE0, probe_val) | - FIELD_PREP(MT_WTBL_RIUCR1_RATE1, val[0]) | - FIELD_PREP(MT_WTBL_RIUCR1_RATE2_LO, val[1])); + FIELD_PREP(MT_WTBL_RIUCR1_RATE0, rd.probe_val) | + FIELD_PREP(MT_WTBL_RIUCR1_RATE1, rd.val[0]) | + FIELD_PREP(MT_WTBL_RIUCR1_RATE2_LO, rd.val[1])); mt76_wr(dev, MT_WTBL_RIUCR2, - FIELD_PREP(MT_WTBL_RIUCR2_RATE2_HI, val[1] >> 8) | - FIELD_PREP(MT_WTBL_RIUCR2_RATE3, val[1]) | - FIELD_PREP(MT_WTBL_RIUCR2_RATE4, val[2]) | - FIELD_PREP(MT_WTBL_RIUCR2_RATE5_LO, val[2])); + FIELD_PREP(MT_WTBL_RIUCR2_RATE2_HI, rd.val[1] >> 8) | + FIELD_PREP(MT_WTBL_RIUCR2_RATE3, rd.val[1]) | + FIELD_PREP(MT_WTBL_RIUCR2_RATE4, rd.val[2]) | + FIELD_PREP(MT_WTBL_RIUCR2_RATE5_LO, rd.val[2])); mt76_wr(dev, MT_WTBL_RIUCR3, - FIELD_PREP(MT_WTBL_RIUCR3_RATE5_HI, val[2] >> 4) | - FIELD_PREP(MT_WTBL_RIUCR3_RATE6, val[3]) | - FIELD_PREP(MT_WTBL_RIUCR3_RATE7, val[3])); + FIELD_PREP(MT_WTBL_RIUCR3_RATE5_HI, rd.val[2] >> 4) | + FIELD_PREP(MT_WTBL_RIUCR3_RATE6, rd.val[3]) | + FIELD_PREP(MT_WTBL_RIUCR3_RATE7, rd.val[3])); mt76_wr(dev, MT_WTBL_UPDATE, FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, wcid) | @@ -924,7 +959,8 @@ void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta, mt76_wr(dev, addr + 27 * 4, w27); mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */ - sta->rate_set_tsf = (mt76_rr(dev, MT_LPON_UTTR0) & ~BIT(0)) | rateset; + sta->rate_set_tsf = mt76_rr(dev, MT_LPON_UTTR0) & ~BIT(0); + sta->rate_set_tsf |= rd.rateset; if (!(sta->wcid.tx_info & MT_WCID_TX_INFO_SET)) mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000); @@ -932,59 +968,33 @@ void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta, sta->rate_count = 2 * MT7615_RATE_RETRY * n_rates; sta->wcid.tx_info |= MT_WCID_TX_INFO_SET; } +EXPORT_SYMBOL_GPL(mt7615_mac_set_rates); -static enum mt7615_cipher_type -mt7615_mac_get_cipher(int cipher) -{ - switch (cipher) { - case WLAN_CIPHER_SUITE_WEP40: - return MT_CIPHER_WEP40; - case WLAN_CIPHER_SUITE_WEP104: - return MT_CIPHER_WEP104; - case WLAN_CIPHER_SUITE_TKIP: - return MT_CIPHER_TKIP; - case WLAN_CIPHER_SUITE_AES_CMAC: - return MT_CIPHER_BIP_CMAC_128; - case WLAN_CIPHER_SUITE_CCMP: - return MT_CIPHER_AES_CCMP; - case WLAN_CIPHER_SUITE_CCMP_256: - return MT_CIPHER_CCMP_256; - case WLAN_CIPHER_SUITE_GCMP: - return MT_CIPHER_GCMP; - case WLAN_CIPHER_SUITE_GCMP_256: - return MT_CIPHER_GCMP_256; - case WLAN_CIPHER_SUITE_SMS4: - return MT_CIPHER_WAPI; - default: - return MT_CIPHER_NONE; - } -} - -static int -mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid, - struct ieee80211_key_conf *key, - enum mt7615_cipher_type cipher, - enum set_key_cmd cmd) +int mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, + struct mt76_wcid *wcid, + u8 
*key, u8 keylen, + enum mt7615_cipher_type cipher, + enum set_key_cmd cmd) { u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx) + 30 * 4; u8 data[32] = {}; - if (key->keylen > sizeof(data)) + if (keylen > sizeof(data)) return -EINVAL; mt76_rr_copy(dev, addr, data, sizeof(data)); if (cmd == SET_KEY) { if (cipher == MT_CIPHER_TKIP) { /* Rx/Tx MIC keys are swapped */ - memcpy(data + 16, key->key + 24, 8); - memcpy(data + 24, key->key + 16, 8); + memcpy(data + 16, key + 24, 8); + memcpy(data + 24, key + 16, 8); } if (cipher != MT_CIPHER_BIP_CMAC_128 && wcid->cipher) memmove(data + 16, data, 16); if (cipher != MT_CIPHER_BIP_CMAC_128 || !wcid->cipher) - memcpy(data, key->key, key->keylen); + memcpy(data, key, keylen); else if (cipher == MT_CIPHER_BIP_CMAC_128) - memcpy(data + 16, key->key, 16); + memcpy(data + 16, key, 16); } else { if (wcid->cipher & ~BIT(cipher)) { if (cipher != MT_CIPHER_BIP_CMAC_128) @@ -998,11 +1008,12 @@ mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid, return 0; } +EXPORT_SYMBOL_GPL(mt7615_mac_wtbl_update_key); -static int -mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid, - enum mt7615_cipher_type cipher, int keyidx, - enum set_key_cmd cmd) +int mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, + struct mt76_wcid *wcid, + enum mt7615_cipher_type cipher, + int keyidx, enum set_key_cmd cmd) { u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx), w0, w1; @@ -1034,11 +1045,12 @@ mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid, return 0; } +EXPORT_SYMBOL_GPL(mt7615_mac_wtbl_update_pk); -static void -mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev, struct mt76_wcid *wcid, - enum mt7615_cipher_type cipher, - enum set_key_cmd cmd) +void mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev, + struct mt76_wcid *wcid, + enum mt7615_cipher_type cipher, + enum set_key_cmd cmd) { u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx); @@ -1056,6 +1068,7 @@ mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev, struct mt76_wcid *wcid, mt76_clear(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE); } } +EXPORT_SYMBOL_GPL(mt7615_mac_wtbl_update_cipher); int mt7615_mac_wtbl_set_key(struct mt7615_dev *dev, struct mt76_wcid *wcid, @@ -1072,7 +1085,8 @@ int mt7615_mac_wtbl_set_key(struct mt7615_dev *dev, spin_lock_bh(&dev->mt76.lock); mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, cmd); - err = mt7615_mac_wtbl_update_key(dev, wcid, key, cipher, cmd); + err = mt7615_mac_wtbl_update_key(dev, wcid, key->key, key->keylen, + cipher, cmd); if (err < 0) goto out; @@ -1092,136 +1106,6 @@ out: return err; } -static void -mt7615_write_hw_txp(struct mt7615_dev *dev, struct mt76_tx_info *tx_info, - void *txp_ptr, u32 id) -{ - struct mt7615_hw_txp *txp = txp_ptr; - struct mt7615_txp_ptr *ptr = &txp->ptr[0]; - int nbuf = tx_info->nbuf - 1; - int i; - - tx_info->buf[0].len = MT_TXD_SIZE + sizeof(*txp); - tx_info->nbuf = 1; - - txp->msdu_id[0] = cpu_to_le16(id | MT_MSDU_ID_VALID); - - for (i = 0; i < nbuf; i++) { - u32 addr = tx_info->buf[i + 1].addr; - u16 len = tx_info->buf[i + 1].len; - - if (i == nbuf - 1) - len |= MT_TXD_LEN_MSDU_LAST | - MT_TXD_LEN_AMSDU_LAST; - - if (i & 1) { - ptr->buf1 = cpu_to_le32(addr); - ptr->len1 = cpu_to_le16(len); - ptr++; - } else { - ptr->buf0 = cpu_to_le32(addr); - ptr->len0 = cpu_to_le16(len); - } - } -} - -static void -mt7615_write_fw_txp(struct mt7615_dev *dev, struct mt76_tx_info *tx_info, - void *txp_ptr, u32 id) -{ - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data; - struct ieee80211_tx_info 
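The TKIP branch of mt7615_mac_wtbl_update_key() above reorders the key material: mac80211 delivers a 32-byte TKIP key as TK | Tx-MIC | Rx-MIC, while the WTBL slot wants the two MIC halves swapped. An isolated sketch of just that reordering; the explicit TK copy is part of the illustration rather than a literal transcription of the hunk.

    #include <stdint.h>
    #include <string.h>

    static void wtbl_pack_tkip(uint8_t wtbl[32], const uint8_t key[32])
    {
        memcpy(wtbl, key, 16);           /* temporal key as-is */
        memcpy(wtbl + 16, key + 24, 8);  /* Rx MIC key first ... */
        memcpy(wtbl + 24, key + 16, 8);  /* ... then Tx MIC key */
    }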
*info = IEEE80211_SKB_CB(tx_info->skb); - struct ieee80211_key_conf *key = info->control.hw_key; - struct ieee80211_vif *vif = info->control.vif; - struct mt7615_fw_txp *txp = txp_ptr; - int nbuf = tx_info->nbuf - 1; - int i; - - for (i = 0; i < nbuf; i++) { - txp->buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr); - txp->len[i] = cpu_to_le16(tx_info->buf[i + 1].len); - } - txp->nbuf = nbuf; - - /* pass partial skb header to fw */ - tx_info->buf[0].len = MT_TXD_SIZE + sizeof(*txp); - tx_info->buf[1].len = MT_CT_PARSE_LEN; - tx_info->nbuf = MT_CT_DMA_BUF_NUM; - - txp->flags = cpu_to_le16(MT_CT_INFO_APPLY_TXD); - - if (!key) - txp->flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME); - - if (ieee80211_is_mgmt(hdr->frame_control)) - txp->flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME); - - if (vif) { - struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; - - txp->bss_idx = mvif->idx; - } - - txp->token = cpu_to_le16(id); - txp->rept_wds_wcid = 0xff; -} - -int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, - enum mt76_txq_id qid, struct mt76_wcid *wcid, - struct ieee80211_sta *sta, - struct mt76_tx_info *tx_info) -{ - struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); - struct mt7615_sta *msta = container_of(wcid, struct mt7615_sta, wcid); - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb); - struct ieee80211_key_conf *key = info->control.hw_key; - int pid, id; - u8 *txwi = (u8 *)txwi_ptr; - struct mt76_txwi_cache *t; - void *txp; - - if (!wcid) - wcid = &dev->mt76.global_wcid; - - pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb); - - if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) { - struct mt7615_phy *phy = &dev->phy; - - if ((info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY) && mdev->phy2) - phy = mdev->phy2->priv; - - spin_lock_bh(&dev->mt76.lock); - mt7615_mac_set_rates(phy, msta, &info->control.rates[0], - msta->rates); - msta->rate_probe = true; - spin_unlock_bh(&dev->mt76.lock); - } - - t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size); - t->skb = tx_info->skb; - - spin_lock_bh(&dev->token_lock); - id = idr_alloc(&dev->token, t, 0, MT7615_TOKEN_SIZE, GFP_ATOMIC); - spin_unlock_bh(&dev->token_lock); - if (id < 0) - return id; - - mt7615_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, sta, - pid, key, false); - - txp = txwi + MT_TXD_SIZE; - memset(txp, 0, sizeof(struct mt7615_txp_common)); - if (is_mt7615(&dev->mt76)) - mt7615_write_fw_txp(dev, tx_info, txp, id); - else - mt7615_write_hw_txp(dev, tx_info, txp, id); - - tx_info->skb = DMA_DUMMY_DATA; - - return 0; -} - static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta, struct ieee80211_tx_info *info, __le32 *txs_data) { @@ -1266,7 +1150,7 @@ static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta, if (ampdu || (info->flags & IEEE80211_TX_CTL_AMPDU)) info->flags |= IEEE80211_TX_STAT_AMPDU | IEEE80211_TX_CTL_AMPDU; - first_idx = max_t(int, 0, last_idx - (count + 1) / MT7615_RATE_RETRY); + first_idx = max_t(int, 0, last_idx - (count - 1) / MT7615_RATE_RETRY); if (fixed_rate && !probe) { info->status.rates[0].count = count; @@ -1399,7 +1283,7 @@ static bool mt7615_mac_add_txs_skb(struct mt7615_dev *dev, return !!skb; } -void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data) +static void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data) { struct ieee80211_tx_info info = {}; struct ieee80211_sta *sta = NULL; @@ -1419,7 +1303,7 @@ void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data) if (pid == MT_PACKET_ID_NO_ACK) return; - 
if (wcidx >= ARRAY_SIZE(dev->mt76.wcid)) + if (wcidx >= MT7615_WTBL_SIZE) return; rcu_read_lock(); @@ -1476,7 +1360,7 @@ mt7615_mac_tx_free_token(struct mt7615_dev *dev, u16 token) mt76_put_txwi(mdev, txwi); } -void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb) +static void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb) { struct mt7615_tx_free *free = (struct mt7615_tx_free *)skb->data; u8 i, count; @@ -1497,58 +1381,118 @@ void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb) dev_kfree_skb(skb); } +void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, + struct sk_buff *skb) +{ + struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); + __le32 *rxd = (__le32 *)skb->data; + __le32 *end = (__le32 *)&skb->data[skb->len]; + enum rx_pkt_type type; + u16 flag; + + type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0])); + flag = FIELD_GET(MT_RXD0_PKT_FLAG, le32_to_cpu(rxd[0])); + if (type == PKT_TYPE_RX_EVENT && flag == 0x1) + type = PKT_TYPE_NORMAL_MCU; + + switch (type) { + case PKT_TYPE_TXS: + for (rxd++; rxd + 7 <= end; rxd += 7) + mt7615_mac_add_txs(dev, rxd); + dev_kfree_skb(skb); + break; + case PKT_TYPE_TXRX_NOTIFY: + mt7615_mac_tx_free(dev, skb); + break; + case PKT_TYPE_RX_EVENT: + mt7615_mcu_rx_event(dev, skb); + break; + case PKT_TYPE_NORMAL_MCU: + case PKT_TYPE_NORMAL: + if (!mt7615_mac_fill_rx(dev, skb)) { + mt76_rx(&dev->mt76, q, skb); + return; + } + /* fall through */ + default: + dev_kfree_skb(skb); + break; + } +} +EXPORT_SYMBOL_GPL(mt7615_queue_rx_skb); + static void -mt7615_mac_set_default_sensitivity(struct mt7615_phy *phy) +mt7615_mac_set_sensitivity(struct mt7615_phy *phy, int val, bool ofdm) { struct mt7615_dev *dev = phy->dev; bool ext_phy = phy != &dev->phy; - mt76_rmw(dev, MT_WF_PHY_MIN_PRI_PWR(ext_phy), - MT_WF_PHY_PD_OFDM_MASK(ext_phy), - MT_WF_PHY_PD_OFDM(ext_phy, 0x13c)); - mt76_rmw(dev, MT_WF_PHY_RXTD_CCK_PD(ext_phy), - MT_WF_PHY_PD_CCK_MASK(ext_phy), - MT_WF_PHY_PD_CCK(ext_phy, 0x92)); + if (is_mt7663(&dev->mt76)) { + if (ofdm) + mt76_rmw(dev, MT7663_WF_PHY_MIN_PRI_PWR(ext_phy), + MT_WF_PHY_PD_OFDM_MASK(0), + MT_WF_PHY_PD_OFDM(0, val)); + else + mt76_rmw(dev, MT7663_WF_PHY_RXTD_CCK_PD(ext_phy), + MT_WF_PHY_PD_CCK_MASK(ext_phy), + MT_WF_PHY_PD_CCK(ext_phy, val)); + return; + } + + if (ofdm) + mt76_rmw(dev, MT_WF_PHY_MIN_PRI_PWR(ext_phy), + MT_WF_PHY_PD_OFDM_MASK(ext_phy), + MT_WF_PHY_PD_OFDM(ext_phy, val)); + else + mt76_rmw(dev, MT_WF_PHY_RXTD_CCK_PD(ext_phy), + MT_WF_PHY_PD_CCK_MASK(ext_phy), + MT_WF_PHY_PD_CCK(ext_phy, val)); +} + +static void +mt7615_mac_set_default_sensitivity(struct mt7615_phy *phy) +{ + /* ofdm */ + mt7615_mac_set_sensitivity(phy, 0x13c, true); + /* cck */ + mt7615_mac_set_sensitivity(phy, 0x92, false); phy->ofdm_sensitivity = -98; phy->cck_sensitivity = -110; phy->last_cca_adj = jiffies; } -void mt7615_mac_set_scs(struct mt7615_dev *dev, bool enable) +void mt7615_mac_set_scs(struct mt7615_phy *phy, bool enable) { - struct mt7615_phy *ext_phy; + struct mt7615_dev *dev = phy->dev; + bool ext_phy = phy != &dev->phy; + u32 reg, mask; mutex_lock(&dev->mt76.mutex); - if (dev->scs_en == enable) + if (phy->scs_en == enable) goto out; - if (is_mt7663(&dev->mt76)) - goto out; + if (is_mt7663(&dev->mt76)) { + reg = MT7663_WF_PHY_MIN_PRI_PWR(ext_phy); + mask = MT_WF_PHY_PD_BLK(0); + } else { + reg = MT_WF_PHY_MIN_PRI_PWR(ext_phy); + mask = MT_WF_PHY_PD_BLK(ext_phy); + } if (enable) { - mt76_set(dev, MT_WF_PHY_MIN_PRI_PWR(0), - MT_WF_PHY_PD_BLK(0)); - mt76_set(dev, 
MT_WF_PHY_MIN_PRI_PWR(1), - MT_WF_PHY_PD_BLK(1)); + mt76_set(dev, reg, mask); if (is_mt7622(&dev->mt76)) { - mt76_set(dev, MT_MIB_M0_MISC_CR, 0x7 << 8); - mt76_set(dev, MT_MIB_M0_MISC_CR, 0x7); + mt76_set(dev, MT_MIB_M0_MISC_CR(0), 0x7 << 8); + mt76_set(dev, MT_MIB_M0_MISC_CR(0), 0x7); } } else { - mt76_clear(dev, MT_WF_PHY_MIN_PRI_PWR(0), - MT_WF_PHY_PD_BLK(0)); - mt76_clear(dev, MT_WF_PHY_MIN_PRI_PWR(1), - MT_WF_PHY_PD_BLK(1)); + mt76_clear(dev, reg, mask); } - mt7615_mac_set_default_sensitivity(&dev->phy); - ext_phy = mt7615_ext_phy(dev); - if (ext_phy) - mt7615_mac_set_default_sensitivity(ext_phy); - - dev->scs_en = enable; + mt7615_mac_set_default_sensitivity(phy); + phy->scs_en = enable; out: mutex_unlock(&dev->mt76.mutex); @@ -1556,10 +1500,12 @@ out: void mt7615_mac_enable_nf(struct mt7615_dev *dev, bool ext_phy) { - u32 rxtd; + u32 rxtd, reg; if (is_mt7663(&dev->mt76)) - return; + reg = MT7663_WF_PHY_R0_PHYMUX_5; + else + reg = MT_WF_PHY_R0_PHYMUX_5(ext_phy); if (ext_phy) rxtd = MT_WF_PHY_RXTD2(10); @@ -1567,15 +1513,21 @@ void mt7615_mac_enable_nf(struct mt7615_dev *dev, bool ext_phy) rxtd = MT_WF_PHY_RXTD(12); mt76_set(dev, rxtd, BIT(18) | BIT(29)); - mt76_set(dev, MT_WF_PHY_R0_PHYMUX_5(ext_phy), 0x5 << 12); + mt76_set(dev, reg, 0x5 << 12); } void mt7615_mac_cca_stats_reset(struct mt7615_phy *phy) { struct mt7615_dev *dev = phy->dev; bool ext_phy = phy != &dev->phy; - u32 reg = MT_WF_PHY_R0_PHYMUX_5(ext_phy); + u32 reg; + if (is_mt7663(&dev->mt76)) + reg = MT7663_WF_PHY_R0_PHYMUX_5; + else + reg = MT_WF_PHY_R0_PHYMUX_5(ext_phy); + + /* reset PD and MDRDY counters */ mt76_clear(dev, reg, GENMASK(22, 20)); mt76_set(dev, reg, BIT(22) | BIT(20)); } @@ -1627,19 +1579,9 @@ mt7615_mac_adjust_sensitivity(struct mt7615_phy *phy, } if (update) { - u16 val; + u16 val = ofdm ? *sensitivity * 2 + 512 : *sensitivity + 256; - if (ofdm) { - val = *sensitivity * 2 + 512; - mt76_rmw(dev, MT_WF_PHY_MIN_PRI_PWR(ext_phy), - MT_WF_PHY_PD_OFDM_MASK(ext_phy), - MT_WF_PHY_PD_OFDM(ext_phy, val)); - } else { - val = *sensitivity + 256; - mt76_rmw(dev, MT_WF_PHY_RXTD_CCK_PD(ext_phy), - MT_WF_PHY_PD_CCK_MASK(ext_phy), - MT_WF_PHY_PD_CCK(ext_phy, val)); - } + mt7615_mac_set_sensitivity(phy, val, ofdm); phy->last_cca_adj = jiffies; } } @@ -1653,14 +1595,20 @@ mt7615_mac_scs_check(struct mt7615_phy *phy) u32 mdrdy_cck, mdrdy_ofdm, pd_cck, pd_ofdm; bool ext_phy = phy != &dev->phy; - if (!dev->scs_en) + if (!phy->scs_en) return; - val = mt76_rr(dev, MT_WF_PHY_R0_PHYCTRL_STS0(ext_phy)); + if (is_mt7663(&dev->mt76)) + val = mt76_rr(dev, MT7663_WF_PHY_R0_PHYCTRL_STS0(ext_phy)); + else + val = mt76_rr(dev, MT_WF_PHY_R0_PHYCTRL_STS0(ext_phy)); pd_cck = FIELD_GET(MT_WF_PHYCTRL_STAT_PD_CCK, val); pd_ofdm = FIELD_GET(MT_WF_PHYCTRL_STAT_PD_OFDM, val); - val = mt76_rr(dev, MT_WF_PHY_R0_PHYCTRL_STS5(ext_phy)); + if (is_mt7663(&dev->mt76)) + val = mt76_rr(dev, MT7663_WF_PHY_R0_PHYCTRL_STS5(ext_phy)); + else + val = mt76_rr(dev, MT_WF_PHY_R0_PHYCTRL_STS5(ext_phy)); mdrdy_cck = FIELD_GET(MT_WF_PHYCTRL_STAT_MDRDY_CCK, val); mdrdy_ofdm = FIELD_GET(MT_WF_PHYCTRL_STAT_MDRDY_OFDM, val); @@ -1685,10 +1633,14 @@ static u8 mt7615_phy_get_nf(struct mt7615_dev *dev, int idx) { static const u8 nf_power[] = { 92, 89, 86, 83, 80, 75, 70, 65, 60, 55, 52 }; - u32 reg = idx ? MT_WF_PHY_RXTD2(17) : MT_WF_PHY_RXTD(20); - u32 val, sum = 0, n = 0; + u32 reg, val, sum = 0, n = 0; int i; + if (is_mt7663(&dev->mt76)) + reg = MT7663_WF_PHY_RXTD(20); + else + reg = idx ? 
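For the noise-floor estimate completed just below: the hardware exposes a histogram of samples per power level, and mt7615_phy_get_nf() reduces it to a weighted mean over the nf_power[] levels. A compact restatement; in the driver the sign handling is left to the caller.

    static const unsigned char nf_power[] = {
        92, 89, 86, 83, 80, 75, 70, 65, 60, 55, 52
    };

    static unsigned int avg_nf(const unsigned int cnt[11])
    {
        unsigned int sum = 0, n = 0;
        int i;

        for (i = 0; i < 11; i++) {
            sum += cnt[i] * nf_power[i];    /* level magnitude in dBm */
            n += cnt[i];
        }
        return n ? sum / n : 0;             /* caller applies the minus sign */
    }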
MT_WF_PHY_RXTD2(17) : MT_WF_PHY_RXTD(20); + for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) { val = mt76_rr(dev, reg); sum += val * nf_power[i]; @@ -1744,6 +1696,7 @@ void mt7615_update_channel(struct mt76_dev *mdev) /* reset obss airtime */ mt76_set(dev, MT_WF_RMAC_MIB_TIME0, MT_WF_RMAC_MIB_RXTIME_CLR); } +EXPORT_SYMBOL_GPL(mt7615_update_channel); static void mt7615_mac_update_mib_stats(struct mt7615_phy *phy) @@ -1751,64 +1704,71 @@ mt7615_mac_update_mib_stats(struct mt7615_phy *phy) struct mt7615_dev *dev = phy->dev; struct mib_stats *mib = &phy->mib; bool ext_phy = phy != &dev->phy; - int i; + int i, aggr; + u32 val, val2; memset(mib, 0, sizeof(*mib)); mib->fcs_err_cnt = mt76_get_field(dev, MT_MIB_SDR3(ext_phy), MT_MIB_SDR3_FCS_ERR_MASK); + val = mt76_get_field(dev, MT_MIB_SDR14(ext_phy), + MT_MIB_AMPDU_MPDU_COUNT); + if (val) { + val2 = mt76_get_field(dev, MT_MIB_SDR15(ext_phy), + MT_MIB_AMPDU_ACK_COUNT); + mib->aggr_per = 1000 * (val - val2) / val; + } + + aggr = ext_phy ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0; for (i = 0; i < 4; i++) { - u32 data, val, val2; - - val = mt76_get_field(dev, MT_MIB_MB_SDR1(ext_phy, i), - MT_MIB_ACK_FAIL_COUNT_MASK); - if (val > mib->ack_fail_cnt) - mib->ack_fail_cnt = val; - - val2 = mt76_rr(dev, MT_MIB_MB_SDR0(ext_phy, i)); - data = FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val2); - if (data > mib->rts_retries_cnt) { - mib->rts_cnt = FIELD_GET(MT_MIB_RTS_COUNT_MASK, val2); - mib->rts_retries_cnt = data; + val = mt76_rr(dev, MT_MIB_MB_SDR1(ext_phy, i)); + + val2 = FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val); + if (val2 > mib->ack_fail_cnt) + mib->ack_fail_cnt = val2; + + val2 = FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val); + if (val2 > mib->ba_miss_cnt) + mib->ba_miss_cnt = val2; + + val = mt76_rr(dev, MT_MIB_MB_SDR0(ext_phy, i)); + val2 = FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val); + if (val2 > mib->rts_retries_cnt) { + mib->rts_cnt = FIELD_GET(MT_MIB_RTS_COUNT_MASK, val); + mib->rts_retries_cnt = val2; } + + val = mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i)); + + dev->mt76.aggr_stats[aggr++] += val & 0xffff; + dev->mt76.aggr_stats[aggr++] += val >> 16; } } void mt7615_mac_work(struct work_struct *work) { - struct mt7615_dev *dev; - struct mt7615_phy *ext_phy; - int i, idx; + struct mt7615_phy *phy; + struct mt76_dev *mdev; - dev = (struct mt7615_dev *)container_of(work, struct mt76_dev, + phy = (struct mt7615_phy *)container_of(work, struct mt7615_phy, mac_work.work); + mdev = &phy->dev->mt76; - mutex_lock(&dev->mt76.mutex); - mt76_update_survey(&dev->mt76); - if (++dev->mac_work_count == 5) { - ext_phy = mt7615_ext_phy(dev); - - mt7615_mac_update_mib_stats(&dev->phy); - mt7615_mac_scs_check(&dev->phy); - if (ext_phy) { - mt7615_mac_update_mib_stats(ext_phy); - mt7615_mac_scs_check(ext_phy); - } + mutex_lock(&mdev->mutex); - dev->mac_work_count = 0; - } + mt76_update_survey(mdev); + if (++phy->mac_work_count == 5) { + phy->mac_work_count = 0; - for (i = 0, idx = 0; i < 4; i++) { - u32 val = mt76_rr(dev, MT_TX_AGG_CNT(i)); - - dev->mt76.aggr_stats[idx++] += val & 0xffff; - dev->mt76.aggr_stats[idx++] += val >> 16; + mt7615_mac_update_mib_stats(phy); + mt7615_mac_scs_check(phy); } - mutex_unlock(&dev->mt76.mutex); - mt76_tx_status_check(&dev->mt76, NULL, false); - ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work, + mutex_unlock(&mdev->mutex); + + mt76_tx_status_check(mdev, NULL, false); + ieee80211_queue_delayed_work(phy->mt76->hw, &phy->mac_work, MT7615_WATCHDOG_TIME); } @@ -1848,8 +1808,7 @@ mt7615_update_beacons(struct mt7615_dev *dev) 
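One detail worth calling out in mt7615_mac_update_mib_stats() above: aggr_per is kept in per-mille (1000 * failed / total), which is what lets the debugfs printer earlier in this patch show one decimal place with pure integer math.

    #include <stdio.h>

    static unsigned int aggr_per(unsigned int mpdu, unsigned int acked)
    {
        if (!mpdu)
            return 0;           /* the hunk skips the math when the count is 0 */
        return 1000 * (mpdu - acked) / mpdu;
    }

    int main(void)
    {
        unsigned int per = aggr_per(2048, 2013);

        printf("PER: %u.%u%%\n", per / 10, per % 10);   /* -> PER: 1.7% */
        return 0;
    }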
mt7615_update_vif_beacon, dev->mt76.phy2->hw); } -static void -mt7615_dma_reset(struct mt7615_dev *dev) +void mt7615_dma_reset(struct mt7615_dev *dev) { int i; @@ -1861,36 +1820,49 @@ mt7615_dma_reset(struct mt7615_dev *dev) for (i = 0; i < __MT_TXQ_MAX; i++) mt76_queue_tx_cleanup(dev, i, true); - for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++) + mt76_for_each_q_rx(&dev->mt76, i) { mt76_queue_rx_reset(dev, i); + } mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_RX_DMA_EN | MT_WPDMA_GLO_CFG_TX_DMA_EN | MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE); } +EXPORT_SYMBOL_GPL(mt7615_dma_reset); void mt7615_mac_reset_work(struct work_struct *work) { + struct mt7615_phy *phy2; + struct mt76_phy *ext_phy; struct mt7615_dev *dev; dev = container_of(work, struct mt7615_dev, reset_work); + ext_phy = dev->mt76.phy2; + phy2 = ext_phy ? ext_phy->priv : NULL; if (!(READ_ONCE(dev->reset_state) & MT_MCU_CMD_STOP_PDMA)) return; ieee80211_stop_queues(mt76_hw(dev)); - if (dev->mt76.phy2) - ieee80211_stop_queues(dev->mt76.phy2->hw); + if (ext_phy) + ieee80211_stop_queues(ext_phy->hw); set_bit(MT76_RESET, &dev->mphy.state); set_bit(MT76_MCU_RESET, &dev->mphy.state); wake_up(&dev->mt76.mcu.wait); - cancel_delayed_work_sync(&dev->mt76.mac_work); + cancel_delayed_work_sync(&dev->phy.mac_work); + del_timer_sync(&dev->phy.roc_timer); + cancel_work_sync(&dev->phy.roc_work); + if (phy2) { + cancel_delayed_work_sync(&phy2->mac_work); + del_timer_sync(&phy2->roc_timer); + cancel_work_sync(&phy2->roc_work); + } /* lock/unlock all queues to ensure that no tx is pending */ mt76_txq_schedule_all(&dev->mphy); - if (dev->mt76.phy2) - mt76_txq_schedule_all(dev->mt76.phy2); + if (ext_phy) + mt76_txq_schedule_all(ext_phy); tasklet_disable(&dev->mt76.tx_tasklet); napi_disable(&dev->mt76.napi[0]); @@ -1924,8 +1896,8 @@ void mt7615_mac_reset_work(struct work_struct *work) napi_schedule(&dev->mt76.napi[1]); ieee80211_wake_queues(mt76_hw(dev)); - if (dev->mt76.phy2) - ieee80211_wake_queues(dev->mt76.phy2->hw); + if (ext_phy) + ieee80211_wake_queues(ext_phy->hw); mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE); mt7615_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE); @@ -1934,8 +1906,12 @@ void mt7615_mac_reset_work(struct work_struct *work) mt7615_update_beacons(dev); - ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work, + ieee80211_queue_delayed_work(mt76_hw(dev), &dev->phy.mac_work, MT7615_WATCHDOG_TIME); + if (phy2) + ieee80211_queue_delayed_work(ext_phy->hw, &phy2->mac_work, + MT7615_WATCHDOG_TIME); + } static void mt7615_dfs_stop_radar_detector(struct mt7615_phy *phy) @@ -2031,6 +2007,9 @@ int mt7615_dfs_init_radar_detector(struct mt7615_phy *phy) bool ext_phy = phy != &dev->phy; int err; + if (is_mt7663(&dev->mt76)) + return 0; + if (dev->mt76.region == NL80211_DFS_UNSET) { phy->dfs_state = -1; if (phy->rdd_state) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.h b/drivers/net/wireless/mediatek/mt76/mt7615/mac.h index e0b89257db90..f0d4b29a52a2 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.h @@ -167,6 +167,10 @@ enum tx_phy_bandwidth { #define MT_TXD_SIZE (8 * 4) +#define MT_USB_TXD_SIZE (MT_TXD_SIZE + 8 * 4) +#define MT_USB_HDR_SIZE 4 +#define MT_USB_TAIL_SIZE 4 + #define MT_TXD0_P_IDX BIT(31) #define MT_TXD0_Q_IDX GENMASK(30, 26) #define MT_TXD0_UDP_TCP_SUM BIT(24) @@ -252,8 +256,11 @@ enum tx_phy_bandwidth { #define MT_MSDU_ID_VALID BIT(15) +#define MT_TXD_LEN_MASK GENMASK(11, 0) #define MT_TXD_LEN_MSDU_LAST BIT(14) #define 
MT_TXD_LEN_AMSDU_LAST BIT(15) +/* mt7663 */ +#define MT_TXD_LEN_LAST BIT(15) struct mt7615_txp_ptr { __le32 buf0; @@ -393,6 +400,33 @@ enum mt7615_cipher_type { MT_CIPHER_GCMP_256, }; +static inline enum mt7615_cipher_type +mt7615_mac_get_cipher(int cipher) +{ + switch (cipher) { + case WLAN_CIPHER_SUITE_WEP40: + return MT_CIPHER_WEP40; + case WLAN_CIPHER_SUITE_WEP104: + return MT_CIPHER_WEP104; + case WLAN_CIPHER_SUITE_TKIP: + return MT_CIPHER_TKIP; + case WLAN_CIPHER_SUITE_AES_CMAC: + return MT_CIPHER_BIP_CMAC_128; + case WLAN_CIPHER_SUITE_CCMP: + return MT_CIPHER_AES_CCMP; + case WLAN_CIPHER_SUITE_CCMP_256: + return MT_CIPHER_CCMP_256; + case WLAN_CIPHER_SUITE_GCMP: + return MT_CIPHER_GCMP; + case WLAN_CIPHER_SUITE_GCMP_256: + return MT_CIPHER_GCMP_256; + case WLAN_CIPHER_SUITE_SMS4: + return MT_CIPHER_WAPI; + default: + return MT_CIPHER_NONE; + } +} + static inline struct mt7615_txp_common * mt7615_txwi_to_txp(struct mt76_dev *dev, struct mt76_txwi_cache *t) { @@ -406,4 +440,9 @@ mt7615_txwi_to_txp(struct mt76_dev *dev, struct mt76_txwi_cache *t) return (struct mt7615_txp_common *)(txwi + MT_TXD_SIZE); } +static inline u32 mt7615_mac_wtbl_addr(struct mt7615_dev *dev, int wcid) +{ + return MT_WTBL_BASE(dev) + wcid * MT_WTBL_ENTRY_SIZE; +} + #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c index 6586176c29af..c26f99b368d9 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c @@ -4,11 +4,10 @@ * Author: Roy Luo <royluo@google.com> * Ryder Lee <ryder.lee@mediatek.com> * Felix Fietkau <nbd@nbd.name> + * Lorenzo Bianconi <lorenzo@kernel.org> */ #include <linux/etherdevice.h> -#include <linux/platform_device.h> -#include <linux/pci.h> #include <linux/module.h> #include "mt7615.h" #include "mcu.h" @@ -50,19 +49,17 @@ static int mt7615_start(struct ieee80211_hw *hw) mt7615_mac_enable_nf(dev, 1); } + mt7615_mcu_set_channel_domain(phy); mt7615_mcu_set_chan_info(phy, MCU_EXT_CMD_SET_RX_PATH); set_bit(MT76_STATE_RUNNING, &phy->mt76->state); - if (running) - goto out; - - mt7615_mac_reset_counters(dev); - - ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work, + ieee80211_queue_delayed_work(hw, &phy->mac_work, MT7615_WATCHDOG_TIME); -out: + if (!running) + mt7615_mac_reset_counters(dev); + mutex_unlock(&dev->mt76.mutex); return 0; @@ -73,9 +70,14 @@ static void mt7615_stop(struct ieee80211_hw *hw) struct mt7615_dev *dev = mt7615_hw_dev(hw); struct mt7615_phy *phy = mt7615_hw_phy(hw); + cancel_delayed_work_sync(&phy->mac_work); + del_timer_sync(&phy->roc_timer); + cancel_work_sync(&phy->roc_work); + mutex_lock(&dev->mt76.mutex); clear_bit(MT76_STATE_RUNNING, &phy->mt76->state); + cancel_delayed_work_sync(&phy->scan_work); if (phy != &dev->phy) { mt7615_mcu_set_pm(dev, 1, 1); @@ -83,8 +85,6 @@ static void mt7615_stop(struct ieee80211_hw *hw) } if (!mt7615_dev_running(dev)) { - cancel_delayed_work_sync(&dev->mt76.mac_work); - mt7615_mcu_set_pm(dev, 0, 1); mt7615_mcu_set_mac_enable(dev, 0, false); } @@ -157,10 +157,6 @@ static int mt7615_add_interface(struct ieee80211_hw *hw, else mvif->wmm_idx = mvif->idx % MT7615_MAX_WMM_SETS; - ret = mt7615_mcu_add_dev_info(dev, vif, true); - if (ret) - goto out; - dev->vif_mask |= BIT(mvif->idx); dev->omac_mask |= BIT(mvif->omac_idx); phy->omac_mask |= BIT(mvif->omac_idx); @@ -183,6 +179,7 @@ static int mt7615_add_interface(struct ieee80211_hw *hw, mt76_txq_init(&dev->mt76, vif->txq); } + ret = mt7615_mcu_add_dev_info(dev, 
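/*
 * mt7615_mac_get_cipher() above maps cfg80211 cipher suites onto the
 * hardware cipher enum and yields MT_CIPHER_NONE for anything the WTBL
 * cannot represent; a hedged usage sketch (caller names illustrative,
 * not taken from this patch):
 */
enum mt7615_cipher_type cipher = mt7615_mac_get_cipher(key->cipher);
u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx);

if (cipher == MT_CIPHER_NONE)
        return -EOPNOTSUPP;     /* refuse suites the WTBL can't encode */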
vif, true); out: mutex_unlock(&dev->mt76.mutex); @@ -218,20 +215,44 @@ static void mt7615_remove_interface(struct ieee80211_hw *hw, spin_unlock_bh(&dev->sta_poll_lock); } +static void mt7615_init_dfs_state(struct mt7615_phy *phy) +{ + struct mt76_phy *mphy = phy->mt76; + struct ieee80211_hw *hw = mphy->hw; + struct cfg80211_chan_def *chandef = &hw->conf.chandef; + + if (hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) + return; + + if (!(chandef->chan->flags & IEEE80211_CHAN_RADAR)) + return; + + if (mphy->chandef.chan->center_freq == chandef->chan->center_freq && + mphy->chandef.width == chandef->width) + return; + + phy->dfs_state = -1; +} + static int mt7615_set_channel(struct mt7615_phy *phy) { struct mt7615_dev *dev = phy->dev; bool ext_phy = phy != &dev->phy; int ret; - cancel_delayed_work_sync(&dev->mt76.mac_work); + cancel_delayed_work_sync(&phy->mac_work); mutex_lock(&dev->mt76.mutex); set_bit(MT76_RESET, &phy->mt76->state); - phy->dfs_state = -1; + mt7615_init_dfs_state(phy); mt76_set_channel(phy->mt76); + if (is_mt7615(&dev->mt76) && dev->flash_eeprom) { + mt7615_mcu_apply_rx_dcoc(phy); + mt7615_mcu_apply_tx_dpd(phy); + } + ret = mt7615_mcu_set_chan_info(phy, MCU_EXT_CMD_CHANNEL_SWITCH); if (ret) goto out; @@ -250,11 +271,41 @@ out: mutex_unlock(&dev->mt76.mutex); mt76_txq_schedule_all(phy->mt76); - ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work, + ieee80211_queue_delayed_work(phy->mt76->hw, &phy->mac_work, MT7615_WATCHDOG_TIME); return ret; } +static int +mt7615_queue_key_update(struct mt7615_dev *dev, enum set_key_cmd cmd, + struct mt7615_sta *msta, + struct ieee80211_key_conf *key) +{ + struct mt7615_wtbl_desc *wd; + + wd = kzalloc(sizeof(*wd), GFP_KERNEL); + if (!wd) + return -ENOMEM; + + wd->type = MT7615_WTBL_KEY_DESC; + wd->sta = msta; + + wd->key.key = kmemdup(key->key, key->keylen, GFP_KERNEL); + if (!wd->key.key) { + kfree(wd); + return -ENOMEM; + } + wd->key.cipher = key->cipher; + wd->key.keyidx = key->keyidx; + wd->key.keylen = key->keylen; + wd->key.cmd = cmd; + + list_add_tail(&wd->node, &dev->wd_head); + queue_work(dev->mt76.usb.wq, &dev->wtbl_work); + + return 0; +} + static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key) @@ -303,6 +354,9 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, mt76_wcid_key_setup(&dev->mt76, wcid, cmd == SET_KEY ? key : NULL); + if (mt76_is_usb(&dev->mt76)) + return mt7615_queue_key_update(dev, cmd, msta, key); + return mt7615_mac_wtbl_set_key(dev, wcid, key, cmd); } @@ -408,15 +462,12 @@ static void mt7615_bss_info_changed(struct ieee80211_hw *hw, u32 changed) { struct mt7615_dev *dev = mt7615_hw_dev(hw); + struct mt7615_phy *phy = mt7615_hw_phy(hw); mutex_lock(&dev->mt76.mutex); - if (changed & BSS_CHANGED_ASSOC) - mt7615_mcu_add_bss_info(dev, vif, info->assoc); - if (changed & BSS_CHANGED_ERP_SLOT) { int slottime = info->use_short_slot ? 
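/*
 * mt7615_queue_key_update() above defers WTBL key programming to the
 * driver's dedicated USB workqueue instead of writing it inline. A
 * hypothetical sketch of the consuming side of that list (locking and
 * the actual WTBL write are elided; the real wtbl_work body is not
 * part of this hunk):
 */
struct mt7615_wtbl_desc *wd, *tmp;

list_for_each_entry_safe(wd, tmp, &dev->wd_head, node) {
        list_del(&wd->node);
        /* apply wd->key with wd->key.cmd against wd->sta's WTBL entry */
        kfree(wd->key.key);
        kfree(wd);
}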
9 : 20; - struct mt7615_phy *phy = mt7615_hw_phy(hw); if (slottime != phy->slottime) { phy->slottime = slottime; @@ -425,14 +476,20 @@ static void mt7615_bss_info_changed(struct ieee80211_hw *hw, } if (changed & BSS_CHANGED_BEACON_ENABLED) { - mt7615_mcu_add_bss_info(dev, vif, info->enable_beacon); + mt7615_mcu_add_bss_info(phy, vif, NULL, info->enable_beacon); mt7615_mcu_sta_add(dev, vif, NULL, info->enable_beacon); + + if (vif->p2p && info->enable_beacon) + mt7615_mcu_set_p2p_oppps(hw, vif); } if (changed & (BSS_CHANGED_BEACON | BSS_CHANGED_BEACON_ENABLED)) mt7615_mcu_add_beacon(dev, hw, vif, info->enable_beacon); + if (changed & BSS_CHANGED_PS) + mt7615_mcu_set_vif_ps(dev, vif); + mutex_unlock(&dev->mt76.mutex); } @@ -466,13 +523,19 @@ int mt7615_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, msta->wcid.idx = idx; msta->wcid.ext_phy = mvif->band_idx; + if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) { + struct mt7615_phy *phy; + + phy = mvif->band_idx ? mt7615_ext_phy(dev) : &dev->phy; + mt7615_mcu_add_bss_info(phy, vif, sta, true); + } mt7615_mac_wtbl_update(dev, idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR); - mt7615_mcu_sta_add(dev, vif, sta, true); return 0; } +EXPORT_SYMBOL_GPL(mt7615_mac_sta_add); void mt7615_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, struct ieee80211_sta *sta) @@ -483,12 +546,20 @@ void mt7615_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, mt7615_mcu_sta_add(dev, vif, sta, false); mt7615_mac_wtbl_update(dev, msta->wcid.idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR); + if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) { + struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; + struct mt7615_phy *phy; + + phy = mvif->band_idx ? mt7615_ext_phy(dev) : &dev->phy; + mt7615_mcu_add_bss_info(phy, vif, sta, false); + } spin_lock_bh(&dev->sta_poll_lock); if (!list_empty(&msta->poll_list)) list_del_init(&msta->poll_list); spin_unlock_bh(&dev->sta_poll_lock); } +EXPORT_SYMBOL_GPL(mt7615_mac_sta_remove); static void mt7615_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif, @@ -694,13 +765,242 @@ mt7615_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant) } phy->chainmask = tx_ant; - mt76_set_stream_caps(&dev->mt76, true); + mt76_set_stream_caps(phy->mt76, true); mutex_unlock(&dev->mt76.mutex); return 0; } +static void mt7615_roc_iter(void *priv, u8 *mac, + struct ieee80211_vif *vif) +{ + struct mt7615_phy *phy = priv; + + mt7615_mcu_set_roc(phy, vif, NULL, 0); +} + +void mt7615_roc_work(struct work_struct *work) +{ + struct mt7615_phy *phy; + + phy = (struct mt7615_phy *)container_of(work, struct mt7615_phy, + roc_work); + + if (!test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state)) + return; + + ieee80211_iterate_active_interfaces(phy->mt76->hw, + IEEE80211_IFACE_ITER_RESUME_ALL, + mt7615_roc_iter, phy); + ieee80211_remain_on_channel_expired(phy->mt76->hw); +} + +void mt7615_roc_timer(struct timer_list *timer) +{ + struct mt7615_phy *phy = from_timer(phy, timer, roc_timer); + + ieee80211_queue_work(phy->mt76->hw, &phy->roc_work); +} + +void mt7615_scan_work(struct work_struct *work) +{ + struct mt7615_phy *phy; + + phy = (struct mt7615_phy *)container_of(work, struct mt7615_phy, + scan_work.work); + + while (true) { + struct mt7615_mcu_rxd *rxd; + struct sk_buff *skb; + + spin_lock_bh(&phy->dev->mt76.lock); + skb = __skb_dequeue(&phy->scan_event_list); + spin_unlock_bh(&phy->dev->mt76.lock); + + if (!skb) + break; + + rxd = (struct mt7615_mcu_rxd *)skb->data; + if (rxd->eid == 
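/*
 * mt7615_roc_timer() recovers its owning phy with from_timer(), which
 * presumes the timer was registered against the same struct member via
 * timer_setup(). The matching per-phy init lives outside this hunk;
 * it would plausibly look like:
 */
timer_setup(&phy->roc_timer, mt7615_roc_timer, 0);
INIT_WORK(&phy->roc_work, mt7615_roc_work);
INIT_DELAYED_WORK(&phy->scan_work, mt7615_scan_work);
skb_queue_head_init(&phy->scan_event_list);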
MCU_EVENT_SCHED_SCAN_DONE) { + ieee80211_sched_scan_results(phy->mt76->hw); + } else if (test_and_clear_bit(MT76_HW_SCANNING, + &phy->mt76->state)) { + struct cfg80211_scan_info info = { + .aborted = false, + }; + + ieee80211_scan_completed(phy->mt76->hw, &info); + } + dev_kfree_skb(skb); + } +} + +static int +mt7615_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_scan_request *req) +{ + struct mt76_phy *mphy = hw->priv; + + return mt7615_mcu_hw_scan(mphy->priv, vif, req); +} + +static void +mt7615_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + struct mt76_phy *mphy = hw->priv; + + mt7615_mcu_cancel_hw_scan(mphy->priv, vif); +} + +static int +mt7615_start_sched_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct cfg80211_sched_scan_request *req, + struct ieee80211_scan_ies *ies) +{ + struct mt76_phy *mphy = hw->priv; + int err; + + err = mt7615_mcu_sched_scan_req(mphy->priv, vif, req); + if (err < 0) + return err; + + return mt7615_mcu_sched_scan_enable(mphy->priv, vif, true); +} + +static int +mt7615_stop_sched_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + struct mt76_phy *mphy = hw->priv; + + return mt7615_mcu_sched_scan_enable(mphy->priv, vif, false); +} + +static int mt7615_remain_on_channel(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_channel *chan, + int duration, + enum ieee80211_roc_type type) +{ + struct mt7615_phy *phy = mt7615_hw_phy(hw); + int err; + + if (test_and_set_bit(MT76_STATE_ROC, &phy->mt76->state)) + return 0; + + err = mt7615_mcu_set_roc(phy, vif, chan, duration); + if (err < 0) { + clear_bit(MT76_STATE_ROC, &phy->mt76->state); + return err; + } + + if (!wait_event_timeout(phy->roc_wait, phy->roc_grant, HZ)) { + mt7615_mcu_set_roc(phy, vif, NULL, 0); + clear_bit(MT76_STATE_ROC, &phy->mt76->state); + + return -ETIMEDOUT; + } + + return 0; +} + +static int mt7615_cancel_remain_on_channel(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct mt7615_phy *phy = mt7615_hw_phy(hw); + + if (!test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state)) + return 0; + + del_timer_sync(&phy->roc_timer); + cancel_work_sync(&phy->roc_work); + + mt7615_mcu_set_roc(phy, vif, NULL, 0); + + return 0; +} + +#ifdef CONFIG_PM +static int mt7615_suspend(struct ieee80211_hw *hw, + struct cfg80211_wowlan *wowlan) +{ + struct mt7615_dev *dev = mt7615_hw_dev(hw); + struct mt7615_phy *phy = mt7615_hw_phy(hw); + bool ext_phy = phy != &dev->phy; + int err = 0; + + mutex_lock(&dev->mt76.mutex); + + clear_bit(MT76_STATE_RUNNING, &phy->mt76->state); + cancel_delayed_work_sync(&phy->scan_work); + cancel_delayed_work_sync(&phy->mac_work); + + mt76_set(dev, MT_WF_RFCR(ext_phy), MT_WF_RFCR_DROP_OTHER_BEACON); + + set_bit(MT76_STATE_SUSPEND, &phy->mt76->state); + ieee80211_iterate_active_interfaces(hw, + IEEE80211_IFACE_ITER_RESUME_ALL, + mt7615_mcu_set_suspend_iter, phy); + + if (!mt7615_dev_running(dev)) + err = mt7615_mcu_set_hif_suspend(dev, true); + + mutex_unlock(&dev->mt76.mutex); + + return err; +} + +static int mt7615_resume(struct ieee80211_hw *hw) +{ + struct mt7615_dev *dev = mt7615_hw_dev(hw); + struct mt7615_phy *phy = mt7615_hw_phy(hw); + bool running, ext_phy = phy != &dev->phy; + + mutex_lock(&dev->mt76.mutex); + + running = mt7615_dev_running(dev); + set_bit(MT76_STATE_RUNNING, &phy->mt76->state); + + if (!running) { + int err; + + err = mt7615_mcu_set_hif_suspend(dev, false); + if (err < 0) { + mutex_unlock(&dev->mt76.mutex); + return err; + } + } + + 
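/*
 * mt7615_remain_on_channel() above sleeps on roc_wait for up to one
 * second after issuing the MCU request; the wake-up half lives in the
 * firmware event path (mt7615_mcu_roc_event(), later in this patch),
 * which grants the request and arms the expiry timer:
 */
phy->roc_grant = true;
wake_up(&phy->roc_wait);
mod_timer(&phy->roc_timer,
          round_jiffies_up(jiffies + msecs_to_jiffies(duration)));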
clear_bit(MT76_STATE_SUSPEND, &phy->mt76->state); + ieee80211_iterate_active_interfaces(hw, + IEEE80211_IFACE_ITER_RESUME_ALL, + mt7615_mcu_set_suspend_iter, phy); + + ieee80211_queue_delayed_work(hw, &phy->mac_work, + MT7615_WATCHDOG_TIME); + mt76_clear(dev, MT_WF_RFCR(ext_phy), MT_WF_RFCR_DROP_OTHER_BEACON); + + mutex_unlock(&dev->mt76.mutex); + + return 0; +} + +static void mt7615_set_wakeup(struct ieee80211_hw *hw, bool enabled) +{ + struct mt7615_dev *dev = mt7615_hw_dev(hw); + struct mt76_dev *mdev = &dev->mt76; + + device_set_wakeup_enable(mdev->dev, enabled); +} + +static void mt7615_set_rekey_data(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_gtk_rekey_data *data) +{ + mt7615_mcu_update_gtk_rekey(hw, vif, data); +} +#endif /* CONFIG_PM */ + const struct ieee80211_ops mt7615_ops = { .tx = mt7615_tx, .start = mt7615_start, @@ -730,32 +1030,19 @@ const struct ieee80211_ops mt7615_ops = { .get_antenna = mt76_get_antenna, .set_antenna = mt7615_set_antenna, .set_coverage_class = mt7615_set_coverage_class, + .hw_scan = mt7615_hw_scan, + .cancel_hw_scan = mt7615_cancel_hw_scan, + .sched_scan_start = mt7615_start_sched_scan, + .sched_scan_stop = mt7615_stop_sched_scan, + .remain_on_channel = mt7615_remain_on_channel, + .cancel_remain_on_channel = mt7615_cancel_remain_on_channel, +#ifdef CONFIG_PM + .suspend = mt7615_suspend, + .resume = mt7615_resume, + .set_wakeup = mt7615_set_wakeup, + .set_rekey_data = mt7615_set_rekey_data, +#endif /* CONFIG_PM */ }; +EXPORT_SYMBOL_GPL(mt7615_ops); -static int __init mt7615_init(void) -{ - int ret; - - ret = pci_register_driver(&mt7615_pci_driver); - if (ret) - return ret; - - if (IS_ENABLED(CONFIG_MT7622_WMAC)) { - ret = platform_driver_register(&mt7622_wmac_driver); - if (ret) - pci_unregister_driver(&mt7615_pci_driver); - } - - return ret; -} - -static void __exit mt7615_exit(void) -{ - if (IS_ENABLED(CONFIG_MT7622_WMAC)) - platform_driver_unregister(&mt7622_wmac_driver); - pci_unregister_driver(&mt7615_pci_driver); -} - -module_init(mt7615_init); -module_exit(mt7615_exit); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c index 610cfa918c7b..6e869b8c5e26 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c @@ -11,6 +11,11 @@ #include "mac.h" #include "eeprom.h" +static bool prefer_offload_fw = true; +module_param(prefer_offload_fw, bool, 0644); +MODULE_PARM_DESC(prefer_offload_fw, + "Prefer client mode offload firmware (MT7663)"); + struct mt7615_patch_hdr { char build_date[16]; char platform[4]; @@ -135,16 +140,24 @@ void mt7615_mcu_fill_msg(struct mt7615_dev *dev, struct sk_buff *skb, mcu_txd->pkt_type = MCU_PKT_ID; mcu_txd->seq = seq; - if (cmd & MCU_FW_PREFIX) { + switch (cmd & ~MCU_CMD_MASK) { + case MCU_FW_PREFIX: mcu_txd->set_query = MCU_Q_NA; mcu_txd->cid = mcu_cmd; - } else { + break; + case MCU_CE_PREFIX: + mcu_txd->set_query = MCU_Q_SET; + mcu_txd->cid = mcu_cmd; + break; + default: mcu_txd->cid = MCU_CMD_EXT_CID; mcu_txd->set_query = MCU_Q_SET; mcu_txd->ext_cid = cmd; mcu_txd->ext_cid_ack = 1; + break; } } +EXPORT_SYMBOL_GPL(mt7615_mcu_fill_msg); static int __mt7615_mcu_msg_send(struct mt7615_dev *dev, struct sk_buff *skb, int cmd, int *wait_seq) @@ -179,6 +192,19 @@ mt7615_mcu_parse_response(struct mt7615_dev *dev, int cmd, skb_pull(skb, sizeof(*rxd)); ret = le32_to_cpu(*(__le32 *)skb->data); break; + case MCU_UNI_CMD_DEV_INFO_UPDATE: + case 
MCU_UNI_CMD_BSS_INFO_UPDATE: + case MCU_UNI_CMD_STA_REC_UPDATE: + case MCU_UNI_CMD_HIF_CTRL: + case MCU_UNI_CMD_OFFLOAD: + case MCU_UNI_CMD_SUSPEND: { + struct mt7615_mcu_uni_event *event; + + skb_pull(skb, sizeof(*rxd)); + event = (struct mt7615_mcu_uni_event *)skb->data; + ret = le32_to_cpu(event->status); + break; + } default: break; } @@ -208,6 +234,7 @@ int mt7615_mcu_wait_response(struct mt7615_dev *dev, int cmd, int seq) return ret; } +EXPORT_SYMBOL_GPL(mt7615_mcu_wait_response); static int mt7615_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb, @@ -231,18 +258,18 @@ out: return ret; } -static int -mt7615_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data, - int len, bool wait_resp) +int mt7615_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data, + int len, bool wait_resp) { struct sk_buff *skb; - skb = mt7615_mcu_msg_alloc(data, len); + skb = mt76_mcu_msg_alloc(mdev, data, len); if (!skb) return -ENOMEM; return __mt76_mcu_skb_send_msg(mdev, skb, cmd, wait_resp); } +EXPORT_SYMBOL_GPL(mt7615_mcu_msg_send); static void mt7615_mcu_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif) @@ -311,6 +338,110 @@ mt7615_mcu_rx_ext_event(struct mt7615_dev *dev, struct sk_buff *skb) } static void +mt7615_mcu_scan_event(struct mt7615_dev *dev, struct sk_buff *skb) +{ + u8 *seq_num = skb->data + sizeof(struct mt7615_mcu_rxd); + struct mt7615_phy *phy; + struct mt76_phy *mphy; + + if (*seq_num & BIT(7) && dev->mt76.phy2) + mphy = dev->mt76.phy2; + else + mphy = &dev->mt76.phy; + + phy = (struct mt7615_phy *)mphy->priv; + + spin_lock_bh(&dev->mt76.lock); + __skb_queue_tail(&phy->scan_event_list, skb); + spin_unlock_bh(&dev->mt76.lock); + + ieee80211_queue_delayed_work(mphy->hw, &phy->scan_work, + MT7615_HW_SCAN_TIMEOUT); +} + +static void +mt7615_mcu_roc_event(struct mt7615_dev *dev, struct sk_buff *skb) +{ + struct mt7615_roc_tlv *event; + struct mt7615_phy *phy; + struct mt76_phy *mphy; + int duration; + + skb_pull(skb, sizeof(struct mt7615_mcu_rxd)); + event = (struct mt7615_roc_tlv *)skb->data; + + if (event->dbdc_band && dev->mt76.phy2) + mphy = dev->mt76.phy2; + else + mphy = &dev->mt76.phy; + + ieee80211_ready_on_channel(mphy->hw); + + phy = (struct mt7615_phy *)mphy->priv; + phy->roc_grant = true; + wake_up(&phy->roc_wait); + + duration = le32_to_cpu(event->max_interval); + mod_timer(&phy->roc_timer, + round_jiffies_up(jiffies + msecs_to_jiffies(duration))); +} + +static void +mt7615_mcu_beacon_loss_iter(void *priv, u8 *mac, struct ieee80211_vif *vif) +{ + struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; + struct mt7615_beacon_loss_event *event = priv; + + if (mvif->idx != event->bss_idx) + return; + + if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER)) + return; + + ieee80211_beacon_loss(vif); +} + +static void +mt7615_mcu_beacon_loss_event(struct mt7615_dev *dev, struct sk_buff *skb) +{ + struct mt7615_beacon_loss_event *event; + struct mt76_phy *mphy; + u8 band_idx = 0; /* DBDC support */ + + skb_pull(skb, sizeof(struct mt7615_mcu_rxd)); + event = (struct mt7615_beacon_loss_event *)skb->data; + if (band_idx && dev->mt76.phy2) + mphy = dev->mt76.phy2; + else + mphy = &dev->mt76.phy; + + ieee80211_iterate_active_interfaces_atomic(mphy->hw, + IEEE80211_IFACE_ITER_RESUME_ALL, + mt7615_mcu_beacon_loss_iter, event); +} + +static void +mt7615_mcu_bss_event(struct mt7615_dev *dev, struct sk_buff *skb) +{ + struct mt7615_mcu_bss_event *event; + struct mt76_phy *mphy; + u8 band_idx = 0; /* DBDC support */ + + event = (struct 
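/*
 * The new MCU_UNI_CMD_* cases above all acknowledge with a short
 * status event; a sketch of the layout assumed behind
 * mt7615_mcu_uni_event (only the status word is consumed by the
 * response parser):
 */
struct mt7615_mcu_uni_event {
        u8 cid;
        u8 pad[3];
        __le32 status;  /* 0 on success */
} __packed;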
mt7615_mcu_bss_event *)(skb->data + + sizeof(struct mt7615_mcu_rxd)); + + if (band_idx && dev->mt76.phy2) + mphy = dev->mt76.phy2; + else + mphy = &dev->mt76.phy; + + if (event->is_absent) + ieee80211_stop_queues(mphy->hw); + else + ieee80211_wake_queues(mphy->hw); +} + +static void mt7615_mcu_rx_unsolicited_event(struct mt7615_dev *dev, struct sk_buff *skb) { struct mt7615_mcu_rxd *rxd = (struct mt7615_mcu_rxd *)skb->data; @@ -319,6 +450,19 @@ mt7615_mcu_rx_unsolicited_event(struct mt7615_dev *dev, struct sk_buff *skb) case MCU_EVENT_EXT: mt7615_mcu_rx_ext_event(dev, skb); break; + case MCU_EVENT_BSS_BEACON_LOSS: + mt7615_mcu_beacon_loss_event(dev, skb); + break; + case MCU_EVENT_ROC: + mt7615_mcu_roc_event(dev, skb); + break; + case MCU_EVENT_SCHED_SCAN_DONE: + case MCU_EVENT_SCAN_DONE: + mt7615_mcu_scan_event(dev, skb); + return; + case MCU_EVENT_BSS_ABSENCE: + mt7615_mcu_bss_event(dev, skb); + break; default: break; } @@ -333,6 +477,11 @@ void mt7615_mcu_rx_event(struct mt7615_dev *dev, struct sk_buff *skb) rxd->ext_eid == MCU_EXT_EVENT_FW_LOG_2_HOST || rxd->ext_eid == MCU_EXT_EVENT_ASSERT_DUMP || rxd->ext_eid == MCU_EXT_EVENT_PS_SYNC || + rxd->eid == MCU_EVENT_BSS_BEACON_LOSS || + rxd->eid == MCU_EVENT_SCHED_SCAN_DONE || + rxd->eid == MCU_EVENT_BSS_ABSENCE || + rxd->eid == MCU_EVENT_SCAN_DONE || + rxd->eid == MCU_EVENT_ROC || !rxd->seq) mt7615_mcu_rx_unsolicited_event(dev, skb); else @@ -493,7 +642,8 @@ mt7615_mcu_ctrl_pm_state(struct mt7615_dev *dev, int band, int state) } static struct sk_buff * -mt7615_mcu_alloc_sta_req(struct mt7615_vif *mvif, struct mt7615_sta *msta) +mt7615_mcu_alloc_sta_req(struct mt7615_dev *dev, struct mt7615_vif *mvif, + struct mt7615_sta *msta) { struct sta_req_hdr hdr = { .bss_idx = mvif->idx, @@ -503,7 +653,7 @@ mt7615_mcu_alloc_sta_req(struct mt7615_vif *mvif, struct mt7615_sta *msta) }; struct sk_buff *skb; - skb = mt7615_mcu_msg_alloc(NULL, MT7615_STA_UPDATE_MAX_SIZE); + skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, MT7615_STA_UPDATE_MAX_SIZE); if (!skb) return ERR_PTR(-ENOMEM); @@ -513,8 +663,8 @@ mt7615_mcu_alloc_sta_req(struct mt7615_vif *mvif, struct mt7615_sta *msta) } static struct wtbl_req_hdr * -mt7615_mcu_alloc_wtbl_req(struct mt7615_sta *msta, int cmd, - void *sta_wtbl, struct sk_buff **skb) +mt7615_mcu_alloc_wtbl_req(struct mt7615_dev *dev, struct mt7615_sta *msta, + int cmd, void *sta_wtbl, struct sk_buff **skb) { struct tlv *sta_hdr = sta_wtbl; struct wtbl_req_hdr hdr = { @@ -524,7 +674,8 @@ mt7615_mcu_alloc_wtbl_req(struct mt7615_sta *msta, int cmd, struct sk_buff *nskb = *skb; if (!nskb) { - nskb = mt7615_mcu_msg_alloc(NULL, MT7615_WTBL_UPDATE_BA_SIZE); + nskb = mt76_mcu_msg_alloc(&dev->mt76, NULL, + MT7615_WTBL_UPDATE_BA_SIZE); if (!nskb) return ERR_PTR(-ENOMEM); @@ -572,12 +723,12 @@ mt7615_mcu_add_tlv(struct sk_buff *skb, int tag, int len) static int mt7615_mcu_bss_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, - bool enable) + struct ieee80211_sta *sta, bool enable) { struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; + u32 type = vif->p2p ? 
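/*
 * mt7615_mcu_rx_event() above routes a message to the unsolicited
 * handler when its eid matches one of the listed firmware-initiated
 * events, or when it carries no sequence number; everything else is
 * treated as a command response for a waiter. Condensed sketch, where
 * is_unsolicited() is a hypothetical predicate standing in for the
 * eid list:
 */
if (is_unsolicited(rxd))        /* eid in the list above, or !rxd->seq */
        mt7615_mcu_rx_unsolicited_event(dev, skb);
else
        mt76_mcu_rx_event(&dev->mt76, skb);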
NETWORK_P2P : NETWORK_INFRA; struct bss_info_basic *bss; u8 wlan_idx = mvif->sta.wcid.idx; - u32 type = NETWORK_INFRA; struct tlv *tlv; tlv = mt7615_mcu_add_tlv(skb, BSS_INFO_BASIC, sizeof(*bss)); @@ -588,20 +739,11 @@ mt7615_mcu_bss_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, break; case NL80211_IFTYPE_STATION: /* TODO: enable BSS_INFO_UAPSD & BSS_INFO_PM */ - if (enable) { - struct ieee80211_sta *sta; + if (enable && sta) { struct mt7615_sta *msta; - rcu_read_lock(); - sta = ieee80211_find_sta(vif, vif->bss_conf.bssid); - if (!sta) { - rcu_read_unlock(); - return -EINVAL; - } - msta = (struct mt7615_sta *)sta->drv_priv; wlan_idx = msta->wcid.idx; - rcu_read_unlock(); } break; case NL80211_IFTYPE_ADHOC: @@ -638,10 +780,16 @@ mt7615_mcu_bss_omac_tlv(struct sk_buff *skb, struct ieee80211_vif *vif) switch (vif->type) { case NL80211_IFTYPE_MESH_POINT: case NL80211_IFTYPE_AP: - type = CONNECTION_INFRA_AP; + if (vif->p2p) + type = CONNECTION_P2P_GO; + else + type = CONNECTION_INFRA_AP; break; case NL80211_IFTYPE_STATION: - type = CONNECTION_INFRA_STA; + if (vif->p2p) + type = CONNECTION_P2P_GC; + else + type = CONNECTION_INFRA_STA; break; case NL80211_IFTYPE_ADHOC: type = CONNECTION_IBSS_ADHOC; @@ -704,6 +852,7 @@ mt7615_mcu_sta_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, { struct sta_rec_basic *basic; struct tlv *tlv; + int conn_type; tlv = mt7615_mcu_add_tlv(skb, STA_REC_BASIC, sizeof(*basic)); @@ -726,13 +875,24 @@ mt7615_mcu_sta_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, switch (vif->type) { case NL80211_IFTYPE_MESH_POINT: case NL80211_IFTYPE_AP: - basic->conn_type = cpu_to_le32(CONNECTION_INFRA_STA); + if (vif->p2p) + conn_type = CONNECTION_P2P_GC; + else + conn_type = CONNECTION_INFRA_STA; + basic->conn_type = cpu_to_le32(conn_type); + basic->aid = cpu_to_le16(sta->aid); break; case NL80211_IFTYPE_STATION: - basic->conn_type = cpu_to_le32(CONNECTION_INFRA_AP); + if (vif->p2p) + conn_type = CONNECTION_P2P_GO; + else + conn_type = CONNECTION_INFRA_AP; + basic->conn_type = cpu_to_le32(conn_type); + basic->aid = cpu_to_le16(vif->bss_conf.aid); break; case NL80211_IFTYPE_ADHOC: basic->conn_type = cpu_to_le32(CONNECTION_IBSS_ADHOC); + basic->aid = cpu_to_le16(sta->aid); break; default: WARN_ON(1); @@ -740,7 +900,6 @@ mt7615_mcu_sta_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, } memcpy(basic->peer_addr, sta->addr, ETH_ALEN); - basic->aid = cpu_to_le16(sta->aid); basic->qos = sta->wme; } @@ -815,6 +974,7 @@ mt7615_mcu_wtbl_generic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; struct wtbl_generic *generic; struct wtbl_rx *rx; + struct wtbl_spe *spe; struct tlv *tlv; tlv = mt7615_mcu_add_nested_tlv(skb, WTBL_GENERIC, sizeof(*generic), @@ -823,8 +983,11 @@ mt7615_mcu_wtbl_generic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, generic = (struct wtbl_generic *)tlv; if (sta) { + if (vif->type == NL80211_IFTYPE_STATION) + generic->partial_aid = cpu_to_le16(vif->bss_conf.aid); + else + generic->partial_aid = cpu_to_le16(sta->aid); memcpy(generic->peer_addr, sta->addr, ETH_ALEN); - generic->partial_aid = cpu_to_le16(sta->aid); generic->muar_idx = mvif->omac_idx; generic->qos = sta->wme; } else { @@ -839,6 +1002,11 @@ mt7615_mcu_wtbl_generic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, rx->rca1 = sta ? 
vif->type != NL80211_IFTYPE_AP : 1; rx->rca2 = 1; rx->rv = 1; + + tlv = mt7615_mcu_add_nested_tlv(skb, WTBL_SPE, sizeof(*spe), + wtbl_tlv, sta_wtbl); + spe = (struct wtbl_spe *)tlv; + spe->spe_idx = 24; } static void @@ -846,11 +1014,10 @@ mt7615_mcu_wtbl_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta, void *sta_wtbl, void *wtbl_tlv) { struct tlv *tlv; + struct wtbl_ht *ht = NULL; u32 flags = 0; if (sta->ht_cap.ht_supported) { - struct wtbl_ht *ht; - tlv = mt7615_mcu_add_nested_tlv(skb, WTBL_HT, sizeof(*ht), wtbl_tlv, sta_wtbl); ht = (struct wtbl_ht *)tlv; @@ -867,6 +1034,7 @@ mt7615_mcu_wtbl_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta, if (sta->vht_cap.vht_supported) { struct wtbl_vht *vht; + u8 af; tlv = mt7615_mcu_add_nested_tlv(skb, WTBL_VHT, sizeof(*vht), wtbl_tlv, sta_wtbl); @@ -874,6 +1042,13 @@ mt7615_mcu_wtbl_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta, vht->ldpc = sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC, vht->vht = 1; + af = (sta->vht_cap.cap & + IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >> + IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT; + + if (ht) + ht->af = max(ht->af, af); + if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80) flags |= MT_WTBL_W5_SHORT_GI_80; if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160) @@ -908,20 +1083,21 @@ mt7615_mcu_wtbl_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta, } static int -mt7615_mcu_add_bss(struct mt7615_dev *dev, struct ieee80211_vif *vif, - bool enable) +mt7615_mcu_add_bss(struct mt7615_phy *phy, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, bool enable) { struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; + struct mt7615_dev *dev = phy->dev; struct sk_buff *skb; - skb = mt7615_mcu_alloc_sta_req(mvif, NULL); + skb = mt7615_mcu_alloc_sta_req(dev, mvif, NULL); if (IS_ERR(skb)) return PTR_ERR(skb); if (enable) mt7615_mcu_bss_omac_tlv(skb, vif); - mt7615_mcu_bss_basic_tlv(skb, vif, enable); + mt7615_mcu_bss_basic_tlv(skb, vif, sta, enable); if (enable && mvif->omac_idx > EXT_BSSID_START) mt7615_mcu_bss_ext_tlv(skb, mvif); @@ -941,7 +1117,7 @@ mt7615_mcu_wtbl_tx_ba(struct mt7615_dev *dev, struct sk_buff *skb = NULL; int err; - wtbl_hdr = mt7615_mcu_alloc_wtbl_req(msta, WTBL_SET, NULL, &skb); + wtbl_hdr = mt7615_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, NULL, &skb); if (IS_ERR(wtbl_hdr)) return PTR_ERR(wtbl_hdr); @@ -952,7 +1128,7 @@ mt7615_mcu_wtbl_tx_ba(struct mt7615_dev *dev, if (err < 0) return err; - skb = mt7615_mcu_alloc_sta_req(mvif, msta); + skb = mt7615_mcu_alloc_sta_req(dev, mvif, msta); if (IS_ERR(skb)) return PTR_ERR(skb); @@ -973,7 +1149,7 @@ mt7615_mcu_wtbl_rx_ba(struct mt7615_dev *dev, struct sk_buff *skb; int err; - skb = mt7615_mcu_alloc_sta_req(mvif, msta); + skb = mt7615_mcu_alloc_sta_req(dev, mvif, msta); if (IS_ERR(skb)) return PTR_ERR(skb); @@ -985,7 +1161,7 @@ mt7615_mcu_wtbl_rx_ba(struct mt7615_dev *dev, return err; skb = NULL; - wtbl_hdr = mt7615_mcu_alloc_wtbl_req(msta, WTBL_SET, NULL, &skb); + wtbl_hdr = mt7615_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, NULL, &skb); if (IS_ERR(wtbl_hdr)) return PTR_ERR(wtbl_hdr); @@ -1007,7 +1183,7 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_dev *dev, struct ieee80211_vif *vif, msta = sta ? 
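/*
 * The A-MPDU length exponent merge above: the HT factor encodes a
 * maximum of 2^(13 + factor) - 1 bytes and the VHT exponent extends
 * the same scale, so the WTBL keeps the larger of the two. Worked
 * example: an HT factor of 3 (64 KiB) combined with a VHT exponent of
 * 7 (1 MiB) leaves ht->af at 7.
 */
u8 ht_af = 3, vht_af = 7;
u8 af = max(ht_af, vht_af);     /* == 7 -> 2^(13+7) - 1 = 1048575 bytes */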
(struct mt7615_sta *)sta->drv_priv : &mvif->sta; - sskb = mt7615_mcu_alloc_sta_req(mvif, msta); + sskb = mt7615_mcu_alloc_sta_req(dev, mvif, msta); if (IS_ERR(sskb)) return PTR_ERR(sskb); @@ -1015,8 +1191,8 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_dev *dev, struct ieee80211_vif *vif, if (enable && sta) mt7615_mcu_sta_ht_tlv(sskb, sta); - wtbl_hdr = mt7615_mcu_alloc_wtbl_req(msta, WTBL_RESET_AND_SET, NULL, - &wskb); + wtbl_hdr = mt7615_mcu_alloc_wtbl_req(dev, msta, WTBL_RESET_AND_SET, + NULL, &wskb); if (IS_ERR(wtbl_hdr)) return PTR_ERR(wtbl_hdr); @@ -1060,7 +1236,7 @@ mt7615_mcu_sta_ba(struct mt7615_dev *dev, struct tlv *sta_wtbl; struct sk_buff *skb; - skb = mt7615_mcu_alloc_sta_req(mvif, msta); + skb = mt7615_mcu_alloc_sta_req(dev, mvif, msta); if (IS_ERR(skb)) return PTR_ERR(skb); @@ -1068,7 +1244,8 @@ mt7615_mcu_sta_ba(struct mt7615_dev *dev, sta_wtbl = mt7615_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv)); - wtbl_hdr = mt7615_mcu_alloc_wtbl_req(msta, WTBL_SET, sta_wtbl, &skb); + wtbl_hdr = mt7615_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, sta_wtbl, + &skb); mt7615_mcu_wtbl_ba_tlv(skb, params, enable, tx, sta_wtbl, wtbl_hdr); return __mt76_mcu_skb_send_msg(&dev->mt76, skb, @@ -1103,7 +1280,7 @@ mt7615_mcu_add_sta_cmd(struct mt7615_dev *dev, struct ieee80211_vif *vif, msta = sta ? (struct mt7615_sta *)sta->drv_priv : &mvif->sta; - skb = mt7615_mcu_alloc_sta_req(mvif, msta); + skb = mt7615_mcu_alloc_sta_req(dev, mvif, msta); if (IS_ERR(skb)) return PTR_ERR(skb); @@ -1113,7 +1290,7 @@ mt7615_mcu_add_sta_cmd(struct mt7615_dev *dev, struct ieee80211_vif *vif, sta_wtbl = mt7615_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv)); - wtbl_hdr = mt7615_mcu_alloc_wtbl_req(msta, WTBL_RESET_AND_SET, + wtbl_hdr = mt7615_mcu_alloc_wtbl_req(dev, msta, WTBL_RESET_AND_SET, sta_wtbl, &skb); if (enable) { mt7615_mcu_wtbl_generic_tlv(skb, vif, sta, sta_wtbl, wtbl_hdr); @@ -1148,7 +1325,7 @@ mt7615_mcu_uni_add_dev(struct mt7615_dev *dev, { struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; struct { - struct req_hdr { + struct { u8 omac_idx; u8 band_idx; __le16 pad; @@ -1160,7 +1337,7 @@ mt7615_mcu_uni_add_dev(struct mt7615_dev *dev, u8 pad; u8 omac_addr[ETH_ALEN]; } __packed tlv; - } data = { + } dev_req = { .hdr = { .omac_idx = mvif->omac_idx, .band_idx = mvif->band_idx, @@ -1171,11 +1348,65 @@ mt7615_mcu_uni_add_dev(struct mt7615_dev *dev, .active = enable, }, }; + struct { + struct { + u8 bss_idx; + u8 pad[3]; + } __packed hdr; + struct mt7615_bss_basic_tlv basic; + } basic_req = { + .hdr = { + .bss_idx = mvif->idx, + }, + .basic = { + .tag = cpu_to_le16(UNI_BSS_INFO_BASIC), + .len = cpu_to_le16(sizeof(struct mt7615_bss_basic_tlv)), + .omac_idx = mvif->omac_idx, + .band_idx = mvif->band_idx, + .wmm_idx = mvif->wmm_idx, + .active = enable, + .bmc_tx_wlan_idx = cpu_to_le16(mvif->sta.wcid.idx), + .sta_idx = cpu_to_le16(mvif->sta.wcid.idx), + .conn_state = 1, + }, + }; + int err, idx, cmd, len; + void *data; - memcpy(data.tlv.omac_addr, vif->addr, ETH_ALEN); + switch (vif->type) { + case NL80211_IFTYPE_MESH_POINT: + case NL80211_IFTYPE_AP: + basic_req.basic.conn_type = cpu_to_le32(CONNECTION_INFRA_AP); + break; + case NL80211_IFTYPE_STATION: + basic_req.basic.conn_type = cpu_to_le32(CONNECTION_INFRA_STA); + break; + case NL80211_IFTYPE_ADHOC: + basic_req.basic.conn_type = cpu_to_le32(CONNECTION_IBSS_ADHOC); + break; + default: + WARN_ON(1); + break; + } - return __mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_DEV_INFO_UPDATE, - &data, sizeof(data), true); + idx = mvif->omac_idx > EXT_BSSID_START ? 
HW_BSSID_0 : mvif->omac_idx; + basic_req.basic.hw_bss_idx = idx; + + memcpy(dev_req.tlv.omac_addr, vif->addr, ETH_ALEN); + + cmd = enable ? MCU_UNI_CMD_DEV_INFO_UPDATE : MCU_UNI_CMD_BSS_INFO_UPDATE; + data = enable ? (void *)&dev_req : (void *)&basic_req; + len = enable ? sizeof(dev_req) : sizeof(basic_req); + + err = __mt76_mcu_send_msg(&dev->mt76, cmd, data, len, true); + if (err < 0) + return err; + + cmd = enable ? MCU_UNI_CMD_BSS_INFO_UPDATE : MCU_UNI_CMD_DEV_INFO_UPDATE; + data = enable ? (void *)&basic_req : (void *)&dev_req; + len = enable ? sizeof(basic_req) : sizeof(dev_req); + + return __mt76_mcu_send_msg(&dev->mt76, cmd, data, len, true); } static int @@ -1185,90 +1416,142 @@ mt7615_mcu_uni_ctrl_pm_state(struct mt7615_dev *dev, int band, int state) } static int -mt7615_mcu_uni_add_bss(struct mt7615_dev *dev, - struct ieee80211_vif *vif, bool enable) +mt7615_mcu_uni_add_bss(struct mt7615_phy *phy, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, bool enable) { struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; + struct cfg80211_chan_def *chandef = &phy->mt76->chandef; + int freq1 = chandef->center_freq1, freq2 = chandef->center_freq2; + struct mt7615_dev *dev = phy->dev; struct { - struct req_hdr { + struct { u8 bss_idx; u8 pad[3]; } __packed hdr; - struct basic_tlv { - __le16 tag; - __le16 len; - u8 active; - u8 omac_idx; - u8 hw_bss_idx; - u8 band_idx; - __le32 conn_type; - u8 conn_state; - u8 wmm_idx; - u8 bssid[ETH_ALEN]; - __le16 bmc_tx_wlan_idx; - __le16 bcn_interval; - u8 dtim_period; - u8 phymode; - __le16 sta_idx; - u8 nonht_basic_phy; - u8 pad[3]; - } __packed basic; - } req = { + struct mt7615_bss_basic_tlv basic; + } basic_req = { .hdr = { .bss_idx = mvif->idx, }, .basic = { .tag = cpu_to_le16(UNI_BSS_INFO_BASIC), - .len = cpu_to_le16(sizeof(struct basic_tlv)), + .len = cpu_to_le16(sizeof(struct mt7615_bss_basic_tlv)), .bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int), .dtim_period = vif->bss_conf.dtim_period, .omac_idx = mvif->omac_idx, .band_idx = mvif->band_idx, .wmm_idx = mvif->wmm_idx, - .active = enable, + .active = true, /* keep bss deactivated */ + .phymode = 0x38, }, }; - u8 idx, tx_wlan_idx = 0; + struct { + struct { + u8 bss_idx; + u8 pad[3]; + } __packed hdr; + struct rlm_tlv { + __le16 tag; + __le16 len; + u8 control_channel; + u8 center_chan; + u8 center_chan2; + u8 bw; + u8 tx_streams; + u8 rx_streams; + u8 short_st; + u8 ht_op_info; + u8 sco; + u8 pad[3]; + } __packed rlm; + } __packed rlm_req = { + .hdr = { + .bss_idx = mvif->idx, + }, + .rlm = { + .tag = cpu_to_le16(UNI_BSS_INFO_RLM), + .len = cpu_to_le16(sizeof(struct rlm_tlv)), + .control_channel = chandef->chan->hw_value, + .center_chan = ieee80211_frequency_to_channel(freq1), + .center_chan2 = ieee80211_frequency_to_channel(freq2), + .tx_streams = hweight8(phy->mt76->antenna_mask), + .rx_streams = phy->chainmask, + .short_st = true, + }, + }; + int err, conn_type; + u8 idx; idx = mvif->omac_idx > EXT_BSSID_START ? 
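/*
 * The two sends above sequence the UNI updates so the device record
 * exists before any BSS that references it and is torn down after it:
 * enable issues DEV_INFO_UPDATE then BSS_INFO_UPDATE, disable the
 * reverse. Condensed sketch of the same control flow, where send() is
 * an illustrative stand-in for __mt76_mcu_send_msg():
 */
if (enable) {
        err = send(MCU_UNI_CMD_DEV_INFO_UPDATE, &dev_req);
        if (err < 0)
                return err;
        return send(MCU_UNI_CMD_BSS_INFO_UPDATE, &basic_req);
}
err = send(MCU_UNI_CMD_BSS_INFO_UPDATE, &basic_req);
if (err < 0)
        return err;
return send(MCU_UNI_CMD_DEV_INFO_UPDATE, &dev_req);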
HW_BSSID_0 : mvif->omac_idx; - req.basic.hw_bss_idx = idx; + basic_req.basic.hw_bss_idx = idx; switch (vif->type) { case NL80211_IFTYPE_MESH_POINT: case NL80211_IFTYPE_AP: - req.basic.conn_type = cpu_to_le32(CONNECTION_INFRA_AP); - tx_wlan_idx = mvif->sta.wcid.idx; + if (vif->p2p) + conn_type = CONNECTION_P2P_GO; + else + conn_type = CONNECTION_INFRA_AP; + basic_req.basic.conn_type = cpu_to_le32(conn_type); break; case NL80211_IFTYPE_STATION: - if (enable) { - struct ieee80211_sta *sta; - struct mt7615_sta *msta; + if (vif->p2p) + conn_type = CONNECTION_P2P_GC; + else + conn_type = CONNECTION_INFRA_STA; + basic_req.basic.conn_type = cpu_to_le32(conn_type); + break; + case NL80211_IFTYPE_ADHOC: + basic_req.basic.conn_type = cpu_to_le32(CONNECTION_IBSS_ADHOC); + break; + default: + WARN_ON(1); + break; + } - rcu_read_lock(); - sta = ieee80211_find_sta(vif, vif->bss_conf.bssid); - if (!sta) { - rcu_read_unlock(); - return -EINVAL; - } + memcpy(basic_req.basic.bssid, vif->bss_conf.bssid, ETH_ALEN); + basic_req.basic.bmc_tx_wlan_idx = cpu_to_le16(mvif->sta.wcid.idx); + basic_req.basic.sta_idx = cpu_to_le16(mvif->sta.wcid.idx); + basic_req.basic.conn_state = !enable; - msta = (struct mt7615_sta *)sta->drv_priv; - tx_wlan_idx = msta->wcid.idx; - rcu_read_unlock(); - } - req.basic.conn_type = cpu_to_le32(CONNECTION_INFRA_STA); + err = __mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_BSS_INFO_UPDATE, + &basic_req, sizeof(basic_req), true); + if (err < 0) + return err; + + switch (chandef->width) { + case NL80211_CHAN_WIDTH_40: + rlm_req.rlm.bw = CMD_CBW_40MHZ; + break; + case NL80211_CHAN_WIDTH_80: + rlm_req.rlm.bw = CMD_CBW_80MHZ; + break; + case NL80211_CHAN_WIDTH_80P80: + rlm_req.rlm.bw = CMD_CBW_8080MHZ; + break; + case NL80211_CHAN_WIDTH_160: + rlm_req.rlm.bw = CMD_CBW_160MHZ; + break; + case NL80211_CHAN_WIDTH_5: + rlm_req.rlm.bw = CMD_CBW_5MHZ; + break; + case NL80211_CHAN_WIDTH_10: + rlm_req.rlm.bw = CMD_CBW_10MHZ; break; + case NL80211_CHAN_WIDTH_20_NOHT: + case NL80211_CHAN_WIDTH_20: default: - WARN_ON(1); + rlm_req.rlm.bw = CMD_CBW_20MHZ; break; } - memcpy(req.basic.bssid, vif->bss_conf.bssid, ETH_ALEN); - req.basic.bmc_tx_wlan_idx = cpu_to_le16(tx_wlan_idx); - req.basic.sta_idx = cpu_to_le16(tx_wlan_idx); - req.basic.conn_state = !enable; + if (rlm_req.rlm.control_channel < rlm_req.rlm.center_chan) + rlm_req.rlm.sco = 1; /* SCA */ + else if (rlm_req.rlm.control_channel > rlm_req.rlm.center_chan) + rlm_req.rlm.sco = 3; /* SCB */ return __mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_BSS_INFO_UPDATE, - &req, sizeof(req), true); + &rlm_req, sizeof(rlm_req), true); } static int @@ -1355,13 +1638,14 @@ mt7615_mcu_uni_tx_ba(struct mt7615_dev *dev, struct sk_buff *skb; int err; - skb = mt7615_mcu_alloc_sta_req(mvif, msta); + skb = mt7615_mcu_alloc_sta_req(dev, mvif, msta); if (IS_ERR(skb)) return PTR_ERR(skb); sta_wtbl = mt7615_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv)); - wtbl_hdr = mt7615_mcu_alloc_wtbl_req(msta, WTBL_SET, sta_wtbl, &skb); + wtbl_hdr = mt7615_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, sta_wtbl, + &skb); if (IS_ERR(wtbl_hdr)) return PTR_ERR(wtbl_hdr); @@ -1373,7 +1657,7 @@ mt7615_mcu_uni_tx_ba(struct mt7615_dev *dev, if (err < 0) return err; - skb = mt7615_mcu_alloc_sta_req(mvif, msta); + skb = mt7615_mcu_alloc_sta_req(dev, mvif, msta); if (IS_ERR(skb)) return PTR_ERR(skb); @@ -1395,7 +1679,7 @@ mt7615_mcu_uni_rx_ba(struct mt7615_dev *dev, struct sk_buff *skb; int err; - skb = mt7615_mcu_alloc_sta_req(mvif, msta); + skb = mt7615_mcu_alloc_sta_req(dev, mvif, msta); if 
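/*
 * The sco field above encodes the HT secondary-channel offset from the
 * relation between the primary (control) channel and the 40 MHz
 * center: control below center puts the secondary above (SCA, 1),
 * control above center puts it below (SCB, 3). Worked example:
 */
u8 control = 36, center = 38;   /* HT40+ on channel 36 */
u8 sco = control < center ? 1 : /* SCA */
         control > center ? 3 : /* SCB */
         0;                     /* SCN: no secondary channel */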
(IS_ERR(skb)) return PTR_ERR(skb); @@ -1406,13 +1690,14 @@ mt7615_mcu_uni_rx_ba(struct mt7615_dev *dev, if (err < 0 || !enable) return err; - skb = mt7615_mcu_alloc_sta_req(mvif, msta); + skb = mt7615_mcu_alloc_sta_req(dev, mvif, msta); if (IS_ERR(skb)) return PTR_ERR(skb); sta_wtbl = mt7615_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv)); - wtbl_hdr = mt7615_mcu_alloc_wtbl_req(msta, WTBL_SET, sta_wtbl, &skb); + wtbl_hdr = mt7615_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, sta_wtbl, + &skb); if (IS_ERR(wtbl_hdr)) return PTR_ERR(wtbl_hdr); @@ -1447,8 +1732,7 @@ static int mt7615_mcu_send_firmware(struct mt7615_dev *dev, const void *data, int ret = 0, cur_len; while (len > 0) { - cur_len = min_t(int, 4096 - sizeof(struct mt7615_mcu_txd), - len); + cur_len = min_t(int, 4096 - dev->mt76.mcu_ops->headroom, len); ret = __mt76_mcu_send_msg(&dev->mt76, MCU_CMD_FW_SCATTER, data, cur_len, false); @@ -1480,11 +1764,12 @@ static int mt7615_mcu_start_firmware(struct mt7615_dev *dev, u32 addr, &req, sizeof(req), true); } -static int mt7615_mcu_restart(struct mt76_dev *dev) +int mt7615_mcu_restart(struct mt76_dev *dev) { return __mt76_mcu_send_msg(dev, MCU_CMD_RESTART_DL_REQ, NULL, 0, true); } +EXPORT_SYMBOL_GPL(mt7615_mcu_restart); static int mt7615_mcu_patch_sem_ctrl(struct mt7615_dev *dev, bool get) { @@ -1521,24 +1806,29 @@ static void mt7622_trigger_hif_int(struct mt7615_dev *dev, bool en) !en * MT_INFRACFG_MISC_AP2CONN_WAKE); } -static int mt7615_driver_own(struct mt7615_dev *dev) +int mt7615_driver_own(struct mt7615_dev *dev) { + struct mt76_dev *mdev = &dev->mt76; u32 addr; - addr = is_mt7663(&dev->mt76) ? MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST; + addr = is_mt7663(mdev) ? MT_PCIE_DOORBELL_PUSH : MT_CFG_LPCR_HOST; mt76_wr(dev, addr, MT_CFG_LPCR_HOST_DRV_OWN); mt7622_trigger_hif_int(dev, true); + + addr = is_mt7663(mdev) ? 
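/*
 * mt7615_driver_own() above now rings MT7663's PCIe doorbell to
 * request ownership but still polls completion on the LPCTL register.
 * mt76_poll_msec(dev, addr, mask, val, ms) re-reads addr until
 * (value & mask) == val or the timeout elapses, returning false on
 * timeout; hence the -EIO path when the firmware never clears
 * MT_CFG_LPCR_HOST_FW_OWN:
 */
if (!mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN, 0, 3000))
        return -EIO;    /* firmware kept ownership past 3 s */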
MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST; if (!mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN, 0, 3000)) { dev_err(dev->mt76.dev, "Timeout for driver own\n"); return -EIO; } + mt7622_trigger_hif_int(dev, false); return 0; } +EXPORT_SYMBOL_GPL(mt7615_driver_own); -static int mt7615_firmware_own(struct mt7615_dev *dev) +int mt7615_firmware_own(struct mt7615_dev *dev) { u32 addr; @@ -1547,9 +1837,8 @@ static int mt7615_firmware_own(struct mt7615_dev *dev) mt76_wr(dev, addr, MT_CFG_LPCR_HOST_FW_OWN); - if (is_mt7622(&dev->mt76) && - !mt76_poll_msec(dev, MT_CFG_LPCR_HOST, - MT_CFG_LPCR_HOST_FW_OWN, + if (!is_mt7615(&dev->mt76) && + !mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN, MT_CFG_LPCR_HOST_FW_OWN, 3000)) { dev_err(dev->mt76.dev, "Timeout for firmware own\n"); return -EIO; @@ -1558,6 +1847,7 @@ static int mt7615_firmware_own(struct mt7615_dev *dev) return 0; } +EXPORT_SYMBOL_GPL(mt7615_firmware_own); static int mt7615_load_patch(struct mt7615_dev *dev, u32 addr, const char *name) { @@ -1576,7 +1866,7 @@ static int mt7615_load_patch(struct mt7615_dev *dev, u32 addr, const char *name) return -EAGAIN; } - ret = request_firmware(&fw, name, dev->mt76.dev); + ret = firmware_request_nowarn(&fw, name, dev->mt76.dev); if (ret) goto out; @@ -1671,6 +1961,15 @@ mt7615_mcu_send_ram_firmware(struct mt7615_dev *dev, return 0; } +static const struct wiphy_wowlan_support mt7615_wowlan_support = { + .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT | + WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | WIPHY_WOWLAN_NET_DETECT, + .n_patterns = 1, + .pattern_min_len = 1, + .pattern_max_len = MT7615_WOW_PATTEN_MAX_LEN, + .max_nd_match_sets = 10, +}; + static int mt7615_load_n9(struct mt7615_dev *dev, const char *name) { const struct mt7615_fw_trailer *hdr; @@ -1848,7 +2147,7 @@ int mt7615_mcu_fw_log_2_host(struct mt7615_dev *dev, u8 ctrl) static int mt7663_load_n9(struct mt7615_dev *dev, const char *name) { - u32 offset = 0, override_addr = 0, flag = 0; + u32 offset = 0, override_addr = 0, flag = FW_START_DLYCAL; const struct mt7663_fw_trailer *hdr; const struct mt7663_fw_buf *buf; const struct firmware *fw; @@ -1904,18 +2203,21 @@ static int mt7663_load_n9(struct mt7615_dev *dev, const char *name) } } - if (is_mt7663(&dev->mt76)) { - flag |= FW_START_DLYCAL; - if (override_addr) - flag |= FW_START_OVERRIDE; + if (override_addr) + flag |= FW_START_OVERRIDE; - dev_info(dev->mt76.dev, "override_addr = 0x%08x, option = %d\n", - override_addr, flag); - } + dev_info(dev->mt76.dev, "override_addr = 0x%08x, option = %d\n", + override_addr, flag); ret = mt7615_mcu_start_firmware(dev, override_addr, flag); - if (ret) + if (ret) { dev_err(dev->mt76.dev, "Failed to start N9 firmware\n"); + goto out; + } + + snprintf(dev->mt76.hw->wiphy->fw_version, + sizeof(dev->mt76.hw->wiphy->fw_version), + "%.10s-%.15s", hdr->fw_ver, hdr->build_date); out: release_firmware(fw); @@ -1923,11 +2225,50 @@ out: return ret; } -static int mt7663_load_firmware(struct mt7615_dev *dev) +static int +mt7663_load_rom_patch(struct mt7615_dev *dev, const char **n9_firmware) { + const char *selected_rom, *secondary_rom = MT7663_ROM_PATCH; + const char *primary_rom = MT7663_OFFLOAD_ROM_PATCH; int ret; - mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_BYPASS_TX_SCH); + if (!prefer_offload_fw) { + secondary_rom = MT7663_OFFLOAD_ROM_PATCH; + primary_rom = MT7663_ROM_PATCH; + } + selected_rom = primary_rom; + + ret = mt7615_load_patch(dev, MT7663_PATCH_ADDRESS, primary_rom); + if (ret) { + dev_info(dev->mt76.dev, "%s not found, switching to %s", + 
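/*
 * mt7615_load_patch() above switches to firmware_request_nowarn(),
 * which behaves like request_firmware() minus the kernel-log warning
 * on a missing file. That keeps the probe quiet when the first of the
 * two MT7663 ROM-patch images tried by the fallback logic here is
 * simply not installed. Sketch of the quiet-try pattern:
 */
ret = firmware_request_nowarn(&fw, name, dev->mt76.dev);
if (ret)
        goto out;       /* caller may retry with the alternate image */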
primary_rom, secondary_rom); + ret = mt7615_load_patch(dev, MT7663_PATCH_ADDRESS, + secondary_rom); + if (ret) { + dev_err(dev->mt76.dev, "failed to load %s", + secondary_rom); + return ret; + } + selected_rom = secondary_rom; + } + + if (!strcmp(selected_rom, MT7663_OFFLOAD_ROM_PATCH)) { + *n9_firmware = MT7663_OFFLOAD_FIRMWARE_N9; + dev->fw_ver = MT7615_FIRMWARE_V3; + dev->mcu_ops = &uni_update_ops; + } else { + *n9_firmware = MT7663_FIRMWARE_N9; + dev->fw_ver = MT7615_FIRMWARE_V2; + dev->mcu_ops = &sta_update_ops; + } + + return 0; +} + +int __mt7663_load_firmware(struct mt7615_dev *dev) +{ + const char *n9_firmware; + int ret; ret = mt76_get_field(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_N9_RDY); if (ret) { @@ -1935,14 +2276,11 @@ static int mt7663_load_firmware(struct mt7615_dev *dev) return -EIO; } - ret = mt7615_load_patch(dev, MT7663_PATCH_ADDRESS, MT7663_ROM_PATCH); + ret = mt7663_load_rom_patch(dev, &n9_firmware); if (ret) return ret; - dev->fw_ver = MT7615_FIRMWARE_V3; - dev->mcu_ops = &uni_update_ops; - - ret = mt7663_load_n9(dev, MT7663_FIRMWARE_N9); + ret = mt7663_load_n9(dev, n9_firmware); if (ret) return ret; @@ -1954,16 +2292,36 @@ static int mt7663_load_firmware(struct mt7615_dev *dev) return -EIO; } - mt76_clear(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_BYPASS_TX_SCH); +#ifdef CONFIG_PM + if (mt7615_firmware_offload(dev)) + dev->mt76.hw->wiphy->wowlan = &mt7615_wowlan_support; +#endif /* CONFIG_PM */ dev_dbg(dev->mt76.dev, "Firmware init done\n"); return 0; } +EXPORT_SYMBOL_GPL(__mt7663_load_firmware); + +static int mt7663_load_firmware(struct mt7615_dev *dev) +{ + int ret; + + mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_BYPASS_TX_SCH); + + ret = __mt7663_load_firmware(dev); + if (ret) + return ret; + + mt76_clear(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_BYPASS_TX_SCH); + + return 0; +} int mt7615_mcu_init(struct mt7615_dev *dev) { static const struct mt76_mcu_ops mt7615_mcu_ops = { + .headroom = sizeof(struct mt7615_mcu_txd), .mcu_skb_send_msg = mt7615_mcu_send_message, .mcu_send_msg = mt7615_mcu_msg_send, .mcu_restart = mt7615_mcu_restart, @@ -1997,6 +2355,7 @@ int mt7615_mcu_init(struct mt7615_dev *dev) return 0; } +EXPORT_SYMBOL_GPL(mt7615_mcu_init); void mt7615_mcu_exit(struct mt7615_dev *dev) { @@ -2004,6 +2363,7 @@ void mt7615_mcu_exit(struct mt7615_dev *dev) mt7615_firmware_own(dev); skb_queue_purge(&dev->mt76.mcu.res_q); } +EXPORT_SYMBOL_GPL(mt7615_mcu_exit); int mt7615_mcu_set_eeprom(struct mt7615_dev *dev) { @@ -2036,7 +2396,7 @@ int mt7615_mcu_set_eeprom(struct mt7615_dev *dev) req_hdr.len = cpu_to_le16(eep_len); - skb = mt7615_mcu_msg_alloc(NULL, sizeof(req_hdr) + eep_len); + skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, sizeof(req_hdr) + eep_len); if (!skb) return -ENOMEM; @@ -2046,6 +2406,7 @@ int mt7615_mcu_set_eeprom(struct mt7615_dev *dev) return __mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_EXT_CMD_EFUSE_BUFFER_MODE, true); } +EXPORT_SYMBOL_GPL(mt7615_mcu_set_eeprom); int mt7615_mcu_set_mac_enable(struct mt7615_dev *dev, int band, bool enable) { @@ -2187,6 +2548,7 @@ int mt7615_mcu_del_wtbl_all(struct mt7615_dev *dev) return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_WTBL_UPDATE, &req, sizeof(req), true); } +EXPORT_SYMBOL_GPL(mt7615_mcu_del_wtbl_all); int mt7615_mcu_rdd_cmd(struct mt7615_dev *dev, enum mt7615_rdd_cmd cmd, u8 index, @@ -2313,6 +2675,25 @@ static void mt7615_mcu_set_txpower_sku(struct mt7615_phy *phy, u8 *sku) } } +static u8 mt7615_mcu_chan_bw(struct cfg80211_chan_def *chandef) +{ + static const u8 width_to_bw[] = { + [NL80211_CHAN_WIDTH_40] 
= CMD_CBW_40MHZ, + [NL80211_CHAN_WIDTH_80] = CMD_CBW_80MHZ, + [NL80211_CHAN_WIDTH_80P80] = CMD_CBW_8080MHZ, + [NL80211_CHAN_WIDTH_160] = CMD_CBW_160MHZ, + [NL80211_CHAN_WIDTH_5] = CMD_CBW_5MHZ, + [NL80211_CHAN_WIDTH_10] = CMD_CBW_10MHZ, + [NL80211_CHAN_WIDTH_20] = CMD_CBW_20MHZ, + [NL80211_CHAN_WIDTH_20_NOHT] = CMD_CBW_20MHZ, + }; + + if (chandef->width >= ARRAY_SIZE(width_to_bw)) + return 0; + + return width_to_bw[chandef->width]; +} + int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd) { struct mt7615_dev *dev = phy->dev; @@ -2353,32 +2734,7 @@ int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd) req.switch_reason = CH_SWITCH_NORMAL; req.band_idx = phy != &dev->phy; - - switch (chandef->width) { - case NL80211_CHAN_WIDTH_40: - req.bw = CMD_CBW_40MHZ; - break; - case NL80211_CHAN_WIDTH_80: - req.bw = CMD_CBW_80MHZ; - break; - case NL80211_CHAN_WIDTH_80P80: - req.bw = CMD_CBW_8080MHZ; - break; - case NL80211_CHAN_WIDTH_160: - req.bw = CMD_CBW_160MHZ; - break; - case NL80211_CHAN_WIDTH_5: - req.bw = CMD_CBW_5MHZ; - break; - case NL80211_CHAN_WIDTH_10: - req.bw = CMD_CBW_10MHZ; - break; - case NL80211_CHAN_WIDTH_20_NOHT: - case NL80211_CHAN_WIDTH_20: - default: - req.bw = CMD_CBW_20MHZ; - break; - } + req.bw = mt7615_mcu_chan_bw(chandef); mt7615_mcu_set_txpower_sku(phy, req.txpower_sku); @@ -2415,3 +2771,906 @@ int mt7615_mcu_set_sku_en(struct mt7615_phy *phy, bool enable) return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_TX_POWER_FEATURE_CTRL, &req, sizeof(req), true); } + +int mt7615_mcu_set_vif_ps(struct mt7615_dev *dev, struct ieee80211_vif *vif) +{ + struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; + struct { + u8 bss_idx; + u8 ps_state; /* 0: device awake + * 1: static power save + * 2: dynamic power saving + */ + } req = { + .bss_idx = mvif->idx, + .ps_state = vif->bss_conf.ps ? 
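/*
 * mt7615_mcu_chan_bw() above replaces the per-caller switch with a
 * bounds-checked lookup: any nl80211 width outside the table returns
 * 0, which presumably matches the 20 MHz encoding (CMD_CBW_20MHZ), so
 * unknown widths degrade to the narrowest channel. Usage sketch:
 */
req.bw = mt7615_mcu_chan_bw(chandef);   /* e.g. WIDTH_80 -> CMD_CBW_80MHZ */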
2 : 0, + }; + + if (vif->type != NL80211_IFTYPE_STATION) + return -ENOTSUPP; + + return __mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SET_PS_PROFILE, + &req, sizeof(req), false); +} + +int mt7615_mcu_set_channel_domain(struct mt7615_phy *phy) +{ + struct mt76_phy *mphy = phy->mt76; + struct mt7615_dev *dev = phy->dev; + struct mt7615_mcu_channel_domain { + __le32 country_code; /* regulatory_request.alpha2 */ + u8 bw_2g; /* BW_20_40M 0 + * BW_20M 1 + * BW_20_40_80M 2 + * BW_20_40_80_160M 3 + * BW_20_40_80_8080M 4 + */ + u8 bw_5g; + __le16 pad; + u8 n_2ch; + u8 n_5ch; + __le16 pad2; + } __packed hdr = { + .bw_2g = 0, + .bw_5g = 3, + .n_2ch = mphy->sband_2g.sband.n_channels, + .n_5ch = mphy->sband_5g.sband.n_channels, + }; + struct mt7615_mcu_chan { + __le16 hw_value; + __le16 pad; + __le32 flags; + } __packed; + int i, n_channels = hdr.n_2ch + hdr.n_5ch; + int len = sizeof(hdr) + n_channels * sizeof(struct mt7615_mcu_chan); + struct sk_buff *skb; + + if (!mt7615_firmware_offload(dev)) + return 0; + + skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, len); + if (!skb) + return -ENOMEM; + + skb_put_data(skb, &hdr, sizeof(hdr)); + + for (i = 0; i < n_channels; i++) { + struct ieee80211_channel *chan; + struct mt7615_mcu_chan channel; + + if (i < hdr.n_2ch) + chan = &mphy->sband_2g.sband.channels[i]; + else + chan = &mphy->sband_5g.sband.channels[i - hdr.n_2ch]; + + channel.hw_value = cpu_to_le16(chan->hw_value); + channel.flags = cpu_to_le32(chan->flags); + channel.pad = 0; + + skb_put_data(skb, &channel, sizeof(channel)); + } + + return __mt76_mcu_skb_send_msg(&dev->mt76, skb, + MCU_CMD_SET_CHAN_DOMAIN, false); +} + +#define MT7615_SCAN_CHANNEL_TIME 60 +int mt7615_mcu_hw_scan(struct mt7615_phy *phy, struct ieee80211_vif *vif, + struct ieee80211_scan_request *scan_req) +{ + struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; + struct cfg80211_scan_request *sreq = &scan_req->req; + int n_ssids = 0, err, i, duration = MT7615_SCAN_CHANNEL_TIME; + int ext_channels_num = max_t(int, sreq->n_channels - 32, 0); + struct ieee80211_channel **scan_list = sreq->channels; + struct mt7615_dev *dev = phy->dev; + bool ext_phy = phy != &dev->phy; + struct mt7615_mcu_scan_channel *chan; + struct mt7615_hw_scan_req *req; + struct sk_buff *skb; + + /* fall-back to sw-scan */ + if (!mt7615_firmware_offload(dev)) + return 1; + + skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, sizeof(*req)); + if (!skb) + return -ENOMEM; + + set_bit(MT76_HW_SCANNING, &phy->mt76->state); + mvif->scan_seq_num = (mvif->scan_seq_num + 1) & 0x7f; + + req = (struct mt7615_hw_scan_req *)skb_put(skb, sizeof(*req)); + + req->seq_num = mvif->scan_seq_num | ext_phy << 7; + req->bss_idx = mvif->idx; + req->scan_type = sreq->n_ssids ? 1 : 0; + req->probe_req_num = sreq->n_ssids ? 2 : 0; + req->version = 1; + + for (i = 0; i < sreq->n_ssids; i++) { + if (!sreq->ssids[i].ssid_len) + continue; + + req->ssids[i].ssid_len = cpu_to_le32(sreq->ssids[i].ssid_len); + memcpy(req->ssids[i].ssid, sreq->ssids[i].ssid, + sreq->ssids[i].ssid_len); + n_ssids++; + } + req->ssid_type = n_ssids ? BIT(2) : BIT(0); + req->ssid_type_ext = n_ssids ? 
BIT(0) : 0; + req->ssids_num = n_ssids; + + /* increase channel time for passive scan */ + if (!sreq->n_ssids) + duration *= 2; + req->timeout_value = cpu_to_le16(sreq->n_channels * duration); + req->channel_min_dwell_time = cpu_to_le16(duration); + req->channel_dwell_time = cpu_to_le16(duration); + + req->channels_num = min_t(u8, sreq->n_channels, 32); + req->ext_channels_num = min_t(u8, ext_channels_num, 32); + for (i = 0; i < req->channels_num + req->ext_channels_num; i++) { + if (i >= 32) + chan = &req->ext_channels[i - 32]; + else + chan = &req->channels[i]; + + chan->band = scan_list[i]->band == NL80211_BAND_2GHZ ? 1 : 2; + chan->channel_num = scan_list[i]->hw_value; + } + req->channel_type = sreq->n_channels ? 4 : 0; + + if (sreq->ie_len > 0) { + memcpy(req->ies, sreq->ie, sreq->ie_len); + req->ies_len = cpu_to_le16(sreq->ie_len); + } + + memcpy(req->bssid, sreq->bssid, ETH_ALEN); + if (sreq->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) { + get_random_mask_addr(req->random_mac, sreq->mac_addr, + sreq->mac_addr_mask); + req->scan_func = 1; + } + + err = __mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_CMD_START_HW_SCAN, + false); + if (err < 0) + clear_bit(MT76_HW_SCANNING, &phy->mt76->state); + + return err; +} + +int mt7615_mcu_cancel_hw_scan(struct mt7615_phy *phy, + struct ieee80211_vif *vif) +{ + struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; + struct mt7615_dev *dev = phy->dev; + struct { + u8 seq_num; + u8 is_ext_channel; + u8 rsv[2]; + } __packed req = { + .seq_num = mvif->scan_seq_num, + }; + + if (test_and_clear_bit(MT76_HW_SCANNING, &phy->mt76->state)) { + struct cfg80211_scan_info info = { + .aborted = true, + }; + + ieee80211_scan_completed(phy->mt76->hw, &info); + } + + return __mt76_mcu_send_msg(&dev->mt76, MCU_CMD_CANCEL_HW_SCAN, &req, + sizeof(req), false); +} + +int mt7615_mcu_sched_scan_req(struct mt7615_phy *phy, + struct ieee80211_vif *vif, + struct cfg80211_sched_scan_request *sreq) +{ + struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; + struct ieee80211_channel **scan_list = sreq->channels; + struct mt7615_dev *dev = phy->dev; + bool ext_phy = phy != &dev->phy; + struct mt7615_mcu_scan_channel *chan; + struct mt7615_sched_scan_req *req; + struct cfg80211_match_set *match; + struct cfg80211_ssid *ssid; + struct sk_buff *skb; + int i; + + if (!mt7615_firmware_offload(dev)) + return -ENOTSUPP; + + skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, + sizeof(*req) + sreq->ie_len); + if (!skb) + return -ENOMEM; + + mvif->scan_seq_num = (mvif->scan_seq_num + 1) & 0x7f; + + req = (struct mt7615_sched_scan_req *)skb_put(skb, sizeof(*req)); + req->version = 1; + req->seq_num = mvif->scan_seq_num | ext_phy << 7; + + if (sreq->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) { + get_random_mask_addr(req->random_mac, sreq->mac_addr, + sreq->mac_addr_mask); + req->scan_func = 1; + } + + req->ssids_num = sreq->n_ssids; + for (i = 0; i < req->ssids_num; i++) { + ssid = &sreq->ssids[i]; + memcpy(req->ssids[i].ssid, ssid->ssid, ssid->ssid_len); + req->ssids[i].ssid_len = cpu_to_le32(ssid->ssid_len); + } + + req->match_num = sreq->n_match_sets; + for (i = 0; i < req->match_num; i++) { + match = &sreq->match_sets[i]; + memcpy(req->match[i].ssid, match->ssid.ssid, + match->ssid.ssid_len); + req->match[i].rssi_th = cpu_to_le32(match->rssi_thold); + req->match[i].ssid_len = match->ssid.ssid_len; + } + + req->channel_type = sreq->n_channels ? 
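/*
 * Both scan paths here randomize the probe source address with
 * get_random_mask_addr(dst, addr, mask) when userspace sets
 * NL80211_SCAN_FLAG_RANDOM_ADDR: bits set in mask are pinned to addr,
 * the rest are randomized. A sketch of that semantic (the
 * etherdevice.h helper is assumed to do the equivalent):
 */
get_random_bytes(dst, ETH_ALEN);
for (i = 0; i < ETH_ALEN; i++)
        dst[i] = (dst[i] & ~mask[i]) | (addr[i] & mask[i]);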
4 : 0; + req->channels_num = min_t(u8, sreq->n_channels, 64); + for (i = 0; i < req->channels_num; i++) { + chan = &req->channels[i]; + chan->band = scan_list[i]->band == NL80211_BAND_2GHZ ? 1 : 2; + chan->channel_num = scan_list[i]->hw_value; + } + + req->intervals_num = sreq->n_scan_plans; + for (i = 0; i < req->intervals_num; i++) + req->intervals[i] = cpu_to_le16(sreq->scan_plans[i].interval); + + if (sreq->ie_len > 0) { + req->ie_len = cpu_to_le16(sreq->ie_len); + memcpy(skb_put(skb, sreq->ie_len), sreq->ie, sreq->ie_len); + } + + return __mt76_mcu_skb_send_msg(&dev->mt76, skb, + MCU_CMD_SCHED_SCAN_REQ, false); +} + +int mt7615_mcu_sched_scan_enable(struct mt7615_phy *phy, + struct ieee80211_vif *vif, + bool enable) +{ + struct mt7615_dev *dev = phy->dev; + struct { + u8 active; /* 0: enabled 1: disabled */ + u8 rsv[3]; + } __packed req = { + .active = !enable, + }; + + if (!mt7615_firmware_offload(dev)) + return -ENOTSUPP; + + if (enable) + set_bit(MT76_HW_SCHED_SCANNING, &phy->mt76->state); + else + clear_bit(MT76_HW_SCHED_SCANNING, &phy->mt76->state); + + return __mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SCHED_SCAN_ENABLE, + &req, sizeof(req), false); +} + +static int mt7615_find_freq_idx(const u16 *freqs, int n_freqs, u16 cur) +{ + int i; + + for (i = 0; i < n_freqs; i++) + if (cur == freqs[i]) + return i; + + return -1; +} + +static int mt7615_dcoc_freq_idx(u16 freq, u8 bw) +{ + static const u16 freq_list[] = { + 4980, 5805, 5905, 5190, + 5230, 5270, 5310, 5350, + 5390, 5430, 5470, 5510, + 5550, 5590, 5630, 5670, + 5710, 5755, 5795, 5835, + 5875, 5210, 5290, 5370, + 5450, 5530, 5610, 5690, + 5775, 5855 + }; + static const u16 freq_bw40[] = { + 5190, 5230, 5270, 5310, + 5350, 5390, 5430, 5470, + 5510, 5550, 5590, 5630, + 5670, 5710, 5755, 5795, + 5835, 5875 + }; + int offset_2g = ARRAY_SIZE(freq_list); + int idx; + + if (freq < 4000) { + if (freq < 2427) + return offset_2g; + if (freq < 2442) + return offset_2g + 1; + if (freq < 2457) + return offset_2g + 2; + + return offset_2g + 3; + } + + switch (bw) { + case NL80211_CHAN_WIDTH_80: + case NL80211_CHAN_WIDTH_80P80: + case NL80211_CHAN_WIDTH_160: + break; + default: + idx = mt7615_find_freq_idx(freq_bw40, ARRAY_SIZE(freq_bw40), + freq + 10); + if (idx >= 0) { + freq = freq_bw40[idx]; + break; + } + + idx = mt7615_find_freq_idx(freq_bw40, ARRAY_SIZE(freq_bw40), + freq - 10); + if (idx >= 0) { + freq = freq_bw40[idx]; + break; + } + /* fall through */ + case NL80211_CHAN_WIDTH_40: + idx = mt7615_find_freq_idx(freq_bw40, ARRAY_SIZE(freq_bw40), + freq); + if (idx >= 0) + break; + + return -1; + + } + + return mt7615_find_freq_idx(freq_list, ARRAY_SIZE(freq_list), freq); +} + +int mt7615_mcu_apply_rx_dcoc(struct mt7615_phy *phy) +{ + struct mt7615_dev *dev = phy->dev; + struct cfg80211_chan_def *chandef = &phy->mt76->chandef; + int freq2 = chandef->center_freq2; + int ret; + struct { + u8 direction; + u8 runtime_calibration; + u8 _rsv[2]; + + __le16 center_freq; + u8 bw; + u8 band; + u8 is_freq2; + u8 success; + u8 dbdc_en; + + u8 _rsv2; + + struct { + __le32 sx0_i_lna[4]; + __le32 sx0_q_lna[4]; + + __le32 sx2_i_lna[4]; + __le32 sx2_q_lna[4]; + } dcoc_data[4]; + } req = { + .direction = 1, + + .bw = mt7615_mcu_chan_bw(chandef), + .band = chandef->center_freq1 > 4000, + .dbdc_en = !!dev->mt76.phy2, + }; + u16 center_freq = chandef->center_freq1; + int freq_idx; + u8 *eep = dev->mt76.eeprom.data; + + if (!(eep[MT_EE_CALDATA_FLASH] & MT_EE_CALDATA_FLASH_RX_CAL)) + return 0; + + if (chandef->width == NL80211_CHAN_WIDTH_160) { + freq2 = 
center_freq + 40; + center_freq -= 40; + } + +again: + req.runtime_calibration = 1; + freq_idx = mt7615_dcoc_freq_idx(center_freq, chandef->width); + if (freq_idx < 0) + goto out; + + memcpy(req.dcoc_data, eep + MT7615_EEPROM_DCOC_OFFSET + + freq_idx * MT7615_EEPROM_DCOC_SIZE, + sizeof(req.dcoc_data)); + req.runtime_calibration = 0; + +out: + req.center_freq = cpu_to_le16(center_freq); + ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_RXDCOC_CAL, &req, + sizeof(req), true); + + if ((chandef->width == NL80211_CHAN_WIDTH_80P80 || + chandef->width == NL80211_CHAN_WIDTH_160) && !req.is_freq2) { + req.is_freq2 = true; + center_freq = freq2; + goto again; + } + + return ret; +} + +static int mt7615_dpd_freq_idx(u16 freq, u8 bw) +{ + static const u16 freq_list[] = { + 4920, 4940, 4960, 4980, + 5040, 5060, 5080, 5180, + 5200, 5220, 5240, 5260, + 5280, 5300, 5320, 5340, + 5360, 5380, 5400, 5420, + 5440, 5460, 5480, 5500, + 5520, 5540, 5560, 5580, + 5600, 5620, 5640, 5660, + 5680, 5700, 5720, 5745, + 5765, 5785, 5805, 5825, + 5845, 5865, 5885, 5905 + }; + int offset_2g = ARRAY_SIZE(freq_list); + int idx; + + if (freq < 4000) { + if (freq < 2432) + return offset_2g; + if (freq < 2457) + return offset_2g + 1; + + return offset_2g + 2; + } + + if (bw != NL80211_CHAN_WIDTH_20) { + idx = mt7615_find_freq_idx(freq_list, ARRAY_SIZE(freq_list), + freq + 10); + if (idx >= 0) + return idx; + + idx = mt7615_find_freq_idx(freq_list, ARRAY_SIZE(freq_list), + freq - 10); + if (idx >= 0) + return idx; + } + + return mt7615_find_freq_idx(freq_list, ARRAY_SIZE(freq_list), freq); +} + + +int mt7615_mcu_apply_tx_dpd(struct mt7615_phy *phy) +{ + struct mt7615_dev *dev = phy->dev; + struct cfg80211_chan_def *chandef = &phy->mt76->chandef; + int freq2 = chandef->center_freq2; + int ret; + struct { + u8 direction; + u8 runtime_calibration; + u8 _rsv[2]; + + __le16 center_freq; + u8 bw; + u8 band; + u8 is_freq2; + u8 success; + u8 dbdc_en; + + u8 _rsv2; + + struct { + struct { + u32 dpd_g0; + u8 data[32]; + } wf0, wf1; + + struct { + u32 dpd_g0_prim; + u32 dpd_g0_sec; + u8 data_prim[32]; + u8 data_sec[32]; + } wf2, wf3; + } dpd_data; + } req = { + .direction = 1, + + .bw = mt7615_mcu_chan_bw(chandef), + .band = chandef->center_freq1 > 4000, + .dbdc_en = !!dev->mt76.phy2, + }; + u16 center_freq = chandef->center_freq1; + int freq_idx; + u8 *eep = dev->mt76.eeprom.data; + + if (!(eep[MT_EE_CALDATA_FLASH] & MT_EE_CALDATA_FLASH_TX_DPD)) + return 0; + + if (chandef->width == NL80211_CHAN_WIDTH_160) { + freq2 = center_freq + 40; + center_freq -= 40; + } + +again: + req.runtime_calibration = 1; + freq_idx = mt7615_dpd_freq_idx(center_freq, chandef->width); + if (freq_idx < 0) + goto out; + + memcpy(&req.dpd_data, eep + MT7615_EEPROM_TXDPD_OFFSET + + freq_idx * MT7615_EEPROM_TXDPD_SIZE, + sizeof(req.dpd_data)); + req.runtime_calibration = 0; + +out: + req.center_freq = cpu_to_le16(center_freq); + ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_TXDPD_CAL, &req, + sizeof(req), true); + + if ((chandef->width == NL80211_CHAN_WIDTH_80P80 || + chandef->width == NL80211_CHAN_WIDTH_160) && !req.is_freq2) { + req.is_freq2 = true; + center_freq = freq2; + goto again; + } + + return ret; +} + +#ifdef CONFIG_PM +int mt7615_mcu_set_hif_suspend(struct mt7615_dev *dev, bool suspend) +{ + struct { + struct { + u8 hif_type; /* 0x0: HIF_SDIO + * 0x1: HIF_USB + * 0x2: HIF_PCIE + */ + u8 pad[3]; + } __packed hdr; + struct hif_suspend_tlv { + __le16 tag; + __le16 len; + u8 suspend; + } __packed hif_suspend; + } req = { + .hif_suspend = { + .tag = 
cpu_to_le16(0), /* 0: UNI_HIF_CTRL_BASIC */ + .len = cpu_to_le16(sizeof(struct hif_suspend_tlv)), + .suspend = suspend, + }, + }; + + if (mt76_is_mmio(&dev->mt76)) + req.hdr.hif_type = 2; + else if (mt76_is_usb(&dev->mt76)) + req.hdr.hif_type = 1; + + return __mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_HIF_CTRL, + &req, sizeof(req), true); +} +EXPORT_SYMBOL_GPL(mt7615_mcu_set_hif_suspend); + +static int +mt7615_mcu_set_bss_pm(struct mt7615_dev *dev, struct ieee80211_vif *vif, + bool enable) +{ + struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; + struct { + u8 bss_idx; + u8 dtim_period; + __le16 aid; + __le16 bcn_interval; + __le16 atim_window; + u8 uapsd; + u8 bmc_delivered_ac; + u8 bmc_triggered_ac; + u8 pad; + } req = { + .bss_idx = mvif->idx, + .aid = cpu_to_le16(vif->bss_conf.aid), + .dtim_period = vif->bss_conf.dtim_period, + .bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int), + }; + struct { + u8 bss_idx; + u8 pad[3]; + } req_hdr = { + .bss_idx = mvif->idx, + }; + int err; + + if (vif->type != NL80211_IFTYPE_STATION || + !mt7615_firmware_offload(dev)) + return -ENOTSUPP; + + err = __mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SET_BSS_ABORT, + &req_hdr, sizeof(req_hdr), false); + if (err < 0 || !enable) + return err; + + return __mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SET_BSS_CONNECTED, + &req, sizeof(req), false); +} + +static int +mt7615_mcu_set_wow_ctrl(struct mt7615_phy *phy, struct ieee80211_vif *vif, + bool suspend, struct cfg80211_wowlan *wowlan) +{ + struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; + struct mt7615_dev *dev = phy->dev; + struct { + struct { + u8 bss_idx; + u8 pad[3]; + } __packed hdr; + struct mt7615_wow_ctrl_tlv wow_ctrl_tlv; + } req = { + .hdr = { + .bss_idx = mvif->idx, + }, + .wow_ctrl_tlv = { + .tag = cpu_to_le16(UNI_SUSPEND_WOW_CTRL), + .len = cpu_to_le16(sizeof(struct mt7615_wow_ctrl_tlv)), + .cmd = suspend ? 
1 : 2,
+		},
+	};
+
+	if (wowlan->magic_pkt)
+		req.wow_ctrl_tlv.trigger |= BIT(0);
+	if (wowlan->disconnect)
+		req.wow_ctrl_tlv.trigger |= BIT(2);
+	if (wowlan->nd_config) {
+		mt7615_mcu_sched_scan_req(phy, vif, wowlan->nd_config);
+		req.wow_ctrl_tlv.trigger |= BIT(5);
+		mt7615_mcu_sched_scan_enable(phy, vif, suspend);
+	}
+
+	if (mt76_is_mmio(&dev->mt76))
+		req.wow_ctrl_tlv.wakeup_hif = 2;
+	else if (mt76_is_usb(&dev->mt76))
+		req.wow_ctrl_tlv.wakeup_hif = 1;
+
+	return __mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_SUSPEND,
+				   &req, sizeof(req), true);
+}
+
+static int
+mt7615_mcu_set_wow_pattern(struct mt7615_dev *dev,
+			   struct ieee80211_vif *vif,
+			   u8 index, bool enable,
+			   struct cfg80211_pkt_pattern *pattern)
+{
+	struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+	struct mt7615_wow_pattern_tlv *ptlv;
+	struct sk_buff *skb;
+	struct req_hdr {
+		u8 bss_idx;
+		u8 pad[3];
+	} __packed hdr = {
+		.bss_idx = mvif->idx,
+	};
+
+	skb = mt76_mcu_msg_alloc(&dev->mt76, NULL,
+				 sizeof(hdr) + sizeof(*ptlv));
+	if (!skb)
+		return -ENOMEM;
+
+	skb_put_data(skb, &hdr, sizeof(hdr));
+	ptlv = (struct mt7615_wow_pattern_tlv *)skb_put(skb, sizeof(*ptlv));
+	ptlv->tag = cpu_to_le16(UNI_SUSPEND_WOW_PATTERN);
+	ptlv->len = cpu_to_le16(sizeof(*ptlv));
+	ptlv->data_len = pattern->pattern_len;
+	ptlv->enable = enable;
+	ptlv->index = index;
+
+	memcpy(ptlv->pattern, pattern->pattern, pattern->pattern_len);
+	/* one mask bit covers one pattern byte, so round the length up */
+	memcpy(ptlv->mask, pattern->mask,
+	       DIV_ROUND_UP(pattern->pattern_len, 8));
+
+	return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+				       MCU_UNI_CMD_SUSPEND, true);
+}
+
+static int
+mt7615_mcu_set_suspend_mode(struct mt7615_dev *dev,
+			    struct ieee80211_vif *vif,
+			    bool enable, u8 mdtim, bool wow_suspend)
+{
+	struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+	struct {
+		struct {
+			u8 bss_idx;
+			u8 pad[3];
+		} __packed hdr;
+		struct mt7615_suspend_tlv suspend_tlv;
+	} req = {
+		.hdr = {
+			.bss_idx = mvif->idx,
+		},
+		.suspend_tlv = {
+			.tag = cpu_to_le16(UNI_SUSPEND_MODE_SETTING),
+			.len = cpu_to_le16(sizeof(struct mt7615_suspend_tlv)),
+			.enable = enable,
+			.mdtim = mdtim,
+			.wow_suspend = wow_suspend,
+		},
+	};
+
+	return __mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_SUSPEND,
+				   &req, sizeof(req), true);
+}
+
+static int
+mt7615_mcu_set_gtk_rekey(struct mt7615_dev *dev,
+			 struct ieee80211_vif *vif,
+			 bool suspend)
+{
+	struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+	struct {
+		struct {
+			u8 bss_idx;
+			u8 pad[3];
+		} __packed hdr;
+		struct mt7615_gtk_rekey_tlv gtk_tlv;
+	} __packed req = {
+		.hdr = {
+			.bss_idx = mvif->idx,
+		},
+		.gtk_tlv = {
+			.tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_GTK_REKEY),
+			.len = cpu_to_le16(sizeof(struct mt7615_gtk_rekey_tlv)),
+			.rekey_mode = !suspend,
+		},
+	};
+
+	return __mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_OFFLOAD,
+				   &req, sizeof(req), true);
+}
+
+void mt7615_mcu_set_suspend_iter(void *priv, u8 *mac,
+				 struct ieee80211_vif *vif)
+{
+	struct mt7615_phy *phy = priv;
+	bool suspend = test_bit(MT76_STATE_SUSPEND, &phy->mt76->state);
+	struct ieee80211_hw *hw = phy->mt76->hw;
+	struct cfg80211_wowlan *wowlan = hw->wiphy->wowlan_config;
+	int i;
+
+	mt7615_mcu_set_bss_pm(phy->dev, vif, suspend);
+
+	mt7615_mcu_set_gtk_rekey(phy->dev, vif, suspend);
+
+	mt7615_mcu_set_suspend_mode(phy->dev, vif, suspend, 1, true);
+
+	for (i = 0; i < wowlan->n_patterns; i++)
+		mt7615_mcu_set_wow_pattern(phy->dev, vif, i, suspend,
+					   &wowlan->patterns[i]);
+	mt7615_mcu_set_wow_ctrl(phy, vif, suspend, wowlan);
+}
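For context, an editor's sketch rather than part of the patch: mt7615_mcu_set_suspend_iter() matches the mac80211 interface-iterator callback signature, so a suspend path would typically fan it out over every vif roughly as below; the wrapper name is hypothetical.

/* Hypothetical wrapper, assuming the driver headers above;
 * IEEE80211_IFACE_ITER_RESUME_ALL makes mac80211 visit every
 * interface, including ones that are not currently running.
 */
static void mt7615_suspend_all_vifs(struct mt7615_phy *phy)
{
	ieee80211_iterate_active_interfaces(phy->mt76->hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt7615_mcu_set_suspend_iter,
					    phy);
}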
+
+static void
+mt7615_mcu_key_iter(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		    struct ieee80211_sta *sta, struct ieee80211_key_conf *key,
+		    void *data)
+{
+	struct mt7615_gtk_rekey_tlv *gtk_tlv = data;
+	u32 cipher;
+
+	if (key->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
+	    key->cipher != WLAN_CIPHER_SUITE_CCMP &&
+	    key->cipher != WLAN_CIPHER_SUITE_TKIP)
+		return;
+
+	if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
+		gtk_tlv->proto = cpu_to_le32(NL80211_WPA_VERSION_1);
+		cipher = BIT(3);
+	} else {
+		gtk_tlv->proto = cpu_to_le32(NL80211_WPA_VERSION_2);
+		cipher = BIT(4);
+	}
+
+	/* we assume the device holds a single pairwise key here */
+	if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
+		gtk_tlv->pairwise_cipher = cpu_to_le32(cipher);
+		gtk_tlv->group_cipher = cpu_to_le32(cipher);
+		gtk_tlv->keyid = key->keyidx;
+	}
+}
+
+int mt7615_mcu_update_gtk_rekey(struct ieee80211_hw *hw,
+				struct ieee80211_vif *vif,
+				struct cfg80211_gtk_rekey_data *key)
+{
+	struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+	struct mt7615_dev *dev = mt7615_hw_dev(hw);
+	struct mt7615_gtk_rekey_tlv *gtk_tlv;
+	struct sk_buff *skb;
+	struct {
+		u8 bss_idx;
+		u8 pad[3];
+	} __packed hdr = {
+		.bss_idx = mvif->idx,
+	};
+
+	skb = mt76_mcu_msg_alloc(&dev->mt76, NULL,
+				 sizeof(hdr) + sizeof(*gtk_tlv));
+	if (!skb)
+		return -ENOMEM;
+
+	skb_put_data(skb, &hdr, sizeof(hdr));
+	gtk_tlv = (struct mt7615_gtk_rekey_tlv *)skb_put(skb,
+							 sizeof(*gtk_tlv));
+	gtk_tlv->tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_GTK_REKEY);
+	gtk_tlv->len = cpu_to_le16(sizeof(*gtk_tlv));
+	gtk_tlv->rekey_mode = 2;
+	gtk_tlv->option = 1;
+
+	rcu_read_lock();
+	ieee80211_iter_keys_rcu(hw, vif, mt7615_mcu_key_iter, gtk_tlv);
+	rcu_read_unlock();
+
+	memcpy(gtk_tlv->kek, key->kek, NL80211_KEK_LEN);
+	memcpy(gtk_tlv->kck, key->kck, NL80211_KCK_LEN);
+	memcpy(gtk_tlv->replay_ctr, key->replay_ctr, NL80211_REPLAY_CTR_LEN);
+
+	return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+				       MCU_UNI_CMD_OFFLOAD, true);
+}
+#endif /* CONFIG_PM */
+
+int mt7615_mcu_set_roc(struct mt7615_phy *phy, struct ieee80211_vif *vif,
+		       struct ieee80211_channel *chan, int duration)
+{
+	struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+	struct mt7615_dev *dev = phy->dev;
+	struct mt7615_roc_tlv req = {
+		.bss_idx = mvif->idx,
+		.active = !chan,
+		.max_interval = cpu_to_le32(duration),
+		.primary_chan = chan ? chan->hw_value : 0,
+		.band = chan ?
chan->band : 0, + .req_type = 2, + }; + + phy->roc_grant = false; + + return __mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SET_ROC, &req, + sizeof(req), false); +} + +int mt7615_mcu_set_p2p_oppps(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; + int ct_window = vif->bss_conf.p2p_noa_attr.oppps_ctwindow; + struct mt7615_dev *dev = mt7615_hw_dev(hw); + struct { + __le32 ct_win; + u8 bss_idx; + u8 rsv[3]; + } __packed req = { + .ct_win = cpu_to_le32(ct_window), + .bss_idx = mvif->idx, + }; + + if (!mt7615_firmware_offload(dev)) + return -ENOTSUPP; + + return __mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SET_P2P_OPPPS, + &req, sizeof(req), false); +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h index d1f7391472fc..2314d0b23af1 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h @@ -81,7 +81,12 @@ enum { MCU_EVENT_GENERIC = 0x01, MCU_EVENT_ACCESS_REG = 0x02, MCU_EVENT_MT_PATCH_SEM = 0x04, + MCU_EVENT_SCAN_DONE = 0x0d, + MCU_EVENT_ROC = 0x10, + MCU_EVENT_BSS_ABSENCE = 0x11, + MCU_EVENT_BSS_BEACON_LOSS = 0x13, MCU_EVENT_CH_PRIVILEGE = 0x18, + MCU_EVENT_SCHED_SCAN_DONE = 0x23, MCU_EVENT_EXT = 0xed, MCU_EVENT_RESTART_DL = 0xef, }; @@ -232,7 +237,9 @@ enum { #define MCU_FW_PREFIX BIT(31) #define MCU_UNI_PREFIX BIT(30) -#define MCU_CMD_MASK ~(MCU_FW_PREFIX | MCU_UNI_PREFIX) +#define MCU_CE_PREFIX BIT(29) +#define MCU_CMD_MASK ~(MCU_FW_PREFIX | MCU_UNI_PREFIX | \ + MCU_CE_PREFIX) enum { MCU_CMD_TARGET_ADDRESS_LEN_REQ = MCU_FW_PREFIX | 0x01, @@ -265,6 +272,8 @@ enum { MCU_EXT_CMD_BCN_OFFLOAD = 0x49, MCU_EXT_CMD_SET_RX_PATH = 0x4e, MCU_EXT_CMD_TX_POWER_FEATURE_CTRL = 0x58, + MCU_EXT_CMD_RXDCOC_CAL = 0x59, + MCU_EXT_CMD_TXDPD_CAL = 0x60, MCU_EXT_CMD_SET_RDD_TH = 0x7c, MCU_EXT_CMD_SET_RDD_PATTERN = 0x7d, }; @@ -273,6 +282,281 @@ enum { MCU_UNI_CMD_DEV_INFO_UPDATE = MCU_UNI_PREFIX | 0x01, MCU_UNI_CMD_BSS_INFO_UPDATE = MCU_UNI_PREFIX | 0x02, MCU_UNI_CMD_STA_REC_UPDATE = MCU_UNI_PREFIX | 0x03, + MCU_UNI_CMD_SUSPEND = MCU_UNI_PREFIX | 0x05, + MCU_UNI_CMD_OFFLOAD = MCU_UNI_PREFIX | 0x06, + MCU_UNI_CMD_HIF_CTRL = MCU_UNI_PREFIX | 0x07, +}; + +struct mt7615_mcu_uni_event { + u8 cid; + u8 pad[3]; + __le32 status; /* 0: success, others: fail */ +} __packed; + +struct mt7615_beacon_loss_event { + u8 bss_idx; + u8 reason; + u8 pad[2]; +} __packed; + +struct mt7615_mcu_scan_ssid { + __le32 ssid_len; + u8 ssid[IEEE80211_MAX_SSID_LEN]; +} __packed; + +struct mt7615_mcu_scan_channel { + u8 band; /* 1: 2.4GHz + * 2: 5.0GHz + * Others: Reserved + */ + u8 channel_num; +} __packed; + +struct mt7615_mcu_scan_match { + __le32 rssi_th; + u8 ssid[IEEE80211_MAX_SSID_LEN]; + u8 ssid_len; + u8 rsv[3]; +} __packed; + +struct mt7615_hw_scan_req { + u8 seq_num; + u8 bss_idx; + u8 scan_type; /* 0: PASSIVE SCAN + * 1: ACTIVE SCAN + */ + u8 ssid_type; /* BIT(0) wildcard SSID + * BIT(1) P2P wildcard SSID + * BIT(2) specified SSID + wildcard SSID + * BIT(2) + ssid_type_ext BIT(0) specified SSID only + */ + u8 ssids_num; + u8 probe_req_num; /* Number of probe request for each SSID */ + u8 scan_func; /* BIT(0) Enable random MAC scan + * BIT(1) Disable DBDC scan type 1~3. + * BIT(2) Use DBDC scan type 3 (dedicated one RF to scan). + */ + u8 version; /* 0: Not support fields after ies. + * 1: Support fields after ies. 
+			   */
+	struct mt7615_mcu_scan_ssid ssids[4];
+	__le16 probe_delay_time;
+	__le16 channel_dwell_time; /* channel dwell interval */
+	__le16 timeout_value;
+	u8 channel_type; /* 0: Full channels
+			  * 1: Only 2.4GHz channels
+			  * 2: Only 5GHz channels
+			  * 3: P2P social channel only (channel #1, #6 and #11)
+			  * 4: Specified channels
+			  * Others: Reserved
+			  */
+	u8 channels_num; /* valid when channel_type is 4 */
+	/* valid when channels_num is set */
+	struct mt7615_mcu_scan_channel channels[32];
+	__le16 ies_len;
+	u8 ies[MT7615_SCAN_IE_LEN];
+	/* following fields are valid if version > 0 */
+	u8 ext_channels_num;
+	u8 ext_ssids_num;
+	__le16 channel_min_dwell_time;
+	struct mt7615_mcu_scan_channel ext_channels[32];
+	struct mt7615_mcu_scan_ssid ext_ssids[6];
+	u8 bssid[ETH_ALEN];
+	u8 random_mac[ETH_ALEN]; /* valid when BIT(1) in scan_func is set. */
+	u8 pad[63];
+	u8 ssid_type_ext;
+} __packed;
+
+#define SCAN_DONE_EVENT_MAX_CHANNEL_NUM	64
+struct mt7615_hw_scan_done {
+	u8 seq_num;
+	u8 sparse_channel_num;
+	struct mt7615_mcu_scan_channel sparse_channel;
+	u8 complete_channel_num;
+	u8 current_state;
+	u8 version;
+	u8 pad;
+	__le32 beacon_scan_num;
+	u8 pno_enabled;
+	u8 pad2[3];
+	u8 sparse_channel_valid_num;
+	u8 pad3[3];
+	u8 channel_num[SCAN_DONE_EVENT_MAX_CHANNEL_NUM];
+	/* idle format for channel_idle_time
+	 * 0: first byte: idle time (ms), 2nd byte: dwell time (ms)
+	 * 1: first byte: idle time (8ms), 2nd byte: dwell time (8ms)
+	 * 2: dwell time (16us)
+	 */
+	__le16 channel_idle_time[SCAN_DONE_EVENT_MAX_CHANNEL_NUM];
+	/* beacon and probe response count */
+	u8 beacon_probe_num[SCAN_DONE_EVENT_MAX_CHANNEL_NUM];
+	u8 mdrdy_count[SCAN_DONE_EVENT_MAX_CHANNEL_NUM];
+	__le32 beacon_2g_num;
+	__le32 beacon_5g_num;
+} __packed;
+
+struct mt7615_sched_scan_req {
+	u8 version;
+	u8 seq_num;
+	u8 stop_on_match;
+	u8 ssids_num;
+	u8 match_num;
+	u8 pad;
+	__le16 ie_len;
+	struct mt7615_mcu_scan_ssid ssids[MT7615_MAX_SCHED_SCAN_SSID];
+	struct mt7615_mcu_scan_match match[MT7615_MAX_SCAN_MATCH];
+	u8 channel_type;
+	u8 channels_num;
+	u8 intervals_num;
+	u8 scan_func; /* BIT(0) enable random MAC address */
+	struct mt7615_mcu_scan_channel channels[64];
+	__le16 intervals[MT7615_MAX_SCHED_SCAN_INTERVAL];
+	u8 random_mac[ETH_ALEN]; /* valid when BIT(0) in scan_func is set */
+	u8 pad2[58];
+} __packed;
+
+struct mt7615_sched_scan_done {
+	u8 seq_num;
+	u8 status; /* 0: ssid found */
+	__le16 pad;
+} __packed;
+
+struct mt7615_mcu_bss_event {
+	u8 bss_idx;
+	u8 is_absent;
+	u8 free_quota;
+	u8 pad;
+} __packed;
+
+struct mt7615_bss_basic_tlv {
+	__le16 tag;
+	__le16 len;
+	u8 active;
+	u8 omac_idx;
+	u8 hw_bss_idx;
+	u8 band_idx;
+	__le32 conn_type;
+	u8 conn_state;
+	u8 wmm_idx;
+	u8 bssid[ETH_ALEN];
+	__le16 bmc_tx_wlan_idx;
+	__le16 bcn_interval;
+	u8 dtim_period;
+	u8 phymode; /* bit(0): A
+		     * bit(1): B
+		     * bit(2): G
+		     * bit(3): GN
+		     * bit(4): AN
+		     * bit(5): AC
+		     */
+	__le16 sta_idx;
+	u8 nonht_basic_phy;
+	u8 pad[3];
+} __packed;
+
+struct mt7615_wow_ctrl_tlv {
+	__le16 tag;
+	__le16 len;
+	u8 cmd; /* 0x1: PM_WOWLAN_REQ_START
+		 * 0x2: PM_WOWLAN_REQ_STOP
+		 * 0x3: PM_WOWLAN_PARAM_CLEAR
+		 */
+	u8 trigger; /* 0: NONE
+		     * BIT(0): NL80211_WOWLAN_TRIG_MAGIC_PKT
+		     * BIT(1): NL80211_WOWLAN_TRIG_ANY
+		     * BIT(2): NL80211_WOWLAN_TRIG_DISCONNECT
+		     * BIT(3): NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE
+		     * BIT(4): BEACON_LOST
+		     * BIT(5): NL80211_WOWLAN_TRIG_NET_DETECT
+		     */
+	u8 wakeup_hif; /* 0x0: HIF_SDIO
+			* 0x1: HIF_USB
+			* 0x2: HIF_PCIE
+			* 0x3: HIF_GPIO
+			*/
+	u8 pad;
+	u8 rsv[4];
+} __packed;
+
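A note on the layout below, an editor's illustration rather than part of the patch: in the WoW pattern TLV, each bit of 'mask' covers one byte of 'pattern', so a pattern_len-byte pattern needs DIV_ROUND_UP(pattern_len, 8) mask bytes, which is what mt7615_mcu_set_wow_pattern() copies. A minimal helper showing the mapping:

#include <linux/bits.h>
#include <linux/types.h>

/* True if byte 'i' of the WoW pattern must match, i.e. its mask bit is set. */
static inline bool wow_pattern_byte_enabled(const u8 *mask, unsigned int i)
{
	return mask[i / 8] & BIT(i % 8);
}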
+#define MT7615_WOW_MASK_MAX_LEN		16
+#define MT7615_WOW_PATTERN_MAX_LEN	128
+struct mt7615_wow_pattern_tlv {
+	__le16 tag;
+	__le16 len;
+	u8 index; /* pattern index */
+	u8 enable; /* 0: disable
+		    * 1: enable
+		    */
+	u8 data_len; /* pattern length */
+	u8 pad;
+	u8 mask[MT7615_WOW_MASK_MAX_LEN];
+	u8 pattern[MT7615_WOW_PATTERN_MAX_LEN];
+	u8 rsv[4];
+} __packed;
+
+struct mt7615_suspend_tlv {
+	__le16 tag;
+	__le16 len;
+	u8 enable; /* 0: suspend mode disabled
+		    * 1: suspend mode enabled
+		    */
+	u8 mdtim; /* LP parameter */
+	u8 wow_suspend; /* 0: update by origin policy
+			 * 1: update by wow dtim
+			 */
+	u8 pad[5];
+} __packed;
+
+struct mt7615_gtk_rekey_tlv {
+	__le16 tag;
+	__le16 len;
+	u8 kek[NL80211_KEK_LEN];
+	u8 kck[NL80211_KCK_LEN];
+	u8 replay_ctr[NL80211_REPLAY_CTR_LEN];
+	u8 rekey_mode; /* 0: rekey offload enable
+			* 1: rekey offload disable
+			* 2: rekey update
+			*/
+	u8 keyid;
+	u8 pad[2];
+	__le32 proto; /* WPA-RSN-WAPI-OPSN */
+	__le32 pairwise_cipher;
+	__le32 group_cipher;
+	__le32 key_mgmt; /* NONE-PSK-IEEE802.1X */
+	__le32 mgmt_group_cipher;
+	u8 option; /* 1: rekey data update without enabling offload */
+	u8 reserved[3];
+} __packed;
+
+struct mt7615_roc_tlv {
+	u8 bss_idx;
+	u8 token;
+	u8 active;
+	u8 primary_chan;
+	u8 sco;
+	u8 band;
+	u8 width; /* To support 80/160MHz bandwidth */
+	u8 freq_seg1; /* To support 80/160MHz bandwidth */
+	u8 freq_seg2; /* To support 80/160MHz bandwidth */
+	u8 req_type;
+	u8 dbdc_band;
+	u8 rsv0;
+	__le32 max_interval; /* ms */
+	u8 rsv1[8];
+} __packed;
+
+/* offload mcu commands */
+enum {
+	MCU_CMD_START_HW_SCAN = MCU_CE_PREFIX | 0x03,
+	MCU_CMD_SET_PS_PROFILE = MCU_CE_PREFIX | 0x05,
+	MCU_CMD_SET_CHAN_DOMAIN = MCU_CE_PREFIX | 0x0f,
+	MCU_CMD_SET_BSS_CONNECTED = MCU_CE_PREFIX | 0x16,
+	MCU_CMD_SET_BSS_ABORT = MCU_CE_PREFIX | 0x17,
+	MCU_CMD_CANCEL_HW_SCAN = MCU_CE_PREFIX | 0x1b,
+	MCU_CMD_SET_ROC = MCU_CE_PREFIX | 0x1c,
+	MCU_CMD_SET_P2P_OPPPS = MCU_CE_PREFIX | 0x33,
+	MCU_CMD_SCHED_SCAN_ENABLE = MCU_CE_PREFIX | 0x61,
+	MCU_CMD_SCHED_SCAN_REQ = MCU_CE_PREFIX | 0x62,
 };
 
 #define MCU_CMD_ACK				BIT(0)
@@ -283,10 +567,26 @@ enum {
 	UNI_BSS_INFO_BASIC = 0,
+	UNI_BSS_INFO_RLM = 2,
 	UNI_BSS_INFO_BCN_CONTENT = 7,
 };
 
 enum {
+	UNI_SUSPEND_MODE_SETTING,
+	UNI_SUSPEND_WOW_CTRL,
+	UNI_SUSPEND_WOW_GPIO_PARAM,
+	UNI_SUSPEND_WOW_WAKEUP_PORT,
+	UNI_SUSPEND_WOW_PATTERN,
+};
+
+enum {
+	UNI_OFFLOAD_OFFLOAD_ARPNS_IPV4,
+	UNI_OFFLOAD_OFFLOAD_ARPNS_IPV6,
+	UNI_OFFLOAD_OFFLOAD_GTK_REKEY,
+	UNI_OFFLOAD_OFFLOAD_BMC_RPY_DETECT,
+};
+
+enum {
 	PATCH_SEM_RELEASE = 0x0,
 	PATCH_SEM_GET = 0x1
 };
@@ -306,6 +606,11 @@ enum {
 	FW_STATE_CR4_RDY = 7
 };
 
+enum {
+	FW_STATE_PWR_ON = 1,
+	FW_STATE_N9_RDY = 2,
+};
+
 #define STA_TYPE_STA			BIT(0)
 #define STA_TYPE_AP			BIT(1)
 #define STA_TYPE_ADHOC			BIT(2)
@@ -704,11 +1009,4 @@ enum {
 	CH_SWITCH_SCAN_BYPASS_DPD = 9
 };
 
-static inline struct sk_buff *
-mt7615_mcu_msg_alloc(const void *data, int len)
-{
-	return mt76_mcu_msg_alloc(data, sizeof(struct mt7615_mcu_txd),
-				  len, 0);
-}
-
 #endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
index d2eff5442824..e670393506f0 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
@@ -1,5 +1,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pci.h>
 
 #include "mt7615.h"
 #include "regs.h"
@@ -13,12 +15,15 @@ const u32 mt7615e_reg_map[] = {
 	[MT_ARB_BASE] = 0x20c00,
 	[MT_HIF_BASE] = 0x04000,
 	[MT_CSR_BASE] = 0x07000,
+
[MT_PLE_BASE] = 0x08000, + [MT_PSE_BASE] = 0x0c000, [MT_PHY_BASE] = 0x10000, [MT_CFG_BASE] = 0x20200, [MT_AGG_BASE] = 0x20a00, [MT_TMAC_BASE] = 0x21000, [MT_RMAC_BASE] = 0x21200, [MT_DMA_BASE] = 0x21800, + [MT_PF_BASE] = 0x22000, [MT_WTBL_BASE_ON] = 0x23000, [MT_WTBL_BASE_OFF] = 0x23400, [MT_LPON_BASE] = 0x24200, @@ -37,12 +42,15 @@ const u32 mt7663e_reg_map[] = { [MT_ARB_BASE] = 0x20c00, [MT_HIF_BASE] = 0x04000, [MT_CSR_BASE] = 0x07000, + [MT_PLE_BASE] = 0x08000, + [MT_PSE_BASE] = 0x0c000, [MT_PHY_BASE] = 0x10000, [MT_CFG_BASE] = 0x20000, [MT_AGG_BASE] = 0x22000, [MT_TMAC_BASE] = 0x24000, [MT_RMAC_BASE] = 0x25000, [MT_DMA_BASE] = 0x27000, + [MT_PF_BASE] = 0x28000, [MT_WTBL_BASE_ON] = 0x29000, [MT_WTBL_BASE_OFF] = 0x29800, [MT_LPON_BASE] = 0x2b000, @@ -80,30 +88,42 @@ mt7615_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q) static irqreturn_t mt7615_irq_handler(int irq, void *dev_instance) { struct mt7615_dev *dev = dev_instance; - u32 intr; - intr = mt76_rr(dev, MT_INT_SOURCE_CSR); - mt76_wr(dev, MT_INT_SOURCE_CSR, intr); + mt76_wr(dev, MT_INT_MASK_CSR, 0); if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state)) return IRQ_NONE; - trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask); + tasklet_schedule(&dev->irq_tasklet); + + return IRQ_HANDLED; +} + +static void mt7615_irq_tasklet(unsigned long data) +{ + struct mt7615_dev *dev = (struct mt7615_dev *)data; + u32 intr, mask = 0; + mt76_wr(dev, MT_INT_MASK_CSR, 0); + + intr = mt76_rr(dev, MT_INT_SOURCE_CSR); + mt76_wr(dev, MT_INT_SOURCE_CSR, intr); + + trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask); intr &= dev->mt76.mmio.irqmask; if (intr & MT_INT_TX_DONE_ALL) { - mt7615_irq_disable(dev, MT_INT_TX_DONE_ALL); + mask |= MT_INT_TX_DONE_ALL; napi_schedule(&dev->mt76.tx_napi); } if (intr & MT_INT_RX_DONE(0)) { - mt7615_irq_disable(dev, MT_INT_RX_DONE(0)); + mask |= MT_INT_RX_DONE(0); napi_schedule(&dev->mt76.napi[0]); } if (intr & MT_INT_RX_DONE(1)) { - mt7615_irq_disable(dev, MT_INT_RX_DONE(1)); + mask |= MT_INT_RX_DONE(1); napi_schedule(&dev->mt76.napi[1]); } @@ -117,7 +137,7 @@ static irqreturn_t mt7615_irq_handler(int irq, void *dev_instance) } } - return IRQ_HANDLED; + mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, mask, 0); } int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base, @@ -139,18 +159,25 @@ int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base, .sta_remove = mt7615_mac_sta_remove, .update_survey = mt7615_update_channel, }; + struct ieee80211_ops *ops; struct mt7615_dev *dev; struct mt76_dev *mdev; int ret; - mdev = mt76_alloc_device(pdev, sizeof(*dev), &mt7615_ops, &drv_ops); + ops = devm_kmemdup(pdev, &mt7615_ops, sizeof(mt7615_ops), GFP_KERNEL); + if (!ops) + return -ENOMEM; + + mdev = mt76_alloc_device(pdev, sizeof(*dev), ops, &drv_ops); if (!mdev) return -ENOMEM; dev = container_of(mdev, struct mt7615_dev, mt76); mt76_mmio_init(&dev->mt76, mem_base); + tasklet_init(&dev->irq_tasklet, mt7615_irq_tasklet, (unsigned long)dev); dev->reg_map = map; + dev->ops = ops; mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) | (mt76_rr(dev, MT_HW_REV) & 0xff); dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev); @@ -172,3 +199,31 @@ error: ieee80211_free_hw(mt76_hw(dev)); return ret; } + +static int __init mt7615_init(void) +{ + int ret; + + ret = pci_register_driver(&mt7615_pci_driver); + if (ret) + return ret; + + if (IS_ENABLED(CONFIG_MT7622_WMAC)) { + ret = platform_driver_register(&mt7622_wmac_driver); + if (ret) + pci_unregister_driver(&mt7615_pci_driver); + } + + return ret; +} + 
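/*
 * Editor's note, not part of the patch: mt7615_init() above registers
 * the PCI driver first and the MT7622 platform driver second, and it
 * unregisters the PCI driver again if the platform step fails;
 * mt7615_exit() below must therefore unwind in the reverse order,
 * platform driver first and PCI driver last.
 */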
+static void __exit mt7615_exit(void) +{ + if (IS_ENABLED(CONFIG_MT7622_WMAC)) + platform_driver_unregister(&mt7622_wmac_driver); + pci_unregister_driver(&mt7615_pci_driver); +} + +module_init(mt7615_init); +module_exit(mt7615_exit); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h index 676ca622c35a..d6176d316bee 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h @@ -12,12 +12,14 @@ #define MT7615_MAX_INTERFACES 4 #define MT7615_MAX_WMM_SETS 4 +#define MT7663_WTBL_SIZE 32 #define MT7615_WTBL_SIZE 128 -#define MT7615_WTBL_RESERVED (MT7615_WTBL_SIZE - 1) +#define MT7615_WTBL_RESERVED (mt7615_wtbl_size(dev) - 1) #define MT7615_WTBL_STA (MT7615_WTBL_RESERVED - \ MT7615_MAX_INTERFACES) #define MT7615_WATCHDOG_TIME (HZ / 10) +#define MT7615_HW_SCAN_TIMEOUT (HZ / 10) #define MT7615_RESET_TIMEOUT (30 * HZ) #define MT7615_RATE_RETRY 2 @@ -40,8 +42,10 @@ #define MT7615_FIRMWARE_V2 2 #define MT7615_FIRMWARE_V3 3 -#define MT7663_ROM_PATCH "mediatek/mt7663pr2h_v3.bin" -#define MT7663_FIRMWARE_N9 "mediatek/mt7663_n9_v3.bin" +#define MT7663_OFFLOAD_ROM_PATCH "mediatek/mt7663pr2h.bin" +#define MT7663_OFFLOAD_FIRMWARE_N9 "mediatek/mt7663_n9_v3.bin" +#define MT7663_ROM_PATCH "mediatek/mt7663pr2h_rebb.bin" +#define MT7663_FIRMWARE_N9 "mediatek/mt7663_n9_rebb.bin" #define MT7615_EEPROM_SIZE 1024 #define MT7615_TOKEN_SIZE 4096 @@ -57,10 +61,16 @@ #define MT7615_CFEND_RATE_DEFAULT 0x49 /* OFDM 24M */ #define MT7615_CFEND_RATE_11B 0x03 /* 11B LP, 11M */ +#define MT7615_SCAN_IE_LEN 600 +#define MT7615_MAX_SCHED_SCAN_INTERVAL 10 +#define MT7615_MAX_SCHED_SCAN_SSID 10 +#define MT7615_MAX_SCAN_MATCH 16 + struct mt7615_vif; struct mt7615_sta; struct mt7615_dfs_pulse; struct mt7615_dfs_pattern; +enum mt7615_cipher_type; enum mt7615_hw_txq_id { MT7615_TXQ_MAIN, @@ -84,6 +94,39 @@ struct mt7615_rate_set { struct ieee80211_tx_rate rates[4]; }; +struct mt7615_rate_desc { + bool rateset; + u16 probe_val; + u16 val[4]; + u8 bw_idx; + u8 bw; +}; + +enum mt7615_wtbl_desc_type { + MT7615_WTBL_RATE_DESC, + MT7615_WTBL_KEY_DESC +}; + +struct mt7615_key_desc { + enum set_key_cmd cmd; + u32 cipher; + s8 keyidx; + u8 keylen; + u8 *key; +}; + +struct mt7615_wtbl_desc { + struct list_head node; + + enum mt7615_wtbl_desc_type type; + struct mt7615_sta *sta; + + union { + struct mt7615_rate_desc rate; + struct mt7615_key_desc key; + }; +}; + struct mt7615_sta { struct mt76_wcid wcid; /* must be first */ @@ -108,15 +151,18 @@ struct mt7615_vif { u8 omac_idx; u8 band_idx; u8 wmm_idx; + u8 scan_seq_num; struct mt7615_sta sta; }; struct mib_stats { - u32 ack_fail_cnt; - u32 fcs_err_cnt; - u32 rts_cnt; - u32 rts_retries_cnt; + u16 ack_fail_cnt; + u16 fcs_err_cnt; + u16 rts_cnt; + u16 rts_retries_cnt; + u16 ba_miss_cnt; + unsigned long aggr_per; }; struct mt7615_phy { @@ -128,6 +174,8 @@ struct mt7615_phy { u16 noise; + bool scs_en; + unsigned long last_cca_adj; int false_cca_ofdm, false_cca_cck; s8 ofdm_sensitivity; @@ -146,13 +194,24 @@ struct mt7615_phy { u32 ampdu_ref; struct mib_stats mib; + + struct delayed_work mac_work; + u8 mac_work_count; + + struct sk_buff_head scan_event_list; + struct delayed_work scan_work; + + struct work_struct roc_work; + struct timer_list roc_timer; + wait_queue_head_t roc_wait; + bool roc_grant; }; #define mt7615_mcu_add_tx_ba(dev, ...) (dev)->mcu_ops->add_tx_ba((dev), __VA_ARGS__) #define mt7615_mcu_add_rx_ba(dev, ...) 
(dev)->mcu_ops->add_rx_ba((dev), __VA_ARGS__) #define mt7615_mcu_sta_add(dev, ...) (dev)->mcu_ops->sta_add((dev), __VA_ARGS__) #define mt7615_mcu_add_dev_info(dev, ...) (dev)->mcu_ops->add_dev_info((dev), __VA_ARGS__) -#define mt7615_mcu_add_bss_info(dev, ...) (dev)->mcu_ops->add_bss_info((dev), __VA_ARGS__) +#define mt7615_mcu_add_bss_info(phy, ...) (phy->dev)->mcu_ops->add_bss_info((phy), __VA_ARGS__) #define mt7615_mcu_add_beacon(dev, ...) (dev)->mcu_ops->add_beacon_offload((dev), __VA_ARGS__) #define mt7615_mcu_set_pm(dev, ...) (dev)->mcu_ops->set_pm_state((dev), __VA_ARGS__) struct mt7615_mcu_ops { @@ -167,8 +226,8 @@ struct mt7615_mcu_ops { struct ieee80211_sta *sta, bool enable); int (*add_dev_info)(struct mt7615_dev *dev, struct ieee80211_vif *vif, bool enable); - int (*add_bss_info)(struct mt7615_dev *dev, struct ieee80211_vif *vif, - bool enable); + int (*add_bss_info)(struct mt7615_phy *phy, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, bool enable); int (*add_beacon_offload)(struct mt7615_dev *dev, struct ieee80211_hw *hw, struct ieee80211_vif *vif, bool enable); @@ -181,12 +240,15 @@ struct mt7615_dev { struct mt76_phy mphy; }; + struct tasklet_struct irq_tasklet; + struct mt7615_phy phy; u32 vif_mask; u32 omac_mask; u16 chainmask; + struct ieee80211_ops *ops; const struct mt7615_mcu_ops *mcu_ops; struct regmap *infracfg; const u32 *reg_map; @@ -208,14 +270,16 @@ struct mt7615_dev { } radar_pattern; u32 hw_pattern; - u8 mac_work_count; - bool scs_en; bool fw_debug; + bool flash_eeprom; spinlock_t token_lock; struct idr token; u8 fw_ver; + + struct work_struct wtbl_work; + struct list_head wd_head; }; enum { @@ -289,6 +353,7 @@ mt7615_ext_phy(struct mt7615_dev *dev) return phy->priv; } +extern struct ieee80211_rate mt7615_rates[12]; extern const struct ieee80211_ops mt7615_ops; extern const u32 mt7615e_reg_map[__MT_BASE_MAX]; extern const u32 mt7663e_reg_map[__MT_BASE_MAX]; @@ -308,15 +373,19 @@ int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base, int irq, const u32 *map); u32 mt7615_reg_map(struct mt7615_dev *dev, u32 addr); +void mt7615_check_offload_capability(struct mt7615_dev *dev); void mt7615_init_device(struct mt7615_dev *dev); int mt7615_register_device(struct mt7615_dev *dev); void mt7615_unregister_device(struct mt7615_dev *dev); int mt7615_register_ext_phy(struct mt7615_dev *dev); void mt7615_unregister_ext_phy(struct mt7615_dev *dev); -int mt7615_eeprom_init(struct mt7615_dev *dev); -int mt7615_eeprom_get_power_index(struct mt7615_dev *dev, - struct ieee80211_channel *chan, - u8 chain_idx); +int mt7615_eeprom_init(struct mt7615_dev *dev, u32 addr); +int mt7615_eeprom_get_target_power_index(struct mt7615_dev *dev, + struct ieee80211_channel *chan, + u8 chain_idx); +int mt7615_eeprom_get_power_delta_index(struct mt7615_dev *dev, + enum nl80211_band band); +int mt7615_wait_pdma_busy(struct mt7615_dev *dev); int mt7615_dma_init(struct mt7615_dev *dev); void mt7615_dma_cleanup(struct mt7615_dev *dev); int mt7615_mcu_init(struct mt7615_dev *dev); @@ -345,7 +414,7 @@ static inline bool is_mt7622(struct mt76_dev *dev) static inline bool is_mt7615(struct mt76_dev *dev) { - return mt76_chip(dev) == 0x7615; + return mt76_chip(dev) == 0x7615 || mt76_chip(dev) == 0x7611; } static inline bool is_mt7663(struct mt76_dev *dev) @@ -353,21 +422,46 @@ static inline bool is_mt7663(struct mt76_dev *dev) return mt76_chip(dev) == 0x7663; } +static inline bool is_mt7611(struct mt76_dev *dev) +{ + return mt76_chip(dev) == 0x7611; +} + static inline void 
mt7615_irq_enable(struct mt7615_dev *dev, u32 mask) { - mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, 0, mask); + mt76_set_irq_mask(&dev->mt76, 0, 0, mask); + + tasklet_schedule(&dev->irq_tasklet); } -static inline void mt7615_irq_disable(struct mt7615_dev *dev, u32 mask) +static inline bool mt7615_firmware_offload(struct mt7615_dev *dev) { - mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, mask, 0); + return dev->fw_ver > MT7615_FIRMWARE_V2; } +static inline u16 mt7615_wtbl_size(struct mt7615_dev *dev) +{ + if (is_mt7663(&dev->mt76) && mt7615_firmware_offload(dev)) + return MT7663_WTBL_SIZE; + else + return MT7615_WTBL_SIZE; +} + +void mt7615_dma_reset(struct mt7615_dev *dev); +void mt7615_scan_work(struct work_struct *work); +void mt7615_roc_work(struct work_struct *work); +void mt7615_roc_timer(struct timer_list *timer); +void mt7615_init_txpower(struct mt7615_dev *dev, + struct ieee80211_supported_band *sband); +void mt7615_phy_init(struct mt7615_dev *dev); +void mt7615_mac_init(struct mt7615_dev *dev); + +int mt7615_mcu_restart(struct mt76_dev *dev); void mt7615_update_channel(struct mt76_dev *mdev); bool mt7615_mac_wtbl_update(struct mt7615_dev *dev, int idx, u32 mask); void mt7615_mac_reset_counters(struct mt7615_dev *dev); void mt7615_mac_cca_stats_reset(struct mt7615_phy *phy); -void mt7615_mac_set_scs(struct mt7615_dev *dev, bool enable); +void mt7615_mac_set_scs(struct mt7615_phy *phy, bool enable); void mt7615_mac_enable_nf(struct mt7615_dev *dev, bool ext_phy); void mt7615_mac_sta_poll(struct mt7615_dev *dev); int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi, @@ -375,15 +469,27 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi, struct ieee80211_sta *sta, int pid, struct ieee80211_key_conf *key, bool beacon); void mt7615_mac_set_timing(struct mt7615_phy *phy); -int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb); -void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data); -void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb); int mt7615_mac_wtbl_set_key(struct mt7615_dev *dev, struct mt76_wcid *wcid, struct ieee80211_key_conf *key, enum set_key_cmd cmd); +int mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, + struct mt76_wcid *wcid, + enum mt7615_cipher_type cipher, + int keyidx, enum set_key_cmd cmd); +void mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev, + struct mt76_wcid *wcid, + enum mt7615_cipher_type cipher, + enum set_key_cmd cmd); +int mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, + struct mt76_wcid *wcid, + u8 *key, u8 keylen, + enum mt7615_cipher_type cipher, + enum set_key_cmd cmd); void mt7615_mac_reset_work(struct work_struct *work); int mt7615_mcu_wait_response(struct mt7615_dev *dev, int cmd, int seq); +int mt7615_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data, + int len, bool wait_resp); int mt7615_mcu_set_dbdc(struct mt7615_dev *dev); int mt7615_mcu_set_eeprom(struct mt7615_dev *dev); int mt7615_mcu_set_mac_enable(struct mt7615_dev *dev, int band, bool enable); @@ -392,6 +498,17 @@ int mt7615_mcu_get_temperature(struct mt7615_dev *dev, int index); void mt7615_mcu_exit(struct mt7615_dev *dev); void mt7615_mcu_fill_msg(struct mt7615_dev *dev, struct sk_buff *skb, int cmd, int *wait_seq); +int mt7615_mcu_set_channel_domain(struct mt7615_phy *phy); +int mt7615_mcu_hw_scan(struct mt7615_phy *phy, struct ieee80211_vif *vif, + struct ieee80211_scan_request *scan_req); +int mt7615_mcu_cancel_hw_scan(struct mt7615_phy *phy, + struct ieee80211_vif *vif); +int 
mt7615_mcu_sched_scan_req(struct mt7615_phy *phy, + struct ieee80211_vif *vif, + struct cfg80211_sched_scan_request *sreq); +int mt7615_mcu_sched_scan_enable(struct mt7615_phy *phy, + struct ieee80211_vif *vif, + bool enable); int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, enum mt76_txq_id qid, struct mt76_wcid *wcid, @@ -417,8 +534,33 @@ int mt7615_mcu_set_pulse_th(struct mt7615_dev *dev, int mt7615_mcu_set_radar_th(struct mt7615_dev *dev, int index, const struct mt7615_dfs_pattern *pattern); int mt7615_mcu_set_sku_en(struct mt7615_phy *phy, bool enable); +int mt7615_mcu_apply_rx_dcoc(struct mt7615_phy *phy); +int mt7615_mcu_apply_tx_dpd(struct mt7615_phy *phy); +int mt7615_mcu_set_vif_ps(struct mt7615_dev *dev, struct ieee80211_vif *vif); int mt7615_dfs_init_radar_detector(struct mt7615_phy *phy); +int mt7615_mcu_set_p2p_oppps(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); +int mt7615_mcu_set_roc(struct mt7615_phy *phy, struct ieee80211_vif *vif, + struct ieee80211_channel *chan, int duration); +int mt7615_firmware_own(struct mt7615_dev *dev); +int mt7615_driver_own(struct mt7615_dev *dev); + int mt7615_init_debugfs(struct mt7615_dev *dev); +int mt7615_mcu_wait_response(struct mt7615_dev *dev, int cmd, int seq); + +int mt7615_mcu_set_hif_suspend(struct mt7615_dev *dev, bool suspend); +void mt7615_mcu_set_suspend_iter(void *priv, u8 *mac, + struct ieee80211_vif *vif); +int mt7615_mcu_update_gtk_rekey(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_gtk_rekey_data *key); + +int __mt7663_load_firmware(struct mt7615_dev *dev); + +/* usb */ +void mt7663u_wtbl_work(struct work_struct *work); +int mt7663u_mcu_init(struct mt7615_dev *dev); +int mt7663u_register_device(struct mt7615_dev *dev); #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci.c index c8d0f893a47f..ba12f199bce0 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci.c @@ -14,6 +14,7 @@ static const struct pci_device_id mt7615_pci_device_table[] = { { PCI_DEVICE(0x14c3, 0x7615) }, { PCI_DEVICE(0x14c3, 0x7663) }, + { PCI_DEVICE(0x14c3, 0x7611) }, { }, }; @@ -33,13 +34,27 @@ static int mt7615_pci_probe(struct pci_dev *pdev, pci_set_master(pdev); + ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES); + if (ret < 0) + return ret; + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (ret) - return ret; + goto error; + + mt76_pci_disable_aspm(pdev); map = id->device == 0x7663 ? 
mt7663e_reg_map : mt7615e_reg_map; - return mt7615_mmio_probe(&pdev->dev, pcim_iomap_table(pdev)[0], - pdev->irq, map); + ret = mt7615_mmio_probe(&pdev->dev, pcim_iomap_table(pdev)[0], + pdev->irq, map); + if (ret) + goto error; + + return 0; +error: + pci_free_irq_vectors(pdev); + + return ret; } static void mt7615_pci_remove(struct pci_dev *pdev) @@ -48,18 +63,132 @@ static void mt7615_pci_remove(struct pci_dev *pdev) struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); mt7615_unregister_device(dev); + devm_free_irq(&pdev->dev, pdev->irq, dev); + pci_free_irq_vectors(pdev); +} + +#ifdef CONFIG_PM +static int mt7615_pci_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct mt76_dev *mdev = pci_get_drvdata(pdev); + struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); + bool hif_suspend; + int i, err; + + hif_suspend = !test_bit(MT76_STATE_SUSPEND, &dev->mphy.state) && + mt7615_firmware_offload(dev); + if (hif_suspend) { + err = mt7615_mcu_set_hif_suspend(dev, true); + if (err) + return err; + } + + napi_disable(&mdev->tx_napi); + tasklet_kill(&mdev->tx_tasklet); + + mt76_for_each_q_rx(mdev, i) { + napi_disable(&mdev->napi[i]); + } + tasklet_kill(&dev->irq_tasklet); + + mt7615_dma_reset(dev); + + err = mt7615_wait_pdma_busy(dev); + if (err) + goto restore; + + if (is_mt7663(mdev)) { + mt76_set(dev, MT_PDMA_SLP_PROT, MT_PDMA_AXI_SLPPROT_ENABLE); + if (!mt76_poll_msec(dev, MT_PDMA_SLP_PROT, + MT_PDMA_AXI_SLPPROT_RDY, + MT_PDMA_AXI_SLPPROT_RDY, 1000)) { + dev_err(mdev->dev, "PDMA sleep protection failed\n"); + err = -EIO; + goto restore; + } + } + + pci_enable_wake(pdev, pci_choose_state(pdev, state), true); + pci_save_state(pdev); + err = pci_set_power_state(pdev, pci_choose_state(pdev, state)); + if (err) + goto restore; + + err = mt7615_firmware_own(dev); + if (err) + goto restore; + + return 0; + +restore: + mt76_for_each_q_rx(mdev, i) { + napi_enable(&mdev->napi[i]); + } + napi_enable(&mdev->tx_napi); + if (hif_suspend) + mt7615_mcu_set_hif_suspend(dev, false); + + return err; +} + +static int mt7615_pci_resume(struct pci_dev *pdev) +{ + struct mt76_dev *mdev = pci_get_drvdata(pdev); + struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); + bool pdma_reset; + int i, err; + + err = mt7615_driver_own(dev); + if (err < 0) + return err; + + err = pci_set_power_state(pdev, PCI_D0); + if (err) + return err; + + pci_restore_state(pdev); + + if (is_mt7663(&dev->mt76)) { + mt76_clear(dev, MT_PDMA_SLP_PROT, MT_PDMA_AXI_SLPPROT_ENABLE); + mt76_wr(dev, MT_PCIE_IRQ_ENABLE, 1); + } + + pdma_reset = !mt76_rr(dev, MT_WPDMA_TX_RING0_CTRL0) && + !mt76_rr(dev, MT_WPDMA_TX_RING0_CTRL1); + if (pdma_reset) + dev_err(mdev->dev, "PDMA engine must be reinitialized\n"); + + mt76_for_each_q_rx(mdev, i) { + napi_enable(&mdev->napi[i]); + napi_schedule(&mdev->napi[i]); + } + napi_enable(&mdev->tx_napi); + napi_schedule(&mdev->tx_napi); + + if (!test_bit(MT76_STATE_SUSPEND, &dev->mphy.state) && + mt7615_firmware_offload(dev)) + err = mt7615_mcu_set_hif_suspend(dev, false); + + return err; } +#endif /* CONFIG_PM */ struct pci_driver mt7615_pci_driver = { .name = KBUILD_MODNAME, .id_table = mt7615_pci_device_table, .probe = mt7615_pci_probe, .remove = mt7615_pci_remove, +#ifdef CONFIG_PM + .suspend = mt7615_pci_suspend, + .resume = mt7615_pci_resume, +#endif /* CONFIG_PM */ }; MODULE_DEVICE_TABLE(pci, mt7615_pci_device_table); MODULE_FIRMWARE(MT7615_FIRMWARE_CR4); MODULE_FIRMWARE(MT7615_FIRMWARE_N9); MODULE_FIRMWARE(MT7615_ROM_PATCH); 
+MODULE_FIRMWARE(MT7663_OFFLOAD_FIRMWARE_N9); +MODULE_FIRMWARE(MT7663_OFFLOAD_ROM_PATCH); MODULE_FIRMWARE(MT7663_FIRMWARE_N9); MODULE_FIRMWARE(MT7663_ROM_PATCH); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c new file mode 100644 index 000000000000..69cba8609edf --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c @@ -0,0 +1,174 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2019 MediaTek Inc. + * + * Author: Roy Luo <royluo@google.com> + * Ryder Lee <ryder.lee@mediatek.com> + * Felix Fietkau <nbd@nbd.name> + * Lorenzo Bianconi <lorenzo@kernel.org> + */ + +#include <linux/etherdevice.h> +#include "mt7615.h" +#include "mac.h" +#include "eeprom.h" + +static void mt7615_init_work(struct work_struct *work) +{ + struct mt7615_dev *dev = container_of(work, struct mt7615_dev, + mcu_work); + + if (mt7615_mcu_init(dev)) + return; + + mt7615_mcu_set_eeprom(dev); + mt7615_mac_init(dev); + mt7615_phy_init(dev); + mt7615_mcu_del_wtbl_all(dev); + mt7615_check_offload_capability(dev); +} + +static int mt7615_init_hardware(struct mt7615_dev *dev) +{ + u32 addr = mt7615_reg_map(dev, MT_EFUSE_BASE); + int ret, idx; + + mt76_wr(dev, MT_INT_SOURCE_CSR, ~0); + + INIT_WORK(&dev->mcu_work, mt7615_init_work); + spin_lock_init(&dev->token_lock); + idr_init(&dev->token); + + ret = mt7615_eeprom_init(dev, addr); + if (ret < 0) + return ret; + + ret = mt7615_dma_init(dev); + if (ret) + return ret; + + set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state); + + /* Beacon and mgmt frames should occupy wcid 0 */ + idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7615_WTBL_STA - 1); + if (idx) + return -ENOSPC; + + dev->mt76.global_wcid.idx = idx; + dev->mt76.global_wcid.hw_key_idx = -1; + rcu_assign_pointer(dev->mt76.wcid[idx], &dev->mt76.global_wcid); + + return 0; +} + +static void +mt7615_led_set_config(struct led_classdev *led_cdev, + u8 delay_on, u8 delay_off) +{ + struct mt7615_dev *dev; + struct mt76_dev *mt76; + u32 val, addr; + + mt76 = container_of(led_cdev, struct mt76_dev, led_cdev); + dev = container_of(mt76, struct mt7615_dev, mt76); + val = FIELD_PREP(MT_LED_STATUS_DURATION, 0xffff) | + FIELD_PREP(MT_LED_STATUS_OFF, delay_off) | + FIELD_PREP(MT_LED_STATUS_ON, delay_on); + + addr = mt7615_reg_map(dev, MT_LED_STATUS_0(mt76->led_pin)); + mt76_wr(dev, addr, val); + addr = mt7615_reg_map(dev, MT_LED_STATUS_1(mt76->led_pin)); + mt76_wr(dev, addr, val); + + val = MT_LED_CTRL_REPLAY(mt76->led_pin) | + MT_LED_CTRL_KICK(mt76->led_pin); + if (mt76->led_al) + val |= MT_LED_CTRL_POLARITY(mt76->led_pin); + addr = mt7615_reg_map(dev, MT_LED_CTRL); + mt76_wr(dev, addr, val); +} + +static int +mt7615_led_set_blink(struct led_classdev *led_cdev, + unsigned long *delay_on, + unsigned long *delay_off) +{ + u8 delta_on, delta_off; + + delta_off = max_t(u8, *delay_off / 10, 1); + delta_on = max_t(u8, *delay_on / 10, 1); + + mt7615_led_set_config(led_cdev, delta_on, delta_off); + + return 0; +} + +static void +mt7615_led_set_brightness(struct led_classdev *led_cdev, + enum led_brightness brightness) +{ + if (!brightness) + mt7615_led_set_config(led_cdev, 0, 0xff); + else + mt7615_led_set_config(led_cdev, 0xff, 0); +} + +int mt7615_register_device(struct mt7615_dev *dev) +{ + int ret; + + mt7615_init_device(dev); + + /* init led callbacks */ + if (IS_ENABLED(CONFIG_MT76_LEDS)) { + dev->mt76.led_cdev.brightness_set = mt7615_led_set_brightness; + dev->mt76.led_cdev.blink_set = mt7615_led_set_blink; + } + + ret = mt7622_wmac_init(dev); + if 
(ret) + return ret; + + ret = mt7615_init_hardware(dev); + if (ret) + return ret; + + ret = mt76_register_device(&dev->mt76, true, mt7615_rates, + ARRAY_SIZE(mt7615_rates)); + if (ret) + return ret; + + ieee80211_queue_work(mt76_hw(dev), &dev->mcu_work); + mt7615_init_txpower(dev, &dev->mphy.sband_2g.sband); + mt7615_init_txpower(dev, &dev->mphy.sband_5g.sband); + + return mt7615_init_debugfs(dev); +} + +void mt7615_unregister_device(struct mt7615_dev *dev) +{ + struct mt76_txwi_cache *txwi; + bool mcu_running; + int id; + + mcu_running = mt7615_wait_for_mcu_init(dev); + + mt7615_unregister_ext_phy(dev); + mt76_unregister_device(&dev->mt76); + if (mcu_running) + mt7615_mcu_exit(dev); + mt7615_dma_cleanup(dev); + + spin_lock_bh(&dev->token_lock); + idr_for_each_entry(&dev->token, txwi, id) { + mt7615_txp_skb_unmap(&dev->mt76, txwi); + if (txwi->skb) + dev_kfree_skb_any(txwi->skb); + mt76_put_txwi(&dev->mt76, txwi); + } + spin_unlock_bh(&dev->token_lock); + idr_destroy(&dev->token); + + tasklet_disable(&dev->irq_tasklet); + + mt76_free_device(&dev->mt76); +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c new file mode 100644 index 000000000000..7ec91c0856f5 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c @@ -0,0 +1,184 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2020 MediaTek Inc. + * + * Author: Ryder Lee <ryder.lee@mediatek.com> + * Roy Luo <royluo@google.com> + * Felix Fietkau <nbd@nbd.name> + * Lorenzo Bianconi <lorenzo@kernel.org> + */ + +#include <linux/etherdevice.h> +#include <linux/timekeeping.h> + +#include "mt7615.h" +#include "../dma.h" +#include "mac.h" + +void mt7615_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid, + struct mt76_queue_entry *e) +{ + if (!e->txwi) { + dev_kfree_skb_any(e->skb); + return; + } + + /* error path */ + if (e->skb == DMA_DUMMY_DATA) { + struct mt76_txwi_cache *t; + struct mt7615_dev *dev; + struct mt7615_txp_common *txp; + u16 token; + + dev = container_of(mdev, struct mt7615_dev, mt76); + txp = mt7615_txwi_to_txp(mdev, e->txwi); + + if (is_mt7615(&dev->mt76)) + token = le16_to_cpu(txp->fw.token); + else + token = le16_to_cpu(txp->hw.msdu_id[0]) & + ~MT_MSDU_ID_VALID; + + spin_lock_bh(&dev->token_lock); + t = idr_remove(&dev->token, token); + spin_unlock_bh(&dev->token_lock); + e->skb = t ? 
t->skb : NULL; + } + + if (e->skb) + mt76_tx_complete_skb(mdev, e->skb); +} + +static void +mt7615_write_hw_txp(struct mt7615_dev *dev, struct mt76_tx_info *tx_info, + void *txp_ptr, u32 id) +{ + struct mt7615_hw_txp *txp = txp_ptr; + struct mt7615_txp_ptr *ptr = &txp->ptr[0]; + int i, nbuf = tx_info->nbuf - 1; + u32 last_mask; + + tx_info->buf[0].len = MT_TXD_SIZE + sizeof(*txp); + tx_info->nbuf = 1; + + txp->msdu_id[0] = cpu_to_le16(id | MT_MSDU_ID_VALID); + + if (is_mt7663(&dev->mt76)) + last_mask = MT_TXD_LEN_LAST; + else + last_mask = MT_TXD_LEN_AMSDU_LAST | + MT_TXD_LEN_MSDU_LAST; + + for (i = 0; i < nbuf; i++) { + u16 len = tx_info->buf[i + 1].len & MT_TXD_LEN_MASK; + u32 addr = tx_info->buf[i + 1].addr; + + if (i == nbuf - 1) + len |= last_mask; + + if (i & 1) { + ptr->buf1 = cpu_to_le32(addr); + ptr->len1 = cpu_to_le16(len); + ptr++; + } else { + ptr->buf0 = cpu_to_le32(addr); + ptr->len0 = cpu_to_le16(len); + } + } +} + +static void +mt7615_write_fw_txp(struct mt7615_dev *dev, struct mt76_tx_info *tx_info, + void *txp_ptr, u32 id) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb); + struct ieee80211_key_conf *key = info->control.hw_key; + struct ieee80211_vif *vif = info->control.vif; + struct mt7615_fw_txp *txp = txp_ptr; + int nbuf = tx_info->nbuf - 1; + int i; + + for (i = 0; i < nbuf; i++) { + txp->buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr); + txp->len[i] = cpu_to_le16(tx_info->buf[i + 1].len); + } + txp->nbuf = nbuf; + + /* pass partial skb header to fw */ + tx_info->buf[0].len = MT_TXD_SIZE + sizeof(*txp); + tx_info->buf[1].len = MT_CT_PARSE_LEN; + tx_info->nbuf = MT_CT_DMA_BUF_NUM; + + txp->flags = cpu_to_le16(MT_CT_INFO_APPLY_TXD); + + if (!key) + txp->flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME); + + if (ieee80211_is_mgmt(hdr->frame_control)) + txp->flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME); + + if (vif) { + struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; + + txp->bss_idx = mvif->idx; + } + + txp->token = cpu_to_le16(id); + txp->rept_wds_wcid = 0xff; +} + +int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, + enum mt76_txq_id qid, struct mt76_wcid *wcid, + struct ieee80211_sta *sta, + struct mt76_tx_info *tx_info) +{ + struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); + struct mt7615_sta *msta = container_of(wcid, struct mt7615_sta, wcid); + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb); + struct ieee80211_key_conf *key = info->control.hw_key; + int pid, id; + u8 *txwi = (u8 *)txwi_ptr; + struct mt76_txwi_cache *t; + void *txp; + + if (!wcid) + wcid = &dev->mt76.global_wcid; + + pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb); + + if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) { + struct mt7615_phy *phy = &dev->phy; + + if ((info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY) && mdev->phy2) + phy = mdev->phy2->priv; + + spin_lock_bh(&dev->mt76.lock); + mt7615_mac_set_rates(phy, msta, &info->control.rates[0], + msta->rates); + msta->rate_probe = true; + spin_unlock_bh(&dev->mt76.lock); + } + + t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size); + t->skb = tx_info->skb; + + spin_lock_bh(&dev->token_lock); + id = idr_alloc(&dev->token, t, 0, MT7615_TOKEN_SIZE, GFP_ATOMIC); + spin_unlock_bh(&dev->token_lock); + if (id < 0) + return id; + + mt7615_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, sta, + pid, key, false); + + txp = txwi + MT_TXD_SIZE; + memset(txp, 0, sizeof(struct 
mt7615_txp_common)); + if (is_mt7615(&dev->mt76)) + mt7615_write_fw_txp(dev, tx_info, txp, id); + else + mt7615_write_hw_txp(dev, tx_info, txp, id); + + tx_info->skb = DMA_DUMMY_DATA; + + return 0; +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/regs.h b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h index 1e0d95b917e1..aee433a9eff6 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/regs.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h @@ -12,12 +12,15 @@ enum mt7615_reg_base { MT_ARB_BASE, MT_HIF_BASE, MT_CSR_BASE, + MT_PLE_BASE, + MT_PSE_BASE, MT_PHY_BASE, MT_CFG_BASE, MT_AGG_BASE, MT_TMAC_BASE, MT_RMAC_BASE, MT_DMA_BASE, + MT_PF_BASE, MT_WTBL_BASE_ON, MT_WTBL_BASE_OFF, MT_LPON_BASE, @@ -43,6 +46,7 @@ enum mt7615_reg_base { #define MT_TOP_MISC2_FW_STATE GENMASK(2, 0) #define MT7663_TOP_MISC2_FW_STATE GENMASK(3, 1) +#define MT_TOP_MISC2_FW_PWR_ON BIT(1) #define MT_MCU_BASE 0x2000 #define MT_MCU(ofs) (MT_MCU_BASE + (ofs)) @@ -58,6 +62,19 @@ enum mt7615_reg_base { #define MT_PCIE_REMAP_BASE_2 ((dev)->reg_map[MT_PCIE_REMAP_BASE2]) #define MT_HIF(ofs) ((dev)->reg_map[MT_HIF_BASE] + (ofs)) +#define MT_HIF_RST MT_HIF(0x100) +#define MT_HIF_LOGIC_RST_N BIT(4) + +#define MT_PDMA_SLP_PROT MT_HIF(0x154) +#define MT_PDMA_AXI_SLPPROT_ENABLE BIT(0) +#define MT_PDMA_AXI_SLPPROT_RDY BIT(16) + +#define MT_PDMA_BUSY_STATUS MT_HIF(0x168) +#define MT_PDMA_TX_IDX_BUSY BIT(2) +#define MT_PDMA_BUSY_IDX BIT(31) + +#define MT_WPDMA_TX_RING0_CTRL0 MT_HIF(0x300) +#define MT_WPDMA_TX_RING0_CTRL1 MT_HIF(0x304) #define MT7663_MCU_PCIE_REMAP_2_OFFSET GENMASK(15, 0) #define MT7663_MCU_PCIE_REMAP_2_BASE GENMASK(31, 16) @@ -65,6 +82,7 @@ enum mt7615_reg_base { #define MT_HIF2_BASE 0xf0000 #define MT_HIF2(ofs) (MT_HIF2_BASE + (ofs)) #define MT_PCIE_IRQ_ENABLE MT_HIF2(0x188) +#define MT_PCIE_DOORBELL_PUSH MT_HIF2(0x1484) #define MT_CFG_LPCR_HOST MT_HIF(0x1f0) #define MT_CFG_LPCR_HOST_FW_OWN BIT(0) @@ -133,8 +151,7 @@ enum mt7615_reg_base { #define MT_CSR(ofs) ((dev)->reg_map[MT_CSR_BASE] + (ofs)) #define MT_CONN_HIF_ON_LPCTL MT_CSR(0x000) -#define MT_PLE_BASE 0x8000 -#define MT_PLE(ofs) (MT_PLE_BASE + (ofs)) +#define MT_PLE(ofs) ((dev)->reg_map[MT_PLE_BASE] + (ofs)) #define MT_PLE_FL_Q0_CTRL MT_PLE(0x1b0) #define MT_PLE_FL_Q1_CTRL MT_PLE(0x1b4) @@ -144,6 +161,14 @@ enum mt7615_reg_base { #define MT_PLE_AC_QEMPTY(ac, n) MT_PLE(0x300 + 0x10 * (ac) + \ ((n) << 2)) +#define MT_PSE(ofs) ((dev)->reg_map[MT_PSE_BASE] + (ofs)) +#define MT_PSE_QUEUE_EMPTY MT_PSE(0x0b4) +#define MT_HIF_0_EMPTY_MASK BIT(16) +#define MT_HIF_1_EMPTY_MASK BIT(17) +#define MT_HIF_ALL_EMPTY_MASK GENMASK(17, 16) +#define MT_PSE_PG_INFO MT_PSE(0x194) +#define MT_PSE_SRC_CNT GENMASK(27, 16) + #define MT_WF_PHY_BASE ((dev)->reg_map[MT_PHY_BASE]) #define MT_WF_PHY(ofs) (MT_WF_PHY_BASE + (ofs)) @@ -151,29 +176,40 @@ enum mt7615_reg_base { #define MT_WF_PHY_WF2_RFCTRL0_LPBCN_EN BIT(9) #define MT_WF_PHY_R0_PHYMUX_5(_phy) MT_WF_PHY(0x0614 + ((_phy) << 9)) +#define MT7663_WF_PHY_R0_PHYMUX_5 MT_WF_PHY(0x0414) #define MT_WF_PHY_R0_PHYCTRL_STS0(_phy) MT_WF_PHY(0x020c + ((_phy) << 9)) #define MT_WF_PHYCTRL_STAT_PD_OFDM GENMASK(31, 16) #define MT_WF_PHYCTRL_STAT_PD_CCK GENMASK(15, 0) +#define MT7663_WF_PHY_R0_PHYCTRL_STS0(_phy) MT_WF_PHY(0x0210 + ((_phy) << 12)) + #define MT_WF_PHY_R0_PHYCTRL_STS5(_phy) MT_WF_PHY(0x0220 + ((_phy) << 9)) #define MT_WF_PHYCTRL_STAT_MDRDY_OFDM GENMASK(31, 16) #define MT_WF_PHYCTRL_STAT_MDRDY_CCK GENMASK(15, 0) +#define MT7663_WF_PHY_R0_PHYCTRL_STS5(_phy) MT_WF_PHY(0x0224 + ((_phy) << 12)) + #define 
MT_WF_PHY_MIN_PRI_PWR(_phy) MT_WF_PHY((_phy) ? 0x084 : 0x229c) #define MT_WF_PHY_PD_OFDM_MASK(_phy) ((_phy) ? GENMASK(24, 16) : \ GENMASK(28, 20)) #define MT_WF_PHY_PD_OFDM(_phy, v) ((v) << ((_phy) ? 16 : 20)) #define MT_WF_PHY_PD_BLK(_phy) ((_phy) ? BIT(25) : BIT(19)) +#define MT7663_WF_PHY_MIN_PRI_PWR(_phy) MT_WF_PHY((_phy) ? 0x2aec : 0x22f0) + #define MT_WF_PHY_RXTD_BASE MT_WF_PHY(0x2200) #define MT_WF_PHY_RXTD(_n) (MT_WF_PHY_RXTD_BASE + ((_n) << 2)) +#define MT7663_WF_PHY_RXTD(_n) (MT_WF_PHY(0x25b0) + ((_n) << 2)) + #define MT_WF_PHY_RXTD_CCK_PD(_phy) MT_WF_PHY((_phy) ? 0x2314 : 0x2310) #define MT_WF_PHY_PD_CCK_MASK(_phy) (_phy) ? GENMASK(31, 24) : \ GENMASK(8, 1) #define MT_WF_PHY_PD_CCK(_phy, v) ((v) << ((_phy) ? 24 : 1)) +#define MT7663_WF_PHY_RXTD_CCK_PD(_phy) MT_WF_PHY((_phy) ? 0x2350 : 0x234c) + #define MT_WF_PHY_RXTD2_BASE MT_WF_PHY(0x2a00) #define MT_WF_PHY_RXTD2(_n) (MT_WF_PHY_RXTD2_BASE + ((_n) << 2)) @@ -306,10 +342,17 @@ enum mt7615_reg_base { #define MT_DMA_RCFR0_MCU_RX_MGMT BIT(2) #define MT_DMA_RCFR0_MCU_RX_CTL_NON_BAR BIT(3) #define MT_DMA_RCFR0_MCU_RX_CTL_BAR BIT(4) +#define MT_DMA_RCFR0_MCU_RX_TDLS BIT(19) #define MT_DMA_RCFR0_MCU_RX_BYPASS BIT(21) #define MT_DMA_RCFR0_RX_DROPPED_UCAST GENMASK(25, 24) #define MT_DMA_RCFR0_RX_DROPPED_MCAST GENMASK(27, 26) +#define MT_WF_PF_BASE ((dev)->reg_map[MT_PF_BASE]) +#define MT_WF_PF(ofs) (MT_WF_PF_BASE + (ofs)) + +#define MT_WF_PFCR MT_WF_PF(0x000) +#define MT_WF_PFCR_TDLS_EN BIT(9) + #define MT_WTBL_BASE(dev) ((dev)->reg_map[MT_WTBL_BASE_ADDR]) #define MT_WTBL_ENTRY_SIZE 256 @@ -379,34 +422,44 @@ enum mt7615_reg_base { #define MT_LPON_UTTR1 MT_LPON(0x01c) #define MT_WF_MIB_BASE (dev->reg_map[MT_MIB_BASE]) -#define MT_WF_MIB(ofs) (MT_WF_MIB_BASE + (ofs)) +#define MT_WF_MIB(_band, ofs) (MT_WF_MIB_BASE + (ofs) + (_band) * 0x200) -#define MT_MIB_M0_MISC_CR MT_WF_MIB(0x00c) +#define MT_WF_MIB_SCR0 MT_WF_MIB(0, 0) +#define MT_MIB_SCR0_AGG_CNT_RANGE_EN BIT(21) -#define MT_MIB_SDR3(n) MT_WF_MIB(0x014 + ((n) << 9)) +#define MT_MIB_M0_MISC_CR(_band) MT_WF_MIB(_band, 0x00c) + +#define MT_MIB_SDR3(_band) MT_WF_MIB(_band, 0x014) #define MT_MIB_SDR3_FCS_ERR_MASK GENMASK(15, 0) -#define MT_MIB_SDR9(n) MT_WF_MIB(0x02c + ((n) << 9)) +#define MT_MIB_SDR9(_band) MT_WF_MIB(_band, 0x02c) #define MT_MIB_SDR9_BUSY_MASK GENMASK(23, 0) -#define MT_MIB_SDR16(n) MT_WF_MIB(0x048 + ((n) << 9)) +#define MT_MIB_SDR14(_band) MT_WF_MIB(_band, 0x040) +#define MT_MIB_AMPDU_MPDU_COUNT GENMASK(23, 0) + +#define MT_MIB_SDR15(_band) MT_WF_MIB(_band, 0x044) +#define MT_MIB_AMPDU_ACK_COUNT GENMASK(23, 0) + +#define MT_MIB_SDR16(_band) MT_WF_MIB(_band, 0x048) #define MT_MIB_SDR16_BUSY_MASK GENMASK(23, 0) -#define MT_MIB_SDR36(n) MT_WF_MIB(0x098 + ((n) << 9)) +#define MT_MIB_SDR36(_band) MT_WF_MIB(_band, 0x098) #define MT_MIB_SDR36_TXTIME_MASK GENMASK(23, 0) -#define MT_MIB_SDR37(n) MT_WF_MIB(0x09c + ((n) << 9)) +#define MT_MIB_SDR37(_band) MT_WF_MIB(_band, 0x09c) #define MT_MIB_SDR37_RXTIME_MASK GENMASK(23, 0) -#define MT_MIB_MB_SDR0(_band, n) MT_WF_MIB(0x100 + ((_band) << 9) + \ - ((n) << 4)) +#define MT_MIB_MB_SDR0(_band, n) MT_WF_MIB(_band, 0x100 + ((n) << 4)) #define MT_MIB_RTS_RETRIES_COUNT_MASK GENMASK(31, 16) #define MT_MIB_RTS_COUNT_MASK GENMASK(15, 0) -#define MT_MIB_MB_SDR1(_band, n) MT_WF_MIB(0x104 + ((_band) << 9) + \ - ((n) << 4)) +#define MT_MIB_MB_SDR1(_band, n) MT_WF_MIB(_band, 0x104 + ((n) << 4)) +#define MT_MIB_BA_MISS_COUNT_MASK GENMASK(15, 0) #define MT_MIB_ACK_FAIL_COUNT_MASK GENMASK(31, 16) -#define MT_TX_AGG_CNT(n) MT_WF_MIB(0xa8 + ((n) << 2)) 
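The MIB hunk here folds the old per-band "(n) << 9" offsets into the two-argument MT_WF_MIB(_band, ofs) macro, so every counter is addressed as base + offset + band * 0x200. A minimal userspace sketch of how such a band-indexed lookup resolves; the base value is taken from the mt7663 register map added later in this patch and is purely illustrative:

#include <stdint.h>
#include <stdio.h>

/* Illustrative base only; the driver reads it from dev->reg_map[MT_MIB_BASE]
 * (0x820fd000 in the mt7663 USB map below).
 */
#define WF_MIB_BASE	0x820fd000u

/* Mirrors MT_WF_MIB(_band, ofs): the second band's block sits 0x200 higher. */
#define WF_MIB(band, ofs)	(WF_MIB_BASE + (ofs) + (band) * 0x200u)

int main(void)
{
	/* MT_MIB_SDR3 (FCS error counter, offset 0x014) for both bands */
	printf("band 0: 0x%08x\n", WF_MIB(0, 0x014));	/* 0x820fd014 */
	printf("band 1: 0x%08x\n", WF_MIB(1, 0x014));	/* 0x820fd214 */
	return 0;
}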
+#define MT_MIB_ARNG(n) MT_WF_MIB(0, 0x4b8 + ((n) << 2)) + +#define MT_TX_AGG_CNT(_band, n) MT_WF_MIB(_band, 0xa8 + ((n) << 2)) #define MT_DMA_SHDL(ofs) (dev->reg_map[MT_DMA_SHDL_BASE] + (ofs)) @@ -449,6 +502,10 @@ enum mt7615_reg_base { #define MT_LED_STATUS_ON GENMASK(23, 16) #define MT_LED_STATUS_DURATION GENMASK(15, 0) +#define MT_PDMA_BUSY 0x82000504 +#define MT_PDMA_TX_BUSY BIT(0) +#define MT_PDMA_RX_BUSY BIT(1) + #define MT_EFUSE_BASE ((dev)->reg_map[MT_EFUSE_ADDR_BASE]) #define MT_EFUSE_BASE_CTRL 0x000 #define MT_EFUSE_BASE_CTRL_EMPTY BIT(30) @@ -470,4 +527,27 @@ enum mt7615_reg_base { #define MT_INFRACFG_MISC 0x700 #define MT_INFRACFG_MISC_AP2CONN_WAKE BIT(1) +#define MT_UMAC_BASE 0x7c000000 +#define MT_UMAC(ofs) (MT_UMAC_BASE + (ofs)) +#define MT_UDMA_TX_QSEL MT_UMAC(0x008) +#define MT_FW_DL_EN BIT(3) + +#define MT_UDMA_WLCFG_1 MT_UMAC(0x00c) +#define MT_WL_RX_AGG_PKT_LMT GENMASK(7, 0) +#define MT_WL_TX_TMOUT_LMT GENMASK(27, 8) + +#define MT_UDMA_WLCFG_0 MT_UMAC(0x18) +#define MT_WL_RX_AGG_TO GENMASK(7, 0) +#define MT_WL_RX_AGG_LMT GENMASK(15, 8) +#define MT_WL_TX_TMOUT_FUNC_EN BIT(16) +#define MT_WL_TX_DPH_CHK_EN BIT(17) +#define MT_WL_RX_MPSZ_PAD0 BIT(18) +#define MT_WL_RX_FLUSH BIT(19) +#define MT_TICK_1US_EN BIT(20) +#define MT_WL_RX_AGG_EN BIT(21) +#define MT_WL_RX_EN BIT(22) +#define MT_WL_TX_EN BIT(23) +#define MT_WL_RX_BUSY BIT(30) +#define MT_WL_TX_BUSY BIT(31) + #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/soc.c b/drivers/net/wireless/mediatek/mt76/mt7615/soc.c index 43aa49706c66..9aa5183c7a56 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/soc.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/soc.c @@ -36,10 +36,8 @@ static int mt7622_wmac_probe(struct platform_device *pdev) int irq; irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(&pdev->dev, "Failed to get device IRQ\n"); + if (irq < 0) return irq; - } mem_base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(mem_base)) { diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c new file mode 100644 index 000000000000..a50077eb24d7 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c @@ -0,0 +1,447 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2019 MediaTek Inc. 
+ * + * Author: Felix Fietkau <nbd@nbd.name> + * Lorenzo Bianconi <lorenzo@kernel.org> + * Sean Wang <sean.wang@mediatek.com> + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/usb.h> + +#include "mt7615.h" +#include "mac.h" +#include "mcu.h" +#include "regs.h" + +static const u32 mt7663u_reg_map[] = { + [MT_TOP_CFG_BASE] = 0x80020000, + [MT_HW_BASE] = 0x80000000, + [MT_DMA_SHDL_BASE] = 0x5000a000, + [MT_HIF_BASE] = 0x50000000, + [MT_CSR_BASE] = 0x40000000, + [MT_EFUSE_ADDR_BASE] = 0x78011000, + [MT_TOP_MISC_BASE] = 0x81020000, + [MT_PLE_BASE] = 0x82060000, + [MT_PSE_BASE] = 0x82068000, + [MT_PHY_BASE] = 0x82070000, + [MT_WTBL_BASE_ADDR] = 0x820e0000, + [MT_CFG_BASE] = 0x820f0000, + [MT_AGG_BASE] = 0x820f2000, + [MT_ARB_BASE] = 0x820f3000, + [MT_TMAC_BASE] = 0x820f4000, + [MT_RMAC_BASE] = 0x820f5000, + [MT_DMA_BASE] = 0x820f7000, + [MT_PF_BASE] = 0x820f8000, + [MT_WTBL_BASE_ON] = 0x820f9000, + [MT_WTBL_BASE_OFF] = 0x820f9800, + [MT_LPON_BASE] = 0x820fb000, + [MT_MIB_BASE] = 0x820fd000, +}; + +static const struct usb_device_id mt7615_device_table[] = { + { USB_DEVICE_AND_INTERFACE_INFO(0x0e8d, 0x7663, 0xff, 0xff, 0xff) }, + { }, +}; + +static void mt7663u_stop(struct ieee80211_hw *hw) +{ + struct mt7615_phy *phy = mt7615_hw_phy(hw); + struct mt7615_dev *dev = hw->priv; + + clear_bit(MT76_STATE_RUNNING, &dev->mphy.state); + del_timer_sync(&phy->roc_timer); + cancel_work_sync(&phy->roc_work); + cancel_delayed_work_sync(&phy->scan_work); + cancel_delayed_work_sync(&phy->mac_work); + mt76u_stop_tx(&dev->mt76); +} + +static void mt7663u_cleanup(struct mt7615_dev *dev) +{ + clear_bit(MT76_STATE_INITIALIZED, &dev->mphy.state); + mt76u_queues_deinit(&dev->mt76); +} + +static void +mt7663u_mac_write_txwi(struct mt7615_dev *dev, struct mt76_wcid *wcid, + enum mt76_txq_id qid, struct ieee80211_sta *sta, + struct sk_buff *skb) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_key_conf *key = info->control.hw_key; + __le32 *txwi; + int pid; + + if (!wcid) + wcid = &dev->mt76.global_wcid; + + pid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb); + + txwi = (__le32 *)(skb->data - MT_USB_TXD_SIZE); + memset(txwi, 0, MT_USB_TXD_SIZE); + mt7615_mac_write_txwi(dev, txwi, skb, wcid, sta, pid, key, false); + skb_push(skb, MT_USB_TXD_SIZE); +} + +static int +__mt7663u_mac_set_rates(struct mt7615_dev *dev, + struct mt7615_wtbl_desc *wd) +{ + struct mt7615_rate_desc *rate = &wd->rate; + struct mt7615_sta *sta = wd->sta; + u32 w5, w27, addr, val; + + lockdep_assert_held(&dev->mt76.mutex); + + if (!sta) + return -EINVAL; + + if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000)) + return -ETIMEDOUT; + + addr = mt7615_mac_wtbl_addr(dev, sta->wcid.idx); + + w27 = mt76_rr(dev, addr + 27 * 4); + w27 &= ~MT_WTBL_W27_CC_BW_SEL; + w27 |= FIELD_PREP(MT_WTBL_W27_CC_BW_SEL, rate->bw); + + w5 = mt76_rr(dev, addr + 5 * 4); + w5 &= ~(MT_WTBL_W5_BW_CAP | MT_WTBL_W5_CHANGE_BW_RATE | + MT_WTBL_W5_MPDU_OK_COUNT | + MT_WTBL_W5_MPDU_FAIL_COUNT | + MT_WTBL_W5_RATE_IDX); + w5 |= FIELD_PREP(MT_WTBL_W5_BW_CAP, rate->bw) | + FIELD_PREP(MT_WTBL_W5_CHANGE_BW_RATE, + rate->bw_idx ? 
rate->bw_idx - 1 : 7); + + mt76_wr(dev, MT_WTBL_RIUCR0, w5); + + mt76_wr(dev, MT_WTBL_RIUCR1, + FIELD_PREP(MT_WTBL_RIUCR1_RATE0, rate->probe_val) | + FIELD_PREP(MT_WTBL_RIUCR1_RATE1, rate->val[0]) | + FIELD_PREP(MT_WTBL_RIUCR1_RATE2_LO, rate->val[1])); + + mt76_wr(dev, MT_WTBL_RIUCR2, + FIELD_PREP(MT_WTBL_RIUCR2_RATE2_HI, rate->val[1] >> 8) | + FIELD_PREP(MT_WTBL_RIUCR2_RATE3, rate->val[1]) | + FIELD_PREP(MT_WTBL_RIUCR2_RATE4, rate->val[2]) | + FIELD_PREP(MT_WTBL_RIUCR2_RATE5_LO, rate->val[2])); + + mt76_wr(dev, MT_WTBL_RIUCR3, + FIELD_PREP(MT_WTBL_RIUCR3_RATE5_HI, rate->val[2] >> 4) | + FIELD_PREP(MT_WTBL_RIUCR3_RATE6, rate->val[3]) | + FIELD_PREP(MT_WTBL_RIUCR3_RATE7, rate->val[3])); + + mt76_wr(dev, MT_WTBL_UPDATE, + FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, sta->wcid.idx) | + MT_WTBL_UPDATE_RATE_UPDATE | + MT_WTBL_UPDATE_TX_COUNT_CLEAR); + + mt76_wr(dev, addr + 27 * 4, w27); + + mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */ + val = mt76_rr(dev, MT_LPON_UTTR0); + sta->rate_set_tsf = (val & ~BIT(0)) | rate->rateset; + + if (!(sta->wcid.tx_info & MT_WCID_TX_INFO_SET)) + mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000); + + sta->rate_count = 2 * MT7615_RATE_RETRY * sta->n_rates; + sta->wcid.tx_info |= MT_WCID_TX_INFO_SET; + + return 0; +} + +static int +__mt7663u_mac_set_key(struct mt7615_dev *dev, + struct mt7615_wtbl_desc *wd) +{ + struct mt7615_key_desc *key = &wd->key; + struct mt7615_sta *sta = wd->sta; + enum mt7615_cipher_type cipher; + struct mt76_wcid *wcid; + int err; + + lockdep_assert_held(&dev->mt76.mutex); + + if (!sta) + return -EINVAL; + + cipher = mt7615_mac_get_cipher(key->cipher); + if (cipher == MT_CIPHER_NONE) + return -EOPNOTSUPP; + + wcid = &wd->sta->wcid; + + mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, key->cmd); + err = mt7615_mac_wtbl_update_key(dev, wcid, key->key, key->keylen, + cipher, key->cmd); + if (err < 0) + return err; + + err = mt7615_mac_wtbl_update_pk(dev, wcid, cipher, key->keyidx, + key->cmd); + if (err < 0) + return err; + + if (key->cmd == SET_KEY) + wcid->cipher |= BIT(cipher); + else + wcid->cipher &= ~BIT(cipher); + + return 0; +} + +void mt7663u_wtbl_work(struct work_struct *work) +{ + struct mt7615_wtbl_desc *wd, *wd_next; + struct mt7615_dev *dev; + + dev = (struct mt7615_dev *)container_of(work, struct mt7615_dev, + wtbl_work); + + list_for_each_entry_safe(wd, wd_next, &dev->wd_head, node) { + spin_lock_bh(&dev->mt76.lock); + list_del(&wd->node); + spin_unlock_bh(&dev->mt76.lock); + + mutex_lock(&dev->mt76.mutex); + switch (wd->type) { + case MT7615_WTBL_RATE_DESC: + __mt7663u_mac_set_rates(dev, wd); + break; + case MT7615_WTBL_KEY_DESC: + __mt7663u_mac_set_key(dev, wd); + break; + } + mutex_unlock(&dev->mt76.mutex); + + kfree(wd); + } +} + +static void +mt7663u_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid, + struct mt76_queue_entry *e) +{ + skb_pull(e->skb, MT_USB_HDR_SIZE + MT_USB_TXD_SIZE); + mt76_tx_complete_skb(mdev, e->skb); +} + +static int +mt7663u_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, + enum mt76_txq_id qid, struct mt76_wcid *wcid, + struct ieee80211_sta *sta, + struct mt76_tx_info *tx_info) +{ + struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb); + + if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) { + struct mt7615_sta *msta; + + msta = container_of(wcid, struct mt7615_sta, wcid); + spin_lock_bh(&dev->mt76.lock); + mt7615_mac_set_rates(&dev->phy, msta, &info->control.rates[0], + 
msta->rates);
+		msta->rate_probe = true;
+		spin_unlock_bh(&dev->mt76.lock);
+	}
+	mt7663u_mac_write_txwi(dev, wcid, qid, sta, tx_info->skb);
+
+	return mt76u_skb_dma_info(tx_info->skb, tx_info->skb->len);
+}
+
+static bool mt7663u_tx_status_data(struct mt76_dev *mdev, u8 *update)
+{
+	struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+
+	mutex_lock(&dev->mt76.mutex);
+	mt7615_mac_sta_poll(dev);
+	mutex_unlock(&dev->mt76.mutex);
+
+	return false;
+}
+
+static int mt7663u_probe(struct usb_interface *usb_intf,
+			 const struct usb_device_id *id)
+{
+	static const struct mt76_driver_ops drv_ops = {
+		.txwi_size = MT_USB_TXD_SIZE,
+		.drv_flags = MT_DRV_RX_DMA_HDR,
+		.tx_prepare_skb = mt7663u_tx_prepare_skb,
+		.tx_complete_skb = mt7663u_tx_complete_skb,
+		.tx_status_data = mt7663u_tx_status_data,
+		.rx_skb = mt7615_queue_rx_skb,
+		.sta_ps = mt7615_sta_ps,
+		.sta_add = mt7615_mac_sta_add,
+		.sta_remove = mt7615_mac_sta_remove,
+		.update_survey = mt7615_update_channel,
+	};
+	struct usb_device *udev = interface_to_usbdev(usb_intf);
+	struct ieee80211_ops *ops;
+	struct mt7615_dev *dev;
+	struct mt76_dev *mdev;
+	int ret;
+
+	ops = devm_kmemdup(&usb_intf->dev, &mt7615_ops, sizeof(mt7615_ops),
+			   GFP_KERNEL);
+	if (!ops)
+		return -ENOMEM;
+
+	ops->stop = mt7663u_stop;
+
+	mdev = mt76_alloc_device(&usb_intf->dev, sizeof(*dev), ops, &drv_ops);
+	if (!mdev)
+		return -ENOMEM;
+
+	dev = container_of(mdev, struct mt7615_dev, mt76);
+	udev = usb_get_dev(udev);
+	usb_reset_device(udev);
+
+	usb_set_intfdata(usb_intf, dev);
+
+	dev->reg_map = mt7663u_reg_map;
+	dev->ops = ops;
+	ret = mt76u_init(mdev, usb_intf, true);
+	if (ret < 0)
+		goto error;
+
+	mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) |
+		    (mt76_rr(dev, MT_HW_REV) & 0xff);
+	dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
+
+	if (mt76_poll_msec(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_PWR_ON,
+			   FW_STATE_PWR_ON << 1, 500)) {
+		dev_dbg(dev->mt76.dev, "USB device already powered on\n");
+		set_bit(MT76_STATE_POWER_OFF, &dev->mphy.state);
+		goto alloc_queues;
+	}
+
+	ret = mt76u_vendor_request(&dev->mt76, MT_VEND_POWER_ON,
+				   USB_DIR_OUT | USB_TYPE_VENDOR,
+				   0x0, 0x1, NULL, 0);
+	if (ret)
+		goto error;
+
+	if (!mt76_poll_msec(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_PWR_ON,
+			    FW_STATE_PWR_ON << 1, 500)) {
+		dev_err(dev->mt76.dev, "Timeout for power on\n");
+		ret = -EIO;
+		goto error;
+	}
+
+alloc_queues:
+	ret = mt76u_alloc_mcu_queue(&dev->mt76);
+	if (ret)
+		goto error;
+
+	ret = mt76u_alloc_queues(&dev->mt76);
+	if (ret)
+		goto error;
+
+	ret = mt7663u_register_device(dev);
+	if (ret)
+		goto error_freeq;
+
+	return 0;
+
+error_freeq:
+	mt76u_queues_deinit(&dev->mt76);
+error:
+	mt76u_deinit(&dev->mt76);
+	usb_set_intfdata(usb_intf, NULL);
+	usb_put_dev(interface_to_usbdev(usb_intf));
+
+	ieee80211_free_hw(mdev->hw);
+
+	return ret;
+}
+
+static void mt7663u_disconnect(struct usb_interface *usb_intf)
+{
+	struct mt7615_dev *dev = usb_get_intfdata(usb_intf);
+
+	if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
+		return;
+
+	ieee80211_unregister_hw(dev->mt76.hw);
+	mt7663u_cleanup(dev);
+
+	usb_set_intfdata(usb_intf, NULL);
+	usb_put_dev(interface_to_usbdev(usb_intf));
+
+	mt76u_deinit(&dev->mt76);
+	ieee80211_free_hw(dev->mt76.hw);
+}
+
+#ifdef CONFIG_PM
+static int mt7663u_suspend(struct usb_interface *intf, pm_message_t state)
+{
+	struct mt7615_dev *dev = usb_get_intfdata(intf);
+
+	if (!test_bit(MT76_STATE_SUSPEND, &dev->mphy.state) &&
+	    mt7615_firmware_offload(dev)) {
+		int err;
+
+		err = mt7615_mcu_set_hif_suspend(dev, true);
+		if (err < 0)
+			return err;
+	}
+
+	mt76u_stop_rx(&dev->mt76);
+
+	mt76u_stop_tx(&dev->mt76);
+	tasklet_kill(&dev->mt76.tx_tasklet);
+
+	return 0;
+}
+
+static int mt7663u_resume(struct usb_interface *intf)
+{
+	struct mt7615_dev *dev = usb_get_intfdata(intf);
+	int err;
+
+	err = mt76u_vendor_request(&dev->mt76, MT_VEND_FEATURE_SET,
+				   USB_DIR_OUT | USB_TYPE_VENDOR,
+				   0x5, 0x0, NULL, 0);
+	if (err)
+		return err;
+
+	err = mt76u_resume_rx(&dev->mt76);
+	if (err < 0)
+		return err;
+
+	if (!test_bit(MT76_STATE_SUSPEND, &dev->mphy.state) &&
+	    mt7615_firmware_offload(dev))
+		err = mt7615_mcu_set_hif_suspend(dev, false);
+
+	return err;
+}
+#endif /* CONFIG_PM */
+
+MODULE_DEVICE_TABLE(usb, mt7615_device_table);
+MODULE_FIRMWARE(MT7663_OFFLOAD_FIRMWARE_N9);
+MODULE_FIRMWARE(MT7663_OFFLOAD_ROM_PATCH);
+MODULE_FIRMWARE(MT7663_FIRMWARE_N9);
+MODULE_FIRMWARE(MT7663_ROM_PATCH);
+
+static struct usb_driver mt7663u_driver = {
+	.name = KBUILD_MODNAME,
+	.id_table = mt7615_device_table,
+	.probe = mt7663u_probe,
+	.disconnect = mt7663u_disconnect,
+#ifdef CONFIG_PM
+	.suspend = mt7663u_suspend,
+	.resume = mt7663u_resume,
+	.reset_resume = mt7663u_resume,
+#endif /* CONFIG_PM */
+	.soft_unbind = 1,
+	.disable_hub_initiated_lpm = 1,
+};
+module_usb_driver(mt7663u_driver);
+
+MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_init.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_init.c
new file mode 100644
index 000000000000..1fbc9601391d
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_init.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2019 MediaTek Inc.
+ *
+ * Author: Felix Fietkau <nbd@nbd.name>
+ *	   Lorenzo Bianconi <lorenzo@kernel.org>
+ *	   Sean Wang <sean.wang@mediatek.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "mt7615.h"
+#include "mac.h"
+#include "regs.h"
+
+static int mt7663u_dma_sched_init(struct mt7615_dev *dev)
+{
+	int i;
+
+	mt76_rmw(dev, MT_DMA_SHDL(MT_DMASHDL_PKT_MAX_SIZE),
+		 MT_DMASHDL_PKT_MAX_SIZE_PLE | MT_DMASHDL_PKT_MAX_SIZE_PSE,
+		 FIELD_PREP(MT_DMASHDL_PKT_MAX_SIZE_PLE, 1) |
+		 FIELD_PREP(MT_DMASHDL_PKT_MAX_SIZE_PSE, 8));
+
+	/* disable refill group 5 - group 15 and raise group 2
+	 * and 3 as high priority.
+	 */
+	mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_REFILL), 0xffe00006);
+	mt76_clear(dev, MT_DMA_SHDL(MT_DMASHDL_PAGE), BIT(16));
+
+	for (i = 0; i < 5; i++)
+		mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_GROUP_QUOTA(i)),
+			FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MIN, 0x3) |
+			FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MAX, 0x1ff));
+
+	mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_Q_MAP(0)), 0x42104210);
+	mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_Q_MAP(1)), 0x42104210);
+
+	mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_Q_MAP(2)), 0x4444);
+
+	/* group priority from high to low:
+	 * 15 (cmd groups) > 4 > 3 > 2 > 1 > 0.
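The scheduler setup above relies on the kernel's GENMASK()/FIELD_PREP() helpers to pack several quota fields into a single register write. A self-contained sketch of that packing pattern; the MIN/MAX field positions below are assumed for illustration and are not taken from the real MT_DMASHDL layout:

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-ins for the kernel helpers used above. */
#define GENMASK(h, l)		((~0u << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(mask, val)	(((val) << __builtin_ctz(mask)) & (mask))
#define FIELD_GET(mask, reg)	(((reg) & (mask)) >> __builtin_ctz(mask))

/* Assumed positions, for illustration only. */
#define GROUP_QUOTA_MIN		GENMASK(11, 0)
#define GROUP_QUOTA_MAX		GENMASK(27, 16)

int main(void)
{
	uint32_t reg = FIELD_PREP(GROUP_QUOTA_MIN, 0x3) |
		       FIELD_PREP(GROUP_QUOTA_MAX, 0x1ff);

	/* reg = 0x01ff0003 */
	printf("reg=0x%08x min=%u max=%u\n", reg,
	       FIELD_GET(GROUP_QUOTA_MIN, reg),
	       FIELD_GET(GROUP_QUOTA_MAX, reg));
	return 0;
}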
+ */ + mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_SCHED_SET0), 0x6501234f); + mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_SCHED_SET1), 0xedcba987); + mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_OPTIONAL), 0x7004801c); + + mt76_wr(dev, MT_UDMA_WLCFG_1, + FIELD_PREP(MT_WL_TX_TMOUT_LMT, 80000) | + FIELD_PREP(MT_WL_RX_AGG_PKT_LMT, 1)); + + /* setup UDMA Rx Flush */ + mt76_clear(dev, MT_UDMA_WLCFG_0, MT_WL_RX_FLUSH); + /* hif reset */ + mt76_set(dev, MT_HIF_RST, MT_HIF_LOGIC_RST_N); + + mt76_set(dev, MT_UDMA_WLCFG_0, + MT_WL_RX_AGG_EN | MT_WL_RX_EN | MT_WL_TX_EN | + MT_WL_RX_MPSZ_PAD0 | MT_TICK_1US_EN | + MT_WL_TX_TMOUT_FUNC_EN); + mt76_rmw(dev, MT_UDMA_WLCFG_0, MT_WL_RX_AGG_LMT | MT_WL_RX_AGG_TO, + FIELD_PREP(MT_WL_RX_AGG_LMT, 32) | + FIELD_PREP(MT_WL_RX_AGG_TO, 100)); + + return 0; +} + +static int mt7663u_init_hardware(struct mt7615_dev *dev) +{ + int ret, idx; + + ret = mt7615_eeprom_init(dev, MT_EFUSE_BASE); + if (ret < 0) + return ret; + + ret = mt7663u_dma_sched_init(dev); + if (ret) + return ret; + + set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state); + + /* Beacon and mgmt frames should occupy wcid 0 */ + idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7615_WTBL_STA - 1); + if (idx) + return -ENOSPC; + + dev->mt76.global_wcid.idx = idx; + dev->mt76.global_wcid.hw_key_idx = -1; + rcu_assign_pointer(dev->mt76.wcid[idx], &dev->mt76.global_wcid); + + return 0; +} + +static void mt7663u_init_work(struct work_struct *work) +{ + struct mt7615_dev *dev; + + dev = container_of(work, struct mt7615_dev, mcu_work); + if (mt7663u_mcu_init(dev)) + return; + + mt7615_mcu_set_eeprom(dev); + mt7615_mac_init(dev); + mt7615_phy_init(dev); + mt7615_mcu_del_wtbl_all(dev); + mt7615_check_offload_capability(dev); +} + +int mt7663u_register_device(struct mt7615_dev *dev) +{ + struct ieee80211_hw *hw = mt76_hw(dev); + int err; + + INIT_WORK(&dev->wtbl_work, mt7663u_wtbl_work); + INIT_WORK(&dev->mcu_work, mt7663u_init_work); + INIT_LIST_HEAD(&dev->wd_head); + mt7615_init_device(dev); + + err = mt7663u_init_hardware(dev); + if (err) + return err; + + hw->extra_tx_headroom += MT_USB_HDR_SIZE + MT_USB_TXD_SIZE; + /* check hw sg support in order to enable AMSDU */ + hw->max_tx_fragments = dev->mt76.usb.sg_en ? MT_HW_TXP_MAX_BUF_NUM : 1; + + err = mt76_register_device(&dev->mt76, true, mt7615_rates, + ARRAY_SIZE(mt7615_rates)); + if (err < 0) + return err; + + if (!dev->mt76.usb.sg_en) { + struct ieee80211_sta_vht_cap *vht_cap; + + /* decrease max A-MSDU size if SG is not supported */ + vht_cap = &dev->mphy.sband_5g.sband.vht_cap; + vht_cap->cap &= ~IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454; + } + + ieee80211_queue_work(hw, &dev->mcu_work); + mt7615_init_txpower(dev, &dev->mphy.sband_2g.sband); + mt7615_init_txpower(dev, &dev->mphy.sband_5g.sband); + + return mt7615_init_debugfs(dev); +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c new file mode 100644 index 000000000000..cd709fd617db --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c @@ -0,0 +1,93 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2019 MediaTek Inc. 
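mt7663u_init_hardware() above expects mt76_wcid_alloc() to hand out index 0 for the beacon/management entry, which is why any non-zero result is treated as -ENOSPC. A toy model of such a first-fit bitmap allocator; the real helper lives in mt76 core, and the names and the 128-slot size here are invented for illustration:

#include <stdio.h>

#define N_WCIDS 128

static unsigned long wcid_mask[N_WCIDS / (8 * sizeof(unsigned long))];

/* Return the lowest free slot and mark it busy, or -1 if full. */
static int wcid_alloc(void)
{
	int i;

	for (i = 0; i < N_WCIDS; i++) {
		unsigned long *w = &wcid_mask[i / (8 * sizeof(*w))];
		unsigned long bit = 1UL << (i % (8 * sizeof(*w)));

		if (!(*w & bit)) {
			*w |= bit;
			return i;
		}
	}
	return -1;
}

int main(void)
{
	/* the first allocation must yield slot 0, reserved for beacon/mgmt */
	printf("%d %d\n", wcid_alloc(), wcid_alloc());	/* 0 1 */
	return 0;
}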
+ *
+ * Author: Felix Fietkau <nbd@nbd.name>
+ *	   Lorenzo Bianconi <lorenzo@kernel.org>
+ *	   Sean Wang <sean.wang@mediatek.com>
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "mt7615.h"
+#include "mac.h"
+#include "mcu.h"
+#include "regs.h"
+
+static int
+mt7663u_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
+			 int cmd, bool wait_resp)
+{
+	struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+	int ret, seq, ep;
+
+	mutex_lock(&mdev->mcu.mutex);
+
+	mt7615_mcu_fill_msg(dev, skb, cmd, &seq);
+	if (cmd != MCU_CMD_FW_SCATTER)
+		ep = MT_EP_OUT_INBAND_CMD;
+	else
+		ep = MT_EP_OUT_AC_BE;
+
+	ret = mt76u_skb_dma_info(skb, skb->len);
+	if (ret < 0)
+		goto out;
+
+	ret = mt76u_bulk_msg(&dev->mt76, skb->data, skb->len, NULL,
+			     1000, ep);
+	dev_kfree_skb(skb);
+	if (ret < 0)
+		goto out;
+
+	if (wait_resp)
+		ret = mt7615_mcu_wait_response(dev, cmd, seq);
+
+out:
+	mutex_unlock(&mdev->mcu.mutex);
+
+	return ret;
+}
+
+int mt7663u_mcu_init(struct mt7615_dev *dev)
+{
+	static const struct mt76_mcu_ops mt7663u_mcu_ops = {
+		.headroom = MT_USB_HDR_SIZE + sizeof(struct mt7615_mcu_txd),
+		.tailroom = MT_USB_TAIL_SIZE,
+		.mcu_skb_send_msg = mt7663u_mcu_send_message,
+		.mcu_send_msg = mt7615_mcu_msg_send,
+		.mcu_restart = mt7615_mcu_restart,
+	};
+	int ret;
+
+	dev->mt76.mcu_ops = &mt7663u_mcu_ops;
+
+	mt76_set(dev, MT_UDMA_TX_QSEL, MT_FW_DL_EN);
+
+	if (test_and_clear_bit(MT76_STATE_POWER_OFF, &dev->mphy.state)) {
+		mt7615_mcu_restart(&dev->mt76);
+		if (!mt76_poll_msec(dev, MT_CONN_ON_MISC,
+				    MT_TOP_MISC2_FW_PWR_ON, 0, 500))
+			return -EIO;
+
+		ret = mt76u_vendor_request(&dev->mt76, MT_VEND_POWER_ON,
+					   USB_DIR_OUT | USB_TYPE_VENDOR,
+					   0x0, 0x1, NULL, 0);
+		if (ret)
+			return ret;
+
+		if (!mt76_poll_msec(dev, MT_CONN_ON_MISC,
+				    MT_TOP_MISC2_FW_PWR_ON,
+				    FW_STATE_PWR_ON << 1, 500)) {
+			dev_err(dev->mt76.dev, "Timeout for power on\n");
+			return -EIO;
+		}
+	}
+
+	ret = __mt7663_load_firmware(dev);
+	if (ret)
+		return ret;
+
+	mt76_clear(dev, MT_UDMA_TX_QSEL, MT_FW_DL_EN);
+	set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
+
+	return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
index 57f8d56737eb..dc8bf4c6969a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
@@ -12,24 +12,6 @@
 #include "initvals.h"
 #include "../mt76x02_phy.h"
 
-static void mt76x0_vht_cap_mask(struct ieee80211_supported_band *sband)
-{
-	struct ieee80211_sta_vht_cap *vht_cap = &sband->vht_cap;
-	u16 mcs_map = 0;
-	int i;
-
-	vht_cap->cap &= ~IEEE80211_VHT_CAP_RXLDPC;
-	for (i = 0; i < 8; i++) {
-		if (!i)
-			mcs_map |= (IEEE80211_VHT_MCS_SUPPORT_0_7 << (i * 2));
-		else
-			mcs_map |=
-				(IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2));
-	}
-	vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
-	vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
-}
-
 static void
 mt76x0_set_wlan_state(struct mt76x02_dev *dev, u32 val, bool enable)
 {
@@ -263,9 +245,11 @@ int mt76x0_register_device(struct mt76x02_dev *dev)
 		return ret;
 
 	if (dev->mt76.cap.has_5ghz) {
-		/* overwrite unsupported features */
-		mt76x0_vht_cap_mask(&dev->mphy.sband_5g.sband);
-		mt76x0_init_txpower(dev, &dev->mphy.sband_5g.sband);
+		struct ieee80211_supported_band *sband;
+
+		sband = &dev->mphy.sband_5g.sband;
+		sband->vht_cap.cap &= ~IEEE80211_VHT_CAP_RXLDPC;
+		mt76x0_init_txpower(dev, sband);
 	}
 
 	if (dev->mt76.cap.has_2ghz)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c index 0b520ae08d01..f7ec3400e368 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c @@ -29,6 +29,7 @@ static void mt76x0e_stop_hw(struct mt76x02_dev *dev) { cancel_delayed_work_sync(&dev->cal_work); cancel_delayed_work_sync(&dev->mt76.mac_work); + clear_bit(MT76_RESTART, &dev->mphy.state); if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_BUSY, 0, 1000)) @@ -83,6 +84,7 @@ static const struct ieee80211_ops mt76x0e_ops = { .set_coverage_class = mt76x02_set_coverage_class, .set_rts_threshold = mt76x02_set_rts_threshold, .get_antenna = mt76_get_antenna, + .reconfig_complete = mt76x02_reconfig_complete, }; static int mt76x0e_register_device(struct mt76x02_dev *dev) @@ -216,6 +218,7 @@ mt76x0e_remove(struct pci_dev *pdev) } static const struct pci_device_id mt76x0e_device_table[] = { + { PCI_DEVICE(0x14c3, 0x7610) }, { PCI_DEVICE(0x14c3, 0x7630) }, { PCI_DEVICE(0x14c3, 0x7650) }, { }, diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02.h b/drivers/net/wireless/mediatek/mt76/mt76x02.h index 23040c193ca5..4c9bbc7ce023 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x02.h @@ -15,6 +15,7 @@ #include "mt76x02_dfs.h" #include "mt76x02_dma.h" +#define MT76x02_N_WCIDS 128 #define MT_CALIBRATE_INTERVAL HZ #define MT_MAC_WORK_INTERVAL (HZ / 10) @@ -187,6 +188,8 @@ void mt76x02_sta_ps(struct mt76_dev *dev, struct ieee80211_sta *sta, bool ps); void mt76x02_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *info, u32 changed); +void mt76x02_reconfig_complete(struct ieee80211_hw *hw, + enum ieee80211_reconfig_type reconfig_type); struct beacon_bc_data { struct mt76x02_dev *dev; @@ -216,6 +219,7 @@ static inline bool is_mt76x0(struct mt76x02_dev *dev) static inline bool is_mt76x2(struct mt76x02_dev *dev) { return mt76_chip(&dev->mt76) == 0x7612 || + mt76_chip(&dev->mt76) == 0x7632 || mt76_chip(&dev->mt76) == 0x7662 || mt76_chip(&dev->mt76) == 0x7602; } @@ -243,7 +247,7 @@ mt76x02_rx_get_sta(struct mt76_dev *dev, u8 idx) { struct mt76_wcid *wcid; - if (idx >= ARRAY_SIZE(dev->wcid)) + if (idx >= MT76x02_N_WCIDS) return NULL; wcid = rcu_dereference(dev->wcid[idx]); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c b/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c index 68b40d63a46d..ff448a1ad4e3 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c @@ -144,7 +144,7 @@ void mt76x02_init_debugfs(struct mt76x02_dev *dev) if (!dir) return; - debugfs_create_devm_seqfile(dev->mt76.dev, "queues", dir, + debugfs_create_devm_seqfile(dev->mt76.dev, "xmit-queues", dir, mt76_queues_read); debugfs_create_u8("temperature", 0400, dir, &dev->cal.temp); debugfs_create_bool("tpc", 0600, dir, &dev->enable_tpc); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c index 8b072277ea10..e4e03beabe43 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c @@ -409,6 +409,7 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi, txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ; if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) { u8 ba_size = IEEE80211_MIN_AMPDU_BUF; + u8 ampdu_density = sta->ht_cap.ampdu_density; ba_size <<= sta->ht_cap.ampdu_factor; ba_size = min_t(int, 63, ba_size - 
1); @@ -416,9 +417,11 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi, ba_size = 0; txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size); + if (ampdu_density < IEEE80211_HT_MPDU_DENSITY_4) + ampdu_density = IEEE80211_HT_MPDU_DENSITY_4; + txwi_flags |= MT_TXWI_FLAGS_AMPDU | - FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY, - sta->ht_cap.ampdu_density); + FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY, ampdu_density); } if (ieee80211_is_probe_resp(hdr->frame_control) || @@ -558,7 +561,7 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev, rcu_read_lock(); - if (stat->wcid < ARRAY_SIZE(dev->mt76.wcid)) + if (stat->wcid < MT76x02_N_WCIDS) wcid = rcu_dereference(dev->mt76.wcid[stat->wcid]); if (wcid && wcid->sta) { diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c index 5664749ad6c1..267058086a90 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c @@ -20,7 +20,10 @@ int mt76x02_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data, int ret; u8 seq; - skb = mt76x02_mcu_msg_alloc(data, len); + if (dev->mcu_timeout) + return -EIO; + + skb = mt76_mcu_msg_alloc(mdev, data, len); if (!skb) return -ENOMEM; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h index c81a9655c4c9..5fba1266c648 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h @@ -85,12 +85,6 @@ struct mt76x02_patch_header { u8 pad[2]; }; -static inline struct sk_buff * -mt76x02_mcu_msg_alloc(const void *data, int len) -{ - return mt76_mcu_msg_alloc(data, 0, len, 0); -} - int mt76x02_mcu_cleanup(struct mt76x02_dev *dev); int mt76x02_mcu_calibrate(struct mt76x02_dev *dev, int type, u32 param); int mt76x02_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data, diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c index 7dcc5d342e9f..cbbe986655fe 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c @@ -415,7 +415,7 @@ static void mt76x02_reset_state(struct mt76x02_dev *dev) ieee80211_iter_keys_rcu(dev->mt76.hw, NULL, mt76x02_key_sync, NULL); rcu_read_unlock(); - for (i = 0; i < ARRAY_SIZE(dev->mt76.wcid); i++) { + for (i = 0; i < MT76x02_N_WCIDS; i++) { struct ieee80211_sta *sta; struct ieee80211_vif *vif; struct mt76x02_sta *msta; @@ -489,8 +489,9 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev) for (i = 0; i < __MT_TXQ_MAX; i++) mt76_queue_tx_cleanup(dev, i, true); - for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++) + mt76_for_each_q_rx(&dev->mt76, i) { mt76_queue_rx_reset(dev, i); + } mt76x02_mac_start(dev); @@ -520,6 +521,7 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev) } if (restart) { + set_bit(MT76_RESTART, &dev->mphy.state); mt76x02_mcu_function_select(dev, Q_SELECT, 1); ieee80211_restart_hw(dev->mt76.hw); } else { @@ -528,8 +530,23 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev) } } +void mt76x02_reconfig_complete(struct ieee80211_hw *hw, + enum ieee80211_reconfig_type reconfig_type) +{ + struct mt76x02_dev *dev = hw->priv; + + if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART) + return; + + clear_bit(MT76_RESTART, &dev->mphy.state); +} +EXPORT_SYMBOL_GPL(mt76x02_reconfig_complete); + static void mt76x02_check_tx_hang(struct mt76x02_dev *dev) { + if 
(test_bit(MT76_RESTART, &dev->mphy.state)) + return; + if (mt76x02_tx_hang(dev)) { if (++dev->tx_hang_check >= MT_TX_HANG_TH) goto restart; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c index 843b86560ed4..a30bb536fc8a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c @@ -123,7 +123,7 @@ mt76x02u_mcu_send_msg(struct mt76_dev *dev, int cmd, const void *data, struct sk_buff *skb; int err; - skb = mt76_mcu_msg_alloc(data, MT_CMD_HDR_LEN, len, 8); + skb = mt76_mcu_msg_alloc(dev, data, len); if (!skb) return -ENOMEM; @@ -291,6 +291,8 @@ EXPORT_SYMBOL_GPL(mt76x02u_mcu_fw_send_data); void mt76x02u_init_mcu(struct mt76_dev *dev) { static const struct mt76_mcu_ops mt76x02u_mcu_ops = { + .headroom = MT_CMD_HDR_LEN, + .tailroom = 8, .mcu_send_msg = mt76x02u_mcu_send_msg, .mcu_wr_rp = mt76x02u_mcu_wr_rp, .mcu_rd_rp = mt76x02u_mcu_rd_rp, diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c index b7a120b0856d..44822a849eb1 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c @@ -46,6 +46,8 @@ static const struct ieee80211_iface_limit mt76x02_if_limits[] = { #ifdef CONFIG_MAC80211_MESH BIT(NL80211_IFTYPE_MESH_POINT) | #endif + BIT(NL80211_IFTYPE_P2P_CLIENT) | + BIT(NL80211_IFTYPE_P2P_GO) | BIT(NL80211_IFTYPE_AP) }, }; @@ -60,6 +62,8 @@ static const struct ieee80211_iface_limit mt76x02u_if_limits[] = { #ifdef CONFIG_MAC80211_MESH BIT(NL80211_IFTYPE_MESH_POINT) | #endif + BIT(NL80211_IFTYPE_P2P_CLIENT) | + BIT(NL80211_IFTYPE_P2P_GO) | BIT(NL80211_IFTYPE_AP) }, }; @@ -245,7 +249,7 @@ int mt76x02_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, memset(msta, 0, sizeof(*msta)); - idx = mt76_wcid_alloc(dev->mt76.wcid_mask, ARRAY_SIZE(dev->mt76.wcid)); + idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT76x02_N_WCIDS); if (idx < 0) return -ENOSPC; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c index 4a748a6f0ce2..410ffce3baff 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c @@ -4,6 +4,7 @@ */ #include <linux/module.h> +#include <linux/of.h> #include <asm/unaligned.h> #include "mt76x2.h" #include "eeprom.h" @@ -76,6 +77,7 @@ mt76x2_apply_cal_free_data(struct mt76x02_dev *dev, u8 *efuse) MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN, MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN + 1, }; + struct device_node *np = dev->mt76.dev->of_node; u8 *eeprom = dev->mt76.eeprom.data; u8 prev_grp0[4] = { eeprom[MT_EE_TX_POWER_0_START_5G], @@ -86,6 +88,9 @@ mt76x2_apply_cal_free_data(struct mt76x02_dev *dev, u8 *efuse) u16 val; int i; + if (!np || !of_property_read_bool(np, "mediatek,eeprom-merge-otp")) + return; + if (!mt76x2_has_cal_free_data(dev, efuse)) return; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c index c69579e5f647..f27774f57438 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c @@ -256,6 +256,7 @@ void mt76x2_stop_hardware(struct mt76x02_dev *dev) cancel_delayed_work_sync(&dev->cal_work); cancel_delayed_work_sync(&dev->mt76.mac_work); cancel_delayed_work_sync(&dev->wdt_work); + clear_bit(MT76_RESTART, &dev->mphy.state); mt76x02_mcu_set_radio_state(dev, false); 
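Several hunks above introduce the MT76_RESTART bit: the watchdog sets it before calling ieee80211_restart_hw(), mt76x02_check_tx_hang() tests it to suppress hang detection while the restart is in flight, and the new reconfig_complete() callback clears it. A compilable userspace model of that handshake; the names are invented, and the driver uses set_bit()/clear_bit()/test_bit() on dev->mphy.state:

#include <stdatomic.h>
#include <stdio.h>

static atomic_bool restarting;

static void watchdog_reset(void)
{
	atomic_store(&restarting, 1);	/* set_bit(MT76_RESTART, ...) */
	/* ... ieee80211_restart_hw() would be triggered here ... */
}

static void reconfig_complete(void)
{
	atomic_store(&restarting, 0);	/* clear_bit(MT76_RESTART, ...) */
}

static void check_tx_hang(void)
{
	if (atomic_load(&restarting)) {
		puts("restart pending - hang check skipped");
		return;
	}
	puts("hang check runs");
}

int main(void)
{
	watchdog_reset();
	check_tx_hang();	/* skipped */
	reconfig_complete();
	check_tx_hang();	/* runs again */
	return 0;
}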
mt76x2_mac_stop(dev, false); } diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c index 105e5b99b3f9..98f4cf398320 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c @@ -10,12 +10,9 @@ static int mt76x2_start(struct ieee80211_hw *hw) { struct mt76x02_dev *dev = hw->priv; - int ret; mt76x02_mac_start(dev); - ret = mt76x2_phy_start(dev); - if (ret) - return ret; + mt76x2_phy_start(dev); ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work, MT_MAC_WORK_INTERVAL); @@ -35,11 +32,9 @@ mt76x2_stop(struct ieee80211_hw *hw) mt76x2_stop_hardware(dev); } -static int +static void mt76x2_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef) { - int ret; - cancel_delayed_work_sync(&dev->cal_work); tasklet_disable(&dev->mt76.pre_tbtt_tasklet); tasklet_disable(&dev->dfs_pd.dfs_tasklet); @@ -50,7 +45,7 @@ mt76x2_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef) mt76_set_channel(&dev->mphy); mt76x2_mac_stop(dev, true); - ret = mt76x2_phy_set_channel(dev, chandef); + mt76x2_phy_set_channel(dev, chandef); mt76x02_mac_cc_reset(dev); mt76x02_dfs_init_params(dev); @@ -64,15 +59,12 @@ mt76x2_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef) tasklet_enable(&dev->mt76.pre_tbtt_tasklet); mt76_txq_schedule_all(&dev->mphy); - - return ret; } static int mt76x2_config(struct ieee80211_hw *hw, u32 changed) { struct mt76x02_dev *dev = hw->priv; - int ret = 0; mutex_lock(&dev->mt76.mutex); @@ -101,11 +93,11 @@ mt76x2_config(struct ieee80211_hw *hw, u32 changed) if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { ieee80211_stop_queues(hw); - ret = mt76x2_set_channel(dev, &hw->conf.chandef); + mt76x2_set_channel(dev, &hw->conf.chandef); ieee80211_wake_queues(hw); } - return ret; + return 0; } static void @@ -127,7 +119,7 @@ static int mt76x2_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, dev->chainmask = (tx_ant == 3) ? 
0x202 : 0x101;
 	dev->mphy.antenna_mask = tx_ant;
 
-	mt76_set_stream_caps(&dev->mt76, true);
+	mt76_set_stream_caps(&dev->mphy, true);
 	mt76x2_phy_set_antenna(dev);
 
 	mutex_unlock(&dev->mt76.mutex);
@@ -162,5 +154,6 @@ const struct ieee80211_ops mt76x2_ops = {
 	.set_antenna = mt76x2_set_antenna,
 	.get_antenna = mt76_get_antenna,
 	.set_rts_threshold = mt76x02_set_rts_threshold,
+	.reconfig_complete = mt76x02_reconfig_complete,
 };
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
index eafa283ca699..3a4e41724af1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
@@ -16,8 +16,10 @@ static const struct usb_device_id mt76x2u_device_table[] = {
 	{ USB_DEVICE(0x0e8d, 0x7612) }, /* Aukey USBAC1200 - Alfa AWUS036ACM */
 	{ USB_DEVICE(0x057c, 0x8503) }, /* Avm FRITZ!WLAN AC860 */
 	{ USB_DEVICE(0x7392, 0xb711) }, /* Edimax EW 7722 UAC */
+	{ USB_DEVICE(0x2c4e, 0x0103) }, /* Mercury UD13 */
 	{ USB_DEVICE(0x0846, 0x9053) }, /* Netgear A6210 */
 	{ USB_DEVICE(0x045e, 0x02e6) }, /* XBox One Wireless Adapter */
+	{ USB_DEVICE(0x045e, 0x02fe) }, /* XBox One Wireless Adapter */
 	{ },
 };
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/Kconfig b/drivers/net/wireless/mediatek/mt76/mt7915/Kconfig
new file mode 100644
index 000000000000..d98225da694c
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/Kconfig
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: ISC
+config MT7915E
+	tristate "MediaTek MT7915E (PCIe) support"
+	select MT76_CORE
+	depends on MAC80211
+	depends on PCI
+	help
+	  This adds support for MT7915-based wireless PCIe devices,
+	  which support concurrent dual-band operation at both 5GHz
+	  and 2.4GHz IEEE 802.11ax 4x4:4SS 1024-QAM, 160MHz channels,
+	  OFDMA, spatial reuse and dual carrier modulation.
+
+	  To compile this driver as a module, choose M here.
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/Makefile b/drivers/net/wireless/mediatek/mt76/mt7915/Makefile
new file mode 100644
index 000000000000..57fe726cc38b
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: ISC
+
+obj-$(CONFIG_MT7915E) += mt7915e.o
+
+mt7915e-y := pci.o init.o dma.o eeprom.o main.o mcu.o mac.o \
+	     debugfs.o
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
new file mode 100644
index 000000000000..5278bee812f1
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
@@ -0,0 +1,463 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2020 MediaTek Inc.
*/ + +#include "mt7915.h" +#include "eeprom.h" + +/** global debugfs **/ + +/* test knob of system layer 1/2 error recovery */ +static int mt7915_ser_trigger_set(void *data, u64 val) +{ + enum { + SER_SET_RECOVER_L1 = 1, + SER_SET_RECOVER_L2, + SER_ENABLE = 2, + SER_RECOVER + }; + struct mt7915_dev *dev = data; + int ret = 0; + + switch (val) { + case SER_SET_RECOVER_L1: + case SER_SET_RECOVER_L2: + /* fall through */ + ret = mt7915_mcu_set_ser(dev, SER_ENABLE, BIT(val), 0); + if (ret) + return ret; + + return mt7915_mcu_set_ser(dev, SER_RECOVER, val, 0); + default: + break; + } + + return ret; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_ser_trigger, NULL, + mt7915_ser_trigger_set, "%lld\n"); + +static int +mt7915_radar_trigger(void *data, u64 val) +{ + struct mt7915_dev *dev = data; + + return mt7915_mcu_rdd_cmd(dev, RDD_RADAR_EMULATE, 1, 0, 0); +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_radar_trigger, NULL, + mt7915_radar_trigger, "%lld\n"); + +static int +mt7915_dbdc_set(void *data, u64 val) +{ + struct mt7915_dev *dev = data; + + if (val) + mt7915_register_ext_phy(dev); + else + mt7915_unregister_ext_phy(dev); + + return 0; +} + +static int +mt7915_dbdc_get(void *data, u64 *val) +{ + struct mt7915_dev *dev = data; + + *val = !!mt7915_ext_phy(dev); + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_dbdc, mt7915_dbdc_get, + mt7915_dbdc_set, "%lld\n"); + +static int +mt7915_fw_debug_set(void *data, u64 val) +{ + struct mt7915_dev *dev = data; + enum { + DEBUG_TXCMD = 62, + DEBUG_CMD_RPT_TX, + DEBUG_CMD_RPT_TRIG, + DEBUG_SPL, + DEBUG_RPT_RX, + } debug; + + dev->fw_debug = !!val; + + mt7915_mcu_fw_log_2_host(dev, dev->fw_debug ? 2 : 0); + + for (debug = DEBUG_TXCMD; debug <= DEBUG_RPT_RX; debug++) + mt7915_mcu_fw_dbg_ctrl(dev, debug, dev->fw_debug); + + return 0; +} + +static int +mt7915_fw_debug_get(void *data, u64 *val) +{ + struct mt7915_dev *dev = data; + + *val = dev->fw_debug; + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_fw_debug, mt7915_fw_debug_get, + mt7915_fw_debug_set, "%lld\n"); + +static void +mt7915_ampdu_stat_read_phy(struct mt7915_phy *phy, + struct seq_file *file) +{ + struct mt7915_dev *dev = file->private; + bool ext_phy = phy != &dev->phy; + int bound[15], range[4], i, n; + + if (!phy) + return; + + /* Tx ampdu stat */ + for (i = 0; i < ARRAY_SIZE(range); i++) + range[i] = mt76_rr(dev, MT_MIB_ARNG(ext_phy, i)); + + for (i = 0; i < ARRAY_SIZE(bound); i++) + bound[i] = MT_MIB_ARNCR_RANGE(range[i / 4], i) + 1; + + seq_printf(file, "\nPhy %d\n", ext_phy); + + seq_printf(file, "Length: %8d | ", bound[0]); + for (i = 0; i < ARRAY_SIZE(bound) - 1; i++) + seq_printf(file, "%3d -%3d | ", + bound[i] + 1, bound[i + 1]); + + seq_puts(file, "\nCount: "); + n = ext_phy ? 
ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0; + for (i = 0; i < ARRAY_SIZE(bound); i++) + seq_printf(file, "%8d | ", dev->mt76.aggr_stats[i + n]); + seq_puts(file, "\n"); + + seq_printf(file, "BA miss count: %d\n", phy->mib.ba_miss_cnt); +} + +static void +mt7915_txbf_stat_read_phy(struct mt7915_phy *phy, struct seq_file *s) +{ + struct mt7915_dev *dev = s->private; + bool ext_phy = phy != &dev->phy; + int cnt; + + if (!phy) + return; + + /* Tx Beamformer monitor */ + seq_puts(s, "\nTx Beamformer applied PPDU counts: "); + + cnt = mt76_rr(dev, MT_ETBF_TX_APP_CNT(ext_phy)); + seq_printf(s, "iBF: %ld, eBF: %ld\n", + FIELD_GET(MT_ETBF_TX_IBF_CNT, cnt), + FIELD_GET(MT_ETBF_TX_EBF_CNT, cnt)); + + /* Tx Beamformer Rx feedback monitor */ + seq_puts(s, "Tx Beamformer Rx feedback statistics: "); + + cnt = mt76_rr(dev, MT_ETBF_RX_FB_CNT(ext_phy)); + seq_printf(s, "All: %ld, HE: %ld, VHT: %ld, HT: %ld\n", + FIELD_GET(MT_ETBF_RX_FB_ALL, cnt), + FIELD_GET(MT_ETBF_RX_FB_HE, cnt), + FIELD_GET(MT_ETBF_RX_FB_VHT, cnt), + FIELD_GET(MT_ETBF_RX_FB_HT, cnt)); + + /* Tx Beamformee Rx NDPA & Tx feedback report */ + cnt = mt76_rr(dev, MT_ETBF_TX_NDP_BFRP(ext_phy)); + seq_printf(s, "Tx Beamformee successful feedback frames: %ld\n", + FIELD_GET(MT_ETBF_TX_FB_CPL, cnt)); + seq_printf(s, "Tx Beamformee feedback triggered counts: %ld\n", + FIELD_GET(MT_ETBF_TX_FB_TRI, cnt)); + + /* Tx SU counters */ + cnt = mt76_rr(dev, MT_MIB_DR11(ext_phy)); + seq_printf(s, "Tx single-user successful MPDU counts: %d\n", cnt); + + seq_puts(s, "\n"); +} + +static int +mt7915_tx_stats_read(struct seq_file *file, void *data) +{ + struct mt7915_dev *dev = file->private; + int stat[8], i, n; + + mt7915_ampdu_stat_read_phy(&dev->phy, file); + mt7915_txbf_stat_read_phy(&dev->phy, file); + + mt7915_ampdu_stat_read_phy(mt7915_ext_phy(dev), file); + mt7915_txbf_stat_read_phy(mt7915_ext_phy(dev), file); + + /* Tx amsdu info */ + seq_puts(file, "Tx MSDU stat:\n"); + for (i = 0, n = 0; i < ARRAY_SIZE(stat); i++) { + stat[i] = mt76_rr(dev, MT_PLE_AMSDU_PACK_MSDU_CNT(i)); + n += stat[i]; + } + + for (i = 0; i < ARRAY_SIZE(stat); i++) { + seq_printf(file, "AMSDU pack count of %d MSDU in TXD: 0x%x ", + i + 1, stat[i]); + if (n != 0) + seq_printf(file, "(%d%%)\n", stat[i] * 100 / n); + else + seq_puts(file, "\n"); + } + + return 0; +} + +static int +mt7915_tx_stats_open(struct inode *inode, struct file *f) +{ + return single_open(f, mt7915_tx_stats_read, inode->i_private); +} + +static const struct file_operations fops_tx_stats = { + .open = mt7915_tx_stats_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int mt7915_read_temperature(struct seq_file *s, void *data) +{ + struct mt7915_dev *dev = dev_get_drvdata(s->private); + int temp; + + /* cpu */ + temp = mt7915_mcu_get_temperature(dev, 0); + seq_printf(s, "Temperature: %d\n", temp); + + return 0; +} + +static int +mt7915_queues_acq(struct seq_file *s, void *data) +{ + struct mt7915_dev *dev = dev_get_drvdata(s->private); + int i; + + for (i = 0; i < 16; i++) { + int j, acs = i / 4, index = i % 4; + u32 ctrl, val, qlen = 0; + + val = mt76_rr(dev, MT_PLE_AC_QEMPTY(acs, index)); + ctrl = BIT(31) | BIT(15) | (acs << 8); + + for (j = 0; j < 32; j++) { + if (val & BIT(j)) + continue; + + mt76_wr(dev, MT_PLE_FL_Q0_CTRL, + ctrl | (j + (index << 5))); + qlen += mt76_get_field(dev, MT_PLE_FL_Q3_CTRL, + GENMASK(11, 0)); + } + seq_printf(s, "AC%d%d: queued=%d\n", acs, index, qlen); + } + + return 0; +} + +static int +mt7915_queues_read(struct seq_file *s, void *data) +{ + 
struct mt7915_dev *dev = dev_get_drvdata(s->private); + static const struct { + char *queue; + int id; + } queue_map[] = { + { "WFDMA0", MT_TXQ_BE }, + { "MCUWM", MT_TXQ_MCU }, + { "MCUWA", MT_TXQ_MCU_WA }, + { "MCUFWQ", MT_TXQ_FWDL }, + }; + int i; + + for (i = 0; i < ARRAY_SIZE(queue_map); i++) { + struct mt76_sw_queue *q = &dev->mt76.q_tx[queue_map[i].id]; + + if (!q->q) + continue; + + seq_printf(s, + "%s: queued=%d head=%d tail=%d\n", + queue_map[i].queue, q->q->queued, q->q->head, + q->q->tail); + } + + return 0; +} + +static void +mt7915_puts_rate_txpower(struct seq_file *s, s8 *delta, + s8 txpower_cur, int band) +{ + static const char * const sku_group_name[] = { + "CCK", "OFDM", "HT20", "HT40", + "VHT20", "VHT40", "VHT80", "VHT160", + "RU26", "RU52", "RU106", "RU242/SU20", + "RU484/SU40", "RU996/SU80", "RU2x996/SU160" + }; + s8 txpower[MT7915_SKU_RATE_NUM]; + int i, idx = 0; + + for (i = 0; i < MT7915_SKU_RATE_NUM; i++) + txpower[i] = DIV_ROUND_UP(txpower_cur + delta[i], 2); + + for (i = 0; i < MAX_SKU_RATE_GROUP_NUM; i++) { + const struct sku_group *sku = &mt7915_sku_groups[i]; + u32 offset = sku->offset[band]; + + if (!offset) { + idx += sku->len; + continue; + } + + mt76_seq_puts_array(s, sku_group_name[i], + txpower + idx, sku->len); + idx += sku->len; + } +} + +static int +mt7915_read_rate_txpower(struct seq_file *s, void *data) +{ + struct mt7915_dev *dev = dev_get_drvdata(s->private); + struct mt76_phy *mphy = &dev->mphy; + enum nl80211_band band = mphy->chandef.chan->band; + s8 *delta = dev->rate_power[band]; + s8 txpower_base = mphy->txpower_cur - delta[MT7915_SKU_MAX_DELTA_IDX]; + + seq_puts(s, "Band 0:\n"); + mt7915_puts_rate_txpower(s, delta, txpower_base, band); + + if (dev->mt76.phy2) { + mphy = dev->mt76.phy2; + band = mphy->chandef.chan->band; + delta = dev->rate_power[band]; + txpower_base = mphy->txpower_cur - + delta[MT7915_SKU_MAX_DELTA_IDX]; + + seq_puts(s, "Band 1:\n"); + mt7915_puts_rate_txpower(s, delta, txpower_base, band); + } + + return 0; +} + +int mt7915_init_debugfs(struct mt7915_dev *dev) +{ + struct dentry *dir; + + dir = mt76_register_debugfs(&dev->mt76); + if (!dir) + return -ENOMEM; + + debugfs_create_devm_seqfile(dev->mt76.dev, "queues", dir, + mt7915_queues_read); + debugfs_create_devm_seqfile(dev->mt76.dev, "acq", dir, + mt7915_queues_acq); + debugfs_create_file("tx_stats", 0400, dir, dev, &fops_tx_stats); + debugfs_create_file("dbdc", 0600, dir, dev, &fops_dbdc); + debugfs_create_file("fw_debug", 0600, dir, dev, &fops_fw_debug); + debugfs_create_u32("dfs_hw_pattern", 0400, dir, &dev->hw_pattern); + /* test knobs */ + debugfs_create_file("radar_trigger", 0200, dir, dev, + &fops_radar_trigger); + debugfs_create_file("ser_trigger", 0200, dir, dev, &fops_ser_trigger); + debugfs_create_devm_seqfile(dev->mt76.dev, "temperature", dir, + mt7915_read_temperature); + debugfs_create_devm_seqfile(dev->mt76.dev, "txpower_sku", dir, + mt7915_read_rate_txpower); + + return 0; +} + +/** per-station debugfs **/ + +/* usage: <tx mode> <ldpc> <stbc> <bw> <gi> <nss> <mcs> */ +static int mt7915_sta_fixed_rate_set(void *data, u64 rate) +{ + struct ieee80211_sta *sta = data; + struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; + + return mt7915_mcu_set_fixed_rate(msta->vif->dev, sta, rate); +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_fixed_rate, NULL, + mt7915_sta_fixed_rate_set, "%llx\n"); + +static int +mt7915_sta_stats_read(struct seq_file *s, void *data) +{ + struct ieee80211_sta *sta = s->private; + struct mt7915_sta *msta = (struct mt7915_sta 
*)sta->drv_priv; + struct mt7915_sta_stats *stats = &msta->stats; + struct rate_info *rate = &stats->prob_rate; + static const char * const bw[] = { + "BW20", "BW5", "BW10", "BW40", + "BW80", "BW160", "BW_HE_RU" + }; + + if (!rate->legacy && !rate->flags) + return 0; + + seq_puts(s, "Probing rate - "); + if (rate->flags & RATE_INFO_FLAGS_MCS) + seq_puts(s, "HT "); + else if (rate->flags & RATE_INFO_FLAGS_VHT_MCS) + seq_puts(s, "VHT "); + else if (rate->flags & RATE_INFO_FLAGS_HE_MCS) + seq_puts(s, "HE "); + else + seq_printf(s, "Bitrate %d\n", rate->legacy); + + if (rate->flags) { + seq_printf(s, "%s NSS%d MCS%d ", + bw[rate->bw], rate->nss, rate->mcs); + + if (rate->flags & RATE_INFO_FLAGS_SHORT_GI) + seq_puts(s, "SGI "); + else if (rate->he_gi) + seq_puts(s, "HE GI "); + + if (rate->he_dcm) + seq_puts(s, "DCM "); + } + + seq_printf(s, "\nPPDU PER: %ld.%1ld%%\n", + stats->per / 10, stats->per % 10); + + return 0; +} + +static int +mt7915_sta_stats_open(struct inode *inode, struct file *f) +{ + return single_open(f, mt7915_sta_stats_read, inode->i_private); +} + +static const struct file_operations fops_sta_stats = { + .open = mt7915_sta_stats_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +void mt7915_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, struct dentry *dir) +{ + debugfs_create_file("fixed_rate", 0600, dir, sta, &fops_fixed_rate); + debugfs_create_file("stats", 0400, dir, sta, &fops_sta_stats); +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c new file mode 100644 index 000000000000..766185d1aa21 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c @@ -0,0 +1,285 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2020 MediaTek Inc. 
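The per-station files above follow the usual debugfs get/set-callback pattern. A minimal kernel-module sketch of the same DEFINE_DEBUGFS_ATTRIBUTE() plumbing; the module, directory, and knob names are made up, only the mechanism mirrors the driver code:

// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#include <linux/debugfs.h>

static struct dentry *demo_dir;
static u64 demo_knob;

static int demo_knob_get(void *data, u64 *val)
{
	*val = *(u64 *)data;
	return 0;
}

static int demo_knob_set(void *data, u64 val)
{
	*(u64 *)data = val;
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_demo_knob, demo_knob_get, demo_knob_set,
			 "%lld\n");

static int __init demo_init(void)
{
	demo_dir = debugfs_create_dir("mt76_demo", NULL);
	debugfs_create_file("knob", 0600, demo_dir, &demo_knob,
			    &fops_demo_knob);
	return 0;
}

static void __exit demo_exit(void)
{
	debugfs_remove_recursive(demo_dir);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");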
*/ + +#include "mt7915.h" +#include "../dma.h" +#include "mac.h" + +static int +mt7915_init_tx_queues(struct mt7915_dev *dev, int n_desc) +{ + struct mt76_sw_queue *q; + struct mt76_queue *hwq; + int err, i; + + hwq = devm_kzalloc(dev->mt76.dev, sizeof(*hwq), GFP_KERNEL); + if (!hwq) + return -ENOMEM; + + err = mt76_queue_alloc(dev, hwq, MT7915_TXQ_BAND0, n_desc, 0, + MT_TX_RING_BASE); + if (err < 0) + return err; + + for (i = 0; i < MT_TXQ_MCU; i++) { + q = &dev->mt76.q_tx[i]; + INIT_LIST_HEAD(&q->swq); + q->q = hwq; + } + + return 0; +} + +static int +mt7915_init_mcu_queue(struct mt7915_dev *dev, struct mt76_sw_queue *q, + int idx, int n_desc) +{ + struct mt76_queue *hwq; + int err; + + hwq = devm_kzalloc(dev->mt76.dev, sizeof(*hwq), GFP_KERNEL); + if (!hwq) + return -ENOMEM; + + err = mt76_queue_alloc(dev, hwq, idx, n_desc, 0, MT_TX_RING_BASE); + if (err < 0) + return err; + + INIT_LIST_HEAD(&q->swq); + q->q = hwq; + + return 0; +} + +void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, + struct sk_buff *skb) +{ + struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76); + __le32 *rxd = (__le32 *)skb->data; + enum rx_pkt_type type; + + type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0])); + + switch (type) { + case PKT_TYPE_TXRX_NOTIFY: + mt7915_mac_tx_free(dev, skb); + break; + case PKT_TYPE_RX_EVENT: + mt7915_mcu_rx_event(dev, skb); + break; + case PKT_TYPE_NORMAL: + if (!mt7915_mac_fill_rx(dev, skb)) { + mt76_rx(&dev->mt76, q, skb); + return; + } + /* fall through */ + default: + dev_kfree_skb(skb); + break; + } +} + +static int mt7915_poll_tx(struct napi_struct *napi, int budget) +{ + static const u8 queue_map[] = { + MT_TXQ_MCU, + MT_TXQ_MCU_WA, + MT_TXQ_BE + }; + struct mt7915_dev *dev; + int i; + + dev = container_of(napi, struct mt7915_dev, mt76.tx_napi); + + for (i = 0; i < ARRAY_SIZE(queue_map); i++) + mt76_queue_tx_cleanup(dev, queue_map[i], false); + + if (napi_complete_done(napi, 0)) + mt7915_irq_enable(dev, MT_INT_TX_DONE_ALL); + + for (i = 0; i < ARRAY_SIZE(queue_map); i++) + mt76_queue_tx_cleanup(dev, queue_map[i], false); + + mt7915_mac_sta_poll(dev); + + tasklet_schedule(&dev->mt76.tx_tasklet); + + return 0; +} + +void mt7915_dma_prefetch(struct mt7915_dev *dev) +{ +#define PREFETCH(base, depth) ((base) << 16 | (depth)) + + mt76_wr(dev, MT_WFDMA0_RX_RING0_EXT_CTRL, PREFETCH(0x0, 0x4)); + mt76_wr(dev, MT_WFDMA0_RX_RING1_EXT_CTRL, PREFETCH(0x40, 0x4)); + mt76_wr(dev, MT_WFDMA0_RX_RING2_EXT_CTRL, PREFETCH(0x80, 0x0)); + + mt76_wr(dev, MT_WFDMA1_TX_RING0_EXT_CTRL, PREFETCH(0x80, 0x4)); + mt76_wr(dev, MT_WFDMA1_TX_RING1_EXT_CTRL, PREFETCH(0xc0, 0x4)); + mt76_wr(dev, MT_WFDMA1_TX_RING2_EXT_CTRL, PREFETCH(0x100, 0x4)); + mt76_wr(dev, MT_WFDMA1_TX_RING3_EXT_CTRL, PREFETCH(0x140, 0x4)); + mt76_wr(dev, MT_WFDMA1_TX_RING4_EXT_CTRL, PREFETCH(0x180, 0x4)); + mt76_wr(dev, MT_WFDMA1_TX_RING5_EXT_CTRL, PREFETCH(0x1c0, 0x4)); + mt76_wr(dev, MT_WFDMA1_TX_RING6_EXT_CTRL, PREFETCH(0x200, 0x4)); + mt76_wr(dev, MT_WFDMA1_TX_RING7_EXT_CTRL, PREFETCH(0x240, 0x4)); + + mt76_wr(dev, MT_WFDMA1_TX_RING16_EXT_CTRL, PREFETCH(0x280, 0x4)); + mt76_wr(dev, MT_WFDMA1_TX_RING17_EXT_CTRL, PREFETCH(0x2c0, 0x4)); + mt76_wr(dev, MT_WFDMA1_TX_RING18_EXT_CTRL, PREFETCH(0x300, 0x4)); + mt76_wr(dev, MT_WFDMA1_TX_RING19_EXT_CTRL, PREFETCH(0x340, 0x4)); + mt76_wr(dev, MT_WFDMA1_TX_RING20_EXT_CTRL, PREFETCH(0x380, 0x4)); + mt76_wr(dev, MT_WFDMA1_TX_RING21_EXT_CTRL, PREFETCH(0x3c0, 0x0)); + + mt76_wr(dev, MT_WFDMA1_RX_RING0_EXT_CTRL, PREFETCH(0x3c0, 0x4)); + mt76_wr(dev, 
MT_WFDMA1_RX_RING1_EXT_CTRL, PREFETCH(0x400, 0x4));
+	mt76_wr(dev, MT_WFDMA1_RX_RING2_EXT_CTRL, PREFETCH(0x440, 0x4));
+	mt76_wr(dev, MT_WFDMA1_RX_RING3_EXT_CTRL, PREFETCH(0x480, 0x0));
+}
+
+int mt7915_dma_init(struct mt7915_dev *dev)
+{
+	/* Increase buffer size to receive large VHT/HE MPDUs */
+	int rx_buf_size = MT_RX_BUF_SIZE * 2;
+	int ret;
+
+	mt76_dma_attach(&dev->mt76);
+
+	/* configure global setting */
+	mt76_set(dev, MT_WFDMA1_GLO_CFG,
+		 MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
+		 MT_WFDMA1_GLO_CFG_OMIT_RX_INFO);
+
+	/* configure prefetch settings */
+	mt7915_dma_prefetch(dev);
+
+	/* reset dma idx */
+	mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR, ~0);
+	mt76_wr(dev, MT_WFDMA1_RST_DTX_PTR, ~0);
+
+	/* configure delay interrupt */
+	mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0, 0);
+	mt76_wr(dev, MT_WFDMA1_PRI_DLY_INT_CFG0, 0);
+
+	/* init tx queue */
+	ret = mt7915_init_tx_queues(dev, MT7915_TX_RING_SIZE);
+	if (ret)
+		return ret;
+
+	/* command to WM */
+	ret = mt7915_init_mcu_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU],
+				    MT7915_TXQ_MCU_WM,
+				    MT7915_TX_MCU_RING_SIZE);
+	if (ret)
+		return ret;
+
+	/* command to WA */
+	ret = mt7915_init_mcu_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU_WA],
+				    MT7915_TXQ_MCU_WA,
+				    MT7915_TX_MCU_RING_SIZE);
+	if (ret)
+		return ret;
+
+	/* firmware download */
+	ret = mt7915_init_mcu_queue(dev, &dev->mt76.q_tx[MT_TXQ_FWDL],
+				    MT7915_TXQ_FWDL,
+				    MT7915_TX_FWDL_RING_SIZE);
+	if (ret)
+		return ret;
+
+	/* event from WM */
+	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU],
+			       MT7915_RXQ_MCU_WM, MT7915_RX_MCU_RING_SIZE,
+			       rx_buf_size, MT_RX_EVENT_RING_BASE);
+	if (ret)
+		return ret;
+
+	/* event from WA */
+	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU_WA],
+			       MT7915_RXQ_MCU_WA, MT7915_RX_MCU_RING_SIZE,
+			       rx_buf_size, MT_RX_EVENT_RING_BASE);
+	if (ret)
+		return ret;
+
+	/* rx data */
+	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN], 0,
+			       MT7915_RX_RING_SIZE, rx_buf_size,
+			       MT_RX_DATA_RING_BASE);
+	if (ret)
+		return ret;
+
+	ret = mt76_init_queues(dev);
+	if (ret < 0)
+		return ret;
+
+	netif_tx_napi_add(&dev->mt76.napi_dev, &dev->mt76.tx_napi,
+			  mt7915_poll_tx, NAPI_POLL_WEIGHT);
+	napi_enable(&dev->mt76.tx_napi);
+
+	/* wait for hif WFDMA to be idle */
+	mt76_set(dev, MT_WFDMA0_BUSY_ENA,
+		 MT_WFDMA0_BUSY_ENA_TX_FIFO0 |
+		 MT_WFDMA0_BUSY_ENA_TX_FIFO1 |
+		 MT_WFDMA0_BUSY_ENA_RX_FIFO);
+
+	mt76_set(dev, MT_WFDMA1_BUSY_ENA,
+		 MT_WFDMA1_BUSY_ENA_TX_FIFO0 |
+		 MT_WFDMA1_BUSY_ENA_TX_FIFO1 |
+		 MT_WFDMA1_BUSY_ENA_RX_FIFO);
+
+	mt76_set(dev, MT_WFDMA0_PCIE1_BUSY_ENA,
+		 MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO0 |
+		 MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO1 |
+		 MT_WFDMA0_PCIE1_BUSY_ENA_RX_FIFO);
+
+	mt76_set(dev, MT_WFDMA1_PCIE1_BUSY_ENA,
+		 MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO0 |
+		 MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO1 |
+		 MT_WFDMA1_PCIE1_BUSY_ENA_RX_FIFO);
+
+	mt76_poll(dev, MT_WFDMA_EXT_CSR_HIF_MISC,
+		  MT_WFDMA_EXT_CSR_HIF_MISC_BUSY, 0, 1000);
+
+	/* set WFDMA Tx/Rx */
+	mt76_set(dev, MT_WFDMA0_GLO_CFG,
+		 MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
+	mt76_set(dev, MT_WFDMA1_GLO_CFG,
+		 MT_WFDMA1_GLO_CFG_TX_DMA_EN | MT_WFDMA1_GLO_CFG_RX_DMA_EN);
+
+	/* enable interrupts for TX/RX rings */
+	mt7915_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
+			       MT_INT_MCU_CMD);
+
+	return 0;
+}
+
+void mt7915_dma_cleanup(struct mt7915_dev *dev)
+{
+	/* disable */
+	mt76_clear(dev, MT_WFDMA0_GLO_CFG,
+		   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+		   MT_WFDMA0_GLO_CFG_RX_DMA_EN);
+	mt76_clear(dev, MT_WFDMA1_GLO_CFG,
+		   MT_WFDMA1_GLO_CFG_TX_DMA_EN |
+		   MT_WFDMA1_GLO_CFG_RX_DMA_EN);
+
+	/* reset */
+	mt76_clear(dev,
MT_WFDMA1_RST, + MT_WFDMA1_RST_DMASHDL_ALL_RST | + MT_WFDMA1_RST_LOGIC_RST); + + mt76_set(dev, MT_WFDMA1_RST, + MT_WFDMA1_RST_DMASHDL_ALL_RST | + MT_WFDMA1_RST_LOGIC_RST); + + mt76_clear(dev, MT_WFDMA0_RST, + MT_WFDMA0_RST_DMASHDL_ALL_RST | + MT_WFDMA0_RST_LOGIC_RST); + + mt76_set(dev, MT_WFDMA0_RST, + MT_WFDMA0_RST_DMASHDL_ALL_RST | + MT_WFDMA0_RST_LOGIC_RST); + + tasklet_kill(&dev->mt76.tx_tasklet); + mt76_dma_cleanup(&dev->mt76); +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c new file mode 100644 index 000000000000..7deba7ebd68a --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c @@ -0,0 +1,243 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2020 MediaTek Inc. */ + +#include "mt7915.h" +#include "eeprom.h" + +static inline bool mt7915_efuse_valid(u8 val) +{ + return !(val == 0xff); +} + +u32 mt7915_eeprom_read(struct mt7915_dev *dev, u32 offset) +{ + u8 *data = dev->mt76.eeprom.data; + + if (!mt7915_efuse_valid(data[offset])) + mt7915_mcu_get_eeprom(dev, offset); + + return data[offset]; +} + +static int mt7915_eeprom_load(struct mt7915_dev *dev) +{ + int ret; + + ret = mt76_eeprom_init(&dev->mt76, MT7915_EEPROM_SIZE); + if (ret < 0) + return ret; + + memset(dev->mt76.eeprom.data, -1, MT7915_EEPROM_SIZE); + + return 0; +} + +static int mt7915_check_eeprom(struct mt7915_dev *dev) +{ + u16 val; + u8 *eeprom = dev->mt76.eeprom.data; + + mt7915_eeprom_read(dev, 0); + val = get_unaligned_le16(eeprom); + + switch (val) { + case 0x7915: + return 0; + default: + return -EINVAL; + } +} + +static void mt7915_eeprom_parse_hw_cap(struct mt7915_dev *dev) +{ + u8 *eeprom = dev->mt76.eeprom.data; + u8 tx_mask, max_nss = 4; + u32 val = mt7915_eeprom_read(dev, MT_EE_WIFI_CONF); + + val = FIELD_GET(MT_EE_WIFI_CONF_BAND_SEL, val); + switch (val) { + case MT_EE_5GHZ: + dev->mt76.cap.has_5ghz = true; + break; + case MT_EE_2GHZ: + dev->mt76.cap.has_2ghz = true; + break; + default: + dev->mt76.cap.has_2ghz = true; + dev->mt76.cap.has_5ghz = true; + break; + } + + /* read tx mask from eeprom */ + tx_mask = FIELD_GET(MT_EE_WIFI_CONF_TX_MASK, + eeprom[MT_EE_WIFI_CONF]); + if (!tx_mask || tx_mask > max_nss) + tx_mask = max_nss; + + dev->chainmask = BIT(tx_mask) - 1; + dev->mphy.antenna_mask = dev->chainmask; + dev->phy.chainmask = dev->chainmask; +} + +int mt7915_eeprom_init(struct mt7915_dev *dev) +{ + int ret; + + ret = mt7915_eeprom_load(dev); + if (ret < 0) + return ret; + + ret = mt7915_check_eeprom(dev); + if (ret) + return ret; + + mt7915_eeprom_parse_hw_cap(dev); + memcpy(dev->mt76.macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR, + ETH_ALEN); + + mt76_eeprom_override(&dev->mt76); + + return 0; +} + +int mt7915_eeprom_get_target_power(struct mt7915_dev *dev, + struct ieee80211_channel *chan, + u8 chain_idx) +{ + int index; + bool tssi_on; + + if (chain_idx > 3) + return -EINVAL; + + tssi_on = mt7915_tssi_enabled(dev, chan->band); + + if (chan->band == NL80211_BAND_2GHZ) { + index = MT_EE_TX0_POWER_2G + chain_idx * 3 + !tssi_on; + } else { + int group = tssi_on ? 
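/* with TSSI in use the 5 GHz target power is indexed by channel group; otherwise slot 8 of each chain's 12-entry block appears to hold the fixed target */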
+ mt7915_get_channel_group(chan->hw_value) : 8; + + index = MT_EE_TX0_POWER_5G + chain_idx * 12 + group; + } + + return mt7915_eeprom_read(dev, index); +} + +static const u8 sku_cck_delta_map[] = { + SKU_CCK_GROUP0, + SKU_CCK_GROUP0, + SKU_CCK_GROUP1, + SKU_CCK_GROUP1, +}; + +static const u8 sku_ofdm_delta_map[] = { + SKU_OFDM_GROUP0, + SKU_OFDM_GROUP0, + SKU_OFDM_GROUP1, + SKU_OFDM_GROUP1, + SKU_OFDM_GROUP2, + SKU_OFDM_GROUP2, + SKU_OFDM_GROUP3, + SKU_OFDM_GROUP4, +}; + +static const u8 sku_mcs_delta_map[] = { + SKU_MCS_GROUP0, + SKU_MCS_GROUP1, + SKU_MCS_GROUP1, + SKU_MCS_GROUP2, + SKU_MCS_GROUP2, + SKU_MCS_GROUP3, + SKU_MCS_GROUP4, + SKU_MCS_GROUP5, + SKU_MCS_GROUP6, + SKU_MCS_GROUP7, + SKU_MCS_GROUP8, + SKU_MCS_GROUP9, +}; + +#define SKU_GROUP(_mode, _len, _ofs_2g, _ofs_5g, _map) \ + [_mode] = { \ + .len = _len, \ + .offset = { \ + _ofs_2g, \ + _ofs_5g, \ + }, \ + .delta_map = _map \ +} + +const struct sku_group mt7915_sku_groups[] = { + SKU_GROUP(SKU_CCK, 4, 0x252, 0, sku_cck_delta_map), + SKU_GROUP(SKU_OFDM, 8, 0x254, 0x29d, sku_ofdm_delta_map), + + SKU_GROUP(SKU_HT_BW20, 8, 0x259, 0x2a2, sku_mcs_delta_map), + SKU_GROUP(SKU_HT_BW40, 9, 0x262, 0x2ab, sku_mcs_delta_map), + SKU_GROUP(SKU_VHT_BW20, 12, 0x259, 0x2a2, sku_mcs_delta_map), + SKU_GROUP(SKU_VHT_BW40, 12, 0x262, 0x2ab, sku_mcs_delta_map), + SKU_GROUP(SKU_VHT_BW80, 12, 0, 0x2b4, sku_mcs_delta_map), + SKU_GROUP(SKU_VHT_BW160, 12, 0, 0, sku_mcs_delta_map), + + SKU_GROUP(SKU_HE_RU26, 12, 0x27f, 0x2dd, sku_mcs_delta_map), + SKU_GROUP(SKU_HE_RU52, 12, 0x289, 0x2e7, sku_mcs_delta_map), + SKU_GROUP(SKU_HE_RU106, 12, 0x293, 0x2f1, sku_mcs_delta_map), + SKU_GROUP(SKU_HE_RU242, 12, 0x26b, 0x2bf, sku_mcs_delta_map), + SKU_GROUP(SKU_HE_RU484, 12, 0x275, 0x2c9, sku_mcs_delta_map), + SKU_GROUP(SKU_HE_RU996, 12, 0, 0x2d3, sku_mcs_delta_map), + SKU_GROUP(SKU_HE_RU2x996, 12, 0, 0, sku_mcs_delta_map), +}; + +static s8 +mt7915_get_sku_delta(struct mt7915_dev *dev, u32 addr) +{ + u32 val = mt7915_eeprom_read(dev, addr); + s8 delta = FIELD_GET(SKU_DELTA_VAL, val); + + if (!(val & SKU_DELTA_EN)) + return 0; + + return val & SKU_DELTA_ADD ? delta : -delta; +} + +static void +mt7915_eeprom_init_sku_band(struct mt7915_dev *dev, + struct ieee80211_supported_band *sband) +{ + int i, band = sband->band; + s8 *rate_power = dev->rate_power[band], max_delta = 0; + u8 idx = 0; + + for (i = 0; i < ARRAY_SIZE(mt7915_sku_groups); i++) { + const struct sku_group *sku = &mt7915_sku_groups[i]; + u32 offset = sku->offset[band]; + int j; + + if (!offset) { + idx += sku->len; + continue; + } + + rate_power[idx++] = mt7915_get_sku_delta(dev, offset); + if (rate_power[idx - 1] > max_delta) + max_delta = rate_power[idx - 1]; + + if (i == SKU_HT_BW20 || i == SKU_VHT_BW20) + offset += 1; + + for (j = 1; j < sku->len; j++) { + u32 addr = offset + sku->delta_map[j]; + + rate_power[idx++] = mt7915_get_sku_delta(dev, addr); + if (rate_power[idx - 1] > max_delta) + max_delta = rate_power[idx - 1]; + } + } + + rate_power[idx] = max_delta; +} + +void mt7915_eeprom_init_sku(struct mt7915_dev *dev) +{ + mt7915_eeprom_init_sku_band(dev, &dev->mphy.sband_2g.sband); + mt7915_eeprom_init_sku_band(dev, &dev->mphy.sband_5g.sband); +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h new file mode 100644 index 000000000000..4e31d6ab4fa6 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h @@ -0,0 +1,125 @@ +/* SPDX-License-Identifier: ISC */ +/* Copyright (C) 2020 MediaTek Inc. 
*/ + +#ifndef __MT7915_EEPROM_H +#define __MT7915_EEPROM_H + +#include "mt7915.h" + +struct cal_data { + u8 count; + u16 offset[60]; +}; + +enum mt7915_eeprom_field { + MT_EE_CHIP_ID = 0x000, + MT_EE_VERSION = 0x002, + MT_EE_MAC_ADDR = 0x004, + MT_EE_DDIE_FT_VERSION = 0x050, + MT_EE_WIFI_CONF = 0x190, + MT_EE_TX0_POWER_2G = 0x2fc, + MT_EE_TX0_POWER_5G = 0x34b, + MT_EE_ADIE_FT_VERSION = 0x9a0, + + __MT_EE_MAX = 0xe00 +}; + +#define MT_EE_WIFI_CONF_TX_MASK GENMASK(2, 0) +#define MT_EE_WIFI_CONF_BAND_SEL GENMASK(7, 6) +#define MT_EE_WIFI_CONF_TSSI0_2G BIT(0) +#define MT_EE_WIFI_CONF_TSSI0_5G BIT(2) +#define MT_EE_WIFI_CONF_TSSI1_5G BIT(4) + +enum mt7915_eeprom_band { + MT_EE_DUAL_BAND, + MT_EE_5GHZ, + MT_EE_2GHZ, + MT_EE_DBDC, +}; + +#define SKU_DELTA_VAL GENMASK(5, 0) +#define SKU_DELTA_ADD BIT(6) +#define SKU_DELTA_EN BIT(7) + +enum mt7915_sku_delta_group { + SKU_CCK_GROUP0, + SKU_CCK_GROUP1, + + SKU_OFDM_GROUP0 = 0, + SKU_OFDM_GROUP1, + SKU_OFDM_GROUP2, + SKU_OFDM_GROUP3, + SKU_OFDM_GROUP4, + + SKU_MCS_GROUP0 = 0, + SKU_MCS_GROUP1, + SKU_MCS_GROUP2, + SKU_MCS_GROUP3, + SKU_MCS_GROUP4, + SKU_MCS_GROUP5, + SKU_MCS_GROUP6, + SKU_MCS_GROUP7, + SKU_MCS_GROUP8, + SKU_MCS_GROUP9, +}; + +enum mt7915_sku_rate_group { + SKU_CCK, + SKU_OFDM, + SKU_HT_BW20, + SKU_HT_BW40, + SKU_VHT_BW20, + SKU_VHT_BW40, + SKU_VHT_BW80, + SKU_VHT_BW160, + SKU_HE_RU26, + SKU_HE_RU52, + SKU_HE_RU106, + SKU_HE_RU242, + SKU_HE_RU484, + SKU_HE_RU996, + SKU_HE_RU2x996, + MAX_SKU_RATE_GROUP_NUM, +}; + +struct sku_group { + u8 len; + u16 offset[2]; + const u8 *delta_map; +}; + +static inline int +mt7915_get_channel_group(int channel) +{ + if (channel >= 184 && channel <= 196) + return 0; + if (channel <= 48) + return 1; + if (channel <= 64) + return 2; + if (channel <= 96) + return 3; + if (channel <= 112) + return 4; + if (channel <= 128) + return 5; + if (channel <= 144) + return 6; + return 7; +} + +static inline bool +mt7915_tssi_enabled(struct mt7915_dev *dev, enum nl80211_band band) +{ + u8 *eep = dev->mt76.eeprom.data; + + /* TODO: DBDC */ + if (band == NL80211_BAND_5GHZ) + return eep[MT_EE_WIFI_CONF + 7] & MT_EE_WIFI_CONF_TSSI0_5G; + else + return eep[MT_EE_WIFI_CONF + 7] & MT_EE_WIFI_CONF_TSSI0_2G; +} + +extern const struct sku_group mt7915_sku_groups[]; + +#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c new file mode 100644 index 000000000000..aadf56e80bae --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c @@ -0,0 +1,702 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2020 MediaTek Inc. 
*/ + +#include <linux/etherdevice.h> +#include "mt7915.h" +#include "mac.h" +#include "eeprom.h" + +static void +mt7915_mac_init_band(struct mt7915_dev *dev, u8 band) +{ + u32 mask, set; + + mt76_rmw_field(dev, MT_TMAC_CTCR0(band), + MT_TMAC_CTCR0_INS_DDLMT_REFTIME, 0x3f); + mt76_set(dev, MT_TMAC_CTCR0(band), + MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN | + MT_TMAC_CTCR0_INS_DDLMT_EN); + + mask = MT_MDP_RCFR0_MCU_RX_MGMT | + MT_MDP_RCFR0_MCU_RX_CTL_NON_BAR | + MT_MDP_RCFR0_MCU_RX_CTL_BAR; + set = FIELD_PREP(MT_MDP_RCFR0_MCU_RX_MGMT, MT_MDP_TO_HIF) | + FIELD_PREP(MT_MDP_RCFR0_MCU_RX_CTL_NON_BAR, MT_MDP_TO_HIF) | + FIELD_PREP(MT_MDP_RCFR0_MCU_RX_CTL_BAR, MT_MDP_TO_HIF); + mt76_rmw(dev, MT_MDP_BNRCFR0(band), mask, set); + + mask = MT_MDP_RCFR1_MCU_RX_BYPASS | + MT_MDP_RCFR1_RX_DROPPED_UCAST | + MT_MDP_RCFR1_RX_DROPPED_MCAST; + set = FIELD_PREP(MT_MDP_RCFR1_MCU_RX_BYPASS, MT_MDP_TO_HIF) | + FIELD_PREP(MT_MDP_RCFR1_RX_DROPPED_UCAST, MT_MDP_TO_HIF) | + FIELD_PREP(MT_MDP_RCFR1_RX_DROPPED_MCAST, MT_MDP_TO_HIF); + mt76_rmw(dev, MT_MDP_BNRCFR1(band), mask, set); + + mt76_set(dev, MT_WF_RMAC_MIB_TIME0(band), MT_WF_RMAC_MIB_RXTIME_EN); + mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(band), MT_WF_RMAC_MIB_RXTIME_EN); +} + +static void mt7915_mac_init(struct mt7915_dev *dev) +{ + int i; + + mt76_rmw_field(dev, MT_DMA_DCR0, MT_DMA_DCR0_MAX_RX_LEN, 1536); + mt76_rmw_field(dev, MT_MDP_DCR1, MT_MDP_DCR1_MAX_RX_LEN, 1536); + /* enable rx rate report */ + mt76_set(dev, MT_DMA_DCR0, MT_DMA_DCR0_RXD_G5_EN); + /* disable hardware de-agg */ + mt76_clear(dev, MT_MDP_DCR0, MT_MDP_DCR0_DAMSDU_EN); + + for (i = 0; i < MT7915_WTBL_SIZE; i++) + mt7915_mac_wtbl_update(dev, i, + MT_WTBL_UPDATE_ADM_COUNT_CLEAR); + + mt7915_mac_init_band(dev, 0); + mt7915_mac_init_band(dev, 1); + mt7915_mcu_set_rts_thresh(&dev->phy, 0x92b); +} + +static int mt7915_txbf_init(struct mt7915_dev *dev) +{ + int ret; + + /* + * TODO: DBDC & check whether iBF phase calibration data has + * been stored in eeprom offset 0x651~0x7b8, then write down + * 0x1111 into 0x651 and 0x651 to trigger iBF. 
+ */ + + /* trigger sounding packets */ + ret = mt7915_mcu_set_txbf_sounding(dev); + if (ret) + return ret; + + /* enable iBF & eBF */ + return mt7915_mcu_set_txbf_type(dev); +} + +static void +mt7915_init_txpower_band(struct mt7915_dev *dev, + struct ieee80211_supported_band *sband) +{ + int i, n_chains = hweight8(dev->mphy.antenna_mask); + + for (i = 0; i < sband->n_channels; i++) { + struct ieee80211_channel *chan = &sband->channels[i]; + u32 target_power = 0; + int j; + + for (j = 0; j < n_chains; j++) { + u32 val; + + val = mt7915_eeprom_get_target_power(dev, chan, j); + target_power = max(target_power, val); + } + + chan->max_power = min_t(int, chan->max_reg_power, + target_power / 2); + chan->orig_mpwr = target_power / 2; + } +} + +static void mt7915_init_txpower(struct mt7915_dev *dev) +{ + mt7915_init_txpower_band(dev, &dev->mphy.sband_2g.sband); + mt7915_init_txpower_band(dev, &dev->mphy.sband_5g.sband); + + mt7915_eeprom_init_sku(dev); +} + +static void mt7915_init_work(struct work_struct *work) +{ + struct mt7915_dev *dev = container_of(work, struct mt7915_dev, + init_work); + + mt7915_mcu_set_eeprom(dev); + mt7915_mac_init(dev); + mt7915_init_txpower(dev); + mt7915_txbf_init(dev); +} + +static int mt7915_init_hardware(struct mt7915_dev *dev) +{ + int ret, idx; + + mt76_wr(dev, MT_INT_SOURCE_CSR, ~0); + + INIT_WORK(&dev->init_work, mt7915_init_work); + spin_lock_init(&dev->token_lock); + idr_init(&dev->token); + + ret = mt7915_dma_init(dev); + if (ret) + return ret; + + set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state); + + ret = mt7915_mcu_init(dev); + if (ret) + return ret; + + ret = mt7915_eeprom_init(dev); + if (ret < 0) + return ret; + + /* Beacon and mgmt frames should occupy wcid 0 */ + idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7915_WTBL_STA - 1); + if (idx) + return -ENOSPC; + + dev->mt76.global_wcid.idx = idx; + dev->mt76.global_wcid.hw_key_idx = -1; + dev->mt76.global_wcid.tx_info |= MT_WCID_TX_INFO_SET; + rcu_assign_pointer(dev->mt76.wcid[idx], &dev->mt76.global_wcid); + + return 0; +} + +#define CCK_RATE(_idx, _rate) { \ + .bitrate = _rate, \ + .flags = IEEE80211_RATE_SHORT_PREAMBLE, \ + .hw_value = (MT_PHY_TYPE_CCK << 8) | (_idx), \ + .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (4 + (_idx)), \ +} + +#define OFDM_RATE(_idx, _rate) { \ + .bitrate = _rate, \ + .hw_value = (MT_PHY_TYPE_OFDM << 8) | (_idx), \ + .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | (_idx), \ +} + +static struct ieee80211_rate mt7915_rates[] = { + CCK_RATE(0, 10), + CCK_RATE(1, 20), + CCK_RATE(2, 55), + CCK_RATE(3, 110), + OFDM_RATE(11, 60), + OFDM_RATE(15, 90), + OFDM_RATE(10, 120), + OFDM_RATE(14, 180), + OFDM_RATE(9, 240), + OFDM_RATE(13, 360), + OFDM_RATE(8, 480), + OFDM_RATE(12, 540), +}; + +static const struct ieee80211_iface_limit if_limits[] = { + { + .max = 1, + .types = BIT(NL80211_IFTYPE_ADHOC) + }, { + .max = MT7915_MAX_INTERFACES, + .types = BIT(NL80211_IFTYPE_AP) | +#ifdef CONFIG_MAC80211_MESH + BIT(NL80211_IFTYPE_MESH_POINT) | +#endif + BIT(NL80211_IFTYPE_STATION) + } +}; + +static const struct ieee80211_iface_combination if_comb[] = { + { + .limits = if_limits, + .n_limits = ARRAY_SIZE(if_limits), + .max_interfaces = 4, + .num_different_channels = 1, + .beacon_int_infra_match = true, + .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | + BIT(NL80211_CHAN_WIDTH_20) | + BIT(NL80211_CHAN_WIDTH_40) | + BIT(NL80211_CHAN_WIDTH_80) | + BIT(NL80211_CHAN_WIDTH_160) | + BIT(NL80211_CHAN_WIDTH_80P80), + } +}; + +static void +mt7915_regd_notifier(struct wiphy *wiphy, + struct 
regulatory_request *request) +{ + struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); + struct mt7915_dev *dev = mt7915_hw_dev(hw); + struct mt76_phy *mphy = hw->priv; + struct mt7915_phy *phy = mphy->priv; + struct cfg80211_chan_def *chandef = &mphy->chandef; + + dev->mt76.region = request->dfs_region; + + if (!(chandef->chan->flags & IEEE80211_CHAN_RADAR)) + return; + + mt7915_dfs_init_radar_detector(phy); +} + +static void +mt7915_init_wiphy(struct ieee80211_hw *hw) +{ + struct mt7915_phy *phy = mt7915_hw_phy(hw); + struct wiphy *wiphy = hw->wiphy; + + hw->queues = 4; + hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF; + hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF; + + phy->slottime = 9; + + hw->sta_data_size = sizeof(struct mt7915_sta); + hw->vif_data_size = sizeof(struct mt7915_vif); + + wiphy->iface_combinations = if_comb; + wiphy->n_iface_combinations = ARRAY_SIZE(if_comb); + wiphy->reg_notifier = mt7915_regd_notifier; + wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH; + + wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS); + + ieee80211_hw_set(hw, HAS_RATE_CONTROL); + + hw->max_tx_fragments = 4; +} + +void mt7915_set_stream_vht_txbf_caps(struct mt7915_phy *phy) +{ + int nss = hweight8(phy->chainmask); + u32 *cap = &phy->mt76->sband_5g.sband.vht_cap.cap; + + *cap |= IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | + IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE | + (3 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT); + + *cap &= ~(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK | + IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | + IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE); + + if (nss < 2) + return; + + *cap |= IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | + IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE | + FIELD_PREP(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK, + nss - 1); +} + +static void +mt7915_set_stream_he_txbf_caps(struct ieee80211_sta_he_cap *he_cap, + int vif, int nss) +{ + struct ieee80211_he_cap_elem *elem = &he_cap->he_cap_elem; + struct ieee80211_he_mcs_nss_supp *mcs = &he_cap->he_mcs_nss_supp; + u8 c; + +#ifdef CONFIG_MAC80211_MESH + if (vif == NL80211_IFTYPE_MESH_POINT) + return; +#endif + + elem->phy_cap_info[3] &= ~IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER; + elem->phy_cap_info[4] &= ~IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER; + + c = IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK | + IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK; + elem->phy_cap_info[5] &= ~c; + + c = IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMER_FB | + IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMER_FB; + elem->phy_cap_info[6] &= ~c; + + elem->phy_cap_info[7] &= ~IEEE80211_HE_PHY_CAP7_MAX_NC_MASK; + + c = IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US | + IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO | + IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO; + elem->phy_cap_info[2] |= c; + + c = IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE | + IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4 | + IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4; + elem->phy_cap_info[4] |= c; + + /* do not support NG16 due to spec D4.0 changes subcarrier idx */ + c = IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU | + IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU; + + if (vif == NL80211_IFTYPE_STATION) + c |= IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO; + + elem->phy_cap_info[6] |= c; + + if (nss < 2) + return; + + if (vif != NL80211_IFTYPE_AP) + return; + + elem->phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER; + elem->phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER; + + /* num_snd_dim */ + c = (nss - 1) | (max_t(int, 
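/* bits 0..2 carry the sounding dimensions under 80 MHz (nss - 1); bits 3..5 presumably carry the above-80 MHz dimensions derived from tx_mcs_160 */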
mcs->tx_mcs_160, 1) << 3); + elem->phy_cap_info[5] |= c; + + c = IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMER_FB | + IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMER_FB; + elem->phy_cap_info[6] |= c; + + /* the maximum cap is 4 x 3, (Nr, Nc) = (3, 2) */ + elem->phy_cap_info[7] |= min_t(int, nss - 1, 2) << 3; +} + +static void +mt7915_gen_ppe_thresh(u8 *he_ppet) +{ + int ru, nss, max_nss = 1, max_ru = 3; + u8 bit = 7, ru_bit_mask = 0x7; + u8 ppet16_ppet8_ru3_ru0[] = {0x1c, 0xc7, 0x71}; + + he_ppet[0] = max_nss & IEEE80211_PPE_THRES_NSS_MASK; + he_ppet[0] |= (ru_bit_mask << + IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS) & + IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK; + + for (nss = 0; nss <= max_nss; nss++) { + for (ru = 0; ru < max_ru; ru++) { + u8 val; + int i; + + if (!(ru_bit_mask & BIT(ru))) + continue; + + val = (ppet16_ppet8_ru3_ru0[nss] >> (ru * 6)) & + 0x3f; + val = ((val >> 3) & 0x7) | ((val & 0x7) << 3); + for (i = 5; i >= 0; i--) { + he_ppet[bit / 8] |= + ((val >> i) & 0x1) << ((bit % 8)); + bit++; + } + } + } +} + +static int +mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band, + struct ieee80211_sband_iftype_data *data) +{ + int i, idx = 0; + int nss = hweight8(phy->chainmask); + u16 mcs_map = 0; + + for (i = 0; i < 8; i++) { + if (i < nss) + mcs_map |= (IEEE80211_HE_MCS_SUPPORT_0_11 << (i * 2)); + else + mcs_map |= (IEEE80211_HE_MCS_NOT_SUPPORTED << (i * 2)); + } + + for (i = 0; i < NUM_NL80211_IFTYPES; i++) { + struct ieee80211_sta_he_cap *he_cap = &data[idx].he_cap; + struct ieee80211_he_cap_elem *he_cap_elem = + &he_cap->he_cap_elem; + struct ieee80211_he_mcs_nss_supp *he_mcs = + &he_cap->he_mcs_nss_supp; + + switch (i) { + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_AP: +#ifdef CONFIG_MAC80211_MESH + case NL80211_IFTYPE_MESH_POINT: +#endif + break; + default: + continue; + } + + data[idx].types_mask = BIT(i); + he_cap->has_he = true; + + he_cap_elem->mac_cap_info[0] = + IEEE80211_HE_MAC_CAP0_HTC_HE; + he_cap_elem->mac_cap_info[1] = + IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_0US | + IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_1; + he_cap_elem->mac_cap_info[2] = + IEEE80211_HE_MAC_CAP2_BSR; + he_cap_elem->mac_cap_info[3] = + IEEE80211_HE_MAC_CAP3_OMI_CONTROL | + IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_RESERVED; + he_cap_elem->mac_cap_info[4] = + IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU; + + if (band == NL80211_BAND_2GHZ) + he_cap_elem->phy_cap_info[0] = + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G; + else if (band == NL80211_BAND_5GHZ) + he_cap_elem->phy_cap_info[0] = + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G | + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G | + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G; + + he_cap_elem->phy_cap_info[1] = + IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD; + he_cap_elem->phy_cap_info[2] = + IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ | + IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ; + + /* TODO: OFDMA */ + + switch (i) { + case NL80211_IFTYPE_AP: + he_cap_elem->mac_cap_info[0] |= + IEEE80211_HE_MAC_CAP0_TWT_RES; + he_cap_elem->mac_cap_info[4] |= + IEEE80211_HE_MAC_CAP4_BQR; + he_cap_elem->phy_cap_info[3] |= + IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_QPSK | + IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_QPSK; + he_cap_elem->phy_cap_info[6] |= + IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT; + he_cap_elem->phy_cap_info[9] |= + IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU; + break; + case NL80211_IFTYPE_STATION: + he_cap_elem->mac_cap_info[0] |= + IEEE80211_HE_MAC_CAP0_TWT_REQ; + 
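/* stations additionally advertise flexible TWT scheduling on top of the TWT requester role set above */ + 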
he_cap_elem->mac_cap_info[3] |= + IEEE80211_HE_MAC_CAP3_FLEX_TWT_SCHED; + + if (band == NL80211_BAND_2GHZ) + he_cap_elem->phy_cap_info[0] |= + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_2G; + else if (band == NL80211_BAND_5GHZ) + he_cap_elem->phy_cap_info[0] |= + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G; + + he_cap_elem->phy_cap_info[1] |= + IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A; + he_cap_elem->phy_cap_info[8] |= + IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G | + IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU | + IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU; + he_cap_elem->phy_cap_info[9] |= + IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU; + break; +#ifdef CONFIG_MAC80211_MESH + case NL80211_IFTYPE_MESH_POINT: + break; +#endif + } + + he_mcs->rx_mcs_80 = cpu_to_le16(mcs_map); + he_mcs->tx_mcs_80 = cpu_to_le16(mcs_map); + he_mcs->rx_mcs_160 = cpu_to_le16(mcs_map); + he_mcs->tx_mcs_160 = cpu_to_le16(mcs_map); + he_mcs->rx_mcs_80p80 = cpu_to_le16(mcs_map); + he_mcs->tx_mcs_80p80 = cpu_to_le16(mcs_map); + + mt7915_set_stream_he_txbf_caps(he_cap, i, nss); + + memset(he_cap->ppe_thres, 0, sizeof(he_cap->ppe_thres)); + if (he_cap_elem->phy_cap_info[6] & + IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) { + mt7915_gen_ppe_thresh(he_cap->ppe_thres); + } else { + he_cap_elem->phy_cap_info[9] |= + IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_16US; + } + idx++; + } + + return idx; +} + +void mt7915_set_stream_he_caps(struct mt7915_phy *phy) +{ + struct ieee80211_sband_iftype_data *data; + struct ieee80211_supported_band *band; + struct mt76_dev *mdev = &phy->dev->mt76; + int n; + + if (mdev->cap.has_2ghz) { + data = phy->iftype[NL80211_BAND_2GHZ]; + n = mt7915_init_he_caps(phy, NL80211_BAND_2GHZ, data); + + band = &phy->mt76->sband_2g.sband; + band->iftype_data = data; + band->n_iftype_data = n; + } + + if (mdev->cap.has_5ghz) { + data = phy->iftype[NL80211_BAND_5GHZ]; + n = mt7915_init_he_caps(phy, NL80211_BAND_5GHZ, data); + + band = &phy->mt76->sband_5g.sband; + band->iftype_data = data; + band->n_iftype_data = n; + } +} + +static void +mt7915_cap_dbdc_enable(struct mt7915_dev *dev) +{ + dev->mphy.sband_5g.sband.vht_cap.cap &= + ~(IEEE80211_VHT_CAP_SHORT_GI_160 | + IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ); + + if (dev->chainmask == 0xf) + dev->mphy.antenna_mask = dev->chainmask >> 2; + else + dev->mphy.antenna_mask = dev->chainmask >> 1; + + dev->phy.chainmask = dev->mphy.antenna_mask; + dev->mphy.hw->wiphy->available_antennas_rx = dev->phy.chainmask; + dev->mphy.hw->wiphy->available_antennas_tx = dev->phy.chainmask; + + mt76_set_stream_caps(&dev->mphy, true); + mt7915_set_stream_vht_txbf_caps(&dev->phy); + mt7915_set_stream_he_caps(&dev->phy); +} + +static void +mt7915_cap_dbdc_disable(struct mt7915_dev *dev) +{ + dev->mphy.sband_5g.sband.vht_cap.cap |= + IEEE80211_VHT_CAP_SHORT_GI_160 | + IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ; + + dev->mphy.antenna_mask = dev->chainmask; + dev->phy.chainmask = dev->chainmask; + dev->mphy.hw->wiphy->available_antennas_rx = dev->chainmask; + dev->mphy.hw->wiphy->available_antennas_tx = dev->chainmask; + + mt76_set_stream_caps(&dev->mphy, true); + mt7915_set_stream_vht_txbf_caps(&dev->phy); + mt7915_set_stream_he_caps(&dev->phy); +} + +int mt7915_register_ext_phy(struct mt7915_dev *dev) +{ + struct mt7915_phy *phy = mt7915_ext_phy(dev); + struct mt76_phy *mphy; + int ret; + bool bound; + + /* TODO: enable DBDC */ + bound = mt7915_l1_rr(dev, MT_HW_BOUND) & BIT(5); + if (!bound) + return -EINVAL; + + if 
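/* the ext phy cannot be brought up while the primary phy is already running */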
(test_bit(MT76_STATE_RUNNING, &dev->mphy.state)) + return -EINVAL; + + if (phy) + return 0; + + mt7915_cap_dbdc_enable(dev); + mphy = mt76_alloc_phy(&dev->mt76, sizeof(*phy), &mt7915_ops); + if (!mphy) + return -ENOMEM; + + phy = mphy->priv; + phy->dev = dev; + phy->mt76 = mphy; + phy->chainmask = dev->chainmask & ~dev->phy.chainmask; + mphy->antenna_mask = BIT(hweight8(phy->chainmask)) - 1; + mt7915_init_wiphy(mphy->hw); + + INIT_DELAYED_WORK(&phy->mac_work, mt7915_mac_work); + + /* + * Make the secondary PHY MAC address local without overlapping with + * the usual MAC address allocation scheme on multiple virtual interfaces + */ + mphy->hw->wiphy->perm_addr[0] |= 2; + mphy->hw->wiphy->perm_addr[0] ^= BIT(7); + + /* The second interface does not get any packets unless it has a vif */ + ieee80211_hw_set(mphy->hw, WANT_MONITOR_VIF); + + ret = mt76_register_phy(mphy); + if (ret) + ieee80211_free_hw(mphy->hw); + + return ret; +} + +void mt7915_unregister_ext_phy(struct mt7915_dev *dev) +{ + struct mt7915_phy *phy = mt7915_ext_phy(dev); + struct mt76_phy *mphy = dev->mt76.phy2; + + if (!phy) + return; + + mt7915_cap_dbdc_disable(dev); + mt76_unregister_phy(mphy); + ieee80211_free_hw(mphy->hw); +} + +int mt7915_register_device(struct mt7915_dev *dev) +{ + struct ieee80211_hw *hw = mt76_hw(dev); + int ret; + + dev->phy.dev = dev; + dev->phy.mt76 = &dev->mt76.phy; + dev->mt76.phy.priv = &dev->phy; + INIT_DELAYED_WORK(&dev->phy.mac_work, mt7915_mac_work); + INIT_LIST_HEAD(&dev->sta_poll_list); + spin_lock_init(&dev->sta_poll_lock); + + init_waitqueue_head(&dev->reset_wait); + INIT_WORK(&dev->reset_work, mt7915_mac_reset_work); + + ret = mt7915_init_hardware(dev); + if (ret) + return ret; + + mt7915_init_wiphy(hw); + dev->mphy.sband_2g.sband.ht_cap.cap |= + IEEE80211_HT_CAP_LDPC_CODING | + IEEE80211_HT_CAP_MAX_AMSDU; + dev->mphy.sband_5g.sband.ht_cap.cap |= + IEEE80211_HT_CAP_LDPC_CODING | + IEEE80211_HT_CAP_MAX_AMSDU; + dev->mphy.sband_5g.sband.vht_cap.cap |= + IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 | + IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK; + mt7915_cap_dbdc_disable(dev); + dev->phy.dfs_state = -1; + + ret = mt76_register_device(&dev->mt76, true, mt7915_rates, + ARRAY_SIZE(mt7915_rates)); + if (ret) + return ret; + + ieee80211_queue_work(mt76_hw(dev), &dev->init_work); + + return mt7915_init_debugfs(dev); +} + +void mt7915_unregister_device(struct mt7915_dev *dev) +{ + struct mt76_txwi_cache *txwi; + int id; + + mt7915_unregister_ext_phy(dev); + mt76_unregister_device(&dev->mt76); + mt7915_mcu_exit(dev); + mt7915_dma_cleanup(dev); + + spin_lock_bh(&dev->token_lock); + idr_for_each_entry(&dev->token, txwi, id) { + mt7915_txp_skb_unmap(&dev->mt76, txwi); + if (txwi->skb) + dev_kfree_skb_any(txwi->skb); + mt76_put_txwi(&dev->mt76, txwi); + } + spin_unlock_bh(&dev->token_lock); + idr_destroy(&dev->token); + + mt76_free_device(&dev->mt76); +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c new file mode 100644 index 000000000000..a264e304a3df --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c @@ -0,0 +1,1477 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2020 MediaTek Inc. 
*/ + +#include <linux/etherdevice.h> +#include <linux/timekeeping.h> +#include "mt7915.h" +#include "../dma.h" +#include "mac.h" + +#define to_rssi(field, rxv) ((FIELD_GET(field, rxv) - 220) / 2) + +#define HE_BITS(f) cpu_to_le16(IEEE80211_RADIOTAP_HE_##f) +#define HE_PREP(f, m, v) le16_encode_bits(le32_get_bits(v, MT_CRXV_HE_##m),\ + IEEE80211_RADIOTAP_HE_##f) + +static const struct mt7915_dfs_radar_spec etsi_radar_specs = { + .pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 }, + .radar_pattern = { + [5] = { 1, 0, 6, 32, 28, 0, 990, 5010, 17, 1, 1 }, + [6] = { 1, 0, 9, 32, 28, 0, 615, 5010, 27, 1, 1 }, + [7] = { 1, 0, 15, 32, 28, 0, 240, 445, 27, 1, 1 }, + [8] = { 1, 0, 12, 32, 28, 0, 240, 510, 42, 1, 1 }, + [9] = { 1, 1, 0, 0, 0, 0, 2490, 3343, 14, 0, 0, 12, 32, 28, { }, 126 }, + [10] = { 1, 1, 0, 0, 0, 0, 2490, 3343, 14, 0, 0, 15, 32, 24, { }, 126 }, + [11] = { 1, 1, 0, 0, 0, 0, 823, 2510, 14, 0, 0, 18, 32, 28, { }, 54 }, + [12] = { 1, 1, 0, 0, 0, 0, 823, 2510, 14, 0, 0, 27, 32, 24, { }, 54 }, + }, +}; + +static const struct mt7915_dfs_radar_spec fcc_radar_specs = { + .pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 }, + .radar_pattern = { + [0] = { 1, 0, 8, 32, 28, 0, 508, 3076, 13, 1, 1 }, + [1] = { 1, 0, 12, 32, 28, 0, 140, 240, 17, 1, 1 }, + [2] = { 1, 0, 8, 32, 28, 0, 190, 510, 22, 1, 1 }, + [3] = { 1, 0, 6, 32, 28, 0, 190, 510, 32, 1, 1 }, + [4] = { 1, 0, 9, 255, 28, 0, 323, 343, 13, 1, 32 }, + }, +}; + +static const struct mt7915_dfs_radar_spec jp_radar_specs = { + .pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 }, + .radar_pattern = { + [0] = { 1, 0, 8, 32, 28, 0, 508, 3076, 13, 1, 1 }, + [1] = { 1, 0, 12, 32, 28, 0, 140, 240, 17, 1, 1 }, + [2] = { 1, 0, 8, 32, 28, 0, 190, 510, 22, 1, 1 }, + [3] = { 1, 0, 6, 32, 28, 0, 190, 510, 32, 1, 1 }, + [4] = { 1, 0, 9, 255, 28, 0, 323, 343, 13, 1, 32 }, + [13] = { 1, 0, 7, 32, 28, 0, 3836, 3856, 14, 1, 1 }, + [14] = { 1, 0, 6, 32, 28, 0, 615, 5010, 110, 1, 1 }, + [15] = { 1, 1, 0, 0, 0, 0, 15, 5010, 110, 0, 0, 12, 32, 28 }, + }, +}; + +static struct mt76_wcid *mt7915_rx_get_wcid(struct mt7915_dev *dev, + u16 idx, bool unicast) +{ + struct mt7915_sta *sta; + struct mt76_wcid *wcid; + + if (idx >= ARRAY_SIZE(dev->mt76.wcid)) + return NULL; + + wcid = rcu_dereference(dev->mt76.wcid[idx]); + if (unicast || !wcid) + return wcid; + + if (!wcid->sta) + return NULL; + + sta = container_of(wcid, struct mt7915_sta, wcid); + if (!sta->vif) + return NULL; + + return &sta->vif->sta.wcid; +} + +void mt7915_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps) +{ +} + +bool mt7915_mac_wtbl_update(struct mt7915_dev *dev, int idx, u32 mask) +{ + mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX, + FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask); + + return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, + 0, 5000); +} + +static u32 mt7915_mac_wtbl_lmac_read(struct mt7915_dev *dev, u16 wcid, + u16 addr) +{ + mt76_wr(dev, MT_WTBLON_TOP_WDUCR, + FIELD_PREP(MT_WTBLON_TOP_WDUCR_GROUP, (wcid >> 7))); + + return mt76_rr(dev, MT_WTBL_LMAC_OFFS(wcid, addr)); +} + +/* TODO: use txfree airtime info to avoid runtime accessing in the long run */ +void mt7915_mac_sta_poll(struct mt7915_dev *dev) +{ + static const u8 ac_to_tid[] = { + [IEEE80211_AC_BE] = 0, + [IEEE80211_AC_BK] = 1, + [IEEE80211_AC_VI] = 4, + [IEEE80211_AC_VO] = 6 + }; + static const u8 hw_queue_map[] = { + [IEEE80211_AC_BK] = 0, + [IEEE80211_AC_BE] = 1, + [IEEE80211_AC_VI] = 2, + [IEEE80211_AC_VO] = 3, + }; + struct ieee80211_sta *sta; + struct mt7915_sta *msta; + u32 
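/* per-AC tx/rx airtime snapshots; deltas against the WTBL admission counters (DWs 20/21 + ac) are computed below */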
tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS]; + int i; + + rcu_read_lock(); + + while (true) { + bool clear = false; + u16 idx; + + spin_lock_bh(&dev->sta_poll_lock); + if (list_empty(&dev->sta_poll_list)) { + spin_unlock_bh(&dev->sta_poll_lock); + break; + } + msta = list_first_entry(&dev->sta_poll_list, + struct mt7915_sta, poll_list); + list_del_init(&msta->poll_list); + spin_unlock_bh(&dev->sta_poll_lock); + + for (i = 0, idx = msta->wcid.idx; i < IEEE80211_NUM_ACS; i++) { + u32 tx_last = msta->airtime_ac[i]; + u32 rx_last = msta->airtime_ac[i + IEEE80211_NUM_ACS]; + + msta->airtime_ac[i] = + mt7915_mac_wtbl_lmac_read(dev, idx, 20 + i); + msta->airtime_ac[i + IEEE80211_NUM_ACS] = + mt7915_mac_wtbl_lmac_read(dev, idx, 21 + i); + tx_time[i] = msta->airtime_ac[i] - tx_last; + rx_time[i] = msta->airtime_ac[i + IEEE80211_NUM_ACS] - + rx_last; + + if ((tx_last | rx_last) & BIT(30)) + clear = true; + } + + if (clear) { + mt7915_mac_wtbl_update(dev, idx, + MT_WTBL_UPDATE_ADM_COUNT_CLEAR); + memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac)); + } + + if (!msta->wcid.sta) + continue; + + sta = container_of((void *)msta, struct ieee80211_sta, + drv_priv); + for (i = 0; i < IEEE80211_NUM_ACS; i++) { + u32 tx_cur = tx_time[i]; + u32 rx_cur = rx_time[hw_queue_map[i]]; + u8 tid = ac_to_tid[i]; + + if (!tx_cur && !rx_cur) + continue; + + ieee80211_sta_register_airtime(sta, tid, tx_cur, + rx_cur); + } + } + + rcu_read_unlock(); +} + +static void +mt7915_mac_decode_he_radiotap_ru(struct mt76_rx_status *status, + struct mt7915_rxv *rxv, + struct ieee80211_radiotap_he *he) +{ + u32 ru_h, ru_l; + u8 ru, offs = 0; + + ru_l = FIELD_GET(MT_PRXV_HE_RU_ALLOC_L, le32_to_cpu(rxv->v[0])); + ru_h = FIELD_GET(MT_PRXV_HE_RU_ALLOC_H, le32_to_cpu(rxv->v[1])); + ru = (u8)(ru_l | ru_h << 4); + + status->bw = RATE_INFO_BW_HE_RU; + + switch (ru) { + case 0 ... 36: + status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26; + offs = ru; + break; + case 37 ... 52: + status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52; + offs = ru - 37; + break; + case 53 ... 60: + status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106; + offs = ru - 53; + break; + case 61 ... 64: + status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242; + offs = ru - 61; + break; + case 65 ... 
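/* this mapping follows the 802.11ax RU allocation indexing: 0-36 RU26, 37-52 RU52, 53-60 RU106, 61-64 RU242, 65-66 RU484, 67 RU996, 68 2x996 */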
66: + status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484; + offs = ru - 65; + break; + case 67: + status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996; + break; + case 68: + status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996; + break; + } + + he->data1 |= HE_BITS(DATA1_BW_RU_ALLOC_KNOWN); + he->data2 |= HE_BITS(DATA2_RU_OFFSET_KNOWN) | + le16_encode_bits(offs, + IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET); +} + +static void +mt7915_mac_decode_he_radiotap(struct sk_buff *skb, + struct mt76_rx_status *status, + struct mt7915_rxv *rxv) +{ + /* TODO: struct ieee80211_radiotap_he_mu */ + static const struct ieee80211_radiotap_he known = { + .data1 = HE_BITS(DATA1_DATA_MCS_KNOWN) | + HE_BITS(DATA1_DATA_DCM_KNOWN) | + HE_BITS(DATA1_STBC_KNOWN) | + HE_BITS(DATA1_CODING_KNOWN) | + HE_BITS(DATA1_LDPC_XSYMSEG_KNOWN) | + HE_BITS(DATA1_DOPPLER_KNOWN) | + HE_BITS(DATA1_BSS_COLOR_KNOWN), + .data2 = HE_BITS(DATA2_GI_KNOWN) | + HE_BITS(DATA2_TXBF_KNOWN) | + HE_BITS(DATA2_PE_DISAMBIG_KNOWN) | + HE_BITS(DATA2_TXOP_KNOWN), + }; + struct ieee80211_radiotap_he *he = NULL; + __le32 v2 = rxv->v[2]; + __le32 v11 = rxv->v[11]; + __le32 v14 = rxv->v[14]; + u32 ltf_size = le32_get_bits(v2, MT_CRXV_HE_LTF_SIZE) + 1; + + he = skb_push(skb, sizeof(known)); + memcpy(he, &known, sizeof(known)); + + he->data3 = HE_PREP(DATA3_BSS_COLOR, BSS_COLOR, v14) | + HE_PREP(DATA3_LDPC_XSYMSEG, LDPC_EXT_SYM, v2); + he->data5 = HE_PREP(DATA5_PE_DISAMBIG, PE_DISAMBIG, v2) | + le16_encode_bits(ltf_size, + IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE); + he->data6 = HE_PREP(DATA6_TXOP, TXOP_DUR, v14) | + HE_PREP(DATA6_DOPPLER, DOPPLER, v14); + + switch (rxv->phy) { + case MT_PHY_TYPE_HE_SU: + he->data1 |= HE_BITS(DATA1_FORMAT_SU) | + HE_BITS(DATA1_UL_DL_KNOWN) | + HE_BITS(DATA1_BEAM_CHANGE_KNOWN) | + HE_BITS(DATA1_SPTL_REUSE_KNOWN); + + he->data3 |= HE_PREP(DATA3_BEAM_CHANGE, BEAM_CHNG, v14) | + HE_PREP(DATA3_UL_DL, UPLINK, v2); + he->data4 |= HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, v11); + break; + case MT_PHY_TYPE_HE_EXT_SU: + he->data1 |= HE_BITS(DATA1_FORMAT_EXT_SU) | + HE_BITS(DATA1_UL_DL_KNOWN); + + he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, v2); + break; + case MT_PHY_TYPE_HE_MU: + he->data1 |= HE_BITS(DATA1_FORMAT_MU) | + HE_BITS(DATA1_UL_DL_KNOWN) | + HE_BITS(DATA1_SPTL_REUSE_KNOWN); + + he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, v2); + he->data4 |= HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, v11); + + mt7915_mac_decode_he_radiotap_ru(status, rxv, he); + break; + case MT_PHY_TYPE_HE_TB: + he->data1 |= HE_BITS(DATA1_FORMAT_TRIG) | + HE_BITS(DATA1_SPTL_REUSE_KNOWN) | + HE_BITS(DATA1_SPTL_REUSE2_KNOWN) | + HE_BITS(DATA1_SPTL_REUSE3_KNOWN) | + HE_BITS(DATA1_SPTL_REUSE4_KNOWN); + + he->data4 |= HE_PREP(DATA4_TB_SPTL_REUSE1, SR_MASK, v11) | + HE_PREP(DATA4_TB_SPTL_REUSE2, SR1_MASK, v11) | + HE_PREP(DATA4_TB_SPTL_REUSE3, SR2_MASK, v11) | + HE_PREP(DATA4_TB_SPTL_REUSE4, SR3_MASK, v11); + + mt7915_mac_decode_he_radiotap_ru(status, rxv, he); + break; + default: + break; + } +} + +int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb) +{ + struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; + struct mt76_phy *mphy = &dev->mt76.phy; + struct mt7915_phy *phy = &dev->phy; + struct ieee80211_supported_band *sband; + struct ieee80211_hdr *hdr; + struct mt7915_rxv rxv = {}; + __le32 *rxd = (__le32 *)skb->data; + u32 rxd1 = le32_to_cpu(rxd[1]); + u32 rxd2 = le32_to_cpu(rxd[2]); + u32 rxd3 = le32_to_cpu(rxd[3]); + bool unicast, insert_ccmp_hdr = false; + u8 remove_pad; + int i, idx; + + memset(status, 0, sizeof(*status)); + + if (rxd1 & 
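/* the band-idx bit marks frames received on the secondary (DBDC) phy */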
MT_RXD1_NORMAL_BAND_IDX) { + mphy = dev->mt76.phy2; + if (!mphy) + return -EINVAL; + + phy = mphy->priv; + status->ext_phy = true; + } + + if (!test_bit(MT76_STATE_RUNNING, &mphy->state)) + return -EINVAL; + + unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M; + idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1); + status->wcid = mt7915_rx_get_wcid(dev, idx, unicast); + + if (status->wcid) { + struct mt7915_sta *msta; + + msta = container_of(status->wcid, struct mt7915_sta, wcid); + spin_lock_bh(&dev->sta_poll_lock); + if (list_empty(&msta->poll_list)) + list_add_tail(&msta->poll_list, &dev->sta_poll_list); + spin_unlock_bh(&dev->sta_poll_lock); + } + + status->freq = mphy->chandef.chan->center_freq; + status->band = mphy->chandef.chan->band; + if (status->band == NL80211_BAND_5GHZ) + sband = &mphy->sband_5g.sband; + else + sband = &mphy->sband_2g.sband; + + if (!sband->channels) + return -EINVAL; + + if (rxd1 & MT_RXD1_NORMAL_FCS_ERR) + status->flag |= RX_FLAG_FAILED_FCS_CRC; + + if (rxd1 & MT_RXD1_NORMAL_TKIP_MIC_ERR) + status->flag |= RX_FLAG_MMIC_ERROR; + + if (FIELD_GET(MT_RXD1_NORMAL_SEC_MODE, rxd1) != 0 && + !(rxd1 & (MT_RXD1_NORMAL_CLM | MT_RXD1_NORMAL_CM))) { + status->flag |= RX_FLAG_DECRYPTED; + status->flag |= RX_FLAG_IV_STRIPPED; + status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED; + } + + if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) { + status->flag |= RX_FLAG_AMPDU_DETAILS; + + /* all subframes of an A-MPDU have the same timestamp */ + if (phy->rx_ampdu_ts != rxd[14]) { + if (!++phy->ampdu_ref) + phy->ampdu_ref++; + } + phy->rx_ampdu_ts = rxd[14]; + + status->ampdu_ref = phy->ampdu_ref; + } + + remove_pad = FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET, rxd2); + + if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR) + return -EINVAL; + + rxd += 6; + if (rxd1 & MT_RXD1_NORMAL_GROUP_4) { + rxd += 4; + if ((u8 *)rxd - skb->data >= skb->len) + return -EINVAL; + } + + if (rxd1 & MT_RXD1_NORMAL_GROUP_1) { + u8 *data = (u8 *)rxd; + + if (status->flag & RX_FLAG_DECRYPTED) { + status->iv[0] = data[5]; + status->iv[1] = data[4]; + status->iv[2] = data[3]; + status->iv[3] = data[2]; + status->iv[4] = data[1]; + status->iv[5] = data[0]; + + insert_ccmp_hdr = FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2); + } + rxd += 4; + if ((u8 *)rxd - skb->data >= skb->len) + return -EINVAL; + } + + if (rxd1 & MT_RXD1_NORMAL_GROUP_2) { + rxd += 2; + if ((u8 *)rxd - skb->data >= skb->len) + return -EINVAL; + } + + /* RXD Group 3 - P-RXV */ + if (rxd1 & MT_RXD1_NORMAL_GROUP_3) { + u32 v0, v1, v2; + + memcpy(rxv.v, rxd, sizeof(rxv.v)); + + rxd += 2; + if ((u8 *)rxd - skb->data >= skb->len) + return -EINVAL; + + v0 = le32_to_cpu(rxv.v[0]); + v1 = le32_to_cpu(rxv.v[1]); + v2 = le32_to_cpu(rxv.v[2]); + + if (v0 & MT_PRXV_HT_AD_CODE) + status->enc_flags |= RX_ENC_FLAG_LDPC; + + status->chains = mphy->antenna_mask; + status->chain_signal[0] = to_rssi(MT_PRXV_RCPI0, v1); + status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v1); + status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v1); + status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v1); + status->signal = status->chain_signal[0]; + + for (i = 1; i < hweight8(mphy->antenna_mask); i++) { + if (!(status->chains & BIT(i))) + continue; + + status->signal = max(status->signal, + status->chain_signal[i]); + } + + /* RXD Group 5 - C-RXV */ + if (rxd1 & MT_RXD1_NORMAL_GROUP_5) { + u8 stbc = FIELD_GET(MT_CRXV_HT_STBC, v2); + u8 gi = FIELD_GET(MT_CRXV_HT_SHORT_GI, v2); + bool cck = false; + + rxd += 18; + if ((u8 *)rxd - skb->data >= skb->len) + return -EINVAL; + + idx = i = 
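/* idx keeps the raw PRXV rate field (the DCM and ER_SU flags live there); i is remapped to a mac80211 rate index per PHY mode below */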
FIELD_GET(MT_PRXV_TX_RATE, v0); + rxv.phy = FIELD_GET(MT_CRXV_TX_MODE, v2); + + switch (rxv.phy) { + case MT_PHY_TYPE_CCK: + cck = true; + /* fall through */ + case MT_PHY_TYPE_OFDM: + i = mt76_get_rate(&dev->mt76, sband, i, cck); + break; + case MT_PHY_TYPE_HT_GF: + case MT_PHY_TYPE_HT: + status->encoding = RX_ENC_HT; + if (i > 31) + return -EINVAL; + break; + case MT_PHY_TYPE_VHT: + status->nss = + FIELD_GET(MT_PRXV_NSTS, v0) + 1; + status->encoding = RX_ENC_VHT; + if (i > 9) + return -EINVAL; + break; + case MT_PHY_TYPE_HE_MU: + status->flag |= RX_FLAG_RADIOTAP_HE_MU; + /* fall through */ + case MT_PHY_TYPE_HE_SU: + case MT_PHY_TYPE_HE_EXT_SU: + case MT_PHY_TYPE_HE_TB: + status->nss = + FIELD_GET(MT_PRXV_NSTS, v0) + 1; + status->encoding = RX_ENC_HE; + status->flag |= RX_FLAG_RADIOTAP_HE; + i &= GENMASK(3, 0); + + if (gi <= NL80211_RATE_INFO_HE_GI_3_2) + status->he_gi = gi; + + if (idx & MT_PRXV_TX_DCM) + status->he_dcm = true; + break; + default: + return -EINVAL; + } + status->rate_idx = i; + + switch (FIELD_GET(MT_CRXV_FRAME_MODE, v2)) { + case IEEE80211_STA_RX_BW_20: + break; + case IEEE80211_STA_RX_BW_40: + if (rxv.phy & MT_PHY_TYPE_HE_EXT_SU && + (idx & MT_PRXV_TX_ER_SU_106T)) { + status->bw = RATE_INFO_BW_HE_RU; + status->he_ru = + NL80211_RATE_INFO_HE_RU_ALLOC_106; + } else { + status->bw = RATE_INFO_BW_40; + } + break; + case IEEE80211_STA_RX_BW_80: + status->bw = RATE_INFO_BW_80; + break; + case IEEE80211_STA_RX_BW_160: + status->bw = RATE_INFO_BW_160; + break; + default: + return -EINVAL; + } + + status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc; + if (rxv.phy < MT_PHY_TYPE_HE_SU && gi) + status->enc_flags |= RX_ENC_FLAG_SHORT_GI; + } + } + + skb_pull(skb, (u8 *)rxd - skb->data + 2 * remove_pad); + + if (insert_ccmp_hdr) { + u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1); + + mt76_insert_ccmp_hdr(skb, key_id); + } + + if (status->flag & RX_FLAG_RADIOTAP_HE) + mt7915_mac_decode_he_radiotap(skb, status, &rxv); + + hdr = mt76_skb_get_hdr(skb); + if (!status->wcid || !ieee80211_is_data_qos(hdr->frame_control)) + return 0; + + status->aggr = unicast && + !ieee80211_is_qos_nullfunc(hdr->frame_control); + status->tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; + status->seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); + + return 0; +} + +void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi, + struct sk_buff *skb, struct mt76_wcid *wcid, + struct ieee80211_key_conf *key, bool beacon) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + bool multicast = is_multicast_ether_addr(hdr->addr1); + struct ieee80211_vif *vif = info->control.vif; + struct mt76_phy *mphy = &dev->mphy; + bool ext_phy = info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY; + u8 fc_type, fc_stype, p_fmt, q_idx, omac_idx = 0, wmm_idx = 0; + __le16 fc = hdr->frame_control; + u16 tx_count = 4, seqno = 0; + u32 val; + + if (vif) { + struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; + + omac_idx = mvif->omac_idx; + wmm_idx = mvif->wmm_idx; + } + + if (ext_phy && dev->mt76.phy2) + mphy = dev->mt76.phy2; + + fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2; + fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4; + + if (ieee80211_is_data(fc) || ieee80211_is_bufferable_mmpdu(fc)) { + q_idx = wmm_idx * MT7915_MAX_WMM_SETS + + skb_get_queue_mapping(skb); + p_fmt = MT_TX_TYPE_CT; + } else if (beacon) { + q_idx = MT_LMAC_BCN0; + p_fmt = MT_TX_TYPE_FW; + } else { + q_idx = MT_LMAC_ALTX0; + p_fmt = 
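/* remaining mgmt/ctrl frames go out via the ALTX queue, presumably still as cut-through (CT) packets */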
MT_TX_TYPE_CT; + } + + val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) | + FIELD_PREP(MT_TXD0_PKT_FMT, p_fmt) | + FIELD_PREP(MT_TXD0_Q_IDX, q_idx); + txwi[0] = cpu_to_le32(val); + + val = MT_TXD1_LONG_FORMAT | + FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) | + FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) | + FIELD_PREP(MT_TXD1_HDR_INFO, + ieee80211_get_hdrlen_from_skb(skb) / 2) | + FIELD_PREP(MT_TXD1_TID, + skb->priority & IEEE80211_QOS_CTL_TID_MASK) | + FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx); + if (ext_phy && q_idx >= MT_LMAC_ALTX0 && q_idx <= MT_LMAC_BCN0) + val |= MT_TXD1_TGID; + + txwi[1] = cpu_to_le32(val); + + val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) | + FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype) | + FIELD_PREP(MT_TXD2_MULTICAST, multicast); + if (key) { + if (multicast && ieee80211_is_robust_mgmt_frame(skb) && + key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) { + val |= MT_TXD2_BIP; + txwi[3] = 0; + } else { + txwi[3] = cpu_to_le32(MT_TXD3_PROTECT_FRAME); + } + } else { + txwi[3] = 0; + } + txwi[2] = cpu_to_le32(val); + + txwi[4] = 0; + txwi[5] = 0; + txwi[6] = 0; + + if (!ieee80211_is_data(fc) || multicast) { + u16 rate; + + /* hardware won't add HTC for mgmt/ctrl frame */ + txwi[2] |= cpu_to_le32(MT_TXD2_FIX_RATE | MT_TXD2_HTC_VLD); + + if (mphy->chandef.chan->band == NL80211_BAND_5GHZ) + rate = MT7915_5G_RATE_DEFAULT; + else + rate = MT7915_2G_RATE_DEFAULT; + + val = MT_TXD6_FIXED_BW | + FIELD_PREP(MT_TXD6_TX_RATE, rate); + txwi[6] |= cpu_to_le32(val); + txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE); + } + + if (!ieee80211_is_beacon(fc)) + txwi[3] |= cpu_to_le32(MT_TXD3_SW_POWER_MGMT); + else + tx_count = 0x1f; + + if (info->flags & IEEE80211_TX_CTL_NO_ACK) + txwi[3] |= cpu_to_le32(MT_TXD3_NO_ACK); + + val = FIELD_PREP(MT_TXD7_TYPE, fc_type) | + FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype); + txwi[7] = cpu_to_le32(val); + + val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count); + if (ieee80211_is_data_qos(fc)) { + seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); + val |= MT_TXD3_SN_VALID; + } else if (ieee80211_is_back_req(fc)) { + struct ieee80211_bar *bar; + + bar = (struct ieee80211_bar *)skb->data; + seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(bar->start_seq_num)); + val |= MT_TXD3_SN_VALID; + } + val |= FIELD_PREP(MT_TXD3_SEQ, seqno); + txwi[3] |= cpu_to_le32(val); +} + +int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, + enum mt76_txq_id qid, struct mt76_wcid *wcid, + struct ieee80211_sta *sta, + struct mt76_tx_info *tx_info) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data; + struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76); + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb); + struct ieee80211_key_conf *key = info->control.hw_key; + struct ieee80211_vif *vif = info->control.vif; + struct mt76_tx_cb *cb = mt76_tx_skb_cb(tx_info->skb); + struct mt76_txwi_cache *t; + struct mt7915_txp *txp; + int id, i, nbuf = tx_info->nbuf - 1; + u8 *txwi = (u8 *)txwi_ptr; + + if (!wcid) + wcid = &dev->mt76.global_wcid; + + cb->wcid = wcid->idx; + + mt7915_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, key, + false); + + txp = (struct mt7915_txp *)(txwi + MT_TXD_SIZE); + for (i = 0; i < nbuf; i++) { + txp->buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr); + txp->len[i] = cpu_to_le16(tx_info->buf[i + 1].len); + } + txp->nbuf = nbuf; + + /* pass partial skb header to fw */ + tx_info->buf[1].len = MT_CT_PARSE_LEN; + tx_info->nbuf = MT_CT_DMA_BUF_NUM; + + txp->flags = cpu_to_le16(MT_CT_INFO_APPLY_TXD); + + if (!key) + 
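/* no key: tell the firmware not to apply any cipher handling to this frame */ + 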
txp->flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME); + + if (ieee80211_is_mgmt(hdr->frame_control)) + txp->flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME); + + if (vif) { + struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; + + txp->bss_idx = mvif->idx; + } + + t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size); + t->skb = tx_info->skb; + + spin_lock_bh(&dev->token_lock); + id = idr_alloc(&dev->token, t, 0, MT7915_TOKEN_SIZE, GFP_ATOMIC); + spin_unlock_bh(&dev->token_lock); + if (id < 0) + return id; + + txp->token = cpu_to_le16(id); + txp->rept_wds_wcid = 0xff; + tx_info->skb = DMA_DUMMY_DATA; + + return 0; +} + +static inline bool +mt7915_tx_check_aggr_tid(struct mt7915_sta *msta, u8 tid) +{ + bool ret = false; + + spin_lock_bh(&msta->ampdu_lock); + if (msta->ampdu_state[tid] == MT7915_AGGR_STOP) + ret = true; + spin_unlock_bh(&msta->ampdu_lock); + + return ret; +} + +static void +mt7915_tx_check_aggr(struct ieee80211_sta *sta, struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct mt7915_sta *msta; + u16 tid; + + if (!sta->ht_cap.ht_supported) + return; + + if (skb_get_queue_mapping(skb) == IEEE80211_AC_VO) + return; + + if (unlikely(!ieee80211_is_data_qos(hdr->frame_control))) + return; + + if (unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE))) + return; + + msta = (struct mt7915_sta *)sta->drv_priv; + tid = ieee80211_get_tid(hdr); + + if (mt7915_tx_check_aggr_tid(msta, tid)) { + ieee80211_start_tx_ba_session(sta, tid, 0); + mt7915_set_aggr_state(msta, tid, MT7915_AGGR_PROGRESS); + } +} + +static inline void +mt7915_tx_status(struct ieee80211_sta *sta, struct ieee80211_hw *hw, + struct ieee80211_tx_info *info, struct sk_buff *skb) +{ + struct ieee80211_tx_status status = { + .sta = sta, + .info = info, + }; + + if (skb) + status.skb = skb; + + if (sta) { + struct mt7915_sta *msta; + + msta = (struct mt7915_sta *)sta->drv_priv; + status.rate = &msta->stats.tx_rate; + } + + /* use status_ext to report HE rate */ + ieee80211_tx_status_ext(hw, &status); +} + +static void +mt7915_tx_complete_status(struct mt76_dev *mdev, struct sk_buff *skb, + struct ieee80211_sta *sta, u8 stat) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_hw *hw; + + hw = mt76_tx_status_get_hw(mdev, skb); + + if (info->flags & IEEE80211_TX_CTL_AMPDU) + info->flags |= IEEE80211_TX_STAT_AMPDU; + else if (sta) + mt7915_tx_check_aggr(sta, skb); + + if (stat) + ieee80211_tx_info_clear_status(info); + + if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) + info->flags |= IEEE80211_TX_STAT_ACK; + + info->status.tx_time = 0; + + if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) { + mt7915_tx_status(sta, hw, info, skb); + return; + } + + if (sta || !(info->flags & IEEE80211_TX_CTL_NO_ACK)) + mt7915_tx_status(sta, hw, info, NULL); + + dev_kfree_skb(skb); +} + +void mt7915_txp_skb_unmap(struct mt76_dev *dev, + struct mt76_txwi_cache *t) +{ + struct mt7915_txp *txp; + int i; + + txp = mt7915_txwi_to_txp(dev, t); + for (i = 1; i < txp->nbuf; i++) + dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]), + le16_to_cpu(txp->len[i]), DMA_TO_DEVICE); +} + +void mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb) +{ + struct mt7915_tx_free *free = (struct mt7915_tx_free *)skb->data; + struct mt76_dev *mdev = &dev->mt76; + struct mt76_txwi_cache *txwi; + struct ieee80211_sta *sta = NULL; + u8 i, count; + + /* + * TODO: MT_TX_FREE_LATENCY is the msdu latency from the time the TXD + * is queued into PLE to the time the ack is received or the frame is + * dropped by hw (air + hw 
queue time). + * Should avoid accessing WTBL to get Tx airtime, and use it instead. + */ + count = FIELD_GET(MT_TX_FREE_MSDU_CNT, le16_to_cpu(free->ctrl)); + for (i = 0; i < count; i++) { + u32 msdu, info = le32_to_cpu(free->info[i]); + u8 stat; + + /* + * 1'b1: new wcid pair. + * 1'b0: msdu_id with the same 'wcid pair' as above. + */ + if (info & MT_TX_FREE_PAIR) { + struct mt7915_sta *msta; + struct mt76_wcid *wcid; + u16 idx; + + count++; + idx = FIELD_GET(MT_TX_FREE_WLAN_ID, info); + wcid = rcu_dereference(dev->mt76.wcid[idx]); + sta = wcid_to_sta(wcid); + if (!sta) + continue; + + msta = container_of(wcid, struct mt7915_sta, wcid); + ieee80211_queue_work(mt76_hw(dev), &msta->stats_work); + continue; + } + + msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info); + stat = FIELD_GET(MT_TX_FREE_STATUS, info); + + spin_lock_bh(&dev->token_lock); + txwi = idr_remove(&dev->token, msdu); + spin_unlock_bh(&dev->token_lock); + + if (!txwi) + continue; + + mt7915_txp_skb_unmap(mdev, txwi); + if (txwi->skb) { + mt7915_tx_complete_status(mdev, txwi->skb, sta, stat); + txwi->skb = NULL; + } + + mt76_put_txwi(mdev, txwi); + } + dev_kfree_skb(skb); +} + +void mt7915_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid, + struct mt76_queue_entry *e) +{ + struct mt7915_dev *dev; + + if (!e->txwi) { + dev_kfree_skb_any(e->skb); + return; + } + + dev = container_of(mdev, struct mt7915_dev, mt76); + + /* error path */ + if (e->skb == DMA_DUMMY_DATA) { + struct mt76_txwi_cache *t; + struct mt7915_txp *txp; + + txp = mt7915_txwi_to_txp(mdev, e->txwi); + + spin_lock_bh(&dev->token_lock); + t = idr_remove(&dev->token, le16_to_cpu(txp->token)); + spin_unlock_bh(&dev->token_lock); + e->skb = t ? t->skb : NULL; + } + + if (e->skb) { + struct mt76_tx_cb *cb = mt76_tx_skb_cb(e->skb); + struct mt76_wcid *wcid; + + wcid = rcu_dereference(dev->mt76.wcid[cb->wcid]); + + mt7915_tx_complete_status(mdev, e->skb, wcid_to_sta(wcid), 0); + } +} + +void mt7915_mac_cca_stats_reset(struct mt7915_phy *phy) +{ + struct mt7915_dev *dev = phy->dev; + bool ext_phy = phy != &dev->phy; + u32 reg = MT_WF_PHY_RX_CTRL1(ext_phy); + + mt7915_l2_clear(dev, reg, MT_WF_PHY_RX_CTRL1_STSCNT_EN); + mt7915_l2_set(dev, reg, BIT(11) | BIT(9)); +} + +void mt7915_mac_reset_counters(struct mt7915_phy *phy) +{ + struct mt7915_dev *dev = phy->dev; + bool ext_phy = phy != &dev->phy; + int i; + + for (i = 0; i < 4; i++) { + mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i)); + mt76_rr(dev, MT_TX_AGG_CNT2(ext_phy, i)); + } + + if (ext_phy) { + dev->mt76.phy2->survey_time = ktime_get_boottime(); + i = ARRAY_SIZE(dev->mt76.aggr_stats) / 2; + } else { + dev->mt76.phy.survey_time = ktime_get_boottime(); + i = 0; + } + memset(&dev->mt76.aggr_stats[i], 0, sizeof(dev->mt76.aggr_stats) / 2); + + /* reset airtime counters */ + mt76_rr(dev, MT_MIB_SDR9(ext_phy)); + mt76_rr(dev, MT_MIB_SDR36(ext_phy)); + mt76_rr(dev, MT_MIB_SDR37(ext_phy)); + + mt76_set(dev, MT_WF_RMAC_MIB_TIME0(ext_phy), + MT_WF_RMAC_MIB_RXTIME_CLR); + mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(ext_phy), + MT_WF_RMAC_MIB_RXTIME_CLR); +} + +void mt7915_mac_set_timing(struct mt7915_phy *phy) +{ + s16 coverage_class = phy->coverage_class; + struct mt7915_dev *dev = phy->dev; + bool ext_phy = phy != &dev->phy; + u32 val, reg_offset; + u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) | + FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48); + u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) | + FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28); + int sifs, offset; + bool is_5ghz = phy->mt76->chandef.chan->band == NL80211_BAND_5GHZ; + + if 
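/* defer timing programming until the phy is actually running */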
(!test_bit(MT76_STATE_RUNNING, &phy->mt76->state)) + return; + + if (is_5ghz) + sifs = 16; + else + sifs = 10; + + if (ext_phy) { + coverage_class = max_t(s16, dev->phy.coverage_class, + coverage_class); + } else { + struct mt7915_phy *phy_ext = mt7915_ext_phy(dev); + + if (phy_ext) + coverage_class = max_t(s16, phy_ext->coverage_class, + coverage_class); + } + mt76_set(dev, MT_ARB_SCR(ext_phy), + MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE); + udelay(1); + + offset = 3 * coverage_class; + reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) | + FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset); + + mt76_wr(dev, MT_TMAC_CDTR(ext_phy), cck + reg_offset); + mt76_wr(dev, MT_TMAC_ODTR(ext_phy), ofdm + reg_offset); + mt76_wr(dev, MT_TMAC_ICR0(ext_phy), + FIELD_PREP(MT_IFS_EIFS, 360) | + FIELD_PREP(MT_IFS_RIFS, 2) | + FIELD_PREP(MT_IFS_SIFS, sifs) | + FIELD_PREP(MT_IFS_SLOT, phy->slottime)); + + if (phy->slottime < 20 || is_5ghz) + val = MT7915_CFEND_RATE_DEFAULT; + else + val = MT7915_CFEND_RATE_11B; + + mt76_rmw_field(dev, MT_AGG_ACR0(ext_phy), MT_AGG_ACR_CFEND_RATE, val); + mt76_clear(dev, MT_ARB_SCR(ext_phy), + MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE); +} + +/* + * TODO: MIB counters are read-clear, and many HE features need this info, + * hence the firmware prepares a task that reads the fields out to a shared + * structure. Users should switch to the event format to avoid race conditions. + */ +static void +mt7915_phy_update_channel(struct mt76_phy *mphy, int idx) +{ + struct mt7915_dev *dev = container_of(mphy->dev, struct mt7915_dev, mt76); + struct mt76_channel_state *state; + u64 busy_time, tx_time, rx_time, obss_time; + + busy_time = mt76_get_field(dev, MT_MIB_SDR9(idx), + MT_MIB_SDR9_BUSY_MASK); + tx_time = mt76_get_field(dev, MT_MIB_SDR36(idx), + MT_MIB_SDR36_TXTIME_MASK); + rx_time = mt76_get_field(dev, MT_MIB_SDR37(idx), + MT_MIB_SDR37_RXTIME_MASK); + obss_time = mt76_get_field(dev, MT_WF_RMAC_MIB_AIRTIME14(idx), + MT_MIB_OBSSTIME_MASK); + + /* TODO: state->noise */ + state = mphy->chan_state; + state->cc_busy += busy_time; + state->cc_tx += tx_time; + state->cc_rx += rx_time + obss_time; + state->cc_bss_rx += rx_time; +} + +void mt7915_update_channel(struct mt76_dev *mdev) +{ + struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76); + + mt7915_phy_update_channel(&mdev->phy, 0); + if (mdev->phy2) + mt7915_phy_update_channel(mdev->phy2, 1); + + /* reset obss airtime */ + mt76_set(dev, MT_WF_RMAC_MIB_TIME0(0), MT_WF_RMAC_MIB_RXTIME_CLR); + if (mdev->phy2) + mt76_set(dev, MT_WF_RMAC_MIB_TIME0(1), + MT_WF_RMAC_MIB_RXTIME_CLR); +} + +static bool +mt7915_wait_reset_state(struct mt7915_dev *dev, u32 state) +{ + bool ret; + + ret = wait_event_timeout(dev->reset_wait, + (READ_ONCE(dev->reset_state) & state), + MT7915_RESET_TIMEOUT); + + WARN(!ret, "Timeout waiting for MCU reset state %x\n", state); + return ret; +} + +static void +mt7915_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif) +{ + struct ieee80211_hw *hw = priv; + + mt7915_mcu_add_beacon(hw, vif, vif->bss_conf.enable_beacon); +} + +static void +mt7915_update_beacons(struct mt7915_dev *dev) +{ + ieee80211_iterate_active_interfaces(dev->mt76.hw, + IEEE80211_IFACE_ITER_RESUME_ALL, + mt7915_update_vif_beacon, dev->mt76.hw); + + if (!dev->mt76.phy2) + return; + + ieee80211_iterate_active_interfaces(dev->mt76.phy2->hw, + IEEE80211_IFACE_ITER_RESUME_ALL, + mt7915_update_vif_beacon, dev->mt76.phy2->hw); +} + +static void +mt7915_dma_reset(struct mt7915_dev *dev) +{ + int i; + + mt76_clear(dev, 
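/* stop Tx/Rx DMA on both WFDMA engines before draining and re-enabling the queues */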
MT_WFDMA0_GLO_CFG, + MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN); + mt76_clear(dev, MT_WFDMA1_GLO_CFG, + MT_WFDMA1_GLO_CFG_TX_DMA_EN | MT_WFDMA1_GLO_CFG_RX_DMA_EN); + usleep_range(1000, 2000); + + for (i = 0; i < __MT_TXQ_MAX; i++) + mt76_queue_tx_cleanup(dev, i, true); + + mt76_for_each_q_rx(&dev->mt76, i) { + mt76_queue_rx_reset(dev, i); + } + + /* re-init prefetch settings after reset */ + mt7915_dma_prefetch(dev); + + mt76_set(dev, MT_WFDMA0_GLO_CFG, + MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN); + mt76_set(dev, MT_WFDMA1_GLO_CFG, + MT_WFDMA1_GLO_CFG_TX_DMA_EN | MT_WFDMA1_GLO_CFG_RX_DMA_EN); +} + +/* system error recovery */ +void mt7915_mac_reset_work(struct work_struct *work) +{ + struct mt7915_phy *phy2; + struct mt76_phy *ext_phy; + struct mt7915_dev *dev; + + dev = container_of(work, struct mt7915_dev, reset_work); + ext_phy = dev->mt76.phy2; + phy2 = ext_phy ? ext_phy->priv : NULL; + + if (!(READ_ONCE(dev->reset_state) & MT_MCU_CMD_STOP_DMA)) + return; + + ieee80211_stop_queues(mt76_hw(dev)); + if (ext_phy) + ieee80211_stop_queues(ext_phy->hw); + + set_bit(MT76_RESET, &dev->mphy.state); + set_bit(MT76_MCU_RESET, &dev->mphy.state); + wake_up(&dev->mt76.mcu.wait); + cancel_delayed_work_sync(&dev->phy.mac_work); + if (phy2) + cancel_delayed_work_sync(&phy2->mac_work); + + /* lock/unlock all queues to ensure that no tx is pending */ + mt76_txq_schedule_all(&dev->mphy); + if (ext_phy) + mt76_txq_schedule_all(ext_phy); + + tasklet_disable(&dev->mt76.tx_tasklet); + napi_disable(&dev->mt76.napi[0]); + napi_disable(&dev->mt76.napi[1]); + napi_disable(&dev->mt76.napi[2]); + napi_disable(&dev->mt76.tx_napi); + + mutex_lock(&dev->mt76.mutex); + + mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED); + + if (mt7915_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) { + mt7915_dma_reset(dev); + + mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT); + mt7915_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE); + } + + clear_bit(MT76_MCU_RESET, &dev->mphy.state); + clear_bit(MT76_RESET, &dev->mphy.state); + + tasklet_enable(&dev->mt76.tx_tasklet); + napi_enable(&dev->mt76.tx_napi); + napi_schedule(&dev->mt76.tx_napi); + + napi_enable(&dev->mt76.napi[0]); + napi_schedule(&dev->mt76.napi[0]); + + napi_enable(&dev->mt76.napi[1]); + napi_schedule(&dev->mt76.napi[1]); + + napi_enable(&dev->mt76.napi[2]); + napi_schedule(&dev->mt76.napi[2]); + + ieee80211_wake_queues(mt76_hw(dev)); + if (ext_phy) + ieee80211_wake_queues(ext_phy->hw); + + mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE); + mt7915_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE); + + mutex_unlock(&dev->mt76.mutex); + + mt7915_update_beacons(dev); + + ieee80211_queue_delayed_work(mt76_hw(dev), &dev->phy.mac_work, + MT7915_WATCHDOG_TIME); + if (phy2) + ieee80211_queue_delayed_work(ext_phy->hw, &phy2->mac_work, + MT7915_WATCHDOG_TIME); +} + +static void +mt7915_mac_update_mib_stats(struct mt7915_phy *phy) +{ + struct mt7915_dev *dev = phy->dev; + struct mib_stats *mib = &phy->mib; + bool ext_phy = phy != &dev->phy; + int i, aggr0, aggr1; + + memset(mib, 0, sizeof(*mib)); + + mib->fcs_err_cnt = mt76_get_field(dev, MT_MIB_SDR3(ext_phy), + MT_MIB_SDR3_FCS_ERR_MASK); + + aggr0 = ext_phy ? 
ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0; + for (i = 0, aggr1 = aggr0 + 4; i < 4; i++) { + u32 val, val2; + + val = mt76_rr(dev, MT_MIB_MB_SDR1(ext_phy, i)); + + val2 = FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val); + if (val2 > mib->ack_fail_cnt) + mib->ack_fail_cnt = val2; + + val2 = FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val); + if (val2 > mib->ba_miss_cnt) + mib->ba_miss_cnt = val2; + + val = mt76_rr(dev, MT_MIB_MB_SDR0(ext_phy, i)); + val2 = FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val); + if (val2 > mib->rts_retries_cnt) { + mib->rts_cnt = FIELD_GET(MT_MIB_RTS_COUNT_MASK, val); + mib->rts_retries_cnt = val2; + } + + val = mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i)); + val2 = mt76_rr(dev, MT_TX_AGG_CNT2(ext_phy, i)); + + dev->mt76.aggr_stats[aggr0++] += val & 0xffff; + dev->mt76.aggr_stats[aggr0++] += val >> 16; + dev->mt76.aggr_stats[aggr1++] += val2 & 0xffff; + dev->mt76.aggr_stats[aggr1++] += val2 >> 16; + } +} + +void mt7915_mac_sta_stats_work(struct work_struct *work) +{ + struct ieee80211_sta *sta; + struct ieee80211_vif *vif; + struct mt7915_sta_stats *stats; + struct mt7915_sta *msta; + struct mt7915_dev *dev; + + msta = container_of(work, struct mt7915_sta, stats_work); + sta = container_of((void *)msta, struct ieee80211_sta, drv_priv); + vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv); + dev = msta->vif->dev; + stats = &msta->stats; + + /* use MT_TX_FREE_RATE to report Tx rate for further devices */ + if (time_after(jiffies, stats->jiffies + HZ)) { + mt7915_mcu_get_rate_info(dev, RATE_CTRL_RU_INFO, + msta->wcid.idx); + + stats->jiffies = jiffies; + } + + if (test_and_clear_bit(IEEE80211_RC_SUPP_RATES_CHANGED | + IEEE80211_RC_NSS_CHANGED | + IEEE80211_RC_BW_CHANGED, &stats->changed)) + mt7915_mcu_add_rate_ctrl(dev, vif, sta); + + if (test_and_clear_bit(IEEE80211_RC_SMPS_CHANGED, &stats->changed)) + mt7915_mcu_add_smps(dev, vif, sta); + + spin_lock_bh(&dev->sta_poll_lock); + if (list_empty(&msta->poll_list)) + list_add_tail(&msta->poll_list, &dev->sta_poll_list); + spin_unlock_bh(&dev->sta_poll_lock); +} + +void mt7915_mac_work(struct work_struct *work) +{ + struct mt7915_phy *phy; + struct mt76_dev *mdev; + + phy = (struct mt7915_phy *)container_of(work, struct mt7915_phy, + mac_work.work); + mdev = &phy->dev->mt76; + + mutex_lock(&mdev->mutex); + + mt76_update_survey(mdev); + if (++phy->mac_work_count == 5) { + phy->mac_work_count = 0; + + mt7915_mac_update_mib_stats(phy); + } + + mutex_unlock(&mdev->mutex); + + ieee80211_queue_delayed_work(phy->mt76->hw, &phy->mac_work, + MT7915_WATCHDOG_TIME); +} + +static void mt7915_dfs_stop_radar_detector(struct mt7915_phy *phy) +{ + struct mt7915_dev *dev = phy->dev; + + if (phy->rdd_state & BIT(0)) + mt7915_mcu_rdd_cmd(dev, RDD_STOP, 0, MT_RX_SEL0, 0); + if (phy->rdd_state & BIT(1)) + mt7915_mcu_rdd_cmd(dev, RDD_STOP, 1, MT_RX_SEL0, 0); +} + +static int mt7915_dfs_start_rdd(struct mt7915_dev *dev, int chain) +{ + int err; + + err = mt7915_mcu_rdd_cmd(dev, RDD_START, chain, MT_RX_SEL0, 0); + if (err < 0) + return err; + + return mt7915_mcu_rdd_cmd(dev, RDD_DET_MODE, chain, MT_RX_SEL0, 1); +} + +static int mt7915_dfs_start_radar_detector(struct mt7915_phy *phy) +{ + struct cfg80211_chan_def *chandef = &phy->mt76->chandef; + struct mt7915_dev *dev = phy->dev; + bool ext_phy = phy != &dev->phy; + int err; + + /* start CAC */ + err = mt7915_mcu_rdd_cmd(dev, RDD_CAC_START, ext_phy, MT_RX_SEL0, 0); + if (err < 0) + return err; + + err = mt7915_dfs_start_rdd(dev, ext_phy); + if (err < 0) + return err; + + phy->rdd_state |= 
BIT(ext_phy); + + if (chandef->width == NL80211_CHAN_WIDTH_160 || + chandef->width == NL80211_CHAN_WIDTH_80P80) { + err = mt7915_dfs_start_rdd(dev, 1); + if (err < 0) + return err; + + phy->rdd_state |= BIT(1); + } + + return 0; +} + +static int +mt7915_dfs_init_radar_specs(struct mt7915_phy *phy) +{ + const struct mt7915_dfs_radar_spec *radar_specs; + struct mt7915_dev *dev = phy->dev; + int err, i; + + switch (dev->mt76.region) { + case NL80211_DFS_FCC: + radar_specs = &fcc_radar_specs; + err = mt7915_mcu_set_fcc5_lpn(dev, 8); + if (err < 0) + return err; + break; + case NL80211_DFS_ETSI: + radar_specs = &etsi_radar_specs; + break; + case NL80211_DFS_JP: + radar_specs = &jp_radar_specs; + break; + default: + return -EINVAL; + } + + for (i = 0; i < ARRAY_SIZE(radar_specs->radar_pattern); i++) { + err = mt7915_mcu_set_radar_th(dev, i, + &radar_specs->radar_pattern[i]); + if (err < 0) + return err; + } + + return mt7915_mcu_set_pulse_th(dev, &radar_specs->pulse_th); +} + +int mt7915_dfs_init_radar_detector(struct mt7915_phy *phy) +{ + struct cfg80211_chan_def *chandef = &phy->mt76->chandef; + struct mt7915_dev *dev = phy->dev; + bool ext_phy = phy != &dev->phy; + int err; + + if (dev->mt76.region == NL80211_DFS_UNSET) { + phy->dfs_state = -1; + if (phy->rdd_state) + goto stop; + + return 0; + } + + if (test_bit(MT76_SCANNING, &phy->mt76->state)) + return 0; + + if (phy->dfs_state == chandef->chan->dfs_state) + return 0; + + err = mt7915_dfs_init_radar_specs(phy); + if (err < 0) { + phy->dfs_state = -1; + goto stop; + } + + phy->dfs_state = chandef->chan->dfs_state; + + if (chandef->chan->flags & IEEE80211_CHAN_RADAR) { + if (chandef->chan->dfs_state != NL80211_DFS_AVAILABLE) + return mt7915_dfs_start_radar_detector(phy); + + return mt7915_mcu_rdd_cmd(dev, RDD_CAC_END, ext_phy, + MT_RX_SEL0, 0); + } + +stop: + err = mt7915_mcu_rdd_cmd(dev, RDD_NORMAL_START, ext_phy, + MT_RX_SEL0, 0); + if (err < 0) + return err; + + mt7915_dfs_stop_radar_detector(phy); + return 0; +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.h b/drivers/net/wireless/mediatek/mt76/mt7915/mac.h new file mode 100644 index 000000000000..b9bc8b25b031 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.h @@ -0,0 +1,346 @@ +/* SPDX-License-Identifier: ISC */ +/* Copyright (C) 2020 MediaTek Inc. 
*/ + +#ifndef __MT7915_MAC_H +#define __MT7915_MAC_H + +#define MT_CT_PARSE_LEN 72 +#define MT_CT_DMA_BUF_NUM 2 + +#define MT_RXD0_LENGTH GENMASK(15, 0) +#define MT_RXD0_PKT_TYPE GENMASK(31, 27) + +#define MT_RXD0_NORMAL_ETH_TYPE_OFS GENMASK(22, 16) +#define MT_RXD0_NORMAL_IP_SUM BIT(23) +#define MT_RXD0_NORMAL_UDP_TCP_SUM BIT(24) + +enum rx_pkt_type { + PKT_TYPE_TXS, + PKT_TYPE_TXRXV, + PKT_TYPE_NORMAL, + PKT_TYPE_RX_DUP_RFB, + PKT_TYPE_RX_TMR, + PKT_TYPE_RETRIEVE, + PKT_TYPE_TXRX_NOTIFY, + PKT_TYPE_RX_EVENT, +}; + +/* RXD DW1 */ +#define MT_RXD1_NORMAL_WLAN_IDX GENMASK(9, 0) +#define MT_RXD1_NORMAL_GROUP_1 BIT(11) +#define MT_RXD1_NORMAL_GROUP_2 BIT(12) +#define MT_RXD1_NORMAL_GROUP_3 BIT(13) +#define MT_RXD1_NORMAL_GROUP_4 BIT(14) +#define MT_RXD1_NORMAL_GROUP_5 BIT(15) +#define MT_RXD1_NORMAL_SEC_MODE GENMASK(20, 16) +#define MT_RXD1_NORMAL_KEY_ID GENMASK(22, 21) +#define MT_RXD1_NORMAL_CM BIT(23) +#define MT_RXD1_NORMAL_CLM BIT(24) +#define MT_RXD1_NORMAL_ICV_ERR BIT(25) +#define MT_RXD1_NORMAL_TKIP_MIC_ERR BIT(26) +#define MT_RXD1_NORMAL_FCS_ERR BIT(27) +#define MT_RXD1_NORMAL_BAND_IDX BIT(28) +#define MT_RXD1_NORMAL_SPP_EN BIT(29) +#define MT_RXD1_NORMAL_ADD_OM BIT(30) +#define MT_RXD1_NORMAL_SEC_DONE BIT(31) + +/* RXD DW2 */ +#define MT_RXD2_NORMAL_BSSID GENMASK(5, 0) +#define MT_RXD2_NORMAL_CO_ANT BIT(6) +#define MT_RXD2_NORMAL_BF_CQI BIT(7) +#define MT_RXD2_NORMAL_MAC_HDR_LEN GENMASK(12, 8) +#define MT_RXD2_NORMAL_HDR_TRANS BIT(13) +#define MT_RXD2_NORMAL_HDR_OFFSET GENMASK(15, 14) +#define MT_RXD2_NORMAL_TID GENMASK(19, 16) +#define MT_RXD2_NORMAL_MU_BAR BIT(21) +#define MT_RXD2_NORMAL_SW_BIT BIT(22) +#define MT_RXD2_NORMAL_AMSDU_ERR BIT(23) +#define MT_RXD2_NORMAL_MAX_LEN_ERROR BIT(24) +#define MT_RXD2_NORMAL_HDR_TRANS_ERROR BIT(25) +#define MT_RXD2_NORMAL_INT_FRAME BIT(26) +#define MT_RXD2_NORMAL_FRAG BIT(27) +#define MT_RXD2_NORMAL_NULL_FRAME BIT(28) +#define MT_RXD2_NORMAL_NDATA BIT(29) +#define MT_RXD2_NORMAL_NON_AMPDU BIT(30) +#define MT_RXD2_NORMAL_BF_REPORT BIT(31) + +/* RXD DW3 */ +#define MT_RXD3_NORMAL_RXV_SEQ GENMASK(7, 0) +#define MT_RXD3_NORMAL_CH_FREQ GENMASK(15, 8) +#define MT_RXD3_NORMAL_ADDR_TYPE GENMASK(17, 16) +#define MT_RXD3_NORMAL_U2M BIT(0) +#define MT_RXD3_NORMAL_HTC_VLD BIT(0) +#define MT_RXD3_NORMAL_TSF_COMPARE_LOSS BIT(19) +#define MT_RXD3_NORMAL_BEACON_MC BIT(20) +#define MT_RXD3_NORMAL_BEACON_UC BIT(21) +#define MT_RXD3_NORMAL_AMSDU BIT(22) +#define MT_RXD3_NORMAL_MESH BIT(23) +#define MT_RXD3_NORMAL_MHCP BIT(24) +#define MT_RXD3_NORMAL_NO_INFO_WB BIT(25) +#define MT_RXD3_NORMAL_DISABLE_RX_HDR_TRANS BIT(26) +#define MT_RXD3_NORMAL_POWER_SAVE_STAT BIT(27) +#define MT_RXD3_NORMAL_MORE BIT(28) +#define MT_RXD3_NORMAL_UNWANT BIT(29) +#define MT_RXD3_NORMAL_RX_DROP BIT(30) +#define MT_RXD3_NORMAL_VLAN2ETH BIT(31) + +/* RXD DW4 */ +#define MT_RXD4_NORMAL_PAYLOAD_FORMAT GENMASK(1, 0) +#define MT_RXD4_NORMAL_PATTERN_DROP BIT(9) +#define MT_RXD4_NORMAL_CLS BIT(10) +#define MT_RXD4_NORMAL_OFLD GENMASK(12, 11) +#define MT_RXD4_NORMAL_MAGIC_PKT BIT(13) +#define MT_RXD4_NORMAL_WOL GENMASK(18, 14) +#define MT_RXD4_NORMAL_CLS_BITMAP GENMASK(28, 19) +#define MT_RXD3_NORMAL_PF_MODE BIT(29) +#define MT_RXD3_NORMAL_PF_STS GENMASK(31, 30) + +/* P-RXV */ +#define MT_PRXV_TX_RATE GENMASK(6, 0) +#define MT_PRXV_TX_DCM BIT(4) +#define MT_PRXV_TX_ER_SU_106T BIT(5) +#define MT_PRXV_NSTS GENMASK(9, 7) +#define MT_PRXV_HT_AD_CODE BIT(11) +#define MT_PRXV_HE_RU_ALLOC_L GENMASK(31, 28) +#define MT_PRXV_HE_RU_ALLOC_H GENMASK(3, 0) +#define MT_PRXV_RCPI3 GENMASK(31, 24) +#define 
MT_PRXV_RCPI2 GENMASK(23, 16) +#define MT_PRXV_RCPI1 GENMASK(15, 8) +#define MT_PRXV_RCPI0 GENMASK(7, 0) + +/* C-RXV */ +#define MT_CRXV_HT_STBC GENMASK(1, 0) +#define MT_CRXV_TX_MODE GENMASK(7, 4) +#define MT_CRXV_FRAME_MODE GENMASK(10, 8) +#define MT_CRXV_HT_SHORT_GI GENMASK(14, 13) +#define MT_CRXV_HE_LTF_SIZE GENMASK(18, 17) +#define MT_CRXV_HE_LDPC_EXT_SYM BIT(20) +#define MT_CRXV_HE_PE_DISAMBIG BIT(23) +#define MT_CRXV_HE_UPLINK BIT(31) + +#define MT_CRXV_HE_SR_MASK GENMASK(11, 8) +#define MT_CRXV_HE_SR1_MASK GENMASK(16, 12) +#define MT_CRXV_HE_SR2_MASK GENMASK(20, 17) +#define MT_CRXV_HE_SR3_MASK GENMASK(24, 21) + +#define MT_CRXV_HE_BSS_COLOR GENMASK(5, 0) +#define MT_CRXV_HE_TXOP_DUR GENMASK(12, 6) +#define MT_CRXV_HE_BEAM_CHNG BIT(13) +#define MT_CRXV_HE_DOPPLER BIT(16) + +struct mt7915_rxv { + u32 phy; + + /* P-RXV: bit 0~1, C-RXV: bit 2~19 */ + __le32 v[20]; +}; + +enum tx_header_format { + MT_HDR_FORMAT_802_3, + MT_HDR_FORMAT_CMD, + MT_HDR_FORMAT_802_11, + MT_HDR_FORMAT_802_11_EXT, +}; + +enum tx_pkt_type { + MT_TX_TYPE_CT, + MT_TX_TYPE_SF, + MT_TX_TYPE_CMD, + MT_TX_TYPE_FW, +}; + +enum tx_pkt_queue_idx { + MT_LMAC_AC00, + MT_LMAC_AC01, + MT_LMAC_AC02, + MT_LMAC_AC03, + MT_LMAC_ALTX0 = 0x10, + MT_LMAC_BMC0 = 0x10, + MT_LMAC_BCN0 = 0x12, +}; + +enum tx_port_idx { + MT_TX_PORT_IDX_LMAC, + MT_TX_PORT_IDX_MCU +}; + +enum tx_mcu_port_q_idx { + MT_TX_MCU_PORT_RX_Q0 = 0x20, + MT_TX_MCU_PORT_RX_Q1, + MT_TX_MCU_PORT_RX_Q2, + MT_TX_MCU_PORT_RX_Q3, + MT_TX_MCU_PORT_RX_FWDL = 0x3e +}; + +#define MT_CT_INFO_APPLY_TXD BIT(0) +#define MT_CT_INFO_COPY_HOST_TXD_ALL BIT(1) +#define MT_CT_INFO_MGMT_FRAME BIT(2) +#define MT_CT_INFO_NONE_CIPHER_FRAME BIT(3) +#define MT_CT_INFO_HSR2_TX BIT(4) + +#define MT_TXD_SIZE (8 * 4) + +#define MT_TXD0_Q_IDX GENMASK(31, 25) +#define MT_TXD0_PKT_FMT GENMASK(24, 23) +#define MT_TXD0_ETH_TYPE_OFFSET GENMASK(22, 16) +#define MT_TXD0_TX_BYTES GENMASK(15, 0) + +#define MT_TXD1_LONG_FORMAT BIT(31) +#define MT_TXD1_TGID BIT(30) +#define MT_TXD1_OWN_MAC GENMASK(29, 24) +#define MT_TXD1_AMSDU BIT(23) +#define MT_TXD1_TID GENMASK(22, 20) +#define MT_TXD1_HDR_PAD GENMASK(19, 18) +#define MT_TXD1_HDR_FORMAT GENMASK(17, 16) +#define MT_TXD1_HDR_INFO GENMASK(15, 11) +#define MT_TXD1_VTA BIT(10) +#define MT_TXD1_WLAN_IDX GENMASK(9, 0) + +#define MT_TXD2_FIX_RATE BIT(31) +#define MT_TXD2_FIXED_RATE BIT(30) +#define MT_TXD2_POWER_OFFSET GENMASK(29, 24) +#define MT_TXD2_MAX_TX_TIME GENMASK(23, 16) +#define MT_TXD2_FRAG GENMASK(15, 14) +#define MT_TXD2_HTC_VLD BIT(13) +#define MT_TXD2_DURATION BIT(12) +#define MT_TXD2_BIP BIT(11) +#define MT_TXD2_MULTICAST BIT(10) +#define MT_TXD2_RTS BIT(9) +#define MT_TXD2_SOUNDING BIT(8) +#define MT_TXD2_NDPA BIT(7) +#define MT_TXD2_NDP BIT(6) +#define MT_TXD2_FRAME_TYPE GENMASK(5, 4) +#define MT_TXD2_SUB_TYPE GENMASK(3, 0) + +#define MT_TXD3_SN_VALID BIT(31) +#define MT_TXD3_PN_VALID BIT(30) +#define MT_TXD3_SW_POWER_MGMT BIT(29) +#define MT_TXD3_BA_DISABLE BIT(28) +#define MT_TXD3_SEQ GENMASK(27, 16) +#define MT_TXD3_REM_TX_COUNT GENMASK(15, 11) +#define MT_TXD3_TX_COUNT GENMASK(10, 6) +#define MT_TXD3_TIMING_MEASURE BIT(5) +#define MT_TXD3_DAS BIT(4) +#define MT_TXD3_EEOSP BIT(3) +#define MT_TXD3_EMRD BIT(2) +#define MT_TXD3_PROTECT_FRAME BIT(1) +#define MT_TXD3_NO_ACK BIT(0) + +#define MT_TXD4_PN_LOW GENMASK(31, 0) + +#define MT_TXD5_PN_HIGH GENMASK(31, 16) +#define MT_TXD5_MD BIT(15) +#define MT_TXD5_ADD_BA BIT(14) +#define MT_TXD5_TX_STATUS_HOST BIT(10) +#define MT_TXD5_TX_STATUS_MCU BIT(9) +#define MT_TXD5_TX_STATUS_FMT BIT(8) +#define 
MT_TXD5_PID GENMASK(7, 0) + +#define MT_TXD6_TX_IBF BIT(31) +#define MT_TXD6_TX_EBF BIT(30) +#define MT_TXD6_TX_RATE GENMASK(29, 16) +#define MT_TXD6_SGI GENMASK(15, 14) +#define MT_TXD6_HELTF GENMASK(13, 12) +#define MT_TXD6_LDPC BIT(11) +#define MT_TXD6_SPE_ID_IDX BIT(10) +#define MT_TXD6_ANT_ID GENMASK(7, 4) +#define MT_TXD6_DYN_BW BIT(3) +#define MT_TXD6_FIXED_BW BIT(2) +#define MT_TXD6_BW GENMASK(2, 0) + +#define MT_TXD7_TXD_LEN GENMASK(31, 30) +#define MT_TXD7_UDP_TCP_SUM BIT(29) +#define MT_TXD7_IP_SUM BIT(28) + +#define MT_TXD7_TYPE GENMASK(21, 20) +#define MT_TXD7_SUB_TYPE GENMASK(19, 16) + +#define MT_TXD7_PSE_FID GENMASK(27, 16) +#define MT_TXD7_SPE_IDX GENMASK(15, 11) +#define MT_TXD7_HW_AMSDU BIT(10) +#define MT_TXD7_TX_TIME GENMASK(9, 0) + +#define MT_TX_RATE_STBC BIT(13) +#define MT_TX_RATE_NSS GENMASK(12, 10) +#define MT_TX_RATE_MODE GENMASK(9, 6) +#define MT_TX_RATE_IDX GENMASK(5, 0) + +#define MT_TXP_MAX_BUF_NUM 6 + +struct mt7915_txp { + __le16 flags; + __le16 token; + u8 bss_idx; + u8 rept_wds_wcid; + u8 rsv; + u8 nbuf; + __le32 buf[MT_TXP_MAX_BUF_NUM]; + __le16 len[MT_TXP_MAX_BUF_NUM]; +} __packed __aligned(4); + +struct mt7915_tx_free { + __le16 rx_byte_cnt; + __le16 ctrl; + u8 txd_cnt; + u8 rsv[3]; + __le32 info[]; +} __packed __aligned(4); + +#define MT_TX_FREE_MSDU_CNT GENMASK(9, 0) +#define MT_TX_FREE_WLAN_ID GENMASK(23, 14) +#define MT_TX_FREE_LATENCY GENMASK(12, 0) +/* 0: success, others: dropped */ +#define MT_TX_FREE_STATUS GENMASK(14, 13) +#define MT_TX_FREE_MSDU_ID GENMASK(30, 16) +#define MT_TX_FREE_PAIR BIT(31) +/* will support this field in further revision */ +#define MT_TX_FREE_RATE GENMASK(13, 0) + +struct mt7915_dfs_pulse { + u32 max_width; /* us */ + int max_pwr; /* dbm */ + int min_pwr; /* dbm */ + u32 min_stgr_pri; /* us */ + u32 max_stgr_pri; /* us */ + u32 min_cr_pri; /* us */ + u32 max_cr_pri; /* us */ +}; + +struct mt7915_dfs_pattern { + u8 enb; + u8 stgr; + u8 min_crpn; + u8 max_crpn; + u8 min_crpr; + u8 min_pw; + u32 min_pri; + u32 max_pri; + u8 max_pw; + u8 min_crbn; + u8 max_crbn; + u8 min_stgpn; + u8 max_stgpn; + u8 min_stgpr; + u8 rsv[2]; + u32 min_stgpr_diff; +} __packed; + +struct mt7915_dfs_radar_spec { + struct mt7915_dfs_pulse pulse_th; + struct mt7915_dfs_pattern radar_pattern[16]; +}; + +static inline struct mt7915_txp * +mt7915_txwi_to_txp(struct mt76_dev *dev, struct mt76_txwi_cache *t) +{ + u8 *txwi; + + if (!t) + return NULL; + + txwi = mt76_get_txwi_ptr(dev, t); + + return (struct mt7915_txp *)(txwi + MT_TXD_SIZE); +} + +#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c new file mode 100644 index 000000000000..0575c259f245 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c @@ -0,0 +1,838 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2020 MediaTek Inc. 
*/ + +#include <linux/etherdevice.h> +#include <linux/platform_device.h> +#include <linux/pci.h> +#include <linux/module.h> +#include "mt7915.h" +#include "mcu.h" + +static bool mt7915_dev_running(struct mt7915_dev *dev) +{ + struct mt7915_phy *phy; + + if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state)) + return true; + + phy = mt7915_ext_phy(dev); + + return phy && test_bit(MT76_STATE_RUNNING, &phy->mt76->state); +} + +static int mt7915_start(struct ieee80211_hw *hw) +{ + struct mt7915_dev *dev = mt7915_hw_dev(hw); + struct mt7915_phy *phy = mt7915_hw_phy(hw); + bool running; + + mutex_lock(&dev->mt76.mutex); + + running = mt7915_dev_running(dev); + + if (!running) { + mt7915_mcu_set_pm(dev, 0, 0); + mt7915_mcu_set_mac(dev, 0, true, false); + mt7915_mcu_set_scs(dev, 0, true); + } + + if (phy != &dev->phy) { + mt7915_mcu_set_pm(dev, 1, 0); + mt7915_mcu_set_mac(dev, 1, true, false); + mt7915_mcu_set_scs(dev, 1, true); + } + + mt7915_mcu_set_sku_en(phy, true); + mt7915_mcu_set_chan_info(phy, MCU_EXT_CMD_SET_RX_PATH); + + set_bit(MT76_STATE_RUNNING, &phy->mt76->state); + + ieee80211_queue_delayed_work(hw, &phy->mac_work, + MT7915_WATCHDOG_TIME); + + if (!running) + mt7915_mac_reset_counters(phy); + + mutex_unlock(&dev->mt76.mutex); + + return 0; +} + +static void mt7915_stop(struct ieee80211_hw *hw) +{ + struct mt7915_dev *dev = mt7915_hw_dev(hw); + struct mt7915_phy *phy = mt7915_hw_phy(hw); + + cancel_delayed_work_sync(&phy->mac_work); + + mutex_lock(&dev->mt76.mutex); + + clear_bit(MT76_STATE_RUNNING, &phy->mt76->state); + + if (phy != &dev->phy) { + mt7915_mcu_set_pm(dev, 1, 1); + mt7915_mcu_set_mac(dev, 1, false, false); + } + + if (!mt7915_dev_running(dev)) { + mt7915_mcu_set_pm(dev, 0, 1); + mt7915_mcu_set_mac(dev, 0, false, false); + } + + mutex_unlock(&dev->mt76.mutex); +} + +static int get_omac_idx(enum nl80211_iftype type, u32 mask) +{ + int i; + + switch (type) { + case NL80211_IFTYPE_MONITOR: + case NL80211_IFTYPE_AP: + /* ap uses hw bssid 0 and ext bssid */ + if (~mask & BIT(HW_BSSID_0)) + return HW_BSSID_0; + + for (i = EXT_BSSID_1; i < EXT_BSSID_END; i++) + if (~mask & BIT(i)) + return i; + break; + case NL80211_IFTYPE_MESH_POINT: + case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_STATION: + /* station uses hw bssid other than 0 */ + for (i = HW_BSSID_1; i < HW_BSSID_MAX; i++) + if (~mask & BIT(i)) + return i; + break; + default: + WARN_ON(1); + break; + } + + return -1; +} + +static int mt7915_add_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; + struct mt7915_dev *dev = mt7915_hw_dev(hw); + struct mt7915_phy *phy = mt7915_hw_phy(hw); + struct mt76_txq *mtxq; + bool ext_phy = phy != &dev->phy; + int idx, ret = 0; + + mutex_lock(&dev->mt76.mutex); + + mvif->idx = ffs(~phy->vif_mask) - 1; + if (mvif->idx >= MT7915_MAX_INTERFACES) { + ret = -ENOSPC; + goto out; + } + + idx = get_omac_idx(vif->type, phy->omac_mask); + if (idx < 0) { + ret = -ENOSPC; + goto out; + } + mvif->omac_idx = idx; + mvif->dev = dev; + mvif->band_idx = ext_phy; + + if (ext_phy) + mvif->wmm_idx = ext_phy * (MT7915_MAX_WMM_SETS / 2) + + mvif->idx % (MT7915_MAX_WMM_SETS / 2); + else + mvif->wmm_idx = mvif->idx % MT7915_MAX_WMM_SETS; + + ret = mt7915_mcu_add_dev_info(dev, vif, true); + if (ret) + goto out; + + phy->vif_mask |= BIT(mvif->idx); + phy->omac_mask |= BIT(mvif->omac_idx); + + idx = MT7915_WTBL_RESERVED - mvif->idx; + + INIT_LIST_HEAD(&mvif->sta.poll_list); + mvif->sta.wcid.idx = idx; + mvif->sta.wcid.ext_phy = 
mvif->band_idx; + mvif->sta.wcid.hw_key_idx = -1; + mvif->sta.wcid.tx_info |= MT_WCID_TX_INFO_SET; + mt7915_mac_wtbl_update(dev, idx, + MT_WTBL_UPDATE_ADM_COUNT_CLEAR); + + rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid); + if (vif->txq) { + mtxq = (struct mt76_txq *)vif->txq->drv_priv; + mtxq->wcid = &mvif->sta.wcid; + mt76_txq_init(&dev->mt76, vif->txq); + } + +out: + mutex_unlock(&dev->mt76.mutex); + + return ret; +} + +static void mt7915_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; + struct mt7915_sta *msta = &mvif->sta; + struct mt7915_dev *dev = mt7915_hw_dev(hw); + struct mt7915_phy *phy = mt7915_hw_phy(hw); + int idx = msta->wcid.idx; + + /* TODO: disable beacon for the bss */ + + mt7915_mcu_add_dev_info(dev, vif, false); + + rcu_assign_pointer(dev->mt76.wcid[idx], NULL); + if (vif->txq) + mt76_txq_remove(&dev->mt76, vif->txq); + + mutex_lock(&dev->mt76.mutex); + phy->vif_mask &= ~BIT(mvif->idx); + phy->omac_mask &= ~BIT(mvif->omac_idx); + mutex_unlock(&dev->mt76.mutex); + + spin_lock_bh(&dev->sta_poll_lock); + if (!list_empty(&msta->poll_list)) + list_del_init(&msta->poll_list); + spin_unlock_bh(&dev->sta_poll_lock); +} + +static void mt7915_init_dfs_state(struct mt7915_phy *phy) +{ + struct mt76_phy *mphy = phy->mt76; + struct ieee80211_hw *hw = mphy->hw; + struct cfg80211_chan_def *chandef = &hw->conf.chandef; + + if (hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) + return; + + if (!(chandef->chan->flags & IEEE80211_CHAN_RADAR)) + return; + + if (mphy->chandef.chan->center_freq == chandef->chan->center_freq && + mphy->chandef.width == chandef->width) + return; + + phy->dfs_state = -1; +} + +static int mt7915_set_channel(struct mt7915_phy *phy) +{ + struct mt7915_dev *dev = phy->dev; + int ret; + + cancel_delayed_work_sync(&phy->mac_work); + + mutex_lock(&dev->mt76.mutex); + set_bit(MT76_RESET, &phy->mt76->state); + + mt7915_init_dfs_state(phy); + mt76_set_channel(phy->mt76); + + ret = mt7915_mcu_set_chan_info(phy, MCU_EXT_CMD_CHANNEL_SWITCH); + if (ret) + goto out; + + mt7915_mac_set_timing(phy); + ret = mt7915_dfs_init_radar_detector(phy); + mt7915_mac_cca_stats_reset(phy); + + mt7915_mac_reset_counters(phy); + phy->noise = 0; + +out: + clear_bit(MT76_RESET, &phy->mt76->state); + mutex_unlock(&dev->mt76.mutex); + + mt76_txq_schedule_all(phy->mt76); + ieee80211_queue_delayed_work(phy->mt76->hw, &phy->mac_work, + MT7915_WATCHDOG_TIME); + + return ret; +} + +static int mt7915_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + struct mt7915_dev *dev = mt7915_hw_dev(hw); + struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; + struct mt7915_sta *msta = sta ? (struct mt7915_sta *)sta->drv_priv : + &mvif->sta; + struct mt76_wcid *wcid = &msta->wcid; + int idx = key->keyidx; + + /* The hardware does not support per-STA RX GTK, fall back + * to software mode for these. 
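+ * Returning -EOPNOTSUPP makes mac80211 handle the crypto for these + * keys in software.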
+ */ + if ((vif->type == NL80211_IFTYPE_ADHOC || + vif->type == NL80211_IFTYPE_MESH_POINT) && + (key->cipher == WLAN_CIPHER_SUITE_TKIP || + key->cipher == WLAN_CIPHER_SUITE_CCMP) && + !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) + return -EOPNOTSUPP; + + /* fall back to sw encryption for unsupported ciphers */ + switch (key->cipher) { + case WLAN_CIPHER_SUITE_AES_CMAC: + key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE; + break; + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + case WLAN_CIPHER_SUITE_TKIP: + case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_CCMP_256: + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: + case WLAN_CIPHER_SUITE_SMS4: + break; + default: + return -EOPNOTSUPP; + } + + if (cmd == SET_KEY) { + key->hw_key_idx = wcid->idx; + wcid->hw_key_idx = idx; + } else if (idx == wcid->hw_key_idx) { + wcid->hw_key_idx = -1; + } + mt76_wcid_key_setup(&dev->mt76, wcid, + cmd == SET_KEY ? key : NULL); + + return mt7915_mcu_add_key(dev, vif, msta, key, cmd); +} + +static int mt7915_config(struct ieee80211_hw *hw, u32 changed) +{ + struct mt7915_dev *dev = mt7915_hw_dev(hw); + struct mt7915_phy *phy = mt7915_hw_phy(hw); + bool band = phy != &dev->phy; + int ret; + + if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { + ieee80211_stop_queues(hw); + ret = mt7915_set_channel(phy); + if (ret) + return ret; + ieee80211_wake_queues(hw); + } + + if (changed & IEEE80211_CONF_CHANGE_POWER) { + ret = mt7915_mcu_set_sku(phy); + if (ret) + return ret; + } + + mutex_lock(&dev->mt76.mutex); + + if (changed & IEEE80211_CONF_CHANGE_MONITOR) { + if (!(hw->conf.flags & IEEE80211_CONF_MONITOR)) + phy->rxfilter |= MT_WF_RFCR_DROP_OTHER_UC; + else + phy->rxfilter &= ~MT_WF_RFCR_DROP_OTHER_UC; + + mt76_wr(dev, MT_WF_RFCR(band), phy->rxfilter); + } + + mutex_unlock(&dev->mt76.mutex); + + return 0; +} + +static int +mt7915_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue, + const struct ieee80211_tx_queue_params *params) +{ + struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; + + /* no need to update right away, we'll get BSS_CHANGED_QOS */ + mvif->wmm[queue].cw_min = params->cw_min; + mvif->wmm[queue].cw_max = params->cw_max; + mvif->wmm[queue].aifs = params->aifs; + mvif->wmm[queue].txop = params->txop; + + return 0; +} + +static void mt7915_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags, + u64 multicast) +{ + struct mt7915_dev *dev = mt7915_hw_dev(hw); + struct mt7915_phy *phy = mt7915_hw_phy(hw); + bool band = phy != &dev->phy; + + u32 ctl_flags = MT_WF_RFCR1_DROP_ACK | + MT_WF_RFCR1_DROP_BF_POLL | + MT_WF_RFCR1_DROP_BA | + MT_WF_RFCR1_DROP_CFEND | + MT_WF_RFCR1_DROP_CFACK; + u32 flags = 0; + +#define MT76_FILTER(_flag, _hw) do { \ + flags |= *total_flags & FIF_##_flag; \ + phy->rxfilter &= ~(_hw); \ + phy->rxfilter |= !(flags & FIF_##_flag) * (_hw); \ + } while (0) + + phy->rxfilter &= ~(MT_WF_RFCR_DROP_OTHER_BSS | + MT_WF_RFCR_DROP_OTHER_BEACON | + MT_WF_RFCR_DROP_FRAME_REPORT | + MT_WF_RFCR_DROP_PROBEREQ | + MT_WF_RFCR_DROP_MCAST_FILTERED | + MT_WF_RFCR_DROP_MCAST | + MT_WF_RFCR_DROP_BCAST | + MT_WF_RFCR_DROP_DUPLICATE | + MT_WF_RFCR_DROP_A2_BSSID | + MT_WF_RFCR_DROP_UNWANTED_CTL | + MT_WF_RFCR_DROP_STBC_MULTI); + + MT76_FILTER(OTHER_BSS, MT_WF_RFCR_DROP_OTHER_TIM | + MT_WF_RFCR_DROP_A3_MAC | + MT_WF_RFCR_DROP_A3_BSSID); + + MT76_FILTER(FCSFAIL, MT_WF_RFCR_DROP_FCSFAIL); + + MT76_FILTER(CONTROL, MT_WF_RFCR_DROP_CTS | + MT_WF_RFCR_DROP_RTS | + MT_WF_RFCR_DROP_CTL_RSV | + 
MT_WF_RFCR_DROP_NDPA); + + *total_flags = flags; + mt76_wr(dev, MT_WF_RFCR(band), phy->rxfilter); + + if (*total_flags & FIF_CONTROL) + mt76_clear(dev, MT_WF_RFCR1(band), ctl_flags); + else + mt76_set(dev, MT_WF_RFCR1(band), ctl_flags); +} + +static void mt7915_bss_info_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *info, + u32 changed) +{ + struct mt7915_phy *phy = mt7915_hw_phy(hw); + struct mt7915_dev *dev = mt7915_hw_dev(hw); + + mutex_lock(&dev->mt76.mutex); + + /* + * In station mode, the BSSID maps the wlan table entry to a peer; + * the peer then references bss_info_rfch to set its bandwidth + * capability. + */ + if (changed & BSS_CHANGED_BSSID && + vif->type == NL80211_IFTYPE_STATION) { + bool join = !is_zero_ether_addr(info->bssid); + + mt7915_mcu_add_bss_info(phy, vif, join); + mt7915_mcu_add_sta(dev, vif, NULL, join); + } + + if (changed & BSS_CHANGED_ASSOC) { + mt7915_mcu_add_bss_info(phy, vif, info->assoc); + mt7915_mcu_add_obss_spr(dev, vif, info->he_obss_pd.enable); + } + + if (changed & BSS_CHANGED_ERP_SLOT) { + int slottime = info->use_short_slot ? 9 : 20; + + if (slottime != phy->slottime) { + phy->slottime = slottime; + mt7915_mac_set_timing(phy); + } + } + + if (changed & BSS_CHANGED_BEACON_ENABLED) { + mt7915_mcu_add_bss_info(phy, vif, info->enable_beacon); + mt7915_mcu_add_sta(dev, vif, NULL, info->enable_beacon); + } + + /* make sure txcmd_mode is enabled only after the bss_info update */ + if (changed & (BSS_CHANGED_QOS | BSS_CHANGED_BEACON_ENABLED)) + mt7915_mcu_set_tx(dev, vif); + + if (changed & BSS_CHANGED_HE_OBSS_PD) + mt7915_mcu_add_obss_spr(dev, vif, info->he_obss_pd.enable); + + if (changed & (BSS_CHANGED_BEACON | + BSS_CHANGED_BEACON_ENABLED)) + mt7915_mcu_add_beacon(hw, vif, info->enable_beacon); + + mutex_unlock(&dev->mt76.mutex); +} + +static void +mt7915_channel_switch_beacon(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_chan_def *chandef) +{ + struct mt7915_dev *dev = mt7915_hw_dev(hw); + + mutex_lock(&dev->mt76.mutex); + mt7915_mcu_add_beacon(hw, vif, true); + mutex_unlock(&dev->mt76.mutex); +} + +int mt7915_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76); + struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; + struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; + int ret, idx; + + idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7915_WTBL_STA - 1); + if (idx < 0) + return -ENOSPC; + + INIT_LIST_HEAD(&msta->poll_list); + INIT_WORK(&msta->stats_work, mt7915_mac_sta_stats_work); + spin_lock_init(&msta->ampdu_lock); + msta->vif = mvif; + msta->wcid.sta = 1; + msta->wcid.idx = idx; + msta->wcid.ext_phy = mvif->band_idx; + msta->wcid.tx_info |= MT_WCID_TX_INFO_SET; + msta->stats.jiffies = jiffies; + + mt7915_mac_wtbl_update(dev, idx, + MT_WTBL_UPDATE_ADM_COUNT_CLEAR); + + ret = mt7915_mcu_add_sta(dev, vif, sta, true); + if (ret) + return ret; + + return mt7915_mcu_add_sta_adv(dev, vif, sta, true); +} + +void mt7915_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76); + struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; + + mt7915_mcu_add_sta_adv(dev, vif, sta, false); + mt7915_mcu_add_sta(dev, vif, sta, false); + + mt7915_mac_wtbl_update(dev, msta->wcid.idx, + MT_WTBL_UPDATE_ADM_COUNT_CLEAR); + + spin_lock_bh(&dev->sta_poll_lock); + if (!list_empty(&msta->poll_list)) + 
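/* drop the station from the poll list so the poll work + * cannot touch it after this point + */ + 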
list_del_init(&msta->poll_list); + spin_unlock_bh(&dev->sta_poll_lock); +} + +static void mt7915_tx(struct ieee80211_hw *hw, + struct ieee80211_tx_control *control, + struct sk_buff *skb) +{ + struct mt7915_dev *dev = mt7915_hw_dev(hw); + struct mt76_phy *mphy = hw->priv; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_vif *vif = info->control.vif; + struct mt76_wcid *wcid = &dev->mt76.global_wcid; + + if (control->sta) { + struct mt7915_sta *sta; + + sta = (struct mt7915_sta *)control->sta->drv_priv; + wcid = &sta->wcid; + } + + if (vif && !control->sta) { + struct mt7915_vif *mvif; + + mvif = (struct mt7915_vif *)vif->drv_priv; + wcid = &mvif->sta.wcid; + } + + mt76_tx(mphy, control->sta, wcid, skb); +} + +static int mt7915_set_rts_threshold(struct ieee80211_hw *hw, u32 val) +{ + struct mt7915_dev *dev = mt7915_hw_dev(hw); + struct mt7915_phy *phy = mt7915_hw_phy(hw); + + mutex_lock(&dev->mt76.mutex); + mt7915_mcu_set_rts_thresh(phy, val); + mutex_unlock(&dev->mt76.mutex); + + return 0; +} + +static int +mt7915_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_ampdu_params *params) +{ + enum ieee80211_ampdu_mlme_action action = params->action; + struct mt7915_dev *dev = mt7915_hw_dev(hw); + struct ieee80211_sta *sta = params->sta; + struct ieee80211_txq *txq = sta->txq[params->tid]; + struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; + u16 tid = params->tid; + u16 ssn = params->ssn; + struct mt76_txq *mtxq; + int ret = 0; + + if (!txq) + return -EINVAL; + + mtxq = (struct mt76_txq *)txq->drv_priv; + + mutex_lock(&dev->mt76.mutex); + switch (action) { + case IEEE80211_AMPDU_RX_START: + mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, ssn, + params->buf_size); + mt7915_mcu_add_rx_ba(dev, params, true); + break; + case IEEE80211_AMPDU_RX_STOP: + mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid); + mt7915_mcu_add_rx_ba(dev, params, false); + break; + case IEEE80211_AMPDU_TX_OPERATIONAL: + mtxq->aggr = true; + mtxq->send_bar = false; + mt7915_set_aggr_state(msta, tid, MT7915_AGGR_OPERATIONAL); + mt7915_mcu_add_tx_ba(dev, params, true); + break; + case IEEE80211_AMPDU_TX_STOP_FLUSH: + case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: + mtxq->aggr = false; + mt7915_set_aggr_state(msta, tid, MT7915_AGGR_STOP); + mt7915_mcu_add_tx_ba(dev, params, false); + break; + case IEEE80211_AMPDU_TX_START: + mtxq->agg_ssn = IEEE80211_SN_TO_SEQ(ssn); + mt7915_set_aggr_state(msta, tid, MT7915_AGGR_START); + ret = IEEE80211_AMPDU_TX_START_IMMEDIATE; + break; + case IEEE80211_AMPDU_TX_STOP_CONT: + mtxq->aggr = false; + mt7915_set_aggr_state(msta, tid, MT7915_AGGR_STOP); + mt7915_mcu_add_tx_ba(dev, params, false); + ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); + break; + } + mutex_unlock(&dev->mt76.mutex); + + return ret; +} + +static int +mt7915_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + return mt76_sta_state(hw, vif, sta, IEEE80211_STA_NOTEXIST, + IEEE80211_STA_NONE); +} + +static int +mt7915_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + return mt76_sta_state(hw, vif, sta, IEEE80211_STA_NONE, + IEEE80211_STA_NOTEXIST); +} + +static int +mt7915_get_stats(struct ieee80211_hw *hw, + struct ieee80211_low_level_stats *stats) +{ + struct mt7915_phy *phy = mt7915_hw_phy(hw); + struct mib_stats *mib = &phy->mib; + + stats->dot11RTSSuccessCount = mib->rts_cnt; + stats->dot11RTSFailureCount = mib->rts_retries_cnt; + stats->dot11FCSErrorCount = 
mib->fcs_err_cnt; + stats->dot11ACKFailureCount = mib->ack_fail_cnt; + + return 0; +} + +static u64 +mt7915_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; + struct mt7915_dev *dev = mt7915_hw_dev(hw); + struct mt7915_phy *phy = mt7915_hw_phy(hw); + bool band = phy != &dev->phy; + union { + u64 t64; + u32 t32[2]; + } tsf; + u16 n; + + mutex_lock(&dev->mt76.mutex); + + n = mvif->omac_idx > HW_BSSID_MAX ? HW_BSSID_0 : mvif->omac_idx; + /* TSF software read */ + mt76_set(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_MODE); + tsf.t32[0] = mt76_rr(dev, MT_LPON_UTTR0(band)); + tsf.t32[1] = mt76_rr(dev, MT_LPON_UTTR1(band)); + + mutex_unlock(&dev->mt76.mutex); + + return tsf.t64; +} + +static void +mt7915_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u64 timestamp) +{ + struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; + struct mt7915_dev *dev = mt7915_hw_dev(hw); + struct mt7915_phy *phy = mt7915_hw_phy(hw); + bool band = phy != &dev->phy; + union { + u64 t64; + u32 t32[2]; + } tsf = { .t64 = timestamp, }; + u16 n; + + mutex_lock(&dev->mt76.mutex); + + n = mvif->omac_idx > HW_BSSID_MAX ? HW_BSSID_0 : mvif->omac_idx; + mt76_wr(dev, MT_LPON_UTTR0(band), tsf.t32[0]); + mt76_wr(dev, MT_LPON_UTTR1(band), tsf.t32[1]); + /* TSF software overwrite */ + mt76_set(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_WRITE); + + mutex_unlock(&dev->mt76.mutex); +} + +static void +mt7915_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class) +{ + struct mt7915_phy *phy = mt7915_hw_phy(hw); + + phy->coverage_class = max_t(s16, coverage_class, 0); + mt7915_mac_set_timing(phy); +} + +static int +mt7915_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant) +{ + struct mt7915_dev *dev = mt7915_hw_dev(hw); + struct mt7915_phy *phy = mt7915_hw_phy(hw); + int max_nss = hweight8(hw->wiphy->available_antennas_tx); + bool ext_phy = phy != &dev->phy; + + if (!tx_ant || tx_ant != rx_ant || ffs(tx_ant) > max_nss) + return -EINVAL; + + if ((BIT(hweight8(tx_ant)) - 1) != tx_ant) + tx_ant = BIT(ffs(tx_ant) - 1) - 1; + + mutex_lock(&dev->mt76.mutex); + + phy->mt76->antenna_mask = tx_ant; + + if (ext_phy) { + if (dev->chainmask == 0xf) + tx_ant <<= 2; + else + tx_ant <<= 1; + } + phy->chainmask = tx_ant; + + mt76_set_stream_caps(phy->mt76, true); + mt7915_set_stream_vht_txbf_caps(phy); + mt7915_set_stream_he_caps(phy); + + mutex_unlock(&dev->mt76.mutex); + + return 0; +} + +static void mt7915_sta_statistics(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct station_info *sinfo) +{ + struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; + struct mt7915_sta_stats *stats = &msta->stats; + + if (!stats->tx_rate.legacy && !stats->tx_rate.flags) + return; + + if (stats->tx_rate.legacy) { + sinfo->txrate.legacy = stats->tx_rate.legacy; + } else { + sinfo->txrate.mcs = stats->tx_rate.mcs; + sinfo->txrate.nss = stats->tx_rate.nss; + sinfo->txrate.bw = stats->tx_rate.bw; + sinfo->txrate.he_gi = stats->tx_rate.he_gi; + sinfo->txrate.he_dcm = stats->tx_rate.he_dcm; + sinfo->txrate.he_ru_alloc = stats->tx_rate.he_ru_alloc; + } + sinfo->txrate.flags = stats->tx_rate.flags; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); +} + +static void +mt7915_sta_rc_update(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + u32 changed) +{ + struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; + + rcu_read_lock(); + sta = ieee80211_find_sta(vif, 
sta->addr); + if (!sta) { + rcu_read_unlock(); + return; + } + rcu_read_unlock(); + + set_bit(changed, &msta->stats.changed); + ieee80211_queue_work(hw, &msta->stats_work); +} + +const struct ieee80211_ops mt7915_ops = { + .tx = mt7915_tx, + .start = mt7915_start, + .stop = mt7915_stop, + .add_interface = mt7915_add_interface, + .remove_interface = mt7915_remove_interface, + .config = mt7915_config, + .conf_tx = mt7915_conf_tx, + .configure_filter = mt7915_configure_filter, + .bss_info_changed = mt7915_bss_info_changed, + .sta_add = mt7915_sta_add, + .sta_remove = mt7915_sta_remove, + .sta_pre_rcu_remove = mt76_sta_pre_rcu_remove, + .sta_rc_update = mt7915_sta_rc_update, + .set_key = mt7915_set_key, + .ampdu_action = mt7915_ampdu_action, + .set_rts_threshold = mt7915_set_rts_threshold, + .wake_tx_queue = mt76_wake_tx_queue, + .sw_scan_start = mt76_sw_scan, + .sw_scan_complete = mt76_sw_scan_complete, + .release_buffered_frames = mt76_release_buffered_frames, + .get_txpower = mt76_get_txpower, + .channel_switch_beacon = mt7915_channel_switch_beacon, + .get_stats = mt7915_get_stats, + .get_tsf = mt7915_get_tsf, + .set_tsf = mt7915_set_tsf, + .get_survey = mt76_get_survey, + .get_antenna = mt76_get_antenna, + .set_antenna = mt7915_set_antenna, + .set_coverage_class = mt7915_set_coverage_class, + .sta_statistics = mt7915_sta_statistics, +#ifdef CONFIG_MAC80211_DEBUGFS + .sta_add_debugfs = mt7915_sta_add_debugfs, +#endif +}; diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c new file mode 100644 index 000000000000..c8c12c740c1a --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c @@ -0,0 +1,3182 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2020 MediaTek Inc. */ + +#include <linux/firmware.h> +#include <linux/fs.h> +#include "mt7915.h" +#include "mcu.h" +#include "mac.h" +#include "eeprom.h" + +struct mt7915_patch_hdr { + char build_date[16]; + char platform[4]; + __be32 hw_sw_ver; + __be32 patch_ver; + __be16 checksum; + u16 reserved; + struct { + __be32 patch_ver; + __be32 subsys; + __be32 feature; + __be32 n_region; + __be32 crc; + u32 reserved[11]; + } desc; +} __packed; + +struct mt7915_patch_sec { + __be32 type; + __be32 offs; + __be32 size; + union { + __be32 spec[13]; + struct { + __be32 addr; + __be32 len; + __be32 sec_key_idx; + __be32 align_len; + u32 reserved[9]; + } info; + }; +} __packed; + +struct mt7915_fw_trailer { + u8 chip_id; + u8 eco_code; + u8 n_region; + u8 format_ver; + u8 format_flag; + u8 reserved[2]; + char fw_ver[10]; + char build_date[15]; + u32 crc; +} __packed; + +struct mt7915_fw_region { + __le32 decomp_crc; + __le32 decomp_len; + __le32 decomp_blk_sz; + u8 reserved[4]; + __le32 addr; + __le32 len; + u8 feature_set; + u8 reserved1[15]; +} __packed; + +#define MCU_PATCH_ADDRESS 0x200000 + +#define MT_STA_BFER BIT(0) +#define MT_STA_BFEE BIT(1) + +#define FW_FEATURE_SET_ENCRYPT BIT(0) +#define FW_FEATURE_SET_KEY_IDX GENMASK(2, 1) +#define FW_FEATURE_OVERRIDE_ADDR BIT(5) + +#define DL_MODE_ENCRYPT BIT(0) +#define DL_MODE_KEY_IDX GENMASK(2, 1) +#define DL_MODE_RESET_SEC_IV BIT(3) +#define DL_MODE_WORKING_PDA_CR4 BIT(4) +#define DL_MODE_NEED_RSP BIT(31) + +#define FW_START_OVERRIDE BIT(0) +#define FW_START_WORKING_PDA_CR4 BIT(2) + +#define PATCH_SEC_TYPE_MASK GENMASK(15, 0) +#define PATCH_SEC_TYPE_INFO 0x2 + +#define to_wcid_lo(id) FIELD_GET(GENMASK(7, 0), (u16)id) +#define to_wcid_hi(id) FIELD_GET(GENMASK(9, 8), (u16)id) + +#define HE_PHY(p, c) u8_get_bits(c, IEEE80211_HE_PHY_##p) 
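+/* + * HE_PHY (and HE_MAC below) extracts a capability subfield from an HE + * capability byte by token-pasting the mask name; e.g. (illustrative, using + * a mask from linux/ieee80211.h): + * + * HE_PHY(CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G, phy_cap_info[0]) + * + * expands to + * + * u8_get_bits(phy_cap_info[0], + * IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G) + */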
+#define HE_MAC(m, c) u8_get_bits(c, IEEE80211_HE_MAC_##m) + +static enum mt7915_cipher_type +mt7915_mcu_get_cipher(int cipher) +{ + switch (cipher) { + case WLAN_CIPHER_SUITE_WEP40: + return MT_CIPHER_WEP40; + case WLAN_CIPHER_SUITE_WEP104: + return MT_CIPHER_WEP104; + case WLAN_CIPHER_SUITE_TKIP: + return MT_CIPHER_TKIP; + case WLAN_CIPHER_SUITE_AES_CMAC: + return MT_CIPHER_BIP_CMAC_128; + case WLAN_CIPHER_SUITE_CCMP: + return MT_CIPHER_AES_CCMP; + case WLAN_CIPHER_SUITE_CCMP_256: + return MT_CIPHER_CCMP_256; + case WLAN_CIPHER_SUITE_GCMP: + return MT_CIPHER_GCMP; + case WLAN_CIPHER_SUITE_GCMP_256: + return MT_CIPHER_GCMP_256; + case WLAN_CIPHER_SUITE_SMS4: + return MT_CIPHER_WAPI; + default: + return MT_CIPHER_NONE; + } +} + +static u8 mt7915_mcu_chan_bw(struct cfg80211_chan_def *chandef) +{ + static const u8 width_to_bw[] = { + [NL80211_CHAN_WIDTH_40] = CMD_CBW_40MHZ, + [NL80211_CHAN_WIDTH_80] = CMD_CBW_80MHZ, + [NL80211_CHAN_WIDTH_80P80] = CMD_CBW_8080MHZ, + [NL80211_CHAN_WIDTH_160] = CMD_CBW_160MHZ, + [NL80211_CHAN_WIDTH_5] = CMD_CBW_5MHZ, + [NL80211_CHAN_WIDTH_10] = CMD_CBW_10MHZ, + [NL80211_CHAN_WIDTH_20] = CMD_CBW_20MHZ, + [NL80211_CHAN_WIDTH_20_NOHT] = CMD_CBW_20MHZ, + }; + + if (chandef->width >= ARRAY_SIZE(width_to_bw)) + return 0; + + return width_to_bw[chandef->width]; +} + +static const struct ieee80211_sta_he_cap * +mt7915_get_he_phy_cap(struct mt7915_phy *phy, struct ieee80211_vif *vif) +{ + struct ieee80211_supported_band *sband; + enum nl80211_band band; + + band = phy->mt76->chandef.chan->band; + sband = phy->mt76->hw->wiphy->bands[band]; + + return ieee80211_get_he_iftype_cap(sband, vif->type); +} + +static u8 +mt7915_get_phy_mode(struct mt7915_dev *dev, struct ieee80211_vif *vif, + enum nl80211_band band, struct ieee80211_sta *sta) +{ + struct ieee80211_sta_ht_cap *ht_cap; + struct ieee80211_sta_vht_cap *vht_cap; + const struct ieee80211_sta_he_cap *he_cap; + u8 mode = 0; + + if (sta) { + ht_cap = &sta->ht_cap; + vht_cap = &sta->vht_cap; + he_cap = &sta->he_cap; + } else { + struct ieee80211_supported_band *sband; + struct mt7915_phy *phy; + struct mt7915_vif *mvif; + + mvif = (struct mt7915_vif *)vif->drv_priv; + phy = mvif->band_idx ? 
mt7915_ext_phy(dev) : &dev->phy; + sband = phy->mt76->hw->wiphy->bands[band]; + + ht_cap = &sband->ht_cap; + vht_cap = &sband->vht_cap; + he_cap = ieee80211_get_he_iftype_cap(sband, vif->type); + } + + if (band == NL80211_BAND_2GHZ) { + mode |= PHY_MODE_B | PHY_MODE_G; + + if (ht_cap->ht_supported) + mode |= PHY_MODE_GN; + + if (he_cap->has_he) + mode |= PHY_MODE_AX_24G; + } else if (band == NL80211_BAND_5GHZ) { + mode |= PHY_MODE_A; + + if (ht_cap->ht_supported) + mode |= PHY_MODE_AN; + + if (vht_cap->vht_supported) + mode |= PHY_MODE_AC; + + if (he_cap->has_he) + mode |= PHY_MODE_AX_5G; + } + + return mode; +} + +static u8 +mt7915_mcu_get_sta_nss(u16 mcs_map) +{ + u8 nss; + + for (nss = 8; nss > 0; nss--) { + u8 nss_mcs = (mcs_map >> (2 * (nss - 1))) & 3; + + if (nss_mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED) + break; + } + + return nss - 1; +} + +static int __mt7915_mcu_msg_send(struct mt7915_dev *dev, struct sk_buff *skb, + int cmd, int *wait_seq) +{ + struct mt7915_mcu_txd *mcu_txd; + u8 seq, pkt_fmt, qidx; + enum mt76_txq_id txq; + __le32 *txd; + u32 val; + + seq = ++dev->mt76.mcu.msg_seq & 0xf; + if (!seq) + seq = ++dev->mt76.mcu.msg_seq & 0xf; + + if (cmd == -MCU_CMD_FW_SCATTER) { + txq = MT_TXQ_FWDL; + goto exit; + } + + mcu_txd = (struct mt7915_mcu_txd *)skb_push(skb, sizeof(*mcu_txd)); + + if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state)) { + txq = MT_TXQ_MCU_WA; + qidx = MT_TX_MCU_PORT_RX_Q0; + pkt_fmt = MT_TX_TYPE_CMD; + } else { + txq = MT_TXQ_MCU; + qidx = MT_TX_MCU_PORT_RX_Q0; + pkt_fmt = MT_TX_TYPE_CMD; + } + + txd = mcu_txd->txd; + + val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len) | + FIELD_PREP(MT_TXD0_PKT_FMT, pkt_fmt) | + FIELD_PREP(MT_TXD0_Q_IDX, qidx); + txd[0] = cpu_to_le32(val); + + val = MT_TXD1_LONG_FORMAT | + FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_CMD); + txd[1] = cpu_to_le32(val); + + mcu_txd->len = cpu_to_le16(skb->len - sizeof(mcu_txd->txd)); + mcu_txd->pq_id = cpu_to_le16(MCU_PQ_ID(MT_TX_PORT_IDX_MCU, qidx)); + mcu_txd->pkt_type = MCU_PKT_ID; + mcu_txd->seq = seq; + + if (cmd < 0) { + mcu_txd->set_query = MCU_Q_NA; + mcu_txd->cid = -cmd; + } else { + mcu_txd->cid = MCU_CMD_EXT_CID; + mcu_txd->ext_cid = cmd; + mcu_txd->ext_cid_ack = 1; + + /* do not use Q_SET for efuse */ + if (cmd == MCU_EXT_CMD_EFUSE_ACCESS) + mcu_txd->set_query = MCU_Q_QUERY; + else + mcu_txd->set_query = MCU_Q_SET; + } + + mcu_txd->s2d_index = MCU_S2D_H2N; + WARN_ON(cmd == MCU_EXT_CMD_EFUSE_ACCESS && + mcu_txd->set_query != MCU_Q_QUERY); + +exit: + if (wait_seq) + *wait_seq = seq; + + return mt76_tx_queue_skb_raw(dev, txq, skb, 0); +} + +static int +mt7915_mcu_parse_eeprom(struct mt7915_dev *dev, struct sk_buff *skb) +{ + struct mt7915_mcu_eeprom_info *res; + u8 *buf; + + if (!skb) + return -EINVAL; + + skb_pull(skb, sizeof(struct mt7915_mcu_rxd)); + + res = (struct mt7915_mcu_eeprom_info *)skb->data; + buf = dev->mt76.eeprom.data + le32_to_cpu(res->addr); + memcpy(buf, res->data, 16); + + return 0; +} + +static int +mt7915_mcu_parse_response(struct mt7915_dev *dev, int cmd, + struct sk_buff *skb, int seq) +{ + struct mt7915_mcu_rxd *rxd = (struct mt7915_mcu_rxd *)skb->data; + int ret = 0; + + if (seq != rxd->seq) + return -EAGAIN; + + switch (cmd) { + case -MCU_CMD_PATCH_SEM_CONTROL: + skb_pull(skb, sizeof(*rxd) - 4); + ret = *skb->data; + break; + case MCU_EXT_CMD_THERMAL_CTRL: + skb_pull(skb, sizeof(*rxd) + 4); + ret = le32_to_cpu(*(__le32 *)skb->data); + break; + case MCU_EXT_CMD_EFUSE_ACCESS: + ret = mt7915_mcu_parse_eeprom(dev, skb); + break; + default: + break; + } + 
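/* a response with a mismatched sequence number was handed back with + * -EAGAIN above so the caller keeps waiting; a matched response is + * consumed here once its status has been extracted + */ + 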
dev_kfree_skb(skb); + + return ret; +} + +static int +mt7915_mcu_wait_response(struct mt7915_dev *dev, int cmd, int seq) +{ + unsigned long expires = jiffies + 20 * HZ; + struct sk_buff *skb; + int ret = 0; + + while (true) { + skb = mt76_mcu_get_response(&dev->mt76, expires); + if (!skb) { + dev_err(dev->mt76.dev, "Message %d (seq %d) timeout\n", + cmd, seq); + return -ETIMEDOUT; + } + + ret = mt7915_mcu_parse_response(dev, cmd, skb, seq); + if (ret != -EAGAIN) + break; + } + + return ret; +} + +static int +mt7915_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb, + int cmd, bool wait_resp) +{ + struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76); + int ret, seq; + + mutex_lock(&mdev->mcu.mutex); + + ret = __mt7915_mcu_msg_send(dev, skb, cmd, &seq); + if (ret) + goto out; + + if (wait_resp) + ret = mt7915_mcu_wait_response(dev, cmd, seq); + +out: + mutex_unlock(&mdev->mcu.mutex); + + return ret; +} + +static int +mt7915_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data, + int len, bool wait_resp) +{ + struct sk_buff *skb; + + skb = mt76_mcu_msg_alloc(mdev, data, len); + if (!skb) + return -ENOMEM; + + return __mt76_mcu_skb_send_msg(mdev, skb, cmd, wait_resp); +} + +static void +mt7915_mcu_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif) +{ + if (vif->csa_active) + ieee80211_csa_finish(vif); +} + +static void +mt7915_mcu_rx_radar_detected(struct mt7915_dev *dev, struct sk_buff *skb) +{ + struct mt76_phy *mphy = &dev->mt76.phy; + struct mt7915_mcu_rdd_report *r; + + r = (struct mt7915_mcu_rdd_report *)skb->data; + + if (r->idx && dev->mt76.phy2) + mphy = dev->mt76.phy2; + + ieee80211_radar_detected(mphy->hw); + dev->hw_pattern++; +} + +static void +mt7915_mcu_tx_rate_cal(struct mt76_phy *mphy, struct mt7915_mcu_ra_info *ra, + struct rate_info *rate, u16 r) +{ + struct ieee80211_supported_band *sband; + u16 ru_idx = le16_to_cpu(ra->ru_idx); + u16 flags = 0; + + rate->mcs = FIELD_GET(MT_RA_RATE_MCS, r); + rate->nss = FIELD_GET(MT_RA_RATE_NSS, r) + 1; + + switch (FIELD_GET(MT_RA_RATE_TX_MODE, r)) { + case MT_PHY_TYPE_CCK: + case MT_PHY_TYPE_OFDM: + if (mphy->chandef.chan->band == NL80211_BAND_5GHZ) + sband = &mphy->sband_5g.sband; + else + sband = &mphy->sband_2g.sband; + + rate->legacy = sband->bitrates[rate->mcs].bitrate; + break; + case MT_PHY_TYPE_HT: + case MT_PHY_TYPE_HT_GF: + rate->mcs += (rate->nss - 1) * 8; + flags |= RATE_INFO_FLAGS_MCS; + + if (ra->gi) + flags |= RATE_INFO_FLAGS_SHORT_GI; + break; + case MT_PHY_TYPE_VHT: + flags |= RATE_INFO_FLAGS_VHT_MCS; + + if (ra->gi) + flags |= RATE_INFO_FLAGS_SHORT_GI; + break; + case MT_PHY_TYPE_HE_SU: + case MT_PHY_TYPE_HE_EXT_SU: + case MT_PHY_TYPE_HE_TB: + case MT_PHY_TYPE_HE_MU: + rate->he_gi = ra->gi; + rate->he_dcm = FIELD_GET(MT_RA_RATE_DCM_EN, r); + + flags |= RATE_INFO_FLAGS_HE_MCS; + break; + default: + break; + } + rate->flags = flags; + + if (ru_idx) { + switch (ru_idx) { + case 1 ... 2: + rate->he_ru_alloc = NL80211_RATE_INFO_HE_RU_ALLOC_996; + break; + case 3 ... 6: + rate->he_ru_alloc = NL80211_RATE_INFO_HE_RU_ALLOC_484; + break; + case 7 ... 
14: + rate->he_ru_alloc = NL80211_RATE_INFO_HE_RU_ALLOC_242; + break; + default: + rate->he_ru_alloc = NL80211_RATE_INFO_HE_RU_ALLOC_106; + break; + } + rate->bw = RATE_INFO_BW_HE_RU; + } else { + u8 bw = mt7915_mcu_chan_bw(&mphy->chandef) - + FIELD_GET(MT_RA_RATE_BW, r); + + switch (bw) { + case IEEE80211_STA_RX_BW_160: + rate->bw = RATE_INFO_BW_160; + break; + case IEEE80211_STA_RX_BW_80: + rate->bw = RATE_INFO_BW_80; + break; + case IEEE80211_STA_RX_BW_40: + rate->bw = RATE_INFO_BW_40; + break; + default: + rate->bw = RATE_INFO_BW_20; + break; + } + } +} + +static void +mt7915_mcu_tx_rate_report(struct mt7915_dev *dev, struct sk_buff *skb) +{ + struct mt7915_mcu_ra_info *ra = (struct mt7915_mcu_ra_info *)skb->data; + u16 wcidx = le16_to_cpu(ra->wlan_idx); + struct mt76_wcid *wcid = rcu_dereference(dev->mt76.wcid[wcidx]); + struct mt7915_sta *msta = container_of(wcid, struct mt7915_sta, wcid); + struct mt7915_sta_stats *stats = &msta->stats; + struct mt76_phy *mphy = &dev->mphy; + struct rate_info rate = {}, prob_rate = {}; + u16 attempts = le16_to_cpu(ra->attempts); + u16 curr = le16_to_cpu(ra->curr_rate); + u16 probe = le16_to_cpu(ra->prob_up_rate); + + if (msta->wcid.ext_phy && dev->mt76.phy2) + mphy = dev->mt76.phy2; + + /* current rate */ + mt7915_mcu_tx_rate_cal(mphy, ra, &rate, curr); + stats->tx_rate = rate; + + /* probing rate */ + mt7915_mcu_tx_rate_cal(mphy, ra, &prob_rate, probe); + stats->prob_rate = prob_rate; + + if (attempts) { + u16 success = le16_to_cpu(ra->success); + + stats->per = 1000 * (attempts - success) / attempts; + } +} + +static void +mt7915_mcu_rx_log_message(struct mt7915_dev *dev, struct sk_buff *skb) +{ + struct mt7915_mcu_rxd *rxd = (struct mt7915_mcu_rxd *)skb->data; + const char *data = (char *)&rxd[1]; + const char *type; + + switch (rxd->s2d_index) { + case 0: + type = "WM"; + break; + case 2: + type = "WA"; + break; + default: + type = "unknown"; + break; + } + + wiphy_info(mt76_hw(dev)->wiphy, "%s: %s", type, data); +} + +static void +mt7915_mcu_rx_ext_event(struct mt7915_dev *dev, struct sk_buff *skb) +{ + struct mt7915_mcu_rxd *rxd = (struct mt7915_mcu_rxd *)skb->data; + + switch (rxd->ext_eid) { + case MCU_EXT_EVENT_RDD_REPORT: + mt7915_mcu_rx_radar_detected(dev, skb); + break; + case MCU_EXT_EVENT_CSA_NOTIFY: + ieee80211_iterate_active_interfaces_atomic(dev->mt76.hw, + IEEE80211_IFACE_ITER_RESUME_ALL, + mt7915_mcu_csa_finish, dev); + break; + case MCU_EXT_EVENT_RATE_REPORT: + mt7915_mcu_tx_rate_report(dev, skb); + break; + case MCU_EXT_EVENT_FW_LOG_2_HOST: + mt7915_mcu_rx_log_message(dev, skb); + break; + default: + break; + } +} + +static void +mt7915_mcu_rx_unsolicited_event(struct mt7915_dev *dev, struct sk_buff *skb) +{ + struct mt7915_mcu_rxd *rxd = (struct mt7915_mcu_rxd *)skb->data; + + switch (rxd->eid) { + case MCU_EVENT_EXT: + mt7915_mcu_rx_ext_event(dev, skb); + break; + default: + break; + } + dev_kfree_skb(skb); +} + +void mt7915_mcu_rx_event(struct mt7915_dev *dev, struct sk_buff *skb) +{ + struct mt7915_mcu_rxd *rxd = (struct mt7915_mcu_rxd *)skb->data; + + if (rxd->ext_eid == MCU_EXT_EVENT_THERMAL_PROTECT || + rxd->ext_eid == MCU_EXT_EVENT_FW_LOG_2_HOST || + rxd->ext_eid == MCU_EXT_EVENT_ASSERT_DUMP || + rxd->ext_eid == MCU_EXT_EVENT_PS_SYNC || + rxd->ext_eid == MCU_EXT_EVENT_RATE_REPORT || + !rxd->seq) + mt7915_mcu_rx_unsolicited_event(dev, skb); + else + mt76_mcu_rx_event(&dev->mt76, skb); +} + +static struct sk_buff * +mt7915_mcu_alloc_sta_req(struct mt7915_dev *dev, struct mt7915_vif *mvif, + struct mt7915_sta *msta, int 
len) +{ + struct sta_req_hdr hdr = { + .bss_idx = mvif->idx, + .wlan_idx_lo = msta ? to_wcid_lo(msta->wcid.idx) : 0, + .wlan_idx_hi = msta ? to_wcid_hi(msta->wcid.idx) : 0, + .muar_idx = msta ? mvif->omac_idx : 0, + .is_tlv_append = 1, + }; + struct sk_buff *skb; + + skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, len); + if (!skb) + return ERR_PTR(-ENOMEM); + + skb_put_data(skb, &hdr, sizeof(hdr)); + + return skb; +} + +static struct wtbl_req_hdr * +mt7915_mcu_alloc_wtbl_req(struct mt7915_dev *dev, struct mt7915_sta *msta, + int cmd, void *sta_wtbl, struct sk_buff **skb) +{ + struct tlv *sta_hdr = sta_wtbl; + struct wtbl_req_hdr hdr = { + .wlan_idx_lo = to_wcid_lo(msta->wcid.idx), + .wlan_idx_hi = to_wcid_hi(msta->wcid.idx), + .operation = cmd, + }; + struct sk_buff *nskb = *skb; + + if (!nskb) { + nskb = mt76_mcu_msg_alloc(&dev->mt76, NULL, + MT7915_WTBL_UPDATE_BA_SIZE); + if (!nskb) + return ERR_PTR(-ENOMEM); + + *skb = nskb; + } + + if (sta_hdr) + sta_hdr->len = cpu_to_le16(sizeof(hdr)); + + return skb_put_data(nskb, &hdr, sizeof(hdr)); +} + +static struct tlv * +mt7915_mcu_add_nested_tlv(struct sk_buff *skb, int tag, int len, + void *sta_ntlv, void *sta_wtbl) +{ + struct sta_ntlv_hdr *ntlv_hdr = sta_ntlv; + struct tlv *sta_hdr = sta_wtbl; + struct tlv *ptlv, tlv = { + .tag = cpu_to_le16(tag), + .len = cpu_to_le16(len), + }; + u16 ntlv; + + ptlv = skb_put(skb, len); + memcpy(ptlv, &tlv, sizeof(tlv)); + + ntlv = le16_to_cpu(ntlv_hdr->tlv_num); + ntlv_hdr->tlv_num = cpu_to_le16(ntlv + 1); + + if (sta_hdr) { + u16 size = le16_to_cpu(sta_hdr->len); + + sta_hdr->len = cpu_to_le16(size + len); + } + + return ptlv; +} + +static struct tlv * +mt7915_mcu_add_tlv(struct sk_buff *skb, int tag, int len) +{ + return mt7915_mcu_add_nested_tlv(skb, tag, len, skb->data, NULL); +} + +static struct tlv * +mt7915_mcu_add_nested_subtlv(struct sk_buff *skb, int sub_tag, int sub_len, + __le16 *sub_ntlv, __le16 *len) +{ + struct tlv *ptlv, tlv = { + .tag = cpu_to_le16(sub_tag), + .len = cpu_to_le16(sub_len), + }; + + ptlv = skb_put(skb, sub_len); + memcpy(ptlv, &tlv, sizeof(tlv)); + + *sub_ntlv = cpu_to_le16(le16_to_cpu(*sub_ntlv) + 1); + *len = cpu_to_le16(le16_to_cpu(*len) + sub_len); + + return ptlv; +} + +/** bss info **/ +static int +mt7915_mcu_bss_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, + struct mt7915_phy *phy, bool enable) +{ + struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; + struct cfg80211_chan_def *chandef = &phy->mt76->chandef; + enum nl80211_band band = chandef->chan->band; + struct bss_info_basic *bss; + u16 wlan_idx = mvif->sta.wcid.idx; + u32 type = NETWORK_INFRA; + struct tlv *tlv; + + tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_BASIC, sizeof(*bss)); + + switch (vif->type) { + case NL80211_IFTYPE_MESH_POINT: + case NL80211_IFTYPE_AP: + break; + case NL80211_IFTYPE_STATION: + /* TODO: enable BSS_INFO_UAPSD & BSS_INFO_PM */ + if (enable) { + struct ieee80211_sta *sta; + struct mt7915_sta *msta; + + rcu_read_lock(); + sta = ieee80211_find_sta(vif, vif->bss_conf.bssid); + if (!sta) { + rcu_read_unlock(); + return -EINVAL; + } + + msta = (struct mt7915_sta *)sta->drv_priv; + wlan_idx = msta->wcid.idx; + rcu_read_unlock(); + } + break; + case NL80211_IFTYPE_ADHOC: + type = NETWORK_IBSS; + break; + default: + WARN_ON(1); + break; + } + + bss = (struct bss_info_basic *)tlv; + memcpy(bss->bssid, vif->bss_conf.bssid, ETH_ALEN); + bss->bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int); + bss->network_type = cpu_to_le32(type); + bss->dtim_period = vif->bss_conf.dtim_period; + 
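/* the 10-bit wlan_idx is carried as an 8-bit low part plus a 2-bit + * high part, hence the to_wcid_lo()/to_wcid_hi() helpers + */ + 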
bss->bmc_wcid_lo = to_wcid_lo(wlan_idx); + bss->bmc_wcid_hi = to_wcid_hi(wlan_idx); + bss->phy_mode = mt7915_get_phy_mode(phy->dev, vif, band, NULL); + bss->wmm_idx = mvif->wmm_idx; + bss->active = enable; + + return 0; +} + +static void +mt7915_mcu_bss_omac_tlv(struct sk_buff *skb, struct ieee80211_vif *vif) +{ + struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; + struct bss_info_omac *omac; + struct tlv *tlv; + u32 type = 0; + u8 idx; + + tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_OMAC, sizeof(*omac)); + + switch (vif->type) { + case NL80211_IFTYPE_MESH_POINT: + case NL80211_IFTYPE_AP: + type = CONNECTION_INFRA_AP; + break; + case NL80211_IFTYPE_STATION: + type = CONNECTION_INFRA_STA; + break; + case NL80211_IFTYPE_ADHOC: + type = CONNECTION_IBSS_ADHOC; + break; + default: + WARN_ON(1); + break; + } + + omac = (struct bss_info_omac *)tlv; + idx = mvif->omac_idx > EXT_BSSID_START ? HW_BSSID_0 : mvif->omac_idx; + omac->conn_type = cpu_to_le32(type); + omac->omac_idx = mvif->omac_idx; + omac->band_idx = mvif->band_idx; + omac->hw_bss_idx = idx; +} + +struct mt7915_he_obss_narrow_bw_ru_data { + bool tolerated; +}; + +static void mt7915_check_he_obss_narrow_bw_ru_iter(struct wiphy *wiphy, + struct cfg80211_bss *bss, + void *_data) +{ + struct mt7915_he_obss_narrow_bw_ru_data *data = _data; + const struct element *elem; + + elem = ieee80211_bss_get_elem(bss, WLAN_EID_EXT_CAPABILITY); + + if (!elem || elem->datalen < 10 || + !(elem->data[10] & + WLAN_EXT_CAPA10_OBSS_NARROW_BW_RU_TOLERANCE_SUPPORT)) + data->tolerated = false; +} + +static bool mt7915_check_he_obss_narrow_bw_ru(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct mt7915_he_obss_narrow_bw_ru_data iter_data = { + .tolerated = true, + }; + + if (!(vif->bss_conf.chandef.chan->flags & IEEE80211_CHAN_RADAR)) + return false; + + cfg80211_bss_iter(hw->wiphy, &vif->bss_conf.chandef, + mt7915_check_he_obss_narrow_bw_ru_iter, + &iter_data); + + /* + * If there is at least one AP on radar channel that cannot + * tolerate 26-tone RU UL OFDMA transmissions using HE TB PPDU. 
+	 * In that case the BSS is reported as not tolerated, so that
+	 * RU26 gets blocked for this interface.
+	 */
+	return !iter_data.tolerated;
+}
+
+static void
+mt7915_mcu_bss_rfch_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
+			struct mt7915_phy *phy)
+{
+	struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
+	struct bss_info_rf_ch *ch;
+	struct tlv *tlv;
+	int freq1 = chandef->center_freq1;
+
+	tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_RF_CH, sizeof(*ch));
+
+	ch = (struct bss_info_rf_ch *)tlv;
+	ch->pri_ch = chandef->chan->hw_value;
+	ch->center_ch0 = ieee80211_frequency_to_channel(freq1);
+	ch->bw = mt7915_mcu_chan_bw(chandef);
+
+	if (chandef->width == NL80211_CHAN_WIDTH_80P80) {
+		int freq2 = chandef->center_freq2;
+
+		ch->center_ch1 = ieee80211_frequency_to_channel(freq2);
+	}
+
+	if (vif->bss_conf.he_support && vif->type == NL80211_IFTYPE_STATION) {
+		struct mt7915_dev *dev = phy->dev;
+		struct mt76_phy *mphy = &dev->mt76.phy;
+		bool ext_phy = phy != &dev->phy;
+
+		if (ext_phy && dev->mt76.phy2)
+			mphy = dev->mt76.phy2;
+
+		ch->he_ru26_block =
+			mt7915_check_he_obss_narrow_bw_ru(mphy->hw, vif);
+		ch->he_all_disable = false;
+	} else {
+		ch->he_all_disable = true;
+	}
+}
+
+static void
+mt7915_mcu_bss_ra_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
+		      struct mt7915_phy *phy)
+{
+	struct bss_info_ra *ra;
+	struct tlv *tlv;
+	int max_nss = hweight8(phy->chainmask);
+
+	tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_RA, sizeof(*ra));
+
+	ra = (struct bss_info_ra *)tlv;
+	ra->op_mode = vif->type == NL80211_IFTYPE_AP;
+	ra->adhoc_en = vif->type == NL80211_IFTYPE_ADHOC;
+	ra->short_preamble = true;
+	ra->tx_streams = max_nss;
+	ra->rx_streams = max_nss;
+	ra->algo = 4;
+	ra->train_up_rule = 2;
+	ra->train_up_high_thres = 110;
+	ra->train_up_rule_rssi = -70;
+	ra->low_traffic_thres = 2;
+	ra->phy_cap = cpu_to_le32(0xfdf);
+	ra->interval = cpu_to_le32(500);
+	ra->fast_interval = cpu_to_le32(100);
+}
+
+static void
+mt7915_mcu_bss_he_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
+		      struct mt7915_phy *phy)
+{
+#define DEFAULT_HE_PE_DURATION		4
+#define DEFAULT_HE_DURATION_RTS_THRES	1023
+	const struct ieee80211_sta_he_cap *cap;
+	struct bss_info_he *he;
+	struct tlv *tlv;
+
+	cap = mt7915_get_he_phy_cap(phy, vif);
+
+	tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_HE_BASIC, sizeof(*he));
+
+	he = (struct bss_info_he *)tlv;
+	he->he_pe_duration = vif->bss_conf.htc_trig_based_pkt_ext * 4;
+	if (!he->he_pe_duration)
+		he->he_pe_duration = DEFAULT_HE_PE_DURATION;
+
+	he->he_rts_thres = cpu_to_le16(vif->bss_conf.frame_time_rts_th * 32);
+	if (!he->he_rts_thres)
+		he->he_rts_thres = cpu_to_le16(DEFAULT_HE_DURATION_RTS_THRES);
+
+	he->max_nss_mcs[CMD_HE_MCS_BW80] = cap->he_mcs_nss_supp.tx_mcs_80;
+	he->max_nss_mcs[CMD_HE_MCS_BW160] = cap->he_mcs_nss_supp.tx_mcs_160;
+	he->max_nss_mcs[CMD_HE_MCS_BW8080] = cap->he_mcs_nss_supp.tx_mcs_80p80;
+}
+
+static void
+mt7915_mcu_bss_ext_tlv(struct sk_buff *skb, struct mt7915_vif *mvif)
+{
+/* SIFS 20us + 512 byte beacon transmitted at 1Mbps (4096us) */
+#define BCN_TX_ESTIMATE_TIME	(4096 + 20)
+	struct bss_info_ext_bss *ext;
+	int ext_bss_idx, tsf_offset;
+	struct tlv *tlv;
+
+	ext_bss_idx = mvif->omac_idx - EXT_BSSID_START;
+	if (ext_bss_idx < 0)
+		return;
+
+	tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_EXT_BSS, sizeof(*ext));
+
+	ext = (struct bss_info_ext_bss *)tlv;
+	tsf_offset = ext_bss_idx * BCN_TX_ESTIMATE_TIME;
+	ext->mbss_tsf_offset = cpu_to_le32(tsf_offset);
+}
+
+static void
+mt7915_mcu_bss_bmc_tlv(struct sk_buff *skb, struct mt7915_phy *phy)
+{
+	struct bss_info_bmc_rate *bmc;
+	struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
+	enum nl80211_band band = chandef->chan->band;
+	struct tlv *tlv;
+
+	tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_BMC_RATE, sizeof(*bmc));
+
+	bmc = (struct bss_info_bmc_rate *)tlv;
+	if (band == NL80211_BAND_2GHZ) {
+		bmc->short_preamble = true;
+	} else {
+		bmc->bc_trans = cpu_to_le16(0x2000);
+		bmc->mc_trans = cpu_to_le16(0x2080);
+	}
+}
+
+static void
+mt7915_mcu_bss_sync_tlv(struct sk_buff *skb, struct ieee80211_vif *vif)
+{
+	struct bss_info_sync_mode *sync;
+	struct tlv *tlv;
+
+	tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_SYNC_MODE, sizeof(*sync));
+
+	sync = (struct bss_info_sync_mode *)tlv;
+	sync->bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int);
+	sync->dtim_period = vif->bss_conf.dtim_period;
+	sync->enable = true;
+}
+
+int mt7915_mcu_add_bss_info(struct mt7915_phy *phy,
+			    struct ieee80211_vif *vif, int enable)
+{
+	struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+	struct sk_buff *skb;
+
+	skb = mt7915_mcu_alloc_sta_req(phy->dev, mvif, NULL,
+				       MT7915_BSS_UPDATE_MAX_SIZE);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	/* bss_omac must be first */
+	if (enable)
+		mt7915_mcu_bss_omac_tlv(skb, vif);
+
+	mt7915_mcu_bss_basic_tlv(skb, vif, phy, enable);
+
+	if (enable) {
+		mt7915_mcu_bss_rfch_tlv(skb, vif, phy);
+		mt7915_mcu_bss_bmc_tlv(skb, phy);
+		mt7915_mcu_bss_ra_tlv(skb, vif, phy);
+
+		if (vif->bss_conf.he_support)
+			mt7915_mcu_bss_he_tlv(skb, vif, phy);
+
+		if (mvif->omac_idx > HW_BSSID_MAX)
+			mt7915_mcu_bss_ext_tlv(skb, mvif);
+		else
+			mt7915_mcu_bss_sync_tlv(skb, vif);
+	}
+
+	return __mt76_mcu_skb_send_msg(&phy->dev->mt76, skb,
+				       MCU_EXT_CMD_BSS_INFO_UPDATE, true);
+}
+
+/** starec & wtbl **/
+static int
+mt7915_mcu_sta_key_tlv(struct sk_buff *skb, struct ieee80211_key_conf *key,
+		       enum set_key_cmd cmd)
+{
+	struct sta_rec_sec *sec;
+	struct tlv *tlv;
+	u32 len = sizeof(*sec);
+
+	tlv = mt7915_mcu_add_tlv(skb, STA_REC_KEY_V2, sizeof(*sec));
+
+	sec = (struct sta_rec_sec *)tlv;
+	sec->add = cmd;
+
+	if (cmd == SET_KEY) {
+		struct sec_key *sec_key;
+		u8 cipher;
+
+		cipher = mt7915_mcu_get_cipher(key->cipher);
+		if (cipher == MT_CIPHER_NONE)
+			return -EOPNOTSUPP;
+
+		sec_key = &sec->key[0];
+		sec_key->cipher_len = sizeof(*sec_key);
+		sec_key->key_id = key->keyidx;
+
+		if (cipher == MT_CIPHER_BIP_CMAC_128) {
+			sec_key->cipher_id = MT_CIPHER_AES_CCMP;
+			sec_key->key_len = 16;
+			memcpy(sec_key->key, key->key, 16);
+
+			sec_key = &sec->key[1];
+			sec_key->cipher_id = MT_CIPHER_BIP_CMAC_128;
+			sec_key->cipher_len = sizeof(*sec_key);
+			sec_key->key_len = 16;
+			memcpy(sec_key->key, key->key + 16, 16);
+
+			sec->n_cipher = 2;
+		} else {
+			sec_key->cipher_id = cipher;
+			sec_key->key_len = key->keylen;
+			memcpy(sec_key->key, key->key, key->keylen);
+
+			if (cipher == MT_CIPHER_TKIP) {
+				/* Rx/Tx MIC keys are swapped */
+				memcpy(sec_key->key + 16, key->key + 24, 8);
+				memcpy(sec_key->key + 24, key->key + 16, 8);
+			}
+
+			len -= sizeof(*sec_key);
+			sec->n_cipher = 1;
+		}
+	} else {
+		len -= sizeof(sec->key);
+		sec->n_cipher = 0;
+	}
+	sec->len = cpu_to_le16(len);
+
+	return 0;
+}
+
+int mt7915_mcu_add_key(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+		       struct mt7915_sta *msta, struct ieee80211_key_conf *key,
+		       enum set_key_cmd cmd)
+{
+	struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+	struct sk_buff *skb;
+	int len = sizeof(struct sta_req_hdr) + sizeof(struct sta_rec_sec);
+	int ret;
+
+	skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, len);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	ret = mt7915_mcu_sta_key_tlv(skb, key, cmd);
+	if (ret) {
+		/* free the pending mcu message to avoid a leak */
+		dev_kfree_skb(skb);
+		return ret;
+	}
+
+	return
__mt76_mcu_skb_send_msg(&dev->mt76, skb, + MCU_EXT_CMD_STA_REC_UPDATE, true); +} + +static void +mt7915_mcu_sta_ba_tlv(struct sk_buff *skb, + struct ieee80211_ampdu_params *params, + bool enable, bool tx) +{ + struct sta_rec_ba *ba; + struct tlv *tlv; + + tlv = mt7915_mcu_add_tlv(skb, STA_REC_BA, sizeof(*ba)); + + ba = (struct sta_rec_ba *)tlv; + ba->ba_type = tx ? MT_BA_TYPE_ORIGINATOR : MT_BA_TYPE_RECIPIENT, + ba->winsize = cpu_to_le16(params->buf_size); + ba->ssn = cpu_to_le16(params->ssn); + ba->ba_en = enable << params->tid; + ba->amsdu = params->amsdu; + ba->tid = params->tid; +} + +static void +mt7915_mcu_wtbl_ba_tlv(struct sk_buff *skb, + struct ieee80211_ampdu_params *params, + bool enable, bool tx, void *sta_wtbl, + void *wtbl_tlv) +{ + struct wtbl_ba *ba; + struct tlv *tlv; + + tlv = mt7915_mcu_add_nested_tlv(skb, WTBL_BA, sizeof(*ba), + wtbl_tlv, sta_wtbl); + + ba = (struct wtbl_ba *)tlv; + ba->tid = params->tid; + + if (tx) { + ba->ba_type = MT_BA_TYPE_ORIGINATOR; + ba->sn = enable ? cpu_to_le16(params->ssn) : 0; + ba->ba_en = enable; + } else { + memcpy(ba->peer_addr, params->sta->addr, ETH_ALEN); + ba->ba_type = MT_BA_TYPE_RECIPIENT; + ba->rst_ba_tid = params->tid; + ba->rst_ba_sel = RST_BA_MAC_TID_MATCH; + ba->rst_ba_sb = 1; + } + + if (enable && tx) + ba->ba_winsize = cpu_to_le16(params->buf_size); +} + +static int +mt7915_mcu_sta_ba(struct mt7915_dev *dev, + struct ieee80211_ampdu_params *params, + bool enable, bool tx) +{ + struct mt7915_sta *msta = (struct mt7915_sta *)params->sta->drv_priv; + struct mt7915_vif *mvif = msta->vif; + struct wtbl_req_hdr *wtbl_hdr; + struct tlv *sta_wtbl; + struct sk_buff *skb; + + skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, + MT7915_STA_UPDATE_MAX_SIZE); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + mt7915_mcu_sta_ba_tlv(skb, params, enable, tx); + sta_wtbl = mt7915_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv)); + + wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, sta_wtbl, + &skb); + mt7915_mcu_wtbl_ba_tlv(skb, params, enable, tx, sta_wtbl, wtbl_hdr); + + return __mt76_mcu_skb_send_msg(&dev->mt76, skb, + MCU_EXT_CMD_STA_REC_UPDATE, true); +} + +int mt7915_mcu_add_tx_ba(struct mt7915_dev *dev, + struct ieee80211_ampdu_params *params, + bool enable) +{ + return mt7915_mcu_sta_ba(dev, params, enable, true); +} + +int mt7915_mcu_add_rx_ba(struct mt7915_dev *dev, + struct ieee80211_ampdu_params *params, + bool enable) +{ + return mt7915_mcu_sta_ba(dev, params, enable, false); +} + +static void +mt7915_mcu_wtbl_generic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, void *sta_wtbl, + void *wtbl_tlv) +{ + struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; + struct wtbl_generic *generic; + struct wtbl_rx *rx; + struct tlv *tlv; + + tlv = mt7915_mcu_add_nested_tlv(skb, WTBL_GENERIC, sizeof(*generic), + wtbl_tlv, sta_wtbl); + + generic = (struct wtbl_generic *)tlv; + + if (sta) { + memcpy(generic->peer_addr, sta->addr, ETH_ALEN); + generic->partial_aid = cpu_to_le16(sta->aid); + generic->muar_idx = mvif->omac_idx; + generic->qos = sta->wme; + } else { + /* use BSSID in station mode */ + if (vif->type == NL80211_IFTYPE_STATION) + memcpy(generic->peer_addr, vif->bss_conf.bssid, + ETH_ALEN); + else + eth_broadcast_addr(generic->peer_addr); + + generic->muar_idx = 0xe; + } + + tlv = mt7915_mcu_add_nested_tlv(skb, WTBL_RX, sizeof(*rx), + wtbl_tlv, sta_wtbl); + + rx = (struct wtbl_rx *)tlv; + rx->rca1 = sta ? 
vif->type != NL80211_IFTYPE_AP : 1; + rx->rca2 = 1; + rx->rv = 1; +} + +static void +mt7915_mcu_sta_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, bool enable) +{ +#define EXTRA_INFO_VER BIT(0) +#define EXTRA_INFO_NEW BIT(1) + struct sta_rec_basic *basic; + struct tlv *tlv; + + tlv = mt7915_mcu_add_tlv(skb, STA_REC_BASIC, sizeof(*basic)); + + basic = (struct sta_rec_basic *)tlv; + basic->extra_info = cpu_to_le16(EXTRA_INFO_VER); + + if (enable) { + basic->extra_info |= cpu_to_le16(EXTRA_INFO_NEW); + basic->conn_state = CONN_STATE_PORT_SECURE; + } else { + basic->conn_state = CONN_STATE_DISCONNECT; + } + + if (!sta) { + basic->conn_type = cpu_to_le32(CONNECTION_INFRA_BC); + eth_broadcast_addr(basic->peer_addr); + return; + } + + switch (vif->type) { + case NL80211_IFTYPE_MESH_POINT: + case NL80211_IFTYPE_AP: + basic->conn_type = cpu_to_le32(CONNECTION_INFRA_STA); + break; + case NL80211_IFTYPE_STATION: + basic->conn_type = cpu_to_le32(CONNECTION_INFRA_AP); + break; + case NL80211_IFTYPE_ADHOC: + basic->conn_type = cpu_to_le32(CONNECTION_IBSS_ADHOC); + break; + default: + WARN_ON(1); + break; + } + + memcpy(basic->peer_addr, sta->addr, ETH_ALEN); + basic->aid = cpu_to_le16(sta->aid); + basic->qos = sta->wme; +} + +static void +mt7915_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) +{ + struct ieee80211_sta_he_cap *he_cap = &sta->he_cap; + struct ieee80211_he_cap_elem *elem = &he_cap->he_cap_elem; + struct sta_rec_he *he; + struct tlv *tlv; + u32 cap = 0; + + tlv = mt7915_mcu_add_tlv(skb, STA_REC_HE, sizeof(*he)); + + he = (struct sta_rec_he *)tlv; + + if (elem->mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_HTC_HE) + cap |= STA_REC_HE_CAP_HTC; + + if (elem->mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_BSR) + cap |= STA_REC_HE_CAP_BSR; + + if (elem->mac_cap_info[3] & IEEE80211_HE_MAC_CAP3_OMI_CONTROL) + cap |= STA_REC_HE_CAP_OM; + + if (elem->mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU) + cap |= STA_REC_HE_CAP_AMSDU_IN_AMPDU; + + if (elem->mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_BQR) + cap |= STA_REC_HE_CAP_BQR; + + if (elem->phy_cap_info[0] & + (IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_2G | + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G)) + cap |= STA_REC_HE_CAP_BW20_RU242_SUPPORT; + + if (elem->phy_cap_info[1] & + IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD) + cap |= STA_REC_HE_CAP_LDPC; + + if (elem->phy_cap_info[1] & + IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US) + cap |= STA_REC_HE_CAP_SU_PPDU_1LTF_8US_GI; + + if (elem->phy_cap_info[2] & + IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US) + cap |= STA_REC_HE_CAP_NDP_4LTF_3DOT2MS_GI; + + if (elem->phy_cap_info[2] & + IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ) + cap |= STA_REC_HE_CAP_LE_EQ_80M_TX_STBC; + + if (elem->phy_cap_info[2] & + IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ) + cap |= STA_REC_HE_CAP_LE_EQ_80M_RX_STBC; + + if (elem->phy_cap_info[6] & + IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE) + cap |= STA_REC_HE_CAP_PARTIAL_BW_EXT_RANGE; + + if (elem->phy_cap_info[7] & + IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI) + cap |= STA_REC_HE_CAP_SU_MU_PPDU_4LTF_8US_GI; + + if (elem->phy_cap_info[7] & + IEEE80211_HE_PHY_CAP7_STBC_TX_ABOVE_80MHZ) + cap |= STA_REC_HE_CAP_GT_80M_TX_STBC; + + if (elem->phy_cap_info[7] & + IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ) + cap |= STA_REC_HE_CAP_GT_80M_RX_STBC; + + if (elem->phy_cap_info[8] & + IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI) + cap |= STA_REC_HE_CAP_ER_SU_PPDU_4LTF_8US_GI; + + if 
(elem->phy_cap_info[8] &
+	    IEEE80211_HE_PHY_CAP8_HE_ER_SU_1XLTF_AND_08_US_GI)
+		cap |= STA_REC_HE_CAP_ER_SU_PPDU_1LTF_8US_GI;
+
+	if (elem->phy_cap_info[9] &
+	    IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK)
+		cap |= STA_REC_HE_CAP_TRIG_CQI_FK;
+
+	if (elem->phy_cap_info[9] &
+	    IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU)
+		cap |= STA_REC_HE_CAP_TX_1024QAM_UNDER_RU242;
+
+	if (elem->phy_cap_info[9] &
+	    IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU)
+		cap |= STA_REC_HE_CAP_RX_1024QAM_UNDER_RU242;
+
+	he->he_cap = cpu_to_le32(cap);
+
+	switch (sta->bandwidth) {
+	case IEEE80211_STA_RX_BW_160:
+		if (elem->phy_cap_info[0] &
+		    IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
+			he->max_nss_mcs[CMD_HE_MCS_BW8080] =
+				he_cap->he_mcs_nss_supp.rx_mcs_80p80;
+
+		he->max_nss_mcs[CMD_HE_MCS_BW160] =
+				he_cap->he_mcs_nss_supp.rx_mcs_160;
+		/* fall through */
+	default:
+		he->max_nss_mcs[CMD_HE_MCS_BW80] =
+				he_cap->he_mcs_nss_supp.rx_mcs_80;
+		break;
+	}
+
+	he->t_frame_dur =
+		HE_MAC(CAP1_TF_MAC_PAD_DUR_MASK, elem->mac_cap_info[1]);
+	he->max_ampdu_exp =
+		HE_MAC(CAP3_MAX_AMPDU_LEN_EXP_MASK, elem->mac_cap_info[3]);
+
+	he->bw_set =
+		HE_PHY(CAP0_CHANNEL_WIDTH_SET_MASK, elem->phy_cap_info[0]);
+	he->device_class =
+		HE_PHY(CAP1_DEVICE_CLASS_A, elem->phy_cap_info[1]);
+	he->punc_pream_rx =
+		HE_PHY(CAP1_PREAMBLE_PUNC_RX_MASK, elem->phy_cap_info[1]);
+
+	he->dcm_tx_mode =
+		HE_PHY(CAP3_DCM_MAX_CONST_TX_MASK, elem->phy_cap_info[3]);
+	he->dcm_tx_max_nss =
+		HE_PHY(CAP3_DCM_MAX_TX_NSS_2, elem->phy_cap_info[3]);
+	he->dcm_rx_mode =
+		HE_PHY(CAP3_DCM_MAX_CONST_RX_MASK, elem->phy_cap_info[3]);
+	he->dcm_rx_max_nss =
+		HE_PHY(CAP3_DCM_MAX_RX_NSS_2, elem->phy_cap_info[3]);
+	he->dcm_max_ru =
+		HE_PHY(CAP8_DCM_MAX_RU_MASK, elem->phy_cap_info[8]);
+
+	he->pkt_ext = 2;
+}
+
+static void
+mt7915_mcu_sta_muru_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
+{
+	struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
+	struct ieee80211_he_cap_elem *elem = &he_cap->he_cap_elem;
+	struct sta_rec_muru *muru;
+	struct tlv *tlv;
+
+	tlv = mt7915_mcu_add_tlv(skb, STA_REC_MURU, sizeof(*muru));
+
+	muru = (struct sta_rec_muru *)tlv;
+	muru->cfg.ofdma_dl_en = true;
+	muru->cfg.ofdma_ul_en = true;
+	muru->cfg.mimo_dl_en = true;
+	muru->cfg.mimo_ul_en = true;
+
+	muru->ofdma_dl.punc_pream_rx =
+		HE_PHY(CAP1_PREAMBLE_PUNC_RX_MASK, elem->phy_cap_info[1]);
+	muru->ofdma_dl.he_20m_in_40m_2g =
+		HE_PHY(CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G, elem->phy_cap_info[8]);
+	muru->ofdma_dl.he_20m_in_160m =
+		HE_PHY(CAP8_20MHZ_IN_160MHZ_HE_PPDU, elem->phy_cap_info[8]);
+	muru->ofdma_dl.he_80m_in_160m =
+		HE_PHY(CAP8_80MHZ_IN_160MHZ_HE_PPDU, elem->phy_cap_info[8]);
+	muru->ofdma_dl.lt16_sigb = 0;
+	muru->ofdma_dl.rx_su_comp_sigb = 0;
+	muru->ofdma_dl.rx_su_non_comp_sigb = 0;
+
+	muru->ofdma_ul.t_frame_dur =
+		HE_MAC(CAP1_TF_MAC_PAD_DUR_MASK, elem->mac_cap_info[1]);
+	muru->ofdma_ul.mu_cascading =
+		HE_MAC(CAP2_MU_CASCADING, elem->mac_cap_info[2]);
+	muru->ofdma_ul.uo_ra =
+		HE_MAC(CAP3_OFDMA_RA, elem->mac_cap_info[3]);
+	muru->ofdma_ul.he_2x996_tone = 0;
+	muru->ofdma_ul.rx_t_frame_11ac = 0;
+
+	muru->mimo_dl.vht_mu_bfee =
+		!!(sta->vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE);
+	muru->mimo_dl.partial_bw_dl_mimo =
+		HE_PHY(CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO, elem->phy_cap_info[6]);
+
+	muru->mimo_ul.full_ul_mimo =
+		HE_PHY(CAP2_UL_MU_FULL_MU_MIMO, elem->phy_cap_info[2]);
+	muru->mimo_ul.partial_ul_mimo =
+		HE_PHY(CAP2_UL_MU_PARTIAL_MU_MIMO, elem->phy_cap_info[2]);
+}
+
+static void
+mt7915_mcu_sta_tlv(struct mt7915_dev *dev, struct sk_buff *skb, + struct ieee80211_sta *sta) +{ + struct tlv *tlv; + + if (sta->ht_cap.ht_supported) { + struct sta_rec_ht *ht; + + /* starec ht */ + tlv = mt7915_mcu_add_tlv(skb, STA_REC_HT, sizeof(*ht)); + ht = (struct sta_rec_ht *)tlv; + ht->ht_cap = cpu_to_le16(sta->ht_cap.cap); + } + + /* starec vht */ + if (sta->vht_cap.vht_supported) { + struct sta_rec_vht *vht; + + tlv = mt7915_mcu_add_tlv(skb, STA_REC_VHT, sizeof(*vht)); + vht = (struct sta_rec_vht *)tlv; + vht->vht_cap = cpu_to_le32(sta->vht_cap.cap); + vht->vht_rx_mcs_map = sta->vht_cap.vht_mcs.rx_mcs_map; + vht->vht_tx_mcs_map = sta->vht_cap.vht_mcs.tx_mcs_map; + } + + /* starec he */ + if (sta->he_cap.has_he) + mt7915_mcu_sta_he_tlv(skb, sta); + + /* starec muru */ + if (sta->he_cap.has_he || sta->vht_cap.vht_supported) + mt7915_mcu_sta_muru_tlv(skb, sta); +} + +static void +mt7915_mcu_wtbl_smps_tlv(struct sk_buff *skb, struct ieee80211_sta *sta, + void *sta_wtbl, void *wtbl_tlv) +{ + struct wtbl_smps *smps; + struct tlv *tlv; + + tlv = mt7915_mcu_add_nested_tlv(skb, WTBL_SMPS, sizeof(*smps), + wtbl_tlv, sta_wtbl); + smps = (struct wtbl_smps *)tlv; + + if (sta->smps_mode == IEEE80211_SMPS_DYNAMIC) + smps->smps = true; +} + +static void +mt7915_mcu_wtbl_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta, + void *sta_wtbl, void *wtbl_tlv) +{ + struct wtbl_ht *ht = NULL; + struct tlv *tlv; + + /* wtbl ht */ + if (sta->ht_cap.ht_supported) { + tlv = mt7915_mcu_add_nested_tlv(skb, WTBL_HT, sizeof(*ht), + wtbl_tlv, sta_wtbl); + ht = (struct wtbl_ht *)tlv; + ht->ldpc = sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING; + ht->af = sta->ht_cap.ampdu_factor; + ht->mm = sta->ht_cap.ampdu_density; + ht->ht = true; + } + + /* wtbl vht */ + if (sta->vht_cap.vht_supported) { + struct wtbl_vht *vht; + u8 af; + + tlv = mt7915_mcu_add_nested_tlv(skb, WTBL_VHT, sizeof(*vht), + wtbl_tlv, sta_wtbl); + vht = (struct wtbl_vht *)tlv; + vht->ldpc = sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC, + vht->vht = true; + + af = FIELD_GET(IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK, + sta->vht_cap.cap); + if (ht) + ht->af = max_t(u8, ht->af, af); + } + + mt7915_mcu_wtbl_smps_tlv(skb, sta, sta_wtbl, wtbl_tlv); +} + +int mt7915_mcu_add_smps(struct mt7915_dev *dev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; + struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; + struct wtbl_req_hdr *wtbl_hdr; + struct tlv *sta_wtbl; + struct sk_buff *skb; + + skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, + MT7915_STA_UPDATE_MAX_SIZE); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + sta_wtbl = mt7915_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv)); + + wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, sta_wtbl, + &skb); + mt7915_mcu_wtbl_smps_tlv(skb, sta, sta_wtbl, wtbl_hdr); + + return __mt76_mcu_skb_send_msg(&dev->mt76, skb, + MCU_EXT_CMD_STA_REC_UPDATE, true); +} + +static void +mt7915_mcu_sta_sounding_rate(struct sta_rec_bf *bf) +{ + bf->sounding_phy = MT_PHY_TYPE_OFDM; + bf->ndp_rate = 0; /* mcs0 */ + bf->ndpa_rate = MT7915_CFEND_RATE_DEFAULT; /* ofdm 24m */ + bf->rept_poll_rate = MT7915_CFEND_RATE_DEFAULT; /* ofdm 24m */ +} + +static void +mt7915_mcu_sta_bfer_ht(struct ieee80211_sta *sta, struct sta_rec_bf *bf) +{ + struct ieee80211_mcs_info *mcs = &sta->ht_cap.mcs; + u8 n = 0; + + bf->tx_mode = MT_PHY_TYPE_HT; + bf->bf_cap |= MT_IBF; + + if (mcs->tx_params & IEEE80211_HT_MCS_TX_RX_DIFF && + (mcs->tx_params & 
IEEE80211_HT_MCS_TX_DEFINED)) + n = FIELD_GET(IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK, + mcs->tx_params); + else if (mcs->rx_mask[3]) + n = 3; + else if (mcs->rx_mask[2]) + n = 2; + else if (mcs->rx_mask[1]) + n = 1; + + bf->nc = min_t(u8, bf->nr, n); + bf->ibf_ncol = bf->nc; + + if (sta->bandwidth <= IEEE80211_STA_RX_BW_40 && !bf->nc) + bf->ibf_timeout = 0x48; +} + +static void +mt7915_mcu_sta_bfer_vht(struct ieee80211_sta *sta, struct mt7915_phy *phy, + struct sta_rec_bf *bf) +{ + struct ieee80211_sta_vht_cap *pc = &sta->vht_cap; + struct ieee80211_sta_vht_cap *vc = &phy->mt76->sband_5g.sband.vht_cap; + u8 bfee_nr, bfer_nr, n, tx_ant = hweight8(phy->chainmask) - 1; + u16 mcs_map; + + bf->tx_mode = MT_PHY_TYPE_VHT; + bf->bf_cap |= MT_EBF; + + mt7915_mcu_sta_sounding_rate(bf); + + bfee_nr = FIELD_GET(IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK, + pc->cap); + bfer_nr = FIELD_GET(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK, + vc->cap); + mcs_map = le16_to_cpu(pc->vht_mcs.rx_mcs_map); + + n = min_t(u8, bfer_nr, bfee_nr); + bf->nr = min_t(u8, n, tx_ant); + n = mt7915_mcu_get_sta_nss(mcs_map); + + bf->nc = min_t(u8, n, bf->nr); + bf->ibf_ncol = bf->nc; + + /* force nr from 4 to 2 */ + if (sta->bandwidth == IEEE80211_STA_RX_BW_160) + bf->nr = 1; +} + +static void +mt7915_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif, + struct mt7915_phy *phy, struct sta_rec_bf *bf) +{ + struct ieee80211_sta_he_cap *pc = &sta->he_cap; + struct ieee80211_he_cap_elem *pe = &pc->he_cap_elem; + const struct ieee80211_he_cap_elem *ve; + const struct ieee80211_sta_he_cap *vc; + u8 bfee_nr, bfer_nr, nss_mcs; + u16 mcs_map; + + vc = mt7915_get_he_phy_cap(phy, vif); + ve = &vc->he_cap_elem; + + bf->tx_mode = MT_PHY_TYPE_HE_SU; + bf->bf_cap |= MT_EBF; + + mt7915_mcu_sta_sounding_rate(bf); + + bf->trigger_su = HE_PHY(CAP6_TRIG_SU_BEAMFORMER_FB, + pe->phy_cap_info[6]); + bf->trigger_mu = HE_PHY(CAP6_TRIG_MU_BEAMFORMER_FB, + pe->phy_cap_info[6]); + bfer_nr = HE_PHY(CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK, + ve->phy_cap_info[5]); + bfee_nr = HE_PHY(CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_MASK, + pe->phy_cap_info[4]); + + mcs_map = le16_to_cpu(pc->he_mcs_nss_supp.tx_mcs_80); + nss_mcs = mt7915_mcu_get_sta_nss(mcs_map); + + bf->nr = min_t(u8, bfer_nr, bfee_nr); + bf->nc = min_t(u8, nss_mcs, bf->nr); + bf->ibf_ncol = bf->nc; + + if (sta->bandwidth != IEEE80211_STA_RX_BW_160) + return; + + /* go over for 160MHz and 80p80 */ + if (pe->phy_cap_info[0] & + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G) { + mcs_map = le16_to_cpu(pc->he_mcs_nss_supp.rx_mcs_160); + nss_mcs = mt7915_mcu_get_sta_nss(mcs_map); + + bf->nc_bw160 = nss_mcs; + } + + if (pe->phy_cap_info[0] & + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) { + mcs_map = le16_to_cpu(pc->he_mcs_nss_supp.rx_mcs_80p80); + nss_mcs = mt7915_mcu_get_sta_nss(mcs_map); + + if (bf->nc_bw160) + bf->nc_bw160 = min_t(u8, bf->nc_bw160, nss_mcs); + else + bf->nc_bw160 = nss_mcs; + } + + bfer_nr = HE_PHY(CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK, + ve->phy_cap_info[5]); + bfee_nr = HE_PHY(CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_MASK, + pe->phy_cap_info[4]); + + bf->nr_bw160 = min_t(int, bfer_nr, bfee_nr); +} + +static void +mt7915_mcu_sta_bfer_tlv(struct sk_buff *skb, struct ieee80211_sta *sta, + struct ieee80211_vif *vif, struct mt7915_phy *phy, + bool enable) +{ + struct sta_rec_bf *bf; + struct tlv *tlv; + int tx_ant = hweight8(phy->chainmask) - 1; + const u8 matrix[4][4] = { + {0, 0, 0, 0}, + {1, 1, 0, 0}, /* 2x1, 2x2, 2x3, 2x4 */ + {2, 4, 4, 0}, /* 3x1, 
3x2, 3x3, 3x4 */ + {3, 5, 6, 0} /* 4x1, 4x2, 4x3, 4x4 */ + }; + +#define MT_BFER_FREE cpu_to_le16(GENMASK(15, 0)) + + tlv = mt7915_mcu_add_tlv(skb, STA_REC_BF, sizeof(*bf)); + bf = (struct sta_rec_bf *)tlv; + + if (!enable) { + bf->pfmu = MT_BFER_FREE; + return; + } + + bf->bw = sta->bandwidth; + bf->ibf_dbw = sta->bandwidth; + bf->ibf_nrow = tx_ant; + bf->ibf_timeout = 0x18; + + if (sta->he_cap.has_he) + mt7915_mcu_sta_bfer_he(sta, vif, phy, bf); + else if (sta->vht_cap.vht_supported) + mt7915_mcu_sta_bfer_vht(sta, phy, bf); + else if (sta->ht_cap.ht_supported) + mt7915_mcu_sta_bfer_ht(sta, bf); + + if (bf->bf_cap & MT_EBF && bf->nr != tx_ant) + bf->mem_20m = matrix[tx_ant][bf->nc]; + else + bf->mem_20m = matrix[bf->nr][bf->nc]; + + switch (sta->bandwidth) { + case IEEE80211_STA_RX_BW_160: + case IEEE80211_STA_RX_BW_80: + bf->mem_total = bf->mem_20m * 2; + break; + case IEEE80211_STA_RX_BW_40: + bf->mem_total = bf->mem_20m; + break; + case IEEE80211_STA_RX_BW_20: + default: + break; + } +} + +static void +mt7915_mcu_sta_bfee_tlv(struct sk_buff *skb, struct ieee80211_sta *sta, + struct mt7915_phy *phy) +{ + struct sta_rec_bfee *bfee; + struct tlv *tlv; + int tx_ant = hweight8(phy->chainmask) - 1; + u8 nr = 0; + + tlv = mt7915_mcu_add_tlv(skb, STA_REC_BFEE, sizeof(*bfee)); + bfee = (struct sta_rec_bfee *)tlv; + + if (sta->he_cap.has_he) { + struct ieee80211_he_cap_elem *pe = &sta->he_cap.he_cap_elem; + + nr = HE_PHY(CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK, + pe->phy_cap_info[5]); + } else if (sta->vht_cap.vht_supported) { + struct ieee80211_sta_vht_cap *pc = &sta->vht_cap; + + nr = FIELD_GET(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK, + pc->cap); + } + + /* reply with identity matrix to avoid 2x2 BF negative gain */ + if (nr == 1 && tx_ant == 2) + bfee->fb_identity_matrix = true; +} + +static u8 +mt7915_mcu_sta_txbf_type(struct mt7915_phy *phy, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + u8 type = 0; + + if (vif->type != NL80211_IFTYPE_STATION && + vif->type != NL80211_IFTYPE_AP) + return 0; + + if (sta->he_cap.has_he) { + struct ieee80211_he_cap_elem *pe; + const struct ieee80211_he_cap_elem *ve; + const struct ieee80211_sta_he_cap *vc; + + pe = &sta->he_cap.he_cap_elem; + vc = mt7915_get_he_phy_cap(phy, vif); + ve = &vc->he_cap_elem; + + if ((HE_PHY(CAP3_SU_BEAMFORMER, pe->phy_cap_info[3]) || + HE_PHY(CAP4_MU_BEAMFORMER, pe->phy_cap_info[4])) && + HE_PHY(CAP4_SU_BEAMFORMEE, ve->phy_cap_info[4])) + type |= MT_STA_BFEE; + + if ((HE_PHY(CAP3_SU_BEAMFORMER, ve->phy_cap_info[3]) || + HE_PHY(CAP4_MU_BEAMFORMER, ve->phy_cap_info[4])) && + HE_PHY(CAP4_SU_BEAMFORMEE, pe->phy_cap_info[4])) + type |= MT_STA_BFER; + } else if (sta->vht_cap.vht_supported) { + struct ieee80211_sta_vht_cap *pc; + struct ieee80211_sta_vht_cap *vc; + u32 cr, ce; + + pc = &sta->vht_cap; + vc = &phy->mt76->sband_5g.sband.vht_cap; + cr = IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | + IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE; + ce = IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | + IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE; + + if ((pc->cap & cr) && (vc->cap & ce)) + type |= MT_STA_BFEE; + + if ((vc->cap & cr) && (pc->cap & ce)) + type |= MT_STA_BFER; + } else if (sta->ht_cap.ht_supported) { + /* TODO: iBF */ + } + + return type; +} + +static int +mt7915_mcu_add_txbf(struct mt7915_dev *dev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, bool enable) +{ + struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; + struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; + struct mt7915_phy *phy; + 
struct sk_buff *skb; + int r, len; + u8 type; + + phy = mvif->band_idx ? mt7915_ext_phy(dev) : &dev->phy; + + type = mt7915_mcu_sta_txbf_type(phy, vif, sta); + + /* must keep each tag independent */ + + /* starec bf */ + if (type & MT_STA_BFER) { + len = sizeof(struct sta_req_hdr) + sizeof(struct sta_rec_bf); + + skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, len); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + mt7915_mcu_sta_bfer_tlv(skb, sta, vif, phy, enable); + + r = __mt76_mcu_skb_send_msg(&dev->mt76, skb, + MCU_EXT_CMD_STA_REC_UPDATE, true); + if (r) + return r; + } + + /* starec bfee */ + if (type & MT_STA_BFEE) { + len = sizeof(struct sta_req_hdr) + sizeof(struct sta_rec_bfee); + + skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, len); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + mt7915_mcu_sta_bfee_tlv(skb, sta, phy); + + r = __mt76_mcu_skb_send_msg(&dev->mt76, skb, + MCU_EXT_CMD_STA_REC_UPDATE, true); + if (r) + return r; + } + + return 0; +} + +static void +mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct cfg80211_chan_def *chandef = &dev->mphy.chandef; + struct sta_rec_ra *ra; + struct tlv *tlv; + enum nl80211_band band = chandef->chan->band; + u32 supp_rate = sta->supp_rates[band]; + int n_rates = hweight32(supp_rate); + u32 cap = sta->wme ? STA_CAP_WMM : 0; + u8 i, nss = sta->rx_nss, mcs = 0; + + tlv = mt7915_mcu_add_tlv(skb, STA_REC_RA, sizeof(*ra)); + + ra = (struct sta_rec_ra *)tlv; + ra->valid = true; + ra->auto_rate = true; + ra->phy_mode = mt7915_get_phy_mode(dev, vif, band, sta); + ra->channel = chandef->chan->hw_value; + ra->bw = sta->bandwidth; + ra->rate_len = n_rates; + ra->phy.bw = sta->bandwidth; + + if (n_rates) { + if (band == NL80211_BAND_2GHZ) { + ra->supp_mode = MODE_CCK; + ra->supp_cck_rate = supp_rate & GENMASK(3, 0); + ra->phy.type = MT_PHY_TYPE_CCK; + + if (n_rates > 4) { + ra->supp_mode |= MODE_OFDM; + ra->supp_ofdm_rate = supp_rate >> 4; + ra->phy.type = MT_PHY_TYPE_OFDM; + } + } else { + ra->supp_mode = MODE_OFDM; + ra->supp_ofdm_rate = supp_rate; + ra->phy.type = MT_PHY_TYPE_OFDM; + } + } + + if (sta->ht_cap.ht_supported) { + for (i = 0; i < nss; i++) + ra->ht_mcs[i] = sta->ht_cap.mcs.rx_mask[i]; + + ra->supp_ht_mcs = *(__le32 *)ra->ht_mcs; + ra->supp_mode |= MODE_HT; + mcs = hweight32(le32_to_cpu(ra->supp_ht_mcs)) - 1; + ra->af = sta->ht_cap.ampdu_factor; + ra->ht_gf = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD); + + cap |= STA_CAP_HT; + if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) + cap |= STA_CAP_SGI_20; + if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) + cap |= STA_CAP_SGI_40; + if (sta->ht_cap.cap & IEEE80211_HT_CAP_TX_STBC) + cap |= STA_CAP_TX_STBC; + if (sta->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC) + cap |= STA_CAP_RX_STBC; + if (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING) + cap |= STA_CAP_LDPC; + } + + if (sta->vht_cap.vht_supported) { + u16 mcs_map = le16_to_cpu(sta->vht_cap.vht_mcs.rx_mcs_map); + u16 vht_mcs; + u8 af, mcs_prev; + + af = FIELD_GET(IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK, + sta->vht_cap.cap); + ra->af = max_t(u8, ra->af, af); + + cap |= STA_CAP_VHT; + if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80) + cap |= STA_CAP_VHT_SGI_80; + if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160) + cap |= STA_CAP_VHT_SGI_160; + if (sta->vht_cap.cap & IEEE80211_VHT_CAP_TXSTBC) + cap |= STA_CAP_VHT_TX_STBC; + if (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_1) + cap |= STA_CAP_VHT_RX_STBC; + if (sta->vht_cap.cap & 
IEEE80211_VHT_CAP_RXLDPC) + cap |= STA_CAP_VHT_LDPC; + + ra->supp_mode |= MODE_VHT; + for (mcs = 0, i = 0; i < nss; i++, mcs_map >>= 2) { + switch (mcs_map & 0x3) { + case IEEE80211_VHT_MCS_SUPPORT_0_9: + vht_mcs = GENMASK(9, 0); + break; + case IEEE80211_VHT_MCS_SUPPORT_0_8: + vht_mcs = GENMASK(8, 0); + break; + case IEEE80211_VHT_MCS_SUPPORT_0_7: + vht_mcs = GENMASK(7, 0); + break; + default: + vht_mcs = 0; + } + + ra->supp_vht_mcs[i] = cpu_to_le16(vht_mcs); + + mcs_prev = hweight16(vht_mcs) - 1; + if (mcs_prev > mcs) + mcs = mcs_prev; + + /* only support 2ss on 160MHz */ + if (i > 1 && (ra->bw == CMD_CBW_160MHZ || + ra->bw == CMD_CBW_8080MHZ)) + break; + } + } + + if (sta->he_cap.has_he) { + ra->supp_mode |= MODE_HE; + cap |= STA_CAP_HE; + } + + ra->sta_status = cpu_to_le32(cap); + + switch (BIT(fls(ra->supp_mode) - 1)) { + case MODE_VHT: + ra->phy.type = MT_PHY_TYPE_VHT; + ra->phy.mcs = mcs; + ra->phy.nss = nss; + ra->phy.stbc = !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_TXSTBC); + ra->phy.ldpc = !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC); + ra->phy.sgi = + !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80); + break; + case MODE_HT: + ra->phy.type = MT_PHY_TYPE_HT; + ra->phy.mcs = mcs; + ra->phy.ldpc = sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING; + ra->phy.stbc = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_TX_STBC); + ra->phy.sgi = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20); + break; + default: + break; + } +} + +int mt7915_mcu_add_rate_ctrl(struct mt7915_dev *dev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; + struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; + struct sk_buff *skb; + int len = sizeof(struct sta_req_hdr) + sizeof(struct sta_rec_ra); + + skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, len); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + mt7915_mcu_sta_rate_ctrl_tlv(skb, dev, vif, sta); + + return __mt76_mcu_skb_send_msg(&dev->mt76, skb, + MCU_EXT_CMD_STA_REC_UPDATE, true); +} + +int mt7915_mcu_add_sta_adv(struct mt7915_dev *dev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, bool enable) +{ + int ret; + + if (!sta) + return 0; + + /* must keep the order */ + ret = mt7915_mcu_add_txbf(dev, vif, sta, enable); + if (ret) + return ret; + + if (enable) + return mt7915_mcu_add_rate_ctrl(dev, vif, sta); + + return 0; +} + +int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, bool enable) +{ + struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; + struct wtbl_req_hdr *wtbl_hdr; + struct mt7915_sta *msta; + struct tlv *sta_wtbl; + struct sk_buff *skb; + + msta = sta ? 
(struct mt7915_sta *)sta->drv_priv : &mvif->sta; + + skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, + MT7915_STA_UPDATE_MAX_SIZE); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + mt7915_mcu_sta_basic_tlv(skb, vif, sta, enable); + if (enable && sta) + mt7915_mcu_sta_tlv(dev, skb, sta); + + sta_wtbl = mt7915_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv)); + + wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_RESET_AND_SET, + sta_wtbl, &skb); + if (enable) { + mt7915_mcu_wtbl_generic_tlv(skb, vif, sta, sta_wtbl, wtbl_hdr); + if (sta) + mt7915_mcu_wtbl_ht_tlv(skb, sta, sta_wtbl, wtbl_hdr); + } + + return __mt76_mcu_skb_send_msg(&dev->mt76, skb, + MCU_EXT_CMD_STA_REC_UPDATE, true); +} + +int mt7915_mcu_set_fixed_rate(struct mt7915_dev *dev, + struct ieee80211_sta *sta, u32 rate) +{ + struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; + struct mt7915_vif *mvif = msta->vif; + struct sta_rec_ra_fixed *ra; + struct sk_buff *skb; + struct tlv *tlv; + int len = sizeof(struct sta_req_hdr) + sizeof(*ra); + + skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, len); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + tlv = mt7915_mcu_add_tlv(skb, STA_REC_RA_UPDATE, sizeof(*ra)); + ra = (struct sta_rec_ra_fixed *)tlv; + + if (!rate) { + ra->field = cpu_to_le32(RATE_PARAM_AUTO); + goto out; + } else { + ra->field = cpu_to_le32(RATE_PARAM_FIXED); + } + + ra->phy.type = FIELD_GET(RATE_CFG_PHY_TYPE, rate); + ra->phy.bw = FIELD_GET(RATE_CFG_BW, rate); + ra->phy.nss = FIELD_GET(RATE_CFG_NSS, rate); + ra->phy.mcs = FIELD_GET(RATE_CFG_MCS, rate); + ra->phy.stbc = FIELD_GET(RATE_CFG_STBC, rate); + + if (ra->phy.bw) + ra->phy.ldpc = 7; + else + ra->phy.ldpc = FIELD_GET(RATE_CFG_LDPC, rate) * 7; + + /* HT/VHT - SGI: 1, LGI: 0; HE - SGI: 0, MGI: 1, LGI: 2 */ + if (ra->phy.type > MT_PHY_TYPE_VHT) + ra->phy.sgi = ra->phy.mcs * 85; + else + ra->phy.sgi = ra->phy.mcs * 15; + +out: + return __mt76_mcu_skb_send_msg(&dev->mt76, skb, + MCU_EXT_CMD_STA_REC_UPDATE, true); +} + +int mt7915_mcu_add_dev_info(struct mt7915_dev *dev, + struct ieee80211_vif *vif, bool enable) +{ + struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; + struct { + struct req_hdr { + u8 omac_idx; + u8 dbdc_idx; + __le16 tlv_num; + u8 is_tlv_append; + u8 rsv[3]; + } __packed hdr; + struct req_tlv { + __le16 tag; + __le16 len; + u8 active; + u8 dbdc_idx; + u8 omac_addr[ETH_ALEN]; + } __packed tlv; + } data = { + .hdr = { + .omac_idx = mvif->omac_idx, + .dbdc_idx = mvif->band_idx, + .tlv_num = cpu_to_le16(1), + .is_tlv_append = 1, + }, + .tlv = { + .tag = cpu_to_le16(DEV_INFO_ACTIVE), + .len = cpu_to_le16(sizeof(struct req_tlv)), + .active = enable, + .dbdc_idx = mvif->band_idx, + }, + }; + + memcpy(data.tlv.omac_addr, vif->addr, ETH_ALEN); + return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_DEV_INFO_UPDATE, + &data, sizeof(data), true); +} + +static void +mt7915_mcu_beacon_csa(struct sk_buff *rskb, struct sk_buff *skb, + struct bss_info_bcn *bcn, + struct ieee80211_mutable_offsets *offs) +{ + if (offs->csa_counter_offs[0]) { + struct tlv *tlv; + struct bss_info_bcn_csa *csa; + + tlv = mt7915_mcu_add_nested_subtlv(rskb, BSS_INFO_BCN_CSA, + sizeof(*csa), &bcn->sub_ntlv, + &bcn->len); + csa = (struct bss_info_bcn_csa *)tlv; + csa->cnt = skb->data[offs->csa_counter_offs[0]]; + } +} + +static void +mt7915_mcu_beacon_cont(struct mt7915_dev *dev, struct sk_buff *rskb, + struct sk_buff *skb, struct bss_info_bcn *bcn, + struct ieee80211_mutable_offsets *offs) +{ + struct mt76_wcid *wcid = &dev->mt76.global_wcid; + struct bss_info_bcn_cont *cont; 
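+	/* the beacon content sub-TLV carries the hardware TXD followed by
+	 * the beacon template itself; len below reserves room for both
+	 */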
+	struct tlv *tlv;
+	u8 *buf;
+	int len = sizeof(*cont) + MT_TXD_SIZE + skb->len;
+
+	tlv = mt7915_mcu_add_nested_subtlv(rskb, BSS_INFO_BCN_CONTENT,
+					   len, &bcn->sub_ntlv, &bcn->len);
+
+	cont = (struct bss_info_bcn_cont *)tlv;
+	cont->pkt_len = cpu_to_le16(MT_TXD_SIZE + skb->len);
+	cont->tim_ofs = cpu_to_le16(offs->tim_offset);
+
+	if (offs->csa_counter_offs[0])
+		cont->csa_ofs = cpu_to_le16(offs->csa_counter_offs[0] - 4);
+
+	buf = (u8 *)tlv + sizeof(*cont);
+	mt7915_mac_write_txwi(dev, (__le32 *)buf, skb, wcid, NULL,
+			      true);
+	memcpy(buf + MT_TXD_SIZE, skb->data, skb->len);
+}
+
+int mt7915_mcu_add_beacon(struct ieee80211_hw *hw,
+			  struct ieee80211_vif *vif, int en)
+{
+#define MAX_BEACON_SIZE 512
+	struct mt7915_dev *dev = mt7915_hw_dev(hw);
+	struct mt7915_phy *phy = mt7915_hw_phy(hw);
+	struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+	struct ieee80211_mutable_offsets offs;
+	struct ieee80211_tx_info *info;
+	struct sk_buff *skb, *rskb;
+	struct tlv *tlv;
+	struct bss_info_bcn *bcn;
+	int len = MT7915_BEACON_UPDATE_SIZE + MAX_BEACON_SIZE;
+
+	rskb = mt7915_mcu_alloc_sta_req(dev, mvif, NULL, len);
+	if (IS_ERR(rskb))
+		return PTR_ERR(rskb);
+
+	tlv = mt7915_mcu_add_tlv(rskb, BSS_INFO_OFFLOAD, sizeof(*bcn));
+	bcn = (struct bss_info_bcn *)tlv;
+	bcn->enable = en;
+
+	skb = ieee80211_beacon_get_template(hw, vif, &offs);
+	if (!skb) {
+		/* don't leak the request message on error */
+		dev_kfree_skb(rskb);
+		return -EINVAL;
+	}
+
+	if (skb->len > MAX_BEACON_SIZE - MT_TXD_SIZE) {
+		dev_err(dev->mt76.dev, "Bcn size limit exceeded\n");
+		dev_kfree_skb(rskb);
+		dev_kfree_skb(skb);
+		return -EINVAL;
+	}
+
+	if (mvif->band_idx) {
+		info = IEEE80211_SKB_CB(skb);
+		info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;
+	}
+
+	/* TODO: subtag - bss color count & 11v MBSSID */
+	mt7915_mcu_beacon_csa(rskb, skb, bcn, &offs);
+	mt7915_mcu_beacon_cont(dev, rskb, skb, bcn, &offs);
+	dev_kfree_skb(skb);
+
+	return __mt76_mcu_skb_send_msg(&phy->dev->mt76, rskb,
+				       MCU_EXT_CMD_BSS_INFO_UPDATE, true);
+}
+
+static int mt7915_mcu_send_firmware(struct mt7915_dev *dev, const void *data,
+				    int len)
+{
+	int ret = 0, cur_len;
+
+	while (len > 0) {
+		cur_len = min_t(int, 4096 - sizeof(struct mt7915_mcu_txd),
+				len);
+
+		ret = __mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_FW_SCATTER,
+					  data, cur_len, false);
+		if (ret)
+			break;
+
+		data += cur_len;
+		len -= cur_len;
+		mt76_queue_tx_cleanup(dev, MT_TXQ_FWDL, false);
+	}
+
+	return ret;
+}
+
+static int mt7915_mcu_start_firmware(struct mt7915_dev *dev, u32 addr,
+				     u32 option)
+{
+	struct {
+		__le32 option;
+		__le32 addr;
+	} req = {
+		.option = cpu_to_le32(option),
+		.addr = cpu_to_le32(addr),
+	};
+
+	return __mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_FW_START_REQ,
+				   &req, sizeof(req), true);
+}
+
+static int mt7915_mcu_restart(struct mt76_dev *dev)
+{
+	struct {
+		u8 power_mode;
+		u8 rsv[3];
+	} req = {
+		.power_mode = 1,
+	};
+
+	return __mt76_mcu_send_msg(dev, -MCU_CMD_NIC_POWER_CTRL, &req,
+				   sizeof(req), false);
+}
+
+static int mt7915_mcu_patch_sem_ctrl(struct mt7915_dev *dev, bool get)
+{
+	struct {
+		__le32 op;
+	} req = {
+		.op = cpu_to_le32(get ?
+				  PATCH_SEM_GET : PATCH_SEM_RELEASE),
+	};
+
+	return __mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_PATCH_SEM_CONTROL,
+				   &req, sizeof(req), true);
+}
+
+static int mt7915_mcu_start_patch(struct mt7915_dev *dev)
+{
+	struct {
+		u8 check_crc;
+		u8 reserved[3];
+	} req = {
+		.check_crc = 0,
+	};
+
+	return __mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_PATCH_FINISH_REQ,
+				   &req, sizeof(req), true);
+}
+
+static int mt7915_driver_own(struct mt7915_dev *dev)
+{
+	u32 reg = mt7915_reg_map_l1(dev, MT_TOP_LPCR_HOST_BAND0);
+
+	mt76_wr(dev, reg, MT_TOP_LPCR_HOST_DRV_OWN);
+	if (!mt76_poll_msec(dev, reg, MT_TOP_LPCR_HOST_FW_OWN,
+			    0, 500)) {
+		dev_err(dev->mt76.dev, "Timeout for driver own\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int mt7915_mcu_init_download(struct mt7915_dev *dev, u32 addr,
+				    u32 len, u32 mode)
+{
+	struct {
+		__le32 addr;
+		__le32 len;
+		__le32 mode;
+	} req = {
+		.addr = cpu_to_le32(addr),
+		.len = cpu_to_le32(len),
+		.mode = cpu_to_le32(mode),
+	};
+	int attr;
+
+	if (req.addr == cpu_to_le32(MCU_PATCH_ADDRESS))
+		attr = -MCU_CMD_PATCH_START_REQ;
+	else
+		attr = -MCU_CMD_TARGET_ADDRESS_LEN_REQ;
+
+	return __mt76_mcu_send_msg(&dev->mt76, attr, &req, sizeof(req), true);
+}
+
+static int mt7915_load_patch(struct mt7915_dev *dev)
+{
+	const struct mt7915_patch_hdr *hdr;
+	const struct firmware *fw = NULL;
+	int i, ret, sem;
+
+	sem = mt7915_mcu_patch_sem_ctrl(dev, 1);
+	switch (sem) {
+	case PATCH_IS_DL:
+		return 0;
+	case PATCH_NOT_DL_SEM_SUCCESS:
+		break;
+	default:
+		dev_err(dev->mt76.dev, "Failed to get patch semaphore\n");
+		return -EAGAIN;
+	}
+
+	ret = request_firmware(&fw, MT7915_ROM_PATCH, dev->mt76.dev);
+	if (ret)
+		goto out;
+
+	if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
+		dev_err(dev->mt76.dev, "Invalid firmware\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	hdr = (const struct mt7915_patch_hdr *)(fw->data);
+
+	dev_info(dev->mt76.dev, "HW/SW Version: 0x%x, Build Time: %.16s\n",
+		 be32_to_cpu(hdr->hw_sw_ver), hdr->build_date);
+
+	for (i = 0; i < be32_to_cpu(hdr->desc.n_region); i++) {
+		struct mt7915_patch_sec *sec;
+		const u8 *dl;
+		u32 len, addr;
+
+		sec = (struct mt7915_patch_sec *)(fw->data + sizeof(*hdr) +
+						  i * sizeof(*sec));
+		if ((be32_to_cpu(sec->type) & PATCH_SEC_TYPE_MASK) !=
+		    PATCH_SEC_TYPE_INFO) {
+			ret = -EINVAL;
+			goto out;
+		}
+
+		addr = be32_to_cpu(sec->info.addr);
+		len = be32_to_cpu(sec->info.len);
+		dl = fw->data + be32_to_cpu(sec->offs);
+
+		ret = mt7915_mcu_init_download(dev, addr, len,
+					       DL_MODE_NEED_RSP);
+		if (ret) {
+			dev_err(dev->mt76.dev, "Download request failed\n");
+			goto out;
+		}
+
+		ret = mt7915_mcu_send_firmware(dev, dl, len);
+		if (ret) {
+			dev_err(dev->mt76.dev, "Failed to send patch\n");
+			goto out;
+		}
+	}
+
+	ret = mt7915_mcu_start_patch(dev);
+	if (ret)
+		dev_err(dev->mt76.dev, "Failed to start patch\n");
+
+out:
+	sem = mt7915_mcu_patch_sem_ctrl(dev, 0);
+	switch (sem) {
+	case PATCH_REL_SEM_SUCCESS:
+		break;
+	default:
+		ret = -EAGAIN;
+		dev_err(dev->mt76.dev, "Failed to release patch semaphore\n");
+		/* don't jump back to "out" here: that would spin forever
+		 * trying to release the semaphore
+		 */
+		break;
+	}
+	release_firmware(fw);
+
+	return ret;
+}
+
+static u32 mt7915_mcu_gen_dl_mode(u8 feature_set, bool is_wa)
+{
+	u32 ret = 0;
+
+	ret |= (feature_set & FW_FEATURE_SET_ENCRYPT) ?
+	       (DL_MODE_ENCRYPT | DL_MODE_RESET_SEC_IV) : 0;
+	ret |= FIELD_PREP(DL_MODE_KEY_IDX,
+			  FIELD_GET(FW_FEATURE_SET_KEY_IDX, feature_set));
+	ret |= DL_MODE_NEED_RSP;
+	ret |= is_wa ?
DL_MODE_WORKING_PDA_CR4 : 0; + + return ret; +} + +static int +mt7915_mcu_send_ram_firmware(struct mt7915_dev *dev, + const struct mt7915_fw_trailer *hdr, + const u8 *data, bool is_wa) +{ + int i, offset = 0; + u32 override = 0, option = 0; + + for (i = 0; i < hdr->n_region; i++) { + const struct mt7915_fw_region *region; + int err; + u32 len, addr, mode; + + region = (const struct mt7915_fw_region *)((const u8 *)hdr - + (hdr->n_region - i) * sizeof(*region)); + mode = mt7915_mcu_gen_dl_mode(region->feature_set, is_wa); + len = le32_to_cpu(region->len); + addr = le32_to_cpu(region->addr); + + if (region->feature_set & FW_FEATURE_OVERRIDE_ADDR) + override = addr; + + err = mt7915_mcu_init_download(dev, addr, len, mode); + if (err) { + dev_err(dev->mt76.dev, "Download request failed\n"); + return err; + } + + err = mt7915_mcu_send_firmware(dev, data + offset, len); + if (err) { + dev_err(dev->mt76.dev, "Failed to send firmware.\n"); + return err; + } + + offset += len; + } + + if (override) + option |= FW_START_OVERRIDE; + + if (is_wa) + option |= FW_START_WORKING_PDA_CR4; + + return mt7915_mcu_start_firmware(dev, override, option); +} + +static int mt7915_load_ram(struct mt7915_dev *dev) +{ + const struct mt7915_fw_trailer *hdr; + const struct firmware *fw; + int ret; + + ret = request_firmware(&fw, MT7915_FIRMWARE_WM, dev->mt76.dev); + if (ret) + return ret; + + if (!fw || !fw->data || fw->size < sizeof(*hdr)) { + dev_err(dev->mt76.dev, "Invalid firmware\n"); + ret = -EINVAL; + goto out; + } + + hdr = (const struct mt7915_fw_trailer *)(fw->data + fw->size - + sizeof(*hdr)); + + dev_info(dev->mt76.dev, "WM Firmware Version: %.10s, Build Time: %.15s\n", + hdr->fw_ver, hdr->build_date); + + ret = mt7915_mcu_send_ram_firmware(dev, hdr, fw->data, false); + if (ret) { + dev_err(dev->mt76.dev, "Failed to start WM firmware\n"); + goto out; + } + + release_firmware(fw); + + ret = request_firmware(&fw, MT7915_FIRMWARE_WA, dev->mt76.dev); + if (ret) + return ret; + + if (!fw || !fw->data || fw->size < sizeof(*hdr)) { + dev_err(dev->mt76.dev, "Invalid firmware\n"); + ret = -EINVAL; + goto out; + } + + hdr = (const struct mt7915_fw_trailer *)(fw->data + fw->size - + sizeof(*hdr)); + + dev_info(dev->mt76.dev, "WA Firmware Version: %.10s, Build Time: %.15s\n", + hdr->fw_ver, hdr->build_date); + + ret = mt7915_mcu_send_ram_firmware(dev, hdr, fw->data, true); + if (ret) { + dev_err(dev->mt76.dev, "Failed to start WA firmware\n"); + goto out; + } + + snprintf(dev->mt76.hw->wiphy->fw_version, + sizeof(dev->mt76.hw->wiphy->fw_version), + "%.10s-%.15s", hdr->fw_ver, hdr->build_date); + +out: + release_firmware(fw); + + return ret; +} + +static int mt7915_load_firmware(struct mt7915_dev *dev) +{ + int ret; + u32 val, reg = mt7915_reg_map_l1(dev, MT_TOP_MISC); + + val = FIELD_PREP(MT_TOP_MISC_FW_STATE, FW_STATE_FW_DOWNLOAD); + + if (!mt76_poll_msec(dev, reg, MT_TOP_MISC_FW_STATE, val, 1000)) { + /* restart firmware once */ + __mt76_mcu_restart(&dev->mt76); + if (!mt76_poll_msec(dev, reg, MT_TOP_MISC_FW_STATE, + val, 1000)) { + dev_err(dev->mt76.dev, + "Firmware is not ready for download\n"); + return -EIO; + } + } + + ret = mt7915_load_patch(dev); + if (ret) + return ret; + + ret = mt7915_load_ram(dev); + if (ret) + return ret; + + if (!mt76_poll_msec(dev, reg, MT_TOP_MISC_FW_STATE, + FIELD_PREP(MT_TOP_MISC_FW_STATE, + FW_STATE_WACPU_RDY), 1000)) { + dev_err(dev->mt76.dev, "Timeout for initializing firmware\n"); + return -EIO; + } + + mt76_queue_tx_cleanup(dev, MT_TXQ_FWDL, false); + + dev_dbg(dev->mt76.dev, 
"Firmware init done\n"); + + return 0; +} + +int mt7915_mcu_fw_log_2_host(struct mt7915_dev *dev, u8 ctrl) +{ + struct { + u8 ctrl_val; + u8 pad[3]; + } data = { + .ctrl_val = ctrl + }; + + return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_FW_LOG_2_HOST, + &data, sizeof(data), true); +} + +int mt7915_mcu_fw_dbg_ctrl(struct mt7915_dev *dev, u32 module, u8 level) +{ + struct { + u8 ver; + u8 pad; + u16 len; + u8 level; + u8 rsv[3]; + __le32 module_idx; + } data = { + .module_idx = cpu_to_le32(module), + .level = level, + }; + + return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_FW_DBG_CTRL, + &data, sizeof(data), false); +} + +int mt7915_mcu_init(struct mt7915_dev *dev) +{ + static const struct mt76_mcu_ops mt7915_mcu_ops = { + .headroom = sizeof(struct mt7915_mcu_txd), + .mcu_skb_send_msg = mt7915_mcu_send_message, + .mcu_send_msg = mt7915_mcu_msg_send, + .mcu_restart = mt7915_mcu_restart, + }; + int ret; + + dev->mt76.mcu_ops = &mt7915_mcu_ops, + + ret = mt7915_driver_own(dev); + if (ret) + return ret; + + ret = mt7915_load_firmware(dev); + if (ret) + return ret; + + set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state); + mt7915_mcu_fw_log_2_host(dev, 0); + + return 0; +} + +void mt7915_mcu_exit(struct mt7915_dev *dev) +{ + u32 reg = mt7915_reg_map_l1(dev, MT_TOP_MISC); + + __mt76_mcu_restart(&dev->mt76); + if (!mt76_poll_msec(dev, reg, MT_TOP_MISC_FW_STATE, + FIELD_PREP(MT_TOP_MISC_FW_STATE, + FW_STATE_FW_DOWNLOAD), 1000)) { + dev_err(dev->mt76.dev, "Failed to exit mcu\n"); + return; + } + + reg = mt7915_reg_map_l1(dev, MT_TOP_LPCR_HOST_BAND0); + mt76_wr(dev, reg, MT_TOP_LPCR_HOST_FW_OWN); + skb_queue_purge(&dev->mt76.mcu.res_q); +} + +int mt7915_mcu_set_mac(struct mt7915_dev *dev, int band, + bool enable, bool hdr_trans) +{ + struct { + u8 operation; + u8 enable; + u8 check_bssid; + u8 insert_vlan; + u8 remove_vlan; + u8 tid; + u8 mode; + u8 rsv; + } __packed req_trans = { + .enable = hdr_trans, + }; + struct { + u8 enable; + u8 band; + u8 rsv[2]; + } __packed req_mac = { + .enable = enable, + .band = band, + }; + int ret; + + ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_RX_HDR_TRANS, + &req_trans, sizeof(req_trans), false); + if (ret) + return ret; + + return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_MAC_INIT_CTRL, + &req_mac, sizeof(req_mac), true); +} + +int mt7915_mcu_set_scs(struct mt7915_dev *dev, u8 band, bool enable) +{ + struct { + __le32 cmd; + u8 band; + u8 enable; + } __packed req = { + .cmd = cpu_to_le32(SCS_ENABLE), + .band = band, + .enable = enable + 1, + }; + + return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SCS_CTRL, &req, + sizeof(req), false); +} + +int mt7915_mcu_set_rts_thresh(struct mt7915_phy *phy, u32 val) +{ + struct mt7915_dev *dev = phy->dev; + struct { + u8 prot_idx; + u8 band; + u8 rsv[2]; + __le32 len_thresh; + __le32 pkt_thresh; + } __packed req = { + .prot_idx = 1, + .band = phy != &dev->phy, + .len_thresh = cpu_to_le32(val), + .pkt_thresh = cpu_to_le32(0x2), + }; + + return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_PROTECT_CTRL, + &req, sizeof(req), true); +} + +int mt7915_mcu_set_tx(struct mt7915_dev *dev, struct ieee80211_vif *vif) +{ +#define WMM_AIFS_SET BIT(0) +#define WMM_CW_MIN_SET BIT(1) +#define WMM_CW_MAX_SET BIT(2) +#define WMM_TXOP_SET BIT(3) +#define WMM_PARAM_SET GENMASK(3, 0) +#define TX_CMD_MODE 1 + struct edca { + u8 queue; + u8 set; + u8 aifs; + u8 cw_min; + __le16 cw_max; + __le16 txop; + }; + struct mt7915_mcu_tx { + u8 total; + u8 action; + u8 valid; + u8 mode; + + struct edca edca[IEEE80211_NUM_ACS]; + } __packed req = { + 
.valid = true,
+		.mode = TX_CMD_MODE,
+		.total = IEEE80211_NUM_ACS,
+	};
+	struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+	int ac;
+
+	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+		struct edca *e = &req.edca[ac];
+
+		e->queue = ac + mvif->wmm_idx * MT7915_MAX_WMM_SETS;
+		e->aifs = mvif->wmm[ac].aifs;
+		e->txop = cpu_to_le16(mvif->wmm[ac].txop);
+
+		if (mvif->wmm[ac].cw_min)
+			e->cw_min = fls(mvif->wmm[ac].cw_min);
+		else
+			e->cw_min = 5;
+
+		if (mvif->wmm[ac].cw_max)
+			e->cw_max = cpu_to_le16(fls(mvif->wmm[ac].cw_max));
+		else
+			e->cw_max = cpu_to_le16(10);
+	}
+
+	return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_EDCA_UPDATE,
+				   &req, sizeof(req), true);
+}
+
+int mt7915_mcu_set_pm(struct mt7915_dev *dev, int band, int enter)
+{
+#define ENTER_PM_STATE	1
+#define EXIT_PM_STATE	2
+	struct {
+		u8 pm_number;
+		u8 pm_state;
+		u8 bssid[ETH_ALEN];
+		u8 dtim_period;
+		u8 wlan_idx_lo;
+		__le16 bcn_interval;
+		__le32 aid;
+		__le32 rx_filter;
+		u8 band_idx;
+		u8 wlan_idx_hi;
+		u8 rsv[2];
+		__le32 feature;
+		u8 omac_idx;
+		u8 wmm_idx;
+		u8 bcn_loss_cnt;
+		u8 bcn_sp_duration;
+	} __packed req = {
+		.pm_number = 5,
+		.pm_state = (enter) ? ENTER_PM_STATE : EXIT_PM_STATE,
+		.band_idx = band,
+	};
+
+	return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_PM_STATE_CTRL,
+				   &req, sizeof(req), true);
+}
+
+int mt7915_mcu_rdd_cmd(struct mt7915_dev *dev,
+		       enum mt7915_rdd_cmd cmd, u8 index,
+		       u8 rx_sel, u8 val)
+{
+	struct {
+		u8 ctrl;
+		u8 rdd_idx;
+		u8 rdd_rx_sel;
+		u8 val;
+		u8 rsv[4];
+	} __packed req = {
+		.ctrl = cmd,
+		.rdd_idx = index,
+		.rdd_rx_sel = rx_sel,
+		.val = val,
+	};
+
+	return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_CTRL,
+				   &req, sizeof(req), true);
+}
+
+int mt7915_mcu_set_fcc5_lpn(struct mt7915_dev *dev, int val)
+{
+	struct {
+		__le32 tag;
+		__le16 min_lpn;
+		u8 rsv[2];
+	} __packed req = {
+		.tag = cpu_to_le32(0x1),
+		.min_lpn = cpu_to_le16(val),
+	};
+
+	return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_TH,
+				   &req, sizeof(req), true);
+}
+
+int mt7915_mcu_set_pulse_th(struct mt7915_dev *dev,
+			    const struct mt7915_dfs_pulse *pulse)
+{
+	struct {
+		__le32 tag;
+		struct mt7915_dfs_pulse pulse;
+	} __packed req = {
+		.tag = cpu_to_le32(0x3),
+	};
+
+	memcpy(&req.pulse, pulse, sizeof(*pulse));
+
+	return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_TH,
+				   &req, sizeof(req), true);
+}
+
+int mt7915_mcu_set_radar_th(struct mt7915_dev *dev, int index,
+			    const struct mt7915_dfs_pattern *pattern)
+{
+	struct {
+		__le32 tag;
+		__le16 radar_type;
+		struct mt7915_dfs_pattern pattern;
+	} __packed req = {
+		.tag = cpu_to_le32(0x2),
+		.radar_type = cpu_to_le16(index),
+	};
+
+	memcpy(&req.pattern, pattern, sizeof(*pattern));
+
+	return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_TH,
+				   &req, sizeof(req), true);
+}
+
+int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd)
+{
+	struct mt7915_dev *dev = phy->dev;
+	struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
+	int freq1 = chandef->center_freq1;
+	struct {
+		u8 control_ch;
+		u8 center_ch;
+		u8 bw;
+		u8 tx_streams_num;
+		u8 rx_streams;	/* mask or num */
+		u8 switch_reason;
+		u8 band_idx;
+		u8 center_ch2;	/* for 80+80 only */
+		__le16 cac_case;
+		u8 channel_band;
+		u8 rsv0;
+		__le32 outband_freq;
+		u8 txpower_drop;
+		u8 ap_bw;
+		u8 ap_center_ch;
+		u8 rsv1[57];
+	} __packed req = {
+		.control_ch = chandef->chan->hw_value,
+		.center_ch = ieee80211_frequency_to_channel(freq1),
+		.bw = mt7915_mcu_chan_bw(chandef),
+		.tx_streams_num = hweight8(phy->mt76->antenna_mask),
+		.rx_streams = phy->chainmask,
+		.band_idx = phy != &dev->phy,
+		.channel_band = chandef->chan->band,
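+		/* switch_reason, rx_streams and center_ch2 (80+80 only)
+		 * are fixed up below before the command is sent
+		 */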
}; + + if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) && + chandef->chan->dfs_state != NL80211_DFS_AVAILABLE) + req.switch_reason = CH_SWITCH_DFS; + else + req.switch_reason = CH_SWITCH_NORMAL; + + if (cmd == MCU_EXT_CMD_CHANNEL_SWITCH) + req.rx_streams = hweight8(req.rx_streams); + + if (chandef->width == NL80211_CHAN_WIDTH_80P80) { + int freq2 = chandef->center_freq2; + + req.center_ch2 = ieee80211_frequency_to_channel(freq2); + } + + return __mt76_mcu_send_msg(&dev->mt76, cmd, &req, sizeof(req), true); +} + +int mt7915_mcu_set_eeprom(struct mt7915_dev *dev) +{ + struct req_hdr { + u8 buffer_mode; + u8 format; + __le16 len; + } __packed req = { + .buffer_mode = EE_MODE_EFUSE, + .format = EE_FORMAT_WHOLE, + }; + + return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_EFUSE_BUFFER_MODE, + &req, sizeof(req), true); +} + +int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset) +{ + struct mt7915_mcu_eeprom_info req = { + .addr = cpu_to_le32(round_down(offset, 16)), + }; + + return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_EFUSE_ACCESS, &req, + sizeof(req), true); +} + +int mt7915_mcu_get_temperature(struct mt7915_dev *dev, int index) +{ + struct { + u8 ctrl_id; + u8 action; + u8 band; + u8 rsv[5]; + } req = { + .ctrl_id = THERMAL_SENSOR_TEMP_QUERY, + .action = index, + }; + + return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_THERMAL_CTRL, &req, + sizeof(req), true); +} + +int mt7915_mcu_get_rate_info(struct mt7915_dev *dev, u32 cmd, u16 wlan_idx) +{ + struct { + __le32 cmd; + __le16 wlan_idx; + __le16 ru_idx; + __le16 direction; + __le16 dump_group; + } req = { + .cmd = cpu_to_le32(cmd), + .wlan_idx = cpu_to_le16(wlan_idx), + .dump_group = cpu_to_le16(1), + }; + + return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_RATE_CTRL, &req, + sizeof(req), false); +} + +int mt7915_mcu_set_sku(struct mt7915_phy *phy) +{ + struct mt7915_dev *dev = phy->dev; + struct mt76_phy *mphy = phy->mt76; + struct ieee80211_hw *hw = mphy->hw; + struct mt7915_sku_val { + u8 format_id; + u8 limit_type; + u8 dbdc_idx; + s8 val[MT7915_SKU_RATE_NUM]; + } __packed req = { + .format_id = 4, + .dbdc_idx = phy != &dev->phy, + }; + int i; + s8 *delta; + + delta = dev->rate_power[mphy->chandef.chan->band]; + mphy->txpower_cur = hw->conf.power_level * 2 + + delta[MT7915_SKU_MAX_DELTA_IDX]; + + for (i = 0; i < MT7915_SKU_RATE_NUM; i++) + req.val[i] = hw->conf.power_level * 2 + delta[i]; + + return __mt76_mcu_send_msg(&dev->mt76, + MCU_EXT_CMD_TX_POWER_FEATURE_CTRL, + &req, sizeof(req), true); +} + +int mt7915_mcu_set_sku_en(struct mt7915_phy *phy, bool enable) +{ + struct mt7915_dev *dev = phy->dev; + struct mt7915_sku { + u8 format_id; + u8 sku_enable; + u8 dbdc_idx; + u8 rsv; + } __packed req = { + .format_id = 0, + .dbdc_idx = phy != &dev->phy, + .sku_enable = enable, + }; + + return __mt76_mcu_send_msg(&dev->mt76, + MCU_EXT_CMD_TX_POWER_FEATURE_CTRL, + &req, sizeof(req), true); +} + +int mt7915_mcu_set_ser(struct mt7915_dev *dev, u8 action, u8 set, u8 band) +{ + struct { + u8 action; + u8 set; + u8 band; + u8 rsv; + } req = { + .action = action, + .set = set, + .band = band, + }; + + return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_SER_TRIGGER, + &req, sizeof(req), false); +} + +int mt7915_mcu_set_txbf_type(struct mt7915_dev *dev) +{ +#define MT_BF_TYPE_UPDATE 20 + struct { + u8 action; + bool ebf; + bool ibf; + u8 rsv; + } __packed req = { + .action = MT_BF_TYPE_UPDATE, + .ebf = true, + .ibf = false, + }; + + return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_TXBF_ACTION, + &req, sizeof(req), true); +} + +int 
mt7915_mcu_set_txbf_sounding(struct mt7915_dev *dev) +{ +#define MT_BF_PROCESSING 4 + struct { + u8 action; + u8 snd_mode; + u8 sta_num; + u8 rsv; + u8 wlan_idx[4]; + __le32 snd_period; /* ms */ + } __packed req = { + .action = true, + .snd_mode = MT_BF_PROCESSING, + }; + + return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_TXBF_ACTION, + &req, sizeof(req), true); +} + +int mt7915_mcu_add_obss_spr(struct mt7915_dev *dev, struct ieee80211_vif *vif, + bool enable) +{ +#define MT_SPR_ENABLE 1 + struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; + struct { + u8 action; + u8 arg_num; + u8 band_idx; + u8 status; + u8 drop_tx_idx; + u8 sta_idx; /* 256 sta */ + u8 rsv[2]; + u32 val; + } __packed req = { + .action = MT_SPR_ENABLE, + .arg_num = 1, + .band_idx = mvif->band_idx, + .val = enable, + }; + + return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_SPR, + &req, sizeof(req), true); +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h new file mode 100644 index 000000000000..c241dd7c4c36 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h @@ -0,0 +1,1034 @@ +/* SPDX-License-Identifier: ISC */ +/* Copyright (C) 2020 MediaTek Inc. */ + +#ifndef __MT7915_MCU_H +#define __MT7915_MCU_H + +struct mt7915_mcu_txd { + __le32 txd[8]; + + __le16 len; + __le16 pq_id; + + u8 cid; + u8 pkt_type; + u8 set_query; /* FW don't care */ + u8 seq; + + u8 uc_d2b0_rev; + u8 ext_cid; + u8 s2d_index; + u8 ext_cid_ack; + + u32 reserved[5]; +} __packed __aligned(4); + +/* event table */ +enum { + MCU_EVENT_TARGET_ADDRESS_LEN = 0x01, + MCU_EVENT_FW_START = 0x01, + MCU_EVENT_GENERIC = 0x01, + MCU_EVENT_ACCESS_REG = 0x02, + MCU_EVENT_MT_PATCH_SEM = 0x04, + MCU_EVENT_CH_PRIVILEGE = 0x18, + MCU_EVENT_EXT = 0xed, + MCU_EVENT_RESTART_DL = 0xef, +}; + +/* ext event table */ +enum { + MCU_EXT_EVENT_PS_SYNC = 0x5, + MCU_EXT_EVENT_FW_LOG_2_HOST = 0x13, + MCU_EXT_EVENT_THERMAL_PROTECT = 0x22, + MCU_EXT_EVENT_ASSERT_DUMP = 0x23, + MCU_EXT_EVENT_RDD_REPORT = 0x3a, + MCU_EXT_EVENT_CSA_NOTIFY = 0x4f, + MCU_EXT_EVENT_RATE_REPORT = 0x87, +}; + +struct mt7915_mcu_rxd { + __le32 rxd[6]; + + __le16 len; + __le16 pkt_type_id; + + u8 eid; + u8 seq; + __le16 __rsv; + + u8 ext_eid; + u8 __rsv1[2]; + u8 s2d_index; +}; + +struct mt7915_mcu_rdd_report { + struct mt7915_mcu_rxd rxd; + + u8 idx; + u8 long_detected; + u8 constant_prf_detected; + u8 staggered_prf_detected; + u8 radar_type_idx; + u8 periodic_pulse_num; + u8 long_pulse_num; + u8 hw_pulse_num; + + u8 out_lpn; + u8 out_spn; + u8 out_crpn; + u8 out_crpw; + u8 out_crbn; + u8 out_stgpn; + u8 out_stgpw; + + u8 rsv; + + __le32 out_pri_const; + __le32 out_pri_stg[3]; + + struct { + __le32 start; + __le16 pulse_width; + __le16 pulse_power; + u8 mdrdy_flag; + u8 rsv[3]; + } long_pulse[32]; + + struct { + __le32 start; + __le16 pulse_width; + __le16 pulse_power; + u8 mdrdy_flag; + u8 rsv[3]; + } periodic_pulse[32]; + + struct { + __le32 start; + __le16 pulse_width; + __le16 pulse_power; + u8 sc_pass; + u8 sw_reset; + u8 mdrdy_flag; + u8 tx_active; + } hw_pulse[32]; +} __packed; + +struct mt7915_mcu_eeprom_info { + __le32 addr; + __le32 valid; + u8 data[16]; +} __packed; + +struct mt7915_mcu_ra_info { + struct mt7915_mcu_rxd rxd; + + __le32 event_id; + __le16 wlan_idx; + __le16 ru_idx; + __le16 direction; + __le16 dump_group; + + __le32 suggest_rate; + __le32 min_rate; /* for dynamic sounding */ + __le32 max_rate; /* for dynamic sounding */ + __le32 init_rate_down_rate; + + __le16 curr_rate; + __le16 
init_rate_down_total; + __le16 init_rate_down_succ; + __le16 success; + __le16 attempts; + + __le16 prev_rate; + __le16 prob_up_rate; + u8 no_rate_up_cnt; + u8 ppdu_cnt; + u8 gi; + + u8 try_up_fail; + u8 try_up_total; + u8 suggest_wf; + u8 try_up_check; + u8 prob_up_period; + u8 prob_down_pending; +} __packed; + +#define MT_RA_RATE_NSS GENMASK(8, 6) +#define MT_RA_RATE_MCS GENMASK(3, 0) +#define MT_RA_RATE_TX_MODE GENMASK(12, 9) +#define MT_RA_RATE_DCM_EN BIT(4) +#define MT_RA_RATE_BW GENMASK(14, 13) + +#define MCU_PQ_ID(p, q) (((p) << 15) | ((q) << 10)) +#define MCU_PKT_ID 0xa0 + +enum { + MCU_Q_QUERY, + MCU_Q_SET, + MCU_Q_RESERVED, + MCU_Q_NA +}; + +enum { + MCU_S2D_H2N, + MCU_S2D_C2N, + MCU_S2D_H2C, + MCU_S2D_H2CN +}; + +enum { + MCU_CMD_TARGET_ADDRESS_LEN_REQ = 0x01, + MCU_CMD_FW_START_REQ = 0x02, + MCU_CMD_INIT_ACCESS_REG = 0x3, + MCU_CMD_NIC_POWER_CTRL = 0x4, + MCU_CMD_PATCH_START_REQ = 0x05, + MCU_CMD_PATCH_FINISH_REQ = 0x07, + MCU_CMD_PATCH_SEM_CONTROL = 0x10, + MCU_CMD_EXT_CID = 0xED, + MCU_CMD_FW_SCATTER = 0xEE, + MCU_CMD_RESTART_DL_REQ = 0xEF, +}; + +enum { + MCU_EXT_CMD_EFUSE_ACCESS = 0x01, + MCU_EXT_CMD_PM_STATE_CTRL = 0x07, + MCU_EXT_CMD_CHANNEL_SWITCH = 0x08, + MCU_EXT_CMD_FW_LOG_2_HOST = 0x13, + MCU_EXT_CMD_TXBF_ACTION = 0x1e, + MCU_EXT_CMD_EFUSE_BUFFER_MODE = 0x21, + MCU_EXT_CMD_STA_REC_UPDATE = 0x25, + MCU_EXT_CMD_BSS_INFO_UPDATE = 0x26, + MCU_EXT_CMD_EDCA_UPDATE = 0x27, + MCU_EXT_CMD_DEV_INFO_UPDATE = 0x2A, + MCU_EXT_CMD_THERMAL_CTRL = 0x2c, + MCU_EXT_CMD_SET_RDD_CTRL = 0x3a, + MCU_EXT_CMD_PROTECT_CTRL = 0x3e, + MCU_EXT_CMD_MAC_INIT_CTRL = 0x46, + MCU_EXT_CMD_RX_HDR_TRANS = 0x47, + MCU_EXT_CMD_SET_RX_PATH = 0x4e, + MCU_EXT_CMD_TX_POWER_FEATURE_CTRL = 0x58, + MCU_EXT_CMD_SET_SER_TRIGGER = 0x81, + MCU_EXT_CMD_SCS_CTRL = 0x82, + MCU_EXT_CMD_RATE_CTRL = 0x87, + MCU_EXT_CMD_FW_DBG_CTRL = 0x95, + MCU_EXT_CMD_SET_RDD_TH = 0x9d, + MCU_EXT_CMD_SET_SPR = 0xa8, +}; + +enum { + PATCH_SEM_RELEASE, + PATCH_SEM_GET +}; + +enum { + PATCH_NOT_DL_SEM_FAIL, + PATCH_IS_DL, + PATCH_NOT_DL_SEM_SUCCESS, + PATCH_REL_SEM_SUCCESS +}; + +enum { + FW_STATE_INITIAL, + FW_STATE_FW_DOWNLOAD, + FW_STATE_NORMAL_OPERATION, + FW_STATE_NORMAL_TRX, + FW_STATE_WACPU_RDY = 7 +}; + +enum { + EE_MODE_EFUSE, + EE_MODE_BUFFER, +}; + +enum { + EE_FORMAT_BIN, + EE_FORMAT_WHOLE, + EE_FORMAT_MULTIPLE, +}; + +#define STA_TYPE_STA BIT(0) +#define STA_TYPE_AP BIT(1) +#define STA_TYPE_ADHOC BIT(2) +#define STA_TYPE_WDS BIT(4) +#define STA_TYPE_BC BIT(5) + +#define NETWORK_INFRA BIT(16) +#define NETWORK_P2P BIT(17) +#define NETWORK_IBSS BIT(18) +#define NETWORK_WDS BIT(21) + +#define CONNECTION_INFRA_STA (STA_TYPE_STA | NETWORK_INFRA) +#define CONNECTION_INFRA_AP (STA_TYPE_AP | NETWORK_INFRA) +#define CONNECTION_P2P_GC (STA_TYPE_STA | NETWORK_P2P) +#define CONNECTION_P2P_GO (STA_TYPE_AP | NETWORK_P2P) +#define CONNECTION_IBSS_ADHOC (STA_TYPE_ADHOC | NETWORK_IBSS) +#define CONNECTION_WDS (STA_TYPE_WDS | NETWORK_WDS) +#define CONNECTION_INFRA_BC (STA_TYPE_BC | NETWORK_INFRA) + +#define CONN_STATE_DISCONNECT 0 +#define CONN_STATE_CONNECT 1 +#define CONN_STATE_PORT_SECURE 2 + +enum { + DEV_INFO_ACTIVE, + DEV_INFO_MAX_NUM +}; + +enum { + SCS_SEND_DATA, + SCS_SET_MANUAL_PD_TH, + SCS_CONFIG, + SCS_ENABLE, + SCS_SHOW_INFO, + SCS_GET_GLO_ADDR, + SCS_GET_GLO_ADDR_EVENT, +}; + +enum { + CMD_CBW_20MHZ = IEEE80211_STA_RX_BW_20, + CMD_CBW_40MHZ = IEEE80211_STA_RX_BW_40, + CMD_CBW_80MHZ = IEEE80211_STA_RX_BW_80, + CMD_CBW_160MHZ = IEEE80211_STA_RX_BW_160, + CMD_CBW_10MHZ, + CMD_CBW_5MHZ, + CMD_CBW_8080MHZ, + + CMD_HE_MCS_BW80 = 0, + 
CMD_HE_MCS_BW160, + CMD_HE_MCS_BW8080, + CMD_HE_MCS_BW_NUM +}; + +struct tlv { + __le16 tag; + __le16 len; +} __packed; + +struct bss_info_omac { + __le16 tag; + __le16 len; + u8 hw_bss_idx; + u8 omac_idx; + u8 band_idx; + u8 rsv0; + __le32 conn_type; + u32 rsv1; +} __packed; + +struct bss_info_basic { + __le16 tag; + __le16 len; + __le32 network_type; + u8 active; + u8 rsv0; + __le16 bcn_interval; + u8 bssid[ETH_ALEN]; + u8 wmm_idx; + u8 dtim_period; + u8 bmc_wcid_lo; + u8 cipher; + u8 phy_mode; + u8 max_bssid; /* max BSSID. range: 1 ~ 8, 0: MBSSID disabled */ + u8 non_tx_bssid;/* non-transmitted BSSID, 0: transmitted BSSID */ + u8 bmc_wcid_hi; /* high Byte and version */ + u8 rsv[2]; +} __packed; + +struct bss_info_rf_ch { + __le16 tag; + __le16 len; + u8 pri_ch; + u8 center_ch0; + u8 center_ch1; + u8 bw; + u8 he_ru26_block; /* 1: don't send HETB in RU26, 0: allow */ + u8 he_all_disable; /* 1: disallow all HETB, 0: allow */ + u8 rsv[2]; +} __packed; + +struct bss_info_ext_bss { + __le16 tag; + __le16 len; + __le32 mbss_tsf_offset; /* in unit of us */ + u8 rsv[8]; +} __packed; + +struct bss_info_sync_mode { + __le16 tag; + __le16 len; + __le16 bcn_interval; + u8 enable; + u8 dtim_period; + u8 rsv[8]; +} __packed; + +struct bss_info_bmc_rate { + __le16 tag; + __le16 len; + __le16 bc_trans; + __le16 mc_trans; + u8 short_preamble; + u8 rsv[7]; +} __packed; + +struct bss_info_ra { + __le16 tag; + __le16 len; + u8 op_mode; + u8 adhoc_en; + u8 short_preamble; + u8 tx_streams; + u8 rx_streams; + u8 algo; + u8 force_sgi; + u8 force_gf; + u8 ht_mode; + u8 has_20_sta; /* Check if any sta support GF. */ + u8 bss_width_trigger_events; + u8 vht_nss_cap; + u8 vht_bw_signal; /* not use */ + u8 vht_force_sgi; /* not use */ + u8 se_off; + u8 antenna_idx; + u8 train_up_rule; + u8 rsv[3]; + unsigned short train_up_high_thres; + short train_up_rule_rssi; + unsigned short low_traffic_thres; + __le16 max_phyrate; + __le32 phy_cap; + __le32 interval; + __le32 fast_interval; +} __packed; + +struct bss_info_he { + __le16 tag; + __le16 len; + u8 he_pe_duration; + u8 vht_op_info_present; + __le16 he_rts_thres; + __le16 max_nss_mcs[CMD_HE_MCS_BW_NUM]; + u8 rsv[6]; +} __packed; + +struct bss_info_bcn { + __le16 tag; + __le16 len; + u8 ver; + u8 enable; + __le16 sub_ntlv; +} __packed __aligned(4); + +struct bss_info_bcn_csa { + __le16 tag; + __le16 len; + u8 cnt; + u8 rsv[3]; +} __packed __aligned(4); + +struct bss_info_bcn_bcc { + __le16 tag; + __le16 len; + u8 cnt; + u8 rsv[3]; +} __packed __aligned(4); + +struct bss_info_bcn_mbss { +#define MAX_BEACON_NUM 32 + __le16 tag; + __le16 len; + __le32 bitmap; + __le16 offset[MAX_BEACON_NUM]; + u8 rsv[8]; +} __packed __aligned(4); + +struct bss_info_bcn_cont { + __le16 tag; + __le16 len; + __le16 tim_ofs; + __le16 csa_ofs; + __le16 bcc_ofs; + __le16 pkt_len; +} __packed __aligned(4); + +enum { + BSS_INFO_BCN_CSA, + BSS_INFO_BCN_BCC, + BSS_INFO_BCN_MBSSID, + BSS_INFO_BCN_CONTENT, + BSS_INFO_BCN_MAX +}; + +enum { + BSS_INFO_OMAC, + BSS_INFO_BASIC, + BSS_INFO_RF_CH, /* optional, for BT/LTE coex */ + BSS_INFO_PM, /* sta only */ + BSS_INFO_UAPSD, /* sta only */ + BSS_INFO_ROAM_DETECT, /* obsoleted */ + BSS_INFO_LQ_RM, /* obsoleted */ + BSS_INFO_EXT_BSS, + BSS_INFO_BMC_RATE, /* for bmc rate control in CR4 */ + BSS_INFO_SYNC_MODE, + BSS_INFO_RA, + BSS_INFO_HW_AMSDU, + BSS_INFO_BSS_COLOR, + BSS_INFO_HE_BASIC, + BSS_INFO_PROTECT_INFO, + BSS_INFO_OFFLOAD, + BSS_INFO_11V_MBSSID, + BSS_INFO_MAX_NUM +}; + +enum { + WTBL_RESET_AND_SET = 1, + WTBL_SET, + WTBL_QUERY, + WTBL_RESET_ALL +}; 
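+ +/* A WTBL update message is a struct wtbl_req_hdr followed by tlv_num + * tag/length blocks built from the structures below; the total size is + * bounded by MT7915_WTBL_UPDATE_MAX_SIZE. Illustrative sketch only (not + * part of this patch; wcid, tid and ssn are assumed inputs): + * + * u8 buf[MT7915_WTBL_UPDATE_BA_SIZE] = {}; + * struct wtbl_req_hdr *hdr = (struct wtbl_req_hdr *)buf; + * struct wtbl_ba *ba = (struct wtbl_ba *)(hdr + 1); + * + * hdr->wlan_idx_lo = wcid & 0xff; + * hdr->wlan_idx_hi = wcid >> 8; + * hdr->operation = WTBL_SET; + * hdr->tlv_num = cpu_to_le16(1); + * + * ba->tag = cpu_to_le16(WTBL_BA); + * ba->len = cpu_to_le16(sizeof(*ba)); + * ba->tid = tid; + * ba->ba_type = MT_BA_TYPE_ORIGINATOR; + * ba->sn = cpu_to_le16(ssn); + * ba->ba_en = 1; + */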
+ +struct wtbl_req_hdr { + u8 wlan_idx_lo; + u8 operation; + __le16 tlv_num; + u8 wlan_idx_hi; + u8 rsv[3]; +} __packed; + +struct wtbl_generic { + __le16 tag; + __le16 len; + u8 peer_addr[ETH_ALEN]; + u8 muar_idx; + u8 skip_tx; + u8 cf_ack; + u8 qos; + u8 mesh; + u8 adm; + __le16 partial_aid; + u8 baf_en; + u8 aad_om; +} __packed; + +struct wtbl_rx { + __le16 tag; + __le16 len; + u8 rcid; + u8 rca1; + u8 rca2; + u8 rv; + u8 rsv[4]; +} __packed; + +struct wtbl_ht { + __le16 tag; + __le16 len; + u8 ht; + u8 ldpc; + u8 af; + u8 mm; + u8 rsv[4]; +} __packed; + +struct wtbl_vht { + __le16 tag; + __le16 len; + u8 ldpc; + u8 dyn_bw; + u8 vht; + u8 txop_ps; + u8 rsv[4]; +} __packed; + +enum { + MT_BA_TYPE_INVALID, + MT_BA_TYPE_ORIGINATOR, + MT_BA_TYPE_RECIPIENT +}; + +enum { + RST_BA_MAC_TID_MATCH, + RST_BA_MAC_MATCH, + RST_BA_NO_MATCH +}; + +struct wtbl_ba { + __le16 tag; + __le16 len; + /* common */ + u8 tid; + u8 ba_type; + u8 rsv0[2]; + /* originator only */ + __le16 sn; + u8 ba_en; + u8 ba_winsize_idx; + __le16 ba_winsize; + /* recipient only */ + u8 peer_addr[ETH_ALEN]; + u8 rst_ba_tid; + u8 rst_ba_sel; + u8 rst_ba_sb; + u8 band_idx; + u8 rsv1[4]; +} __packed; + +struct wtbl_smps { + __le16 tag; + __le16 len; + u8 smps; + u8 rsv[3]; +} __packed; + +enum { + WTBL_GENERIC, + WTBL_RX, + WTBL_HT, + WTBL_VHT, + WTBL_PEER_PS, /* not used */ + WTBL_TX_PS, + WTBL_HDR_TRANS, + WTBL_SEC_KEY, + WTBL_BA, + WTBL_RDG, /* obsoleted */ + WTBL_PROTECT, /* not used */ + WTBL_CLEAR, /* not used */ + WTBL_BF, + WTBL_SMPS, + WTBL_RAW_DATA, /* debug only */ + WTBL_PN, + WTBL_SPE, + WTBL_MAX_NUM +}; + +struct sta_ntlv_hdr { + u8 rsv[2]; + __le16 tlv_num; +} __packed; + +struct sta_req_hdr { + u8 bss_idx; + u8 wlan_idx_lo; + __le16 tlv_num; + u8 is_tlv_append; + u8 muar_idx; + u8 wlan_idx_hi; + u8 rsv; +} __packed; + +struct sta_rec_basic { + __le16 tag; + __le16 len; + __le32 conn_type; + u8 conn_state; + u8 qos; + __le16 aid; + u8 peer_addr[ETH_ALEN]; + __le16 extra_info; +} __packed; + +struct sta_rec_ht { + __le16 tag; + __le16 len; + __le16 ht_cap; + u16 rsv; +} __packed; + +struct sta_rec_vht { + __le16 tag; + __le16 len; + __le32 vht_cap; + __le16 vht_rx_mcs_map; + __le16 vht_tx_mcs_map; + u8 rts_bw_sig; + u8 rsv[3]; +} __packed; + +struct sta_rec_muru { + __le16 tag; + __le16 len; + + struct { + bool ofdma_dl_en; + bool ofdma_ul_en; + bool mimo_dl_en; + bool mimo_ul_en; + bool rsv[4]; + } cfg; + + struct { + u8 punc_pream_rx; + bool he_20m_in_40m_2g; + bool he_20m_in_160m; + bool he_80m_in_160m; + bool lt16_sigb; + bool rx_su_comp_sigb; + bool rx_su_non_comp_sigb; + bool rsv; + } ofdma_dl; + + struct { + u8 t_frame_dur; + u8 mu_cascading; + u8 uo_ra; + u8 he_2x996_tone; + u8 rx_t_frame_11ac; + u8 rsv[3]; + } ofdma_ul; + + struct { + bool vht_mu_bfee; + bool partial_bw_dl_mimo; + u8 rsv[2]; + } mimo_dl; + + struct { + bool full_ul_mimo; + bool partial_ul_mimo; + u8 rsv[2]; + } mimo_ul; +} __packed; + +struct sta_rec_he { + __le16 tag; + __le16 len; + + __le32 he_cap; + + u8 t_frame_dur; + u8 max_ampdu_exp; + u8 bw_set; + u8 device_class; + u8 dcm_tx_mode; + u8 dcm_tx_max_nss; + u8 dcm_rx_mode; + u8 dcm_rx_max_nss; + u8 dcm_max_ru; + u8 punc_pream_rx; + u8 pkt_ext; + u8 rsv1; + + __le16 max_nss_mcs[CMD_HE_MCS_BW_NUM]; + + u8 rsv2[2]; +} __packed; + +struct sta_rec_ba { + __le16 tag; + __le16 len; + u8 tid; + u8 ba_type; + u8 amsdu; + u8 ba_en; + __le16 ssn; + __le16 winsize; +} __packed; + +struct sec_key { + u8 cipher_id; + u8 cipher_len; + u8 key_id; + u8 key_len; + u8 key[32]; +} __packed; + +struct 
sta_rec_sec { + __le16 tag; + __le16 len; + u8 add; + u8 n_cipher; + u8 rsv[2]; + + struct sec_key key[2]; +} __packed; + +struct ra_phy { + u8 type; + u8 flag; + u8 stbc; + u8 sgi; + u8 bw; + u8 ldpc; + u8 mcs; + u8 nss; + u8 he_ltf; +}; + +struct sta_rec_ra { + __le16 tag; + __le16 len; + + u8 valid; + u8 auto_rate; + u8 phy_mode; + u8 channel; + u8 bw; + u8 disable_cck; + u8 ht_mcs32; + u8 ht_gf; + u8 ht_mcs[4]; + u8 mmps_mode; + u8 gband_256; + u8 af; + u8 auth_wapi_mode; + u8 rate_len; + + u8 supp_mode; + u8 supp_cck_rate; + u8 supp_ofdm_rate; + __le32 supp_ht_mcs; + __le16 supp_vht_mcs[4]; + + u8 op_mode; + u8 op_vht_chan_width; + u8 op_vht_rx_nss; + u8 op_vht_rx_nss_type; + + __le32 sta_status; + + struct ra_phy phy; +} __packed; + +struct sta_rec_ra_fixed { + __le16 tag; + __le16 len; + + __le32 field; + u8 op_mode; + u8 op_vht_chan_width; + u8 op_vht_rx_nss; + u8 op_vht_rx_nss_type; + + struct ra_phy phy; + + u8 spe_en; + u8 short_preamble; + u8 is_5g; + u8 mmps_mode; +} __packed; + +#define RATE_PARAM_FIXED 3 +#define RATE_PARAM_AUTO 20 +#define RATE_CFG_MCS GENMASK(3, 0) +#define RATE_CFG_NSS GENMASK(7, 4) +#define RATE_CFG_GI GENMASK(11, 8) +#define RATE_CFG_BW GENMASK(15, 12) +#define RATE_CFG_STBC GENMASK(19, 16) +#define RATE_CFG_LDPC GENMASK(23, 20) +#define RATE_CFG_PHY_TYPE GENMASK(27, 24) + +struct sta_rec_bf { + __le16 tag; + __le16 len; + + __le16 pfmu; /* 0xffff: no access right for PFMU */ + bool su_mu; /* 0: SU, 1: MU */ + u8 bf_cap; /* 0: iBF, 1: eBF */ + u8 sounding_phy; /* 0: legacy, 1: OFDM, 2: HT, 4: VHT */ + u8 ndpa_rate; + u8 ndp_rate; + u8 rept_poll_rate; + u8 tx_mode; /* 0: legacy, 1: OFDM, 2: HT, 4: VHT ... */ + u8 nc; + u8 nr; + u8 bw; /* 0: 20M, 1: 40M, 2: 80M, 3: 160M */ + + u8 mem_total; + u8 mem_20m; + struct { + u8 row; + u8 col: 6, row_msb: 2; + } mem[4]; + + __le16 smart_ant; + u8 se_idx; + u8 auto_sounding; /* b7: low traffic indicator + * b6: Stop sounding for this entry + * b5 ~ b0: postpone sounding + */ + u8 ibf_timeout; + u8 ibf_dbw; + u8 ibf_ncol; + u8 ibf_nrow; + u8 nr_bw160; + u8 nc_bw160; + u8 ru_start_idx; + u8 ru_end_idx; + + bool trigger_su; + bool trigger_mu; + bool ng16_su; + bool ng16_mu; + bool codebook42_su; + bool codebook75_mu; + + u8 he_ltf; + u8 rsv[2]; +} __packed; + +struct sta_rec_bfee { + __le16 tag; + __le16 len; + bool fb_identity_matrix; /* 1: feedback identity matrix */ + bool ignore_feedback; /* 1: ignore */ + u8 rsv[2]; +} __packed; + +enum { + STA_REC_BASIC, + STA_REC_RA, + STA_REC_RA_CMM_INFO, + STA_REC_RA_UPDATE, + STA_REC_BF, + STA_REC_AMSDU, + STA_REC_BA, + STA_REC_RED, /* not used */ + STA_REC_TX_PROC, /* for hdr trans and CSO in CR4 */ + STA_REC_HT, + STA_REC_VHT, + STA_REC_APPS, + STA_REC_KEY, + STA_REC_WTBL, + STA_REC_HE, + STA_REC_HW_AMSDU, + STA_REC_WTBL_AADOM, + STA_REC_KEY_V2, + STA_REC_MURU, + STA_REC_MUEDCA, + STA_REC_BFEE, + STA_REC_MAX_NUM +}; + +enum mt7915_cipher_type { + MT_CIPHER_NONE, + MT_CIPHER_WEP40, + MT_CIPHER_WEP104, + MT_CIPHER_WEP128, + MT_CIPHER_TKIP, + MT_CIPHER_AES_CCMP, + MT_CIPHER_CCMP_256, + MT_CIPHER_GCMP, + MT_CIPHER_GCMP_256, + MT_CIPHER_WAPI, + MT_CIPHER_BIP_CMAC_128, +}; + +enum { + CH_SWITCH_NORMAL = 0, + CH_SWITCH_SCAN = 3, + CH_SWITCH_MCC = 4, + CH_SWITCH_DFS = 5, + CH_SWITCH_BACKGROUND_SCAN_START = 6, + CH_SWITCH_BACKGROUND_SCAN_RUNNING = 7, + CH_SWITCH_BACKGROUND_SCAN_STOP = 8, + CH_SWITCH_SCAN_BYPASS_DPD = 9 +}; + +enum { + THERMAL_SENSOR_TEMP_QUERY, + THERMAL_SENSOR_MANUAL_CTRL, + THERMAL_SENSOR_INFO_QUERY, + THERMAL_SENSOR_TASK_CTRL, +}; + +enum { + MT_EBF = BIT(0), /* 
explicit beamforming */ + MT_IBF = BIT(1) /* implicit beamforming */ +}; + +#define MT7915_WTBL_UPDATE_MAX_SIZE (sizeof(struct wtbl_req_hdr) + \ + sizeof(struct wtbl_generic) + \ + sizeof(struct wtbl_rx) + \ + sizeof(struct wtbl_ht) + \ + sizeof(struct wtbl_vht) + \ + sizeof(struct wtbl_ba) + \ + sizeof(struct wtbl_smps)) + +#define MT7915_STA_UPDATE_MAX_SIZE (sizeof(struct sta_req_hdr) + \ + sizeof(struct sta_rec_basic) + \ + sizeof(struct sta_rec_ht) + \ + sizeof(struct sta_rec_he) + \ + sizeof(struct sta_rec_ba) + \ + sizeof(struct sta_rec_vht) + \ + sizeof(struct tlv) + \ + sizeof(struct sta_rec_muru) + \ + MT7915_WTBL_UPDATE_MAX_SIZE) + +#define MT7915_WTBL_UPDATE_BA_SIZE (sizeof(struct wtbl_req_hdr) + \ + sizeof(struct wtbl_ba)) + +#define MT7915_BSS_UPDATE_MAX_SIZE (sizeof(struct sta_req_hdr) + \ + sizeof(struct bss_info_omac) + \ + sizeof(struct bss_info_basic) +\ + sizeof(struct bss_info_rf_ch) +\ + sizeof(struct bss_info_ra) + \ + sizeof(struct bss_info_he) + \ + sizeof(struct bss_info_bmc_rate) +\ + sizeof(struct bss_info_ext_bss) +\ + sizeof(struct bss_info_sync_mode)) + +#define MT7915_BEACON_UPDATE_SIZE (sizeof(struct sta_req_hdr) + \ + sizeof(struct bss_info_bcn_csa) + \ + sizeof(struct bss_info_bcn_bcc) + \ + sizeof(struct bss_info_bcn_mbss) + \ + sizeof(struct bss_info_bcn_cont)) + +#define PHY_MODE_A BIT(0) +#define PHY_MODE_B BIT(1) +#define PHY_MODE_G BIT(2) +#define PHY_MODE_GN BIT(3) +#define PHY_MODE_AN BIT(4) +#define PHY_MODE_AC BIT(5) +#define PHY_MODE_AX_24G BIT(6) +#define PHY_MODE_AX_5G BIT(7) +#define PHY_MODE_AX_6G BIT(8) + +#define MODE_CCK BIT(0) +#define MODE_OFDM BIT(1) +#define MODE_HT BIT(2) +#define MODE_VHT BIT(3) +#define MODE_HE BIT(4) + +#define STA_CAP_WMM BIT(0) +#define STA_CAP_SGI_20 BIT(4) +#define STA_CAP_SGI_40 BIT(5) +#define STA_CAP_TX_STBC BIT(6) +#define STA_CAP_RX_STBC BIT(7) +#define STA_CAP_VHT_SGI_80 BIT(16) +#define STA_CAP_VHT_SGI_160 BIT(17) +#define STA_CAP_VHT_TX_STBC BIT(18) +#define STA_CAP_VHT_RX_STBC BIT(19) +#define STA_CAP_VHT_LDPC BIT(23) +#define STA_CAP_LDPC BIT(24) +#define STA_CAP_HT BIT(26) +#define STA_CAP_VHT BIT(27) +#define STA_CAP_HE BIT(28) + +/* HE MAC */ +#define STA_REC_HE_CAP_HTC BIT(0) +#define STA_REC_HE_CAP_BQR BIT(1) +#define STA_REC_HE_CAP_BSR BIT(2) +#define STA_REC_HE_CAP_OM BIT(3) +#define STA_REC_HE_CAP_AMSDU_IN_AMPDU BIT(4) +/* HE PHY */ +#define STA_REC_HE_CAP_DUAL_BAND BIT(5) +#define STA_REC_HE_CAP_LDPC BIT(6) +#define STA_REC_HE_CAP_TRIG_CQI_FK BIT(7) +#define STA_REC_HE_CAP_PARTIAL_BW_EXT_RANGE BIT(8) +/* STBC */ +#define STA_REC_HE_CAP_LE_EQ_80M_TX_STBC BIT(9) +#define STA_REC_HE_CAP_LE_EQ_80M_RX_STBC BIT(10) +#define STA_REC_HE_CAP_GT_80M_TX_STBC BIT(11) +#define STA_REC_HE_CAP_GT_80M_RX_STBC BIT(12) +/* GI */ +#define STA_REC_HE_CAP_SU_PPDU_1LTF_8US_GI BIT(13) +#define STA_REC_HE_CAP_SU_MU_PPDU_4LTF_8US_GI BIT(14) +#define STA_REC_HE_CAP_ER_SU_PPDU_1LTF_8US_GI BIT(15) +#define STA_REC_HE_CAP_ER_SU_PPDU_4LTF_8US_GI BIT(16) +#define STA_REC_HE_CAP_NDP_4LTF_3DOT2MS_GI BIT(17) +/* 242 TONE */ +#define STA_REC_HE_CAP_BW20_RU242_SUPPORT BIT(18) +#define STA_REC_HE_CAP_TX_1024QAM_UNDER_RU242 BIT(19) +#define STA_REC_HE_CAP_RX_1024QAM_UNDER_RU242 BIT(20) + +#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h new file mode 100644 index 000000000000..85d74ecd0351 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h @@ -0,0 +1,469 @@ +/* SPDX-License-Identifier: ISC */ +/* Copyright (C) 2020 MediaTek Inc. 
*/ + +#ifndef __MT7915_H +#define __MT7915_H + +#include <linux/interrupt.h> +#include <linux/ktime.h> +#include "../mt76.h" +#include "regs.h" + +#define MT7915_MAX_INTERFACES 4 +#define MT7915_MAX_WMM_SETS 4 +#define MT7915_WTBL_SIZE 288 +#define MT7915_WTBL_RESERVED (MT7915_WTBL_SIZE - 1) +#define MT7915_WTBL_STA (MT7915_WTBL_RESERVED - \ + MT7915_MAX_INTERFACES) + +#define MT7915_WATCHDOG_TIME (HZ / 10) +#define MT7915_RESET_TIMEOUT (30 * HZ) + +#define MT7915_TX_RING_SIZE 2048 +#define MT7915_TX_MCU_RING_SIZE 256 +#define MT7915_TX_FWDL_RING_SIZE 128 + +#define MT7915_RX_RING_SIZE 1536 +#define MT7915_RX_MCU_RING_SIZE 512 + +#define MT7915_FIRMWARE_WA "mediatek/mt7915_wa.bin" +#define MT7915_FIRMWARE_WM "mediatek/mt7915_wm.bin" +#define MT7915_ROM_PATCH "mediatek/mt7915_rom_patch.bin" + +#define MT7915_EEPROM_SIZE 3584 +#define MT7915_TOKEN_SIZE 8192 + +#define MT7915_CFEND_RATE_DEFAULT 0x49 /* OFDM 24M */ +#define MT7915_CFEND_RATE_11B 0x03 /* 11B LP, 11M */ +#define MT7915_5G_RATE_DEFAULT 0x4b /* OFDM 6M */ +#define MT7915_2G_RATE_DEFAULT 0x0 /* CCK 1M */ + +#define MT7915_SKU_RATE_NUM 161 +#define MT7915_SKU_MAX_DELTA_IDX MT7915_SKU_RATE_NUM +#define MT7915_SKU_TABLE_SIZE (MT7915_SKU_RATE_NUM + 1) + +struct mt7915_vif; +struct mt7915_sta; +struct mt7915_dfs_pulse; +struct mt7915_dfs_pattern; + +enum mt7915_txq_id { + MT7915_TXQ_FWDL = 16, + MT7915_TXQ_MCU_WM, + MT7915_TXQ_BAND0, + MT7915_TXQ_BAND1, + MT7915_TXQ_MCU_WA, +}; + +enum mt7915_rxq_id { + MT7915_RXQ_BAND0 = 0, + MT7915_RXQ_BAND1, + MT7915_RXQ_MCU_WM = 0, + MT7915_RXQ_MCU_WA, +}; + +enum mt7915_ampdu_state { + MT7915_AGGR_STOP, + MT7915_AGGR_PROGRESS, + MT7915_AGGR_START, + MT7915_AGGR_OPERATIONAL +}; + +struct mt7915_sta_stats { + struct rate_info prob_rate; + struct rate_info tx_rate; + + unsigned long per; + unsigned long changed; + unsigned long jiffies; +}; + +struct mt7915_sta { + struct mt76_wcid wcid; /* must be first */ + + struct mt7915_vif *vif; + + struct list_head poll_list; + u32 airtime_ac[8]; + + struct mt7915_sta_stats stats; + struct work_struct stats_work; + + spinlock_t ampdu_lock; + enum mt7915_ampdu_state ampdu_state[IEEE80211_NUM_TIDS]; +}; + +struct mt7915_vif { + u16 idx; + u8 omac_idx; + u8 band_idx; + u8 wmm_idx; + + struct { + u16 cw_min; + u16 cw_max; + u16 txop; + u8 aifs; + } wmm[IEEE80211_NUM_ACS]; + + struct mt7915_sta sta; + struct mt7915_dev *dev; +}; + +struct mib_stats { + u16 ack_fail_cnt; + u16 fcs_err_cnt; + u16 rts_cnt; + u16 rts_retries_cnt; + u16 ba_miss_cnt; +}; + +struct mt7915_phy { + struct mt76_phy *mt76; + struct mt7915_dev *dev; + + struct ieee80211_sband_iftype_data iftype[2][NUM_NL80211_IFTYPES]; + + u32 rxfilter; + u32 vif_mask; + u32 omac_mask; + + u16 noise; + u16 chainmask; + + s16 coverage_class; + u8 slottime; + + u8 rdd_state; + int dfs_state; + + __le32 rx_ampdu_ts; + u32 ampdu_ref; + + struct mib_stats mib; + + struct delayed_work mac_work; + u8 mac_work_count; +}; + +struct mt7915_dev { + union { /* must be first */ + struct mt76_dev mt76; + struct mt76_phy mphy; + }; + + struct mt7915_phy phy; + + u16 chainmask; + + struct work_struct init_work; + struct work_struct reset_work; + wait_queue_head_t reset_wait; + u32 reset_state; + + struct list_head sta_poll_list; + spinlock_t sta_poll_lock; + + u32 hw_pattern; + + spinlock_t token_lock; + struct idr token; + + s8 **rate_power; /* TODO: use mt76_rate_power */ + + bool fw_debug; +}; + +enum { + HW_BSSID_0 = 0x0, + HW_BSSID_1, + HW_BSSID_2, + HW_BSSID_3, + HW_BSSID_MAX, + EXT_BSSID_START = 0x10, + EXT_BSSID_1, + 
EXT_BSSID_2, + EXT_BSSID_3, + EXT_BSSID_4, + EXT_BSSID_5, + EXT_BSSID_6, + EXT_BSSID_7, + EXT_BSSID_8, + EXT_BSSID_9, + EXT_BSSID_10, + EXT_BSSID_11, + EXT_BSSID_12, + EXT_BSSID_13, + EXT_BSSID_14, + EXT_BSSID_15, + EXT_BSSID_END +}; + +enum { + MT_RX_SEL0, + MT_RX_SEL1, +}; + +enum mt7915_rdd_cmd { + RDD_STOP, + RDD_START, + RDD_DET_MODE, + RDD_RADAR_EMULATE, + RDD_START_TXQ = 20, + RDD_CAC_START = 50, + RDD_CAC_END, + RDD_NORMAL_START, + RDD_DISABLE_DFS_CAL, + RDD_PULSE_DBG, + RDD_READ_PULSE, + RDD_RESUME_BF, + RDD_IRQ_OFF, +}; + +enum { + RATE_CTRL_RU_INFO, + RATE_CTRL_FIXED_RATE_INFO, + RATE_CTRL_DUMP_INFO, + RATE_CTRL_MU_INFO, +}; + +static inline struct mt7915_phy * +mt7915_hw_phy(struct ieee80211_hw *hw) +{ + struct mt76_phy *phy = hw->priv; + + return phy->priv; +} + +static inline struct mt7915_dev * +mt7915_hw_dev(struct ieee80211_hw *hw) +{ + struct mt76_phy *phy = hw->priv; + + return container_of(phy->dev, struct mt7915_dev, mt76); +} + +static inline struct mt7915_phy * +mt7915_ext_phy(struct mt7915_dev *dev) +{ + struct mt76_phy *phy = dev->mt76.phy2; + + if (!phy) + return NULL; + + return phy->priv; +} + +static inline void +mt7915_set_aggr_state(struct mt7915_sta *msta, u8 tid, + enum mt7915_ampdu_state state) +{ + spin_lock_bh(&msta->ampdu_lock); + msta->ampdu_state[tid] = state; + spin_unlock_bh(&msta->ampdu_lock); +} + +extern const struct ieee80211_ops mt7915_ops; +extern struct pci_driver mt7915_pci_driver; + +u32 mt7915_reg_map(struct mt7915_dev *dev, u32 addr); + +int mt7915_register_device(struct mt7915_dev *dev); +void mt7915_unregister_device(struct mt7915_dev *dev); +int mt7915_register_ext_phy(struct mt7915_dev *dev); +void mt7915_unregister_ext_phy(struct mt7915_dev *dev); +int mt7915_eeprom_init(struct mt7915_dev *dev); +u32 mt7915_eeprom_read(struct mt7915_dev *dev, u32 offset); +int mt7915_eeprom_get_target_power(struct mt7915_dev *dev, + struct ieee80211_channel *chan, + u8 chain_idx); +void mt7915_eeprom_init_sku(struct mt7915_dev *dev); +int mt7915_dma_init(struct mt7915_dev *dev); +void mt7915_dma_prefetch(struct mt7915_dev *dev); +void mt7915_dma_cleanup(struct mt7915_dev *dev); +int mt7915_mcu_init(struct mt7915_dev *dev); +int mt7915_mcu_add_dev_info(struct mt7915_dev *dev, + struct ieee80211_vif *vif, bool enable); +int mt7915_mcu_add_bss_info(struct mt7915_phy *phy, + struct ieee80211_vif *vif, int enable); +int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, bool enable); +int mt7915_mcu_add_sta_adv(struct mt7915_dev *dev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, bool enable); +int mt7915_mcu_add_tx_ba(struct mt7915_dev *dev, + struct ieee80211_ampdu_params *params, + bool add); +int mt7915_mcu_add_rx_ba(struct mt7915_dev *dev, + struct ieee80211_ampdu_params *params, + bool add); +int mt7915_mcu_add_key(struct mt7915_dev *dev, struct ieee80211_vif *vif, + struct mt7915_sta *msta, struct ieee80211_key_conf *key, + enum set_key_cmd cmd); +int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + int enable); +int mt7915_mcu_add_obss_spr(struct mt7915_dev *dev, struct ieee80211_vif *vif, + bool enable); +int mt7915_mcu_add_rate_ctrl(struct mt7915_dev *dev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); +int mt7915_mcu_add_smps(struct mt7915_dev *dev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); +int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd); +int mt7915_mcu_set_tx(struct mt7915_dev *dev, struct ieee80211_vif *vif); +int 
mt7915_mcu_set_fixed_rate(struct mt7915_dev *dev, + struct ieee80211_sta *sta, u32 rate); +int mt7915_mcu_set_eeprom(struct mt7915_dev *dev); +int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset); +int mt7915_mcu_set_mac(struct mt7915_dev *dev, int band, bool enable, + bool hdr_trans); +int mt7915_mcu_set_scs(struct mt7915_dev *dev, u8 band, bool enable); +int mt7915_mcu_set_ser(struct mt7915_dev *dev, u8 action, u8 set, u8 band); +int mt7915_mcu_set_rts_thresh(struct mt7915_phy *phy, u32 val); +int mt7915_mcu_set_pm(struct mt7915_dev *dev, int band, int enter); +int mt7915_mcu_set_sku_en(struct mt7915_phy *phy, bool enable); +int mt7915_mcu_set_sku(struct mt7915_phy *phy); +int mt7915_mcu_set_txbf_type(struct mt7915_dev *dev); +int mt7915_mcu_set_txbf_sounding(struct mt7915_dev *dev); +int mt7915_mcu_set_fcc5_lpn(struct mt7915_dev *dev, int val); +int mt7915_mcu_set_pulse_th(struct mt7915_dev *dev, + const struct mt7915_dfs_pulse *pulse); +int mt7915_mcu_set_radar_th(struct mt7915_dev *dev, int index, + const struct mt7915_dfs_pattern *pattern); +int mt7915_mcu_get_rate_info(struct mt7915_dev *dev, u32 cmd, u16 wlan_idx); +int mt7915_mcu_get_temperature(struct mt7915_dev *dev, int index); +int mt7915_mcu_rdd_cmd(struct mt7915_dev *dev, enum mt7915_rdd_cmd cmd, + u8 index, u8 rx_sel, u8 val); +int mt7915_mcu_fw_log_2_host(struct mt7915_dev *dev, u8 ctrl); +int mt7915_mcu_fw_dbg_ctrl(struct mt7915_dev *dev, u32 module, u8 level); +void mt7915_mcu_rx_event(struct mt7915_dev *dev, struct sk_buff *skb); +void mt7915_mcu_exit(struct mt7915_dev *dev); + +static inline bool is_mt7915(struct mt76_dev *dev) +{ + return mt76_chip(dev) == 0x7915; +} + +static inline void mt7915_irq_enable(struct mt7915_dev *dev, u32 mask) +{ + mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, 0, mask); +} + +static inline void mt7915_irq_disable(struct mt7915_dev *dev, u32 mask) +{ + mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, mask, 0); +} + +static inline u32 +mt7915_reg_map_l1(struct mt7915_dev *dev, u32 addr) +{ + u32 offset = FIELD_GET(MT_HIF_REMAP_L1_OFFSET, addr); + u32 base = FIELD_GET(MT_HIF_REMAP_L1_BASE, addr); + + mt76_rmw_field(dev, MT_HIF_REMAP_L1, MT_HIF_REMAP_L1_MASK, base); + /* use read to push write */ + mt76_rr(dev, MT_HIF_REMAP_L1); + + return MT_HIF_REMAP_BASE_L1 + offset; +} + +static inline u32 +mt7915_l1_rr(struct mt7915_dev *dev, u32 addr) +{ + return mt76_rr(dev, mt7915_reg_map_l1(dev, addr)); +} + +static inline void +mt7915_l1_wr(struct mt7915_dev *dev, u32 addr, u32 val) +{ + mt76_wr(dev, mt7915_reg_map_l1(dev, addr), val); +} + +static inline u32 +mt7915_l1_rmw(struct mt7915_dev *dev, u32 addr, u32 mask, u32 val) +{ + val |= mt7915_l1_rr(dev, addr) & ~mask; + mt7915_l1_wr(dev, addr, val); + + return val; +} + +#define mt7915_l1_set(dev, addr, val) mt7915_l1_rmw(dev, addr, 0, val) +#define mt7915_l1_clear(dev, addr, val) mt7915_l1_rmw(dev, addr, val, 0) + +static inline u32 +mt7915_reg_map_l2(struct mt7915_dev *dev, u32 addr) +{ + u32 offset = FIELD_GET(MT_HIF_REMAP_L2_OFFSET, addr); + u32 base = FIELD_GET(MT_HIF_REMAP_L2_BASE, addr); + + mt76_rmw_field(dev, MT_HIF_REMAP_L2, MT_HIF_REMAP_L2_MASK, base); + /* use read to push write */ + mt76_rr(dev, MT_HIF_REMAP_L2); + + return MT_HIF_REMAP_BASE_L2 + offset; +} + +static inline u32 +mt7915_l2_rr(struct mt7915_dev *dev, u32 addr) +{ + return mt76_rr(dev, mt7915_reg_map_l2(dev, addr)); +} + +static inline void +mt7915_l2_wr(struct mt7915_dev *dev, u32 addr, u32 val) +{ + mt76_wr(dev, mt7915_reg_map_l2(dev, addr), val); +} + +static 
inline u32 +mt7915_l2_rmw(struct mt7915_dev *dev, u32 addr, u32 mask, u32 val) +{ + val |= mt7915_l2_rr(dev, addr) & ~mask; + mt7915_l2_wr(dev, addr, val); + + return val; +} + +#define mt7915_l2_set(dev, addr, val) mt7915_l2_rmw(dev, addr, 0, val) +#define mt7915_l2_clear(dev, addr, val) mt7915_l2_rmw(dev, addr, val, 0) + +bool mt7915_mac_wtbl_update(struct mt7915_dev *dev, int idx, u32 mask); +void mt7915_mac_reset_counters(struct mt7915_phy *phy); +void mt7915_mac_cca_stats_reset(struct mt7915_phy *phy); +void mt7915_mac_sta_poll(struct mt7915_dev *dev); +void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi, + struct sk_buff *skb, struct mt76_wcid *wcid, + struct ieee80211_key_conf *key, bool beacon); +void mt7915_mac_set_timing(struct mt7915_phy *phy); +int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb); +void mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb); +int mt7915_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); +void mt7915_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); +void mt7915_mac_work(struct work_struct *work); +void mt7915_mac_reset_work(struct work_struct *work); +void mt7915_mac_sta_stats_work(struct work_struct *work); +int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, + enum mt76_txq_id qid, struct mt76_wcid *wcid, + struct ieee80211_sta *sta, + struct mt76_tx_info *tx_info); +void mt7915_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid, + struct mt76_queue_entry *e); +void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, + struct sk_buff *skb); +void mt7915_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps); +void mt7915_stats_work(struct work_struct *work); +void mt7915_txp_skb_unmap(struct mt76_dev *dev, + struct mt76_txwi_cache *txwi); +int mt76_dfs_start_rdd(struct mt7915_dev *dev, bool force); +int mt7915_dfs_init_radar_detector(struct mt7915_phy *phy); +void mt7915_set_stream_he_caps(struct mt7915_phy *phy); +void mt7915_set_stream_vht_txbf_caps(struct mt7915_phy *phy); +void mt7915_update_channel(struct mt76_dev *mdev); +int mt7915_init_debugfs(struct mt7915_dev *dev); +#ifdef CONFIG_MAC80211_DEBUGFS +void mt7915_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, struct dentry *dir); +#endif + +#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c new file mode 100644 index 000000000000..7937c6965f59 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c @@ -0,0 +1,191 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2020 MediaTek Inc. 
+ * + * Author: Ryder Lee <ryder.lee@mediatek.com> + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> + +#include "mt7915.h" +#include "mac.h" +#include "../trace.h" + +static const struct pci_device_id mt7915_pci_device_table[] = { + { PCI_DEVICE(0x14c3, 0x7915) }, + { }, +}; + +static void +mt7915_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q) +{ + struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76); + + mt7915_irq_enable(dev, MT_INT_RX_DONE(q)); +} + +/* TODO: support 2/4/6/8 MSI-X vectors */ +static irqreturn_t mt7915_irq_handler(int irq, void *dev_instance) +{ + struct mt7915_dev *dev = dev_instance; + u32 intr; + + intr = mt76_rr(dev, MT_INT_SOURCE_CSR); + mt76_wr(dev, MT_INT_SOURCE_CSR, intr); + + if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state)) + return IRQ_NONE; + + trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask); + + intr &= dev->mt76.mmio.irqmask; + + if (intr & MT_INT_TX_DONE_ALL) { + mt7915_irq_disable(dev, MT_INT_TX_DONE_ALL); + napi_schedule(&dev->mt76.tx_napi); + } + + if (intr & MT_INT_RX_DONE_DATA) { + mt7915_irq_disable(dev, MT_INT_RX_DONE_DATA); + napi_schedule(&dev->mt76.napi[0]); + } + + if (intr & MT_INT_RX_DONE_WM) { + mt7915_irq_disable(dev, MT_INT_RX_DONE_WM); + napi_schedule(&dev->mt76.napi[1]); + } + + if (intr & MT_INT_RX_DONE_WA) { + mt7915_irq_disable(dev, MT_INT_RX_DONE_WA); + napi_schedule(&dev->mt76.napi[2]); + } + + if (intr & MT_INT_MCU_CMD) { + u32 val = mt76_rr(dev, MT_MCU_CMD); + + mt76_wr(dev, MT_MCU_CMD, val); + if (val & MT_MCU_CMD_ERROR_MASK) { + dev->reset_state = val; + ieee80211_queue_work(mt76_hw(dev), &dev->reset_work); + wake_up(&dev->reset_wait); + } + } + + return IRQ_HANDLED; +} + +static int +mt7915_alloc_device(struct pci_dev *pdev, struct mt7915_dev *dev) +{ +#define NUM_BANDS 2 + int i; + s8 **sku; + + sku = devm_kzalloc(&pdev->dev, NUM_BANDS * sizeof(*sku), GFP_KERNEL); + if (!sku) + return -ENOMEM; + + for (i = 0; i < NUM_BANDS; i++) { + sku[i] = devm_kzalloc(&pdev->dev, MT7915_SKU_TABLE_SIZE * + sizeof(**sku), GFP_KERNEL); + if (!sku[i]) + return -ENOMEM; + } + dev->rate_power = sku; + + return 0; +} + +static int mt7915_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + static const struct mt76_driver_ops drv_ops = { + /* txwi_size = txd size + txp size */ + .txwi_size = MT_TXD_SIZE + sizeof(struct mt7915_txp), + .drv_flags = MT_DRV_TXWI_NO_FREE, + .survey_flags = SURVEY_INFO_TIME_TX | + SURVEY_INFO_TIME_RX | + SURVEY_INFO_TIME_BSS_RX, + .tx_prepare_skb = mt7915_tx_prepare_skb, + .tx_complete_skb = mt7915_tx_complete_skb, + .rx_skb = mt7915_queue_rx_skb, + .rx_poll_complete = mt7915_rx_poll_complete, + .sta_ps = mt7915_sta_ps, + .sta_add = mt7915_mac_sta_add, + .sta_remove = mt7915_mac_sta_remove, + .update_survey = mt7915_update_channel, + }; + struct mt7915_dev *dev; + struct mt76_dev *mdev; + int ret; + + ret = pcim_enable_device(pdev); + if (ret) + return ret; + + ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev)); + if (ret) + return ret; + + pci_set_master(pdev); + + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (ret) + return ret; + + mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt7915_ops, + &drv_ops); + if (!mdev) + return -ENOMEM; + + dev = container_of(mdev, struct mt7915_dev, mt76); + ret = mt7915_alloc_device(pdev, dev); + if (ret) + return ret; + + mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]); + mdev->rev = (mt7915_l1_rr(dev, MT_HW_CHIPID) << 16) | + (mt7915_l1_rr(dev, MT_HW_REV) & 0xff); + 
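/* the chip ID sits in the upper 16 bits of mdev->rev, the HW/ECO + * revision in the low byte; is_mt7915() matches on the chip ID half + */ +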
dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev); + + /* master switch of PCIe interrupt enable */ + mt7915_l1_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff); + + ret = devm_request_irq(mdev->dev, pdev->irq, mt7915_irq_handler, + IRQF_SHARED, KBUILD_MODNAME, dev); + if (ret) + goto error; + + ret = mt7915_register_device(dev); + if (ret) + goto error; + + return 0; +error: + ieee80211_free_hw(mt76_hw(dev)); + return ret; +} + +static void mt7915_pci_remove(struct pci_dev *pdev) +{ + struct mt76_dev *mdev = pci_get_drvdata(pdev); + struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76); + + mt7915_unregister_device(dev); +} + +struct pci_driver mt7915_pci_driver = { + .name = KBUILD_MODNAME, + .id_table = mt7915_pci_device_table, + .probe = mt7915_pci_probe, + .remove = mt7915_pci_remove, +}; + +module_pci_driver(mt7915_pci_driver); + +MODULE_DEVICE_TABLE(pci, mt7915_pci_device_table); +MODULE_FIRMWARE(MT7915_FIRMWARE_WA); +MODULE_FIRMWARE(MT7915_FIRMWARE_WM); +MODULE_FIRMWARE(MT7915_ROM_PATCH); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h new file mode 100644 index 000000000000..c121715f8bff --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h @@ -0,0 +1,375 @@ +/* SPDX-License-Identifier: ISC */ +/* Copyright (C) 2020 MediaTek Inc. */ + +#ifndef __MT7915_REGS_H +#define __MT7915_REGS_H + +/* MCU WFDMA1 */ +#define MT_MCU_WFDMA1_BASE 0x3000 +#define MT_MCU_WFDMA1(ofs) (MT_MCU_WFDMA1_BASE + (ofs)) + +#define MT_MCU_INT_EVENT MT_MCU_WFDMA1(0x108) +#define MT_MCU_INT_EVENT_DMA_STOPPED BIT(0) +#define MT_MCU_INT_EVENT_DMA_INIT BIT(1) +#define MT_MCU_INT_EVENT_SER_TRIGGER BIT(2) +#define MT_MCU_INT_EVENT_RESET_DONE BIT(3) + +#define MT_PLE_BASE 0x8000 +#define MT_PLE(ofs) (MT_PLE_BASE + (ofs)) + +#define MT_PLE_FL_Q0_CTRL MT_PLE(0x1b0) +#define MT_PLE_FL_Q1_CTRL MT_PLE(0x1b4) +#define MT_PLE_FL_Q2_CTRL MT_PLE(0x1b8) +#define MT_PLE_FL_Q3_CTRL MT_PLE(0x1bc) + +#define MT_PLE_AC_QEMPTY(ac, n) MT_PLE(0x300 + 0x10 * (ac) + \ + ((n) << 2)) +#define MT_PLE_AMSDU_PACK_MSDU_CNT(n) MT_PLE(0x10e0 + ((n) << 2)) + +#define MT_MDP_BASE 0xf000 +#define MT_MDP(ofs) (MT_MDP_BASE + (ofs)) + +#define MT_MDP_DCR0 MT_MDP(0x000) +#define MT_MDP_DCR0_DAMSDU_EN BIT(15) + +#define MT_MDP_DCR1 MT_MDP(0x004) +#define MT_MDP_DCR1_MAX_RX_LEN GENMASK(15, 3) + +#define MT_MDP_BNRCFR0(_band) MT_MDP(0x070 + ((_band) << 8)) +#define MT_MDP_RCFR0_MCU_RX_MGMT GENMASK(5, 4) +#define MT_MDP_RCFR0_MCU_RX_CTL_NON_BAR GENMASK(7, 6) +#define MT_MDP_RCFR0_MCU_RX_CTL_BAR GENMASK(9, 8) + +#define MT_MDP_BNRCFR1(_band) MT_MDP(0x074 + ((_band) << 8)) +#define MT_MDP_RCFR1_MCU_RX_BYPASS GENMASK(23, 22) +#define MT_MDP_RCFR1_RX_DROPPED_UCAST GENMASK(28, 27) +#define MT_MDP_RCFR1_RX_DROPPED_MCAST GENMASK(30, 29) +#define MT_MDP_TO_HIF 0 +#define MT_MDP_TO_WM 1 + +/* TMAC: band 0(0x21000), band 1(0xa1000) */ +#define MT_WF_TMAC_BASE(_band) ((_band) ?
0xa1000 : 0x21000) +#define MT_WF_TMAC(_band, ofs) (MT_WF_TMAC_BASE(_band) + (ofs)) + +#define MT_TMAC_CDTR(_band) MT_WF_TMAC(_band, 0x090) +#define MT_TMAC_ODTR(_band) MT_WF_TMAC(_band, 0x094) +#define MT_TIMEOUT_VAL_PLCP GENMASK(15, 0) +#define MT_TIMEOUT_VAL_CCA GENMASK(31, 16) + +#define MT_TMAC_ICR0(_band) MT_WF_TMAC(_band, 0x0a4) +#define MT_IFS_EIFS GENMASK(8, 0) +#define MT_IFS_RIFS GENMASK(14, 10) +#define MT_IFS_SIFS GENMASK(22, 16) +#define MT_IFS_SLOT GENMASK(30, 24) + +#define MT_TMAC_CTCR0(_band) MT_WF_TMAC(_band, 0x0f4) +#define MT_TMAC_CTCR0_INS_DDLMT_REFTIME GENMASK(5, 0) +#define MT_TMAC_CTCR0_INS_DDLMT_EN BIT(17) +#define MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN BIT(18) + +/* DMA Band 0 */ +#define MT_WF_DMA_BASE 0x21e00 +#define MT_WF_DMA(ofs) (MT_WF_DMA_BASE + (ofs)) + +#define MT_DMA_DCR0 MT_WF_DMA(0x000) +#define MT_DMA_DCR0_MAX_RX_LEN GENMASK(15, 3) +#define MT_DMA_DCR0_RXD_G5_EN BIT(23) + +/* ETBF: band 0(0x24000), band 1(0xa4000) */ +#define MT_WF_ETBF_BASE(_band) ((_band) ? 0xa4000 : 0x24000) +#define MT_WF_ETBF(_band, ofs) (MT_WF_ETBF_BASE(_band) + (ofs)) + +#define MT_ETBF_TX_NDP_BFRP(_band) MT_WF_ETBF(_band, 0x040) +#define MT_ETBF_TX_FB_CPL GENMASK(31, 16) +#define MT_ETBF_TX_FB_TRI GENMASK(15, 0) + +#define MT_ETBF_TX_APP_CNT(_band) MT_WF_ETBF(_band, 0x0f0) +#define MT_ETBF_TX_IBF_CNT GENMASK(31, 16) +#define MT_ETBF_TX_EBF_CNT GENMASK(15, 0) + +#define MT_ETBF_RX_FB_CNT(_band) MT_WF_ETBF(_band, 0x0f8) +#define MT_ETBF_RX_FB_ALL GENMASK(31, 24) +#define MT_ETBF_RX_FB_HE GENMASK(23, 16) +#define MT_ETBF_RX_FB_VHT GENMASK(15, 8) +#define MT_ETBF_RX_FB_HT GENMASK(7, 0) + +/* LPON: band 0(0x24200), band 1(0xa4200) */ +#define MT_WF_LPON_BASE(_band) ((_band) ? 0xa4200 : 0x24200) +#define MT_WF_LPON(_band, ofs) (MT_WF_LPON_BASE(_band) + (ofs)) + +#define MT_LPON_UTTR0(_band) MT_WF_LPON(_band, 0x080) +#define MT_LPON_UTTR1(_band) MT_WF_LPON(_band, 0x084) + +#define MT_LPON_TCR(_band, n) MT_WF_LPON(_band, 0x0a8 + (n) * 4) +#define MT_LPON_TCR_SW_MODE GENMASK(1, 0) +#define MT_LPON_TCR_SW_WRITE BIT(0) + +/* MIB: band 0(0x24800), band 1(0xa4800) */ +#define MT_WF_MIB_BASE(_band) ((_band) ? 
0xa4800 : 0x24800) +#define MT_WF_MIB(_band, ofs) (MT_WF_MIB_BASE(_band) + (ofs)) + +#define MT_MIB_SDR3(_band) MT_WF_MIB(_band, 0x014) +#define MT_MIB_SDR3_FCS_ERR_MASK GENMASK(15, 0) + +#define MT_MIB_SDR9(_band) MT_WF_MIB(_band, 0x02c) +#define MT_MIB_SDR9_BUSY_MASK GENMASK(23, 0) + +#define MT_MIB_SDR16(_band) MT_WF_MIB(_band, 0x048) +#define MT_MIB_SDR16_BUSY_MASK GENMASK(23, 0) + +#define MT_MIB_SDR36(_band) MT_WF_MIB(_band, 0x098) +#define MT_MIB_SDR36_TXTIME_MASK GENMASK(23, 0) +#define MT_MIB_SDR37(_band) MT_WF_MIB(_band, 0x09c) +#define MT_MIB_SDR37_RXTIME_MASK GENMASK(23, 0) + +#define MT_MIB_DR11(_band) MT_WF_MIB(_band, 0x0cc) + +#define MT_MIB_MB_SDR0(_band, n) MT_WF_MIB(_band, 0x100 + ((n) << 4)) +#define MT_MIB_RTS_RETRIES_COUNT_MASK GENMASK(31, 16) +#define MT_MIB_RTS_COUNT_MASK GENMASK(15, 0) + +#define MT_MIB_MB_SDR1(_band, n) MT_WF_MIB(_band, 0x104 + ((n) << 4)) +#define MT_MIB_BA_MISS_COUNT_MASK GENMASK(15, 0) +#define MT_MIB_ACK_FAIL_COUNT_MASK GENMASK(31, 16) + +#define MT_MIB_MB_SDR2(_band, n) MT_WF_MIB(_band, 0x108 + ((n) << 4)) +#define MT_MIB_FRAME_RETRIES_COUNT_MASK GENMASK(15, 0) + +#define MT_TX_AGG_CNT(_band, n) MT_WF_MIB(_band, 0x0a8 + ((n) << 2)) +#define MT_TX_AGG_CNT2(_band, n) MT_WF_MIB(_band, 0x164 + ((n) << 2)) +#define MT_MIB_ARNG(_band, n) MT_WF_MIB(_band, 0x4b8 + ((n) << 2)) +#define MT_MIB_ARNCR_RANGE(val, n) (((val) >> ((n) << 3)) & GENMASK(7, 0)) + +#define MT_WTBLON_TOP_BASE 0x34000 +#define MT_WTBLON_TOP(ofs) (MT_WTBLON_TOP_BASE + (ofs)) +#define MT_WTBLON_TOP_WDUCR MT_WTBLON_TOP(0x0) +#define MT_WTBLON_TOP_WDUCR_GROUP GENMASK(2, 0) + +#define MT_WTBL_UPDATE MT_WTBLON_TOP(0x030) +#define MT_WTBL_UPDATE_WLAN_IDX GENMASK(9, 0) +#define MT_WTBL_UPDATE_ADM_COUNT_CLEAR BIT(12) +#define MT_WTBL_UPDATE_BUSY BIT(31) + +#define MT_WTBL_BASE 0x38000 +#define MT_WTBL_LMAC_ID GENMASK(14, 8) +#define MT_WTBL_LMAC_DW GENMASK(7, 2) +#define MT_WTBL_LMAC_OFFS(_id, _dw) (MT_WTBL_BASE | \ + FIELD_PREP(MT_WTBL_LMAC_ID, _id) | \ + FIELD_PREP(MT_WTBL_LMAC_DW, _dw)) + +/* AGG: band 0(0x20800), band 1(0xa0800) */ +#define MT_WF_AGG_BASE(_band) ((_band) ? 0xa0800 : 0x20800) +#define MT_WF_AGG(_band, ofs) (MT_WF_AGG_BASE(_band) + (ofs)) + +#define MT_AGG_ACR0(_band) MT_WF_AGG(_band, 0x084) +#define MT_AGG_ACR_CFEND_RATE GENMASK(13, 0) +#define MT_AGG_ACR_BAR_RATE GENMASK(29, 16) + +/* ARB: band 0(0x20c00), band 1(0xa0c00) */ +#define MT_WF_ARB_BASE(_band) ((_band) ? 0xa0c00 : 0x20c00) +#define MT_WF_ARB(_band, ofs) (MT_WF_ARB_BASE(_band) + (ofs)) + +#define MT_ARB_SCR(_band) MT_WF_ARB(_band, 0x080) +#define MT_ARB_SCR_TX_DISABLE BIT(8) +#define MT_ARB_SCR_RX_DISABLE BIT(9) + +/* RMAC: band 0(0x21400), band 1(0xa1400) */ +#define MT_WF_RMAC_BASE(_band) ((_band) ? 
0xa1400 : 0x21400) +#define MT_WF_RMAC(_band, ofs) (MT_WF_RMAC_BASE(_band) + (ofs)) + +#define MT_WF_RFCR(_band) MT_WF_RMAC(_band, 0x000) +#define MT_WF_RFCR_DROP_STBC_MULTI BIT(0) +#define MT_WF_RFCR_DROP_FCSFAIL BIT(1) +#define MT_WF_RFCR_DROP_VERSION BIT(3) +#define MT_WF_RFCR_DROP_PROBEREQ BIT(4) +#define MT_WF_RFCR_DROP_MCAST BIT(5) +#define MT_WF_RFCR_DROP_BCAST BIT(6) +#define MT_WF_RFCR_DROP_MCAST_FILTERED BIT(7) +#define MT_WF_RFCR_DROP_A3_MAC BIT(8) +#define MT_WF_RFCR_DROP_A3_BSSID BIT(9) +#define MT_WF_RFCR_DROP_A2_BSSID BIT(10) +#define MT_WF_RFCR_DROP_OTHER_BEACON BIT(11) +#define MT_WF_RFCR_DROP_FRAME_REPORT BIT(12) +#define MT_WF_RFCR_DROP_CTL_RSV BIT(13) +#define MT_WF_RFCR_DROP_CTS BIT(14) +#define MT_WF_RFCR_DROP_RTS BIT(15) +#define MT_WF_RFCR_DROP_DUPLICATE BIT(16) +#define MT_WF_RFCR_DROP_OTHER_BSS BIT(17) +#define MT_WF_RFCR_DROP_OTHER_UC BIT(18) +#define MT_WF_RFCR_DROP_OTHER_TIM BIT(19) +#define MT_WF_RFCR_DROP_NDPA BIT(20) +#define MT_WF_RFCR_DROP_UNWANTED_CTL BIT(21) + +#define MT_WF_RFCR1(_band) MT_WF_RMAC(_band, 0x004) +#define MT_WF_RFCR1_DROP_ACK BIT(4) +#define MT_WF_RFCR1_DROP_BF_POLL BIT(5) +#define MT_WF_RFCR1_DROP_BA BIT(6) +#define MT_WF_RFCR1_DROP_CFEND BIT(7) +#define MT_WF_RFCR1_DROP_CFACK BIT(8) + +#define MT_WF_RMAC_MIB_TIME0(_band) MT_WF_RMAC(_band, 0x03c4) +#define MT_WF_RMAC_MIB_RXTIME_CLR BIT(31) +#define MT_WF_RMAC_MIB_RXTIME_EN BIT(30) + +#define MT_WF_RMAC_MIB_AIRTIME14(_band) MT_WF_RMAC(_band, 0x03b8) +#define MT_MIB_OBSSTIME_MASK GENMASK(23, 0) +#define MT_WF_RMAC_MIB_AIRTIME0(_band) MT_WF_RMAC(_band, 0x0380) + +/* WFDMA0 */ +#define MT_WFDMA0_BASE 0xd4000 +#define MT_WFDMA0(ofs) (MT_WFDMA0_BASE + (ofs)) + +#define MT_WFDMA0_RST MT_WFDMA0(0x100) +#define MT_WFDMA0_RST_LOGIC_RST BIT(4) +#define MT_WFDMA0_RST_DMASHDL_ALL_RST BIT(5) + +#define MT_WFDMA0_BUSY_ENA MT_WFDMA0(0x13c) +#define MT_WFDMA0_BUSY_ENA_TX_FIFO0 BIT(0) +#define MT_WFDMA0_BUSY_ENA_TX_FIFO1 BIT(1) +#define MT_WFDMA0_BUSY_ENA_RX_FIFO BIT(2) + +#define MT_WFDMA0_GLO_CFG MT_WFDMA0(0x208) +#define MT_WFDMA0_GLO_CFG_TX_DMA_EN BIT(0) +#define MT_WFDMA0_GLO_CFG_RX_DMA_EN BIT(2) + +#define MT_WFDMA0_RST_DTX_PTR MT_WFDMA0(0x20c) +#define MT_WFDMA0_PRI_DLY_INT_CFG0 MT_WFDMA0(0x2f0) + +#define MT_RX_DATA_RING_BASE MT_WFDMA0(0x500) + +#define MT_WFDMA0_RX_RING0_EXT_CTRL MT_WFDMA0(0x680) +#define MT_WFDMA0_RX_RING1_EXT_CTRL MT_WFDMA0(0x684) +#define MT_WFDMA0_RX_RING2_EXT_CTRL MT_WFDMA0(0x688) + +/* WFDMA1 */ +#define MT_WFDMA1_BASE 0xd5000 +#define MT_WFDMA1(ofs) (MT_WFDMA1_BASE + (ofs)) + +#define MT_WFDMA1_RST MT_WFDMA1(0x100) +#define MT_WFDMA1_RST_LOGIC_RST BIT(4) +#define MT_WFDMA1_RST_DMASHDL_ALL_RST BIT(5) + +#define MT_WFDMA1_BUSY_ENA MT_WFDMA1(0x13c) +#define MT_WFDMA1_BUSY_ENA_TX_FIFO0 BIT(0) +#define MT_WFDMA1_BUSY_ENA_TX_FIFO1 BIT(1) +#define MT_WFDMA1_BUSY_ENA_RX_FIFO BIT(2) + +#define MT_MCU_CMD MT_WFDMA1(0x1f0) +#define MT_MCU_CMD_STOP_DMA_FW_RELOAD BIT(1) +#define MT_MCU_CMD_STOP_DMA BIT(2) +#define MT_MCU_CMD_RESET_DONE BIT(3) +#define MT_MCU_CMD_RECOVERY_DONE BIT(4) +#define MT_MCU_CMD_NORMAL_STATE BIT(5) +#define MT_MCU_CMD_ERROR_MASK GENMASK(5, 1) + +#define MT_WFDMA1_GLO_CFG MT_WFDMA1(0x208) +#define MT_WFDMA1_GLO_CFG_TX_DMA_EN BIT(0) +#define MT_WFDMA1_GLO_CFG_RX_DMA_EN BIT(2) +#define MT_WFDMA1_GLO_CFG_OMIT_TX_INFO BIT(28) +#define MT_WFDMA1_GLO_CFG_OMIT_RX_INFO BIT(27) + +#define MT_WFDMA1_RST_DTX_PTR MT_WFDMA1(0x20c) +#define MT_WFDMA1_PRI_DLY_INT_CFG0 MT_WFDMA1(0x2f0) + +#define MT_TX_RING_BASE MT_WFDMA1(0x300) +#define MT_RX_EVENT_RING_BASE MT_WFDMA1(0x500) + 
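+/* Per-ring extension control registers; the TX ring numbering appears + * to follow enum mt7915_txq_id in mt7915.h (FWDL = 16, MCU_WM = 17, + * BAND0 = 18, BAND1 = 19, MCU_WA = 20). + */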
+#define MT_WFDMA1_TX_RING0_EXT_CTRL MT_WFDMA1(0x600) +#define MT_WFDMA1_TX_RING1_EXT_CTRL MT_WFDMA1(0x604) +#define MT_WFDMA1_TX_RING2_EXT_CTRL MT_WFDMA1(0x608) +#define MT_WFDMA1_TX_RING3_EXT_CTRL MT_WFDMA1(0x60c) +#define MT_WFDMA1_TX_RING4_EXT_CTRL MT_WFDMA1(0x610) +#define MT_WFDMA1_TX_RING5_EXT_CTRL MT_WFDMA1(0x614) +#define MT_WFDMA1_TX_RING6_EXT_CTRL MT_WFDMA1(0x618) +#define MT_WFDMA1_TX_RING7_EXT_CTRL MT_WFDMA1(0x61c) + +#define MT_WFDMA1_TX_RING16_EXT_CTRL MT_WFDMA1(0x640) +#define MT_WFDMA1_TX_RING17_EXT_CTRL MT_WFDMA1(0x644) +#define MT_WFDMA1_TX_RING18_EXT_CTRL MT_WFDMA1(0x648) +#define MT_WFDMA1_TX_RING19_EXT_CTRL MT_WFDMA1(0x64c) +#define MT_WFDMA1_TX_RING20_EXT_CTRL MT_WFDMA1(0x650) +#define MT_WFDMA1_TX_RING21_EXT_CTRL MT_WFDMA1(0x654) +#define MT_WFDMA1_TX_RING22_EXT_CTRL MT_WFDMA1(0x658) +#define MT_WFDMA1_TX_RING23_EXT_CTRL MT_WFDMA1(0x65c) + +#define MT_WFDMA1_RX_RING0_EXT_CTRL MT_WFDMA1(0x680) +#define MT_WFDMA1_RX_RING1_EXT_CTRL MT_WFDMA1(0x684) +#define MT_WFDMA1_RX_RING2_EXT_CTRL MT_WFDMA1(0x688) +#define MT_WFDMA1_RX_RING3_EXT_CTRL MT_WFDMA1(0x68c) + +/* WFDMA CSR */ +#define MT_WFDMA_EXT_CSR_BASE 0xd7000 +#define MT_WFDMA_EXT_CSR(ofs) (MT_WFDMA_EXT_CSR_BASE + (ofs)) + +#define MT_INT_SOURCE_CSR MT_WFDMA_EXT_CSR(0x10) +#define MT_INT_MASK_CSR MT_WFDMA_EXT_CSR(0x14) +#define MT_INT_RX_DONE_DATA BIT(16) +#define MT_INT_RX_DONE_WM BIT(0) +#define MT_INT_RX_DONE_WA BIT(1) +#define MT_INT_RX_DONE(_n) ((_n) ? BIT((_n) - 1) : BIT(16)) +#define MT_INT_RX_DONE_ALL (BIT(0) | BIT(1) | BIT(16)) +#define MT_INT_TX_DONE_ALL (BIT(15) | GENMASK(27, 26) | BIT(30)) +#define MT_INT_MCU_CMD BIT(29) + +#define MT_WFDMA_EXT_CSR_HIF_MISC MT_WFDMA_EXT_CSR(0x44) +#define MT_WFDMA_EXT_CSR_HIF_MISC_BUSY BIT(0) + +/* WFDMA0 PCIE1 */ +#define MT_WFDMA0_PCIE1_BASE 0xd8000 +#define MT_WFDMA0_PCIE1(ofs) (MT_WFDMA0_PCIE1_BASE + (ofs)) + +#define MT_WFDMA0_PCIE1_BUSY_ENA MT_WFDMA0_PCIE1(0x13c) +#define MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO0 BIT(0) +#define MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO1 BIT(1) +#define MT_WFDMA0_PCIE1_BUSY_ENA_RX_FIFO BIT(2) + +/* WFDMA1 PCIE1 */ +#define MT_WFDMA1_PCIE1_BASE 0xd9000 +#define MT_WFDMA1_PCIE1(ofs) (MT_WFDMA1_PCIE1_BASE + (ofs)) + +#define MT_WFDMA1_PCIE1_BUSY_ENA MT_WFDMA1_PCIE1(0x13c) +#define MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO0 BIT(0) +#define MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO1 BIT(1) +#define MT_WFDMA1_PCIE1_BUSY_ENA_RX_FIFO BIT(2) + +#define MT_INFRA_CFG_BASE 0xf1000 +#define MT_INFRA(ofs) (MT_INFRA_CFG_BASE + (ofs)) + +#define MT_HIF_REMAP_L1 MT_INFRA(0x1ac) +#define MT_HIF_REMAP_L1_MASK GENMASK(15, 0) +#define MT_HIF_REMAP_L1_OFFSET GENMASK(15, 0) +#define MT_HIF_REMAP_L1_BASE GENMASK(31, 16) +#define MT_HIF_REMAP_BASE_L1 0xe0000 + +#define MT_HIF_REMAP_L2 MT_INFRA(0x1b0) +#define MT_HIF_REMAP_L2_MASK GENMASK(19, 0) +#define MT_HIF_REMAP_L2_OFFSET GENMASK(11, 0) +#define MT_HIF_REMAP_L2_BASE GENMASK(31, 12) +#define MT_HIF_REMAP_BASE_L2 0x00000 + +#define MT_TOP_BASE 0x18060000 +#define MT_TOP(ofs) (MT_TOP_BASE + (ofs)) + +#define MT_TOP_LPCR_HOST_BAND0 MT_TOP(0x10) +#define MT_TOP_LPCR_HOST_FW_OWN BIT(0) +#define MT_TOP_LPCR_HOST_DRV_OWN BIT(1) + +#define MT_TOP_MISC MT_TOP(0xf0) +#define MT_TOP_MISC_FW_STATE GENMASK(2, 0) + +#define MT_HW_BOUND 0x70010020 +#define MT_HW_CHIPID 0x70010200 +#define MT_HW_REV 0x70010204 + +#define MT_PCIE_MAC_BASE 0x74030000 +#define MT_PCIE_MAC(ofs) (MT_PCIE_MAC_BASE + (ofs)) +#define MT_PCIE_MAC_INT_ENABLE MT_PCIE_MAC(0x188) + +/* PHY: band 0(0x83080000), band 1(0x83090000) */ +#define MT_WF_PHY_BASE 0x83080000 +#define
MT_WF_PHY(ofs) (MT_WF_PHY_BASE + (ofs)) + +#define MT_WF_PHY_RX_CTRL1(_phy) MT_WF_PHY(0x2004 + ((_phy) << 16)) +#define MT_WF_PHY_RX_CTRL1_STSCNT_EN GENMASK(11, 9) + +#endif diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c index eff522dbda34..fca38ea2441f 100644 --- a/drivers/net/wireless/mediatek/mt76/tx.c +++ b/drivers/net/wireless/mediatek/mt76/tx.c @@ -101,19 +101,17 @@ mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list) { __skb_queue_head_init(list); spin_lock_bh(&dev->status_list.lock); - __acquire(&dev->status_list.lock); } EXPORT_SYMBOL_GPL(mt76_tx_status_lock); void mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list) - __releases(&dev->status_list.unlock) + __releases(&dev->status_list.lock) { struct ieee80211_hw *hw; struct sk_buff *skb; spin_unlock_bh(&dev->status_list.lock); - __release(&dev->status_list.unlock); while ((skb = __skb_dequeue(list)) != NULL) { hw = mt76_tx_status_get_hw(dev, skb); diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c index a981da6c35a5..fb97ea25b4d4 100644 --- a/drivers/net/wireless/mediatek/mt76/usb.c +++ b/drivers/net/wireless/mediatek/mt76/usb.c @@ -1009,8 +1009,19 @@ static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q) static u8 mt76u_ac_to_hwq(struct mt76_dev *dev, u8 ac) { - if (mt76_chip(dev) == 0x7663) - return ac ^ 0x3; + if (mt76_chip(dev) == 0x7663) { + static const u8 wmm_queue_map[] = { + [IEEE80211_AC_VO] = 0, + [IEEE80211_AC_VI] = 1, + [IEEE80211_AC_BE] = 2, + [IEEE80211_AC_BK] = 4, + }; + + if (WARN_ON(ac >= ARRAY_SIZE(wmm_queue_map))) + return 2; /* BE */ + + return wmm_queue_map[ac]; + } return mt76_ac_to_hwq(ac); } diff --git a/drivers/net/wireless/mediatek/mt76/util.c b/drivers/net/wireless/mediatek/mt76/util.c index 8c60c450125a..ecde87465bf6 100644 --- a/drivers/net/wireless/mediatek/mt76/util.c +++ b/drivers/net/wireless/mediatek/mt76/util.c @@ -42,17 +42,17 @@ bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val, } EXPORT_SYMBOL_GPL(__mt76_poll_msec); -int mt76_wcid_alloc(unsigned long *mask, int size) +int mt76_wcid_alloc(u32 *mask, int size) { int i, idx = 0, cur; - for (i = 0; i < size / BITS_PER_LONG; i++) { + for (i = 0; i < DIV_ROUND_UP(size, 32); i++) { idx = ffs(~mask[i]); if (!idx) continue; idx--; - cur = i * BITS_PER_LONG + idx; + cur = i * 32 + idx; if (cur >= size) break; @@ -74,13 +74,13 @@ int mt76_get_min_avg_rssi(struct mt76_dev *dev, bool ext_phy) rcu_read_lock(); for (i = 0; i < ARRAY_SIZE(dev->wcid_mask); i++) { - unsigned long mask = dev->wcid_mask[i]; - unsigned long phy_mask = dev->wcid_phy_mask[i]; + u32 mask = dev->wcid_mask[i]; + u32 phy_mask = dev->wcid_phy_mask[i]; if (!mask) continue; - for (j = i * BITS_PER_LONG; mask; j++, mask >>= 1, phy_mask >>= 1) { + for (j = i * 32; mask; j++, mask >>= 1, phy_mask >>= 1) { if (!(mask & 1)) continue; diff --git a/drivers/net/wireless/mediatek/mt76/util.h b/drivers/net/wireless/mediatek/mt76/util.h index 48a71e7479e5..fd1a68820e0a 100644 --- a/drivers/net/wireless/mediatek/mt76/util.h +++ b/drivers/net/wireless/mediatek/mt76/util.h @@ -14,24 +14,24 @@ #define MT76_INCR(_var, _size) \ (_var = (((_var) + 1) % (_size))) -int mt76_wcid_alloc(unsigned long *mask, int size); +int mt76_wcid_alloc(u32 *mask, int size); static inline bool -mt76_wcid_mask_test(unsigned long *mask, int idx) +mt76_wcid_mask_test(u32 *mask, int idx) { - return mask[idx / BITS_PER_LONG] & BIT(idx % BITS_PER_LONG); + return 
mask[idx / 32] & BIT(idx % 32); } static inline void -mt76_wcid_mask_set(unsigned long *mask, int idx) +mt76_wcid_mask_set(u32 *mask, int idx) { - mask[idx / BITS_PER_LONG] |= BIT(idx % BITS_PER_LONG); + mask[idx / 32] |= BIT(idx % 32); } static inline void -mt76_wcid_mask_clear(unsigned long *mask, int idx) +mt76_wcid_mask_clear(u32 *mask, int idx) { - mask[idx / BITS_PER_LONG] &= ~BIT(idx % BITS_PER_LONG); + mask[idx / 32] &= ~BIT(idx % 32); } static inline void diff --git a/drivers/net/wireless/quantenna/qtnfmac/bus.h b/drivers/net/wireless/quantenna/qtnfmac/bus.h index 87d048df09d1..3334c45aac13 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/bus.h +++ b/drivers/net/wireless/quantenna/qtnfmac/bus.h @@ -69,7 +69,7 @@ struct qtnf_bus { struct notifier_block netdev_nb; u8 hw_id[ETH_ALEN]; /* bus private data */ - char bus_priv[0] __aligned(sizeof(void *)); + char bus_priv[] __aligned(sizeof(void *)); }; static inline bool qtnf_fw_is_up(struct qtnf_bus *bus) diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c index 8be17106008d..54cdf3ad09d7 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c +++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c @@ -389,55 +389,57 @@ static int qtnf_set_wiphy_params(struct wiphy *wiphy, u32 changed) } static void -qtnf_mgmt_frame_register(struct wiphy *wiphy, struct wireless_dev *wdev, - u16 frame_type, bool reg) +qtnf_update_mgmt_frame_registrations(struct wiphy *wiphy, + struct wireless_dev *wdev, + struct mgmt_frame_regs *upd) { struct qtnf_vif *vif = qtnf_netdev_get_priv(wdev->netdev); - u16 mgmt_type; - u16 new_mask; - u16 qlink_frame_type = 0; + u16 new_mask = upd->interface_stypes; + u16 old_mask = vif->mgmt_frames_bitmask; + static const struct { + u16 mask, qlink_type; + } updates[] = { + { + .mask = BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) | + BIT(IEEE80211_STYPE_ASSOC_REQ >> 4), + .qlink_type = QLINK_MGMT_FRAME_ASSOC_REQ, + }, + { + .mask = BIT(IEEE80211_STYPE_AUTH >> 4), + .qlink_type = QLINK_MGMT_FRAME_AUTH, + }, + { + .mask = BIT(IEEE80211_STYPE_PROBE_REQ >> 4), + .qlink_type = QLINK_MGMT_FRAME_PROBE_REQ, + }, + { + .mask = BIT(IEEE80211_STYPE_ACTION >> 4), + .qlink_type = QLINK_MGMT_FRAME_ACTION, + }, + }; + unsigned int i; - mgmt_type = (frame_type & IEEE80211_FCTL_STYPE) >> 4; + if (new_mask == old_mask) + return; - if (reg) - new_mask = vif->mgmt_frames_bitmask | BIT(mgmt_type); - else - new_mask = vif->mgmt_frames_bitmask & ~BIT(mgmt_type); + for (i = 0; i < ARRAY_SIZE(updates); i++) { + u16 mask = updates[i].mask; + u16 qlink_frame_type = updates[i].qlink_type; + bool reg; - if (new_mask == vif->mgmt_frames_bitmask) - return; + /* the ! 
are here due to the assoc/reassoc merge */ + if (!(new_mask & mask) == !(old_mask & mask)) + continue; - switch (frame_type & IEEE80211_FCTL_STYPE) { - case IEEE80211_STYPE_REASSOC_REQ: - case IEEE80211_STYPE_ASSOC_REQ: - qlink_frame_type = QLINK_MGMT_FRAME_ASSOC_REQ; - break; - case IEEE80211_STYPE_AUTH: - qlink_frame_type = QLINK_MGMT_FRAME_AUTH; - break; - case IEEE80211_STYPE_PROBE_REQ: - qlink_frame_type = QLINK_MGMT_FRAME_PROBE_REQ; - break; - case IEEE80211_STYPE_ACTION: - qlink_frame_type = QLINK_MGMT_FRAME_ACTION; - break; - default: - pr_warn("VIF%u.%u: unsupported frame type: %X\n", - vif->mac->macid, vif->vifid, - (frame_type & IEEE80211_FCTL_STYPE) >> 4); - return; - } + reg = new_mask & mask; - if (qtnf_cmd_send_register_mgmt(vif, qlink_frame_type, reg)) { - pr_warn("VIF%u.%u: failed to %sregister mgmt frame type 0x%x\n", - vif->mac->macid, vif->vifid, reg ? "" : "un", - frame_type); - return; + if (qtnf_cmd_send_register_mgmt(vif, qlink_frame_type, reg)) + pr_warn("VIF%u.%u: failed to %sregister qlink frame type 0x%x\n", + vif->mac->macid, vif->vifid, reg ? "" : "un", + qlink_frame_type); } vif->mgmt_frames_bitmask = new_mask; - pr_debug("VIF%u.%u: %sregistered mgmt frame type 0x%x\n", - vif->mac->macid, vif->vifid, reg ? "" : "un", frame_type); } static int @@ -1017,7 +1019,8 @@ static struct cfg80211_ops qtn_cfg80211_ops = { .change_beacon = qtnf_change_beacon, .stop_ap = qtnf_stop_ap, .set_wiphy_params = qtnf_set_wiphy_params, - .mgmt_frame_register = qtnf_mgmt_frame_register, + .update_mgmt_frame_registrations = + qtnf_update_mgmt_frame_registrations, .mgmt_tx = qtnf_mgmt_tx, .change_station = qtnf_change_station, .del_station = qtnf_del_station, diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink.h b/drivers/net/wireless/quantenna/qtnfmac/qlink.h index 4d22a54c034f..2dda4c5d7427 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/qlink.h +++ b/drivers/net/wireless/quantenna/qtnfmac/qlink.h @@ -362,7 +362,7 @@ struct qlink_cmd { struct qlink_cmd_init_fw { struct qlink_cmd chdr; __le32 qlink_proto_ver; - u8 var_info[0]; + u8 var_info[]; } __packed; /** @@ -434,7 +434,7 @@ struct qlink_cmd_frame_tx { __le32 cookie; __le16 freq; __le16 flags; - u8 frame_data[0]; + u8 frame_data[]; } __packed; /** @@ -466,7 +466,7 @@ struct qlink_cmd_add_key { __le32 cipher; __le16 vlanid; u8 rsvd[2]; - u8 key_data[0]; + u8 key_data[]; } __packed; /** @@ -578,7 +578,7 @@ struct qlink_cmd_connect { u8 mfp; u8 pbss; u8 rsvd[2]; - u8 payload[0]; + u8 payload[]; } __packed; /** @@ -592,7 +592,7 @@ struct qlink_cmd_external_auth { struct qlink_cmd chdr; u8 peer[ETH_ALEN]; __le16 status; - u8 payload[0]; + u8 payload[]; } __packed; /** @@ -698,7 +698,7 @@ struct qlink_cmd_reg_notify { u8 dfs_region; u8 slave_radar; u8 dfs_offload; - u8 info[0]; + u8 info[]; } __packed; /** @@ -773,7 +773,7 @@ struct qlink_cmd_start_ap { struct qlink_sr_params sr_params; u8 twt_responder; u8 rsvd[3]; - u8 info[0]; + u8 info[]; } __packed; /** @@ -807,7 +807,7 @@ struct qlink_mac_address { struct qlink_acl_data { __le32 policy; __le32 num_entries; - struct qlink_mac_address mac_addrs[0]; + struct qlink_mac_address mac_addrs[]; } __packed; /** @@ -882,7 +882,7 @@ enum qlink_wowlan_trigger { struct qlink_cmd_wowlan_set { struct qlink_cmd chdr; __le32 triggers; - u8 data[0]; + u8 data[]; } __packed; enum qlink_ndev_event_type { @@ -958,7 +958,7 @@ struct qlink_cmd_scan { u8 bssid[ETH_ALEN]; u8 scan_width; u8 rsvd[3]; - u8 var_info[0]; + u8 var_info[]; } __packed; /** @@ -972,7 +972,7 @@ struct 
qlink_cmd_update_owe { struct qlink_cmd chdr; u8 peer[ETH_ALEN]; __le16 status; - u8 ies[0]; + u8 ies[]; } __packed; /* QLINK Command Responses messages related definitions @@ -1106,7 +1106,7 @@ struct qlink_resp_get_mac_info { u8 n_reg_rules; u8 dfs_region; u8 rsvd[3]; - u8 var_info[0]; + u8 var_info[]; } __packed; /** @@ -1131,7 +1131,7 @@ struct qlink_resp_get_hw_info { u8 mac_bitmap; u8 total_tx_chain; u8 total_rx_chain; - u8 info[0]; + u8 info[]; } __packed; /** @@ -1167,7 +1167,7 @@ struct qlink_resp_get_sta_info { struct qlink_resp rhdr; u8 sta_addr[ETH_ALEN]; u8 rsvd[2]; - u8 info[0]; + u8 info[]; } __packed; /** @@ -1184,7 +1184,7 @@ struct qlink_resp_band_info_get { u8 num_chans; u8 num_bitrates; u8 rsvd[1]; - u8 info[0]; + u8 info[]; } __packed; /** @@ -1196,7 +1196,7 @@ struct qlink_resp_band_info_get { struct qlink_resp_get_chan_stats { struct qlink_resp rhdr; __le32 chan_freq; - u8 info[0]; + u8 info[]; } __packed; /** @@ -1270,7 +1270,7 @@ struct qlink_event_sta_assoc { struct qlink_event ehdr; u8 sta_addr[ETH_ALEN]; __le16 frame_control; - u8 ies[0]; + u8 ies[]; } __packed; /** @@ -1297,7 +1297,7 @@ struct qlink_event_bss_join { struct qlink_chandef chan; u8 bssid[ETH_ALEN]; __le16 status; - u8 ies[0]; + u8 ies[]; } __packed; /** @@ -1339,7 +1339,7 @@ struct qlink_event_rxmgmt { __le32 flags; s8 sig_dbm; u8 rsvd[3]; - u8 frame_data[0]; + u8 frame_data[]; } __packed; /** @@ -1367,7 +1367,7 @@ struct qlink_event_scan_result { u8 ssid[IEEE80211_MAX_SSID_LEN]; u8 bssid[ETH_ALEN]; u8 rsvd[2]; - u8 payload[0]; + u8 payload[]; } __packed; /** @@ -1456,7 +1456,7 @@ struct qlink_event_update_owe { struct qlink_event ehdr; u8 peer[ETH_ALEN]; u8 rsvd[2]; - u8 ies[0]; + u8 ies[]; } __packed; /* QLINK TLVs (Type-Length Values) definitions @@ -1512,7 +1512,7 @@ enum qlink_tlv_id { struct qlink_tlv_hdr { __le16 type; __le16 len; - u8 val[0]; + u8 val[]; } __packed; struct qlink_iface_limit { @@ -1524,7 +1524,7 @@ struct qlink_iface_limit_record { __le16 max_interfaces; u8 num_different_channels; u8 n_limits; - struct qlink_iface_limit limits[0]; + struct qlink_iface_limit limits[]; } __packed; #define QLINK_RSSI_OFFSET 120 @@ -1647,7 +1647,7 @@ struct qlink_tlv_ie_set { u8 type; u8 flags; u8 rsvd[2]; - u8 ie_data[0]; + u8 ie_data[]; } __packed; /** @@ -1660,7 +1660,7 @@ struct qlink_tlv_ext_ie { struct qlink_tlv_hdr hdr; u8 eid_ext; u8 rsvd[3]; - u8 ie_data[0]; + u8 ie_data[]; } __packed; #define IEEE80211_HE_PPE_THRES_MAX_LEN 25 @@ -1681,7 +1681,7 @@ struct qlink_tlv_iftype_data { struct qlink_tlv_hdr hdr; u8 n_iftype_data; u8 rsvd[3]; - struct qlink_sband_iftype_data iftype_data[0]; + struct qlink_sband_iftype_data iftype_data[]; } __packed; /** @@ -1867,7 +1867,7 @@ struct qlink_random_mac_addr { struct qlink_wowlan_capab_data { __le16 version; __le16 len; - u8 data[0]; + u8 data[]; } __packed; /** diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c index 6beac1f74e7c..a779fe771a55 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c @@ -9971,9 +9971,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev) if (!rt2x00_is_usb(rt2x00dev)) ieee80211_hw_set(rt2x00dev->hw, HOST_BROADCAST_PS_BUFFERING); - /* Set MFP if HW crypto is disabled. 
*/ - if (rt2800_hwcrypt_disabled(rt2x00dev)) - ieee80211_hw_set(rt2x00dev->hw, MFP_CAPABLE); + ieee80211_hw_set(rt2x00dev->hw, MFP_CAPABLE); SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev); SET_IEEE80211_PERM_ADDR(rt2x00dev->hw, diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c index 32efbc8e9f92..2f68a31072ae 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c @@ -468,7 +468,8 @@ int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) return 0; - if (!rt2x00_has_cap_hw_crypto(rt2x00dev)) + /* The hardware can't do MFP */ + if (!rt2x00_has_cap_hw_crypto(rt2x00dev) || (sta && sta->mfp)) return -EOPNOTSUPP; /* diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c index b2616d61b66d..585784258c66 100644 --- a/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c +++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c @@ -606,10 +606,6 @@ static const u8 rtl8225z2_tx_power_cck[] = { 0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03 }; -static const u8 rtl8225z2_tx_power_ofdm[] = { - 0x42, 0x00, 0x40, 0x00, 0x40 -}; - static const u8 rtl8225z2_tx_gain_cck_ofdm[] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c index 6dba576aa81e..bb291b951f4d 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c @@ -2866,14 +2866,12 @@ void rtl92ee_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery) } } - for (i = 0; i < 4; i++) { - reg_e94 = result[i][0]; - reg_e9c = result[i][1]; - reg_ea4 = result[i][2]; - reg_eb4 = result[i][4]; - reg_ebc = result[i][5]; - reg_ec4 = result[i][6]; - } + reg_e94 = result[3][0]; + reg_e9c = result[3][1]; + reg_ea4 = result[3][2]; + reg_eb4 = result[3][4]; + reg_ebc = result[3][5]; + reg_ec4 = result[3][6]; if (final_candidate != 0xff) { reg_e94 = result[final_candidate][0]; diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c index 348b0072cdd6..c66c6dc00378 100644 --- a/drivers/net/wireless/realtek/rtlwifi/usb.c +++ b/drivers/net/wireless/realtek/rtlwifi/usb.c @@ -881,10 +881,8 @@ static struct urb *_rtl_usb_tx_urb_setup(struct ieee80211_hw *hw, WARN_ON(NULL == skb); _urb = usb_alloc_urb(0, GFP_ATOMIC); - if (!_urb) { - kfree_skb(skb); + if (!_urb) return NULL; - } _rtl_install_trx_info(rtlusb, skb, ep_num); usb_fill_bulk_urb(_urb, rtlusb->udev, usb_sndbulkpipe(rtlusb->udev, ep_num), skb->data, skb->len, _rtl_tx_complete, skb); @@ -898,7 +896,6 @@ static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb, struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); u32 ep_num; struct urb *_urb = NULL; - struct sk_buff *_skb = NULL; WARN_ON(NULL == rtlusb->usb_tx_aggregate_hdl); if (unlikely(IS_USB_STOP(rtlusb))) { @@ -907,8 +904,7 @@ static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb, return; } ep_num = rtlusb->ep_map.ep_mapping[qnum]; - _skb = skb; - _urb = _rtl_usb_tx_urb_setup(hw, _skb, ep_num); + _urb = _rtl_usb_tx_urb_setup(hw, skb, ep_num); if (unlikely(!_urb)) { pr_err("Can't allocate urb. 
Drop skb!\n"); kfree_skb(skb); diff --git a/drivers/net/wireless/realtek/rtw88/Kconfig b/drivers/net/wireless/realtek/rtw88/Kconfig index 33bd7ed797ff..ca894c4f96ac 100644 --- a/drivers/net/wireless/realtek/rtw88/Kconfig +++ b/drivers/net/wireless/realtek/rtw88/Kconfig @@ -16,26 +16,48 @@ config RTW88_CORE config RTW88_PCI tristate +config RTW88_8822B + tristate + +config RTW88_8822C + tristate + +config RTW88_8723D + tristate + config RTW88_8822BE - bool "Realtek 8822BE PCI wireless network adapter" + tristate "Realtek 8822BE PCI wireless network adapter" depends on PCI select RTW88_CORE select RTW88_PCI + select RTW88_8822B help Select this option will enable support for 8822BE chipset 802.11ac PCIe wireless network adapter config RTW88_8822CE - bool "Realtek 8822CE PCI wireless network adapter" + tristate "Realtek 8822CE PCI wireless network adapter" depends on PCI select RTW88_CORE select RTW88_PCI + select RTW88_8822C help Select this option will enable support for 8822CE chipset 802.11ac PCIe wireless network adapter +config RTW88_8723DE + tristate "Realtek 8723DE PCI wireless network adapter" + depends on PCI + select RTW88_CORE + select RTW88_PCI + select RTW88_8723D + help + Select this option will enable support for 8723DE chipset + + 802.11n PCIe wireless network adapter + config RTW88_DEBUG bool "Realtek rtw88 debug support" depends on RTW88_CORE diff --git a/drivers/net/wireless/realtek/rtw88/Makefile b/drivers/net/wireless/realtek/rtw88/Makefile index cac148d13cf1..f31e78a6f146 100644 --- a/drivers/net/wireless/realtek/rtw88/Makefile +++ b/drivers/net/wireless/realtek/rtw88/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause -obj-$(CONFIG_RTW88_CORE) += rtw88.o -rtw88-y += main.o \ +obj-$(CONFIG_RTW88_CORE) += rtw88_core.o +rtw88_core-y += main.o \ mac80211.o \ util.o \ debug.o \ @@ -18,8 +18,24 @@ rtw88-y += main.o \ wow.o \ regd.o -rtw88-$(CONFIG_RTW88_8822BE) += rtw8822b.o rtw8822b_table.o -rtw88-$(CONFIG_RTW88_8822CE) += rtw8822c.o rtw8822c_table.o -obj-$(CONFIG_RTW88_PCI) += rtwpci.o -rtwpci-objs := pci.o +obj-$(CONFIG_RTW88_8822B) += rtw88_8822b.o +rtw88_8822b-objs := rtw8822b.o rtw8822b_table.o + +obj-$(CONFIG_RTW88_8822BE) += rtw88_8822be.o +rtw88_8822be-objs := rtw8822be.o + +obj-$(CONFIG_RTW88_8822C) += rtw88_8822c.o +rtw88_8822c-objs := rtw8822c.o rtw8822c_table.o + +obj-$(CONFIG_RTW88_8822CE) += rtw88_8822ce.o +rtw88_8822ce-objs := rtw8822ce.o + +obj-$(CONFIG_RTW88_8723D) += rtw88_8723d.o +rtw88_8723d-objs := rtw8723d.o rtw8723d_table.o + +obj-$(CONFIG_RTW88_8723DE) += rtw88_8723de.o +rtw88_8723de-objs := rtw8723de.o + +obj-$(CONFIG_RTW88_PCI) += rtw88_pci.o +rtw88_pci-objs := pci.o diff --git a/drivers/net/wireless/realtek/rtw88/bf.c b/drivers/net/wireless/realtek/rtw88/bf.c index a5912da327e2..8a070d5d9174 100644 --- a/drivers/net/wireless/realtek/rtw88/bf.c +++ b/drivers/net/wireless/realtek/rtw88/bf.c @@ -220,6 +220,7 @@ void rtw_bf_enable_bfee_su(struct rtw_dev *rtwdev, struct rtw_vif *vif, /* ndp rx standby timer */ rtw_write8(rtwdev, REG_SND_PTCL_CTRL + 3, RTW_NDP_RX_STANDBY_TIME); } +EXPORT_SYMBOL(rtw_bf_enable_bfee_su); /* nc index: 1 2T2R 0 1T1R * nr index: 1 use Nsts 0 use reg setting @@ -263,6 +264,7 @@ void rtw_bf_enable_bfee_mu(struct rtw_dev *rtwdev, struct rtw_vif *vif, /* accept NDPA and BF report poll */ rtw_write16_set(rtwdev, REG_RXFLTMAP1, BIT_RXFLTMAP1_BF); } +EXPORT_SYMBOL(rtw_bf_enable_bfee_mu); void rtw_bf_remove_bfee_su(struct rtw_dev *rtwdev, struct rtw_bfee *bfee) @@ -288,6 +290,7 @@ void 
rtw_bf_remove_bfee_su(struct rtw_dev *rtwdev, clear_bit(bfee->su_reg_index, bfinfo->bfer_su_reg_maping); bfee->su_reg_index = 0xFF; } +EXPORT_SYMBOL(rtw_bf_remove_bfee_su); void rtw_bf_remove_bfee_mu(struct rtw_dev *rtwdev, struct rtw_bfee *bfee) @@ -301,6 +304,7 @@ void rtw_bf_remove_bfee_mu(struct rtw_dev *rtwdev, if (bfinfo->bfer_su_cnt == 0 && bfinfo->bfer_mu_cnt == 0) rtw_bf_del_sounding(rtwdev); } +EXPORT_SYMBOL(rtw_bf_remove_bfee_mu); void rtw_bf_set_gid_table(struct rtw_dev *rtwdev, struct ieee80211_vif *vif, struct ieee80211_bss_conf *conf) @@ -329,6 +333,7 @@ void rtw_bf_set_gid_table(struct rtw_dev *rtwdev, struct ieee80211_vif *vif, rtw_bf_cfg_mu_bfee(rtwdev, ¶m); } +EXPORT_SYMBOL(rtw_bf_set_gid_table); void rtw_bf_phy_init(struct rtw_dev *rtwdev) { @@ -365,6 +370,7 @@ void rtw_bf_phy_init(struct rtw_dev *rtwdev) rtw_write32_mask(rtwdev, REG_BBPSF_CTRL, BIT_MASK_CSI_RATE, DESC_RATE6M); } +EXPORT_SYMBOL(rtw_bf_phy_init); void rtw_bf_cfg_csi_rate(struct rtw_dev *rtwdev, u8 rssi, u8 cur_rate, u8 fixrate_en, u8 *new_rate) @@ -395,3 +401,4 @@ void rtw_bf_cfg_csi_rate(struct rtw_dev *rtwdev, u8 rssi, u8 cur_rate, *new_rate = DESC_RATE24M; } } +EXPORT_SYMBOL(rtw_bf_cfg_csi_rate); diff --git a/drivers/net/wireless/realtek/rtw88/coex.c b/drivers/net/wireless/realtek/rtw88/coex.c index 567372fb4e12..cbf3d503df1c 100644 --- a/drivers/net/wireless/realtek/rtw88/coex.c +++ b/drivers/net/wireless/realtek/rtw88/coex.c @@ -283,6 +283,7 @@ void rtw_coex_write_scbd(struct rtw_dev *rtwdev, u16 bitpos, bool set) rtw_write16(rtwdev, REG_WIFI_BT_INFO, val); } } +EXPORT_SYMBOL(rtw_coex_write_scbd); static u16 rtw_coex_read_scbd(struct rtw_dev *rtwdev) { @@ -732,6 +733,7 @@ u32 rtw_coex_read_indirect_reg(struct rtw_dev *rtwdev, u16 addr) return val; } +EXPORT_SYMBOL(rtw_coex_read_indirect_reg); void rtw_coex_write_indirect_reg(struct rtw_dev *rtwdev, u16 addr, u32 mask, u32 val) @@ -745,13 +747,22 @@ void rtw_coex_write_indirect_reg(struct rtw_dev *rtwdev, u16 addr, if (!ltecoex_reg_write(rtwdev, addr, tmp)) rtw_err(rtwdev, "failed to write indirect register\n"); } +EXPORT_SYMBOL(rtw_coex_write_indirect_reg); static void rtw_coex_coex_ctrl_owner(struct rtw_dev *rtwdev, bool wifi_control) { - if (wifi_control) + struct rtw_chip_info *chip = rtwdev->chip; + const struct rtw_hw_reg *btg_reg = chip->btg_reg; + + if (wifi_control) { rtw_write32_set(rtwdev, REG_SYS_SDIO_CTRL, BIT_LTE_MUX_CTRL_PATH); - else + if (btg_reg) + rtw_write8_set(rtwdev, btg_reg->addr, btg_reg->mask); + } else { rtw_write32_clr(rtwdev, REG_SYS_SDIO_CTRL, BIT_LTE_MUX_CTRL_PATH); + if (btg_reg) + rtw_write8_clr(rtwdev, btg_reg->addr, btg_reg->mask); + } } static void rtw_coex_set_gnt_bt(struct rtw_dev *rtwdev, u8 state) @@ -1343,12 +1354,15 @@ static void rtw_coex_action_bt_inquiry(struct rtw_dev *rtwdev) tdma_case = 108; else tdma_case = 109; + } else if (coex_stat->wl_gl_busy) { + table_case = 114; + tdma_case = 121; } else if (coex_stat->wl_connected) { - table_case = 101; - tdma_case = 110; - } else { table_case = 100; tdma_case = 100; + } else { + table_case = 101; + tdma_case = 100; } } diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c index b4964306de61..09f04feb8fe1 100644 --- a/drivers/net/wireless/realtek/rtw88/debug.c +++ b/drivers/net/wireless/realtek/rtw88/debug.c @@ -531,8 +531,8 @@ static int rtw_debugfs_get_tx_pwr_tbl(struct seq_file *m, void *v) u8 ch = hal->current_channel; u8 regd = rtwdev->regd.txpwr_regd; - seq_printf(m, "%-4s %-10s %-3s%6s %-4s %4s (%-4s 
%-4s)\n", - "path", "rate", "pwr", "", "base", "", "byr", "lmt"); + seq_printf(m, "%-4s %-10s %-3s%6s %-4s %4s (%-4s %-4s) %-4s\n", + "path", "rate", "pwr", "", "base", "", "byr", "lmt", "rem"); mutex_lock(&hal->tx_power_mutex); for (path = RF_PATH_A; path <= RF_PATH_B; path++) { @@ -554,13 +554,14 @@ static int rtw_debugfs_get_tx_pwr_tbl(struct seq_file *m, void *v) seq_printf(m, "%4c ", path + 'A'); rtw_print_rate(m, rate); - seq_printf(m, " %3u(0x%02x) %4u %4d (%4d %4d)\n", + seq_printf(m, " %3u(0x%02x) %4u %4d (%4d %4d) %4d\n", hal->tx_pwr_tbl[path][rate], hal->tx_pwr_tbl[path][rate], pwr_param.pwr_base, min_t(s8, pwr_param.pwr_offset, pwr_param.pwr_limit), - pwr_param.pwr_offset, pwr_param.pwr_limit); + pwr_param.pwr_offset, pwr_param.pwr_limit, + pwr_param.pwr_remnant); } } diff --git a/drivers/net/wireless/realtek/rtw88/efuse.c b/drivers/net/wireless/realtek/rtw88/efuse.c index 13d1c58d6de5..c266c84ef233 100644 --- a/drivers/net/wireless/realtek/rtw88/efuse.c +++ b/drivers/net/wireless/realtek/rtw88/efuse.c @@ -141,6 +141,7 @@ int rtw_read8_physical_efuse(struct rtw_dev *rtwdev, u16 addr, u8 *data) return 0; } +EXPORT_SYMBOL(rtw_read8_physical_efuse); int rtw_parse_efuse_map(struct rtw_dev *rtwdev) { diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c index 2c28afe525c7..6478fd7a78f6 100644 --- a/drivers/net/wireless/realtek/rtw88/fw.c +++ b/drivers/net/wireless/realtek/rtw88/fw.c @@ -330,6 +330,7 @@ void rtw_fw_do_iqk(struct rtw_dev *rtwdev, struct rtw_iqk_para *para) rtw_fw_send_h2c_packet(rtwdev, h2c_pkt); } +EXPORT_SYMBOL(rtw_fw_do_iqk); void rtw_fw_query_bt_info(struct rtw_dev *rtwdev) { diff --git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c index e8ffeb338584..19b9b7ab016b 100644 --- a/drivers/net/wireless/realtek/rtw88/mac.c +++ b/drivers/net/wireless/realtek/rtw88/mac.c @@ -56,6 +56,7 @@ void rtw_set_channel_mac(struct rtw_dev *rtwdev, u8 channel, u8 bw, value8 |= BIT_CHECK_CCK_EN; rtw_write8(rtwdev, REG_CCK_CHECK, value8); } +EXPORT_SYMBOL(rtw_set_channel_mac); static int rtw_mac_pre_system_cfg(struct rtw_dev *rtwdev) { @@ -919,31 +920,24 @@ static u32 get_priority_queues(struct rtw_dev *rtwdev, u32 queues) static void __rtw_mac_flush_prio_queue(struct rtw_dev *rtwdev, u32 prio_queue, bool drop) { - u32 addr; + struct rtw_chip_info *chip = rtwdev->chip; + const struct rtw_prioq_addr *addr; + bool wsize; u16 avail_page, rsvd_page; int i; - switch (prio_queue) { - case RTW_DMA_MAPPING_EXTRA: - addr = REG_FIFOPAGE_INFO_4; - break; - case RTW_DMA_MAPPING_LOW: - addr = REG_FIFOPAGE_INFO_2; - break; - case RTW_DMA_MAPPING_NORMAL: - addr = REG_FIFOPAGE_INFO_3; - break; - case RTW_DMA_MAPPING_HIGH: - addr = REG_FIFOPAGE_INFO_1; - break; - default: + if (prio_queue >= RTW_DMA_MAPPING_MAX) return; - } + + addr = &chip->prioq_addrs->prio[prio_queue]; + wsize = chip->prioq_addrs->wsize; /* check if all of the reserved pages are available for 100 msecs */ for (i = 0; i < 5; i++) { - rsvd_page = rtw_read16(rtwdev, addr); - avail_page = rtw_read16(rtwdev, addr + 2); + rsvd_page = wsize ? rtw_read16(rtwdev, addr->rsvd) : + rtw_read8(rtwdev, addr->rsvd); + avail_page = wsize ? 
rtw_read16(rtwdev, addr->avail) : + rtw_read8(rtwdev, addr->avail); if (rsvd_page == avail_page) return; diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c index 98d2ac22f6f6..c412bc54efde 100644 --- a/drivers/net/wireless/realtek/rtw88/mac80211.c +++ b/drivers/net/wireless/realtek/rtw88/mac80211.c @@ -341,13 +341,11 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw, rtw_leave_lps_deep(rtwdev); if (changed & BSS_CHANGED_ASSOC) { - struct rtw_chip_info *chip = rtwdev->chip; enum rtw_net_type net_type; if (conf->assoc) { rtw_coex_connect_notify(rtwdev, COEX_ASSOCIATE_FINISH); net_type = RTW_NET_MGD_LINKED; - chip->ops->phy_calibration(rtwdev); rtwvif->aid = conf->aid; rtw_fw_download_rsvd_page(rtwdev); @@ -663,6 +661,7 @@ static void rtw_ops_mgd_prepare_tx(struct ieee80211_hw *hw, mutex_lock(&rtwdev->mutex); rtw_leave_lps_deep(rtwdev); rtw_coex_connect_notify(rtwdev, COEX_ASSOCIATE_START); + rtw_chip_prepare_tx(rtwdev); mutex_unlock(&rtwdev->mutex); } diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c index f88a7d2370aa..0eefafc51c62 100644 --- a/drivers/net/wireless/realtek/rtw88/main.c +++ b/drivers/net/wireless/realtek/rtw88/main.c @@ -408,6 +408,23 @@ void rtw_set_channel(struct rtw_dev *rtwdev) } rtw_phy_set_tx_power_level(rtwdev, center_chan); + + /* if the channel isn't set for scanning, we will do RF calibration + * in ieee80211_ops::mgd_prepare_tx(). Performing the calibration + * during scanning on each channel takes too long. + */ + if (!test_bit(RTW_FLAG_SCANNING, rtwdev->flags)) + rtwdev->need_rfk = true; +} + +void rtw_chip_prepare_tx(struct rtw_dev *rtwdev) +{ + struct rtw_chip_info *chip = rtwdev->chip; + + if (rtwdev->need_rfk) { + rtwdev->need_rfk = false; + chip->ops->phy_calibration(rtwdev); + } } static void rtw_vif_write_addr(struct rtw_dev *rtwdev, u32 start, u8 *addr) diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h index e0365a70c6f7..0841f5fa4bf2 100644 --- a/drivers/net/wireless/realtek/rtw88/main.h +++ b/drivers/net/wireless/realtek/rtw88/main.h @@ -11,6 +11,7 @@ #include <linux/average.h> #include <linux/bitops.h> #include <linux/bitfield.h> +#include <linux/iopoll.h> #include <linux/interrupt.h> #include "util.h" @@ -39,9 +40,6 @@ extern bool rtw_bf_support; extern unsigned int rtw_fw_lps_deep_mode; extern unsigned int rtw_debug_mask; extern const struct ieee80211_ops rtw_ops; -extern struct rtw_chip_info rtw8822b_hw_spec; -extern struct rtw_chip_info rtw8822c_hw_spec; -extern struct rtw_chip_info rtw8723d_hw_spec; #define RTW_MAX_CHANNEL_NUM_2G 14 #define RTW_MAX_CHANNEL_NUM_5G 49 @@ -518,6 +516,12 @@ struct rtw_hw_reg { u32 mask; }; +struct rtw_ltecoex_addr { + u32 ctrl; + u32 wdata; + u32 rdata; +}; + struct rtw_reg_domain { u32 addr; u32 mask; @@ -794,6 +798,7 @@ struct rtw_regulatory { struct rtw_chip_ops { int (*mac_init)(struct rtw_dev *rtwdev); + void (*shutdown)(struct rtw_dev *rtwdev); int (*read_efuse)(struct rtw_dev *rtwdev, u8 *map); void (*phy_set_param)(struct rtw_dev *rtwdev); void (*set_channel)(struct rtw_dev *rtwdev, u8 channel, @@ -943,6 +948,16 @@ struct rtw_rqpn { enum rtw_dma_mapping dma_map_hi; }; +struct rtw_prioq_addr { + u32 rsvd; + u32 avail; +}; + +struct rtw_prioq_addrs { + struct rtw_prioq_addr prio[RTW_DMA_MAPPING_MAX]; + bool wsize; +}; + struct rtw_page_table { u16 hq_num; u16 nq_num; @@ -1055,6 +1070,8 @@ struct rtw_pwr_track_tbl { const u8 *pwrtrk_2g_cckb_p; 
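	/* annotation (inferred from rtw_phy_pwrtrack_get_pwridx() in phy.c):
	 * the _p tables apply when the current thermal reading is above the
	 * efuse calibration value and the _n tables when it is below; the
	 * xtal entries added here are s8 because crystal-cap compensation
	 * can move in either direction.
	 */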
const u8 *pwrtrk_2g_ccka_n; const u8 *pwrtrk_2g_ccka_p; + const s8 *pwrtrk_xtal_n; + const s8 *pwrtrk_xtal_p; }; enum rtw_wlan_cpu { @@ -1097,6 +1114,7 @@ struct rtw_chip_info { const struct rtw_pwr_seq_cmd **pwr_on_seq; const struct rtw_pwr_seq_cmd **pwr_off_seq; const struct rtw_rqpn *rqpn_table; + const struct rtw_prioq_addrs *prioq_addrs; const struct rtw_page_table *page_table; const struct rtw_intf_phy_para_table *intf_table; @@ -1106,6 +1124,7 @@ struct rtw_chip_info { u32 rf_sipi_addr[2]; const struct rtw_rf_sipi_addr *rf_sipi_read_addr; u8 fix_rf_phy_num; + const struct rtw_ltecoex_addr *ltecoex_addr; const struct rtw_table *mac_tbl; const struct rtw_table *agc_tbl; @@ -1155,6 +1174,7 @@ struct rtw_chip_info { const struct coex_rf_para *wl_rf_para_tx; const struct coex_rf_para *wl_rf_para_rx; const struct coex_5g_afh_map *afh_5g; + const struct rtw_hw_reg *btg_reg; const struct rtw_reg_domain *coex_info_hw_regs; }; @@ -1399,6 +1419,16 @@ struct rtw_pkt_count { DECLARE_EWMA(evm, 10, 4); DECLARE_EWMA(snr, 10, 4); +struct rtw_iqk_info { + bool done; + struct { + u32 s1_x; + u32 s1_y; + u32 s0_x; + u32 s0_y; + } result; +}; + struct rtw_dm_info { u32 cck_fa_cnt; u32 ofdm_fa_cnt; @@ -1436,6 +1466,8 @@ struct rtw_dm_info { bool pwr_trk_triggered; bool pwr_trk_init_trigger; struct ewma_thermal avg_thermal[RTW_RF_PATH_MAX]; + s8 txagc_remnant_cck; + s8 txagc_remnant_ofdm; /* backup dack results for each path and I/Q */ u32 dack_adck[RTW_RF_PATH_MAX]; @@ -1458,6 +1490,8 @@ struct rtw_dm_info { struct rtw_pkt_count last_pkt_count; struct ewma_evm ewma_evm[RTW_EVM_NUM]; struct ewma_snr ewma_snr[RTW_SNR_NUM]; + + struct rtw_iqk_info iqk; }; struct rtw_efuse { @@ -1686,6 +1720,8 @@ struct rtw_dev { struct rtw_fw_state wow_fw; struct rtw_wow_param wow; + bool need_rfk; + /* hci related data, must be last */ u8 priv[] __aligned(sizeof(void *)); }; @@ -1759,6 +1795,7 @@ void rtw_restore_reg(struct rtw_dev *rtwdev, struct rtw_backup_info *bckp, u32 num); void rtw_desc_to_mcsrate(u16 rate, u8 *mcs, u8 *nss); void rtw_set_channel(struct rtw_dev *rtwdev); +void rtw_chip_prepare_tx(struct rtw_dev *rtwdev); void rtw_vif_port_config(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif, u32 config); void rtw_tx_report_purge_timer(struct timer_list *t); diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c index a9752c34c9d8..8228db9a5fc8 100644 --- a/drivers/net/wireless/realtek/rtw88/pci.c +++ b/drivers/net/wireless/realtek/rtw88/pci.c @@ -1360,7 +1360,8 @@ static int __maybe_unused rtw_pci_resume(struct device *dev) return 0; } -static SIMPLE_DEV_PM_OPS(rtw_pm_ops, rtw_pci_suspend, rtw_pci_resume); +SIMPLE_DEV_PM_OPS(rtw_pm_ops, rtw_pci_suspend, rtw_pci_resume); +EXPORT_SYMBOL(rtw_pm_ops); static int rtw_pci_claim(struct rtw_dev *rtwdev, struct pci_dev *pdev) { @@ -1473,8 +1474,8 @@ static void rtw_pci_free_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev) pci_free_irq_vectors(pdev); } -static int rtw_pci_probe(struct pci_dev *pdev, - const struct pci_device_id *id) +int rtw_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *id) { struct ieee80211_hw *hw; struct rtw_dev *rtwdev; @@ -1551,8 +1552,9 @@ err_release_hw: return ret; } +EXPORT_SYMBOL(rtw_pci_probe); -static void rtw_pci_remove(struct pci_dev *pdev) +void rtw_pci_remove(struct pci_dev *pdev) { struct ieee80211_hw *hw = pci_get_drvdata(pdev); struct rtw_dev *rtwdev; @@ -1572,29 +1574,24 @@ static void rtw_pci_remove(struct pci_dev *pdev) rtw_core_deinit(rtwdev); ieee80211_free_hw(hw); } 
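/*
 * With probe/remove/shutdown and rtw_pm_ops now exported, the single
 * rtw_pci_id_table removed below moves into the per-chip modules named
 * in the new Makefile. A minimal sketch of what one such module
 * (rtw8822be.c, not shown in this diff) boils down to; "rtw8822b.h" is
 * assumed to declare rtw8822b_hw_spec after the split:
 */
#include <linux/module.h>
#include <linux/pci.h>
#include "pci.h"
#include "rtw8822b.h"

static const struct pci_device_id rtw_8822be_id_table[] = {
	{
		PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xB822),
		.driver_data = (kernel_ulong_t)&rtw8822b_hw_spec
	},
	{}
};
MODULE_DEVICE_TABLE(pci, rtw_8822be_id_table);

static struct pci_driver rtw_8822be_driver = {
	.name = "rtw_8822be",
	.id_table = rtw_8822be_id_table,
	.probe = rtw_pci_probe,
	.remove = rtw_pci_remove,
	.driver.pm = &rtw_pm_ops,
	.shutdown = rtw_pci_shutdown,
};
module_pci_driver(rtw_8822be_driver);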
+EXPORT_SYMBOL(rtw_pci_remove); -static const struct pci_device_id rtw_pci_id_table[] = { -#ifdef CONFIG_RTW88_8822BE - { RTK_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xB822, rtw8822b_hw_spec) }, -#endif -#ifdef CONFIG_RTW88_8822CE - { RTK_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xC822, rtw8822c_hw_spec) }, -#endif -#ifdef CONFIG_RTW88_8723DE - { RTK_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xD723, rtw8723d_hw_spec) }, -#endif - {}, -}; -MODULE_DEVICE_TABLE(pci, rtw_pci_id_table); - -static struct pci_driver rtw_pci_driver = { - .name = "rtw_pci", - .id_table = rtw_pci_id_table, - .probe = rtw_pci_probe, - .remove = rtw_pci_remove, - .driver.pm = &rtw_pm_ops, -}; -module_pci_driver(rtw_pci_driver); +void rtw_pci_shutdown(struct pci_dev *pdev) +{ + struct ieee80211_hw *hw = pci_get_drvdata(pdev); + struct rtw_dev *rtwdev; + struct rtw_chip_info *chip; + + if (!hw) + return; + + rtwdev = hw->priv; + chip = rtwdev->chip; + + if (chip->ops->shutdown) + chip->ops->shutdown(rtwdev); +} +EXPORT_SYMBOL(rtw_pci_shutdown); MODULE_AUTHOR("Realtek Corporation"); MODULE_DESCRIPTION("Realtek 802.11ac wireless PCI driver"); diff --git a/drivers/net/wireless/realtek/rtw88/pci.h b/drivers/net/wireless/realtek/rtw88/pci.h index 3ac4fb328d31..024c2bc275cb 100644 --- a/drivers/net/wireless/realtek/rtw88/pci.h +++ b/drivers/net/wireless/realtek/rtw88/pci.h @@ -5,10 +5,6 @@ #ifndef __RTK_PCI_H_ #define __RTK_PCI_H_ -#define RTK_PCI_DEVICE(vend, dev, hw_config) \ - PCI_DEVICE(vend, dev), \ - .driver_data = (kernel_ulong_t)&(hw_config), - #define RTK_DEFAULT_TX_DESC_NUM 128 #define RTK_BEQ_TX_DESC_NUM 256 diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c index 72a16eff9db3..8d93f3159746 100644 --- a/drivers/net/wireless/realtek/rtw88/phy.c +++ b/drivers/net/wireless/realtek/rtw88/phy.c @@ -82,6 +82,8 @@ u8 *rtw_rate_section[RTW_RATE_SECTION_MAX] = { rtw_ht_1s_rates, rtw_ht_2s_rates, rtw_vht_1s_rates, rtw_vht_2s_rates }; +EXPORT_SYMBOL(rtw_rate_section); + u8 rtw_rate_size[RTW_RATE_SECTION_MAX] = { ARRAY_SIZE(rtw_cck_rates), ARRAY_SIZE(rtw_ofdm_rates), @@ -90,6 +92,8 @@ u8 rtw_rate_size[RTW_RATE_SECTION_MAX] = { ARRAY_SIZE(rtw_vht_1s_rates), ARRAY_SIZE(rtw_vht_2s_rates) }; +EXPORT_SYMBOL(rtw_rate_size); + static const u8 rtw_cck_size = ARRAY_SIZE(rtw_cck_rates); static const u8 rtw_ofdm_size = ARRAY_SIZE(rtw_ofdm_rates); static const u8 rtw_ht_1s_size = ARRAY_SIZE(rtw_ht_1s_rates); @@ -134,7 +138,10 @@ void rtw_phy_init(struct rtw_dev *rtwdev) mask = chip->dig[0].mask; dm_info->igi_history[0] = rtw_read32_mask(rtwdev, addr, mask); rtw_phy_cck_pd_init(rtwdev); + + dm_info->iqk.done = false; } +EXPORT_SYMBOL(rtw_phy_init); void rtw_phy_dig_write(struct rtw_dev *rtwdev, u8 igi) { @@ -674,6 +681,7 @@ u8 rtw_phy_rf_power_2_rssi(s8 *rf_power, u8 path_num) return rtw_phy_linear_2_db(sum); } +EXPORT_SYMBOL(rtw_phy_rf_power_2_rssi); u32 rtw_phy_read_rf(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path, u32 addr, u32 mask) @@ -696,6 +704,7 @@ u32 rtw_phy_read_rf(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path, return val; } +EXPORT_SYMBOL(rtw_phy_read_rf); u32 rtw_phy_read_rf_sipi(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path, u32 addr, u32 mask) @@ -744,6 +753,7 @@ u32 rtw_phy_read_rf_sipi(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path, return (val32 & mask) >> shift; } +EXPORT_SYMBOL(rtw_phy_read_rf_sipi); bool rtw_phy_write_rf_reg_sipi(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path, u32 addr, u32 mask, u32 data) @@ -783,6 +793,7 @@ bool rtw_phy_write_rf_reg_sipi(struct rtw_dev *rtwdev, 
enum rtw_rf_path rf_path, return true; } +EXPORT_SYMBOL(rtw_phy_write_rf_reg_sipi); bool rtw_phy_write_rf_reg(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path, u32 addr, u32 mask, u32 data) @@ -816,6 +827,7 @@ bool rtw_phy_write_rf_reg_mix(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path, return rtw_phy_write_rf_reg_sipi(rtwdev, rf_path, addr, mask, data); } +EXPORT_SYMBOL(rtw_phy_write_rf_reg_mix); void rtw_phy_setup_phy_cond(struct rtw_dev *rtwdev, u32 pkg) { @@ -908,6 +920,7 @@ void rtw_parse_tbl_phy_cond(struct rtw_dev *rtwdev, const struct rtw_table *tbl) } } } +EXPORT_SYMBOL(rtw_parse_tbl_phy_cond); #define bcd_to_dec_pwr_by_rate(val, i) bcd2bin(val >> (i * 8)) @@ -1271,6 +1284,7 @@ void rtw_parse_tbl_bb_pg(struct rtw_dev *rtwdev, const struct rtw_table *tbl) p->data); } } +EXPORT_SYMBOL(rtw_parse_tbl_bb_pg); static const u8 rtw_channel_idx_5g[RTW_MAX_CHANNEL_NUM_5G] = { 36, 38, 40, 42, 44, 46, 48, /* Band 1 */ @@ -1415,18 +1429,21 @@ void rtw_parse_tbl_txpwr_lmt(struct rtw_dev *rtwdev, rtw_xref_txpwr_lmt(rtwdev); } +EXPORT_SYMBOL(rtw_parse_tbl_txpwr_lmt); void rtw_phy_cfg_mac(struct rtw_dev *rtwdev, const struct rtw_table *tbl, u32 addr, u32 data) { rtw_write8(rtwdev, addr, data); } +EXPORT_SYMBOL(rtw_phy_cfg_mac); void rtw_phy_cfg_agc(struct rtw_dev *rtwdev, const struct rtw_table *tbl, u32 addr, u32 data) { rtw_write32(rtwdev, addr, data); } +EXPORT_SYMBOL(rtw_phy_cfg_agc); void rtw_phy_cfg_bb(struct rtw_dev *rtwdev, const struct rtw_table *tbl, u32 addr, u32 data) @@ -1446,6 +1463,7 @@ void rtw_phy_cfg_bb(struct rtw_dev *rtwdev, const struct rtw_table *tbl, else rtw_write32(rtwdev, addr, data); } +EXPORT_SYMBOL(rtw_phy_cfg_bb); void rtw_phy_cfg_rf(struct rtw_dev *rtwdev, const struct rtw_table *tbl, u32 addr, u32 data) @@ -1459,6 +1477,7 @@ void rtw_phy_cfg_rf(struct rtw_dev *rtwdev, const struct rtw_table *tbl, udelay(1); } } +EXPORT_SYMBOL(rtw_phy_cfg_rf); static void rtw_load_rfk_table(struct rtw_dev *rtwdev) { @@ -1496,6 +1515,7 @@ void rtw_phy_load_tables(struct rtw_dev *rtwdev) rtw_load_table(rtwdev, tbl); } } +EXPORT_SYMBOL(rtw_phy_load_tables); static u8 rtw_get_channel_group(u8 channel) { @@ -1783,11 +1803,13 @@ void rtw_get_tx_power_params(struct rtw_dev *rtwdev, u8 path, u8 rate, u8 bw, u8 ch, u8 regd, struct rtw_power_params *pwr_param) { struct rtw_hal *hal = &rtwdev->hal; + struct rtw_dm_info *dm_info = &rtwdev->dm_info; struct rtw_txpwr_idx *pwr_idx; u8 group, band; u8 *base = &pwr_param->pwr_base; s8 *offset = &pwr_param->pwr_offset; s8 *limit = &pwr_param->pwr_limit; + s8 *remnant = &pwr_param->pwr_remnant; pwr_idx = &rtwdev->efuse.txpwr_idx_table[path]; group = rtw_get_channel_group(ch); @@ -1809,6 +1831,8 @@ void rtw_get_tx_power_params(struct rtw_dev *rtwdev, u8 path, u8 rate, u8 bw, *limit = rtw_phy_get_tx_power_limit(rtwdev, band, bw, path, rate, ch, regd); + *remnant = (rate <= DESC_RATE11M ? 
dm_info->txagc_remnant_cck : + dm_info->txagc_remnant_ofdm); } u8 @@ -1828,13 +1852,14 @@ rtw_phy_get_tx_power_index(struct rtw_dev *rtwdev, u8 rf_path, u8 rate, if (rtwdev->chip->en_dis_dpd) offset += rtw_phy_get_dis_dpd_by_rate_diff(rtwdev, rate); - tx_power += offset; + tx_power += offset + pwr_param.pwr_remnant; if (tx_power > rtwdev->chip->max_power_index) tx_power = rtwdev->chip->max_power_index; return tx_power; } +EXPORT_SYMBOL(rtw_phy_get_tx_power_index); static void rtw_phy_set_tx_power_index_by_rs(struct rtw_dev *rtwdev, u8 ch, u8 path, u8 rs) @@ -1897,6 +1922,7 @@ void rtw_phy_set_tx_power_level(struct rtw_dev *rtwdev, u8 channel) chip->ops->set_tx_power_index(rtwdev); mutex_unlock(&hal->tx_power_mutex); } +EXPORT_SYMBOL(rtw_phy_set_tx_power_level); static void rtw_phy_tx_power_by_rate_config_by_path(struct rtw_hal *hal, u8 path, @@ -2054,6 +2080,7 @@ void rtw_phy_config_swing_table(struct rtw_dev *rtwdev, swing_table->n[RF_PATH_B] = tbl->pwrtrk_2gb_n; } } +EXPORT_SYMBOL(rtw_phy_config_swing_table); void rtw_phy_pwrtrack_avg(struct rtw_dev *rtwdev, u8 thermal, u8 path) { @@ -2063,6 +2090,7 @@ void rtw_phy_pwrtrack_avg(struct rtw_dev *rtwdev, u8 thermal, u8 path) dm_info->thermal_avg[path] = ewma_thermal_read(&dm_info->avg_thermal[path]); } +EXPORT_SYMBOL(rtw_phy_pwrtrack_avg); bool rtw_phy_pwrtrack_thermal_changed(struct rtw_dev *rtwdev, u8 thermal, u8 path) @@ -2075,6 +2103,7 @@ bool rtw_phy_pwrtrack_thermal_changed(struct rtw_dev *rtwdev, u8 thermal, return true; } +EXPORT_SYMBOL(rtw_phy_pwrtrack_thermal_changed); u8 rtw_phy_pwrtrack_get_delta(struct rtw_dev *rtwdev, u8 path) { @@ -2087,6 +2116,7 @@ u8 rtw_phy_pwrtrack_get_delta(struct rtw_dev *rtwdev, u8 path) return min_t(u8, therm_delta, RTW_PWR_TRK_TBL_SZ - 1); } +EXPORT_SYMBOL(rtw_phy_pwrtrack_get_delta); s8 rtw_phy_pwrtrack_get_pwridx(struct rtw_dev *rtwdev, struct rtw_swing_table *swing_table, @@ -2120,6 +2150,7 @@ s8 rtw_phy_pwrtrack_get_pwridx(struct rtw_dev *rtwdev, else return -delta_swing_table_idx_neg[delta]; } +EXPORT_SYMBOL(rtw_phy_pwrtrack_get_pwridx); bool rtw_phy_pwrtrack_need_iqk(struct rtw_dev *rtwdev) { @@ -2133,3 +2164,4 @@ bool rtw_phy_pwrtrack_need_iqk(struct rtw_dev *rtwdev) } return false; } +EXPORT_SYMBOL(rtw_phy_pwrtrack_need_iqk); diff --git a/drivers/net/wireless/realtek/rtw88/phy.h b/drivers/net/wireless/realtek/rtw88/phy.h index 413bf7165cc0..b924ed07630a 100644 --- a/drivers/net/wireless/realtek/rtw88/phy.h +++ b/drivers/net/wireless/realtek/rtw88/phy.h @@ -141,6 +141,7 @@ struct rtw_power_params { u8 pwr_base; s8 pwr_offset; s8 pwr_limit; + s8 pwr_remnant; }; void diff --git a/drivers/net/wireless/realtek/rtw88/reg.h b/drivers/net/wireless/realtek/rtw88/reg.h index 9fdfcdc5c5cf..5a3e9cc7c400 100644 --- a/drivers/net/wireless/realtek/rtw88/reg.h +++ b/drivers/net/wireless/realtek/rtw88/reg.h @@ -69,6 +69,7 @@ #define BIT_DPDT_SEL_EN BIT(23) #define REG_LEDCFG2 0x004E #define REG_PAD_CTRL1 0x0064 +#define BIT_BT_BTG_SEL BIT(31) #define BIT_PAPE_WLBT_SEL BIT(29) #define BIT_LNAON_WLBT_SEL BIT(28) #define BIT_BTGP_JTAG_EN BIT(24) @@ -82,6 +83,7 @@ #define BIT_DBG_GNT_WL_BT BIT(27) #define BIT_LTE_MUX_CTRL_PATH BIT(26) #define REG_HCI_OPT_CTRL 0x0074 +#define BIT_USB_SUS_DIS BIT(8) #define REG_AFE_CTRL_4 0x0078 #define BIT_CK320M_AFE_EN BIT(4) @@ -611,7 +613,10 @@ #define REG_IGN_GNTBT4 0x4160 +#define RF_MODE 0x00 #define RF_MODOPT 0x01 +#define RF_WLINT 0x01 +#define RF_WLSEL 0x02 #define RF_DTXLOK 0x08 #define RF_CFGCH 0x18 #define RF_RCK 0x1d @@ -619,9 +624,15 @@ #define RF_LUTWD1 0x3e #define 
RF_LUTWD0 0x3f #define RF_T_METER 0x42 +#define RF_BSPAD 0x54 +#define RF_GAINTX 0x56 +#define RF_TXATANK 0x64 +#define RF_TRXIQ 0x66 +#define RF_RXIQGEN 0x8d #define RF_XTALX2 0xb8 #define RF_MALSEL 0xbe #define RF_RCKD 0xde +#define RF_TXADBG 0xde #define RF_LUTDBG 0xdf #define RF_LUTWE2 0xee #define RF_LUTWE 0xef diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723d.c b/drivers/net/wireless/realtek/rtw88/rtw8723d.c index 92c742d1ce6d..4700195c8eef 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8723d.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8723d.c @@ -2,6 +2,7 @@ /* Copyright(c) 2018-2019 Realtek Corporation */ +#include <linux/module.h> #include "main.h" #include "coex.h" #include "fw.h" @@ -64,6 +65,74 @@ static const struct rtw_hw_reg rtw8723d_txagc[] = { #define WLAN_LTR_CTRL1 0xCB004010 #define WLAN_LTR_CTRL2 0x01233425 +static void rtw8723d_lck(struct rtw_dev *rtwdev) +{ + u32 lc_cal; + u8 val_ctx, rf_val; + int ret; + + val_ctx = rtw_read8(rtwdev, REG_CTX); + if ((val_ctx & BIT_MASK_CTX_TYPE) != 0) + rtw_write8(rtwdev, REG_CTX, val_ctx & ~BIT_MASK_CTX_TYPE); + else + rtw_write8(rtwdev, REG_TXPAUSE, 0xFF); + lc_cal = rtw_read_rf(rtwdev, RF_PATH_A, RF_CFGCH, RFREG_MASK); + + rtw_write_rf(rtwdev, RF_PATH_A, RF_CFGCH, RFREG_MASK, lc_cal | BIT_LCK); + + ret = read_poll_timeout(rtw_read_rf, rf_val, rf_val != 0x1, + 10000, 1000000, false, + rtwdev, RF_PATH_A, RF_CFGCH, BIT_LCK); + if (ret) + rtw_warn(rtwdev, "failed to poll LCK status bit\n"); + + rtw_write_rf(rtwdev, RF_PATH_A, RF_CFGCH, RFREG_MASK, lc_cal); + if ((val_ctx & BIT_MASK_CTX_TYPE) != 0) + rtw_write8(rtwdev, REG_CTX, val_ctx); + else + rtw_write8(rtwdev, REG_TXPAUSE, 0x00); +} + +static const u32 rtw8723d_ofdm_swing_table[] = { + 0x0b40002d, 0x0c000030, 0x0cc00033, 0x0d800036, 0x0e400039, 0x0f00003c, + 0x10000040, 0x11000044, 0x12000048, 0x1300004c, 0x14400051, 0x15800056, + 0x16c0005b, 0x18000060, 0x19800066, 0x1b00006c, 0x1c800072, 0x1e400079, + 0x20000080, 0x22000088, 0x24000090, 0x26000098, 0x288000a2, 0x2ac000ab, + 0x2d4000b5, 0x300000c0, 0x32c000cb, 0x35c000d7, 0x390000e4, 0x3c8000f2, + 0x40000100, 0x43c0010f, 0x47c0011f, 0x4c000130, 0x50800142, 0x55400155, + 0x5a400169, 0x5fc0017f, 0x65400195, 0x6b8001ae, 0x71c001c7, 0x788001e2, + 0x7f8001fe, +}; + +static const u32 rtw8723d_cck_swing_table[] = { + 0x0CD, 0x0D9, 0x0E6, 0x0F3, 0x102, 0x111, 0x121, 0x132, 0x144, 0x158, + 0x16C, 0x182, 0x198, 0x1B1, 0x1CA, 0x1E5, 0x202, 0x221, 0x241, 0x263, + 0x287, 0x2AE, 0x2D6, 0x301, 0x32F, 0x35F, 0x392, 0x3C9, 0x402, 0x43F, + 0x47F, 0x4C3, 0x50C, 0x558, 0x5A9, 0x5FF, 0x65A, 0x6BA, 0x720, 0x78C, + 0x7FF, +}; + +#define RTW_OFDM_SWING_TABLE_SIZE ARRAY_SIZE(rtw8723d_ofdm_swing_table) +#define RTW_CCK_SWING_TABLE_SIZE ARRAY_SIZE(rtw8723d_cck_swing_table) + +static void rtw8723d_pwrtrack_init(struct rtw_dev *rtwdev) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + u8 path; + + dm_info->default_ofdm_index = RTW_DEF_OFDM_SWING_INDEX; + + for (path = RF_PATH_A; path < rtwdev->hal.rf_path_num; path++) { + ewma_thermal_init(&dm_info->avg_thermal[path]); + dm_info->delta_power_index[path] = 0; + } + dm_info->pwr_trk_triggered = false; + dm_info->pwr_trk_init_trigger = true; + dm_info->thermal_meter_k = rtwdev->efuse.thermal_meter_k; + dm_info->txagc_remnant_cck = 0; + dm_info->txagc_remnant_ofdm = 0; +} + static void rtw8723d_phy_set_param(struct rtw_dev *rtwdev) { u8 xtal_cap; @@ -125,8 +194,13 @@ static void rtw8723d_phy_set_param(struct rtw_dev *rtwdev) rtw_phy_init(rtwdev); rtw_write16_set(rtwdev, REG_TXDMA_OFFSET_CHK, 
BIT_DROP_DATA_EN); + + rtw8723d_lck(rtwdev); + rtw_write32_mask(rtwdev, REG_OFDM0_XAAGC1, MASKBYTE0, 0x50); rtw_write32_mask(rtwdev, REG_OFDM0_XAAGC1, MASKBYTE0, 0x20); + + rtw8723d_pwrtrack_init(rtwdev); } static void rtw8723de_efuse_parsing(struct rtw_efuse *efuse, @@ -476,6 +550,11 @@ static int rtw8723d_mac_init(struct rtw_dev *rtwdev) return 0; } +static void rtw8723d_shutdown(struct rtw_dev *rtwdev) +{ + rtw_write16_set(rtwdev, REG_HCI_OPT_CTRL, BIT_USB_SUS_DIS); +} + static void rtw8723d_cfg_ldo25(struct rtw_dev *rtwdev, bool enable) { u8 ldo_pwr; @@ -483,7 +562,7 @@ static void rtw8723d_cfg_ldo25(struct rtw_dev *rtwdev, bool enable) ldo_pwr = rtw_read8(rtwdev, REG_LDO_EFUSE_CTRL + 3); if (enable) { ldo_pwr &= ~BIT_MASK_LDO25_VOLTAGE; - ldo_pwr = (BIT_LDO25_VOLTAGE_V25 << 4) | BIT_LDO25_EN; + ldo_pwr |= (BIT_LDO25_VOLTAGE_V25 << 4) | BIT_LDO25_EN; } else { ldo_pwr &= ~BIT_LDO25_EN; } @@ -603,12 +682,1247 @@ static void rtw8723d_false_alarm_statistics(struct rtw_dev *rtwdev) rtw_write32_mask(rtwdev, REG_PAGE_F_RST_11N, BIT_MASK_F_RST_ALL, 0); } +static const u32 iqk_adda_regs[] = { + 0x85c, 0xe6c, 0xe70, 0xe74, 0xe78, 0xe7c, 0xe80, 0xe84, 0xe88, 0xe8c, + 0xed0, 0xed4, 0xed8, 0xedc, 0xee0, 0xeec +}; + +static const u32 iqk_mac8_regs[] = {0x522, 0x550, 0x551}; +static const u32 iqk_mac32_regs[] = {0x40}; + +static const u32 iqk_bb_regs[] = { + 0xc04, 0xc08, 0x874, 0xb68, 0xb6c, 0x870, 0x860, 0x864, 0xa04 +}; + +#define IQK_ADDA_REG_NUM ARRAY_SIZE(iqk_adda_regs) +#define IQK_MAC8_REG_NUM ARRAY_SIZE(iqk_mac8_regs) +#define IQK_MAC32_REG_NUM ARRAY_SIZE(iqk_mac32_regs) +#define IQK_BB_REG_NUM ARRAY_SIZE(iqk_bb_regs) + +struct iqk_backup_regs { + u32 adda[IQK_ADDA_REG_NUM]; + u8 mac8[IQK_MAC8_REG_NUM]; + u32 mac32[IQK_MAC32_REG_NUM]; + u32 bb[IQK_BB_REG_NUM]; + + u32 lte_path; + u32 lte_gnt; + + u32 bb_sel_btg; + u8 btg_sel; + + u8 igia; + u8 igib; +}; + +static void rtw8723d_iqk_backup_regs(struct rtw_dev *rtwdev, + struct iqk_backup_regs *backup) +{ + int i; + + for (i = 0; i < IQK_ADDA_REG_NUM; i++) + backup->adda[i] = rtw_read32(rtwdev, iqk_adda_regs[i]); + + for (i = 0; i < IQK_MAC8_REG_NUM; i++) + backup->mac8[i] = rtw_read8(rtwdev, iqk_mac8_regs[i]); + for (i = 0; i < IQK_MAC32_REG_NUM; i++) + backup->mac32[i] = rtw_read32(rtwdev, iqk_mac32_regs[i]); + + for (i = 0; i < IQK_BB_REG_NUM; i++) + backup->bb[i] = rtw_read32(rtwdev, iqk_bb_regs[i]); + + backup->igia = rtw_read32_mask(rtwdev, REG_OFDM0_XAAGC1, MASKBYTE0); + backup->igib = rtw_read32_mask(rtwdev, REG_OFDM0_XBAGC1, MASKBYTE0); + + backup->bb_sel_btg = rtw_read32(rtwdev, REG_BB_SEL_BTG); +} + +static void rtw8723d_iqk_restore_regs(struct rtw_dev *rtwdev, + const struct iqk_backup_regs *backup) +{ + int i; + + for (i = 0; i < IQK_ADDA_REG_NUM; i++) + rtw_write32(rtwdev, iqk_adda_regs[i], backup->adda[i]); + + for (i = 0; i < IQK_MAC8_REG_NUM; i++) + rtw_write8(rtwdev, iqk_mac8_regs[i], backup->mac8[i]); + for (i = 0; i < IQK_MAC32_REG_NUM; i++) + rtw_write32(rtwdev, iqk_mac32_regs[i], backup->mac32[i]); + + for (i = 0; i < IQK_BB_REG_NUM; i++) + rtw_write32(rtwdev, iqk_bb_regs[i], backup->bb[i]); + + rtw_write32_mask(rtwdev, REG_OFDM0_XAAGC1, MASKBYTE0, 0x50); + rtw_write32_mask(rtwdev, REG_OFDM0_XAAGC1, MASKBYTE0, backup->igia); + + rtw_write32_mask(rtwdev, REG_OFDM0_XBAGC1, MASKBYTE0, 0x50); + rtw_write32_mask(rtwdev, REG_OFDM0_XBAGC1, MASKBYTE0, backup->igib); + + rtw_write32(rtwdev, REG_TXIQK_TONE_A_11N, 0x01008c00); + rtw_write32(rtwdev, REG_RXIQK_TONE_A_11N, 0x01008c00); +} + +static void 
rtw8723d_iqk_backup_path_ctrl(struct rtw_dev *rtwdev,
+					  struct iqk_backup_regs *backup)
+{
+	backup->btg_sel = rtw_read8(rtwdev, REG_BTG_SEL);
+	rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] original 0x67 = 0x%x\n",
+		backup->btg_sel);
+}
+
+static void rtw8723d_iqk_config_path_ctrl(struct rtw_dev *rtwdev)
+{
+	rtw_write32_mask(rtwdev, REG_PAD_CTRL1, BIT_BT_BTG_SEL, 0x1);
+	rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] set 0x67 = 0x%x\n",
+		rtw_read32_mask(rtwdev, REG_PAD_CTRL1, MASKBYTE3));
+}
+
+static void rtw8723d_iqk_restore_path_ctrl(struct rtw_dev *rtwdev,
+					   const struct iqk_backup_regs *backup)
+{
+	rtw_write8(rtwdev, REG_BTG_SEL, backup->btg_sel);
+	rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] restore 0x67 = 0x%x\n",
+		rtw_read32_mask(rtwdev, REG_PAD_CTRL1, MASKBYTE3));
+}
+
+static void rtw8723d_iqk_backup_lte_path_gnt(struct rtw_dev *rtwdev,
+					     struct iqk_backup_regs *backup)
+{
+	backup->lte_path = rtw_read32(rtwdev, REG_LTECOEX_PATH_CONTROL);
+	rtw_write32(rtwdev, REG_LTECOEX_CTRL, 0x800f0038);
+	mdelay(1);
+	backup->lte_gnt = rtw_read32(rtwdev, REG_LTECOEX_READ_DATA);
+	rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] OriginalGNT = 0x%x\n",
+		backup->lte_gnt);
+}
+
+static void rtw8723d_iqk_config_lte_path_gnt(struct rtw_dev *rtwdev)
+{
+	rtw_write32(rtwdev, REG_LTECOEX_WRITE_DATA, 0x0000ff00);
+	rtw_write32(rtwdev, REG_LTECOEX_CTRL, 0xc0020038);
+	rtw_write32_mask(rtwdev, REG_LTECOEX_PATH_CONTROL, BIT_LTE_MUX_CTRL_PATH, 0x1);
+}
+
+static void rtw8723d_iqk_restore_lte_path_gnt(struct rtw_dev *rtwdev,
+					      const struct iqk_backup_regs *bak)
+{
+	rtw_write32(rtwdev, REG_LTECOEX_WRITE_DATA, bak->lte_gnt);
+	rtw_write32(rtwdev, REG_LTECOEX_CTRL, 0xc00f0038);
+	rtw_write32(rtwdev, REG_LTECOEX_PATH_CONTROL, bak->lte_path);
+}
+
+struct rtw_8723d_iqk_cfg {
+	const char *name;
+	u32 val_bb_sel_btg;
+	u32 reg_lutwe;
+	u32 val_txiqk_pi;
+	u32 reg_padlut;
+	u32 reg_gaintx;
+	u32 reg_bspad;
+	u32 val_wlint;
+	u32 val_wlsel;
+	u32 val_iqkpts;
+};
+
+static const struct rtw_8723d_iqk_cfg iqk_tx_cfg[PATH_NR] = {
+	[PATH_S1] = {
+		.name = "S1",
+		.val_bb_sel_btg = 0x99000000,
+		.reg_lutwe = RF_LUTWE,
+		.val_txiqk_pi = 0x8214019f,
+		.reg_padlut = RF_LUTDBG,
+		.reg_gaintx = RF_GAINTX,
+		.reg_bspad = RF_BSPAD,
+		.val_wlint = 0xe0d,
+		.val_wlsel = 0x60d,
+		.val_iqkpts = 0xfa000000,
+	},
+	[PATH_S0] = {
+		.name = "S0",
+		.val_bb_sel_btg = 0x99000280,
+		.reg_lutwe = RF_LUTWE2,
+		.val_txiqk_pi = 0x8214018a,
+		.reg_padlut = RF_TXADBG,
+		.reg_gaintx = RF_TRXIQ,
+		.reg_bspad = RF_TXATANK,
+		.val_wlint = 0xe6d,
+		.val_wlsel = 0x66d,
+		.val_iqkpts = 0xf9000000,
+	},
+};
+
+static u8 rtw8723d_iqk_check_tx_failed(struct rtw_dev *rtwdev,
+				       const struct rtw_8723d_iqk_cfg *iqk_cfg)
+{
+	s32 tx_x, tx_y;
+	u32 tx_fail;
+
+	rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] 0xeac = 0x%x\n",
+		rtw_read32(rtwdev, REG_IQK_RES_RY));
+	rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] 0xe94 = 0x%x, 0xe9c = 0x%x\n",
+		rtw_read32(rtwdev, REG_IQK_RES_TX),
+		rtw_read32(rtwdev, REG_IQK_RES_TY));
+	rtw_dbg(rtwdev, RTW_DBG_RFK,
+		"[IQK] 0xe90 (before IQK) = 0x%x, 0xe98 (after IQK) = 0x%x\n",
+		rtw_read32(rtwdev, 0xe90),
+		rtw_read32(rtwdev, 0xe98));
+
+	tx_fail = rtw_read32_mask(rtwdev, REG_IQK_RES_RY, BIT_IQK_TX_FAIL);
+	tx_x = rtw_read32_mask(rtwdev, REG_IQK_RES_TX, BIT_MASK_RES_TX);
+	tx_y = rtw_read32_mask(rtwdev, REG_IQK_RES_TY, BIT_MASK_RES_TY);
+
+	if (!tx_fail && tx_x != IQK_TX_X_ERR && tx_y != IQK_TX_Y_ERR)
+		return IQK_TX_OK;
+
+	rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] %s TXIQK failed\n",
+		iqk_cfg->name);
+
+	return 0;
+}
+
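/*
 * The RX check that follows decodes the raw I/Q result fields before
 * comparing them against limits. A minimal sketch of the decode it
 * relies on, assuming the usual 10-bit two's-complement IQK result
 * format (iqkxy_to_s32() itself is defined elsewhere in the 8723d
 * sources, not in this hunk; sign_extend32() is from <linux/bitops.h>):
 *
 *	static inline s32 iqkxy_to_s32(s32 val)
 *	{
 *		return sign_extend32(val, 9);	// 0x3ff -> -1, 0x1ff -> 511
 *	}
 */
+static u8 rtw8723d_iqk_check_rx_failed(struct rtw_dev *rtwdev,
+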
const struct rtw_8723d_iqk_cfg *iqk_cfg) +{ + s32 rx_x, rx_y; + u32 rx_fail; + + rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] 0xea4 = 0x%x, 0xeac = 0x%x\n", + rtw_read32(rtwdev, REG_IQK_RES_RX), + rtw_read32(rtwdev, REG_IQK_RES_RY)); + + rtw_dbg(rtwdev, RTW_DBG_RFK, + "[IQK] 0xea0(before IQK)= 0x%x, 0xea8(afer IQK) = 0x%x\n", + rtw_read32(rtwdev, 0xea0), + rtw_read32(rtwdev, 0xea8)); + + rx_fail = rtw_read32_mask(rtwdev, REG_IQK_RES_RY, BIT_IQK_RX_FAIL); + rx_x = rtw_read32_mask(rtwdev, REG_IQK_RES_RX, BIT_MASK_RES_RX); + rx_y = rtw_read32_mask(rtwdev, REG_IQK_RES_RY, BIT_MASK_RES_RY); + rx_y = abs(iqkxy_to_s32(rx_y)); + + if (!rx_fail && rx_x < IQK_RX_X_UPPER && rx_x > IQK_RX_X_LOWER && + rx_y < IQK_RX_Y_LMT) + return IQK_RX_OK; + + rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] %s RXIQK STEP2 is failed\n", + iqk_cfg->name); + + return 0; +} + +static void rtw8723d_iqk_one_shot(struct rtw_dev *rtwdev, bool tx, + const struct rtw_8723d_iqk_cfg *iqk_cfg) +{ + u32 pts = (tx ? iqk_cfg->val_iqkpts : 0xf9000000); + + /* enter IQK mode */ + rtw_write32_mask(rtwdev, REG_FPGA0_IQK_11N, BIT_MASK_IQK_MOD, EN_IQK); + rtw8723d_iqk_config_lte_path_gnt(rtwdev); + + rtw_write32(rtwdev, REG_LTECOEX_CTRL, 0x800f0054); + mdelay(1); + rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] GNT_BT @%s %sIQK1 = 0x%x\n", + iqk_cfg->name, tx ? "TX" : "RX", + rtw_read32(rtwdev, REG_LTECOEX_READ_DATA)); + rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] 0x948 @%s %sIQK1 = 0x%x\n", + iqk_cfg->name, tx ? "TX" : "RX", + rtw_read32(rtwdev, REG_BB_SEL_BTG)); + + /* One shot, LOK & IQK */ + rtw_write32(rtwdev, REG_IQK_AGC_PTS_11N, pts); + rtw_write32(rtwdev, REG_IQK_AGC_PTS_11N, 0xf8000000); + + if (!check_hw_ready(rtwdev, REG_IQK_RES_RY, BIT_IQK_DONE, 1)) + rtw_warn(rtwdev, "%s %s IQK isn't done\n", iqk_cfg->name, + tx ? "TX" : "RX"); +} + +static void rtw8723d_iqk_txrx_path_post(struct rtw_dev *rtwdev, + const struct rtw_8723d_iqk_cfg *iqk_cfg, + const struct iqk_backup_regs *backup) +{ + rtw8723d_iqk_restore_lte_path_gnt(rtwdev, backup); + rtw_write32(rtwdev, REG_BB_SEL_BTG, backup->bb_sel_btg); + + /* leave IQK mode */ + rtw_write32_mask(rtwdev, REG_FPGA0_IQK_11N, BIT_MASK_IQK_MOD, RST_IQK); + mdelay(1); + rtw_write_rf(rtwdev, RF_PATH_A, iqk_cfg->reg_padlut, 0x800, 0x0); + rtw_write_rf(rtwdev, RF_PATH_A, RF_WLINT, BIT(0), 0x0); + rtw_write_rf(rtwdev, RF_PATH_A, RF_WLSEL, BIT(0), 0x0); +} + +static u8 rtw8723d_iqk_tx_path(struct rtw_dev *rtwdev, + const struct rtw_8723d_iqk_cfg *iqk_cfg, + const struct iqk_backup_regs *backup) +{ + u8 status; + + rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] path %s TXIQK!!\n", iqk_cfg->name); + rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] 0x67 @%s TXIQK = 0x%x\n", + iqk_cfg->name, + rtw_read32_mask(rtwdev, REG_PAD_CTRL1, MASKBYTE3)); + + rtw_write32(rtwdev, REG_BB_SEL_BTG, iqk_cfg->val_bb_sel_btg); + rtw_write32_mask(rtwdev, REG_FPGA0_IQK_11N, BIT_MASK_IQK_MOD, RST_IQK); + mdelay(1); + rtw_write_rf(rtwdev, RF_PATH_A, iqk_cfg->reg_lutwe, RFREG_MASK, 0x80000); + rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWA, RFREG_MASK, 0x00004); + rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWD1, RFREG_MASK, 0x0005d); + rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWD0, RFREG_MASK, 0xBFFE0); + rtw_write_rf(rtwdev, RF_PATH_A, iqk_cfg->reg_lutwe, RFREG_MASK, 0x00000); + + /* IQK setting */ + rtw_write32(rtwdev, REG_TXIQK_TONE_A_11N, 0x08008c0c); + rtw_write32(rtwdev, REG_RXIQK_TONE_A_11N, 0x38008c1c); + rtw_write32(rtwdev, REG_TXIQK_PI_A_11N, iqk_cfg->val_txiqk_pi); + rtw_write32(rtwdev, REG_RXIQK_PI_A_11N, 0x28160200); + rtw_write32(rtwdev, REG_TXIQK_11N, 0x01007c00); + rtw_write32(rtwdev, 
REG_RXIQK_11N, 0x01004800); + + /* LOK setting */ + rtw_write32(rtwdev, REG_IQK_AGC_RSP_11N, 0x00462911); + + /* PA, PAD setting */ + rtw_write_rf(rtwdev, RF_PATH_A, iqk_cfg->reg_padlut, 0x800, 0x1); + rtw_write_rf(rtwdev, RF_PATH_A, iqk_cfg->reg_gaintx, 0x600, 0x0); + rtw_write_rf(rtwdev, RF_PATH_A, iqk_cfg->reg_gaintx, 0x1E0, 0x3); + rtw_write_rf(rtwdev, RF_PATH_A, RF_RXIQGEN, 0x1F, 0xf); + + /* LOK setting for 8723D */ + rtw_write_rf(rtwdev, RF_PATH_A, iqk_cfg->reg_lutwe, 0x10, 0x1); + rtw_write_rf(rtwdev, RF_PATH_A, iqk_cfg->reg_bspad, 0x1, 0x1); + + rtw_write_rf(rtwdev, RF_PATH_A, RF_WLINT, RFREG_MASK, iqk_cfg->val_wlint); + rtw_write_rf(rtwdev, RF_PATH_A, RF_WLSEL, RFREG_MASK, iqk_cfg->val_wlsel); + + rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] RF0x1 @%s TXIQK = 0x%x\n", + iqk_cfg->name, + rtw_read_rf(rtwdev, RF_PATH_A, RF_WLINT, RFREG_MASK)); + rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] RF0x2 @%s TXIQK = 0x%x\n", + iqk_cfg->name, + rtw_read_rf(rtwdev, RF_PATH_A, RF_WLSEL, RFREG_MASK)); + + rtw8723d_iqk_one_shot(rtwdev, true, iqk_cfg); + status = rtw8723d_iqk_check_tx_failed(rtwdev, iqk_cfg); + + rtw8723d_iqk_txrx_path_post(rtwdev, iqk_cfg, backup); + + return status; +} + +static u8 rtw8723d_iqk_rx_path(struct rtw_dev *rtwdev, + const struct rtw_8723d_iqk_cfg *iqk_cfg, + const struct iqk_backup_regs *backup) +{ + u32 tx_x, tx_y; + u8 status; + + rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] path %s RXIQK Step1!!\n", + iqk_cfg->name); + rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] 0x67 @%s RXIQK1 = 0x%x\n", + iqk_cfg->name, + rtw_read32_mask(rtwdev, REG_PAD_CTRL1, MASKBYTE3)); + rtw_write32(rtwdev, REG_BB_SEL_BTG, iqk_cfg->val_bb_sel_btg); + + rtw_write32_mask(rtwdev, REG_FPGA0_IQK_11N, BIT_MASK_IQK_MOD, RST_IQK); + + /* IQK setting */ + rtw_write32(rtwdev, REG_TXIQK_11N, 0x01007c00); + rtw_write32(rtwdev, REG_RXIQK_11N, 0x01004800); + + /* path IQK setting */ + rtw_write32(rtwdev, REG_TXIQK_TONE_A_11N, 0x18008c1c); + rtw_write32(rtwdev, REG_RXIQK_TONE_A_11N, 0x38008c1c); + rtw_write32(rtwdev, REG_TX_IQK_TONE_B, 0x38008c1c); + rtw_write32(rtwdev, REG_RX_IQK_TONE_B, 0x38008c1c); + rtw_write32(rtwdev, REG_TXIQK_PI_A_11N, 0x82160000); + rtw_write32(rtwdev, REG_RXIQK_PI_A_11N, 0x28160000); + + /* LOK setting */ + rtw_write32(rtwdev, REG_IQK_AGC_RSP_11N, 0x0046a911); + + /* RXIQK mode */ + rtw_write_rf(rtwdev, RF_PATH_A, iqk_cfg->reg_lutwe, RFREG_MASK, 0x80000); + rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWA, RFREG_MASK, 0x00006); + rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWD1, RFREG_MASK, 0x0005f); + rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWD0, RFREG_MASK, 0xa7ffb); + rtw_write_rf(rtwdev, RF_PATH_A, iqk_cfg->reg_lutwe, RFREG_MASK, 0x00000); + + /* PA/PAD=0 */ + rtw_write_rf(rtwdev, RF_PATH_A, iqk_cfg->reg_padlut, 0x800, 0x1); + rtw_write_rf(rtwdev, RF_PATH_A, iqk_cfg->reg_gaintx, 0x600, 0x0); + rtw_write_rf(rtwdev, RF_PATH_A, RF_WLINT, RFREG_MASK, iqk_cfg->val_wlint); + rtw_write_rf(rtwdev, RF_PATH_A, RF_WLSEL, RFREG_MASK, iqk_cfg->val_wlsel); + + rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] RF0x1@ path %s RXIQK1 = 0x%x\n", + iqk_cfg->name, + rtw_read_rf(rtwdev, RF_PATH_A, RF_WLINT, RFREG_MASK)); + rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] RF0x2@ path %s RXIQK1 = 0x%x\n", + iqk_cfg->name, + rtw_read_rf(rtwdev, RF_PATH_A, RF_WLSEL, RFREG_MASK)); + + rtw8723d_iqk_one_shot(rtwdev, false, iqk_cfg); + status = rtw8723d_iqk_check_tx_failed(rtwdev, iqk_cfg); + + if (!status) + goto restore; + + /* second round */ + tx_x = rtw_read32_mask(rtwdev, REG_IQK_RES_TX, BIT_MASK_RES_TX); + tx_y = rtw_read32_mask(rtwdev, REG_IQK_RES_TY, BIT_MASK_RES_TY); + + 
rtw_write32(rtwdev, REG_TXIQK_11N, BIT_SET_TXIQK_11N(tx_x, tx_y)); + rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] 0xe40 = 0x%x u4tmp = 0x%x\n", + rtw_read32(rtwdev, REG_TXIQK_11N), + BIT_SET_TXIQK_11N(tx_x, tx_y)); + + rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] path %s RXIQK STEP2!!\n", + iqk_cfg->name); + rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] 0x67 @%s RXIQK2 = 0x%x\n", + iqk_cfg->name, + rtw_read32_mask(rtwdev, REG_PAD_CTRL1, MASKBYTE3)); + + rtw_write32(rtwdev, REG_RXIQK_11N, 0x01004800); + rtw_write32(rtwdev, REG_TXIQK_TONE_A_11N, 0x38008c1c); + rtw_write32(rtwdev, REG_RXIQK_TONE_A_11N, 0x18008c1c); + rtw_write32(rtwdev, REG_TX_IQK_TONE_B, 0x38008c1c); + rtw_write32(rtwdev, REG_RX_IQK_TONE_B, 0x38008c1c); + rtw_write32(rtwdev, REG_TXIQK_PI_A_11N, 0x82170000); + rtw_write32(rtwdev, REG_RXIQK_PI_A_11N, 0x28171400); + + /* LOK setting */ + rtw_write32(rtwdev, REG_IQK_AGC_RSP_11N, 0x0046a8d1); + + /* RXIQK mode */ + rtw_write32_mask(rtwdev, REG_FPGA0_IQK_11N, BIT_MASK_IQK_MOD, RST_IQK); + mdelay(1); + rtw_write_rf(rtwdev, RF_PATH_A, iqk_cfg->reg_lutwe, 0x80000, 0x1); + rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWA, RFREG_MASK, 0x00007); + rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWD1, RFREG_MASK, 0x0005f); + rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWD0, RFREG_MASK, 0xb3fdb); + rtw_write_rf(rtwdev, RF_PATH_A, iqk_cfg->reg_lutwe, RFREG_MASK, 0x00000); + + rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] RF0x1 @%s RXIQK2 = 0x%x\n", + iqk_cfg->name, + rtw_read_rf(rtwdev, RF_PATH_A, RF_WLINT, RFREG_MASK)); + rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] RF0x2 @%s RXIQK2 = 0x%x\n", + iqk_cfg->name, + rtw_read_rf(rtwdev, RF_PATH_A, RF_WLSEL, RFREG_MASK)); + + rtw8723d_iqk_one_shot(rtwdev, false, iqk_cfg); + status |= rtw8723d_iqk_check_rx_failed(rtwdev, iqk_cfg); + +restore: + rtw8723d_iqk_txrx_path_post(rtwdev, iqk_cfg, backup); + + return status; +} + +static +void rtw8723d_iqk_fill_s1_matrix(struct rtw_dev *rtwdev, const s32 result[]) +{ + s32 oldval_1; + s32 x, y; + s32 tx1_a, tx1_a_ext; + s32 tx1_c, tx1_c_ext; + + if (result[IQK_S1_TX_X] == 0) + return; + + oldval_1 = rtw_read32_mask(rtwdev, REG_OFDM_0_XA_TX_IQ_IMBALANCE, + BIT_MASK_TXIQ_ELM_D); + + x = iqkxy_to_s32(result[IQK_S1_TX_X]); + tx1_a = iqk_mult(x, oldval_1, &tx1_a_ext); + rtw_write32_mask(rtwdev, REG_OFDM_0_XA_TX_IQ_IMBALANCE, + BIT_MASK_TXIQ_ELM_A, tx1_a); + rtw_write32_mask(rtwdev, REG_OFDM_0_ECCA_THRESHOLD, + BIT_MASK_OFDM0_EXT_A, tx1_a_ext); + + y = iqkxy_to_s32(result[IQK_S1_TX_Y]); + tx1_c = iqk_mult(y, oldval_1, &tx1_c_ext); + rtw_write32_mask(rtwdev, REG_TXIQK_MATRIXA_LSB2_11N, MASKH4BITS, + BIT_SET_TXIQ_ELM_C1(tx1_c)); + rtw_write32_mask(rtwdev, REG_OFDM_0_XA_TX_IQ_IMBALANCE, + BIT_MASK_TXIQ_ELM_C, BIT_SET_TXIQ_ELM_C2(tx1_c)); + rtw_write32_mask(rtwdev, REG_OFDM_0_ECCA_THRESHOLD, + BIT_MASK_OFDM0_EXT_C, tx1_c_ext); + + rtw_dbg(rtwdev, RTW_DBG_RFK, + "[IQK] X = 0x%x, TX1_A = 0x%x, oldval_1 0x%x\n", + x, tx1_a, oldval_1); + rtw_dbg(rtwdev, RTW_DBG_RFK, + "[IQK] Y = 0x%x, TX1_C = 0x%x\n", y, tx1_c); + + if (result[IQK_S1_RX_X] == 0) + return; + + rtw_write32_mask(rtwdev, REG_A_RXIQI, BIT_MASK_RXIQ_S1_X, + result[IQK_S1_RX_X]); + rtw_write32_mask(rtwdev, REG_A_RXIQI, BIT_MASK_RXIQ_S1_Y1, + BIT_SET_RXIQ_S1_Y1(result[IQK_S1_RX_Y])); + rtw_write32_mask(rtwdev, REG_RXIQK_MATRIX_LSB_11N, BIT_MASK_RXIQ_S1_Y2, + BIT_SET_RXIQ_S1_Y2(result[IQK_S1_RX_Y])); +} + +static +void rtw8723d_iqk_fill_s0_matrix(struct rtw_dev *rtwdev, const s32 result[]) +{ + s32 oldval_0; + s32 x, y; + s32 tx0_a, tx0_a_ext; + s32 tx0_c, tx0_c_ext; + + if (result[IQK_S0_TX_X] == 0) + return; + + oldval_0 = 
rtw_read32_mask(rtwdev, REG_TXIQ_CD_S0, BIT_MASK_TXIQ_D_S0); + + x = iqkxy_to_s32(result[IQK_S0_TX_X]); + tx0_a = iqk_mult(x, oldval_0, &tx0_a_ext); + + rtw_write32_mask(rtwdev, REG_TXIQ_AB_S0, BIT_MASK_TXIQ_A_S0, tx0_a); + rtw_write32_mask(rtwdev, REG_TXIQ_AB_S0, BIT_MASK_TXIQ_A_EXT_S0, tx0_a_ext); + + y = iqkxy_to_s32(result[IQK_S0_TX_Y]); + tx0_c = iqk_mult(y, oldval_0, &tx0_c_ext); + + rtw_write32_mask(rtwdev, REG_TXIQ_CD_S0, BIT_MASK_TXIQ_C_S0, tx0_c); + rtw_write32_mask(rtwdev, REG_TXIQ_CD_S0, BIT_MASK_TXIQ_C_EXT_S0, tx0_c_ext); + + if (result[IQK_S0_RX_X] == 0) + return; + + rtw_write32_mask(rtwdev, REG_RXIQ_AB_S0, BIT_MASK_RXIQ_X_S0, + result[IQK_S0_RX_X]); + rtw_write32_mask(rtwdev, REG_RXIQ_AB_S0, BIT_MASK_RXIQ_Y_S0, + result[IQK_S0_RX_Y]); +} + +static void rtw8723d_iqk_path_adda_on(struct rtw_dev *rtwdev) +{ + int i; + + for (i = 0; i < IQK_ADDA_REG_NUM; i++) + rtw_write32(rtwdev, iqk_adda_regs[i], 0x03c00016); +} + +static void rtw8723d_iqk_config_mac(struct rtw_dev *rtwdev) +{ + rtw_write8(rtwdev, REG_TXPAUSE, 0xff); +} + +static +void rtw8723d_iqk_rf_standby(struct rtw_dev *rtwdev, enum rtw_rf_path path) +{ + rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] path-%s standby mode!\n", + path == RF_PATH_A ? "S1" : "S0"); + + rtw_write32_mask(rtwdev, REG_FPGA0_IQK_11N, BIT_MASK_IQK_MOD, RST_IQK); + mdelay(1); + rtw_write_rf(rtwdev, path, RF_MODE, RFREG_MASK, 0x10000); + rtw_write32_mask(rtwdev, REG_FPGA0_IQK_11N, BIT_MASK_IQK_MOD, EN_IQK); +} + +static +bool rtw8723d_iqk_similarity_cmp(struct rtw_dev *rtwdev, s32 result[][IQK_NR], + u8 c1, u8 c2) +{ + u32 i, j, diff; + u32 bitmap = 0; + u8 candidate[PATH_NR] = {IQK_ROUND_INVALID, IQK_ROUND_INVALID}; + bool ret = true; + + s32 tmp1, tmp2; + + for (i = 0; i < IQK_NR; i++) { + tmp1 = iqkxy_to_s32(result[c1][i]); + tmp2 = iqkxy_to_s32(result[c2][i]); + + diff = abs(tmp1 - tmp2); + + if (diff <= MAX_TOLERANCE) + continue; + + if ((i == IQK_S1_RX_X || i == IQK_S0_RX_X) && !bitmap) { + if (result[c1][i] + result[c1][i + 1] == 0) + candidate[i / IQK_SX_NR] = c2; + else if (result[c2][i] + result[c2][i + 1] == 0) + candidate[i / IQK_SX_NR] = c1; + else + bitmap |= BIT(i); + } else { + bitmap |= BIT(i); + } + } + + if (bitmap != 0) + goto check_sim; + + for (i = 0; i < PATH_NR; i++) { + if (candidate[i] == IQK_ROUND_INVALID) + continue; + + for (j = i * IQK_SX_NR; j < i * IQK_SX_NR + 2; j++) + result[IQK_ROUND_HYBRID][j] = result[candidate[i]][j]; + ret = false; + } + + return ret; + +check_sim: + for (i = 0; i < IQK_NR; i++) { + j = i & ~1; /* 2 bits are a pair for IQ[X, Y] */ + if (bitmap & GENMASK(j + 1, j)) + continue; + + result[IQK_ROUND_HYBRID][i] = result[c1][i]; + } + + return false; +} + +static +void rtw8723d_iqk_precfg_path(struct rtw_dev *rtwdev, enum rtw8723d_path path) +{ + if (path == PATH_S0) { + rtw8723d_iqk_rf_standby(rtwdev, RF_PATH_A); + rtw8723d_iqk_path_adda_on(rtwdev); + } + + rtw_write32_mask(rtwdev, REG_FPGA0_IQK_11N, BIT_MASK_IQK_MOD, EN_IQK); + rtw_write32(rtwdev, REG_TXIQK_11N, 0x01007c00); + rtw_write32(rtwdev, REG_RXIQK_11N, 0x01004800); + + if (path == PATH_S1) { + rtw8723d_iqk_rf_standby(rtwdev, RF_PATH_B); + rtw8723d_iqk_path_adda_on(rtwdev); + } +} + +static +void rtw8723d_iqk_one_round(struct rtw_dev *rtwdev, s32 result[][IQK_NR], u8 t, + const struct iqk_backup_regs *backup) +{ + u32 i; + u8 s1_ok, s0_ok; + + rtw_dbg(rtwdev, RTW_DBG_RFK, + "[IQK] IQ Calibration for 1T1R_S0/S1 for %d times\n", t); + + rtw8723d_iqk_path_adda_on(rtwdev); + rtw8723d_iqk_config_mac(rtwdev); + rtw_write32_mask(rtwdev, 
REG_CCK_ANT_SEL_11N, 0x0f000000, 0xf); + rtw_write32(rtwdev, REG_BB_RX_PATH_11N, 0x03a05611); + rtw_write32(rtwdev, REG_TRMUX_11N, 0x000800e4); + rtw_write32(rtwdev, REG_BB_PWR_SAV1_11N, 0x25204200); + rtw8723d_iqk_precfg_path(rtwdev, PATH_S1); + + for (i = 0; i < PATH_IQK_RETRY; i++) { + s1_ok = rtw8723d_iqk_tx_path(rtwdev, &iqk_tx_cfg[PATH_S1], backup); + if (s1_ok == IQK_TX_OK) { + rtw_dbg(rtwdev, RTW_DBG_RFK, + "[IQK] path S1 Tx IQK Success!!\n"); + result[t][IQK_S1_TX_X] = + rtw_read32_mask(rtwdev, REG_IQK_RES_TX, BIT_MASK_RES_TX); + result[t][IQK_S1_TX_Y] = + rtw_read32_mask(rtwdev, REG_IQK_RES_TY, BIT_MASK_RES_TY); + break; + } + + rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] path S1 Tx IQK Fail!!\n"); + result[t][IQK_S1_TX_X] = 0x100; + result[t][IQK_S1_TX_Y] = 0x0; + } + + for (i = 0; i < PATH_IQK_RETRY; i++) { + s1_ok = rtw8723d_iqk_rx_path(rtwdev, &iqk_tx_cfg[PATH_S1], backup); + if (s1_ok == (IQK_TX_OK | IQK_RX_OK)) { + rtw_dbg(rtwdev, RTW_DBG_RFK, + "[IQK] path S1 Rx IQK Success!!\n"); + result[t][IQK_S1_RX_X] = + rtw_read32_mask(rtwdev, REG_IQK_RES_RX, BIT_MASK_RES_RX); + result[t][IQK_S1_RX_Y] = + rtw_read32_mask(rtwdev, REG_IQK_RES_RY, BIT_MASK_RES_RY); + break; + } + + rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] path S1 Rx IQK Fail!!\n"); + result[t][IQK_S1_RX_X] = 0x100; + result[t][IQK_S1_RX_Y] = 0x0; + } + + if (s1_ok == 0x0) + rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] path S1 IQK failed!!\n"); + + rtw8723d_iqk_precfg_path(rtwdev, PATH_S0); + + for (i = 0; i < PATH_IQK_RETRY; i++) { + s0_ok = rtw8723d_iqk_tx_path(rtwdev, &iqk_tx_cfg[PATH_S0], backup); + if (s0_ok == IQK_TX_OK) { + rtw_dbg(rtwdev, RTW_DBG_RFK, + "[IQK] path S0 Tx IQK Success!!\n"); + result[t][IQK_S0_TX_X] = + rtw_read32_mask(rtwdev, REG_IQK_RES_TX, BIT_MASK_RES_TX); + result[t][IQK_S0_TX_Y] = + rtw_read32_mask(rtwdev, REG_IQK_RES_TY, BIT_MASK_RES_TY); + break; + } + + rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] path S0 Tx IQK Fail!!\n"); + result[t][IQK_S0_TX_X] = 0x100; + result[t][IQK_S0_TX_Y] = 0x0; + } + + for (i = 0; i < PATH_IQK_RETRY; i++) { + s0_ok = rtw8723d_iqk_rx_path(rtwdev, &iqk_tx_cfg[PATH_S0], backup); + if (s0_ok == (IQK_TX_OK | IQK_RX_OK)) { + rtw_dbg(rtwdev, RTW_DBG_RFK, + "[IQK] path S0 Rx IQK Success!!\n"); + + result[t][IQK_S0_RX_X] = + rtw_read32_mask(rtwdev, REG_IQK_RES_RX, BIT_MASK_RES_RX); + result[t][IQK_S0_RX_Y] = + rtw_read32_mask(rtwdev, REG_IQK_RES_RY, BIT_MASK_RES_RY); + break; + } + + rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] path S0 Rx IQK Fail!!\n"); + result[t][IQK_S0_RX_X] = 0x100; + result[t][IQK_S0_RX_Y] = 0x0; + } + + if (s0_ok == 0x0) + rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] path S0 IQK failed!!\n"); + + rtw_write32_mask(rtwdev, REG_FPGA0_IQK_11N, BIT_MASK_IQK_MOD, RST_IQK); + mdelay(1); + + rtw_dbg(rtwdev, RTW_DBG_RFK, + "[IQK] back to BB mode, load original value!\n"); +} + +static void rtw8723d_phy_calibration(struct rtw_dev *rtwdev) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + s32 result[IQK_ROUND_SIZE][IQK_NR]; + struct iqk_backup_regs backup; + u8 i, j; + u8 final_candidate = IQK_ROUND_INVALID; + bool good; + + rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] Start!!!\n"); + + memset(result, 0, sizeof(result)); + + rtw8723d_iqk_backup_path_ctrl(rtwdev, &backup); + rtw8723d_iqk_backup_lte_path_gnt(rtwdev, &backup); + rtw8723d_iqk_backup_regs(rtwdev, &backup); + + for (i = IQK_ROUND_0; i <= IQK_ROUND_2; i++) { + rtw8723d_iqk_config_path_ctrl(rtwdev); + rtw8723d_iqk_config_lte_path_gnt(rtwdev); + + rtw8723d_iqk_one_round(rtwdev, result, i, &backup); + + if (i > IQK_ROUND_0) +
rtw8723d_iqk_restore_regs(rtwdev, &backup); + rtw8723d_iqk_restore_lte_path_gnt(rtwdev, &backup); + rtw8723d_iqk_restore_path_ctrl(rtwdev, &backup); + + for (j = IQK_ROUND_0; j < i; j++) { + good = rtw8723d_iqk_similarity_cmp(rtwdev, result, j, i); + + if (good) { + final_candidate = j; + rtw_dbg(rtwdev, RTW_DBG_RFK, + "[IQK] cmp %d:%d final_candidate is %x\n", + j, i, final_candidate); + goto iqk_done; + } + } + } + + if (final_candidate == IQK_ROUND_INVALID) { + s32 reg_tmp = 0; + + for (i = 0; i < IQK_NR; i++) + reg_tmp += result[IQK_ROUND_HYBRID][i]; + + if (reg_tmp != 0) { + final_candidate = IQK_ROUND_HYBRID; + } else { + WARN(1, "IQK failed\n"); + goto out; + } + } + +iqk_done: + rtw8723d_iqk_fill_s1_matrix(rtwdev, result[final_candidate]); + rtw8723d_iqk_fill_s0_matrix(rtwdev, result[final_candidate]); + + dm_info->iqk.result.s1_x = result[final_candidate][IQK_S1_TX_X]; + dm_info->iqk.result.s1_y = result[final_candidate][IQK_S1_TX_Y]; + dm_info->iqk.result.s0_x = result[final_candidate][IQK_S0_TX_X]; + dm_info->iqk.result.s0_y = result[final_candidate][IQK_S0_TX_Y]; + dm_info->iqk.done = true; + +out: + rtw_write32(rtwdev, REG_BB_SEL_BTG, backup.bb_sel_btg); + + rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] final_candidate is %x\n", + final_candidate); + + for (i = IQK_ROUND_0; i < IQK_ROUND_SIZE; i++) + rtw_dbg(rtwdev, RTW_DBG_RFK, + "[IQK] Result %u: rege94_s1=%x rege9c_s1=%x regea4_s1=%x regeac_s1=%x rege94_s0=%x rege9c_s0=%x regea4_s0=%x regeac_s0=%x %s\n", + i, + result[i][0], result[i][1], result[i][2], result[i][3], + result[i][4], result[i][5], result[i][6], result[i][7], + final_candidate == i ? "(final candidate)" : ""); + + rtw_dbg(rtwdev, RTW_DBG_RFK, + "[IQK]0xc80 = 0x%x 0xc94 = 0x%x 0xc14 = 0x%x 0xca0 = 0x%x\n", + rtw_read32(rtwdev, REG_OFDM_0_XA_TX_IQ_IMBALANCE), + rtw_read32(rtwdev, REG_TXIQK_MATRIXA_LSB2_11N), + rtw_read32(rtwdev, REG_A_RXIQI), + rtw_read32(rtwdev, REG_RXIQK_MATRIX_LSB_11N)); + rtw_dbg(rtwdev, RTW_DBG_RFK, + "[IQK]0xcd0 = 0x%x 0xcd4 = 0x%x 0xcd8 = 0x%x\n", + rtw_read32(rtwdev, REG_TXIQ_AB_S0), + rtw_read32(rtwdev, REG_TXIQ_CD_S0), + rtw_read32(rtwdev, REG_RXIQ_AB_S0)); + + rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] finished\n"); +} + +/* for coex */ +static void rtw8723d_coex_cfg_init(struct rtw_dev *rtwdev) +{ + /* enable TBTT interrupt */ + rtw_write8_set(rtwdev, REG_BCN_CTRL, BIT_EN_BCN_FUNCTION); + + /* BT report packet sample rate */ + /* 0x790[5:0]=0x5 */ + rtw_write8_set(rtwdev, REG_BT_TDMA_TIME, 0x05); + + /* enable BT counter statistics */ + rtw_write8(rtwdev, REG_BT_STAT_CTRL, 0x1); + + /* enable PTA (3-wire function from BT side) */ + rtw_write32_set(rtwdev, REG_GPIO_MUXCFG, BIT_BT_PTA_EN); + rtw_write32_set(rtwdev, REG_GPIO_MUXCFG, BIT_BT_AOD_GPIO3); + + /* enable PTA (tx/rx signal from WiFi side) */ + rtw_write8_set(rtwdev, REG_QUEUE_CTRL, BIT_PTA_WL_TX_EN); +} + +static void rtw8723d_coex_cfg_gnt_fix(struct rtw_dev *rtwdev) +{ +} + +static void rtw8723d_coex_cfg_gnt_debug(struct rtw_dev *rtwdev) +{ + rtw_write8_mask(rtwdev, REG_LEDCFG2, BIT(6), 0); + rtw_write8_mask(rtwdev, REG_PAD_CTRL1 + 3, BIT(0), 0); + rtw_write8_mask(rtwdev, REG_GPIO_INTM + 2, BIT(4), 0); + rtw_write8_mask(rtwdev, REG_GPIO_MUXCFG + 2, BIT(1), 0); + rtw_write8_mask(rtwdev, REG_PAD_CTRL1 + 3, BIT(1), 0); + rtw_write8_mask(rtwdev, REG_PAD_CTRL1 + 2, BIT(7), 0); + rtw_write8_mask(rtwdev, REG_SYS_CLKR + 1, BIT(1), 0); + rtw_write8_mask(rtwdev, REG_SYS_SDIO_CTRL + 3, BIT(3), 0); +} + +static void rtw8723d_coex_cfg_rfe_type(struct rtw_dev *rtwdev) +{ + struct rtw_efuse *efuse =
&rtwdev->efuse; + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_rfe *coex_rfe = &coex->rfe; + bool aux = efuse->bt_setting & BIT(6); + + coex_rfe->rfe_module_type = rtwdev->efuse.rfe_option; + coex_rfe->ant_switch_polarity = 0; + coex_rfe->ant_switch_exist = false; + coex_rfe->ant_switch_with_bt = false; + coex_rfe->ant_switch_diversity = false; + coex_rfe->wlg_at_btg = true; + + /* decide antenna at main or aux */ + if (efuse->share_ant) { + if (aux) + rtw_write16(rtwdev, REG_BB_SEL_BTG, 0x80); + else + rtw_write16(rtwdev, REG_BB_SEL_BTG, 0x200); + } else { + if (aux) + rtw_write16(rtwdev, REG_BB_SEL_BTG, 0x280); + else + rtw_write16(rtwdev, REG_BB_SEL_BTG, 0x0); + } + + /* disable LTE coex in wifi side */ + rtw_coex_write_indirect_reg(rtwdev, LTE_COEX_CTRL, BIT_LTE_COEX_EN, 0x0); + rtw_coex_write_indirect_reg(rtwdev, LTE_WL_TRX_CTRL, MASKLWORD, 0xffff); + rtw_coex_write_indirect_reg(rtwdev, LTE_BT_TRX_CTRL, MASKLWORD, 0xffff); +} + +static void rtw8723d_coex_cfg_wl_tx_power(struct rtw_dev *rtwdev, u8 wl_pwr) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_dm *coex_dm = &coex->dm; + static const u8 wl_tx_power[] = {0xb2, 0x90}; + u8 pwr; + + if (wl_pwr == coex_dm->cur_wl_pwr_lvl) + return; + + coex_dm->cur_wl_pwr_lvl = wl_pwr; + + if (coex_dm->cur_wl_pwr_lvl >= ARRAY_SIZE(wl_tx_power)) + coex_dm->cur_wl_pwr_lvl = ARRAY_SIZE(wl_tx_power) - 1; + + pwr = wl_tx_power[coex_dm->cur_wl_pwr_lvl]; + + rtw_write8(rtwdev, REG_ANA_PARAM1 + 3, pwr); +} + +static void rtw8723d_coex_cfg_wl_rx_gain(struct rtw_dev *rtwdev, bool low_gain) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_dm *coex_dm = &coex->dm; + /* WL Rx Low gain on */ + static const u32 wl_rx_low_gain_on[] = { + 0xec120101, 0xeb130101, 0xce140101, 0xcd150101, 0xcc160101, + 0xcb170101, 0xca180101, 0x8d190101, 0x8c1a0101, 0x8b1b0101, + 0x4f1c0101, 0x4e1d0101, 0x4d1e0101, 0x4c1f0101, 0x0e200101, + 0x0d210101, 0x0c220101, 0x0b230101, 0xcf240001, 0xce250001, + 0xcd260001, 0xcc270001, 0x8f280001 + }; + /* WL Rx Low gain off */ + static const u32 wl_rx_low_gain_off[] = { + 0xec120101, 0xeb130101, 0xea140101, 0xe9150101, 0xe8160101, + 0xe7170101, 0xe6180101, 0xe5190101, 0xe41a0101, 0xe31b0101, + 0xe21c0101, 0xe11d0101, 0xe01e0101, 0x861f0101, 0x85200101, + 0x84210101, 0x83220101, 0x82230101, 0x81240101, 0x80250101, + 0x44260101, 0x43270101, 0x42280101 + }; + u8 i; + + if (low_gain == coex_dm->cur_wl_rx_low_gain_en) + return; + + coex_dm->cur_wl_rx_low_gain_en = low_gain; + + if (coex_dm->cur_wl_rx_low_gain_en) { + for (i = 0; i < ARRAY_SIZE(wl_rx_low_gain_on); i++) + rtw_write32(rtwdev, REG_AGCRSSI, wl_rx_low_gain_on[i]); + } else { + for (i = 0; i < ARRAY_SIZE(wl_rx_low_gain_off); i++) + rtw_write32(rtwdev, REG_AGCRSSI, wl_rx_low_gain_off[i]); + } +} + +static u8 rtw8723d_pwrtrack_get_limit_ofdm(struct rtw_dev *rtwdev) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + u8 tx_rate = dm_info->tx_rate; + u8 limit_ofdm = 30; + + switch (tx_rate) { + case DESC_RATE1M...DESC_RATE5_5M: + case DESC_RATE11M: + break; + case DESC_RATE6M...DESC_RATE48M: + limit_ofdm = 36; + break; + case DESC_RATE54M: + limit_ofdm = 34; + break; + case DESC_RATEMCS0...DESC_RATEMCS2: + limit_ofdm = 38; + break; + case DESC_RATEMCS3...DESC_RATEMCS4: + limit_ofdm = 36; + break; + case DESC_RATEMCS5...DESC_RATEMCS7: + limit_ofdm = 34; + break; + default: + rtw_warn(rtwdev, "pwrtrack unhandled tx_rate 0x%x\n", tx_rate); + break; + } + + return limit_ofdm; +} + +static void rtw8723d_set_iqk_matrix_by_result(struct rtw_dev *rtwdev, + u32 
ofdm_swing, u8 rf_path) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + s32 ele_A, ele_D, ele_C; + s32 ele_A_ext, ele_C_ext, ele_D_ext; + s32 iqk_result_x; + s32 iqk_result_y; + s32 value32; + + switch (rf_path) { + default: + case RF_PATH_A: + iqk_result_x = dm_info->iqk.result.s1_x; + iqk_result_y = dm_info->iqk.result.s1_y; + break; + case RF_PATH_B: + iqk_result_x = dm_info->iqk.result.s0_x; + iqk_result_y = dm_info->iqk.result.s0_y; + break; + } + + /* new element D */ + ele_D = OFDM_SWING_D(ofdm_swing); + iqk_mult(iqk_result_x, ele_D, &ele_D_ext); + /* new element A */ + iqk_result_x = iqkxy_to_s32(iqk_result_x); + ele_A = iqk_mult(iqk_result_x, ele_D, &ele_A_ext); + /* new element C */ + iqk_result_y = iqkxy_to_s32(iqk_result_y); + ele_C = iqk_mult(iqk_result_y, ele_D, &ele_C_ext); + + switch (rf_path) { + case RF_PATH_A: + default: + /* write new elements A, C, D, and element B is always 0 */ + value32 = BIT_SET_TXIQ_ELM_ACD(ele_A, ele_C, ele_D); + rtw_write32(rtwdev, REG_OFDM_0_XA_TX_IQ_IMBALANCE, value32); + value32 = BIT_SET_TXIQ_ELM_C1(ele_C); + rtw_write32_mask(rtwdev, REG_TXIQK_MATRIXA_LSB2_11N, MASKH4BITS, + value32); + value32 = rtw_read32(rtwdev, REG_OFDM_0_ECCA_THRESHOLD); + value32 &= ~BIT_MASK_OFDM0_EXTS; + value32 |= BIT_SET_OFDM0_EXTS(ele_A_ext, ele_C_ext, ele_D_ext); + rtw_write32(rtwdev, REG_OFDM_0_ECCA_THRESHOLD, value32); + break; + + case RF_PATH_B: + /* write new elements A, C, D, and element B is always 0 */ + rtw_write32_mask(rtwdev, REG_TXIQ_CD_S0, BIT_MASK_TXIQ_D_S0, ele_D); + rtw_write32_mask(rtwdev, REG_TXIQ_CD_S0, BIT_MASK_TXIQ_C_S0, ele_C); + rtw_write32_mask(rtwdev, REG_TXIQ_AB_S0, BIT_MASK_TXIQ_A_S0, ele_A); + + rtw_write32_mask(rtwdev, REG_TXIQ_CD_S0, BIT_MASK_TXIQ_D_EXT_S0, + ele_D_ext); + rtw_write32_mask(rtwdev, REG_TXIQ_AB_S0, BIT_MASK_TXIQ_A_EXT_S0, + ele_A_ext); + rtw_write32_mask(rtwdev, REG_TXIQ_CD_S0, BIT_MASK_TXIQ_C_EXT_S0, + ele_C_ext); + break; + } +} + +static void rtw8723d_set_iqk_matrix(struct rtw_dev *rtwdev, s8 ofdm_index, + u8 rf_path) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + s32 value32; + u32 ofdm_swing; + + if (ofdm_index >= RTW_OFDM_SWING_TABLE_SIZE) + ofdm_index = RTW_OFDM_SWING_TABLE_SIZE - 1; + else if (ofdm_index < 0) + ofdm_index = 0; + + ofdm_swing = rtw8723d_ofdm_swing_table[ofdm_index]; + + if (dm_info->iqk.done) { + rtw8723d_set_iqk_matrix_by_result(rtwdev, ofdm_swing, rf_path); + return; + } + + switch (rf_path) { + case RF_PATH_A: + default: + rtw_write32(rtwdev, REG_OFDM_0_XA_TX_IQ_IMBALANCE, ofdm_swing); + rtw_write32_mask(rtwdev, REG_TXIQK_MATRIXA_LSB2_11N, MASKH4BITS, + 0x00); + value32 = rtw_read32(rtwdev, REG_OFDM_0_ECCA_THRESHOLD); + value32 &= ~BIT_MASK_OFDM0_EXTS; + rtw_write32(rtwdev, REG_OFDM_0_ECCA_THRESHOLD, value32); + break; + + case RF_PATH_B: + /* image S1:c80 to S0:Cd0 and Cd4 */ + rtw_write32_mask(rtwdev, REG_TXIQ_AB_S0, BIT_MASK_TXIQ_A_S0, + OFDM_SWING_A(ofdm_swing)); + rtw_write32_mask(rtwdev, REG_TXIQ_AB_S0, BIT_MASK_TXIQ_B_S0, + OFDM_SWING_B(ofdm_swing)); + rtw_write32_mask(rtwdev, REG_TXIQ_CD_S0, BIT_MASK_TXIQ_C_S0, + OFDM_SWING_C(ofdm_swing)); + rtw_write32_mask(rtwdev, REG_TXIQ_CD_S0, BIT_MASK_TXIQ_D_S0, + OFDM_SWING_D(ofdm_swing)); + rtw_write32_mask(rtwdev, REG_TXIQ_CD_S0, BIT_MASK_TXIQ_D_EXT_S0, 0x0); + rtw_write32_mask(rtwdev, REG_TXIQ_CD_S0, BIT_MASK_TXIQ_C_EXT_S0, 0x0); + rtw_write32_mask(rtwdev, REG_TXIQ_AB_S0, BIT_MASK_TXIQ_A_EXT_S0, 0x0); + break; + } +} + +static void rtw8723d_pwrtrack_set_ofdm_pwr(struct rtw_dev *rtwdev, s8 swing_idx, + s8 txagc_idx) +{ + struct 
rtw_dm_info *dm_info = &rtwdev->dm_info; + + dm_info->txagc_remnant_ofdm = txagc_idx; + + rtw8723d_set_iqk_matrix(rtwdev, swing_idx, RF_PATH_A); + rtw8723d_set_iqk_matrix(rtwdev, swing_idx, RF_PATH_B); +} + +static void rtw8723d_pwrtrack_set_cck_pwr(struct rtw_dev *rtwdev, s8 swing_idx, + s8 txagc_idx) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + + dm_info->txagc_remnant_cck = txagc_idx; + + rtw_write32_mask(rtwdev, 0xab4, 0x000007FF, + rtw8723d_cck_swing_table[swing_idx]); +} + +static void rtw8723d_pwrtrack_set(struct rtw_dev *rtwdev, u8 path) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + struct rtw_hal *hal = &rtwdev->hal; + u8 limit_ofdm; + u8 limit_cck = 40; + s8 final_ofdm_swing_index; + s8 final_cck_swing_index; + + limit_ofdm = rtw8723d_pwrtrack_get_limit_ofdm(rtwdev); + + final_ofdm_swing_index = RTW_DEF_OFDM_SWING_INDEX + + dm_info->delta_power_index[path]; + final_cck_swing_index = RTW_DEF_CCK_SWING_INDEX + + dm_info->delta_power_index[path]; + + if (final_ofdm_swing_index > limit_ofdm) + rtw8723d_pwrtrack_set_ofdm_pwr(rtwdev, limit_ofdm, + final_ofdm_swing_index - limit_ofdm); + else if (final_ofdm_swing_index < 0) + rtw8723d_pwrtrack_set_ofdm_pwr(rtwdev, 0, + final_ofdm_swing_index); + else + rtw8723d_pwrtrack_set_ofdm_pwr(rtwdev, final_ofdm_swing_index, 0); + + if (final_cck_swing_index > limit_cck) + rtw8723d_pwrtrack_set_cck_pwr(rtwdev, limit_cck, + final_cck_swing_index - limit_cck); + else if (final_cck_swing_index < 0) + rtw8723d_pwrtrack_set_cck_pwr(rtwdev, 0, + final_cck_swing_index); + else + rtw8723d_pwrtrack_set_cck_pwr(rtwdev, final_cck_swing_index, 0); + + rtw_phy_set_tx_power_level(rtwdev, hal->current_channel); +} + +static void rtw8723d_pwrtrack_set_xtal(struct rtw_dev *rtwdev, u8 therm_path, + u8 delta) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + const struct rtw_pwr_track_tbl *tbl = rtwdev->chip->pwr_track_tbl; + const s8 *pwrtrk_xtal; + s8 xtal_cap; + + if (dm_info->thermal_avg[therm_path] > + rtwdev->efuse.thermal_meter[therm_path]) + pwrtrk_xtal = tbl->pwrtrk_xtal_p; + else + pwrtrk_xtal = tbl->pwrtrk_xtal_n; + + xtal_cap = rtwdev->efuse.crystal_cap & 0x3F; + xtal_cap = clamp_t(s8, xtal_cap + pwrtrk_xtal[delta], 0, 0x3F); + rtw_write32_mask(rtwdev, REG_AFE_CTRL3, BIT_MASK_XTAL, + xtal_cap | (xtal_cap << 6)); +} + +static void rtw8723d_phy_pwrtrack(struct rtw_dev *rtwdev) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + struct rtw_swing_table swing_table; + u8 thermal_value, delta, path; + bool do_iqk = false; + + rtw_phy_config_swing_table(rtwdev, &swing_table); + + if (rtwdev->efuse.thermal_meter[0] == 0xff) + return; + + thermal_value = rtw_read_rf(rtwdev, RF_PATH_A, RF_T_METER, 0xfc00); + + rtw_phy_pwrtrack_avg(rtwdev, thermal_value, RF_PATH_A); + + do_iqk = rtw_phy_pwrtrack_need_iqk(rtwdev); + + if (do_iqk) + rtw8723d_lck(rtwdev); + + if (dm_info->pwr_trk_init_trigger) + dm_info->pwr_trk_init_trigger = false; + else if (!rtw_phy_pwrtrack_thermal_changed(rtwdev, thermal_value, + RF_PATH_A)) + goto iqk; + + delta = rtw_phy_pwrtrack_get_delta(rtwdev, RF_PATH_A); + + delta = min_t(u8, delta, RTW_PWR_TRK_TBL_SZ - 1); + + for (path = 0; path < rtwdev->hal.rf_path_num; path++) { + s8 delta_cur, delta_last; + + delta_last = dm_info->delta_power_index[path]; + delta_cur = rtw_phy_pwrtrack_get_pwridx(rtwdev, &swing_table, + path, RF_PATH_A, delta); + if (delta_last == delta_cur) + continue; + + dm_info->delta_power_index[path] = delta_cur; + rtw8723d_pwrtrack_set(rtwdev, path); + } + + rtw8723d_pwrtrack_set_xtal(rtwdev, 
RF_PATH_A, delta); + +iqk: + if (do_iqk) + rtw8723d_phy_calibration(rtwdev); +} + +static void rtw8723d_pwr_track(struct rtw_dev *rtwdev) +{ + struct rtw_efuse *efuse = &rtwdev->efuse; + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + + if (efuse->power_track_type != 0) + return; + + if (!dm_info->pwr_trk_triggered) { + rtw_write_rf(rtwdev, RF_PATH_A, RF_T_METER, + GENMASK(17, 16), 0x03); + dm_info->pwr_trk_triggered = true; + return; + } + + rtw8723d_phy_pwrtrack(rtwdev); + dm_info->pwr_trk_triggered = false; +} + static struct rtw_chip_ops rtw8723d_ops = { .phy_set_param = rtw8723d_phy_set_param, .read_efuse = rtw8723d_read_efuse, .query_rx_desc = rtw8723d_query_rx_desc, .set_channel = rtw8723d_set_channel, .mac_init = rtw8723d_mac_init, + .shutdown = rtw8723d_shutdown, .read_rf = rtw_phy_read_rf_sipi, .write_rf = rtw_phy_write_rf_reg_sipi, .set_tx_power_index = rtw8723d_set_tx_power_index, @@ -616,9 +1930,166 @@ static struct rtw_chip_ops rtw8723d_ops = { .cfg_ldo25 = rtw8723d_cfg_ldo25, .efuse_grant = rtw8723d_efuse_grant, .false_alarm_statistics = rtw8723d_false_alarm_statistics, + .phy_calibration = rtw8723d_phy_calibration, + .pwr_track = rtw8723d_pwr_track, .config_bfee = NULL, .set_gid_table = NULL, .cfg_csi_rate = NULL, + + .coex_set_init = rtw8723d_coex_cfg_init, + .coex_set_ant_switch = NULL, + .coex_set_gnt_fix = rtw8723d_coex_cfg_gnt_fix, + .coex_set_gnt_debug = rtw8723d_coex_cfg_gnt_debug, + .coex_set_rfe_type = rtw8723d_coex_cfg_rfe_type, + .coex_set_wl_tx_power = rtw8723d_coex_cfg_wl_tx_power, + .coex_set_wl_rx_gain = rtw8723d_coex_cfg_wl_rx_gain, +}; + +/* Shared-Antenna Coex Table */ +static const struct coex_table_para table_sant_8723d[] = { + {0xffffffff, 0xffffffff}, /* case-0 */ + {0x55555555, 0x55555555}, + {0x65555555, 0x65555555}, + {0xaaaaaaaa, 0xaaaaaaaa}, + {0x5a5a5a5a, 0x5a5a5a5a}, + {0xfafafafa, 0xfafafafa}, /* case-5 */ + {0xa5555555, 0xaaaa5aaa}, + {0x6a5a5a5a, 0x5a5a5a5a}, + {0x6a5a5a5a, 0x6a5a5a5a}, + {0x65555555, 0x5a5a5a5a}, + {0x65555555, 0x6a5a5a5a}, /* case-10 */ + {0x65555555, 0xfafafafa}, + {0x65555555, 0x6a5a5aaa}, + {0x65555555, 0x5aaa5aaa}, + {0x65555555, 0xaaaa5aaa}, + {0x65555555, 0xaaaaaaaa}, /* case-15 */ + {0xffff55ff, 0xfafafafa}, + {0xffff55ff, 0x6afa5afa}, + {0xaaffffaa, 0xfafafafa}, + {0xaa5555aa, 0x5a5a5a5a}, + {0xaa5555aa, 0x6a5a5a5a}, /* case-20 */ + {0xaa5555aa, 0xaaaaaaaa}, + {0xffffffff, 0x5a5a5a5a}, + {0xffffffff, 0x6a5a5a5a}, + {0xffffffff, 0x55555555}, + {0xffffffff, 0x6a5a5aaa}, /* case-25 */ + {0x55555555, 0x5a5a5a5a}, + {0x55555555, 0xaaaaaaaa}, + {0x55555555, 0x6a6a6a6a}, + {0x656a656a, 0x656a656a} +}; + +/* Non-Shared-Antenna Coex Table */ +static const struct coex_table_para table_nsant_8723d[] = { + {0xffffffff, 0xffffffff}, /* case-100 */ + {0x55555555, 0x55555555}, + {0x65555555, 0x65555555}, + {0xaaaaaaaa, 0xaaaaaaaa}, + {0x5a5a5a5a, 0x5a5a5a5a}, + {0xfafafafa, 0xfafafafa}, /* case-105 */ + {0x5afa5afa, 0x5afa5afa}, + {0x55555555, 0xfafafafa}, + {0x65555555, 0xfafafafa}, + {0x65555555, 0x5a5a5a5a}, + {0x65555555, 0x6a5a5a5a}, /* case-110 */ + {0x65555555, 0xaaaaaaaa}, + {0xffff55ff, 0xfafafafa}, + {0xffff55ff, 0x5afa5afa}, + {0xffff55ff, 0xaaaaaaaa}, + {0xaaffffaa, 0xfafafafa}, /* case-115 */ + {0xaaffffaa, 0x5afa5afa}, + {0xaaffffaa, 0xaaaaaaaa}, + {0xffffffff, 0xfafafafa}, + {0xffffffff, 0x5afa5afa}, + {0xffffffff, 0xaaaaaaaa},/* case-120 */ + {0x55ff55ff, 0x5afa5afa}, + {0x55ff55ff, 0xaaaaaaaa}, + {0x55ff55ff, 0x55ff55ff} +}; + +/* Shared-Antenna TDMA */ +static const struct coex_tdma_para tdma_sant_8723d[] = { + 
{ {0x08, 0x00, 0x00, 0x00, 0x00} }, /* case-0 */ + { {0x61, 0x45, 0x03, 0x11, 0x11} }, /* case-1 */ + { {0x61, 0x3a, 0x03, 0x11, 0x11} }, + { {0x61, 0x20, 0x03, 0x11, 0x11} }, + { {0x61, 0x30, 0x03, 0x11, 0x11} }, + { {0x61, 0x10, 0x03, 0x11, 0x11} }, /* case-5 */ + { {0x61, 0x48, 0x03, 0x11, 0x10} }, + { {0x61, 0x3a, 0x03, 0x11, 0x10} }, + { {0x61, 0x30, 0x03, 0x11, 0x10} }, + { {0x61, 0x20, 0x03, 0x11, 0x10} }, + { {0x61, 0x10, 0x03, 0x11, 0x10} }, /* case-10 */ + { {0x61, 0x10, 0x03, 0x11, 0x14} }, + { {0x61, 0x08, 0x03, 0x10, 0x14} }, + { {0x51, 0x10, 0x03, 0x10, 0x54} }, + { {0x51, 0x10, 0x03, 0x10, 0x55} }, + { {0x51, 0x10, 0x07, 0x10, 0x54} }, /* case-15 */ + { {0x51, 0x45, 0x03, 0x10, 0x50} }, + { {0x51, 0x3a, 0x03, 0x10, 0x50} }, + { {0x51, 0x30, 0x03, 0x10, 0x50} }, + { {0x51, 0x20, 0x03, 0x10, 0x50} }, + { {0x51, 0x15, 0x03, 0x10, 0x50} }, /* case-20 */ + { {0x51, 0x4a, 0x03, 0x10, 0x50} }, + { {0x51, 0x0c, 0x03, 0x10, 0x54} }, + { {0x55, 0x08, 0x03, 0x10, 0x54} }, + { {0x65, 0x10, 0x03, 0x11, 0x11} }, + { {0x51, 0x10, 0x03, 0x10, 0x51} }, + { {0x61, 0x15, 0x03, 0x11, 0x10} } +}; + +/* Non-Shared-Antenna TDMA */ +static const struct coex_tdma_para tdma_nsant_8723d[] = { + { {0x00, 0x00, 0x00, 0x40, 0x01} }, /* case-100 */ + { {0x61, 0x45, 0x03, 0x11, 0x11} }, /* case-101 */ + { {0x61, 0x3a, 0x03, 0x11, 0x11} }, + { {0x61, 0x30, 0x03, 0x11, 0x11} }, + { {0x61, 0x20, 0x03, 0x11, 0x11} }, + { {0x61, 0x10, 0x03, 0x11, 0x11} }, /* case-105 */ + { {0x61, 0x45, 0x03, 0x11, 0x10} }, + { {0x61, 0x3a, 0x03, 0x11, 0x10} }, + { {0x61, 0x30, 0x03, 0x11, 0x10} }, + { {0x61, 0x20, 0x03, 0x11, 0x10} }, + { {0x61, 0x10, 0x03, 0x11, 0x10} }, /* case-110 */ + { {0x61, 0x08, 0x03, 0x11, 0x14} }, + { {0x61, 0x08, 0x03, 0x10, 0x14} }, + { {0x51, 0x08, 0x03, 0x10, 0x54} }, + { {0x51, 0x08, 0x03, 0x10, 0x55} }, + { {0x51, 0x08, 0x07, 0x10, 0x54} }, /* case-115 */ + { {0x51, 0x45, 0x03, 0x10, 0x50} }, + { {0x51, 0x3a, 0x03, 0x10, 0x50} }, + { {0x51, 0x30, 0x03, 0x10, 0x50} }, + { {0x51, 0x20, 0x03, 0x10, 0x50} }, + { {0x51, 0x10, 0x03, 0x10, 0x50} }, /* case-120 */ + { {0x51, 0x08, 0x03, 0x10, 0x50} }, +}; + +/* rssi in percentage % (dbm = % - 100) */ +static const u8 wl_rssi_step_8723d[] = {60, 50, 44, 30}; +static const u8 bt_rssi_step_8723d[] = {30, 30, 30, 30}; +static const struct coex_5g_afh_map afh_5g_8723d[] = { {0, 0, 0} }; + +static const struct rtw_hw_reg btg_reg_8723d = { + .addr = REG_BTG_SEL, .mask = BIT_MASK_BTG_WL, +}; + +/* wl_tx_dec_power, bt_tx_dec_power, wl_rx_gain, bt_rx_lna_constrain */ +static const struct coex_rf_para rf_para_tx_8723d[] = { + {0, 0, false, 7}, /* for normal */ + {0, 10, false, 7}, /* for WL-CPT */ + {1, 0, true, 4}, + {1, 2, true, 4}, + {1, 10, true, 4}, + {1, 15, true, 4} +}; + +static const struct coex_rf_para rf_para_rx_8723d[] = { + {0, 0, false, 7}, /* for normal */ + {0, 10, false, 7}, /* for WL-CPT */ + {1, 0, true, 5}, + {1, 2, true, 5}, + {1, 10, true, 5}, + {1, 15, true, 5} }; static const struct rtw_pwr_seq_cmd trans_carddis_to_cardemu_8723d[] = { @@ -1048,6 +2519,22 @@ static const struct rtw_rqpn rqpn_table_8723d[] = { RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH}, }; +static const struct rtw_prioq_addrs prioq_addrs_8723d = { + .prio[RTW_DMA_MAPPING_EXTRA] = { + .rsvd = REG_RQPN_NPQ + 2, .avail = REG_RQPN_NPQ + 3, + }, + .prio[RTW_DMA_MAPPING_LOW] = { + .rsvd = REG_RQPN + 1, .avail = REG_FIFOPAGE_CTRL_2 + 1, + }, + .prio[RTW_DMA_MAPPING_NORMAL] = { + .rsvd = REG_RQPN_NPQ, .avail = REG_RQPN_NPQ + 1, + }, + .prio[RTW_DMA_MAPPING_HIGH] = { + .rsvd = 
REG_RQPN, .avail = REG_FIFOPAGE_CTRL_2, + }, + .wsize = false, +}; + static const struct rtw_intf_phy_para pcie_gen1_param_8723d[] = { {0x0008, 0x4a22, RTW_IP_SEL_PHY, @@ -1084,11 +2571,102 @@ static const struct rtw_rf_sipi_addr rtw8723d_rf_sipi_addr[] = { .hssi_2 = 0x82c, .lssi_read_pi = 0x8bc}, }; +static const struct rtw_ltecoex_addr rtw8723d_ltecoex_addr = { + .ctrl = REG_LTECOEX_CTRL, + .wdata = REG_LTECOEX_WRITE_DATA, + .rdata = REG_LTECOEX_READ_DATA, +}; + static const struct rtw_rfe_def rtw8723d_rfe_defs[] = { [0] = { .phy_pg_tbl = &rtw8723d_bb_pg_tbl, .txpwr_lmt_tbl = &rtw8723d_txpwr_lmt_tbl,}, }; +static const u8 rtw8723d_pwrtrk_2gb_n[] = { + 0, 0, 1, 1, 1, 2, 2, 3, 4, 4, 4, 4, 5, 5, 5, + 6, 6, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10, 10, 10 +}; + +static const u8 rtw8723d_pwrtrk_2gb_p[] = { + 0, 0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, + 7, 8, 8, 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10 +}; + +static const u8 rtw8723d_pwrtrk_2ga_n[] = { + 0, 0, 1, 1, 1, 2, 2, 3, 4, 4, 4, 4, 5, 5, 5, + 6, 6, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10, 10, 10 +}; + +static const u8 rtw8723d_pwrtrk_2ga_p[] = { + 0, 0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, + 7, 8, 8, 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10 +}; + +static const u8 rtw8723d_pwrtrk_2g_cck_b_n[] = { + 0, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, + 6, 7, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 11, 11, 11 +}; + +static const u8 rtw8723d_pwrtrk_2g_cck_b_p[] = { + 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, + 7, 8, 9, 9, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11 +}; + +static const u8 rtw8723d_pwrtrk_2g_cck_a_n[] = { + 0, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, + 6, 7, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 11, 11, 11 +}; + +static const u8 rtw8723d_pwrtrk_2g_cck_a_p[] = { + 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, + 7, 8, 9, 9, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11 +}; + +static const s8 rtw8723d_pwrtrk_xtal_n[] = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +}; + +static const s8 rtw8723d_pwrtrk_xtal_p[] = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, -10, -12, -14, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16 +}; + +static const struct rtw_pwr_track_tbl rtw8723d_rtw_pwr_track_tbl = { + .pwrtrk_2gb_n = rtw8723d_pwrtrk_2gb_n, + .pwrtrk_2gb_p = rtw8723d_pwrtrk_2gb_p, + .pwrtrk_2ga_n = rtw8723d_pwrtrk_2ga_n, + .pwrtrk_2ga_p = rtw8723d_pwrtrk_2ga_p, + .pwrtrk_2g_cckb_n = rtw8723d_pwrtrk_2g_cck_b_n, + .pwrtrk_2g_cckb_p = rtw8723d_pwrtrk_2g_cck_b_p, + .pwrtrk_2g_ccka_n = rtw8723d_pwrtrk_2g_cck_a_n, + .pwrtrk_2g_ccka_p = rtw8723d_pwrtrk_2g_cck_a_p, + .pwrtrk_xtal_p = rtw8723d_pwrtrk_xtal_p, + .pwrtrk_xtal_n = rtw8723d_pwrtrk_xtal_n, +}; + +static const struct rtw_reg_domain coex_info_hw_regs_8723d[] = { + {0x948, MASKDWORD, RTW_REG_DOMAIN_MAC32}, + {0x67, BIT(7), RTW_REG_DOMAIN_MAC8}, + {0, 0, RTW_REG_DOMAIN_NL}, + {0x964, BIT(1), RTW_REG_DOMAIN_MAC8}, + {0x864, BIT(0), RTW_REG_DOMAIN_MAC8}, + {0xab7, BIT(5), RTW_REG_DOMAIN_MAC8}, + {0xa01, BIT(7), RTW_REG_DOMAIN_MAC8}, + {0, 0, RTW_REG_DOMAIN_NL}, + {0x430, MASKDWORD, RTW_REG_DOMAIN_MAC32}, + {0x434, MASKDWORD, RTW_REG_DOMAIN_MAC32}, + {0x42a, MASKLWORD, RTW_REG_DOMAIN_MAC16}, + {0x426, MASKBYTE0, RTW_REG_DOMAIN_MAC8}, + {0x45e, BIT(3), RTW_REG_DOMAIN_MAC8}, + {0, 0, RTW_REG_DOMAIN_NL}, + {0x4c6, BIT(4), RTW_REG_DOMAIN_MAC8}, + {0x40, BIT(5), RTW_REG_DOMAIN_MAC8}, + {0x550, MASKDWORD, RTW_REG_DOMAIN_MAC32}, + {0x522, MASKBYTE0, RTW_REG_DOMAIN_MAC8}, + {0x953, BIT(1), RTW_REG_DOMAIN_MAC8}, +}; + struct rtw_chip_info 
rtw8723d_hw_spec = { .ops = &rtw8723d_ops, .id = RTW_CHIP_TYPE_8723D, @@ -1118,12 +2696,14 @@ struct rtw_chip_info rtw8723d_hw_spec = { .pwr_off_seq = card_disable_flow_8723d, .page_table = page_table_8723d, .rqpn_table = rqpn_table_8723d, + .prioq_addrs = &prioq_addrs_8723d, .intf_table = &phy_para_table_8723d, .dig = rtw8723d_dig, .dig_cck = rtw8723d_dig_cck, .rf_sipi_addr = {0x840, 0x844}, .rf_sipi_read_addr = rtw8723d_rf_sipi_addr, .fix_rf_phy_num = 2, + .ltecoex_addr = &rtw8723d_ltecoex_addr, .mac_tbl = &rtw8723d_mac_tbl, .agc_tbl = &rtw8723d_agc_tbl, .bb_tbl = &rtw8723d_bb_tbl, @@ -1131,7 +2711,43 @@ struct rtw_chip_info rtw8723d_hw_spec = { .rfe_defs = rtw8723d_rfe_defs, .rfe_defs_size = ARRAY_SIZE(rtw8723d_rfe_defs), .rx_ldpc = false, + .pwr_track_tbl = &rtw8723d_rtw_pwr_track_tbl, + .iqk_threshold = 8, + + .coex_para_ver = 0x1905302f, + .bt_desired_ver = 0x2f, + .scbd_support = true, + .new_scbd10_def = true, + .pstdma_type = COEX_PSTDMA_FORCE_LPSOFF, + .bt_rssi_type = COEX_BTRSSI_RATIO, + .ant_isolation = 15, + .rssi_tolerance = 2, + .wl_rssi_step = wl_rssi_step_8723d, + .bt_rssi_step = bt_rssi_step_8723d, + .table_sant_num = ARRAY_SIZE(table_sant_8723d), + .table_sant = table_sant_8723d, + .table_nsant_num = ARRAY_SIZE(table_nsant_8723d), + .table_nsant = table_nsant_8723d, + .tdma_sant_num = ARRAY_SIZE(tdma_sant_8723d), + .tdma_sant = tdma_sant_8723d, + .tdma_nsant_num = ARRAY_SIZE(tdma_nsant_8723d), + .tdma_nsant = tdma_nsant_8723d, + .wl_rf_para_num = ARRAY_SIZE(rf_para_tx_8723d), + .wl_rf_para_tx = rf_para_tx_8723d, + .wl_rf_para_rx = rf_para_rx_8723d, + .bt_afh_span_bw20 = 0x20, + .bt_afh_span_bw40 = 0x30, + .afh_5g_num = ARRAY_SIZE(afh_5g_8723d), + .afh_5g = afh_5g_8723d, + .btg_reg = &btg_reg_8723d, + + .coex_info_hw_regs_num = ARRAY_SIZE(coex_info_hw_regs_8723d), + .coex_info_hw_regs = coex_info_hw_regs_8723d, }; EXPORT_SYMBOL(rtw8723d_hw_spec); MODULE_FIRMWARE("rtw88/rtw8723d_fw.bin"); + +MODULE_AUTHOR("Realtek Corporation"); +MODULE_DESCRIPTION("Realtek 802.11n wireless 8723d driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723d.h b/drivers/net/wireless/realtek/rtw88/rtw8723d.h index ac66f672bec8..7894d321cd7e 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8723d.h +++ b/drivers/net/wireless/realtek/rtw88/rtw8723d.h @@ -5,6 +5,34 @@ #ifndef __RTW8723D_H__ #define __RTW8723D_H__ +enum rtw8723d_path { + PATH_S1, + PATH_S0, + PATH_NR, +}; + +enum rtw8723d_iqk_round { + IQK_ROUND_0, + IQK_ROUND_1, + IQK_ROUND_2, + IQK_ROUND_HYBRID, + IQK_ROUND_SIZE, + IQK_ROUND_INVALID = 0xff, +}; + +enum rtw8723d_iqk_result { + IQK_S1_TX_X, + IQK_S1_TX_Y, + IQK_S1_RX_X, + IQK_S1_RX_Y, + IQK_S0_TX_X, + IQK_S0_TX_Y, + IQK_S0_RX_X, + IQK_S0_RX_Y, + IQK_NR, + IQK_SX_NR = IQK_NR / PATH_NR, +}; + struct rtw8723de_efuse { u8 mac_addr[ETH_ALEN]; /* 0xd0 */ u8 vender_id[2]; @@ -66,6 +94,41 @@ struct rtw8723d_efuse { #define GET_PHY_STAT_P1_RXSNR_A(phy_stat) \ le32_get_bits(*((__le32 *)(phy_stat) + 0x06), GENMASK(7, 0)) +static inline s32 iqkxy_to_s32(s32 val) +{ + /* val is Q10.8 */ + return sign_extend32(val, 9); +} + +static inline s32 iqk_mult(s32 x, s32 y, s32 *ext) +{ + /* x, y and return value are Q10.8 */ + s32 t; + + t = x * y; + if (ext) + *ext = (t >> 7) & 0x1; /* Q.16 --> Q.9; get LSB of Q.9 */ + + return (t >> 8); /* Q.16 --> Q.8 */ +} + +#define OFDM_SWING_A(swing) FIELD_GET(GENMASK(9, 0), swing) +#define OFDM_SWING_B(swing) FIELD_GET(GENMASK(15, 10), swing) +#define OFDM_SWING_C(swing) FIELD_GET(GENMASK(21, 16), swing) +#define 
OFDM_SWING_D(swing) FIELD_GET(GENMASK(31, 22), swing) +#define RTW_DEF_OFDM_SWING_INDEX 28 +#define RTW_DEF_CCK_SWING_INDEX 28 + +#define MAX_TOLERANCE 5 +#define IQK_TX_X_ERR 0x142 +#define IQK_TX_Y_ERR 0x42 +#define IQK_RX_X_UPPER 0x11a +#define IQK_RX_X_LOWER 0xe6 +#define IQK_RX_Y_LMT 0x1a +#define IQK_TX_OK BIT(0) +#define IQK_RX_OK BIT(1) +#define PATH_IQK_RETRY 2 + #define SPUR_THRES 0x16 #define CCK_DFIR_NR 3 #define DIS_3WIRE 0xccf000c0 @@ -78,16 +141,28 @@ struct rtw8723d_efuse { #define RFCFGCH_BW_20M (BIT(11) | BIT(10)) #define RFCFGCH_BW_40M BIT(10) #define BIT_MASK_RFMOD BIT(0) +#define BIT_LCK BIT(15) +#define REG_GPIO_INTM 0x0048 +#define REG_BTG_SEL 0x0067 +#define BIT_MASK_BTG_WL BIT(7) +#define REG_LTECOEX_PATH_CONTROL 0x0070 +#define REG_LTECOEX_CTRL 0x07c0 +#define REG_LTECOEX_WRITE_DATA 0x07c4 +#define REG_LTECOEX_READ_DATA 0x07c8 #define REG_PSDFN 0x0808 +#define REG_BB_PWR_SAV1_11N 0x0874 +#define REG_ANA_PARAM1 0x0880 #define REG_ANALOG_P4 0x088c #define REG_PSDRPT 0x08b4 #define REG_FPGA1_RFMOD 0x0900 +#define REG_BB_SEL_BTG 0x0948 #define REG_BBRX_DFIR 0x0954 #define BIT_MASK_RXBB_DFIR GENMASK(27, 24) #define BIT_RXBB_DFIR_EN BIT(19) #define REG_CCK0_SYS 0x0a00 #define BIT_CCK_SIDE_BAND BIT(4) +#define REG_CCK_ANT_SEL_11N 0x0a04 #define REG_CCK_FA_RST_11N 0x0a2c #define BIT_MASK_CCK_CNT_KEEP BIT(12) #define BIT_MASK_CCK_CNT_EN BIT(13) @@ -102,19 +177,57 @@ struct rtw8723d_efuse { #define BIT_MASK_CCK_FA_LSB GENMASK(15, 8) #define REG_OFDM_FA_HOLDC_11N 0x0c00 #define BIT_MASK_OFDM_FA_KEEP BIT(31) +#define REG_BB_RX_PATH_11N 0x0c04 +#define REG_TRMUX_11N 0x0c08 #define REG_OFDM_FA_RSTC_11N 0x0c0c #define BIT_MASK_OFDM_FA_RST BIT(31) +#define REG_A_RXIQI 0x0c14 +#define BIT_MASK_RXIQ_S1_X 0x000003FF +#define BIT_MASK_RXIQ_S1_Y1 0x0000FC00 +#define BIT_SET_RXIQ_S1_Y1(y) ((y) & 0x3F) #define REG_OFDM0_RXDSP 0x0c40 #define BIT_MASK_RXDSP GENMASK(28, 24) #define BIT_EN_RXDSP BIT(9) +#define REG_OFDM_0_ECCA_THRESHOLD 0x0c4c +#define BIT_MASK_OFDM0_EXT_A BIT(31) +#define BIT_MASK_OFDM0_EXT_C BIT(29) +#define BIT_MASK_OFDM0_EXTS (BIT(31) | BIT(29) | BIT(28)) +#define BIT_SET_OFDM0_EXTS(a, c, d) (((a) << 31) | ((c) << 29) | ((d) << 28)) #define REG_OFDM0_XAAGC1 0x0c50 #define REG_OFDM0_XBAGC1 0x0c58 +#define REG_AGCRSSI 0x0c78 +#define REG_OFDM_0_XA_TX_IQ_IMBALANCE 0x0c80 +#define BIT_MASK_TXIQ_ELM_A 0x03ff +#define BIT_SET_TXIQ_ELM_ACD(a, c, d) (((d) << 22) | (((c) & 0x3F) << 16) | \ + ((a) & 0x03ff)) +#define BIT_MASK_TXIQ_ELM_C GENMASK(21, 16) +#define BIT_SET_TXIQ_ELM_C2(c) ((c) & 0x3F) +#define BIT_MASK_TXIQ_ELM_D GENMASK(31, 22) +#define REG_TXIQK_MATRIXA_LSB2_11N 0x0c94 +#define BIT_SET_TXIQ_ELM_C1(c) (((c) & 0x000003C0) >> 6) +#define REG_RXIQK_MATRIX_LSB_11N 0x0ca0 +#define BIT_MASK_RXIQ_S1_Y2 0xF0000000 +#define BIT_SET_RXIQ_S1_Y2(y) (((y) >> 6) & 0xF) +#define REG_TXIQ_AB_S0 0x0cd0 +#define BIT_MASK_TXIQ_A_S0 0x000007FE +#define BIT_MASK_TXIQ_A_EXT_S0 BIT(0) +#define BIT_MASK_TXIQ_B_S0 0x0007E000 +#define REG_TXIQ_CD_S0 0x0cd4 +#define BIT_MASK_TXIQ_C_S0 0x000007FE +#define BIT_MASK_TXIQ_C_EXT_S0 BIT(0) +#define BIT_MASK_TXIQ_D_S0 GENMASK(22, 13) +#define BIT_MASK_TXIQ_D_EXT_S0 BIT(12) +#define REG_RXIQ_AB_S0 0x0cd8 +#define BIT_MASK_RXIQ_X_S0 0x000003FF +#define BIT_MASK_RXIQ_Y_S0 0x003FF000 #define REG_OFDM_FA_TYPE1_11N 0x0cf0 #define BIT_MASK_OFDM_FF_CNT GENMASK(15, 0) #define BIT_MASK_OFDM_SF_CNT GENMASK(31, 16) #define REG_OFDM_FA_RSTD_11N 0x0d00 #define BIT_MASK_OFDM_FA_RST1 BIT(27) #define BIT_MASK_OFDM_FA_KEEP1 BIT(31) +#define REG_CTX 0x0d03 +#define 
BIT_MASK_CTX_TYPE GENMASK(6, 4) #define REG_OFDM1_CFOTRK 0x0d2c #define BIT_EN_CFOTRK BIT(28) #define REG_OFDM1_CSI1 0x0d40 @@ -129,6 +242,32 @@ struct rtw8723d_efuse { #define BIT_MASK_OFDM_CRC_CNT GENMASK(31, 16) #define REG_OFDM_FA_TYPE4_11N 0x0da8 #define BIT_MASK_OFDM_MNS_CNT GENMASK(15, 0) +#define REG_FPGA0_IQK_11N 0x0e28 +#define BIT_MASK_IQK_MOD 0xffffff00 +#define EN_IQK 0x808000 +#define RST_IQK 0x000000 +#define REG_TXIQK_TONE_A_11N 0x0e30 +#define REG_RXIQK_TONE_A_11N 0x0e34 +#define REG_TXIQK_PI_A_11N 0x0e38 +#define REG_RXIQK_PI_A_11N 0x0e3c +#define REG_TXIQK_11N 0x0e40 +#define BIT_SET_TXIQK_11N(x, y) (0x80007C00 | ((x) << 16) | (y)) +#define REG_RXIQK_11N 0x0e44 +#define REG_IQK_AGC_PTS_11N 0x0e48 +#define REG_IQK_AGC_RSP_11N 0x0e4c +#define REG_TX_IQK_TONE_B 0x0e50 +#define REG_RX_IQK_TONE_B 0x0e54 +#define REG_IQK_RES_TX 0x0e94 +#define BIT_MASK_RES_TX GENMASK(25, 16) +#define REG_IQK_RES_TY 0x0e9c +#define BIT_MASK_RES_TY GENMASK(25, 16) +#define REG_IQK_RES_RX 0x0ea4 +#define BIT_MASK_RES_RX GENMASK(25, 16) +#define REG_IQK_RES_RY 0x0eac +#define BIT_IQK_TX_FAIL BIT(28) +#define BIT_IQK_RX_FAIL BIT(27) +#define BIT_IQK_DONE BIT(26) +#define BIT_MASK_RES_RY GENMASK(25, 16) #define REG_PAGE_F_RST_11N 0x0f14 #define BIT_MASK_F_RST_ALL BIT(16) #define REG_IGI_C_11N 0x0f84 diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723de.c b/drivers/net/wireless/realtek/rtw88/rtw8723de.c new file mode 100644 index 000000000000..c81eb4c33642 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw88/rtw8723de.c @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2018-2019 Realtek Corporation + */ + +#include <linux/module.h> +#include <linux/pci.h> +#include "rtw8723de.h" + +static const struct pci_device_id rtw_8723de_id_table[] = { + { + PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xD723), + .driver_data = (kernel_ulong_t)&rtw8723d_hw_spec + }, + {} +}; +MODULE_DEVICE_TABLE(pci, rtw_8723de_id_table); + +static struct pci_driver rtw_8723de_driver = { + .name = "rtw_8723de", + .id_table = rtw_8723de_id_table, + .probe = rtw_pci_probe, + .remove = rtw_pci_remove, + .driver.pm = &rtw_pm_ops, + .shutdown = rtw_pci_shutdown, +}; +module_pci_driver(rtw_8723de_driver); + +MODULE_AUTHOR("Realtek Corporation"); +MODULE_DESCRIPTION("Realtek 802.11n wireless 8723de driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723de.h b/drivers/net/wireless/realtek/rtw88/rtw8723de.h new file mode 100644 index 000000000000..ba3842360c20 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw88/rtw8723de.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright(c) 2018-2019 Realtek Corporation + */ + +#ifndef __RTW_8723DE_H_ +#define __RTW_8723DE_H_ + +extern const struct dev_pm_ops rtw_pm_ops; +extern struct rtw_chip_info rtw8723d_hw_spec; +int rtw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id); +void rtw_pci_remove(struct pci_dev *pdev); +void rtw_pci_shutdown(struct pci_dev *pdev); + +#endif diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.c b/drivers/net/wireless/realtek/rtw88/rtw8822b.c index 45636382dafd..e49bdd76ab9a 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822b.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.c @@ -2,6 +2,7 @@ /* Copyright(c) 2018-2019 Realtek Corporation */ +#include <linux/module.h> #include "main.h" #include "coex.h" #include "fw.h" @@ -2057,6 +2058,12 @@ static const struct rtw_hw_reg rtw8822b_dig[] = { [1] = { .addr = 0xe50, .mask = 0x7f }, }; 
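/*
 * Editor's aside, not part of the patch: this series moves LTE-coex
 * indirect register access behind a per-chip struct rtw_ltecoex_addr
 * (defined next for 8822b, and likewise for 8723d and 8822c) instead of
 * fixed 8822-series addresses. A minimal sketch of the consumer side,
 * mirroring the ltecoex_read_reg() change to util.c later in this diff:
 */
static bool sketch_ltecoex_read(struct rtw_dev *rtwdev, u16 offset, u32 *val)
{
	const struct rtw_ltecoex_addr *lte = rtwdev->chip->ltecoex_addr;

	if (!check_hw_ready(rtwdev, lte->ctrl, LTECOEX_READY, 1))
		return false;	/* indirect interface still busy */

	rtw_write32(rtwdev, lte->ctrl, 0x800F0000 | offset);	/* issue read */
	*val = rtw_read32(rtwdev, lte->rdata);			/* latch result */
	return true;
}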
+static const struct rtw_ltecoex_addr rtw8822b_ltecoex_addr = { + .ctrl = LTECOEX_ACCESS_CTRL, + .wdata = LTECOEX_WRITE_DATA, + .rdata = LTECOEX_READ_DATA, +}; + static const struct rtw_page_table page_table_8822b[] = { {64, 64, 64, 64, 1}, {64, 64, 64, 64, 1}, @@ -2083,6 +2090,22 @@ static const struct rtw_rqpn rqpn_table_8822b[] = { RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH}, }; +static struct rtw_prioq_addrs prioq_addrs_8822b = { + .prio[RTW_DMA_MAPPING_EXTRA] = { + .rsvd = REG_FIFOPAGE_INFO_4, .avail = REG_FIFOPAGE_INFO_4 + 2, + }, + .prio[RTW_DMA_MAPPING_LOW] = { + .rsvd = REG_FIFOPAGE_INFO_2, .avail = REG_FIFOPAGE_INFO_2 + 2, + }, + .prio[RTW_DMA_MAPPING_NORMAL] = { + .rsvd = REG_FIFOPAGE_INFO_3, .avail = REG_FIFOPAGE_INFO_3 + 2, + }, + .prio[RTW_DMA_MAPPING_HIGH] = { + .rsvd = REG_FIFOPAGE_INFO_1, .avail = REG_FIFOPAGE_INFO_1 + 2, + }, + .wsize = true, +}; + static struct rtw_chip_ops rtw8822b_ops = { .phy_set_param = rtw8822b_phy_set_param, .read_efuse = rtw8822b_read_efuse, @@ -2433,11 +2456,13 @@ struct rtw_chip_info rtw8822b_hw_spec = { .pwr_off_seq = card_disable_flow_8822b, .page_table = page_table_8822b, .rqpn_table = rqpn_table_8822b, + .prioq_addrs = &prioq_addrs_8822b, .intf_table = &phy_para_table_8822b, .dig = rtw8822b_dig, .dig_cck = NULL, .rf_base_addr = {0x2800, 0x2c00}, .rf_sipi_addr = {0xc90, 0xe90}, + .ltecoex_addr = &rtw8822b_ltecoex_addr, .mac_tbl = &rtw8822b_mac_tbl, .agc_tbl = &rtw8822b_agc_tbl, .bb_tbl = &rtw8822b_bb_tbl, @@ -2482,3 +2507,7 @@ struct rtw_chip_info rtw8822b_hw_spec = { EXPORT_SYMBOL(rtw8822b_hw_spec); MODULE_FIRMWARE("rtw88/rtw8822b_fw.bin"); + +MODULE_AUTHOR("Realtek Corporation"); +MODULE_DESCRIPTION("Realtek 802.11ac wireless 8822b driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822be.c b/drivers/net/wireless/realtek/rtw88/rtw8822be.c new file mode 100644 index 000000000000..921916ae15ca --- /dev/null +++ b/drivers/net/wireless/realtek/rtw88/rtw8822be.c @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2018-2019 Realtek Corporation + */ + +#include <linux/module.h> +#include <linux/pci.h> +#include "rtw8822be.h" + +static const struct pci_device_id rtw_8822be_id_table[] = { + { + PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xB822), + .driver_data = (kernel_ulong_t)&rtw8822b_hw_spec + }, + {} +}; +MODULE_DEVICE_TABLE(pci, rtw_8822be_id_table); + +static struct pci_driver rtw_8822be_driver = { + .name = "rtw_8822be", + .id_table = rtw_8822be_id_table, + .probe = rtw_pci_probe, + .remove = rtw_pci_remove, + .driver.pm = &rtw_pm_ops, + .shutdown = rtw_pci_shutdown, +}; +module_pci_driver(rtw_8822be_driver); + +MODULE_AUTHOR("Realtek Corporation"); +MODULE_DESCRIPTION("Realtek 802.11ac wireless 8822be driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822be.h b/drivers/net/wireless/realtek/rtw88/rtw8822be.h new file mode 100644 index 000000000000..d823ca059f5c --- /dev/null +++ b/drivers/net/wireless/realtek/rtw88/rtw8822be.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright(c) 2018-2019 Realtek Corporation + */ + +#ifndef __RTW_8822BE_H_ +#define __RTW_8822BE_H_ + +extern const struct dev_pm_ops rtw_pm_ops; +extern struct rtw_chip_info rtw8822b_hw_spec; +int rtw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id); +void rtw_pci_remove(struct pci_dev *pdev); +void rtw_pci_shutdown(struct pci_dev *pdev); + +#endif diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c 
b/drivers/net/wireless/realtek/rtw88/rtw8822c.c index 64b77a7cbffd..c3d72ef611c6 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c @@ -2,6 +2,7 @@ /* Copyright(c) 2018-2019 Realtek Corporation */ +#include <linux/module.h> #include "main.h" #include "coex.h" #include "fw.h" @@ -1036,7 +1037,7 @@ static void rtw8822c_set_power_trim(struct rtw_dev *rtwdev, s8 bb_gain[2][8]) static void rtw8822c_power_trim(struct rtw_dev *rtwdev) { u8 pg_pwr = 0xff, i, path, idx; - s8 bb_gain[2][8] = {0}; + s8 bb_gain[2][8] = {}; u16 rf_efuse_2g[3] = {PPG_2GL_TXAB, PPG_2GM_TXAB, PPG_2GH_TXAB}; u16 rf_efuse_5g[2][5] = {{PPG_5GL1_TXA, PPG_5GL2_TXA, PPG_5GM1_TXA, PPG_5GM2_TXA, PPG_5GH1_TXA}, @@ -1495,7 +1496,6 @@ static void rtw8822c_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw, { if (IS_CH_2G_BAND(channel)) { rtw_write32_clr(rtwdev, REG_BGCTRL, BITS_RX_IQ_WEIGHT); - rtw_write32_mask(rtwdev, REG_RXCCKSEL, 0xf0000000, 0x8); rtw_write32_set(rtwdev, REG_TXF4, BIT(20)); rtw_write32_clr(rtwdev, REG_CCK_CHECK, BIT_CHECK_CCK_EN); rtw_write32_clr(rtwdev, REG_CCKTXONLY, BIT_BB_CCK_CHECK_EN); @@ -1563,7 +1563,6 @@ static void rtw8822c_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw, rtw_write32_set(rtwdev, REG_CCK_CHECK, BIT_CHECK_CCK_EN); rtw_write32_set(rtwdev, REG_BGCTRL, BITS_RX_IQ_WEIGHT); rtw_write32_clr(rtwdev, REG_TXF4, BIT(20)); - rtw_write32_mask(rtwdev, REG_RXCCKSEL, 0xf0000000, 0x0); rtw_write32_mask(rtwdev, REG_CCAMSK, 0x3F000000, 0x22); rtw_write32_mask(rtwdev, REG_TXDFIR0, 0x70, 0x3); if (IS_CH_5G_BAND_1(channel) || IS_CH_5G_BAND_2(channel)) { @@ -3907,6 +3906,12 @@ static const struct rtw_hw_reg rtw8822c_dig[] = { [1] = { .addr = 0x1d70, .mask = 0x7f00 }, }; +static const struct rtw_ltecoex_addr rtw8822c_ltecoex_addr = { + .ctrl = LTECOEX_ACCESS_CTRL, + .wdata = LTECOEX_WRITE_DATA, + .rdata = LTECOEX_READ_DATA, +}; + static const struct rtw_page_table page_table_8822c[] = { {64, 64, 64, 64, 1}, {64, 64, 64, 64, 1}, @@ -3933,6 +3938,22 @@ static const struct rtw_rqpn rqpn_table_8822c[] = { RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH}, }; +static struct rtw_prioq_addrs prioq_addrs_8822c = { + .prio[RTW_DMA_MAPPING_EXTRA] = { + .rsvd = REG_FIFOPAGE_INFO_4, .avail = REG_FIFOPAGE_INFO_4 + 2, + }, + .prio[RTW_DMA_MAPPING_LOW] = { + .rsvd = REG_FIFOPAGE_INFO_2, .avail = REG_FIFOPAGE_INFO_2 + 2, + }, + .prio[RTW_DMA_MAPPING_NORMAL] = { + .rsvd = REG_FIFOPAGE_INFO_3, .avail = REG_FIFOPAGE_INFO_3 + 2, + }, + .prio[RTW_DMA_MAPPING_HIGH] = { + .rsvd = REG_FIFOPAGE_INFO_1, .avail = REG_FIFOPAGE_INFO_1 + 2, + }, + .wsize = true, +}; + static struct rtw_chip_ops rtw8822c_ops = { .phy_set_param = rtw8822c_phy_set_param, .read_efuse = rtw8822c_read_efuse, @@ -4295,11 +4316,13 @@ struct rtw_chip_info rtw8822c_hw_spec = { .pwr_off_seq = card_disable_flow_8822c, .page_table = page_table_8822c, .rqpn_table = rqpn_table_8822c, + .prioq_addrs = &prioq_addrs_8822c, .intf_table = &phy_para_table_8822c, .dig = rtw8822c_dig, .dig_cck = NULL, .rf_base_addr = {0x3c00, 0x4c00}, .rf_sipi_addr = {0x1808, 0x4108}, + .ltecoex_addr = &rtw8822c_ltecoex_addr, .mac_tbl = &rtw8822c_mac_tbl, .agc_tbl = &rtw8822c_agc_tbl, .bb_tbl = &rtw8822c_bb_tbl, @@ -4353,3 +4376,7 @@ EXPORT_SYMBOL(rtw8822c_hw_spec); MODULE_FIRMWARE("rtw88/rtw8822c_fw.bin"); MODULE_FIRMWARE("rtw88/rtw8822c_wow_fw.bin"); + +MODULE_AUTHOR("Realtek Corporation"); +MODULE_DESCRIPTION("Realtek 802.11ac wireless 8822c driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git 
a/drivers/net/wireless/realtek/rtw88/rtw8822ce.c b/drivers/net/wireless/realtek/rtw88/rtw8822ce.c new file mode 100644 index 000000000000..7b6bd990651e --- /dev/null +++ b/drivers/net/wireless/realtek/rtw88/rtw8822ce.c @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2018-2019 Realtek Corporation + */ + +#include <linux/module.h> +#include <linux/pci.h> +#include "rtw8822ce.h" + +static const struct pci_device_id rtw_8822ce_id_table[] = { + { + PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xC822), + .driver_data = (kernel_ulong_t)&rtw8822c_hw_spec + }, + {} +}; +MODULE_DEVICE_TABLE(pci, rtw_8822ce_id_table); + +static struct pci_driver rtw_8822ce_driver = { + .name = "rtw_8822ce", + .id_table = rtw_8822ce_id_table, + .probe = rtw_pci_probe, + .remove = rtw_pci_remove, + .driver.pm = &rtw_pm_ops, + .shutdown = rtw_pci_shutdown, +}; +module_pci_driver(rtw_8822ce_driver); + +MODULE_AUTHOR("Realtek Corporation"); +MODULE_DESCRIPTION("Realtek 802.11ac wireless 8822ce driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822ce.h b/drivers/net/wireless/realtek/rtw88/rtw8822ce.h new file mode 100644 index 000000000000..c2c0e8675d74 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw88/rtw8822ce.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright(c) 2018-2019 Realtek Corporation + */ + +#ifndef __RTW_8822CE_H_ +#define __RTW_8822CE_H_ + +extern const struct dev_pm_ops rtw_pm_ops; +extern struct rtw_chip_info rtw8822c_hw_spec; +int rtw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id); +void rtw_pci_remove(struct pci_dev *pdev); +void rtw_pci_shutdown(struct pci_dev *pdev); + +#endif diff --git a/drivers/net/wireless/realtek/rtw88/rx.c b/drivers/net/wireless/realtek/rtw88/rx.c index 9b90339ab697..7087e385a9b3 100644 --- a/drivers/net/wireless/realtek/rtw88/rx.c +++ b/drivers/net/wireless/realtek/rtw88/rx.c @@ -191,3 +191,4 @@ void rtw_rx_fill_rx_status(struct rtw_dev *rtwdev, rtw_rx_addr_match(rtwdev, pkt_stat, hdr); } +EXPORT_SYMBOL(rtw_rx_fill_rx_status); diff --git a/drivers/net/wireless/realtek/rtw88/sec.c b/drivers/net/wireless/realtek/rtw88/sec.c index d0d7fbb10d58..ce46e5b4a60a 100644 --- a/drivers/net/wireless/realtek/rtw88/sec.c +++ b/drivers/net/wireless/realtek/rtw88/sec.c @@ -44,7 +44,7 @@ void rtw_sec_write_cam(struct rtw_dev *rtwdev, write_cmd = RTW_SEC_CMD_WRITE_ENABLE | RTW_SEC_CMD_POLLING; addr = hw_key_idx << RTW_SEC_CAM_ENTRY_SHIFT; - for (i = 5; i >= 0; i--) { + for (i = 7; i >= 0; i--) { switch (i) { case 0: content = ((key->keyidx & 0x3)) | @@ -60,6 +60,10 @@ void rtw_sec_write_cam(struct rtw_dev *rtwdev, (cam->addr[4] << 16) | (cam->addr[5] << 24); break; + case 6: + case 7: + content = 0; + break; default: j = (i - 2) << 2; content = (key->key[j]) | diff --git a/drivers/net/wireless/realtek/rtw88/util.c b/drivers/net/wireless/realtek/rtw88/util.c index 10f1117c0cfb..2c515af214e7 100644 --- a/drivers/net/wireless/realtek/rtw88/util.c +++ b/drivers/net/wireless/realtek/rtw88/util.c @@ -19,25 +19,32 @@ bool check_hw_ready(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 target) return false; } +EXPORT_SYMBOL(check_hw_ready); bool ltecoex_read_reg(struct rtw_dev *rtwdev, u16 offset, u32 *val) { - if (!check_hw_ready(rtwdev, LTECOEX_ACCESS_CTRL, LTECOEX_READY, 1)) + struct rtw_chip_info *chip = rtwdev->chip; + const struct rtw_ltecoex_addr *ltecoex = chip->ltecoex_addr; + + if (!check_hw_ready(rtwdev, ltecoex->ctrl, LTECOEX_READY, 1)) return false; - 
rtw_write32(rtwdev, LTECOEX_ACCESS_CTRL, 0x800F0000 | offset); - *val = rtw_read32(rtwdev, LTECOEX_READ_DATA); + rtw_write32(rtwdev, ltecoex->ctrl, 0x800F0000 | offset); + *val = rtw_read32(rtwdev, ltecoex->rdata); return true; } bool ltecoex_reg_write(struct rtw_dev *rtwdev, u16 offset, u32 value) { - if (!check_hw_ready(rtwdev, LTECOEX_ACCESS_CTRL, LTECOEX_READY, 1)) + struct rtw_chip_info *chip = rtwdev->chip; + const struct rtw_ltecoex_addr *ltecoex = chip->ltecoex_addr; + + if (!check_hw_ready(rtwdev, ltecoex->ctrl, LTECOEX_READY, 1)) return false; - rtw_write32(rtwdev, LTECOEX_WRITE_DATA, value); - rtw_write32(rtwdev, LTECOEX_ACCESS_CTRL, 0xC00F0000 | offset); + rtw_write32(rtwdev, ltecoex->wdata, value); + rtw_write32(rtwdev, ltecoex->ctrl, 0xC00F0000 | offset); return true; } @@ -70,6 +77,7 @@ void rtw_restore_reg(struct rtw_dev *rtwdev, } } } +EXPORT_SYMBOL(rtw_restore_reg); void rtw_desc_to_mcsrate(u16 rate, u8 *mcs, u8 *nss) { diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c index c8f8fe5497a8..8852a1832951 100644 --- a/drivers/net/wireless/rndis_wlan.c +++ b/drivers/net/wireless/rndis_wlan.c @@ -201,7 +201,7 @@ struct ndis_80211_pmkid_candidate { struct ndis_80211_pmkid_cand_list { __le32 version; __le32 num_candidates; - struct ndis_80211_pmkid_candidate candidate_list[0]; + struct ndis_80211_pmkid_candidate candidate_list[]; } __packed; struct ndis_80211_status_indication { @@ -246,12 +246,12 @@ struct ndis_80211_bssid_ex { __le32 net_infra; u8 rates[NDIS_802_11_LENGTH_RATES_EX]; __le32 ie_length; - u8 ies[0]; + u8 ies[]; } __packed; struct ndis_80211_bssid_list_ex { __le32 num_items; - struct ndis_80211_bssid_ex bssid[0]; + struct ndis_80211_bssid_ex bssid[]; } __packed; struct ndis_80211_fixed_ies { @@ -312,17 +312,11 @@ struct ndis_80211_assoc_info { __le32 offset_resp_ies; } __packed; -struct ndis_80211_auth_encr_pair { - __le32 auth_mode; - __le32 encr_mode; -} __packed; - struct ndis_80211_capability { __le32 length; __le32 version; __le32 num_pmkids; __le32 num_auth_encr_pair; - struct ndis_80211_auth_encr_pair auth_encr_pair[0]; } __packed; struct ndis_80211_bssid_info { @@ -333,7 +327,7 @@ struct ndis_80211_bssid_info { struct ndis_80211_pmkid { __le32 length; __le32 bssid_info_count; - struct ndis_80211_bssid_info bssid_info[0]; + struct ndis_80211_bssid_info bssid_info[]; } __packed; /* @@ -3109,8 +3103,7 @@ static int rndis_wlan_get_caps(struct usbnet *usbdev, struct wiphy *wiphy) __le32 num_items; __le32 items[8]; } networks_supported; - struct ndis_80211_capability *caps; - u8 caps_buf[sizeof(*caps) + sizeof(caps->auth_encr_pair) * 16]; + struct ndis_80211_capability caps; int len, retval, i, n; struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); @@ -3140,19 +3133,18 @@ static int rndis_wlan_get_caps(struct usbnet *usbdev, struct wiphy *wiphy) } /* get device 802.11 capabilities, number of PMKIDs */ - caps = (struct ndis_80211_capability *)caps_buf; - len = sizeof(caps_buf); + len = sizeof(caps); retval = rndis_query_oid(usbdev, RNDIS_OID_802_11_CAPABILITY, - caps, &len); + &caps, &len); if (retval >= 0) { netdev_dbg(usbdev->net, "RNDIS_OID_802_11_CAPABILITY -> len %d, " "ver %d, pmkids %d, auth-encr-pairs %d\n", - le32_to_cpu(caps->length), - le32_to_cpu(caps->version), - le32_to_cpu(caps->num_pmkids), - le32_to_cpu(caps->num_auth_encr_pair)); - wiphy->max_num_pmkids = le32_to_cpu(caps->num_pmkids); + le32_to_cpu(caps.length), + le32_to_cpu(caps.version), + le32_to_cpu(caps.num_pmkids), + 
le32_to_cpu(caps.num_auth_encr_pair)); + wiphy->max_num_pmkids = le32_to_cpu(caps.num_pmkids); } else wiphy->max_num_pmkids = 0; diff --git a/drivers/net/wireless/st/cw1200/cw1200_sdio.c b/drivers/net/wireless/st/cw1200/cw1200_sdio.c index 43e012073dbf..b65ec14136c7 100644 --- a/drivers/net/wireless/st/cw1200/cw1200_sdio.c +++ b/drivers/net/wireless/st/cw1200/cw1200_sdio.c @@ -14,6 +14,7 @@ #include <linux/mmc/sdio_func.h> #include <linux/mmc/card.h> #include <linux/mmc/sdio.h> +#include <linux/mmc/sdio_ids.h> #include <net/mac80211.h> #include "cw1200.h" @@ -48,14 +49,6 @@ struct hwbus_priv { const struct cw1200_platform_data_sdio *pdata; }; -#ifndef SDIO_VENDOR_ID_STE -#define SDIO_VENDOR_ID_STE 0x0020 -#endif - -#ifndef SDIO_DEVICE_ID_STE_CW1200 -#define SDIO_DEVICE_ID_STE_CW1200 0x2280 -#endif - static const struct sdio_device_id cw1200_sdio_ids[] = { { SDIO_DEVICE(SDIO_VENDOR_ID_STE, SDIO_DEVICE_ID_STE_CW1200) }, { /* end: all zeroes */ }, diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index 4421fc656b1c..de6c8a7589ca 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -548,7 +548,7 @@ static int wlcore_irq_locked(struct wl1271 *wl) ret = wlcore_fw_status(wl, wl->fw_status); if (ret < 0) - goto out; + goto err_ret; wlcore_hw_tx_immediate_compl(wl); @@ -565,7 +565,7 @@ static int wlcore_irq_locked(struct wl1271 *wl) ret = -EIO; /* restarting the chip. ignore any other interrupt. */ - goto out; + goto err_ret; } if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) { @@ -575,7 +575,7 @@ static int wlcore_irq_locked(struct wl1271 *wl) ret = -EIO; /* restarting the chip. ignore any other interrupt. */ - goto out; + goto err_ret; } if (likely(intr & WL1271_ACX_INTR_DATA)) { @@ -583,7 +583,7 @@ static int wlcore_irq_locked(struct wl1271 *wl) ret = wlcore_rx(wl, wl->fw_status); if (ret < 0) - goto out; + goto err_ret; /* Check if any tx blocks were freed */ spin_lock_irqsave(&wl->wl_lock, flags); @@ -596,7 +596,7 @@ static int wlcore_irq_locked(struct wl1271 *wl) */ ret = wlcore_tx_work_locked(wl); if (ret < 0) - goto out; + goto err_ret; } else { spin_unlock_irqrestore(&wl->wl_lock, flags); } @@ -604,7 +604,7 @@ static int wlcore_irq_locked(struct wl1271 *wl) /* check for tx results */ ret = wlcore_hw_tx_delayed_compl(wl); if (ret < 0) - goto out; + goto err_ret; /* Make sure the deferred queues don't get too long */ defer_count = skb_queue_len(&wl->deferred_tx_queue) + @@ -617,14 +617,14 @@ static int wlcore_irq_locked(struct wl1271 *wl) wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A"); ret = wl1271_event_handle(wl, 0); if (ret < 0) - goto out; + goto err_ret; } if (intr & WL1271_ACX_INTR_EVENT_B) { wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B"); ret = wl1271_event_handle(wl, 1); if (ret < 0) - goto out; + goto err_ret; } if (intr & WL1271_ACX_INTR_INIT_COMPLETE) @@ -635,6 +635,7 @@ static int wlcore_irq_locked(struct wl1271 *wl) wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE"); } +err_ret: pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); @@ -1746,9 +1747,7 @@ static int __maybe_unused wl1271_op_suspend(struct ieee80211_hw *hw, ret = wl1271_configure_suspend(wl, wlvif, wow); if (ret < 0) { - mutex_unlock(&wl->mutex); - wl1271_warning("couldn't prepare device to suspend"); - return ret; + goto out_sleep; } } @@ -2698,12 +2697,16 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl, if (!wlcore_is_p2p_mgmt(wlvif)) { ret = wl12xx_cmd_role_disable(wl, 
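/*
 * The wlcore fixes in this region all repair the same asymmetry:
 * pm_runtime_get_sync() raises the usage counter even when it fails, so
 * every failing path must drop the reference. Generic sketch of the
 * pattern (the device pointer is illustrative):
 */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		/* the counter was bumped despite the failure; undo it
		 * without triggering the idle/suspend callbacks */
		pm_runtime_put_noidle(dev);
		return ret;
	}
	/* ... hardware access ... */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);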
&wlvif->role_id); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto deinit; + } } else { ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto deinit; + } } pm_runtime_mark_last_busy(wl->dev); @@ -3665,8 +3668,10 @@ void wlcore_regdomain_config(struct wl1271 *wl) goto out; ret = pm_runtime_get_sync(wl->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_autosuspend(wl->dev); goto out; + } ret = wlcore_cmd_regdomain_config_locked(wl); if (ret < 0) { diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c index 90e56d4c3df3..e20e18cd04ae 100644 --- a/drivers/net/wireless/ti/wlcore/tx.c +++ b/drivers/net/wireless/ti/wlcore/tx.c @@ -863,6 +863,7 @@ void wl1271_tx_work(struct work_struct *work) ret = wlcore_tx_work_locked(wl); if (ret < 0) { + pm_runtime_put_noidle(wl->dev); wl12xx_queue_recovery_work(wl); goto out; } diff --git a/drivers/nfc/st21nfca/dep.c b/drivers/nfc/st21nfca/dep.c index a1d69f9b2d4a..0b9ca6d20ffa 100644 --- a/drivers/nfc/st21nfca/dep.c +++ b/drivers/nfc/st21nfca/dep.c @@ -173,8 +173,10 @@ static int st21nfca_tm_send_atr_res(struct nfc_hci_dev *hdev, memcpy(atr_res->gbi, atr_req->gbi, gb_len); r = nfc_set_remote_general_bytes(hdev->ndev, atr_res->gbi, gb_len); - if (r < 0) + if (r < 0) { + kfree_skb(skb); return r; + } } info->dep_info.curr_nfc_dep_pni = 0; diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index e13c370de830..cc46e250fcac 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -989,6 +989,11 @@ static inline int nvme_process_cq(struct nvme_queue *nvmeq) while (nvme_cqe_pending(nvmeq)) { found++; + /* + * load-load control dependency between phase and the rest of + * the cqe requires a full read memory barrier + */ + dma_rmb(); nvme_handle_cqe(nvmeq, nvmeq->cq_head); nvme_update_cq_head(nvmeq); } @@ -1377,16 +1382,19 @@ static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown) /* * Called only on a device that has been disabled and after all other threads - * that can check this device's completion queues have synced. This is the - * last chance for the driver to see a natural completion before - * nvme_cancel_request() terminates all incomplete requests. + * that can check this device's completion queues have synced, except + * nvme_poll(). This is the last chance for the driver to see a natural + * completion before nvme_cancel_request() terminates all incomplete requests. 
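/*
 * Sketch of the ordering bug the dma_rmb() added above closes: the
 * phase-bit load that validates a CQE and the loads that consume the
 * CQE body are independent, so the CPU may reorder the latter ahead of
 * the former; a read barrier between them enforces the intended order.
 * nvme_cqe_pending() is paraphrased from the driver:
 */
static inline bool nvme_cqe_pending(struct nvme_queue *nvmeq)
{
	/* load #1: the phase bit says whether this CQE slot is valid */
	return (le16_to_cpu(nvmeq->cqes[nvmeq->cq_head].status) & 1) ==
			nvmeq->cq_phase;
}

	while (nvme_cqe_pending(nvmeq)) {
		found++;
		dma_rmb();	/* order load #1 before the CQE body loads */
		nvme_handle_cqe(nvmeq, nvmeq->cq_head);	/* loads #2..#n */
		nvme_update_cq_head(nvmeq);
	}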
*/ static void nvme_reap_pending_cqes(struct nvme_dev *dev) { int i; - for (i = dev->ctrl.queue_count - 1; i > 0; i--) + for (i = dev->ctrl.queue_count - 1; i > 0; i--) { + spin_lock(&dev->queues[i].cq_poll_lock); nvme_process_cq(&dev->queues[i]); + spin_unlock(&dev->queues[i].cq_poll_lock); + } } static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues, diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index c15a92163c1f..4c972d8abf31 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -1313,8 +1313,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, { struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); struct nvme_tcp_queue *queue = &ctrl->queues[qid]; - struct linger sol = { .l_onoff = 1, .l_linger = 0 }; - int ret, opt, rcv_pdu_size; + int ret, rcv_pdu_size; queue->ctrl = ctrl; INIT_LIST_HEAD(&queue->send_list); @@ -1337,60 +1336,24 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, } /* Single syn retry */ - opt = 1; - ret = kernel_setsockopt(queue->sock, IPPROTO_TCP, TCP_SYNCNT, - (char *)&opt, sizeof(opt)); - if (ret) { - dev_err(nctrl->device, - "failed to set TCP_SYNCNT sock opt %d\n", ret); - goto err_sock; - } + tcp_sock_set_syncnt(queue->sock->sk, 1); /* Set TCP no delay */ - opt = 1; - ret = kernel_setsockopt(queue->sock, IPPROTO_TCP, - TCP_NODELAY, (char *)&opt, sizeof(opt)); - if (ret) { - dev_err(nctrl->device, - "failed to set TCP_NODELAY sock opt %d\n", ret); - goto err_sock; - } + tcp_sock_set_nodelay(queue->sock->sk); /* * Cleanup whatever is sitting in the TCP transmit queue on socket * close. This is done to prevent stale data from being sent should * the network connection be restored before TCP times out. */ - ret = kernel_setsockopt(queue->sock, SOL_SOCKET, SO_LINGER, - (char *)&sol, sizeof(sol)); - if (ret) { - dev_err(nctrl->device, - "failed to set SO_LINGER sock opt %d\n", ret); - goto err_sock; - } + sock_no_linger(queue->sock->sk); - if (so_priority > 0) { - ret = kernel_setsockopt(queue->sock, SOL_SOCKET, SO_PRIORITY, - (char *)&so_priority, sizeof(so_priority)); - if (ret) { - dev_err(ctrl->ctrl.device, - "failed to set SO_PRIORITY sock opt, ret %d\n", - ret); - goto err_sock; - } - } + if (so_priority > 0) + sock_set_priority(queue->sock->sk, so_priority); /* Set socket type of service */ - if (nctrl->opts->tos >= 0) { - opt = nctrl->opts->tos; - ret = kernel_setsockopt(queue->sock, SOL_IP, IP_TOS, - (char *)&opt, sizeof(opt)); - if (ret) { - dev_err(nctrl->device, - "failed to set IP_TOS sock opt %d\n", ret); - goto err_sock; - } - } + if (nctrl->opts->tos >= 0) + ip_sock_set_tos(queue->sock->sk, nctrl->opts->tos); queue->sock->sk->sk_allocation = GFP_ATOMIC; nvme_tcp_set_queue_io_cpu(queue); diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c index f0da04e960f4..4546049a96b3 100644 --- a/drivers/nvme/target/tcp.c +++ b/drivers/nvme/target/tcp.c @@ -1429,7 +1429,6 @@ static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue) { struct socket *sock = queue->sock; struct inet_sock *inet = inet_sk(sock->sk); - struct linger sol = { .l_onoff = 1, .l_linger = 0 }; int ret; ret = kernel_getsockname(sock, @@ -1447,27 +1446,14 @@ static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue) * close. This is done to prevent stale data from being sent should * the network connection be restored before TCP times out. 
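/*
 * The TCP hunks in this series all make the same substitution: the
 * open-coded kernel_setsockopt() calls become dedicated helpers that
 * act on the struct sock directly and (tcp_sock_set_syncnt() aside)
 * cannot fail, which is why the error labels disappear. Condensed
 * before/after sketch:
 */
	/* before: every option was a fallible kernel_setsockopt() call */
	opt = 1;
	ret = kernel_setsockopt(sock, IPPROTO_TCP, TCP_NODELAY,
				(char *)&opt, sizeof(opt));
	if (ret)
		goto err_sock;

	/* after: void helpers, no error handling required */
	tcp_sock_set_nodelay(sock->sk);
	sock_no_linger(sock->sk);		/* SO_LINGER, l_linger = 0 */
	sock_set_priority(sock->sk, so_priority);
	ip_sock_set_tos(sock->sk, tos);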
*/ - ret = kernel_setsockopt(sock, SOL_SOCKET, SO_LINGER, - (char *)&sol, sizeof(sol)); - if (ret) - return ret; + sock_no_linger(sock->sk); - if (so_priority > 0) { - ret = kernel_setsockopt(sock, SOL_SOCKET, SO_PRIORITY, - (char *)&so_priority, sizeof(so_priority)); - if (ret) - return ret; - } + if (so_priority > 0) + sock_set_priority(sock->sk, so_priority); /* Set socket type of service */ - if (inet->rcv_tos > 0) { - int tos = inet->rcv_tos; - - ret = kernel_setsockopt(sock, SOL_IP, IP_TOS, - (char *)&tos, sizeof(tos)); - if (ret) - return ret; - } + if (inet->rcv_tos > 0) + ip_sock_set_tos(sock->sk, inet->rcv_tos); write_lock_bh(&sock->sk->sk_callback_lock); sock->sk->sk_user_data = queue; @@ -1588,7 +1574,7 @@ static int nvmet_tcp_add_port(struct nvmet_port *nport) { struct nvmet_tcp_port *port; __kernel_sa_family_t af; - int opt, ret; + int ret; port = kzalloc(sizeof(*port), GFP_KERNEL); if (!port) @@ -1632,30 +1618,10 @@ static int nvmet_tcp_add_port(struct nvmet_port *nport) port->sock->sk->sk_user_data = port; port->data_ready = port->sock->sk->sk_data_ready; port->sock->sk->sk_data_ready = nvmet_tcp_listen_data_ready; - - opt = 1; - ret = kernel_setsockopt(port->sock, IPPROTO_TCP, - TCP_NODELAY, (char *)&opt, sizeof(opt)); - if (ret) { - pr_err("failed to set TCP_NODELAY sock opt %d\n", ret); - goto err_sock; - } - - ret = kernel_setsockopt(port->sock, SOL_SOCKET, SO_REUSEADDR, - (char *)&opt, sizeof(opt)); - if (ret) { - pr_err("failed to set SO_REUSEADDR sock opt %d\n", ret); - goto err_sock; - } - - if (so_priority > 0) { - ret = kernel_setsockopt(port->sock, SOL_SOCKET, SO_PRIORITY, - (char *)&so_priority, sizeof(so_priority)); - if (ret) { - pr_err("failed to set SO_PRIORITY sock opt %d\n", ret); - goto err_sock; - } - } + sock_set_reuseaddr(port->sock->sk); + tcp_sock_set_nodelay(port->sock->sk); + if (so_priority > 0) + sock_set_priority(port->sock->sk, so_priority); ret = kernel_bind(port->sock, (struct sockaddr *)&port->addr, sizeof(port->addr)); diff --git a/drivers/pinctrl/actions/pinctrl-s700.c b/drivers/pinctrl/actions/pinctrl-s700.c index 47a4ccd9fed4..f579a6593f37 100644 --- a/drivers/pinctrl/actions/pinctrl-s700.c +++ b/drivers/pinctrl/actions/pinctrl-s700.c @@ -1435,7 +1435,7 @@ static const char * const sd2_groups[] = { static const char * const i2c0_groups[] = { "uart0_rx_mfp", "uart0_tx_mfp", - "i2c0_mfp_mfp", + "i2c0_mfp", }; static const char * const i2c1_groups[] = { diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c index b409642f168d..9b821c9cbd16 100644 --- a/drivers/pinctrl/intel/pinctrl-baytrail.c +++ b/drivers/pinctrl/intel/pinctrl-baytrail.c @@ -1286,6 +1286,7 @@ static const struct gpio_chip byt_gpio_chip = { .direction_output = byt_gpio_direction_output, .get = byt_gpio_get, .set = byt_gpio_set, + .set_config = gpiochip_generic_config, .dbg_show = byt_gpio_dbg_show, }; diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c index 4c74fdde576d..1093a6105d40 100644 --- a/drivers/pinctrl/intel/pinctrl-cherryview.c +++ b/drivers/pinctrl/intel/pinctrl-cherryview.c @@ -1479,11 +1479,15 @@ static void chv_gpio_irq_handler(struct irq_desc *desc) struct chv_pinctrl *pctrl = gpiochip_get_data(gc); struct irq_chip *chip = irq_desc_get_chip(desc); unsigned long pending; + unsigned long flags; u32 intr_line; chained_irq_enter(chip, desc); + raw_spin_lock_irqsave(&chv_lock, flags); pending = readl(pctrl->regs + CHV_INTSTAT); + raw_spin_unlock_irqrestore(&chv_lock, 
flags); + for_each_set_bit(intr_line, &pending, pctrl->community->nirqs) { unsigned int irq, offset; diff --git a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c index 330c8f077b73..4d7a86a5a37b 100644 --- a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c +++ b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c @@ -15,17 +15,18 @@ #include "pinctrl-intel.h" -#define SPT_PAD_OWN 0x020 -#define SPT_PADCFGLOCK 0x0a0 -#define SPT_HOSTSW_OWN 0x0d0 -#define SPT_GPI_IS 0x100 -#define SPT_GPI_IE 0x120 +#define SPT_PAD_OWN 0x020 +#define SPT_H_PADCFGLOCK 0x090 +#define SPT_LP_PADCFGLOCK 0x0a0 +#define SPT_HOSTSW_OWN 0x0d0 +#define SPT_GPI_IS 0x100 +#define SPT_GPI_IE 0x120 #define SPT_COMMUNITY(b, s, e) \ { \ .barno = (b), \ .padown_offset = SPT_PAD_OWN, \ - .padcfglock_offset = SPT_PADCFGLOCK, \ + .padcfglock_offset = SPT_LP_PADCFGLOCK, \ .hostown_offset = SPT_HOSTSW_OWN, \ .is_offset = SPT_GPI_IS, \ .ie_offset = SPT_GPI_IE, \ @@ -47,7 +48,7 @@ { \ .barno = (b), \ .padown_offset = SPT_PAD_OWN, \ - .padcfglock_offset = SPT_PADCFGLOCK, \ + .padcfglock_offset = SPT_H_PADCFGLOCK, \ .hostown_offset = SPT_HOSTSW_OWN, \ .is_offset = SPT_GPI_IS, \ .ie_offset = SPT_GPI_IE, \ diff --git a/drivers/pinctrl/mediatek/pinctrl-paris.c b/drivers/pinctrl/mediatek/pinctrl-paris.c index 3853ec3a2a8e..ee305f140400 100644 --- a/drivers/pinctrl/mediatek/pinctrl-paris.c +++ b/drivers/pinctrl/mediatek/pinctrl-paris.c @@ -164,8 +164,6 @@ static int mtk_pinconf_get(struct pinctrl_dev *pctldev, case MTK_PIN_CONFIG_PU_ADV: case MTK_PIN_CONFIG_PD_ADV: if (hw->soc->adv_pull_get) { - bool pullup; - pullup = param == MTK_PIN_CONFIG_PU_ADV; err = hw->soc->adv_pull_get(hw, desc, pullup, &ret); } else diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c index 9a398a211d30..85858c1d56d0 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm.c +++ b/drivers/pinctrl/qcom/pinctrl-msm.c @@ -697,7 +697,7 @@ static void msm_gpio_update_dual_edge_pos(struct msm_pinctrl *pctrl, pol = msm_readl_intr_cfg(pctrl, g); pol ^= BIT(g->intr_polarity_bit); - msm_writel_intr_cfg(val, pctrl, g); + msm_writel_intr_cfg(pol, pctrl, g); val2 = msm_readl_io(pctrl, g) & BIT(g->in_bit); intstat = msm_readl_intr_status(pctrl, g); @@ -1034,6 +1034,29 @@ static void msm_gpio_irq_relres(struct irq_data *d) module_put(gc->owner); } +static int msm_gpio_irq_set_affinity(struct irq_data *d, + const struct cpumask *dest, bool force) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct msm_pinctrl *pctrl = gpiochip_get_data(gc); + + if (d->parent_data && test_bit(d->hwirq, pctrl->skip_wake_irqs)) + return irq_chip_set_affinity_parent(d, dest, force); + + return 0; +} + +static int msm_gpio_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct msm_pinctrl *pctrl = gpiochip_get_data(gc); + + if (d->parent_data && test_bit(d->hwirq, pctrl->skip_wake_irqs)) + return irq_chip_set_vcpu_affinity_parent(d, vcpu_info); + + return 0; +} + static void msm_gpio_irq_handler(struct irq_desc *desc) { struct gpio_chip *gc = irq_desc_get_handler_data(desc); @@ -1132,6 +1155,8 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl) pctrl->irq_chip.irq_set_wake = msm_gpio_irq_set_wake; pctrl->irq_chip.irq_request_resources = msm_gpio_irq_reqres; pctrl->irq_chip.irq_release_resources = msm_gpio_irq_relres; + pctrl->irq_chip.irq_set_affinity = msm_gpio_irq_set_affinity; + pctrl->irq_chip.irq_set_vcpu_affinity = msm_gpio_irq_set_vcpu_affinity; np = 
of_parse_phandle(pctrl->dev->of_node, "wakeup-parent", 0); if (np) { diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c index fc984a8828fb..03a246e60fd9 100644 --- a/drivers/ptp/ptp_clock.c +++ b/drivers/ptp/ptp_clock.c @@ -147,8 +147,14 @@ static int ptp_clock_adjtime(struct posix_clock *pc, struct __kernel_timex *tx) err = ops->adjfreq(ops, ppb); ptp->dialed_frequency = tx->freq; } else if (tx->modes & ADJ_OFFSET) { - if (ops->adjphase) - err = ops->adjphase(ops, tx->offset); + if (ops->adjphase) { + s32 offset = tx->offset; + + if (!(tx->modes & ADJ_NANO)) + offset *= NSEC_PER_USEC; + + err = ops->adjphase(ops, offset); + } } else if (tx->modes == 0) { tx->freq = ptp->dialed_frequency; err = 0; diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c index 8155f59ece38..10af330153b5 100644 --- a/drivers/rapidio/devices/rio_mport_cdev.c +++ b/drivers/rapidio/devices/rio_mport_cdev.c @@ -877,6 +877,11 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode, rmcd_error("pinned %ld out of %ld pages", pinned, nr_pages); ret = -EFAULT; + /* + * Set nr_pages up to mean "how many pages to unpin, in + * the error handler: + */ + nr_pages = pinned; goto err_pg; } diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c index 437a6d822105..d06809eac16d 100644 --- a/drivers/s390/net/ctcm_main.c +++ b/drivers/s390/net/ctcm_main.c @@ -1698,43 +1698,6 @@ static void ctcm_remove_device(struct ccwgroup_device *cgdev) put_device(&cgdev->dev); } -static int ctcm_pm_suspend(struct ccwgroup_device *gdev) -{ - struct ctcm_priv *priv = dev_get_drvdata(&gdev->dev); - - if (gdev->state == CCWGROUP_OFFLINE) - return 0; - netif_device_detach(priv->channel[CTCM_READ]->netdev); - ctcm_close(priv->channel[CTCM_READ]->netdev); - if (!wait_event_timeout(priv->fsm->wait_q, - fsm_getstate(priv->fsm) == DEV_STATE_STOPPED, CTCM_TIME_5_SEC)) { - netif_device_attach(priv->channel[CTCM_READ]->netdev); - return -EBUSY; - } - ccw_device_set_offline(gdev->cdev[1]); - ccw_device_set_offline(gdev->cdev[0]); - return 0; -} - -static int ctcm_pm_resume(struct ccwgroup_device *gdev) -{ - struct ctcm_priv *priv = dev_get_drvdata(&gdev->dev); - int rc; - - if (gdev->state == CCWGROUP_OFFLINE) - return 0; - rc = ccw_device_set_online(gdev->cdev[1]); - if (rc) - goto err_out; - rc = ccw_device_set_online(gdev->cdev[0]); - if (rc) - goto err_out; - ctcm_open(priv->channel[CTCM_READ]->netdev); -err_out: - netif_device_attach(priv->channel[CTCM_READ]->netdev); - return rc; -} - static struct ccw_device_id ctcm_ids[] = { {CCW_DEVICE(0x3088, 0x08), .driver_info = ctcm_channel_type_parallel}, {CCW_DEVICE(0x3088, 0x1e), .driver_info = ctcm_channel_type_ficon}, @@ -1764,9 +1727,6 @@ static struct ccwgroup_driver ctcm_group_driver = { .remove = ctcm_remove_device, .set_online = ctcm_new_device, .set_offline = ctcm_shutdown_device, - .freeze = ctcm_pm_suspend, - .thaw = ctcm_pm_resume, - .restore = ctcm_pm_resume, }; static ssize_t group_store(struct device_driver *ddrv, const char *buf, diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c index 8f08b0a2917c..440219bcaa2b 100644 --- a/drivers/s390/net/lcs.c +++ b/drivers/s390/net/lcs.c @@ -2296,60 +2296,6 @@ lcs_remove_device(struct ccwgroup_device *ccwgdev) put_device(&ccwgdev->dev); } -static int lcs_pm_suspend(struct lcs_card *card) -{ - if (card->dev) - netif_device_detach(card->dev); - lcs_set_allowed_threads(card, 0); - lcs_wait_for_threads(card, 0xffffffff); - if (card->state != DEV_STATE_DOWN) - 
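/*
 * The ptp_clock.c change above encodes the adjtimex()/clock_adjtime()
 * convention: tx->offset is in nanoseconds only when ADJ_NANO is set,
 * microseconds otherwise, so it is scaled before reaching the driver's
 * adjphase() callback. A hypothetical userspace caller relying on that
 * convention (clkid would come from an open /dev/ptpN descriptor via
 * FD_TO_CLOCKID(), elided here):
 */
#include <sys/timex.h>

static int ptp_phase_nudge(clockid_t clkid)
{
	struct timex tx = {
		.modes	= ADJ_OFFSET | ADJ_NANO,
		.offset	= 500000,	/* 500 us, in ns since ADJ_NANO is set */
	};

	return clock_adjtime(clkid, &tx);
}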
__lcs_shutdown_device(card->gdev, 1); - return 0; -} - -static int lcs_pm_resume(struct lcs_card *card) -{ - int rc = 0; - - if (card->state == DEV_STATE_RECOVER) - rc = lcs_new_device(card->gdev); - if (card->dev) - netif_device_attach(card->dev); - if (rc) { - dev_warn(&card->gdev->dev, "The lcs device driver " - "failed to recover the device\n"); - } - return rc; -} - -static int lcs_prepare(struct ccwgroup_device *gdev) -{ - return 0; -} - -static void lcs_complete(struct ccwgroup_device *gdev) -{ - return; -} - -static int lcs_freeze(struct ccwgroup_device *gdev) -{ - struct lcs_card *card = dev_get_drvdata(&gdev->dev); - return lcs_pm_suspend(card); -} - -static int lcs_thaw(struct ccwgroup_device *gdev) -{ - struct lcs_card *card = dev_get_drvdata(&gdev->dev); - return lcs_pm_resume(card); -} - -static int lcs_restore(struct ccwgroup_device *gdev) -{ - struct lcs_card *card = dev_get_drvdata(&gdev->dev); - return lcs_pm_resume(card); -} - static struct ccw_device_id lcs_ids[] = { {CCW_DEVICE(0x3088, 0x08), .driver_info = lcs_channel_type_parallel}, {CCW_DEVICE(0x3088, 0x1f), .driver_info = lcs_channel_type_2216}, @@ -2382,11 +2328,6 @@ static struct ccwgroup_driver lcs_group_driver = { .remove = lcs_remove_device, .set_online = lcs_new_device, .set_offline = lcs_shutdown_device, - .prepare = lcs_prepare, - .complete = lcs_complete, - .freeze = lcs_freeze, - .thaw = lcs_thaw, - .restore = lcs_restore, }; static ssize_t group_store(struct device_driver *ddrv, const char *buf, diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c index 5ce2424ca729..260860cf3aa1 100644 --- a/drivers/s390/net/netiucv.c +++ b/drivers/s390/net/netiucv.c @@ -112,27 +112,10 @@ DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf); */ #define PRINTK_HEADER " iucv: " /* for debugging */ -/* dummy device to make sure netiucv_pm functions are called */ -static struct device *netiucv_dev; - -static int netiucv_pm_prepare(struct device *); -static void netiucv_pm_complete(struct device *); -static int netiucv_pm_freeze(struct device *); -static int netiucv_pm_restore_thaw(struct device *); - -static const struct dev_pm_ops netiucv_pm_ops = { - .prepare = netiucv_pm_prepare, - .complete = netiucv_pm_complete, - .freeze = netiucv_pm_freeze, - .thaw = netiucv_pm_restore_thaw, - .restore = netiucv_pm_restore_thaw, -}; - static struct device_driver netiucv_driver = { .owner = THIS_MODULE, .name = "netiucv", .bus = &iucv_bus, - .pm = &netiucv_pm_ops, }; static int netiucv_callback_connreq(struct iucv_path *, u8 *, u8 *); @@ -213,7 +196,6 @@ struct netiucv_priv { fsm_instance *fsm; struct iucv_connection *conn; struct device *dev; - int pm_state; }; /** @@ -1275,72 +1257,6 @@ static int netiucv_close(struct net_device *dev) return 0; } -static int netiucv_pm_prepare(struct device *dev) -{ - IUCV_DBF_TEXT(trace, 3, __func__); - return 0; -} - -static void netiucv_pm_complete(struct device *dev) -{ - IUCV_DBF_TEXT(trace, 3, __func__); - return; -} - -/** - * netiucv_pm_freeze() - Freeze PM callback - * @dev: netiucv device - * - * close open netiucv interfaces - */ -static int netiucv_pm_freeze(struct device *dev) -{ - struct netiucv_priv *priv = dev_get_drvdata(dev); - struct net_device *ndev = NULL; - int rc = 0; - - IUCV_DBF_TEXT(trace, 3, __func__); - if (priv && priv->conn) - ndev = priv->conn->netdev; - if (!ndev) - goto out; - netif_device_detach(ndev); - priv->pm_state = fsm_getstate(priv->fsm); - rc = netiucv_close(ndev); -out: - return rc; -} - -/** - * netiucv_pm_restore_thaw() - Thaw and restore PM 
callback - * @dev: netiucv device - * - * re-open netiucv interfaces closed during freeze - */ -static int netiucv_pm_restore_thaw(struct device *dev) -{ - struct netiucv_priv *priv = dev_get_drvdata(dev); - struct net_device *ndev = NULL; - int rc = 0; - - IUCV_DBF_TEXT(trace, 3, __func__); - if (priv && priv->conn) - ndev = priv->conn->netdev; - if (!ndev) - goto out; - switch (priv->pm_state) { - case DEV_STATE_RUNNING: - case DEV_STATE_STARTWAIT: - rc = netiucv_open(ndev); - break; - default: - break; - } - netif_device_attach(ndev); -out: - return rc; -} - /** * Start transmission of a packet. * Called from generic network device layer. @@ -2156,7 +2072,6 @@ static void __exit netiucv_exit(void) netiucv_unregister_device(dev); } - device_unregister(netiucv_dev); driver_unregister(&netiucv_driver); iucv_unregister(&netiucv_handler, 1); iucv_unregister_dbf_views(); @@ -2182,27 +2097,10 @@ static int __init netiucv_init(void) IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc); goto out_iucv; } - /* establish dummy device */ - netiucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL); - if (!netiucv_dev) { - rc = -ENOMEM; - goto out_driver; - } - dev_set_name(netiucv_dev, "netiucv"); - netiucv_dev->bus = &iucv_bus; - netiucv_dev->parent = iucv_root; - netiucv_dev->release = (void (*)(struct device *))kfree; - netiucv_dev->driver = &netiucv_driver; - rc = device_register(netiucv_dev); - if (rc) { - put_device(netiucv_dev); - goto out_driver; - } + netiucv_banner(); return rc; -out_driver: - driver_unregister(&netiucv_driver); out_iucv: iucv_unregister(&netiucv_handler, 1); out_dbf: diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index db8e069be3a0..18a0fb75a710 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -6434,32 +6434,6 @@ static void qeth_core_shutdown(struct ccwgroup_device *gdev) qdio_free(CARD_DDEV(card)); } -static int qeth_suspend(struct ccwgroup_device *gdev) -{ - struct qeth_card *card = dev_get_drvdata(&gdev->dev); - - qeth_set_allowed_threads(card, 0, 1); - wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0); - if (gdev->state == CCWGROUP_OFFLINE) - return 0; - - qeth_set_offline(card, false); - return 0; -} - -static int qeth_resume(struct ccwgroup_device *gdev) -{ - struct qeth_card *card = dev_get_drvdata(&gdev->dev); - int rc; - - rc = qeth_set_online(card); - - qeth_set_allowed_threads(card, 0xffffffff, 0); - if (rc) - dev_warn(&card->gdev->dev, "The qeth device driver failed to recover an error on the device\n"); - return rc; -} - static ssize_t group_store(struct device_driver *ddrv, const char *buf, size_t count) { @@ -6496,11 +6470,6 @@ static struct ccwgroup_driver qeth_core_ccwgroup_driver = { .set_online = qeth_core_set_online, .set_offline = qeth_core_set_offline, .shutdown = qeth_core_shutdown, - .prepare = NULL, - .complete = NULL, - .freeze = qeth_suspend, - .thaw = qeth_resume, - .restore = qeth_resume, }; struct qeth_card *qeth_get_card_by_busid(char *bus_id) diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c index 066b5c3aaae6..c84ec2fbf99b 100644 --- a/drivers/s390/net/smsgiucv.c +++ b/drivers/s390/net/smsgiucv.c @@ -29,12 +29,9 @@ MODULE_AUTHOR MODULE_DESCRIPTION ("Linux for S/390 IUCV special message driver"); static struct iucv_path *smsg_path; -/* dummy device used as trigger for PM functions */ -static struct device *smsg_dev; static DEFINE_SPINLOCK(smsg_list_lock); static LIST_HEAD(smsg_list); -static int iucv_path_connected; 
static int smsg_path_pending(struct iucv_path *, u8 *, u8 *); static void smsg_message_pending(struct iucv_path *, struct iucv_message *); @@ -124,60 +121,15 @@ void smsg_unregister_callback(const char *prefix, kfree(cb); } -static int smsg_pm_freeze(struct device *dev) -{ -#ifdef CONFIG_PM_DEBUG - printk(KERN_WARNING "smsg_pm_freeze\n"); -#endif - if (smsg_path && iucv_path_connected) { - iucv_path_sever(smsg_path, NULL); - iucv_path_connected = 0; - } - return 0; -} - -static int smsg_pm_restore_thaw(struct device *dev) -{ - int rc; - -#ifdef CONFIG_PM_DEBUG - printk(KERN_WARNING "smsg_pm_restore_thaw\n"); -#endif - if (smsg_path && !iucv_path_connected) { - memset(smsg_path, 0, sizeof(*smsg_path)); - smsg_path->msglim = 255; - smsg_path->flags = 0; - rc = iucv_path_connect(smsg_path, &smsg_handler, "*MSG ", - NULL, NULL, NULL); -#ifdef CONFIG_PM_DEBUG - if (rc) - printk(KERN_ERR - "iucv_path_connect returned with rc %i\n", rc); -#endif - if (!rc) - iucv_path_connected = 1; - cpcmd("SET SMSG IUCV", NULL, 0, NULL); - } - return 0; -} - -static const struct dev_pm_ops smsg_pm_ops = { - .freeze = smsg_pm_freeze, - .thaw = smsg_pm_restore_thaw, - .restore = smsg_pm_restore_thaw, -}; - static struct device_driver smsg_driver = { .owner = THIS_MODULE, .name = SMSGIUCV_DRV_NAME, .bus = &iucv_bus, - .pm = &smsg_pm_ops, }; static void __exit smsg_exit(void) { cpcmd("SET SMSG OFF", NULL, 0, NULL); - device_unregister(smsg_dev); iucv_unregister(&smsg_handler, 1); driver_unregister(&smsg_driver); } @@ -205,27 +157,10 @@ static int __init smsg_init(void) NULL, NULL, NULL); if (rc) goto out_free_path; - else - iucv_path_connected = 1; - smsg_dev = kzalloc(sizeof(struct device), GFP_KERNEL); - if (!smsg_dev) { - rc = -ENOMEM; - goto out_free_path; - } - dev_set_name(smsg_dev, "smsg_iucv"); - smsg_dev->bus = &iucv_bus; - smsg_dev->parent = iucv_root; - smsg_dev->release = (void (*)(struct device *))kfree; - smsg_dev->driver = &smsg_driver; - rc = device_register(smsg_dev); - if (rc) - goto out_put; cpcmd("SET SMSG IUCV", NULL, 0, NULL); return 0; -out_put: - put_device(smsg_dev); out_free_path: iucv_path_free(smsg_path); smsg_path = NULL; diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 33255968f03a..2c9e5ac24692 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -1850,9 +1850,6 @@ qla2x00_port_speed_show(struct device *dev, struct device_attribute *attr, return -EINVAL; } - ql_log(ql_log_info, vha, 0x70d6, - "port speed:%d\n", ha->link_data_rate); - return scnprintf(buf, PAGE_SIZE, "%s\n", spd[ha->link_data_rate]); } diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c index 3717eea37ecb..5f0ad8b32e3a 100644 --- a/drivers/scsi/scsi_pm.c +++ b/drivers/scsi/scsi_pm.c @@ -80,6 +80,10 @@ static int scsi_dev_type_resume(struct device *dev, dev_dbg(dev, "scsi resume: %d\n", err); if (err == 0) { + bool was_runtime_suspended; + + was_runtime_suspended = pm_runtime_suspended(dev); + pm_runtime_disable(dev); err = pm_runtime_set_active(dev); pm_runtime_enable(dev); @@ -93,8 +97,10 @@ static int scsi_dev_type_resume(struct device *dev, */ if (!err && scsi_is_sdev_device(dev)) { struct scsi_device *sdev = to_scsi_device(dev); - - blk_set_runtime_active(sdev->request_queue); + if (was_runtime_suspended) + blk_post_runtime_resume(sdev->request_queue, 0); + else + blk_set_runtime_active(sdev->request_queue); } } diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c index db37144ae98c..87ee9f767b7a 
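/*
 * Contract sketch for the scsi_pm.c hunk just above (helper names from
 * the hunk, semantics paraphrased): blk_post_runtime_resume() is the
 * full runtime-resume epilogue -- it marks the queue active and reruns
 * requests parked while the device was runtime suspended -- whereas
 * blk_set_runtime_active() only syncs the queue's runtime-PM state,
 * which suffices when nothing was parked.
 */
	if (was_runtime_suspended)
		blk_post_runtime_resume(sdev->request_queue, 0);
	else
		blk_set_runtime_active(sdev->request_queue);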
100644 --- a/drivers/soc/mediatek/mtk-cmdq-helper.c +++ b/drivers/soc/mediatek/mtk-cmdq-helper.c @@ -351,7 +351,9 @@ int cmdq_pkt_flush_async(struct cmdq_pkt *pkt, cmdq_async_flush_cb cb, spin_unlock_irqrestore(&client->lock, flags); } - mbox_send_message(client->chan, pkt); + err = mbox_send_message(client->chan, pkt); + if (err < 0) + return err; /* We can send next packet immediately, so just call txdone. */ mbox_client_txdone(client->chan, 0); diff --git a/drivers/staging/greybus/uart.c b/drivers/staging/greybus/uart.c index 55c51143bb09..4ffb334cd5cd 100644 --- a/drivers/staging/greybus/uart.c +++ b/drivers/staging/greybus/uart.c @@ -537,9 +537,9 @@ static void gb_tty_set_termios(struct tty_struct *tty, } if (C_CRTSCTS(tty) && C_BAUD(tty) != B0) - newline.flow_control |= GB_SERIAL_AUTO_RTSCTS_EN; + newline.flow_control = GB_SERIAL_AUTO_RTSCTS_EN; else - newline.flow_control &= ~GB_SERIAL_AUTO_RTSCTS_EN; + newline.flow_control = 0; if (memcmp(&gb_tty->line_coding, &newline, sizeof(newline))) { memcpy(&gb_tty->line_coding, &newline, sizeof(newline)); diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c index 4b25a3a314ed..ed404355ea4c 100644 --- a/drivers/staging/iio/resolver/ad2s1210.c +++ b/drivers/staging/iio/resolver/ad2s1210.c @@ -130,17 +130,24 @@ static int ad2s1210_config_write(struct ad2s1210_state *st, u8 data) static int ad2s1210_config_read(struct ad2s1210_state *st, unsigned char address) { - struct spi_transfer xfer = { - .len = 2, - .rx_buf = st->rx, - .tx_buf = st->tx, + struct spi_transfer xfers[] = { + { + .len = 1, + .rx_buf = &st->rx[0], + .tx_buf = &st->tx[0], + .cs_change = 1, + }, { + .len = 1, + .rx_buf = &st->rx[1], + .tx_buf = &st->tx[1], + }, }; int ret = 0; ad2s1210_set_mode(MOD_CONFIG, st); st->tx[0] = address | AD2S1210_MSB_IS_HIGH; st->tx[1] = AD2S1210_REG_FAULT; - ret = spi_sync_transfer(st->sdev, &xfer, 1); + ret = spi_sync_transfer(st->sdev, xfers, 2); if (ret < 0) return ret; diff --git a/drivers/staging/kpc2000/kpc2000/core.c b/drivers/staging/kpc2000/kpc2000/core.c index 7b00d7069e21..358d7b2f4ad1 100644 --- a/drivers/staging/kpc2000/kpc2000/core.c +++ b/drivers/staging/kpc2000/kpc2000/core.c @@ -298,7 +298,6 @@ static int kp2000_pcie_probe(struct pci_dev *pdev, { int err = 0; struct kp2000_device *pcard; - int rv; unsigned long reg_bar_phys_addr; unsigned long reg_bar_phys_len; unsigned long dma_bar_phys_addr; @@ -445,11 +444,11 @@ static int kp2000_pcie_probe(struct pci_dev *pdev, if (err < 0) goto err_release_dma; - rv = request_irq(pcard->pdev->irq, kp2000_irq_handler, IRQF_SHARED, - pcard->name, pcard); - if (rv) { + err = request_irq(pcard->pdev->irq, kp2000_irq_handler, IRQF_SHARED, + pcard->name, pcard); + if (err) { dev_err(&pcard->pdev->dev, - "%s: failed to request_irq: %d\n", __func__, rv); + "%s: failed to request_irq: %d\n", __func__, err); goto err_disable_msi; } diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c index 1ba85a43f05a..cd31ad2b8a7b 100644 --- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c +++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c @@ -3163,29 +3163,6 @@ exit: return ret; } -static void cfg80211_rtw_mgmt_frame_register(struct wiphy *wiphy, - struct wireless_dev *wdev, - u16 frame_type, bool reg) -{ - struct net_device *ndev = wdev_to_ndev(wdev); - struct adapter *adapter; - - if (ndev == NULL) - goto exit; - - adapter = (struct adapter *)rtw_netdev_priv(ndev); - -#ifdef DEBUG_CFG80211 - 
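/*
 * Why the ad2s1210 fix above splits one 2-byte transfer into two: the
 * part returns a register's value only on the *following* SPI frame, so
 * the address write and the data read must be separate one-byte
 * transfers with chip select toggled in between. General sketch of the
 * pattern (buffer names illustrative):
 */
	struct spi_transfer xfers[] = {
		{
			.tx_buf = &tx[0],	/* register address to read */
			.rx_buf = &rx[0],
			.len = 1,
			.cs_change = 1,		/* deassert CS between frames */
		}, {
			.tx_buf = &tx[1],	/* next address, or a dummy */
			.rx_buf = &rx[1],	/* value of the first register */
			.len = 1,
		},
	};

	ret = spi_sync_transfer(spi, xfers, ARRAY_SIZE(xfers));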
DBG_871X(FUNC_ADPT_FMT" frame_type:%x, reg:%d\n", FUNC_ADPT_ARG(adapter), - frame_type, reg); -#endif - - if (frame_type != (IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_REQ)) - return; -exit: - return; -} - #if defined(CONFIG_PNO_SUPPORT) static int cfg80211_rtw_sched_scan_start(struct wiphy *wiphy, struct net_device *dev, @@ -3397,7 +3374,6 @@ static struct cfg80211_ops rtw_cfg80211_ops = { .change_bss = cfg80211_rtw_change_bss, .mgmt_tx = cfg80211_rtw_mgmt_tx, - .mgmt_frame_register = cfg80211_rtw_mgmt_frame_register, #if defined(CONFIG_PNO_SUPPORT) .sched_scan_start = cfg80211_rtw_sched_scan_start, diff --git a/drivers/staging/wfx/scan.c b/drivers/staging/wfx/scan.c index 6e1e50048651..9aa14331affd 100644 --- a/drivers/staging/wfx/scan.c +++ b/drivers/staging/wfx/scan.c @@ -57,8 +57,10 @@ static int send_scan_req(struct wfx_vif *wvif, wvif->scan_abort = false; reinit_completion(&wvif->scan_complete); timeout = hif_scan(wvif, req, start_idx, i - start_idx); - if (timeout < 0) + if (timeout < 0) { + wfx_tx_unlock(wvif->wdev); return timeout; + } ret = wait_for_completion_timeout(&wvif->scan_complete, timeout); if (req->channels[start_idx]->max_power != wvif->vif->bss_conf.txpower) hif_set_output_power(wvif, wvif->vif->bss_conf.txpower); diff --git a/drivers/staging/wilc1000/cfg80211.c b/drivers/staging/wilc1000/cfg80211.c index 4bdcbc5fd2fd..b6065a0d660f 100644 --- a/drivers/staging/wilc1000/cfg80211.c +++ b/drivers/staging/wilc1000/cfg80211.c @@ -1217,33 +1217,31 @@ static int mgmt_tx_cancel_wait(struct wiphy *wiphy, return 0; } -void wilc_mgmt_frame_register(struct wiphy *wiphy, struct wireless_dev *wdev, - u16 frame_type, bool reg) +void wilc_update_mgmt_frame_registrations(struct wiphy *wiphy, + struct wireless_dev *wdev, + struct mgmt_frame_regs *upd) { struct wilc *wl = wiphy_priv(wiphy); struct wilc_vif *vif = netdev_priv(wdev->netdev); + u32 presp_bit = BIT(IEEE80211_STYPE_PROBE_REQ >> 4); + u32 action_bit = BIT(IEEE80211_STYPE_ACTION >> 4); - if (!frame_type) - return; + if (wl->initialized) { + bool prev = vif->mgmt_reg_stypes & presp_bit; + bool now = upd->interface_stypes & presp_bit; - switch (frame_type) { - case IEEE80211_STYPE_PROBE_REQ: - vif->frame_reg[0].type = frame_type; - vif->frame_reg[0].reg = reg; - break; + if (now != prev) + wilc_frame_register(vif, IEEE80211_STYPE_PROBE_REQ, now); - case IEEE80211_STYPE_ACTION: - vif->frame_reg[1].type = frame_type; - vif->frame_reg[1].reg = reg; - break; + prev = vif->mgmt_reg_stypes & action_bit; + now = upd->interface_stypes & action_bit; - default: - break; + if (now != prev) + wilc_frame_register(vif, IEEE80211_STYPE_ACTION, now); } - if (!wl->initialized) - return; - wilc_frame_register(vif, frame_type, reg); + vif->mgmt_reg_stypes = + upd->interface_stypes & (presp_bit | action_bit); } static int set_cqm_rssi_config(struct wiphy *wiphy, struct net_device *dev, @@ -1665,7 +1663,7 @@ static const struct cfg80211_ops wilc_cfg80211_ops = { .cancel_remain_on_channel = cancel_remain_on_channel, .mgmt_tx_cancel_wait = mgmt_tx_cancel_wait, .mgmt_tx = mgmt_tx, - .mgmt_frame_register = wilc_mgmt_frame_register, + .update_mgmt_frame_registrations = wilc_update_mgmt_frame_registrations, .set_power_mgmt = set_power_mgmt, .set_cqm_rssi_config = set_cqm_rssi_config, diff --git a/drivers/staging/wilc1000/cfg80211.h b/drivers/staging/wilc1000/cfg80211.h index 5e5d63f70df2..37b294cb3b37 100644 --- a/drivers/staging/wilc1000/cfg80211.h +++ b/drivers/staging/wilc1000/cfg80211.h @@ -21,8 +21,9 @@ void wilc_wfi_deinit_mon_interface(struct 
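/*
 * The wilc1000 conversion in progress here replaces per-frame-type
 * bookkeeping with a subtype bitmap: management subtype S occupies
 * BIT(S >> 4), because the stype field sits in bits 4-7 of
 * frame_control. Sketch of the rx-path test this enables (the masking
 * is made explicit here; the delivery helper is hypothetical):
 */
	u16 fc = le16_to_cpup((__le16 *)buff);	/* frame_control */
	u32 stype_bit = BIT((fc & IEEE80211_FCTL_STYPE) >> 4);

	if (vif->mgmt_reg_stypes & stype_bit)
		deliver_mgmt_to_userspace(vif, buff, size);	/* hypothetical */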
wilc *wl, bool rtnl_locked); struct net_device *wilc_wfi_init_mon_interface(struct wilc *wl, const char *name, struct net_device *real_dev); -void wilc_mgmt_frame_register(struct wiphy *wiphy, struct wireless_dev *wdev, - u16 frame_type, bool reg); +void wilc_update_mgmt_frame_registrations(struct wiphy *wiphy, + struct wireless_dev *wdev, + struct mgmt_frame_regs *upd); struct wilc_vif *wilc_get_interface(struct wilc *wl); struct wilc_vif *wilc_get_wl_to_vif(struct wilc *wl); void wlan_deinit_locks(struct wilc *wilc); diff --git a/drivers/staging/wilc1000/netdev.c b/drivers/staging/wilc1000/netdev.c index f94a17babd12..fda0ab97b02c 100644 --- a/drivers/staging/wilc1000/netdev.c +++ b/drivers/staging/wilc1000/netdev.c @@ -571,6 +571,7 @@ static int wilc_mac_open(struct net_device *ndev) struct wilc *wl = vif->wilc; unsigned char mac_add[ETH_ALEN] = {0}; int ret = 0; + struct mgmt_frame_regs mgmt_regs = {}; if (!wl || !wl->dev) { netdev_err(ndev, "device not ready\n"); @@ -602,14 +603,12 @@ static int wilc_mac_open(struct net_device *ndev) return -EINVAL; } - wilc_mgmt_frame_register(vif->ndev->ieee80211_ptr->wiphy, - vif->ndev->ieee80211_ptr, - vif->frame_reg[0].type, - vif->frame_reg[0].reg); - wilc_mgmt_frame_register(vif->ndev->ieee80211_ptr->wiphy, - vif->ndev->ieee80211_ptr, - vif->frame_reg[1].type, - vif->frame_reg[1].reg); + mgmt_regs.interface_stypes = vif->mgmt_reg_stypes; + /* so we detect a change */ + vif->mgmt_reg_stypes = 0; + wilc_update_mgmt_frame_registrations(vif->ndev->ieee80211_ptr->wiphy, + vif->ndev->ieee80211_ptr, + &mgmt_regs); netif_wake_queue(ndev); wl->open_ifcs++; vif->mac_opened = 1; @@ -792,12 +791,10 @@ void wilc_wfi_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size) srcu_idx = srcu_read_lock(&wilc->srcu); list_for_each_entry_rcu(vif, &wilc->vif_list, list) { u16 type = le16_to_cpup((__le16 *)buff); + u32 type_bit = BIT(type >> 4); if (vif->priv.p2p_listen_state && - ((type == vif->frame_reg[0].type && - vif->frame_reg[0].reg) || - (type == vif->frame_reg[1].type && - vif->frame_reg[1].reg))) + vif->mgmt_reg_stypes & type_bit) wilc_wfi_p2p_rx(vif, buff, size); if (vif->monitor_flag) diff --git a/drivers/staging/wilc1000/netdev.h b/drivers/staging/wilc1000/netdev.h index 61cbec674a62..d0a006b68d08 100644 --- a/drivers/staging/wilc1000/netdev.h +++ b/drivers/staging/wilc1000/netdev.h @@ -24,8 +24,6 @@ #define PMKID_FOUND 1 #define NUM_STA_ASSOCIATED 8 -#define NUM_REG_FRAME 2 - #define TCP_ACK_FILTER_LINK_SPEED_THRESH 54 #define DEFAULT_LINK_SPEED 72 @@ -151,11 +149,6 @@ struct wilc_priv { u64 inc_roc_cookie; }; -struct frame_reg { - u16 type; - bool reg; -}; - #define MAX_TCP_SESSION 25 #define MAX_PENDING_ACKS 256 @@ -187,7 +180,7 @@ struct wilc_vif { u8 iftype; int monitor_flag; int mac_opened; - struct frame_reg frame_reg[NUM_REG_FRAME]; + u32 mgmt_reg_stypes; struct net_device_stats netstats; struct wilc *wilc; u8 bssid[ETH_ALEN]; diff --git a/drivers/target/iscsi/Kconfig b/drivers/target/iscsi/Kconfig index 1f93ea381353..922484ea4e30 100644 --- a/drivers/target/iscsi/Kconfig +++ b/drivers/target/iscsi/Kconfig @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only config ISCSI_TARGET tristate "Linux-iSCSI.org iSCSI Target Mode Stack" - depends on NET + depends on INET select CRYPTO select CRYPTO_CRC32C select CRYPTO_CRC32C_INTEL if X86 diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index 731ee67fe914..85748e338858 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ 
b/drivers/target/iscsi/iscsi_target_login.c @@ -15,6 +15,7 @@ #include <linux/sched/signal.h> #include <linux/idr.h> #include <linux/tcp.h> /* TCP_NODELAY */ +#include <net/ip.h> #include <net/ipv6.h> /* ipv6_addr_v4mapped() */ #include <scsi/iscsi_proto.h> #include <target/target_core_base.h> @@ -855,7 +856,7 @@ int iscsit_setup_np( struct sockaddr_storage *sockaddr) { struct socket *sock = NULL; - int backlog = ISCSIT_TCP_BACKLOG, ret, opt = 0, len; + int backlog = ISCSIT_TCP_BACKLOG, ret, len; switch (np->np_network_transport) { case ISCSI_TCP: @@ -897,34 +898,10 @@ int iscsit_setup_np( /* * Set SO_REUSEADDR, and disable Nagel Algorithm with TCP_NODELAY. */ - /* FIXME: Someone please explain why this is endian-safe */ - opt = 1; - if (np->np_network_transport == ISCSI_TCP) { - ret = kernel_setsockopt(sock, IPPROTO_TCP, TCP_NODELAY, - (char *)&opt, sizeof(opt)); - if (ret < 0) { - pr_err("kernel_setsockopt() for TCP_NODELAY" - " failed: %d\n", ret); - goto fail; - } - } - - /* FIXME: Someone please explain why this is endian-safe */ - ret = kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, - (char *)&opt, sizeof(opt)); - if (ret < 0) { - pr_err("kernel_setsockopt() for SO_REUSEADDR" - " failed\n"); - goto fail; - } - - ret = kernel_setsockopt(sock, IPPROTO_IP, IP_FREEBIND, - (char *)&opt, sizeof(opt)); - if (ret < 0) { - pr_err("kernel_setsockopt() for IP_FREEBIND" - " failed\n"); - goto fail; - } + if (np->np_network_transport == ISCSI_TCP) + tcp_sock_set_nodelay(sock->sk); + sock_set_reuseaddr(sock->sk); + ip_sock_set_freebind(sock->sk); ret = kernel_bind(sock, (struct sockaddr *)&np->np_sockaddr, len); if (ret < 0) { diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 594b724bbf79..264a822c0bfa 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -3350,6 +3350,7 @@ static void target_tmr_work(struct work_struct *work) cmd->se_tfo->queue_tm_rsp(cmd); + transport_lun_remove_cmd(cmd); transport_cmd_check_stop_to_fabric(cmd); return; diff --git a/drivers/tty/serial/sifive.c b/drivers/tty/serial/sifive.c index 13eadcb8aec4..0b5110dad051 100644 --- a/drivers/tty/serial/sifive.c +++ b/drivers/tty/serial/sifive.c @@ -883,6 +883,7 @@ console_initcall(sifive_console_init); static void __ssp_add_console_port(struct sifive_serial_port *ssp) { + spin_lock_init(&ssp->port.lock); sifive_serial_console_ports[ssp->port.line] = ssp; } diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/gadget.c index 372460ea4df9..4d43f3b28309 100644 --- a/drivers/usb/cdns3/gadget.c +++ b/drivers/usb/cdns3/gadget.c @@ -82,7 +82,7 @@ static int cdns3_ep_run_stream_transfer(struct cdns3_endpoint *priv_ep, * @ptr: address of device controller register to be read and changed * @mask: bits requested to clar */ -void cdns3_clear_register_bit(void __iomem *ptr, u32 mask) +static void cdns3_clear_register_bit(void __iomem *ptr, u32 mask) { mask = readl(ptr) & ~mask; writel(mask, ptr); @@ -137,7 +137,7 @@ struct usb_request *cdns3_next_request(struct list_head *list) * * Returns buffer or NULL if no buffers in list */ -struct cdns3_aligned_buf *cdns3_next_align_buf(struct list_head *list) +static struct cdns3_aligned_buf *cdns3_next_align_buf(struct list_head *list) { return list_first_entry_or_null(list, struct cdns3_aligned_buf, list); } @@ -148,7 +148,7 @@ struct cdns3_aligned_buf *cdns3_next_align_buf(struct list_head *list) * * Returns request or NULL if no requests in list */ -struct cdns3_request 
*cdns3_next_priv_request(struct list_head *list) +static struct cdns3_request *cdns3_next_priv_request(struct list_head *list) { return list_first_entry_or_null(list, struct cdns3_request, list); } @@ -190,7 +190,7 @@ dma_addr_t cdns3_trb_virt_to_dma(struct cdns3_endpoint *priv_ep, return priv_ep->trb_pool_dma + offset; } -int cdns3_ring_size(struct cdns3_endpoint *priv_ep) +static int cdns3_ring_size(struct cdns3_endpoint *priv_ep) { switch (priv_ep->type) { case USB_ENDPOINT_XFER_ISOC: @@ -345,7 +345,7 @@ static void cdns3_ep_inc_deq(struct cdns3_endpoint *priv_ep) cdns3_ep_inc_trb(&priv_ep->dequeue, &priv_ep->ccs, priv_ep->num_trbs); } -void cdns3_move_deq_to_next_trb(struct cdns3_request *priv_req) +static void cdns3_move_deq_to_next_trb(struct cdns3_request *priv_req) { struct cdns3_endpoint *priv_ep = priv_req->priv_ep; int current_trb = priv_req->start_trb; @@ -511,7 +511,7 @@ static void cdns3_wa2_descmiss_copy_data(struct cdns3_endpoint *priv_ep, } } -struct usb_request *cdns3_wa2_gadget_giveback(struct cdns3_device *priv_dev, +static struct usb_request *cdns3_wa2_gadget_giveback(struct cdns3_device *priv_dev, struct cdns3_endpoint *priv_ep, struct cdns3_request *priv_req) { @@ -551,7 +551,7 @@ struct usb_request *cdns3_wa2_gadget_giveback(struct cdns3_device *priv_dev, return &priv_req->request; } -int cdns3_wa2_gadget_ep_queue(struct cdns3_device *priv_dev, +static int cdns3_wa2_gadget_ep_queue(struct cdns3_device *priv_dev, struct cdns3_endpoint *priv_ep, struct cdns3_request *priv_req) { @@ -836,7 +836,7 @@ void cdns3_gadget_giveback(struct cdns3_endpoint *priv_ep, cdns3_gadget_ep_free_request(&priv_ep->endpoint, request); } -void cdns3_wa1_restore_cycle_bit(struct cdns3_endpoint *priv_ep) +static void cdns3_wa1_restore_cycle_bit(struct cdns3_endpoint *priv_ep) { /* Work around for stale data address in TRB*/ if (priv_ep->wa1_set) { @@ -1904,7 +1904,7 @@ static int cdns3_ep_onchip_buffer_reserve(struct cdns3_device *priv_dev, return 0; } -void cdns3_stream_ep_reconfig(struct cdns3_device *priv_dev, +static void cdns3_stream_ep_reconfig(struct cdns3_device *priv_dev, struct cdns3_endpoint *priv_ep) { if (!priv_ep->use_streams || priv_dev->gadget.speed < USB_SPEED_SUPER) @@ -1925,7 +1925,7 @@ void cdns3_stream_ep_reconfig(struct cdns3_device *priv_dev, EP_CFG_TDL_CHK | EP_CFG_SID_CHK); } -void cdns3_configure_dmult(struct cdns3_device *priv_dev, +static void cdns3_configure_dmult(struct cdns3_device *priv_dev, struct cdns3_endpoint *priv_ep) { struct cdns3_usb_regs __iomem *regs = priv_dev->regs; @@ -2548,7 +2548,7 @@ found: link_trb = priv_req->trb; /* Update ring only if removed request is on pending_req_list list */ - if (req_on_hw_ring) { + if (req_on_hw_ring && link_trb) { link_trb->buffer = TRB_BUFFER(priv_ep->trb_pool_dma + ((priv_req->end_trb + 1) * TRB_SIZE)); link_trb->control = (link_trb->control & TRB_CYCLE) | diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index b9db9812d6c5..d93d94d7ff50 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c @@ -251,9 +251,19 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma) usbm->vma_use_count = 1; INIT_LIST_HEAD(&usbm->memlist); - if (dma_mmap_coherent(hcd->self.sysdev, vma, mem, dma_handle, size)) { - dec_usb_memory_use_count(usbm, &usbm->vma_use_count); - return -EAGAIN; + if (hcd->localmem_pool || !hcd_uses_dma(hcd)) { + if (remap_pfn_range(vma, vma->vm_start, + virt_to_phys(usbm->mem) >> PAGE_SHIFT, + size, vma->vm_page_prot) < 0) { + dec_usb_memory_use_count(usbm, 
&usbm->vma_use_count); + return -EAGAIN; + } + } else { + if (dma_mmap_coherent(hcd->self.sysdev, vma, mem, dma_handle, + size)) { + dec_usb_memory_use_count(usbm, &usbm->vma_use_count); + return -EAGAIN; + } } vma->vm_flags |= VM_IO; diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 2b6565c06c23..fc748c731832 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -39,6 +39,7 @@ #define USB_VENDOR_GENESYS_LOGIC 0x05e3 #define USB_VENDOR_SMSC 0x0424 +#define USB_PRODUCT_USB5534B 0x5534 #define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND 0x01 #define HUB_QUIRK_DISABLE_AUTOSUSPEND 0x02 @@ -5621,8 +5622,11 @@ out_hdev_lock: } static const struct usb_device_id hub_id_table[] = { - { .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_CLASS, + { .match_flags = USB_DEVICE_ID_MATCH_VENDOR + | USB_DEVICE_ID_MATCH_PRODUCT + | USB_DEVICE_ID_MATCH_INT_CLASS, .idVendor = USB_VENDOR_SMSC, + .idProduct = USB_PRODUCT_USB5534B, .bInterfaceClass = USB_CLASS_HUB, .driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND}, { .match_flags = USB_DEVICE_ID_MATCH_VENDOR diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig index 206caa0ea1c6..7a2304565a73 100644 --- a/drivers/usb/dwc3/Kconfig +++ b/drivers/usb/dwc3/Kconfig @@ -4,6 +4,7 @@ config USB_DWC3 tristate "DesignWare USB3 DRD Core Support" depends on (USB || USB_GADGET) && HAS_DMA select USB_XHCI_PLATFORM if USB_XHCI_HCD + select USB_ROLE_SWITCH if USB_DWC3_DUAL_ROLE help Say Y or M here if your system has a Dual Role SuperSpeed USB controller based on the DesignWare USB3 IP Core. diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index 7051611229c9..b67372737dc9 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c @@ -114,6 +114,7 @@ static const struct property_entry dwc3_pci_intel_properties[] = { static const struct property_entry dwc3_pci_mrfld_properties[] = { PROPERTY_ENTRY_STRING("dr_mode", "otg"), + PROPERTY_ENTRY_STRING("linux,extcon-name", "mrfld_bcove_pwrsrc"), PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"), {} }; diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 00746c2848c0..585cb3deea7a 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -2483,9 +2483,6 @@ static int dwc3_gadget_ep_reclaim_trb_sg(struct dwc3_ep *dep, for_each_sg(sg, s, pending, i) { trb = &dep->trb_pool[dep->trb_dequeue]; - if (trb->ctrl & DWC3_TRB_CTRL_HWO) - break; - req->sg = sg_next(s); req->num_pending_sgs--; diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c index 32b637e3e1fa..6a9aa4413d64 100644 --- a/drivers/usb/gadget/configfs.c +++ b/drivers/usb/gadget/configfs.c @@ -260,6 +260,9 @@ static ssize_t gadget_dev_desc_UDC_store(struct config_item *item, char *name; int ret; + if (strlen(page) < len) + return -EOVERFLOW; + name = kstrdup(page, GFP_KERNEL); if (!name) return -ENOMEM; diff --git a/drivers/usb/gadget/legacy/audio.c b/drivers/usb/gadget/legacy/audio.c index dd81fd538cb8..a748ed0842e8 100644 --- a/drivers/usb/gadget/legacy/audio.c +++ b/drivers/usb/gadget/legacy/audio.c @@ -300,8 +300,10 @@ static int audio_bind(struct usb_composite_dev *cdev) struct usb_descriptor_header *usb_desc; usb_desc = usb_otg_descriptor_alloc(cdev->gadget); - if (!usb_desc) + if (!usb_desc) { + status = -ENOMEM; goto fail; + } usb_otg_descriptor_init(cdev->gadget, usb_desc); otg_desc[0] = usb_desc; otg_desc[1] = NULL; diff --git a/drivers/usb/gadget/legacy/cdc2.c b/drivers/usb/gadget/legacy/cdc2.c index 8d7a556ece30..563363aba48f 100644 --- 
a/drivers/usb/gadget/legacy/cdc2.c +++ b/drivers/usb/gadget/legacy/cdc2.c @@ -179,8 +179,10 @@ static int cdc_bind(struct usb_composite_dev *cdev) struct usb_descriptor_header *usb_desc; usb_desc = usb_otg_descriptor_alloc(gadget); - if (!usb_desc) + if (!usb_desc) { + status = -ENOMEM; goto fail1; + } usb_otg_descriptor_init(gadget, usb_desc); otg_desc[0] = usb_desc; otg_desc[1] = NULL; diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c index aa0de9e35afa..3afddd3bea6e 100644 --- a/drivers/usb/gadget/legacy/inode.c +++ b/drivers/usb/gadget/legacy/inode.c @@ -1361,7 +1361,6 @@ gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) req->buf = dev->rbuf; req->context = NULL; - value = -EOPNOTSUPP; switch (ctrl->bRequest) { case USB_REQ_GET_DESCRIPTOR: @@ -1784,7 +1783,7 @@ static ssize_t dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) { struct dev_data *dev = fd->private_data; - ssize_t value = len, length = len; + ssize_t value, length = len; unsigned total; u32 tag; char *kbuf; diff --git a/drivers/usb/gadget/legacy/ncm.c b/drivers/usb/gadget/legacy/ncm.c index c61e71ba7045..0f1b45e3abd1 100644 --- a/drivers/usb/gadget/legacy/ncm.c +++ b/drivers/usb/gadget/legacy/ncm.c @@ -156,8 +156,10 @@ static int gncm_bind(struct usb_composite_dev *cdev) struct usb_descriptor_header *usb_desc; usb_desc = usb_otg_descriptor_alloc(gadget); - if (!usb_desc) + if (!usb_desc) { + status = -ENOMEM; goto fail; + } usb_otg_descriptor_init(gadget, usb_desc); otg_desc[0] = usb_desc; otg_desc[1] = NULL; diff --git a/drivers/usb/gadget/legacy/raw_gadget.c b/drivers/usb/gadget/legacy/raw_gadget.c index ca7d95bf7397..e01e366d89cd 100644 --- a/drivers/usb/gadget/legacy/raw_gadget.c +++ b/drivers/usb/gadget/legacy/raw_gadget.c @@ -7,6 +7,7 @@ */ #include <linux/compiler.h> +#include <linux/ctype.h> #include <linux/debugfs.h> #include <linux/delay.h> #include <linux/kref.h> @@ -123,8 +124,6 @@ static void raw_event_queue_destroy(struct raw_event_queue *queue) struct raw_dev; -#define USB_RAW_MAX_ENDPOINTS 32 - enum ep_state { STATE_EP_DISABLED, STATE_EP_ENABLED, @@ -134,6 +133,7 @@ struct raw_ep { struct raw_dev *dev; enum ep_state state; struct usb_ep *ep; + u8 addr; struct usb_request *req; bool urb_queued; bool disabling; @@ -168,7 +168,8 @@ struct raw_dev { bool ep0_out_pending; bool ep0_urb_queued; ssize_t ep0_status; - struct raw_ep eps[USB_RAW_MAX_ENDPOINTS]; + struct raw_ep eps[USB_RAW_EPS_NUM_MAX]; + int eps_num; struct completion ep0_done; struct raw_event_queue queue; @@ -202,8 +203,8 @@ static void dev_free(struct kref *kref) usb_ep_free_request(dev->gadget->ep0, dev->req); } raw_event_queue_destroy(&dev->queue); - for (i = 0; i < USB_RAW_MAX_ENDPOINTS; i++) { - if (dev->eps[i].state != STATE_EP_ENABLED) + for (i = 0; i < dev->eps_num; i++) { + if (dev->eps[i].state == STATE_EP_DISABLED) continue; usb_ep_disable(dev->eps[i].ep); usb_ep_free_request(dev->eps[i].ep, dev->eps[i].req); @@ -249,12 +250,26 @@ static void gadget_ep0_complete(struct usb_ep *ep, struct usb_request *req) complete(&dev->ep0_done); } +static u8 get_ep_addr(const char *name) +{ + /* If the endpoint has fixed function (named as e.g. "ep12out-bulk"), + * parse the endpoint address from its name. We deliberately use + * deprecated simple_strtoul() function here, as the number isn't + * followed by '\0' nor '\n'. + */ + if (isdigit(name[2])) + return simple_strtoul(&name[2], NULL, 10); + /* Otherwise the endpoint is configurable (named as e.g. "ep-a"). 
*/ + return USB_RAW_EP_ADDR_ANY; +} + static int gadget_bind(struct usb_gadget *gadget, struct usb_gadget_driver *driver) { - int ret = 0; + int ret = 0, i = 0; struct raw_dev *dev = container_of(driver, struct raw_dev, driver); struct usb_request *req; + struct usb_ep *ep; unsigned long flags; if (strcmp(gadget->name, dev->udc_name) != 0) @@ -273,6 +288,13 @@ static int gadget_bind(struct usb_gadget *gadget, dev->req->context = dev; dev->req->complete = gadget_ep0_complete; dev->gadget = gadget; + gadget_for_each_ep(ep, dev->gadget) { + dev->eps[i].ep = ep; + dev->eps[i].addr = get_ep_addr(ep->name); + dev->eps[i].state = STATE_EP_DISABLED; + i++; + } + dev->eps_num = i; spin_unlock_irqrestore(&dev->lock, flags); /* Matches kref_put() in gadget_unbind(). */ @@ -555,7 +577,7 @@ static void *raw_alloc_io_data(struct usb_raw_ep_io *io, void __user *ptr, if (copy_from_user(io, ptr, sizeof(*io))) return ERR_PTR(-EFAULT); - if (io->ep >= USB_RAW_MAX_ENDPOINTS) + if (io->ep >= USB_RAW_EPS_NUM_MAX) return ERR_PTR(-EINVAL); if (!usb_raw_io_flags_valid(io->flags)) return ERR_PTR(-EINVAL); @@ -669,43 +691,61 @@ static int raw_ioctl_ep0_read(struct raw_dev *dev, unsigned long value) if (IS_ERR(data)) return PTR_ERR(data); ret = raw_process_ep0_io(dev, &io, data, false); - if (ret) + if (ret < 0) goto free; length = min(io.length, (unsigned int)ret); if (copy_to_user((void __user *)(value + sizeof(io)), data, length)) ret = -EFAULT; + else + ret = length; free: kfree(data); return ret; } -static bool check_ep_caps(struct usb_ep *ep, - struct usb_endpoint_descriptor *desc) +static int raw_ioctl_ep0_stall(struct raw_dev *dev, unsigned long value) { - switch (usb_endpoint_type(desc)) { - case USB_ENDPOINT_XFER_ISOC: - if (!ep->caps.type_iso) - return false; - break; - case USB_ENDPOINT_XFER_BULK: - if (!ep->caps.type_bulk) - return false; - break; - case USB_ENDPOINT_XFER_INT: - if (!ep->caps.type_int) - return false; - break; - default: - return false; + int ret = 0; + unsigned long flags; + + if (value) + return -EINVAL; + spin_lock_irqsave(&dev->lock, flags); + if (dev->state != STATE_DEV_RUNNING) { + dev_dbg(dev->dev, "fail, device is not running\n"); + ret = -EINVAL; + goto out_unlock; + } + if (!dev->gadget) { + dev_dbg(dev->dev, "fail, gadget is not bound\n"); + ret = -EBUSY; + goto out_unlock; + } + if (dev->ep0_urb_queued) { + dev_dbg(&dev->gadget->dev, "fail, urb already queued\n"); + ret = -EBUSY; + goto out_unlock; + } + if (!dev->ep0_in_pending && !dev->ep0_out_pending) { + dev_dbg(&dev->gadget->dev, "fail, no request pending\n"); + ret = -EBUSY; + goto out_unlock; } - if (usb_endpoint_dir_in(desc) && !ep->caps.dir_in) - return false; - if (usb_endpoint_dir_out(desc) && !ep->caps.dir_out) - return false; + ret = usb_ep_set_halt(dev->gadget->ep0); + if (ret < 0) + dev_err(&dev->gadget->dev, + "fail, usb_ep_set_halt returned %d\n", ret); + + if (dev->ep0_in_pending) + dev->ep0_in_pending = false; + else + dev->ep0_out_pending = false; - return true; +out_unlock: + spin_unlock_irqrestore(&dev->lock, flags); + return ret; } static int raw_ioctl_ep_enable(struct raw_dev *dev, unsigned long value) @@ -713,7 +753,7 @@ static int raw_ioctl_ep_enable(struct raw_dev *dev, unsigned long value) int ret = 0, i; unsigned long flags; struct usb_endpoint_descriptor *desc; - struct usb_ep *ep = NULL; + struct raw_ep *ep; desc = memdup_user((void __user *)value, sizeof(*desc)); if (IS_ERR(desc)) @@ -741,41 +781,32 @@ static int raw_ioctl_ep_enable(struct raw_dev *dev, unsigned long value) goto out_free; } - 
-	for (i = 0; i < USB_RAW_MAX_ENDPOINTS; i++) {
-		if (dev->eps[i].state == STATE_EP_ENABLED)
+	for (i = 0; i < dev->eps_num; i++) {
+		ep = &dev->eps[i];
+		if (ep->state != STATE_EP_DISABLED)
 			continue;
-		break;
-	}
-	if (i == USB_RAW_MAX_ENDPOINTS) {
-		dev_dbg(&dev->gadget->dev,
-				"fail, no device endpoints available\n");
-		ret = -EBUSY;
-		goto out_free;
-	}
-
-	gadget_for_each_ep(ep, dev->gadget) {
-		if (ep->enabled)
+		if (ep->addr != usb_endpoint_num(desc) &&
+				ep->addr != USB_RAW_EP_ADDR_ANY)
 			continue;
-		if (!check_ep_caps(ep, desc))
+		if (!usb_gadget_ep_match_desc(dev->gadget, ep->ep, desc, NULL))
 			continue;
-		ep->desc = desc;
-		ret = usb_ep_enable(ep);
+		ep->ep->desc = desc;
+		ret = usb_ep_enable(ep->ep);
 		if (ret < 0) {
 			dev_err(&dev->gadget->dev,
 				"fail, usb_ep_enable returned %d\n", ret);
 			goto out_free;
 		}
-		dev->eps[i].req = usb_ep_alloc_request(ep, GFP_ATOMIC);
-		if (!dev->eps[i].req) {
+		ep->req = usb_ep_alloc_request(ep->ep, GFP_ATOMIC);
+		if (!ep->req) {
 			dev_err(&dev->gadget->dev,
 				"fail, usb_ep_alloc_request failed\n");
-			usb_ep_disable(ep);
+			usb_ep_disable(ep->ep);
 			ret = -ENOMEM;
 			goto out_free;
 		}
-		dev->eps[i].ep = ep;
-		dev->eps[i].state = STATE_EP_ENABLED;
-		ep->driver_data = &dev->eps[i];
+		ep->state = STATE_EP_ENABLED;
+		ep->ep->driver_data = ep;
 		ret = i;
 		goto out_unlock;
 	}
@@ -794,10 +825,6 @@ static int raw_ioctl_ep_disable(struct raw_dev *dev, unsigned long value)
 {
 	int ret = 0, i = value;
 	unsigned long flags;
-	const void *desc;
-
-	if (i < 0 || i >= USB_RAW_MAX_ENDPOINTS)
-		return -EINVAL;
 
 	spin_lock_irqsave(&dev->lock, flags);
 	if (dev->state != STATE_DEV_RUNNING) {
@@ -810,7 +837,12 @@ static int raw_ioctl_ep_disable(struct raw_dev *dev, unsigned long value)
 		ret = -EBUSY;
 		goto out_unlock;
 	}
-	if (dev->eps[i].state != STATE_EP_ENABLED) {
+	if (i < 0 || i >= dev->eps_num) {
+		dev_dbg(dev->dev, "fail, invalid endpoint\n");
+		ret = -EBUSY;
+		goto out_unlock;
+	}
+	if (dev->eps[i].state == STATE_EP_DISABLED) {
 		dev_dbg(&dev->gadget->dev, "fail, endpoint is not enabled\n");
 		ret = -EINVAL;
 		goto out_unlock;
@@ -834,10 +866,8 @@ static int raw_ioctl_ep_disable(struct raw_dev *dev, unsigned long value)
 
 	spin_lock_irqsave(&dev->lock, flags);
 	usb_ep_free_request(dev->eps[i].ep, dev->eps[i].req);
-	desc = dev->eps[i].ep->desc;
-	dev->eps[i].ep = NULL;
+	kfree(dev->eps[i].ep->desc);
 	dev->eps[i].state = STATE_EP_DISABLED;
-	kfree(desc);
 	dev->eps[i].disabling = false;
 
 out_unlock:
@@ -845,6 +875,74 @@ out_unlock:
 	return ret;
 }
 
+static int raw_ioctl_ep_set_clear_halt_wedge(struct raw_dev *dev,
+		unsigned long value, bool set, bool halt)
+{
+	int ret = 0, i = value;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	if (dev->state != STATE_DEV_RUNNING) {
+		dev_dbg(dev->dev, "fail, device is not running\n");
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+	if (!dev->gadget) {
+		dev_dbg(dev->dev, "fail, gadget is not bound\n");
+		ret = -EBUSY;
+		goto out_unlock;
+	}
+	if (i < 0 || i >= dev->eps_num) {
+		dev_dbg(dev->dev, "fail, invalid endpoint\n");
+		ret = -EBUSY;
+		goto out_unlock;
+	}
+	if (dev->eps[i].state == STATE_EP_DISABLED) {
+		dev_dbg(&dev->gadget->dev, "fail, endpoint is not enabled\n");
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+	if (dev->eps[i].disabling) {
+		dev_dbg(&dev->gadget->dev,
+				"fail, disable is in progress\n");
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+	if (dev->eps[i].urb_queued) {
+		dev_dbg(&dev->gadget->dev,
+				"fail, waiting for urb completion\n");
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+	if (usb_endpoint_xfer_isoc(dev->eps[i].ep->desc)) {
+		dev_dbg(&dev->gadget->dev,
+				"fail, can't halt/wedge ISO endpoint\n");
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
+	if (set && halt) {
+		ret = usb_ep_set_halt(dev->eps[i].ep);
+		if (ret < 0)
+			dev_err(&dev->gadget->dev,
+				"fail, usb_ep_set_halt returned %d\n", ret);
+	} else if (!set && halt) {
+		ret = usb_ep_clear_halt(dev->eps[i].ep);
+		if (ret < 0)
+			dev_err(&dev->gadget->dev,
+				"fail, usb_ep_clear_halt returned %d\n", ret);
+	} else if (set && !halt) {
+		ret = usb_ep_set_wedge(dev->eps[i].ep);
+		if (ret < 0)
+			dev_err(&dev->gadget->dev,
+				"fail, usb_ep_set_wedge returned %d\n", ret);
+	}
+
+out_unlock:
+	spin_unlock_irqrestore(&dev->lock, flags);
+	return ret;
+}
+
 static void gadget_ep_complete(struct usb_ep *ep, struct usb_request *req)
 {
 	struct raw_ep *r_ep = (struct raw_ep *)ep->driver_data;
@@ -866,7 +964,7 @@ static int raw_process_ep_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
 {
 	int ret = 0;
 	unsigned long flags;
-	struct raw_ep *ep = &dev->eps[io->ep];
+	struct raw_ep *ep;
 	DECLARE_COMPLETION_ONSTACK(done);
 
 	spin_lock_irqsave(&dev->lock, flags);
@@ -880,6 +978,12 @@ static int raw_process_ep_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
 		ret = -EBUSY;
 		goto out_unlock;
 	}
+	if (io->ep >= dev->eps_num) {
+		dev_dbg(&dev->gadget->dev, "fail, invalid endpoint\n");
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+	ep = &dev->eps[io->ep];
 	if (ep->state != STATE_EP_ENABLED) {
 		dev_dbg(&dev->gadget->dev, "fail, endpoint is not enabled\n");
 		ret = -EBUSY;
@@ -964,12 +1068,14 @@ static int raw_ioctl_ep_read(struct raw_dev *dev, unsigned long value)
 	if (IS_ERR(data))
 		return PTR_ERR(data);
 	ret = raw_process_ep_io(dev, &io, data, false);
-	if (ret)
+	if (ret < 0)
 		goto free;
 
 	length = min(io.length, (unsigned int)ret);
 	if (copy_to_user((void __user *)(value + sizeof(io)), data, length))
 		ret = -EFAULT;
+	else
+		ret = length;
 free:
 	kfree(data);
 	return ret;
@@ -1023,6 +1129,71 @@ out_unlock:
 	return ret;
 }
 
+static void fill_ep_caps(struct usb_ep_caps *caps,
+		struct usb_raw_ep_caps *raw_caps)
+{
+	raw_caps->type_control = caps->type_control;
+	raw_caps->type_iso = caps->type_iso;
+	raw_caps->type_bulk = caps->type_bulk;
+	raw_caps->type_int = caps->type_int;
+	raw_caps->dir_in = caps->dir_in;
+	raw_caps->dir_out = caps->dir_out;
+}
+
+static void fill_ep_limits(struct usb_ep *ep, struct usb_raw_ep_limits *limits)
+{
+	limits->maxpacket_limit = ep->maxpacket_limit;
+	limits->max_streams = ep->max_streams;
+}
+
+static int raw_ioctl_eps_info(struct raw_dev *dev, unsigned long value)
+{
+	int ret = 0, i;
+	unsigned long flags;
+	struct usb_raw_eps_info *info;
+	struct raw_ep *ep;
+
+	info = kmalloc(sizeof(*info), GFP_KERNEL);
+	if (!info) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	spin_lock_irqsave(&dev->lock, flags);
+	if (dev->state != STATE_DEV_RUNNING) {
+		dev_dbg(dev->dev, "fail, device is not running\n");
+		ret = -EINVAL;
+		spin_unlock_irqrestore(&dev->lock, flags);
+		goto out_free;
+	}
+	if (!dev->gadget) {
+		dev_dbg(dev->dev, "fail, gadget is not bound\n");
+		ret = -EBUSY;
+		spin_unlock_irqrestore(&dev->lock, flags);
+		goto out_free;
+	}
+
+	memset(info, 0, sizeof(*info));
+	for (i = 0; i < dev->eps_num; i++) {
+		ep = &dev->eps[i];
+		strscpy(&info->eps[i].name[0], ep->ep->name,
+				USB_RAW_EP_NAME_MAX);
+		info->eps[i].addr = ep->addr;
+		fill_ep_caps(&ep->ep->caps, &info->eps[i].caps);
+		fill_ep_limits(ep->ep, &info->eps[i].limits);
+	}
+	ret = dev->eps_num;
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	if (copy_to_user((void __user *)value, info, sizeof(*info)))
+		ret = -EFAULT;
+
+out_free:
+	kfree(info);
+out:
+	return ret;
+}
+
 static long raw_ioctl(struct file *fd, unsigned int cmd, unsigned long value)
 {
 	struct raw_dev *dev = fd->private_data;
@@ -1065,6 +1236,24 @@ static long raw_ioctl(struct file *fd, unsigned int cmd, unsigned long value)
 	case USB_RAW_IOCTL_VBUS_DRAW:
 		ret = raw_ioctl_vbus_draw(dev, value);
 		break;
+	case USB_RAW_IOCTL_EPS_INFO:
+		ret = raw_ioctl_eps_info(dev, value);
+		break;
+	case USB_RAW_IOCTL_EP0_STALL:
+		ret = raw_ioctl_ep0_stall(dev, value);
+		break;
+	case USB_RAW_IOCTL_EP_SET_HALT:
+		ret = raw_ioctl_ep_set_clear_halt_wedge(
+					dev, value, true, true);
+		break;
+	case USB_RAW_IOCTL_EP_CLEAR_HALT:
+		ret = raw_ioctl_ep_set_clear_halt_wedge(
+					dev, value, false, true);
+		break;
+	case USB_RAW_IOCTL_EP_SET_WEDGE:
+		ret = raw_ioctl_ep_set_clear_halt_wedge(
+					dev, value, true, false);
+		break;
 	default:
 		ret = -EINVAL;
 	}
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index 22200341c8ec..b771a854e29c 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -185,7 +185,7 @@ static int regs_dbg_release(struct inode *inode, struct file *file)
 	return 0;
 }
 
-const struct file_operations queue_dbg_fops = {
+static const struct file_operations queue_dbg_fops = {
 	.owner		= THIS_MODULE,
 	.open		= queue_dbg_open,
 	.llseek		= no_llseek,
@@ -193,7 +193,7 @@ static int regs_dbg_release(struct inode *inode, struct file *file)
 	.release	= queue_dbg_release,
 };
 
-const struct file_operations regs_dbg_fops = {
+static const struct file_operations regs_dbg_fops = {
 	.owner		= THIS_MODULE,
 	.open		= regs_dbg_open,
 	.llseek		= generic_file_llseek,
diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
index a8273b589456..5af0fe9c61d7 100644
--- a/drivers/usb/gadget/udc/net2272.c
+++ b/drivers/usb/gadget/udc/net2272.c
@@ -2647,6 +2647,8 @@ net2272_plat_probe(struct platform_device *pdev)
  err_req:
 	release_mem_region(base, len);
  err:
+	kfree(dev);
+
 	return ret;
 }
 
diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c
index 52a6add961f4..dfabc54cdc27 100644
--- a/drivers/usb/gadget/udc/tegra-xudc.c
+++ b/drivers/usb/gadget/udc/tegra-xudc.c
@@ -3840,11 +3840,11 @@ static int __maybe_unused tegra_xudc_suspend(struct device *dev)
 
 	flush_work(&xudc->usb_role_sw_work);
 
-	/* Forcibly disconnect before powergating. */
-	tegra_xudc_device_mode_off(xudc);
-
-	if (!pm_runtime_status_suspended(dev))
+	if (!pm_runtime_status_suspended(dev)) {
+		/* Forcibly disconnect before powergating. */
+		tegra_xudc_device_mode_off(xudc);
 		tegra_xudc_powergate(xudc);
+	}
 
 	pm_runtime_disable(dev);
 
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 1d4f6f85f0fe..ea460b9682d5 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -362,6 +362,7 @@ static int xhci_plat_remove(struct platform_device *dev)
 	struct clk *reg_clk = xhci->reg_clk;
 	struct usb_hcd *shared_hcd = xhci->shared_hcd;
 
+	pm_runtime_get_sync(&dev->dev);
 	xhci->xhc_state |= XHCI_STATE_REMOVING;
 
 	usb_remove_hcd(shared_hcd);
@@ -375,8 +376,9 @@ static int xhci_plat_remove(struct platform_device *dev)
 	clk_disable_unprepare(reg_clk);
 	usb_put_hcd(hcd);
 
-	pm_runtime_set_suspended(&dev->dev);
 	pm_runtime_disable(&dev->dev);
+	pm_runtime_put_noidle(&dev->dev);
+	pm_runtime_set_suspended(&dev->dev);
 
 	return 0;
 }
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 0fda0c0f4d31..2c255d0620b0 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -3433,8 +3433,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 			/* New sg entry */
 			--num_sgs;
 			sent_len -= block_len;
-			if (num_sgs != 0) {
-				sg = sg_next(sg);
+			sg = sg_next(sg);
+			if (num_sgs != 0 && sg) {
 				block_len = sg_dma_len(sg);
 				addr = (u64) sg_dma_address(sg);
 				addr += sent_len;
diff --git a/drivers/usb/mtu3/mtu3_debugfs.c b/drivers/usb/mtu3/mtu3_debugfs.c
index c96e5dab0a48..fdeade6254ae 100644
--- a/drivers/usb/mtu3/mtu3_debugfs.c
+++ b/drivers/usb/mtu3/mtu3_debugfs.c
@@ -276,7 +276,7 @@ static const struct file_operations mtu3_ep_fops = {
 	.release = single_release,
 };
 
-static struct debugfs_reg32 mtu3_prb_regs[] = {
+static const struct debugfs_reg32 mtu3_prb_regs[] = {
 	dump_prb_reg("enable", U3D_SSUSB_PRB_CTRL0),
 	dump_prb_reg("byte-sell", U3D_SSUSB_PRB_CTRL1),
 	dump_prb_reg("byte-selh", U3D_SSUSB_PRB_CTRL2),
@@ -349,7 +349,7 @@ static const struct file_operations mtu3_probe_fops = {
 
 static void mtu3_debugfs_create_prb_files(struct mtu3 *mtu)
 {
 	struct ssusb_mtk *ssusb = mtu->ssusb;
-	struct debugfs_reg32 *regs;
+	const struct debugfs_reg32 *regs;
 	struct dentry *dir_prb;
 	int i;
 
diff --git a/drivers/usb/phy/phy-twl6030-usb.c b/drivers/usb/phy/phy-twl6030-usb.c
index bfebf1f2e991..9a7e655d5280 100644
--- a/drivers/usb/phy/phy-twl6030-usb.c
+++ b/drivers/usb/phy/phy-twl6030-usb.c
@@ -377,7 +377,7 @@ static int twl6030_usb_probe(struct platform_device *pdev)
 	if (status < 0) {
 		dev_err(&pdev->dev, "can't get IRQ %d, err %d\n",
 			twl->irq1, status);
-		return status;
+		goto err_put_regulator;
 	}
 
 	status = request_threaded_irq(twl->irq2, NULL, twl6030_usb_irq,
@@ -386,8 +386,7 @@ static int twl6030_usb_probe(struct platform_device *pdev)
 	if (status < 0) {
 		dev_err(&pdev->dev, "can't get IRQ %d, err %d\n",
 			twl->irq2, status);
-		free_irq(twl->irq1, twl);
-		return status;
+		goto err_free_irq1;
 	}
 
 	twl->asleep = 0;
@@ -396,6 +395,13 @@ static int twl6030_usb_probe(struct platform_device *pdev)
 	dev_info(&pdev->dev, "Initialized TWL6030 USB module\n");
 
 	return 0;
+
+err_free_irq1:
+	free_irq(twl->irq1, twl);
+err_put_regulator:
+	regulator_put(twl->usb3v3);
+
+	return status;
 }
 
 static int twl6030_usb_remove(struct platform_device *pdev)
diff --git a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c
index 67c5139cfa0d..c22e5c4bbf1a 100644
--- a/drivers/usb/typec/mux/intel_pmc_mux.c
+++ b/drivers/usb/typec/mux/intel_pmc_mux.c
@@ -63,6 +63,7 @@ enum {
 #define PMC_USB_ALTMODE_DP_MODE_SHIFT	8
 
 /* TBT specific Mode Data bits */
+#define PMC_USB_ALTMODE_HPD_HIGH	BIT(14)
 #define PMC_USB_ALTMODE_TBT_TYPE	BIT(17)
 #define PMC_USB_ALTMODE_CABLE_TYPE	BIT(18)
 #define PMC_USB_ALTMODE_ACTIVE_LINK	BIT(20)
@@ -74,8 +75,8 @@ enum {
 #define PMC_USB_ALTMODE_TBT_GEN(_g_)	(((_g_) & GENMASK(1, 0)) << 28)
 
 /* Display HPD Request bits */
+#define PMC_USB_DP_HPD_LVL		BIT(4)
 #define PMC_USB_DP_HPD_IRQ		BIT(5)
-#define PMC_USB_DP_HPD_LVL		BIT(6)
 
 struct pmc_usb;
 
@@ -158,8 +159,7 @@ pmc_usb_mux_dp(struct pmc_usb_port *port, struct typec_mux_state *state)
 			  PMC_USB_ALTMODE_DP_MODE_SHIFT;
 
 	if (data->status & DP_STATUS_HPD_STATE)
-		req.mode_data |= PMC_USB_DP_HPD_LVL <<
-				 PMC_USB_ALTMODE_DP_MODE_SHIFT;
+		req.mode_data |= PMC_USB_ALTMODE_HPD_HIGH;
 
 	return pmc_usb_command(port, (void *)&req, sizeof(req));
 }
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
index 7957d2d41fc4..01c456f7c1f7 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
@@ -89,15 +89,14 @@ static struct vdpasim *dev_to_sim(struct device *dev)
 static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx)
 {
 	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
-	int ret;
 
-	ret = vringh_init_iotlb(&vq->vring, vdpasim_features,
-			VDPASIM_QUEUE_MAX, false,
-			(struct vring_desc *)(uintptr_t)vq->desc_addr,
-			(struct vring_avail *)
-			(uintptr_t)vq->driver_addr,
-			(struct vring_used *)
-			(uintptr_t)vq->device_addr);
+	vringh_init_iotlb(&vq->vring, vdpasim_features,
+			  VDPASIM_QUEUE_MAX, false,
+			  (struct vring_desc *)(uintptr_t)vq->desc_addr,
+			  (struct vring_avail *)
+			  (uintptr_t)vq->driver_addr,
+			  (struct vring_used *)
+			  (uintptr_t)vq->device_addr);
 }
 
 static void vdpasim_vq_reset(struct vdpasim_virtqueue *vq)
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index d450e16c5c25..21a59b598ed8 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -730,7 +730,7 @@ static inline void __user *vhost_vq_meta_fetch(struct vhost_virtqueue *vq,
 	if (!map)
 		return NULL;
 
-	return (void *)(uintptr_t)(map->addr + addr - map->start);
+	return (void __user *)(uintptr_t)(map->addr + addr - map->start);
 }
 
 /* Can we switch to this memory table? */
@@ -869,7 +869,7 @@ static void __user *__vhost_get_user_slow(struct vhost_virtqueue *vq,
  * not happen in this case.
  */
 static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,
-					    void *addr, unsigned int size,
+					    void __user *addr, unsigned int size,
 					    int type)
 {
 	void __user *uaddr = vhost_vq_meta_fetch(vq,