Diffstat (limited to 'drivers/net/wireless/ath/ath10k/ce.c')
 -rw-r--r--  drivers/net/wireless/ath/ath10k/ce.c  367
 1 file changed, 202 insertions, 165 deletions
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c index 2a5668b4f6bc..7bbda46cfd93 100644 --- a/drivers/net/wireless/ath/ath10k/ce.c +++ b/drivers/net/wireless/ath/ath10k/ce.c @@ -1,21 +1,13 @@ +// SPDX-License-Identifier: ISC /* * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. * Copyright (c) 2018 The Linux Foundation. All rights reserved. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. */ +#include <linux/export.h> #include "hif.h" #include "ce.h" #include "debug.h" @@ -88,59 +80,13 @@ static inline u32 shadow_sr_wr_ind_addr(struct ath10k *ar, return addr; } -static inline u32 shadow_dst_wr_ind_addr(struct ath10k *ar, - struct ath10k_ce_pipe *ce_state) -{ - u32 ce_id = ce_state->id; - u32 addr = 0; - - switch (ce_id) { - case 1: - addr = 0x00032034; - break; - case 2: - addr = 0x00032038; - break; - case 5: - addr = 0x00032044; - break; - case 7: - addr = 0x0003204C; - break; - case 8: - addr = 0x00032050; - break; - case 9: - addr = 0x00032054; - break; - case 10: - addr = 0x00032058; - break; - case 11: - addr = 0x0003205C; - break; - default: - ath10k_warn(ar, "invalid CE id: %d", ce_id); - break; - } - - return addr; -} - static inline unsigned int ath10k_set_ring_byte(unsigned int offset, - struct ath10k_hw_ce_regs_addr_map *addr_map) + const struct ath10k_hw_ce_regs_addr_map *addr_map) { return ((offset << addr_map->lsb) & addr_map->mask); } -static inline unsigned int -ath10k_get_ring_byte(unsigned int offset, - struct ath10k_hw_ce_regs_addr_map *addr_map) -{ - return ((offset & addr_map->mask) >> (addr_map->lsb)); -} - static inline u32 ath10k_ce_read32(struct ath10k *ar, u32 offset) { struct ath10k_ce *ce = ath10k_ce_priv(ar); @@ -219,20 +165,32 @@ ath10k_ce_shadow_src_ring_write_index_set(struct ath10k *ar, ath10k_ce_write32(ar, shadow_sr_wr_ind_addr(ar, ce_state), value); } -static inline void -ath10k_ce_shadow_dest_ring_write_index_set(struct ath10k *ar, - struct ath10k_ce_pipe *ce_state, - unsigned int value) +static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar, + u32 ce_id, + u64 addr) { - ath10k_ce_write32(ar, shadow_dst_wr_ind_addr(ar, ce_state), value); + struct ath10k_ce *ce = ath10k_ce_priv(ar); + struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id]; + u32 ce_ctrl_addr = ath10k_ce_base_address(ar, ce_id); + u32 addr_lo = lower_32_bits(addr); + + ath10k_ce_write32(ar, ce_ctrl_addr + + ar->hw_ce_regs->sr_base_addr_lo, addr_lo); + + if (ce_state->ops->ce_set_src_ring_base_addr_hi) { + ce_state->ops->ce_set_src_ring_base_addr_hi(ar, ce_ctrl_addr, + addr); + } } -static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar, - 
u32 ce_ctrl_addr, - unsigned int addr) +static void ath10k_ce_set_src_ring_base_addr_hi(struct ath10k *ar, + u32 ce_ctrl_addr, + u64 addr) { + u32 addr_hi = upper_32_bits(addr) & CE_DESC_ADDR_HI_MASK; + ath10k_ce_write32(ar, ce_ctrl_addr + - ar->hw_ce_regs->sr_base_addr, addr); + ar->hw_ce_regs->sr_base_addr_hi, addr_hi); } static inline void ath10k_ce_src_ring_size_set(struct ath10k *ar, @@ -247,7 +205,7 @@ static inline void ath10k_ce_src_ring_dmax_set(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int n) { - struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs; + const struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs; u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr + ctrl_regs->addr); @@ -261,7 +219,7 @@ static inline void ath10k_ce_src_ring_byte_swap_set(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int n) { - struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs; + const struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs; u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr + ctrl_regs->addr); @@ -275,7 +233,7 @@ static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int n) { - struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs; + const struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs; u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr + ctrl_regs->addr); @@ -313,11 +271,36 @@ static inline u32 ath10k_ce_dest_ring_read_index_get(struct ath10k *ar, } static inline void ath10k_ce_dest_ring_base_addr_set(struct ath10k *ar, - u32 ce_ctrl_addr, - u32 addr) + u32 ce_id, + u64 addr) { + struct ath10k_ce *ce = ath10k_ce_priv(ar); + struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id]; + u32 ce_ctrl_addr = ath10k_ce_base_address(ar, ce_id); + u32 addr_lo = lower_32_bits(addr); + ath10k_ce_write32(ar, ce_ctrl_addr + - ar->hw_ce_regs->dr_base_addr, addr); + ar->hw_ce_regs->dr_base_addr_lo, addr_lo); + + if (ce_state->ops->ce_set_dest_ring_base_addr_hi) { + ce_state->ops->ce_set_dest_ring_base_addr_hi(ar, ce_ctrl_addr, + addr); + } +} + +static void ath10k_ce_set_dest_ring_base_addr_hi(struct ath10k *ar, + u32 ce_ctrl_addr, + u64 addr) +{ + u32 addr_hi = upper_32_bits(addr) & CE_DESC_ADDR_HI_MASK; + u32 reg_value; + + reg_value = ath10k_ce_read32(ar, ce_ctrl_addr + + ar->hw_ce_regs->dr_base_addr_hi); + reg_value &= ~CE_DESC_ADDR_HI_MASK; + reg_value |= addr_hi; + ath10k_ce_write32(ar, ce_ctrl_addr + + ar->hw_ce_regs->dr_base_addr_hi, reg_value); } static inline void ath10k_ce_dest_ring_size_set(struct ath10k *ar, @@ -332,7 +315,7 @@ static inline void ath10k_ce_src_ring_highmark_set(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int n) { - struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr; + const struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr; u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + srcr_wm->addr); ath10k_ce_write32(ar, ce_ctrl_addr + srcr_wm->addr, @@ -344,7 +327,7 @@ static inline void ath10k_ce_src_ring_lowmark_set(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int n) { - struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr; + const struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr; u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + srcr_wm->addr); ath10k_ce_write32(ar, ce_ctrl_addr + srcr_wm->addr, @@ -356,7 +339,7 @@ static inline void ath10k_ce_dest_ring_highmark_set(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int n) { - struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = 
ar->hw_ce_regs->wm_dstr; + const struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr; u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + dstr_wm->addr); ath10k_ce_write32(ar, ce_ctrl_addr + dstr_wm->addr, @@ -368,7 +351,7 @@ static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int n) { - struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr; + const struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr; u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + dstr_wm->addr); ath10k_ce_write32(ar, ce_ctrl_addr + dstr_wm->addr, @@ -379,7 +362,7 @@ static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar, static inline void ath10k_ce_copy_complete_inter_enable(struct ath10k *ar, u32 ce_ctrl_addr) { - struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie; + const struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie; u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr); @@ -391,7 +374,7 @@ static inline void ath10k_ce_copy_complete_inter_enable(struct ath10k *ar, static inline void ath10k_ce_copy_complete_intr_disable(struct ath10k *ar, u32 ce_ctrl_addr) { - struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie; + const struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie; u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr); @@ -403,7 +386,7 @@ static inline void ath10k_ce_copy_complete_intr_disable(struct ath10k *ar, static inline void ath10k_ce_watermark_intr_disable(struct ath10k *ar, u32 ce_ctrl_addr) { - struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs; + const struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs; u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr); @@ -412,23 +395,10 @@ static inline void ath10k_ce_watermark_intr_disable(struct ath10k *ar, host_ie_addr & ~(wm_regs->wm_mask)); } -static inline void ath10k_ce_error_intr_enable(struct ath10k *ar, - u32 ce_ctrl_addr) -{ - struct ath10k_hw_ce_misc_regs *misc_regs = ar->hw_ce_regs->misc_regs; - - u32 misc_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr + - ar->hw_ce_regs->misc_ie_addr); - - ath10k_ce_write32(ar, - ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr, - misc_ie_addr | misc_regs->err_mask); -} - static inline void ath10k_ce_error_intr_disable(struct ath10k *ar, u32 ce_ctrl_addr) { - struct ath10k_hw_ce_misc_regs *misc_regs = ar->hw_ce_regs->misc_regs; + const struct ath10k_hw_ce_misc_regs *misc_regs = ar->hw_ce_regs->misc_regs; u32 misc_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr); @@ -442,7 +412,7 @@ static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int mask) { - struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs; + const struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs; ath10k_ce_write32(ar, ce_ctrl_addr + wm_regs->addr, mask); } @@ -500,14 +470,8 @@ static int _ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state, write_index = CE_RING_IDX_INCR(nentries_mask, write_index); /* WORKAROUND */ - if (!(flags & CE_SEND_FLAG_GATHER)) { - if (ar->hw_params.shadow_reg_support) - ath10k_ce_shadow_src_ring_write_index_set(ar, ce_state, - write_index); - else - ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, - write_index); - } + if (!(flags & CE_SEND_FLAG_GATHER)) + ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index); src_ring->write_index = 
write_index; exit: @@ -563,7 +527,7 @@ static int _ath10k_ce_send_nolock_64(struct ath10k_ce_pipe *ce_state, addr = (__le32 *)&sdesc.addr; - flags |= upper_32_bits(buffer) & CE_DESC_FLAGS_GET_MASK; + flags |= upper_32_bits(buffer) & CE_DESC_ADDR_HI_MASK; addr[0] = __cpu_to_le32(buffer); addr[1] = __cpu_to_le32(flags); if (flags & CE_SEND_FLAG_GATHER) @@ -581,8 +545,14 @@ static int _ath10k_ce_send_nolock_64(struct ath10k_ce_pipe *ce_state, /* Update Source Ring Write Index */ write_index = CE_RING_IDX_INCR(nentries_mask, write_index); - if (!(flags & CE_SEND_FLAG_GATHER)) - ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index); + if (!(flags & CE_SEND_FLAG_GATHER)) { + if (ar->hw_params.shadow_reg_support) + ath10k_ce_shadow_src_ring_write_index_set(ar, ce_state, + write_index); + else + ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, + write_index); + } src_ring->write_index = write_index; exit: @@ -731,7 +701,7 @@ static int __ath10k_ce_rx_post_buf_64(struct ath10k_ce_pipe *pipe, return -ENOSPC; desc->addr = __cpu_to_le64(paddr); - desc->addr &= __cpu_to_le64(CE_DESC_37BIT_ADDR_MASK); + desc->addr &= __cpu_to_le64(CE_DESC_ADDR_MASK); desc->nbytes = 0; @@ -1032,8 +1002,8 @@ EXPORT_SYMBOL(ath10k_ce_revoke_recv_next); * Guts of ath10k_ce_completed_send_next. * The caller takes responsibility for any necessary locking. */ -int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state, - void **per_transfer_contextp) +static int _ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state, + void **per_transfer_contextp) { struct ath10k_ce_ring *src_ring = ce_state->src_ring; u32 ctrl_addr = ce_state->ctrl_addr; @@ -1084,6 +1054,66 @@ int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state, return 0; } + +static int _ath10k_ce_completed_send_next_nolock_64(struct ath10k_ce_pipe *ce_state, + void **per_transfer_contextp) +{ + struct ath10k_ce_ring *src_ring = ce_state->src_ring; + u32 ctrl_addr = ce_state->ctrl_addr; + struct ath10k *ar = ce_state->ar; + unsigned int nentries_mask = src_ring->nentries_mask; + unsigned int sw_index = src_ring->sw_index; + unsigned int read_index; + struct ce_desc_64 *desc; + + if (src_ring->hw_index == sw_index) { + /* + * The SW completion index has caught up with the cached + * version of the HW completion index. + * Update the cached HW completion index to see whether + * the SW has really caught up to the HW, or if the cached + * value of the HW index has become stale. 
+ */ + + read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr); + if (read_index == 0xffffffff) + return -ENODEV; + + read_index &= nentries_mask; + src_ring->hw_index = read_index; + } + + if (ar->hw_params.rri_on_ddr) + read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr); + else + read_index = src_ring->hw_index; + + if (read_index == sw_index) + return -EIO; + + if (per_transfer_contextp) + *per_transfer_contextp = + src_ring->per_transfer_context[sw_index]; + + /* sanity */ + src_ring->per_transfer_context[sw_index] = NULL; + desc = CE_SRC_RING_TO_DESC_64(src_ring->base_addr_owner_space, + sw_index); + desc->nbytes = 0; + + /* Update sw_index */ + sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index); + src_ring->sw_index = sw_index; + + return 0; +} + +int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state, + void **per_transfer_contextp) +{ + return ce_state->ops->ce_completed_send_next_nolock(ce_state, + per_transfer_contextp); +} EXPORT_SYMBOL(ath10k_ce_completed_send_next_nolock); static void ath10k_ce_extract_desc_data(struct ath10k *ar, @@ -1202,39 +1232,34 @@ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id) { struct ath10k_ce *ce = ath10k_ce_priv(ar); struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id]; - struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs; + const struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs; u32 ctrl_addr = ce_state->ctrl_addr; - spin_lock_bh(&ce->ce_lock); - - /* Clear the copy-complete interrupts that will be handled here. */ + /* + * Clear before handling + * + * Misc CE interrupts are not being handled, but still need + * to be cleared. + * + * NOTE: When the last copy engine interrupt is cleared the + * hardware will go to sleep. Once this happens any access to + * the CE registers can cause a hardware fault. + */ ath10k_ce_engine_int_status_clear(ar, ctrl_addr, - wm_regs->cc_mask); - - spin_unlock_bh(&ce->ce_lock); + wm_regs->cc_mask | wm_regs->wm_mask); if (ce_state->recv_cb) ce_state->recv_cb(ce_state); if (ce_state->send_cb) ce_state->send_cb(ce_state); - - spin_lock_bh(&ce->ce_lock); - - /* - * Misc CE interrupts are not being handled, but still need - * to be cleared. - */ - ath10k_ce_engine_int_status_clear(ar, ctrl_addr, wm_regs->wm_mask); - - spin_unlock_bh(&ce->ce_lock); } EXPORT_SYMBOL(ath10k_ce_per_engine_service); /* * Handler for per-engine interrupts on ALL active CEs. 
* This is used in cases where the system is sharing a - * single interrput for all CEs + * single interrupt for all CEs */ void ath10k_ce_per_engine_service_any(struct ath10k *ar) @@ -1278,45 +1303,55 @@ static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state) ath10k_ce_watermark_intr_disable(ar, ctrl_addr); } -int ath10k_ce_disable_interrupts(struct ath10k *ar) +void ath10k_ce_disable_interrupt(struct ath10k *ar, int ce_id) { struct ath10k_ce *ce = ath10k_ce_priv(ar); struct ath10k_ce_pipe *ce_state; u32 ctrl_addr; - int ce_id; - for (ce_id = 0; ce_id < CE_COUNT; ce_id++) { - ce_state = &ce->ce_states[ce_id]; - if (ce_state->attr_flags & CE_ATTR_POLL) - continue; + ce_state = &ce->ce_states[ce_id]; + if (ce_state->attr_flags & CE_ATTR_POLL) + return; - ctrl_addr = ath10k_ce_base_address(ar, ce_id); + ctrl_addr = ath10k_ce_base_address(ar, ce_id); - ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr); - ath10k_ce_error_intr_disable(ar, ctrl_addr); - ath10k_ce_watermark_intr_disable(ar, ctrl_addr); - } + ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr); + ath10k_ce_error_intr_disable(ar, ctrl_addr); + ath10k_ce_watermark_intr_disable(ar, ctrl_addr); +} +EXPORT_SYMBOL(ath10k_ce_disable_interrupt); - return 0; +void ath10k_ce_disable_interrupts(struct ath10k *ar) +{ + int ce_id; + + for (ce_id = 0; ce_id < CE_COUNT; ce_id++) + ath10k_ce_disable_interrupt(ar, ce_id); } EXPORT_SYMBOL(ath10k_ce_disable_interrupts); -void ath10k_ce_enable_interrupts(struct ath10k *ar) +void ath10k_ce_enable_interrupt(struct ath10k *ar, int ce_id) { struct ath10k_ce *ce = ath10k_ce_priv(ar); - int ce_id; struct ath10k_ce_pipe *ce_state; + ce_state = &ce->ce_states[ce_id]; + if (ce_state->attr_flags & CE_ATTR_POLL) + return; + + ath10k_ce_per_engine_handler_adjust(ce_state); +} +EXPORT_SYMBOL(ath10k_ce_enable_interrupt); + +void ath10k_ce_enable_interrupts(struct ath10k *ar) +{ + int ce_id; + /* Enable interrupts for copy engine that * are not using polling mode. 
*/ - for (ce_id = 0; ce_id < CE_COUNT; ce_id++) { - ce_state = &ce->ce_states[ce_id]; - if (ce_state->attr_flags & CE_ATTR_POLL) - continue; - - ath10k_ce_per_engine_handler_adjust(ce_state); - } + for (ce_id = 0; ce_id < CE_COUNT; ce_id++) + ath10k_ce_enable_interrupt(ar, ce_id); } EXPORT_SYMBOL(ath10k_ce_enable_interrupts); @@ -1346,7 +1381,7 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar, ath10k_ce_src_ring_write_index_get(ar, ctrl_addr); src_ring->write_index &= src_ring->nentries_mask; - ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr, + ath10k_ce_src_ring_base_addr_set(ar, ce_id, src_ring->base_addr_ce_space); ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries); ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max); @@ -1355,7 +1390,7 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar, ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries); ath10k_dbg(ar, ATH10K_DBG_BOOT, - "boot init ce src ring id %d entries %d base_addr %pK\n", + "boot init ce src ring id %d entries %d base_addr %p\n", ce_id, nentries, src_ring->base_addr_owner_space); return 0; @@ -1385,7 +1420,7 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar, ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr); dest_ring->write_index &= dest_ring->nentries_mask; - ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr, + ath10k_ce_dest_ring_base_addr_set(ar, ce_id, dest_ring->base_addr_ce_space); ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries); ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0); @@ -1393,7 +1428,7 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar, ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries); ath10k_dbg(ar, ATH10K_DBG_BOOT, - "boot ce dest ring id %d entries %d base_addr %pK\n", + "boot ce dest ring id %d entries %d base_addr %p\n", ce_id, nentries, dest_ring->base_addr_owner_space); return 0; @@ -1404,12 +1439,12 @@ static int ath10k_ce_alloc_shadow_base(struct ath10k *ar, u32 nentries) { src_ring->shadow_base_unaligned = kcalloc(nentries, - sizeof(struct ce_desc), + sizeof(struct ce_desc_64), GFP_KERNEL); if (!src_ring->shadow_base_unaligned) return -ENOMEM; - src_ring->shadow_base = (struct ce_desc *) + src_ring->shadow_base = (struct ce_desc_64 *) PTR_ALIGN(src_ring->shadow_base_unaligned, CE_DESC_RING_ALIGN); return 0; @@ -1554,7 +1589,8 @@ ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id, */ dest_ring->base_addr_owner_space_unaligned = dma_alloc_coherent(ar->dev, - (nentries * sizeof(struct ce_desc) + CE_DESC_RING_ALIGN), + (nentries * sizeof(struct ce_desc) + + CE_DESC_RING_ALIGN), &base_addr, GFP_KERNEL); if (!dest_ring->base_addr_owner_space_unaligned) { kfree(dest_ring); @@ -1609,9 +1645,6 @@ ath10k_ce_alloc_dest_ring_64(struct ath10k *ar, unsigned int ce_id, /* Correctly initialize memory to 0 to prevent garbage * data crashing system when download firmware */ - memset(dest_ring->base_addr_owner_space_unaligned, 0, - nentries * sizeof(struct ce_desc_64) + CE_DESC_RING_ALIGN); - dest_ring->base_addr_owner_space = PTR_ALIGN(dest_ring->base_addr_owner_space_unaligned, CE_DESC_RING_ALIGN); @@ -1660,7 +1693,7 @@ static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id) { u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id); - ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr, 0); + ath10k_ce_src_ring_base_addr_set(ar, ce_id, 0); ath10k_ce_src_ring_size_set(ar, ctrl_addr, 0); ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, 0); ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, 0); @@ -1670,7 +1703,7 @@ static void 
ath10k_ce_deinit_dest_ring(struct ath10k *ar, unsigned int ce_id) { u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id); - ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr, 0); + ath10k_ce_dest_ring_base_addr_set(ar, ce_id, 0); ath10k_ce_dest_ring_size_set(ar, ctrl_addr, 0); ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, 0); } @@ -1760,7 +1793,7 @@ void ath10k_ce_dump_registers(struct ath10k *ar, struct ath10k_ce_crash_data ce_data; u32 addr, id; - lockdep_assert_held(&ar->data_lock); + lockdep_assert_held(&ar->dump_mutex); ath10k_err(ar, "Copy Engine register dump:\n"); @@ -1802,6 +1835,9 @@ static const struct ath10k_ce_ops ce_ops = { .ce_extract_desc_data = ath10k_ce_extract_desc_data, .ce_free_pipe = _ath10k_ce_free_pipe, .ce_send_nolock = _ath10k_ce_send_nolock, + .ce_set_src_ring_base_addr_hi = NULL, + .ce_set_dest_ring_base_addr_hi = NULL, + .ce_completed_send_next_nolock = _ath10k_ce_completed_send_next_nolock, }; static const struct ath10k_ce_ops ce_64_ops = { @@ -1814,6 +1850,9 @@ static const struct ath10k_ce_ops ce_64_ops = { .ce_extract_desc_data = ath10k_ce_extract_desc_data_64, .ce_free_pipe = _ath10k_ce_free_pipe_64, .ce_send_nolock = _ath10k_ce_send_nolock_64, + .ce_set_src_ring_base_addr_hi = ath10k_ce_set_src_ring_base_addr_hi, + .ce_set_dest_ring_base_addr_hi = ath10k_ce_set_dest_ring_base_addr_hi, + .ce_completed_send_next_nolock = _ath10k_ce_completed_send_next_nolock_64, }; static void ath10k_ce_set_ops(struct ath10k *ar, @@ -1909,7 +1948,7 @@ void ath10k_ce_alloc_rri(struct ath10k *ar) lower_32_bits(ce->paddr_rri)); ath10k_ce_write32(ar, ar->hw_ce_regs->ce_rri_high, (upper_32_bits(ce->paddr_rri) & - CE_DESC_FLAGS_GET_MASK)); + CE_DESC_ADDR_HI_MASK)); for (i = 0; i < CE_COUNT; i++) { ctrl1_regs = ar->hw_ce_regs->ctrl1_regs->addr; @@ -1918,8 +1957,6 @@ void ath10k_ce_alloc_rri(struct ath10k *ar) value |= ar->hw_ce_regs->upd->mask; ath10k_ce_write32(ar, ce_base_addr + ctrl1_regs, value); } - - memset(ce->vaddr_rri, 0, CE_COUNT * sizeof(u32)); } EXPORT_SYMBOL(ath10k_ce_alloc_rri); |
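
A large part of this diff routes ring base-address programming through per-pipe ops so that targets with wider DMA addressing can write the upper address bits into a separate `*_base_addr_hi` register (see `ath10k_ce_set_src_ring_base_addr_hi` and `ath10k_ce_set_dest_ring_base_addr_hi`, wired up only in `ce_64_ops`). The following standalone sketch illustrates that lo/hi split and the NULL-checked optional hook; the register layout, field names, and the 5-bit high mask are simplified assumptions, not the driver's actual structures.

```c
/* Sketch only: split a 64-bit ring base address into a low word and a
 * masked high word, writing the high word through an optional hook as the
 * patch does for ce_set_src_ring_base_addr_hi / ce_set_dest_ring_base_addr_hi.
 */
#include <stdint.h>
#include <stdio.h>

#define DESC_ADDR_HI_MASK 0x1f	/* assumed: bits 32..36 of a 37-bit DMA address */

struct ring_regs {
	uint32_t base_lo;
	uint32_t base_hi;
};

struct ring_ops {
	/* NULL on targets that only support 32-bit descriptor addresses */
	void (*set_base_addr_hi)(struct ring_regs *regs, uint64_t addr);
};

static void set_base_addr_hi(struct ring_regs *regs, uint64_t addr)
{
	regs->base_hi = (uint32_t)(addr >> 32) & DESC_ADDR_HI_MASK;
}

static void ring_base_addr_set(const struct ring_ops *ops,
			       struct ring_regs *regs, uint64_t addr)
{
	regs->base_lo = (uint32_t)addr;	/* lower_32_bits() */

	if (ops->set_base_addr_hi)	/* only on 64-bit capable targets */
		ops->set_base_addr_hi(regs, addr);
}

int main(void)
{
	struct ring_regs regs = { 0 };
	const struct ring_ops ops64 = { .set_base_addr_hi = set_base_addr_hi };

	ring_base_addr_set(&ops64, &regs, 0x123456789ULL);
	printf("lo=0x%08x hi=0x%02x\n", regs.base_lo, regs.base_hi);
	return 0;
}
```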
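The interrupt paths are refactored the same way: `ath10k_ce_disable_interrupt` and `ath10k_ce_enable_interrupt` now act on a single copy engine (skipping engines marked for polling), and the plural entry points become loops over the per-engine helpers. A minimal sketch of that pattern, with the flag value and struct fields as illustrative stand-ins:

```c
/* Sketch only: per-engine helper plus an "all engines" loop wrapper. */
#include <stdbool.h>
#include <stdio.h>

#define CE_COUNT     12
#define CE_ATTR_POLL 0x4	/* assumed flag value for illustration */

struct ce_pipe {
	unsigned int attr_flags;
	bool irq_enabled;
};

static struct ce_pipe ce_states[CE_COUNT];

static void ce_disable_interrupt(int ce_id)
{
	struct ce_pipe *pipe = &ce_states[ce_id];

	if (pipe->attr_flags & CE_ATTR_POLL)	/* polled engines have no IRQ to mask */
		return;

	pipe->irq_enabled = false;	/* stands in for the register writes */
}

static void ce_disable_interrupts(void)
{
	int ce_id;

	for (ce_id = 0; ce_id < CE_COUNT; ce_id++)
		ce_disable_interrupt(ce_id);
}

int main(void)
{
	ce_states[5].attr_flags = CE_ATTR_POLL;
	ce_disable_interrupts();
	printf("ce5 irq_enabled=%d\n", ce_states[5].irq_enabled);
	return 0;
}
```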
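The new `_ath10k_ce_completed_send_next_nolock_64` follows the same index-catch-up logic as the 32-bit variant: when the software index has caught up with the cached hardware index, the hardware read index is re-read (masked to the ring size) before deciding whether another send descriptor has actually completed. The sketch below shows just that core loop; the hardware read is stubbed out, and the error and `rri_on_ddr` paths from the driver are omitted.

```c
/* Sketch only: ring-index catch-up for send completion on a power-of-two ring. */
#include <stdio.h>

#define RING_NENTRIES 8	/* power of two, as in the driver */
#define RING_IDX_INCR(mask, idx) (((idx) + 1) & (mask))

struct src_ring {
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int hw_index;	/* cached hardware read index */
};

/* stand-in for ath10k_ce_src_ring_read_index_get() */
static unsigned int hw_read_index = 3;
static unsigned int read_hw_index(void) { return hw_read_index; }

/* returns 0 and advances sw_index if a send completed, -1 otherwise */
static int completed_send_next(struct src_ring *ring)
{
	if (ring->hw_index == ring->sw_index) {
		/* cached value may be stale; refresh it from hardware */
		ring->hw_index = read_hw_index() & ring->nentries_mask;
	}

	if (ring->hw_index == ring->sw_index)
		return -1;	/* nothing completed yet */

	ring->sw_index = RING_IDX_INCR(ring->nentries_mask, ring->sw_index);
	return 0;
}

int main(void)
{
	struct src_ring ring = { .nentries_mask = RING_NENTRIES - 1 };
	int n = 0;

	while (completed_send_next(&ring) == 0)
		n++;
	printf("completed %d sends\n", n);	/* 3 with the stubbed index */
	return 0;
}
```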
