Diffstat (limited to 'drivers/misc/mei/hw-me.c')
| -rw-r--r-- | drivers/misc/mei/hw-me.c | 647 |
1 file changed, 506 insertions, 141 deletions
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c index 71216affcab1..d4612c659784 100644 --- a/drivers/misc/mei/hw-me.c +++ b/drivers/misc/mei/hw-me.c @@ -1,17 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 /* - * + * Copyright (c) 2003-2022, Intel Corporation. All rights reserved. * Intel Management Engine Interface (Intel MEI) Linux driver - * Copyright (c) 2003-2012, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * */ #include <linux/pci.h> @@ -19,6 +9,8 @@ #include <linux/kthread.h> #include <linux/interrupt.h> #include <linux/pm_runtime.h> +#include <linux/sizes.h> +#include <linux/delay.h> #include "mei_dev.h" #include "hbm.h" @@ -92,7 +84,7 @@ static inline u32 mei_me_mecsr_read(const struct mei_device *dev) u32 reg; reg = mei_me_reg_read(to_me_hw(dev), ME_CSR_HA); - trace_mei_reg_read(dev->dev, "ME_CSR_HA", ME_CSR_HA, reg); + trace_mei_reg_read(&dev->dev, "ME_CSR_HA", ME_CSR_HA, reg); return reg; } @@ -109,7 +101,7 @@ static inline u32 mei_hcsr_read(const struct mei_device *dev) u32 reg; reg = mei_me_reg_read(to_me_hw(dev), H_CSR); - trace_mei_reg_read(dev->dev, "H_CSR", H_CSR, reg); + trace_mei_reg_read(&dev->dev, "H_CSR", H_CSR, reg); return reg; } @@ -122,7 +114,7 @@ static inline u32 mei_hcsr_read(const struct mei_device *dev) */ static inline void mei_hcsr_write(struct mei_device *dev, u32 reg) { - trace_mei_reg_write(dev->dev, "H_CSR", H_CSR, reg); + trace_mei_reg_write(&dev->dev, "H_CSR", H_CSR, reg); mei_me_reg_write(to_me_hw(dev), H_CSR, reg); } @@ -164,7 +156,7 @@ static inline u32 mei_me_d0i3c_read(const struct mei_device *dev) u32 reg; reg = mei_me_reg_read(to_me_hw(dev), H_D0I3C); - trace_mei_reg_read(dev->dev, "H_D0I3C", H_D0I3C, reg); + trace_mei_reg_read(&dev->dev, "H_D0I3C", H_D0I3C, reg); return reg; } @@ -177,11 +169,32 @@ static inline u32 mei_me_d0i3c_read(const struct mei_device *dev) */ static inline void mei_me_d0i3c_write(struct mei_device *dev, u32 reg) { - trace_mei_reg_write(dev->dev, "H_D0I3C", H_D0I3C, reg); + trace_mei_reg_write(&dev->dev, "H_D0I3C", H_D0I3C, reg); mei_me_reg_write(to_me_hw(dev), H_D0I3C, reg); } /** + * mei_me_trc_status - read trc status register + * + * @dev: mei device + * @trc: trc status register value + * + * Return: 0 on success, error otherwise + */ +static int mei_me_trc_status(struct mei_device *dev, u32 *trc) +{ + struct mei_me_hw *hw = to_me_hw(dev); + + if (!hw->cfg->hw_trc_supported) + return -EOPNOTSUPP; + + *trc = mei_me_reg_read(hw, ME_TRC); + trace_mei_reg_read(&dev->dev, "ME_TRC", ME_TRC, *trc); + + return 0; +} + +/** * mei_me_fw_status - read fw status register from pci config space * * @dev: mei device @@ -192,20 +205,19 @@ static inline void mei_me_d0i3c_write(struct mei_device *dev, u32 reg) static int mei_me_fw_status(struct mei_device *dev, struct mei_fw_status *fw_status) { - struct pci_dev *pdev = to_pci_dev(dev->dev); struct mei_me_hw *hw = to_me_hw(dev); const struct mei_fw_status *fw_src = &hw->cfg->fw_status; int ret; int i; - if (!fw_status) + if (!fw_status || !hw->read_fws) return -EINVAL; fw_status->count = fw_src->count; for (i = 0; i < fw_src->count && i < 
MEI_FW_STATUS_MAX; i++) { - ret = pci_read_config_dword(pdev, fw_src->status[i], - &fw_status->status[i]); - trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HSF_X", + ret = hw->read_fws(dev, fw_src->status[i], + &fw_status->status[i]); + trace_mei_pci_cfg_read(&dev->dev, "PCI_CFG_HFS_X", fw_src->status[i], fw_status->status[i]); if (ret) @@ -219,20 +231,27 @@ static int mei_me_fw_status(struct mei_device *dev, * mei_me_hw_config - configure hw dependent settings * * @dev: mei device + * + * Return: + * * -EINVAL when read_fws is not set + * * 0 on success + * */ -static void mei_me_hw_config(struct mei_device *dev) +static int mei_me_hw_config(struct mei_device *dev) { - struct pci_dev *pdev = to_pci_dev(dev->dev); struct mei_me_hw *hw = to_me_hw(dev); u32 hcsr, reg; + if (WARN_ON(!hw->read_fws)) + return -EINVAL; + /* Doesn't change in runtime */ hcsr = mei_hcsr_read(dev); - dev->hbuf_depth = (hcsr & H_CBD) >> 24; + hw->hbuf_depth = (hcsr & H_CBD) >> 24; reg = 0; - pci_read_config_dword(pdev, PCI_CFG_HFS_1, ®); - trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg); + hw->read_fws(dev, PCI_CFG_HFS_1, ®); + trace_mei_pci_cfg_read(&dev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg); hw->d0i3_supported = ((reg & PCI_CFG_HFS_1_D0I3_MSK) == PCI_CFG_HFS_1_D0I3_MSK); @@ -242,6 +261,8 @@ static void mei_me_hw_config(struct mei_device *dev) if (reg & H_D0I3C_I3) hw->pg_state = MEI_PG_ON; } + + return 0; } /** @@ -278,7 +299,7 @@ static inline void me_intr_disable(struct mei_device *dev, u32 hcsr) } /** - * mei_me_intr_clear - clear and stop interrupts + * me_intr_clear - clear and stop interrupts * * @dev: the device structure * @hcsr: supplied hcsr register value @@ -307,9 +328,12 @@ static void mei_me_intr_clear(struct mei_device *dev) */ static void mei_me_intr_enable(struct mei_device *dev) { - u32 hcsr = mei_hcsr_read(dev); + u32 hcsr; + + if (mei_me_hw_use_polling(to_me_hw(dev))) + return; - hcsr |= H_CSR_IE_MASK; + hcsr = mei_hcsr_read(dev) | H_CSR_IE_MASK; mei_hcsr_set(dev, hcsr); } @@ -332,9 +356,12 @@ static void mei_me_intr_disable(struct mei_device *dev) */ static void mei_me_synchronize_irq(struct mei_device *dev) { - struct pci_dev *pdev = to_pci_dev(dev->dev); + struct mei_me_hw *hw = to_me_hw(dev); + + if (mei_me_hw_use_polling(hw)) + return; - synchronize_irq(pdev->irq); + synchronize_irq(hw->irq); } /** @@ -349,9 +376,6 @@ static void mei_me_hw_reset_release(struct mei_device *dev) hcsr |= H_IG; hcsr &= ~H_RST; mei_hcsr_set(dev, hcsr); - - /* complete this write before we set host ready on another CPU */ - mmiowb(); } /** @@ -363,7 +387,10 @@ static void mei_me_host_set_ready(struct mei_device *dev) { u32 hcsr = mei_hcsr_read(dev); - hcsr |= H_CSR_IE_MASK | H_IG | H_RDY; + if (!mei_me_hw_use_polling(to_me_hw(dev))) + hcsr |= H_CSR_IE_MASK; + + hcsr |= H_IG | H_RDY; mei_hcsr_set(dev, hcsr); } @@ -407,6 +434,40 @@ static bool mei_me_hw_is_resetting(struct mei_device *dev) } /** + * mei_gsc_pxp_check - check for gsc firmware entering pxp mode + * + * @dev: the device structure + */ +static void mei_gsc_pxp_check(struct mei_device *dev) +{ + struct mei_me_hw *hw = to_me_hw(dev); + u32 fwsts5 = 0; + + if (!kind_is_gsc(dev) && !kind_is_gscfi(dev)) + return; + + hw->read_fws(dev, PCI_CFG_HFS_5, &fwsts5); + trace_mei_pci_cfg_read(&dev->dev, "PCI_CFG_HFS_5", PCI_CFG_HFS_5, fwsts5); + + if ((fwsts5 & GSC_CFG_HFS_5_BOOT_TYPE_MSK) == GSC_CFG_HFS_5_BOOT_TYPE_PXP) { + if (dev->gsc_reset_to_pxp == MEI_DEV_RESET_TO_PXP_DEFAULT) + dev->gsc_reset_to_pxp = MEI_DEV_RESET_TO_PXP_PERFORMED; + } 
else { + dev->gsc_reset_to_pxp = MEI_DEV_RESET_TO_PXP_DEFAULT; + } + + if (dev->pxp_mode == MEI_DEV_PXP_DEFAULT) + return; + + if ((fwsts5 & GSC_CFG_HFS_5_BOOT_TYPE_MSK) == GSC_CFG_HFS_5_BOOT_TYPE_PXP) { + dev_dbg(&dev->dev, "pxp mode is ready 0x%08x\n", fwsts5); + dev->pxp_mode = MEI_DEV_PXP_READY; + } else { + dev_dbg(&dev->dev, "pxp mode is not ready 0x%08x\n", fwsts5); + } +} + +/** * mei_me_hw_ready_wait - wait until the me(hw) has turned ready * or timeout is reached * @@ -418,13 +479,15 @@ static int mei_me_hw_ready_wait(struct mei_device *dev) mutex_unlock(&dev->device_lock); wait_event_timeout(dev->wait_hw_ready, dev->recvd_hw_ready, - mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT)); + dev->timeouts.hw_ready); mutex_lock(&dev->device_lock); if (!dev->recvd_hw_ready) { - dev_err(dev->dev, "wait hw ready failed\n"); + dev_err(&dev->dev, "wait hw ready failed\n"); return -ETIME; } + mei_gsc_pxp_check(dev); + mei_me_hw_reset_release(dev); dev->recvd_hw_ready = false; return 0; @@ -440,9 +503,12 @@ static int mei_me_hw_start(struct mei_device *dev) { int ret = mei_me_hw_ready_wait(dev); + if ((kind_is_gsc(dev) || kind_is_gscfi(dev)) && + dev->gsc_reset_to_pxp == MEI_DEV_RESET_TO_PXP_PERFORMED) + dev->gsc_reset_to_pxp = MEI_DEV_RESET_TO_PXP_DONE; if (ret) return ret; - dev_dbg(dev->dev, "hw is ready\n"); + dev_dbg(&dev->dev, "hw is ready\n"); mei_me_host_set_ready(dev); return ret; @@ -490,70 +556,87 @@ static bool mei_me_hbuf_is_empty(struct mei_device *dev) */ static int mei_me_hbuf_empty_slots(struct mei_device *dev) { + struct mei_me_hw *hw = to_me_hw(dev); unsigned char filled_slots, empty_slots; filled_slots = mei_hbuf_filled_slots(dev); - empty_slots = dev->hbuf_depth - filled_slots; + empty_slots = hw->hbuf_depth - filled_slots; /* check for overflow */ - if (filled_slots > dev->hbuf_depth) + if (filled_slots > hw->hbuf_depth) return -EOVERFLOW; return empty_slots; } /** - * mei_me_hbuf_max_len - returns size of hw buffer. + * mei_me_hbuf_depth - returns depth of the hw buffer. * * @dev: the device structure * - * Return: size of hw buffer in bytes + * Return: size of hw buffer in slots */ -static size_t mei_me_hbuf_max_len(const struct mei_device *dev) +static u32 mei_me_hbuf_depth(const struct mei_device *dev) { - return dev->hbuf_depth * sizeof(u32) - sizeof(struct mei_msg_hdr); -} + struct mei_me_hw *hw = to_me_hw(dev); + return hw->hbuf_depth; +} /** * mei_me_hbuf_write - writes a message to host hw buffer. * * @dev: the device structure - * @header: mei HECI header of message - * @buf: message payload will be written + * @hdr: header of message + * @hdr_len: header length in bytes: must be multiplication of a slot (4bytes) + * @data: payload + * @data_len: payload length in bytes * - * Return: -EIO if write has failed + * Return: 0 if success, < 0 - otherwise. 
*/ static int mei_me_hbuf_write(struct mei_device *dev, - struct mei_msg_hdr *header, - const unsigned char *buf) + const void *hdr, size_t hdr_len, + const void *data, size_t data_len) { unsigned long rem; - unsigned long length = header->length; - u32 *reg_buf = (u32 *)buf; + unsigned long i; + const u32 *reg_buf; u32 dw_cnt; - int i; int empty_slots; - dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(header)); + if (WARN_ON(!hdr || hdr_len & 0x3)) + return -EINVAL; + + if (!data && data_len) { + dev_err(&dev->dev, "wrong parameters null data with data_len = %zu\n", data_len); + return -EINVAL; + } + + dev_dbg(&dev->dev, MEI_HDR_FMT, MEI_HDR_PRM((struct mei_msg_hdr *)hdr)); empty_slots = mei_hbuf_empty_slots(dev); - dev_dbg(dev->dev, "empty slots = %hu.\n", empty_slots); + dev_dbg(&dev->dev, "empty slots = %d.\n", empty_slots); + + if (empty_slots < 0) + return -EOVERFLOW; - dw_cnt = mei_data2slots(length); - if (empty_slots < 0 || dw_cnt > empty_slots) + dw_cnt = mei_data2slots(hdr_len + data_len); + if (dw_cnt > (u32)empty_slots) return -EMSGSIZE; - mei_me_hcbww_write(dev, *((u32 *) header)); + reg_buf = hdr; + for (i = 0; i < hdr_len / MEI_SLOT_SIZE; i++) + mei_me_hcbww_write(dev, reg_buf[i]); - for (i = 0; i < length / 4; i++) + reg_buf = data; + for (i = 0; i < data_len / MEI_SLOT_SIZE; i++) mei_me_hcbww_write(dev, reg_buf[i]); - rem = length & 0x3; + rem = data_len & 0x3; if (rem > 0) { u32 reg = 0; - memcpy(®, &buf[length - rem], rem); + memcpy(®, (const u8 *)data + data_len - rem, rem); mei_me_hcbww_write(dev, reg); } @@ -587,7 +670,7 @@ static int mei_me_count_full_read_slots(struct mei_device *dev) if (filled_slots > buffer_depth) return -EOVERFLOW; - dev_dbg(dev->dev, "filled_slots =%08x\n", filled_slots); + dev_dbg(&dev->dev, "filled_slots =%08x\n", filled_slots); return (int)filled_slots; } @@ -601,11 +684,11 @@ static int mei_me_count_full_read_slots(struct mei_device *dev) * Return: always 0 */ static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer, - unsigned long buffer_length) + unsigned long buffer_length) { u32 *reg_buf = (u32 *)buffer; - for (; buffer_length >= sizeof(u32); buffer_length -= sizeof(u32)) + for (; buffer_length >= MEI_SLOT_SIZE; buffer_length -= MEI_SLOT_SIZE) *reg_buf++ = mei_me_mecbrw_read(dev); if (buffer_length > 0) { @@ -629,11 +712,11 @@ static void mei_me_pg_set(struct mei_device *dev) u32 reg; reg = mei_me_reg_read(hw, H_HPG_CSR); - trace_mei_reg_read(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg); + trace_mei_reg_read(&dev->dev, "H_HPG_CSR", H_HPG_CSR, reg); reg |= H_HPG_CSR_PGI; - trace_mei_reg_write(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg); + trace_mei_reg_write(&dev->dev, "H_HPG_CSR", H_HPG_CSR, reg); mei_me_reg_write(hw, H_HPG_CSR, reg); } @@ -648,13 +731,13 @@ static void mei_me_pg_unset(struct mei_device *dev) u32 reg; reg = mei_me_reg_read(hw, H_HPG_CSR); - trace_mei_reg_read(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg); + trace_mei_reg_read(&dev->dev, "H_HPG_CSR", H_HPG_CSR, reg); WARN(!(reg & H_HPG_CSR_PGI), "PGI is not set\n"); reg |= H_HPG_CSR_PGIHEXR; - trace_mei_reg_write(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg); + trace_mei_reg_write(&dev->dev, "H_HPG_CSR", H_HPG_CSR, reg); mei_me_reg_write(hw, H_HPG_CSR, reg); } @@ -668,7 +751,6 @@ static void mei_me_pg_unset(struct mei_device *dev) static int mei_me_pg_legacy_enter_sync(struct mei_device *dev) { struct mei_me_hw *hw = to_me_hw(dev); - unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT); int ret; dev->pg_event = MEI_PG_EVENT_WAIT; @@ -679,7 +761,8 @@ static int 
mei_me_pg_legacy_enter_sync(struct mei_device *dev) mutex_unlock(&dev->device_lock); wait_event_timeout(dev->wait_pg, - dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout); + dev->pg_event == MEI_PG_EVENT_RECEIVED, + dev->timeouts.pgi); mutex_lock(&dev->device_lock); if (dev->pg_event == MEI_PG_EVENT_RECEIVED) { @@ -705,7 +788,6 @@ static int mei_me_pg_legacy_enter_sync(struct mei_device *dev) static int mei_me_pg_legacy_exit_sync(struct mei_device *dev) { struct mei_me_hw *hw = to_me_hw(dev); - unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT); int ret; if (dev->pg_event == MEI_PG_EVENT_RECEIVED) @@ -717,7 +799,8 @@ static int mei_me_pg_legacy_exit_sync(struct mei_device *dev) mutex_unlock(&dev->device_lock); wait_event_timeout(dev->wait_pg, - dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout); + dev->pg_event == MEI_PG_EVENT_RECEIVED, + dev->timeouts.pgi); mutex_lock(&dev->device_lock); reply: @@ -733,7 +816,8 @@ reply: mutex_unlock(&dev->device_lock); wait_event_timeout(dev->wait_pg, - dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, timeout); + dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, + dev->timeouts.pgi); mutex_lock(&dev->device_lock); if (dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED) @@ -785,7 +869,7 @@ static bool mei_me_pg_is_enabled(struct mei_device *dev) return true; notsupported: - dev_dbg(dev->dev, "pg: not supported: d0i3 = %d HGP = %d hbm version %d.%d ?= %d.%d\n", + dev_dbg(&dev->dev, "pg: not supported: d0i3 = %d HGP = %d hbm version %d.%d ?= %d.%d\n", hw->d0i3_supported, !!(reg & ME_PGIC_HRA), dev->version.major_version, @@ -848,15 +932,13 @@ static u32 mei_me_d0i3_unset(struct mei_device *dev) static int mei_me_d0i3_enter_sync(struct mei_device *dev) { struct mei_me_hw *hw = to_me_hw(dev); - unsigned long d0i3_timeout = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT); - unsigned long pgi_timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT); int ret; u32 reg; reg = mei_me_d0i3c_read(dev); if (reg & H_D0I3C_I3) { /* we are in d0i3, nothing to do */ - dev_dbg(dev->dev, "d0i3 set not needed\n"); + dev_dbg(&dev->dev, "d0i3 set not needed\n"); ret = 0; goto on; } @@ -871,7 +953,8 @@ static int mei_me_d0i3_enter_sync(struct mei_device *dev) mutex_unlock(&dev->device_lock); wait_event_timeout(dev->wait_pg, - dev->pg_event == MEI_PG_EVENT_RECEIVED, pgi_timeout); + dev->pg_event == MEI_PG_EVENT_RECEIVED, + dev->timeouts.pgi); mutex_lock(&dev->device_lock); if (dev->pg_event != MEI_PG_EVENT_RECEIVED) { @@ -884,14 +967,15 @@ static int mei_me_d0i3_enter_sync(struct mei_device *dev) reg = mei_me_d0i3_set(dev, true); if (!(reg & H_D0I3C_CIP)) { - dev_dbg(dev->dev, "d0i3 enter wait not needed\n"); + dev_dbg(&dev->dev, "d0i3 enter wait not needed\n"); ret = 0; goto on; } mutex_unlock(&dev->device_lock); wait_event_timeout(dev->wait_pg, - dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, d0i3_timeout); + dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, + dev->timeouts.d0i3); mutex_lock(&dev->device_lock); if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) { @@ -907,7 +991,7 @@ on: hw->pg_state = MEI_PG_ON; out: dev->pg_event = MEI_PG_EVENT_IDLE; - dev_dbg(dev->dev, "d0i3 enter ret = %d\n", ret); + dev_dbg(&dev->dev, "d0i3 enter ret = %d\n", ret); return ret; } @@ -929,7 +1013,7 @@ static int mei_me_d0i3_enter(struct mei_device *dev) reg = mei_me_d0i3c_read(dev); if (reg & H_D0I3C_I3) { /* we are in d0i3, nothing to do */ - dev_dbg(dev->dev, "already d0i3 : set not needed\n"); + dev_dbg(&dev->dev, "already d0i3 : set not needed\n"); goto on; } @@ -937,7 +1021,7 @@ static int mei_me_d0i3_enter(struct mei_device 
*dev) on: hw->pg_state = MEI_PG_ON; dev->pg_event = MEI_PG_EVENT_IDLE; - dev_dbg(dev->dev, "d0i3 enter\n"); + dev_dbg(&dev->dev, "d0i3 enter\n"); return 0; } @@ -951,7 +1035,6 @@ on: static int mei_me_d0i3_exit_sync(struct mei_device *dev) { struct mei_me_hw *hw = to_me_hw(dev); - unsigned long timeout = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT); int ret; u32 reg; @@ -960,21 +1043,22 @@ static int mei_me_d0i3_exit_sync(struct mei_device *dev) reg = mei_me_d0i3c_read(dev); if (!(reg & H_D0I3C_I3)) { /* we are not in d0i3, nothing to do */ - dev_dbg(dev->dev, "d0i3 exit not needed\n"); + dev_dbg(&dev->dev, "d0i3 exit not needed\n"); ret = 0; goto off; } reg = mei_me_d0i3_unset(dev); if (!(reg & H_D0I3C_CIP)) { - dev_dbg(dev->dev, "d0i3 exit wait not needed\n"); + dev_dbg(&dev->dev, "d0i3 exit wait not needed\n"); ret = 0; goto off; } mutex_unlock(&dev->device_lock); wait_event_timeout(dev->wait_pg, - dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, timeout); + dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, + dev->timeouts.d0i3); mutex_lock(&dev->device_lock); if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) { @@ -991,7 +1075,7 @@ off: out: dev->pg_event = MEI_PG_EVENT_IDLE; - dev_dbg(dev->dev, "d0i3 exit ret = %d\n", ret); + dev_dbg(&dev->dev, "d0i3 exit ret = %d\n", ret); return ret; } @@ -1034,7 +1118,7 @@ static void mei_me_d0i3_intr(struct mei_device *dev, u32 intr_source) * force H_RDY because it could be * wiped off during PG */ - dev_dbg(dev->dev, "d0i3 set host ready\n"); + dev_dbg(&dev->dev, "d0i3 set host ready\n"); mei_me_host_set_ready(dev); } } else { @@ -1050,7 +1134,7 @@ static void mei_me_d0i3_intr(struct mei_device *dev, u32 intr_source) * we got here because of HW initiated exit from D0i3. * Start runtime pm resume sequence to exit low power state. */ - dev_dbg(dev->dev, "d0i3 want resume\n"); + dev_dbg(&dev->dev, "d0i3 want resume\n"); mei_hbm_pg_resume(dev); } } @@ -1125,10 +1209,12 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable) ret = mei_me_d0i3_exit_sync(dev); if (ret) return ret; + } else { + hw->pg_state = MEI_PG_OFF; } } - pm_runtime_set_active(dev->dev); + pm_runtime_set_active(dev->parent); hcsr = mei_hcsr_read(dev); /* H_RST may be found lit before reset is started, @@ -1137,7 +1223,7 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable) * we need to clean H_RST bit to start a successful reset sequence. 
*/ if ((hcsr & H_RST) == H_RST) { - dev_warn(dev->dev, "H_RST is set = 0x%08X", hcsr); + dev_warn(&dev->dev, "H_RST is set = 0x%08X", hcsr); hcsr &= ~H_RST; mei_hcsr_set(dev, hcsr); hcsr = mei_hcsr_read(dev); @@ -1145,7 +1231,7 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable) hcsr |= H_RST | H_IG | H_CSR_IS_MASK; - if (!intr_enable) + if (!intr_enable || mei_me_hw_use_polling(to_me_hw(dev))) hcsr &= ~H_CSR_IE_MASK; dev->recvd_hw_ready = false; @@ -1158,10 +1244,10 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable) hcsr = mei_hcsr_read(dev); if ((hcsr & H_RST) == 0) - dev_warn(dev->dev, "H_RST is not set = 0x%08X", hcsr); + dev_warn(&dev->dev, "H_RST is not set = 0x%08X", hcsr); if ((hcsr & H_RDY) == H_RDY) - dev_warn(dev->dev, "H_RDY is not cleared 0x%08X", hcsr); + dev_warn(&dev->dev, "H_RDY is not cleared 0x%08X", hcsr); if (!intr_enable) { mei_me_hw_reset_release(dev); @@ -1191,12 +1277,13 @@ irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id) if (!me_intr_src(hcsr)) return IRQ_NONE; - dev_dbg(dev->dev, "interrupt source 0x%08X\n", me_intr_src(hcsr)); + dev_dbg(&dev->dev, "interrupt source 0x%08X\n", me_intr_src(hcsr)); /* disable interrupts on device */ me_intr_disable(dev, hcsr); return IRQ_WAKE_THREAD; } +EXPORT_SYMBOL_GPL(mei_me_irq_quick_handler); /** * mei_me_irq_thread_handler - function called after ISR to handle the interrupt @@ -1216,7 +1303,7 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id) u32 hcsr; int rets = 0; - dev_dbg(dev->dev, "function called after ISR to handle the interrupt processing.\n"); + dev_dbg(&dev->dev, "function called after ISR to handle the interrupt processing.\n"); /* initialize our complete list */ mutex_lock(&dev->device_lock); @@ -1227,8 +1314,18 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id) /* check if ME wants a reset */ if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) { - dev_warn(dev->dev, "FW not ready: resetting.\n"); - schedule_work(&dev->reset_work); + if (kind_is_gsc(dev) || kind_is_gscfi(dev)) { + dev_dbg(&dev->dev, "FW not ready: resetting: dev_state = %d\n", + dev->dev_state); + } else { + dev_warn(&dev->dev, "FW not ready: resetting: dev_state = %d\n", + dev->dev_state); + } + if (dev->dev_state == MEI_DEV_POWERING_DOWN || + dev->dev_state == MEI_DEV_POWER_DOWN) + mei_cl_all_disconnect(dev); + else if (dev->dev_state != MEI_DEV_DISABLED) + schedule_work(&dev->reset_work); goto end; } @@ -1240,18 +1337,29 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id) /* check if we need to start the dev */ if (!mei_host_is_ready(dev)) { if (mei_hw_is_ready(dev)) { - dev_dbg(dev->dev, "we need to start the dev.\n"); - dev->recvd_hw_ready = true; - wake_up(&dev->wait_hw_ready); + /* synchronized by dev mutex */ + if (waitqueue_active(&dev->wait_hw_ready)) { + dev_dbg(&dev->dev, "we need to start the dev.\n"); + dev->recvd_hw_ready = true; + wake_up(&dev->wait_hw_ready); + } else if (dev->dev_state != MEI_DEV_UNINITIALIZED && + dev->dev_state != MEI_DEV_POWERING_DOWN && + dev->dev_state != MEI_DEV_POWER_DOWN) { + dev_dbg(&dev->dev, "Force link reset.\n"); + schedule_work(&dev->reset_work); + } else { + dev_dbg(&dev->dev, "Ignore this interrupt in state = %d\n", + dev->dev_state); + } } else { - dev_dbg(dev->dev, "Spurious Interrupt\n"); + dev_dbg(&dev->dev, "Spurious Interrupt\n"); } goto end; } /* check slots available for reading */ slots = mei_count_full_read_slots(dev); while (slots > 0) { - dev_dbg(dev->dev, "slots to read = %08x\n", 
slots); + dev_dbg(&dev->dev, "slots to read = %08x\n", slots); rets = mei_irq_read_handler(dev, &cmpl_list, &slots); /* There is a race between ME write and interrupt delivery: * Not all data is always available immediately after the @@ -1260,10 +1368,14 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id) if (rets == -ENODATA) break; - if (rets && dev->dev_state != MEI_DEV_RESETTING) { - dev_err(dev->dev, "mei_irq_read_handler ret = %d.\n", - rets); - schedule_work(&dev->reset_work); + if (rets) { + dev_err(&dev->dev, "mei_irq_read_handler ret = %d, state = %d.\n", + rets, dev->dev_state); + if (dev->dev_state != MEI_DEV_RESETTING && + dev->dev_state != MEI_DEV_DISABLED && + dev->dev_state != MEI_DEV_POWERING_DOWN && + dev->dev_state != MEI_DEV_POWER_DOWN) + schedule_work(&dev->reset_work); goto end; } } @@ -1284,14 +1396,76 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id) mei_irq_compl_handler(dev, &cmpl_list); end: - dev_dbg(dev->dev, "interrupt thread end ret = %d\n", rets); + dev_dbg(&dev->dev, "interrupt thread end ret = %d\n", rets); mei_me_intr_enable(dev); mutex_unlock(&dev->device_lock); return IRQ_HANDLED; } +EXPORT_SYMBOL_GPL(mei_me_irq_thread_handler); + +#define MEI_POLLING_TIMEOUT_ACTIVE 100 +#define MEI_POLLING_TIMEOUT_IDLE 500 + +/** + * mei_me_polling_thread - interrupt register polling thread + * + * @_dev: mei device + * + * The thread monitors the interrupt source register and calls + * mei_me_irq_thread_handler() to handle the firmware + * input. + * + * The function polls in MEI_POLLING_TIMEOUT_ACTIVE timeout + * in case there was an event, in idle case the polling + * time increases yet again by MEI_POLLING_TIMEOUT_ACTIVE + * up to MEI_POLLING_TIMEOUT_IDLE. + * + * Return: always 0 + */ +int mei_me_polling_thread(void *_dev) +{ + struct mei_device *dev = _dev; + irqreturn_t irq_ret; + long polling_timeout = MEI_POLLING_TIMEOUT_ACTIVE; + + dev_dbg(&dev->dev, "kernel thread is running\n"); + while (!kthread_should_stop()) { + struct mei_me_hw *hw = to_me_hw(dev); + u32 hcsr; + + wait_event_timeout(hw->wait_active, + hw->is_active || kthread_should_stop(), + msecs_to_jiffies(MEI_POLLING_TIMEOUT_IDLE)); + + if (kthread_should_stop()) + break; + + hcsr = mei_hcsr_read(dev); + if (me_intr_src(hcsr)) { + polling_timeout = MEI_POLLING_TIMEOUT_ACTIVE; + irq_ret = mei_me_irq_thread_handler(1, dev); + if (irq_ret != IRQ_HANDLED) + dev_err(&dev->dev, "irq_ret %d\n", irq_ret); + } else { + /* + * Increase timeout by MEI_POLLING_TIMEOUT_ACTIVE + * up to MEI_POLLING_TIMEOUT_IDLE + */ + polling_timeout = clamp_val(polling_timeout + MEI_POLLING_TIMEOUT_ACTIVE, + MEI_POLLING_TIMEOUT_ACTIVE, + MEI_POLLING_TIMEOUT_IDLE); + } + + schedule_timeout_interruptible(msecs_to_jiffies(polling_timeout)); + } + + return 0; +} +EXPORT_SYMBOL_GPL(mei_me_polling_thread); static const struct mei_hw_ops mei_me_hw_ops = { + .trc_status = mei_me_trc_status, .fw_status = mei_me_fw_status, .pg_state = mei_me_pg_state, @@ -1312,7 +1486,7 @@ static const struct mei_hw_ops mei_me_hw_ops = { .hbuf_free_slots = mei_me_hbuf_empty_slots, .hbuf_is_ready = mei_me_hbuf_is_empty, - .hbuf_max_len = mei_me_hbuf_max_len, + .hbuf_depth = mei_me_hbuf_depth, .write = mei_me_hbuf_write, @@ -1321,11 +1495,24 @@ static const struct mei_hw_ops mei_me_hw_ops = { .read = mei_me_read_slots }; -static bool mei_me_fw_type_nm(struct pci_dev *pdev) +/** + * mei_me_fw_type_nm() - check for nm sku + * + * @pdev: pci device + * + * Read ME FW Status register to check for the Node Manager (NM) Firmware. 
+ * The NM FW is only signaled in PCI function 0. + * __Note__: Deprecated by PCH8 and newer. + * + * Return: true in case of NM firmware + */ +static bool mei_me_fw_type_nm(const struct pci_dev *pdev) { u32 reg; + unsigned int devfn; - pci_read_config_dword(pdev, PCI_CFG_HFS_2, ®); + devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0); + pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_2, ®); trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_2", PCI_CFG_HFS_2, reg); /* make sure that bit 9 (NM) is up and bit 10 (DM) is down */ return (reg & 0x600) == 0x200; @@ -1334,30 +1521,77 @@ static bool mei_me_fw_type_nm(struct pci_dev *pdev) #define MEI_CFG_FW_NM \ .quirk_probe = mei_me_fw_type_nm -static bool mei_me_fw_type_sps(struct pci_dev *pdev) +/** + * mei_me_fw_type_sps_4() - check for sps 4.0 sku + * + * @pdev: pci device + * + * Read ME FW Status register to check for SPS Firmware. + * The SPS FW is only signaled in the PCI function 0. + * __Note__: Deprecated by SPS 5.0 and newer. + * + * Return: true in case of SPS firmware + */ +static bool mei_me_fw_type_sps_4(const struct pci_dev *pdev) { u32 reg; unsigned int devfn; - /* - * Read ME FW Status register to check for SPS Firmware - * The SPS FW is only signaled in pci function 0 - */ devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0); pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_1, ®); trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg); - /* if bits [19:16] = 15, running SPS Firmware */ - return (reg & 0xf0000) == 0xf0000; + return (reg & PCI_CFG_HFS_1_OPMODE_MSK) == PCI_CFG_HFS_1_OPMODE_SPS; } -#define MEI_CFG_FW_SPS \ - .quirk_probe = mei_me_fw_type_sps +#define MEI_CFG_FW_SPS_4 \ + .quirk_probe = mei_me_fw_type_sps_4 +/** + * mei_me_fw_type_sps_ign() - check for sps or ign sku + * + * @pdev: pci device + * + * Read ME FW Status register to check for SPS or IGN Firmware. 
+ * The SPS/IGN FW is only signaled in pci function 0 + * + * Return: true in case of SPS/IGN firmware + */ +static bool mei_me_fw_type_sps_ign(const struct pci_dev *pdev) +{ + u32 reg; + u32 fw_type; + unsigned int devfn; + + devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0); + pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_3, ®); + trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_3", PCI_CFG_HFS_3, reg); + fw_type = (reg & PCI_CFG_HFS_3_FW_SKU_MSK); + + dev_dbg(&pdev->dev, "fw type is %d\n", fw_type); + + return fw_type == PCI_CFG_HFS_3_FW_SKU_IGN || + fw_type == PCI_CFG_HFS_3_FW_SKU_SPS; +} -#define MEI_CFG_LEGACY_HFS \ +#define MEI_CFG_KIND_ITOUCH \ + .kind = "itouch" + +#define MEI_CFG_TYPE_GSC \ + .kind = "gsc" + +#define MEI_CFG_TYPE_GSCFI \ + .kind = "gscfi" + +#define MEI_CFG_FW_SPS_IGN \ + .quirk_probe = mei_me_fw_type_sps_ign + +#define MEI_CFG_FW_VER_SUPP \ + .fw_ver_supported = 1 + +#define MEI_CFG_ICH_HFS \ .fw_status.count = 0 -#define MEI_CFG_ICH_HFS \ +#define MEI_CFG_ICH10_HFS \ .fw_status.count = 1, \ .fw_status.status[0] = PCI_CFG_HFS_1 @@ -1375,61 +1609,192 @@ static bool mei_me_fw_type_sps(struct pci_dev *pdev) .fw_status.status[4] = PCI_CFG_HFS_5, \ .fw_status.status[5] = PCI_CFG_HFS_6 +#define MEI_CFG_DMA_128 \ + .dma_size[DMA_DSCR_HOST] = SZ_128K, \ + .dma_size[DMA_DSCR_DEVICE] = SZ_128K, \ + .dma_size[DMA_DSCR_CTRL] = PAGE_SIZE + +#define MEI_CFG_TRC \ + .hw_trc_supported = 1 + /* ICH Legacy devices */ -const struct mei_cfg mei_me_legacy_cfg = { - MEI_CFG_LEGACY_HFS, +static const struct mei_cfg mei_me_ich_cfg = { + MEI_CFG_ICH_HFS, }; /* ICH devices */ -const struct mei_cfg mei_me_ich_cfg = { - MEI_CFG_ICH_HFS, +static const struct mei_cfg mei_me_ich10_cfg = { + MEI_CFG_ICH10_HFS, }; -/* PCH devices */ -const struct mei_cfg mei_me_pch_cfg = { +/* PCH6 devices */ +static const struct mei_cfg mei_me_pch6_cfg = { MEI_CFG_PCH_HFS, }; +/* PCH7 devices */ +static const struct mei_cfg mei_me_pch7_cfg = { + MEI_CFG_PCH_HFS, + MEI_CFG_FW_VER_SUPP, +}; /* PCH Cougar Point and Patsburg with quirk for Node Manager exclusion */ -const struct mei_cfg mei_me_pch_cpt_pbg_cfg = { +static const struct mei_cfg mei_me_pch_cpt_pbg_cfg = { MEI_CFG_PCH_HFS, + MEI_CFG_FW_VER_SUPP, MEI_CFG_FW_NM, }; /* PCH8 Lynx Point and newer devices */ -const struct mei_cfg mei_me_pch8_cfg = { +static const struct mei_cfg mei_me_pch8_cfg = { MEI_CFG_PCH8_HFS, + MEI_CFG_FW_VER_SUPP, +}; + +/* PCH8 Lynx Point and newer devices - iTouch */ +static const struct mei_cfg mei_me_pch8_itouch_cfg = { + MEI_CFG_KIND_ITOUCH, + MEI_CFG_PCH8_HFS, + MEI_CFG_FW_VER_SUPP, }; /* PCH8 Lynx Point with quirk for SPS Firmware exclusion */ -const struct mei_cfg mei_me_pch8_sps_cfg = { +static const struct mei_cfg mei_me_pch8_sps_4_cfg = { + MEI_CFG_PCH8_HFS, + MEI_CFG_FW_VER_SUPP, + MEI_CFG_FW_SPS_4, +}; + +/* LBG with quirk for SPS (4.0) Firmware exclusion */ +static const struct mei_cfg mei_me_pch12_sps_4_cfg = { + MEI_CFG_PCH8_HFS, + MEI_CFG_FW_VER_SUPP, + MEI_CFG_FW_SPS_4, +}; + +/* Cannon Lake and newer devices */ +static const struct mei_cfg mei_me_pch12_cfg = { + MEI_CFG_PCH8_HFS, + MEI_CFG_FW_VER_SUPP, + MEI_CFG_DMA_128, +}; + +/* Cannon Lake with quirk for SPS 5.0 and newer Firmware exclusion */ +static const struct mei_cfg mei_me_pch12_sps_cfg = { MEI_CFG_PCH8_HFS, - MEI_CFG_FW_SPS, + MEI_CFG_FW_VER_SUPP, + MEI_CFG_DMA_128, + MEI_CFG_FW_SPS_IGN, }; +/* Cannon Lake itouch with quirk for SPS 5.0 and newer Firmware exclusion + * w/o DMA support. 
+ */ +static const struct mei_cfg mei_me_pch12_itouch_sps_cfg = { + MEI_CFG_KIND_ITOUCH, + MEI_CFG_PCH8_HFS, + MEI_CFG_FW_VER_SUPP, + MEI_CFG_FW_SPS_IGN, +}; + +/* Tiger Lake and newer devices */ +static const struct mei_cfg mei_me_pch15_cfg = { + MEI_CFG_PCH8_HFS, + MEI_CFG_FW_VER_SUPP, + MEI_CFG_DMA_128, + MEI_CFG_TRC, +}; + +/* Tiger Lake with quirk for SPS 5.0 and newer Firmware exclusion */ +static const struct mei_cfg mei_me_pch15_sps_cfg = { + MEI_CFG_PCH8_HFS, + MEI_CFG_FW_VER_SUPP, + MEI_CFG_DMA_128, + MEI_CFG_TRC, + MEI_CFG_FW_SPS_IGN, +}; + +/* Graphics System Controller */ +static const struct mei_cfg mei_me_gsc_cfg = { + MEI_CFG_TYPE_GSC, + MEI_CFG_PCH8_HFS, + MEI_CFG_FW_VER_SUPP, +}; + +/* Graphics System Controller Firmware Interface */ +static const struct mei_cfg mei_me_gscfi_cfg = { + MEI_CFG_TYPE_GSCFI, + MEI_CFG_PCH8_HFS, + MEI_CFG_FW_VER_SUPP, +}; + +/* + * mei_cfg_list - A list of platform platform specific configurations. + * Note: has to be synchronized with enum mei_cfg_idx. + */ +static const struct mei_cfg *const mei_cfg_list[] = { + [MEI_ME_UNDEF_CFG] = NULL, + [MEI_ME_ICH_CFG] = &mei_me_ich_cfg, + [MEI_ME_ICH10_CFG] = &mei_me_ich10_cfg, + [MEI_ME_PCH6_CFG] = &mei_me_pch6_cfg, + [MEI_ME_PCH7_CFG] = &mei_me_pch7_cfg, + [MEI_ME_PCH_CPT_PBG_CFG] = &mei_me_pch_cpt_pbg_cfg, + [MEI_ME_PCH8_CFG] = &mei_me_pch8_cfg, + [MEI_ME_PCH8_ITOUCH_CFG] = &mei_me_pch8_itouch_cfg, + [MEI_ME_PCH8_SPS_4_CFG] = &mei_me_pch8_sps_4_cfg, + [MEI_ME_PCH12_CFG] = &mei_me_pch12_cfg, + [MEI_ME_PCH12_SPS_4_CFG] = &mei_me_pch12_sps_4_cfg, + [MEI_ME_PCH12_SPS_CFG] = &mei_me_pch12_sps_cfg, + [MEI_ME_PCH12_SPS_ITOUCH_CFG] = &mei_me_pch12_itouch_sps_cfg, + [MEI_ME_PCH15_CFG] = &mei_me_pch15_cfg, + [MEI_ME_PCH15_SPS_CFG] = &mei_me_pch15_sps_cfg, + [MEI_ME_GSC_CFG] = &mei_me_gsc_cfg, + [MEI_ME_GSCFI_CFG] = &mei_me_gscfi_cfg, +}; + +const struct mei_cfg *mei_me_get_cfg(kernel_ulong_t idx) +{ + BUILD_BUG_ON(ARRAY_SIZE(mei_cfg_list) != MEI_ME_NUM_CFG); + + if (idx >= MEI_ME_NUM_CFG) + return NULL; + + return mei_cfg_list[idx]; +} +EXPORT_SYMBOL_GPL(mei_me_get_cfg); + /** * mei_me_dev_init - allocates and initializes the mei device structure * - * @pdev: The pci device structure + * @parent: device associated with physical device (pci/platform) * @cfg: per device generation config + * @slow_fw: configure longer timeouts as FW is slow * * Return: The mei_device pointer on success, NULL on failure. */ -struct mei_device *mei_me_dev_init(struct pci_dev *pdev, - const struct mei_cfg *cfg) +struct mei_device *mei_me_dev_init(struct device *parent, + const struct mei_cfg *cfg, bool slow_fw) { struct mei_device *dev; struct mei_me_hw *hw; + int i; - dev = devm_kzalloc(&pdev->dev, sizeof(struct mei_device) + - sizeof(struct mei_me_hw), GFP_KERNEL); + dev = kzalloc(sizeof(*dev) + sizeof(*hw), GFP_KERNEL); if (!dev) return NULL; + hw = to_me_hw(dev); - mei_device_init(dev, &pdev->dev, &mei_me_hw_ops); + for (i = 0; i < DMA_DSCR_NUM; i++) + dev->dr_dscr[i].size = cfg->dma_size[i]; + + mei_device_init(dev, parent, slow_fw, &mei_me_hw_ops); hw->cfg = cfg; + + dev->fw_f_fw_ver_supported = cfg->fw_ver_supported; + + dev->kind = cfg->kind; + return dev; } - +EXPORT_SYMBOL_GPL(mei_me_dev_init); |
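For context on the polling path introduced above: mei_me_polling_thread() resets its delay to MEI_POLLING_TIMEOUT_ACTIVE whenever an interrupt source is pending, and otherwise lengthens it by the active step, clamped to MEI_POLLING_TIMEOUT_IDLE. Below is a minimal, self-contained userspace sketch of that back-off arithmetic only; the names next_poll_timeout() and event_pending are local to this illustration and are not part of the driver.

/* Illustrative sketch of the clamp-based polling back-off (not kernel code). */
#include <stdbool.h>
#include <stdio.h>

#define POLL_TIMEOUT_ACTIVE_MS 100  /* mirrors MEI_POLLING_TIMEOUT_ACTIVE */
#define POLL_TIMEOUT_IDLE_MS   500  /* mirrors MEI_POLLING_TIMEOUT_IDLE */

static long next_poll_timeout(long cur_ms, bool event_pending)
{
	if (event_pending)
		return POLL_TIMEOUT_ACTIVE_MS;   /* snap back to fast polling */

	cur_ms += POLL_TIMEOUT_ACTIVE_MS;        /* grow by the active step */
	if (cur_ms > POLL_TIMEOUT_IDLE_MS)
		cur_ms = POLL_TIMEOUT_IDLE_MS;   /* ceiling: idle timeout */
	return cur_ms;
}

int main(void)
{
	long timeout_ms = POLL_TIMEOUT_ACTIVE_MS;
	bool events[] = { false, false, false, false, true, false };

	for (size_t i = 0; i < sizeof(events) / sizeof(events[0]); i++) {
		timeout_ms = next_poll_timeout(timeout_ms, events[i]);
		printf("iteration %zu: sleep %ld ms\n", i, timeout_ms);
	}
	return 0;
}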
