diff options
Diffstat (limited to 'drivers/misc')
220 files changed, 14173 insertions, 19331 deletions
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index 75e427f124b2..9d1de68dee27 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -104,6 +104,28 @@ config PHANTOM If you choose to build module, its name will be phantom. If unsure, say N here. +config RPMB + tristate "RPMB partition interface" + depends on MMC || SCSI_UFSHCD + help + Unified RPMB unit interface for RPMB capable devices such as eMMC and + UFS. Provides interface for in-kernel security controllers to access + RPMB unit. + + If unsure, select N. + +config TI_FPC202 + tristate "TI FPC202 Dual Port Controller" + depends on I2C + select GPIOLIB + select I2C_ATR + help + If you say yes here you get support for the Texas Instruments FPC202 + Dual Port Controller. + + This driver can also be built as a module. If so, the module will be + called fpc202. + config TIFM_CORE tristate "TI Flash Media interface support" depends on PCI @@ -166,7 +188,7 @@ config ENCLOSURE_SERVICES config SGI_XP tristate "Support communication between SGI SSIs" depends on NET - depends on (IA64_SGI_UV || X86_UV) && SMP + depends on X86_UV && SMP depends on X86_64 || BROKEN select SGI_GRU if X86_64 && SMP help @@ -293,21 +315,21 @@ config SGI_GRU depends on X86_UV && SMP select MMU_NOTIFIER help - The GRU is a hardware resource located in the system chipset. The GRU - contains memory that can be mmapped into the user address space. This memory is - used to communicate with the GRU to perform functions such as load/store, - scatter/gather, bcopy, AMOs, etc. The GRU is directly accessed by user - instructions using user virtual addresses. GRU instructions (ex., bcopy) use - user virtual addresses for operands. + The GRU is a hardware resource located in the system chipset. The GRU + contains memory that can be mmapped into the user address space. + This memory is used to communicate with the GRU to perform functions + such as load/store, scatter/gather, bcopy, AMOs, etc. 
The GRU is + directly accessed by user instructions using user virtual addresses. + GRU instructions (ex., bcopy) use user virtual addresses for operands. - If you are not running on a SGI UV system, say N. + If you are not running on a SGI UV system, say N. config SGI_GRU_DEBUG bool "SGI GRU driver debug" depends on SGI_GRU help - This option enables additional debugging code for the SGI GRU driver. - If you are unsure, say N. + This option enables additional debugging code for the SGI GRU driver. + If you are unsure, say N. config APDS9802ALS tristate "Medfield Avago APDS9802 ALS Sensor module" @@ -428,7 +450,6 @@ config LATTICE_ECP3_CONFIG tristate "Lattice ECP3 FPGA bitstream configuration via SPI" depends on SPI && SYSFS select FW_LOADER - default n help This option enables support for bitstream configuration (programming or loading) of the Lattice ECP3 FPGA family via SPI. @@ -496,6 +517,7 @@ config HISI_HIKEY_USB config OPEN_DICE tristate "Open Profile for DICE driver" depends on OF_RESERVED_MEM + depends on HAS_IOMEM help This driver exposes a DICE reserved memory region to userspace via a character device. The memory region contains Compound Device @@ -505,6 +527,17 @@ config OPEN_DICE If unsure, say N. +config NTSYNC + tristate "NT synchronization primitive emulation" + help + This module provides kernel support for emulation of Windows NT + synchronization primitives. It is not a hardware driver. + + To compile this driver as a module, choose M here: the + module will be called ntsync. + + If unsure, say N. + config VCPU_STALL_DETECTOR tristate "Guest vCPU stall detector" depends on OF && HAS_IOMEM @@ -561,21 +594,71 @@ config TPS6594_PFSM This driver can also be built as a module. If so, the module will be called tps6594-pfsm. +config NSM + tristate "Nitro (Enclaves) Security Module support" + depends on VIRTIO + select HW_RANDOM + help + This driver provides support for the Nitro Security Module + in AWS EC2 Nitro based Enclaves. 
The driver exposes a /dev/nsm + device user space can use to communicate with the hypervisor. + + To compile this driver as a module, choose M here. + The module will be called nsm. + +config MARVELL_CN10K_DPI + tristate "Octeon CN10K DPI driver" + depends on PCI && PCI_IOV + depends on ARCH_THUNDER || (COMPILE_TEST && 64BIT) + help + Enables Octeon CN10K DMA packet interface (DPI) driver which + intializes DPI hardware's physical function (PF) device's + global configuration and its virtual function (VFs) resource + configuration to enable DMA transfers. DPI PF device does not + have any data movement functionality, it only serves VF's + resource configuration requests. + + To compile this driver as a module, choose M here: the module + will be called mrvl_cn10k_dpi. + +config MCHP_LAN966X_PCI + tristate "Microchip LAN966x PCIe Support" + depends on PCI + depends on OF_OVERLAY + select IRQ_DOMAIN + help + This enables the support for the LAN966x PCIe device. + + This is used to drive the LAN966x PCIe device from the host system + to which it is connected. The driver uses a device tree overlay to + load other drivers to support for LAN966x internal components. 
+ + Even if this driver does not depend on those other drivers, in order + to have a fully functional board, the following drivers are needed: + - fixed-clock (COMMON_CLK) + - lan966x-oic (LAN966X_OIC) + - lan966x-cpu-syscon (MFD_SYSCON) + - lan966x-switch-reset (RESET_MCHP_SPARX5) + - lan966x-pinctrl (PINCTRL_OCELOT) + - lan966x-serdes (PHY_LAN966X_SERDES) + - lan966x-miim (MDIO_MSCC_MIIM) + - lan966x-switch (LAN966X_SWITCH) + source "drivers/misc/c2port/Kconfig" source "drivers/misc/eeprom/Kconfig" source "drivers/misc/cb710/Kconfig" -source "drivers/misc/ti-st/Kconfig" source "drivers/misc/lis3lv02d/Kconfig" source "drivers/misc/altera-stapl/Kconfig" source "drivers/misc/mei/Kconfig" source "drivers/misc/vmw_vmci/Kconfig" source "drivers/misc/genwqe/Kconfig" -source "drivers/misc/echo/Kconfig" -source "drivers/misc/cxl/Kconfig" source "drivers/misc/ocxl/Kconfig" source "drivers/misc/bcm-vk/Kconfig" source "drivers/misc/cardreader/Kconfig" source "drivers/misc/uacce/Kconfig" source "drivers/misc/pvpanic/Kconfig" source "drivers/misc/mchp_pci1xxxx/Kconfig" +source "drivers/misc/keba/Kconfig" +source "drivers/misc/amd-sbi/Kconfig" +source "drivers/misc/rp1/Kconfig" endmenu diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index f2a4d1ff65d4..b32a2597d246 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -12,9 +12,11 @@ obj-$(CONFIG_ATMEL_SSC) += atmel-ssc.o obj-$(CONFIG_DUMMY_IRQ) += dummy-irq.o obj-$(CONFIG_ICS932S401) += ics932s401.o obj-$(CONFIG_LKDTM) += lkdtm/ +obj-$(CONFIG_TI_FPC202) += ti_fpc202.o obj-$(CONFIG_TIFM_CORE) += tifm_core.o obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o obj-$(CONFIG_PHANTOM) += phantom.o +obj-$(CONFIG_RPMB) += rpmb-core.o obj-$(CONFIG_QCOM_COINCELL) += qcom-coincell.o obj-$(CONFIG_QCOM_FASTRPC) += fastrpc.o obj-$(CONFIG_SENSORS_BH1770) += bh1770glc.o @@ -39,7 +41,6 @@ obj-y += eeprom/ obj-y += cb710/ obj-$(CONFIG_VMWARE_BALLOON) += vmw_balloon.o obj-$(CONFIG_PCH_PHUB) += pch_phub.o -obj-y += ti-st/ obj-y += 
lis3lv02d/ obj-$(CONFIG_ALTERA_STAPL) +=altera-stapl/ obj-$(CONFIG_INTEL_MEI) += mei/ @@ -48,8 +49,6 @@ obj-$(CONFIG_LATTICE_ECP3_CONFIG) += lattice-ecp3-config.o obj-$(CONFIG_SRAM) += sram.o obj-$(CONFIG_SRAM_EXEC) += sram-exec.o obj-$(CONFIG_GENWQE) += genwqe/ -obj-$(CONFIG_ECHO) += echo/ -obj-$(CONFIG_CXL_BASE) += cxl/ obj-$(CONFIG_DW_XDATA_PCIE) += dw-xdata-pcie.o obj-$(CONFIG_PCI_ENDPOINT_TEST) += pci_endpoint_test.o obj-$(CONFIG_OCXL) += ocxl/ @@ -59,6 +58,7 @@ obj-$(CONFIG_PVPANIC) += pvpanic/ obj-$(CONFIG_UACCE) += uacce/ obj-$(CONFIG_XILINX_SDFEC) += xilinx_sdfec.o obj-$(CONFIG_HISI_HIKEY_USB) += hisi_hikey_usb.o +obj-$(CONFIG_NTSYNC) += ntsync.o obj-$(CONFIG_HI6421V600_IRQ) += hi6421v600-irq.o obj-$(CONFIG_OPEN_DICE) += open-dice.o obj-$(CONFIG_GP_PCI1XXXX) += mchp_pci1xxxx/ @@ -67,3 +67,11 @@ obj-$(CONFIG_TMR_MANAGER) += xilinx_tmr_manager.o obj-$(CONFIG_TMR_INJECT) += xilinx_tmr_inject.o obj-$(CONFIG_TPS6594_ESM) += tps6594-esm.o obj-$(CONFIG_TPS6594_PFSM) += tps6594-pfsm.o +obj-$(CONFIG_NSM) += nsm.o +obj-$(CONFIG_MARVELL_CN10K_DPI) += mrvl_cn10k_dpi.o +lan966x-pci-objs := lan966x_pci.o +lan966x-pci-objs += lan966x_pci.dtbo.o +obj-$(CONFIG_MCHP_LAN966X_PCI) += lan966x-pci.o +obj-y += keba/ +obj-y += amd-sbi/ +obj-$(CONFIG_MISC_RP1) += rp1/ diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c index 756ef6912b5a..04683b981e54 100644 --- a/drivers/misc/ad525x_dpot.c +++ b/drivers/misc/ad525x_dpot.c @@ -73,6 +73,7 @@ #include <linux/kernel.h> #include <linux/delay.h> #include <linux/slab.h> +#include <linux/string_choices.h> #include "ad525x_dpot.h" @@ -418,10 +419,8 @@ static ssize_t sysfs_show_reg(struct device *dev, s32 value; if (reg & DPOT_ADDR_OTP_EN) - return sprintf(buf, "%s\n", - test_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask) ? 
- "enabled" : "disabled"); - + return sprintf(buf, "%s\n", str_enabled_disabled( + test_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask))); mutex_lock(&data->update_lock); value = dpot_read(data, reg); diff --git a/drivers/misc/altera-stapl/altera.c b/drivers/misc/altera-stapl/altera.c index 587427b73914..bbe3967c3a4c 100644 --- a/drivers/misc/altera-stapl/altera.c +++ b/drivers/misc/altera-stapl/altera.c @@ -9,7 +9,7 @@ * Copyright (C) 2010,2011 Igor M. Liplianin <liplianin@netup.ru> */ -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <linux/ctype.h> #include <linux/string.h> #include <linux/firmware.h> diff --git a/drivers/misc/amd-sbi/Kconfig b/drivers/misc/amd-sbi/Kconfig new file mode 100644 index 000000000000..be022c71a90c --- /dev/null +++ b/drivers/misc/amd-sbi/Kconfig @@ -0,0 +1,23 @@ +# SPDX-License-Identifier: GPL-2.0-only +config AMD_SBRMI_I2C + tristate "AMD side band RMI support" + depends on I2C + depends on ARM || ARM64 || COMPILE_TEST + select REGMAP_I2C + depends on I3C || !I3C + select REGMAP_I3C if I3C + help + Side band RMI over I2C/I3C support for AMD out of band management. + This driver is intended to run on the BMC, not the managed node. + + This driver can also be built as a module. If so, the module will + be called sbrmi-i2c. + +config AMD_SBRMI_HWMON + bool "SBRMI hardware monitoring" + depends on AMD_SBRMI_I2C && HWMON + depends on !(AMD_SBRMI_I2C=y && HWMON=m) + help + This provides support for RMI device hardware monitoring. If enabled, + a hardware monitoring device will be created for each socket in + the system. 
diff --git a/drivers/misc/amd-sbi/Makefile b/drivers/misc/amd-sbi/Makefile new file mode 100644 index 000000000000..38eaaa651fd9 --- /dev/null +++ b/drivers/misc/amd-sbi/Makefile @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0-only +sbrmi-i2c-objs += rmi-i2c.o rmi-core.o +sbrmi-i2c-$(CONFIG_AMD_SBRMI_HWMON) += rmi-hwmon.o +obj-$(CONFIG_AMD_SBRMI_I2C) += sbrmi-i2c.o diff --git a/drivers/misc/amd-sbi/rmi-core.c b/drivers/misc/amd-sbi/rmi-core.c new file mode 100644 index 000000000000..c3a58912d6db --- /dev/null +++ b/drivers/misc/amd-sbi/rmi-core.c @@ -0,0 +1,592 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * sbrmi-core.c - file defining SB-RMI protocols compliant + * AMD SoC device. + * + * Copyright (C) 2025 Advanced Micro Devices, Inc. + */ +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/fs.h> +#include <linux/i2c.h> +#include <linux/miscdevice.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/regmap.h> +#include "rmi-core.h" + +/* Mask for Status Register bit[1] */ +#define SW_ALERT_MASK 0x2 +/* Mask to check H/W Alert status bit */ +#define HW_ALERT_MASK 0x80 + +/* Software Interrupt for triggering */ +#define START_CMD 0x80 +#define TRIGGER_MAILBOX 0x01 + +/* Default message lengths as per APML command protocol */ +/* CPUID */ +#define CPUID_RD_DATA_LEN 0x8 +#define CPUID_WR_DATA_LEN 0x8 +#define CPUID_WR_DATA_LEN_EXT 0x9 +#define CPUID_RD_REG_LEN 0xa +#define CPUID_WR_REG_LEN 0x9 +#define CPUID_WR_REG_LEN_EXT 0xa +/* MSR */ +#define MSR_RD_REG_LEN 0xa +#define MSR_WR_REG_LEN 0x8 +#define MSR_WR_REG_LEN_EXT 0x9 +#define MSR_RD_DATA_LEN 0x8 +#define MSR_WR_DATA_LEN 0x7 +#define MSR_WR_DATA_LEN_EXT 0x8 + +/* CPUID MSR Command Ids */ +#define CPUID_MCA_CMD 0x73 +#define RD_CPUID_CMD 0x91 +#define RD_MCA_CMD 0x86 + +/* CPUID MCAMSR mask & index */ +#define CPUID_MCA_THRD_INDEX 32 +#define CPUID_MCA_FUNC_MASK GENMASK(31, 0) +#define CPUID_EXT_FUNC_INDEX 56 + +/* input for bulk write to CPUID protocol */ +struct 
cpu_msr_indata { + u8 wr_len; /* const value */ + u8 rd_len; /* const value */ + u8 proto_cmd; /* const value */ + u8 thread; /* thread number */ + union { + u8 reg_offset[4]; /* input value */ + u32 value; + } __packed; + u8 ext; /* extended function */ +}; + +/* input for bulk write to CPUID protocol for REV 0x21 */ +struct cpu_msr_indata_ext { + u8 wr_len; /* const value */ + u8 rd_len; /* const value */ + u8 proto_cmd; /* const value */ + u8 thread_lo; /* thread number low */ + u8 thread_hi; /* thread number high */ + union { + u8 reg_offset[4]; /* input value */ + u32 value; + } __packed; + u8 ext; /* extended function */ +}; + +/* output for bulk read from CPUID protocol */ +struct cpu_msr_outdata { + u8 num_bytes; /* number of bytes return */ + u8 status; /* Protocol status code */ + union { + u64 value; + u8 reg_data[8]; + } __packed; +}; + +static inline void prepare_cpuid_input_message(struct cpu_msr_indata *input, + u8 thread_id, u32 func, + u8 ext_func) +{ + input->rd_len = CPUID_RD_DATA_LEN; + input->wr_len = CPUID_WR_DATA_LEN; + input->proto_cmd = RD_CPUID_CMD; + input->thread = thread_id << 1; + input->value = func; + input->ext = ext_func; +} + +static inline void prepare_cpuid_input_message_ext(struct cpu_msr_indata_ext *input, + u16 thread_id, u32 func, + u8 ext_func) +{ + input->rd_len = CPUID_RD_DATA_LEN; + input->wr_len = CPUID_WR_DATA_LEN_EXT; + input->proto_cmd = RD_CPUID_CMD; + input->thread_lo = (thread_id & 0xFF) << 1; + input->thread_hi = thread_id >> 8; + input->value = func; + input->ext = ext_func; +} + +static inline void prepare_mca_msr_input_message(struct cpu_msr_indata *input, + u8 thread_id, u32 data_in) +{ + input->rd_len = MSR_RD_DATA_LEN; + input->wr_len = MSR_WR_DATA_LEN; + input->proto_cmd = RD_MCA_CMD; + input->thread = thread_id << 1; + input->value = data_in; +} + +static inline void prepare_mca_msr_input_message_ext(struct cpu_msr_indata_ext *input, + u16 thread_id, u32 data_in) +{ + input->rd_len = MSR_RD_DATA_LEN; + 
input->wr_len = MSR_WR_DATA_LEN_EXT; + input->proto_cmd = RD_MCA_CMD; + input->thread_lo = (thread_id & 0xFF) << 1; + input->thread_hi = thread_id >> 8; + input->value = data_in; +} + +static int sbrmi_get_rev(struct sbrmi_data *data) +{ + unsigned int rev; + u16 offset = SBRMI_REV; + int ret; + + ret = regmap_read(data->regmap, offset, &rev); + if (ret < 0) + return ret; + + data->rev = rev; + return 0; +} + +static int rmi_cpuid_input(struct sbrmi_data *data, struct apml_cpuid_msg *msg, + u16 thread) +{ + struct cpu_msr_indata input = {0}; + int val = 0, ret; + + /* Thread > 127, Thread128 CS register, 1'b1 needs to be set to 1 */ + if (thread > 127) { + thread -= 128; + val = 1; + } + + ret = regmap_write(data->regmap, SBRMI_THREAD128CS, val); + if (ret < 0) + return ret; + + prepare_cpuid_input_message(&input, thread, + msg->cpu_in_out & CPUID_MCA_FUNC_MASK, + msg->cpu_in_out >> CPUID_EXT_FUNC_INDEX); + + return regmap_bulk_write(data->regmap, CPUID_MCA_CMD, + &input, CPUID_WR_REG_LEN); +} + +static int rmi_cpuid_input_ext(struct sbrmi_data *data, struct apml_cpuid_msg *msg, + u16 thread) +{ + struct cpu_msr_indata_ext input = {0}; + + prepare_cpuid_input_message_ext(&input, thread, + msg->cpu_in_out & CPUID_MCA_FUNC_MASK, + msg->cpu_in_out >> CPUID_EXT_FUNC_INDEX); + + return regmap_bulk_write(data->regmap, CPUID_MCA_CMD, + &input, CPUID_WR_REG_LEN_EXT); +} + +/* Read CPUID function protocol */ +static int rmi_cpuid_read(struct sbrmi_data *data, + struct apml_cpuid_msg *msg) +{ + struct cpu_msr_outdata output = {0}; + int ret, hw_status; + u16 thread; + + mutex_lock(&data->lock); + /* cache the rev value to identify if protocol is supported or not */ + if (!data->rev) { + ret = sbrmi_get_rev(data); + if (ret < 0) + goto exit_unlock; + } + + /* Extract thread from the input msg structure */ + thread = msg->cpu_in_out >> CPUID_MCA_THRD_INDEX; + + switch (data->rev) { + case 0x10: + /* CPUID protocol for REV 0x10 is not supported*/ + ret = -EOPNOTSUPP; + goto 
exit_unlock; + case 0x20: + ret = rmi_cpuid_input(data, msg, thread); + if (ret) + goto exit_unlock; + break; + case 0x21: + ret = rmi_cpuid_input_ext(data, msg, thread); + if (ret) + goto exit_unlock; + break; + default: + ret = -EOPNOTSUPP; + goto exit_unlock; + } + + /* + * For RMI Rev 0x20, new h/w status bit is introduced. which is used + * by firmware to indicate completion of commands (0x71, 0x72, 0x73). + * wait for the status bit to be set by the hardware before + * reading the data out. + */ + ret = regmap_read_poll_timeout(data->regmap, SBRMI_STATUS, hw_status, + hw_status & HW_ALERT_MASK, 500, 2000000); + if (ret) + goto exit_unlock; + + ret = regmap_bulk_read(data->regmap, CPUID_MCA_CMD, + &output, CPUID_RD_REG_LEN); + if (ret < 0) + goto exit_unlock; + + ret = regmap_write(data->regmap, SBRMI_STATUS, + HW_ALERT_MASK); + if (ret < 0) + goto exit_unlock; + + if (output.num_bytes != CPUID_RD_REG_LEN - 1) { + ret = -EMSGSIZE; + goto exit_unlock; + } + if (output.status) { + ret = -EPROTOTYPE; + msg->fw_ret_code = output.status; + goto exit_unlock; + } + msg->cpu_in_out = output.value; +exit_unlock: + if (ret < 0) + msg->cpu_in_out = 0; + mutex_unlock(&data->lock); + return ret; +} + +static int rmi_mcamsr_input(struct sbrmi_data *data, struct apml_mcamsr_msg *msg, + u16 thread) +{ + struct cpu_msr_indata input = {0}; + int val = 0, ret; + + /* Thread > 127, Thread128 CS register, 1'b1 needs to be set to 1 */ + if (thread > 127) { + thread -= 128; + val = 1; + } + + ret = regmap_write(data->regmap, SBRMI_THREAD128CS, val); + if (ret < 0) + return ret; + + prepare_mca_msr_input_message(&input, thread, + msg->mcamsr_in_out & CPUID_MCA_FUNC_MASK); + + return regmap_bulk_write(data->regmap, CPUID_MCA_CMD, + &input, MSR_WR_REG_LEN); +} + +static int rmi_mcamsr_input_ext(struct sbrmi_data *data, struct apml_mcamsr_msg *msg, + u16 thread) +{ + struct cpu_msr_indata_ext input = {0}; + + prepare_mca_msr_input_message_ext(&input, thread, + msg->mcamsr_in_out & 
CPUID_MCA_FUNC_MASK); + + return regmap_bulk_write(data->regmap, CPUID_MCA_CMD, + &input, MSR_WR_REG_LEN_EXT); +} + +/* MCA MSR protocol */ +static int rmi_mca_msr_read(struct sbrmi_data *data, + struct apml_mcamsr_msg *msg) +{ + struct cpu_msr_outdata output = {0}; + int ret; + int hw_status; + u16 thread; + + mutex_lock(&data->lock); + /* cache the rev value to identify if protocol is supported or not */ + if (!data->rev) { + ret = sbrmi_get_rev(data); + if (ret < 0) + goto exit_unlock; + } + + /* Extract thread from the input msg structure */ + thread = msg->mcamsr_in_out >> CPUID_MCA_THRD_INDEX; + + switch (data->rev) { + case 0x10: + /* MCAMSR protocol for REV 0x10 is not supported*/ + ret = -EOPNOTSUPP; + goto exit_unlock; + case 0x20: + ret = rmi_mcamsr_input(data, msg, thread); + if (ret) + goto exit_unlock; + break; + case 0x21: + ret = rmi_mcamsr_input_ext(data, msg, thread); + if (ret) + goto exit_unlock; + break; + default: + ret = -EOPNOTSUPP; + goto exit_unlock; + } + + /* + * For RMI Rev 0x20, new h/w status bit is introduced. which is used + * by firmware to indicate completion of commands (0x71, 0x72, 0x73). + * wait for the status bit to be set by the hardware before + * reading the data out. 
+ */ + ret = regmap_read_poll_timeout(data->regmap, SBRMI_STATUS, hw_status, + hw_status & HW_ALERT_MASK, 500, 2000000); + if (ret) + goto exit_unlock; + + ret = regmap_bulk_read(data->regmap, CPUID_MCA_CMD, + &output, MSR_RD_REG_LEN); + if (ret < 0) + goto exit_unlock; + + ret = regmap_write(data->regmap, SBRMI_STATUS, + HW_ALERT_MASK); + if (ret < 0) + goto exit_unlock; + + if (output.num_bytes != MSR_RD_REG_LEN - 1) { + ret = -EMSGSIZE; + goto exit_unlock; + } + if (output.status) { + ret = -EPROTOTYPE; + msg->fw_ret_code = output.status; + goto exit_unlock; + } + msg->mcamsr_in_out = output.value; + +exit_unlock: + mutex_unlock(&data->lock); + return ret; +} + +int rmi_mailbox_xfer(struct sbrmi_data *data, + struct apml_mbox_msg *msg) +{ + unsigned int bytes, ec; + int i, ret; + int sw_status; + u8 byte; + + mutex_lock(&data->lock); + + msg->fw_ret_code = 0; + + /* Indicate firmware a command is to be serviced */ + ret = regmap_write(data->regmap, SBRMI_INBNDMSG7, START_CMD); + if (ret < 0) + goto exit_unlock; + + /* Write the command to SBRMI::InBndMsg_inst0 */ + ret = regmap_write(data->regmap, SBRMI_INBNDMSG0, msg->cmd); + if (ret < 0) + goto exit_unlock; + + /* + * For both read and write the initiator (BMC) writes + * Command Data In[31:0] to SBRMI::InBndMsg_inst[4:1] + * SBRMI_x3C(MSB):SBRMI_x39(LSB) + */ + for (i = 0; i < AMD_SBI_MB_DATA_SIZE; i++) { + byte = (msg->mb_in_out >> i * 8) & 0xff; + ret = regmap_write(data->regmap, SBRMI_INBNDMSG1 + i, byte); + if (ret < 0) + goto exit_unlock; + } + + /* + * Write 0x01 to SBRMI::SoftwareInterrupt to notify firmware to + * perform the requested read or write command + */ + ret = regmap_write(data->regmap, SBRMI_SW_INTERRUPT, TRIGGER_MAILBOX); + if (ret < 0) + goto exit_unlock; + + /* + * Firmware will write SBRMI::Status[SwAlertSts]=1 to generate + * an ALERT (if enabled) to initiator (BMC) to indicate completion + * of the requested command + */ + ret = regmap_read_poll_timeout(data->regmap, SBRMI_STATUS, 
sw_status, + sw_status & SW_ALERT_MASK, 500, 2000000); + if (ret) + goto exit_unlock; + + ret = regmap_read(data->regmap, SBRMI_OUTBNDMSG7, &ec); + if (ret || ec) + goto exit_clear_alert; + + /* Clear the input value before updating the output data */ + msg->mb_in_out = 0; + + /* + * For a read operation, the initiator (BMC) reads the firmware + * response Command Data Out[31:0] from SBRMI::OutBndMsg_inst[4:1] + * {SBRMI_x34(MSB):SBRMI_x31(LSB)}. + */ + for (i = 0; i < AMD_SBI_MB_DATA_SIZE; i++) { + ret = regmap_read(data->regmap, + SBRMI_OUTBNDMSG1 + i, &bytes); + if (ret < 0) + break; + msg->mb_in_out |= bytes << i * 8; + } + +exit_clear_alert: + /* + * BMC must write 1'b1 to SBRMI::Status[SwAlertSts] to clear the + * ALERT to initiator + */ + ret = regmap_write(data->regmap, SBRMI_STATUS, + sw_status | SW_ALERT_MASK); + if (ec) { + ret = -EPROTOTYPE; + msg->fw_ret_code = ec; + } +exit_unlock: + mutex_unlock(&data->lock); + return ret; +} + +static int apml_rmi_reg_xfer(struct sbrmi_data *data, + struct apml_reg_xfer_msg __user *arg) +{ + struct apml_reg_xfer_msg msg = { 0 }; + unsigned int data_read; + int ret; + + /* Copy the structure from user */ + if (copy_from_user(&msg, arg, sizeof(struct apml_reg_xfer_msg))) + return -EFAULT; + + mutex_lock(&data->lock); + if (msg.rflag) { + ret = regmap_read(data->regmap, msg.reg_addr, &data_read); + if (!ret) + msg.data_in_out = data_read; + } else { + ret = regmap_write(data->regmap, msg.reg_addr, msg.data_in_out); + } + + mutex_unlock(&data->lock); + + if (msg.rflag && !ret) + if (copy_to_user(arg, &msg, sizeof(struct apml_reg_xfer_msg))) + return -EFAULT; + return ret; +} + +static int apml_mailbox_xfer(struct sbrmi_data *data, struct apml_mbox_msg __user *arg) +{ + struct apml_mbox_msg msg = { 0 }; + int ret; + + /* Copy the structure from user */ + if (copy_from_user(&msg, arg, sizeof(struct apml_mbox_msg))) + return -EFAULT; + + /* Mailbox protocol */ + ret = rmi_mailbox_xfer(data, &msg); + if (ret && ret != 
-EPROTOTYPE) + return ret; + + if (copy_to_user(arg, &msg, sizeof(struct apml_mbox_msg))) + return -EFAULT; + return ret; +} + +static int apml_cpuid_xfer(struct sbrmi_data *data, struct apml_cpuid_msg __user *arg) +{ + struct apml_cpuid_msg msg = { 0 }; + int ret; + + /* Copy the structure from user */ + if (copy_from_user(&msg, arg, sizeof(struct apml_cpuid_msg))) + return -EFAULT; + + /* CPUID Protocol */ + ret = rmi_cpuid_read(data, &msg); + if (ret && ret != -EPROTOTYPE) + return ret; + + if (copy_to_user(arg, &msg, sizeof(struct apml_cpuid_msg))) + return -EFAULT; + return ret; +} + +static int apml_mcamsr_xfer(struct sbrmi_data *data, struct apml_mcamsr_msg __user *arg) +{ + struct apml_mcamsr_msg msg = { 0 }; + int ret; + + /* Copy the structure from user */ + if (copy_from_user(&msg, arg, sizeof(struct apml_mcamsr_msg))) + return -EFAULT; + + /* MCAMSR Protocol */ + ret = rmi_mca_msr_read(data, &msg); + if (ret && ret != -EPROTOTYPE) + return ret; + + if (copy_to_user(arg, &msg, sizeof(struct apml_mcamsr_msg))) + return -EFAULT; + return ret; +} + +static long sbrmi_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) +{ + void __user *argp = (void __user *)arg; + struct sbrmi_data *data; + + data = container_of(fp->private_data, struct sbrmi_data, sbrmi_misc_dev); + switch (cmd) { + case SBRMI_IOCTL_MBOX_CMD: + return apml_mailbox_xfer(data, argp); + case SBRMI_IOCTL_CPUID_CMD: + return apml_cpuid_xfer(data, argp); + case SBRMI_IOCTL_MCAMSR_CMD: + return apml_mcamsr_xfer(data, argp); + case SBRMI_IOCTL_REG_XFER_CMD: + return apml_rmi_reg_xfer(data, argp); + default: + return -ENOTTY; + } +} + +static const struct file_operations sbrmi_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = sbrmi_ioctl, + .compat_ioctl = compat_ptr_ioctl, +}; + +int create_misc_rmi_device(struct sbrmi_data *data, + struct device *dev) +{ + data->sbrmi_misc_dev.name = devm_kasprintf(dev, + GFP_KERNEL, + "sbrmi-%x", + data->dev_static_addr); + data->sbrmi_misc_dev.minor 
= MISC_DYNAMIC_MINOR; + data->sbrmi_misc_dev.fops = &sbrmi_fops; + data->sbrmi_misc_dev.parent = dev; + data->sbrmi_misc_dev.nodename = devm_kasprintf(dev, + GFP_KERNEL, + "sbrmi-%x", + data->dev_static_addr); + data->sbrmi_misc_dev.mode = 0600; + + return misc_register(&data->sbrmi_misc_dev); +} diff --git a/drivers/misc/amd-sbi/rmi-core.h b/drivers/misc/amd-sbi/rmi-core.h new file mode 100644 index 000000000000..975ae858e9fd --- /dev/null +++ b/drivers/misc/amd-sbi/rmi-core.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2025 Advanced Micro Devices, Inc. + */ + +#ifndef _SBRMI_CORE_H_ +#define _SBRMI_CORE_H_ + +#include <linux/miscdevice.h> +#include <linux/mutex.h> +#include <linux/i2c.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <uapi/misc/amd-apml.h> + +/* SB-RMI registers */ +enum sbrmi_reg { + SBRMI_REV, + SBRMI_CTRL, + SBRMI_STATUS, + SBRMI_OUTBNDMSG0 = 0x30, + SBRMI_OUTBNDMSG1, + SBRMI_OUTBNDMSG2, + SBRMI_OUTBNDMSG3, + SBRMI_OUTBNDMSG4, + SBRMI_OUTBNDMSG5, + SBRMI_OUTBNDMSG6, + SBRMI_OUTBNDMSG7, + SBRMI_INBNDMSG0, + SBRMI_INBNDMSG1, + SBRMI_INBNDMSG2, + SBRMI_INBNDMSG3, + SBRMI_INBNDMSG4, + SBRMI_INBNDMSG5, + SBRMI_INBNDMSG6, + SBRMI_INBNDMSG7, + SBRMI_SW_INTERRUPT, + SBRMI_THREAD128CS = 0x4b, +}; + +/* + * SB-RMI supports soft mailbox service request to MP1 (power management + * firmware) through SBRMI inbound/outbound message registers. 
+ * SB-RMI message IDs + */ +enum sbrmi_msg_id { + SBRMI_READ_PKG_PWR_CONSUMPTION = 0x1, + SBRMI_WRITE_PKG_PWR_LIMIT, + SBRMI_READ_PKG_PWR_LIMIT, + SBRMI_READ_PKG_MAX_PWR_LIMIT, +}; + +/* Each client has this additional data */ +struct sbrmi_data { + struct miscdevice sbrmi_misc_dev; + struct regmap *regmap; + /* Mutex locking */ + struct mutex lock; + u32 pwr_limit_max; + u8 dev_static_addr; + u8 rev; +}; + +int rmi_mailbox_xfer(struct sbrmi_data *data, struct apml_mbox_msg *msg); +#ifdef CONFIG_AMD_SBRMI_HWMON +int create_hwmon_sensor_device(struct device *dev, struct sbrmi_data *data); +#else +static inline int create_hwmon_sensor_device(struct device *dev, struct sbrmi_data *data) +{ + return 0; +} +#endif +int create_misc_rmi_device(struct sbrmi_data *data, struct device *dev); +#endif /*_SBRMI_CORE_H_*/ diff --git a/drivers/misc/amd-sbi/rmi-hwmon.c b/drivers/misc/amd-sbi/rmi-hwmon.c new file mode 100644 index 000000000000..f4f015605daa --- /dev/null +++ b/drivers/misc/amd-sbi/rmi-hwmon.c @@ -0,0 +1,120 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * rmi-hwmon.c - hwmon sensor support for side band RMI + * + * Copyright (C) 2025 Advanced Micro Devices, Inc. 
+ */ +#include <linux/err.h> +#include <linux/hwmon.h> +#include <uapi/misc/amd-apml.h> +#include "rmi-core.h" + +/* Do not allow setting negative power limit */ +#define SBRMI_PWR_MIN 0 + +static int sbrmi_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long *val) +{ + struct sbrmi_data *data = dev_get_drvdata(dev); + struct apml_mbox_msg msg = { 0 }; + int ret; + + if (!data) + return -ENODEV; + + if (type != hwmon_power) + return -EINVAL; + + switch (attr) { + case hwmon_power_input: + msg.cmd = SBRMI_READ_PKG_PWR_CONSUMPTION; + ret = rmi_mailbox_xfer(data, &msg); + break; + case hwmon_power_cap: + msg.cmd = SBRMI_READ_PKG_PWR_LIMIT; + ret = rmi_mailbox_xfer(data, &msg); + break; + case hwmon_power_cap_max: + msg.mb_in_out = data->pwr_limit_max; + ret = 0; + break; + default: + return -EINVAL; + } + if (ret < 0) + return ret; + /* hwmon power attributes are in microWatt */ + *val = (long)msg.mb_in_out * 1000; + return ret; +} + +static int sbrmi_write(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long val) +{ + struct sbrmi_data *data = dev_get_drvdata(dev); + struct apml_mbox_msg msg = { 0 }; + + if (!data) + return -ENODEV; + + if (type != hwmon_power && attr != hwmon_power_cap) + return -EINVAL; + /* + * hwmon power attributes are in microWatt + * mailbox read/write is in mWatt + */ + val /= 1000; + + val = clamp_val(val, SBRMI_PWR_MIN, data->pwr_limit_max); + + msg.cmd = SBRMI_WRITE_PKG_PWR_LIMIT; + msg.mb_in_out = val; + + return rmi_mailbox_xfer(data, &msg); +} + +static umode_t sbrmi_is_visible(const void *data, + enum hwmon_sensor_types type, + u32 attr, int channel) +{ + switch (type) { + case hwmon_power: + switch (attr) { + case hwmon_power_input: + case hwmon_power_cap_max: + return 0444; + case hwmon_power_cap: + return 0644; + } + break; + default: + break; + } + return 0; +} + +static const struct hwmon_channel_info * const sbrmi_info[] = { + HWMON_CHANNEL_INFO(power, + HWMON_P_INPUT | 
HWMON_P_CAP | HWMON_P_CAP_MAX), + NULL +}; + +static const struct hwmon_ops sbrmi_hwmon_ops = { + .is_visible = sbrmi_is_visible, + .read = sbrmi_read, + .write = sbrmi_write, +}; + +static const struct hwmon_chip_info sbrmi_chip_info = { + .ops = &sbrmi_hwmon_ops, + .info = sbrmi_info, +}; + +int create_hwmon_sensor_device(struct device *dev, struct sbrmi_data *data) +{ + struct device *hwmon_dev; + + hwmon_dev = devm_hwmon_device_register_with_info(dev, "sbrmi", data, + &sbrmi_chip_info, NULL); + return PTR_ERR_OR_ZERO(hwmon_dev); +} diff --git a/drivers/misc/amd-sbi/rmi-i2c.c b/drivers/misc/amd-sbi/rmi-i2c.c new file mode 100644 index 000000000000..f0cc99000b69 --- /dev/null +++ b/drivers/misc/amd-sbi/rmi-i2c.c @@ -0,0 +1,233 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * rmi-i2c.c - Side band RMI over I2C support for AMD out + * of band management + * + * Copyright (C) 2024 Advanced Micro Devices, Inc. + */ + +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/i2c.h> +#include <linux/i3c/device.h> +#include <linux/i3c/master.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/of.h> +#include <linux/regmap.h> +#include "rmi-core.h" + +#define REV_TWO_BYTE_ADDR 0x21 + +static int sbrmi_enable_alert(struct sbrmi_data *data) +{ + int ctrl, ret; + + /* + * Enable the SB-RMI Software alert status + * by writing 0 to bit 4 of Control register(0x1) + */ + ret = regmap_read(data->regmap, SBRMI_CTRL, &ctrl); + if (ret < 0) + return ret; + + if (ctrl & 0x10) { + ctrl &= ~0x10; + return regmap_write(data->regmap, SBRMI_CTRL, ctrl); + } + + return 0; +} + +static int sbrmi_get_max_pwr_limit(struct sbrmi_data *data) +{ + struct apml_mbox_msg msg = { 0 }; + int ret; + + msg.cmd = SBRMI_READ_PKG_MAX_PWR_LIMIT; + ret = rmi_mailbox_xfer(data, &msg); + if (ret < 0) + return ret; + data->pwr_limit_max = msg.mb_in_out; + + return ret; +} + +static int sbrmi_common_probe(struct device *dev, struct regmap *regmap, 
uint8_t address) +{ + struct sbrmi_data *data; + int ret; + + data = devm_kzalloc(dev, sizeof(struct sbrmi_data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->regmap = regmap; + mutex_init(&data->lock); + + /* Enable alert for SB-RMI sequence */ + ret = sbrmi_enable_alert(data); + if (ret < 0) + return ret; + + /* Cache maximum power limit */ + ret = sbrmi_get_max_pwr_limit(data); + if (ret < 0) + return ret; + + data->dev_static_addr = address; + + dev_set_drvdata(dev, data); + + ret = create_hwmon_sensor_device(dev, data); + if (ret < 0) + return ret; + return create_misc_rmi_device(data, dev); +} + +static struct regmap_config sbrmi_regmap_config = { + .reg_bits = 8, + .val_bits = 8, +}; + +static struct regmap_config sbrmi_regmap_config_ext = { + .reg_bits = 16, + .val_bits = 8, + .reg_format_endian = REGMAP_ENDIAN_LITTLE, +}; + +static int sbrmi_i2c_probe(struct i2c_client *client) +{ + struct device *dev = &client->dev; + struct regmap *regmap; + int rev, ret; + + regmap = devm_regmap_init_i2c(client, &sbrmi_regmap_config); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + ret = regmap_read(regmap, SBRMI_REV, &rev); + if (ret) + return ret; + + /* + * For Turin and newer platforms, revision is 0x21 or later. This is + * to identify the two byte register address size. However, one + * byte transaction can be successful. + * Verify if revision is 0x21 or later, if yes, switch to 2 byte + * address size. + * Continuously using 1 byte address for revision 0x21 or later can lead + * to bus corruption. 
+ */ + if (rev >= REV_TWO_BYTE_ADDR) { + regmap = devm_regmap_init_i2c(client, &sbrmi_regmap_config_ext); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + } + return sbrmi_common_probe(dev, regmap, client->addr); +} + +static void sbrmi_i2c_remove(struct i2c_client *client) +{ + struct sbrmi_data *data = dev_get_drvdata(&client->dev); + + misc_deregister(&data->sbrmi_misc_dev); + /* Assign fops and parent of misc dev to NULL */ + data->sbrmi_misc_dev.fops = NULL; + data->sbrmi_misc_dev.parent = NULL; + dev_info(&client->dev, "Removed sbrmi-i2c driver\n"); + return; +} + +static const struct i2c_device_id sbrmi_id[] = { + {"sbrmi-i2c"}, + {} +}; +MODULE_DEVICE_TABLE(i2c, sbrmi_id); + +static const struct of_device_id __maybe_unused sbrmi_of_match[] = { + { + .compatible = "amd,sbrmi", + }, + { }, +}; +MODULE_DEVICE_TABLE(of, sbrmi_of_match); + +static struct i2c_driver sbrmi_driver = { + .driver = { + .name = "sbrmi-i2c", + .of_match_table = of_match_ptr(sbrmi_of_match), + }, + .probe = sbrmi_i2c_probe, + .remove = sbrmi_i2c_remove, + .id_table = sbrmi_id, +}; + +static int sbrmi_i3c_probe(struct i3c_device *i3cdev) +{ + struct device *dev = i3cdev_to_dev(i3cdev); + struct regmap *regmap; + int rev, ret; + + regmap = devm_regmap_init_i3c(i3cdev, &sbrmi_regmap_config); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + ret = regmap_read(regmap, SBRMI_REV, &rev); + if (ret) + return ret; + + /* + * For Turin and newer platforms, revision is 0x21 or later. This is + * to identify the two byte register address size. However, one + * byte transaction can be successful. + * Verify if revision is 0x21 or later, if yes, switch to 2 byte + * address size. + * Continuously using 1 byte address for revision 0x21 or later can lead + * to bus corruption. + */ + if (rev >= REV_TWO_BYTE_ADDR) { + regmap = devm_regmap_init_i3c(i3cdev, &sbrmi_regmap_config_ext); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + } + + /* + * AMD APML I3C devices support static address. 
+ * If static address is defined, dynamic address is same as static address. + * In case static address is not defined, I3C master controller defined + * dynamic address is used. + */ + return sbrmi_common_probe(dev, regmap, i3cdev->desc->info.dyn_addr); +} + +static void sbrmi_i3c_remove(struct i3c_device *i3cdev) +{ + struct sbrmi_data *data = dev_get_drvdata(&i3cdev->dev); + + misc_deregister(&data->sbrmi_misc_dev); +} + +static const struct i3c_device_id sbrmi_i3c_id[] = { + /* PID for AMD SBRMI device */ + I3C_DEVICE_EXTRA_INFO(0x112, 0x0, 0x2, NULL), + {} +}; +MODULE_DEVICE_TABLE(i3c, sbrmi_i3c_id); + +static struct i3c_driver sbrmi_i3c_driver = { + .driver = { + .name = "sbrmi-i3c", + }, + .probe = sbrmi_i3c_probe, + .remove = sbrmi_i3c_remove, + .id_table = sbrmi_i3c_id, +}; + +module_i3c_i2c_driver(sbrmi_i3c_driver, &sbrmi_driver); + +MODULE_AUTHOR("Akshay Gupta <akshay.gupta@amd.com>"); +MODULE_AUTHOR("Naveen Krishna Chatradhi <naveenkrishna.chatradhi@amd.com>"); +MODULE_DESCRIPTION("Hwmon driver for AMD SB-RMI emulated sensor"); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/apds9802als.c b/drivers/misc/apds9802als.c index 693f0e539f37..6db4db975b9a 100644 --- a/drivers/misc/apds9802als.c +++ b/drivers/misc/apds9802als.c @@ -285,7 +285,7 @@ static UNIVERSAL_DEV_PM_OPS(apds9802als_pm_ops, apds9802als_suspend, #endif /* CONFIG_PM */ static const struct i2c_device_id apds9802als_id[] = { - { DRIVER_NAME, 0 }, + { DRIVER_NAME }, { } }; diff --git a/drivers/misc/apds990x.c b/drivers/misc/apds990x.c index 92b92be91d60..58946c4ff1a5 100644 --- a/drivers/misc/apds990x.c +++ b/drivers/misc/apds990x.c @@ -625,15 +625,15 @@ static ssize_t apds990x_lux_show(struct device *dev, struct apds990x_chip *chip = dev_get_drvdata(dev); ssize_t ret; u32 result; - long timeout; + long time_left; if (pm_runtime_suspended(dev)) return -EIO; - timeout = wait_event_interruptible_timeout(chip->wait, - !chip->lux_wait_fresh_res, - msecs_to_jiffies(APDS_TIMEOUT)); - if (!timeout) 
+ time_left = wait_event_interruptible_timeout(chip->wait, + !chip->lux_wait_fresh_res, + msecs_to_jiffies(APDS_TIMEOUT)); + if (!time_left) return -EIO; mutex_lock(&chip->mutex); @@ -984,7 +984,6 @@ static ssize_t apds990x_power_state_show(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "%d\n", !pm_runtime_suspended(dev)); - return 0; } static ssize_t apds990x_power_state_store(struct device *dev, @@ -1147,7 +1146,7 @@ static int apds990x_probe(struct i2c_client *client) err = chip->pdata->setup_resources(); if (err) { err = -EINVAL; - goto fail3; + goto fail4; } } @@ -1155,7 +1154,7 @@ static int apds990x_probe(struct i2c_client *client) apds990x_attribute_group); if (err < 0) { dev_err(&chip->client->dev, "Sysfs registration failed\n"); - goto fail4; + goto fail5; } err = request_threaded_irq(client->irq, NULL, @@ -1166,15 +1165,17 @@ static int apds990x_probe(struct i2c_client *client) if (err) { dev_err(&client->dev, "could not get IRQ %d\n", client->irq); - goto fail5; + goto fail6; } return err; -fail5: +fail6: sysfs_remove_group(&chip->client->dev.kobj, &apds990x_attribute_group[0]); -fail4: +fail5: if (chip->pdata && chip->pdata->release_resources) chip->pdata->release_resources(); +fail4: + pm_runtime_disable(&client->dev); fail3: regulator_bulk_disable(ARRAY_SIZE(chip->regs), chip->regs); fail2: @@ -1253,7 +1254,7 @@ static int apds990x_runtime_resume(struct device *dev) #endif static const struct i2c_device_id apds990x_id[] = { - {"apds990x", 0 }, + { "apds990x" }, {} }; diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c index 7f9f562d6433..35a196341534 100644 --- a/drivers/misc/atmel-ssc.c +++ b/drivers/misc/atmel-ssc.c @@ -153,7 +153,7 @@ static int ssc_sound_dai_probe(struct ssc_device *ssc) ssc->sound_dai = false; - if (!of_property_read_bool(np, "#sound-dai-cells")) + if (!of_property_present(np, "#sound-dai-cells")) return 0; id = of_alias_get_id(np, "ssc"); @@ -176,7 +176,7 @@ static void 
ssc_sound_dai_remove(struct ssc_device *ssc) #else static inline int ssc_sound_dai_probe(struct ssc_device *ssc) { - if (of_property_read_bool(ssc->pdev->dev.of_node, "#sound-dai-cells")) + if (of_property_present(ssc->pdev->dev.of_node, "#sound-dai-cells")) return -ENOTSUPP; return 0; @@ -212,8 +212,7 @@ static int ssc_probe(struct platform_device *pdev) of_property_read_bool(np, "atmel,clk-from-rk-pin"); } - regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); - ssc->regs = devm_ioremap_resource(&pdev->dev, regs); + ssc->regs = devm_platform_get_and_ioremap_resource(pdev, 0, ®s); if (IS_ERR(ssc->regs)) return PTR_ERR(ssc->regs); @@ -252,7 +251,7 @@ static int ssc_probe(struct platform_device *pdev) return 0; } -static int ssc_remove(struct platform_device *pdev) +static void ssc_remove(struct platform_device *pdev) { struct ssc_device *ssc = platform_get_drvdata(pdev); @@ -261,8 +260,6 @@ static int ssc_remove(struct platform_device *pdev) mutex_lock(&user_lock); list_del(&ssc->list); mutex_unlock(&user_lock); - - return 0; } static struct platform_driver ssc_driver = { diff --git a/drivers/misc/bcm-vk/bcm_vk.h b/drivers/misc/bcm-vk/bcm_vk.h index 25d51222eedf..9344c2366a4b 100644 --- a/drivers/misc/bcm-vk/bcm_vk.h +++ b/drivers/misc/bcm-vk/bcm_vk.h @@ -311,7 +311,6 @@ struct bcm_vk_peer_log { u32 wr_idx; u32 buf_size; u32 mask; - char data[]; }; /* max buf size allowed */ @@ -340,7 +339,7 @@ struct bcm_vk_proc_mon_info { }; struct bcm_vk_hb_ctrl { - struct timer_list timer; + struct delayed_work work; u32 last_uptime; u32 lost_cnt; }; diff --git a/drivers/misc/bcm-vk/bcm_vk_msg.c b/drivers/misc/bcm-vk/bcm_vk_msg.c index 3c081504f38c..1f42d1d5a630 100644 --- a/drivers/misc/bcm-vk/bcm_vk_msg.c +++ b/drivers/misc/bcm-vk/bcm_vk_msg.c @@ -137,11 +137,11 @@ void bcm_vk_set_host_alert(struct bcm_vk *vk, u32 bit_mask) #define BCM_VK_HB_TIMER_VALUE (BCM_VK_HB_TIMER_S * HZ) #define BCM_VK_HB_LOST_MAX (27 / BCM_VK_HB_TIMER_S) -static void bcm_vk_hb_poll(struct timer_list 
*t) +static void bcm_vk_hb_poll(struct work_struct *work) { u32 uptime_s; - struct bcm_vk_hb_ctrl *hb = container_of(t, struct bcm_vk_hb_ctrl, - timer); + struct bcm_vk_hb_ctrl *hb = container_of(to_delayed_work(work), struct bcm_vk_hb_ctrl, + work); struct bcm_vk *vk = container_of(hb, struct bcm_vk, hb_ctrl); if (bcm_vk_drv_access_ok(vk) && hb_mon_is_on()) { @@ -177,22 +177,22 @@ static void bcm_vk_hb_poll(struct timer_list *t) bcm_vk_set_host_alert(vk, ERR_LOG_HOST_HB_FAIL); } /* re-arm timer */ - mod_timer(&hb->timer, jiffies + BCM_VK_HB_TIMER_VALUE); + schedule_delayed_work(&hb->work, BCM_VK_HB_TIMER_VALUE); } void bcm_vk_hb_init(struct bcm_vk *vk) { struct bcm_vk_hb_ctrl *hb = &vk->hb_ctrl; - timer_setup(&hb->timer, bcm_vk_hb_poll, 0); - mod_timer(&hb->timer, jiffies + BCM_VK_HB_TIMER_VALUE); + INIT_DELAYED_WORK(&hb->work, bcm_vk_hb_poll); + schedule_delayed_work(&hb->work, BCM_VK_HB_TIMER_VALUE); } void bcm_vk_hb_deinit(struct bcm_vk *vk) { struct bcm_vk_hb_ctrl *hb = &vk->hb_ctrl; - del_timer(&hb->timer); + cancel_delayed_work_sync(&hb->work); } static void bcm_vk_msgid_bitmap_clear(struct bcm_vk *vk, @@ -703,12 +703,12 @@ int bcm_vk_send_shutdown_msg(struct bcm_vk *vk, u32 shut_type, entry = kzalloc(struct_size(entry, to_v_msg, 1), GFP_KERNEL); if (!entry) return -ENOMEM; + entry->to_v_blks = 1; /* always 1 block */ /* fill up necessary data */ entry->to_v_msg[0].function_id = VK_FID_SHUTDOWN; set_q_num(&entry->to_v_msg[0], q_num); set_msg_id(&entry->to_v_msg[0], VK_SIMPLEX_MSG_ID); - entry->to_v_blks = 1; /* always 1 block */ entry->to_v_msg[0].cmd = shut_type; entry->to_v_msg[0].arg = pid; diff --git a/drivers/misc/bcm-vk/bcm_vk_msg.h b/drivers/misc/bcm-vk/bcm_vk_msg.h index 56784c8896d8..157495e48f15 100644 --- a/drivers/misc/bcm-vk/bcm_vk_msg.h +++ b/drivers/misc/bcm-vk/bcm_vk_msg.h @@ -116,7 +116,7 @@ struct bcm_vk_wkent { u32 usr_msg_id; u32 to_v_blks; u32 seq_num; - struct vk_msg_blk to_v_msg[]; + struct vk_msg_blk to_v_msg[] 
__counted_by(to_v_blks); }; /* queue stats counters */ diff --git a/drivers/misc/bcm-vk/bcm_vk_sg.c b/drivers/misc/bcm-vk/bcm_vk_sg.c index 2e9daaf3e492..d309216ee181 100644 --- a/drivers/misc/bcm-vk/bcm_vk_sg.c +++ b/drivers/misc/bcm-vk/bcm_vk_sg.c @@ -9,7 +9,7 @@ #include <linux/vmalloc.h> #include <asm/page.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <uapi/linux/misc/bcm_vk.h> diff --git a/drivers/misc/bcm-vk/bcm_vk_tty.c b/drivers/misc/bcm-vk/bcm_vk_tty.c index 6669625ba4c8..e6c42b772e96 100644 --- a/drivers/misc/bcm-vk/bcm_vk_tty.c +++ b/drivers/misc/bcm-vk/bcm_vk_tty.c @@ -43,7 +43,7 @@ struct bcm_vk_tty_chan { static void bcm_vk_tty_poll(struct timer_list *t) { - struct bcm_vk *vk = from_timer(vk, t, serial_timer); + struct bcm_vk *vk = timer_container_of(vk, t, serial_timer); queue_work(vk->tty_wq_thread, &vk->tty_wq_work); mod_timer(&vk->serial_timer, jiffies + SERIAL_TIMER_VALUE); @@ -64,9 +64,9 @@ static void bcm_vk_tty_wq_handler(struct work_struct *work) struct bcm_vk_tty *vktty; int card_status; int count; - unsigned char c; int i; int wr; + u8 c; card_status = vkread32(vk, BAR_0, BAR_CARD_STATUS); if (BCM_VK_INTF_IS_DOWN(card_status)) @@ -177,7 +177,7 @@ static void bcm_vk_tty_close(struct tty_struct *tty, struct file *file) vk->tty[tty->index].is_opened = false; if (tty->count == 1) - del_timer_sync(&vk->serial_timer); + timer_delete_sync(&vk->serial_timer); } static void bcm_vk_tty_doorbell(struct bcm_vk *vk, u32 db_val) @@ -186,14 +186,13 @@ static void bcm_vk_tty_doorbell(struct bcm_vk *vk, u32 db_val) VK_BAR0_REGSEG_DB_BASE + VK_BAR0_REGSEG_TTY_DB_OFFSET); } -static int bcm_vk_tty_write(struct tty_struct *tty, - const unsigned char *buffer, - int count) +static ssize_t bcm_vk_tty_write(struct tty_struct *tty, const u8 *buffer, + size_t count) { int index; struct bcm_vk *vk; struct bcm_vk_tty *vktty; - int i; + size_t i; index = tty->index; vk = dev_get_drvdata(tty->dev); @@ -305,7 +304,7 @@ void bcm_vk_tty_exit(struct 
bcm_vk *vk) { int i; - del_timer_sync(&vk->serial_timer); + timer_delete_sync(&vk->serial_timer); for (i = 0; i < BCM_VK_NUM_TTY; ++i) { tty_port_unregister_device(&vk->tty[i].port, vk->tty_drv, diff --git a/drivers/misc/bh1770glc.c b/drivers/misc/bh1770glc.c index 1629b62fd052..45f8fc69a711 100644 --- a/drivers/misc/bh1770glc.c +++ b/drivers/misc/bh1770glc.c @@ -640,7 +640,9 @@ static ssize_t bh1770_power_state_store(struct device *dev, mutex_lock(&chip->mutex); if (value) { - pm_runtime_get_sync(dev); + ret = pm_runtime_resume_and_get(dev); + if (ret < 0) + goto leave; ret = bh1770_lux_rate(chip, chip->lux_rate_index); if (ret < 0) { @@ -680,15 +682,15 @@ static ssize_t bh1770_lux_result_show(struct device *dev, { struct bh1770_chip *chip = dev_get_drvdata(dev); ssize_t ret; - long timeout; + long time_left; if (pm_runtime_suspended(dev)) return -EIO; /* Chip is not enabled at all */ - timeout = wait_event_interruptible_timeout(chip->wait, - !chip->lux_wait_result, - msecs_to_jiffies(BH1770_TIMEOUT)); - if (!timeout) + time_left = wait_event_interruptible_timeout(chip->wait, + !chip->lux_wait_result, + msecs_to_jiffies(BH1770_TIMEOUT)); + if (!time_left) return -EIO; mutex_lock(&chip->mutex); @@ -1361,8 +1363,8 @@ static int bh1770_runtime_resume(struct device *dev) #endif static const struct i2c_device_id bh1770_id[] = { - {"bh1770glc", 0 }, - {"sfh7770", 0 }, + { "bh1770glc" }, + { "sfh7770" }, {} }; diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c index f574c83b82cf..babdb60cc46c 100644 --- a/drivers/misc/c2port/core.c +++ b/drivers/misc/c2port/core.c @@ -714,7 +714,7 @@ static ssize_t __c2port_read_flash_data(struct c2port_device *dev, } static ssize_t c2port_read_flash_data(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, + const struct bin_attribute *attr, char *buffer, loff_t offset, size_t count) { struct c2port_device *c2dev = dev_get_drvdata(kobj_to_dev(kobj)); @@ -829,7 +829,7 @@ static ssize_t 
__c2port_write_flash_data(struct c2port_device *dev, } static ssize_t c2port_write_flash_data(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, + const struct bin_attribute *attr, char *buffer, loff_t offset, size_t count) { struct c2port_device *c2dev = dev_get_drvdata(kobj_to_dev(kobj)); @@ -849,8 +849,8 @@ static ssize_t c2port_write_flash_data(struct file *filp, struct kobject *kobj, return ret; } /* size is computed at run-time */ -static BIN_ATTR(flash_data, 0644, c2port_read_flash_data, - c2port_write_flash_data, 0); +static const BIN_ATTR(flash_data, 0644, c2port_read_flash_data, + c2port_write_flash_data, 0); /* * Class attributes @@ -869,14 +869,27 @@ static struct attribute *c2port_attrs[] = { NULL, }; -static struct bin_attribute *c2port_bin_attrs[] = { +static const struct bin_attribute *const c2port_bin_attrs[] = { &bin_attr_flash_data, NULL, }; +static size_t c2port_bin_attr_size(struct kobject *kobj, + const struct bin_attribute *attr, + int i) +{ + struct c2port_device *c2dev = dev_get_drvdata(kobj_to_dev(kobj)); + + if (attr == &bin_attr_flash_data) + return c2dev->ops->blocks_num * c2dev->ops->block_size; + + return attr->size; +} + static const struct attribute_group c2port_group = { .attrs = c2port_attrs, .bin_attrs = c2port_bin_attrs, + .bin_size = c2port_bin_attr_size, }; static const struct attribute_group *c2port_groups[] = { @@ -912,8 +925,7 @@ struct c2port_device *c2port_device_register(char *name, if (ret < 0) goto error_idr_alloc; c2dev->id = ret; - - bin_attr_flash_data.size = ops->blocks_num * ops->block_size; + c2dev->ops = ops; c2dev->dev = device_create(c2port_class, NULL, 0, c2dev, "c2port%d", c2dev->id); @@ -923,8 +935,7 @@ struct c2port_device *c2port_device_register(char *name, } dev_set_drvdata(c2dev->dev, c2dev); - strncpy(c2dev->name, name, C2PORT_NAME_LEN - 1); - c2dev->ops = ops; + strscpy(c2dev->name, name, sizeof(c2dev->name)); mutex_init(&c2dev->mutex); /* By default C2 port access is off */ diff 
--git a/drivers/misc/cardreader/Kconfig b/drivers/misc/cardreader/Kconfig index 022322dfb36e..a70700f0e592 100644 --- a/drivers/misc/cardreader/Kconfig +++ b/drivers/misc/cardreader/Kconfig @@ -16,7 +16,8 @@ config MISC_RTSX_PCI select MFD_CORE help This supports for Realtek PCI-Express card reader including rts5209, - rts5227, rts522A, rts5229, rts5249, rts524A, rts525A, rtl8411, rts5260. + rts5227, rts5228, rts522A, rts5229, rts5249, rts524A, rts525A, rtl8411, + rts5260, rts5261, rts5264. Realtek card readers support access to many types of memory cards, such as Memory Stick, Memory Stick Pro, Secure Digital and MultiMediaCard. diff --git a/drivers/misc/cardreader/Makefile b/drivers/misc/cardreader/Makefile index 895128475d83..1e1bca6b0b22 100644 --- a/drivers/misc/cardreader/Makefile +++ b/drivers/misc/cardreader/Makefile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0-only obj-$(CONFIG_MISC_ALCOR_PCI) += alcor_pci.o obj-$(CONFIG_MISC_RTSX_PCI) += rtsx_pci.o -rtsx_pci-objs := rtsx_pcr.o rts5209.o rts5229.o rtl8411.o rts5227.o rts5249.o rts5260.o rts5261.o rts5228.o +rtsx_pci-objs := rtsx_pcr.o rts5209.o rts5229.o rtl8411.o rts5227.o rts5249.o rts5260.o rts5261.o rts5228.o rts5264.o obj-$(CONFIG_MISC_RTSX_USB) += rtsx_usb.o diff --git a/drivers/misc/cardreader/alcor_pci.c b/drivers/misc/cardreader/alcor_pci.c index 0142c4bf4f42..8e7ea2c9142d 100644 --- a/drivers/misc/cardreader/alcor_pci.c +++ b/drivers/misc/cardreader/alcor_pci.c @@ -17,8 +17,6 @@ #include <linux/alcor_pci.h> -#define DRV_NAME_ALCOR_PCI "alcor_pci" - static DEFINE_IDA(alcor_pci_idr); static struct mfd_cell alcor_pci_cells[] = { @@ -123,23 +121,23 @@ static int alcor_pci_probe(struct pci_dev *pdev, priv->cfg = cfg; priv->irq = pdev->irq; - ret = pci_request_regions(pdev, DRV_NAME_ALCOR_PCI); + ret = pcim_request_all_regions(pdev, DRV_NAME_ALCOR_PCI); if (ret) { dev_err(&pdev->dev, "Cannot request region\n"); - ret = -ENOMEM; + ret = -EBUSY; goto error_free_ida; } if (!(pci_resource_flags(pdev, 
bar) & IORESOURCE_MEM)) { dev_err(&pdev->dev, "BAR %d is not iomem. Aborting.\n", bar); ret = -ENODEV; - goto error_release_regions; + goto error_free_ida; } priv->iobase = pcim_iomap(pdev, bar, 0); if (!priv->iobase) { ret = -ENOMEM; - goto error_release_regions; + goto error_free_ida; } /* make sure irqs are disabled */ @@ -149,7 +147,7 @@ static int alcor_pci_probe(struct pci_dev *pdev, ret = dma_set_mask_and_coherent(priv->dev, AU6601_SDMA_MASK); if (ret) { dev_err(priv->dev, "Failed to set DMA mask\n"); - goto error_release_regions; + goto error_free_ida; } pci_set_master(pdev); @@ -171,8 +169,6 @@ static int alcor_pci_probe(struct pci_dev *pdev, error_clear_drvdata: pci_clear_master(pdev); pci_set_drvdata(pdev, NULL); -error_release_regions: - pci_release_regions(pdev); error_free_ida: ida_free(&alcor_pci_idr, priv->id); return ret; @@ -188,7 +184,6 @@ static void alcor_pci_remove(struct pci_dev *pdev) ida_free(&alcor_pci_idr, priv->id); - pci_release_regions(pdev); pci_clear_master(pdev); pci_set_drvdata(pdev, NULL); } diff --git a/drivers/misc/cardreader/rts5227.c b/drivers/misc/cardreader/rts5227.c index 3dae5e3a1697..46444bb47f65 100644 --- a/drivers/misc/cardreader/rts5227.c +++ b/drivers/misc/cardreader/rts5227.c @@ -79,67 +79,28 @@ static void rts5227_fetch_vendor_settings(struct rtsx_pcr *pcr) pcr->sd30_drive_sel_3v3 = rtsx_reg_to_sd30_drive_sel_3v3(reg); if (rtsx_reg_check_reverse_socket(reg)) pcr->flags |= PCR_REVERSE_SOCKET; + if (rtsx_reg_check_cd_reverse(reg)) + pcr->option.sd_cd_reverse_en = 1; + if (rtsx_reg_check_wp_reverse(reg)) + pcr->option.sd_wp_reverse_en = 1; } static void rts5227_init_from_cfg(struct rtsx_pcr *pcr) { - struct pci_dev *pdev = pcr->pci; - int l1ss; - u32 lval; struct rtsx_cr_option *option = &pcr->option; - l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS); - if (!l1ss) - return; - - pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval); - if (CHK_PCI_PID(pcr, 0x522A)) { - if (0 == (lval & 0x0F)) - 
rtsx_pci_enable_oobs_polling(pcr); - else + if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN + | PM_L1_1_EN | PM_L1_2_EN)) rtsx_pci_disable_oobs_polling(pcr); + else + rtsx_pci_enable_oobs_polling(pcr); } - if (lval & PCI_L1SS_CTL1_ASPM_L1_1) - rtsx_set_dev_flag(pcr, ASPM_L1_1_EN); - else - rtsx_clear_dev_flag(pcr, ASPM_L1_1_EN); - - if (lval & PCI_L1SS_CTL1_ASPM_L1_2) - rtsx_set_dev_flag(pcr, ASPM_L1_2_EN); - else - rtsx_clear_dev_flag(pcr, ASPM_L1_2_EN); - - if (lval & PCI_L1SS_CTL1_PCIPM_L1_1) - rtsx_set_dev_flag(pcr, PM_L1_1_EN); - else - rtsx_clear_dev_flag(pcr, PM_L1_1_EN); - - if (lval & PCI_L1SS_CTL1_PCIPM_L1_2) - rtsx_set_dev_flag(pcr, PM_L1_2_EN); - else - rtsx_clear_dev_flag(pcr, PM_L1_2_EN); - if (option->ltr_en) { - u16 val; - - pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &val); - if (val & PCI_EXP_DEVCTL2_LTR_EN) { - option->ltr_enabled = true; - option->ltr_active = true; + if (option->ltr_enabled) rtsx_set_ltr_latency(pcr, option->ltr_active_latency); - } else { - option->ltr_enabled = false; - } } - - if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN - | PM_L1_1_EN | PM_L1_2_EN)) - option->force_clkreq_0 = false; - else - option->force_clkreq_0 = true; - } static int rts5227_extra_init_hw(struct rtsx_pcr *pcr) @@ -170,8 +131,10 @@ static int rts5227_extra_init_hw(struct rtsx_pcr *pcr) /* Configure force_clock_req */ if (pcr->flags & PCR_REVERSE_SOCKET) rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0x30, 0x30); - else - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0x30, 0x00); + else { + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0x20, option->sd_cd_reverse_en << 5); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0x10, option->sd_wp_reverse_en << 4); + } if (CHK_PCI_PID(pcr, 0x522A)) rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RTS522A_AUTOLOAD_CFG1, @@ -195,7 +158,7 @@ static int rts5227_extra_init_hw(struct rtsx_pcr *pcr) } } - if (option->force_clkreq_0 && pcr->aspm_mode == ASPM_MODE_CFG) + if (option->force_clkreq_0) 
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW); else @@ -393,6 +356,8 @@ void rts5227_init_params(struct rtsx_pcr *pcr) pcr->ms_pull_ctl_disable_tbl = rts5227_ms_pull_ctl_disable_tbl; pcr->reg_pm_ctrl3 = PM_CTRL3; + pcr->option.sd_cd_reverse_en = 0; + pcr->option.sd_wp_reverse_en = 0; } static int rts522a_optimize_phy(struct rtsx_pcr *pcr) @@ -551,5 +516,4 @@ void rts522a_init_params(struct rtsx_pcr *pcr) pcr->hw_param.interrupt_en |= SD_OC_INT_EN; pcr->hw_param.ocp_glitch = SD_OCP_GLITCH_10M; pcr->option.sd_800mA_ocp_thd = RTS522A_OCP_THD_800; - } diff --git a/drivers/misc/cardreader/rts5228.c b/drivers/misc/cardreader/rts5228.c index f4ab09439da7..db7e735ac24f 100644 --- a/drivers/misc/cardreader/rts5228.c +++ b/drivers/misc/cardreader/rts5228.c @@ -84,6 +84,10 @@ static void rtsx5228_fetch_vendor_settings(struct rtsx_pcr *pcr) pcr->sd30_drive_sel_3v3 = rtsx_reg_to_sd30_drive_sel_3v3(reg); if (rtsx_reg_check_reverse_socket(reg)) pcr->flags |= PCR_REVERSE_SOCKET; + if (rtsx_reg_check_cd_reverse(reg)) + pcr->option.sd_cd_reverse_en = 1; + if (rtsx_reg_check_wp_reverse(reg)) + pcr->option.sd_wp_reverse_en = 1; } static int rts5228_optimize_phy(struct rtsx_pcr *pcr) @@ -386,59 +390,25 @@ static void rts5228_process_ocp(struct rtsx_pcr *pcr) static void rts5228_init_from_cfg(struct rtsx_pcr *pcr) { - struct pci_dev *pdev = pcr->pci; - int l1ss; - u32 lval; struct rtsx_cr_option *option = &pcr->option; - l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS); - if (!l1ss) - return; - - pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval); - - if (0 == (lval & 0x0F)) - rtsx_pci_enable_oobs_polling(pcr); - else + if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN + | PM_L1_1_EN | PM_L1_2_EN)) rtsx_pci_disable_oobs_polling(pcr); - - if (lval & PCI_L1SS_CTL1_ASPM_L1_1) - rtsx_set_dev_flag(pcr, ASPM_L1_1_EN); - else - rtsx_clear_dev_flag(pcr, ASPM_L1_1_EN); - - if (lval & PCI_L1SS_CTL1_ASPM_L1_2) - 
rtsx_set_dev_flag(pcr, ASPM_L1_2_EN); - else - rtsx_clear_dev_flag(pcr, ASPM_L1_2_EN); - - if (lval & PCI_L1SS_CTL1_PCIPM_L1_1) - rtsx_set_dev_flag(pcr, PM_L1_1_EN); else - rtsx_clear_dev_flag(pcr, PM_L1_1_EN); - - if (lval & PCI_L1SS_CTL1_PCIPM_L1_2) - rtsx_set_dev_flag(pcr, PM_L1_2_EN); - else - rtsx_clear_dev_flag(pcr, PM_L1_2_EN); + rtsx_pci_enable_oobs_polling(pcr); rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0xFF, 0); - if (option->ltr_en) { - u16 val; - pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &val); - if (val & PCI_EXP_DEVCTL2_LTR_EN) { - option->ltr_enabled = true; - option->ltr_active = true; + if (option->ltr_en) { + if (option->ltr_enabled) rtsx_set_ltr_latency(pcr, option->ltr_active_latency); - } else { - option->ltr_enabled = false; - } } } static int rts5228_extra_init_hw(struct rtsx_pcr *pcr) { + struct rtsx_cr_option *option = &pcr->option; rtsx_pci_write_register(pcr, RTS5228_AUTOLOAD_CFG1, CD_RESUME_EN_MASK, CD_RESUME_EN_MASK); @@ -466,8 +436,21 @@ static int rts5228_extra_init_hw(struct rtsx_pcr *pcr) if (pcr->flags & PCR_REVERSE_SOCKET) rtsx_pci_write_register(pcr, PETXCFG, 0x30, 0x30); + else { + rtsx_pci_write_register(pcr, PETXCFG, 0x20, option->sd_cd_reverse_en << 5); + rtsx_pci_write_register(pcr, PETXCFG, 0x10, option->sd_wp_reverse_en << 4); + } + + /* + * If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced + * to drive low, and we forcibly request clock. 
+ */ + if (option->force_clkreq_0) + rtsx_pci_write_register(pcr, PETXCFG, + FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW); else - rtsx_pci_write_register(pcr, PETXCFG, 0x30, 0x00); + rtsx_pci_write_register(pcr, PETXCFG, + FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH); rtsx_pci_write_register(pcr, PWD_SUSPEND_EN, 0xFF, 0xFB); @@ -743,4 +726,6 @@ void rts5228_init_params(struct rtsx_pcr *pcr) hw_param->interrupt_en |= SD_OC_INT_EN; hw_param->ocp_glitch = SD_OCP_GLITCH_800U; option->sd_800mA_ocp_thd = RTS5228_LDO1_OCP_THD_930; + option->sd_cd_reverse_en = 0; + option->sd_wp_reverse_en = 0; } diff --git a/drivers/misc/cardreader/rts5249.c b/drivers/misc/cardreader/rts5249.c index 47ab72a43256..38aefd8db452 100644 --- a/drivers/misc/cardreader/rts5249.c +++ b/drivers/misc/cardreader/rts5249.c @@ -60,6 +60,7 @@ static void rtsx_base_fetch_vendor_settings(struct rtsx_pcr *pcr) pci_read_config_dword(pdev, PCR_SETTING_REG1, ®); pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg); + pci_write_config_dword(pdev, 0x718, 0x0007C000); if (!rtsx_vendor_setting_valid(reg)) { pcr_dbg(pcr, "skip fetch vendor setting\n"); @@ -82,68 +83,30 @@ static void rtsx_base_fetch_vendor_settings(struct rtsx_pcr *pcr) pcr->sd30_drive_sel_3v3 = rtsx_reg_to_sd30_drive_sel_3v3(reg); if (rtsx_reg_check_reverse_socket(reg)) pcr->flags |= PCR_REVERSE_SOCKET; + if (rtsx_reg_check_cd_reverse(reg)) + pcr->option.sd_cd_reverse_en = 1; + if (rtsx_reg_check_wp_reverse(reg)) + pcr->option.sd_wp_reverse_en = 1; } static void rts5249_init_from_cfg(struct rtsx_pcr *pcr) { - struct pci_dev *pdev = pcr->pci; - int l1ss; struct rtsx_cr_option *option = &(pcr->option); - u32 lval; - - l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS); - if (!l1ss) - return; - - pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval); if (CHK_PCI_PID(pcr, PID_524A) || CHK_PCI_PID(pcr, PID_525A)) { - if (0 == (lval & 0x0F)) - rtsx_pci_enable_oobs_polling(pcr); - else + if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | 
ASPM_L1_2_EN + | PM_L1_1_EN | PM_L1_2_EN)) rtsx_pci_disable_oobs_polling(pcr); + else + rtsx_pci_enable_oobs_polling(pcr); } - - if (lval & PCI_L1SS_CTL1_ASPM_L1_1) - rtsx_set_dev_flag(pcr, ASPM_L1_1_EN); - - if (lval & PCI_L1SS_CTL1_ASPM_L1_2) - rtsx_set_dev_flag(pcr, ASPM_L1_2_EN); - - if (lval & PCI_L1SS_CTL1_PCIPM_L1_1) - rtsx_set_dev_flag(pcr, PM_L1_1_EN); - - if (lval & PCI_L1SS_CTL1_PCIPM_L1_2) - rtsx_set_dev_flag(pcr, PM_L1_2_EN); - if (option->ltr_en) { - u16 val; - - pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &val); - if (val & PCI_EXP_DEVCTL2_LTR_EN) { - option->ltr_enabled = true; - option->ltr_active = true; + if (option->ltr_enabled) rtsx_set_ltr_latency(pcr, option->ltr_active_latency); - } else { - option->ltr_enabled = false; - } } } -static int rts5249_init_from_hw(struct rtsx_pcr *pcr) -{ - struct rtsx_cr_option *option = &(pcr->option); - - if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN - | PM_L1_1_EN | PM_L1_2_EN)) - option->force_clkreq_0 = false; - else - option->force_clkreq_0 = true; - - return 0; -} - static void rts52xa_force_power_down(struct rtsx_pcr *pcr, u8 pm_state, bool runtime) { /* Set relink_time to 0 */ @@ -276,7 +239,6 @@ static int rts5249_extra_init_hw(struct rtsx_pcr *pcr) struct rtsx_cr_option *option = &(pcr->option); rts5249_init_from_cfg(pcr); - rts5249_init_from_hw(pcr); rtsx_pci_init_cmd(pcr); @@ -297,9 +259,11 @@ static int rts5249_extra_init_hw(struct rtsx_pcr *pcr) /* Configure driving */ rts5249_fill_driving(pcr, OUTPUT_3V3); if (pcr->flags & PCR_REVERSE_SOCKET) - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0xB0, 0xB0); - else - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0xB0, 0x80); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0x30, 0x30); + else { + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0x20, option->sd_cd_reverse_en << 5); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0x10, option->sd_wp_reverse_en << 4); + } rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF); @@ -327,11 +291,12 @@ static 
int rts5249_extra_init_hw(struct rtsx_pcr *pcr) } } + /* * If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced * to drive low, and we forcibly request clock. */ - if (option->force_clkreq_0 && pcr->aspm_mode == ASPM_MODE_CFG) + if (option->force_clkreq_0) rtsx_pci_write_register(pcr, PETXCFG, FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW); else @@ -614,6 +579,9 @@ void rts5249_init_params(struct rtsx_pcr *pcr) option->ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5249_DEF; option->ltr_l1off_snooze_sspwrgate = LTR_L1OFF_SNOOZE_SSPWRGATE_5249_DEF; + + option->sd_cd_reverse_en = 0; + option->sd_wp_reverse_en = 0; } static int rts524a_write_phy(struct rtsx_pcr *pcr, u8 addr, u16 val) diff --git a/drivers/misc/cardreader/rts5260.c b/drivers/misc/cardreader/rts5260.c index 79b18f6f73a8..d2d3a6ccb8f7 100644 --- a/drivers/misc/cardreader/rts5260.c +++ b/drivers/misc/cardreader/rts5260.c @@ -480,47 +480,19 @@ static void rts5260_pwr_saving_setting(struct rtsx_pcr *pcr) static void rts5260_init_from_cfg(struct rtsx_pcr *pcr) { - struct pci_dev *pdev = pcr->pci; - int l1ss; struct rtsx_cr_option *option = &pcr->option; - u32 lval; - - l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS); - if (!l1ss) - return; - - pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval); - - if (lval & PCI_L1SS_CTL1_ASPM_L1_1) - rtsx_set_dev_flag(pcr, ASPM_L1_1_EN); - - if (lval & PCI_L1SS_CTL1_ASPM_L1_2) - rtsx_set_dev_flag(pcr, ASPM_L1_2_EN); - - if (lval & PCI_L1SS_CTL1_PCIPM_L1_1) - rtsx_set_dev_flag(pcr, PM_L1_1_EN); - - if (lval & PCI_L1SS_CTL1_PCIPM_L1_2) - rtsx_set_dev_flag(pcr, PM_L1_2_EN); rts5260_pwr_saving_setting(pcr); if (option->ltr_en) { - u16 val; - - pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &val); - if (val & PCI_EXP_DEVCTL2_LTR_EN) { - option->ltr_enabled = true; - option->ltr_active = true; + if (option->ltr_enabled) rtsx_set_ltr_latency(pcr, option->ltr_active_latency); - } else { - option->ltr_enabled = false; - } } } static int rts5260_extra_init_hw(struct 
rtsx_pcr *pcr) { + struct rtsx_cr_option *option = &pcr->option; /* Set mcu_cnt to 7 to ensure data can be sampled properly */ rtsx_pci_write_register(pcr, 0xFC03, 0x7F, 0x07); @@ -539,6 +511,17 @@ static int rts5260_extra_init_hw(struct rtsx_pcr *pcr) rts5260_init_hw(pcr); + /* + * If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced + * to drive low, and we forcibly request clock. + */ + if (option->force_clkreq_0) + rtsx_pci_write_register(pcr, PETXCFG, + FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW); + else + rtsx_pci_write_register(pcr, PETXCFG, + FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH); + rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x10, 0x00); return 0; diff --git a/drivers/misc/cardreader/rts5261.c b/drivers/misc/cardreader/rts5261.c index 94af6bf8a25a..67252512a132 100644 --- a/drivers/misc/cardreader/rts5261.c +++ b/drivers/misc/cardreader/rts5261.c @@ -454,54 +454,17 @@ static void rts5261_init_from_hw(struct rtsx_pcr *pcr) static void rts5261_init_from_cfg(struct rtsx_pcr *pcr) { - struct pci_dev *pdev = pcr->pci; - int l1ss; - u32 lval; struct rtsx_cr_option *option = &pcr->option; - l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS); - if (!l1ss) - return; - - pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval); - - if (lval & PCI_L1SS_CTL1_ASPM_L1_1) - rtsx_set_dev_flag(pcr, ASPM_L1_1_EN); - else - rtsx_clear_dev_flag(pcr, ASPM_L1_1_EN); - - if (lval & PCI_L1SS_CTL1_ASPM_L1_2) - rtsx_set_dev_flag(pcr, ASPM_L1_2_EN); - else - rtsx_clear_dev_flag(pcr, ASPM_L1_2_EN); - - if (lval & PCI_L1SS_CTL1_PCIPM_L1_1) - rtsx_set_dev_flag(pcr, PM_L1_1_EN); - else - rtsx_clear_dev_flag(pcr, PM_L1_1_EN); - - if (lval & PCI_L1SS_CTL1_PCIPM_L1_2) - rtsx_set_dev_flag(pcr, PM_L1_2_EN); - else - rtsx_clear_dev_flag(pcr, PM_L1_2_EN); - - rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0xFF, 0); if (option->ltr_en) { - u16 val; - - pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &val); - if (val & PCI_EXP_DEVCTL2_LTR_EN) { - option->ltr_enabled = 
true; - option->ltr_active = true; + if (option->ltr_enabled) rtsx_set_ltr_latency(pcr, option->ltr_active_latency); - } else { - option->ltr_enabled = false; - } } } static int rts5261_extra_init_hw(struct rtsx_pcr *pcr) { + struct rtsx_cr_option *option = &pcr->option; u32 val; rtsx_pci_write_register(pcr, RTS5261_AUTOLOAD_CFG1, @@ -547,6 +510,17 @@ static int rts5261_extra_init_hw(struct rtsx_pcr *pcr) else rtsx_pci_write_register(pcr, PETXCFG, 0x30, 0x00); + /* + * If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced + * to drive low, and we forcibly request clock. + */ + if (option->force_clkreq_0) + rtsx_pci_write_register(pcr, PETXCFG, + FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW); + else + rtsx_pci_write_register(pcr, PETXCFG, + FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH); + rtsx_pci_write_register(pcr, PWD_SUSPEND_EN, 0xFF, 0xFB); if (pcr->rtd3_en) { diff --git a/drivers/misc/cardreader/rts5264.c b/drivers/misc/cardreader/rts5264.c new file mode 100644 index 000000000000..99a2d5ea6421 --- /dev/null +++ b/drivers/misc/cardreader/rts5264.c @@ -0,0 +1,972 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Driver for Realtek PCI-Express card reader + * + * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved. 
+ * + * Author: + * Ricky Wu <ricky_wu@realtek.com> + */ + +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/rtsx_pci.h> + +#include "rts5264.h" +#include "rtsx_pcr.h" + +static u8 rts5264_get_ic_version(struct rtsx_pcr *pcr) +{ + u8 val; + + rtsx_pci_read_register(pcr, DUMMY_REG_RESET_0, &val); + return val & 0x0F; +} + +static void rts5264_fill_driving(struct rtsx_pcr *pcr, u8 voltage) +{ + u8 driving_3v3[4][3] = { + {0x88, 0x88, 0x88}, + {0x77, 0x77, 0x77}, + {0x99, 0x99, 0x99}, + {0x66, 0x66, 0x66}, + }; + u8 driving_1v8[4][3] = { + {0x99, 0x99, 0x99}, + {0x77, 0x77, 0x77}, + {0xBB, 0xBB, 0xBB}, + {0x65, 0x65, 0x65}, + }; + u8 (*driving)[3], drive_sel; + + if (voltage == OUTPUT_3V3) { + driving = driving_3v3; + drive_sel = pcr->sd30_drive_sel_3v3; + } else { + driving = driving_1v8; + drive_sel = pcr->sd30_drive_sel_1v8; + } + + rtsx_pci_write_register(pcr, SD30_CLK_DRIVE_SEL, + 0xFF, driving[drive_sel][0]); + rtsx_pci_write_register(pcr, SD30_CMD_DRIVE_SEL, + 0xFF, driving[drive_sel][1]); + rtsx_pci_write_register(pcr, SD30_DAT_DRIVE_SEL, + 0xFF, driving[drive_sel][2]); +} + +static void rts5264_force_power_down(struct rtsx_pcr *pcr, u8 pm_state, bool runtime) +{ + /* Set relink_time to 0 */ + rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, MASK_8_BIT_DEF, 0); + rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 2, MASK_8_BIT_DEF, 0); + rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 3, + RELINK_TIME_MASK, 0); + + if (pm_state == HOST_ENTER_S3) + rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, + D3_DELINK_MODE_EN, D3_DELINK_MODE_EN); + + if (!runtime) { + rtsx_pci_write_register(pcr, RTS5264_AUTOLOAD_CFG1, + CD_RESUME_EN_MASK, 0); + rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x01, 0x00); + rtsx_pci_write_register(pcr, RTS5264_REG_PME_FORCE_CTL, + FORCE_PM_CONTROL | FORCE_PM_VALUE, FORCE_PM_CONTROL); + } else { + rtsx_pci_write_register(pcr, RTS5264_REG_PME_FORCE_CTL, + FORCE_PM_CONTROL | FORCE_PM_VALUE, 0); + 
rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x01, 0x01); + rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, + D3_DELINK_MODE_EN, 0); + rtsx_pci_write_register(pcr, RTS5264_FW_CTL, + RTS5264_INFORM_RTD3_COLD, RTS5264_INFORM_RTD3_COLD); + rtsx_pci_write_register(pcr, RTS5264_AUTOLOAD_CFG4, + RTS5264_FORCE_PRSNT_LOW, RTS5264_FORCE_PRSNT_LOW); + } + + rtsx_pci_write_register(pcr, RTS5264_REG_FPDCTL, + SSC_POWER_DOWN, SSC_POWER_DOWN); +} + +static int rts5264_enable_auto_blink(struct rtsx_pcr *pcr) +{ + return rtsx_pci_write_register(pcr, OLT_LED_CTL, + LED_SHINE_MASK, LED_SHINE_EN); +} + +static int rts5264_disable_auto_blink(struct rtsx_pcr *pcr) +{ + return rtsx_pci_write_register(pcr, OLT_LED_CTL, + LED_SHINE_MASK, LED_SHINE_DISABLE); +} + +static int rts5264_turn_on_led(struct rtsx_pcr *pcr) +{ + return rtsx_pci_write_register(pcr, GPIO_CTL, + 0x02, 0x02); +} + +static int rts5264_turn_off_led(struct rtsx_pcr *pcr) +{ + return rtsx_pci_write_register(pcr, GPIO_CTL, + 0x02, 0x00); +} + +/* SD Pull Control Enable: + * SD_DAT[3:0] ==> pull up + * SD_CD ==> pull up + * SD_WP ==> pull up + * SD_CMD ==> pull up + * SD_CLK ==> pull down + */ +static const u32 rts5264_sd_pull_ctl_enable_tbl[] = { + RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA), + RTSX_REG_PAIR(CARD_PULL_CTL3, 0xE9), + 0, +}; + +/* SD Pull Control Disable: + * SD_DAT[3:0] ==> pull down + * SD_CD ==> pull up + * SD_WP ==> pull down + * SD_CMD ==> pull down + * SD_CLK ==> pull down + */ +static const u32 rts5264_sd_pull_ctl_disable_tbl[] = { + RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55), + RTSX_REG_PAIR(CARD_PULL_CTL3, 0xD5), + 0, +}; + +static int rts5264_sd_set_sample_push_timing_sd30(struct rtsx_pcr *pcr) +{ + rtsx_pci_write_register(pcr, SD_CFG1, SD_MODE_SELECT_MASK + | SD_ASYNC_FIFO_NOT_RST, SD_30_MODE | SD_ASYNC_FIFO_NOT_RST); + rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, CLK_LOW_FREQ); + rtsx_pci_write_register(pcr, CARD_CLK_SOURCE, 0xFF, + CRC_VAR_CLK0 | SD30_FIX_CLK | SAMPLE_VAR_CLK1); + 
rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0); + + return 0; +} + +static int rts5264_card_power_on(struct rtsx_pcr *pcr, int card) +{ + struct rtsx_cr_option *option = &pcr->option; + + if (option->ocp_en) + rtsx_pci_enable_ocp(pcr); + + rtsx_pci_write_register(pcr, REG_CRC_DUMMY_0, + CFG_SD_POW_AUTO_PD, CFG_SD_POW_AUTO_PD); + + rtsx_pci_write_register(pcr, RTS5264_LDO1_CFG1, + RTS5264_LDO1_TUNE_MASK, RTS5264_LDO1_33); + rtsx_pci_write_register(pcr, RTS5264_LDO1233318_POW_CTL, + RTS5264_LDO1_POWERON, RTS5264_LDO1_POWERON); + rtsx_pci_write_register(pcr, RTS5264_LDO1233318_POW_CTL, + RTS5264_LDO3318_POWERON, RTS5264_LDO3318_POWERON); + + msleep(20); + + rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, SD_OUTPUT_EN); + + /* Initialize SD_CFG1 register */ + rtsx_pci_write_register(pcr, SD_CFG1, 0xFF, + SD_CLK_DIVIDE_128 | SD_20_MODE | SD_BUS_WIDTH_1BIT); + rtsx_pci_write_register(pcr, SD_SAMPLE_POINT_CTL, + 0xFF, SD20_RX_POS_EDGE); + rtsx_pci_write_register(pcr, SD_PUSH_POINT_CTL, 0xFF, 0); + rtsx_pci_write_register(pcr, CARD_STOP, SD_STOP | SD_CLR_ERR, + SD_STOP | SD_CLR_ERR); + + /* Reset SD_CFG3 register */ + rtsx_pci_write_register(pcr, SD_CFG3, SD30_CLK_END_EN, 0); + rtsx_pci_write_register(pcr, REG_SD_STOP_SDCLK_CFG, + SD30_CLK_STOP_CFG_EN | SD30_CLK_STOP_CFG1 | + SD30_CLK_STOP_CFG0, 0); + + if (pcr->extra_caps & EXTRA_CAPS_SD_SDR50 || + pcr->extra_caps & EXTRA_CAPS_SD_SDR104) + rts5264_sd_set_sample_push_timing_sd30(pcr); + + return 0; +} + +static int rts5264_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage) +{ + rtsx_pci_write_register(pcr, RTS5264_CARD_PWR_CTL, + RTS5264_PUPDC, RTS5264_PUPDC); + + switch (voltage) { + case OUTPUT_3V3: + rtsx_pci_write_register(pcr, RTS5264_LDO1233318_POW_CTL, + RTS5264_TUNE_REF_LDO3318, RTS5264_TUNE_REF_LDO3318); + rtsx_pci_write_register(pcr, RTS5264_DV3318_CFG, + RTS5264_DV3318_TUNE_MASK, RTS5264_DV3318_33); + rtsx_pci_write_register(pcr, SD_PAD_CTL, + SD_IO_USING_1V8, 0); + break; + case OUTPUT_1V8: + 
rtsx_pci_write_register(pcr, RTS5264_LDO1233318_POW_CTL, + RTS5264_TUNE_REF_LDO3318, RTS5264_TUNE_REF_LDO3318_DFT); + rtsx_pci_write_register(pcr, RTS5264_DV3318_CFG, + RTS5264_DV3318_TUNE_MASK, RTS5264_DV3318_18); + rtsx_pci_write_register(pcr, SD_PAD_CTL, + SD_IO_USING_1V8, SD_IO_USING_1V8); + break; + default: + return -EINVAL; + } + + /* set pad drive */ + rts5264_fill_driving(pcr, voltage); + + return 0; +} + +static void rts5264_stop_cmd(struct rtsx_pcr *pcr) +{ + rtsx_pci_writel(pcr, RTSX_HCBCTLR, STOP_CMD); + rtsx_pci_writel(pcr, RTSX_HDBCTLR, STOP_DMA); + rtsx_pci_write_register(pcr, DMACTL, DMA_RST, DMA_RST); + rtsx_pci_write_register(pcr, RBCTL, RB_FLUSH, RB_FLUSH); +} + +static void rts5264_card_before_power_off(struct rtsx_pcr *pcr) +{ + rts5264_stop_cmd(pcr); + rts5264_switch_output_voltage(pcr, OUTPUT_3V3); +} + +static int rts5264_card_power_off(struct rtsx_pcr *pcr, int card) +{ + int err = 0; + + rts5264_card_before_power_off(pcr); + err = rtsx_pci_write_register(pcr, RTS5264_LDO1233318_POW_CTL, + RTS5264_LDO_POWERON_MASK, 0); + + rtsx_pci_write_register(pcr, REG_CRC_DUMMY_0, + CFG_SD_POW_AUTO_PD, 0); + if (pcr->option.ocp_en) + rtsx_pci_disable_ocp(pcr); + + return err; +} + +static void rts5264_enable_ocp(struct rtsx_pcr *pcr) +{ + u8 mask = 0; + u8 val = 0; + + rtsx_pci_write_register(pcr, RTS5264_LDO1_CFG0, + RTS5264_LDO1_OCP_EN | RTS5264_LDO1_OCP_LMT_EN, + RTS5264_LDO1_OCP_EN | RTS5264_LDO1_OCP_LMT_EN); + rtsx_pci_write_register(pcr, RTS5264_LDO2_CFG0, + RTS5264_LDO2_OCP_EN | RTS5264_LDO2_OCP_LMT_EN, + RTS5264_LDO2_OCP_EN | RTS5264_LDO2_OCP_LMT_EN); + rtsx_pci_write_register(pcr, RTS5264_LDO3_CFG0, + RTS5264_LDO3_OCP_EN | RTS5264_LDO3_OCP_LMT_EN, + RTS5264_LDO3_OCP_EN | RTS5264_LDO3_OCP_LMT_EN); + rtsx_pci_write_register(pcr, RTS5264_OVP_DET, + RTS5264_POW_VDET, RTS5264_POW_VDET); + + mask = SD_OCP_INT_EN | SD_DETECT_EN; + mask |= SDVIO_OCP_INT_EN | SDVIO_DETECT_EN; + val = mask; + rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val); + + mask 
= SD_VDD3_OCP_INT_EN | SD_VDD3_DETECT_EN; + val = mask; + rtsx_pci_write_register(pcr, RTS5264_OCP_VDD3_CTL, mask, val); + + mask = RTS5264_OVP_INT_EN | RTS5264_OVP_DETECT_EN; + val = mask; + rtsx_pci_write_register(pcr, RTS5264_OVP_CTL, mask, val); +} + +static void rts5264_disable_ocp(struct rtsx_pcr *pcr) +{ + u8 mask = 0; + + mask = SD_OCP_INT_EN | SD_DETECT_EN; + mask |= SDVIO_OCP_INT_EN | SDVIO_DETECT_EN; + rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0); + + mask = SD_VDD3_OCP_INT_EN | SD_VDD3_DETECT_EN; + rtsx_pci_write_register(pcr, RTS5264_OCP_VDD3_CTL, mask, 0); + + mask = RTS5264_OVP_INT_EN | RTS5264_OVP_DETECT_EN; + rtsx_pci_write_register(pcr, RTS5264_OVP_CTL, mask, 0); + + rtsx_pci_write_register(pcr, RTS5264_LDO1_CFG0, + RTS5264_LDO1_OCP_EN | RTS5264_LDO1_OCP_LMT_EN, 0); + rtsx_pci_write_register(pcr, RTS5264_LDO2_CFG0, + RTS5264_LDO2_OCP_EN | RTS5264_LDO2_OCP_LMT_EN, 0); + rtsx_pci_write_register(pcr, RTS5264_LDO3_CFG0, + RTS5264_LDO3_OCP_EN | RTS5264_LDO3_OCP_LMT_EN, 0); + rtsx_pci_write_register(pcr, RTS5264_OVP_DET, RTS5264_POW_VDET, 0); +} + +static void rts5264_init_ocp(struct rtsx_pcr *pcr) +{ + struct rtsx_cr_option *option = &pcr->option; + + if (option->ocp_en) { + u8 mask, val; + + rtsx_pci_write_register(pcr, RTS5264_LDO1_CFG0, + RTS5264_LDO1_OCP_THD_MASK, option->sd_800mA_ocp_thd); + rtsx_pci_write_register(pcr, RTS5264_LDO1_CFG0, + RTS5264_LDO1_OCP_LMT_THD_MASK, + RTS5264_LDO1_LMT_THD_2000); + + rtsx_pci_write_register(pcr, RTS5264_LDO2_CFG0, + RTS5264_LDO2_OCP_THD_MASK, RTS5264_LDO2_OCP_THD_950); + rtsx_pci_write_register(pcr, RTS5264_LDO2_CFG0, + RTS5264_LDO2_OCP_LMT_THD_MASK, + RTS5264_LDO2_LMT_THD_2000); + + rtsx_pci_write_register(pcr, RTS5264_LDO3_CFG0, + RTS5264_LDO3_OCP_THD_MASK, RTS5264_LDO3_OCP_THD_710); + rtsx_pci_write_register(pcr, RTS5264_LDO3_CFG0, + RTS5264_LDO3_OCP_LMT_THD_MASK, + RTS5264_LDO3_LMT_THD_1500); + + rtsx_pci_write_register(pcr, RTS5264_OVP_DET, + RTS5264_TUNE_VROV_MASK, RTS5264_TUNE_VROV_1V6); + + mask = 
SD_OCP_GLITCH_MASK | SDVIO_OCP_GLITCH_MASK; + val = pcr->hw_param.ocp_glitch; + rtsx_pci_write_register(pcr, REG_OCPGLITCH, mask, val); + + } else { + rtsx_pci_write_register(pcr, RTS5264_LDO1_CFG0, + RTS5264_LDO1_OCP_EN | RTS5264_LDO1_OCP_LMT_EN, 0); + rtsx_pci_write_register(pcr, RTS5264_LDO2_CFG0, + RTS5264_LDO2_OCP_EN | RTS5264_LDO2_OCP_LMT_EN, 0); + rtsx_pci_write_register(pcr, RTS5264_LDO3_CFG0, + RTS5264_LDO3_OCP_EN | RTS5264_LDO3_OCP_LMT_EN, 0); + rtsx_pci_write_register(pcr, RTS5264_OVP_DET, + RTS5264_POW_VDET, 0); + } +} + +static int rts5264_get_ocpstat2(struct rtsx_pcr *pcr, u8 *val) +{ + return rtsx_pci_read_register(pcr, RTS5264_OCP_VDD3_STS, val); +} + +static int rts5264_get_ovpstat(struct rtsx_pcr *pcr, u8 *val) +{ + return rtsx_pci_read_register(pcr, RTS5264_OVP_STS, val); +} + +static void rts5264_clear_ocpstat(struct rtsx_pcr *pcr) +{ + u8 mask = 0; + u8 val = 0; + + mask = SD_OCP_INT_CLR | SD_OC_CLR; + mask |= SDVIO_OCP_INT_CLR | SDVIO_OC_CLR; + val = mask; + rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val); + rtsx_pci_write_register(pcr, RTS5264_OCP_VDD3_CTL, + SD_VDD3_OCP_INT_CLR | SD_VDD3_OC_CLR, + SD_VDD3_OCP_INT_CLR | SD_VDD3_OC_CLR); + rtsx_pci_write_register(pcr, RTS5264_OVP_CTL, + RTS5264_OVP_INT_CLR | RTS5264_OVP_CLR, + RTS5264_OVP_INT_CLR | RTS5264_OVP_CLR); + + udelay(1000); + + rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0); + rtsx_pci_write_register(pcr, RTS5264_OCP_VDD3_CTL, + SD_VDD3_OCP_INT_CLR | SD_VDD3_OC_CLR, 0); + rtsx_pci_write_register(pcr, RTS5264_OVP_CTL, + RTS5264_OVP_INT_CLR | RTS5264_OVP_CLR, 0); +} + +static void rts5264_process_ocp(struct rtsx_pcr *pcr) +{ + if (!pcr->option.ocp_en) + return; + + rtsx_pci_get_ocpstat(pcr, &pcr->ocp_stat); + rts5264_get_ocpstat2(pcr, &pcr->ocp_stat2); + rts5264_get_ovpstat(pcr, &pcr->ovp_stat); + + if ((pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER | SDVIO_OC_NOW | SDVIO_OC_EVER)) || + (pcr->ocp_stat2 & (SD_VDD3_OC_NOW | SD_VDD3_OC_EVER)) || + (pcr->ovp_stat & (RTS5264_OVP_NOW | 
RTS5264_OVP_EVER))) { + rts5264_clear_ocpstat(pcr); + rts5264_card_power_off(pcr, RTSX_SD_CARD); + rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0); + pcr->ocp_stat = 0; + pcr->ocp_stat2 = 0; + pcr->ovp_stat = 0; + } +} + +static void rts5264_init_from_hw(struct rtsx_pcr *pcr) +{ + struct pci_dev *pdev = pcr->pci; + u32 lval1, lval2, i; + u16 setting_reg1, setting_reg2, phy_val; + u8 valid, efuse_valid, tmp, efuse_len; + + rtsx_pci_write_register(pcr, RTS5264_REG_PME_FORCE_CTL, + REG_EFUSE_POR | REG_EFUSE_POWER_MASK, + REG_EFUSE_POR | REG_EFUSE_POWERON); + udelay(1); + rtsx_pci_write_register(pcr, RTS5264_EFUSE_ADDR, + RTS5264_EFUSE_ADDR_MASK, 0x00); + rtsx_pci_write_register(pcr, RTS5264_EFUSE_CTL, + RTS5264_EFUSE_ENABLE | RTS5264_EFUSE_MODE_MASK, + RTS5264_EFUSE_ENABLE); + + /* Wait transfer end */ + for (i = 0; i < MAX_RW_REG_CNT; i++) { + rtsx_pci_read_register(pcr, RTS5264_EFUSE_CTL, &tmp); + if ((tmp & 0x80) == 0) + break; + } + rtsx_pci_read_register(pcr, RTS5264_EFUSE_READ_DATA, &tmp); + efuse_len = ((tmp & 0x70) >> 4); + pcr_dbg(pcr, "Load efuse len: 0x%x\n", efuse_len); + efuse_valid = ((tmp & 0x0C) >> 2); + pcr_dbg(pcr, "Load efuse valid: 0x%x\n", efuse_valid); + + pci_read_config_dword(pdev, PCR_SETTING_REG2, &lval2); + pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, lval2); + /* 0x816 */ + valid = (u8)((lval2 >> 16) & 0x03); + + rtsx_pci_write_register(pcr, RTS5264_REG_PME_FORCE_CTL, + REG_EFUSE_POR, 0); + pcr_dbg(pcr, "Disable efuse por!\n"); + + if (is_version(pcr, PID_5264, RTS5264_IC_VER_B)) { + pci_write_config_dword(pdev, 0x718, 0x0007C000); + rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE, 0xFF, 0x88); + rtsx_pci_read_phy_register(pcr, _PHY_REV0, &phy_val); + phy_val &= 0xFFFD; + + if (efuse_len == 0) { + rtsx_pci_write_register(pcr, RTS5264_FW_CFG_INFO2, 0x0F, 0x0F); + rtsx_pci_write_register(pcr, 0xFF14, 0xFF, 0x79); + rtsx_pci_write_register(pcr, 0xFF15, 0xFF, 0xFF); + rtsx_pci_write_register(pcr, 0xFF16, 0xFF, 0x3D); + 
rtsx_pci_write_register(pcr, 0xFF17, 0xFF, 0xFE); + + rtsx_pci_write_register(pcr, 0xFF18, 0xFF, 0x5B); + rtsx_pci_write_register(pcr, 0xFF19, 0xFF, 0xFF); + rtsx_pci_write_register(pcr, 0xFF1A, 0xFF, 0x3E); + rtsx_pci_write_register(pcr, 0xFF1B, 0xFF, 0xFE); + + rtsx_pci_write_register(pcr, 0xFF1C, 0xFF, 0x00); + rtsx_pci_write_register(pcr, 0xFF1D, 0xFF, 0xFF); + rtsx_pci_write_register(pcr, 0xFF1E, 0xFF, 0x3F); + rtsx_pci_write_register(pcr, 0xFF1F, 0xFF, 0xFE); + + rtsx_pci_write_register(pcr, 0xFF20, 0xFF, 0x81); + rtsx_pci_write_register(pcr, 0xFF21, 0xFF, 0xFF); + rtsx_pci_write_register(pcr, 0xFF22, 0xFF, 0x3C); + rtsx_pci_write_register(pcr, 0xFF23, 0xFF, 0xFE); + } + + rtsx_pci_write_register(pcr, 0xFF24, 0xFF, 0x79); + rtsx_pci_write_register(pcr, 0xFF25, 0xFF, 0x5B); + rtsx_pci_write_register(pcr, 0xFF26, 0xFF, 0x00); + rtsx_pci_write_register(pcr, 0xFF27, 0xFF, 0x40); + + rtsx_pci_write_register(pcr, 0xFF28, 0xFF, (u8)phy_val); + rtsx_pci_write_register(pcr, 0xFF29, 0xFF, (u8)(phy_val >> 8)); + rtsx_pci_write_register(pcr, 0xFF2A, 0xFF, 0x19); + rtsx_pci_write_register(pcr, 0xFF2B, 0xFF, 0x40); + + rtsx_pci_write_register(pcr, 0xFF2C, 0xFF, 0x20); + rtsx_pci_write_register(pcr, 0xFF2D, 0xFF, 0xDA); + rtsx_pci_write_register(pcr, 0xFF2E, 0xFF, 0x0A); + rtsx_pci_write_register(pcr, 0xFF2F, 0xFF, 0x40); + + rtsx_pci_write_register(pcr, 0xFF30, 0xFF, 0x20); + rtsx_pci_write_register(pcr, 0xFF31, 0xFF, 0xD2); + rtsx_pci_write_register(pcr, 0xFF32, 0xFF, 0x0A); + rtsx_pci_write_register(pcr, 0xFF33, 0xFF, 0x40); + } else { + rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE, 0x80, 0x80); + } + + if (efuse_valid == 2 || efuse_valid == 3) { + if (valid == 3) { + /* Bypass efuse */ + setting_reg1 = PCR_SETTING_REG1; + setting_reg2 = PCR_SETTING_REG2; + } else { + /* Use efuse data */ + setting_reg1 = PCR_SETTING_REG4; + setting_reg2 = PCR_SETTING_REG5; + } + } else if (efuse_valid == 0) { + // default + setting_reg1 = PCR_SETTING_REG1; + setting_reg2 = 
PCR_SETTING_REG2; + } else { + return; + } + + pci_read_config_dword(pdev, setting_reg2, &lval2); + pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", setting_reg2, lval2); + + if (!rts5264_vendor_setting_valid(lval2)) { + pcr_dbg(pcr, "skip fetch vendor setting\n"); + return; + } + + pcr->rtd3_en = rts5264_reg_to_rtd3(lval2); + + if (rts5264_reg_check_reverse_socket(lval2)) { + if (is_version_higher_than(pcr, PID_5264, RTS5264_IC_VER_B)) + pcr->option.sd_cd_reverse_en = 1; + else + pcr->flags |= PCR_REVERSE_SOCKET; + } + + if (rts5264_reg_check_wp_reverse(lval2) && + is_version_higher_than(pcr, PID_5264, RTS5264_IC_VER_B)) + pcr->option.sd_wp_reverse_en = 1; + + pci_read_config_dword(pdev, setting_reg1, &lval1); + pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", setting_reg1, lval1); + + pcr->aspm_en = rts5264_reg_to_aspm(lval1); + pcr->sd30_drive_sel_1v8 = rts5264_reg_to_sd30_drive_sel_1v8(lval1); + pcr->sd30_drive_sel_3v3 = rts5264_reg_to_sd30_drive_sel_3v3(lval1); + + if (setting_reg1 == PCR_SETTING_REG1) { + /* store setting */ + rtsx_pci_write_register(pcr, 0xFF0C, 0xFF, (u8)(lval1 & 0xFF)); + rtsx_pci_write_register(pcr, 0xFF0D, 0xFF, (u8)((lval1 >> 8) & 0xFF)); + rtsx_pci_write_register(pcr, 0xFF0E, 0xFF, (u8)((lval1 >> 16) & 0xFF)); + rtsx_pci_write_register(pcr, 0xFF0F, 0xFF, (u8)((lval1 >> 24) & 0xFF)); + rtsx_pci_write_register(pcr, 0xFF10, 0xFF, (u8)(lval2 & 0xFF)); + rtsx_pci_write_register(pcr, 0xFF11, 0xFF, (u8)((lval2 >> 8) & 0xFF)); + rtsx_pci_write_register(pcr, 0xFF12, 0xFF, (u8)((lval2 >> 16) & 0xFF)); + + pci_write_config_dword(pdev, PCR_SETTING_REG4, lval1); + lval2 = lval2 & 0x00FFFFFF; + pci_write_config_dword(pdev, PCR_SETTING_REG5, lval2); + } +} + +static void rts5264_init_from_cfg(struct rtsx_pcr *pcr) +{ + struct rtsx_cr_option *option = &pcr->option; + + if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN + | PM_L1_1_EN | PM_L1_2_EN)) + rtsx_pci_disable_oobs_polling(pcr); + else + rtsx_pci_enable_oobs_polling(pcr); + + rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 
0xFF, 0); + + if (option->ltr_en) { + if (option->ltr_enabled) + rtsx_set_ltr_latency(pcr, option->ltr_active_latency); + } +} + +static int rts5264_extra_init_hw(struct rtsx_pcr *pcr) +{ + struct rtsx_cr_option *option = &pcr->option; + + rtsx_pci_write_register(pcr, RTS5264_AUTOLOAD_CFG1, + CD_RESUME_EN_MASK, CD_RESUME_EN_MASK); + rtsx_pci_write_register(pcr, REG_VREF, PWD_SUSPND_EN, PWD_SUSPND_EN); + + rts5264_init_from_cfg(pcr); + rts5264_init_from_hw(pcr); + + /* power off efuse */ + rtsx_pci_write_register(pcr, RTS5264_REG_PME_FORCE_CTL, + REG_EFUSE_POWER_MASK, REG_EFUSE_POWEROFF); + rtsx_pci_write_register(pcr, RTS5264_AUTOLOAD_CFG2, + RTS5264_CHIP_RST_N_SEL, 0); + rtsx_pci_write_register(pcr, RTS5264_REG_LDO12_CFG, + RTS5264_LDO12_SR_MASK, RTS5264_LDO12_SR_0_0_MS); + rtsx_pci_write_register(pcr, CDGW, 0xFF, 0x01); + rtsx_pci_write_register(pcr, RTS5264_CKMUX_MBIAS_PWR, + RTS5264_POW_CKMUX, RTS5264_POW_CKMUX); + rtsx_pci_write_register(pcr, RTS5264_CMD_OE_START_EARLY, + RTS5264_CMD_OE_EARLY_EN | RTS5264_CMD_OE_EARLY_CYCLE_MASK, + RTS5264_CMD_OE_EARLY_EN); + rtsx_pci_write_register(pcr, RTS5264_DAT_OE_START_EARLY, + RTS5264_DAT_OE_EARLY_EN | RTS5264_DAT_OE_EARLY_CYCLE_MASK, + RTS5264_DAT_OE_EARLY_EN); + rtsx_pci_write_register(pcr, SSC_DIV_N_0, 0xFF, 0x5D); + + rtsx_pci_write_register(pcr, RTS5264_PWR_CUT, + RTS5264_CFG_MEM_PD, RTS5264_CFG_MEM_PD); + rtsx_pci_write_register(pcr, L1SUB_CONFIG1, + AUX_CLK_ACTIVE_SEL_MASK, MAC_CKSW_DONE); + rtsx_pci_write_register(pcr, L1SUB_CONFIG3, 0xFF, 0); + rtsx_pci_write_register(pcr, RTS5264_AUTOLOAD_CFG4, + RTS5264_AUX_CLK_16M_EN, 0); + + /* Release PRSNT# */ + rtsx_pci_write_register(pcr, RTS5264_AUTOLOAD_CFG4, + RTS5264_FORCE_PRSNT_LOW, 0); + rtsx_pci_write_register(pcr, PCLK_CTL, + PCLK_MODE_SEL, PCLK_MODE_SEL); + + /* LED shine disabled, set initial shine cycle period */ + rtsx_pci_write_register(pcr, OLT_LED_CTL, 0x0F, 0x02); + + /* Configure driving */ + rts5264_fill_driving(pcr, OUTPUT_3V3); + + if (pcr->flags & 
PCR_REVERSE_SOCKET) + rtsx_pci_write_register(pcr, PETXCFG, 0x30, 0x30); + else { + rtsx_pci_write_register(pcr, PETXCFG, 0x20, option->sd_cd_reverse_en << 5); + rtsx_pci_write_register(pcr, PETXCFG, 0x10, option->sd_wp_reverse_en << 4); + } + + /* + * If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced + * to drive low, and we forcibly request clock. + */ + if (option->force_clkreq_0) + rtsx_pci_write_register(pcr, PETXCFG, + FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW); + else + rtsx_pci_write_register(pcr, PETXCFG, + FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH); + + rtsx_pci_write_register(pcr, PWD_SUSPEND_EN, 0xFF, 0xFF); + rtsx_pci_write_register(pcr, RBCTL, U_AUTO_DMA_EN_MASK, 0); + rtsx_pci_write_register(pcr, RTS5264_AUTOLOAD_CFG4, + RTS5264_F_HIGH_RC_MASK, RTS5264_F_HIGH_RC_400K); + + if (pcr->rtd3_en) { + rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x01, 0x00); + rtsx_pci_write_register(pcr, RTS5264_REG_PME_FORCE_CTL, + FORCE_PM_CONTROL | FORCE_PM_VALUE, 0); + } else { + rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x01, 0x00); + rtsx_pci_write_register(pcr, RTS5264_REG_PME_FORCE_CTL, + FORCE_PM_CONTROL | FORCE_PM_VALUE, FORCE_PM_CONTROL); + } + rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, D3_DELINK_MODE_EN, 0x00); + + /* Clear Enter RTD3_cold Information*/ + rtsx_pci_write_register(pcr, RTS5264_FW_CTL, + RTS5264_INFORM_RTD3_COLD, 0); + + return 0; +} + +static int rts5264_optimize_phy(struct rtsx_pcr *pcr) +{ + u16 subvendor, subdevice, val; + + subvendor = pcr->pci->subsystem_vendor; + subdevice = pcr->pci->subsystem_device; + + if ((subvendor == 0x1028) && (subdevice == 0x0CE1)) { + rtsx_pci_read_phy_register(pcr, _PHY_REV0, &val); + if ((val & 0xFE00) > 0x3800) + rtsx_pci_update_phy(pcr, _PHY_REV0, 0x1FF, 0x3800); + } + + if (is_version(pcr, PID_5264, RTS5264_IC_VER_B)) + rtsx_pci_write_phy_register(pcr, 0x00, 0x5B79); + + return 0; +} + +static void rts5264_enable_aspm(struct rtsx_pcr *pcr, bool enable) +{ + u8 val = FORCE_ASPM_CTL0 | 
FORCE_ASPM_CTL1; + u8 mask = FORCE_ASPM_VAL_MASK | FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1; + + if (pcr->aspm_enabled == enable) + return; + + val |= (pcr->aspm_en & 0x02); + rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val); + pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_ASPMC, pcr->aspm_en); + pcr->aspm_enabled = enable; +} + +static void rts5264_disable_aspm(struct rtsx_pcr *pcr, bool enable) +{ + u8 val = FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1; + u8 mask = FORCE_ASPM_VAL_MASK | FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1; + + if (pcr->aspm_enabled == enable) + return; + + pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_ASPMC, 0); + rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val); + rtsx_pci_write_register(pcr, SD_CFG1, SD_ASYNC_FIFO_NOT_RST, 0); + udelay(10); + pcr->aspm_enabled = enable; +} + +static void rts5264_set_aspm(struct rtsx_pcr *pcr, bool enable) +{ + if (enable) + rts5264_enable_aspm(pcr, true); + else + rts5264_disable_aspm(pcr, false); +} + +static void rts5264_set_l1off_cfg_sub_d0(struct rtsx_pcr *pcr, int active) +{ + struct rtsx_cr_option *option = &(pcr->option); + + u32 interrupt = rtsx_pci_readl(pcr, RTSX_BIPR); + int card_exist = (interrupt & SD_EXIST); + int aspm_L1_1, aspm_L1_2; + u8 val = 0; + + aspm_L1_1 = rtsx_check_dev_flag(pcr, ASPM_L1_1_EN); + aspm_L1_2 = rtsx_check_dev_flag(pcr, ASPM_L1_2_EN); + + if (active) { + /* Run, latency: 60us */ + if (aspm_L1_1) + val = option->ltr_l1off_snooze_sspwrgate; + } else { + /* L1off, latency: 300us */ + if (aspm_L1_2) + val = option->ltr_l1off_sspwrgate; + } + + if (aspm_L1_1 || aspm_L1_2) { + if (rtsx_check_dev_flag(pcr, + LTR_L1SS_PWR_GATE_CHECK_CARD_EN)) { + if (card_exist) + val &= ~L1OFF_MBIAS2_EN_5250; + else + val |= L1OFF_MBIAS2_EN_5250; + } + } + rtsx_set_l1off_sub(pcr, val); +} + +static const struct pcr_ops rts5264_pcr_ops = { + .turn_on_led = rts5264_turn_on_led, + .turn_off_led = rts5264_turn_off_led, + .extra_init_hw = 
rts5264_extra_init_hw, + .optimize_phy = rts5264_optimize_phy, + .enable_auto_blink = rts5264_enable_auto_blink, + .disable_auto_blink = rts5264_disable_auto_blink, + .card_power_on = rts5264_card_power_on, + .card_power_off = rts5264_card_power_off, + .switch_output_voltage = rts5264_switch_output_voltage, + .force_power_down = rts5264_force_power_down, + .stop_cmd = rts5264_stop_cmd, + .set_aspm = rts5264_set_aspm, + .set_l1off_cfg_sub_d0 = rts5264_set_l1off_cfg_sub_d0, + .enable_ocp = rts5264_enable_ocp, + .disable_ocp = rts5264_disable_ocp, + .init_ocp = rts5264_init_ocp, + .process_ocp = rts5264_process_ocp, + .clear_ocpstat = rts5264_clear_ocpstat, +}; + +static inline u8 double_ssc_depth(u8 depth) +{ + return ((depth > 1) ? (depth - 1) : depth); +} + +int rts5264_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock, + u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk) +{ + int err, clk; + u16 n; + u8 clk_divider, mcu_cnt, div; + static const u8 depth[] = { + [RTSX_SSC_DEPTH_4M] = RTS5264_SSC_DEPTH_4M, + [RTSX_SSC_DEPTH_2M] = RTS5264_SSC_DEPTH_2M, + [RTSX_SSC_DEPTH_1M] = RTS5264_SSC_DEPTH_1M, + [RTSX_SSC_DEPTH_500K] = RTS5264_SSC_DEPTH_512K, + }; + + if (initial_mode) { + /* We use 250k(around) here, in initial stage */ + clk_divider = SD_CLK_DIVIDE_128; + card_clock = 30000000; + } else { + clk_divider = SD_CLK_DIVIDE_0; + } + err = rtsx_pci_write_register(pcr, SD_CFG1, + SD_CLK_DIVIDE_MASK, clk_divider); + if (err < 0) + return err; + + card_clock /= 1000000; + pcr_dbg(pcr, "Switch card clock to %dMHz\n", card_clock); + + clk = card_clock; + if (!initial_mode && double_clk) + clk = card_clock * 2; + pcr_dbg(pcr, "Internal SSC clock: %dMHz (cur_clock = %d)\n", + clk, pcr->cur_clock); + + if (clk == pcr->cur_clock) + return 0; + + if (pcr->ops->conv_clk_and_div_n) + n = pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N); + else + n = clk - 4; + if ((clk <= 4) || (n > 396)) + return -EINVAL; + + mcu_cnt = 125/clk + 3; + if (mcu_cnt > 15) + 
mcu_cnt = 15; + + div = CLK_DIV_1; + while ((n < MIN_DIV_N_PCR - 4) && (div < CLK_DIV_8)) { + if (pcr->ops->conv_clk_and_div_n) { + int dbl_clk = pcr->ops->conv_clk_and_div_n(n, + DIV_N_TO_CLK) * 2; + n = pcr->ops->conv_clk_and_div_n(dbl_clk, + CLK_TO_DIV_N); + } else { + n = (n + 4) * 2 - 4; + } + div++; + } + + n = (n / 2) - 1; + pcr_dbg(pcr, "n = %d, div = %d\n", n, div); + + ssc_depth = depth[ssc_depth]; + if (double_clk) + ssc_depth = double_ssc_depth(ssc_depth); + + if (ssc_depth) { + if (div == CLK_DIV_2) { + if (ssc_depth > 1) + ssc_depth -= 1; + else + ssc_depth = RTS5264_SSC_DEPTH_8M; + } else if (div == CLK_DIV_4) { + if (ssc_depth > 2) + ssc_depth -= 2; + else + ssc_depth = RTS5264_SSC_DEPTH_8M; + } else if (div == CLK_DIV_8) { + if (ssc_depth > 3) + ssc_depth -= 3; + else + ssc_depth = RTS5264_SSC_DEPTH_8M; + } + } else { + ssc_depth = 0; + } + pcr_dbg(pcr, "ssc_depth = %d\n", ssc_depth); + + rtsx_pci_init_cmd(pcr); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, + CHANGE_CLK, CHANGE_CLK); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV, + 0xFF, (div << 4) | mcu_cnt); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, + SSC_DEPTH_MASK, ssc_depth); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, n); + + if (is_version(pcr, PID_5264, RTS5264_IC_VER_A)) { + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RTS5264_CARD_CLK_SRC2, + RTS5264_REG_BIG_KVCO_A, 0); + } else { + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RTS5264_SYS_DUMMY_1, + RTS5264_REG_BIG_KVCO, 0); + } + + if (vpclk) { + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL, + PHASE_NOT_RESET, 0); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK1_CTL, + PHASE_NOT_RESET, 0); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL, + PHASE_NOT_RESET, PHASE_NOT_RESET); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK1_CTL, 
+ PHASE_NOT_RESET, PHASE_NOT_RESET); + } + + err = rtsx_pci_send_cmd(pcr, 2000); + if (err < 0) + return err; + + /* Wait SSC clock stable */ + udelay(SSC_CLOCK_STABLE_WAIT); + err = rtsx_pci_write_register(pcr, CLK_CTL, CHANGE_CLK, 0); + if (err < 0) + return err; + + pcr->cur_clock = clk; + return 0; +} + +void rts5264_init_params(struct rtsx_pcr *pcr) +{ + struct rtsx_cr_option *option = &pcr->option; + struct rtsx_hw_param *hw_param = &pcr->hw_param; + u8 val; + + pcr->extra_caps = EXTRA_CAPS_SD_SDR50 | EXTRA_CAPS_SD_SDR104; + pcr->extra_caps |= EXTRA_CAPS_NO_MMC; + rtsx_pci_read_register(pcr, RTS5264_FW_STATUS, &val); + if (!(val & RTS5264_EXPRESS_LINK_FAIL_MASK)) + pcr->extra_caps |= EXTRA_CAPS_SD_EXPRESS; + pcr->num_slots = 1; + pcr->ops = &rts5264_pcr_ops; + + pcr->flags = 0; + pcr->card_drive_sel = RTSX_CARD_DRIVE_DEFAULT; + pcr->sd30_drive_sel_1v8 = 0x00; + pcr->sd30_drive_sel_3v3 = 0x00; + pcr->aspm_en = ASPM_L1_EN; + pcr->aspm_mode = ASPM_MODE_REG; + pcr->tx_initial_phase = SET_CLOCK_PHASE(24, 24, 11); + pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5); + + pcr->ic_version = rts5264_get_ic_version(pcr); + pcr->sd_pull_ctl_enable_tbl = rts5264_sd_pull_ctl_enable_tbl; + pcr->sd_pull_ctl_disable_tbl = rts5264_sd_pull_ctl_disable_tbl; + + pcr->reg_pm_ctrl3 = RTS5264_AUTOLOAD_CFG3; + + option->dev_flags = (LTR_L1SS_PWR_GATE_CHECK_CARD_EN + | LTR_L1SS_PWR_GATE_EN); + option->ltr_en = true; + + /* init latency of active, idle, L1OFF to 60us, 300us, 3ms */ + option->ltr_active_latency = LTR_ACTIVE_LATENCY_DEF; + option->ltr_idle_latency = LTR_IDLE_LATENCY_DEF; + option->ltr_l1off_latency = LTR_L1OFF_LATENCY_DEF; + option->l1_snooze_delay = L1_SNOOZE_DELAY_DEF; + option->ltr_l1off_sspwrgate = 0x7F; + option->ltr_l1off_snooze_sspwrgate = 0x78; + + option->ocp_en = 1; + hw_param->interrupt_en |= (SD_OC_INT_EN | SD_OVP_INT_EN); + hw_param->ocp_glitch = SD_OCP_GLITCH_800U | SDVIO_OCP_GLITCH_800U; + option->sd_800mA_ocp_thd = RTS5264_LDO1_OCP_THD_1150; + 
option->sd_cd_reverse_en = 0; + option->sd_wp_reverse_en = 0; +} diff --git a/drivers/misc/cardreader/rts5264.h b/drivers/misc/cardreader/rts5264.h new file mode 100644 index 000000000000..611ee253367c --- /dev/null +++ b/drivers/misc/cardreader/rts5264.h @@ -0,0 +1,285 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Driver for Realtek PCI-Express card reader + * + * Copyright(c) 2018-2019 Realtek Semiconductor Corp. All rights reserved. + * + * Author: + * Ricky Wu <ricky_wu@realtek.com> + */ +#ifndef RTS5264_H +#define RTS5264_H + +/*New add*/ +#define rts5264_vendor_setting_valid(reg) ((reg) & 0x010000) +#define rts5264_reg_to_aspm(reg) \ + (((~(reg) >> 28) & 0x02) | (((reg) >> 28) & 0x01)) +#define rts5264_reg_check_reverse_socket(reg) ((reg) & 0x04) +#define rts5264_reg_check_wp_reverse(reg) ((reg) & 0x8000) +#define rts5264_reg_to_sd30_drive_sel_1v8(reg) (((reg) >> 22) & 0x03) +#define rts5264_reg_to_sd30_drive_sel_3v3(reg) (((reg) >> 16) & 0x03) +#define rts5264_reg_to_rtd3(reg) ((reg) & 0x08) + +#define RTS5264_AUTOLOAD_CFG0 0xFF7B +#define RTS5264_AUTOLOAD_CFG1 0xFF7C +#define RTS5264_AUTOLOAD_CFG3 0xFF7E +#define RTS5264_AUTOLOAD_CFG4 0xFF7F +#define RTS5264_FORCE_PRSNT_LOW (1 << 6) +#define RTS5264_AUX_CLK_16M_EN (1 << 5) +#define RTS5264_F_HIGH_RC_MASK (1 << 4) +#define RTS5264_F_HIGH_RC_1_6M (1 << 4) +#define RTS5264_F_HIGH_RC_400K (0 << 4) + +/* SSC_CTL2 0xFC12 */ +#define RTS5264_SSC_DEPTH_MASK 0x07 +#define RTS5264_SSC_DEPTH_DISALBE 0x00 +#define RTS5264_SSC_DEPTH_8M 0x01 +#define RTS5264_SSC_DEPTH_4M 0x02 +#define RTS5264_SSC_DEPTH_2M 0x03 +#define RTS5264_SSC_DEPTH_1M 0x04 +#define RTS5264_SSC_DEPTH_512K 0x05 +#define RTS5264_SSC_DEPTH_256K 0x06 +#define RTS5264_SSC_DEPTH_128K 0x07 + +#define RTS5264_CARD_CLK_SRC2 0xFC2F +#define RTS5264_REG_BIG_KVCO_A 0x20 + +/* efuse control register*/ +#define RTS5264_EFUSE_CTL 0xFC30 +#define RTS5264_EFUSE_ENABLE 0x80 +/* EFUSE_MODE: 0=READ 1=PROGRAM */ +#define RTS5264_EFUSE_MODE_MASK 0x40 +#define 
RTS5264_EFUSE_PROGRAM 0x40 + +#define RTS5264_EFUSE_ADDR 0xFC31 +#define RTS5264_EFUSE_ADDR_MASK 0x3F + +#define RTS5264_EFUSE_WRITE_DATA 0xFC32 +#define RTS5264_EFUSE_READ_DATA 0xFC34 + +#define RTS5264_SYS_DUMMY_1 0xFC35 +#define RTS5264_REG_BIG_KVCO 0x04 + +/* DMACTL 0xFE2C */ +#define RTS5264_DMA_PACK_SIZE_MASK 0x70 + +#define RTS5264_FW_CFG_INFO2 0xFF52 + +#define RTS5264_FW_CFG1 0xFF55 +#define RTS5264_SYS_CLK_SEL_MCU_CLK (0x01<<7) +#define RTS5264_CRC_CLK_SEL_MCU_CLK (0x01<<6) +#define RTS5264_FAKE_MCU_CLOCK_GATING (0x01<<5) +#define RTS5264_MCU_BUS_SEL_MASK (0x01<<4) + +/* FW status register */ +#define RTS5264_FW_STATUS 0xFF56 +#define RTS5264_EXPRESS_LINK_FAIL_MASK (0x01<<7) + +/* FW control register */ +#define RTS5264_FW_CTL 0xFF5F +#define RTS5264_INFORM_RTD3_COLD (0x01<<5) + +#define RTS5264_REG_FPDCTL 0xFF60 + +#define RTS5264_REG_LDO12_CFG 0xFF6E +#define RTS5264_LDO12_SR_MASK (0x03<<6) +#define RTS5264_LDO12_SR_1_0_MS (0x03<<6) +#define RTS5264_LDO12_SR_0_5_MS (0x02<<6) +#define RTS5264_LDO12_SR_0_2_5_MS (0x01<<6) +#define RTS5264_LDO12_SR_0_0_MS (0x00<<6) +#define RTS5264_LDO12_VO_TUNE_MASK (0x07<<1) +#define RTS5264_LDO12_115 (0x03<<1) +#define RTS5264_LDO12_120 (0x04<<1) +#define RTS5264_LDO12_125 (0x05<<1) +#define RTS5264_LDO12_130 (0x06<<1) +#define RTS5264_LDO12_135 (0x07<<1) + +/* LDO control register */ +#define RTS5264_CARD_PWR_CTL 0xFD50 +#define RTS5264_SD_CLK_ISO (0x01<<7) +#define RTS5264_PAD_SD_DAT_FW_CTRL (0x01<<6) +#define RTS5264_PUPDC (0x01<<5) +#define RTS5264_SD_CMD_ISO (0x01<<4) + +#define RTS5264_OCP_VDD3_CTL 0xFD89 +#define SD_VDD3_DETECT_EN 0x08 +#define SD_VDD3_OCP_INT_EN 0x04 +#define SD_VDD3_OCP_INT_CLR 0x02 +#define SD_VDD3_OC_CLR 0x01 + +#define RTS5264_OCP_VDD3_STS 0xFD8A +#define SD_VDD3_OCP_DETECT 0x08 +#define SD_VDD3_OC_NOW 0x04 +#define SD_VDD3_OC_EVER 0x02 + +#define RTS5264_OVP_CTL 0xFD8D +#define RTS5264_OVP_TIME_MASK 0xF0 +#define RTS5264_OVP_TIME_DFT 0x50 +#define RTS5264_OVP_DETECT_EN 0x08 +#define 
RTS5264_OVP_INT_EN 0x04 +#define RTS5264_OVP_INT_CLR 0x02 +#define RTS5264_OVP_CLR 0x01 + +#define RTS5264_OVP_STS 0xFD8E +#define RTS5264_OVP_GLTCH_TIME_MASK 0xF0 +#define RTS5264_OVP_GLTCH_TIME_DFT 0x50 +#define RTS5264_VOVER_DET 0x08 +#define RTS5264_OVP_NOW 0x04 +#define RTS5264_OVP_EVER 0x02 + +#define RTS5264_CMD_OE_START_EARLY 0xFDCB +#define RTS5264_CMD_OE_EARLY_LEAVE 0x08 +#define RTS5264_CMD_OE_EARLY_CYCLE_MASK 0x06 +#define RTS5264_CMD_OE_EARLY_4CYCLE 0x06 +#define RTS5264_CMD_OE_EARLY_3CYCLE 0x04 +#define RTS5264_CMD_OE_EARLY_2CYCLE 0x02 +#define RTS5264_CMD_OE_EARLY_1CYCLE 0x00 +#define RTS5264_CMD_OE_EARLY_EN 0x01 + +#define RTS5264_DAT_OE_START_EARLY 0xFDCC +#define RTS5264_DAT_OE_EARLY_LEAVE 0x08 +#define RTS5264_DAT_OE_EARLY_CYCLE_MASK 0x06 +#define RTS5264_DAT_OE_EARLY_4CYCLE 0x06 +#define RTS5264_DAT_OE_EARLY_3CYCLE 0x04 +#define RTS5264_DAT_OE_EARLY_2CYCLE 0x02 +#define RTS5264_DAT_OE_EARLY_1CYCLE 0x00 +#define RTS5264_DAT_OE_EARLY_EN 0x01 + +#define RTS5264_LDO1233318_POW_CTL 0xFF70 +#define RTS5264_TUNE_REF_LDO3318 (0x03<<6) +#define RTS5264_TUNE_REF_LDO3318_DFT (0x02<<6) +#define RTS5264_LDO3318_POWERON (0x01<<3) +#define RTS5264_LDO3_POWERON (0x01<<2) +#define RTS5264_LDO2_POWERON (0x01<<1) +#define RTS5264_LDO1_POWERON (0x01<<0) +#define RTS5264_LDO_POWERON_MASK (0x0F<<0) + +#define RTS5264_DV3318_CFG 0xFF71 +#define RTS5264_DV3318_TUNE_MASK (0x07<<4) +#define RTS5264_DV3318_18 (0x02<<4) +#define RTS5264_DV3318_19 (0x04<<4) +#define RTS5264_DV3318_33 (0x07<<4) + +#define RTS5264_LDO1_CFG0 0xFF72 +#define RTS5264_LDO1_OCP_THD_MASK (0x07 << 5) +#define RTS5264_LDO1_OCP_EN (0x01 << 4) +#define RTS5264_LDO1_OCP_LMT_THD_MASK (0x03 << 2) +#define RTS5264_LDO1_OCP_LMT_EN (0x01 << 1) + +#define RTS5264_LDO1_OCP_THD_850 (0x00<<5) +#define RTS5264_LDO1_OCP_THD_950 (0x01<<5) +#define RTS5264_LDO1_OCP_THD_1050 (0x02<<5) +#define RTS5264_LDO1_OCP_THD_1100 (0x03<<5) +#define RTS5264_LDO1_OCP_THD_1150 (0x04<<5) +#define RTS5264_LDO1_OCP_THD_1200 (0x05<<5) 
+#define RTS5264_LDO1_OCP_THD_1300 (0x06<<5) +#define RTS5264_LDO1_OCP_THD_1350 (0x07<<5) + +#define RTS5264_LDO1_LMT_THD_1700 (0x00<<2) +#define RTS5264_LDO1_LMT_THD_1800 (0x01<<2) +#define RTS5264_LDO1_LMT_THD_1900 (0x02<<2) +#define RTS5264_LDO1_LMT_THD_2000 (0x03<<2) + +#define RTS5264_LDO1_CFG1 0xFF73 +#define RTS5264_LDO1_TUNE_MASK (0x07<<1) +#define RTS5264_LDO1_18 (0x05<<1) +#define RTS5264_LDO1_33 (0x07<<1) +#define RTS5264_LDO1_PWD_MASK (0x01<<0) + +#define RTS5264_LDO2_CFG0 0xFF74 +#define RTS5264_LDO2_OCP_THD_MASK (0x07<<5) +#define RTS5264_LDO2_OCP_EN (0x01<<4) +#define RTS5264_LDO2_OCP_LMT_THD_MASK (0x03<<2) +#define RTS5264_LDO2_OCP_LMT_EN (0x01<<1) + +#define RTS5264_LDO2_OCP_THD_750 (0x00<<5) +#define RTS5264_LDO2_OCP_THD_850 (0x01<<5) +#define RTS5264_LDO2_OCP_THD_900 (0x02<<5) +#define RTS5264_LDO2_OCP_THD_950 (0x03<<5) +#define RTS5264_LDO2_OCP_THD_1050 (0x04<<5) +#define RTS5264_LDO2_OCP_THD_1100 (0x05<<5) +#define RTS5264_LDO2_OCP_THD_1150 (0x06<<5) +#define RTS5264_LDO2_OCP_THD_1200 (0x07<<5) + +#define RTS5264_LDO2_LMT_THD_1700 (0x00<<2) +#define RTS5264_LDO2_LMT_THD_1800 (0x01<<2) +#define RTS5264_LDO2_LMT_THD_1900 (0x02<<2) +#define RTS5264_LDO2_LMT_THD_2000 (0x03<<2) + +#define RTS5264_LDO2_CFG1 0xFF75 +#define RTS5264_LDO2_TUNE_MASK (0x07<<1) +#define RTS5264_LDO2_18 (0x02<<1) +#define RTS5264_LDO2_185 (0x03<<1) +#define RTS5264_LDO2_19 (0x04<<1) +#define RTS5264_LDO2_195 (0x05<<1) +#define RTS5264_LDO2_33 (0x07<<1) +#define RTS5264_LDO2_PWD_MASK (0x01<<0) + +#define RTS5264_LDO3_CFG0 0xFF76 +#define RTS5264_LDO3_OCP_THD_MASK (0x07<<5) +#define RTS5264_LDO3_OCP_EN (0x01<<4) +#define RTS5264_LDO3_OCP_LMT_THD_MASK (0x03<<2) +#define RTS5264_LDO3_OCP_LMT_EN (0x01<<1) + +#define RTS5264_LDO3_OCP_THD_610 (0x00<<5) +#define RTS5264_LDO3_OCP_THD_630 (0x01<<5) +#define RTS5264_LDO3_OCP_THD_670 (0x02<<5) +#define RTS5264_LDO3_OCP_THD_710 (0x03<<5) +#define RTS5264_LDO3_OCP_THD_750 (0x04<<5) +#define RTS5264_LDO3_OCP_THD_770 (0x05<<5) +#define 
RTS5264_LDO3_OCP_THD_810 (0x06<<5) +#define RTS5264_LDO3_OCP_THD_850 (0x07<<5) + +#define RTS5264_LDO3_LMT_THD_1200 (0x00<<2) +#define RTS5264_LDO3_LMT_THD_1300 (0x01<<2) +#define RTS5264_LDO3_LMT_THD_1400 (0x02<<2) +#define RTS5264_LDO3_LMT_THD_1500 (0x03<<2) + +#define RTS5264_LDO3_CFG1 0xFF77 +#define RTS5264_LDO3_TUNE_MASK (0x07<<1) +#define RTS5264_LDO3_12 (0x02<<1) +#define RTS5264_LDO3_125 (0x03<<1) +#define RTS5264_LDO3_13 (0x04<<1) +#define RTS5264_LDO3_135 (0x05<<1) +#define RTS5264_LDO3_33 (0x07<<1) +#define RTS5264_LDO3_PWD_MASK (0x01<<0) + +#define RTS5264_REG_PME_FORCE_CTL 0xFF78 +#define FORCE_PM_CONTROL 0x20 +#define FORCE_PM_VALUE 0x10 +#define REG_EFUSE_BYPASS 0x08 +#define REG_EFUSE_POR 0x04 +#define REG_EFUSE_POWER_MASK 0x03 +#define REG_EFUSE_POWERON 0x03 +#define REG_EFUSE_POWEROFF 0x00 + +#define RTS5264_PWR_CUT 0xFF81 +#define RTS5264_CFG_MEM_PD 0xF0 + +#define RTS5264_OVP_DET 0xFF8A +#define RTS5264_POW_VDET 0x04 +#define RTS5264_TUNE_VROV_MASK 0x03 +#define RTS5264_TUNE_VROV_2V 0x03 +#define RTS5264_TUNE_VROV_1V8 0x02 +#define RTS5264_TUNE_VROV_1V6 0x01 +#define RTS5264_TUNE_VROV_1V4 0x00 + +#define RTS5264_CKMUX_MBIAS_PWR 0xFF8B +#define RTS5264_NON_XTAL_SEL 0x80 +#define RTS5264_POW_CKMUX 0x40 +#define RTS5264_LVD_MASK 0x04 +#define RTS5264_POW_PSW_MASK 0x03 +#define RTS5264_POW_PSW_DFT 0x03 + +/* Single LUN, support SD/SD EXPRESS */ +#define DEFAULT_SINGLE 0 +#define SD_LUN 1 +#define SD_EXPRESS_LUN 2 + +#define RTS5264_IC_VER_A 0 +#define RTS5264_IC_VER_B 2 +#define RTS5264_IC_VER_C 3 + +int rts5264_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock, + u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk); + +#endif /* RTS5264_H */ diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c index a3f4b52bb159..f9952d76d6ed 100644 --- a/drivers/misc/cardreader/rtsx_pcr.c +++ b/drivers/misc/cardreader/rtsx_pcr.c @@ -19,13 +19,14 @@ #include <linux/mfd/core.h> #include <linux/rtsx_pci.h> 
#include <linux/mmc/card.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <linux/pm.h> #include <linux/pm_runtime.h> #include "rtsx_pcr.h" #include "rts5261.h" #include "rts5228.h" +#include "rts5264.h" static bool msi_en = true; module_param(msi_en, bool, S_IRUGO | S_IWUSR); @@ -54,6 +55,7 @@ static const struct pci_device_id rtsx_pci_ids[] = { { PCI_DEVICE(0x10EC, 0x5260), PCI_CLASS_OTHERS << 16, 0xFF0000 }, { PCI_DEVICE(0x10EC, 0x5261), PCI_CLASS_OTHERS << 16, 0xFF0000 }, { PCI_DEVICE(0x10EC, 0x5228), PCI_CLASS_OTHERS << 16, 0xFF0000 }, + { PCI_DEVICE(0x10EC, 0x5264), PCI_CLASS_OTHERS << 16, 0xFF0000 }, { 0, } }; @@ -418,25 +420,6 @@ static void rtsx_pci_add_sg_tbl(struct rtsx_pcr *pcr, pcr->sgi++; } -int rtsx_pci_transfer_data(struct rtsx_pcr *pcr, struct scatterlist *sglist, - int num_sg, bool read, int timeout) -{ - int err = 0, count; - - pcr_dbg(pcr, "--> %s: num_sg = %d\n", __func__, num_sg); - count = rtsx_pci_dma_map_sg(pcr, sglist, num_sg, read); - if (count < 1) - return -EINVAL; - pcr_dbg(pcr, "DMA mapping count: %d\n", count); - - err = rtsx_pci_dma_transfer(pcr, sglist, count, read, timeout); - - rtsx_pci_dma_unmap_sg(pcr, sglist, num_sg, read); - - return err; -} -EXPORT_SYMBOL_GPL(rtsx_pci_transfer_data); - int rtsx_pci_dma_map_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist, int num_sg, bool read) { @@ -714,6 +697,9 @@ int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock, if (PCI_PID(pcr) == PID_5228) return rts5228_pci_switch_clock(pcr, card_clock, ssc_depth, initial_mode, double_clk, vpclk); + if (PCI_PID(pcr) == PID_5264) + return rts5264_pci_switch_clock(pcr, card_clock, + ssc_depth, initial_mode, double_clk, vpclk); if (initial_mode) { /* We use 250k(around) here, in initial stage */ @@ -987,7 +973,8 @@ static irqreturn_t rtsx_pci_isr(int irq, void *dev_id) int_reg &= (pcr->bier | 0x7FFFFF); - if (int_reg & SD_OC_INT) + if ((int_reg & SD_OC_INT) || + ((int_reg & SD_OVP_INT) && (PCI_PID(pcr) == 
PID_5264))) rtsx_pci_process_ocp_interrupt(pcr); if (int_reg & SD_INT) { @@ -996,12 +983,14 @@ static irqreturn_t rtsx_pci_isr(int irq, void *dev_id) } else { pcr->card_removed |= SD_EXIST; pcr->card_inserted &= ~SD_EXIST; - if (PCI_PID(pcr) == PID_5261) { - rtsx_pci_write_register(pcr, RTS5261_FW_STATUS, - RTS5261_EXPRESS_LINK_FAIL_MASK, 0); - pcr->extra_caps |= EXTRA_CAPS_SD_EXPRESS; - } } + + if ((PCI_PID(pcr) == PID_5261) || (PCI_PID(pcr) == PID_5264)) { + rtsx_pci_write_register(pcr, RTS5261_FW_STATUS, + RTS5261_EXPRESS_LINK_FAIL_MASK, 0); + pcr->extra_caps |= EXTRA_CAPS_SD_EXPRESS; + } + pcr->dma_error_count = 0; } @@ -1159,7 +1148,9 @@ void rtsx_pci_enable_oobs_polling(struct rtsx_pcr *pcr) { u16 val; - if ((PCI_PID(pcr) != PID_525A) && (PCI_PID(pcr) != PID_5260)) { + if ((PCI_PID(pcr) != PID_525A) && + (PCI_PID(pcr) != PID_5260) && + (PCI_PID(pcr) != PID_5264)) { rtsx_pci_read_phy_register(pcr, 0x01, &val); val |= 1<<9; rtsx_pci_write_phy_register(pcr, 0x01, val); @@ -1175,7 +1166,9 @@ void rtsx_pci_disable_oobs_polling(struct rtsx_pcr *pcr) { u16 val; - if ((PCI_PID(pcr) != PID_525A) && (PCI_PID(pcr) != PID_5260)) { + if ((PCI_PID(pcr) != PID_525A) && + (PCI_PID(pcr) != PID_5260) && + (PCI_PID(pcr) != PID_5264)) { rtsx_pci_read_phy_register(pcr, 0x01, &val); val &= ~(1<<9); rtsx_pci_write_phy_register(pcr, 0x01, val); @@ -1185,33 +1178,6 @@ void rtsx_pci_disable_oobs_polling(struct rtsx_pcr *pcr) } -int rtsx_sd_power_off_card3v3(struct rtsx_pcr *pcr) -{ - rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN | - MS_CLK_EN | SD40_CLK_EN, 0); - rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0); - rtsx_pci_card_power_off(pcr, RTSX_SD_CARD); - - msleep(50); - - rtsx_pci_card_pull_ctl_disable(pcr, RTSX_SD_CARD); - - return 0; -} - -int rtsx_ms_power_off_card3v3(struct rtsx_pcr *pcr) -{ - rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN | - MS_CLK_EN | SD40_CLK_EN, 0); - - rtsx_pci_card_pull_ctl_disable(pcr, RTSX_MS_CARD); - - rtsx_pci_write_register(pcr, 
CARD_OE, MS_OUTPUT_EN, 0); - rtsx_pci_card_power_off(pcr, RTSX_MS_CARD); - - return 0; -} - static int rtsx_pci_init_hw(struct rtsx_pcr *pcr) { struct pci_dev *pdev = pcr->pci; @@ -1226,7 +1192,7 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr) rtsx_pci_enable_bus_int(pcr); /* Power on SSC */ - if (PCI_PID(pcr) == PID_5261) { + if ((PCI_PID(pcr) == PID_5261) || (PCI_PID(pcr) == PID_5264)) { /* Gating real mcu clock */ err = rtsx_pci_write_register(pcr, RTS5261_FW_CFG1, RTS5261_MCU_CLOCK_GATING, 0); @@ -1270,6 +1236,11 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr) else if (PCI_PID(pcr) == PID_5228) rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF, RTS5228_SSC_DEPTH_2M); + else if (is_version(pcr, PID_5264, RTS5264_IC_VER_A)) + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0); + else if (PCI_PID(pcr) == PID_5264) + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF, + RTS5264_SSC_DEPTH_2M); else rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF, 0x12); @@ -1305,6 +1276,7 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr) case PID_5260: case PID_5261: case PID_5228: + case PID_5264: rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, 1, 1); break; default: @@ -1326,11 +1298,8 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr) return err; } - if (pcr->aspm_mode == ASPM_MODE_REG) { + if (pcr->aspm_mode == ASPM_MODE_REG) rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0x30, 0x30); - rtsx_pci_write_register(pcr, PETXCFG, - FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH); - } /* No CD interrupt if probing driver with card inserted. * So we need to initialize pcr->card_exist here. 
@@ -1345,7 +1314,9 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr) static int rtsx_pci_init_chip(struct rtsx_pcr *pcr) { - int err; + struct rtsx_cr_option *option = &(pcr->option); + int err, l1ss; + u32 lval; u16 cfg_val; u8 val; @@ -1405,6 +1376,10 @@ static int rtsx_pci_init_chip(struct rtsx_pcr *pcr) case 0x5228: rts5228_init_params(pcr); break; + + case 0x5264: + rts5264_init_params(pcr); + break; } pcr_dbg(pcr, "PID: 0x%04x, IC version: 0x%02x\n", @@ -1430,6 +1405,48 @@ static int rtsx_pci_init_chip(struct rtsx_pcr *pcr) pcr->aspm_enabled = true; } + l1ss = pci_find_ext_capability(pcr->pci, PCI_EXT_CAP_ID_L1SS); + if (l1ss) { + pci_read_config_dword(pcr->pci, l1ss + PCI_L1SS_CTL1, &lval); + + if (lval & PCI_L1SS_CTL1_ASPM_L1_1) + rtsx_set_dev_flag(pcr, ASPM_L1_1_EN); + else + rtsx_clear_dev_flag(pcr, ASPM_L1_1_EN); + + if (lval & PCI_L1SS_CTL1_ASPM_L1_2) + rtsx_set_dev_flag(pcr, ASPM_L1_2_EN); + else + rtsx_clear_dev_flag(pcr, ASPM_L1_2_EN); + + if (lval & PCI_L1SS_CTL1_PCIPM_L1_1) + rtsx_set_dev_flag(pcr, PM_L1_1_EN); + else + rtsx_clear_dev_flag(pcr, PM_L1_1_EN); + + if (lval & PCI_L1SS_CTL1_PCIPM_L1_2) + rtsx_set_dev_flag(pcr, PM_L1_2_EN); + else + rtsx_clear_dev_flag(pcr, PM_L1_2_EN); + + pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &cfg_val); + if (cfg_val & PCI_EXP_DEVCTL2_LTR_EN) { + option->ltr_enabled = true; + option->ltr_active = true; + } else { + option->ltr_enabled = false; + } + + if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN + | PM_L1_1_EN | PM_L1_2_EN)) + option->force_clkreq_0 = false; + else + option->force_clkreq_0 = true; + } else { + option->ltr_enabled = false; + option->force_clkreq_0 = true; + } + if (pcr->ops->fetch_vendor_settings) pcr->ops->fetch_vendor_settings(pcr); @@ -1503,7 +1520,7 @@ static int rtsx_pci_probe(struct pci_dev *pcidev, pcr->pci = pcidev; dev_set_drvdata(&pcidev->dev, handle); - if (CHK_PCI_PID(pcr, 0x525A)) + if ((CHK_PCI_PID(pcr, 0x525A)) || (CHK_PCI_PID(pcr, 0x5264))) bar = 1; len = 
pci_resource_len(pcidev, bar); base = pci_resource_start(pcidev, bar); diff --git a/drivers/misc/cardreader/rtsx_pcr.h b/drivers/misc/cardreader/rtsx_pcr.h index 37d1f316ae17..40562ff2be13 100644 --- a/drivers/misc/cardreader/rtsx_pcr.h +++ b/drivers/misc/cardreader/rtsx_pcr.h @@ -74,6 +74,7 @@ void rtl8411b_init_params(struct rtsx_pcr *pcr); void rts5260_init_params(struct rtsx_pcr *pcr); void rts5261_init_params(struct rtsx_pcr *pcr); void rts5228_init_params(struct rtsx_pcr *pcr); +void rts5264_init_params(struct rtsx_pcr *pcr); static inline u8 map_sd_drive(int idx) { @@ -99,6 +100,8 @@ static inline u8 map_sd_drive(int idx) #define rtsx_reg_to_sd30_drive_sel_3v3(reg) (((reg) >> 5) & 0x03) #define rtsx_reg_to_card_drive_sel(reg) ((((reg) >> 25) & 0x01) << 6) #define rtsx_reg_check_reverse_socket(reg) ((reg) & 0x4000) +#define rtsx_reg_check_cd_reverse(reg) ((reg) & 0x800000) +#define rtsx_reg_check_wp_reverse(reg) ((reg) & 0x400000) #define rts5209_reg_to_aspm(reg) (((reg) >> 5) & 0x03) #define rts5209_reg_check_ms_pmos(reg) (!((reg) & 0x08)) #define rts5209_reg_to_sd30_drive_sel_1v8(reg) (((reg) >> 3) & 0x07) @@ -126,7 +129,5 @@ int rtsx_pci_get_ocpstat(struct rtsx_pcr *pcr, u8 *val); void rtsx_pci_clear_ocpstat(struct rtsx_pcr *pcr); void rtsx_pci_enable_oobs_polling(struct rtsx_pcr *pcr); void rtsx_pci_disable_oobs_polling(struct rtsx_pcr *pcr); -int rtsx_sd_power_off_card3v3(struct rtsx_pcr *pcr); -int rtsx_ms_power_off_card3v3(struct rtsx_pcr *pcr); #endif diff --git a/drivers/misc/cardreader/rtsx_usb.c b/drivers/misc/cardreader/rtsx_usb.c index f150d8769f19..1830e9ed2521 100644 --- a/drivers/misc/cardreader/rtsx_usb.c +++ b/drivers/misc/cardreader/rtsx_usb.c @@ -20,18 +20,18 @@ MODULE_PARM_DESC(polling_pipe, "polling pipe (0: ctl, 1: bulk)"); static const struct mfd_cell rtsx_usb_cells[] = { [RTSX_USB_SD_CARD] = { - .name = "rtsx_usb_sdmmc", + .name = DRV_NAME_RTSX_USB_SDMMC, .pdata_size = 0, }, [RTSX_USB_MS_CARD] = { - .name = "rtsx_usb_ms", + .name = 
DRV_NAME_RTSX_USB_MS, .pdata_size = 0, }, }; static void rtsx_usb_sg_timed_out(struct timer_list *t) { - struct rtsx_ucr *ucr = from_timer(ucr, t, sg_timer); + struct rtsx_ucr *ucr = timer_container_of(ucr, t, sg_timer); dev_dbg(&ucr->pusb_intf->dev, "%s: sg transfer timed out", __func__); usb_sg_cancel(&ucr->current_sg); @@ -53,7 +53,7 @@ static int rtsx_usb_bulk_transfer_sglist(struct rtsx_ucr *ucr, ucr->sg_timer.expires = jiffies + msecs_to_jiffies(timeout); add_timer(&ucr->sg_timer); usb_sg_wait(&ucr->current_sg); - if (!del_timer_sync(&ucr->sg_timer)) + if (!timer_delete_sync(&ucr->sg_timer)) ret = -ETIMEDOUT; else ret = ucr->current_sg.status; @@ -552,6 +552,10 @@ static int rtsx_usb_reset_chip(struct rtsx_ucr *ucr) ret = rtsx_usb_send_cmd(ucr, MODE_C, 100); if (ret) return ret; + /* config OCP */ + rtsx_usb_write_register(ucr, OCPCTL, MS_OCP_DETECT_EN, MS_OCP_DETECT_EN); + rtsx_usb_write_register(ucr, OCPPARA1, 0xF0, 0x50); + rtsx_usb_write_register(ucr, OCPPARA2, 0x7, 0x3); /* config non-crystal mode */ rtsx_usb_read_register(ucr, CFG_MODE, &val); @@ -698,6 +702,12 @@ static void rtsx_usb_disconnect(struct usb_interface *intf) } #ifdef CONFIG_PM +static int rtsx_usb_resume_child(struct device *dev, void *data) +{ + pm_request_resume(dev); + return 0; +} + static int rtsx_usb_suspend(struct usb_interface *intf, pm_message_t message) { struct rtsx_ucr *ucr = @@ -713,8 +723,13 @@ static int rtsx_usb_suspend(struct usb_interface *intf, pm_message_t message) mutex_unlock(&ucr->dev_mutex); /* Defer the autosuspend if card exists */ - if (val & (SD_CD | MS_CD)) + if (val & (SD_CD | MS_CD)) { + device_for_each_child(&intf->dev, NULL, rtsx_usb_resume_child); return -EAGAIN; + } else { + /* if the card does not exists, clear OCP status */ + rtsx_usb_write_register(ucr, OCPCTL, MS_OCP_CLEAR, MS_OCP_CLEAR); + } } else { /* There is an ongoing operation*/ return -EAGAIN; @@ -724,12 +739,6 @@ static int rtsx_usb_suspend(struct usb_interface *intf, pm_message_t message) 
return 0; } -static int rtsx_usb_resume_child(struct device *dev, void *data) -{ - pm_request_resume(dev); - return 0; -} - static int rtsx_usb_resume(struct usb_interface *intf) { device_for_each_child(&intf->dev, NULL, rtsx_usb_resume_child); @@ -780,7 +789,7 @@ static const struct usb_device_id rtsx_usb_usb_ids[] = { MODULE_DEVICE_TABLE(usb, rtsx_usb_usb_ids); static struct usb_driver rtsx_usb_driver = { - .name = "rtsx_usb", + .name = DRV_NAME_RTSX_USB, .probe = rtsx_usb_probe, .disconnect = rtsx_usb_disconnect, .suspend = rtsx_usb_suspend, diff --git a/drivers/misc/cb710/core.c b/drivers/misc/cb710/core.c index 55b7ee0e8f93..2dd212f04fed 100644 --- a/drivers/misc/cb710/core.c +++ b/drivers/misc/cb710/core.c @@ -223,13 +223,11 @@ static int cb710_probe(struct pci_dev *pdev, if (err) return err; - err = pcim_iomap_regions(pdev, 0x0001, KBUILD_MODNAME); - if (err) - return err; - spin_lock_init(&chip->irq_lock); chip->pdev = pdev; - chip->iobase = pcim_iomap_table(pdev)[0]; + chip->iobase = pcim_iomap_region(pdev, 0, KBUILD_MODNAME); + if (IS_ERR(chip->iobase)) + return PTR_ERR(chip->iobase); pci_set_drvdata(pdev, chip); diff --git a/drivers/misc/cs5535-mfgpt.c b/drivers/misc/cs5535-mfgpt.c index 18fc1aaa5cdd..2b6778d8d166 100644 --- a/drivers/misc/cs5535-mfgpt.c +++ b/drivers/misc/cs5535-mfgpt.c @@ -16,6 +16,7 @@ #include <linux/platform_device.h> #include <linux/cs5535.h> #include <linux/slab.h> +#include <asm/msr.h> #define DRV_NAME "cs5535-mfgpt" diff --git a/drivers/misc/cxl/Kconfig b/drivers/misc/cxl/Kconfig deleted file mode 100644 index 5efc4151bf58..000000000000 --- a/drivers/misc/cxl/Kconfig +++ /dev/null @@ -1,26 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -# -# IBM Coherent Accelerator (CXL) compatible devices -# - -config CXL_BASE - bool - select PPC_COPRO_BASE - select PPC_64S_HASH_MMU - -config CXL - tristate "Support for IBM Coherent Accelerators (CXL)" - depends on PPC_POWERNV && PCI_MSI && EEH - select CXL_BASE - default m - help - Select 
this option to enable driver support for IBM Coherent - Accelerators (CXL). CXL is otherwise known as Coherent Accelerator - Processor Interface (CAPI). CAPI allows accelerators in FPGAs to be - coherently attached to a CPU via an MMU. This driver enables - userspace programs to access these accelerators via /dev/cxl/afuM.N - devices. - - CAPI adapters are found in POWER8 based systems. - - If unsure, say N. diff --git a/drivers/misc/cxl/Makefile b/drivers/misc/cxl/Makefile deleted file mode 100644 index 5eea61b9584f..000000000000 --- a/drivers/misc/cxl/Makefile +++ /dev/null @@ -1,14 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -ccflags-y := $(call cc-disable-warning, unused-const-variable) -ccflags-$(CONFIG_PPC_WERROR) += -Werror - -cxl-y += main.o file.o irq.o fault.o native.o -cxl-y += context.o sysfs.o pci.o trace.o -cxl-y += vphb.o api.o cxllib.o -cxl-$(CONFIG_PPC_PSERIES) += flash.o guest.o of.o hcalls.o -cxl-$(CONFIG_DEBUG_FS) += debugfs.o -obj-$(CONFIG_CXL) += cxl.o -obj-$(CONFIG_CXL_BASE) += base.o - -# For tracepoints to include our trace.h from tracepoint infrastructure: -CFLAGS_trace.o := -I$(src) diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c deleted file mode 100644 index d85c56530863..000000000000 --- a/drivers/misc/cxl/api.c +++ /dev/null @@ -1,532 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright 2014 IBM Corp. - */ - -#include <linux/pci.h> -#include <linux/slab.h> -#include <linux/file.h> -#include <misc/cxl.h> -#include <linux/module.h> -#include <linux/mount.h> -#include <linux/pseudo_fs.h> -#include <linux/sched/mm.h> -#include <linux/mmu_context.h> -#include <linux/irqdomain.h> - -#include "cxl.h" - -/* - * Since we want to track memory mappings to be able to force-unmap - * when the AFU is no longer reachable, we need an inode. 
For devices - * opened through the cxl user API, this is not a problem, but a - * userland process can also get a cxl fd through the cxl_get_fd() - * API, which is used by the cxlflash driver. - * - * Therefore we implement our own simple pseudo-filesystem and inode - * allocator. We don't use the anonymous inode, as we need the - * meta-data associated with it (address_space) and it is shared by - * other drivers/processes, so it could lead to cxl unmapping VMAs - * from random processes. - */ - -#define CXL_PSEUDO_FS_MAGIC 0x1697697f - -static int cxl_fs_cnt; -static struct vfsmount *cxl_vfs_mount; - -static int cxl_fs_init_fs_context(struct fs_context *fc) -{ - return init_pseudo(fc, CXL_PSEUDO_FS_MAGIC) ? 0 : -ENOMEM; -} - -static struct file_system_type cxl_fs_type = { - .name = "cxl", - .owner = THIS_MODULE, - .init_fs_context = cxl_fs_init_fs_context, - .kill_sb = kill_anon_super, -}; - - -void cxl_release_mapping(struct cxl_context *ctx) -{ - if (ctx->kernelapi && ctx->mapping) - simple_release_fs(&cxl_vfs_mount, &cxl_fs_cnt); -} - -static struct file *cxl_getfile(const char *name, - const struct file_operations *fops, - void *priv, int flags) -{ - struct file *file; - struct inode *inode; - int rc; - - /* strongly inspired by anon_inode_getfile() */ - - if (fops->owner && !try_module_get(fops->owner)) - return ERR_PTR(-ENOENT); - - rc = simple_pin_fs(&cxl_fs_type, &cxl_vfs_mount, &cxl_fs_cnt); - if (rc < 0) { - pr_err("Cannot mount cxl pseudo filesystem: %d\n", rc); - file = ERR_PTR(rc); - goto err_module; - } - - inode = alloc_anon_inode(cxl_vfs_mount->mnt_sb); - if (IS_ERR(inode)) { - file = ERR_CAST(inode); - goto err_fs; - } - - file = alloc_file_pseudo(inode, cxl_vfs_mount, name, - flags & (O_ACCMODE | O_NONBLOCK), fops); - if (IS_ERR(file)) - goto err_inode; - - file->private_data = priv; - - return file; - -err_inode: - iput(inode); -err_fs: - simple_release_fs(&cxl_vfs_mount, &cxl_fs_cnt); -err_module: - module_put(fops->owner); - return file; -} - 
-struct cxl_context *cxl_dev_context_init(struct pci_dev *dev) -{ - struct cxl_afu *afu; - struct cxl_context *ctx; - int rc; - - afu = cxl_pci_to_afu(dev); - if (IS_ERR(afu)) - return ERR_CAST(afu); - - ctx = cxl_context_alloc(); - if (!ctx) - return ERR_PTR(-ENOMEM); - - ctx->kernelapi = true; - - /* Make it a slave context. We can promote it later? */ - rc = cxl_context_init(ctx, afu, false); - if (rc) - goto err_ctx; - - return ctx; - -err_ctx: - kfree(ctx); - return ERR_PTR(rc); -} -EXPORT_SYMBOL_GPL(cxl_dev_context_init); - -struct cxl_context *cxl_get_context(struct pci_dev *dev) -{ - return dev->dev.archdata.cxl_ctx; -} -EXPORT_SYMBOL_GPL(cxl_get_context); - -int cxl_release_context(struct cxl_context *ctx) -{ - if (ctx->status >= STARTED) - return -EBUSY; - - cxl_context_free(ctx); - - return 0; -} -EXPORT_SYMBOL_GPL(cxl_release_context); - -static irq_hw_number_t cxl_find_afu_irq(struct cxl_context *ctx, int num) -{ - __u16 range; - int r; - - for (r = 0; r < CXL_IRQ_RANGES; r++) { - range = ctx->irqs.range[r]; - if (num < range) { - return ctx->irqs.offset[r] + num; - } - num -= range; - } - return 0; -} - - -int cxl_set_priv(struct cxl_context *ctx, void *priv) -{ - if (!ctx) - return -EINVAL; - - ctx->priv = priv; - - return 0; -} -EXPORT_SYMBOL_GPL(cxl_set_priv); - -void *cxl_get_priv(struct cxl_context *ctx) -{ - if (!ctx) - return ERR_PTR(-EINVAL); - - return ctx->priv; -} -EXPORT_SYMBOL_GPL(cxl_get_priv); - -int cxl_allocate_afu_irqs(struct cxl_context *ctx, int num) -{ - int res; - irq_hw_number_t hwirq; - - if (num == 0) - num = ctx->afu->pp_irqs; - res = afu_allocate_irqs(ctx, num); - if (res) - return res; - - if (!cpu_has_feature(CPU_FTR_HVMODE)) { - /* In a guest, the PSL interrupt is not multiplexed. 
It was - * allocated above, and we need to set its handler - */ - hwirq = cxl_find_afu_irq(ctx, 0); - if (hwirq) - cxl_map_irq(ctx->afu->adapter, hwirq, cxl_ops->psl_interrupt, ctx, "psl"); - } - - if (ctx->status == STARTED) { - if (cxl_ops->update_ivtes) - cxl_ops->update_ivtes(ctx); - else WARN(1, "BUG: cxl_allocate_afu_irqs must be called prior to starting the context on this platform\n"); - } - - return res; -} -EXPORT_SYMBOL_GPL(cxl_allocate_afu_irqs); - -void cxl_free_afu_irqs(struct cxl_context *ctx) -{ - irq_hw_number_t hwirq; - unsigned int virq; - - if (!cpu_has_feature(CPU_FTR_HVMODE)) { - hwirq = cxl_find_afu_irq(ctx, 0); - if (hwirq) { - virq = irq_find_mapping(NULL, hwirq); - if (virq) - cxl_unmap_irq(virq, ctx); - } - } - afu_irq_name_free(ctx); - cxl_ops->release_irq_ranges(&ctx->irqs, ctx->afu->adapter); -} -EXPORT_SYMBOL_GPL(cxl_free_afu_irqs); - -int cxl_map_afu_irq(struct cxl_context *ctx, int num, - irq_handler_t handler, void *cookie, char *name) -{ - irq_hw_number_t hwirq; - - /* - * Find interrupt we are to register. - */ - hwirq = cxl_find_afu_irq(ctx, num); - if (!hwirq) - return -ENOENT; - - return cxl_map_irq(ctx->afu->adapter, hwirq, handler, cookie, name); -} -EXPORT_SYMBOL_GPL(cxl_map_afu_irq); - -void cxl_unmap_afu_irq(struct cxl_context *ctx, int num, void *cookie) -{ - irq_hw_number_t hwirq; - unsigned int virq; - - hwirq = cxl_find_afu_irq(ctx, num); - if (!hwirq) - return; - - virq = irq_find_mapping(NULL, hwirq); - if (virq) - cxl_unmap_irq(virq, cookie); -} -EXPORT_SYMBOL_GPL(cxl_unmap_afu_irq); - -/* - * Start a context - * Code here similar to afu_ioctl_start_work(). - */ -int cxl_start_context(struct cxl_context *ctx, u64 wed, - struct task_struct *task) -{ - int rc = 0; - bool kernel = true; - - pr_devel("%s: pe: %i\n", __func__, ctx->pe); - - mutex_lock(&ctx->status_mutex); - if (ctx->status == STARTED) - goto out; /* already started */ - - /* - * Increment the mapped context count for adapter. 
This also checks - * if adapter_context_lock is taken. - */ - rc = cxl_adapter_context_get(ctx->afu->adapter); - if (rc) - goto out; - - if (task) { - ctx->pid = get_task_pid(task, PIDTYPE_PID); - kernel = false; - - /* acquire a reference to the task's mm */ - ctx->mm = get_task_mm(current); - - /* ensure this mm_struct can't be freed */ - cxl_context_mm_count_get(ctx); - - if (ctx->mm) { - /* decrement the use count from above */ - mmput(ctx->mm); - /* make TLBIs for this context global */ - mm_context_add_copro(ctx->mm); - } - } - - /* - * Increment driver use count. Enables global TLBIs for hash - * and callbacks to handle the segment table - */ - cxl_ctx_get(); - - /* See the comment in afu_ioctl_start_work() */ - smp_mb(); - - if ((rc = cxl_ops->attach_process(ctx, kernel, wed, 0))) { - put_pid(ctx->pid); - ctx->pid = NULL; - cxl_adapter_context_put(ctx->afu->adapter); - cxl_ctx_put(); - if (task) { - cxl_context_mm_count_put(ctx); - if (ctx->mm) - mm_context_remove_copro(ctx->mm); - } - goto out; - } - - ctx->status = STARTED; -out: - mutex_unlock(&ctx->status_mutex); - return rc; -} -EXPORT_SYMBOL_GPL(cxl_start_context); - -int cxl_process_element(struct cxl_context *ctx) -{ - return ctx->external_pe; -} -EXPORT_SYMBOL_GPL(cxl_process_element); - -/* Stop a context. 
Returns 0 on success, otherwise -Errno */ -int cxl_stop_context(struct cxl_context *ctx) -{ - return __detach_context(ctx); -} -EXPORT_SYMBOL_GPL(cxl_stop_context); - -void cxl_set_master(struct cxl_context *ctx) -{ - ctx->master = true; -} -EXPORT_SYMBOL_GPL(cxl_set_master); - -/* wrappers around afu_* file ops which are EXPORTED */ -int cxl_fd_open(struct inode *inode, struct file *file) -{ - return afu_open(inode, file); -} -EXPORT_SYMBOL_GPL(cxl_fd_open); -int cxl_fd_release(struct inode *inode, struct file *file) -{ - return afu_release(inode, file); -} -EXPORT_SYMBOL_GPL(cxl_fd_release); -long cxl_fd_ioctl(struct file *file, unsigned int cmd, unsigned long arg) -{ - return afu_ioctl(file, cmd, arg); -} -EXPORT_SYMBOL_GPL(cxl_fd_ioctl); -int cxl_fd_mmap(struct file *file, struct vm_area_struct *vm) -{ - return afu_mmap(file, vm); -} -EXPORT_SYMBOL_GPL(cxl_fd_mmap); -__poll_t cxl_fd_poll(struct file *file, struct poll_table_struct *poll) -{ - return afu_poll(file, poll); -} -EXPORT_SYMBOL_GPL(cxl_fd_poll); -ssize_t cxl_fd_read(struct file *file, char __user *buf, size_t count, - loff_t *off) -{ - return afu_read(file, buf, count, off); -} -EXPORT_SYMBOL_GPL(cxl_fd_read); - -#define PATCH_FOPS(NAME) if (!fops->NAME) fops->NAME = afu_fops.NAME - -/* Get a struct file and fd for a context and attach the ops */ -struct file *cxl_get_fd(struct cxl_context *ctx, struct file_operations *fops, - int *fd) -{ - struct file *file; - int rc, flags, fdtmp; - char *name = NULL; - - /* only allow one per context */ - if (ctx->mapping) - return ERR_PTR(-EEXIST); - - flags = O_RDWR | O_CLOEXEC; - - /* This code is similar to anon_inode_getfd() */ - rc = get_unused_fd_flags(flags); - if (rc < 0) - return ERR_PTR(rc); - fdtmp = rc; - - /* - * Patch the file ops. Needs to be careful that this is rentrant safe. 
- */ - if (fops) { - PATCH_FOPS(open); - PATCH_FOPS(poll); - PATCH_FOPS(read); - PATCH_FOPS(release); - PATCH_FOPS(unlocked_ioctl); - PATCH_FOPS(compat_ioctl); - PATCH_FOPS(mmap); - } else /* use default ops */ - fops = (struct file_operations *)&afu_fops; - - name = kasprintf(GFP_KERNEL, "cxl:%d", ctx->pe); - file = cxl_getfile(name, fops, ctx, flags); - kfree(name); - if (IS_ERR(file)) - goto err_fd; - - cxl_context_set_mapping(ctx, file->f_mapping); - *fd = fdtmp; - return file; - -err_fd: - put_unused_fd(fdtmp); - return NULL; -} -EXPORT_SYMBOL_GPL(cxl_get_fd); - -struct cxl_context *cxl_fops_get_context(struct file *file) -{ - return file->private_data; -} -EXPORT_SYMBOL_GPL(cxl_fops_get_context); - -void cxl_set_driver_ops(struct cxl_context *ctx, - struct cxl_afu_driver_ops *ops) -{ - WARN_ON(!ops->fetch_event || !ops->event_delivered); - atomic_set(&ctx->afu_driver_events, 0); - ctx->afu_driver_ops = ops; -} -EXPORT_SYMBOL_GPL(cxl_set_driver_ops); - -void cxl_context_events_pending(struct cxl_context *ctx, - unsigned int new_events) -{ - atomic_add(new_events, &ctx->afu_driver_events); - wake_up_all(&ctx->wq); -} -EXPORT_SYMBOL_GPL(cxl_context_events_pending); - -int cxl_start_work(struct cxl_context *ctx, - struct cxl_ioctl_start_work *work) -{ - int rc; - - /* code taken from afu_ioctl_start_work */ - if (!(work->flags & CXL_START_WORK_NUM_IRQS)) - work->num_interrupts = ctx->afu->pp_irqs; - else if ((work->num_interrupts < ctx->afu->pp_irqs) || - (work->num_interrupts > ctx->afu->irqs_max)) { - return -EINVAL; - } - - rc = afu_register_irqs(ctx, work->num_interrupts); - if (rc) - return rc; - - rc = cxl_start_context(ctx, work->work_element_descriptor, current); - if (rc < 0) { - afu_release_irqs(ctx, ctx); - return rc; - } - - return 0; -} -EXPORT_SYMBOL_GPL(cxl_start_work); - -void __iomem *cxl_psa_map(struct cxl_context *ctx) -{ - if (ctx->status != STARTED) - return NULL; - - pr_devel("%s: psn_phys%llx size:%llx\n", - __func__, ctx->psn_phys, 
ctx->psn_size); - return ioremap(ctx->psn_phys, ctx->psn_size); -} -EXPORT_SYMBOL_GPL(cxl_psa_map); - -void cxl_psa_unmap(void __iomem *addr) -{ - iounmap(addr); -} -EXPORT_SYMBOL_GPL(cxl_psa_unmap); - -int cxl_afu_reset(struct cxl_context *ctx) -{ - struct cxl_afu *afu = ctx->afu; - int rc; - - rc = cxl_ops->afu_reset(afu); - if (rc) - return rc; - - return cxl_ops->afu_check_and_enable(afu); -} -EXPORT_SYMBOL_GPL(cxl_afu_reset); - -void cxl_perst_reloads_same_image(struct cxl_afu *afu, - bool perst_reloads_same_image) -{ - afu->adapter->perst_same_image = perst_reloads_same_image; -} -EXPORT_SYMBOL_GPL(cxl_perst_reloads_same_image); - -ssize_t cxl_read_adapter_vpd(struct pci_dev *dev, void *buf, size_t count) -{ - struct cxl_afu *afu = cxl_pci_to_afu(dev); - if (IS_ERR(afu)) - return -ENODEV; - - return cxl_ops->read_adapter_vpd(afu->adapter, buf, count); -} -EXPORT_SYMBOL_GPL(cxl_read_adapter_vpd); diff --git a/drivers/misc/cxl/base.c b/drivers/misc/cxl/base.c deleted file mode 100644 index cc0caf9192dc..000000000000 --- a/drivers/misc/cxl/base.c +++ /dev/null @@ -1,125 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright 2014 IBM Corp. 
- */ - -#include <linux/module.h> -#include <linux/rcupdate.h> -#include <asm/errno.h> -#include <misc/cxl-base.h> -#include <linux/of_platform.h> -#include "cxl.h" - -/* protected by rcu */ -static struct cxl_calls *cxl_calls; - -atomic_t cxl_use_count = ATOMIC_INIT(0); -EXPORT_SYMBOL(cxl_use_count); - -#ifdef CONFIG_CXL_MODULE - -static inline struct cxl_calls *cxl_calls_get(void) -{ - struct cxl_calls *calls = NULL; - - rcu_read_lock(); - calls = rcu_dereference(cxl_calls); - if (calls && !try_module_get(calls->owner)) - calls = NULL; - rcu_read_unlock(); - - return calls; -} - -static inline void cxl_calls_put(struct cxl_calls *calls) -{ - BUG_ON(calls != cxl_calls); - - /* we don't need to rcu this, as we hold a reference to the module */ - module_put(cxl_calls->owner); -} - -#else /* !defined CONFIG_CXL_MODULE */ - -static inline struct cxl_calls *cxl_calls_get(void) -{ - return cxl_calls; -} - -static inline void cxl_calls_put(struct cxl_calls *calls) { } - -#endif /* CONFIG_CXL_MODULE */ - -/* AFU refcount management */ -struct cxl_afu *cxl_afu_get(struct cxl_afu *afu) -{ - return (get_device(&afu->dev) == NULL) ? 
NULL : afu; -} -EXPORT_SYMBOL_GPL(cxl_afu_get); - -void cxl_afu_put(struct cxl_afu *afu) -{ - put_device(&afu->dev); -} -EXPORT_SYMBOL_GPL(cxl_afu_put); - -void cxl_slbia(struct mm_struct *mm) -{ - struct cxl_calls *calls; - - calls = cxl_calls_get(); - if (!calls) - return; - - if (cxl_ctx_in_use()) - calls->cxl_slbia(mm); - - cxl_calls_put(calls); -} - -int register_cxl_calls(struct cxl_calls *calls) -{ - if (cxl_calls) - return -EBUSY; - - rcu_assign_pointer(cxl_calls, calls); - return 0; -} -EXPORT_SYMBOL_GPL(register_cxl_calls); - -void unregister_cxl_calls(struct cxl_calls *calls) -{ - BUG_ON(cxl_calls->owner != calls->owner); - RCU_INIT_POINTER(cxl_calls, NULL); - synchronize_rcu(); -} -EXPORT_SYMBOL_GPL(unregister_cxl_calls); - -int cxl_update_properties(struct device_node *dn, - struct property *new_prop) -{ - return of_update_property(dn, new_prop); -} -EXPORT_SYMBOL_GPL(cxl_update_properties); - -static int __init cxl_base_init(void) -{ - struct device_node *np; - struct platform_device *dev; - int count = 0; - - /* - * Scan for compatible devices in guest only - */ - if (cpu_has_feature(CPU_FTR_HVMODE)) - return 0; - - for_each_compatible_node(np, NULL, "ibm,coherent-platform-facility") { - dev = of_platform_device_create(np, NULL, NULL); - if (dev) - count++; - } - pr_devel("Found %d cxl device(s)\n", count); - return 0; -} -device_initcall(cxl_base_init); diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c deleted file mode 100644 index 76b5ea66dfa1..000000000000 --- a/drivers/misc/cxl/context.c +++ /dev/null @@ -1,362 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright 2014 IBM Corp. 
- */ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/bitmap.h> -#include <linux/sched.h> -#include <linux/pid.h> -#include <linux/fs.h> -#include <linux/mm.h> -#include <linux/debugfs.h> -#include <linux/slab.h> -#include <linux/idr.h> -#include <linux/sched/mm.h> -#include <linux/mmu_context.h> -#include <asm/cputable.h> -#include <asm/current.h> -#include <asm/copro.h> - -#include "cxl.h" - -/* - * Allocates space for a CXL context. - */ -struct cxl_context *cxl_context_alloc(void) -{ - return kzalloc(sizeof(struct cxl_context), GFP_KERNEL); -} - -/* - * Initialises a CXL context. - */ -int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master) -{ - int i; - - ctx->afu = afu; - ctx->master = master; - ctx->pid = NULL; /* Set in start work ioctl */ - mutex_init(&ctx->mapping_lock); - ctx->mapping = NULL; - ctx->tidr = 0; - ctx->assign_tidr = false; - - if (cxl_is_power8()) { - spin_lock_init(&ctx->sste_lock); - - /* - * Allocate the segment table before we put it in the IDR so that we - * can always access it when dereferenced from IDR. For the same - * reason, the segment table is only destroyed after the context is - * removed from the IDR. Access to this in the IOCTL is protected by - * Linux filesystem semantics (can't IOCTL until open is complete). - */ - i = cxl_alloc_sst(ctx); - if (i) - return i; - } - - INIT_WORK(&ctx->fault_work, cxl_handle_fault); - - init_waitqueue_head(&ctx->wq); - spin_lock_init(&ctx->lock); - - ctx->irq_bitmap = NULL; - ctx->pending_irq = false; - ctx->pending_fault = false; - ctx->pending_afu_err = false; - - INIT_LIST_HEAD(&ctx->irq_names); - - /* - * When we have to destroy all contexts in cxl_context_detach_all() we - * end up with afu_release_irqs() called from inside a - * idr_for_each_entry(). Hence we need to make sure that anything - * dereferenced from this IDR is ok before we allocate the IDR here. - * This clears out the IRQ ranges to ensure this. 
- */ - for (i = 0; i < CXL_IRQ_RANGES; i++) - ctx->irqs.range[i] = 0; - - mutex_init(&ctx->status_mutex); - - ctx->status = OPENED; - - /* - * Allocating IDR! We better make sure everything's setup that - * dereferences from it. - */ - mutex_lock(&afu->contexts_lock); - idr_preload(GFP_KERNEL); - i = idr_alloc(&ctx->afu->contexts_idr, ctx, 0, - ctx->afu->num_procs, GFP_NOWAIT); - idr_preload_end(); - mutex_unlock(&afu->contexts_lock); - if (i < 0) - return i; - - ctx->pe = i; - if (cpu_has_feature(CPU_FTR_HVMODE)) { - ctx->elem = &ctx->afu->native->spa[i]; - ctx->external_pe = ctx->pe; - } else { - ctx->external_pe = -1; /* assigned when attaching */ - } - ctx->pe_inserted = false; - - /* - * take a ref on the afu so that it stays alive at-least till - * this context is reclaimed inside reclaim_ctx. - */ - cxl_afu_get(afu); - return 0; -} - -void cxl_context_set_mapping(struct cxl_context *ctx, - struct address_space *mapping) -{ - mutex_lock(&ctx->mapping_lock); - ctx->mapping = mapping; - mutex_unlock(&ctx->mapping_lock); -} - -static vm_fault_t cxl_mmap_fault(struct vm_fault *vmf) -{ - struct vm_area_struct *vma = vmf->vma; - struct cxl_context *ctx = vma->vm_file->private_data; - u64 area, offset; - vm_fault_t ret; - - offset = vmf->pgoff << PAGE_SHIFT; - - pr_devel("%s: pe: %i address: 0x%lx offset: 0x%llx\n", - __func__, ctx->pe, vmf->address, offset); - - if (ctx->afu->current_mode == CXL_MODE_DEDICATED) { - area = ctx->afu->psn_phys; - if (offset >= ctx->afu->adapter->ps_size) - return VM_FAULT_SIGBUS; - } else { - area = ctx->psn_phys; - if (offset >= ctx->psn_size) - return VM_FAULT_SIGBUS; - } - - mutex_lock(&ctx->status_mutex); - - if (ctx->status != STARTED) { - mutex_unlock(&ctx->status_mutex); - pr_devel("%s: Context not started, failing problem state access\n", __func__); - if (ctx->mmio_err_ff) { - if (!ctx->ff_page) { - ctx->ff_page = alloc_page(GFP_USER); - if (!ctx->ff_page) - return VM_FAULT_OOM; - memset(page_address(ctx->ff_page), 0xff, 
PAGE_SIZE); - } - get_page(ctx->ff_page); - vmf->page = ctx->ff_page; - vma->vm_page_prot = pgprot_cached(vma->vm_page_prot); - return 0; - } - return VM_FAULT_SIGBUS; - } - - ret = vmf_insert_pfn(vma, vmf->address, (area + offset) >> PAGE_SHIFT); - - mutex_unlock(&ctx->status_mutex); - - return ret; -} - -static const struct vm_operations_struct cxl_mmap_vmops = { - .fault = cxl_mmap_fault, -}; - -/* - * Map a per-context mmio space into the given vma. - */ -int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma) -{ - u64 start = vma->vm_pgoff << PAGE_SHIFT; - u64 len = vma->vm_end - vma->vm_start; - - if (ctx->afu->current_mode == CXL_MODE_DEDICATED) { - if (start + len > ctx->afu->adapter->ps_size) - return -EINVAL; - - if (cxl_is_power9()) { - /* - * Make sure there is a valid problem state - * area space for this AFU. - */ - if (ctx->master && !ctx->afu->psa) { - pr_devel("AFU doesn't support mmio space\n"); - return -EINVAL; - } - - /* Can't mmap until the AFU is enabled */ - if (!ctx->afu->enabled) - return -EBUSY; - } - } else { - if (start + len > ctx->psn_size) - return -EINVAL; - - /* Make sure there is a valid per process space for this AFU */ - if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) { - pr_devel("AFU doesn't support mmio space\n"); - return -EINVAL; - } - - /* Can't mmap until the AFU is enabled */ - if (!ctx->afu->enabled) - return -EBUSY; - } - - pr_devel("%s: mmio physical: %llx pe: %i master:%i\n", __func__, - ctx->psn_phys, ctx->pe , ctx->master); - - vm_flags_set(vma, VM_IO | VM_PFNMAP); - vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - vma->vm_ops = &cxl_mmap_vmops; - return 0; -} - -/* - * Detach a context from the hardware. This disables interrupts and doesn't - * return until all outstanding interrupts for this context have completed. The - * hardware should no longer access *ctx after this has returned. 
- */ -int __detach_context(struct cxl_context *ctx) -{ - enum cxl_context_status status; - - mutex_lock(&ctx->status_mutex); - status = ctx->status; - ctx->status = CLOSED; - mutex_unlock(&ctx->status_mutex); - if (status != STARTED) - return -EBUSY; - - /* Only warn if we detached while the link was OK. - * If detach fails when hw is down, we don't care. - */ - WARN_ON(cxl_ops->detach_process(ctx) && - cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)); - flush_work(&ctx->fault_work); /* Only needed for dedicated process */ - - /* - * Wait until no further interrupts are presented by the PSL - * for this context. - */ - if (cxl_ops->irq_wait) - cxl_ops->irq_wait(ctx); - - /* release the reference to the group leader and mm handling pid */ - put_pid(ctx->pid); - - cxl_ctx_put(); - - /* Decrease the attached context count on the adapter */ - cxl_adapter_context_put(ctx->afu->adapter); - - /* Decrease the mm count on the context */ - cxl_context_mm_count_put(ctx); - if (ctx->mm) - mm_context_remove_copro(ctx->mm); - ctx->mm = NULL; - - return 0; -} - -/* - * Detach the given context from the AFU. This doesn't actually - * free the context but it should stop the context running in hardware - * (ie. prevent this context from generating any further interrupts - * so that it can be freed). - */ -void cxl_context_detach(struct cxl_context *ctx) -{ - int rc; - - rc = __detach_context(ctx); - if (rc) - return; - - afu_release_irqs(ctx, ctx); - wake_up_all(&ctx->wq); -} - -/* - * Detach all contexts on the given AFU. - */ -void cxl_context_detach_all(struct cxl_afu *afu) -{ - struct cxl_context *ctx; - int tmp; - - mutex_lock(&afu->contexts_lock); - idr_for_each_entry(&afu->contexts_idr, ctx, tmp) { - /* - * Anything done in here needs to be setup before the IDR is - * created and torn down after the IDR removed - */ - cxl_context_detach(ctx); - - /* - * We are force detaching - remove any active PSA mappings so - * userspace cannot interfere with the card if it comes back. 
- * Easiest way to exercise this is to unbind and rebind the - * driver via sysfs while it is in use. - */ - mutex_lock(&ctx->mapping_lock); - if (ctx->mapping) - unmap_mapping_range(ctx->mapping, 0, 0, 1); - mutex_unlock(&ctx->mapping_lock); - } - mutex_unlock(&afu->contexts_lock); -} - -static void reclaim_ctx(struct rcu_head *rcu) -{ - struct cxl_context *ctx = container_of(rcu, struct cxl_context, rcu); - - if (cxl_is_power8()) - free_page((u64)ctx->sstp); - if (ctx->ff_page) - __free_page(ctx->ff_page); - ctx->sstp = NULL; - - bitmap_free(ctx->irq_bitmap); - - /* Drop ref to the afu device taken during cxl_context_init */ - cxl_afu_put(ctx->afu); - - kfree(ctx); -} - -void cxl_context_free(struct cxl_context *ctx) -{ - if (ctx->kernelapi && ctx->mapping) - cxl_release_mapping(ctx); - mutex_lock(&ctx->afu->contexts_lock); - idr_remove(&ctx->afu->contexts_idr, ctx->pe); - mutex_unlock(&ctx->afu->contexts_lock); - call_rcu(&ctx->rcu, reclaim_ctx); -} - -void cxl_context_mm_count_get(struct cxl_context *ctx) -{ - if (ctx->mm) - mmgrab(ctx->mm); -} - -void cxl_context_mm_count_put(struct cxl_context *ctx) -{ - if (ctx->mm) - mmdrop(ctx->mm); -} diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h deleted file mode 100644 index 0562071cdd4a..000000000000 --- a/drivers/misc/cxl/cxl.h +++ /dev/null @@ -1,1134 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * Copyright 2014 IBM Corp. 
- */ - -#ifndef _CXL_H_ -#define _CXL_H_ - -#include <linux/interrupt.h> -#include <linux/semaphore.h> -#include <linux/device.h> -#include <linux/types.h> -#include <linux/cdev.h> -#include <linux/pid.h> -#include <linux/io.h> -#include <linux/pci.h> -#include <linux/fs.h> -#include <asm/cputable.h> -#include <asm/mmu.h> -#include <asm/reg.h> -#include <misc/cxl-base.h> - -#include <misc/cxl.h> -#include <uapi/misc/cxl.h> - -extern uint cxl_verbose; - -struct property; - -#define CXL_TIMEOUT 5 - -/* - * Bump version each time a user API change is made, whether it is - * backwards compatible ot not. - */ -#define CXL_API_VERSION 3 -#define CXL_API_VERSION_COMPATIBLE 1 - -/* - * Opaque types to avoid accidentally passing registers for the wrong MMIO - * - * At the end of the day, I'm not married to using typedef here, but it might - * (and has!) help avoid bugs like mixing up CXL_PSL_CtxTime and - * CXL_PSL_CtxTime_An, or calling cxl_p1n_write instead of cxl_p1_write. - * - * I'm quite happy if these are changed back to #defines before upstreaming, it - * should be little more than a regexp search+replace operation in this file. - */ -typedef struct { - const int x; -} cxl_p1_reg_t; -typedef struct { - const int x; -} cxl_p1n_reg_t; -typedef struct { - const int x; -} cxl_p2n_reg_t; -#define cxl_reg_off(reg) \ - (reg.x) - -/* Memory maps. 
Ref CXL Appendix A */ - -/* PSL Privilege 1 Memory Map */ -/* Configuration and Control area - CAIA 1&2 */ -static const cxl_p1_reg_t CXL_PSL_CtxTime = {0x0000}; -static const cxl_p1_reg_t CXL_PSL_ErrIVTE = {0x0008}; -static const cxl_p1_reg_t CXL_PSL_KEY1 = {0x0010}; -static const cxl_p1_reg_t CXL_PSL_KEY2 = {0x0018}; -static const cxl_p1_reg_t CXL_PSL_Control = {0x0020}; -/* Downloading */ -static const cxl_p1_reg_t CXL_PSL_DLCNTL = {0x0060}; -static const cxl_p1_reg_t CXL_PSL_DLADDR = {0x0068}; - -/* PSL Lookaside Buffer Management Area - CAIA 1 */ -static const cxl_p1_reg_t CXL_PSL_LBISEL = {0x0080}; -static const cxl_p1_reg_t CXL_PSL_SLBIE = {0x0088}; -static const cxl_p1_reg_t CXL_PSL_SLBIA = {0x0090}; -static const cxl_p1_reg_t CXL_PSL_TLBIE = {0x00A0}; -static const cxl_p1_reg_t CXL_PSL_TLBIA = {0x00A8}; -static const cxl_p1_reg_t CXL_PSL_AFUSEL = {0x00B0}; - -/* 0x00C0:7EFF Implementation dependent area */ -/* PSL registers - CAIA 1 */ -static const cxl_p1_reg_t CXL_PSL_FIR1 = {0x0100}; -static const cxl_p1_reg_t CXL_PSL_FIR2 = {0x0108}; -static const cxl_p1_reg_t CXL_PSL_Timebase = {0x0110}; -static const cxl_p1_reg_t CXL_PSL_VERSION = {0x0118}; -static const cxl_p1_reg_t CXL_PSL_RESLCKTO = {0x0128}; -static const cxl_p1_reg_t CXL_PSL_TB_CTLSTAT = {0x0140}; -static const cxl_p1_reg_t CXL_PSL_FIR_CNTL = {0x0148}; -static const cxl_p1_reg_t CXL_PSL_DSNDCTL = {0x0150}; -static const cxl_p1_reg_t CXL_PSL_SNWRALLOC = {0x0158}; -static const cxl_p1_reg_t CXL_PSL_TRACE = {0x0170}; -/* PSL registers - CAIA 2 */ -static const cxl_p1_reg_t CXL_PSL9_CONTROL = {0x0020}; -static const cxl_p1_reg_t CXL_XSL9_INV = {0x0110}; -static const cxl_p1_reg_t CXL_XSL9_DBG = {0x0130}; -static const cxl_p1_reg_t CXL_XSL9_DEF = {0x0140}; -static const cxl_p1_reg_t CXL_XSL9_DSNCTL = {0x0168}; -static const cxl_p1_reg_t CXL_PSL9_FIR1 = {0x0300}; -static const cxl_p1_reg_t CXL_PSL9_FIR_MASK = {0x0308}; -static const cxl_p1_reg_t CXL_PSL9_Timebase = {0x0310}; -static const cxl_p1_reg_t 
CXL_PSL9_DEBUG = {0x0320}; -static const cxl_p1_reg_t CXL_PSL9_FIR_CNTL = {0x0348}; -static const cxl_p1_reg_t CXL_PSL9_DSNDCTL = {0x0350}; -static const cxl_p1_reg_t CXL_PSL9_TB_CTLSTAT = {0x0340}; -static const cxl_p1_reg_t CXL_PSL9_TRACECFG = {0x0368}; -static const cxl_p1_reg_t CXL_PSL9_APCDEDALLOC = {0x0378}; -static const cxl_p1_reg_t CXL_PSL9_APCDEDTYPE = {0x0380}; -static const cxl_p1_reg_t CXL_PSL9_TNR_ADDR = {0x0388}; -static const cxl_p1_reg_t CXL_PSL9_CTCCFG = {0x0390}; -static const cxl_p1_reg_t CXL_PSL9_GP_CT = {0x0398}; -static const cxl_p1_reg_t CXL_XSL9_IERAT = {0x0588}; -static const cxl_p1_reg_t CXL_XSL9_ILPP = {0x0590}; - -/* 0x7F00:7FFF Reserved PCIe MSI-X Pending Bit Array area */ -/* 0x8000:FFFF Reserved PCIe MSI-X Table Area */ - -/* PSL Slice Privilege 1 Memory Map */ -/* Configuration Area - CAIA 1&2 */ -static const cxl_p1n_reg_t CXL_PSL_SR_An = {0x00}; -static const cxl_p1n_reg_t CXL_PSL_LPID_An = {0x08}; -static const cxl_p1n_reg_t CXL_PSL_AMBAR_An = {0x10}; -static const cxl_p1n_reg_t CXL_PSL_SPOffset_An = {0x18}; -static const cxl_p1n_reg_t CXL_PSL_ID_An = {0x20}; -static const cxl_p1n_reg_t CXL_PSL_SERR_An = {0x28}; -/* Memory Management and Lookaside Buffer Management - CAIA 1*/ -static const cxl_p1n_reg_t CXL_PSL_SDR_An = {0x30}; -/* Memory Management and Lookaside Buffer Management - CAIA 1&2 */ -static const cxl_p1n_reg_t CXL_PSL_AMOR_An = {0x38}; -/* Pointer Area - CAIA 1&2 */ -static const cxl_p1n_reg_t CXL_HAURP_An = {0x80}; -static const cxl_p1n_reg_t CXL_PSL_SPAP_An = {0x88}; -static const cxl_p1n_reg_t CXL_PSL_LLCMD_An = {0x90}; -/* Control Area - CAIA 1&2 */ -static const cxl_p1n_reg_t CXL_PSL_SCNTL_An = {0xA0}; -static const cxl_p1n_reg_t CXL_PSL_CtxTime_An = {0xA8}; -static const cxl_p1n_reg_t CXL_PSL_IVTE_Offset_An = {0xB0}; -static const cxl_p1n_reg_t CXL_PSL_IVTE_Limit_An = {0xB8}; -/* 0xC0:FF Implementation Dependent Area - CAIA 1&2 */ -static const cxl_p1n_reg_t CXL_PSL_FIR_SLICE_An = {0xC0}; -static const 
cxl_p1n_reg_t CXL_AFU_DEBUG_An = {0xC8}; -/* 0xC0:FF Implementation Dependent Area - CAIA 1 */ -static const cxl_p1n_reg_t CXL_PSL_APCALLOC_A = {0xD0}; -static const cxl_p1n_reg_t CXL_PSL_COALLOC_A = {0xD8}; -static const cxl_p1n_reg_t CXL_PSL_RXCTL_A = {0xE0}; -static const cxl_p1n_reg_t CXL_PSL_SLICE_TRACE = {0xE8}; - -/* PSL Slice Privilege 2 Memory Map */ -/* Configuration and Control Area - CAIA 1&2 */ -static const cxl_p2n_reg_t CXL_PSL_PID_TID_An = {0x000}; -static const cxl_p2n_reg_t CXL_CSRP_An = {0x008}; -/* Configuration and Control Area - CAIA 1 */ -static const cxl_p2n_reg_t CXL_AURP0_An = {0x010}; -static const cxl_p2n_reg_t CXL_AURP1_An = {0x018}; -static const cxl_p2n_reg_t CXL_SSTP0_An = {0x020}; -static const cxl_p2n_reg_t CXL_SSTP1_An = {0x028}; -/* Configuration and Control Area - CAIA 1 */ -static const cxl_p2n_reg_t CXL_PSL_AMR_An = {0x030}; -/* Segment Lookaside Buffer Management - CAIA 1 */ -static const cxl_p2n_reg_t CXL_SLBIE_An = {0x040}; -static const cxl_p2n_reg_t CXL_SLBIA_An = {0x048}; -static const cxl_p2n_reg_t CXL_SLBI_Select_An = {0x050}; -/* Interrupt Registers - CAIA 1&2 */ -static const cxl_p2n_reg_t CXL_PSL_DSISR_An = {0x060}; -static const cxl_p2n_reg_t CXL_PSL_DAR_An = {0x068}; -static const cxl_p2n_reg_t CXL_PSL_DSR_An = {0x070}; -static const cxl_p2n_reg_t CXL_PSL_TFC_An = {0x078}; -static const cxl_p2n_reg_t CXL_PSL_PEHandle_An = {0x080}; -static const cxl_p2n_reg_t CXL_PSL_ErrStat_An = {0x088}; -/* AFU Registers - CAIA 1&2 */ -static const cxl_p2n_reg_t CXL_AFU_Cntl_An = {0x090}; -static const cxl_p2n_reg_t CXL_AFU_ERR_An = {0x098}; -/* Work Element Descriptor - CAIA 1&2 */ -static const cxl_p2n_reg_t CXL_PSL_WED_An = {0x0A0}; -/* 0x0C0:FFF Implementation Dependent Area */ - -#define CXL_PSL_SPAP_Addr 0x0ffffffffffff000ULL -#define CXL_PSL_SPAP_Size 0x0000000000000ff0ULL -#define CXL_PSL_SPAP_Size_Shift 4 -#define CXL_PSL_SPAP_V 0x0000000000000001ULL - -/****** CXL_PSL_Control 
****************************************************/ -#define CXL_PSL_Control_tb (0x1ull << (63-63)) -#define CXL_PSL_Control_Fr (0x1ull << (63-31)) -#define CXL_PSL_Control_Fs_MASK (0x3ull << (63-29)) -#define CXL_PSL_Control_Fs_Complete (0x3ull << (63-29)) - -/****** CXL_PSL_DLCNTL *****************************************************/ -#define CXL_PSL_DLCNTL_D (0x1ull << (63-28)) -#define CXL_PSL_DLCNTL_C (0x1ull << (63-29)) -#define CXL_PSL_DLCNTL_E (0x1ull << (63-30)) -#define CXL_PSL_DLCNTL_S (0x1ull << (63-31)) -#define CXL_PSL_DLCNTL_CE (CXL_PSL_DLCNTL_C | CXL_PSL_DLCNTL_E) -#define CXL_PSL_DLCNTL_DCES (CXL_PSL_DLCNTL_D | CXL_PSL_DLCNTL_CE | CXL_PSL_DLCNTL_S) - -/****** CXL_PSL_SR_An ******************************************************/ -#define CXL_PSL_SR_An_SF MSR_SF /* 64bit */ -#define CXL_PSL_SR_An_TA (1ull << (63-1)) /* Tags active, GA1: 0 */ -#define CXL_PSL_SR_An_HV MSR_HV /* Hypervisor, GA1: 0 */ -#define CXL_PSL_SR_An_XLAT_hpt (0ull << (63-6))/* Hashed page table (HPT) mode */ -#define CXL_PSL_SR_An_XLAT_roh (2ull << (63-6))/* Radix on HPT mode */ -#define CXL_PSL_SR_An_XLAT_ror (3ull << (63-6))/* Radix on Radix mode */ -#define CXL_PSL_SR_An_BOT (1ull << (63-10)) /* Use the in-memory segment table */ -#define CXL_PSL_SR_An_PR MSR_PR /* Problem state, GA1: 1 */ -#define CXL_PSL_SR_An_ISL (1ull << (63-53)) /* Ignore Segment Large Page */ -#define CXL_PSL_SR_An_TC (1ull << (63-54)) /* Page Table secondary hash */ -#define CXL_PSL_SR_An_US (1ull << (63-56)) /* User state, GA1: X */ -#define CXL_PSL_SR_An_SC (1ull << (63-58)) /* Segment Table secondary hash */ -#define CXL_PSL_SR_An_R MSR_DR /* Relocate, GA1: 1 */ -#define CXL_PSL_SR_An_MP (1ull << (63-62)) /* Master Process */ -#define CXL_PSL_SR_An_LE (1ull << (63-63)) /* Little Endian */ - -/****** CXL_PSL_ID_An ****************************************************/ -#define CXL_PSL_ID_An_F (1ull << (63-31)) -#define CXL_PSL_ID_An_L (1ull << (63-30)) - -/****** CXL_PSL_SERR_An 
****************************************************/ -#define CXL_PSL_SERR_An_afuto (1ull << (63-0)) -#define CXL_PSL_SERR_An_afudis (1ull << (63-1)) -#define CXL_PSL_SERR_An_afuov (1ull << (63-2)) -#define CXL_PSL_SERR_An_badsrc (1ull << (63-3)) -#define CXL_PSL_SERR_An_badctx (1ull << (63-4)) -#define CXL_PSL_SERR_An_llcmdis (1ull << (63-5)) -#define CXL_PSL_SERR_An_llcmdto (1ull << (63-6)) -#define CXL_PSL_SERR_An_afupar (1ull << (63-7)) -#define CXL_PSL_SERR_An_afudup (1ull << (63-8)) -#define CXL_PSL_SERR_An_IRQS ( \ - CXL_PSL_SERR_An_afuto | CXL_PSL_SERR_An_afudis | CXL_PSL_SERR_An_afuov | \ - CXL_PSL_SERR_An_badsrc | CXL_PSL_SERR_An_badctx | CXL_PSL_SERR_An_llcmdis | \ - CXL_PSL_SERR_An_llcmdto | CXL_PSL_SERR_An_afupar | CXL_PSL_SERR_An_afudup) -#define CXL_PSL_SERR_An_afuto_mask (1ull << (63-32)) -#define CXL_PSL_SERR_An_afudis_mask (1ull << (63-33)) -#define CXL_PSL_SERR_An_afuov_mask (1ull << (63-34)) -#define CXL_PSL_SERR_An_badsrc_mask (1ull << (63-35)) -#define CXL_PSL_SERR_An_badctx_mask (1ull << (63-36)) -#define CXL_PSL_SERR_An_llcmdis_mask (1ull << (63-37)) -#define CXL_PSL_SERR_An_llcmdto_mask (1ull << (63-38)) -#define CXL_PSL_SERR_An_afupar_mask (1ull << (63-39)) -#define CXL_PSL_SERR_An_afudup_mask (1ull << (63-40)) -#define CXL_PSL_SERR_An_IRQ_MASKS ( \ - CXL_PSL_SERR_An_afuto_mask | CXL_PSL_SERR_An_afudis_mask | CXL_PSL_SERR_An_afuov_mask | \ - CXL_PSL_SERR_An_badsrc_mask | CXL_PSL_SERR_An_badctx_mask | CXL_PSL_SERR_An_llcmdis_mask | \ - CXL_PSL_SERR_An_llcmdto_mask | CXL_PSL_SERR_An_afupar_mask | CXL_PSL_SERR_An_afudup_mask) - -#define CXL_PSL_SERR_An_AE (1ull << (63-30)) - -/****** CXL_PSL_SCNTL_An ****************************************************/ -#define CXL_PSL_SCNTL_An_CR (0x1ull << (63-15)) -/* Programming Modes: */ -#define CXL_PSL_SCNTL_An_PM_MASK (0xffffull << (63-31)) -#define CXL_PSL_SCNTL_An_PM_Shared (0x0000ull << (63-31)) -#define CXL_PSL_SCNTL_An_PM_OS (0x0001ull << (63-31)) -#define CXL_PSL_SCNTL_An_PM_Process (0x0002ull 
<< (63-31)) -#define CXL_PSL_SCNTL_An_PM_AFU (0x0004ull << (63-31)) -#define CXL_PSL_SCNTL_An_PM_AFU_PBT (0x0104ull << (63-31)) -/* Purge Status (ro) */ -#define CXL_PSL_SCNTL_An_Ps_MASK (0x3ull << (63-39)) -#define CXL_PSL_SCNTL_An_Ps_Pending (0x1ull << (63-39)) -#define CXL_PSL_SCNTL_An_Ps_Complete (0x3ull << (63-39)) -/* Purge */ -#define CXL_PSL_SCNTL_An_Pc (0x1ull << (63-48)) -/* Suspend Status (ro) */ -#define CXL_PSL_SCNTL_An_Ss_MASK (0x3ull << (63-55)) -#define CXL_PSL_SCNTL_An_Ss_Pending (0x1ull << (63-55)) -#define CXL_PSL_SCNTL_An_Ss_Complete (0x3ull << (63-55)) -/* Suspend Control */ -#define CXL_PSL_SCNTL_An_Sc (0x1ull << (63-63)) - -/* AFU Slice Enable Status (ro) */ -#define CXL_AFU_Cntl_An_ES_MASK (0x7ull << (63-2)) -#define CXL_AFU_Cntl_An_ES_Disabled (0x0ull << (63-2)) -#define CXL_AFU_Cntl_An_ES_Enabled (0x4ull << (63-2)) -/* AFU Slice Enable */ -#define CXL_AFU_Cntl_An_E (0x1ull << (63-3)) -/* AFU Slice Reset status (ro) */ -#define CXL_AFU_Cntl_An_RS_MASK (0x3ull << (63-5)) -#define CXL_AFU_Cntl_An_RS_Pending (0x1ull << (63-5)) -#define CXL_AFU_Cntl_An_RS_Complete (0x2ull << (63-5)) -/* AFU Slice Reset */ -#define CXL_AFU_Cntl_An_RA (0x1ull << (63-7)) - -/****** CXL_SSTP0/1_An ******************************************************/ -/* These top bits are for the segment that CONTAINS the segment table */ -#define CXL_SSTP0_An_B_SHIFT SLB_VSID_SSIZE_SHIFT -#define CXL_SSTP0_An_KS (1ull << (63-2)) -#define CXL_SSTP0_An_KP (1ull << (63-3)) -#define CXL_SSTP0_An_N (1ull << (63-4)) -#define CXL_SSTP0_An_L (1ull << (63-5)) -#define CXL_SSTP0_An_C (1ull << (63-6)) -#define CXL_SSTP0_An_TA (1ull << (63-7)) -#define CXL_SSTP0_An_LP_SHIFT (63-9) /* 2 Bits */ -/* And finally, the virtual address & size of the segment table: */ -#define CXL_SSTP0_An_SegTableSize_SHIFT (63-31) /* 12 Bits */ -#define CXL_SSTP0_An_SegTableSize_MASK \ - (((1ull << 12) - 1) << CXL_SSTP0_An_SegTableSize_SHIFT) -#define CXL_SSTP0_An_STVA_U_MASK ((1ull << (63-49))-1) -#define 
CXL_SSTP1_An_STVA_L_MASK (~((1ull << (63-55))-1)) -#define CXL_SSTP1_An_V (1ull << (63-63)) - -/****** CXL_PSL_SLBIE_[An] - CAIA 1 **************************************************/ -/* write: */ -#define CXL_SLBIE_C PPC_BIT(36) /* Class */ -#define CXL_SLBIE_SS PPC_BITMASK(37, 38) /* Segment Size */ -#define CXL_SLBIE_SS_SHIFT PPC_BITLSHIFT(38) -#define CXL_SLBIE_TA PPC_BIT(38) /* Tags Active */ -/* read: */ -#define CXL_SLBIE_MAX PPC_BITMASK(24, 31) -#define CXL_SLBIE_PENDING PPC_BITMASK(56, 63) - -/****** Common to all CXL_TLBIA/SLBIA_[An] - CAIA 1 **********************************/ -#define CXL_TLB_SLB_P (1ull) /* Pending (read) */ - -/****** Common to all CXL_TLB/SLB_IA/IE_[An] registers - CAIA 1 **********************/ -#define CXL_TLB_SLB_IQ_ALL (0ull) /* Inv qualifier */ -#define CXL_TLB_SLB_IQ_LPID (1ull) /* Inv qualifier */ -#define CXL_TLB_SLB_IQ_LPIDPID (3ull) /* Inv qualifier */ - -/****** CXL_PSL_AFUSEL ******************************************************/ -#define CXL_PSL_AFUSEL_A (1ull << (63-55)) /* Adapter wide invalidates affect all AFUs */ - -/****** CXL_PSL_DSISR_An - CAIA 1 ****************************************************/ -#define CXL_PSL_DSISR_An_DS (1ull << (63-0)) /* Segment not found */ -#define CXL_PSL_DSISR_An_DM (1ull << (63-1)) /* PTE not found (See also: M) or protection fault */ -#define CXL_PSL_DSISR_An_ST (1ull << (63-2)) /* Segment Table PTE not found */ -#define CXL_PSL_DSISR_An_UR (1ull << (63-3)) /* AURP PTE not found */ -#define CXL_PSL_DSISR_TRANS (CXL_PSL_DSISR_An_DS | CXL_PSL_DSISR_An_DM | CXL_PSL_DSISR_An_ST | CXL_PSL_DSISR_An_UR) -#define CXL_PSL_DSISR_An_PE (1ull << (63-4)) /* PSL Error (implementation specific) */ -#define CXL_PSL_DSISR_An_AE (1ull << (63-5)) /* AFU Error */ -#define CXL_PSL_DSISR_An_OC (1ull << (63-6)) /* OS Context Warning */ -#define CXL_PSL_DSISR_PENDING (CXL_PSL_DSISR_TRANS | CXL_PSL_DSISR_An_PE | CXL_PSL_DSISR_An_AE | CXL_PSL_DSISR_An_OC) -/* NOTE: Bits 32:63 are undefined if DSISR[DS] = 
1 */ -#define CXL_PSL_DSISR_An_M DSISR_NOHPTE /* PTE not found */ -#define CXL_PSL_DSISR_An_P DSISR_PROTFAULT /* Storage protection violation */ -#define CXL_PSL_DSISR_An_A (1ull << (63-37)) /* AFU lock access to write through or cache inhibited storage */ -#define CXL_PSL_DSISR_An_S DSISR_ISSTORE /* Access was afu_wr or afu_zero */ -#define CXL_PSL_DSISR_An_K DSISR_KEYFAULT /* Access not permitted by virtual page class key protection */ - -/****** CXL_PSL_DSISR_An - CAIA 2 ****************************************************/ -#define CXL_PSL9_DSISR_An_TF (1ull << (63-3)) /* Translation fault */ -#define CXL_PSL9_DSISR_An_PE (1ull << (63-4)) /* PSL Error (implementation specific) */ -#define CXL_PSL9_DSISR_An_AE (1ull << (63-5)) /* AFU Error */ -#define CXL_PSL9_DSISR_An_OC (1ull << (63-6)) /* OS Context Warning */ -#define CXL_PSL9_DSISR_An_S (1ull << (63-38)) /* TF for a write operation */ -#define CXL_PSL9_DSISR_PENDING (CXL_PSL9_DSISR_An_TF | CXL_PSL9_DSISR_An_PE | CXL_PSL9_DSISR_An_AE | CXL_PSL9_DSISR_An_OC) -/* - * NOTE: Bits 56:63 (Checkout Response Status) are valid when DSISR_An[TF] = 1 - * Status (0:7) Encoding - */ -#define CXL_PSL9_DSISR_An_CO_MASK 0x00000000000000ffULL -#define CXL_PSL9_DSISR_An_SF 0x0000000000000080ULL /* Segment Fault 0b10000000 */ -#define CXL_PSL9_DSISR_An_PF_SLR 0x0000000000000088ULL /* PTE not found (Single Level Radix) 0b10001000 */ -#define CXL_PSL9_DSISR_An_PF_RGC 0x000000000000008CULL /* PTE not found (Radix Guest (child)) 0b10001100 */ -#define CXL_PSL9_DSISR_An_PF_RGP 0x0000000000000090ULL /* PTE not found (Radix Guest (parent)) 0b10010000 */ -#define CXL_PSL9_DSISR_An_PF_HRH 0x0000000000000094ULL /* PTE not found (HPT/Radix Host) 0b10010100 */ -#define CXL_PSL9_DSISR_An_PF_STEG 0x000000000000009CULL /* PTE not found (STEG VA) 0b10011100 */ -#define CXL_PSL9_DSISR_An_URTCH 0x00000000000000B4ULL /* Unsupported Radix Tree Configuration 0b10110100 */ - -/****** CXL_PSL_TFC_An 
******************************************************/ -#define CXL_PSL_TFC_An_A (1ull << (63-28)) /* Acknowledge non-translation fault */ -#define CXL_PSL_TFC_An_C (1ull << (63-29)) /* Continue (abort transaction) */ -#define CXL_PSL_TFC_An_AE (1ull << (63-30)) /* Restart PSL with address error */ -#define CXL_PSL_TFC_An_R (1ull << (63-31)) /* Restart PSL transaction */ - -/****** CXL_PSL_DEBUG *****************************************************/ -#define CXL_PSL_DEBUG_CDC (1ull << (63-27)) /* Coherent Data cache support */ - -/****** CXL_XSL9_IERAT_ERAT - CAIA 2 **********************************/ -#define CXL_XSL9_IERAT_MLPID (1ull << (63-0)) /* Match LPID */ -#define CXL_XSL9_IERAT_MPID (1ull << (63-1)) /* Match PID */ -#define CXL_XSL9_IERAT_PRS (1ull << (63-4)) /* PRS bit for Radix invalidations */ -#define CXL_XSL9_IERAT_INVR (1ull << (63-3)) /* Invalidate Radix */ -#define CXL_XSL9_IERAT_IALL (1ull << (63-8)) /* Invalidate All */ -#define CXL_XSL9_IERAT_IINPROG (1ull << (63-63)) /* Invalidate in progress */ - -/* cxl_process_element->software_status */ -#define CXL_PE_SOFTWARE_STATE_V (1ul << (31 - 0)) /* Valid */ -#define CXL_PE_SOFTWARE_STATE_C (1ul << (31 - 29)) /* Complete */ -#define CXL_PE_SOFTWARE_STATE_S (1ul << (31 - 30)) /* Suspend */ -#define CXL_PE_SOFTWARE_STATE_T (1ul << (31 - 31)) /* Terminate */ - -/****** CXL_PSL_RXCTL_An (Implementation Specific) ************************** - * Controls AFU Hang Pulse, which sets the timeout for the AFU to respond to - * the PSL for any response (except MMIO). Timeouts will occur between 1x to 2x - * of the hang pulse frequency. 
- */ -#define CXL_PSL_RXCTL_AFUHP_4S 0x7000000000000000ULL - -/* SPA->sw_command_status */ -#define CXL_SPA_SW_CMD_MASK 0xffff000000000000ULL -#define CXL_SPA_SW_CMD_TERMINATE 0x0001000000000000ULL -#define CXL_SPA_SW_CMD_REMOVE 0x0002000000000000ULL -#define CXL_SPA_SW_CMD_SUSPEND 0x0003000000000000ULL -#define CXL_SPA_SW_CMD_RESUME 0x0004000000000000ULL -#define CXL_SPA_SW_CMD_ADD 0x0005000000000000ULL -#define CXL_SPA_SW_CMD_UPDATE 0x0006000000000000ULL -#define CXL_SPA_SW_STATE_MASK 0x0000ffff00000000ULL -#define CXL_SPA_SW_STATE_TERMINATED 0x0000000100000000ULL -#define CXL_SPA_SW_STATE_REMOVED 0x0000000200000000ULL -#define CXL_SPA_SW_STATE_SUSPENDED 0x0000000300000000ULL -#define CXL_SPA_SW_STATE_RESUMED 0x0000000400000000ULL -#define CXL_SPA_SW_STATE_ADDED 0x0000000500000000ULL -#define CXL_SPA_SW_STATE_UPDATED 0x0000000600000000ULL -#define CXL_SPA_SW_PSL_ID_MASK 0x00000000ffff0000ULL -#define CXL_SPA_SW_LINK_MASK 0x000000000000ffffULL - -#define CXL_MAX_SLICES 4 -#define MAX_AFU_MMIO_REGS 3 - -#define CXL_MODE_TIME_SLICED 0x4 -#define CXL_SUPPORTED_MODES (CXL_MODE_DEDICATED | CXL_MODE_DIRECTED) - -#define CXL_DEV_MINORS 13 /* 1 control + 4 AFUs * 3 (dedicated/master/shared) */ -#define CXL_CARD_MINOR(adapter) (adapter->adapter_num * CXL_DEV_MINORS) -#define CXL_DEVT_ADAPTER(dev) (MINOR(dev) / CXL_DEV_MINORS) - -#define CXL_PSL9_TRACEID_MAX 0xAU -#define CXL_PSL9_TRACESTATE_FIN 0x3U - -enum cxl_context_status { - CLOSED, - OPENED, - STARTED -}; - -enum prefault_modes { - CXL_PREFAULT_NONE, - CXL_PREFAULT_WED, - CXL_PREFAULT_ALL, -}; - -enum cxl_attrs { - CXL_ADAPTER_ATTRS, - CXL_AFU_MASTER_ATTRS, - CXL_AFU_ATTRS, -}; - -struct cxl_sste { - __be64 esid_data; - __be64 vsid_data; -}; - -#define to_cxl_adapter(d) container_of(d, struct cxl, dev) -#define to_cxl_afu(d) container_of(d, struct cxl_afu, dev) - -struct cxl_afu_native { - void __iomem *p1n_mmio; - void __iomem *afu_desc_mmio; - irq_hw_number_t psl_hwirq; - unsigned int psl_virq; - struct mutex 
spa_mutex; - /* - * Only the first part of the SPA is used for the process element - * linked list. The only other part that software needs to worry about - * is sw_command_status, which we store a separate pointer to. - * Everything else in the SPA is only used by hardware - */ - struct cxl_process_element *spa; - __be64 *sw_command_status; - unsigned int spa_size; - int spa_order; - int spa_max_procs; - u64 pp_offset; -}; - -struct cxl_afu_guest { - struct cxl_afu *parent; - u64 handle; - phys_addr_t p2n_phys; - u64 p2n_size; - int max_ints; - bool handle_err; - struct delayed_work work_err; - int previous_state; -}; - -struct cxl_afu { - struct cxl_afu_native *native; - struct cxl_afu_guest *guest; - irq_hw_number_t serr_hwirq; - unsigned int serr_virq; - char *psl_irq_name; - char *err_irq_name; - void __iomem *p2n_mmio; - phys_addr_t psn_phys; - u64 pp_size; - - struct cxl *adapter; - struct device dev; - struct cdev afu_cdev_s, afu_cdev_m, afu_cdev_d; - struct device *chardev_s, *chardev_m, *chardev_d; - struct idr contexts_idr; - struct dentry *debugfs; - struct mutex contexts_lock; - spinlock_t afu_cntl_lock; - - /* -1: AFU deconfigured/locked, >= 0: number of readers */ - atomic_t configured_state; - - /* AFU error buffer fields and bin attribute for sysfs */ - u64 eb_len, eb_offset; - struct bin_attribute attr_eb; - - /* pointer to the vphb */ - struct pci_controller *phb; - - int pp_irqs; - int irqs_max; - int num_procs; - int max_procs_virtualised; - int slice; - int modes_supported; - int current_mode; - int crs_num; - u64 crs_len; - u64 crs_offset; - struct list_head crs; - enum prefault_modes prefault_mode; - bool psa; - bool pp_psa; - bool enabled; -}; - - -struct cxl_irq_name { - struct list_head list; - char *name; -}; - -struct irq_avail { - irq_hw_number_t offset; - irq_hw_number_t range; - unsigned long *bitmap; -}; - -/* - * This is a cxl context. If the PSL is in dedicated mode, there will be one - * of these per AFU. 
If in AFU directed there can be lots of these. - */ -struct cxl_context { - struct cxl_afu *afu; - - /* Problem state MMIO */ - phys_addr_t psn_phys; - u64 psn_size; - - /* Used to unmap any mmaps when force detaching */ - struct address_space *mapping; - struct mutex mapping_lock; - struct page *ff_page; - bool mmio_err_ff; - bool kernelapi; - - spinlock_t sste_lock; /* Protects segment table entries */ - struct cxl_sste *sstp; - u64 sstp0, sstp1; - unsigned int sst_size, sst_lru; - - wait_queue_head_t wq; - /* use mm context associated with this pid for ds faults */ - struct pid *pid; - spinlock_t lock; /* Protects pending_irq_mask, pending_fault and fault_addr */ - /* Only used in PR mode */ - u64 process_token; - - /* driver private data */ - void *priv; - - unsigned long *irq_bitmap; /* Accessed from IRQ context */ - struct cxl_irq_ranges irqs; - struct list_head irq_names; - u64 fault_addr; - u64 fault_dsisr; - u64 afu_err; - - /* - * This status and it's lock pretects start and detach context - * from racing. It also prevents detach from racing with - * itself - */ - enum cxl_context_status status; - struct mutex status_mutex; - - - /* XXX: Is it possible to need multiple work items at once? */ - struct work_struct fault_work; - u64 dsisr; - u64 dar; - - struct cxl_process_element *elem; - - /* - * pe is the process element handle, assigned by this driver when the - * context is initialized. - * - * external_pe is the PE shown outside of cxl. - * On bare-metal, pe=external_pe, because we decide what the handle is. - * In a guest, we only find out about the pe used by pHyp when the - * context is attached, and that's the value we want to report outside - * of cxl. 
- */ - int pe; - int external_pe; - - u32 irq_count; - bool pe_inserted; - bool master; - bool kernel; - bool pending_irq; - bool pending_fault; - bool pending_afu_err; - - /* Used by AFU drivers for driver specific event delivery */ - struct cxl_afu_driver_ops *afu_driver_ops; - atomic_t afu_driver_events; - - struct rcu_head rcu; - - struct mm_struct *mm; - - u16 tidr; - bool assign_tidr; -}; - -struct cxl_irq_info; - -struct cxl_service_layer_ops { - int (*adapter_regs_init)(struct cxl *adapter, struct pci_dev *dev); - int (*invalidate_all)(struct cxl *adapter); - int (*afu_regs_init)(struct cxl_afu *afu); - int (*sanitise_afu_regs)(struct cxl_afu *afu); - int (*register_serr_irq)(struct cxl_afu *afu); - void (*release_serr_irq)(struct cxl_afu *afu); - irqreturn_t (*handle_interrupt)(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_info); - irqreturn_t (*fail_irq)(struct cxl_afu *afu, struct cxl_irq_info *irq_info); - int (*activate_dedicated_process)(struct cxl_afu *afu); - int (*attach_afu_directed)(struct cxl_context *ctx, u64 wed, u64 amr); - int (*attach_dedicated_process)(struct cxl_context *ctx, u64 wed, u64 amr); - void (*update_dedicated_ivtes)(struct cxl_context *ctx); - void (*debugfs_add_adapter_regs)(struct cxl *adapter, struct dentry *dir); - void (*debugfs_add_afu_regs)(struct cxl_afu *afu, struct dentry *dir); - void (*psl_irq_dump_registers)(struct cxl_context *ctx); - void (*err_irq_dump_registers)(struct cxl *adapter); - void (*debugfs_stop_trace)(struct cxl *adapter); - void (*write_timebase_ctrl)(struct cxl *adapter); - u64 (*timebase_read)(struct cxl *adapter); - int capi_mode; - bool needs_reset_before_disable; -}; - -struct cxl_native { - u64 afu_desc_off; - u64 afu_desc_size; - void __iomem *p1_mmio; - void __iomem *p2_mmio; - irq_hw_number_t err_hwirq; - unsigned int err_virq; - u64 ps_off; - bool no_data_cache; /* set if no data cache on the card */ - const struct cxl_service_layer_ops *sl_ops; -}; - -struct cxl_guest { - 
struct platform_device *pdev; - int irq_nranges; - struct cdev cdev; - irq_hw_number_t irq_base_offset; - struct irq_avail *irq_avail; - spinlock_t irq_alloc_lock; - u64 handle; - char *status; - u16 vendor; - u16 device; - u16 subsystem_vendor; - u16 subsystem; -}; - -struct cxl { - struct cxl_native *native; - struct cxl_guest *guest; - spinlock_t afu_list_lock; - struct cxl_afu *afu[CXL_MAX_SLICES]; - struct device dev; - struct dentry *trace; - struct dentry *psl_err_chk; - struct dentry *debugfs; - char *irq_name; - struct bin_attribute cxl_attr; - int adapter_num; - int user_irqs; - u64 ps_size; - u16 psl_rev; - u16 base_image; - u8 vsec_status; - u8 caia_major; - u8 caia_minor; - u8 slices; - bool user_image_loaded; - bool perst_loads_image; - bool perst_select_user; - bool perst_same_image; - bool psl_timebase_synced; - bool tunneled_ops_supported; - - /* - * number of contexts mapped on to this card. Possible values are: - * >0: Number of contexts mapped and new one can be mapped. - * 0: No active contexts and new ones can be mapped. - * -1: No contexts mapped and new ones cannot be mapped. 
- */ - atomic_t contexts_num; -}; - -int cxl_pci_alloc_one_irq(struct cxl *adapter); -void cxl_pci_release_one_irq(struct cxl *adapter, int hwirq); -int cxl_pci_alloc_irq_ranges(struct cxl_irq_ranges *irqs, struct cxl *adapter, unsigned int num); -void cxl_pci_release_irq_ranges(struct cxl_irq_ranges *irqs, struct cxl *adapter); -int cxl_pci_setup_irq(struct cxl *adapter, unsigned int hwirq, unsigned int virq); -int cxl_update_image_control(struct cxl *adapter); -int cxl_pci_reset(struct cxl *adapter); -void cxl_pci_release_afu(struct device *dev); -ssize_t cxl_pci_read_adapter_vpd(struct cxl *adapter, void *buf, size_t len); - -/* common == phyp + powernv - CAIA 1&2 */ -struct cxl_process_element_common { - __be32 tid; - __be32 pid; - __be64 csrp; - union { - struct { - __be64 aurp0; - __be64 aurp1; - __be64 sstp0; - __be64 sstp1; - } psl8; /* CAIA 1 */ - struct { - u8 reserved2[8]; - u8 reserved3[8]; - u8 reserved4[8]; - u8 reserved5[8]; - } psl9; /* CAIA 2 */ - } u; - __be64 amr; - u8 reserved6[4]; - __be64 wed; -} __packed; - -/* just powernv - CAIA 1&2 */ -struct cxl_process_element { - __be64 sr; - __be64 SPOffset; - union { - __be64 sdr; /* CAIA 1 */ - u8 reserved1[8]; /* CAIA 2 */ - } u; - __be64 haurp; - __be32 ctxtime; - __be16 ivte_offsets[4]; - __be16 ivte_ranges[4]; - __be32 lpid; - struct cxl_process_element_common common; - __be32 software_state; -} __packed; - -static inline bool cxl_adapter_link_ok(struct cxl *cxl, struct cxl_afu *afu) -{ - struct pci_dev *pdev; - - if (cpu_has_feature(CPU_FTR_HVMODE)) { - pdev = to_pci_dev(cxl->dev.parent); - return !pci_channel_offline(pdev); - } - return true; -} - -static inline void __iomem *_cxl_p1_addr(struct cxl *cxl, cxl_p1_reg_t reg) -{ - WARN_ON(!cpu_has_feature(CPU_FTR_HVMODE)); - return cxl->native->p1_mmio + cxl_reg_off(reg); -} - -static inline void cxl_p1_write(struct cxl *cxl, cxl_p1_reg_t reg, u64 val) -{ - if (likely(cxl_adapter_link_ok(cxl, NULL))) - out_be64(_cxl_p1_addr(cxl, reg), val); -} - 
-static inline u64 cxl_p1_read(struct cxl *cxl, cxl_p1_reg_t reg) -{ - if (likely(cxl_adapter_link_ok(cxl, NULL))) - return in_be64(_cxl_p1_addr(cxl, reg)); - else - return ~0ULL; -} - -static inline void __iomem *_cxl_p1n_addr(struct cxl_afu *afu, cxl_p1n_reg_t reg) -{ - WARN_ON(!cpu_has_feature(CPU_FTR_HVMODE)); - return afu->native->p1n_mmio + cxl_reg_off(reg); -} - -static inline void cxl_p1n_write(struct cxl_afu *afu, cxl_p1n_reg_t reg, u64 val) -{ - if (likely(cxl_adapter_link_ok(afu->adapter, afu))) - out_be64(_cxl_p1n_addr(afu, reg), val); -} - -static inline u64 cxl_p1n_read(struct cxl_afu *afu, cxl_p1n_reg_t reg) -{ - if (likely(cxl_adapter_link_ok(afu->adapter, afu))) - return in_be64(_cxl_p1n_addr(afu, reg)); - else - return ~0ULL; -} - -static inline void __iomem *_cxl_p2n_addr(struct cxl_afu *afu, cxl_p2n_reg_t reg) -{ - return afu->p2n_mmio + cxl_reg_off(reg); -} - -static inline void cxl_p2n_write(struct cxl_afu *afu, cxl_p2n_reg_t reg, u64 val) -{ - if (likely(cxl_adapter_link_ok(afu->adapter, afu))) - out_be64(_cxl_p2n_addr(afu, reg), val); -} - -static inline u64 cxl_p2n_read(struct cxl_afu *afu, cxl_p2n_reg_t reg) -{ - if (likely(cxl_adapter_link_ok(afu->adapter, afu))) - return in_be64(_cxl_p2n_addr(afu, reg)); - else - return ~0ULL; -} - -static inline bool cxl_is_power8(void) -{ - if ((pvr_version_is(PVR_POWER8E)) || - (pvr_version_is(PVR_POWER8NVL)) || - (pvr_version_is(PVR_POWER8))) - return true; - return false; -} - -static inline bool cxl_is_power9(void) -{ - if (pvr_version_is(PVR_POWER9)) - return true; - return false; -} - -ssize_t cxl_pci_afu_read_err_buffer(struct cxl_afu *afu, char *buf, - loff_t off, size_t count); - - -struct cxl_calls { - void (*cxl_slbia)(struct mm_struct *mm); - struct module *owner; -}; -int register_cxl_calls(struct cxl_calls *calls); -void unregister_cxl_calls(struct cxl_calls *calls); -int cxl_update_properties(struct device_node *dn, struct property *new_prop); - -void cxl_remove_adapter_nr(struct cxl 
*adapter); - -void cxl_release_spa(struct cxl_afu *afu); - -dev_t cxl_get_dev(void); -int cxl_file_init(void); -void cxl_file_exit(void); -int cxl_register_adapter(struct cxl *adapter); -int cxl_register_afu(struct cxl_afu *afu); -int cxl_chardev_d_afu_add(struct cxl_afu *afu); -int cxl_chardev_m_afu_add(struct cxl_afu *afu); -int cxl_chardev_s_afu_add(struct cxl_afu *afu); -void cxl_chardev_afu_remove(struct cxl_afu *afu); - -void cxl_context_detach_all(struct cxl_afu *afu); -void cxl_context_free(struct cxl_context *ctx); -void cxl_context_detach(struct cxl_context *ctx); - -int cxl_sysfs_adapter_add(struct cxl *adapter); -void cxl_sysfs_adapter_remove(struct cxl *adapter); -int cxl_sysfs_afu_add(struct cxl_afu *afu); -void cxl_sysfs_afu_remove(struct cxl_afu *afu); -int cxl_sysfs_afu_m_add(struct cxl_afu *afu); -void cxl_sysfs_afu_m_remove(struct cxl_afu *afu); - -struct cxl *cxl_alloc_adapter(void); -struct cxl_afu *cxl_alloc_afu(struct cxl *adapter, int slice); -int cxl_afu_select_best_mode(struct cxl_afu *afu); - -int cxl_native_register_psl_irq(struct cxl_afu *afu); -void cxl_native_release_psl_irq(struct cxl_afu *afu); -int cxl_native_register_psl_err_irq(struct cxl *adapter); -void cxl_native_release_psl_err_irq(struct cxl *adapter); -int cxl_native_register_serr_irq(struct cxl_afu *afu); -void cxl_native_release_serr_irq(struct cxl_afu *afu); -int afu_register_irqs(struct cxl_context *ctx, u32 count); -void afu_release_irqs(struct cxl_context *ctx, void *cookie); -void afu_irq_name_free(struct cxl_context *ctx); - -int cxl_attach_afu_directed_psl9(struct cxl_context *ctx, u64 wed, u64 amr); -int cxl_attach_afu_directed_psl8(struct cxl_context *ctx, u64 wed, u64 amr); -int cxl_activate_dedicated_process_psl9(struct cxl_afu *afu); -int cxl_activate_dedicated_process_psl8(struct cxl_afu *afu); -int cxl_attach_dedicated_process_psl9(struct cxl_context *ctx, u64 wed, u64 amr); -int cxl_attach_dedicated_process_psl8(struct cxl_context *ctx, u64 wed, u64 amr); 
-void cxl_update_dedicated_ivtes_psl9(struct cxl_context *ctx); -void cxl_update_dedicated_ivtes_psl8(struct cxl_context *ctx); - -#ifdef CONFIG_DEBUG_FS - -void cxl_debugfs_init(void); -void cxl_debugfs_exit(void); -void cxl_debugfs_adapter_add(struct cxl *adapter); -void cxl_debugfs_adapter_remove(struct cxl *adapter); -void cxl_debugfs_afu_add(struct cxl_afu *afu); -void cxl_debugfs_afu_remove(struct cxl_afu *afu); -void cxl_debugfs_add_adapter_regs_psl9(struct cxl *adapter, struct dentry *dir); -void cxl_debugfs_add_adapter_regs_psl8(struct cxl *adapter, struct dentry *dir); -void cxl_debugfs_add_afu_regs_psl9(struct cxl_afu *afu, struct dentry *dir); -void cxl_debugfs_add_afu_regs_psl8(struct cxl_afu *afu, struct dentry *dir); - -#else /* CONFIG_DEBUG_FS */ - -static inline void __init cxl_debugfs_init(void) -{ -} - -static inline void cxl_debugfs_exit(void) -{ -} - -static inline void cxl_debugfs_adapter_add(struct cxl *adapter) -{ -} - -static inline void cxl_debugfs_adapter_remove(struct cxl *adapter) -{ -} - -static inline void cxl_debugfs_afu_add(struct cxl_afu *afu) -{ -} - -static inline void cxl_debugfs_afu_remove(struct cxl_afu *afu) -{ -} - -static inline void cxl_debugfs_add_adapter_regs_psl9(struct cxl *adapter, - struct dentry *dir) -{ -} - -static inline void cxl_debugfs_add_adapter_regs_psl8(struct cxl *adapter, - struct dentry *dir) -{ -} - -static inline void cxl_debugfs_add_afu_regs_psl9(struct cxl_afu *afu, struct dentry *dir) -{ -} - -static inline void cxl_debugfs_add_afu_regs_psl8(struct cxl_afu *afu, struct dentry *dir) -{ -} - -#endif /* CONFIG_DEBUG_FS */ - -void cxl_handle_fault(struct work_struct *work); -void cxl_prefault(struct cxl_context *ctx, u64 wed); -int cxl_handle_mm_fault(struct mm_struct *mm, u64 dsisr, u64 dar); - -struct cxl *get_cxl_adapter(int num); -int cxl_alloc_sst(struct cxl_context *ctx); -void cxl_dump_debug_buffer(void *addr, size_t size); - -void init_cxl_native(void); - -struct cxl_context 
*cxl_context_alloc(void); -int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master); -void cxl_context_set_mapping(struct cxl_context *ctx, - struct address_space *mapping); -void cxl_context_free(struct cxl_context *ctx); -int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma); -unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq, - irq_handler_t handler, void *cookie, const char *name); -void cxl_unmap_irq(unsigned int virq, void *cookie); -int __detach_context(struct cxl_context *ctx); - -/* - * This must match the layout of the H_COLLECT_CA_INT_INFO retbuf defined - * in PAPR. - * Field pid_tid is now 'reserved' because it's no more used on bare-metal. - * On a guest environment, PSL_PID_An is located on the upper 32 bits and - * PSL_TID_An register in the lower 32 bits. - */ -struct cxl_irq_info { - u64 dsisr; - u64 dar; - u64 dsr; - u64 reserved; - u64 afu_err; - u64 errstat; - u64 proc_handle; - u64 padding[2]; /* to match the expected retbuf size for plpar_hcall9 */ -}; - -void cxl_assign_psn_space(struct cxl_context *ctx); -int cxl_invalidate_all_psl9(struct cxl *adapter); -int cxl_invalidate_all_psl8(struct cxl *adapter); -irqreturn_t cxl_irq_psl9(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_info); -irqreturn_t cxl_irq_psl8(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_info); -irqreturn_t cxl_fail_irq_psl(struct cxl_afu *afu, struct cxl_irq_info *irq_info); -int cxl_register_one_irq(struct cxl *adapter, irq_handler_t handler, - void *cookie, irq_hw_number_t *dest_hwirq, - unsigned int *dest_virq, const char *name); - -int cxl_check_error(struct cxl_afu *afu); -int cxl_afu_slbia(struct cxl_afu *afu); -int cxl_data_cache_flush(struct cxl *adapter); -int cxl_afu_disable(struct cxl_afu *afu); -int cxl_psl_purge(struct cxl_afu *afu); -int cxl_calc_capp_routing(struct pci_dev *dev, u64 *chipid, - u32 *phb_index, u64 *capp_unit_id); -int cxl_slot_is_switched(struct pci_dev 
*dev); -int cxl_get_xsl9_dsnctl(struct pci_dev *dev, u64 capp_unit_id, u64 *reg); -u64 cxl_calculate_sr(bool master, bool kernel, bool real_mode, bool p9); - -void cxl_native_irq_dump_regs_psl9(struct cxl_context *ctx); -void cxl_native_irq_dump_regs_psl8(struct cxl_context *ctx); -void cxl_native_err_irq_dump_regs_psl8(struct cxl *adapter); -void cxl_native_err_irq_dump_regs_psl9(struct cxl *adapter); -int cxl_pci_vphb_add(struct cxl_afu *afu); -void cxl_pci_vphb_remove(struct cxl_afu *afu); -void cxl_release_mapping(struct cxl_context *ctx); - -extern struct pci_driver cxl_pci_driver; -extern struct platform_driver cxl_of_driver; -int afu_allocate_irqs(struct cxl_context *ctx, u32 count); - -int afu_open(struct inode *inode, struct file *file); -int afu_release(struct inode *inode, struct file *file); -long afu_ioctl(struct file *file, unsigned int cmd, unsigned long arg); -int afu_mmap(struct file *file, struct vm_area_struct *vm); -__poll_t afu_poll(struct file *file, struct poll_table_struct *poll); -ssize_t afu_read(struct file *file, char __user *buf, size_t count, loff_t *off); -extern const struct file_operations afu_fops; - -struct cxl *cxl_guest_init_adapter(struct device_node *np, struct platform_device *dev); -void cxl_guest_remove_adapter(struct cxl *adapter); -int cxl_of_read_adapter_handle(struct cxl *adapter, struct device_node *np); -int cxl_of_read_adapter_properties(struct cxl *adapter, struct device_node *np); -ssize_t cxl_guest_read_adapter_vpd(struct cxl *adapter, void *buf, size_t len); -ssize_t cxl_guest_read_afu_vpd(struct cxl_afu *afu, void *buf, size_t len); -int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_np); -void cxl_guest_remove_afu(struct cxl_afu *afu); -int cxl_of_read_afu_handle(struct cxl_afu *afu, struct device_node *afu_np); -int cxl_of_read_afu_properties(struct cxl_afu *afu, struct device_node *afu_np); -int cxl_guest_add_chardev(struct cxl *adapter); -void cxl_guest_remove_chardev(struct cxl 
*adapter); -void cxl_guest_reload_module(struct cxl *adapter); -int cxl_of_probe(struct platform_device *pdev); - -struct cxl_backend_ops { - struct module *module; - int (*adapter_reset)(struct cxl *adapter); - int (*alloc_one_irq)(struct cxl *adapter); - void (*release_one_irq)(struct cxl *adapter, int hwirq); - int (*alloc_irq_ranges)(struct cxl_irq_ranges *irqs, - struct cxl *adapter, unsigned int num); - void (*release_irq_ranges)(struct cxl_irq_ranges *irqs, - struct cxl *adapter); - int (*setup_irq)(struct cxl *adapter, unsigned int hwirq, - unsigned int virq); - irqreturn_t (*handle_psl_slice_error)(struct cxl_context *ctx, - u64 dsisr, u64 errstat); - irqreturn_t (*psl_interrupt)(int irq, void *data); - int (*ack_irq)(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask); - void (*irq_wait)(struct cxl_context *ctx); - int (*attach_process)(struct cxl_context *ctx, bool kernel, - u64 wed, u64 amr); - int (*detach_process)(struct cxl_context *ctx); - void (*update_ivtes)(struct cxl_context *ctx); - bool (*support_attributes)(const char *attr_name, enum cxl_attrs type); - bool (*link_ok)(struct cxl *cxl, struct cxl_afu *afu); - void (*release_afu)(struct device *dev); - ssize_t (*afu_read_err_buffer)(struct cxl_afu *afu, char *buf, - loff_t off, size_t count); - int (*afu_check_and_enable)(struct cxl_afu *afu); - int (*afu_activate_mode)(struct cxl_afu *afu, int mode); - int (*afu_deactivate_mode)(struct cxl_afu *afu, int mode); - int (*afu_reset)(struct cxl_afu *afu); - int (*afu_cr_read8)(struct cxl_afu *afu, int cr_idx, u64 offset, u8 *val); - int (*afu_cr_read16)(struct cxl_afu *afu, int cr_idx, u64 offset, u16 *val); - int (*afu_cr_read32)(struct cxl_afu *afu, int cr_idx, u64 offset, u32 *val); - int (*afu_cr_read64)(struct cxl_afu *afu, int cr_idx, u64 offset, u64 *val); - int (*afu_cr_write8)(struct cxl_afu *afu, int cr_idx, u64 offset, u8 val); - int (*afu_cr_write16)(struct cxl_afu *afu, int cr_idx, u64 offset, u16 val); - int 
(*afu_cr_write32)(struct cxl_afu *afu, int cr_idx, u64 offset, u32 val); - ssize_t (*read_adapter_vpd)(struct cxl *adapter, void *buf, size_t count); -}; -extern const struct cxl_backend_ops cxl_native_ops; -extern const struct cxl_backend_ops cxl_guest_ops; -extern const struct cxl_backend_ops *cxl_ops; - -/* check if the given pci_dev is on the cxl vphb bus */ -bool cxl_pci_is_vphb_device(struct pci_dev *dev); - -/* decode AFU error bits in the PSL register PSL_SERR_An */ -void cxl_afu_decode_psl_serr(struct cxl_afu *afu, u64 serr); - -/* - * Increments the number of attached contexts on an adapter. - * In case an adapter_context_lock is taken the return -EBUSY. - */ -int cxl_adapter_context_get(struct cxl *adapter); - -/* Decrements the number of attached contexts on an adapter */ -void cxl_adapter_context_put(struct cxl *adapter); - -/* If no active contexts then prevents contexts from being attached */ -int cxl_adapter_context_lock(struct cxl *adapter); - -/* Unlock the contexts-lock if taken. Warn and force unlock otherwise */ -void cxl_adapter_context_unlock(struct cxl *adapter); - -/* Increases the reference count to "struct mm_struct" */ -void cxl_context_mm_count_get(struct cxl_context *ctx); - -/* Decrements the reference count to "struct mm_struct" */ -void cxl_context_mm_count_put(struct cxl_context *ctx); - -#endif diff --git a/drivers/misc/cxl/cxllib.c b/drivers/misc/cxl/cxllib.c deleted file mode 100644 index e5fe0a171472..000000000000 --- a/drivers/misc/cxl/cxllib.c +++ /dev/null @@ -1,271 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright 2017 IBM Corp. 
- */ - -#include <linux/hugetlb.h> -#include <linux/sched/mm.h> -#include <asm/opal-api.h> -#include <asm/pnv-pci.h> -#include <misc/cxllib.h> - -#include "cxl.h" - -#define CXL_INVALID_DRA ~0ull -#define CXL_DUMMY_READ_SIZE 128 -#define CXL_DUMMY_READ_ALIGN 8 -#define CXL_CAPI_WINDOW_START 0x2000000000000ull -#define CXL_CAPI_WINDOW_LOG_SIZE 48 -#define CXL_XSL_CONFIG_CURRENT_VERSION CXL_XSL_CONFIG_VERSION1 - - -bool cxllib_slot_is_supported(struct pci_dev *dev, unsigned long flags) -{ - int rc; - u32 phb_index; - u64 chip_id, capp_unit_id; - - /* No flags currently supported */ - if (flags) - return false; - - if (!cpu_has_feature(CPU_FTR_HVMODE)) - return false; - - if (!cxl_is_power9()) - return false; - - if (cxl_slot_is_switched(dev)) - return false; - - /* on p9, some pci slots are not connected to a CAPP unit */ - rc = cxl_calc_capp_routing(dev, &chip_id, &phb_index, &capp_unit_id); - if (rc) - return false; - - return true; -} -EXPORT_SYMBOL_GPL(cxllib_slot_is_supported); - -static DEFINE_MUTEX(dra_mutex); -static u64 dummy_read_addr = CXL_INVALID_DRA; - -static int allocate_dummy_read_buf(void) -{ - u64 buf, vaddr; - size_t buf_size; - - /* - * Dummy read buffer is 128-byte long, aligned on a - * 256-byte boundary and we need the physical address. 
- */ - buf_size = CXL_DUMMY_READ_SIZE + (1ull << CXL_DUMMY_READ_ALIGN); - buf = (u64) kzalloc(buf_size, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - vaddr = (buf + (1ull << CXL_DUMMY_READ_ALIGN) - 1) & - (~0ull << CXL_DUMMY_READ_ALIGN); - - WARN((vaddr + CXL_DUMMY_READ_SIZE) > (buf + buf_size), - "Dummy read buffer alignment issue"); - dummy_read_addr = virt_to_phys((void *) vaddr); - return 0; -} - -int cxllib_get_xsl_config(struct pci_dev *dev, struct cxllib_xsl_config *cfg) -{ - int rc; - u32 phb_index; - u64 chip_id, capp_unit_id; - - if (!cpu_has_feature(CPU_FTR_HVMODE)) - return -EINVAL; - - mutex_lock(&dra_mutex); - if (dummy_read_addr == CXL_INVALID_DRA) { - rc = allocate_dummy_read_buf(); - if (rc) { - mutex_unlock(&dra_mutex); - return rc; - } - } - mutex_unlock(&dra_mutex); - - rc = cxl_calc_capp_routing(dev, &chip_id, &phb_index, &capp_unit_id); - if (rc) - return rc; - - rc = cxl_get_xsl9_dsnctl(dev, capp_unit_id, &cfg->dsnctl); - if (rc) - return rc; - - cfg->version = CXL_XSL_CONFIG_CURRENT_VERSION; - cfg->log_bar_size = CXL_CAPI_WINDOW_LOG_SIZE; - cfg->bar_addr = CXL_CAPI_WINDOW_START; - cfg->dra = dummy_read_addr; - return 0; -} -EXPORT_SYMBOL_GPL(cxllib_get_xsl_config); - -int cxllib_switch_phb_mode(struct pci_dev *dev, enum cxllib_mode mode, - unsigned long flags) -{ - int rc = 0; - - if (!cpu_has_feature(CPU_FTR_HVMODE)) - return -EINVAL; - - switch (mode) { - case CXL_MODE_PCI: - /* - * We currently don't support going back to PCI mode - * However, we'll turn the invalidations off, so that - * the firmware doesn't have to ack them and can do - * things like reset, etc.. with no worries. 
- * So always return EPERM (can't go back to PCI) or - * EBUSY if we couldn't even turn off snooping - */ - rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_SNOOP_OFF); - if (rc) - rc = -EBUSY; - else - rc = -EPERM; - break; - case CXL_MODE_CXL: - /* DMA only supported on TVT1 for the time being */ - if (flags != CXL_MODE_DMA_TVT1) - return -EINVAL; - rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_DMA_TVT1); - if (rc) - return rc; - rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_SNOOP_ON); - break; - default: - rc = -EINVAL; - } - return rc; -} -EXPORT_SYMBOL_GPL(cxllib_switch_phb_mode); - -/* - * When switching the PHB to capi mode, the TVT#1 entry for - * the Partitionable Endpoint is set in bypass mode, like - * in PCI mode. - * Configure the device dma to use TVT#1, which is done - * by calling dma_set_mask() with a mask large enough. - */ -int cxllib_set_device_dma(struct pci_dev *dev, unsigned long flags) -{ - int rc; - - if (flags) - return -EINVAL; - - rc = dma_set_mask(&dev->dev, DMA_BIT_MASK(64)); - return rc; -} -EXPORT_SYMBOL_GPL(cxllib_set_device_dma); - -int cxllib_get_PE_attributes(struct task_struct *task, - unsigned long translation_mode, - struct cxllib_pe_attributes *attr) -{ - if (translation_mode != CXL_TRANSLATED_MODE && - translation_mode != CXL_REAL_MODE) - return -EINVAL; - - attr->sr = cxl_calculate_sr(false, - task == NULL, - translation_mode == CXL_REAL_MODE, - true); - attr->lpid = mfspr(SPRN_LPID); - if (task) { - struct mm_struct *mm = get_task_mm(task); - if (mm == NULL) - return -EINVAL; - /* - * Caller is keeping a reference on mm_users for as long - * as XSL uses the memory context - */ - attr->pid = mm->context.id; - mmput(mm); - attr->tid = task->thread.tidr; - } else { - attr->pid = 0; - attr->tid = 0; - } - return 0; -} -EXPORT_SYMBOL_GPL(cxllib_get_PE_attributes); - -static int get_vma_info(struct mm_struct *mm, u64 addr, - u64 *vma_start, u64 *vma_end, - unsigned long *page_size) -{ - struct vm_area_struct *vma = NULL; - 
int rc = 0; - - mmap_read_lock(mm); - - vma = find_vma(mm, addr); - if (!vma) { - rc = -EFAULT; - goto out; - } - *page_size = vma_kernel_pagesize(vma); - *vma_start = vma->vm_start; - *vma_end = vma->vm_end; -out: - mmap_read_unlock(mm); - return rc; -} - -int cxllib_handle_fault(struct mm_struct *mm, u64 addr, u64 size, u64 flags) -{ - int rc; - u64 dar, vma_start, vma_end; - unsigned long page_size; - - if (mm == NULL) - return -EFAULT; - - /* - * The buffer we have to process can extend over several pages - * and may also cover several VMAs. - * We iterate over all the pages. The page size could vary - * between VMAs. - */ - rc = get_vma_info(mm, addr, &vma_start, &vma_end, &page_size); - if (rc) - return rc; - - for (dar = (addr & ~(page_size - 1)); dar < (addr + size); - dar += page_size) { - if (dar < vma_start || dar >= vma_end) { - /* - * We don't hold mm->mmap_lock while iterating, since - * the lock is required by one of the lower-level page - * fault processing functions and it could - * create a deadlock. - * - * It means the VMAs can be altered between 2 - * loop iterations and we could theoretically - * miss a page (however unlikely). But that's - * not really a problem, as the driver will - * retry access, get another page fault on the - * missing page and call us again. - */ - rc = get_vma_info(mm, dar, &vma_start, &vma_end, - &page_size); - if (rc) - return rc; - } - - rc = cxl_handle_mm_fault(mm, flags, dar); - if (rc) - return -EFAULT; - } - return 0; -} -EXPORT_SYMBOL_GPL(cxllib_handle_fault); diff --git a/drivers/misc/cxl/debugfs.c b/drivers/misc/cxl/debugfs.c deleted file mode 100644 index 7b987bf498b5..000000000000 --- a/drivers/misc/cxl/debugfs.c +++ /dev/null @@ -1,134 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright 2014 IBM Corp. 
- */ - -#include <linux/debugfs.h> -#include <linux/kernel.h> -#include <linux/slab.h> - -#include "cxl.h" - -static struct dentry *cxl_debugfs; - -/* Helpers to export CXL mmaped IO registers via debugfs */ -static int debugfs_io_u64_get(void *data, u64 *val) -{ - *val = in_be64((u64 __iomem *)data); - return 0; -} - -static int debugfs_io_u64_set(void *data, u64 val) -{ - out_be64((u64 __iomem *)data, val); - return 0; -} -DEFINE_DEBUGFS_ATTRIBUTE(fops_io_x64, debugfs_io_u64_get, debugfs_io_u64_set, - "0x%016llx\n"); - -static void debugfs_create_io_x64(const char *name, umode_t mode, - struct dentry *parent, u64 __iomem *value) -{ - debugfs_create_file_unsafe(name, mode, parent, (void __force *)value, - &fops_io_x64); -} - -void cxl_debugfs_add_adapter_regs_psl9(struct cxl *adapter, struct dentry *dir) -{ - debugfs_create_io_x64("fir1", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL9_FIR1)); - debugfs_create_io_x64("fir_mask", 0400, dir, - _cxl_p1_addr(adapter, CXL_PSL9_FIR_MASK)); - debugfs_create_io_x64("fir_cntl", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL9_FIR_CNTL)); - debugfs_create_io_x64("trace", S_IRUSR | S_IWUSR, dir, _cxl_p1_addr(adapter, CXL_PSL9_TRACECFG)); - debugfs_create_io_x64("debug", 0600, dir, - _cxl_p1_addr(adapter, CXL_PSL9_DEBUG)); - debugfs_create_io_x64("xsl-debug", 0600, dir, - _cxl_p1_addr(adapter, CXL_XSL9_DBG)); -} - -void cxl_debugfs_add_adapter_regs_psl8(struct cxl *adapter, struct dentry *dir) -{ - debugfs_create_io_x64("fir1", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_FIR1)); - debugfs_create_io_x64("fir2", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_FIR2)); - debugfs_create_io_x64("fir_cntl", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_FIR_CNTL)); - debugfs_create_io_x64("trace", S_IRUSR | S_IWUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_TRACE)); -} - -void cxl_debugfs_adapter_add(struct cxl *adapter) -{ - struct dentry *dir; - char buf[32]; - - if (!cxl_debugfs) - return; - - snprintf(buf, 32, "card%i", adapter->adapter_num); - dir 
= debugfs_create_dir(buf, cxl_debugfs); - adapter->debugfs = dir; - - debugfs_create_io_x64("err_ivte", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_ErrIVTE)); - - if (adapter->native->sl_ops->debugfs_add_adapter_regs) - adapter->native->sl_ops->debugfs_add_adapter_regs(adapter, dir); -} - -void cxl_debugfs_adapter_remove(struct cxl *adapter) -{ - debugfs_remove_recursive(adapter->debugfs); -} - -void cxl_debugfs_add_afu_regs_psl9(struct cxl_afu *afu, struct dentry *dir) -{ - debugfs_create_io_x64("serr", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_SERR_An)); -} - -void cxl_debugfs_add_afu_regs_psl8(struct cxl_afu *afu, struct dentry *dir) -{ - debugfs_create_io_x64("sstp0", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_SSTP0_An)); - debugfs_create_io_x64("sstp1", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_SSTP1_An)); - - debugfs_create_io_x64("fir", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_FIR_SLICE_An)); - debugfs_create_io_x64("serr", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_SERR_An)); - debugfs_create_io_x64("afu_debug", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_AFU_DEBUG_An)); - debugfs_create_io_x64("trace", S_IRUSR | S_IWUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_SLICE_TRACE)); -} - -void cxl_debugfs_afu_add(struct cxl_afu *afu) -{ - struct dentry *dir; - char buf[32]; - - if (!afu->adapter->debugfs) - return; - - snprintf(buf, 32, "psl%i.%i", afu->adapter->adapter_num, afu->slice); - dir = debugfs_create_dir(buf, afu->adapter->debugfs); - afu->debugfs = dir; - - debugfs_create_io_x64("sr", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_SR_An)); - debugfs_create_io_x64("dsisr", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_PSL_DSISR_An)); - debugfs_create_io_x64("dar", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_PSL_DAR_An)); - - debugfs_create_io_x64("err_status", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_PSL_ErrStat_An)); - - if (afu->adapter->native->sl_ops->debugfs_add_afu_regs) - afu->adapter->native->sl_ops->debugfs_add_afu_regs(afu, dir); -} - -void cxl_debugfs_afu_remove(struct cxl_afu *afu) -{ - 
debugfs_remove_recursive(afu->debugfs); -} - -void __init cxl_debugfs_init(void) -{ - if (!cpu_has_feature(CPU_FTR_HVMODE)) - return; - - cxl_debugfs = debugfs_create_dir("cxl", NULL); -} - -void cxl_debugfs_exit(void) -{ - debugfs_remove_recursive(cxl_debugfs); -} diff --git a/drivers/misc/cxl/fault.c b/drivers/misc/cxl/fault.c deleted file mode 100644 index 2c64f55cf01f..000000000000 --- a/drivers/misc/cxl/fault.c +++ /dev/null @@ -1,341 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright 2014 IBM Corp. - */ - -#include <linux/workqueue.h> -#include <linux/sched/signal.h> -#include <linux/sched/mm.h> -#include <linux/pid.h> -#include <linux/mm.h> -#include <linux/moduleparam.h> - -#undef MODULE_PARAM_PREFIX -#define MODULE_PARAM_PREFIX "cxl" "." -#include <asm/current.h> -#include <asm/copro.h> -#include <asm/mmu.h> - -#include "cxl.h" -#include "trace.h" - -static bool sste_matches(struct cxl_sste *sste, struct copro_slb *slb) -{ - return ((sste->vsid_data == cpu_to_be64(slb->vsid)) && - (sste->esid_data == cpu_to_be64(slb->esid))); -} - -/* - * This finds a free SSTE for the given SLB, or returns NULL if it's already in - * the segment table. 
- */ -static struct cxl_sste *find_free_sste(struct cxl_context *ctx, - struct copro_slb *slb) -{ - struct cxl_sste *primary, *sste, *ret = NULL; - unsigned int mask = (ctx->sst_size >> 7) - 1; /* SSTP0[SegTableSize] */ - unsigned int entry; - unsigned int hash; - - if (slb->vsid & SLB_VSID_B_1T) - hash = (slb->esid >> SID_SHIFT_1T) & mask; - else /* 256M */ - hash = (slb->esid >> SID_SHIFT) & mask; - - primary = ctx->sstp + (hash << 3); - - for (entry = 0, sste = primary; entry < 8; entry++, sste++) { - if (!ret && !(be64_to_cpu(sste->esid_data) & SLB_ESID_V)) - ret = sste; - if (sste_matches(sste, slb)) - return NULL; - } - if (ret) - return ret; - - /* Nothing free, select an entry to cast out */ - ret = primary + ctx->sst_lru; - ctx->sst_lru = (ctx->sst_lru + 1) & 0x7; - - return ret; -} - -static void cxl_load_segment(struct cxl_context *ctx, struct copro_slb *slb) -{ - /* mask is the group index, we search primary and secondary here. */ - struct cxl_sste *sste; - unsigned long flags; - - spin_lock_irqsave(&ctx->sste_lock, flags); - sste = find_free_sste(ctx, slb); - if (!sste) - goto out_unlock; - - pr_devel("CXL Populating SST[%li]: %#llx %#llx\n", - sste - ctx->sstp, slb->vsid, slb->esid); - trace_cxl_ste_write(ctx, sste - ctx->sstp, slb->esid, slb->vsid); - - sste->vsid_data = cpu_to_be64(slb->vsid); - sste->esid_data = cpu_to_be64(slb->esid); -out_unlock: - spin_unlock_irqrestore(&ctx->sste_lock, flags); -} - -static int cxl_fault_segment(struct cxl_context *ctx, struct mm_struct *mm, - u64 ea) -{ - struct copro_slb slb = {0,0}; - int rc; - - if (!(rc = copro_calculate_slb(mm, ea, &slb))) { - cxl_load_segment(ctx, &slb); - } - - return rc; -} - -static void cxl_ack_ae(struct cxl_context *ctx) -{ - unsigned long flags; - - cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_AE, 0); - - spin_lock_irqsave(&ctx->lock, flags); - ctx->pending_fault = true; - ctx->fault_addr = ctx->dar; - ctx->fault_dsisr = ctx->dsisr; - spin_unlock_irqrestore(&ctx->lock, flags); - - 
wake_up_all(&ctx->wq); -} - -static int cxl_handle_segment_miss(struct cxl_context *ctx, - struct mm_struct *mm, u64 ea) -{ - int rc; - - pr_devel("CXL interrupt: Segment fault pe: %i ea: %#llx\n", ctx->pe, ea); - trace_cxl_ste_miss(ctx, ea); - - if ((rc = cxl_fault_segment(ctx, mm, ea))) - cxl_ack_ae(ctx); - else { - - mb(); /* Order seg table write to TFC MMIO write */ - cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_R, 0); - } - - return IRQ_HANDLED; -} - -int cxl_handle_mm_fault(struct mm_struct *mm, u64 dsisr, u64 dar) -{ - vm_fault_t flt = 0; - int result; - unsigned long access, flags, inv_flags = 0; - - /* - * Add the fault handling cpu to task mm cpumask so that we - * can do a safe lockless page table walk when inserting the - * hash page table entry. This function get called with a - * valid mm for user space addresses. Hence using the if (mm) - * check is sufficient here. - */ - if (mm && !cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) { - cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm)); - /* - * We need to make sure we walk the table only after - * we update the cpumask. The other side of the barrier - * is explained in serialize_against_pte_lookup() - */ - smp_mb(); - } - if ((result = copro_handle_mm_fault(mm, dar, dsisr, &flt))) { - pr_devel("copro_handle_mm_fault failed: %#x\n", result); - return result; - } - - if (!radix_enabled()) { - /* - * update_mmu_cache() will not have loaded the hash since current->trap - * is not a 0x400 or 0x300, so just call hash_page_mm() here. 
- */ - access = _PAGE_PRESENT | _PAGE_READ; - if (dsisr & CXL_PSL_DSISR_An_S) - access |= _PAGE_WRITE; - - if (!mm && (get_region_id(dar) != USER_REGION_ID)) - access |= _PAGE_PRIVILEGED; - - if (dsisr & DSISR_NOHPTE) - inv_flags |= HPTE_NOHPTE_UPDATE; - - local_irq_save(flags); - hash_page_mm(mm, dar, access, 0x300, inv_flags); - local_irq_restore(flags); - } - return 0; -} - -static void cxl_handle_page_fault(struct cxl_context *ctx, - struct mm_struct *mm, - u64 dsisr, u64 dar) -{ - trace_cxl_pte_miss(ctx, dsisr, dar); - - if (cxl_handle_mm_fault(mm, dsisr, dar)) { - cxl_ack_ae(ctx); - } else { - pr_devel("Page fault successfully handled for pe: %i!\n", ctx->pe); - cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_R, 0); - } -} - -/* - * Returns the mm_struct corresponding to the context ctx. - * mm_users == 0, the context may be in the process of being closed. - */ -static struct mm_struct *get_mem_context(struct cxl_context *ctx) -{ - if (ctx->mm == NULL) - return NULL; - - if (!mmget_not_zero(ctx->mm)) - return NULL; - - return ctx->mm; -} - -static bool cxl_is_segment_miss(struct cxl_context *ctx, u64 dsisr) -{ - if ((cxl_is_power8() && (dsisr & CXL_PSL_DSISR_An_DS))) - return true; - - return false; -} - -static bool cxl_is_page_fault(struct cxl_context *ctx, u64 dsisr) -{ - if ((cxl_is_power8()) && (dsisr & CXL_PSL_DSISR_An_DM)) - return true; - - if (cxl_is_power9()) - return true; - - return false; -} - -void cxl_handle_fault(struct work_struct *fault_work) -{ - struct cxl_context *ctx = - container_of(fault_work, struct cxl_context, fault_work); - u64 dsisr = ctx->dsisr; - u64 dar = ctx->dar; - struct mm_struct *mm = NULL; - - if (cpu_has_feature(CPU_FTR_HVMODE)) { - if (cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An) != dsisr || - cxl_p2n_read(ctx->afu, CXL_PSL_DAR_An) != dar || - cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) != ctx->pe) { - /* Most likely explanation is harmless - a dedicated - * process has detached and these were cleared by the - * PSL purge, but warn 
about it just in case - */ - dev_notice(&ctx->afu->dev, "cxl_handle_fault: Translation fault regs changed\n"); - return; - } - } - - /* Early return if the context is being / has been detached */ - if (ctx->status == CLOSED) { - cxl_ack_ae(ctx); - return; - } - - pr_devel("CXL BOTTOM HALF handling fault for afu pe: %i. " - "DSISR: %#llx DAR: %#llx\n", ctx->pe, dsisr, dar); - - if (!ctx->kernel) { - - mm = get_mem_context(ctx); - if (mm == NULL) { - pr_devel("%s: unable to get mm for pe=%d pid=%i\n", - __func__, ctx->pe, pid_nr(ctx->pid)); - cxl_ack_ae(ctx); - return; - } else { - pr_devel("Handling page fault for pe=%d pid=%i\n", - ctx->pe, pid_nr(ctx->pid)); - } - } - - if (cxl_is_segment_miss(ctx, dsisr)) - cxl_handle_segment_miss(ctx, mm, dar); - else if (cxl_is_page_fault(ctx, dsisr)) - cxl_handle_page_fault(ctx, mm, dsisr, dar); - else - WARN(1, "cxl_handle_fault has nothing to handle\n"); - - if (mm) - mmput(mm); -} - -static u64 next_segment(u64 ea, u64 vsid) -{ - if (vsid & SLB_VSID_B_1T) - ea |= (1ULL << 40) - 1; - else - ea |= (1ULL << 28) - 1; - - return ea + 1; -} - -static void cxl_prefault_vma(struct cxl_context *ctx, struct mm_struct *mm) -{ - u64 ea, last_esid = 0; - struct copro_slb slb; - VMA_ITERATOR(vmi, mm, 0); - struct vm_area_struct *vma; - int rc; - - mmap_read_lock(mm); - for_each_vma(vmi, vma) { - for (ea = vma->vm_start; ea < vma->vm_end; - ea = next_segment(ea, slb.vsid)) { - rc = copro_calculate_slb(mm, ea, &slb); - if (rc) - continue; - - if (last_esid == slb.esid) - continue; - - cxl_load_segment(ctx, &slb); - last_esid = slb.esid; - } - } - mmap_read_unlock(mm); -} - -void cxl_prefault(struct cxl_context *ctx, u64 wed) -{ - struct mm_struct *mm = get_mem_context(ctx); - - if (mm == NULL) { - pr_devel("cxl_prefault unable to get mm %i\n", - pid_nr(ctx->pid)); - return; - } - - switch (ctx->afu->prefault_mode) { - case CXL_PREFAULT_WED: - cxl_fault_segment(ctx, mm, wed); - break; - case CXL_PREFAULT_ALL: - cxl_prefault_vma(ctx, mm); - 
break; - default: - break; - } - - mmput(mm); -} diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c deleted file mode 100644 index 144d1f2d78ce..000000000000 --- a/drivers/misc/cxl/file.c +++ /dev/null @@ -1,700 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright 2014 IBM Corp. - */ - -#include <linux/spinlock.h> -#include <linux/module.h> -#include <linux/export.h> -#include <linux/kernel.h> -#include <linux/bitmap.h> -#include <linux/sched/signal.h> -#include <linux/poll.h> -#include <linux/pid.h> -#include <linux/fs.h> -#include <linux/mm.h> -#include <linux/slab.h> -#include <linux/sched/mm.h> -#include <linux/mmu_context.h> -#include <asm/cputable.h> -#include <asm/current.h> -#include <asm/copro.h> - -#include "cxl.h" -#include "trace.h" - -#define CXL_NUM_MINORS 256 /* Total to reserve */ - -#define CXL_AFU_MINOR_D(afu) (CXL_CARD_MINOR(afu->adapter) + 1 + (3 * afu->slice)) -#define CXL_AFU_MINOR_M(afu) (CXL_AFU_MINOR_D(afu) + 1) -#define CXL_AFU_MINOR_S(afu) (CXL_AFU_MINOR_D(afu) + 2) -#define CXL_AFU_MKDEV_D(afu) MKDEV(MAJOR(cxl_dev), CXL_AFU_MINOR_D(afu)) -#define CXL_AFU_MKDEV_M(afu) MKDEV(MAJOR(cxl_dev), CXL_AFU_MINOR_M(afu)) -#define CXL_AFU_MKDEV_S(afu) MKDEV(MAJOR(cxl_dev), CXL_AFU_MINOR_S(afu)) - -#define CXL_DEVT_AFU(dev) ((MINOR(dev) % CXL_DEV_MINORS - 1) / 3) - -#define CXL_DEVT_IS_CARD(dev) (MINOR(dev) % CXL_DEV_MINORS == 0) - -static dev_t cxl_dev; - -static struct class *cxl_class; - -static int __afu_open(struct inode *inode, struct file *file, bool master) -{ - struct cxl *adapter; - struct cxl_afu *afu; - struct cxl_context *ctx; - int adapter_num = CXL_DEVT_ADAPTER(inode->i_rdev); - int slice = CXL_DEVT_AFU(inode->i_rdev); - int rc = -ENODEV; - - pr_devel("afu_open afu%i.%i\n", slice, adapter_num); - - if (!(adapter = get_cxl_adapter(adapter_num))) - return -ENODEV; - - if (slice > adapter->slices) - goto err_put_adapter; - - spin_lock(&adapter->afu_list_lock); - if (!(afu = adapter->afu[slice])) { - 
spin_unlock(&adapter->afu_list_lock); - goto err_put_adapter; - } - - /* - * taking a ref to the afu so that it doesn't go away - * for rest of the function. This ref is released before - * we return. - */ - cxl_afu_get(afu); - spin_unlock(&adapter->afu_list_lock); - - if (!afu->current_mode) - goto err_put_afu; - - if (!cxl_ops->link_ok(adapter, afu)) { - rc = -EIO; - goto err_put_afu; - } - - if (!(ctx = cxl_context_alloc())) { - rc = -ENOMEM; - goto err_put_afu; - } - - rc = cxl_context_init(ctx, afu, master); - if (rc) - goto err_put_afu; - - cxl_context_set_mapping(ctx, inode->i_mapping); - - pr_devel("afu_open pe: %i\n", ctx->pe); - file->private_data = ctx; - - /* indicate success */ - rc = 0; - -err_put_afu: - /* release the ref taken earlier */ - cxl_afu_put(afu); -err_put_adapter: - put_device(&adapter->dev); - return rc; -} - -int afu_open(struct inode *inode, struct file *file) -{ - return __afu_open(inode, file, false); -} - -static int afu_master_open(struct inode *inode, struct file *file) -{ - return __afu_open(inode, file, true); -} - -int afu_release(struct inode *inode, struct file *file) -{ - struct cxl_context *ctx = file->private_data; - - pr_devel("%s: closing cxl file descriptor. pe: %i\n", - __func__, ctx->pe); - cxl_context_detach(ctx); - - - /* - * Delete the context's mapping pointer, unless it's created by the - * kernel API, in which case leave it so it can be freed by reclaim_ctx() - */ - if (!ctx->kernelapi) { - mutex_lock(&ctx->mapping_lock); - ctx->mapping = NULL; - mutex_unlock(&ctx->mapping_lock); - } - - /* - * At this this point all bottom halfs have finished and we should be - * getting no more IRQs from the hardware for this context. Once it's - * removed from the IDR (and RCU synchronised) it's safe to free the - * sstp and context. 
- */ - cxl_context_free(ctx); - - return 0; -} - -static long afu_ioctl_start_work(struct cxl_context *ctx, - struct cxl_ioctl_start_work __user *uwork) -{ - struct cxl_ioctl_start_work work; - u64 amr = 0; - int rc; - - pr_devel("%s: pe: %i\n", __func__, ctx->pe); - - /* Do this outside the status_mutex to avoid a circular dependency with - * the locking in cxl_mmap_fault() */ - if (copy_from_user(&work, uwork, sizeof(work))) - return -EFAULT; - - mutex_lock(&ctx->status_mutex); - if (ctx->status != OPENED) { - rc = -EIO; - goto out; - } - - /* - * if any of the reserved fields are set or any of the unused - * flags are set it's invalid - */ - if (work.reserved1 || work.reserved2 || work.reserved3 || - work.reserved4 || work.reserved5 || - (work.flags & ~CXL_START_WORK_ALL)) { - rc = -EINVAL; - goto out; - } - - if (!(work.flags & CXL_START_WORK_NUM_IRQS)) - work.num_interrupts = ctx->afu->pp_irqs; - else if ((work.num_interrupts < ctx->afu->pp_irqs) || - (work.num_interrupts > ctx->afu->irqs_max)) { - rc = -EINVAL; - goto out; - } - - if ((rc = afu_register_irqs(ctx, work.num_interrupts))) - goto out; - - if (work.flags & CXL_START_WORK_AMR) - amr = work.amr & mfspr(SPRN_UAMOR); - - if (work.flags & CXL_START_WORK_TID) - ctx->assign_tidr = true; - - ctx->mmio_err_ff = !!(work.flags & CXL_START_WORK_ERR_FF); - - /* - * Increment the mapped context count for adapter. This also checks - * if adapter_context_lock is taken. - */ - rc = cxl_adapter_context_get(ctx->afu->adapter); - if (rc) { - afu_release_irqs(ctx, ctx); - goto out; - } - - /* - * We grab the PID here and not in the file open to allow for the case - * where a process (master, some daemon, etc) has opened the chardev on - * behalf of another process, so the AFU's mm gets bound to the process - * that performs this ioctl and not the process that opened the file. 
- * Also we grab the PID of the group leader so that if the task that - * has performed the attach operation exits the mm context of the - * process is still accessible. - */ - ctx->pid = get_task_pid(current, PIDTYPE_PID); - - /* acquire a reference to the task's mm */ - ctx->mm = get_task_mm(current); - - /* ensure this mm_struct can't be freed */ - cxl_context_mm_count_get(ctx); - - if (ctx->mm) { - /* decrement the use count from above */ - mmput(ctx->mm); - /* make TLBIs for this context global */ - mm_context_add_copro(ctx->mm); - } - - /* - * Increment driver use count. Enables global TLBIs for hash - * and callbacks to handle the segment table - */ - cxl_ctx_get(); - - /* - * A barrier is needed to make sure all TLBIs are global - * before we attach and the context starts being used by the - * adapter. - * - * Needed after mm_context_add_copro() for radix and - * cxl_ctx_get() for hash/p8. - * - * The barrier should really be mb(), since it involves a - * device. However, it's only useful when we have local - * vs. global TLBIs, i.e SMP=y. So keep smp_mb(). 
- */ - smp_mb(); - - trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr); - - if ((rc = cxl_ops->attach_process(ctx, false, work.work_element_descriptor, - amr))) { - afu_release_irqs(ctx, ctx); - cxl_adapter_context_put(ctx->afu->adapter); - put_pid(ctx->pid); - ctx->pid = NULL; - cxl_ctx_put(); - cxl_context_mm_count_put(ctx); - if (ctx->mm) - mm_context_remove_copro(ctx->mm); - goto out; - } - - rc = 0; - if (work.flags & CXL_START_WORK_TID) { - work.tid = ctx->tidr; - if (copy_to_user(uwork, &work, sizeof(work))) - rc = -EFAULT; - } - - ctx->status = STARTED; - -out: - mutex_unlock(&ctx->status_mutex); - return rc; -} - -static long afu_ioctl_process_element(struct cxl_context *ctx, - int __user *upe) -{ - pr_devel("%s: pe: %i\n", __func__, ctx->pe); - - if (copy_to_user(upe, &ctx->external_pe, sizeof(__u32))) - return -EFAULT; - - return 0; -} - -static long afu_ioctl_get_afu_id(struct cxl_context *ctx, - struct cxl_afu_id __user *upafuid) -{ - struct cxl_afu_id afuid = { 0 }; - - afuid.card_id = ctx->afu->adapter->adapter_num; - afuid.afu_offset = ctx->afu->slice; - afuid.afu_mode = ctx->afu->current_mode; - - /* set the flag bit in case the afu is a slave */ - if (ctx->afu->current_mode == CXL_MODE_DIRECTED && !ctx->master) - afuid.flags |= CXL_AFUID_FLAG_SLAVE; - - if (copy_to_user(upafuid, &afuid, sizeof(afuid))) - return -EFAULT; - - return 0; -} - -long afu_ioctl(struct file *file, unsigned int cmd, unsigned long arg) -{ - struct cxl_context *ctx = file->private_data; - - if (ctx->status == CLOSED) - return -EIO; - - if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) - return -EIO; - - pr_devel("afu_ioctl\n"); - switch (cmd) { - case CXL_IOCTL_START_WORK: - return afu_ioctl_start_work(ctx, (struct cxl_ioctl_start_work __user *)arg); - case CXL_IOCTL_GET_PROCESS_ELEMENT: - return afu_ioctl_process_element(ctx, (__u32 __user *)arg); - case CXL_IOCTL_GET_AFU_ID: - return afu_ioctl_get_afu_id(ctx, (struct cxl_afu_id __user *) - 
arg); - } - return -EINVAL; -} - -static long afu_compat_ioctl(struct file *file, unsigned int cmd, - unsigned long arg) -{ - return afu_ioctl(file, cmd, arg); -} - -int afu_mmap(struct file *file, struct vm_area_struct *vm) -{ - struct cxl_context *ctx = file->private_data; - - /* AFU must be started before we can MMIO */ - if (ctx->status != STARTED) - return -EIO; - - if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) - return -EIO; - - return cxl_context_iomap(ctx, vm); -} - -static inline bool ctx_event_pending(struct cxl_context *ctx) -{ - if (ctx->pending_irq || ctx->pending_fault || ctx->pending_afu_err) - return true; - - if (ctx->afu_driver_ops && atomic_read(&ctx->afu_driver_events)) - return true; - - return false; -} - -__poll_t afu_poll(struct file *file, struct poll_table_struct *poll) -{ - struct cxl_context *ctx = file->private_data; - __poll_t mask = 0; - unsigned long flags; - - - poll_wait(file, &ctx->wq, poll); - - pr_devel("afu_poll wait done pe: %i\n", ctx->pe); - - spin_lock_irqsave(&ctx->lock, flags); - if (ctx_event_pending(ctx)) - mask |= EPOLLIN | EPOLLRDNORM; - else if (ctx->status == CLOSED) - /* Only error on closed when there are no futher events pending - */ - mask |= EPOLLERR; - spin_unlock_irqrestore(&ctx->lock, flags); - - pr_devel("afu_poll pe: %i returning %#x\n", ctx->pe, mask); - - return mask; -} - -static ssize_t afu_driver_event_copy(struct cxl_context *ctx, - char __user *buf, - struct cxl_event *event, - struct cxl_event_afu_driver_reserved *pl) -{ - /* Check event */ - if (!pl) { - ctx->afu_driver_ops->event_delivered(ctx, pl, -EINVAL); - return -EFAULT; - } - - /* Check event size */ - event->header.size += pl->data_size; - if (event->header.size > CXL_READ_MIN_SIZE) { - ctx->afu_driver_ops->event_delivered(ctx, pl, -EINVAL); - return -EFAULT; - } - - /* Copy event header */ - if (copy_to_user(buf, event, sizeof(struct cxl_event_header))) { - ctx->afu_driver_ops->event_delivered(ctx, pl, -EFAULT); - return -EFAULT; - 
} - - /* Copy event data */ - buf += sizeof(struct cxl_event_header); - if (copy_to_user(buf, &pl->data, pl->data_size)) { - ctx->afu_driver_ops->event_delivered(ctx, pl, -EFAULT); - return -EFAULT; - } - - ctx->afu_driver_ops->event_delivered(ctx, pl, 0); /* Success */ - return event->header.size; -} - -ssize_t afu_read(struct file *file, char __user *buf, size_t count, - loff_t *off) -{ - struct cxl_context *ctx = file->private_data; - struct cxl_event_afu_driver_reserved *pl = NULL; - struct cxl_event event; - unsigned long flags; - int rc; - DEFINE_WAIT(wait); - - if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) - return -EIO; - - if (count < CXL_READ_MIN_SIZE) - return -EINVAL; - - spin_lock_irqsave(&ctx->lock, flags); - - for (;;) { - prepare_to_wait(&ctx->wq, &wait, TASK_INTERRUPTIBLE); - if (ctx_event_pending(ctx) || (ctx->status == CLOSED)) - break; - - if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) { - rc = -EIO; - goto out; - } - - if (file->f_flags & O_NONBLOCK) { - rc = -EAGAIN; - goto out; - } - - if (signal_pending(current)) { - rc = -ERESTARTSYS; - goto out; - } - - spin_unlock_irqrestore(&ctx->lock, flags); - pr_devel("afu_read going to sleep...\n"); - schedule(); - pr_devel("afu_read woken up\n"); - spin_lock_irqsave(&ctx->lock, flags); - } - - finish_wait(&ctx->wq, &wait); - - memset(&event, 0, sizeof(event)); - event.header.process_element = ctx->pe; - event.header.size = sizeof(struct cxl_event_header); - if (ctx->afu_driver_ops && atomic_read(&ctx->afu_driver_events)) { - pr_devel("afu_read delivering AFU driver specific event\n"); - pl = ctx->afu_driver_ops->fetch_event(ctx); - atomic_dec(&ctx->afu_driver_events); - event.header.type = CXL_EVENT_AFU_DRIVER; - } else if (ctx->pending_irq) { - pr_devel("afu_read delivering AFU interrupt\n"); - event.header.size += sizeof(struct cxl_event_afu_interrupt); - event.header.type = CXL_EVENT_AFU_INTERRUPT; - event.irq.irq = find_first_bit(ctx->irq_bitmap, ctx->irq_count) + 1; - 
clear_bit(event.irq.irq - 1, ctx->irq_bitmap); - if (bitmap_empty(ctx->irq_bitmap, ctx->irq_count)) - ctx->pending_irq = false; - } else if (ctx->pending_fault) { - pr_devel("afu_read delivering data storage fault\n"); - event.header.size += sizeof(struct cxl_event_data_storage); - event.header.type = CXL_EVENT_DATA_STORAGE; - event.fault.addr = ctx->fault_addr; - event.fault.dsisr = ctx->fault_dsisr; - ctx->pending_fault = false; - } else if (ctx->pending_afu_err) { - pr_devel("afu_read delivering afu error\n"); - event.header.size += sizeof(struct cxl_event_afu_error); - event.header.type = CXL_EVENT_AFU_ERROR; - event.afu_error.error = ctx->afu_err; - ctx->pending_afu_err = false; - } else if (ctx->status == CLOSED) { - pr_devel("afu_read fatal error\n"); - spin_unlock_irqrestore(&ctx->lock, flags); - return -EIO; - } else - WARN(1, "afu_read must be buggy\n"); - - spin_unlock_irqrestore(&ctx->lock, flags); - - if (event.header.type == CXL_EVENT_AFU_DRIVER) - return afu_driver_event_copy(ctx, buf, &event, pl); - - if (copy_to_user(buf, &event, event.header.size)) - return -EFAULT; - return event.header.size; - -out: - finish_wait(&ctx->wq, &wait); - spin_unlock_irqrestore(&ctx->lock, flags); - return rc; -} - -/* - * Note: if this is updated, we need to update api.c to patch the new ones in - * too - */ -const struct file_operations afu_fops = { - .owner = THIS_MODULE, - .open = afu_open, - .poll = afu_poll, - .read = afu_read, - .release = afu_release, - .unlocked_ioctl = afu_ioctl, - .compat_ioctl = afu_compat_ioctl, - .mmap = afu_mmap, -}; - -static const struct file_operations afu_master_fops = { - .owner = THIS_MODULE, - .open = afu_master_open, - .poll = afu_poll, - .read = afu_read, - .release = afu_release, - .unlocked_ioctl = afu_ioctl, - .compat_ioctl = afu_compat_ioctl, - .mmap = afu_mmap, -}; - - -static char *cxl_devnode(const struct device *dev, umode_t *mode) -{ - if (cpu_has_feature(CPU_FTR_HVMODE) && - CXL_DEVT_IS_CARD(dev->devt)) { - /* - * 
These minor numbers will eventually be used to program the - * PSL and AFUs once we have dynamic reprogramming support - */ - return NULL; - } - return kasprintf(GFP_KERNEL, "cxl/%s", dev_name(dev)); -} - -extern struct class *cxl_class; - -static int cxl_add_chardev(struct cxl_afu *afu, dev_t devt, struct cdev *cdev, - struct device **chardev, char *postfix, char *desc, - const struct file_operations *fops) -{ - struct device *dev; - int rc; - - cdev_init(cdev, fops); - rc = cdev_add(cdev, devt, 1); - if (rc) { - dev_err(&afu->dev, "Unable to add %s chardev: %i\n", desc, rc); - return rc; - } - - dev = device_create(cxl_class, &afu->dev, devt, afu, - "afu%i.%i%s", afu->adapter->adapter_num, afu->slice, postfix); - if (IS_ERR(dev)) { - rc = PTR_ERR(dev); - dev_err(&afu->dev, "Unable to create %s chardev in sysfs: %i\n", desc, rc); - goto err; - } - - *chardev = dev; - - return 0; -err: - cdev_del(cdev); - return rc; -} - -int cxl_chardev_d_afu_add(struct cxl_afu *afu) -{ - return cxl_add_chardev(afu, CXL_AFU_MKDEV_D(afu), &afu->afu_cdev_d, - &afu->chardev_d, "d", "dedicated", - &afu_master_fops); /* Uses master fops */ -} - -int cxl_chardev_m_afu_add(struct cxl_afu *afu) -{ - return cxl_add_chardev(afu, CXL_AFU_MKDEV_M(afu), &afu->afu_cdev_m, - &afu->chardev_m, "m", "master", - &afu_master_fops); -} - -int cxl_chardev_s_afu_add(struct cxl_afu *afu) -{ - return cxl_add_chardev(afu, CXL_AFU_MKDEV_S(afu), &afu->afu_cdev_s, - &afu->chardev_s, "s", "shared", - &afu_fops); -} - -void cxl_chardev_afu_remove(struct cxl_afu *afu) -{ - if (afu->chardev_d) { - cdev_del(&afu->afu_cdev_d); - device_unregister(afu->chardev_d); - afu->chardev_d = NULL; - } - if (afu->chardev_m) { - cdev_del(&afu->afu_cdev_m); - device_unregister(afu->chardev_m); - afu->chardev_m = NULL; - } - if (afu->chardev_s) { - cdev_del(&afu->afu_cdev_s); - device_unregister(afu->chardev_s); - afu->chardev_s = NULL; - } -} - -int cxl_register_afu(struct cxl_afu *afu) -{ - afu->dev.class = cxl_class; - - 
return device_register(&afu->dev); -} - -int cxl_register_adapter(struct cxl *adapter) -{ - adapter->dev.class = cxl_class; - - /* - * Future: When we support dynamically reprogramming the PSL & AFU we - * will expose the interface to do that via a chardev: - * adapter->dev.devt = CXL_CARD_MKDEV(adapter); - */ - - return device_register(&adapter->dev); -} - -dev_t cxl_get_dev(void) -{ - return cxl_dev; -} - -int __init cxl_file_init(void) -{ - int rc; - - /* - * If these change we really need to update API. Either change some - * flags or update API version number CXL_API_VERSION. - */ - BUILD_BUG_ON(CXL_API_VERSION != 3); - BUILD_BUG_ON(sizeof(struct cxl_ioctl_start_work) != 64); - BUILD_BUG_ON(sizeof(struct cxl_event_header) != 8); - BUILD_BUG_ON(sizeof(struct cxl_event_afu_interrupt) != 8); - BUILD_BUG_ON(sizeof(struct cxl_event_data_storage) != 32); - BUILD_BUG_ON(sizeof(struct cxl_event_afu_error) != 16); - - if ((rc = alloc_chrdev_region(&cxl_dev, 0, CXL_NUM_MINORS, "cxl"))) { - pr_err("Unable to allocate CXL major number: %i\n", rc); - return rc; - } - - pr_devel("CXL device allocated, MAJOR %i\n", MAJOR(cxl_dev)); - - cxl_class = class_create("cxl"); - if (IS_ERR(cxl_class)) { - pr_err("Unable to create CXL class\n"); - rc = PTR_ERR(cxl_class); - goto err; - } - cxl_class->devnode = cxl_devnode; - - return 0; - -err: - unregister_chrdev_region(cxl_dev, CXL_NUM_MINORS); - return rc; -} - -void cxl_file_exit(void) -{ - unregister_chrdev_region(cxl_dev, CXL_NUM_MINORS); - class_destroy(cxl_class); -} diff --git a/drivers/misc/cxl/flash.c b/drivers/misc/cxl/flash.c deleted file mode 100644 index eee9decc121e..000000000000 --- a/drivers/misc/cxl/flash.c +++ /dev/null @@ -1,538 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#include <linux/kernel.h> -#include <linux/fs.h> -#include <linux/semaphore.h> -#include <linux/slab.h> -#include <linux/uaccess.h> -#include <linux/of.h> -#include <asm/rtas.h> - -#include "cxl.h" -#include "hcalls.h" - -#define DOWNLOAD_IMAGE 
1 -#define VALIDATE_IMAGE 2 - -struct ai_header { - u16 version; - u8 reserved0[6]; - u16 vendor; - u16 device; - u16 subsystem_vendor; - u16 subsystem; - u64 image_offset; - u64 image_length; - u8 reserved1[96]; -}; - -static struct semaphore sem; -static unsigned long *buffer[CXL_AI_MAX_ENTRIES]; -static struct sg_list *le; -static u64 continue_token; -static unsigned int transfer; - -struct update_props_workarea { - __be32 phandle; - __be32 state; - __be64 reserved; - __be32 nprops; -} __packed; - -struct update_nodes_workarea { - __be32 state; - __be64 unit_address; - __be32 reserved; -} __packed; - -#define DEVICE_SCOPE 3 -#define NODE_ACTION_MASK 0xff000000 -#define NODE_COUNT_MASK 0x00ffffff -#define OPCODE_DELETE 0x01000000 -#define OPCODE_UPDATE 0x02000000 -#define OPCODE_ADD 0x03000000 - -static int rcall(int token, char *buf, s32 scope) -{ - int rc; - - spin_lock(&rtas_data_buf_lock); - - memcpy(rtas_data_buf, buf, RTAS_DATA_BUF_SIZE); - rc = rtas_call(token, 2, 1, NULL, rtas_data_buf, scope); - memcpy(buf, rtas_data_buf, RTAS_DATA_BUF_SIZE); - - spin_unlock(&rtas_data_buf_lock); - return rc; -} - -static int update_property(struct device_node *dn, const char *name, - u32 vd, char *value) -{ - struct property *new_prop; - u32 *val; - int rc; - - new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL); - if (!new_prop) - return -ENOMEM; - - new_prop->name = kstrdup(name, GFP_KERNEL); - if (!new_prop->name) { - kfree(new_prop); - return -ENOMEM; - } - - new_prop->length = vd; - new_prop->value = kzalloc(new_prop->length, GFP_KERNEL); - if (!new_prop->value) { - kfree(new_prop->name); - kfree(new_prop); - return -ENOMEM; - } - memcpy(new_prop->value, value, vd); - - val = (u32 *)new_prop->value; - rc = cxl_update_properties(dn, new_prop); - pr_devel("%pOFn: update property (%s, length: %i, value: %#x)\n", - dn, name, vd, be32_to_cpu(*val)); - - if (rc) { - kfree(new_prop->name); - kfree(new_prop->value); - kfree(new_prop); - } - return rc; -} - -static int 
update_node(__be32 phandle, s32 scope) -{ - struct update_props_workarea *upwa; - struct device_node *dn; - int i, rc, ret; - char *prop_data; - char *buf; - int token; - u32 nprops; - u32 vd; - - token = rtas_token("ibm,update-properties"); - if (token == RTAS_UNKNOWN_SERVICE) - return -EINVAL; - - buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - dn = of_find_node_by_phandle(be32_to_cpu(phandle)); - if (!dn) { - kfree(buf); - return -ENOENT; - } - - upwa = (struct update_props_workarea *)&buf[0]; - upwa->phandle = phandle; - do { - rc = rcall(token, buf, scope); - if (rc < 0) - break; - - prop_data = buf + sizeof(*upwa); - nprops = be32_to_cpu(upwa->nprops); - - if (*prop_data == 0) { - prop_data++; - vd = be32_to_cpu(*(__be32 *)prop_data); - prop_data += vd + sizeof(vd); - nprops--; - } - - for (i = 0; i < nprops; i++) { - char *prop_name; - - prop_name = prop_data; - prop_data += strlen(prop_name) + 1; - vd = be32_to_cpu(*(__be32 *)prop_data); - prop_data += sizeof(vd); - - if ((vd != 0x00000000) && (vd != 0x80000000)) { - ret = update_property(dn, prop_name, vd, - prop_data); - if (ret) - pr_err("cxl: Could not update property %s - %i\n", - prop_name, ret); - - prop_data += vd; - } - } - } while (rc == 1); - - of_node_put(dn); - kfree(buf); - return rc; -} - -static int update_devicetree(struct cxl *adapter, s32 scope) -{ - struct update_nodes_workarea *unwa; - u32 action, node_count; - int token, rc, i; - __be32 *data, phandle; - char *buf; - - token = rtas_token("ibm,update-nodes"); - if (token == RTAS_UNKNOWN_SERVICE) - return -EINVAL; - - buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - unwa = (struct update_nodes_workarea *)&buf[0]; - unwa->unit_address = cpu_to_be64(adapter->guest->handle); - do { - rc = rcall(token, buf, scope); - if (rc && rc != 1) - break; - - data = (__be32 *)buf + 4; - while (be32_to_cpu(*data) & NODE_ACTION_MASK) { - action = be32_to_cpu(*data) & NODE_ACTION_MASK; - 
node_count = be32_to_cpu(*data) & NODE_COUNT_MASK; - pr_devel("device reconfiguration - action: %#x, nodes: %#x\n", - action, node_count); - data++; - - for (i = 0; i < node_count; i++) { - phandle = *data++; - - switch (action) { - case OPCODE_DELETE: - /* nothing to do */ - break; - case OPCODE_UPDATE: - update_node(phandle, scope); - break; - case OPCODE_ADD: - /* nothing to do, just move pointer */ - data++; - break; - } - } - } - } while (rc == 1); - - kfree(buf); - return 0; -} - -static int handle_image(struct cxl *adapter, int operation, - long (*fct)(u64, u64, u64, u64 *), - struct cxl_adapter_image *ai) -{ - size_t mod, s_copy, len_chunk = 0; - struct ai_header *header = NULL; - unsigned int entries = 0, i; - void *dest, *from; - int rc = 0, need_header; - - /* base adapter image header */ - need_header = (ai->flags & CXL_AI_NEED_HEADER); - if (need_header) { - header = kzalloc(sizeof(struct ai_header), GFP_KERNEL); - if (!header) - return -ENOMEM; - header->version = cpu_to_be16(1); - header->vendor = cpu_to_be16(adapter->guest->vendor); - header->device = cpu_to_be16(adapter->guest->device); - header->subsystem_vendor = cpu_to_be16(adapter->guest->subsystem_vendor); - header->subsystem = cpu_to_be16(adapter->guest->subsystem); - header->image_offset = cpu_to_be64(CXL_AI_HEADER_SIZE); - header->image_length = cpu_to_be64(ai->len_image); - } - - /* number of entries in the list */ - len_chunk = ai->len_data; - if (need_header) - len_chunk += CXL_AI_HEADER_SIZE; - - entries = len_chunk / CXL_AI_BUFFER_SIZE; - mod = len_chunk % CXL_AI_BUFFER_SIZE; - if (mod) - entries++; - - if (entries > CXL_AI_MAX_ENTRIES) { - rc = -EINVAL; - goto err; - } - - /* < -- MAX_CHUNK_SIZE = 4096 * 256 = 1048576 bytes --> - * chunk 0 ---------------------------------------------------- - * | header | data | - * ---------------------------------------------------- - * chunk 1 ---------------------------------------------------- - * | data | - * 
---------------------------------------------------- - * .... - * chunk n ---------------------------------------------------- - * | data | - * ---------------------------------------------------- - */ - from = (void *) ai->data; - for (i = 0; i < entries; i++) { - dest = buffer[i]; - s_copy = CXL_AI_BUFFER_SIZE; - - if ((need_header) && (i == 0)) { - /* add adapter image header */ - memcpy(buffer[i], header, sizeof(struct ai_header)); - s_copy = CXL_AI_BUFFER_SIZE - CXL_AI_HEADER_SIZE; - dest += CXL_AI_HEADER_SIZE; /* image offset */ - } - if ((i == (entries - 1)) && mod) - s_copy = mod; - - /* copy data */ - if (copy_from_user(dest, from, s_copy)) - goto err; - - /* fill in the list */ - le[i].phys_addr = cpu_to_be64(virt_to_phys(buffer[i])); - le[i].len = cpu_to_be64(CXL_AI_BUFFER_SIZE); - if ((i == (entries - 1)) && mod) - le[i].len = cpu_to_be64(mod); - from += s_copy; - } - pr_devel("%s (op: %i, need header: %i, entries: %i, token: %#llx)\n", - __func__, operation, need_header, entries, continue_token); - - /* - * download/validate the adapter image to the coherent - * platform facility - */ - rc = fct(adapter->guest->handle, virt_to_phys(le), entries, - &continue_token); - if (rc == 0) /* success of download/validation operation */ - continue_token = 0; - -err: - kfree(header); - - return rc; -} - -static int transfer_image(struct cxl *adapter, int operation, - struct cxl_adapter_image *ai) -{ - int rc = 0; - int afu; - - switch (operation) { - case DOWNLOAD_IMAGE: - rc = handle_image(adapter, operation, - &cxl_h_download_adapter_image, ai); - if (rc < 0) { - pr_devel("resetting adapter\n"); - cxl_h_reset_adapter(adapter->guest->handle); - } - return rc; - - case VALIDATE_IMAGE: - rc = handle_image(adapter, operation, - &cxl_h_validate_adapter_image, ai); - if (rc < 0) { - pr_devel("resetting adapter\n"); - cxl_h_reset_adapter(adapter->guest->handle); - return rc; - } - if (rc == 0) { - pr_devel("remove current afu\n"); - for (afu = 0; afu < adapter->slices; 
afu++) - cxl_guest_remove_afu(adapter->afu[afu]); - - pr_devel("resetting adapter\n"); - cxl_h_reset_adapter(adapter->guest->handle); - - /* The entire image has now been - * downloaded and the validation has - * been successfully performed. - * After that, the partition should call - * ibm,update-nodes and - * ibm,update-properties to receive the - * current configuration - */ - rc = update_devicetree(adapter, DEVICE_SCOPE); - transfer = 1; - } - return rc; - } - - return -EINVAL; -} - -static long ioctl_transfer_image(struct cxl *adapter, int operation, - struct cxl_adapter_image __user *uai) -{ - struct cxl_adapter_image ai; - - pr_devel("%s\n", __func__); - - if (copy_from_user(&ai, uai, sizeof(struct cxl_adapter_image))) - return -EFAULT; - - /* - * Make sure reserved fields and bits are set to 0 - */ - if (ai.reserved1 || ai.reserved2 || ai.reserved3 || ai.reserved4 || - (ai.flags & ~CXL_AI_ALL)) - return -EINVAL; - - return transfer_image(adapter, operation, &ai); -} - -static int device_open(struct inode *inode, struct file *file) -{ - int adapter_num = CXL_DEVT_ADAPTER(inode->i_rdev); - struct cxl *adapter; - int rc = 0, i; - - pr_devel("in %s\n", __func__); - - BUG_ON(sizeof(struct ai_header) != CXL_AI_HEADER_SIZE); - - /* Allows one process to open the device by using a semaphore */ - if (down_interruptible(&sem) != 0) - return -EPERM; - - if (!(adapter = get_cxl_adapter(adapter_num))) { - rc = -ENODEV; - goto err_unlock; - } - - file->private_data = adapter; - continue_token = 0; - transfer = 0; - - for (i = 0; i < CXL_AI_MAX_ENTRIES; i++) - buffer[i] = NULL; - - /* aligned buffer containing list entries which describes up to - * 1 megabyte of data (256 entries of 4096 bytes each) - * Logical real address of buffer 0 - Buffer 0 length in bytes - * Logical real address of buffer 1 - Buffer 1 length in bytes - * Logical real address of buffer 2 - Buffer 2 length in bytes - * .... - * .... 
- * Logical real address of buffer N - Buffer N length in bytes - */ - le = (struct sg_list *)get_zeroed_page(GFP_KERNEL); - if (!le) { - rc = -ENOMEM; - goto err; - } - - for (i = 0; i < CXL_AI_MAX_ENTRIES; i++) { - buffer[i] = (unsigned long *)get_zeroed_page(GFP_KERNEL); - if (!buffer[i]) { - rc = -ENOMEM; - goto err1; - } - } - - return 0; - -err1: - for (i = 0; i < CXL_AI_MAX_ENTRIES; i++) { - if (buffer[i]) - free_page((unsigned long) buffer[i]); - } - - if (le) - free_page((unsigned long) le); -err: - put_device(&adapter->dev); -err_unlock: - up(&sem); - - return rc; -} - -static long device_ioctl(struct file *file, unsigned int cmd, unsigned long arg) -{ - struct cxl *adapter = file->private_data; - - pr_devel("in %s\n", __func__); - - if (cmd == CXL_IOCTL_DOWNLOAD_IMAGE) - return ioctl_transfer_image(adapter, - DOWNLOAD_IMAGE, - (struct cxl_adapter_image __user *)arg); - else if (cmd == CXL_IOCTL_VALIDATE_IMAGE) - return ioctl_transfer_image(adapter, - VALIDATE_IMAGE, - (struct cxl_adapter_image __user *)arg); - else - return -EINVAL; -} - -static int device_close(struct inode *inode, struct file *file) -{ - struct cxl *adapter = file->private_data; - int i; - - pr_devel("in %s\n", __func__); - - for (i = 0; i < CXL_AI_MAX_ENTRIES; i++) { - if (buffer[i]) - free_page((unsigned long) buffer[i]); - } - - if (le) - free_page((unsigned long) le); - - up(&sem); - put_device(&adapter->dev); - continue_token = 0; - - /* reload the module */ - if (transfer) - cxl_guest_reload_module(adapter); - else { - pr_devel("resetting adapter\n"); - cxl_h_reset_adapter(adapter->guest->handle); - } - - transfer = 0; - return 0; -} - -static const struct file_operations fops = { - .owner = THIS_MODULE, - .open = device_open, - .unlocked_ioctl = device_ioctl, - .compat_ioctl = compat_ptr_ioctl, - .release = device_close, -}; - -void cxl_guest_remove_chardev(struct cxl *adapter) -{ - cdev_del(&adapter->guest->cdev); -} - -int cxl_guest_add_chardev(struct cxl *adapter) -{ - dev_t 
devt; - int rc; - - devt = MKDEV(MAJOR(cxl_get_dev()), CXL_CARD_MINOR(adapter)); - cdev_init(&adapter->guest->cdev, &fops); - if ((rc = cdev_add(&adapter->guest->cdev, devt, 1))) { - dev_err(&adapter->dev, - "Unable to add chardev on adapter (card%i): %i\n", - adapter->adapter_num, rc); - goto err; - } - adapter->dev.devt = devt; - sema_init(&sem, 1); -err: - return rc; -} diff --git a/drivers/misc/cxl/guest.c b/drivers/misc/cxl/guest.c deleted file mode 100644 index fb95a2d5cef4..000000000000 --- a/drivers/misc/cxl/guest.c +++ /dev/null @@ -1,1208 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright 2015 IBM Corp. - */ - -#include <linux/spinlock.h> -#include <linux/uaccess.h> -#include <linux/delay.h> -#include <linux/irqdomain.h> -#include <linux/platform_device.h> - -#include "cxl.h" -#include "hcalls.h" -#include "trace.h" - -#define CXL_ERROR_DETECTED_EVENT 1 -#define CXL_SLOT_RESET_EVENT 2 -#define CXL_RESUME_EVENT 3 - -static void pci_error_handlers(struct cxl_afu *afu, - int bus_error_event, - pci_channel_state_t state) -{ - struct pci_dev *afu_dev; - struct pci_driver *afu_drv; - const struct pci_error_handlers *err_handler; - - if (afu->phb == NULL) - return; - - list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) { - afu_drv = to_pci_driver(afu_dev->dev.driver); - if (!afu_drv) - continue; - - err_handler = afu_drv->err_handler; - switch (bus_error_event) { - case CXL_ERROR_DETECTED_EVENT: - afu_dev->error_state = state; - - if (err_handler && - err_handler->error_detected) - err_handler->error_detected(afu_dev, state); - break; - case CXL_SLOT_RESET_EVENT: - afu_dev->error_state = state; - - if (err_handler && - err_handler->slot_reset) - err_handler->slot_reset(afu_dev); - break; - case CXL_RESUME_EVENT: - if (err_handler && - err_handler->resume) - err_handler->resume(afu_dev); - break; - } - } -} - -static irqreturn_t guest_handle_psl_slice_error(struct cxl_context *ctx, u64 dsisr, - u64 errstat) -{ - pr_devel("in 
%s\n", __func__); - dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%.16llx\n", errstat); - - return cxl_ops->ack_irq(ctx, 0, errstat); -} - -static ssize_t guest_collect_vpd(struct cxl *adapter, struct cxl_afu *afu, - void *buf, size_t len) -{ - unsigned int entries, mod; - unsigned long **vpd_buf = NULL; - struct sg_list *le; - int rc = 0, i, tocopy; - u64 out = 0; - - if (buf == NULL) - return -EINVAL; - - /* number of entries in the list */ - entries = len / SG_BUFFER_SIZE; - mod = len % SG_BUFFER_SIZE; - if (mod) - entries++; - - if (entries > SG_MAX_ENTRIES) { - entries = SG_MAX_ENTRIES; - len = SG_MAX_ENTRIES * SG_BUFFER_SIZE; - mod = 0; - } - - vpd_buf = kcalloc(entries, sizeof(unsigned long *), GFP_KERNEL); - if (!vpd_buf) - return -ENOMEM; - - le = (struct sg_list *)get_zeroed_page(GFP_KERNEL); - if (!le) { - rc = -ENOMEM; - goto err1; - } - - for (i = 0; i < entries; i++) { - vpd_buf[i] = (unsigned long *)get_zeroed_page(GFP_KERNEL); - if (!vpd_buf[i]) { - rc = -ENOMEM; - goto err2; - } - le[i].phys_addr = cpu_to_be64(virt_to_phys(vpd_buf[i])); - le[i].len = cpu_to_be64(SG_BUFFER_SIZE); - if ((i == (entries - 1)) && mod) - le[i].len = cpu_to_be64(mod); - } - - if (adapter) - rc = cxl_h_collect_vpd_adapter(adapter->guest->handle, - virt_to_phys(le), entries, &out); - else - rc = cxl_h_collect_vpd(afu->guest->handle, 0, - virt_to_phys(le), entries, &out); - pr_devel("length of available (entries: %i), vpd: %#llx\n", - entries, out); - - if (!rc) { - /* - * hcall returns in 'out' the size of available VPDs. - * It fills the buffer with as much data as possible. 
- */ - if (out < len) - len = out; - rc = len; - if (out) { - for (i = 0; i < entries; i++) { - if (len < SG_BUFFER_SIZE) - tocopy = len; - else - tocopy = SG_BUFFER_SIZE; - memcpy(buf, vpd_buf[i], tocopy); - buf += tocopy; - len -= tocopy; - } - } - } -err2: - for (i = 0; i < entries; i++) { - if (vpd_buf[i]) - free_page((unsigned long) vpd_buf[i]); - } - free_page((unsigned long) le); -err1: - kfree(vpd_buf); - return rc; -} - -static int guest_get_irq_info(struct cxl_context *ctx, struct cxl_irq_info *info) -{ - return cxl_h_collect_int_info(ctx->afu->guest->handle, ctx->process_token, info); -} - -static irqreturn_t guest_psl_irq(int irq, void *data) -{ - struct cxl_context *ctx = data; - struct cxl_irq_info irq_info; - int rc; - - pr_devel("%d: received PSL interrupt %i\n", ctx->pe, irq); - rc = guest_get_irq_info(ctx, &irq_info); - if (rc) { - WARN(1, "Unable to get IRQ info: %i\n", rc); - return IRQ_HANDLED; - } - - rc = cxl_irq_psl8(irq, ctx, &irq_info); - return rc; -} - -static int afu_read_error_state(struct cxl_afu *afu, int *state_out) -{ - u64 state; - int rc = 0; - - if (!afu) - return -EIO; - - rc = cxl_h_read_error_state(afu->guest->handle, &state); - if (!rc) { - WARN_ON(state != H_STATE_NORMAL && - state != H_STATE_DISABLE && - state != H_STATE_TEMP_UNAVAILABLE && - state != H_STATE_PERM_UNAVAILABLE); - *state_out = state & 0xffffffff; - } - return rc; -} - -static irqreturn_t guest_slice_irq_err(int irq, void *data) -{ - struct cxl_afu *afu = data; - int rc; - u64 serr, afu_error, dsisr; - - rc = cxl_h_get_fn_error_interrupt(afu->guest->handle, &serr); - if (rc) { - dev_crit(&afu->dev, "Couldn't read PSL_SERR_An: %d\n", rc); - return IRQ_HANDLED; - } - afu_error = cxl_p2n_read(afu, CXL_AFU_ERR_An); - dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An); - cxl_afu_decode_psl_serr(afu, serr); - dev_crit(&afu->dev, "AFU_ERR_An: 0x%.16llx\n", afu_error); - dev_crit(&afu->dev, "PSL_DSISR_An: 0x%.16llx\n", dsisr); - - rc = 
cxl_h_ack_fn_error_interrupt(afu->guest->handle, serr); - if (rc) - dev_crit(&afu->dev, "Couldn't ack slice error interrupt: %d\n", - rc); - - return IRQ_HANDLED; -} - - -static int irq_alloc_range(struct cxl *adapter, int len, int *irq) -{ - int i, n; - struct irq_avail *cur; - - for (i = 0; i < adapter->guest->irq_nranges; i++) { - cur = &adapter->guest->irq_avail[i]; - n = bitmap_find_next_zero_area(cur->bitmap, cur->range, - 0, len, 0); - if (n < cur->range) { - bitmap_set(cur->bitmap, n, len); - *irq = cur->offset + n; - pr_devel("guest: allocate IRQs %#x->%#x\n", - *irq, *irq + len - 1); - - return 0; - } - } - return -ENOSPC; -} - -static int irq_free_range(struct cxl *adapter, int irq, int len) -{ - int i, n; - struct irq_avail *cur; - - if (len == 0) - return -ENOENT; - - for (i = 0; i < adapter->guest->irq_nranges; i++) { - cur = &adapter->guest->irq_avail[i]; - if (irq >= cur->offset && - (irq + len) <= (cur->offset + cur->range)) { - n = irq - cur->offset; - bitmap_clear(cur->bitmap, n, len); - pr_devel("guest: release IRQs %#x->%#x\n", - irq, irq + len - 1); - return 0; - } - } - return -ENOENT; -} - -static int guest_reset(struct cxl *adapter) -{ - struct cxl_afu *afu = NULL; - int i, rc; - - pr_devel("Adapter reset request\n"); - spin_lock(&adapter->afu_list_lock); - for (i = 0; i < adapter->slices; i++) { - if ((afu = adapter->afu[i])) { - pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT, - pci_channel_io_frozen); - cxl_context_detach_all(afu); - } - } - - rc = cxl_h_reset_adapter(adapter->guest->handle); - for (i = 0; i < adapter->slices; i++) { - if (!rc && (afu = adapter->afu[i])) { - pci_error_handlers(afu, CXL_SLOT_RESET_EVENT, - pci_channel_io_normal); - pci_error_handlers(afu, CXL_RESUME_EVENT, 0); - } - } - spin_unlock(&adapter->afu_list_lock); - return rc; -} - -static int guest_alloc_one_irq(struct cxl *adapter) -{ - int irq; - - spin_lock(&adapter->guest->irq_alloc_lock); - if (irq_alloc_range(adapter, 1, &irq)) - irq = -ENOSPC; - 
spin_unlock(&adapter->guest->irq_alloc_lock); - return irq; -} - -static void guest_release_one_irq(struct cxl *adapter, int irq) -{ - spin_lock(&adapter->guest->irq_alloc_lock); - irq_free_range(adapter, irq, 1); - spin_unlock(&adapter->guest->irq_alloc_lock); -} - -static int guest_alloc_irq_ranges(struct cxl_irq_ranges *irqs, - struct cxl *adapter, unsigned int num) -{ - int i, try, irq; - - memset(irqs, 0, sizeof(struct cxl_irq_ranges)); - - spin_lock(&adapter->guest->irq_alloc_lock); - for (i = 0; i < CXL_IRQ_RANGES && num; i++) { - try = num; - while (try) { - if (irq_alloc_range(adapter, try, &irq) == 0) - break; - try /= 2; - } - if (!try) - goto error; - irqs->offset[i] = irq; - irqs->range[i] = try; - num -= try; - } - if (num) - goto error; - spin_unlock(&adapter->guest->irq_alloc_lock); - return 0; - -error: - for (i = 0; i < CXL_IRQ_RANGES; i++) - irq_free_range(adapter, irqs->offset[i], irqs->range[i]); - spin_unlock(&adapter->guest->irq_alloc_lock); - return -ENOSPC; -} - -static void guest_release_irq_ranges(struct cxl_irq_ranges *irqs, - struct cxl *adapter) -{ - int i; - - spin_lock(&adapter->guest->irq_alloc_lock); - for (i = 0; i < CXL_IRQ_RANGES; i++) - irq_free_range(adapter, irqs->offset[i], irqs->range[i]); - spin_unlock(&adapter->guest->irq_alloc_lock); -} - -static int guest_register_serr_irq(struct cxl_afu *afu) -{ - afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err", - dev_name(&afu->dev)); - if (!afu->err_irq_name) - return -ENOMEM; - - if (!(afu->serr_virq = cxl_map_irq(afu->adapter, afu->serr_hwirq, - guest_slice_irq_err, afu, afu->err_irq_name))) { - kfree(afu->err_irq_name); - afu->err_irq_name = NULL; - return -ENOMEM; - } - - return 0; -} - -static void guest_release_serr_irq(struct cxl_afu *afu) -{ - cxl_unmap_irq(afu->serr_virq, afu); - cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq); - kfree(afu->err_irq_name); -} - -static int guest_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask) -{ - return 
cxl_h_control_faults(ctx->afu->guest->handle, ctx->process_token, - tfc >> 32, (psl_reset_mask != 0)); -} - -static void disable_afu_irqs(struct cxl_context *ctx) -{ - irq_hw_number_t hwirq; - unsigned int virq; - int r, i; - - pr_devel("Disabling AFU(%d) interrupts\n", ctx->afu->slice); - for (r = 0; r < CXL_IRQ_RANGES; r++) { - hwirq = ctx->irqs.offset[r]; - for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) { - virq = irq_find_mapping(NULL, hwirq); - disable_irq(virq); - } - } -} - -static void enable_afu_irqs(struct cxl_context *ctx) -{ - irq_hw_number_t hwirq; - unsigned int virq; - int r, i; - - pr_devel("Enabling AFU(%d) interrupts\n", ctx->afu->slice); - for (r = 0; r < CXL_IRQ_RANGES; r++) { - hwirq = ctx->irqs.offset[r]; - for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) { - virq = irq_find_mapping(NULL, hwirq); - enable_irq(virq); - } - } -} - -static int _guest_afu_cr_readXX(int sz, struct cxl_afu *afu, int cr_idx, - u64 offset, u64 *val) -{ - unsigned long cr; - char c; - int rc = 0; - - if (afu->crs_len < sz) - return -ENOENT; - - if (unlikely(offset >= afu->crs_len)) - return -ERANGE; - - cr = get_zeroed_page(GFP_KERNEL); - if (!cr) - return -ENOMEM; - - rc = cxl_h_get_config(afu->guest->handle, cr_idx, offset, - virt_to_phys((void *)cr), sz); - if (rc) - goto err; - - switch (sz) { - case 1: - c = *((char *) cr); - *val = c; - break; - case 2: - *val = in_le16((u16 *)cr); - break; - case 4: - *val = in_le32((unsigned *)cr); - break; - case 8: - *val = in_le64((u64 *)cr); - break; - default: - WARN_ON(1); - } -err: - free_page(cr); - return rc; -} - -static int guest_afu_cr_read32(struct cxl_afu *afu, int cr_idx, u64 offset, - u32 *out) -{ - int rc; - u64 val; - - rc = _guest_afu_cr_readXX(4, afu, cr_idx, offset, &val); - if (!rc) - *out = (u32) val; - return rc; -} - -static int guest_afu_cr_read16(struct cxl_afu *afu, int cr_idx, u64 offset, - u16 *out) -{ - int rc; - u64 val; - - rc = _guest_afu_cr_readXX(2, afu, cr_idx, offset, &val); - if (!rc) 
- *out = (u16) val; - return rc; -} - -static int guest_afu_cr_read8(struct cxl_afu *afu, int cr_idx, u64 offset, - u8 *out) -{ - int rc; - u64 val; - - rc = _guest_afu_cr_readXX(1, afu, cr_idx, offset, &val); - if (!rc) - *out = (u8) val; - return rc; -} - -static int guest_afu_cr_read64(struct cxl_afu *afu, int cr_idx, u64 offset, - u64 *out) -{ - return _guest_afu_cr_readXX(8, afu, cr_idx, offset, out); -} - -static int guest_afu_cr_write32(struct cxl_afu *afu, int cr, u64 off, u32 in) -{ - /* config record is not writable from guest */ - return -EPERM; -} - -static int guest_afu_cr_write16(struct cxl_afu *afu, int cr, u64 off, u16 in) -{ - /* config record is not writable from guest */ - return -EPERM; -} - -static int guest_afu_cr_write8(struct cxl_afu *afu, int cr, u64 off, u8 in) -{ - /* config record is not writable from guest */ - return -EPERM; -} - -static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr) -{ - struct cxl_process_element_hcall *elem; - struct cxl *adapter = ctx->afu->adapter; - const struct cred *cred; - u32 pid, idx; - int rc, r, i; - u64 mmio_addr, mmio_size; - __be64 flags = 0; - - /* Must be 8 byte aligned and cannot cross a 4096 byte boundary */ - if (!(elem = (struct cxl_process_element_hcall *) - get_zeroed_page(GFP_KERNEL))) - return -ENOMEM; - - elem->version = cpu_to_be64(CXL_PROCESS_ELEMENT_VERSION); - if (ctx->kernel) { - pid = 0; - flags |= CXL_PE_TRANSLATION_ENABLED; - flags |= CXL_PE_PRIVILEGED_PROCESS; - if (mfmsr() & MSR_SF) - flags |= CXL_PE_64_BIT; - } else { - pid = current->pid; - flags |= CXL_PE_PROBLEM_STATE; - flags |= CXL_PE_TRANSLATION_ENABLED; - if (!test_tsk_thread_flag(current, TIF_32BIT)) - flags |= CXL_PE_64_BIT; - cred = get_current_cred(); - if (uid_eq(cred->euid, GLOBAL_ROOT_UID)) - flags |= CXL_PE_PRIVILEGED_PROCESS; - put_cred(cred); - } - elem->flags = cpu_to_be64(flags); - elem->common.tid = cpu_to_be32(0); /* Unused */ - elem->common.pid = cpu_to_be32(pid); - elem->common.csrp = 
cpu_to_be64(0); /* disable */ - elem->common.u.psl8.aurp0 = cpu_to_be64(0); /* disable */ - elem->common.u.psl8.aurp1 = cpu_to_be64(0); /* disable */ - - cxl_prefault(ctx, wed); - - elem->common.u.psl8.sstp0 = cpu_to_be64(ctx->sstp0); - elem->common.u.psl8.sstp1 = cpu_to_be64(ctx->sstp1); - - /* - * Ensure we have at least one interrupt allocated to take faults for - * kernel contexts that may not have allocated any AFU IRQs at all: - */ - if (ctx->irqs.range[0] == 0) { - rc = afu_register_irqs(ctx, 0); - if (rc) - goto out_free; - } - - for (r = 0; r < CXL_IRQ_RANGES; r++) { - for (i = 0; i < ctx->irqs.range[r]; i++) { - if (r == 0 && i == 0) { - elem->pslVirtualIsn = cpu_to_be32(ctx->irqs.offset[0]); - } else { - idx = ctx->irqs.offset[r] + i - adapter->guest->irq_base_offset; - elem->applicationVirtualIsnBitmap[idx / 8] |= 0x80 >> (idx % 8); - } - } - } - elem->common.amr = cpu_to_be64(amr); - elem->common.wed = cpu_to_be64(wed); - - disable_afu_irqs(ctx); - - rc = cxl_h_attach_process(ctx->afu->guest->handle, elem, - &ctx->process_token, &mmio_addr, &mmio_size); - if (rc == H_SUCCESS) { - if (ctx->master || !ctx->afu->pp_psa) { - ctx->psn_phys = ctx->afu->psn_phys; - ctx->psn_size = ctx->afu->adapter->ps_size; - } else { - ctx->psn_phys = mmio_addr; - ctx->psn_size = mmio_size; - } - if (ctx->afu->pp_psa && mmio_size && - ctx->afu->pp_size == 0) { - /* - * There's no property in the device tree to read the - * pp_size. We only find out at the 1st attach. - * Compared to bare-metal, it is too late and we - * should really lock here. However, on powerVM, - * pp_size is really only used to display in /sys. - * Being discussed with pHyp for their next release. 
- */ - ctx->afu->pp_size = mmio_size; - } - /* from PAPR: process element is bytes 4-7 of process token */ - ctx->external_pe = ctx->process_token & 0xFFFFFFFF; - pr_devel("CXL pe=%i is known as %i for pHyp, mmio_size=%#llx", - ctx->pe, ctx->external_pe, ctx->psn_size); - ctx->pe_inserted = true; - enable_afu_irqs(ctx); - } - -out_free: - free_page((u64)elem); - return rc; -} - -static int guest_attach_process(struct cxl_context *ctx, bool kernel, u64 wed, u64 amr) -{ - pr_devel("in %s\n", __func__); - - ctx->kernel = kernel; - if (ctx->afu->current_mode == CXL_MODE_DIRECTED) - return attach_afu_directed(ctx, wed, amr); - - /* dedicated mode not supported on FW840 */ - - return -EINVAL; -} - -static int detach_afu_directed(struct cxl_context *ctx) -{ - if (!ctx->pe_inserted) - return 0; - if (cxl_h_detach_process(ctx->afu->guest->handle, ctx->process_token)) - return -1; - return 0; -} - -static int guest_detach_process(struct cxl_context *ctx) -{ - pr_devel("in %s\n", __func__); - trace_cxl_detach(ctx); - - if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) - return -EIO; - - if (ctx->afu->current_mode == CXL_MODE_DIRECTED) - return detach_afu_directed(ctx); - - return -EINVAL; -} - -static void guest_release_afu(struct device *dev) -{ - struct cxl_afu *afu = to_cxl_afu(dev); - - pr_devel("%s\n", __func__); - - idr_destroy(&afu->contexts_idr); - - kfree(afu->guest); - kfree(afu); -} - -ssize_t cxl_guest_read_afu_vpd(struct cxl_afu *afu, void *buf, size_t len) -{ - return guest_collect_vpd(NULL, afu, buf, len); -} - -#define ERR_BUFF_MAX_COPY_SIZE PAGE_SIZE -static ssize_t guest_afu_read_err_buffer(struct cxl_afu *afu, char *buf, - loff_t off, size_t count) -{ - void *tbuf = NULL; - int rc = 0; - - tbuf = (void *) get_zeroed_page(GFP_KERNEL); - if (!tbuf) - return -ENOMEM; - - rc = cxl_h_get_afu_err(afu->guest->handle, - off & 0x7, - virt_to_phys(tbuf), - count); - if (rc) - goto err; - - if (count > ERR_BUFF_MAX_COPY_SIZE) - count = ERR_BUFF_MAX_COPY_SIZE - (off 
& 0x7); - memcpy(buf, tbuf, count); -err: - free_page((u64)tbuf); - - return rc; -} - -static int guest_afu_check_and_enable(struct cxl_afu *afu) -{ - return 0; -} - -static bool guest_support_attributes(const char *attr_name, - enum cxl_attrs type) -{ - switch (type) { - case CXL_ADAPTER_ATTRS: - if ((strcmp(attr_name, "base_image") == 0) || - (strcmp(attr_name, "load_image_on_perst") == 0) || - (strcmp(attr_name, "perst_reloads_same_image") == 0) || - (strcmp(attr_name, "image_loaded") == 0)) - return false; - break; - case CXL_AFU_MASTER_ATTRS: - if ((strcmp(attr_name, "pp_mmio_off") == 0)) - return false; - break; - case CXL_AFU_ATTRS: - break; - default: - break; - } - - return true; -} - -static int activate_afu_directed(struct cxl_afu *afu) -{ - int rc; - - dev_info(&afu->dev, "Activating AFU(%d) directed mode\n", afu->slice); - - afu->current_mode = CXL_MODE_DIRECTED; - - afu->num_procs = afu->max_procs_virtualised; - - if ((rc = cxl_chardev_m_afu_add(afu))) - return rc; - - if ((rc = cxl_sysfs_afu_m_add(afu))) - goto err; - - if ((rc = cxl_chardev_s_afu_add(afu))) - goto err1; - - return 0; -err1: - cxl_sysfs_afu_m_remove(afu); -err: - cxl_chardev_afu_remove(afu); - return rc; -} - -static int guest_afu_activate_mode(struct cxl_afu *afu, int mode) -{ - if (!mode) - return 0; - if (!(mode & afu->modes_supported)) - return -EINVAL; - - if (mode == CXL_MODE_DIRECTED) - return activate_afu_directed(afu); - - if (mode == CXL_MODE_DEDICATED) - dev_err(&afu->dev, "Dedicated mode not supported\n"); - - return -EINVAL; -} - -static int deactivate_afu_directed(struct cxl_afu *afu) -{ - dev_info(&afu->dev, "Deactivating AFU(%d) directed mode\n", afu->slice); - - afu->current_mode = 0; - afu->num_procs = 0; - - cxl_sysfs_afu_m_remove(afu); - cxl_chardev_afu_remove(afu); - - cxl_ops->afu_reset(afu); - - return 0; -} - -static int guest_afu_deactivate_mode(struct cxl_afu *afu, int mode) -{ - if (!mode) - return 0; - if (!(mode & afu->modes_supported)) - return -EINVAL; 
- - if (mode == CXL_MODE_DIRECTED) - return deactivate_afu_directed(afu); - return 0; -} - -static int guest_afu_reset(struct cxl_afu *afu) -{ - pr_devel("AFU(%d) reset request\n", afu->slice); - return cxl_h_reset_afu(afu->guest->handle); -} - -static int guest_map_slice_regs(struct cxl_afu *afu) -{ - if (!(afu->p2n_mmio = ioremap(afu->guest->p2n_phys, afu->guest->p2n_size))) { - dev_err(&afu->dev, "Error mapping AFU(%d) MMIO regions\n", - afu->slice); - return -ENOMEM; - } - return 0; -} - -static void guest_unmap_slice_regs(struct cxl_afu *afu) -{ - if (afu->p2n_mmio) - iounmap(afu->p2n_mmio); -} - -static int afu_update_state(struct cxl_afu *afu) -{ - int rc, cur_state; - - rc = afu_read_error_state(afu, &cur_state); - if (rc) - return rc; - - if (afu->guest->previous_state == cur_state) - return 0; - - pr_devel("AFU(%d) update state to %#x\n", afu->slice, cur_state); - - switch (cur_state) { - case H_STATE_NORMAL: - afu->guest->previous_state = cur_state; - break; - - case H_STATE_DISABLE: - pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT, - pci_channel_io_frozen); - - cxl_context_detach_all(afu); - if ((rc = cxl_ops->afu_reset(afu))) - pr_devel("reset hcall failed %d\n", rc); - - rc = afu_read_error_state(afu, &cur_state); - if (!rc && cur_state == H_STATE_NORMAL) { - pci_error_handlers(afu, CXL_SLOT_RESET_EVENT, - pci_channel_io_normal); - pci_error_handlers(afu, CXL_RESUME_EVENT, 0); - } - afu->guest->previous_state = 0; - break; - - case H_STATE_TEMP_UNAVAILABLE: - afu->guest->previous_state = cur_state; - break; - - case H_STATE_PERM_UNAVAILABLE: - dev_err(&afu->dev, "AFU is in permanent error state\n"); - pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT, - pci_channel_io_perm_failure); - afu->guest->previous_state = cur_state; - break; - - default: - pr_err("Unexpected AFU(%d) error state: %#x\n", - afu->slice, cur_state); - return -EINVAL; - } - - return rc; -} - -static void afu_handle_errstate(struct work_struct *work) -{ - struct cxl_afu_guest 
*afu_guest = - container_of(to_delayed_work(work), struct cxl_afu_guest, work_err); - - if (!afu_update_state(afu_guest->parent) && - afu_guest->previous_state == H_STATE_PERM_UNAVAILABLE) - return; - - if (afu_guest->handle_err) - schedule_delayed_work(&afu_guest->work_err, - msecs_to_jiffies(3000)); -} - -static bool guest_link_ok(struct cxl *cxl, struct cxl_afu *afu) -{ - int state; - - if (afu && (!afu_read_error_state(afu, &state))) { - if (state == H_STATE_NORMAL) - return true; - } - - return false; -} - -static int afu_properties_look_ok(struct cxl_afu *afu) -{ - if (afu->pp_irqs < 0) { - dev_err(&afu->dev, "Unexpected per-process minimum interrupt value\n"); - return -EINVAL; - } - - if (afu->max_procs_virtualised < 1) { - dev_err(&afu->dev, "Unexpected max number of processes virtualised value\n"); - return -EINVAL; - } - - return 0; -} - -int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_np) -{ - struct cxl_afu *afu; - bool free = true; - int rc; - - pr_devel("in %s - AFU(%d)\n", __func__, slice); - if (!(afu = cxl_alloc_afu(adapter, slice))) - return -ENOMEM; - - if (!(afu->guest = kzalloc(sizeof(struct cxl_afu_guest), GFP_KERNEL))) { - kfree(afu); - return -ENOMEM; - } - - if ((rc = dev_set_name(&afu->dev, "afu%i.%i", - adapter->adapter_num, - slice))) - goto err1; - - adapter->slices++; - - if ((rc = cxl_of_read_afu_handle(afu, afu_np))) - goto err1; - - if ((rc = cxl_ops->afu_reset(afu))) - goto err1; - - if ((rc = cxl_of_read_afu_properties(afu, afu_np))) - goto err1; - - if ((rc = afu_properties_look_ok(afu))) - goto err1; - - if ((rc = guest_map_slice_regs(afu))) - goto err1; - - if ((rc = guest_register_serr_irq(afu))) - goto err2; - - /* - * After we call this function we must not free the afu directly, even - * if it returns an error! 
- */ - if ((rc = cxl_register_afu(afu))) - goto err_put_dev; - - if ((rc = cxl_sysfs_afu_add(afu))) - goto err_del_dev; - - /* - * pHyp doesn't expose the programming models supported by the - * AFU. pHyp currently only supports directed mode. If it adds - * dedicated mode later, this version of cxl has no way to - * detect it. So we'll initialize the driver, but the first - * attach will fail. - * Being discussed with pHyp to do better (likely new property) - */ - if (afu->max_procs_virtualised == 1) - afu->modes_supported = CXL_MODE_DEDICATED; - else - afu->modes_supported = CXL_MODE_DIRECTED; - - if ((rc = cxl_afu_select_best_mode(afu))) - goto err_remove_sysfs; - - adapter->afu[afu->slice] = afu; - - afu->enabled = true; - - /* - * wake up the cpu periodically to check the state - * of the AFU using "afu" stored in the guest structure. - */ - afu->guest->parent = afu; - afu->guest->handle_err = true; - INIT_DELAYED_WORK(&afu->guest->work_err, afu_handle_errstate); - schedule_delayed_work(&afu->guest->work_err, msecs_to_jiffies(1000)); - - if ((rc = cxl_pci_vphb_add(afu))) - dev_info(&afu->dev, "Can't register vPHB\n"); - - return 0; - -err_remove_sysfs: - cxl_sysfs_afu_remove(afu); -err_del_dev: - device_del(&afu->dev); -err_put_dev: - put_device(&afu->dev); - free = false; - guest_release_serr_irq(afu); -err2: - guest_unmap_slice_regs(afu); -err1: - if (free) { - kfree(afu->guest); - kfree(afu); - } - return rc; -} - -void cxl_guest_remove_afu(struct cxl_afu *afu) -{ - if (!afu) - return; - - /* flush and stop pending job */ - afu->guest->handle_err = false; - flush_delayed_work(&afu->guest->work_err); - - cxl_pci_vphb_remove(afu); - cxl_sysfs_afu_remove(afu); - - spin_lock(&afu->adapter->afu_list_lock); - afu->adapter->afu[afu->slice] = NULL; - spin_unlock(&afu->adapter->afu_list_lock); - - cxl_context_detach_all(afu); - cxl_ops->afu_deactivate_mode(afu, afu->current_mode); - guest_release_serr_irq(afu); - guest_unmap_slice_regs(afu); - - 
device_unregister(&afu->dev); -} - -static void free_adapter(struct cxl *adapter) -{ - struct irq_avail *cur; - int i; - - if (adapter->guest) { - if (adapter->guest->irq_avail) { - for (i = 0; i < adapter->guest->irq_nranges; i++) { - cur = &adapter->guest->irq_avail[i]; - bitmap_free(cur->bitmap); - } - kfree(adapter->guest->irq_avail); - } - kfree(adapter->guest->status); - kfree(adapter->guest); - } - cxl_remove_adapter_nr(adapter); - kfree(adapter); -} - -static int properties_look_ok(struct cxl *adapter) -{ - /* The absence of this property means that the operational - * status is unknown or okay - */ - if (strlen(adapter->guest->status) && - strcmp(adapter->guest->status, "okay")) { - pr_err("ABORTING:Bad operational status of the device\n"); - return -EINVAL; - } - - return 0; -} - -ssize_t cxl_guest_read_adapter_vpd(struct cxl *adapter, void *buf, size_t len) -{ - return guest_collect_vpd(adapter, NULL, buf, len); -} - -void cxl_guest_remove_adapter(struct cxl *adapter) -{ - pr_devel("in %s\n", __func__); - - cxl_sysfs_adapter_remove(adapter); - - cxl_guest_remove_chardev(adapter); - device_unregister(&adapter->dev); -} - -static void release_adapter(struct device *dev) -{ - free_adapter(to_cxl_adapter(dev)); -} - -struct cxl *cxl_guest_init_adapter(struct device_node *np, struct platform_device *pdev) -{ - struct cxl *adapter; - bool free = true; - int rc; - - if (!(adapter = cxl_alloc_adapter())) - return ERR_PTR(-ENOMEM); - - if (!(adapter->guest = kzalloc(sizeof(struct cxl_guest), GFP_KERNEL))) { - free_adapter(adapter); - return ERR_PTR(-ENOMEM); - } - - adapter->slices = 0; - adapter->guest->pdev = pdev; - adapter->dev.parent = &pdev->dev; - adapter->dev.release = release_adapter; - dev_set_drvdata(&pdev->dev, adapter); - - /* - * Hypervisor controls PSL timebase initialization (p1 register). - * On FW840, PSL is initialized. 
- */ - adapter->psl_timebase_synced = true; - - if ((rc = cxl_of_read_adapter_handle(adapter, np))) - goto err1; - - if ((rc = cxl_of_read_adapter_properties(adapter, np))) - goto err1; - - if ((rc = properties_look_ok(adapter))) - goto err1; - - if ((rc = cxl_guest_add_chardev(adapter))) - goto err1; - - /* - * After we call this function we must not free the adapter directly, - * even if it returns an error! - */ - if ((rc = cxl_register_adapter(adapter))) - goto err_put_dev; - - if ((rc = cxl_sysfs_adapter_add(adapter))) - goto err_del_dev; - - /* release the context lock as the adapter is configured */ - cxl_adapter_context_unlock(adapter); - - return adapter; - -err_del_dev: - device_del(&adapter->dev); -err_put_dev: - put_device(&adapter->dev); - free = false; - cxl_guest_remove_chardev(adapter); -err1: - if (free) - free_adapter(adapter); - return ERR_PTR(rc); -} - -void cxl_guest_reload_module(struct cxl *adapter) -{ - struct platform_device *pdev; - - pdev = adapter->guest->pdev; - cxl_guest_remove_adapter(adapter); - - cxl_of_probe(pdev); -} - -const struct cxl_backend_ops cxl_guest_ops = { - .module = THIS_MODULE, - .adapter_reset = guest_reset, - .alloc_one_irq = guest_alloc_one_irq, - .release_one_irq = guest_release_one_irq, - .alloc_irq_ranges = guest_alloc_irq_ranges, - .release_irq_ranges = guest_release_irq_ranges, - .setup_irq = NULL, - .handle_psl_slice_error = guest_handle_psl_slice_error, - .psl_interrupt = guest_psl_irq, - .ack_irq = guest_ack_irq, - .attach_process = guest_attach_process, - .detach_process = guest_detach_process, - .update_ivtes = NULL, - .support_attributes = guest_support_attributes, - .link_ok = guest_link_ok, - .release_afu = guest_release_afu, - .afu_read_err_buffer = guest_afu_read_err_buffer, - .afu_check_and_enable = guest_afu_check_and_enable, - .afu_activate_mode = guest_afu_activate_mode, - .afu_deactivate_mode = guest_afu_deactivate_mode, - .afu_reset = guest_afu_reset, - .afu_cr_read8 = guest_afu_cr_read8, - 
.afu_cr_read16 = guest_afu_cr_read16, - .afu_cr_read32 = guest_afu_cr_read32, - .afu_cr_read64 = guest_afu_cr_read64, - .afu_cr_write8 = guest_afu_cr_write8, - .afu_cr_write16 = guest_afu_cr_write16, - .afu_cr_write32 = guest_afu_cr_write32, - .read_adapter_vpd = cxl_guest_read_adapter_vpd, -}; diff --git a/drivers/misc/cxl/hcalls.c b/drivers/misc/cxl/hcalls.c deleted file mode 100644 index aba5e20eeb1f..000000000000 --- a/drivers/misc/cxl/hcalls.c +++ /dev/null @@ -1,643 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright 2015 IBM Corp. - */ - - -#include <linux/compiler.h> -#include <linux/types.h> -#include <linux/delay.h> -#include <asm/byteorder.h> -#include "hcalls.h" -#include "trace.h" - -#define CXL_HCALL_TIMEOUT 60000 -#define CXL_HCALL_TIMEOUT_DOWNLOAD 120000 - -#define H_ATTACH_CA_PROCESS 0x344 -#define H_CONTROL_CA_FUNCTION 0x348 -#define H_DETACH_CA_PROCESS 0x34C -#define H_COLLECT_CA_INT_INFO 0x350 -#define H_CONTROL_CA_FAULTS 0x354 -#define H_DOWNLOAD_CA_FUNCTION 0x35C -#define H_DOWNLOAD_CA_FACILITY 0x364 -#define H_CONTROL_CA_FACILITY 0x368 - -#define H_CONTROL_CA_FUNCTION_RESET 1 /* perform a reset */ -#define H_CONTROL_CA_FUNCTION_SUSPEND_PROCESS 2 /* suspend a process from being executed */ -#define H_CONTROL_CA_FUNCTION_RESUME_PROCESS 3 /* resume a process to be executed */ -#define H_CONTROL_CA_FUNCTION_READ_ERR_STATE 4 /* read the error state */ -#define H_CONTROL_CA_FUNCTION_GET_AFU_ERR 5 /* collect the AFU error buffer */ -#define H_CONTROL_CA_FUNCTION_GET_CONFIG 6 /* collect configuration record */ -#define H_CONTROL_CA_FUNCTION_GET_DOWNLOAD_STATE 7 /* query to return download status */ -#define H_CONTROL_CA_FUNCTION_TERMINATE_PROCESS 8 /* terminate the process before completion */ -#define H_CONTROL_CA_FUNCTION_COLLECT_VPD 9 /* collect VPD */ -#define H_CONTROL_CA_FUNCTION_GET_FUNCTION_ERR_INT 11 /* read the function-wide error data based on an interrupt */ -#define H_CONTROL_CA_FUNCTION_ACK_FUNCTION_ERR_INT 12 /* 
acknowledge function-wide error data based on an interrupt */ -#define H_CONTROL_CA_FUNCTION_GET_ERROR_LOG 13 /* retrieve the Platform Log ID (PLID) of an error log */ - -#define H_CONTROL_CA_FAULTS_RESPOND_PSL 1 -#define H_CONTROL_CA_FAULTS_RESPOND_AFU 2 - -#define H_CONTROL_CA_FACILITY_RESET 1 /* perform a reset */ -#define H_CONTROL_CA_FACILITY_COLLECT_VPD 2 /* collect VPD */ - -#define H_DOWNLOAD_CA_FACILITY_DOWNLOAD 1 /* download adapter image */ -#define H_DOWNLOAD_CA_FACILITY_VALIDATE 2 /* validate adapter image */ - - -#define _CXL_LOOP_HCALL(call, rc, retbuf, fn, ...) \ - { \ - unsigned int delay, total_delay = 0; \ - u64 token = 0; \ - \ - memset(retbuf, 0, sizeof(retbuf)); \ - while (1) { \ - rc = call(fn, retbuf, __VA_ARGS__, token); \ - token = retbuf[0]; \ - if (rc != H_BUSY && !H_IS_LONG_BUSY(rc)) \ - break; \ - \ - if (rc == H_BUSY) \ - delay = 10; \ - else \ - delay = get_longbusy_msecs(rc); \ - \ - total_delay += delay; \ - if (total_delay > CXL_HCALL_TIMEOUT) { \ - WARN(1, "Warning: Giving up waiting for CXL hcall " \ - "%#x after %u msec\n", fn, total_delay); \ - rc = H_BUSY; \ - break; \ - } \ - msleep(delay); \ - } \ - } -#define CXL_H_WAIT_UNTIL_DONE(...) _CXL_LOOP_HCALL(plpar_hcall, __VA_ARGS__) -#define CXL_H9_WAIT_UNTIL_DONE(...) _CXL_LOOP_HCALL(plpar_hcall9, __VA_ARGS__) - -#define _PRINT_MSG(rc, format, ...) 
\ - { \ - if ((rc != H_SUCCESS) && (rc != H_CONTINUE)) \ - pr_err(format, __VA_ARGS__); \ - else \ - pr_devel(format, __VA_ARGS__); \ - } \ - - -static char *afu_op_names[] = { - "UNKNOWN_OP", /* 0 undefined */ - "RESET", /* 1 */ - "SUSPEND_PROCESS", /* 2 */ - "RESUME_PROCESS", /* 3 */ - "READ_ERR_STATE", /* 4 */ - "GET_AFU_ERR", /* 5 */ - "GET_CONFIG", /* 6 */ - "GET_DOWNLOAD_STATE", /* 7 */ - "TERMINATE_PROCESS", /* 8 */ - "COLLECT_VPD", /* 9 */ - "UNKNOWN_OP", /* 10 undefined */ - "GET_FUNCTION_ERR_INT", /* 11 */ - "ACK_FUNCTION_ERR_INT", /* 12 */ - "GET_ERROR_LOG", /* 13 */ -}; - -static char *control_adapter_op_names[] = { - "UNKNOWN_OP", /* 0 undefined */ - "RESET", /* 1 */ - "COLLECT_VPD", /* 2 */ -}; - -static char *download_op_names[] = { - "UNKNOWN_OP", /* 0 undefined */ - "DOWNLOAD", /* 1 */ - "VALIDATE", /* 2 */ -}; - -static char *op_str(unsigned int op, char *name_array[], int array_len) -{ - if (op >= array_len) - return "UNKNOWN_OP"; - return name_array[op]; -} - -#define OP_STR(op, name_array) op_str(op, name_array, ARRAY_SIZE(name_array)) - -#define OP_STR_AFU(op) OP_STR(op, afu_op_names) -#define OP_STR_CONTROL_ADAPTER(op) OP_STR(op, control_adapter_op_names) -#define OP_STR_DOWNLOAD_ADAPTER(op) OP_STR(op, download_op_names) - - -long cxl_h_attach_process(u64 unit_address, - struct cxl_process_element_hcall *element, - u64 *process_token, u64 *mmio_addr, u64 *mmio_size) -{ - unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; - long rc; - - CXL_H_WAIT_UNTIL_DONE(rc, retbuf, H_ATTACH_CA_PROCESS, unit_address, virt_to_phys(element)); - _PRINT_MSG(rc, "cxl_h_attach_process(%#.16llx, %#.16lx): %li\n", - unit_address, virt_to_phys(element), rc); - trace_cxl_hcall_attach(unit_address, virt_to_phys(element), retbuf[0], retbuf[1], retbuf[2], rc); - - pr_devel("token: 0x%.8lx mmio_addr: 0x%lx mmio_size: 0x%lx\nProcess Element Structure:\n", - retbuf[0], retbuf[1], retbuf[2]); - cxl_dump_debug_buffer(element, sizeof(*element)); - - switch (rc) { - case H_SUCCESS: 
/* The process info is attached to the coherent platform function */ - *process_token = retbuf[0]; - if (mmio_addr) - *mmio_addr = retbuf[1]; - if (mmio_size) - *mmio_size = retbuf[2]; - return 0; - case H_PARAMETER: /* An incorrect parameter was supplied. */ - case H_FUNCTION: /* The function is not supported. */ - return -EINVAL; - case H_AUTHORITY: /* The partition does not have authority to perform this hcall */ - case H_RESOURCE: /* The coherent platform function does not have enough additional resource to attach the process */ - case H_HARDWARE: /* A hardware event prevented the attach operation */ - case H_STATE: /* The coherent platform function is not in a valid state */ - case H_BUSY: - return -EBUSY; - default: - WARN(1, "Unexpected return code: %lx", rc); - return -EINVAL; - } -} - -/* - * cxl_h_detach_process - Detach a process element from a coherent - * platform function. - */ -long cxl_h_detach_process(u64 unit_address, u64 process_token) -{ - unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; - long rc; - - CXL_H_WAIT_UNTIL_DONE(rc, retbuf, H_DETACH_CA_PROCESS, unit_address, process_token); - _PRINT_MSG(rc, "cxl_h_detach_process(%#.16llx, 0x%.8llx): %li\n", unit_address, process_token, rc); - trace_cxl_hcall_detach(unit_address, process_token, rc); - - switch (rc) { - case H_SUCCESS: /* The process was detached from the coherent platform function */ - return 0; - case H_PARAMETER: /* An incorrect parameter was supplied. 
*/ - return -EINVAL; - case H_AUTHORITY: /* The partition does not have authority to perform this hcall */ - case H_RESOURCE: /* The function has page table mappings for MMIO */ - case H_HARDWARE: /* A hardware event prevented the detach operation */ - case H_STATE: /* The coherent platform function is not in a valid state */ - case H_BUSY: - return -EBUSY; - default: - WARN(1, "Unexpected return code: %lx", rc); - return -EINVAL; - } -} - -/* - * cxl_h_control_function - This H_CONTROL_CA_FUNCTION hypervisor call allows - * the partition to manipulate or query - * certain coherent platform function behaviors. - */ -static long cxl_h_control_function(u64 unit_address, u64 op, - u64 p1, u64 p2, u64 p3, u64 p4, u64 *out) -{ - unsigned long retbuf[PLPAR_HCALL9_BUFSIZE]; - long rc; - - CXL_H9_WAIT_UNTIL_DONE(rc, retbuf, H_CONTROL_CA_FUNCTION, unit_address, op, p1, p2, p3, p4); - _PRINT_MSG(rc, "cxl_h_control_function(%#.16llx, %s(%#llx, %#llx, %#llx, %#llx, R4: %#lx)): %li\n", - unit_address, OP_STR_AFU(op), p1, p2, p3, p4, retbuf[0], rc); - trace_cxl_hcall_control_function(unit_address, OP_STR_AFU(op), p1, p2, p3, p4, retbuf[0], rc); - - switch (rc) { - case H_SUCCESS: /* The operation is completed for the coherent platform function */ - if ((op == H_CONTROL_CA_FUNCTION_GET_FUNCTION_ERR_INT || - op == H_CONTROL_CA_FUNCTION_READ_ERR_STATE || - op == H_CONTROL_CA_FUNCTION_COLLECT_VPD)) - *out = retbuf[0]; - return 0; - case H_PARAMETER: /* An incorrect parameter was supplied. */ - case H_FUNCTION: /* The function is not supported. 
*/ - case H_NOT_FOUND: /* The operation supplied was not valid */ - case H_NOT_AVAILABLE: /* The operation cannot be performed because the AFU has not been downloaded */ - case H_SG_LIST: /* An block list entry was invalid */ - return -EINVAL; - case H_AUTHORITY: /* The partition does not have authority to perform this hcall */ - case H_RESOURCE: /* The function has page table mappings for MMIO */ - case H_HARDWARE: /* A hardware event prevented the attach operation */ - case H_STATE: /* The coherent platform function is not in a valid state */ - case H_BUSY: - return -EBUSY; - default: - WARN(1, "Unexpected return code: %lx", rc); - return -EINVAL; - } -} - -/* - * cxl_h_reset_afu - Perform a reset to the coherent platform function. - */ -long cxl_h_reset_afu(u64 unit_address) -{ - return cxl_h_control_function(unit_address, - H_CONTROL_CA_FUNCTION_RESET, - 0, 0, 0, 0, - NULL); -} - -/* - * cxl_h_suspend_process - Suspend a process from being executed - * Parameter1 = process-token as returned from H_ATTACH_CA_PROCESS when - * process was attached. - */ -long cxl_h_suspend_process(u64 unit_address, u64 process_token) -{ - return cxl_h_control_function(unit_address, - H_CONTROL_CA_FUNCTION_SUSPEND_PROCESS, - process_token, 0, 0, 0, - NULL); -} - -/* - * cxl_h_resume_process - Resume a process to be executed - * Parameter1 = process-token as returned from H_ATTACH_CA_PROCESS when - * process was attached. - */ -long cxl_h_resume_process(u64 unit_address, u64 process_token) -{ - return cxl_h_control_function(unit_address, - H_CONTROL_CA_FUNCTION_RESUME_PROCESS, - process_token, 0, 0, 0, - NULL); -} - -/* - * cxl_h_read_error_state - Checks the error state of the coherent - * platform function. 
- * R4 contains the error state - */ -long cxl_h_read_error_state(u64 unit_address, u64 *state) -{ - return cxl_h_control_function(unit_address, - H_CONTROL_CA_FUNCTION_READ_ERR_STATE, - 0, 0, 0, 0, - state); -} - -/* - * cxl_h_get_afu_err - collect the AFU error buffer - * Parameter1 = byte offset into error buffer to retrieve, valid values - * are between 0 and (ibm,error-buffer-size - 1) - * Parameter2 = 4K aligned real address of error buffer, to be filled in - * Parameter3 = length of error buffer, valid values are 4K or less - */ -long cxl_h_get_afu_err(u64 unit_address, u64 offset, - u64 buf_address, u64 len) -{ - return cxl_h_control_function(unit_address, - H_CONTROL_CA_FUNCTION_GET_AFU_ERR, - offset, buf_address, len, 0, - NULL); -} - -/* - * cxl_h_get_config - collect configuration record for the - * coherent platform function - * Parameter1 = # of configuration record to retrieve, valid values are - * between 0 and (ibm,#config-records - 1) - * Parameter2 = byte offset into configuration record to retrieve, - * valid values are between 0 and (ibm,config-record-size - 1) - * Parameter3 = 4K aligned real address of configuration record buffer, - * to be filled in - * Parameter4 = length of configuration buffer, valid values are 4K or less - */ -long cxl_h_get_config(u64 unit_address, u64 cr_num, u64 offset, - u64 buf_address, u64 len) -{ - return cxl_h_control_function(unit_address, - H_CONTROL_CA_FUNCTION_GET_CONFIG, - cr_num, offset, buf_address, len, - NULL); -} - -/* - * cxl_h_terminate_process - Terminate the process before completion - * Parameter1 = process-token as returned from H_ATTACH_CA_PROCESS when - * process was attached. - */ -long cxl_h_terminate_process(u64 unit_address, u64 process_token) -{ - return cxl_h_control_function(unit_address, - H_CONTROL_CA_FUNCTION_TERMINATE_PROCESS, - process_token, 0, 0, 0, - NULL); -} - -/* - * cxl_h_collect_vpd - Collect VPD for the coherent platform function. 
- * Parameter1 = # of VPD record to retrieve, valid values are between 0 - * and (ibm,#config-records - 1). - * Parameter2 = 4K naturally aligned real buffer containing block - * list entries - * Parameter3 = number of block list entries in the block list, valid - * values are between 0 and 256 - */ -long cxl_h_collect_vpd(u64 unit_address, u64 record, u64 list_address, - u64 num, u64 *out) -{ - return cxl_h_control_function(unit_address, - H_CONTROL_CA_FUNCTION_COLLECT_VPD, - record, list_address, num, 0, - out); -} - -/* - * cxl_h_get_fn_error_interrupt - Read the function-wide error data based on an interrupt - */ -long cxl_h_get_fn_error_interrupt(u64 unit_address, u64 *reg) -{ - return cxl_h_control_function(unit_address, - H_CONTROL_CA_FUNCTION_GET_FUNCTION_ERR_INT, - 0, 0, 0, 0, reg); -} - -/* - * cxl_h_ack_fn_error_interrupt - Acknowledge function-wide error data - * based on an interrupt - * Parameter1 = value to write to the function-wide error interrupt register - */ -long cxl_h_ack_fn_error_interrupt(u64 unit_address, u64 value) -{ - return cxl_h_control_function(unit_address, - H_CONTROL_CA_FUNCTION_ACK_FUNCTION_ERR_INT, - value, 0, 0, 0, - NULL); -} - -/* - * cxl_h_get_error_log - Retrieve the Platform Log ID (PLID) of - * an error log - */ -long cxl_h_get_error_log(u64 unit_address, u64 value) -{ - return cxl_h_control_function(unit_address, - H_CONTROL_CA_FUNCTION_GET_ERROR_LOG, - 0, 0, 0, 0, - NULL); -} - -/* - * cxl_h_collect_int_info - Collect interrupt info about a coherent - * platform function after an interrupt occurred. 
- */ -long cxl_h_collect_int_info(u64 unit_address, u64 process_token, - struct cxl_irq_info *info) -{ - long rc; - - BUG_ON(sizeof(*info) != sizeof(unsigned long[PLPAR_HCALL9_BUFSIZE])); - - rc = plpar_hcall9(H_COLLECT_CA_INT_INFO, (unsigned long *) info, - unit_address, process_token); - _PRINT_MSG(rc, "cxl_h_collect_int_info(%#.16llx, 0x%llx): %li\n", - unit_address, process_token, rc); - trace_cxl_hcall_collect_int_info(unit_address, process_token, rc); - - switch (rc) { - case H_SUCCESS: /* The interrupt info is returned in return registers. */ - pr_devel("dsisr:%#llx, dar:%#llx, dsr:%#llx, pid_tid:%#llx, afu_err:%#llx, errstat:%#llx\n", - info->dsisr, info->dar, info->dsr, info->reserved, - info->afu_err, info->errstat); - return 0; - case H_PARAMETER: /* An incorrect parameter was supplied. */ - return -EINVAL; - case H_AUTHORITY: /* The partition does not have authority to perform this hcall. */ - case H_HARDWARE: /* A hardware event prevented the collection of the interrupt info.*/ - case H_STATE: /* The coherent platform function is not in a valid state to collect interrupt info. */ - return -EBUSY; - default: - WARN(1, "Unexpected return code: %lx", rc); - return -EINVAL; - } -} - -/* - * cxl_h_control_faults - Control the operation of a coherent platform - * function after a fault occurs. 
- * - * Parameters - * control-mask: value to control the faults - * looks like PSL_TFC_An shifted >> 32 - * reset-mask: mask to control reset of function faults - * Set reset_mask = 1 to reset PSL errors - */ -long cxl_h_control_faults(u64 unit_address, u64 process_token, - u64 control_mask, u64 reset_mask) -{ - unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; - long rc; - - memset(retbuf, 0, sizeof(retbuf)); - - rc = plpar_hcall(H_CONTROL_CA_FAULTS, retbuf, unit_address, - H_CONTROL_CA_FAULTS_RESPOND_PSL, process_token, - control_mask, reset_mask); - _PRINT_MSG(rc, "cxl_h_control_faults(%#.16llx, 0x%llx, %#llx, %#llx): %li (%#lx)\n", - unit_address, process_token, control_mask, reset_mask, - rc, retbuf[0]); - trace_cxl_hcall_control_faults(unit_address, process_token, - control_mask, reset_mask, retbuf[0], rc); - - switch (rc) { - case H_SUCCESS: /* Faults were successfully controlled for the function. */ - return 0; - case H_PARAMETER: /* An incorrect parameter was supplied. */ - return -EINVAL; - case H_HARDWARE: /* A hardware event prevented the control of faults. */ - case H_STATE: /* The function was in an invalid state. */ - case H_AUTHORITY: /* The partition does not have authority to perform this hcall; the coherent platform facilities may need to be licensed. */ - return -EBUSY; - case H_FUNCTION: /* The function is not supported */ - case H_NOT_FOUND: /* The operation supplied was not valid */ - return -EINVAL; - default: - WARN(1, "Unexpected return code: %lx", rc); - return -EINVAL; - } -} - -/* - * cxl_h_control_facility - This H_CONTROL_CA_FACILITY hypervisor call - * allows the partition to manipulate or query - * certain coherent platform facility behaviors. 
- */ -static long cxl_h_control_facility(u64 unit_address, u64 op, - u64 p1, u64 p2, u64 p3, u64 p4, u64 *out) -{ - unsigned long retbuf[PLPAR_HCALL9_BUFSIZE]; - long rc; - - CXL_H9_WAIT_UNTIL_DONE(rc, retbuf, H_CONTROL_CA_FACILITY, unit_address, op, p1, p2, p3, p4); - _PRINT_MSG(rc, "cxl_h_control_facility(%#.16llx, %s(%#llx, %#llx, %#llx, %#llx, R4: %#lx)): %li\n", - unit_address, OP_STR_CONTROL_ADAPTER(op), p1, p2, p3, p4, retbuf[0], rc); - trace_cxl_hcall_control_facility(unit_address, OP_STR_CONTROL_ADAPTER(op), p1, p2, p3, p4, retbuf[0], rc); - - switch (rc) { - case H_SUCCESS: /* The operation is completed for the coherent platform facility */ - if (op == H_CONTROL_CA_FACILITY_COLLECT_VPD) - *out = retbuf[0]; - return 0; - case H_PARAMETER: /* An incorrect parameter was supplied. */ - case H_FUNCTION: /* The function is not supported. */ - case H_NOT_FOUND: /* The operation supplied was not valid */ - case H_NOT_AVAILABLE: /* The operation cannot be performed because the AFU has not been downloaded */ - case H_SG_LIST: /* An block list entry was invalid */ - return -EINVAL; - case H_AUTHORITY: /* The partition does not have authority to perform this hcall */ - case H_RESOURCE: /* The function has page table mappings for MMIO */ - case H_HARDWARE: /* A hardware event prevented the attach operation */ - case H_STATE: /* The coherent platform facility is not in a valid state */ - case H_BUSY: - return -EBUSY; - default: - WARN(1, "Unexpected return code: %lx", rc); - return -EINVAL; - } -} - -/* - * cxl_h_reset_adapter - Perform a reset to the coherent platform facility. - */ -long cxl_h_reset_adapter(u64 unit_address) -{ - return cxl_h_control_facility(unit_address, - H_CONTROL_CA_FACILITY_RESET, - 0, 0, 0, 0, - NULL); -} - -/* - * cxl_h_collect_vpd - Collect VPD for the coherent platform function. 
- * Parameter1 = 4K naturally aligned real buffer containing block - * list entries - * Parameter2 = number of block list entries in the block list, valid - * values are between 0 and 256 - */ -long cxl_h_collect_vpd_adapter(u64 unit_address, u64 list_address, - u64 num, u64 *out) -{ - return cxl_h_control_facility(unit_address, - H_CONTROL_CA_FACILITY_COLLECT_VPD, - list_address, num, 0, 0, - out); -} - -/* - * cxl_h_download_facility - This H_DOWNLOAD_CA_FACILITY - * hypervisor call provide platform support for - * downloading a base adapter image to the coherent - * platform facility, and for validating the entire - * image after the download. - * Parameters - * op: operation to perform to the coherent platform function - * Download: operation = 1, the base image in the coherent platform - * facility is first erased, and then - * programmed using the image supplied - * in the scatter/gather list. - * Validate: operation = 2, the base image in the coherent platform - * facility is compared with the image - * supplied in the scatter/gather list. - * list_address: 4K naturally aligned real buffer containing - * scatter/gather list entries. - * num: number of block list entries in the scatter/gather list. 
- */ -static long cxl_h_download_facility(u64 unit_address, u64 op, - u64 list_address, u64 num, - u64 *out) -{ - unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; - unsigned int delay, total_delay = 0; - u64 token = 0; - long rc; - - if (*out != 0) - token = *out; - - memset(retbuf, 0, sizeof(retbuf)); - while (1) { - rc = plpar_hcall(H_DOWNLOAD_CA_FACILITY, retbuf, - unit_address, op, list_address, num, - token); - token = retbuf[0]; - if (rc != H_BUSY && !H_IS_LONG_BUSY(rc)) - break; - - if (rc != H_BUSY) { - delay = get_longbusy_msecs(rc); - total_delay += delay; - if (total_delay > CXL_HCALL_TIMEOUT_DOWNLOAD) { - WARN(1, "Warning: Giving up waiting for CXL hcall " - "%#x after %u msec\n", - H_DOWNLOAD_CA_FACILITY, total_delay); - rc = H_BUSY; - break; - } - msleep(delay); - } - } - _PRINT_MSG(rc, "cxl_h_download_facility(%#.16llx, %s(%#llx, %#llx), %#lx): %li\n", - unit_address, OP_STR_DOWNLOAD_ADAPTER(op), list_address, num, retbuf[0], rc); - trace_cxl_hcall_download_facility(unit_address, OP_STR_DOWNLOAD_ADAPTER(op), list_address, num, retbuf[0], rc); - - switch (rc) { - case H_SUCCESS: /* The operation is completed for the coherent platform facility */ - return 0; - case H_PARAMETER: /* An incorrect parameter was supplied */ - case H_FUNCTION: /* The function is not supported. 
*/ - case H_SG_LIST: /* An block list entry was invalid */ - case H_BAD_DATA: /* Image verification failed */ - return -EINVAL; - case H_AUTHORITY: /* The partition does not have authority to perform this hcall */ - case H_RESOURCE: /* The function has page table mappings for MMIO */ - case H_HARDWARE: /* A hardware event prevented the attach operation */ - case H_STATE: /* The coherent platform facility is not in a valid state */ - case H_BUSY: - return -EBUSY; - case H_CONTINUE: - *out = retbuf[0]; - return 1; /* More data is needed for the complete image */ - default: - WARN(1, "Unexpected return code: %lx", rc); - return -EINVAL; - } -} - -/* - * cxl_h_download_adapter_image - Download the base image to the coherent - * platform facility. - */ -long cxl_h_download_adapter_image(u64 unit_address, - u64 list_address, u64 num, - u64 *out) -{ - return cxl_h_download_facility(unit_address, - H_DOWNLOAD_CA_FACILITY_DOWNLOAD, - list_address, num, out); -} - -/* - * cxl_h_validate_adapter_image - Validate the base image in the coherent - * platform facility. - */ -long cxl_h_validate_adapter_image(u64 unit_address, - u64 list_address, u64 num, - u64 *out) -{ - return cxl_h_download_facility(unit_address, - H_DOWNLOAD_CA_FACILITY_VALIDATE, - list_address, num, out); -} diff --git a/drivers/misc/cxl/hcalls.h b/drivers/misc/cxl/hcalls.h deleted file mode 100644 index d200465dc6ac..000000000000 --- a/drivers/misc/cxl/hcalls.h +++ /dev/null @@ -1,200 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * Copyright 2015 IBM Corp. - */ - -#ifndef _HCALLS_H -#define _HCALLS_H - -#include <linux/types.h> -#include <asm/byteorder.h> -#include <asm/hvcall.h> -#include "cxl.h" - -#define SG_BUFFER_SIZE 4096 -#define SG_MAX_ENTRIES 256 - -struct sg_list { - u64 phys_addr; - u64 len; -}; - -/* - * This is straight out of PAPR, but replacing some of the compound fields with - * a single field, where they were identical to the register layout. 
- * - * The 'flags' parameter regroups the various bit-fields - */ -#define CXL_PE_CSRP_VALID (1ULL << 63) -#define CXL_PE_PROBLEM_STATE (1ULL << 62) -#define CXL_PE_SECONDARY_SEGMENT_TBL_SRCH (1ULL << 61) -#define CXL_PE_TAGS_ACTIVE (1ULL << 60) -#define CXL_PE_USER_STATE (1ULL << 59) -#define CXL_PE_TRANSLATION_ENABLED (1ULL << 58) -#define CXL_PE_64_BIT (1ULL << 57) -#define CXL_PE_PRIVILEGED_PROCESS (1ULL << 56) - -#define CXL_PROCESS_ELEMENT_VERSION 1 -struct cxl_process_element_hcall { - __be64 version; - __be64 flags; - u8 reserved0[12]; - __be32 pslVirtualIsn; - u8 applicationVirtualIsnBitmap[256]; - u8 reserved1[144]; - struct cxl_process_element_common common; - u8 reserved4[12]; -} __packed; - -#define H_STATE_NORMAL 1 -#define H_STATE_DISABLE 2 -#define H_STATE_TEMP_UNAVAILABLE 3 -#define H_STATE_PERM_UNAVAILABLE 4 - -/* NOTE: element must be a logical real address, and must be pinned */ -long cxl_h_attach_process(u64 unit_address, struct cxl_process_element_hcall *element, - u64 *process_token, u64 *mmio_addr, u64 *mmio_size); - -/** - * cxl_h_detach_process - Detach a process element from a coherent - * platform function. - */ -long cxl_h_detach_process(u64 unit_address, u64 process_token); - -/** - * cxl_h_reset_afu - Perform a reset to the coherent platform function. - */ -long cxl_h_reset_afu(u64 unit_address); - -/** - * cxl_h_suspend_process - Suspend a process from being executed - * Parameter1 = process-token as returned from H_ATTACH_CA_PROCESS when - * process was attached. - */ -long cxl_h_suspend_process(u64 unit_address, u64 process_token); - -/** - * cxl_h_resume_process - Resume a process to be executed - * Parameter1 = process-token as returned from H_ATTACH_CA_PROCESS when - * process was attached. - */ -long cxl_h_resume_process(u64 unit_address, u64 process_token); - -/** - * cxl_h_read_error_state - Reads the error state of the coherent - * platform function. 
- * R4 contains the error state - */ -long cxl_h_read_error_state(u64 unit_address, u64 *state); - -/** - * cxl_h_get_afu_err - collect the AFU error buffer - * Parameter1 = byte offset into error buffer to retrieve, valid values - * are between 0 and (ibm,error-buffer-size - 1) - * Parameter2 = 4K aligned real address of error buffer, to be filled in - * Parameter3 = length of error buffer, valid values are 4K or less - */ -long cxl_h_get_afu_err(u64 unit_address, u64 offset, u64 buf_address, u64 len); - -/** - * cxl_h_get_config - collect configuration record for the - * coherent platform function - * Parameter1 = # of configuration record to retrieve, valid values are - * between 0 and (ibm,#config-records - 1) - * Parameter2 = byte offset into configuration record to retrieve, - * valid values are between 0 and (ibm,config-record-size - 1) - * Parameter3 = 4K aligned real address of configuration record buffer, - * to be filled in - * Parameter4 = length of configuration buffer, valid values are 4K or less - */ -long cxl_h_get_config(u64 unit_address, u64 cr_num, u64 offset, - u64 buf_address, u64 len); - -/** - * cxl_h_terminate_process - Terminate the process before completion - * Parameter1 = process-token as returned from H_ATTACH_CA_PROCESS when - * process was attached. - */ -long cxl_h_terminate_process(u64 unit_address, u64 process_token); - -/** - * cxl_h_collect_vpd - Collect VPD for the coherent platform function. - * Parameter1 = # of VPD record to retrieve, valid values are between 0 - * and (ibm,#config-records - 1). 
- * Parameter2 = 4K naturally aligned real buffer containing block - * list entries - * Parameter3 = number of block list entries in the block list, valid - * values are between 0 and 256 - */ -long cxl_h_collect_vpd(u64 unit_address, u64 record, u64 list_address, - u64 num, u64 *out); - -/** - * cxl_h_get_fn_error_interrupt - Read the function-wide error data based on an interrupt - */ -long cxl_h_get_fn_error_interrupt(u64 unit_address, u64 *reg); - -/** - * cxl_h_ack_fn_error_interrupt - Acknowledge function-wide error data - * based on an interrupt - * Parameter1 = value to write to the function-wide error interrupt register - */ -long cxl_h_ack_fn_error_interrupt(u64 unit_address, u64 value); - -/** - * cxl_h_get_error_log - Retrieve the Platform Log ID (PLID) of - * an error log - */ -long cxl_h_get_error_log(u64 unit_address, u64 value); - -/** - * cxl_h_collect_int_info - Collect interrupt info about a coherent - * platform function after an interrupt occurred. - */ -long cxl_h_collect_int_info(u64 unit_address, u64 process_token, - struct cxl_irq_info *info); - -/** - * cxl_h_control_faults - Control the operation of a coherent platform - * function after a fault occurs. - * - * Parameters - * control-mask: value to control the faults - * looks like PSL_TFC_An shifted >> 32 - * reset-mask: mask to control reset of function faults - * Set reset_mask = 1 to reset PSL errors - */ -long cxl_h_control_faults(u64 unit_address, u64 process_token, - u64 control_mask, u64 reset_mask); - -/** - * cxl_h_reset_adapter - Perform a reset to the coherent platform facility. - */ -long cxl_h_reset_adapter(u64 unit_address); - -/** - * cxl_h_collect_vpd - Collect VPD for the coherent platform function. 
- * Parameter1 = 4K naturally aligned real buffer containing block - * list entries - * Parameter2 = number of block list entries in the block list, valid - * values are between 0 and 256 - */ -long cxl_h_collect_vpd_adapter(u64 unit_address, u64 list_address, - u64 num, u64 *out); - -/** - * cxl_h_download_adapter_image - Download the base image to the coherent - * platform facility. - */ -long cxl_h_download_adapter_image(u64 unit_address, - u64 list_address, u64 num, - u64 *out); - -/** - * cxl_h_validate_adapter_image - Validate the base image in the coherent - * platform facility. - */ -long cxl_h_validate_adapter_image(u64 unit_address, - u64 list_address, u64 num, - u64 *out); -#endif /* _HCALLS_H */ diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c deleted file mode 100644 index b730e022a48e..000000000000 --- a/drivers/misc/cxl/irq.c +++ /dev/null @@ -1,450 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright 2014 IBM Corp. - */ - -#include <linux/interrupt.h> -#include <linux/irqdomain.h> -#include <linux/workqueue.h> -#include <linux/sched.h> -#include <linux/wait.h> -#include <linux/slab.h> -#include <linux/pid.h> -#include <asm/cputable.h> -#include <misc/cxl-base.h> - -#include "cxl.h" -#include "trace.h" - -static int afu_irq_range_start(void) -{ - if (cpu_has_feature(CPU_FTR_HVMODE)) - return 1; - return 0; -} - -static irqreturn_t schedule_cxl_fault(struct cxl_context *ctx, u64 dsisr, u64 dar) -{ - ctx->dsisr = dsisr; - ctx->dar = dar; - schedule_work(&ctx->fault_work); - return IRQ_HANDLED; -} - -irqreturn_t cxl_irq_psl9(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_info) -{ - u64 dsisr, dar; - - dsisr = irq_info->dsisr; - dar = irq_info->dar; - - trace_cxl_psl9_irq(ctx, irq, dsisr, dar); - - pr_devel("CXL interrupt %i for afu pe: %i DSISR: %#llx DAR: %#llx\n", irq, ctx->pe, dsisr, dar); - - if (dsisr & CXL_PSL9_DSISR_An_TF) { - pr_devel("CXL interrupt: Scheduling translation fault handling for later (pe: 
%i)\n", ctx->pe); - return schedule_cxl_fault(ctx, dsisr, dar); - } - - if (dsisr & CXL_PSL9_DSISR_An_PE) - return cxl_ops->handle_psl_slice_error(ctx, dsisr, - irq_info->errstat); - if (dsisr & CXL_PSL9_DSISR_An_AE) { - pr_devel("CXL interrupt: AFU Error 0x%016llx\n", irq_info->afu_err); - - if (ctx->pending_afu_err) { - /* - * This shouldn't happen - the PSL treats these errors - * as fatal and will have reset the AFU, so there's not - * much point buffering multiple AFU errors. - * OTOH if we DO ever see a storm of these come in it's - * probably best that we log them somewhere: - */ - dev_err_ratelimited(&ctx->afu->dev, "CXL AFU Error undelivered to pe %i: 0x%016llx\n", - ctx->pe, irq_info->afu_err); - } else { - spin_lock(&ctx->lock); - ctx->afu_err = irq_info->afu_err; - ctx->pending_afu_err = 1; - spin_unlock(&ctx->lock); - - wake_up_all(&ctx->wq); - } - - cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_A, 0); - return IRQ_HANDLED; - } - if (dsisr & CXL_PSL9_DSISR_An_OC) - pr_devel("CXL interrupt: OS Context Warning\n"); - - WARN(1, "Unhandled CXL PSL IRQ\n"); - return IRQ_HANDLED; -} - -irqreturn_t cxl_irq_psl8(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_info) -{ - u64 dsisr, dar; - - dsisr = irq_info->dsisr; - dar = irq_info->dar; - - trace_cxl_psl_irq(ctx, irq, dsisr, dar); - - pr_devel("CXL interrupt %i for afu pe: %i DSISR: %#llx DAR: %#llx\n", irq, ctx->pe, dsisr, dar); - - if (dsisr & CXL_PSL_DSISR_An_DS) { - /* - * We don't inherently need to sleep to handle this, but we do - * need to get a ref to the task's mm, which we can't do from - * irq context without the potential for a deadlock since it - * takes the task_lock. An alternate option would be to keep a - * reference to the task's mm the entire time it has cxl open, - * but to do that we need to solve the issue where we hold a - * ref to the mm, but the mm can hold a ref to the fd after an - * mmap preventing anything from being cleaned up. 
- */ - pr_devel("Scheduling segment miss handling for later pe: %i\n", ctx->pe); - return schedule_cxl_fault(ctx, dsisr, dar); - } - - if (dsisr & CXL_PSL_DSISR_An_M) - pr_devel("CXL interrupt: PTE not found\n"); - if (dsisr & CXL_PSL_DSISR_An_P) - pr_devel("CXL interrupt: Storage protection violation\n"); - if (dsisr & CXL_PSL_DSISR_An_A) - pr_devel("CXL interrupt: AFU lock access to write through or cache inhibited storage\n"); - if (dsisr & CXL_PSL_DSISR_An_S) - pr_devel("CXL interrupt: Access was afu_wr or afu_zero\n"); - if (dsisr & CXL_PSL_DSISR_An_K) - pr_devel("CXL interrupt: Access not permitted by virtual page class key protection\n"); - - if (dsisr & CXL_PSL_DSISR_An_DM) { - /* - * In some cases we might be able to handle the fault - * immediately if hash_page would succeed, but we still need - * the task's mm, which as above we can't get without a lock - */ - pr_devel("Scheduling page fault handling for later pe: %i\n", ctx->pe); - return schedule_cxl_fault(ctx, dsisr, dar); - } - if (dsisr & CXL_PSL_DSISR_An_ST) - WARN(1, "CXL interrupt: Segment Table PTE not found\n"); - if (dsisr & CXL_PSL_DSISR_An_UR) - pr_devel("CXL interrupt: AURP PTE not found\n"); - if (dsisr & CXL_PSL_DSISR_An_PE) - return cxl_ops->handle_psl_slice_error(ctx, dsisr, - irq_info->errstat); - if (dsisr & CXL_PSL_DSISR_An_AE) { - pr_devel("CXL interrupt: AFU Error 0x%016llx\n", irq_info->afu_err); - - if (ctx->pending_afu_err) { - /* - * This shouldn't happen - the PSL treats these errors - * as fatal and will have reset the AFU, so there's not - * much point buffering multiple AFU errors. 
- * OTOH if we DO ever see a storm of these come in it's - * probably best that we log them somewhere: - */ - dev_err_ratelimited(&ctx->afu->dev, "CXL AFU Error " - "undelivered to pe %i: 0x%016llx\n", - ctx->pe, irq_info->afu_err); - } else { - spin_lock(&ctx->lock); - ctx->afu_err = irq_info->afu_err; - ctx->pending_afu_err = true; - spin_unlock(&ctx->lock); - - wake_up_all(&ctx->wq); - } - - cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_A, 0); - return IRQ_HANDLED; - } - if (dsisr & CXL_PSL_DSISR_An_OC) - pr_devel("CXL interrupt: OS Context Warning\n"); - - WARN(1, "Unhandled CXL PSL IRQ\n"); - return IRQ_HANDLED; -} - -static irqreturn_t cxl_irq_afu(int irq, void *data) -{ - struct cxl_context *ctx = data; - irq_hw_number_t hwirq = irqd_to_hwirq(irq_get_irq_data(irq)); - int irq_off, afu_irq = 0; - __u16 range; - int r; - - /* - * Look for the interrupt number. - * On bare-metal, we know range 0 only contains the PSL - * interrupt so we could start counting at range 1 and initialize - * afu_irq at 1. - * In a guest, range 0 also contains AFU interrupts, so it must - * be counted for. Therefore we initialize afu_irq at 0 to take into - * account the PSL interrupt. - * - * For code-readability, it just seems easier to go over all - * the ranges on bare-metal and guest. The end result is the same. 
- */ - for (r = 0; r < CXL_IRQ_RANGES; r++) { - irq_off = hwirq - ctx->irqs.offset[r]; - range = ctx->irqs.range[r]; - if (irq_off >= 0 && irq_off < range) { - afu_irq += irq_off; - break; - } - afu_irq += range; - } - if (unlikely(r >= CXL_IRQ_RANGES)) { - WARN(1, "Received AFU IRQ out of range for pe %i (virq %i hwirq %lx)\n", - ctx->pe, irq, hwirq); - return IRQ_HANDLED; - } - - trace_cxl_afu_irq(ctx, afu_irq, irq, hwirq); - pr_devel("Received AFU interrupt %i for pe: %i (virq %i hwirq %lx)\n", - afu_irq, ctx->pe, irq, hwirq); - - if (unlikely(!ctx->irq_bitmap)) { - WARN(1, "Received AFU IRQ for context with no IRQ bitmap\n"); - return IRQ_HANDLED; - } - spin_lock(&ctx->lock); - set_bit(afu_irq - 1, ctx->irq_bitmap); - ctx->pending_irq = true; - spin_unlock(&ctx->lock); - - wake_up_all(&ctx->wq); - - return IRQ_HANDLED; -} - -unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq, - irq_handler_t handler, void *cookie, const char *name) -{ - unsigned int virq; - int result; - - /* IRQ Domain? 
*/ - virq = irq_create_mapping(NULL, hwirq); - if (!virq) { - dev_warn(&adapter->dev, "cxl_map_irq: irq_create_mapping failed\n"); - return 0; - } - - if (cxl_ops->setup_irq) - cxl_ops->setup_irq(adapter, hwirq, virq); - - pr_devel("hwirq %#lx mapped to virq %u\n", hwirq, virq); - - result = request_irq(virq, handler, 0, name, cookie); - if (result) { - dev_warn(&adapter->dev, "cxl_map_irq: request_irq failed: %i\n", result); - return 0; - } - - return virq; -} - -void cxl_unmap_irq(unsigned int virq, void *cookie) -{ - free_irq(virq, cookie); -} - -int cxl_register_one_irq(struct cxl *adapter, - irq_handler_t handler, - void *cookie, - irq_hw_number_t *dest_hwirq, - unsigned int *dest_virq, - const char *name) -{ - int hwirq, virq; - - if ((hwirq = cxl_ops->alloc_one_irq(adapter)) < 0) - return hwirq; - - if (!(virq = cxl_map_irq(adapter, hwirq, handler, cookie, name))) - goto err; - - *dest_hwirq = hwirq; - *dest_virq = virq; - - return 0; - -err: - cxl_ops->release_one_irq(adapter, hwirq); - return -ENOMEM; -} - -void afu_irq_name_free(struct cxl_context *ctx) -{ - struct cxl_irq_name *irq_name, *tmp; - - list_for_each_entry_safe(irq_name, tmp, &ctx->irq_names, list) { - kfree(irq_name->name); - list_del(&irq_name->list); - kfree(irq_name); - } -} - -int afu_allocate_irqs(struct cxl_context *ctx, u32 count) -{ - int rc, r, i, j = 1; - struct cxl_irq_name *irq_name; - int alloc_count; - - /* - * In native mode, range 0 is reserved for the multiplexed - * PSL interrupt. It has been allocated when the AFU was initialized. - * - * In a guest, the PSL interrupt is not mutliplexed, but per-context, - * and is the first interrupt from range 0. It still needs to be - * allocated, so bump the count by one. 
- */ - if (cpu_has_feature(CPU_FTR_HVMODE)) - alloc_count = count; - else - alloc_count = count + 1; - - if ((rc = cxl_ops->alloc_irq_ranges(&ctx->irqs, ctx->afu->adapter, - alloc_count))) - return rc; - - if (cpu_has_feature(CPU_FTR_HVMODE)) { - /* Multiplexed PSL Interrupt */ - ctx->irqs.offset[0] = ctx->afu->native->psl_hwirq; - ctx->irqs.range[0] = 1; - } - - ctx->irq_count = count; - ctx->irq_bitmap = bitmap_zalloc(count, GFP_KERNEL); - if (!ctx->irq_bitmap) - goto out; - - /* - * Allocate names first. If any fail, bail out before allocating - * actual hardware IRQs. - */ - for (r = afu_irq_range_start(); r < CXL_IRQ_RANGES; r++) { - for (i = 0; i < ctx->irqs.range[r]; i++) { - irq_name = kmalloc(sizeof(struct cxl_irq_name), - GFP_KERNEL); - if (!irq_name) - goto out; - irq_name->name = kasprintf(GFP_KERNEL, "cxl-%s-pe%i-%i", - dev_name(&ctx->afu->dev), - ctx->pe, j); - if (!irq_name->name) { - kfree(irq_name); - goto out; - } - /* Add to tail so next look get the correct order */ - list_add_tail(&irq_name->list, &ctx->irq_names); - j++; - } - } - return 0; - -out: - cxl_ops->release_irq_ranges(&ctx->irqs, ctx->afu->adapter); - bitmap_free(ctx->irq_bitmap); - afu_irq_name_free(ctx); - return -ENOMEM; -} - -static void afu_register_hwirqs(struct cxl_context *ctx) -{ - irq_hw_number_t hwirq; - struct cxl_irq_name *irq_name; - int r, i; - irqreturn_t (*handler)(int irq, void *data); - - /* We've allocated all memory now, so let's do the irq allocations */ - irq_name = list_first_entry(&ctx->irq_names, struct cxl_irq_name, list); - for (r = afu_irq_range_start(); r < CXL_IRQ_RANGES; r++) { - hwirq = ctx->irqs.offset[r]; - for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) { - if (r == 0 && i == 0) - /* - * The very first interrupt of range 0 is - * always the PSL interrupt, but we only - * need to connect a handler for guests, - * because there's one PSL interrupt per - * context. 
- * On bare-metal, the PSL interrupt is - * multiplexed and was setup when the AFU - * was configured. - */ - handler = cxl_ops->psl_interrupt; - else - handler = cxl_irq_afu; - cxl_map_irq(ctx->afu->adapter, hwirq, handler, ctx, - irq_name->name); - irq_name = list_next_entry(irq_name, list); - } - } -} - -int afu_register_irqs(struct cxl_context *ctx, u32 count) -{ - int rc; - - rc = afu_allocate_irqs(ctx, count); - if (rc) - return rc; - - afu_register_hwirqs(ctx); - return 0; -} - -void afu_release_irqs(struct cxl_context *ctx, void *cookie) -{ - irq_hw_number_t hwirq; - unsigned int virq; - int r, i; - - for (r = afu_irq_range_start(); r < CXL_IRQ_RANGES; r++) { - hwirq = ctx->irqs.offset[r]; - for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) { - virq = irq_find_mapping(NULL, hwirq); - if (virq) - cxl_unmap_irq(virq, cookie); - } - } - - afu_irq_name_free(ctx); - cxl_ops->release_irq_ranges(&ctx->irqs, ctx->afu->adapter); - - ctx->irq_count = 0; -} - -void cxl_afu_decode_psl_serr(struct cxl_afu *afu, u64 serr) -{ - dev_crit(&afu->dev, - "PSL Slice error received. 
Check AFU for root cause.\n"); - dev_crit(&afu->dev, "PSL_SERR_An: 0x%016llx\n", serr); - if (serr & CXL_PSL_SERR_An_afuto) - dev_crit(&afu->dev, "AFU MMIO Timeout\n"); - if (serr & CXL_PSL_SERR_An_afudis) - dev_crit(&afu->dev, - "MMIO targeted Accelerator that was not enabled\n"); - if (serr & CXL_PSL_SERR_An_afuov) - dev_crit(&afu->dev, "AFU CTAG Overflow\n"); - if (serr & CXL_PSL_SERR_An_badsrc) - dev_crit(&afu->dev, "Bad Interrupt Source\n"); - if (serr & CXL_PSL_SERR_An_badctx) - dev_crit(&afu->dev, "Bad Context Handle\n"); - if (serr & CXL_PSL_SERR_An_llcmdis) - dev_crit(&afu->dev, "LLCMD to Disabled AFU\n"); - if (serr & CXL_PSL_SERR_An_llcmdto) - dev_crit(&afu->dev, "LLCMD Timeout to AFU\n"); - if (serr & CXL_PSL_SERR_An_afupar) - dev_crit(&afu->dev, "AFU MMIO Parity Error\n"); - if (serr & CXL_PSL_SERR_An_afudup) - dev_crit(&afu->dev, "AFU MMIO Duplicate CTAG Error\n"); - if (serr & CXL_PSL_SERR_An_AE) - dev_crit(&afu->dev, - "AFU asserted JDONE with JERROR in AFU Directed Mode\n"); -} diff --git a/drivers/misc/cxl/main.c b/drivers/misc/cxl/main.c deleted file mode 100644 index c1fbf6f588f7..000000000000 --- a/drivers/misc/cxl/main.c +++ /dev/null @@ -1,383 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright 2014 IBM Corp. 
- */ - -#include <linux/spinlock.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/device.h> -#include <linux/mutex.h> -#include <linux/init.h> -#include <linux/list.h> -#include <linux/mm.h> -#include <linux/of.h> -#include <linux/slab.h> -#include <linux/idr.h> -#include <linux/pci.h> -#include <linux/platform_device.h> -#include <linux/sched/task.h> - -#include <asm/cputable.h> -#include <asm/mmu.h> -#include <misc/cxl-base.h> - -#include "cxl.h" -#include "trace.h" - -static DEFINE_SPINLOCK(adapter_idr_lock); -static DEFINE_IDR(cxl_adapter_idr); - -uint cxl_verbose; -module_param_named(verbose, cxl_verbose, uint, 0600); -MODULE_PARM_DESC(verbose, "Enable verbose dmesg output"); - -const struct cxl_backend_ops *cxl_ops; - -int cxl_afu_slbia(struct cxl_afu *afu) -{ - unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT); - - pr_devel("cxl_afu_slbia issuing SLBIA command\n"); - cxl_p2n_write(afu, CXL_SLBIA_An, CXL_TLB_SLB_IQ_ALL); - while (cxl_p2n_read(afu, CXL_SLBIA_An) & CXL_TLB_SLB_P) { - if (time_after_eq(jiffies, timeout)) { - dev_warn(&afu->dev, "WARNING: CXL AFU SLBIA timed out!\n"); - return -EBUSY; - } - /* If the adapter has gone down, we can assume that we - * will PERST it and that will invalidate everything. 
- */ - if (!cxl_ops->link_ok(afu->adapter, afu)) - return -EIO; - cpu_relax(); - } - return 0; -} - -static inline void _cxl_slbia(struct cxl_context *ctx, struct mm_struct *mm) -{ - unsigned long flags; - - if (ctx->mm != mm) - return; - - pr_devel("%s matched mm - card: %i afu: %i pe: %i\n", __func__, - ctx->afu->adapter->adapter_num, ctx->afu->slice, ctx->pe); - - spin_lock_irqsave(&ctx->sste_lock, flags); - trace_cxl_slbia(ctx); - memset(ctx->sstp, 0, ctx->sst_size); - spin_unlock_irqrestore(&ctx->sste_lock, flags); - mb(); - cxl_afu_slbia(ctx->afu); -} - -static inline void cxl_slbia_core(struct mm_struct *mm) -{ - struct cxl *adapter; - struct cxl_afu *afu; - struct cxl_context *ctx; - int card, slice, id; - - pr_devel("%s called\n", __func__); - - spin_lock(&adapter_idr_lock); - idr_for_each_entry(&cxl_adapter_idr, adapter, card) { - /* XXX: Make this lookup faster with link from mm to ctx */ - spin_lock(&adapter->afu_list_lock); - for (slice = 0; slice < adapter->slices; slice++) { - afu = adapter->afu[slice]; - if (!afu || !afu->enabled) - continue; - rcu_read_lock(); - idr_for_each_entry(&afu->contexts_idr, ctx, id) - _cxl_slbia(ctx, mm); - rcu_read_unlock(); - } - spin_unlock(&adapter->afu_list_lock); - } - spin_unlock(&adapter_idr_lock); -} - -static struct cxl_calls cxl_calls = { - .cxl_slbia = cxl_slbia_core, - .owner = THIS_MODULE, -}; - -int cxl_alloc_sst(struct cxl_context *ctx) -{ - unsigned long vsid; - u64 ea_mask, size, sstp0, sstp1; - - sstp0 = 0; - sstp1 = 0; - - ctx->sst_size = PAGE_SIZE; - ctx->sst_lru = 0; - ctx->sstp = (struct cxl_sste *)get_zeroed_page(GFP_KERNEL); - if (!ctx->sstp) { - pr_err("cxl_alloc_sst: Unable to allocate segment table\n"); - return -ENOMEM; - } - pr_devel("SSTP allocated at 0x%p\n", ctx->sstp); - - vsid = get_kernel_vsid((u64)ctx->sstp, mmu_kernel_ssize) << 12; - - sstp0 |= (u64)mmu_kernel_ssize << CXL_SSTP0_An_B_SHIFT; - sstp0 |= (SLB_VSID_KERNEL | mmu_psize_defs[mmu_linear_psize].sllp) << 50; - - size = 
(((u64)ctx->sst_size >> 8) - 1) << CXL_SSTP0_An_SegTableSize_SHIFT; - if (unlikely(size & ~CXL_SSTP0_An_SegTableSize_MASK)) { - WARN(1, "Impossible segment table size\n"); - return -EINVAL; - } - sstp0 |= size; - - if (mmu_kernel_ssize == MMU_SEGSIZE_256M) - ea_mask = 0xfffff00ULL; - else - ea_mask = 0xffffffff00ULL; - - sstp0 |= vsid >> (50-14); /* Top 14 bits of VSID */ - sstp1 |= (vsid << (64-(50-14))) & ~ea_mask; - sstp1 |= (u64)ctx->sstp & ea_mask; - sstp1 |= CXL_SSTP1_An_V; - - pr_devel("Looked up %#llx: slbfee. %#llx (ssize: %x, vsid: %#lx), copied to SSTP0: %#llx, SSTP1: %#llx\n", - (u64)ctx->sstp, (u64)ctx->sstp & ESID_MASK, mmu_kernel_ssize, vsid, sstp0, sstp1); - - /* Store calculated sstp hardware points for use later */ - ctx->sstp0 = sstp0; - ctx->sstp1 = sstp1; - - return 0; -} - -/* print buffer content as integers when debugging */ -void cxl_dump_debug_buffer(void *buf, size_t buf_len) -{ -#ifdef DEBUG - int i, *ptr; - - /* - * We want to regroup up to 4 integers per line, which means they - * need to be in the same pr_devel() statement - */ - ptr = (int *) buf; - for (i = 0; i * 4 < buf_len; i += 4) { - if ((i + 3) * 4 < buf_len) - pr_devel("%.8x %.8x %.8x %.8x\n", ptr[i], ptr[i + 1], - ptr[i + 2], ptr[i + 3]); - else if ((i + 2) * 4 < buf_len) - pr_devel("%.8x %.8x %.8x\n", ptr[i], ptr[i + 1], - ptr[i + 2]); - else if ((i + 1) * 4 < buf_len) - pr_devel("%.8x %.8x\n", ptr[i], ptr[i + 1]); - else - pr_devel("%.8x\n", ptr[i]); - } -#endif /* DEBUG */ -} - -/* Find a CXL adapter by it's number and increase it's refcount */ -struct cxl *get_cxl_adapter(int num) -{ - struct cxl *adapter; - - spin_lock(&adapter_idr_lock); - if ((adapter = idr_find(&cxl_adapter_idr, num))) - get_device(&adapter->dev); - spin_unlock(&adapter_idr_lock); - - return adapter; -} - -static int cxl_alloc_adapter_nr(struct cxl *adapter) -{ - int i; - - idr_preload(GFP_KERNEL); - spin_lock(&adapter_idr_lock); - i = idr_alloc(&cxl_adapter_idr, adapter, 0, 0, GFP_NOWAIT); - 
spin_unlock(&adapter_idr_lock); - idr_preload_end(); - if (i < 0) - return i; - - adapter->adapter_num = i; - - return 0; -} - -void cxl_remove_adapter_nr(struct cxl *adapter) -{ - idr_remove(&cxl_adapter_idr, adapter->adapter_num); -} - -struct cxl *cxl_alloc_adapter(void) -{ - struct cxl *adapter; - - if (!(adapter = kzalloc(sizeof(struct cxl), GFP_KERNEL))) - return NULL; - - spin_lock_init(&adapter->afu_list_lock); - - if (cxl_alloc_adapter_nr(adapter)) - goto err1; - - if (dev_set_name(&adapter->dev, "card%i", adapter->adapter_num)) - goto err2; - - /* start with context lock taken */ - atomic_set(&adapter->contexts_num, -1); - - return adapter; -err2: - cxl_remove_adapter_nr(adapter); -err1: - kfree(adapter); - return NULL; -} - -struct cxl_afu *cxl_alloc_afu(struct cxl *adapter, int slice) -{ - struct cxl_afu *afu; - - if (!(afu = kzalloc(sizeof(struct cxl_afu), GFP_KERNEL))) - return NULL; - - afu->adapter = adapter; - afu->dev.parent = &adapter->dev; - afu->dev.release = cxl_ops->release_afu; - afu->slice = slice; - idr_init(&afu->contexts_idr); - mutex_init(&afu->contexts_lock); - spin_lock_init(&afu->afu_cntl_lock); - atomic_set(&afu->configured_state, -1); - afu->prefault_mode = CXL_PREFAULT_NONE; - afu->irqs_max = afu->adapter->user_irqs; - - return afu; -} - -int cxl_afu_select_best_mode(struct cxl_afu *afu) -{ - if (afu->modes_supported & CXL_MODE_DIRECTED) - return cxl_ops->afu_activate_mode(afu, CXL_MODE_DIRECTED); - - if (afu->modes_supported & CXL_MODE_DEDICATED) - return cxl_ops->afu_activate_mode(afu, CXL_MODE_DEDICATED); - - dev_warn(&afu->dev, "No supported programming modes available\n"); - /* We don't fail this so the user can inspect sysfs */ - return 0; -} - -int cxl_adapter_context_get(struct cxl *adapter) -{ - int rc; - - rc = atomic_inc_unless_negative(&adapter->contexts_num); - return rc ? 
0 : -EBUSY; -} - -void cxl_adapter_context_put(struct cxl *adapter) -{ - atomic_dec_if_positive(&adapter->contexts_num); -} - -int cxl_adapter_context_lock(struct cxl *adapter) -{ - int rc; - /* no active contexts -> contexts_num == 0 */ - rc = atomic_cmpxchg(&adapter->contexts_num, 0, -1); - return rc ? -EBUSY : 0; -} - -void cxl_adapter_context_unlock(struct cxl *adapter) -{ - int val = atomic_cmpxchg(&adapter->contexts_num, -1, 0); - - /* - * contexts lock taken -> contexts_num == -1 - * If not true then show a warning and force reset the lock. - * This will happen when context_unlock was requested without - * doing a context_lock. - */ - if (val != -1) { - atomic_set(&adapter->contexts_num, 0); - WARN(1, "Adapter context unlocked with %d active contexts", - val); - } -} - -static int __init init_cxl(void) -{ - int rc = 0; - - if (!tlbie_capable) - return -EINVAL; - - if ((rc = cxl_file_init())) - return rc; - - cxl_debugfs_init(); - - /* - * we don't register the callback on P9. slb callack is only - * used for the PSL8 MMU and CX4. 
- */ - if (cxl_is_power8()) { - rc = register_cxl_calls(&cxl_calls); - if (rc) - goto err; - } - - if (cpu_has_feature(CPU_FTR_HVMODE)) { - cxl_ops = &cxl_native_ops; - rc = pci_register_driver(&cxl_pci_driver); - } -#ifdef CONFIG_PPC_PSERIES - else { - cxl_ops = &cxl_guest_ops; - rc = platform_driver_register(&cxl_of_driver); - } -#endif - if (rc) - goto err1; - - return 0; -err1: - if (cxl_is_power8()) - unregister_cxl_calls(&cxl_calls); -err: - cxl_debugfs_exit(); - cxl_file_exit(); - - return rc; -} - -static void exit_cxl(void) -{ - if (cpu_has_feature(CPU_FTR_HVMODE)) - pci_unregister_driver(&cxl_pci_driver); -#ifdef CONFIG_PPC_PSERIES - else - platform_driver_unregister(&cxl_of_driver); -#endif - - cxl_debugfs_exit(); - cxl_file_exit(); - if (cxl_is_power8()) - unregister_cxl_calls(&cxl_calls); - idr_destroy(&cxl_adapter_idr); -} - -module_init(init_cxl); -module_exit(exit_cxl); - -MODULE_DESCRIPTION("IBM Coherent Accelerator"); -MODULE_AUTHOR("Ian Munsie <imunsie@au1.ibm.com>"); -MODULE_LICENSE("GPL"); diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c deleted file mode 100644 index 50b0c44bb8d7..000000000000 --- a/drivers/misc/cxl/native.c +++ /dev/null @@ -1,1597 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright 2014 IBM Corp. 
- */ - -#include <linux/spinlock.h> -#include <linux/sched.h> -#include <linux/sched/clock.h> -#include <linux/slab.h> -#include <linux/mutex.h> -#include <linux/mm.h> -#include <linux/uaccess.h> -#include <linux/delay.h> -#include <linux/irqdomain.h> -#include <asm/synch.h> -#include <asm/switch_to.h> -#include <misc/cxl-base.h> - -#include "cxl.h" -#include "trace.h" - -static int afu_control(struct cxl_afu *afu, u64 command, u64 clear, - u64 result, u64 mask, bool enabled) -{ - u64 AFU_Cntl; - unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT); - int rc = 0; - - spin_lock(&afu->afu_cntl_lock); - pr_devel("AFU command starting: %llx\n", command); - - trace_cxl_afu_ctrl(afu, command); - - AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An); - cxl_p2n_write(afu, CXL_AFU_Cntl_An, (AFU_Cntl & ~clear) | command); - - AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An); - while ((AFU_Cntl & mask) != result) { - if (time_after_eq(jiffies, timeout)) { - dev_warn(&afu->dev, "WARNING: AFU control timed out!\n"); - rc = -EBUSY; - goto out; - } - - if (!cxl_ops->link_ok(afu->adapter, afu)) { - afu->enabled = enabled; - rc = -EIO; - goto out; - } - - pr_devel_ratelimited("AFU control... (0x%016llx)\n", - AFU_Cntl | command); - cpu_relax(); - AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An); - } - - if (AFU_Cntl & CXL_AFU_Cntl_An_RA) { - /* - * Workaround for a bug in the XSL used in the Mellanox CX4 - * that fails to clear the RA bit after an AFU reset, - * preventing subsequent AFU resets from working. 
- */ - cxl_p2n_write(afu, CXL_AFU_Cntl_An, AFU_Cntl & ~CXL_AFU_Cntl_An_RA); - } - - pr_devel("AFU command complete: %llx\n", command); - afu->enabled = enabled; -out: - trace_cxl_afu_ctrl_done(afu, command, rc); - spin_unlock(&afu->afu_cntl_lock); - - return rc; -} - -static int afu_enable(struct cxl_afu *afu) -{ - pr_devel("AFU enable request\n"); - - return afu_control(afu, CXL_AFU_Cntl_An_E, 0, - CXL_AFU_Cntl_An_ES_Enabled, - CXL_AFU_Cntl_An_ES_MASK, true); -} - -int cxl_afu_disable(struct cxl_afu *afu) -{ - pr_devel("AFU disable request\n"); - - return afu_control(afu, 0, CXL_AFU_Cntl_An_E, - CXL_AFU_Cntl_An_ES_Disabled, - CXL_AFU_Cntl_An_ES_MASK, false); -} - -/* This will disable as well as reset */ -static int native_afu_reset(struct cxl_afu *afu) -{ - int rc; - u64 serr; - - pr_devel("AFU reset request\n"); - - rc = afu_control(afu, CXL_AFU_Cntl_An_RA, 0, - CXL_AFU_Cntl_An_RS_Complete | CXL_AFU_Cntl_An_ES_Disabled, - CXL_AFU_Cntl_An_RS_MASK | CXL_AFU_Cntl_An_ES_MASK, - false); - - /* - * Re-enable any masked interrupts when the AFU is not - * activated to avoid side effects after attaching a process - * in dedicated mode. 
- */ - if (afu->current_mode == 0) { - serr = cxl_p1n_read(afu, CXL_PSL_SERR_An); - serr &= ~CXL_PSL_SERR_An_IRQ_MASKS; - cxl_p1n_write(afu, CXL_PSL_SERR_An, serr); - } - - return rc; -} - -static int native_afu_check_and_enable(struct cxl_afu *afu) -{ - if (!cxl_ops->link_ok(afu->adapter, afu)) { - WARN(1, "Refusing to enable afu while link down!\n"); - return -EIO; - } - if (afu->enabled) - return 0; - return afu_enable(afu); -} - -int cxl_psl_purge(struct cxl_afu *afu) -{ - u64 PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An); - u64 AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An); - u64 dsisr, dar; - u64 start, end; - u64 trans_fault = 0x0ULL; - unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT); - int rc = 0; - - trace_cxl_psl_ctrl(afu, CXL_PSL_SCNTL_An_Pc); - - pr_devel("PSL purge request\n"); - - if (cxl_is_power8()) - trans_fault = CXL_PSL_DSISR_TRANS; - if (cxl_is_power9()) - trans_fault = CXL_PSL9_DSISR_An_TF; - - if (!cxl_ops->link_ok(afu->adapter, afu)) { - dev_warn(&afu->dev, "PSL Purge called with link down, ignoring\n"); - rc = -EIO; - goto out; - } - - if ((AFU_Cntl & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) { - WARN(1, "psl_purge request while AFU not disabled!\n"); - cxl_afu_disable(afu); - } - - cxl_p1n_write(afu, CXL_PSL_SCNTL_An, - PSL_CNTL | CXL_PSL_SCNTL_An_Pc); - start = local_clock(); - PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An); - while ((PSL_CNTL & CXL_PSL_SCNTL_An_Ps_MASK) - == CXL_PSL_SCNTL_An_Ps_Pending) { - if (time_after_eq(jiffies, timeout)) { - dev_warn(&afu->dev, "WARNING: PSL Purge timed out!\n"); - rc = -EBUSY; - goto out; - } - if (!cxl_ops->link_ok(afu->adapter, afu)) { - rc = -EIO; - goto out; - } - - dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An); - pr_devel_ratelimited("PSL purging... 
PSL_CNTL: 0x%016llx PSL_DSISR: 0x%016llx\n", - PSL_CNTL, dsisr); - - if (dsisr & trans_fault) { - dar = cxl_p2n_read(afu, CXL_PSL_DAR_An); - dev_notice(&afu->dev, "PSL purge terminating pending translation, DSISR: 0x%016llx, DAR: 0x%016llx\n", - dsisr, dar); - cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE); - } else if (dsisr) { - dev_notice(&afu->dev, "PSL purge acknowledging pending non-translation fault, DSISR: 0x%016llx\n", - dsisr); - cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A); - } else { - cpu_relax(); - } - PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An); - } - end = local_clock(); - pr_devel("PSL purged in %lld ns\n", end - start); - - cxl_p1n_write(afu, CXL_PSL_SCNTL_An, - PSL_CNTL & ~CXL_PSL_SCNTL_An_Pc); -out: - trace_cxl_psl_ctrl_done(afu, CXL_PSL_SCNTL_An_Pc, rc); - return rc; -} - -static int spa_max_procs(int spa_size) -{ - /* - * From the CAIA: - * end_of_SPA_area = SPA_Base + ((n+4) * 128) + (( ((n*8) + 127) >> 7) * 128) + 255 - * Most of that junk is really just an overly-complicated way of saying - * the last 256 bytes are __aligned(128), so it's really: - * end_of_SPA_area = end_of_PSL_queue_area + __aligned(128) 255 - * and - * end_of_PSL_queue_area = SPA_Base + ((n+4) * 128) + (n*8) - 1 - * so - * sizeof(SPA) = ((n+4) * 128) + (n*8) + __aligned(128) 256 - * Ignore the alignment (which is safe in this case as long as we are - * careful with our rounding) and solve for n: - */ - return ((spa_size / 8) - 96) / 17; -} - -static int cxl_alloc_spa(struct cxl_afu *afu, int mode) -{ - unsigned spa_size; - - /* Work out how many pages to allocate */ - afu->native->spa_order = -1; - do { - afu->native->spa_order++; - spa_size = (1 << afu->native->spa_order) * PAGE_SIZE; - - if (spa_size > 0x100000) { - dev_warn(&afu->dev, "num_of_processes too large for the SPA, limiting to %i (0x%x)\n", - afu->native->spa_max_procs, afu->native->spa_size); - if (mode != CXL_MODE_DEDICATED) - afu->num_procs = afu->native->spa_max_procs; - break; - } - - 
afu->native->spa_size = spa_size; - afu->native->spa_max_procs = spa_max_procs(afu->native->spa_size); - } while (afu->native->spa_max_procs < afu->num_procs); - - if (!(afu->native->spa = (struct cxl_process_element *) - __get_free_pages(GFP_KERNEL | __GFP_ZERO, afu->native->spa_order))) { - pr_err("cxl_alloc_spa: Unable to allocate scheduled process area\n"); - return -ENOMEM; - } - pr_devel("spa pages: %i afu->spa_max_procs: %i afu->num_procs: %i\n", - 1<<afu->native->spa_order, afu->native->spa_max_procs, afu->num_procs); - - return 0; -} - -static void attach_spa(struct cxl_afu *afu) -{ - u64 spap; - - afu->native->sw_command_status = (__be64 *)((char *)afu->native->spa + - ((afu->native->spa_max_procs + 3) * 128)); - - spap = virt_to_phys(afu->native->spa) & CXL_PSL_SPAP_Addr; - spap |= ((afu->native->spa_size >> (12 - CXL_PSL_SPAP_Size_Shift)) - 1) & CXL_PSL_SPAP_Size; - spap |= CXL_PSL_SPAP_V; - pr_devel("cxl: SPA allocated at 0x%p. Max processes: %i, sw_command_status: 0x%p CXL_PSL_SPAP_An=0x%016llx\n", - afu->native->spa, afu->native->spa_max_procs, - afu->native->sw_command_status, spap); - cxl_p1n_write(afu, CXL_PSL_SPAP_An, spap); -} - -static inline void detach_spa(struct cxl_afu *afu) -{ - cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0); -} - -void cxl_release_spa(struct cxl_afu *afu) -{ - if (afu->native->spa) { - free_pages((unsigned long) afu->native->spa, - afu->native->spa_order); - afu->native->spa = NULL; - } -} - -/* - * Invalidation of all ERAT entries is no longer required by CAIA2. Use - * only for debug. 
- */ -int cxl_invalidate_all_psl9(struct cxl *adapter) -{ - unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT); - u64 ierat; - - pr_devel("CXL adapter - invalidation of all ERAT entries\n"); - - /* Invalidates all ERAT entries for Radix or HPT */ - ierat = CXL_XSL9_IERAT_IALL; - if (radix_enabled()) - ierat |= CXL_XSL9_IERAT_INVR; - cxl_p1_write(adapter, CXL_XSL9_IERAT, ierat); - - while (cxl_p1_read(adapter, CXL_XSL9_IERAT) & CXL_XSL9_IERAT_IINPROG) { - if (time_after_eq(jiffies, timeout)) { - dev_warn(&adapter->dev, - "WARNING: CXL adapter invalidation of all ERAT entries timed out!\n"); - return -EBUSY; - } - if (!cxl_ops->link_ok(adapter, NULL)) - return -EIO; - cpu_relax(); - } - return 0; -} - -int cxl_invalidate_all_psl8(struct cxl *adapter) -{ - unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT); - - pr_devel("CXL adapter wide TLBIA & SLBIA\n"); - - cxl_p1_write(adapter, CXL_PSL_AFUSEL, CXL_PSL_AFUSEL_A); - - cxl_p1_write(adapter, CXL_PSL_TLBIA, CXL_TLB_SLB_IQ_ALL); - while (cxl_p1_read(adapter, CXL_PSL_TLBIA) & CXL_TLB_SLB_P) { - if (time_after_eq(jiffies, timeout)) { - dev_warn(&adapter->dev, "WARNING: CXL adapter wide TLBIA timed out!\n"); - return -EBUSY; - } - if (!cxl_ops->link_ok(adapter, NULL)) - return -EIO; - cpu_relax(); - } - - cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_ALL); - while (cxl_p1_read(adapter, CXL_PSL_SLBIA) & CXL_TLB_SLB_P) { - if (time_after_eq(jiffies, timeout)) { - dev_warn(&adapter->dev, "WARNING: CXL adapter wide SLBIA timed out!\n"); - return -EBUSY; - } - if (!cxl_ops->link_ok(adapter, NULL)) - return -EIO; - cpu_relax(); - } - return 0; -} - -int cxl_data_cache_flush(struct cxl *adapter) -{ - u64 reg; - unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT); - - /* - * Do a datacache flush only if datacache is available. - * In case of PSL9D datacache absent hence flush operation. - * would timeout. - */ - if (adapter->native->no_data_cache) { - pr_devel("No PSL data cache. 
Ignoring cache flush req.\n"); - return 0; - } - - pr_devel("Flushing data cache\n"); - reg = cxl_p1_read(adapter, CXL_PSL_Control); - reg |= CXL_PSL_Control_Fr; - cxl_p1_write(adapter, CXL_PSL_Control, reg); - - reg = cxl_p1_read(adapter, CXL_PSL_Control); - while ((reg & CXL_PSL_Control_Fs_MASK) != CXL_PSL_Control_Fs_Complete) { - if (time_after_eq(jiffies, timeout)) { - dev_warn(&adapter->dev, "WARNING: cache flush timed out!\n"); - return -EBUSY; - } - - if (!cxl_ops->link_ok(adapter, NULL)) { - dev_warn(&adapter->dev, "WARNING: link down when flushing cache\n"); - return -EIO; - } - cpu_relax(); - reg = cxl_p1_read(adapter, CXL_PSL_Control); - } - - reg &= ~CXL_PSL_Control_Fr; - cxl_p1_write(adapter, CXL_PSL_Control, reg); - return 0; -} - -static int cxl_write_sstp(struct cxl_afu *afu, u64 sstp0, u64 sstp1) -{ - int rc; - - /* 1. Disable SSTP by writing 0 to SSTP1[V] */ - cxl_p2n_write(afu, CXL_SSTP1_An, 0); - - /* 2. Invalidate all SLB entries */ - if ((rc = cxl_afu_slbia(afu))) - return rc; - - /* 3. Set SSTP0_An */ - cxl_p2n_write(afu, CXL_SSTP0_An, sstp0); - - /* 4. Set SSTP1_An */ - cxl_p2n_write(afu, CXL_SSTP1_An, sstp1); - - return 0; -} - -/* Using per slice version may improve performance here. (ie. 
SLBIA_An) */ -static void slb_invalid(struct cxl_context *ctx) -{ - struct cxl *adapter = ctx->afu->adapter; - u64 slbia; - - WARN_ON(!mutex_is_locked(&ctx->afu->native->spa_mutex)); - - cxl_p1_write(adapter, CXL_PSL_LBISEL, - ((u64)be32_to_cpu(ctx->elem->common.pid) << 32) | - be32_to_cpu(ctx->elem->lpid)); - cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_LPIDPID); - - while (1) { - if (!cxl_ops->link_ok(adapter, NULL)) - break; - slbia = cxl_p1_read(adapter, CXL_PSL_SLBIA); - if (!(slbia & CXL_TLB_SLB_P)) - break; - cpu_relax(); - } -} - -static int do_process_element_cmd(struct cxl_context *ctx, - u64 cmd, u64 pe_state) -{ - u64 state; - unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT); - int rc = 0; - - trace_cxl_llcmd(ctx, cmd); - - WARN_ON(!ctx->afu->enabled); - - ctx->elem->software_state = cpu_to_be32(pe_state); - smp_wmb(); - *(ctx->afu->native->sw_command_status) = cpu_to_be64(cmd | 0 | ctx->pe); - smp_mb(); - cxl_p1n_write(ctx->afu, CXL_PSL_LLCMD_An, cmd | ctx->pe); - while (1) { - if (time_after_eq(jiffies, timeout)) { - dev_warn(&ctx->afu->dev, "WARNING: Process Element Command timed out!\n"); - rc = -EBUSY; - goto out; - } - if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) { - dev_warn(&ctx->afu->dev, "WARNING: Device link down, aborting Process Element Command!\n"); - rc = -EIO; - goto out; - } - state = be64_to_cpup(ctx->afu->native->sw_command_status); - if (state == ~0ULL) { - pr_err("cxl: Error adding process element to AFU\n"); - rc = -1; - goto out; - } - if ((state & (CXL_SPA_SW_CMD_MASK | CXL_SPA_SW_STATE_MASK | CXL_SPA_SW_LINK_MASK)) == - (cmd | (cmd >> 16) | ctx->pe)) - break; - /* - * The command won't finish in the PSL if there are - * outstanding DSIs. Hence we need to yield here in - * case there are outstanding DSIs that we need to - * service. 
Tuning possiblity: we could wait for a - * while before sched - */ - schedule(); - - } -out: - trace_cxl_llcmd_done(ctx, cmd, rc); - return rc; -} - -static int add_process_element(struct cxl_context *ctx) -{ - int rc = 0; - - mutex_lock(&ctx->afu->native->spa_mutex); - pr_devel("%s Adding pe: %i started\n", __func__, ctx->pe); - if (!(rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_ADD, CXL_PE_SOFTWARE_STATE_V))) - ctx->pe_inserted = true; - pr_devel("%s Adding pe: %i finished\n", __func__, ctx->pe); - mutex_unlock(&ctx->afu->native->spa_mutex); - return rc; -} - -static int terminate_process_element(struct cxl_context *ctx) -{ - int rc = 0; - - /* fast path terminate if it's already invalid */ - if (!(ctx->elem->software_state & cpu_to_be32(CXL_PE_SOFTWARE_STATE_V))) - return rc; - - mutex_lock(&ctx->afu->native->spa_mutex); - pr_devel("%s Terminate pe: %i started\n", __func__, ctx->pe); - /* We could be asked to terminate when the hw is down. That - * should always succeed: it's not running if the hw has gone - * away and is being reset. - */ - if (cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) - rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_TERMINATE, - CXL_PE_SOFTWARE_STATE_V | CXL_PE_SOFTWARE_STATE_T); - ctx->elem->software_state = 0; /* Remove Valid bit */ - pr_devel("%s Terminate pe: %i finished\n", __func__, ctx->pe); - mutex_unlock(&ctx->afu->native->spa_mutex); - return rc; -} - -static int remove_process_element(struct cxl_context *ctx) -{ - int rc = 0; - - mutex_lock(&ctx->afu->native->spa_mutex); - pr_devel("%s Remove pe: %i started\n", __func__, ctx->pe); - - /* We could be asked to remove when the hw is down. Again, if - * the hw is down, the PE is gone, so we succeed. 
- */ - if (cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) - rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_REMOVE, 0); - - if (!rc) - ctx->pe_inserted = false; - if (cxl_is_power8()) - slb_invalid(ctx); - pr_devel("%s Remove pe: %i finished\n", __func__, ctx->pe); - mutex_unlock(&ctx->afu->native->spa_mutex); - - return rc; -} - -void cxl_assign_psn_space(struct cxl_context *ctx) -{ - if (!ctx->afu->pp_size || ctx->master) { - ctx->psn_phys = ctx->afu->psn_phys; - ctx->psn_size = ctx->afu->adapter->ps_size; - } else { - ctx->psn_phys = ctx->afu->psn_phys + - (ctx->afu->native->pp_offset + ctx->afu->pp_size * ctx->pe); - ctx->psn_size = ctx->afu->pp_size; - } -} - -static int activate_afu_directed(struct cxl_afu *afu) -{ - int rc; - - dev_info(&afu->dev, "Activating AFU directed mode\n"); - - afu->num_procs = afu->max_procs_virtualised; - if (afu->native->spa == NULL) { - if (cxl_alloc_spa(afu, CXL_MODE_DIRECTED)) - return -ENOMEM; - } - attach_spa(afu); - - cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_AFU); - if (cxl_is_power8()) - cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL); - cxl_p1n_write(afu, CXL_PSL_ID_An, CXL_PSL_ID_An_F | CXL_PSL_ID_An_L); - - afu->current_mode = CXL_MODE_DIRECTED; - - if ((rc = cxl_chardev_m_afu_add(afu))) - return rc; - - if ((rc = cxl_sysfs_afu_m_add(afu))) - goto err; - - if ((rc = cxl_chardev_s_afu_add(afu))) - goto err1; - - return 0; -err1: - cxl_sysfs_afu_m_remove(afu); -err: - cxl_chardev_afu_remove(afu); - return rc; -} - -#ifdef CONFIG_CPU_LITTLE_ENDIAN -#define set_endian(sr) ((sr) |= CXL_PSL_SR_An_LE) -#else -#define set_endian(sr) ((sr) &= ~(CXL_PSL_SR_An_LE)) -#endif - -u64 cxl_calculate_sr(bool master, bool kernel, bool real_mode, bool p9) -{ - u64 sr = 0; - - set_endian(sr); - if (master) - sr |= CXL_PSL_SR_An_MP; - if (mfspr(SPRN_LPCR) & LPCR_TC) - sr |= CXL_PSL_SR_An_TC; - - if (kernel) { - if (!real_mode) - sr |= CXL_PSL_SR_An_R; - sr |= (mfmsr() & MSR_SF) | CXL_PSL_SR_An_HV; - } else { - sr |= 
CXL_PSL_SR_An_PR | CXL_PSL_SR_An_R; - if (radix_enabled()) - sr |= CXL_PSL_SR_An_HV; - else - sr &= ~(CXL_PSL_SR_An_HV); - if (!test_tsk_thread_flag(current, TIF_32BIT)) - sr |= CXL_PSL_SR_An_SF; - } - if (p9) { - if (radix_enabled()) - sr |= CXL_PSL_SR_An_XLAT_ror; - else - sr |= CXL_PSL_SR_An_XLAT_hpt; - } - return sr; -} - -static u64 calculate_sr(struct cxl_context *ctx) -{ - return cxl_calculate_sr(ctx->master, ctx->kernel, false, - cxl_is_power9()); -} - -static void update_ivtes_directed(struct cxl_context *ctx) -{ - bool need_update = (ctx->status == STARTED); - int r; - - if (need_update) { - WARN_ON(terminate_process_element(ctx)); - WARN_ON(remove_process_element(ctx)); - } - - for (r = 0; r < CXL_IRQ_RANGES; r++) { - ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]); - ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]); - } - - /* - * Theoretically we could use the update llcmd, instead of a - * terminate/remove/add (or if an atomic update was required we could - * do a suspend/update/resume), however it seems there might be issues - * with the update llcmd on some cards (including those using an XSL on - * an ASIC) so for now it's safest to go with the commands that are - * known to work. In the future if we come across a situation where the - * card may be performing transactions using the same PE while we are - * doing this update we might need to revisit this. 
- */ - if (need_update) - WARN_ON(add_process_element(ctx)); -} - -static int process_element_entry_psl9(struct cxl_context *ctx, u64 wed, u64 amr) -{ - u32 pid; - int rc; - - cxl_assign_psn_space(ctx); - - ctx->elem->ctxtime = 0; /* disable */ - ctx->elem->lpid = cpu_to_be32(mfspr(SPRN_LPID)); - ctx->elem->haurp = 0; /* disable */ - - if (ctx->kernel) - pid = 0; - else { - if (ctx->mm == NULL) { - pr_devel("%s: unable to get mm for pe=%d pid=%i\n", - __func__, ctx->pe, pid_nr(ctx->pid)); - return -EINVAL; - } - pid = ctx->mm->context.id; - } - - /* Assign a unique TIDR (thread id) for the current thread */ - if (!(ctx->tidr) && (ctx->assign_tidr)) { - rc = set_thread_tidr(current); - if (rc) - return -ENODEV; - ctx->tidr = current->thread.tidr; - pr_devel("%s: current tidr: %d\n", __func__, ctx->tidr); - } - - ctx->elem->common.tid = cpu_to_be32(ctx->tidr); - ctx->elem->common.pid = cpu_to_be32(pid); - - ctx->elem->sr = cpu_to_be64(calculate_sr(ctx)); - - ctx->elem->common.csrp = 0; /* disable */ - - cxl_prefault(ctx, wed); - - /* - * Ensure we have the multiplexed PSL interrupt set up to take faults - * for kernel contexts that may not have allocated any AFU IRQs at all: - */ - if (ctx->irqs.range[0] == 0) { - ctx->irqs.offset[0] = ctx->afu->native->psl_hwirq; - ctx->irqs.range[0] = 1; - } - - ctx->elem->common.amr = cpu_to_be64(amr); - ctx->elem->common.wed = cpu_to_be64(wed); - - return 0; -} - -int cxl_attach_afu_directed_psl9(struct cxl_context *ctx, u64 wed, u64 amr) -{ - int result; - - /* fill the process element entry */ - result = process_element_entry_psl9(ctx, wed, amr); - if (result) - return result; - - update_ivtes_directed(ctx); - - /* first guy needs to enable */ - result = cxl_ops->afu_check_and_enable(ctx->afu); - if (result) - return result; - - return add_process_element(ctx); -} - -int cxl_attach_afu_directed_psl8(struct cxl_context *ctx, u64 wed, u64 amr) -{ - u32 pid; - int result; - - cxl_assign_psn_space(ctx); - - ctx->elem->ctxtime = 0; 
/* disable */ - ctx->elem->lpid = cpu_to_be32(mfspr(SPRN_LPID)); - ctx->elem->haurp = 0; /* disable */ - ctx->elem->u.sdr = cpu_to_be64(mfspr(SPRN_SDR1)); - - pid = current->pid; - if (ctx->kernel) - pid = 0; - ctx->elem->common.tid = 0; - ctx->elem->common.pid = cpu_to_be32(pid); - - ctx->elem->sr = cpu_to_be64(calculate_sr(ctx)); - - ctx->elem->common.csrp = 0; /* disable */ - ctx->elem->common.u.psl8.aurp0 = 0; /* disable */ - ctx->elem->common.u.psl8.aurp1 = 0; /* disable */ - - cxl_prefault(ctx, wed); - - ctx->elem->common.u.psl8.sstp0 = cpu_to_be64(ctx->sstp0); - ctx->elem->common.u.psl8.sstp1 = cpu_to_be64(ctx->sstp1); - - /* - * Ensure we have the multiplexed PSL interrupt set up to take faults - * for kernel contexts that may not have allocated any AFU IRQs at all: - */ - if (ctx->irqs.range[0] == 0) { - ctx->irqs.offset[0] = ctx->afu->native->psl_hwirq; - ctx->irqs.range[0] = 1; - } - - update_ivtes_directed(ctx); - - ctx->elem->common.amr = cpu_to_be64(amr); - ctx->elem->common.wed = cpu_to_be64(wed); - - /* first guy needs to enable */ - if ((result = cxl_ops->afu_check_and_enable(ctx->afu))) - return result; - - return add_process_element(ctx); -} - -static int deactivate_afu_directed(struct cxl_afu *afu) -{ - dev_info(&afu->dev, "Deactivating AFU directed mode\n"); - - afu->current_mode = 0; - afu->num_procs = 0; - - cxl_sysfs_afu_m_remove(afu); - cxl_chardev_afu_remove(afu); - - /* - * The CAIA section 2.2.1 indicates that the procedure for starting and - * stopping an AFU in AFU directed mode is AFU specific, which is not - * ideal since this code is generic and with one exception has no - * knowledge of the AFU. This is in contrast to the procedure for - * disabling a dedicated process AFU, which is documented to just - * require a reset. The architecture does indicate that both an AFU - * reset and an AFU disable should result in the AFU being disabled and - * we do both followed by a PSL purge for safety. 
- * - * Notably we used to have some issues with the disable sequence on PSL - * cards, which is why we ended up using this heavy weight procedure in - * the first place, however a bug was discovered that had rendered the - * disable operation ineffective, so it is conceivable that was the - * sole explanation for those difficulties. Careful regression testing - * is recommended if anyone attempts to remove or reorder these - * operations. - * - * The XSL on the Mellanox CX4 behaves a little differently from the - * PSL based cards and will time out an AFU reset if the AFU is still - * enabled. That card is special in that we do have a means to identify - * it from this code, so in that case we skip the reset and just use a - * disable/purge to avoid the timeout and corresponding noise in the - * kernel log. - */ - if (afu->adapter->native->sl_ops->needs_reset_before_disable) - cxl_ops->afu_reset(afu); - cxl_afu_disable(afu); - cxl_psl_purge(afu); - - return 0; -} - -int cxl_activate_dedicated_process_psl9(struct cxl_afu *afu) -{ - dev_info(&afu->dev, "Activating dedicated process mode\n"); - - /* - * If XSL is set to dedicated mode (Set in PSL_SCNTL reg), the - * XSL and AFU are programmed to work with a single context. - * The context information should be configured in the SPA area - * index 0 (so PSL_SPAP must be configured before enabling the - * AFU). 
- */ - afu->num_procs = 1; - if (afu->native->spa == NULL) { - if (cxl_alloc_spa(afu, CXL_MODE_DEDICATED)) - return -ENOMEM; - } - attach_spa(afu); - - cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_Process); - cxl_p1n_write(afu, CXL_PSL_ID_An, CXL_PSL_ID_An_F | CXL_PSL_ID_An_L); - - afu->current_mode = CXL_MODE_DEDICATED; - - return cxl_chardev_d_afu_add(afu); -} - -int cxl_activate_dedicated_process_psl8(struct cxl_afu *afu) -{ - dev_info(&afu->dev, "Activating dedicated process mode\n"); - - cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_Process); - - cxl_p1n_write(afu, CXL_PSL_CtxTime_An, 0); /* disable */ - cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0); /* disable */ - cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL); - cxl_p1n_write(afu, CXL_PSL_LPID_An, mfspr(SPRN_LPID)); - cxl_p1n_write(afu, CXL_HAURP_An, 0); /* disable */ - cxl_p1n_write(afu, CXL_PSL_SDR_An, mfspr(SPRN_SDR1)); - - cxl_p2n_write(afu, CXL_CSRP_An, 0); /* disable */ - cxl_p2n_write(afu, CXL_AURP0_An, 0); /* disable */ - cxl_p2n_write(afu, CXL_AURP1_An, 0); /* disable */ - - afu->current_mode = CXL_MODE_DEDICATED; - afu->num_procs = 1; - - return cxl_chardev_d_afu_add(afu); -} - -void cxl_update_dedicated_ivtes_psl9(struct cxl_context *ctx) -{ - int r; - - for (r = 0; r < CXL_IRQ_RANGES; r++) { - ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]); - ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]); - } -} - -void cxl_update_dedicated_ivtes_psl8(struct cxl_context *ctx) -{ - struct cxl_afu *afu = ctx->afu; - - cxl_p1n_write(afu, CXL_PSL_IVTE_Offset_An, - (((u64)ctx->irqs.offset[0] & 0xffff) << 48) | - (((u64)ctx->irqs.offset[1] & 0xffff) << 32) | - (((u64)ctx->irqs.offset[2] & 0xffff) << 16) | - ((u64)ctx->irqs.offset[3] & 0xffff)); - cxl_p1n_write(afu, CXL_PSL_IVTE_Limit_An, (u64) - (((u64)ctx->irqs.range[0] & 0xffff) << 48) | - (((u64)ctx->irqs.range[1] & 0xffff) << 32) | - (((u64)ctx->irqs.range[2] & 0xffff) << 16) | - ((u64)ctx->irqs.range[3] & 
0xffff)); -} - -int cxl_attach_dedicated_process_psl9(struct cxl_context *ctx, u64 wed, u64 amr) -{ - struct cxl_afu *afu = ctx->afu; - int result; - - /* fill the process element entry */ - result = process_element_entry_psl9(ctx, wed, amr); - if (result) - return result; - - if (ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes) - afu->adapter->native->sl_ops->update_dedicated_ivtes(ctx); - - ctx->elem->software_state = cpu_to_be32(CXL_PE_SOFTWARE_STATE_V); - /* - * Ideally we should do a wmb() here to make sure the changes to the - * PE are visible to the card before we call afu_enable. - * On ppc64 though all mmios are preceded by a 'sync' instruction hence - * we dont dont need one here. - */ - - result = cxl_ops->afu_reset(afu); - if (result) - return result; - - return afu_enable(afu); -} - -int cxl_attach_dedicated_process_psl8(struct cxl_context *ctx, u64 wed, u64 amr) -{ - struct cxl_afu *afu = ctx->afu; - u64 pid; - int rc; - - pid = (u64)current->pid << 32; - if (ctx->kernel) - pid = 0; - cxl_p2n_write(afu, CXL_PSL_PID_TID_An, pid); - - cxl_p1n_write(afu, CXL_PSL_SR_An, calculate_sr(ctx)); - - if ((rc = cxl_write_sstp(afu, ctx->sstp0, ctx->sstp1))) - return rc; - - cxl_prefault(ctx, wed); - - if (ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes) - afu->adapter->native->sl_ops->update_dedicated_ivtes(ctx); - - cxl_p2n_write(afu, CXL_PSL_AMR_An, amr); - - /* master only context for dedicated */ - cxl_assign_psn_space(ctx); - - if ((rc = cxl_ops->afu_reset(afu))) - return rc; - - cxl_p2n_write(afu, CXL_PSL_WED_An, wed); - - return afu_enable(afu); -} - -static int deactivate_dedicated_process(struct cxl_afu *afu) -{ - dev_info(&afu->dev, "Deactivating dedicated process mode\n"); - - afu->current_mode = 0; - afu->num_procs = 0; - - cxl_chardev_afu_remove(afu); - - return 0; -} - -static int native_afu_deactivate_mode(struct cxl_afu *afu, int mode) -{ - if (mode == CXL_MODE_DIRECTED) - return deactivate_afu_directed(afu); - if (mode == 
CXL_MODE_DEDICATED) - return deactivate_dedicated_process(afu); - return 0; -} - -static int native_afu_activate_mode(struct cxl_afu *afu, int mode) -{ - if (!mode) - return 0; - if (!(mode & afu->modes_supported)) - return -EINVAL; - - if (!cxl_ops->link_ok(afu->adapter, afu)) { - WARN(1, "Device link is down, refusing to activate!\n"); - return -EIO; - } - - if (mode == CXL_MODE_DIRECTED) - return activate_afu_directed(afu); - if ((mode == CXL_MODE_DEDICATED) && - (afu->adapter->native->sl_ops->activate_dedicated_process)) - return afu->adapter->native->sl_ops->activate_dedicated_process(afu); - - return -EINVAL; -} - -static int native_attach_process(struct cxl_context *ctx, bool kernel, - u64 wed, u64 amr) -{ - if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) { - WARN(1, "Device link is down, refusing to attach process!\n"); - return -EIO; - } - - ctx->kernel = kernel; - if ((ctx->afu->current_mode == CXL_MODE_DIRECTED) && - (ctx->afu->adapter->native->sl_ops->attach_afu_directed)) - return ctx->afu->adapter->native->sl_ops->attach_afu_directed(ctx, wed, amr); - - if ((ctx->afu->current_mode == CXL_MODE_DEDICATED) && - (ctx->afu->adapter->native->sl_ops->attach_dedicated_process)) - return ctx->afu->adapter->native->sl_ops->attach_dedicated_process(ctx, wed, amr); - - return -EINVAL; -} - -static inline int detach_process_native_dedicated(struct cxl_context *ctx) -{ - /* - * The CAIA section 2.1.1 indicates that we need to do an AFU reset to - * stop the AFU in dedicated mode (we therefore do not make that - * optional like we do in the afu directed path). It does not indicate - * that we need to do an explicit disable (which should occur - * implicitly as part of the reset) or purge, but we do these as well - * to be on the safe side. 
- * - * Notably we used to have some issues with the disable sequence - * (before the sequence was spelled out in the architecture) which is - * why we were so heavy weight in the first place, however a bug was - * discovered that had rendered the disable operation ineffective, so - * it is conceivable that was the sole explanation for those - * difficulties. Point is, we should be careful and do some regression - * testing if we ever attempt to remove any part of this procedure. - */ - cxl_ops->afu_reset(ctx->afu); - cxl_afu_disable(ctx->afu); - cxl_psl_purge(ctx->afu); - return 0; -} - -static void native_update_ivtes(struct cxl_context *ctx) -{ - if (ctx->afu->current_mode == CXL_MODE_DIRECTED) - return update_ivtes_directed(ctx); - if ((ctx->afu->current_mode == CXL_MODE_DEDICATED) && - (ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes)) - return ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes(ctx); - WARN(1, "native_update_ivtes: Bad mode\n"); -} - -static inline int detach_process_native_afu_directed(struct cxl_context *ctx) -{ - if (!ctx->pe_inserted) - return 0; - if (terminate_process_element(ctx)) - return -1; - if (remove_process_element(ctx)) - return -1; - - return 0; -} - -static int native_detach_process(struct cxl_context *ctx) -{ - trace_cxl_detach(ctx); - - if (ctx->afu->current_mode == CXL_MODE_DEDICATED) - return detach_process_native_dedicated(ctx); - - return detach_process_native_afu_directed(ctx); -} - -static int native_get_irq_info(struct cxl_afu *afu, struct cxl_irq_info *info) -{ - /* If the adapter has gone away, we can't get any meaningful - * information. 
- */ - if (!cxl_ops->link_ok(afu->adapter, afu)) - return -EIO; - - info->dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An); - info->dar = cxl_p2n_read(afu, CXL_PSL_DAR_An); - if (cxl_is_power8()) - info->dsr = cxl_p2n_read(afu, CXL_PSL_DSR_An); - info->afu_err = cxl_p2n_read(afu, CXL_AFU_ERR_An); - info->errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An); - info->proc_handle = 0; - - return 0; -} - -void cxl_native_irq_dump_regs_psl9(struct cxl_context *ctx) -{ - u64 fir1, serr; - - fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL9_FIR1); - - dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1); - if (ctx->afu->adapter->native->sl_ops->register_serr_irq) { - serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An); - cxl_afu_decode_psl_serr(ctx->afu, serr); - } -} - -void cxl_native_irq_dump_regs_psl8(struct cxl_context *ctx) -{ - u64 fir1, fir2, fir_slice, serr, afu_debug; - - fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR1); - fir2 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR2); - fir_slice = cxl_p1n_read(ctx->afu, CXL_PSL_FIR_SLICE_An); - afu_debug = cxl_p1n_read(ctx->afu, CXL_AFU_DEBUG_An); - - dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1); - dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%016llx\n", fir2); - if (ctx->afu->adapter->native->sl_ops->register_serr_irq) { - serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An); - cxl_afu_decode_psl_serr(ctx->afu, serr); - } - dev_crit(&ctx->afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice); - dev_crit(&ctx->afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug); -} - -static irqreturn_t native_handle_psl_slice_error(struct cxl_context *ctx, - u64 dsisr, u64 errstat) -{ - - dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%016llx\n", errstat); - - if (ctx->afu->adapter->native->sl_ops->psl_irq_dump_registers) - ctx->afu->adapter->native->sl_ops->psl_irq_dump_registers(ctx); - - if (ctx->afu->adapter->native->sl_ops->debugfs_stop_trace) { - dev_crit(&ctx->afu->dev, "STOPPING CXL TRACE\n"); - 
ctx->afu->adapter->native->sl_ops->debugfs_stop_trace(ctx->afu->adapter); - } - - return cxl_ops->ack_irq(ctx, 0, errstat); -} - -static bool cxl_is_translation_fault(struct cxl_afu *afu, u64 dsisr) -{ - if ((cxl_is_power8()) && (dsisr & CXL_PSL_DSISR_TRANS)) - return true; - - if ((cxl_is_power9()) && (dsisr & CXL_PSL9_DSISR_An_TF)) - return true; - - return false; -} - -irqreturn_t cxl_fail_irq_psl(struct cxl_afu *afu, struct cxl_irq_info *irq_info) -{ - if (cxl_is_translation_fault(afu, irq_info->dsisr)) - cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE); - else - cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A); - - return IRQ_HANDLED; -} - -static irqreturn_t native_irq_multiplexed(int irq, void *data) -{ - struct cxl_afu *afu = data; - struct cxl_context *ctx; - struct cxl_irq_info irq_info; - u64 phreg = cxl_p2n_read(afu, CXL_PSL_PEHandle_An); - int ph, ret = IRQ_HANDLED, res; - - /* check if eeh kicked in while the interrupt was in flight */ - if (unlikely(phreg == ~0ULL)) { - dev_warn(&afu->dev, - "Ignoring slice interrupt(%d) due to fenced card", - irq); - return IRQ_HANDLED; - } - /* Mask the pe-handle from register value */ - ph = phreg & 0xffff; - if ((res = native_get_irq_info(afu, &irq_info))) { - WARN(1, "Unable to get CXL IRQ Info: %i\n", res); - if (afu->adapter->native->sl_ops->fail_irq) - return afu->adapter->native->sl_ops->fail_irq(afu, &irq_info); - return ret; - } - - rcu_read_lock(); - ctx = idr_find(&afu->contexts_idr, ph); - if (ctx) { - if (afu->adapter->native->sl_ops->handle_interrupt) - ret = afu->adapter->native->sl_ops->handle_interrupt(irq, ctx, &irq_info); - rcu_read_unlock(); - return ret; - } - rcu_read_unlock(); - - WARN(1, "Unable to demultiplex CXL PSL IRQ for PE %i DSISR %016llx DAR" - " %016llx\n(Possible AFU HW issue - was a term/remove acked" - " with outstanding transactions?)\n", ph, irq_info.dsisr, - irq_info.dar); - if (afu->adapter->native->sl_ops->fail_irq) - ret = 
afu->adapter->native->sl_ops->fail_irq(afu, &irq_info); - return ret; -} - -static void native_irq_wait(struct cxl_context *ctx) -{ - u64 dsisr; - int timeout = 1000; - int ph; - - /* - * Wait until no further interrupts are presented by the PSL - * for this context. - */ - while (timeout--) { - ph = cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) & 0xffff; - if (ph != ctx->pe) - return; - dsisr = cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An); - if (cxl_is_power8() && - ((dsisr & CXL_PSL_DSISR_PENDING) == 0)) - return; - if (cxl_is_power9() && - ((dsisr & CXL_PSL9_DSISR_PENDING) == 0)) - return; - /* - * We are waiting for the workqueue to process our - * irq, so need to let that run here. - */ - msleep(1); - } - - dev_warn(&ctx->afu->dev, "WARNING: waiting on DSI for PE %i" - " DSISR %016llx!\n", ph, dsisr); - return; -} - -static irqreturn_t native_slice_irq_err(int irq, void *data) -{ - struct cxl_afu *afu = data; - u64 errstat, serr, afu_error, dsisr; - u64 fir_slice, afu_debug, irq_mask; - - /* - * slice err interrupt is only used with full PSL (no XSL) - */ - serr = cxl_p1n_read(afu, CXL_PSL_SERR_An); - errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An); - afu_error = cxl_p2n_read(afu, CXL_AFU_ERR_An); - dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An); - cxl_afu_decode_psl_serr(afu, serr); - - if (cxl_is_power8()) { - fir_slice = cxl_p1n_read(afu, CXL_PSL_FIR_SLICE_An); - afu_debug = cxl_p1n_read(afu, CXL_AFU_DEBUG_An); - dev_crit(&afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice); - dev_crit(&afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug); - } - dev_crit(&afu->dev, "CXL_PSL_ErrStat_An: 0x%016llx\n", errstat); - dev_crit(&afu->dev, "AFU_ERR_An: 0x%.16llx\n", afu_error); - dev_crit(&afu->dev, "PSL_DSISR_An: 0x%.16llx\n", dsisr); - - /* mask off the IRQ so it won't retrigger until the AFU is reset */ - irq_mask = (serr & CXL_PSL_SERR_An_IRQS) >> 32; - serr |= irq_mask; - cxl_p1n_write(afu, CXL_PSL_SERR_An, serr); - dev_info(&afu->dev, "Further such interrupts 
will be masked until the AFU is reset\n"); - - return IRQ_HANDLED; -} - -void cxl_native_err_irq_dump_regs_psl9(struct cxl *adapter) -{ - u64 fir1; - - fir1 = cxl_p1_read(adapter, CXL_PSL9_FIR1); - dev_crit(&adapter->dev, "PSL_FIR: 0x%016llx\n", fir1); -} - -void cxl_native_err_irq_dump_regs_psl8(struct cxl *adapter) -{ - u64 fir1, fir2; - - fir1 = cxl_p1_read(adapter, CXL_PSL_FIR1); - fir2 = cxl_p1_read(adapter, CXL_PSL_FIR2); - dev_crit(&adapter->dev, - "PSL_FIR1: 0x%016llx\nPSL_FIR2: 0x%016llx\n", - fir1, fir2); -} - -static irqreturn_t native_irq_err(int irq, void *data) -{ - struct cxl *adapter = data; - u64 err_ivte; - - WARN(1, "CXL ERROR interrupt %i\n", irq); - - err_ivte = cxl_p1_read(adapter, CXL_PSL_ErrIVTE); - dev_crit(&adapter->dev, "PSL_ErrIVTE: 0x%016llx\n", err_ivte); - - if (adapter->native->sl_ops->debugfs_stop_trace) { - dev_crit(&adapter->dev, "STOPPING CXL TRACE\n"); - adapter->native->sl_ops->debugfs_stop_trace(adapter); - } - - if (adapter->native->sl_ops->err_irq_dump_registers) - adapter->native->sl_ops->err_irq_dump_registers(adapter); - - return IRQ_HANDLED; -} - -int cxl_native_register_psl_err_irq(struct cxl *adapter) -{ - int rc; - - adapter->irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err", - dev_name(&adapter->dev)); - if (!adapter->irq_name) - return -ENOMEM; - - if ((rc = cxl_register_one_irq(adapter, native_irq_err, adapter, - &adapter->native->err_hwirq, - &adapter->native->err_virq, - adapter->irq_name))) { - kfree(adapter->irq_name); - adapter->irq_name = NULL; - return rc; - } - - cxl_p1_write(adapter, CXL_PSL_ErrIVTE, adapter->native->err_hwirq & 0xffff); - - return 0; -} - -void cxl_native_release_psl_err_irq(struct cxl *adapter) -{ - if (adapter->native->err_virq == 0 || - adapter->native->err_virq != - irq_find_mapping(NULL, adapter->native->err_hwirq)) - return; - - cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000); - cxl_unmap_irq(adapter->native->err_virq, adapter); - cxl_ops->release_one_irq(adapter, 
adapter->native->err_hwirq); - kfree(adapter->irq_name); - adapter->native->err_virq = 0; -} - -int cxl_native_register_serr_irq(struct cxl_afu *afu) -{ - u64 serr; - int rc; - - afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err", - dev_name(&afu->dev)); - if (!afu->err_irq_name) - return -ENOMEM; - - if ((rc = cxl_register_one_irq(afu->adapter, native_slice_irq_err, afu, - &afu->serr_hwirq, - &afu->serr_virq, afu->err_irq_name))) { - kfree(afu->err_irq_name); - afu->err_irq_name = NULL; - return rc; - } - - serr = cxl_p1n_read(afu, CXL_PSL_SERR_An); - if (cxl_is_power8()) - serr = (serr & 0x00ffffffffff0000ULL) | (afu->serr_hwirq & 0xffff); - if (cxl_is_power9()) { - /* - * By default, all errors are masked. So don't set all masks. - * Slice errors will be transfered. - */ - serr = (serr & ~0xff0000007fffffffULL) | (afu->serr_hwirq & 0xffff); - } - cxl_p1n_write(afu, CXL_PSL_SERR_An, serr); - - return 0; -} - -void cxl_native_release_serr_irq(struct cxl_afu *afu) -{ - if (afu->serr_virq == 0 || - afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq)) - return; - - cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000); - cxl_unmap_irq(afu->serr_virq, afu); - cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq); - kfree(afu->err_irq_name); - afu->serr_virq = 0; -} - -int cxl_native_register_psl_irq(struct cxl_afu *afu) -{ - int rc; - - afu->psl_irq_name = kasprintf(GFP_KERNEL, "cxl-%s", - dev_name(&afu->dev)); - if (!afu->psl_irq_name) - return -ENOMEM; - - if ((rc = cxl_register_one_irq(afu->adapter, native_irq_multiplexed, - afu, &afu->native->psl_hwirq, &afu->native->psl_virq, - afu->psl_irq_name))) { - kfree(afu->psl_irq_name); - afu->psl_irq_name = NULL; - } - return rc; -} - -void cxl_native_release_psl_irq(struct cxl_afu *afu) -{ - if (afu->native->psl_virq == 0 || - afu->native->psl_virq != - irq_find_mapping(NULL, afu->native->psl_hwirq)) - return; - - cxl_unmap_irq(afu->native->psl_virq, afu); - cxl_ops->release_one_irq(afu->adapter, 
afu->native->psl_hwirq); - kfree(afu->psl_irq_name); - afu->native->psl_virq = 0; -} - -static void recover_psl_err(struct cxl_afu *afu, u64 errstat) -{ - u64 dsisr; - - pr_devel("RECOVERING FROM PSL ERROR... (0x%016llx)\n", errstat); - - /* Clear PSL_DSISR[PE] */ - dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An); - cxl_p2n_write(afu, CXL_PSL_DSISR_An, dsisr & ~CXL_PSL_DSISR_An_PE); - - /* Write 1s to clear error status bits */ - cxl_p2n_write(afu, CXL_PSL_ErrStat_An, errstat); -} - -static int native_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask) -{ - trace_cxl_psl_irq_ack(ctx, tfc); - if (tfc) - cxl_p2n_write(ctx->afu, CXL_PSL_TFC_An, tfc); - if (psl_reset_mask) - recover_psl_err(ctx->afu, psl_reset_mask); - - return 0; -} - -int cxl_check_error(struct cxl_afu *afu) -{ - return (cxl_p1n_read(afu, CXL_PSL_SCNTL_An) == ~0ULL); -} - -static bool native_support_attributes(const char *attr_name, - enum cxl_attrs type) -{ - return true; -} - -static int native_afu_cr_read64(struct cxl_afu *afu, int cr, u64 off, u64 *out) -{ - if (unlikely(!cxl_ops->link_ok(afu->adapter, afu))) - return -EIO; - if (unlikely(off >= afu->crs_len)) - return -ERANGE; - *out = in_le64(afu->native->afu_desc_mmio + afu->crs_offset + - (cr * afu->crs_len) + off); - return 0; -} - -static int native_afu_cr_read32(struct cxl_afu *afu, int cr, u64 off, u32 *out) -{ - if (unlikely(!cxl_ops->link_ok(afu->adapter, afu))) - return -EIO; - if (unlikely(off >= afu->crs_len)) - return -ERANGE; - *out = in_le32(afu->native->afu_desc_mmio + afu->crs_offset + - (cr * afu->crs_len) + off); - return 0; -} - -static int native_afu_cr_read16(struct cxl_afu *afu, int cr, u64 off, u16 *out) -{ - u64 aligned_off = off & ~0x3L; - u32 val; - int rc; - - rc = native_afu_cr_read32(afu, cr, aligned_off, &val); - if (!rc) - *out = (val >> ((off & 0x3) * 8)) & 0xffff; - return rc; -} - -static int native_afu_cr_read8(struct cxl_afu *afu, int cr, u64 off, u8 *out) -{ - u64 aligned_off = off & ~0x3L; - u32 val; 
- int rc; - - rc = native_afu_cr_read32(afu, cr, aligned_off, &val); - if (!rc) - *out = (val >> ((off & 0x3) * 8)) & 0xff; - return rc; -} - -static int native_afu_cr_write32(struct cxl_afu *afu, int cr, u64 off, u32 in) -{ - if (unlikely(!cxl_ops->link_ok(afu->adapter, afu))) - return -EIO; - if (unlikely(off >= afu->crs_len)) - return -ERANGE; - out_le32(afu->native->afu_desc_mmio + afu->crs_offset + - (cr * afu->crs_len) + off, in); - return 0; -} - -static int native_afu_cr_write16(struct cxl_afu *afu, int cr, u64 off, u16 in) -{ - u64 aligned_off = off & ~0x3L; - u32 val32, mask, shift; - int rc; - - rc = native_afu_cr_read32(afu, cr, aligned_off, &val32); - if (rc) - return rc; - shift = (off & 0x3) * 8; - WARN_ON(shift == 24); - mask = 0xffff << shift; - val32 = (val32 & ~mask) | (in << shift); - - rc = native_afu_cr_write32(afu, cr, aligned_off, val32); - return rc; -} - -static int native_afu_cr_write8(struct cxl_afu *afu, int cr, u64 off, u8 in) -{ - u64 aligned_off = off & ~0x3L; - u32 val32, mask, shift; - int rc; - - rc = native_afu_cr_read32(afu, cr, aligned_off, &val32); - if (rc) - return rc; - shift = (off & 0x3) * 8; - mask = 0xff << shift; - val32 = (val32 & ~mask) | (in << shift); - - rc = native_afu_cr_write32(afu, cr, aligned_off, val32); - return rc; -} - -const struct cxl_backend_ops cxl_native_ops = { - .module = THIS_MODULE, - .adapter_reset = cxl_pci_reset, - .alloc_one_irq = cxl_pci_alloc_one_irq, - .release_one_irq = cxl_pci_release_one_irq, - .alloc_irq_ranges = cxl_pci_alloc_irq_ranges, - .release_irq_ranges = cxl_pci_release_irq_ranges, - .setup_irq = cxl_pci_setup_irq, - .handle_psl_slice_error = native_handle_psl_slice_error, - .psl_interrupt = NULL, - .ack_irq = native_ack_irq, - .irq_wait = native_irq_wait, - .attach_process = native_attach_process, - .detach_process = native_detach_process, - .update_ivtes = native_update_ivtes, - .support_attributes = native_support_attributes, - .link_ok = cxl_adapter_link_ok, - .release_afu 
= cxl_pci_release_afu, - .afu_read_err_buffer = cxl_pci_afu_read_err_buffer, - .afu_check_and_enable = native_afu_check_and_enable, - .afu_activate_mode = native_afu_activate_mode, - .afu_deactivate_mode = native_afu_deactivate_mode, - .afu_reset = native_afu_reset, - .afu_cr_read8 = native_afu_cr_read8, - .afu_cr_read16 = native_afu_cr_read16, - .afu_cr_read32 = native_afu_cr_read32, - .afu_cr_read64 = native_afu_cr_read64, - .afu_cr_write8 = native_afu_cr_write8, - .afu_cr_write16 = native_afu_cr_write16, - .afu_cr_write32 = native_afu_cr_write32, - .read_adapter_vpd = cxl_pci_read_adapter_vpd, -}; diff --git a/drivers/misc/cxl/of.c b/drivers/misc/cxl/of.c deleted file mode 100644 index 25ce725035e7..000000000000 --- a/drivers/misc/cxl/of.c +++ /dev/null @@ -1,506 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright 2015 IBM Corp. - */ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/platform_device.h> -#include <linux/slab.h> -#include <linux/of_address.h> -#include <linux/of_platform.h> - -#include "cxl.h" - - -static const __be32 *read_prop_string(const struct device_node *np, - const char *prop_name) -{ - const __be32 *prop; - - prop = of_get_property(np, prop_name, NULL); - if (cxl_verbose && prop) - pr_info("%s: %s\n", prop_name, (char *) prop); - return prop; -} - -static const __be32 *read_prop_dword(const struct device_node *np, - const char *prop_name, u32 *val) -{ - const __be32 *prop; - - prop = of_get_property(np, prop_name, NULL); - if (prop) - *val = be32_to_cpu(prop[0]); - if (cxl_verbose && prop) - pr_info("%s: %#x (%u)\n", prop_name, *val, *val); - return prop; -} - -static const __be64 *read_prop64_dword(const struct device_node *np, - const char *prop_name, u64 *val) -{ - const __be64 *prop; - - prop = of_get_property(np, prop_name, NULL); - if (prop) - *val = be64_to_cpu(prop[0]); - if (cxl_verbose && prop) - pr_info("%s: %#llx (%llu)\n", prop_name, *val, *val); - return prop; -} - - -static int 
read_handle(struct device_node *np, u64 *handle) -{ - const __be32 *prop; - u64 size; - - /* Get address and size of the node */ - prop = of_get_address(np, 0, &size, NULL); - if (size) - return -EINVAL; - - /* Helper to read a big number; size is in cells (not bytes) */ - *handle = of_read_number(prop, of_n_addr_cells(np)); - return 0; -} - -static int read_phys_addr(struct device_node *np, char *prop_name, - struct cxl_afu *afu) -{ - int i, len, entry_size, naddr, nsize, type; - u64 addr, size; - const __be32 *prop; - - naddr = of_n_addr_cells(np); - nsize = of_n_size_cells(np); - - prop = of_get_property(np, prop_name, &len); - if (prop) { - entry_size = naddr + nsize; - for (i = 0; i < (len / 4); i += entry_size, prop += entry_size) { - type = be32_to_cpu(prop[0]); - addr = of_read_number(prop, naddr); - size = of_read_number(&prop[naddr], nsize); - switch (type) { - case 0: /* unit address */ - afu->guest->handle = addr; - break; - case 1: /* p2 area */ - afu->guest->p2n_phys += addr; - afu->guest->p2n_size = size; - break; - case 2: /* problem state area */ - afu->psn_phys += addr; - afu->adapter->ps_size = size; - break; - default: - pr_err("Invalid address type %d found in %s property of AFU\n", - type, prop_name); - return -EINVAL; - } - if (cxl_verbose) - pr_info("%s: %#x %#llx (size %#llx)\n", - prop_name, type, addr, size); - } - } - return 0; -} - -static int read_vpd(struct cxl *adapter, struct cxl_afu *afu) -{ - char vpd[256]; - int rc; - size_t len = sizeof(vpd); - - memset(vpd, 0, len); - - if (adapter) - rc = cxl_guest_read_adapter_vpd(adapter, vpd, len); - else - rc = cxl_guest_read_afu_vpd(afu, vpd, len); - - if (rc > 0) { - cxl_dump_debug_buffer(vpd, rc); - rc = 0; - } - return rc; -} - -int cxl_of_read_afu_handle(struct cxl_afu *afu, struct device_node *afu_np) -{ - if (read_handle(afu_np, &afu->guest->handle)) - return -EINVAL; - pr_devel("AFU handle: 0x%.16llx\n", afu->guest->handle); - - return 0; -} - -int cxl_of_read_afu_properties(struct 
cxl_afu *afu, struct device_node *np) -{ - int i, len, rc; - char *p; - const __be32 *prop; - u16 device_id, vendor_id; - u32 val = 0, class_code; - - /* Properties are read in the same order as listed in PAPR */ - - if (cxl_verbose) { - pr_info("Dump of the 'ibm,coherent-platform-function' node properties:\n"); - - prop = of_get_property(np, "compatible", &len); - i = 0; - while (i < len) { - p = (char *) prop + i; - pr_info("compatible: %s\n", p); - i += strlen(p) + 1; - } - read_prop_string(np, "name"); - } - - rc = read_phys_addr(np, "reg", afu); - if (rc) - return rc; - - rc = read_phys_addr(np, "assigned-addresses", afu); - if (rc) - return rc; - - if (afu->psn_phys == 0) - afu->psa = false; - else - afu->psa = true; - - if (cxl_verbose) { - read_prop_string(np, "ibm,loc-code"); - read_prop_string(np, "device_type"); - } - - read_prop_dword(np, "ibm,#processes", &afu->max_procs_virtualised); - - if (cxl_verbose) { - read_prop_dword(np, "ibm,scratchpad-size", &val); - read_prop_dword(np, "ibm,programmable", &val); - read_prop_string(np, "ibm,phandle"); - read_vpd(NULL, afu); - } - - read_prop_dword(np, "ibm,max-ints-per-process", &afu->guest->max_ints); - afu->irqs_max = afu->guest->max_ints; - - prop = read_prop_dword(np, "ibm,min-ints-per-process", &afu->pp_irqs); - if (prop) { - /* One extra interrupt for the PSL interrupt is already - * included. Remove it now to keep only AFU interrupts and - * match the native case. 
- */ - afu->pp_irqs--; - } - - if (cxl_verbose) { - read_prop_dword(np, "ibm,max-ints", &val); - read_prop_dword(np, "ibm,vpd-size", &val); - } - - read_prop64_dword(np, "ibm,error-buffer-size", &afu->eb_len); - afu->eb_offset = 0; - - if (cxl_verbose) - read_prop_dword(np, "ibm,config-record-type", &val); - - read_prop64_dword(np, "ibm,config-record-size", &afu->crs_len); - afu->crs_offset = 0; - - read_prop_dword(np, "ibm,#config-records", &afu->crs_num); - - if (cxl_verbose) { - for (i = 0; i < afu->crs_num; i++) { - rc = cxl_ops->afu_cr_read16(afu, i, PCI_DEVICE_ID, - &device_id); - if (!rc) - pr_info("record %d - device-id: %#x\n", - i, device_id); - rc = cxl_ops->afu_cr_read16(afu, i, PCI_VENDOR_ID, - &vendor_id); - if (!rc) - pr_info("record %d - vendor-id: %#x\n", - i, vendor_id); - rc = cxl_ops->afu_cr_read32(afu, i, PCI_CLASS_REVISION, - &class_code); - if (!rc) { - class_code >>= 8; - pr_info("record %d - class-code: %#x\n", - i, class_code); - } - } - - read_prop_dword(np, "ibm,function-number", &val); - read_prop_dword(np, "ibm,privileged-function", &val); - read_prop_dword(np, "vendor-id", &val); - read_prop_dword(np, "device-id", &val); - read_prop_dword(np, "revision-id", &val); - read_prop_dword(np, "class-code", &val); - read_prop_dword(np, "subsystem-vendor-id", &val); - read_prop_dword(np, "subsystem-id", &val); - } - /* - * if "ibm,process-mmio" doesn't exist then per-process mmio is - * not supported - */ - val = 0; - prop = read_prop_dword(np, "ibm,process-mmio", &val); - if (prop && val == 1) - afu->pp_psa = true; - else - afu->pp_psa = false; - - if (cxl_verbose) { - read_prop_dword(np, "ibm,supports-aur", &val); - read_prop_dword(np, "ibm,supports-csrp", &val); - read_prop_dword(np, "ibm,supports-prr", &val); - } - - prop = read_prop_dword(np, "ibm,function-error-interrupt", &val); - if (prop) - afu->serr_hwirq = val; - - pr_devel("AFU handle: %#llx\n", afu->guest->handle); - pr_devel("p2n_phys: %#llx (size %#llx)\n", - 
afu->guest->p2n_phys, afu->guest->p2n_size); - pr_devel("psn_phys: %#llx (size %#llx)\n", - afu->psn_phys, afu->adapter->ps_size); - pr_devel("Max number of processes virtualised=%i\n", - afu->max_procs_virtualised); - pr_devel("Per-process irqs min=%i, max=%i\n", afu->pp_irqs, - afu->irqs_max); - pr_devel("Slice error interrupt=%#lx\n", afu->serr_hwirq); - - return 0; -} - -static int read_adapter_irq_config(struct cxl *adapter, struct device_node *np) -{ - const __be32 *ranges; - int len, nranges, i; - struct irq_avail *cur; - - ranges = of_get_property(np, "interrupt-ranges", &len); - if (ranges == NULL || len < (2 * sizeof(int))) - return -EINVAL; - - /* - * encoded array of two cells per entry, each cell encoded as - * with encode-int - */ - nranges = len / (2 * sizeof(int)); - if (nranges == 0 || (nranges * 2 * sizeof(int)) != len) - return -EINVAL; - - adapter->guest->irq_avail = kcalloc(nranges, sizeof(struct irq_avail), - GFP_KERNEL); - if (adapter->guest->irq_avail == NULL) - return -ENOMEM; - - adapter->guest->irq_base_offset = be32_to_cpu(ranges[0]); - for (i = 0; i < nranges; i++) { - cur = &adapter->guest->irq_avail[i]; - cur->offset = be32_to_cpu(ranges[i * 2]); - cur->range = be32_to_cpu(ranges[i * 2 + 1]); - cur->bitmap = bitmap_zalloc(cur->range, GFP_KERNEL); - if (cur->bitmap == NULL) - goto err; - if (cur->offset < adapter->guest->irq_base_offset) - adapter->guest->irq_base_offset = cur->offset; - if (cxl_verbose) - pr_info("available IRQ range: %#lx-%#lx (%lu)\n", - cur->offset, cur->offset + cur->range - 1, - cur->range); - } - adapter->guest->irq_nranges = nranges; - spin_lock_init(&adapter->guest->irq_alloc_lock); - - return 0; -err: - for (i--; i >= 0; i--) { - cur = &adapter->guest->irq_avail[i]; - bitmap_free(cur->bitmap); - } - kfree(adapter->guest->irq_avail); - adapter->guest->irq_avail = NULL; - return -ENOMEM; -} - -int cxl_of_read_adapter_handle(struct cxl *adapter, struct device_node *np) -{ - if (read_handle(np, 
&adapter->guest->handle)) - return -EINVAL; - pr_devel("Adapter handle: 0x%.16llx\n", adapter->guest->handle); - - return 0; -} - -int cxl_of_read_adapter_properties(struct cxl *adapter, struct device_node *np) -{ - int rc, len, naddr, i; - char *p; - const __be32 *prop; - u32 val = 0; - - /* Properties are read in the same order as listed in PAPR */ - - naddr = of_n_addr_cells(np); - - if (cxl_verbose) { - pr_info("Dump of the 'ibm,coherent-platform-facility' node properties:\n"); - - read_prop_dword(np, "#address-cells", &val); - read_prop_dword(np, "#size-cells", &val); - - prop = of_get_property(np, "compatible", &len); - i = 0; - while (i < len) { - p = (char *) prop + i; - pr_info("compatible: %s\n", p); - i += strlen(p) + 1; - } - read_prop_string(np, "name"); - read_prop_string(np, "model"); - - prop = of_get_property(np, "reg", NULL); - if (prop) { - pr_info("reg: addr:%#llx size:%#x\n", - of_read_number(prop, naddr), - be32_to_cpu(prop[naddr])); - } - - read_prop_string(np, "ibm,loc-code"); - } - - if ((rc = read_adapter_irq_config(adapter, np))) - return rc; - - if (cxl_verbose) { - read_prop_string(np, "device_type"); - read_prop_string(np, "ibm,phandle"); - } - - prop = read_prop_dword(np, "ibm,caia-version", &val); - if (prop) { - adapter->caia_major = (val & 0xFF00) >> 8; - adapter->caia_minor = val & 0xFF; - } - - prop = read_prop_dword(np, "ibm,psl-revision", &val); - if (prop) - adapter->psl_rev = val; - - prop = read_prop_string(np, "status"); - if (prop) { - adapter->guest->status = kasprintf(GFP_KERNEL, "%s", (char *) prop); - if (adapter->guest->status == NULL) - return -ENOMEM; - } - - prop = read_prop_dword(np, "vendor-id", &val); - if (prop) - adapter->guest->vendor = val; - - prop = read_prop_dword(np, "device-id", &val); - if (prop) - adapter->guest->device = val; - - if (cxl_verbose) { - read_prop_dword(np, "ibm,privileged-facility", &val); - read_prop_dword(np, "revision-id", &val); - read_prop_dword(np, "class-code", &val); - } - - 
prop = read_prop_dword(np, "subsystem-vendor-id", &val); - if (prop) - adapter->guest->subsystem_vendor = val; - - prop = read_prop_dword(np, "subsystem-id", &val); - if (prop) - adapter->guest->subsystem = val; - - if (cxl_verbose) - read_vpd(adapter, NULL); - - return 0; -} - -static int cxl_of_remove(struct platform_device *pdev) -{ - struct cxl *adapter; - int afu; - - adapter = dev_get_drvdata(&pdev->dev); - for (afu = 0; afu < adapter->slices; afu++) - cxl_guest_remove_afu(adapter->afu[afu]); - - cxl_guest_remove_adapter(adapter); - return 0; -} - -static void cxl_of_shutdown(struct platform_device *pdev) -{ - cxl_of_remove(pdev); -} - -int cxl_of_probe(struct platform_device *pdev) -{ - struct device_node *np = NULL; - struct device_node *afu_np = NULL; - struct cxl *adapter = NULL; - int ret; - int slice = 0, slice_ok = 0; - - pr_devel("in %s\n", __func__); - - np = pdev->dev.of_node; - if (np == NULL) - return -ENODEV; - - /* init adapter */ - adapter = cxl_guest_init_adapter(np, pdev); - if (IS_ERR(adapter)) { - dev_err(&pdev->dev, "guest_init_adapter failed: %li\n", PTR_ERR(adapter)); - return PTR_ERR(adapter); - } - - /* init afu */ - for_each_child_of_node(np, afu_np) { - if ((ret = cxl_guest_init_afu(adapter, slice, afu_np))) - dev_err(&pdev->dev, "AFU %i failed to initialise: %i\n", - slice, ret); - else - slice_ok++; - slice++; - } - - if (slice_ok == 0) { - dev_info(&pdev->dev, "No active AFU"); - adapter->slices = 0; - } - - return 0; -} - -static const struct of_device_id cxl_of_match[] = { - { .compatible = "ibm,coherent-platform-facility",}, - {}, -}; -MODULE_DEVICE_TABLE(of, cxl_of_match); - -struct platform_driver cxl_of_driver = { - .driver = { - .name = "cxl_of", - .of_match_table = cxl_of_match, - .owner = THIS_MODULE - }, - .probe = cxl_of_probe, - .remove = cxl_of_remove, - .shutdown = cxl_of_shutdown, -}; diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c deleted file mode 100644 index 0ff944860dda..000000000000 --- 
a/drivers/misc/cxl/pci.c +++ /dev/null @@ -1,2116 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright 2014 IBM Corp. - */ - -#include <linux/pci_regs.h> -#include <linux/pci_ids.h> -#include <linux/device.h> -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/slab.h> -#include <linux/sort.h> -#include <linux/pci.h> -#include <linux/of.h> -#include <linux/delay.h> -#include <asm/opal.h> -#include <asm/msi_bitmap.h> -#include <asm/pnv-pci.h> -#include <asm/io.h> -#include <asm/reg.h> - -#include "cxl.h" -#include <misc/cxl.h> - - -#define CXL_PCI_VSEC_ID 0x1280 -#define CXL_VSEC_MIN_SIZE 0x80 - -#define CXL_READ_VSEC_LENGTH(dev, vsec, dest) \ - { \ - pci_read_config_word(dev, vsec + 0x6, dest); \ - *dest >>= 4; \ - } -#define CXL_READ_VSEC_NAFUS(dev, vsec, dest) \ - pci_read_config_byte(dev, vsec + 0x8, dest) - -#define CXL_READ_VSEC_STATUS(dev, vsec, dest) \ - pci_read_config_byte(dev, vsec + 0x9, dest) -#define CXL_STATUS_SECOND_PORT 0x80 -#define CXL_STATUS_MSI_X_FULL 0x40 -#define CXL_STATUS_MSI_X_SINGLE 0x20 -#define CXL_STATUS_FLASH_RW 0x08 -#define CXL_STATUS_FLASH_RO 0x04 -#define CXL_STATUS_LOADABLE_AFU 0x02 -#define CXL_STATUS_LOADABLE_PSL 0x01 -/* If we see these features we won't try to use the card */ -#define CXL_UNSUPPORTED_FEATURES \ - (CXL_STATUS_MSI_X_FULL | CXL_STATUS_MSI_X_SINGLE) - -#define CXL_READ_VSEC_MODE_CONTROL(dev, vsec, dest) \ - pci_read_config_byte(dev, vsec + 0xa, dest) -#define CXL_WRITE_VSEC_MODE_CONTROL(dev, vsec, val) \ - pci_write_config_byte(dev, vsec + 0xa, val) -#define CXL_VSEC_PROTOCOL_MASK 0xe0 -#define CXL_VSEC_PROTOCOL_1024TB 0x80 -#define CXL_VSEC_PROTOCOL_512TB 0x40 -#define CXL_VSEC_PROTOCOL_256TB 0x20 /* Power 8/9 uses this */ -#define CXL_VSEC_PROTOCOL_ENABLE 0x01 - -#define CXL_READ_VSEC_PSL_REVISION(dev, vsec, dest) \ - pci_read_config_word(dev, vsec + 0xc, dest) -#define CXL_READ_VSEC_CAIA_MINOR(dev, vsec, dest) \ - pci_read_config_byte(dev, vsec + 0xe, dest) -#define 
CXL_READ_VSEC_CAIA_MAJOR(dev, vsec, dest) \ - pci_read_config_byte(dev, vsec + 0xf, dest) -#define CXL_READ_VSEC_BASE_IMAGE(dev, vsec, dest) \ - pci_read_config_word(dev, vsec + 0x10, dest) - -#define CXL_READ_VSEC_IMAGE_STATE(dev, vsec, dest) \ - pci_read_config_byte(dev, vsec + 0x13, dest) -#define CXL_WRITE_VSEC_IMAGE_STATE(dev, vsec, val) \ - pci_write_config_byte(dev, vsec + 0x13, val) -#define CXL_VSEC_USER_IMAGE_LOADED 0x80 /* RO */ -#define CXL_VSEC_PERST_LOADS_IMAGE 0x20 /* RW */ -#define CXL_VSEC_PERST_SELECT_USER 0x10 /* RW */ - -#define CXL_READ_VSEC_AFU_DESC_OFF(dev, vsec, dest) \ - pci_read_config_dword(dev, vsec + 0x20, dest) -#define CXL_READ_VSEC_AFU_DESC_SIZE(dev, vsec, dest) \ - pci_read_config_dword(dev, vsec + 0x24, dest) -#define CXL_READ_VSEC_PS_OFF(dev, vsec, dest) \ - pci_read_config_dword(dev, vsec + 0x28, dest) -#define CXL_READ_VSEC_PS_SIZE(dev, vsec, dest) \ - pci_read_config_dword(dev, vsec + 0x2c, dest) - - -/* This works a little different than the p1/p2 register accesses to make it - * easier to pull out individual fields */ -#define AFUD_READ(afu, off) in_be64(afu->native->afu_desc_mmio + off) -#define AFUD_READ_LE(afu, off) in_le64(afu->native->afu_desc_mmio + off) -#define EXTRACT_PPC_BIT(val, bit) (!!(val & PPC_BIT(bit))) -#define EXTRACT_PPC_BITS(val, bs, be) ((val & PPC_BITMASK(bs, be)) >> PPC_BITLSHIFT(be)) - -#define AFUD_READ_INFO(afu) AFUD_READ(afu, 0x0) -#define AFUD_NUM_INTS_PER_PROC(val) EXTRACT_PPC_BITS(val, 0, 15) -#define AFUD_NUM_PROCS(val) EXTRACT_PPC_BITS(val, 16, 31) -#define AFUD_NUM_CRS(val) EXTRACT_PPC_BITS(val, 32, 47) -#define AFUD_MULTIMODE(val) EXTRACT_PPC_BIT(val, 48) -#define AFUD_PUSH_BLOCK_TRANSFER(val) EXTRACT_PPC_BIT(val, 55) -#define AFUD_DEDICATED_PROCESS(val) EXTRACT_PPC_BIT(val, 59) -#define AFUD_AFU_DIRECTED(val) EXTRACT_PPC_BIT(val, 61) -#define AFUD_TIME_SLICED(val) EXTRACT_PPC_BIT(val, 63) -#define AFUD_READ_CR(afu) AFUD_READ(afu, 0x20) -#define AFUD_CR_LEN(val) EXTRACT_PPC_BITS(val, 8, 63) 
-#define AFUD_READ_CR_OFF(afu) AFUD_READ(afu, 0x28) -#define AFUD_READ_PPPSA(afu) AFUD_READ(afu, 0x30) -#define AFUD_PPPSA_PP(val) EXTRACT_PPC_BIT(val, 6) -#define AFUD_PPPSA_PSA(val) EXTRACT_PPC_BIT(val, 7) -#define AFUD_PPPSA_LEN(val) EXTRACT_PPC_BITS(val, 8, 63) -#define AFUD_READ_PPPSA_OFF(afu) AFUD_READ(afu, 0x38) -#define AFUD_READ_EB(afu) AFUD_READ(afu, 0x40) -#define AFUD_EB_LEN(val) EXTRACT_PPC_BITS(val, 8, 63) -#define AFUD_READ_EB_OFF(afu) AFUD_READ(afu, 0x48) - -static const struct pci_device_id cxl_pci_tbl[] = { - { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0477), }, - { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x044b), }, - { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x04cf), }, - { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0601), }, - { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0623), }, - { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0628), }, - { } -}; -MODULE_DEVICE_TABLE(pci, cxl_pci_tbl); - - -/* - * Mostly using these wrappers to avoid confusion: - * priv 1 is BAR2, while priv 2 is BAR0 - */ -static inline resource_size_t p1_base(struct pci_dev *dev) -{ - return pci_resource_start(dev, 2); -} - -static inline resource_size_t p1_size(struct pci_dev *dev) -{ - return pci_resource_len(dev, 2); -} - -static inline resource_size_t p2_base(struct pci_dev *dev) -{ - return pci_resource_start(dev, 0); -} - -static inline resource_size_t p2_size(struct pci_dev *dev) -{ - return pci_resource_len(dev, 0); -} - -static int find_cxl_vsec(struct pci_dev *dev) -{ - int vsec = 0; - u16 val; - - while ((vsec = pci_find_next_ext_capability(dev, vsec, PCI_EXT_CAP_ID_VNDR))) { - pci_read_config_word(dev, vsec + 0x4, &val); - if (val == CXL_PCI_VSEC_ID) - return vsec; - } - return 0; - -} - -static void dump_cxl_config_space(struct pci_dev *dev) -{ - int vsec; - u32 val; - - dev_info(&dev->dev, "dump_cxl_config_space\n"); - - pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &val); - dev_info(&dev->dev, "BAR0: %#.8x\n", val); - pci_read_config_dword(dev, PCI_BASE_ADDRESS_1, &val); - dev_info(&dev->dev, "BAR1: %#.8x\n", val); - 
pci_read_config_dword(dev, PCI_BASE_ADDRESS_2, &val); - dev_info(&dev->dev, "BAR2: %#.8x\n", val); - pci_read_config_dword(dev, PCI_BASE_ADDRESS_3, &val); - dev_info(&dev->dev, "BAR3: %#.8x\n", val); - pci_read_config_dword(dev, PCI_BASE_ADDRESS_4, &val); - dev_info(&dev->dev, "BAR4: %#.8x\n", val); - pci_read_config_dword(dev, PCI_BASE_ADDRESS_5, &val); - dev_info(&dev->dev, "BAR5: %#.8x\n", val); - - dev_info(&dev->dev, "p1 regs: %#llx, len: %#llx\n", - p1_base(dev), p1_size(dev)); - dev_info(&dev->dev, "p2 regs: %#llx, len: %#llx\n", - p2_base(dev), p2_size(dev)); - dev_info(&dev->dev, "BAR 4/5: %#llx, len: %#llx\n", - pci_resource_start(dev, 4), pci_resource_len(dev, 4)); - - if (!(vsec = find_cxl_vsec(dev))) - return; - -#define show_reg(name, what) \ - dev_info(&dev->dev, "cxl vsec: %30s: %#x\n", name, what) - - pci_read_config_dword(dev, vsec + 0x0, &val); - show_reg("Cap ID", (val >> 0) & 0xffff); - show_reg("Cap Ver", (val >> 16) & 0xf); - show_reg("Next Cap Ptr", (val >> 20) & 0xfff); - pci_read_config_dword(dev, vsec + 0x4, &val); - show_reg("VSEC ID", (val >> 0) & 0xffff); - show_reg("VSEC Rev", (val >> 16) & 0xf); - show_reg("VSEC Length", (val >> 20) & 0xfff); - pci_read_config_dword(dev, vsec + 0x8, &val); - show_reg("Num AFUs", (val >> 0) & 0xff); - show_reg("Status", (val >> 8) & 0xff); - show_reg("Mode Control", (val >> 16) & 0xff); - show_reg("Reserved", (val >> 24) & 0xff); - pci_read_config_dword(dev, vsec + 0xc, &val); - show_reg("PSL Rev", (val >> 0) & 0xffff); - show_reg("CAIA Ver", (val >> 16) & 0xffff); - pci_read_config_dword(dev, vsec + 0x10, &val); - show_reg("Base Image Rev", (val >> 0) & 0xffff); - show_reg("Reserved", (val >> 16) & 0x0fff); - show_reg("Image Control", (val >> 28) & 0x3); - show_reg("Reserved", (val >> 30) & 0x1); - show_reg("Image Loaded", (val >> 31) & 0x1); - - pci_read_config_dword(dev, vsec + 0x14, &val); - show_reg("Reserved", val); - pci_read_config_dword(dev, vsec + 0x18, &val); - show_reg("Reserved", val); - 
pci_read_config_dword(dev, vsec + 0x1c, &val); - show_reg("Reserved", val); - - pci_read_config_dword(dev, vsec + 0x20, &val); - show_reg("AFU Descriptor Offset", val); - pci_read_config_dword(dev, vsec + 0x24, &val); - show_reg("AFU Descriptor Size", val); - pci_read_config_dword(dev, vsec + 0x28, &val); - show_reg("Problem State Offset", val); - pci_read_config_dword(dev, vsec + 0x2c, &val); - show_reg("Problem State Size", val); - - pci_read_config_dword(dev, vsec + 0x30, &val); - show_reg("Reserved", val); - pci_read_config_dword(dev, vsec + 0x34, &val); - show_reg("Reserved", val); - pci_read_config_dword(dev, vsec + 0x38, &val); - show_reg("Reserved", val); - pci_read_config_dword(dev, vsec + 0x3c, &val); - show_reg("Reserved", val); - - pci_read_config_dword(dev, vsec + 0x40, &val); - show_reg("PSL Programming Port", val); - pci_read_config_dword(dev, vsec + 0x44, &val); - show_reg("PSL Programming Control", val); - - pci_read_config_dword(dev, vsec + 0x48, &val); - show_reg("Reserved", val); - pci_read_config_dword(dev, vsec + 0x4c, &val); - show_reg("Reserved", val); - - pci_read_config_dword(dev, vsec + 0x50, &val); - show_reg("Flash Address Register", val); - pci_read_config_dword(dev, vsec + 0x54, &val); - show_reg("Flash Size Register", val); - pci_read_config_dword(dev, vsec + 0x58, &val); - show_reg("Flash Status/Control Register", val); - pci_read_config_dword(dev, vsec + 0x58, &val); - show_reg("Flash Data Port", val); - -#undef show_reg -} - -static void dump_afu_descriptor(struct cxl_afu *afu) -{ - u64 val, afu_cr_num, afu_cr_off, afu_cr_len; - int i; - -#define show_reg(name, what) \ - dev_info(&afu->dev, "afu desc: %30s: %#llx\n", name, what) - - val = AFUD_READ_INFO(afu); - show_reg("num_ints_per_process", AFUD_NUM_INTS_PER_PROC(val)); - show_reg("num_of_processes", AFUD_NUM_PROCS(val)); - show_reg("num_of_afu_CRs", AFUD_NUM_CRS(val)); - show_reg("req_prog_mode", val & 0xffffULL); - afu_cr_num = AFUD_NUM_CRS(val); - - val = AFUD_READ(afu, 
0x8); - show_reg("Reserved", val); - val = AFUD_READ(afu, 0x10); - show_reg("Reserved", val); - val = AFUD_READ(afu, 0x18); - show_reg("Reserved", val); - - val = AFUD_READ_CR(afu); - show_reg("Reserved", (val >> (63-7)) & 0xff); - show_reg("AFU_CR_len", AFUD_CR_LEN(val)); - afu_cr_len = AFUD_CR_LEN(val) * 256; - - val = AFUD_READ_CR_OFF(afu); - afu_cr_off = val; - show_reg("AFU_CR_offset", val); - - val = AFUD_READ_PPPSA(afu); - show_reg("PerProcessPSA_control", (val >> (63-7)) & 0xff); - show_reg("PerProcessPSA Length", AFUD_PPPSA_LEN(val)); - - val = AFUD_READ_PPPSA_OFF(afu); - show_reg("PerProcessPSA_offset", val); - - val = AFUD_READ_EB(afu); - show_reg("Reserved", (val >> (63-7)) & 0xff); - show_reg("AFU_EB_len", AFUD_EB_LEN(val)); - - val = AFUD_READ_EB_OFF(afu); - show_reg("AFU_EB_offset", val); - - for (i = 0; i < afu_cr_num; i++) { - val = AFUD_READ_LE(afu, afu_cr_off + i * afu_cr_len); - show_reg("CR Vendor", val & 0xffff); - show_reg("CR Device", (val >> 16) & 0xffff); - } -#undef show_reg -} - -#define P8_CAPP_UNIT0_ID 0xBA -#define P8_CAPP_UNIT1_ID 0XBE -#define P9_CAPP_UNIT0_ID 0xC0 -#define P9_CAPP_UNIT1_ID 0xE0 - -static int get_phb_index(struct device_node *np, u32 *phb_index) -{ - if (of_property_read_u32(np, "ibm,phb-index", phb_index)) - return -ENODEV; - return 0; -} - -static u64 get_capp_unit_id(struct device_node *np, u32 phb_index) -{ - /* - * POWER 8: - * - For chips other than POWER8NVL, we only have CAPP 0, - * irrespective of which PHB is used. - * - For POWER8NVL, assume CAPP 0 is attached to PHB0 and - * CAPP 1 is attached to PHB1. - */ - if (cxl_is_power8()) { - if (!pvr_version_is(PVR_POWER8NVL)) - return P8_CAPP_UNIT0_ID; - - if (phb_index == 0) - return P8_CAPP_UNIT0_ID; - - if (phb_index == 1) - return P8_CAPP_UNIT1_ID; - } - - /* - * POWER 9: - * PEC0 (PHB0). Capp ID = CAPP0 (0b1100_0000) - * PEC1 (PHB1 - PHB2). No capi mode - * PEC2 (PHB3 - PHB4 - PHB5): Capi mode on PHB3 only. 
Capp ID = CAPP1 (0b1110_0000) - */ - if (cxl_is_power9()) { - if (phb_index == 0) - return P9_CAPP_UNIT0_ID; - - if (phb_index == 3) - return P9_CAPP_UNIT1_ID; - } - - return 0; -} - -int cxl_calc_capp_routing(struct pci_dev *dev, u64 *chipid, - u32 *phb_index, u64 *capp_unit_id) -{ - int rc; - struct device_node *np; - const __be32 *prop; - - if (!(np = pnv_pci_get_phb_node(dev))) - return -ENODEV; - - while (np && !(prop = of_get_property(np, "ibm,chip-id", NULL))) - np = of_get_next_parent(np); - if (!np) - return -ENODEV; - - *chipid = be32_to_cpup(prop); - - rc = get_phb_index(np, phb_index); - if (rc) { - pr_err("cxl: invalid phb index\n"); - of_node_put(np); - return rc; - } - - *capp_unit_id = get_capp_unit_id(np, *phb_index); - of_node_put(np); - if (!*capp_unit_id) { - pr_err("cxl: No capp unit found for PHB[%lld,%d]. Make sure the adapter is on a capi-compatible slot\n", - *chipid, *phb_index); - return -ENODEV; - } - - return 0; -} - -static DEFINE_MUTEX(indications_mutex); - -static int get_phb_indications(struct pci_dev *dev, u64 *capiind, u64 *asnind, - u64 *nbwind) -{ - static u64 nbw, asn, capi = 0; - struct device_node *np; - const __be32 *prop; - - mutex_lock(&indications_mutex); - if (!capi) { - if (!(np = pnv_pci_get_phb_node(dev))) { - mutex_unlock(&indications_mutex); - return -ENODEV; - } - - prop = of_get_property(np, "ibm,phb-indications", NULL); - if (!prop) { - nbw = 0x0300UL; /* legacy values */ - asn = 0x0400UL; - capi = 0x0200UL; - } else { - nbw = (u64)be32_to_cpu(prop[2]); - asn = (u64)be32_to_cpu(prop[1]); - capi = (u64)be32_to_cpu(prop[0]); - } - of_node_put(np); - } - *capiind = capi; - *asnind = asn; - *nbwind = nbw; - mutex_unlock(&indications_mutex); - return 0; -} - -int cxl_get_xsl9_dsnctl(struct pci_dev *dev, u64 capp_unit_id, u64 *reg) -{ - u64 xsl_dsnctl; - u64 capiind, asnind, nbwind; - - /* - * CAPI Identifier bits [0:7] - * bit 61:60 MSI bits --> 0 - * bit 59 TVT selector --> 0 - */ - if (get_phb_indications(dev, 
&capiind, &asnind, &nbwind)) - return -ENODEV; - - /* - * Tell XSL where to route data to. - * The field chipid should match the PHB CAPI_CMPM register - */ - xsl_dsnctl = (capiind << (63-15)); /* Bit 57 */ - xsl_dsnctl |= (capp_unit_id << (63-15)); - - /* nMMU_ID Defaults to: b’000001001’*/ - xsl_dsnctl |= ((u64)0x09 << (63-28)); - - /* - * Used to identify CAPI packets which should be sorted into - * the Non-Blocking queues by the PHB. This field should match - * the PHB PBL_NBW_CMPM register - * nbwind=0x03, bits [57:58], must include capi indicator. - * Not supported on P9 DD1. - */ - xsl_dsnctl |= (nbwind << (63-55)); - - /* - * Upper 16b address bits of ASB_Notify messages sent to the - * system. Need to match the PHB’s ASN Compare/Mask Register. - * Not supported on P9 DD1. - */ - xsl_dsnctl |= asnind; - - *reg = xsl_dsnctl; - return 0; -} - -static int init_implementation_adapter_regs_psl9(struct cxl *adapter, - struct pci_dev *dev) -{ - u64 xsl_dsnctl, psl_fircntl; - u64 chipid; - u32 phb_index; - u64 capp_unit_id; - u64 psl_debug; - int rc; - - rc = cxl_calc_capp_routing(dev, &chipid, &phb_index, &capp_unit_id); - if (rc) - return rc; - - rc = cxl_get_xsl9_dsnctl(dev, capp_unit_id, &xsl_dsnctl); - if (rc) - return rc; - - cxl_p1_write(adapter, CXL_XSL9_DSNCTL, xsl_dsnctl); - - /* Set fir_cntl to recommended value for production env */ - psl_fircntl = (0x2ULL << (63-3)); /* ce_report */ - psl_fircntl |= (0x1ULL << (63-6)); /* FIR_report */ - psl_fircntl |= 0x1ULL; /* ce_thresh */ - cxl_p1_write(adapter, CXL_PSL9_FIR_CNTL, psl_fircntl); - - /* Setup the PSL to transmit packets on the PCIe before the - * CAPP is enabled. Make sure that CAPP virtual machines are disabled - */ - cxl_p1_write(adapter, CXL_PSL9_DSNDCTL, 0x0001001000012A10ULL); - - /* - * A response to an ASB_Notify request is returned by the - * system as an MMIO write to the address defined in - * the PSL_TNR_ADDR register. 
- * keep the Reset Value: 0x00020000E0000000 - */ - - /* Enable XSL rty limit */ - cxl_p1_write(adapter, CXL_XSL9_DEF, 0x51F8000000000005ULL); - - /* Change XSL_INV dummy read threshold */ - cxl_p1_write(adapter, CXL_XSL9_INV, 0x0000040007FFC200ULL); - - if (phb_index == 3) { - /* disable machines 31-47 and 20-27 for DMA */ - cxl_p1_write(adapter, CXL_PSL9_APCDEDTYPE, 0x40000FF3FFFF0000ULL); - } - - /* Snoop machines */ - cxl_p1_write(adapter, CXL_PSL9_APCDEDALLOC, 0x800F000200000000ULL); - - /* Enable NORST and DD2 features */ - cxl_p1_write(adapter, CXL_PSL9_DEBUG, 0xC000000000000000ULL); - - /* - * Check if PSL has data-cache. We need to flush adapter datacache - * when as its about to be removed. - */ - psl_debug = cxl_p1_read(adapter, CXL_PSL9_DEBUG); - if (psl_debug & CXL_PSL_DEBUG_CDC) { - dev_dbg(&dev->dev, "No data-cache present\n"); - adapter->native->no_data_cache = true; - } - - return 0; -} - -static int init_implementation_adapter_regs_psl8(struct cxl *adapter, struct pci_dev *dev) -{ - u64 psl_dsnctl, psl_fircntl; - u64 chipid; - u32 phb_index; - u64 capp_unit_id; - int rc; - - rc = cxl_calc_capp_routing(dev, &chipid, &phb_index, &capp_unit_id); - if (rc) - return rc; - - psl_dsnctl = 0x0000900000000000ULL; /* pteupd ttype, scdone */ - psl_dsnctl |= (0x2ULL << (63-38)); /* MMIO hang pulse: 256 us */ - /* Tell PSL where to route data to */ - psl_dsnctl |= (chipid << (63-5)); - psl_dsnctl |= (capp_unit_id << (63-13)); - - cxl_p1_write(adapter, CXL_PSL_DSNDCTL, psl_dsnctl); - cxl_p1_write(adapter, CXL_PSL_RESLCKTO, 0x20000000200ULL); - /* snoop write mask */ - cxl_p1_write(adapter, CXL_PSL_SNWRALLOC, 0x00000000FFFFFFFFULL); - /* set fir_cntl to recommended value for production env */ - psl_fircntl = (0x2ULL << (63-3)); /* ce_report */ - psl_fircntl |= (0x1ULL << (63-6)); /* FIR_report */ - psl_fircntl |= 0x1ULL; /* ce_thresh */ - cxl_p1_write(adapter, CXL_PSL_FIR_CNTL, psl_fircntl); - /* for debugging with trace arrays */ - cxl_p1_write(adapter, 
CXL_PSL_TRACE, 0x0000FF7C00000000ULL); - - return 0; -} - -/* PSL */ -#define TBSYNC_CAL(n) (((u64)n & 0x7) << (63-3)) -#define TBSYNC_CNT(n) (((u64)n & 0x7) << (63-6)) -/* For the PSL this is a multiple for 0 < n <= 7: */ -#define PSL_2048_250MHZ_CYCLES 1 - -static void write_timebase_ctrl_psl8(struct cxl *adapter) -{ - cxl_p1_write(adapter, CXL_PSL_TB_CTLSTAT, - TBSYNC_CNT(2 * PSL_2048_250MHZ_CYCLES)); -} - -static u64 timebase_read_psl9(struct cxl *adapter) -{ - return cxl_p1_read(adapter, CXL_PSL9_Timebase); -} - -static u64 timebase_read_psl8(struct cxl *adapter) -{ - return cxl_p1_read(adapter, CXL_PSL_Timebase); -} - -static void cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev) -{ - struct device_node *np; - - adapter->psl_timebase_synced = false; - - if (!(np = pnv_pci_get_phb_node(dev))) - return; - - /* Do not fail when CAPP timebase sync is not supported by OPAL */ - of_node_get(np); - if (! of_get_property(np, "ibm,capp-timebase-sync", NULL)) { - of_node_put(np); - dev_info(&dev->dev, "PSL timebase inactive: OPAL support missing\n"); - return; - } - of_node_put(np); - - /* - * Setup PSL Timebase Control and Status register - * with the recommended Timebase Sync Count value - */ - if (adapter->native->sl_ops->write_timebase_ctrl) - adapter->native->sl_ops->write_timebase_ctrl(adapter); - - /* Enable PSL Timebase */ - cxl_p1_write(adapter, CXL_PSL_Control, 0x0000000000000000); - cxl_p1_write(adapter, CXL_PSL_Control, CXL_PSL_Control_tb); - - return; -} - -static int init_implementation_afu_regs_psl9(struct cxl_afu *afu) -{ - return 0; -} - -static int init_implementation_afu_regs_psl8(struct cxl_afu *afu) -{ - /* read/write masks for this slice */ - cxl_p1n_write(afu, CXL_PSL_APCALLOC_A, 0xFFFFFFFEFEFEFEFEULL); - /* APC read/write masks for this slice */ - cxl_p1n_write(afu, CXL_PSL_COALLOC_A, 0xFF000000FEFEFEFEULL); - /* for debugging with trace arrays */ - cxl_p1n_write(afu, CXL_PSL_SLICE_TRACE, 0x0000FFFF00000000ULL); - 
cxl_p1n_write(afu, CXL_PSL_RXCTL_A, CXL_PSL_RXCTL_AFUHP_4S); - - return 0; -} - -int cxl_pci_setup_irq(struct cxl *adapter, unsigned int hwirq, - unsigned int virq) -{ - struct pci_dev *dev = to_pci_dev(adapter->dev.parent); - - return pnv_cxl_ioda_msi_setup(dev, hwirq, virq); -} - -int cxl_update_image_control(struct cxl *adapter) -{ - struct pci_dev *dev = to_pci_dev(adapter->dev.parent); - int rc; - int vsec; - u8 image_state; - - if (!(vsec = find_cxl_vsec(dev))) { - dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n"); - return -ENODEV; - } - - if ((rc = CXL_READ_VSEC_IMAGE_STATE(dev, vsec, &image_state))) { - dev_err(&dev->dev, "failed to read image state: %i\n", rc); - return rc; - } - - if (adapter->perst_loads_image) - image_state |= CXL_VSEC_PERST_LOADS_IMAGE; - else - image_state &= ~CXL_VSEC_PERST_LOADS_IMAGE; - - if (adapter->perst_select_user) - image_state |= CXL_VSEC_PERST_SELECT_USER; - else - image_state &= ~CXL_VSEC_PERST_SELECT_USER; - - if ((rc = CXL_WRITE_VSEC_IMAGE_STATE(dev, vsec, image_state))) { - dev_err(&dev->dev, "failed to update image control: %i\n", rc); - return rc; - } - - return 0; -} - -int cxl_pci_alloc_one_irq(struct cxl *adapter) -{ - struct pci_dev *dev = to_pci_dev(adapter->dev.parent); - - return pnv_cxl_alloc_hwirqs(dev, 1); -} - -void cxl_pci_release_one_irq(struct cxl *adapter, int hwirq) -{ - struct pci_dev *dev = to_pci_dev(adapter->dev.parent); - - return pnv_cxl_release_hwirqs(dev, hwirq, 1); -} - -int cxl_pci_alloc_irq_ranges(struct cxl_irq_ranges *irqs, - struct cxl *adapter, unsigned int num) -{ - struct pci_dev *dev = to_pci_dev(adapter->dev.parent); - - return pnv_cxl_alloc_hwirq_ranges(irqs, dev, num); -} - -void cxl_pci_release_irq_ranges(struct cxl_irq_ranges *irqs, - struct cxl *adapter) -{ - struct pci_dev *dev = to_pci_dev(adapter->dev.parent); - - pnv_cxl_release_hwirq_ranges(irqs, dev); -} - -static int setup_cxl_bars(struct pci_dev *dev) -{ - /* Safety check in case we get backported to < 3.17 without 
M64 */ - if ((p1_base(dev) < 0x100000000ULL) || - (p2_base(dev) < 0x100000000ULL)) { - dev_err(&dev->dev, "ABORTING: M32 BAR assignment incompatible with CXL\n"); - return -ENODEV; - } - - /* - * BAR 4/5 has a special meaning for CXL and must be programmed with a - * special value corresponding to the CXL protocol address range. - * For POWER 8/9 that means bits 48:49 must be set to 10 - */ - pci_write_config_dword(dev, PCI_BASE_ADDRESS_4, 0x00000000); - pci_write_config_dword(dev, PCI_BASE_ADDRESS_5, 0x00020000); - - return 0; -} - -/* pciex node: ibm,opal-m64-window = <0x3d058 0x0 0x3d058 0x0 0x8 0x0>; */ -static int switch_card_to_cxl(struct pci_dev *dev) -{ - int vsec; - u8 val; - int rc; - - dev_info(&dev->dev, "switch card to CXL\n"); - - if (!(vsec = find_cxl_vsec(dev))) { - dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n"); - return -ENODEV; - } - - if ((rc = CXL_READ_VSEC_MODE_CONTROL(dev, vsec, &val))) { - dev_err(&dev->dev, "failed to read current mode control: %i", rc); - return rc; - } - val &= ~CXL_VSEC_PROTOCOL_MASK; - val |= CXL_VSEC_PROTOCOL_256TB | CXL_VSEC_PROTOCOL_ENABLE; - if ((rc = CXL_WRITE_VSEC_MODE_CONTROL(dev, vsec, val))) { - dev_err(&dev->dev, "failed to enable CXL protocol: %i", rc); - return rc; - } - /* - * The CAIA spec (v0.12 11.6 Bi-modal Device Support) states - * we must wait 100ms after this mode switch before touching - * PCIe config space. 
- */ - msleep(100); - - return 0; -} - -static int pci_map_slice_regs(struct cxl_afu *afu, struct cxl *adapter, struct pci_dev *dev) -{ - u64 p1n_base, p2n_base, afu_desc; - const u64 p1n_size = 0x100; - const u64 p2n_size = 0x1000; - - p1n_base = p1_base(dev) + 0x10000 + (afu->slice * p1n_size); - p2n_base = p2_base(dev) + (afu->slice * p2n_size); - afu->psn_phys = p2_base(dev) + (adapter->native->ps_off + (afu->slice * adapter->ps_size)); - afu_desc = p2_base(dev) + adapter->native->afu_desc_off + (afu->slice * adapter->native->afu_desc_size); - - if (!(afu->native->p1n_mmio = ioremap(p1n_base, p1n_size))) - goto err; - if (!(afu->p2n_mmio = ioremap(p2n_base, p2n_size))) - goto err1; - if (afu_desc) { - if (!(afu->native->afu_desc_mmio = ioremap(afu_desc, adapter->native->afu_desc_size))) - goto err2; - } - - return 0; -err2: - iounmap(afu->p2n_mmio); -err1: - iounmap(afu->native->p1n_mmio); -err: - dev_err(&afu->dev, "Error mapping AFU MMIO regions\n"); - return -ENOMEM; -} - -static void pci_unmap_slice_regs(struct cxl_afu *afu) -{ - if (afu->p2n_mmio) { - iounmap(afu->p2n_mmio); - afu->p2n_mmio = NULL; - } - if (afu->native->p1n_mmio) { - iounmap(afu->native->p1n_mmio); - afu->native->p1n_mmio = NULL; - } - if (afu->native->afu_desc_mmio) { - iounmap(afu->native->afu_desc_mmio); - afu->native->afu_desc_mmio = NULL; - } -} - -void cxl_pci_release_afu(struct device *dev) -{ - struct cxl_afu *afu = to_cxl_afu(dev); - - pr_devel("%s\n", __func__); - - idr_destroy(&afu->contexts_idr); - cxl_release_spa(afu); - - kfree(afu->native); - kfree(afu); -} - -/* Expects AFU struct to have recently been zeroed out */ -static int cxl_read_afu_descriptor(struct cxl_afu *afu) -{ - u64 val; - - val = AFUD_READ_INFO(afu); - afu->pp_irqs = AFUD_NUM_INTS_PER_PROC(val); - afu->max_procs_virtualised = AFUD_NUM_PROCS(val); - afu->crs_num = AFUD_NUM_CRS(val); - - if (AFUD_AFU_DIRECTED(val)) - afu->modes_supported |= CXL_MODE_DIRECTED; - if (AFUD_DEDICATED_PROCESS(val)) - 
afu->modes_supported |= CXL_MODE_DEDICATED; - if (AFUD_TIME_SLICED(val)) - afu->modes_supported |= CXL_MODE_TIME_SLICED; - - val = AFUD_READ_PPPSA(afu); - afu->pp_size = AFUD_PPPSA_LEN(val) * 4096; - afu->psa = AFUD_PPPSA_PSA(val); - if ((afu->pp_psa = AFUD_PPPSA_PP(val))) - afu->native->pp_offset = AFUD_READ_PPPSA_OFF(afu); - - val = AFUD_READ_CR(afu); - afu->crs_len = AFUD_CR_LEN(val) * 256; - afu->crs_offset = AFUD_READ_CR_OFF(afu); - - - /* eb_len is in multiple of 4K */ - afu->eb_len = AFUD_EB_LEN(AFUD_READ_EB(afu)) * 4096; - afu->eb_offset = AFUD_READ_EB_OFF(afu); - - /* eb_off is 4K aligned so lower 12 bits are always zero */ - if (EXTRACT_PPC_BITS(afu->eb_offset, 0, 11) != 0) { - dev_warn(&afu->dev, - "Invalid AFU error buffer offset %Lx\n", - afu->eb_offset); - dev_info(&afu->dev, - "Ignoring AFU error buffer in the descriptor\n"); - /* indicate that no afu buffer exists */ - afu->eb_len = 0; - } - - return 0; -} - -static int cxl_afu_descriptor_looks_ok(struct cxl_afu *afu) -{ - int i, rc; - u32 val; - - if (afu->psa && afu->adapter->ps_size < - (afu->native->pp_offset + afu->pp_size*afu->max_procs_virtualised)) { - dev_err(&afu->dev, "per-process PSA can't fit inside the PSA!\n"); - return -ENODEV; - } - - if (afu->pp_psa && (afu->pp_size < PAGE_SIZE)) - dev_warn(&afu->dev, "AFU uses pp_size(%#016llx) < PAGE_SIZE per-process PSA!\n", afu->pp_size); - - for (i = 0; i < afu->crs_num; i++) { - rc = cxl_ops->afu_cr_read32(afu, i, 0, &val); - if (rc || val == 0) { - dev_err(&afu->dev, "ABORTING: AFU configuration record %i is invalid\n", i); - return -EINVAL; - } - } - - if ((afu->modes_supported & ~CXL_MODE_DEDICATED) && afu->max_procs_virtualised == 0) { - /* - * We could also check this for the dedicated process model - * since the architecture indicates it should be set to 1, but - * in that case we ignore the value and I'd rather not risk - * breaking any existing dedicated process AFUs that left it as - * 0 (not that I'm aware of any). 
It is clearly an error for an - * AFU directed AFU to set this to 0, and would have previously - * triggered a bug resulting in the maximum not being enforced - * at all since idr_alloc treats 0 as no maximum. - */ - dev_err(&afu->dev, "AFU does not support any processes\n"); - return -EINVAL; - } - - return 0; -} - -static int sanitise_afu_regs_psl9(struct cxl_afu *afu) -{ - u64 reg; - - /* - * Clear out any regs that contain either an IVTE or address or may be - * waiting on an acknowledgment to try to be a bit safer as we bring - * it online - */ - reg = cxl_p2n_read(afu, CXL_AFU_Cntl_An); - if ((reg & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) { - dev_warn(&afu->dev, "WARNING: AFU was not disabled: %#016llx\n", reg); - if (cxl_ops->afu_reset(afu)) - return -EIO; - if (cxl_afu_disable(afu)) - return -EIO; - if (cxl_psl_purge(afu)) - return -EIO; - } - cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0x0000000000000000); - cxl_p1n_write(afu, CXL_PSL_AMBAR_An, 0x0000000000000000); - reg = cxl_p2n_read(afu, CXL_PSL_DSISR_An); - if (reg) { - dev_warn(&afu->dev, "AFU had pending DSISR: %#016llx\n", reg); - if (reg & CXL_PSL9_DSISR_An_TF) - cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE); - else - cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A); - } - if (afu->adapter->native->sl_ops->register_serr_irq) { - reg = cxl_p1n_read(afu, CXL_PSL_SERR_An); - if (reg) { - if (reg & ~0x000000007fffffff) - dev_warn(&afu->dev, "AFU had pending SERR: %#016llx\n", reg); - cxl_p1n_write(afu, CXL_PSL_SERR_An, reg & ~0xffff); - } - } - reg = cxl_p2n_read(afu, CXL_PSL_ErrStat_An); - if (reg) { - dev_warn(&afu->dev, "AFU had pending error status: %#016llx\n", reg); - cxl_p2n_write(afu, CXL_PSL_ErrStat_An, reg); - } - - return 0; -} - -static int sanitise_afu_regs_psl8(struct cxl_afu *afu) -{ - u64 reg; - - /* - * Clear out any regs that contain either an IVTE or address or may be - * waiting on an acknowledgement to try to be a bit safer as we bring - * it online - */ - reg 
= cxl_p2n_read(afu, CXL_AFU_Cntl_An); - if ((reg & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) { - dev_warn(&afu->dev, "WARNING: AFU was not disabled: %#016llx\n", reg); - if (cxl_ops->afu_reset(afu)) - return -EIO; - if (cxl_afu_disable(afu)) - return -EIO; - if (cxl_psl_purge(afu)) - return -EIO; - } - cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0x0000000000000000); - cxl_p1n_write(afu, CXL_PSL_IVTE_Limit_An, 0x0000000000000000); - cxl_p1n_write(afu, CXL_PSL_IVTE_Offset_An, 0x0000000000000000); - cxl_p1n_write(afu, CXL_PSL_AMBAR_An, 0x0000000000000000); - cxl_p1n_write(afu, CXL_PSL_SPOffset_An, 0x0000000000000000); - cxl_p1n_write(afu, CXL_HAURP_An, 0x0000000000000000); - cxl_p2n_write(afu, CXL_CSRP_An, 0x0000000000000000); - cxl_p2n_write(afu, CXL_AURP1_An, 0x0000000000000000); - cxl_p2n_write(afu, CXL_AURP0_An, 0x0000000000000000); - cxl_p2n_write(afu, CXL_SSTP1_An, 0x0000000000000000); - cxl_p2n_write(afu, CXL_SSTP0_An, 0x0000000000000000); - reg = cxl_p2n_read(afu, CXL_PSL_DSISR_An); - if (reg) { - dev_warn(&afu->dev, "AFU had pending DSISR: %#016llx\n", reg); - if (reg & CXL_PSL_DSISR_TRANS) - cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE); - else - cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A); - } - if (afu->adapter->native->sl_ops->register_serr_irq) { - reg = cxl_p1n_read(afu, CXL_PSL_SERR_An); - if (reg) { - if (reg & ~0xffff) - dev_warn(&afu->dev, "AFU had pending SERR: %#016llx\n", reg); - cxl_p1n_write(afu, CXL_PSL_SERR_An, reg & ~0xffff); - } - } - reg = cxl_p2n_read(afu, CXL_PSL_ErrStat_An); - if (reg) { - dev_warn(&afu->dev, "AFU had pending error status: %#016llx\n", reg); - cxl_p2n_write(afu, CXL_PSL_ErrStat_An, reg); - } - - return 0; -} - -#define ERR_BUFF_MAX_COPY_SIZE PAGE_SIZE -/* - * afu_eb_read: - * Called from sysfs and reads the afu error info buffer. The h/w only supports - * 4/8 bytes aligned access. 
So in case the requested offset/count arent 8 byte - * aligned the function uses a bounce buffer which can be max PAGE_SIZE. - */ -ssize_t cxl_pci_afu_read_err_buffer(struct cxl_afu *afu, char *buf, - loff_t off, size_t count) -{ - loff_t aligned_start, aligned_end; - size_t aligned_length; - void *tbuf; - const void __iomem *ebuf = afu->native->afu_desc_mmio + afu->eb_offset; - - if (count == 0 || off < 0 || (size_t)off >= afu->eb_len) - return 0; - - /* calculate aligned read window */ - count = min((size_t)(afu->eb_len - off), count); - aligned_start = round_down(off, 8); - aligned_end = round_up(off + count, 8); - aligned_length = aligned_end - aligned_start; - - /* max we can copy in one read is PAGE_SIZE */ - if (aligned_length > ERR_BUFF_MAX_COPY_SIZE) { - aligned_length = ERR_BUFF_MAX_COPY_SIZE; - count = ERR_BUFF_MAX_COPY_SIZE - (off & 0x7); - } - - /* use bounce buffer for copy */ - tbuf = (void *)__get_free_page(GFP_KERNEL); - if (!tbuf) - return -ENOMEM; - - /* perform aligned read from the mmio region */ - memcpy_fromio(tbuf, ebuf + aligned_start, aligned_length); - memcpy(buf, tbuf + (off & 0x7), count); - - free_page((unsigned long)tbuf); - - return count; -} - -static int pci_configure_afu(struct cxl_afu *afu, struct cxl *adapter, struct pci_dev *dev) -{ - int rc; - - if ((rc = pci_map_slice_regs(afu, adapter, dev))) - return rc; - - if (adapter->native->sl_ops->sanitise_afu_regs) { - rc = adapter->native->sl_ops->sanitise_afu_regs(afu); - if (rc) - goto err1; - } - - /* We need to reset the AFU before we can read the AFU descriptor */ - if ((rc = cxl_ops->afu_reset(afu))) - goto err1; - - if (cxl_verbose) - dump_afu_descriptor(afu); - - if ((rc = cxl_read_afu_descriptor(afu))) - goto err1; - - if ((rc = cxl_afu_descriptor_looks_ok(afu))) - goto err1; - - if (adapter->native->sl_ops->afu_regs_init) - if ((rc = adapter->native->sl_ops->afu_regs_init(afu))) - goto err1; - - if (adapter->native->sl_ops->register_serr_irq) - if ((rc = 
adapter->native->sl_ops->register_serr_irq(afu))) - goto err1; - - if ((rc = cxl_native_register_psl_irq(afu))) - goto err2; - - atomic_set(&afu->configured_state, 0); - return 0; - -err2: - if (adapter->native->sl_ops->release_serr_irq) - adapter->native->sl_ops->release_serr_irq(afu); -err1: - pci_unmap_slice_regs(afu); - return rc; -} - -static void pci_deconfigure_afu(struct cxl_afu *afu) -{ - /* - * It's okay to deconfigure when AFU is already locked, otherwise wait - * until there are no readers - */ - if (atomic_read(&afu->configured_state) != -1) { - while (atomic_cmpxchg(&afu->configured_state, 0, -1) != -1) - schedule(); - } - cxl_native_release_psl_irq(afu); - if (afu->adapter->native->sl_ops->release_serr_irq) - afu->adapter->native->sl_ops->release_serr_irq(afu); - pci_unmap_slice_regs(afu); -} - -static int pci_init_afu(struct cxl *adapter, int slice, struct pci_dev *dev) -{ - struct cxl_afu *afu; - int rc = -ENOMEM; - - afu = cxl_alloc_afu(adapter, slice); - if (!afu) - return -ENOMEM; - - afu->native = kzalloc(sizeof(struct cxl_afu_native), GFP_KERNEL); - if (!afu->native) - goto err_free_afu; - - mutex_init(&afu->native->spa_mutex); - - rc = dev_set_name(&afu->dev, "afu%i.%i", adapter->adapter_num, slice); - if (rc) - goto err_free_native; - - rc = pci_configure_afu(afu, adapter, dev); - if (rc) - goto err_free_native; - - /* Don't care if this fails */ - cxl_debugfs_afu_add(afu); - - /* - * After we call this function we must not free the afu directly, even - * if it returns an error! 
- */ - if ((rc = cxl_register_afu(afu))) - goto err_put_dev; - - if ((rc = cxl_sysfs_afu_add(afu))) - goto err_del_dev; - - adapter->afu[afu->slice] = afu; - - if ((rc = cxl_pci_vphb_add(afu))) - dev_info(&afu->dev, "Can't register vPHB\n"); - - return 0; - -err_del_dev: - device_del(&afu->dev); -err_put_dev: - pci_deconfigure_afu(afu); - cxl_debugfs_afu_remove(afu); - put_device(&afu->dev); - return rc; - -err_free_native: - kfree(afu->native); -err_free_afu: - kfree(afu); - return rc; - -} - -static void cxl_pci_remove_afu(struct cxl_afu *afu) -{ - pr_devel("%s\n", __func__); - - if (!afu) - return; - - cxl_pci_vphb_remove(afu); - cxl_sysfs_afu_remove(afu); - cxl_debugfs_afu_remove(afu); - - spin_lock(&afu->adapter->afu_list_lock); - afu->adapter->afu[afu->slice] = NULL; - spin_unlock(&afu->adapter->afu_list_lock); - - cxl_context_detach_all(afu); - cxl_ops->afu_deactivate_mode(afu, afu->current_mode); - - pci_deconfigure_afu(afu); - device_unregister(&afu->dev); -} - -int cxl_pci_reset(struct cxl *adapter) -{ - struct pci_dev *dev = to_pci_dev(adapter->dev.parent); - int rc; - - if (adapter->perst_same_image) { - dev_warn(&dev->dev, - "cxl: refusing to reset/reflash when perst_reloads_same_image is set.\n"); - return -EINVAL; - } - - dev_info(&dev->dev, "CXL reset\n"); - - /* - * The adapter is about to be reset, so ignore errors. - */ - cxl_data_cache_flush(adapter); - - /* pcie_warm_reset requests a fundamental pci reset which includes a - * PERST assert/deassert. 
PERST triggers a loading of the image - * if "user" or "factory" is selected in sysfs */ - if ((rc = pci_set_pcie_reset_state(dev, pcie_warm_reset))) { - dev_err(&dev->dev, "cxl: pcie_warm_reset failed\n"); - return rc; - } - - return rc; -} - -static int cxl_map_adapter_regs(struct cxl *adapter, struct pci_dev *dev) -{ - if (pci_request_region(dev, 2, "priv 2 regs")) - goto err1; - if (pci_request_region(dev, 0, "priv 1 regs")) - goto err2; - - pr_devel("cxl_map_adapter_regs: p1: %#016llx %#llx, p2: %#016llx %#llx", - p1_base(dev), p1_size(dev), p2_base(dev), p2_size(dev)); - - if (!(adapter->native->p1_mmio = ioremap(p1_base(dev), p1_size(dev)))) - goto err3; - - if (!(adapter->native->p2_mmio = ioremap(p2_base(dev), p2_size(dev)))) - goto err4; - - return 0; - -err4: - iounmap(adapter->native->p1_mmio); - adapter->native->p1_mmio = NULL; -err3: - pci_release_region(dev, 0); -err2: - pci_release_region(dev, 2); -err1: - return -ENOMEM; -} - -static void cxl_unmap_adapter_regs(struct cxl *adapter) -{ - if (adapter->native->p1_mmio) { - iounmap(adapter->native->p1_mmio); - adapter->native->p1_mmio = NULL; - pci_release_region(to_pci_dev(adapter->dev.parent), 2); - } - if (adapter->native->p2_mmio) { - iounmap(adapter->native->p2_mmio); - adapter->native->p2_mmio = NULL; - pci_release_region(to_pci_dev(adapter->dev.parent), 0); - } -} - -static int cxl_read_vsec(struct cxl *adapter, struct pci_dev *dev) -{ - int vsec; - u32 afu_desc_off, afu_desc_size; - u32 ps_off, ps_size; - u16 vseclen; - u8 image_state; - - if (!(vsec = find_cxl_vsec(dev))) { - dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n"); - return -ENODEV; - } - - CXL_READ_VSEC_LENGTH(dev, vsec, &vseclen); - if (vseclen < CXL_VSEC_MIN_SIZE) { - dev_err(&dev->dev, "ABORTING: CXL VSEC too short\n"); - return -EINVAL; - } - - CXL_READ_VSEC_STATUS(dev, vsec, &adapter->vsec_status); - CXL_READ_VSEC_PSL_REVISION(dev, vsec, &adapter->psl_rev); - CXL_READ_VSEC_CAIA_MAJOR(dev, vsec, &adapter->caia_major); - 
CXL_READ_VSEC_CAIA_MINOR(dev, vsec, &adapter->caia_minor); - CXL_READ_VSEC_BASE_IMAGE(dev, vsec, &adapter->base_image); - CXL_READ_VSEC_IMAGE_STATE(dev, vsec, &image_state); - adapter->user_image_loaded = !!(image_state & CXL_VSEC_USER_IMAGE_LOADED); - adapter->perst_select_user = !!(image_state & CXL_VSEC_USER_IMAGE_LOADED); - adapter->perst_loads_image = !!(image_state & CXL_VSEC_PERST_LOADS_IMAGE); - - CXL_READ_VSEC_NAFUS(dev, vsec, &adapter->slices); - CXL_READ_VSEC_AFU_DESC_OFF(dev, vsec, &afu_desc_off); - CXL_READ_VSEC_AFU_DESC_SIZE(dev, vsec, &afu_desc_size); - CXL_READ_VSEC_PS_OFF(dev, vsec, &ps_off); - CXL_READ_VSEC_PS_SIZE(dev, vsec, &ps_size); - - /* Convert everything to bytes, because there is NO WAY I'd look at the - * code a month later and forget what units these are in ;-) */ - adapter->native->ps_off = ps_off * 64 * 1024; - adapter->ps_size = ps_size * 64 * 1024; - adapter->native->afu_desc_off = afu_desc_off * 64 * 1024; - adapter->native->afu_desc_size = afu_desc_size * 64 * 1024; - - /* Total IRQs - 1 PSL ERROR - #AFU*(1 slice error + 1 DSI) */ - adapter->user_irqs = pnv_cxl_get_irq_count(dev) - 1 - 2*adapter->slices; - - return 0; -} - -/* - * Workaround a PCIe Host Bridge defect on some cards, that can cause - * malformed Transaction Layer Packet (TLP) errors to be erroneously - * reported. Mask this error in the Uncorrectable Error Mask Register. - * - * The upper nibble of the PSL revision is used to distinguish between - * different cards. The affected ones have it set to 0. 
- */ -static void cxl_fixup_malformed_tlp(struct cxl *adapter, struct pci_dev *dev) -{ - int aer; - u32 data; - - if (adapter->psl_rev & 0xf000) - return; - if (!(aer = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR))) - return; - pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, &data); - if (data & PCI_ERR_UNC_MALF_TLP) - if (data & PCI_ERR_UNC_INTN) - return; - data |= PCI_ERR_UNC_MALF_TLP; - data |= PCI_ERR_UNC_INTN; - pci_write_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, data); -} - -static bool cxl_compatible_caia_version(struct cxl *adapter) -{ - if (cxl_is_power8() && (adapter->caia_major == 1)) - return true; - - if (cxl_is_power9() && (adapter->caia_major == 2)) - return true; - - return false; -} - -static int cxl_vsec_looks_ok(struct cxl *adapter, struct pci_dev *dev) -{ - if (adapter->vsec_status & CXL_STATUS_SECOND_PORT) - return -EBUSY; - - if (adapter->vsec_status & CXL_UNSUPPORTED_FEATURES) { - dev_err(&dev->dev, "ABORTING: CXL requires unsupported features\n"); - return -EINVAL; - } - - if (!cxl_compatible_caia_version(adapter)) { - dev_info(&dev->dev, "Ignoring card. 
PSL type is not supported (caia version: %d)\n", - adapter->caia_major); - return -ENODEV; - } - - if (!adapter->slices) { - /* Once we support dynamic reprogramming we can use the card if - * it supports loadable AFUs */ - dev_err(&dev->dev, "ABORTING: Device has no AFUs\n"); - return -EINVAL; - } - - if (!adapter->native->afu_desc_off || !adapter->native->afu_desc_size) { - dev_err(&dev->dev, "ABORTING: VSEC shows no AFU descriptors\n"); - return -EINVAL; - } - - if (adapter->ps_size > p2_size(dev) - adapter->native->ps_off) { - dev_err(&dev->dev, "ABORTING: Problem state size larger than " - "available in BAR2: 0x%llx > 0x%llx\n", - adapter->ps_size, p2_size(dev) - adapter->native->ps_off); - return -EINVAL; - } - - return 0; -} - -ssize_t cxl_pci_read_adapter_vpd(struct cxl *adapter, void *buf, size_t len) -{ - return pci_read_vpd(to_pci_dev(adapter->dev.parent), 0, len, buf); -} - -static void cxl_release_adapter(struct device *dev) -{ - struct cxl *adapter = to_cxl_adapter(dev); - - pr_devel("cxl_release_adapter\n"); - - cxl_remove_adapter_nr(adapter); - - kfree(adapter->native); - kfree(adapter); -} - -#define CXL_PSL_ErrIVTE_tberror (0x1ull << (63-31)) - -static int sanitise_adapter_regs(struct cxl *adapter) -{ - int rc = 0; - - /* Clear PSL tberror bit by writing 1 to it */ - cxl_p1_write(adapter, CXL_PSL_ErrIVTE, CXL_PSL_ErrIVTE_tberror); - - if (adapter->native->sl_ops->invalidate_all) { - /* do not invalidate ERAT entries when not reloading on PERST */ - if (cxl_is_power9() && (adapter->perst_loads_image)) - return 0; - rc = adapter->native->sl_ops->invalidate_all(adapter); - } - - return rc; -} - -/* This should contain *only* operations that can safely be done in - * both creation and recovery. 
- */ -static int cxl_configure_adapter(struct cxl *adapter, struct pci_dev *dev) -{ - int rc; - - adapter->dev.parent = &dev->dev; - adapter->dev.release = cxl_release_adapter; - pci_set_drvdata(dev, adapter); - - rc = pci_enable_device(dev); - if (rc) { - dev_err(&dev->dev, "pci_enable_device failed: %i\n", rc); - return rc; - } - - if ((rc = cxl_read_vsec(adapter, dev))) - return rc; - - if ((rc = cxl_vsec_looks_ok(adapter, dev))) - return rc; - - cxl_fixup_malformed_tlp(adapter, dev); - - if ((rc = setup_cxl_bars(dev))) - return rc; - - if ((rc = switch_card_to_cxl(dev))) - return rc; - - if ((rc = cxl_update_image_control(adapter))) - return rc; - - if ((rc = cxl_map_adapter_regs(adapter, dev))) - return rc; - - if ((rc = sanitise_adapter_regs(adapter))) - goto err; - - if ((rc = adapter->native->sl_ops->adapter_regs_init(adapter, dev))) - goto err; - - /* Required for devices using CAPP DMA mode, harmless for others */ - pci_set_master(dev); - - adapter->tunneled_ops_supported = false; - - if (cxl_is_power9()) { - if (pnv_pci_set_tunnel_bar(dev, 0x00020000E0000000ull, 1)) - dev_info(&dev->dev, "Tunneled operations unsupported\n"); - else - adapter->tunneled_ops_supported = true; - } - - if ((rc = pnv_phb_to_cxl_mode(dev, adapter->native->sl_ops->capi_mode))) - goto err; - - /* If recovery happened, the last step is to turn on snooping. 
- * In the non-recovery case this has no effect */ - if ((rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_SNOOP_ON))) - goto err; - - /* Ignore error, adapter init is not dependant on timebase sync */ - cxl_setup_psl_timebase(adapter, dev); - - if ((rc = cxl_native_register_psl_err_irq(adapter))) - goto err; - - return 0; - -err: - cxl_unmap_adapter_regs(adapter); - return rc; - -} - -static void cxl_deconfigure_adapter(struct cxl *adapter) -{ - struct pci_dev *pdev = to_pci_dev(adapter->dev.parent); - - if (cxl_is_power9()) - pnv_pci_set_tunnel_bar(pdev, 0x00020000E0000000ull, 0); - - cxl_native_release_psl_err_irq(adapter); - cxl_unmap_adapter_regs(adapter); - - pci_disable_device(pdev); -} - -static void cxl_stop_trace_psl9(struct cxl *adapter) -{ - int traceid; - u64 trace_state, trace_mask; - struct pci_dev *dev = to_pci_dev(adapter->dev.parent); - - /* read each tracearray state and issue mmio to stop them is needed */ - for (traceid = 0; traceid <= CXL_PSL9_TRACEID_MAX; ++traceid) { - trace_state = cxl_p1_read(adapter, CXL_PSL9_CTCCFG); - trace_mask = (0x3ULL << (62 - traceid * 2)); - trace_state = (trace_state & trace_mask) >> (62 - traceid * 2); - dev_dbg(&dev->dev, "cxl: Traceid-%d trace_state=0x%0llX\n", - traceid, trace_state); - - /* issue mmio if the trace array isn't in FIN state */ - if (trace_state != CXL_PSL9_TRACESTATE_FIN) - cxl_p1_write(adapter, CXL_PSL9_TRACECFG, - 0x8400000000000000ULL | traceid); - } -} - -static void cxl_stop_trace_psl8(struct cxl *adapter) -{ - int slice; - - /* Stop the trace */ - cxl_p1_write(adapter, CXL_PSL_TRACE, 0x8000000000000017LL); - - /* Stop the slice traces */ - spin_lock(&adapter->afu_list_lock); - for (slice = 0; slice < adapter->slices; slice++) { - if (adapter->afu[slice]) - cxl_p1n_write(adapter->afu[slice], CXL_PSL_SLICE_TRACE, - 0x8000000000000000LL); - } - spin_unlock(&adapter->afu_list_lock); -} - -static const struct cxl_service_layer_ops psl9_ops = { - .adapter_regs_init = 
init_implementation_adapter_regs_psl9, - .invalidate_all = cxl_invalidate_all_psl9, - .afu_regs_init = init_implementation_afu_regs_psl9, - .sanitise_afu_regs = sanitise_afu_regs_psl9, - .register_serr_irq = cxl_native_register_serr_irq, - .release_serr_irq = cxl_native_release_serr_irq, - .handle_interrupt = cxl_irq_psl9, - .fail_irq = cxl_fail_irq_psl, - .activate_dedicated_process = cxl_activate_dedicated_process_psl9, - .attach_afu_directed = cxl_attach_afu_directed_psl9, - .attach_dedicated_process = cxl_attach_dedicated_process_psl9, - .update_dedicated_ivtes = cxl_update_dedicated_ivtes_psl9, - .debugfs_add_adapter_regs = cxl_debugfs_add_adapter_regs_psl9, - .debugfs_add_afu_regs = cxl_debugfs_add_afu_regs_psl9, - .psl_irq_dump_registers = cxl_native_irq_dump_regs_psl9, - .err_irq_dump_registers = cxl_native_err_irq_dump_regs_psl9, - .debugfs_stop_trace = cxl_stop_trace_psl9, - .timebase_read = timebase_read_psl9, - .capi_mode = OPAL_PHB_CAPI_MODE_CAPI, - .needs_reset_before_disable = true, -}; - -static const struct cxl_service_layer_ops psl8_ops = { - .adapter_regs_init = init_implementation_adapter_regs_psl8, - .invalidate_all = cxl_invalidate_all_psl8, - .afu_regs_init = init_implementation_afu_regs_psl8, - .sanitise_afu_regs = sanitise_afu_regs_psl8, - .register_serr_irq = cxl_native_register_serr_irq, - .release_serr_irq = cxl_native_release_serr_irq, - .handle_interrupt = cxl_irq_psl8, - .fail_irq = cxl_fail_irq_psl, - .activate_dedicated_process = cxl_activate_dedicated_process_psl8, - .attach_afu_directed = cxl_attach_afu_directed_psl8, - .attach_dedicated_process = cxl_attach_dedicated_process_psl8, - .update_dedicated_ivtes = cxl_update_dedicated_ivtes_psl8, - .debugfs_add_adapter_regs = cxl_debugfs_add_adapter_regs_psl8, - .debugfs_add_afu_regs = cxl_debugfs_add_afu_regs_psl8, - .psl_irq_dump_registers = cxl_native_irq_dump_regs_psl8, - .err_irq_dump_registers = cxl_native_err_irq_dump_regs_psl8, - .debugfs_stop_trace = cxl_stop_trace_psl8, - 
.write_timebase_ctrl = write_timebase_ctrl_psl8, - .timebase_read = timebase_read_psl8, - .capi_mode = OPAL_PHB_CAPI_MODE_CAPI, - .needs_reset_before_disable = true, -}; - -static void set_sl_ops(struct cxl *adapter, struct pci_dev *dev) -{ - if (cxl_is_power8()) { - dev_info(&dev->dev, "Device uses a PSL8\n"); - adapter->native->sl_ops = &psl8_ops; - } else { - dev_info(&dev->dev, "Device uses a PSL9\n"); - adapter->native->sl_ops = &psl9_ops; - } -} - - -static struct cxl *cxl_pci_init_adapter(struct pci_dev *dev) -{ - struct cxl *adapter; - int rc; - - adapter = cxl_alloc_adapter(); - if (!adapter) - return ERR_PTR(-ENOMEM); - - adapter->native = kzalloc(sizeof(struct cxl_native), GFP_KERNEL); - if (!adapter->native) { - rc = -ENOMEM; - goto err_release; - } - - set_sl_ops(adapter, dev); - - /* Set defaults for parameters which need to persist over - * configure/reconfigure - */ - adapter->perst_loads_image = true; - adapter->perst_same_image = false; - - rc = cxl_configure_adapter(adapter, dev); - if (rc) { - pci_disable_device(dev); - goto err_release; - } - - /* Don't care if this one fails: */ - cxl_debugfs_adapter_add(adapter); - - /* - * After we call this function we must not free the adapter directly, - * even if it returns an error! 
- */ - if ((rc = cxl_register_adapter(adapter))) - goto err_put_dev; - - if ((rc = cxl_sysfs_adapter_add(adapter))) - goto err_del_dev; - - /* Release the context lock as adapter is configured */ - cxl_adapter_context_unlock(adapter); - - return adapter; - -err_del_dev: - device_del(&adapter->dev); -err_put_dev: - /* This should mirror cxl_remove_adapter, except without the - * sysfs parts - */ - cxl_debugfs_adapter_remove(adapter); - cxl_deconfigure_adapter(adapter); - put_device(&adapter->dev); - return ERR_PTR(rc); - -err_release: - cxl_release_adapter(&adapter->dev); - return ERR_PTR(rc); -} - -static void cxl_pci_remove_adapter(struct cxl *adapter) -{ - pr_devel("cxl_remove_adapter\n"); - - cxl_sysfs_adapter_remove(adapter); - cxl_debugfs_adapter_remove(adapter); - - /* - * Flush adapter datacache as its about to be removed. - */ - cxl_data_cache_flush(adapter); - - cxl_deconfigure_adapter(adapter); - - device_unregister(&adapter->dev); -} - -#define CXL_MAX_PCIEX_PARENT 2 - -int cxl_slot_is_switched(struct pci_dev *dev) -{ - struct device_node *np; - int depth = 0; - - if (!(np = pci_device_to_OF_node(dev))) { - pr_err("cxl: np = NULL\n"); - return -ENODEV; - } - of_node_get(np); - while (np) { - np = of_get_next_parent(np); - if (!of_node_is_type(np, "pciex")) - break; - depth++; - } - of_node_put(np); - return (depth > CXL_MAX_PCIEX_PARENT); -} - -static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id) -{ - struct cxl *adapter; - int slice; - int rc; - - if (cxl_pci_is_vphb_device(dev)) { - dev_dbg(&dev->dev, "cxl_init_adapter: Ignoring cxl vphb device\n"); - return -ENODEV; - } - - if (cxl_slot_is_switched(dev)) { - dev_info(&dev->dev, "Ignoring card on incompatible PCI slot\n"); - return -ENODEV; - } - - if (cxl_is_power9() && !radix_enabled()) { - dev_info(&dev->dev, "Only Radix mode supported\n"); - return -ENODEV; - } - - if (cxl_verbose) - dump_cxl_config_space(dev); - - adapter = cxl_pci_init_adapter(dev); - if (IS_ERR(adapter)) { - 
dev_err(&dev->dev, "cxl_init_adapter failed: %li\n", PTR_ERR(adapter)); - return PTR_ERR(adapter); - } - - for (slice = 0; slice < adapter->slices; slice++) { - if ((rc = pci_init_afu(adapter, slice, dev))) { - dev_err(&dev->dev, "AFU %i failed to initialise: %i\n", slice, rc); - continue; - } - - rc = cxl_afu_select_best_mode(adapter->afu[slice]); - if (rc) - dev_err(&dev->dev, "AFU %i failed to start: %i\n", slice, rc); - } - - return 0; -} - -static void cxl_remove(struct pci_dev *dev) -{ - struct cxl *adapter = pci_get_drvdata(dev); - struct cxl_afu *afu; - int i; - - /* - * Lock to prevent someone grabbing a ref through the adapter list as - * we are removing it - */ - for (i = 0; i < adapter->slices; i++) { - afu = adapter->afu[i]; - cxl_pci_remove_afu(afu); - } - cxl_pci_remove_adapter(adapter); -} - -static pci_ers_result_t cxl_vphb_error_detected(struct cxl_afu *afu, - pci_channel_state_t state) -{ - struct pci_dev *afu_dev; - struct pci_driver *afu_drv; - const struct pci_error_handlers *err_handler; - pci_ers_result_t result = PCI_ERS_RESULT_NEED_RESET; - pci_ers_result_t afu_result = PCI_ERS_RESULT_NEED_RESET; - - /* There should only be one entry, but go through the list - * anyway - */ - if (afu == NULL || afu->phb == NULL) - return result; - - list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) { - afu_drv = to_pci_driver(afu_dev->dev.driver); - if (!afu_drv) - continue; - - afu_dev->error_state = state; - - err_handler = afu_drv->err_handler; - if (err_handler) - afu_result = err_handler->error_detected(afu_dev, - state); - /* Disconnect trumps all, NONE trumps NEED_RESET */ - if (afu_result == PCI_ERS_RESULT_DISCONNECT) - result = PCI_ERS_RESULT_DISCONNECT; - else if ((afu_result == PCI_ERS_RESULT_NONE) && - (result == PCI_ERS_RESULT_NEED_RESET)) - result = PCI_ERS_RESULT_NONE; - } - return result; -} - -static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev, - pci_channel_state_t state) -{ - struct cxl *adapter = 
pci_get_drvdata(pdev); - struct cxl_afu *afu; - pci_ers_result_t result = PCI_ERS_RESULT_NEED_RESET; - pci_ers_result_t afu_result = PCI_ERS_RESULT_NEED_RESET; - int i; - - /* At this point, we could still have an interrupt pending. - * Let's try to get them out of the way before they do - * anything we don't like. - */ - schedule(); - - /* If we're permanently dead, give up. */ - if (state == pci_channel_io_perm_failure) { - spin_lock(&adapter->afu_list_lock); - for (i = 0; i < adapter->slices; i++) { - afu = adapter->afu[i]; - /* - * Tell the AFU drivers; but we don't care what they - * say, we're going away. - */ - cxl_vphb_error_detected(afu, state); - } - spin_unlock(&adapter->afu_list_lock); - return PCI_ERS_RESULT_DISCONNECT; - } - - /* Are we reflashing? - * - * If we reflash, we could come back as something entirely - * different, including a non-CAPI card. As such, by default - * we don't participate in the process. We'll be unbound and - * the slot re-probed. (TODO: check EEH doesn't blindly rebind - * us!) - * - * However, this isn't the entire story: for reliablity - * reasons, we usually want to reflash the FPGA on PERST in - * order to get back to a more reliable known-good state. - * - * This causes us a bit of a problem: if we reflash we can't - * trust that we'll come back the same - we could have a new - * image and been PERSTed in order to load that - * image. However, most of the time we actually *will* come - * back the same - for example a regular EEH event. - * - * Therefore, we allow the user to assert that the image is - * indeed the same and that we should continue on into EEH - * anyway. - */ - if (adapter->perst_loads_image && !adapter->perst_same_image) { - /* TODO take the PHB out of CXL mode */ - dev_info(&pdev->dev, "reflashing, so opting out of EEH!\n"); - return PCI_ERS_RESULT_NONE; - } - - /* - * At this point, we want to try to recover. We'll always - * need a complete slot reset: we don't trust any other reset. 
- * - * Now, we go through each AFU: - * - We send the driver, if bound, an error_detected callback. - * We expect it to clean up, but it can also tell us to give - * up and permanently detach the card. To simplify things, if - * any bound AFU driver doesn't support EEH, we give up on EEH. - * - * - We detach all contexts associated with the AFU. This - * does not free them, but puts them into a CLOSED state - * which causes any the associated files to return useful - * errors to userland. It also unmaps, but does not free, - * any IRQs. - * - * - We clean up our side: releasing and unmapping resources we hold - * so we can wire them up again when the hardware comes back up. - * - * Driver authors should note: - * - * - Any contexts you create in your kernel driver (except - * those associated with anonymous file descriptors) are - * your responsibility to free and recreate. Likewise with - * any attached resources. - * - * - We will take responsibility for re-initialising the - * device context (the one set up for you in - * cxl_pci_enable_device_hook and accessed through - * cxl_get_context). If you've attached IRQs or other - * resources to it, they remains yours to free. - * - * You can call the same functions to release resources as you - * normally would: we make sure that these functions continue - * to work when the hardware is down. - * - * Two examples: - * - * 1) If you normally free all your resources at the end of - * each request, or if you use anonymous FDs, your - * error_detected callback can simply set a flag to tell - * your driver not to start any new calls. You can then - * clear the flag in the resume callback. - * - * 2) If you normally allocate your resources on startup: - * * Set a flag in error_detected as above. - * * Let CXL detach your contexts. - * * In slot_reset, free the old resources and allocate new ones. - * * In resume, clear the flag to allow things to start. 
- */ - - /* Make sure no one else changes the afu list */ - spin_lock(&adapter->afu_list_lock); - - for (i = 0; i < adapter->slices; i++) { - afu = adapter->afu[i]; - - if (afu == NULL) - continue; - - afu_result = cxl_vphb_error_detected(afu, state); - cxl_context_detach_all(afu); - cxl_ops->afu_deactivate_mode(afu, afu->current_mode); - pci_deconfigure_afu(afu); - - /* Disconnect trumps all, NONE trumps NEED_RESET */ - if (afu_result == PCI_ERS_RESULT_DISCONNECT) - result = PCI_ERS_RESULT_DISCONNECT; - else if ((afu_result == PCI_ERS_RESULT_NONE) && - (result == PCI_ERS_RESULT_NEED_RESET)) - result = PCI_ERS_RESULT_NONE; - } - spin_unlock(&adapter->afu_list_lock); - - /* should take the context lock here */ - if (cxl_adapter_context_lock(adapter) != 0) - dev_warn(&adapter->dev, - "Couldn't take context lock with %d active-contexts\n", - atomic_read(&adapter->contexts_num)); - - cxl_deconfigure_adapter(adapter); - - return result; -} - -static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev) -{ - struct cxl *adapter = pci_get_drvdata(pdev); - struct cxl_afu *afu; - struct cxl_context *ctx; - struct pci_dev *afu_dev; - struct pci_driver *afu_drv; - const struct pci_error_handlers *err_handler; - pci_ers_result_t afu_result = PCI_ERS_RESULT_RECOVERED; - pci_ers_result_t result = PCI_ERS_RESULT_RECOVERED; - int i; - - if (cxl_configure_adapter(adapter, pdev)) - goto err; - - /* - * Unlock context activation for the adapter. Ideally this should be - * done in cxl_pci_resume but cxlflash module tries to activate the - * master context as part of slot_reset callback. 
- */ - cxl_adapter_context_unlock(adapter); - - spin_lock(&adapter->afu_list_lock); - for (i = 0; i < adapter->slices; i++) { - afu = adapter->afu[i]; - - if (afu == NULL) - continue; - - if (pci_configure_afu(afu, adapter, pdev)) - goto err_unlock; - - if (cxl_afu_select_best_mode(afu)) - goto err_unlock; - - if (afu->phb == NULL) - continue; - - list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) { - /* Reset the device context. - * TODO: make this less disruptive - */ - ctx = cxl_get_context(afu_dev); - - if (ctx && cxl_release_context(ctx)) - goto err_unlock; - - ctx = cxl_dev_context_init(afu_dev); - if (IS_ERR(ctx)) - goto err_unlock; - - afu_dev->dev.archdata.cxl_ctx = ctx; - - if (cxl_ops->afu_check_and_enable(afu)) - goto err_unlock; - - afu_dev->error_state = pci_channel_io_normal; - - /* If there's a driver attached, allow it to - * chime in on recovery. Drivers should check - * if everything has come back OK, but - * shouldn't start new work until we call - * their resume function. - */ - afu_drv = to_pci_driver(afu_dev->dev.driver); - if (!afu_drv) - continue; - - err_handler = afu_drv->err_handler; - if (err_handler && err_handler->slot_reset) - afu_result = err_handler->slot_reset(afu_dev); - - if (afu_result == PCI_ERS_RESULT_DISCONNECT) - result = PCI_ERS_RESULT_DISCONNECT; - } - } - - spin_unlock(&adapter->afu_list_lock); - return result; - -err_unlock: - spin_unlock(&adapter->afu_list_lock); - -err: - /* All the bits that happen in both error_detected and cxl_remove - * should be idempotent, so we don't need to worry about leaving a mix - * of unconfigured and reconfigured resources. - */ - dev_err(&pdev->dev, "EEH recovery failed. 
Asking to be disconnected.\n"); - return PCI_ERS_RESULT_DISCONNECT; -} - -static void cxl_pci_resume(struct pci_dev *pdev) -{ - struct cxl *adapter = pci_get_drvdata(pdev); - struct cxl_afu *afu; - struct pci_dev *afu_dev; - struct pci_driver *afu_drv; - const struct pci_error_handlers *err_handler; - int i; - - /* Everything is back now. Drivers should restart work now. - * This is not the place to be checking if everything came back up - * properly, because there's no return value: do that in slot_reset. - */ - spin_lock(&adapter->afu_list_lock); - for (i = 0; i < adapter->slices; i++) { - afu = adapter->afu[i]; - - if (afu == NULL || afu->phb == NULL) - continue; - - list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) { - afu_drv = to_pci_driver(afu_dev->dev.driver); - if (!afu_drv) - continue; - - err_handler = afu_drv->err_handler; - if (err_handler && err_handler->resume) - err_handler->resume(afu_dev); - } - } - spin_unlock(&adapter->afu_list_lock); -} - -static const struct pci_error_handlers cxl_err_handler = { - .error_detected = cxl_pci_error_detected, - .slot_reset = cxl_pci_slot_reset, - .resume = cxl_pci_resume, -}; - -struct pci_driver cxl_pci_driver = { - .name = "cxl-pci", - .id_table = cxl_pci_tbl, - .probe = cxl_probe, - .remove = cxl_remove, - .shutdown = cxl_remove, - .err_handler = &cxl_err_handler, -}; diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c deleted file mode 100644 index 315c43f17dd3..000000000000 --- a/drivers/misc/cxl/sysfs.c +++ /dev/null @@ -1,771 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright 2014 IBM Corp. 
- */ - -#include <linux/kernel.h> -#include <linux/device.h> -#include <linux/sysfs.h> -#include <linux/pci_regs.h> - -#include "cxl.h" - -#define to_afu_chardev_m(d) dev_get_drvdata(d) - -/********* Adapter attributes **********************************************/ - -static ssize_t caia_version_show(struct device *device, - struct device_attribute *attr, - char *buf) -{ - struct cxl *adapter = to_cxl_adapter(device); - - return scnprintf(buf, PAGE_SIZE, "%i.%i\n", adapter->caia_major, - adapter->caia_minor); -} - -static ssize_t psl_revision_show(struct device *device, - struct device_attribute *attr, - char *buf) -{ - struct cxl *adapter = to_cxl_adapter(device); - - return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->psl_rev); -} - -static ssize_t base_image_show(struct device *device, - struct device_attribute *attr, - char *buf) -{ - struct cxl *adapter = to_cxl_adapter(device); - - return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->base_image); -} - -static ssize_t image_loaded_show(struct device *device, - struct device_attribute *attr, - char *buf) -{ - struct cxl *adapter = to_cxl_adapter(device); - - if (adapter->user_image_loaded) - return scnprintf(buf, PAGE_SIZE, "user\n"); - return scnprintf(buf, PAGE_SIZE, "factory\n"); -} - -static ssize_t psl_timebase_synced_show(struct device *device, - struct device_attribute *attr, - char *buf) -{ - struct cxl *adapter = to_cxl_adapter(device); - u64 psl_tb, delta; - - /* Recompute the status only in native mode */ - if (cpu_has_feature(CPU_FTR_HVMODE)) { - psl_tb = adapter->native->sl_ops->timebase_read(adapter); - delta = abs(mftb() - psl_tb); - - /* CORE TB and PSL TB difference <= 16usecs ? */ - adapter->psl_timebase_synced = (tb_to_ns(delta) < 16000) ? true : false; - pr_devel("PSL timebase %s - delta: 0x%016llx\n", - (tb_to_ns(delta) < 16000) ? 
"synchronized" : - "not synchronized", tb_to_ns(delta)); - } - return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->psl_timebase_synced); -} - -static ssize_t tunneled_ops_supported_show(struct device *device, - struct device_attribute *attr, - char *buf) -{ - struct cxl *adapter = to_cxl_adapter(device); - - return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->tunneled_ops_supported); -} - -static ssize_t reset_adapter_store(struct device *device, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct cxl *adapter = to_cxl_adapter(device); - int rc; - int val; - - rc = sscanf(buf, "%i", &val); - if ((rc != 1) || (val != 1 && val != -1)) - return -EINVAL; - - /* - * See if we can lock the context mapping that's only allowed - * when there are no contexts attached to the adapter. Once - * taken this will also prevent any context from getting activated. - */ - if (val == 1) { - rc = cxl_adapter_context_lock(adapter); - if (rc) - goto out; - - rc = cxl_ops->adapter_reset(adapter); - /* In case reset failed release context lock */ - if (rc) - cxl_adapter_context_unlock(adapter); - - } else if (val == -1) { - /* Perform a forced adapter reset */ - rc = cxl_ops->adapter_reset(adapter); - } - -out: - return rc ? 
rc : count; -} - -static ssize_t load_image_on_perst_show(struct device *device, - struct device_attribute *attr, - char *buf) -{ - struct cxl *adapter = to_cxl_adapter(device); - - if (!adapter->perst_loads_image) - return scnprintf(buf, PAGE_SIZE, "none\n"); - - if (adapter->perst_select_user) - return scnprintf(buf, PAGE_SIZE, "user\n"); - return scnprintf(buf, PAGE_SIZE, "factory\n"); -} - -static ssize_t load_image_on_perst_store(struct device *device, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct cxl *adapter = to_cxl_adapter(device); - int rc; - - if (!strncmp(buf, "none", 4)) - adapter->perst_loads_image = false; - else if (!strncmp(buf, "user", 4)) { - adapter->perst_select_user = true; - adapter->perst_loads_image = true; - } else if (!strncmp(buf, "factory", 7)) { - adapter->perst_select_user = false; - adapter->perst_loads_image = true; - } else - return -EINVAL; - - if ((rc = cxl_update_image_control(adapter))) - return rc; - - return count; -} - -static ssize_t perst_reloads_same_image_show(struct device *device, - struct device_attribute *attr, - char *buf) -{ - struct cxl *adapter = to_cxl_adapter(device); - - return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->perst_same_image); -} - -static ssize_t perst_reloads_same_image_store(struct device *device, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct cxl *adapter = to_cxl_adapter(device); - int rc; - int val; - - rc = sscanf(buf, "%i", &val); - if ((rc != 1) || !(val == 1 || val == 0)) - return -EINVAL; - - adapter->perst_same_image = (val == 1); - return count; -} - -static struct device_attribute adapter_attrs[] = { - __ATTR_RO(caia_version), - __ATTR_RO(psl_revision), - __ATTR_RO(base_image), - __ATTR_RO(image_loaded), - __ATTR_RO(psl_timebase_synced), - __ATTR_RO(tunneled_ops_supported), - __ATTR_RW(load_image_on_perst), - __ATTR_RW(perst_reloads_same_image), - __ATTR(reset, S_IWUSR, NULL, reset_adapter_store), -}; - - -/********* AFU 
master specific attributes **********************************/ - -static ssize_t mmio_size_show_master(struct device *device, - struct device_attribute *attr, - char *buf) -{ - struct cxl_afu *afu = to_afu_chardev_m(device); - - return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->adapter->ps_size); -} - -static ssize_t pp_mmio_off_show(struct device *device, - struct device_attribute *attr, - char *buf) -{ - struct cxl_afu *afu = to_afu_chardev_m(device); - - return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->native->pp_offset); -} - -static ssize_t pp_mmio_len_show(struct device *device, - struct device_attribute *attr, - char *buf) -{ - struct cxl_afu *afu = to_afu_chardev_m(device); - - return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->pp_size); -} - -static struct device_attribute afu_master_attrs[] = { - __ATTR(mmio_size, S_IRUGO, mmio_size_show_master, NULL), - __ATTR_RO(pp_mmio_off), - __ATTR_RO(pp_mmio_len), -}; - - -/********* AFU attributes **************************************************/ - -static ssize_t mmio_size_show(struct device *device, - struct device_attribute *attr, - char *buf) -{ - struct cxl_afu *afu = to_cxl_afu(device); - - if (afu->pp_size) - return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->pp_size); - return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->adapter->ps_size); -} - -static ssize_t reset_store_afu(struct device *device, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct cxl_afu *afu = to_cxl_afu(device); - int rc; - - /* Not safe to reset if it is currently in use */ - mutex_lock(&afu->contexts_lock); - if (!idr_is_empty(&afu->contexts_idr)) { - rc = -EBUSY; - goto err; - } - - if ((rc = cxl_ops->afu_reset(afu))) - goto err; - - rc = count; -err: - mutex_unlock(&afu->contexts_lock); - return rc; -} - -static ssize_t irqs_min_show(struct device *device, - struct device_attribute *attr, - char *buf) -{ - struct cxl_afu *afu = to_cxl_afu(device); - - return scnprintf(buf, PAGE_SIZE, "%i\n", afu->pp_irqs); -} - 
-static ssize_t irqs_max_show(struct device *device, - struct device_attribute *attr, - char *buf) -{ - struct cxl_afu *afu = to_cxl_afu(device); - - return scnprintf(buf, PAGE_SIZE, "%i\n", afu->irqs_max); -} - -static ssize_t irqs_max_store(struct device *device, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct cxl_afu *afu = to_cxl_afu(device); - ssize_t ret; - int irqs_max; - - ret = sscanf(buf, "%i", &irqs_max); - if (ret != 1) - return -EINVAL; - - if (irqs_max < afu->pp_irqs) - return -EINVAL; - - if (cpu_has_feature(CPU_FTR_HVMODE)) { - if (irqs_max > afu->adapter->user_irqs) - return -EINVAL; - } else { - /* pHyp sets a per-AFU limit */ - if (irqs_max > afu->guest->max_ints) - return -EINVAL; - } - - afu->irqs_max = irqs_max; - return count; -} - -static ssize_t modes_supported_show(struct device *device, - struct device_attribute *attr, char *buf) -{ - struct cxl_afu *afu = to_cxl_afu(device); - char *p = buf, *end = buf + PAGE_SIZE; - - if (afu->modes_supported & CXL_MODE_DEDICATED) - p += scnprintf(p, end - p, "dedicated_process\n"); - if (afu->modes_supported & CXL_MODE_DIRECTED) - p += scnprintf(p, end - p, "afu_directed\n"); - return (p - buf); -} - -static ssize_t prefault_mode_show(struct device *device, - struct device_attribute *attr, - char *buf) -{ - struct cxl_afu *afu = to_cxl_afu(device); - - switch (afu->prefault_mode) { - case CXL_PREFAULT_WED: - return scnprintf(buf, PAGE_SIZE, "work_element_descriptor\n"); - case CXL_PREFAULT_ALL: - return scnprintf(buf, PAGE_SIZE, "all\n"); - default: - return scnprintf(buf, PAGE_SIZE, "none\n"); - } -} - -static ssize_t prefault_mode_store(struct device *device, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct cxl_afu *afu = to_cxl_afu(device); - enum prefault_modes mode = -1; - - if (!strncmp(buf, "none", 4)) - mode = CXL_PREFAULT_NONE; - else { - if (!radix_enabled()) { - - /* only allowed when not in radix mode */ - if (!strncmp(buf, 
"work_element_descriptor", 23)) - mode = CXL_PREFAULT_WED; - if (!strncmp(buf, "all", 3)) - mode = CXL_PREFAULT_ALL; - } else { - dev_err(device, "Cannot prefault with radix enabled\n"); - } - } - - if (mode == -1) - return -EINVAL; - - afu->prefault_mode = mode; - return count; -} - -static ssize_t mode_show(struct device *device, - struct device_attribute *attr, - char *buf) -{ - struct cxl_afu *afu = to_cxl_afu(device); - - if (afu->current_mode == CXL_MODE_DEDICATED) - return scnprintf(buf, PAGE_SIZE, "dedicated_process\n"); - if (afu->current_mode == CXL_MODE_DIRECTED) - return scnprintf(buf, PAGE_SIZE, "afu_directed\n"); - return scnprintf(buf, PAGE_SIZE, "none\n"); -} - -static ssize_t mode_store(struct device *device, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct cxl_afu *afu = to_cxl_afu(device); - int old_mode, mode = -1; - int rc = -EBUSY; - - /* can't change this if we have a user */ - mutex_lock(&afu->contexts_lock); - if (!idr_is_empty(&afu->contexts_idr)) - goto err; - - if (!strncmp(buf, "dedicated_process", 17)) - mode = CXL_MODE_DEDICATED; - if (!strncmp(buf, "afu_directed", 12)) - mode = CXL_MODE_DIRECTED; - if (!strncmp(buf, "none", 4)) - mode = 0; - - if (mode == -1) { - rc = -EINVAL; - goto err; - } - - /* - * afu_deactivate_mode needs to be done outside the lock, prevent - * other contexts coming in before we are ready: - */ - old_mode = afu->current_mode; - afu->current_mode = 0; - afu->num_procs = 0; - - mutex_unlock(&afu->contexts_lock); - - if ((rc = cxl_ops->afu_deactivate_mode(afu, old_mode))) - return rc; - if ((rc = cxl_ops->afu_activate_mode(afu, mode))) - return rc; - - return count; -err: - mutex_unlock(&afu->contexts_lock); - return rc; -} - -static ssize_t api_version_show(struct device *device, - struct device_attribute *attr, - char *buf) -{ - return scnprintf(buf, PAGE_SIZE, "%i\n", CXL_API_VERSION); -} - -static ssize_t api_version_compatible_show(struct device *device, - struct device_attribute 
*attr, - char *buf) -{ - return scnprintf(buf, PAGE_SIZE, "%i\n", CXL_API_VERSION_COMPATIBLE); -} - -static ssize_t afu_eb_read(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, char *buf, - loff_t off, size_t count) -{ - struct cxl_afu *afu = to_cxl_afu(kobj_to_dev(kobj)); - - return cxl_ops->afu_read_err_buffer(afu, buf, off, count); -} - -static struct device_attribute afu_attrs[] = { - __ATTR_RO(mmio_size), - __ATTR_RO(irqs_min), - __ATTR_RW(irqs_max), - __ATTR_RO(modes_supported), - __ATTR_RW(mode), - __ATTR_RW(prefault_mode), - __ATTR_RO(api_version), - __ATTR_RO(api_version_compatible), - __ATTR(reset, S_IWUSR, NULL, reset_store_afu), -}; - -int cxl_sysfs_adapter_add(struct cxl *adapter) -{ - struct device_attribute *dev_attr; - int i, rc; - - for (i = 0; i < ARRAY_SIZE(adapter_attrs); i++) { - dev_attr = &adapter_attrs[i]; - if (cxl_ops->support_attributes(dev_attr->attr.name, - CXL_ADAPTER_ATTRS)) { - if ((rc = device_create_file(&adapter->dev, dev_attr))) - goto err; - } - } - return 0; -err: - for (i--; i >= 0; i--) { - dev_attr = &adapter_attrs[i]; - if (cxl_ops->support_attributes(dev_attr->attr.name, - CXL_ADAPTER_ATTRS)) - device_remove_file(&adapter->dev, dev_attr); - } - return rc; -} - -void cxl_sysfs_adapter_remove(struct cxl *adapter) -{ - struct device_attribute *dev_attr; - int i; - - for (i = 0; i < ARRAY_SIZE(adapter_attrs); i++) { - dev_attr = &adapter_attrs[i]; - if (cxl_ops->support_attributes(dev_attr->attr.name, - CXL_ADAPTER_ATTRS)) - device_remove_file(&adapter->dev, dev_attr); - } -} - -struct afu_config_record { - struct kobject kobj; - struct bin_attribute config_attr; - struct list_head list; - int cr; - u16 device; - u16 vendor; - u32 class; -}; - -#define to_cr(obj) container_of(obj, struct afu_config_record, kobj) - -static ssize_t vendor_show(struct kobject *kobj, - struct kobj_attribute *attr, char *buf) -{ - struct afu_config_record *cr = to_cr(kobj); - - return scnprintf(buf, PAGE_SIZE, "0x%.4x\n", 
cr->vendor); -} - -static ssize_t device_show(struct kobject *kobj, - struct kobj_attribute *attr, char *buf) -{ - struct afu_config_record *cr = to_cr(kobj); - - return scnprintf(buf, PAGE_SIZE, "0x%.4x\n", cr->device); -} - -static ssize_t class_show(struct kobject *kobj, - struct kobj_attribute *attr, char *buf) -{ - struct afu_config_record *cr = to_cr(kobj); - - return scnprintf(buf, PAGE_SIZE, "0x%.6x\n", cr->class); -} - -static ssize_t afu_read_config(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, char *buf, - loff_t off, size_t count) -{ - struct afu_config_record *cr = to_cr(kobj); - struct cxl_afu *afu = to_cxl_afu(kobj_to_dev(kobj->parent)); - - u64 i, j, val, rc; - - for (i = 0; i < count;) { - rc = cxl_ops->afu_cr_read64(afu, cr->cr, off & ~0x7, &val); - if (rc) - val = ~0ULL; - for (j = off & 0x7; j < 8 && i < count; i++, j++, off++) - buf[i] = (val >> (j * 8)) & 0xff; - } - - return count; -} - -static struct kobj_attribute vendor_attribute = - __ATTR_RO(vendor); -static struct kobj_attribute device_attribute = - __ATTR_RO(device); -static struct kobj_attribute class_attribute = - __ATTR_RO(class); - -static struct attribute *afu_cr_attrs[] = { - &vendor_attribute.attr, - &device_attribute.attr, - &class_attribute.attr, - NULL, -}; -ATTRIBUTE_GROUPS(afu_cr); - -static void release_afu_config_record(struct kobject *kobj) -{ - struct afu_config_record *cr = to_cr(kobj); - - kfree(cr); -} - -static struct kobj_type afu_config_record_type = { - .sysfs_ops = &kobj_sysfs_ops, - .release = release_afu_config_record, - .default_groups = afu_cr_groups, -}; - -static struct afu_config_record *cxl_sysfs_afu_new_cr(struct cxl_afu *afu, int cr_idx) -{ - struct afu_config_record *cr; - int rc; - - cr = kzalloc(sizeof(struct afu_config_record), GFP_KERNEL); - if (!cr) - return ERR_PTR(-ENOMEM); - - cr->cr = cr_idx; - - rc = cxl_ops->afu_cr_read16(afu, cr_idx, PCI_DEVICE_ID, &cr->device); - if (rc) - goto err; - rc = 
cxl_ops->afu_cr_read16(afu, cr_idx, PCI_VENDOR_ID, &cr->vendor); - if (rc) - goto err; - rc = cxl_ops->afu_cr_read32(afu, cr_idx, PCI_CLASS_REVISION, &cr->class); - if (rc) - goto err; - cr->class >>= 8; - - /* - * Export raw AFU PCIe like config record. For now this is read only by - * root - we can expand that later to be readable by non-root and maybe - * even writable provided we have a good use-case. Once we support - * exposing AFUs through a virtual PHB they will get that for free from - * Linux' PCI infrastructure, but until then it's not clear that we - * need it for anything since the main use case is just identifying - * AFUs, which can be done via the vendor, device and class attributes. - */ - sysfs_bin_attr_init(&cr->config_attr); - cr->config_attr.attr.name = "config"; - cr->config_attr.attr.mode = S_IRUSR; - cr->config_attr.size = afu->crs_len; - cr->config_attr.read = afu_read_config; - - rc = kobject_init_and_add(&cr->kobj, &afu_config_record_type, - &afu->dev.kobj, "cr%i", cr->cr); - if (rc) - goto err1; - - rc = sysfs_create_bin_file(&cr->kobj, &cr->config_attr); - if (rc) - goto err1; - - rc = kobject_uevent(&cr->kobj, KOBJ_ADD); - if (rc) - goto err2; - - return cr; -err2: - sysfs_remove_bin_file(&cr->kobj, &cr->config_attr); -err1: - kobject_put(&cr->kobj); - return ERR_PTR(rc); -err: - kfree(cr); - return ERR_PTR(rc); -} - -void cxl_sysfs_afu_remove(struct cxl_afu *afu) -{ - struct device_attribute *dev_attr; - struct afu_config_record *cr, *tmp; - int i; - - /* remove the err buffer bin attribute */ - if (afu->eb_len) - device_remove_bin_file(&afu->dev, &afu->attr_eb); - - for (i = 0; i < ARRAY_SIZE(afu_attrs); i++) { - dev_attr = &afu_attrs[i]; - if (cxl_ops->support_attributes(dev_attr->attr.name, - CXL_AFU_ATTRS)) - device_remove_file(&afu->dev, &afu_attrs[i]); - } - - list_for_each_entry_safe(cr, tmp, &afu->crs, list) { - sysfs_remove_bin_file(&cr->kobj, &cr->config_attr); - kobject_put(&cr->kobj); - } -} - -int cxl_sysfs_afu_add(struct 
cxl_afu *afu) -{ - struct device_attribute *dev_attr; - struct afu_config_record *cr; - int i, rc; - - INIT_LIST_HEAD(&afu->crs); - - for (i = 0; i < ARRAY_SIZE(afu_attrs); i++) { - dev_attr = &afu_attrs[i]; - if (cxl_ops->support_attributes(dev_attr->attr.name, - CXL_AFU_ATTRS)) { - if ((rc = device_create_file(&afu->dev, &afu_attrs[i]))) - goto err; - } - } - - /* conditionally create the add the binary file for error info buffer */ - if (afu->eb_len) { - sysfs_attr_init(&afu->attr_eb.attr); - - afu->attr_eb.attr.name = "afu_err_buff"; - afu->attr_eb.attr.mode = S_IRUGO; - afu->attr_eb.size = afu->eb_len; - afu->attr_eb.read = afu_eb_read; - - rc = device_create_bin_file(&afu->dev, &afu->attr_eb); - if (rc) { - dev_err(&afu->dev, - "Unable to create eb attr for the afu. Err(%d)\n", - rc); - goto err; - } - } - - for (i = 0; i < afu->crs_num; i++) { - cr = cxl_sysfs_afu_new_cr(afu, i); - if (IS_ERR(cr)) { - rc = PTR_ERR(cr); - goto err1; - } - list_add(&cr->list, &afu->crs); - } - - return 0; - -err1: - cxl_sysfs_afu_remove(afu); - return rc; -err: - /* reset the eb_len as we havent created the bin attr */ - afu->eb_len = 0; - - for (i--; i >= 0; i--) { - dev_attr = &afu_attrs[i]; - if (cxl_ops->support_attributes(dev_attr->attr.name, - CXL_AFU_ATTRS)) - device_remove_file(&afu->dev, &afu_attrs[i]); - } - return rc; -} - -int cxl_sysfs_afu_m_add(struct cxl_afu *afu) -{ - struct device_attribute *dev_attr; - int i, rc; - - for (i = 0; i < ARRAY_SIZE(afu_master_attrs); i++) { - dev_attr = &afu_master_attrs[i]; - if (cxl_ops->support_attributes(dev_attr->attr.name, - CXL_AFU_MASTER_ATTRS)) { - if ((rc = device_create_file(afu->chardev_m, &afu_master_attrs[i]))) - goto err; - } - } - - return 0; - -err: - for (i--; i >= 0; i--) { - dev_attr = &afu_master_attrs[i]; - if (cxl_ops->support_attributes(dev_attr->attr.name, - CXL_AFU_MASTER_ATTRS)) - device_remove_file(afu->chardev_m, &afu_master_attrs[i]); - } - return rc; -} - -void cxl_sysfs_afu_m_remove(struct cxl_afu 
*afu) -{ - struct device_attribute *dev_attr; - int i; - - for (i = 0; i < ARRAY_SIZE(afu_master_attrs); i++) { - dev_attr = &afu_master_attrs[i]; - if (cxl_ops->support_attributes(dev_attr->attr.name, - CXL_AFU_MASTER_ATTRS)) - device_remove_file(afu->chardev_m, &afu_master_attrs[i]); - } -} diff --git a/drivers/misc/cxl/trace.c b/drivers/misc/cxl/trace.c deleted file mode 100644 index 86f654b99efb..000000000000 --- a/drivers/misc/cxl/trace.c +++ /dev/null @@ -1,9 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright 2015 IBM Corp. - */ - -#ifndef __CHECKER__ -#define CREATE_TRACE_POINTS -#include "trace.h" -#endif diff --git a/drivers/misc/cxl/trace.h b/drivers/misc/cxl/trace.h deleted file mode 100644 index c474157c6857..000000000000 --- a/drivers/misc/cxl/trace.h +++ /dev/null @@ -1,691 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * Copyright 2015 IBM Corp. - */ - -#undef TRACE_SYSTEM -#define TRACE_SYSTEM cxl - -#if !defined(_CXL_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) -#define _CXL_TRACE_H - -#include <linux/tracepoint.h> - -#include "cxl.h" - -#define dsisr_psl9_flags(flags) \ - __print_flags(flags, "|", \ - { CXL_PSL9_DSISR_An_CO_MASK, "FR" }, \ - { CXL_PSL9_DSISR_An_TF, "TF" }, \ - { CXL_PSL9_DSISR_An_PE, "PE" }, \ - { CXL_PSL9_DSISR_An_AE, "AE" }, \ - { CXL_PSL9_DSISR_An_OC, "OC" }, \ - { CXL_PSL9_DSISR_An_S, "S" }) - -#define DSISR_FLAGS \ - { CXL_PSL_DSISR_An_DS, "DS" }, \ - { CXL_PSL_DSISR_An_DM, "DM" }, \ - { CXL_PSL_DSISR_An_ST, "ST" }, \ - { CXL_PSL_DSISR_An_UR, "UR" }, \ - { CXL_PSL_DSISR_An_PE, "PE" }, \ - { CXL_PSL_DSISR_An_AE, "AE" }, \ - { CXL_PSL_DSISR_An_OC, "OC" }, \ - { CXL_PSL_DSISR_An_M, "M" }, \ - { CXL_PSL_DSISR_An_P, "P" }, \ - { CXL_PSL_DSISR_An_A, "A" }, \ - { CXL_PSL_DSISR_An_S, "S" }, \ - { CXL_PSL_DSISR_An_K, "K" } - -#define TFC_FLAGS \ - { CXL_PSL_TFC_An_A, "A" }, \ - { CXL_PSL_TFC_An_C, "C" }, \ - { CXL_PSL_TFC_An_AE, "AE" }, \ - { CXL_PSL_TFC_An_R, "R" } - -#define LLCMD_NAMES \ - { 
CXL_SPA_SW_CMD_TERMINATE, "TERMINATE" }, \ - { CXL_SPA_SW_CMD_REMOVE, "REMOVE" }, \ - { CXL_SPA_SW_CMD_SUSPEND, "SUSPEND" }, \ - { CXL_SPA_SW_CMD_RESUME, "RESUME" }, \ - { CXL_SPA_SW_CMD_ADD, "ADD" }, \ - { CXL_SPA_SW_CMD_UPDATE, "UPDATE" } - -#define AFU_COMMANDS \ - { 0, "DISABLE" }, \ - { CXL_AFU_Cntl_An_E, "ENABLE" }, \ - { CXL_AFU_Cntl_An_RA, "RESET" } - -#define PSL_COMMANDS \ - { CXL_PSL_SCNTL_An_Pc, "PURGE" }, \ - { CXL_PSL_SCNTL_An_Sc, "SUSPEND" } - - -DECLARE_EVENT_CLASS(cxl_pe_class, - TP_PROTO(struct cxl_context *ctx), - - TP_ARGS(ctx), - - TP_STRUCT__entry( - __field(u8, card) - __field(u8, afu) - __field(u16, pe) - ), - - TP_fast_assign( - __entry->card = ctx->afu->adapter->adapter_num; - __entry->afu = ctx->afu->slice; - __entry->pe = ctx->pe; - ), - - TP_printk("afu%i.%i pe=%i", - __entry->card, - __entry->afu, - __entry->pe - ) -); - - -TRACE_EVENT(cxl_attach, - TP_PROTO(struct cxl_context *ctx, u64 wed, s16 num_interrupts, u64 amr), - - TP_ARGS(ctx, wed, num_interrupts, amr), - - TP_STRUCT__entry( - __field(u8, card) - __field(u8, afu) - __field(u16, pe) - __field(pid_t, pid) - __field(u64, wed) - __field(u64, amr) - __field(s16, num_interrupts) - ), - - TP_fast_assign( - __entry->card = ctx->afu->adapter->adapter_num; - __entry->afu = ctx->afu->slice; - __entry->pe = ctx->pe; - __entry->pid = pid_nr(ctx->pid); - __entry->wed = wed; - __entry->amr = amr; - __entry->num_interrupts = num_interrupts; - ), - - TP_printk("afu%i.%i pid=%i pe=%i wed=0x%016llx irqs=%i amr=0x%llx", - __entry->card, - __entry->afu, - __entry->pid, - __entry->pe, - __entry->wed, - __entry->num_interrupts, - __entry->amr - ) -); - -DEFINE_EVENT(cxl_pe_class, cxl_detach, - TP_PROTO(struct cxl_context *ctx), - TP_ARGS(ctx) -); - -TRACE_EVENT(cxl_afu_irq, - TP_PROTO(struct cxl_context *ctx, int afu_irq, int virq, irq_hw_number_t hwirq), - - TP_ARGS(ctx, afu_irq, virq, hwirq), - - TP_STRUCT__entry( - __field(u8, card) - __field(u8, afu) - __field(u16, pe) - __field(u16, afu_irq) 
- __field(int, virq) - __field(irq_hw_number_t, hwirq) - ), - - TP_fast_assign( - __entry->card = ctx->afu->adapter->adapter_num; - __entry->afu = ctx->afu->slice; - __entry->pe = ctx->pe; - __entry->afu_irq = afu_irq; - __entry->virq = virq; - __entry->hwirq = hwirq; - ), - - TP_printk("afu%i.%i pe=%i afu_irq=%i virq=%i hwirq=0x%lx", - __entry->card, - __entry->afu, - __entry->pe, - __entry->afu_irq, - __entry->virq, - __entry->hwirq - ) -); - -TRACE_EVENT(cxl_psl9_irq, - TP_PROTO(struct cxl_context *ctx, int irq, u64 dsisr, u64 dar), - - TP_ARGS(ctx, irq, dsisr, dar), - - TP_STRUCT__entry( - __field(u8, card) - __field(u8, afu) - __field(u16, pe) - __field(int, irq) - __field(u64, dsisr) - __field(u64, dar) - ), - - TP_fast_assign( - __entry->card = ctx->afu->adapter->adapter_num; - __entry->afu = ctx->afu->slice; - __entry->pe = ctx->pe; - __entry->irq = irq; - __entry->dsisr = dsisr; - __entry->dar = dar; - ), - - TP_printk("afu%i.%i pe=%i irq=%i dsisr=0x%016llx dsisr=%s dar=0x%016llx", - __entry->card, - __entry->afu, - __entry->pe, - __entry->irq, - __entry->dsisr, - dsisr_psl9_flags(__entry->dsisr), - __entry->dar - ) -); - -TRACE_EVENT(cxl_psl_irq, - TP_PROTO(struct cxl_context *ctx, int irq, u64 dsisr, u64 dar), - - TP_ARGS(ctx, irq, dsisr, dar), - - TP_STRUCT__entry( - __field(u8, card) - __field(u8, afu) - __field(u16, pe) - __field(int, irq) - __field(u64, dsisr) - __field(u64, dar) - ), - - TP_fast_assign( - __entry->card = ctx->afu->adapter->adapter_num; - __entry->afu = ctx->afu->slice; - __entry->pe = ctx->pe; - __entry->irq = irq; - __entry->dsisr = dsisr; - __entry->dar = dar; - ), - - TP_printk("afu%i.%i pe=%i irq=%i dsisr=%s dar=0x%016llx", - __entry->card, - __entry->afu, - __entry->pe, - __entry->irq, - __print_flags(__entry->dsisr, "|", DSISR_FLAGS), - __entry->dar - ) -); - -TRACE_EVENT(cxl_psl_irq_ack, - TP_PROTO(struct cxl_context *ctx, u64 tfc), - - TP_ARGS(ctx, tfc), - - TP_STRUCT__entry( - __field(u8, card) - __field(u8, afu) - 
__field(u16, pe) - __field(u64, tfc) - ), - - TP_fast_assign( - __entry->card = ctx->afu->adapter->adapter_num; - __entry->afu = ctx->afu->slice; - __entry->pe = ctx->pe; - __entry->tfc = tfc; - ), - - TP_printk("afu%i.%i pe=%i tfc=%s", - __entry->card, - __entry->afu, - __entry->pe, - __print_flags(__entry->tfc, "|", TFC_FLAGS) - ) -); - -TRACE_EVENT(cxl_ste_miss, - TP_PROTO(struct cxl_context *ctx, u64 dar), - - TP_ARGS(ctx, dar), - - TP_STRUCT__entry( - __field(u8, card) - __field(u8, afu) - __field(u16, pe) - __field(u64, dar) - ), - - TP_fast_assign( - __entry->card = ctx->afu->adapter->adapter_num; - __entry->afu = ctx->afu->slice; - __entry->pe = ctx->pe; - __entry->dar = dar; - ), - - TP_printk("afu%i.%i pe=%i dar=0x%016llx", - __entry->card, - __entry->afu, - __entry->pe, - __entry->dar - ) -); - -TRACE_EVENT(cxl_ste_write, - TP_PROTO(struct cxl_context *ctx, unsigned int idx, u64 e, u64 v), - - TP_ARGS(ctx, idx, e, v), - - TP_STRUCT__entry( - __field(u8, card) - __field(u8, afu) - __field(u16, pe) - __field(unsigned int, idx) - __field(u64, e) - __field(u64, v) - ), - - TP_fast_assign( - __entry->card = ctx->afu->adapter->adapter_num; - __entry->afu = ctx->afu->slice; - __entry->pe = ctx->pe; - __entry->idx = idx; - __entry->e = e; - __entry->v = v; - ), - - TP_printk("afu%i.%i pe=%i SSTE[%i] E=0x%016llx V=0x%016llx", - __entry->card, - __entry->afu, - __entry->pe, - __entry->idx, - __entry->e, - __entry->v - ) -); - -TRACE_EVENT(cxl_pte_miss, - TP_PROTO(struct cxl_context *ctx, u64 dsisr, u64 dar), - - TP_ARGS(ctx, dsisr, dar), - - TP_STRUCT__entry( - __field(u8, card) - __field(u8, afu) - __field(u16, pe) - __field(u64, dsisr) - __field(u64, dar) - ), - - TP_fast_assign( - __entry->card = ctx->afu->adapter->adapter_num; - __entry->afu = ctx->afu->slice; - __entry->pe = ctx->pe; - __entry->dsisr = dsisr; - __entry->dar = dar; - ), - - TP_printk("afu%i.%i pe=%i dsisr=%s dar=0x%016llx", - __entry->card, - __entry->afu, - __entry->pe, - 
__print_flags(__entry->dsisr, "|", DSISR_FLAGS), - __entry->dar - ) -); - -TRACE_EVENT(cxl_llcmd, - TP_PROTO(struct cxl_context *ctx, u64 cmd), - - TP_ARGS(ctx, cmd), - - TP_STRUCT__entry( - __field(u8, card) - __field(u8, afu) - __field(u16, pe) - __field(u64, cmd) - ), - - TP_fast_assign( - __entry->card = ctx->afu->adapter->adapter_num; - __entry->afu = ctx->afu->slice; - __entry->pe = ctx->pe; - __entry->cmd = cmd; - ), - - TP_printk("afu%i.%i pe=%i cmd=%s", - __entry->card, - __entry->afu, - __entry->pe, - __print_symbolic_u64(__entry->cmd, LLCMD_NAMES) - ) -); - -TRACE_EVENT(cxl_llcmd_done, - TP_PROTO(struct cxl_context *ctx, u64 cmd, int rc), - - TP_ARGS(ctx, cmd, rc), - - TP_STRUCT__entry( - __field(u8, card) - __field(u8, afu) - __field(u16, pe) - __field(u64, cmd) - __field(int, rc) - ), - - TP_fast_assign( - __entry->card = ctx->afu->adapter->adapter_num; - __entry->afu = ctx->afu->slice; - __entry->pe = ctx->pe; - __entry->rc = rc; - __entry->cmd = cmd; - ), - - TP_printk("afu%i.%i pe=%i cmd=%s rc=%i", - __entry->card, - __entry->afu, - __entry->pe, - __print_symbolic_u64(__entry->cmd, LLCMD_NAMES), - __entry->rc - ) -); - -DECLARE_EVENT_CLASS(cxl_afu_psl_ctrl, - TP_PROTO(struct cxl_afu *afu, u64 cmd), - - TP_ARGS(afu, cmd), - - TP_STRUCT__entry( - __field(u8, card) - __field(u8, afu) - __field(u64, cmd) - ), - - TP_fast_assign( - __entry->card = afu->adapter->adapter_num; - __entry->afu = afu->slice; - __entry->cmd = cmd; - ), - - TP_printk("afu%i.%i cmd=%s", - __entry->card, - __entry->afu, - __print_symbolic_u64(__entry->cmd, AFU_COMMANDS) - ) -); - -DECLARE_EVENT_CLASS(cxl_afu_psl_ctrl_done, - TP_PROTO(struct cxl_afu *afu, u64 cmd, int rc), - - TP_ARGS(afu, cmd, rc), - - TP_STRUCT__entry( - __field(u8, card) - __field(u8, afu) - __field(u64, cmd) - __field(int, rc) - ), - - TP_fast_assign( - __entry->card = afu->adapter->adapter_num; - __entry->afu = afu->slice; - __entry->rc = rc; - __entry->cmd = cmd; - ), - - TP_printk("afu%i.%i cmd=%s rc=%i", - 
__entry->card, - __entry->afu, - __print_symbolic_u64(__entry->cmd, AFU_COMMANDS), - __entry->rc - ) -); - -DEFINE_EVENT(cxl_afu_psl_ctrl, cxl_afu_ctrl, - TP_PROTO(struct cxl_afu *afu, u64 cmd), - TP_ARGS(afu, cmd) -); - -DEFINE_EVENT(cxl_afu_psl_ctrl_done, cxl_afu_ctrl_done, - TP_PROTO(struct cxl_afu *afu, u64 cmd, int rc), - TP_ARGS(afu, cmd, rc) -); - -DEFINE_EVENT_PRINT(cxl_afu_psl_ctrl, cxl_psl_ctrl, - TP_PROTO(struct cxl_afu *afu, u64 cmd), - TP_ARGS(afu, cmd), - - TP_printk("psl%i.%i cmd=%s", - __entry->card, - __entry->afu, - __print_symbolic_u64(__entry->cmd, PSL_COMMANDS) - ) -); - -DEFINE_EVENT_PRINT(cxl_afu_psl_ctrl_done, cxl_psl_ctrl_done, - TP_PROTO(struct cxl_afu *afu, u64 cmd, int rc), - TP_ARGS(afu, cmd, rc), - - TP_printk("psl%i.%i cmd=%s rc=%i", - __entry->card, - __entry->afu, - __print_symbolic_u64(__entry->cmd, PSL_COMMANDS), - __entry->rc - ) -); - -DEFINE_EVENT(cxl_pe_class, cxl_slbia, - TP_PROTO(struct cxl_context *ctx), - TP_ARGS(ctx) -); - -TRACE_EVENT(cxl_hcall, - TP_PROTO(u64 unit_address, u64 process_token, long rc), - - TP_ARGS(unit_address, process_token, rc), - - TP_STRUCT__entry( - __field(u64, unit_address) - __field(u64, process_token) - __field(long, rc) - ), - - TP_fast_assign( - __entry->unit_address = unit_address; - __entry->process_token = process_token; - __entry->rc = rc; - ), - - TP_printk("unit_address=0x%016llx process_token=0x%016llx rc=%li", - __entry->unit_address, - __entry->process_token, - __entry->rc - ) -); - -TRACE_EVENT(cxl_hcall_control, - TP_PROTO(u64 unit_address, char *fct, u64 p1, u64 p2, u64 p3, - u64 p4, unsigned long r4, long rc), - - TP_ARGS(unit_address, fct, p1, p2, p3, p4, r4, rc), - - TP_STRUCT__entry( - __field(u64, unit_address) - __field(char *, fct) - __field(u64, p1) - __field(u64, p2) - __field(u64, p3) - __field(u64, p4) - __field(unsigned long, r4) - __field(long, rc) - ), - - TP_fast_assign( - __entry->unit_address = unit_address; - __entry->fct = fct; - __entry->p1 = p1; - __entry->p2 = 
p2; - __entry->p3 = p3; - __entry->p4 = p4; - __entry->r4 = r4; - __entry->rc = rc; - ), - - TP_printk("unit_address=%#.16llx %s(%#llx, %#llx, %#llx, %#llx, R4: %#lx)): %li", - __entry->unit_address, - __entry->fct, - __entry->p1, - __entry->p2, - __entry->p3, - __entry->p4, - __entry->r4, - __entry->rc - ) -); - -TRACE_EVENT(cxl_hcall_attach, - TP_PROTO(u64 unit_address, u64 phys_addr, unsigned long process_token, - unsigned long mmio_addr, unsigned long mmio_size, long rc), - - TP_ARGS(unit_address, phys_addr, process_token, - mmio_addr, mmio_size, rc), - - TP_STRUCT__entry( - __field(u64, unit_address) - __field(u64, phys_addr) - __field(unsigned long, process_token) - __field(unsigned long, mmio_addr) - __field(unsigned long, mmio_size) - __field(long, rc) - ), - - TP_fast_assign( - __entry->unit_address = unit_address; - __entry->phys_addr = phys_addr; - __entry->process_token = process_token; - __entry->mmio_addr = mmio_addr; - __entry->mmio_size = mmio_size; - __entry->rc = rc; - ), - - TP_printk("unit_address=0x%016llx phys_addr=0x%016llx " - "token=0x%.8lx mmio_addr=0x%lx mmio_size=0x%lx rc=%li", - __entry->unit_address, - __entry->phys_addr, - __entry->process_token, - __entry->mmio_addr, - __entry->mmio_size, - __entry->rc - ) -); - -DEFINE_EVENT(cxl_hcall, cxl_hcall_detach, - TP_PROTO(u64 unit_address, u64 process_token, long rc), - TP_ARGS(unit_address, process_token, rc) -); - -DEFINE_EVENT(cxl_hcall_control, cxl_hcall_control_function, - TP_PROTO(u64 unit_address, char *fct, u64 p1, u64 p2, u64 p3, - u64 p4, unsigned long r4, long rc), - TP_ARGS(unit_address, fct, p1, p2, p3, p4, r4, rc) -); - -DEFINE_EVENT(cxl_hcall, cxl_hcall_collect_int_info, - TP_PROTO(u64 unit_address, u64 process_token, long rc), - TP_ARGS(unit_address, process_token, rc) -); - -TRACE_EVENT(cxl_hcall_control_faults, - TP_PROTO(u64 unit_address, u64 process_token, - u64 control_mask, u64 reset_mask, unsigned long r4, - long rc), - - TP_ARGS(unit_address, process_token, - 
control_mask, reset_mask, r4, rc), - - TP_STRUCT__entry( - __field(u64, unit_address) - __field(u64, process_token) - __field(u64, control_mask) - __field(u64, reset_mask) - __field(unsigned long, r4) - __field(long, rc) - ), - - TP_fast_assign( - __entry->unit_address = unit_address; - __entry->process_token = process_token; - __entry->control_mask = control_mask; - __entry->reset_mask = reset_mask; - __entry->r4 = r4; - __entry->rc = rc; - ), - - TP_printk("unit_address=0x%016llx process_token=0x%llx " - "control_mask=%#llx reset_mask=%#llx r4=%#lx rc=%li", - __entry->unit_address, - __entry->process_token, - __entry->control_mask, - __entry->reset_mask, - __entry->r4, - __entry->rc - ) -); - -DEFINE_EVENT(cxl_hcall_control, cxl_hcall_control_facility, - TP_PROTO(u64 unit_address, char *fct, u64 p1, u64 p2, u64 p3, - u64 p4, unsigned long r4, long rc), - TP_ARGS(unit_address, fct, p1, p2, p3, p4, r4, rc) -); - -TRACE_EVENT(cxl_hcall_download_facility, - TP_PROTO(u64 unit_address, char *fct, u64 list_address, u64 num, - unsigned long r4, long rc), - - TP_ARGS(unit_address, fct, list_address, num, r4, rc), - - TP_STRUCT__entry( - __field(u64, unit_address) - __field(char *, fct) - __field(u64, list_address) - __field(u64, num) - __field(unsigned long, r4) - __field(long, rc) - ), - - TP_fast_assign( - __entry->unit_address = unit_address; - __entry->fct = fct; - __entry->list_address = list_address; - __entry->num = num; - __entry->r4 = r4; - __entry->rc = rc; - ), - - TP_printk("%#.16llx, %s(%#llx, %#llx), %#lx): %li", - __entry->unit_address, - __entry->fct, - __entry->list_address, - __entry->num, - __entry->r4, - __entry->rc - ) -); - -#endif /* _CXL_TRACE_H */ - -/* This part must be outside protection */ -#undef TRACE_INCLUDE_PATH -#define TRACE_INCLUDE_PATH . 
-#define TRACE_INCLUDE_FILE trace -#include <trace/define_trace.h> diff --git a/drivers/misc/cxl/vphb.c b/drivers/misc/cxl/vphb.c deleted file mode 100644 index 6332db8044bd..000000000000 --- a/drivers/misc/cxl/vphb.c +++ /dev/null @@ -1,309 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright 2014 IBM Corp. - */ - -#include <linux/pci.h> -#include <misc/cxl.h> -#include "cxl.h" - -static int cxl_pci_probe_mode(struct pci_bus *bus) -{ - return PCI_PROBE_NORMAL; -} - -static int cxl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) -{ - return -ENODEV; -} - -static void cxl_teardown_msi_irqs(struct pci_dev *pdev) -{ - /* - * MSI should never be set but need still need to provide this call - * back. - */ -} - -static bool cxl_pci_enable_device_hook(struct pci_dev *dev) -{ - struct pci_controller *phb; - struct cxl_afu *afu; - struct cxl_context *ctx; - - phb = pci_bus_to_host(dev->bus); - afu = (struct cxl_afu *)phb->private_data; - - if (!cxl_ops->link_ok(afu->adapter, afu)) { - dev_warn(&dev->dev, "%s: Device link is down, refusing to enable AFU\n", __func__); - return false; - } - - dev->dev.archdata.dma_offset = PAGE_OFFSET; - - /* - * Allocate a context to do cxl things too. If we eventually do real - * DMA ops, we'll need a default context to attach them to - */ - ctx = cxl_dev_context_init(dev); - if (IS_ERR(ctx)) - return false; - dev->dev.archdata.cxl_ctx = ctx; - - return (cxl_ops->afu_check_and_enable(afu) == 0); -} - -static void cxl_pci_disable_device(struct pci_dev *dev) -{ - struct cxl_context *ctx = cxl_get_context(dev); - - if (ctx) { - if (ctx->status == STARTED) { - dev_err(&dev->dev, "Default context started\n"); - return; - } - dev->dev.archdata.cxl_ctx = NULL; - cxl_release_context(ctx); - } -} - -static void cxl_pci_reset_secondary_bus(struct pci_dev *dev) -{ - /* Should we do an AFU reset here ? 
*/ -} - -static int cxl_pcie_cfg_record(u8 bus, u8 devfn) -{ - return (bus << 8) + devfn; -} - -static inline struct cxl_afu *pci_bus_to_afu(struct pci_bus *bus) -{ - struct pci_controller *phb = bus ? pci_bus_to_host(bus) : NULL; - - return phb ? phb->private_data : NULL; -} - -static void cxl_afu_configured_put(struct cxl_afu *afu) -{ - atomic_dec_if_positive(&afu->configured_state); -} - -static bool cxl_afu_configured_get(struct cxl_afu *afu) -{ - return atomic_inc_unless_negative(&afu->configured_state); -} - -static inline int cxl_pcie_config_info(struct pci_bus *bus, unsigned int devfn, - struct cxl_afu *afu, int *_record) -{ - int record; - - record = cxl_pcie_cfg_record(bus->number, devfn); - if (record > afu->crs_num) - return PCIBIOS_DEVICE_NOT_FOUND; - - *_record = record; - return 0; -} - -static int cxl_pcie_read_config(struct pci_bus *bus, unsigned int devfn, - int offset, int len, u32 *val) -{ - int rc, record; - struct cxl_afu *afu; - u8 val8; - u16 val16; - u32 val32; - - afu = pci_bus_to_afu(bus); - /* Grab a reader lock on afu. */ - if (afu == NULL || !cxl_afu_configured_get(afu)) - return PCIBIOS_DEVICE_NOT_FOUND; - - rc = cxl_pcie_config_info(bus, devfn, afu, &record); - if (rc) - goto out; - - switch (len) { - case 1: - rc = cxl_ops->afu_cr_read8(afu, record, offset, &val8); - *val = val8; - break; - case 2: - rc = cxl_ops->afu_cr_read16(afu, record, offset, &val16); - *val = val16; - break; - case 4: - rc = cxl_ops->afu_cr_read32(afu, record, offset, &val32); - *val = val32; - break; - default: - WARN_ON(1); - } - -out: - cxl_afu_configured_put(afu); - return rc ? PCIBIOS_DEVICE_NOT_FOUND : 0; -} - -static int cxl_pcie_write_config(struct pci_bus *bus, unsigned int devfn, - int offset, int len, u32 val) -{ - int rc, record; - struct cxl_afu *afu; - - afu = pci_bus_to_afu(bus); - /* Grab a reader lock on afu. 
*/ - if (afu == NULL || !cxl_afu_configured_get(afu)) - return PCIBIOS_DEVICE_NOT_FOUND; - - rc = cxl_pcie_config_info(bus, devfn, afu, &record); - if (rc) - goto out; - - switch (len) { - case 1: - rc = cxl_ops->afu_cr_write8(afu, record, offset, val & 0xff); - break; - case 2: - rc = cxl_ops->afu_cr_write16(afu, record, offset, val & 0xffff); - break; - case 4: - rc = cxl_ops->afu_cr_write32(afu, record, offset, val); - break; - default: - WARN_ON(1); - } - -out: - cxl_afu_configured_put(afu); - return rc ? PCIBIOS_SET_FAILED : 0; -} - -static struct pci_ops cxl_pcie_pci_ops = -{ - .read = cxl_pcie_read_config, - .write = cxl_pcie_write_config, -}; - - -static struct pci_controller_ops cxl_pci_controller_ops = -{ - .probe_mode = cxl_pci_probe_mode, - .enable_device_hook = cxl_pci_enable_device_hook, - .disable_device = cxl_pci_disable_device, - .release_device = cxl_pci_disable_device, - .reset_secondary_bus = cxl_pci_reset_secondary_bus, - .setup_msi_irqs = cxl_setup_msi_irqs, - .teardown_msi_irqs = cxl_teardown_msi_irqs, -}; - -int cxl_pci_vphb_add(struct cxl_afu *afu) -{ - struct pci_controller *phb; - struct device_node *vphb_dn; - struct device *parent; - - /* - * If there are no AFU configuration records we won't have anything to - * expose under the vPHB, so skip creating one, returning success since - * this is still a valid case. This will also opt us out of EEH - * handling since we won't have anything special to do if there are no - * kernel drivers attached to the vPHB, and EEH handling is not yet - * supported in the peer model. - */ - if (!afu->crs_num) - return 0; - - /* The parent device is the adapter. Reuse the device node of - * the adapter. - * We don't seem to care what device node is used for the vPHB, - * but tools such as lsvpd walk up the device parents looking - * for a valid location code, so we might as well show devices - * attached to the adapter as being located on that adapter. 
- */ - parent = afu->adapter->dev.parent; - vphb_dn = parent->of_node; - - /* Alloc and setup PHB data structure */ - phb = pcibios_alloc_controller(vphb_dn); - if (!phb) - return -ENODEV; - - /* Setup parent in sysfs */ - phb->parent = parent; - - /* Setup the PHB using arch provided callback */ - phb->ops = &cxl_pcie_pci_ops; - phb->cfg_addr = NULL; - phb->cfg_data = NULL; - phb->private_data = afu; - phb->controller_ops = cxl_pci_controller_ops; - - /* Scan the bus */ - pcibios_scan_phb(phb); - if (phb->bus == NULL) - return -ENXIO; - - /* Set release hook on root bus */ - pci_set_host_bridge_release(to_pci_host_bridge(phb->bus->bridge), - pcibios_free_controller_deferred, - (void *) phb); - - /* Claim resources. This might need some rework as well depending - * whether we are doing probe-only or not, like assigning unassigned - * resources etc... - */ - pcibios_claim_one_bus(phb->bus); - - /* Add probed PCI devices to the device model */ - pci_bus_add_devices(phb->bus); - - afu->phb = phb; - - return 0; -} - -void cxl_pci_vphb_remove(struct cxl_afu *afu) -{ - struct pci_controller *phb; - - /* If there is no configuration record we won't have one of these */ - if (!afu || !afu->phb) - return; - - phb = afu->phb; - afu->phb = NULL; - - pci_remove_root_bus(phb->bus); - /* - * We don't free phb here - that's handled by - * pcibios_free_controller_deferred() - */ -} - -bool cxl_pci_is_vphb_device(struct pci_dev *dev) -{ - struct pci_controller *phb; - - phb = pci_bus_to_host(dev->bus); - - return (phb->ops == &cxl_pcie_pci_ops); -} - -struct cxl_afu *cxl_pci_to_afu(struct pci_dev *dev) -{ - struct pci_controller *phb; - - phb = pci_bus_to_host(dev->bus); - - return (struct cxl_afu *)phb->private_data; -} -EXPORT_SYMBOL_GPL(cxl_pci_to_afu); - -unsigned int cxl_pci_to_cfg_record(struct pci_dev *dev) -{ - return cxl_pcie_cfg_record(dev->bus->number, dev->devfn); -} -EXPORT_SYMBOL_GPL(cxl_pci_to_cfg_record); diff --git a/drivers/misc/ds1682.c b/drivers/misc/ds1682.c 
index 21fc5bc85c5c..cb09e056531a 100644 --- a/drivers/misc/ds1682.c +++ b/drivers/misc/ds1682.c @@ -32,6 +32,7 @@ #include <linux/i2c.h> #include <linux/string.h> #include <linux/list.h> +#include <linux/nvmem-provider.h> #include <linux/sysfs.h> #include <linux/ctype.h> #include <linux/hwmon-sysfs.h> @@ -153,7 +154,7 @@ static const struct attribute_group ds1682_group = { * User data attribute */ static ssize_t ds1682_eeprom_read(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, + const struct bin_attribute *attr, char *buf, loff_t off, size_t count) { struct i2c_client *client = kobj_to_i2c_client(kobj); @@ -171,7 +172,7 @@ static ssize_t ds1682_eeprom_read(struct file *filp, struct kobject *kobj, } static ssize_t ds1682_eeprom_write(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, + const struct bin_attribute *attr, char *buf, loff_t off, size_t count) { struct i2c_client *client = kobj_to_i2c_client(kobj); @@ -197,11 +198,43 @@ static const struct bin_attribute ds1682_eeprom_attr = { .write = ds1682_eeprom_write, }; +static int ds1682_nvmem_read(void *priv, unsigned int offset, void *val, + size_t bytes) +{ + struct i2c_client *client = priv; + int ret; + + ret = i2c_smbus_read_i2c_block_data(client, DS1682_REG_EEPROM + offset, + bytes, val); + return ret < 0 ? ret : 0; +} + +static int ds1682_nvmem_write(void *priv, unsigned int offset, void *val, + size_t bytes) +{ + struct i2c_client *client = priv; + int ret; + + ret = i2c_smbus_write_i2c_block_data(client, DS1682_REG_EEPROM + offset, + bytes, val); + return ret < 0 ? 
ret : 0; +} + /* * Called when a ds1682 device is matched with this driver */ static int ds1682_probe(struct i2c_client *client) { + struct nvmem_config config = { + .dev = &client->dev, + .owner = THIS_MODULE, + .type = NVMEM_TYPE_EEPROM, + .reg_read = ds1682_nvmem_read, + .reg_write = ds1682_nvmem_write, + .size = DS1682_EEPROM_SIZE, + .priv = client, + }; + struct nvmem_device *nvmem; int rc; if (!i2c_check_functionality(client->adapter, @@ -211,6 +244,10 @@ static int ds1682_probe(struct i2c_client *client) goto exit; } + nvmem = devm_nvmem_register(&client->dev, &config); + if (IS_ENABLED(CONFIG_NVMEM) && IS_ERR(nvmem)) + return PTR_ERR(nvmem); + rc = sysfs_create_group(&client->dev.kobj, &ds1682_group); if (rc) goto exit; @@ -234,7 +271,7 @@ static void ds1682_remove(struct i2c_client *client) } static const struct i2c_device_id ds1682_id[] = { - { "ds1682", 0 }, + { "ds1682" }, { } }; MODULE_DEVICE_TABLE(i2c, ds1682_id); diff --git a/drivers/misc/dw-xdata-pcie.c b/drivers/misc/dw-xdata-pcie.c index 257c25da5199..a604c0e9c038 100644 --- a/drivers/misc/dw-xdata-pcie.c +++ b/drivers/misc/dw-xdata-pcie.c @@ -16,6 +16,7 @@ #include <linux/mutex.h> #include <linux/delay.h> #include <linux/pci.h> +#include <linux/string_choices.h> #define DW_XDATA_DRIVER_NAME "dw-xdata-pcie" @@ -132,7 +133,7 @@ static void dw_xdata_start(struct dw_xdata *dw, bool write) if (!(status & STATUS_DONE)) dev_dbg(dev, "xData: started %s direction\n", - write ? "write" : "read"); + str_write_read(write)); } static void dw_xdata_perf_meas(struct dw_xdata *dw, u64 *data, bool write) @@ -195,7 +196,7 @@ static void dw_xdata_perf(struct dw_xdata *dw, u64 *rate, bool write) mutex_unlock(&dw->mutex); dev_dbg(dev, "xData: time=%llu us, %s=%llu MB/s\n", - diff, write ? 
"write" : "read", *rate); + diff, str_write_read(write), *rate); } static struct dw_xdata *misc_dev_to_dw(struct miscdevice *misc_dev) @@ -333,7 +334,7 @@ static int dw_xdata_pcie_probe(struct pci_dev *pdev, dw->pdev = pdev; - id = ida_simple_get(&xdata_ida, 0, 0, GFP_KERNEL); + id = ida_alloc(&xdata_ida, GFP_KERNEL); if (id < 0) { dev_err(dev, "xData: unable to get id\n"); return id; @@ -377,7 +378,7 @@ err_kfree_name: kfree(dw->misc_dev.name); err_ida_remove: - ida_simple_remove(&xdata_ida, id); + ida_free(&xdata_ida, id); return err; } @@ -396,7 +397,7 @@ static void dw_xdata_pcie_remove(struct pci_dev *pdev) dw_xdata_stop(dw); misc_deregister(&dw->misc_dev); kfree(dw->misc_dev.name); - ida_simple_remove(&xdata_ida, id); + ida_free(&xdata_ida, id); } static const struct pci_device_id dw_xdata_pcie_id_table[] = { diff --git a/drivers/misc/echo/Kconfig b/drivers/misc/echo/Kconfig deleted file mode 100644 index ce0a37a47fc1..000000000000 --- a/drivers/misc/echo/Kconfig +++ /dev/null @@ -1,9 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -config ECHO - tristate "Line Echo Canceller support" - help - This driver provides line echo cancelling support for mISDN and - Zaptel drivers. - - To compile this driver as a module, choose M here. The module - will be called echo. diff --git a/drivers/misc/echo/Makefile b/drivers/misc/echo/Makefile deleted file mode 100644 index 5b97467ffb7d..000000000000 --- a/drivers/misc/echo/Makefile +++ /dev/null @@ -1,2 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -obj-$(CONFIG_ECHO) += echo.o diff --git a/drivers/misc/echo/echo.c b/drivers/misc/echo/echo.c deleted file mode 100644 index 3c4eaba86576..000000000000 --- a/drivers/misc/echo/echo.c +++ /dev/null @@ -1,589 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * SpanDSP - a series of DSP components for telephony - * - * echo.c - A line echo canceller. This code is being developed - * against and partially complies with G168. 
- * - * Written by Steve Underwood <steveu@coppice.org> - * and David Rowe <david_at_rowetel_dot_com> - * - * Copyright (C) 2001, 2003 Steve Underwood, 2007 David Rowe - * - * Based on a bit from here, a bit from there, eye of toad, ear of - * bat, 15 years of failed attempts by David and a few fried brain - * cells. - * - * All rights reserved. - */ - -/*! \file */ - -/* Implementation Notes - David Rowe - April 2007 - - This code started life as Steve's NLMS algorithm with a tap - rotation algorithm to handle divergence during double talk. I - added a Geigel Double Talk Detector (DTD) [2] and performed some - G168 tests. However I had trouble meeting the G168 requirements, - especially for double talk - there were always cases where my DTD - failed, for example where near end speech was under the 6dB - threshold required for declaring double talk. - - So I tried a two path algorithm [1], which has so far given better - results. The original tap rotation/Geigel algorithm is available - in SVN http://svn.rowetel.com/software/oslec/tags/before_16bit. - It's probably possible to make it work if some one wants to put some - serious work into it. - - At present no special treatment is provided for tones, which - generally cause NLMS algorithms to diverge. Initial runs of a - subset of the G168 tests for tones (e.g ./echo_test 6) show the - current algorithm is passing OK, which is kind of surprising. The - full set of tests needs to be performed to confirm this result. - - One other interesting change is that I have managed to get the NLMS - code to work with 16 bit coefficients, rather than the original 32 - bit coefficents. This reduces the MIPs and storage required. - I evaulated the 16 bit port using g168_tests.sh and listening tests - on 4 real-world samples. - - I also attempted the implementation of a block based NLMS update - [2] but although this passes g168_tests.sh it didn't converge well - on the real-world samples. 
I have no idea why, perhaps a scaling - problem. The block based code is also available in SVN - http://svn.rowetel.com/software/oslec/tags/before_16bit. If this - code can be debugged, it will lead to further reduction in MIPS, as - the block update code maps nicely onto DSP instruction sets (it's a - dot product) compared to the current sample-by-sample update. - - Steve also has some nice notes on echo cancellers in echo.h - - References: - - [1] Ochiai, Areseki, and Ogihara, "Echo Canceller with Two Echo - Path Models", IEEE Transactions on communications, COM-25, - No. 6, June - 1977. - https://www.rowetel.com/images/echo/dual_path_paper.pdf - - [2] The classic, very useful paper that tells you how to - actually build a real world echo canceller: - Messerschmitt, Hedberg, Cole, Haoui, Winship, "Digital Voice - Echo Canceller with a TMS320020, - https://www.rowetel.com/images/echo/spra129.pdf - - [3] I have written a series of blog posts on this work, here is - Part 1: http://www.rowetel.com/blog/?p=18 - - [4] The source code http://svn.rowetel.com/software/oslec/ - - [5] A nice reference on LMS filters: - https://en.wikipedia.org/wiki/Least_mean_squares_filter - - Credits: - - Thanks to Steve Underwood, Jean-Marc Valin, and Ramakrishnan - Muthukrishnan for their suggestions and email discussions. Thanks - also to those people who collected echo samples for me such as - Mark, Pawel, and Pavel. 
-*/ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/slab.h> - -#include "echo.h" - -#define MIN_TX_POWER_FOR_ADAPTION 64 -#define MIN_RX_POWER_FOR_ADAPTION 64 -#define DTD_HANGOVER 600 /* 600 samples, or 75ms */ -#define DC_LOG2BETA 3 /* log2() of DC filter Beta */ - -/* adapting coeffs using the traditional stochastic descent (N)LMS algorithm */ - -static inline void lms_adapt_bg(struct oslec_state *ec, int clean, int shift) -{ - int i; - - int offset1; - int offset2; - int factor; - int exp; - - if (shift > 0) - factor = clean << shift; - else - factor = clean >> -shift; - - /* Update the FIR taps */ - - offset2 = ec->curr_pos; - offset1 = ec->taps - offset2; - - for (i = ec->taps - 1; i >= offset1; i--) { - exp = (ec->fir_state_bg.history[i - offset1] * factor); - ec->fir_taps16[1][i] += (int16_t) ((exp + (1 << 14)) >> 15); - } - for (; i >= 0; i--) { - exp = (ec->fir_state_bg.history[i + offset2] * factor); - ec->fir_taps16[1][i] += (int16_t) ((exp + (1 << 14)) >> 15); - } -} - -static inline int top_bit(unsigned int bits) -{ - if (bits == 0) - return -1; - else - return (int)fls((int32_t) bits) - 1; -} - -struct oslec_state *oslec_create(int len, int adaption_mode) -{ - struct oslec_state *ec; - int i; - const int16_t *history; - - ec = kzalloc(sizeof(*ec), GFP_KERNEL); - if (!ec) - return NULL; - - ec->taps = len; - ec->log2taps = top_bit(len); - ec->curr_pos = ec->taps - 1; - - ec->fir_taps16[0] = - kcalloc(ec->taps, sizeof(int16_t), GFP_KERNEL); - if (!ec->fir_taps16[0]) - goto error_oom_0; - - ec->fir_taps16[1] = - kcalloc(ec->taps, sizeof(int16_t), GFP_KERNEL); - if (!ec->fir_taps16[1]) - goto error_oom_1; - - history = fir16_create(&ec->fir_state, ec->fir_taps16[0], ec->taps); - if (!history) - goto error_state; - history = fir16_create(&ec->fir_state_bg, ec->fir_taps16[1], ec->taps); - if (!history) - goto error_state_bg; - - for (i = 0; i < 5; i++) - ec->xvtx[i] = ec->yvtx[i] = ec->xvrx[i] = ec->yvrx[i] = 0; - - ec->cng_level = 
1000; - oslec_adaption_mode(ec, adaption_mode); - - ec->snapshot = kcalloc(ec->taps, sizeof(int16_t), GFP_KERNEL); - if (!ec->snapshot) - goto error_snap; - - ec->cond_met = 0; - ec->pstates = 0; - ec->ltxacc = ec->lrxacc = ec->lcleanacc = ec->lclean_bgacc = 0; - ec->ltx = ec->lrx = ec->lclean = ec->lclean_bg = 0; - ec->tx_1 = ec->tx_2 = ec->rx_1 = ec->rx_2 = 0; - ec->lbgn = ec->lbgn_acc = 0; - ec->lbgn_upper = 200; - ec->lbgn_upper_acc = ec->lbgn_upper << 13; - - return ec; - -error_snap: - fir16_free(&ec->fir_state_bg); -error_state_bg: - fir16_free(&ec->fir_state); -error_state: - kfree(ec->fir_taps16[1]); -error_oom_1: - kfree(ec->fir_taps16[0]); -error_oom_0: - kfree(ec); - return NULL; -} -EXPORT_SYMBOL_GPL(oslec_create); - -void oslec_free(struct oslec_state *ec) -{ - int i; - - fir16_free(&ec->fir_state); - fir16_free(&ec->fir_state_bg); - for (i = 0; i < 2; i++) - kfree(ec->fir_taps16[i]); - kfree(ec->snapshot); - kfree(ec); -} -EXPORT_SYMBOL_GPL(oslec_free); - -void oslec_adaption_mode(struct oslec_state *ec, int adaption_mode) -{ - ec->adaption_mode = adaption_mode; -} -EXPORT_SYMBOL_GPL(oslec_adaption_mode); - -void oslec_flush(struct oslec_state *ec) -{ - int i; - - ec->ltxacc = ec->lrxacc = ec->lcleanacc = ec->lclean_bgacc = 0; - ec->ltx = ec->lrx = ec->lclean = ec->lclean_bg = 0; - ec->tx_1 = ec->tx_2 = ec->rx_1 = ec->rx_2 = 0; - - ec->lbgn = ec->lbgn_acc = 0; - ec->lbgn_upper = 200; - ec->lbgn_upper_acc = ec->lbgn_upper << 13; - - ec->nonupdate_dwell = 0; - - fir16_flush(&ec->fir_state); - fir16_flush(&ec->fir_state_bg); - ec->fir_state.curr_pos = ec->taps - 1; - ec->fir_state_bg.curr_pos = ec->taps - 1; - for (i = 0; i < 2; i++) - memset(ec->fir_taps16[i], 0, ec->taps * sizeof(int16_t)); - - ec->curr_pos = ec->taps - 1; - ec->pstates = 0; -} -EXPORT_SYMBOL_GPL(oslec_flush); - -void oslec_snapshot(struct oslec_state *ec) -{ - memcpy(ec->snapshot, ec->fir_taps16[0], ec->taps * sizeof(int16_t)); -} -EXPORT_SYMBOL_GPL(oslec_snapshot); - -/* Dual Path 
Echo Canceller */ - -int16_t oslec_update(struct oslec_state *ec, int16_t tx, int16_t rx) -{ - int32_t echo_value; - int clean_bg; - int tmp; - int tmp1; - - /* - * Input scaling was found be required to prevent problems when tx - * starts clipping. Another possible way to handle this would be the - * filter coefficent scaling. - */ - - ec->tx = tx; - ec->rx = rx; - tx >>= 1; - rx >>= 1; - - /* - * Filter DC, 3dB point is 160Hz (I think), note 32 bit precision - * required otherwise values do not track down to 0. Zero at DC, Pole - * at (1-Beta) on real axis. Some chip sets (like Si labs) don't - * need this, but something like a $10 X100P card does. Any DC really - * slows down convergence. - * - * Note: removes some low frequency from the signal, this reduces the - * speech quality when listening to samples through headphones but may - * not be obvious through a telephone handset. - * - * Note that the 3dB frequency in radians is approx Beta, e.g. for Beta - * = 2^(-3) = 0.125, 3dB freq is 0.125 rads = 159Hz. - */ - - if (ec->adaption_mode & ECHO_CAN_USE_RX_HPF) { - tmp = rx << 15; - - /* - * Make sure the gain of the HPF is 1.0. This can still - * saturate a little under impulse conditions, and it might - * roll to 32768 and need clipping on sustained peak level - * signals. However, the scale of such clipping is small, and - * the error due to any saturation should not markedly affect - * the downstream processing. - */ - tmp -= (tmp >> 4); - - ec->rx_1 += -(ec->rx_1 >> DC_LOG2BETA) + tmp - ec->rx_2; - - /* - * hard limit filter to prevent clipping. Note that at this - * stage rx should be limited to +/- 16383 due to right shift - * above - */ - tmp1 = ec->rx_1 >> 15; - if (tmp1 > 16383) - tmp1 = 16383; - if (tmp1 < -16383) - tmp1 = -16383; - rx = tmp1; - ec->rx_2 = tmp; - } - - /* Block average of power in the filter states. Used for - adaption power calculation. 
*/ - - { - int new, old; - - /* efficient "out with the old and in with the new" algorithm so - we don't have to recalculate over the whole block of - samples. */ - new = (int)tx * (int)tx; - old = (int)ec->fir_state.history[ec->fir_state.curr_pos] * - (int)ec->fir_state.history[ec->fir_state.curr_pos]; - ec->pstates += - ((new - old) + (1 << (ec->log2taps - 1))) >> ec->log2taps; - if (ec->pstates < 0) - ec->pstates = 0; - } - - /* Calculate short term average levels using simple single pole IIRs */ - - ec->ltxacc += abs(tx) - ec->ltx; - ec->ltx = (ec->ltxacc + (1 << 4)) >> 5; - ec->lrxacc += abs(rx) - ec->lrx; - ec->lrx = (ec->lrxacc + (1 << 4)) >> 5; - - /* Foreground filter */ - - ec->fir_state.coeffs = ec->fir_taps16[0]; - echo_value = fir16(&ec->fir_state, tx); - ec->clean = rx - echo_value; - ec->lcleanacc += abs(ec->clean) - ec->lclean; - ec->lclean = (ec->lcleanacc + (1 << 4)) >> 5; - - /* Background filter */ - - echo_value = fir16(&ec->fir_state_bg, tx); - clean_bg = rx - echo_value; - ec->lclean_bgacc += abs(clean_bg) - ec->lclean_bg; - ec->lclean_bg = (ec->lclean_bgacc + (1 << 4)) >> 5; - - /* Background Filter adaption */ - - /* Almost always adap bg filter, just simple DT and energy - detection to minimise adaption in cases of strong double talk. - However this is not critical for the dual path algorithm. - */ - ec->factor = 0; - ec->shift = 0; - if (!ec->nonupdate_dwell) { - int p, logp, shift; - - /* Determine: - - f = Beta * clean_bg_rx/P ------ (1) - - where P is the total power in the filter states. - - The Boffins have shown that if we obey (1) we converge - quickly and avoid instability. 
- - The correct factor f must be in Q30, as this is the fixed - point format required by the lms_adapt_bg() function, - therefore the scaled version of (1) is: - - (2^30) * f = (2^30) * Beta * clean_bg_rx/P - factor = (2^30) * Beta * clean_bg_rx/P ----- (2) - - We have chosen Beta = 0.25 by experiment, so: - - factor = (2^30) * (2^-2) * clean_bg_rx/P - - (30 - 2 - log2(P)) - factor = clean_bg_rx 2 ----- (3) - - To avoid a divide we approximate log2(P) as top_bit(P), - which returns the position of the highest non-zero bit in - P. This approximation introduces an error as large as a - factor of 2, but the algorithm seems to handle it OK. - - Come to think of it a divide may not be a big deal on a - modern DSP, so its probably worth checking out the cycles - for a divide versus a top_bit() implementation. - */ - - p = MIN_TX_POWER_FOR_ADAPTION + ec->pstates; - logp = top_bit(p) + ec->log2taps; - shift = 30 - 2 - logp; - ec->shift = shift; - - lms_adapt_bg(ec, clean_bg, shift); - } - - /* very simple DTD to make sure we dont try and adapt with strong - near end speech */ - - ec->adapt = 0; - if ((ec->lrx > MIN_RX_POWER_FOR_ADAPTION) && (ec->lrx > ec->ltx)) - ec->nonupdate_dwell = DTD_HANGOVER; - if (ec->nonupdate_dwell) - ec->nonupdate_dwell--; - - /* Transfer logic */ - - /* These conditions are from the dual path paper [1], I messed with - them a bit to improve performance. 
*/ - - if ((ec->adaption_mode & ECHO_CAN_USE_ADAPTION) && - (ec->nonupdate_dwell == 0) && - /* (ec->Lclean_bg < 0.875*ec->Lclean) */ - (8 * ec->lclean_bg < 7 * ec->lclean) && - /* (ec->Lclean_bg < 0.125*ec->Ltx) */ - (8 * ec->lclean_bg < ec->ltx)) { - if (ec->cond_met == 6) { - /* - * BG filter has had better results for 6 consecutive - * samples - */ - ec->adapt = 1; - memcpy(ec->fir_taps16[0], ec->fir_taps16[1], - ec->taps * sizeof(int16_t)); - } else - ec->cond_met++; - } else - ec->cond_met = 0; - - /* Non-Linear Processing */ - - ec->clean_nlp = ec->clean; - if (ec->adaption_mode & ECHO_CAN_USE_NLP) { - /* - * Non-linear processor - a fancy way to say "zap small - * signals, to avoid residual echo due to (uLaw/ALaw) - * non-linearity in the channel.". - */ - - if ((16 * ec->lclean < ec->ltx)) { - /* - * Our e/c has improved echo by at least 24 dB (each - * factor of 2 is 6dB, so 2*2*2*2=16 is the same as - * 6+6+6+6=24dB) - */ - if (ec->adaption_mode & ECHO_CAN_USE_CNG) { - ec->cng_level = ec->lbgn; - - /* - * Very elementary comfort noise generation. - * Just random numbers rolled off very vaguely - * Hoth-like. DR: This noise doesn't sound - * quite right to me - I suspect there are some - * overflow issues in the filtering as it's too - * "crackly". - * TODO: debug this, maybe just play noise at - * high level or look at spectrum. - */ - - ec->cng_rndnum = - 1664525U * ec->cng_rndnum + 1013904223U; - ec->cng_filter = - ((ec->cng_rndnum & 0xFFFF) - 32768 + - 5 * ec->cng_filter) >> 3; - ec->clean_nlp = - (ec->cng_filter * ec->cng_level * 8) >> 14; - - } else if (ec->adaption_mode & ECHO_CAN_USE_CLIP) { - /* This sounds much better than CNG */ - if (ec->clean_nlp > ec->lbgn) - ec->clean_nlp = ec->lbgn; - if (ec->clean_nlp < -ec->lbgn) - ec->clean_nlp = -ec->lbgn; - } else { - /* - * just mute the residual, doesn't sound very - * good, used mainly in G168 tests - */ - ec->clean_nlp = 0; - } - } else { - /* - * Background noise estimator. 
I tried a few - * algorithms here without much luck. This very simple - * one seems to work best, we just average the level - * using a slow (1 sec time const) filter if the - * current level is less than a (experimentally - * derived) constant. This means we dont include high - * level signals like near end speech. When combined - * with CNG or especially CLIP seems to work OK. - */ - if (ec->lclean < 40) { - ec->lbgn_acc += abs(ec->clean) - ec->lbgn; - ec->lbgn = (ec->lbgn_acc + (1 << 11)) >> 12; - } - } - } - - /* Roll around the taps buffer */ - if (ec->curr_pos <= 0) - ec->curr_pos = ec->taps; - ec->curr_pos--; - - if (ec->adaption_mode & ECHO_CAN_DISABLE) - ec->clean_nlp = rx; - - /* Output scaled back up again to match input scaling */ - - return (int16_t) ec->clean_nlp << 1; -} -EXPORT_SYMBOL_GPL(oslec_update); - -/* This function is separated from the echo canceller is it is usually called - as part of the tx process. See rx HP (DC blocking) filter above, it's - the same design. - - Some soft phones send speech signals with a lot of low frequency - energy, e.g. down to 20Hz. This can make the hybrid non-linear - which causes the echo canceller to fall over. This filter can help - by removing any low frequency before it gets to the tx port of the - hybrid. - - It can also help by removing and DC in the tx signal. DC is bad - for LMS algorithms. - - This is one of the classic DC removal filters, adjusted to provide - sufficient bass rolloff to meet the above requirement to protect hybrids - from things that upset them. The difference between successive samples - produces a lousy HPF, and then a suitably placed pole flattens things out. - The final result is a nicely rolled off bass end. The filtering is - implemented with extended fractional precision, which noise shapes things, - giving very clean DC removal. 
-*/ - -int16_t oslec_hpf_tx(struct oslec_state *ec, int16_t tx) -{ - int tmp; - int tmp1; - - if (ec->adaption_mode & ECHO_CAN_USE_TX_HPF) { - tmp = tx << 15; - - /* - * Make sure the gain of the HPF is 1.0. The first can still - * saturate a little under impulse conditions, and it might - * roll to 32768 and need clipping on sustained peak level - * signals. However, the scale of such clipping is small, and - * the error due to any saturation should not markedly affect - * the downstream processing. - */ - tmp -= (tmp >> 4); - - ec->tx_1 += -(ec->tx_1 >> DC_LOG2BETA) + tmp - ec->tx_2; - tmp1 = ec->tx_1 >> 15; - if (tmp1 > 32767) - tmp1 = 32767; - if (tmp1 < -32767) - tmp1 = -32767; - tx = tmp1; - ec->tx_2 = tmp; - } - - return tx; -} -EXPORT_SYMBOL_GPL(oslec_hpf_tx); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("David Rowe"); -MODULE_DESCRIPTION("Open Source Line Echo Canceller"); -MODULE_VERSION("0.3.0"); diff --git a/drivers/misc/echo/echo.h b/drivers/misc/echo/echo.h deleted file mode 100644 index 56b4b95fd020..000000000000 --- a/drivers/misc/echo/echo.h +++ /dev/null @@ -1,175 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * SpanDSP - a series of DSP components for telephony - * - * echo.c - A line echo canceller. This code is being developed - * against and partially complies with G168. - * - * Written by Steve Underwood <steveu@coppice.org> - * and David Rowe <david_at_rowetel_dot_com> - * - * Copyright (C) 2001 Steve Underwood and 2007 David Rowe - * - * All rights reserved. - */ - -#ifndef __ECHO_H -#define __ECHO_H - -/* -Line echo cancellation for voice - -What does it do? - -This module aims to provide G.168-2002 compliant echo cancellation, to remove -electrical echoes (e.g. from 2-4 wire hybrids) from voice calls. - -How does it work? - -The heart of the echo cancellor is FIR filter. This is adapted to match the -echo impulse response of the telephone line. It must be long enough to -adequately cover the duration of that impulse response. 
The signal transmitted -to the telephone line is passed through the FIR filter. Once the FIR is -properly adapted, the resulting output is an estimate of the echo signal -received from the line. This is subtracted from the received signal. The result -is an estimate of the signal which originated at the far end of the line, free -from echos of our own transmitted signal. - -The least mean squares (LMS) algorithm is attributed to Widrow and Hoff, and -was introduced in 1960. It is the commonest form of filter adaption used in -things like modem line equalisers and line echo cancellers. There it works very -well. However, it only works well for signals of constant amplitude. It works -very poorly for things like speech echo cancellation, where the signal level -varies widely. This is quite easy to fix. If the signal level is normalised - -similar to applying AGC - LMS can work as well for a signal of varying -amplitude as it does for a modem signal. This normalised least mean squares -(NLMS) algorithm is the commonest one used for speech echo cancellation. Many -other algorithms exist - e.g. RLS (essentially the same as Kalman filtering), -FAP, etc. Some perform significantly better than NLMS. However, factors such -as computational complexity and patents favour the use of NLMS. - -A simple refinement to NLMS can improve its performance with speech. NLMS tends -to adapt best to the strongest parts of a signal. If the signal is white noise, -the NLMS algorithm works very well. However, speech has more low frequency than -high frequency content. Pre-whitening (i.e. filtering the signal to flatten its -spectrum) the echo signal improves the adapt rate for speech, and ensures the -final residual signal is not heavily biased towards high frequencies. A very -low complexity filter is adequate for this, so pre-whitening adds little to the -compute requirements of the echo canceller. 
- -An FIR filter adapted using pre-whitened NLMS performs well, provided certain -conditions are met: - - - The transmitted signal has poor self-correlation. - - There is no signal being generated within the environment being - cancelled. - -The difficulty is that neither of these can be guaranteed. - -If the adaption is performed while transmitting noise (or something fairly -noise like, such as voice) the adaption works very well. If the adaption is -performed while transmitting something highly correlative (typically narrow -band energy such as signalling tones or DTMF), the adaption can go seriously -wrong. The reason is there is only one solution for the adaption on a near -random signal - the impulse response of the line. For a repetitive signal, -there are any number of solutions which converge the adaption, and nothing -guides the adaption to choose the generalised one. Allowing an untrained -canceller to converge on this kind of narrowband energy probably a good thing, -since at least it cancels the tones. Allowing a well converged canceller to -continue converging on such energy is just a way to ruin its generalised -adaption. A narrowband detector is needed, so adapation can be suspended at -appropriate times. - -The adaption process is based on trying to eliminate the received signal. When -there is any signal from within the environment being cancelled it may upset -the adaption process. Similarly, if the signal we are transmitting is small, -noise may dominate and disturb the adaption process. If we can ensure that the -adaption is only performed when we are transmitting a significant signal level, -and the environment is not, things will be OK. Clearly, it is easy to tell when -we are sending a significant signal. Telling, if the environment is generating -a significant signal, and doing it with sufficient speed that the adaption will -not have diverged too much more we stop it, is a little harder. 
- -The key problem in detecting when the environment is sourcing significant -energy is that we must do this very quickly. Given a reasonably long sample of -the received signal, there are a number of strategies which may be used to -assess whether that signal contains a strong far end component. However, by the -time that assessment is complete the far end signal will have already caused -major mis-convergence in the adaption process. An assessment algorithm is -needed which produces a fairly accurate result from a very short burst of far -end energy. - -How do I use it? - -The echo cancellor processes both the transmit and receive streams sample by -sample. The processing function is not declared inline. Unfortunately, -cancellation requires many operations per sample, so the call overhead is only -a minor burden. -*/ - -#include "fir.h" -#include "oslec.h" - -/* - G.168 echo canceller descriptor. This defines the working state for a line - echo canceller. -*/ -struct oslec_state { - int16_t tx; - int16_t rx; - int16_t clean; - int16_t clean_nlp; - - int nonupdate_dwell; - int curr_pos; - int taps; - int log2taps; - int adaption_mode; - - int cond_met; - int32_t pstates; - int16_t adapt; - int32_t factor; - int16_t shift; - - /* Average levels and averaging filter states */ - int ltxacc; - int lrxacc; - int lcleanacc; - int lclean_bgacc; - int ltx; - int lrx; - int lclean; - int lclean_bg; - int lbgn; - int lbgn_acc; - int lbgn_upper; - int lbgn_upper_acc; - - /* foreground and background filter states */ - struct fir16_state_t fir_state; - struct fir16_state_t fir_state_bg; - int16_t *fir_taps16[2]; - - /* DC blocking filter states */ - int tx_1; - int tx_2; - int rx_1; - int rx_2; - - /* optional High Pass Filter states */ - int32_t xvtx[5]; - int32_t yvtx[5]; - int32_t xvrx[5]; - int32_t yvrx[5]; - - /* Parameters for the optional Hoth noise generator */ - int cng_level; - int cng_rndnum; - int cng_filter; - - /* snapshot sample of coeffs used for development 
*/ - int16_t *snapshot; -}; - -#endif /* __ECHO_H */ diff --git a/drivers/misc/echo/fir.h b/drivers/misc/echo/fir.h deleted file mode 100644 index 4d0821025223..000000000000 --- a/drivers/misc/echo/fir.h +++ /dev/null @@ -1,154 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * SpanDSP - a series of DSP components for telephony - * - * fir.h - General telephony FIR routines - * - * Written by Steve Underwood <steveu@coppice.org> - * - * Copyright (C) 2002 Steve Underwood - * - * All rights reserved. - */ - -#if !defined(_FIR_H_) -#define _FIR_H_ - -/* - Ideas for improvement: - - 1/ Rewrite filter for dual MAC inner loop. The issue here is handling - history sample offsets that are 16 bit aligned - the dual MAC needs - 32 bit aligmnent. There are some good examples in libbfdsp. - - 2/ Use the hardware circular buffer facility tohalve memory usage. - - 3/ Consider using internal memory. - - Using less memory might also improve speed as cache misses will be - reduced. A drop in MIPs and memory approaching 50% should be - possible. - - The foreground and background filters currenlty use a total of - about 10 MIPs/ch as measured with speedtest.c on a 256 TAP echo - can. -*/ - -/* - * 16 bit integer FIR descriptor. This defines the working state for a single - * instance of an FIR filter using 16 bit integer coefficients. - */ -struct fir16_state_t { - int taps; - int curr_pos; - const int16_t *coeffs; - int16_t *history; -}; - -/* - * 32 bit integer FIR descriptor. This defines the working state for a single - * instance of an FIR filter using 32 bit integer coefficients, and filtering - * 16 bit integer data. - */ -struct fir32_state_t { - int taps; - int curr_pos; - const int32_t *coeffs; - int16_t *history; -}; - -/* - * Floating point FIR descriptor. This defines the working state for a single - * instance of an FIR filter using floating point coefficients and data. 
- */ -struct fir_float_state_t { - int taps; - int curr_pos; - const float *coeffs; - float *history; -}; - -static inline const int16_t *fir16_create(struct fir16_state_t *fir, - const int16_t *coeffs, int taps) -{ - fir->taps = taps; - fir->curr_pos = taps - 1; - fir->coeffs = coeffs; - fir->history = kcalloc(taps, sizeof(int16_t), GFP_KERNEL); - return fir->history; -} - -static inline void fir16_flush(struct fir16_state_t *fir) -{ - memset(fir->history, 0, fir->taps * sizeof(int16_t)); -} - -static inline void fir16_free(struct fir16_state_t *fir) -{ - kfree(fir->history); -} - -static inline int16_t fir16(struct fir16_state_t *fir, int16_t sample) -{ - int32_t y; - int i; - int offset1; - int offset2; - - fir->history[fir->curr_pos] = sample; - - offset2 = fir->curr_pos; - offset1 = fir->taps - offset2; - y = 0; - for (i = fir->taps - 1; i >= offset1; i--) - y += fir->coeffs[i] * fir->history[i - offset1]; - for (; i >= 0; i--) - y += fir->coeffs[i] * fir->history[i + offset2]; - if (fir->curr_pos <= 0) - fir->curr_pos = fir->taps; - fir->curr_pos--; - return (int16_t) (y >> 15); -} - -static inline const int16_t *fir32_create(struct fir32_state_t *fir, - const int32_t *coeffs, int taps) -{ - fir->taps = taps; - fir->curr_pos = taps - 1; - fir->coeffs = coeffs; - fir->history = kcalloc(taps, sizeof(int16_t), GFP_KERNEL); - return fir->history; -} - -static inline void fir32_flush(struct fir32_state_t *fir) -{ - memset(fir->history, 0, fir->taps * sizeof(int16_t)); -} - -static inline void fir32_free(struct fir32_state_t *fir) -{ - kfree(fir->history); -} - -static inline int16_t fir32(struct fir32_state_t *fir, int16_t sample) -{ - int i; - int32_t y; - int offset1; - int offset2; - - fir->history[fir->curr_pos] = sample; - offset2 = fir->curr_pos; - offset1 = fir->taps - offset2; - y = 0; - for (i = fir->taps - 1; i >= offset1; i--) - y += fir->coeffs[i] * fir->history[i - offset1]; - for (; i >= 0; i--) - y += fir->coeffs[i] * fir->history[i + offset2]; - if 
(fir->curr_pos <= 0) - fir->curr_pos = fir->taps; - fir->curr_pos--; - return (int16_t) (y >> 15); -} - -#endif diff --git a/drivers/misc/echo/oslec.h b/drivers/misc/echo/oslec.h deleted file mode 100644 index f1adac143b90..000000000000 --- a/drivers/misc/echo/oslec.h +++ /dev/null @@ -1,81 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * OSLEC - A line echo canceller. This code is being developed - * against and partially complies with G168. Using code from SpanDSP - * - * Written by Steve Underwood <steveu@coppice.org> - * and David Rowe <david_at_rowetel_dot_com> - * - * Copyright (C) 2001 Steve Underwood and 2007-2008 David Rowe - * - * All rights reserved. - */ - -#ifndef __OSLEC_H -#define __OSLEC_H - -/* Mask bits for the adaption mode */ -#define ECHO_CAN_USE_ADAPTION 0x01 -#define ECHO_CAN_USE_NLP 0x02 -#define ECHO_CAN_USE_CNG 0x04 -#define ECHO_CAN_USE_CLIP 0x08 -#define ECHO_CAN_USE_TX_HPF 0x10 -#define ECHO_CAN_USE_RX_HPF 0x20 -#define ECHO_CAN_DISABLE 0x40 - -/** - * oslec_state: G.168 echo canceller descriptor. - * - * This defines the working state for a line echo canceller. - */ -struct oslec_state; - -/** - * oslec_create - Create a voice echo canceller context. - * @len: The length of the canceller, in samples. - * @return: The new canceller context, or NULL if the canceller could not be - * created. - */ -struct oslec_state *oslec_create(int len, int adaption_mode); - -/** - * oslec_free - Free a voice echo canceller context. - * @ec: The echo canceller context. - */ -void oslec_free(struct oslec_state *ec); - -/** - * oslec_flush - Flush (reinitialise) a voice echo canceller context. - * @ec: The echo canceller context. - */ -void oslec_flush(struct oslec_state *ec); - -/** - * oslec_adaption_mode - set the adaption mode of a voice echo canceller context. - * @ec The echo canceller context. - * @adaption_mode: The mode. 
- */ -void oslec_adaption_mode(struct oslec_state *ec, int adaption_mode); - -void oslec_snapshot(struct oslec_state *ec); - -/** - * oslec_update: Process a sample through a voice echo canceller. - * @ec: The echo canceller context. - * @tx: The transmitted audio sample. - * @rx: The received audio sample. - * - * The return value is the clean (echo cancelled) received sample. - */ -int16_t oslec_update(struct oslec_state *ec, int16_t tx, int16_t rx); - -/** - * oslec_hpf_tx: Process to high pass filter the tx signal. - * @ec: The echo canceller context. - * @tx: The transmitted auio sample. - * - * The return value is the HP filtered transmit sample, send this to your D/A. - */ -int16_t oslec_hpf_tx(struct oslec_state *ec, int16_t tx); - -#endif /* __OSLEC_H */ diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig index 2d240bfa819f..4d0ce47aa282 100644 --- a/drivers/misc/eeprom/Kconfig +++ b/drivers/misc/eeprom/Kconfig @@ -37,6 +37,7 @@ config EEPROM_AT25 depends on SPI && SYSFS select NVMEM select NVMEM_SYSFS + select SPI_MEM help Enable this driver to get read/write support to most SPI EEPROMs and Cypress FRAMs, @@ -46,20 +47,6 @@ config EEPROM_AT25 This driver can also be built as a module. If so, the module will be called at25. -config EEPROM_LEGACY - tristate "Old I2C EEPROM reader (DEPRECATED)" - depends on I2C && SYSFS - help - If you say yes here you get read-only access to the EEPROM data - available on modern memory DIMMs and Sony Vaio laptops via I2C. Such - EEPROMs could theoretically be available on other devices as well. - - This driver is deprecated and will be removed soon, please use the - better at24 driver instead. - - This driver can also be built as a module. If so, the module - will be called eeprom. - config EEPROM_MAX6875 tristate "Maxim MAX6874/5 power supply supervisor" depends on I2C @@ -111,11 +98,11 @@ config EEPROM_DIGSY_MTC_CFG If unsure, say N. 
config EEPROM_IDT_89HPESX - tristate "IDT 89HPESx PCIe-swtiches EEPROM / CSR support" + tristate "IDT 89HPESx PCIe-switches EEPROM / CSR support" depends on I2C && SYSFS help Enable this driver to get read/write access to EEPROM / CSRs - over IDT PCIe-swtich i2c-slave interface. + over IDT PCIe-switch i2c-slave interface. This driver can also be built as a module. If so, the module will be called idt_89hpesx. @@ -123,6 +110,8 @@ config EEPROM_IDT_89HPESX config EEPROM_EE1004 tristate "SPD EEPROMs on DDR4 memory modules" depends on I2C && SYSFS + select NVMEM + select NVMEM_SYSFS help Enable this driver to get read support to SPD EEPROMs following the JEDEC EE1004 standard. These are typically found on DDR4 @@ -131,4 +120,22 @@ config EEPROM_EE1004 This driver can also be built as a module. If so, the module will be called ee1004. +config EEPROM_M24LR + tristate "STMicroelectronics M24LR RFID/NFC EEPROM support" + depends on I2C && SYSFS + select REGMAP_I2C + select NVMEM + select NVMEM_SYSFS + help + This enables support for STMicroelectronics M24LR RFID/NFC EEPROM + chips. These dual-interface devices expose two I2C addresses: + one for EEPROM memory access and another for control and system + configuration (e.g. UID, password handling). + + This driver provides a sysfs interface for control functions and + integrates with the nvmem subsystem for EEPROM access. + + To compile this driver as a module, choose M here: the + module will be called m24lr. 
+ endmenu diff --git a/drivers/misc/eeprom/Makefile b/drivers/misc/eeprom/Makefile index a9b4b6579b75..8f311fd6a4ce 100644 --- a/drivers/misc/eeprom/Makefile +++ b/drivers/misc/eeprom/Makefile @@ -1,10 +1,10 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_EEPROM_AT24) += at24.o obj-$(CONFIG_EEPROM_AT25) += at25.o -obj-$(CONFIG_EEPROM_LEGACY) += eeprom.o obj-$(CONFIG_EEPROM_MAX6875) += max6875.o obj-$(CONFIG_EEPROM_93CX6) += eeprom_93cx6.o obj-$(CONFIG_EEPROM_93XX46) += eeprom_93xx46.o obj-$(CONFIG_EEPROM_DIGSY_MTC_CFG) += digsy_mtc_eeprom.o obj-$(CONFIG_EEPROM_IDT_89HPESX) += idt_89hpesx.o obj-$(CONFIG_EEPROM_EE1004) += ee1004.o +obj-$(CONFIG_EEPROM_M24LR) += m24lr.o diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c index dbbf7db4ff2f..f721825199ce 100644 --- a/drivers/misc/eeprom/at24.c +++ b/drivers/misc/eeprom/at24.c @@ -18,7 +18,6 @@ #include <linux/module.h> #include <linux/mutex.h> #include <linux/nvmem-provider.h> -#include <linux/of_device.h> #include <linux/pm_runtime.h> #include <linux/property.h> #include <linux/regmap.h> @@ -92,7 +91,7 @@ struct at24_data { * them for us. 
*/ u8 bank_addr_shift; - struct regmap *client_regmaps[]; + struct regmap *client_regmaps[] __counted_by(num_addresses); }; /* @@ -173,6 +172,10 @@ AT24_CHIP_DATA(at24_data_24mac402, 48 / 8, AT24_FLAG_MAC | AT24_FLAG_READONLY); AT24_CHIP_DATA(at24_data_24mac602, 64 / 8, AT24_FLAG_MAC | AT24_FLAG_READONLY); +AT24_CHIP_DATA(at24_data_24aa025e48, 48 / 8, + AT24_FLAG_READONLY); +AT24_CHIP_DATA(at24_data_24aa025e64, 64 / 8, + AT24_FLAG_READONLY); /* spd is a 24c02 in memory DIMMs */ AT24_CHIP_DATA(at24_data_spd, 2048 / 8, AT24_FLAG_READONLY | AT24_FLAG_IRUGO); @@ -191,13 +194,19 @@ AT24_CHIP_DATA(at24_data_24c16, 16384 / 8, 0); AT24_CHIP_DATA(at24_data_24cs16, 16, AT24_FLAG_SERIAL | AT24_FLAG_READONLY); AT24_CHIP_DATA(at24_data_24c32, 32768 / 8, AT24_FLAG_ADDR16); +/* M24C32-D Additional Write lockable page (M24C32-D order codes) */ +AT24_CHIP_DATA(at24_data_24c32d_wlp, 32, AT24_FLAG_ADDR16); AT24_CHIP_DATA(at24_data_24cs32, 16, AT24_FLAG_ADDR16 | AT24_FLAG_SERIAL | AT24_FLAG_READONLY); AT24_CHIP_DATA(at24_data_24c64, 65536 / 8, AT24_FLAG_ADDR16); +/* M24C64-D Additional Write lockable page (M24C64-D order codes) */ +AT24_CHIP_DATA(at24_data_24c64d_wlp, 32, AT24_FLAG_ADDR16); AT24_CHIP_DATA(at24_data_24cs64, 16, AT24_FLAG_ADDR16 | AT24_FLAG_SERIAL | AT24_FLAG_READONLY); AT24_CHIP_DATA(at24_data_24c128, 131072 / 8, AT24_FLAG_ADDR16); AT24_CHIP_DATA(at24_data_24c256, 262144 / 8, AT24_FLAG_ADDR16); +/* M24256E Additional Write lockable page (M24256E-F order codes) */ +AT24_CHIP_DATA(at24_data_24256e_wlp, 64, AT24_FLAG_ADDR16); AT24_CHIP_DATA(at24_data_24c512, 524288 / 8, AT24_FLAG_ADDR16); AT24_CHIP_DATA(at24_data_24c1024, 1048576 / 8, AT24_FLAG_ADDR16); AT24_CHIP_DATA_BS(at24_data_24c1025, 1048576 / 8, AT24_FLAG_ADDR16, 2); @@ -213,6 +222,8 @@ static const struct i2c_device_id at24_ids[] = { { "24cs02", (kernel_ulong_t)&at24_data_24cs02 }, { "24mac402", (kernel_ulong_t)&at24_data_24mac402 }, { "24mac602", (kernel_ulong_t)&at24_data_24mac602 }, + { "24aa025e48", 
(kernel_ulong_t)&at24_data_24aa025e48 }, + { "24aa025e64", (kernel_ulong_t)&at24_data_24aa025e64 }, { "spd", (kernel_ulong_t)&at24_data_spd }, { "24c02-vaio", (kernel_ulong_t)&at24_data_24c02_vaio }, { "24c04", (kernel_ulong_t)&at24_data_24c04 }, @@ -222,11 +233,14 @@ static const struct i2c_device_id at24_ids[] = { { "24c16", (kernel_ulong_t)&at24_data_24c16 }, { "24cs16", (kernel_ulong_t)&at24_data_24cs16 }, { "24c32", (kernel_ulong_t)&at24_data_24c32 }, + { "24c32d-wl", (kernel_ulong_t)&at24_data_24c32d_wlp }, { "24cs32", (kernel_ulong_t)&at24_data_24cs32 }, { "24c64", (kernel_ulong_t)&at24_data_24c64 }, + { "24c64-wl", (kernel_ulong_t)&at24_data_24c64d_wlp }, { "24cs64", (kernel_ulong_t)&at24_data_24cs64 }, { "24c128", (kernel_ulong_t)&at24_data_24c128 }, { "24c256", (kernel_ulong_t)&at24_data_24c256 }, + { "24256e-wl", (kernel_ulong_t)&at24_data_24256e_wlp }, { "24c512", (kernel_ulong_t)&at24_data_24c512 }, { "24c1024", (kernel_ulong_t)&at24_data_24c1024 }, { "24c1025", (kernel_ulong_t)&at24_data_24c1025 }, @@ -252,8 +266,10 @@ static const struct of_device_id at24_of_match[] = { { .compatible = "atmel,24c16", .data = &at24_data_24c16 }, { .compatible = "atmel,24cs16", .data = &at24_data_24cs16 }, { .compatible = "atmel,24c32", .data = &at24_data_24c32 }, + { .compatible = "atmel,24c32d-wl", .data = &at24_data_24c32d_wlp }, { .compatible = "atmel,24cs32", .data = &at24_data_24cs32 }, { .compatible = "atmel,24c64", .data = &at24_data_24c64 }, + { .compatible = "atmel,24c64d-wl", .data = &at24_data_24c64d_wlp }, { .compatible = "atmel,24cs64", .data = &at24_data_24cs64 }, { .compatible = "atmel,24c128", .data = &at24_data_24c128 }, { .compatible = "atmel,24c256", .data = &at24_data_24c256 }, @@ -261,11 +277,14 @@ static const struct of_device_id at24_of_match[] = { { .compatible = "atmel,24c1024", .data = &at24_data_24c1024 }, { .compatible = "atmel,24c1025", .data = &at24_data_24c1025 }, { .compatible = "atmel,24c2048", .data = &at24_data_24c2048 }, + { 
.compatible = "microchip,24aa025e48", .data = &at24_data_24aa025e48 }, + { .compatible = "microchip,24aa025e64", .data = &at24_data_24aa025e64 }, + { .compatible = "st,24256e-wl", .data = &at24_data_24256e_wlp }, { /* END OF LIST */ }, }; MODULE_DEVICE_TABLE(of, at24_of_match); -static const struct acpi_device_id __maybe_unused at24_acpi_ids[] = { +static const struct acpi_device_id at24_acpi_ids[] = { { "INT3499", (kernel_ulong_t)&at24_data_INT3499 }, { "TPF0001", (kernel_ulong_t)&at24_data_24c1024 }, { /* END OF LIST */ } @@ -431,12 +450,9 @@ static int at24_read(void *priv, unsigned int off, void *val, size_t count) if (off + count > at24->byte_len) return -EINVAL; - ret = pm_runtime_get_sync(dev); - if (ret < 0) { - pm_runtime_put_noidle(dev); + ret = pm_runtime_resume_and_get(dev); + if (ret) return ret; - } - /* * Read data from chip, protecting against concurrent updates * from this host, but not from other I2C masters. @@ -478,12 +494,9 @@ static int at24_write(void *priv, unsigned int off, void *val, size_t count) if (off + count > at24->byte_len) return -EINVAL; - ret = pm_runtime_get_sync(dev); - if (ret < 0) { - pm_runtime_put_noidle(dev); + ret = pm_runtime_resume_and_get(dev); + if (ret) return ret; - } - /* * Write data to chip, protecting against concurrent updates * from this host, but not from other I2C masters. @@ -509,32 +522,6 @@ static int at24_write(void *priv, unsigned int off, void *val, size_t count) return 0; } -static const struct at24_chip_data *at24_get_chip_data(struct device *dev) -{ - struct device_node *of_node = dev->of_node; - const struct at24_chip_data *cdata; - const struct i2c_device_id *id; - - id = i2c_match_id(at24_ids, to_i2c_client(dev)); - - /* - * The I2C core allows OF nodes compatibles to match against the - * I2C device ID table as a fallback, so check not only if an OF - * node is present but also if it matches an OF device ID entry. 
- */ - if (of_node && of_match_device(at24_of_match, dev)) - cdata = of_device_get_match_data(dev); - else if (id) - cdata = (void *)id->driver_data; - else - cdata = acpi_device_get_match_data(dev); - - if (!cdata) - return ERR_PTR(-ENODEV); - - return cdata; -} - static int at24_make_dummy_client(struct at24_data *at24, unsigned int index, struct i2c_client *base_client, struct regmap_config *regmap_config) @@ -581,6 +568,31 @@ static unsigned int at24_get_offset_adj(u8 flags, unsigned int byte_len) } } +static void at24_probe_temp_sensor(struct i2c_client *client) +{ + struct at24_data *at24 = i2c_get_clientdata(client); + struct i2c_board_info info = { .type = "jc42" }; + int ret; + u8 val; + + /* + * Byte 2 has value 11 for DDR3, earlier versions don't + * support the thermal sensor present flag + */ + ret = at24_read(at24, 2, &val, 1); + if (ret || val != 11) + return; + + /* Byte 32, bit 7 is set if temp sensor is present */ + ret = at24_read(at24, 32, &val, 1); + if (ret || !(val & BIT(7))) + return; + + info.addr = 0x18 | (client->addr & 7); + + i2c_new_client_device(client->adapter, &info); +} + static int at24_probe(struct i2c_client *client) { struct regmap_config regmap_config = { }; @@ -601,9 +613,9 @@ static int at24_probe(struct i2c_client *client) i2c_fn_block = i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WRITE_I2C_BLOCK); - cdata = at24_get_chip_data(dev); - if (IS_ERR(cdata)) - return PTR_ERR(cdata); + cdata = i2c_get_match_data(client); + if (!cdata) + return -ENODEV; err = device_property_read_u32(dev, "pagesize", &page_size); if (err) @@ -756,15 +768,6 @@ static int at24_probe(struct i2c_client *client) } pm_runtime_enable(dev); - at24->nvmem = devm_nvmem_register(dev, &nvmem_config); - if (IS_ERR(at24->nvmem)) { - pm_runtime_disable(dev); - if (!pm_runtime_status_suspended(dev)) - regulator_disable(at24->vcc_reg); - return dev_err_probe(dev, PTR_ERR(at24->nvmem), - "failed to register nvmem\n"); - } - /* * Perform a one-byte test 
read to verify that the chip is functional, * unless powering on the device is to be avoided during probe (i.e. @@ -780,6 +783,19 @@ static int at24_probe(struct i2c_client *client) } } + at24->nvmem = devm_nvmem_register(dev, &nvmem_config); + if (IS_ERR(at24->nvmem)) { + pm_runtime_disable(dev); + if (!pm_runtime_status_suspended(dev)) + regulator_disable(at24->vcc_reg); + return dev_err_probe(dev, PTR_ERR(at24->nvmem), + "failed to register nvmem\n"); + } + + /* If this a SPD EEPROM, probe for DDR3 thermal sensor */ + if (cdata == &at24_data_spd) + at24_probe_temp_sensor(client); + pm_runtime_idle(dev); if (writable) @@ -831,7 +847,7 @@ static struct i2c_driver at24_driver = { .name = "at24", .pm = &at24_pm_ops, .of_match_table = at24_of_match, - .acpi_match_table = ACPI_PTR(at24_acpi_ids), + .acpi_match_table = at24_acpi_ids, }, .probe = at24_probe, .remove = at24_remove, diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c index 65d49a6de1a7..883dfd0ed658 100644 --- a/drivers/misc/eeprom/at25.c +++ b/drivers/misc/eeprom/at25.c @@ -7,8 +7,10 @@ */ #include <linux/bits.h> +#include <linux/cleanup.h> #include <linux/delay.h> #include <linux/device.h> +#include <linux/iopoll.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/property.h> @@ -17,6 +19,7 @@ #include <linux/spi/eeprom.h> #include <linux/spi/spi.h> +#include <linux/spi/spi-mem.h> #include <linux/nvmem-provider.h> @@ -35,13 +38,12 @@ struct at25_data { struct spi_eeprom chip; - struct spi_device *spi; + struct spi_mem *spimem; struct mutex lock; unsigned addrlen; struct nvmem_config nvmem_config; struct nvmem_device *nvmem; u8 sernum[FM25_SN_LEN]; - u8 command[EE_MAXADDRLEN + 1]; }; #define AT25_WREN 0x06 /* latch the write enable */ @@ -74,20 +76,29 @@ struct at25_data { #define io_limit PAGE_SIZE /* bytes */ +/* Handle the address MSB as part of instruction byte */ +static u8 at25_instr(struct at25_data *at25, u8 instr, unsigned int off) +{ + if (!(at25->chip.flags 
& EE_INSTR_BIT3_IS_ADDR)) + return instr; + if (off < BIT(at25->addrlen * 8)) + return instr; + return instr | AT25_INSTR_BIT3; +} + static int at25_ee_read(void *priv, unsigned int offset, void *val, size_t count) { + u8 *bounce __free(kfree) = kmalloc(min(count, io_limit), GFP_KERNEL); struct at25_data *at25 = priv; char *buf = val; - size_t max_chunk = spi_max_transfer_size(at25->spi); unsigned int msg_offset = offset; size_t bytes_left = count; size_t segment; - u8 *cp; - ssize_t status; - struct spi_transfer t[2]; - struct spi_message m; - u8 instr; + int status; + + if (!bounce) + return -ENOMEM; if (unlikely(offset >= at25->chip.byte_len)) return -EINVAL; @@ -97,87 +108,67 @@ static int at25_ee_read(void *priv, unsigned int offset, return -EINVAL; do { - segment = min(bytes_left, max_chunk); - cp = at25->command; - - instr = AT25_READ; - if (at25->chip.flags & EE_INSTR_BIT3_IS_ADDR) - if (msg_offset >= BIT(at25->addrlen * 8)) - instr |= AT25_INSTR_BIT3; - - mutex_lock(&at25->lock); - - *cp++ = instr; - - /* 8/16/24-bit address is written MSB first */ - switch (at25->addrlen) { - default: /* case 3 */ - *cp++ = msg_offset >> 16; - fallthrough; - case 2: - *cp++ = msg_offset >> 8; - fallthrough; - case 1: - case 0: /* can't happen: for better code generation */ - *cp++ = msg_offset >> 0; - } + struct spi_mem_op op; - spi_message_init(&m); - memset(t, 0, sizeof(t)); + segment = min(bytes_left, io_limit); - t[0].tx_buf = at25->command; - t[0].len = at25->addrlen + 1; - spi_message_add_tail(&t[0], &m); + op = (struct spi_mem_op)SPI_MEM_OP(SPI_MEM_OP_CMD(at25_instr(at25, AT25_READ, + msg_offset), 1), + SPI_MEM_OP_ADDR(at25->addrlen, msg_offset, 1), + SPI_MEM_OP_NO_DUMMY, + SPI_MEM_OP_DATA_IN(segment, bounce, 1)); - t[1].rx_buf = buf; - t[1].len = segment; - spi_message_add_tail(&t[1], &m); - - status = spi_sync(at25->spi, &m); + status = spi_mem_adjust_op_size(at25->spimem, &op); + if (status) + return status; + segment = op.data.nbytes; + mutex_lock(&at25->lock); 
+ status = spi_mem_exec_op(at25->spimem, &op); mutex_unlock(&at25->lock); - if (status) return status; + memcpy(buf, bounce, segment); msg_offset += segment; buf += segment; bytes_left -= segment; } while (bytes_left > 0); - dev_dbg(&at25->spi->dev, "read %zu bytes at %d\n", + dev_dbg(&at25->spimem->spi->dev, "read %zu bytes at %d\n", count, offset); return 0; } -/* Read extra registers as ID or serial number */ +/* + * Read extra registers as ID or serial number + * + * Allow for the callers to provide @buf on stack (not necessary DMA-capable) + * by allocating a bounce buffer internally. + */ static int fm25_aux_read(struct at25_data *at25, u8 *buf, uint8_t command, int len) { + u8 *bounce __free(kfree) = kmalloc(len, GFP_KERNEL); + struct spi_mem_op op; int status; - struct spi_transfer t[2]; - struct spi_message m; - - spi_message_init(&m); - memset(t, 0, sizeof(t)); - t[0].tx_buf = at25->command; - t[0].len = 1; - spi_message_add_tail(&t[0], &m); - - t[1].rx_buf = buf; - t[1].len = len; - spi_message_add_tail(&t[1], &m); + if (!bounce) + return -ENOMEM; - mutex_lock(&at25->lock); + op = (struct spi_mem_op)SPI_MEM_OP(SPI_MEM_OP_CMD(command, 1), + SPI_MEM_OP_NO_ADDR, + SPI_MEM_OP_NO_DUMMY, + SPI_MEM_OP_DATA_IN(len, bounce, 1)); - at25->command[0] = command; + status = spi_mem_exec_op(at25->spimem, &op); + dev_dbg(&at25->spimem->spi->dev, "read %d aux bytes --> %d\n", len, status); + if (status) + return status; - status = spi_sync(at25->spi, &m); - dev_dbg(&at25->spi->dev, "read %d aux bytes --> %d\n", len, status); + memcpy(buf, bounce, len); - mutex_unlock(&at25->lock); - return status; + return 0; } static ssize_t sernum_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -195,14 +186,47 @@ static struct attribute *sernum_attrs[] = { }; ATTRIBUTE_GROUPS(sernum); +/* + * Poll Read Status Register with timeout + * + * Return: + * 0, if the chip is ready + * [positive] Status Register value as-is, if the chip is busy + * [negative] error code 
in case of read failure + */ +static int at25_wait_ready(struct at25_data *at25) +{ + u8 *bounce __free(kfree) = kmalloc(1, GFP_KERNEL); + struct spi_mem_op op; + int status; + + if (!bounce) + return -ENOMEM; + + op = (struct spi_mem_op)SPI_MEM_OP(SPI_MEM_OP_CMD(AT25_RDSR, 1), + SPI_MEM_OP_NO_ADDR, + SPI_MEM_OP_NO_DUMMY, + SPI_MEM_OP_DATA_IN(1, bounce, 1)); + + read_poll_timeout(spi_mem_exec_op, status, + status || !(bounce[0] & AT25_SR_nRDY), false, + USEC_PER_MSEC, USEC_PER_MSEC * EE_TIMEOUT, + at25->spimem, &op); + if (status < 0) + return status; + if (!(bounce[0] & AT25_SR_nRDY)) + return 0; + + return bounce[0]; +} + static int at25_ee_write(void *priv, unsigned int off, void *val, size_t count) { + u8 *bounce __free(kfree) = kmalloc(min(count, io_limit), GFP_KERNEL); struct at25_data *at25 = priv; - size_t maxsz = spi_max_transfer_size(at25->spi); const char *buf = val; - int status = 0; - unsigned buf_size; - u8 *bounce; + unsigned int buf_size; + int status; if (unlikely(off >= at25->chip.byte_len)) return -EFBIG; @@ -211,11 +235,8 @@ static int at25_ee_write(void *priv, unsigned int off, void *val, size_t count) if (unlikely(!count)) return -EINVAL; - /* Temp buffer starts with command and address */ buf_size = at25->chip.page_size; - if (buf_size > io_limit) - buf_size = io_limit; - bounce = kmalloc(buf_size + at25->addrlen + 1, GFP_KERNEL); + if (!bounce) return -ENOMEM; @@ -223,85 +244,64 @@ static int at25_ee_write(void *priv, unsigned int off, void *val, size_t count) * For write, rollover is within the page ... so we write at * most one page, then manually roll over to the next page. 
*/ - mutex_lock(&at25->lock); + guard(mutex)(&at25->lock); do { - unsigned long timeout, retries; - unsigned segment; - unsigned offset = off; - u8 *cp = bounce; - int sr; - u8 instr; - - *cp = AT25_WREN; - status = spi_write(at25->spi, cp, 1); - if (status < 0) { - dev_dbg(&at25->spi->dev, "WREN --> %d\n", status); - break; - } + struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(AT25_WREN, 1), + SPI_MEM_OP_NO_ADDR, + SPI_MEM_OP_NO_DUMMY, + SPI_MEM_OP_NO_DATA); + unsigned int segment; - instr = AT25_WRITE; - if (at25->chip.flags & EE_INSTR_BIT3_IS_ADDR) - if (offset >= BIT(at25->addrlen * 8)) - instr |= AT25_INSTR_BIT3; - *cp++ = instr; - - /* 8/16/24-bit address is written MSB first */ - switch (at25->addrlen) { - default: /* case 3 */ - *cp++ = offset >> 16; - fallthrough; - case 2: - *cp++ = offset >> 8; - fallthrough; - case 1: - case 0: /* can't happen: for better code generation */ - *cp++ = offset >> 0; + status = spi_mem_exec_op(at25->spimem, &op); + if (status < 0) { + dev_dbg(&at25->spimem->spi->dev, "WREN --> %d\n", status); + return status; } /* Write as much of a page as we can */ - segment = buf_size - (offset % buf_size); + segment = buf_size - (off % buf_size); if (segment > count) segment = count; - if (segment > maxsz) - segment = maxsz; - memcpy(cp, buf, segment); - status = spi_write(at25->spi, bounce, - segment + at25->addrlen + 1); - dev_dbg(&at25->spi->dev, "write %u bytes at %u --> %d\n", - segment, offset, status); - if (status < 0) - break; + if (segment > io_limit) + segment = io_limit; + + op = (struct spi_mem_op)SPI_MEM_OP(SPI_MEM_OP_CMD(at25_instr(at25, AT25_WRITE, off), + 1), + SPI_MEM_OP_ADDR(at25->addrlen, off, 1), + SPI_MEM_OP_NO_DUMMY, + SPI_MEM_OP_DATA_OUT(segment, bounce, 1)); + + status = spi_mem_adjust_op_size(at25->spimem, &op); + if (status) + return status; + segment = op.data.nbytes; + + memcpy(bounce, buf, segment); + + status = spi_mem_exec_op(at25->spimem, &op); + dev_dbg(&at25->spimem->spi->dev, "write %u bytes at %u 
--> %d\n", + segment, off, status); + if (status) + return status; /* * REVISIT this should detect (or prevent) failed writes * to read-only sections of the EEPROM... */ - /* Wait for non-busy status */ - timeout = jiffies + msecs_to_jiffies(EE_TIMEOUT); - retries = 0; - do { - - sr = spi_w8r8(at25->spi, AT25_RDSR); - if (sr < 0 || (sr & AT25_SR_nRDY)) { - dev_dbg(&at25->spi->dev, - "rdsr --> %d (%02x)\n", sr, sr); - /* at HZ=100, this is sloooow */ - msleep(1); - continue; - } - if (!(sr & AT25_SR_nRDY)) - break; - } while (retries++ < 3 || time_before_eq(jiffies, timeout)); - - if ((sr < 0) || (sr & AT25_SR_nRDY)) { - dev_err(&at25->spi->dev, + status = at25_wait_ready(at25); + if (status < 0) { + dev_err_probe(&at25->spimem->spi->dev, status, + "Read Status Redister command failed\n"); + return status; + } + if (status) { + dev_dbg(&at25->spimem->spi->dev, + "Status %02x\n", status); + dev_err(&at25->spimem->spi->dev, "write %u bytes offset %u, timeout after %u msecs\n", - segment, offset, - jiffies_to_msecs(jiffies - - (timeout - EE_TIMEOUT))); - status = -ETIMEDOUT; - break; + segment, off, EE_TIMEOUT); + return -ETIMEDOUT; } off += segment; @@ -310,9 +310,6 @@ static int at25_ee_write(void *priv, unsigned int off, void *val, size_t count) } while (count > 0); - mutex_unlock(&at25->lock); - - kfree(bounce); return status; } @@ -382,35 +379,56 @@ static int at25_fram_to_chip(struct device *dev, struct spi_eeprom *chip) struct at25_data *at25 = container_of(chip, struct at25_data, chip); u8 sernum[FM25_SN_LEN]; u8 id[FM25_ID_LEN]; + u32 val; int i; strscpy(chip->name, "fm25", sizeof(chip->name)); - /* Get ID of chip */ - fm25_aux_read(at25, id, FM25_RDID, FM25_ID_LEN); - if (id[6] != 0xc2) { - dev_err(dev, "Error: no Cypress FRAM (id %02x)\n", id[6]); - return -ENODEV; - } - /* Set size found in ID */ - if (id[7] < 0x21 || id[7] > 0x26) { - dev_err(dev, "Error: unsupported size (id %02x)\n", id[7]); - return -ENODEV; + if (!device_property_read_u32(dev, "size", 
&val)) { + chip->byte_len = val; + } else { + /* Get ID of chip */ + fm25_aux_read(at25, id, FM25_RDID, FM25_ID_LEN); + /* There are inside-out FRAM variations, detect them and reverse the ID bytes */ + if (id[6] == 0x7f && id[2] == 0xc2) + for (i = 0; i < ARRAY_SIZE(id) / 2; i++) { + u8 tmp = id[i]; + int j = ARRAY_SIZE(id) - i - 1; + + id[i] = id[j]; + id[j] = tmp; + } + if (id[6] != 0xc2) { + dev_err(dev, "Error: no Cypress FRAM with device ID (manufacturer ID bank 7: %02x)\n", id[6]); + return -ENODEV; + } + + switch (id[7]) { + case 0x21 ... 0x26: + chip->byte_len = BIT(id[7] - 0x21 + 4) * 1024; + break; + case 0x2a ... 0x30: + /* CY15B102QN ... CY15B116QN */ + chip->byte_len = BIT(((id[7] >> 1) & 0xf) + 13); + break; + default: + dev_err(dev, "Error: unsupported size (id %02x)\n", id[7]); + return -ENODEV; + } + + if (id[8]) { + fm25_aux_read(at25, sernum, FM25_RDSN, FM25_SN_LEN); + /* Swap byte order */ + for (i = 0; i < FM25_SN_LEN; i++) + at25->sernum[i] = sernum[FM25_SN_LEN - 1 - i]; + } } - chip->byte_len = BIT(id[7] - 0x21 + 4) * 1024; if (chip->byte_len > 64 * 1024) chip->flags |= EE_ADDR3; else chip->flags |= EE_ADDR2; - if (id[8]) { - fm25_aux_read(at25, sernum, FM25_RDSN, FM25_SN_LEN); - /* Swap byte order */ - for (i = 0; i < FM25_SN_LEN; i++) - at25->sernum[i] = sernum[FM25_SN_LEN - 1 - i]; - } - chip->page_size = PAGE_SIZE; return 0; } @@ -429,31 +447,33 @@ static const struct spi_device_id at25_spi_ids[] = { }; MODULE_DEVICE_TABLE(spi, at25_spi_ids); -static int at25_probe(struct spi_device *spi) +static int at25_probe(struct spi_mem *mem) { - struct at25_data *at25 = NULL; - int err; - int sr; + struct spi_device *spi = mem->spi; struct spi_eeprom *pdata; + struct at25_data *at25; bool is_fram; + int err; + + at25 = devm_kzalloc(&spi->dev, sizeof(*at25), GFP_KERNEL); + if (!at25) + return -ENOMEM; + + at25->spimem = mem; /* * Ping the chip ... the status register is pretty portable, - * unlike probing manufacturer IDs. 
We do expect that system - * firmware didn't write it in the past few milliseconds! + * unlike probing manufacturer IDs. */ - sr = spi_w8r8(spi, AT25_RDSR); - if (sr < 0 || sr & AT25_SR_nRDY) { - dev_dbg(&spi->dev, "rdsr --> %d (%02x)\n", sr, sr); + err = at25_wait_ready(at25); + if (err < 0) + return dev_err_probe(&spi->dev, err, "Read Status Register command failed\n"); + if (err) { + dev_err(&spi->dev, "Not ready (%02x)\n", err); return -ENXIO; } - at25 = devm_kzalloc(&spi->dev, sizeof(*at25), GFP_KERNEL); - if (!at25) - return -ENOMEM; - mutex_init(&at25->lock); - at25->spi = spi; spi_set_drvdata(spi, at25); is_fram = fwnode_device_is_compatible(dev_fwnode(&spi->dev), "cypress,fm25"); @@ -514,19 +534,20 @@ static int at25_probe(struct spi_device *spi) /*-------------------------------------------------------------------------*/ -static struct spi_driver at25_driver = { - .driver = { - .name = "at25", - .of_match_table = at25_of_match, - .dev_groups = sernum_groups, +static struct spi_mem_driver at25_driver = { + .spidrv = { + .driver = { + .name = "at25", + .of_match_table = at25_of_match, + .dev_groups = sernum_groups, + }, + .id_table = at25_spi_ids, }, .probe = at25_probe, - .id_table = at25_spi_ids, }; -module_spi_driver(at25_driver); +module_spi_mem_driver(at25_driver); MODULE_DESCRIPTION("Driver for most SPI EEPROMs"); MODULE_AUTHOR("David Brownell"); MODULE_LICENSE("GPL"); -MODULE_ALIAS("spi:at25"); diff --git a/drivers/misc/eeprom/digsy_mtc_eeprom.c b/drivers/misc/eeprom/digsy_mtc_eeprom.c index f1f766b70965..ee58f7ce5bfa 100644 --- a/drivers/misc/eeprom/digsy_mtc_eeprom.c +++ b/drivers/misc/eeprom/digsy_mtc_eeprom.c @@ -14,13 +14,12 @@ * and delete this driver. 
*/ -#include <linux/gpio.h> #include <linux/gpio/machine.h> #include <linux/init.h> #include <linux/platform_device.h> +#include <linux/property.h> #include <linux/spi/spi.h> #include <linux/spi/spi_gpio.h> -#include <linux/eeprom_93xx46.h> #define GPIO_EEPROM_CLK 216 #define GPIO_EEPROM_CS 210 @@ -29,22 +28,13 @@ #define GPIO_EEPROM_OE 255 #define EE_SPI_BUS_NUM 1 -static void digsy_mtc_op_prepare(void *p) -{ - /* enable */ - gpio_set_value(GPIO_EEPROM_OE, 0); -} - -static void digsy_mtc_op_finish(void *p) -{ - /* disable */ - gpio_set_value(GPIO_EEPROM_OE, 1); -} +static const struct property_entry digsy_mtc_spi_properties[] = { + PROPERTY_ENTRY_U32("data-size", 8), + { } +}; -struct eeprom_93xx46_platform_data digsy_mtc_eeprom_data = { - .flags = EE_ADDR8, - .prepare = digsy_mtc_op_prepare, - .finish = digsy_mtc_op_finish, +static const struct software_node digsy_mtc_spi_node = { + .properties = digsy_mtc_spi_properties, }; static struct spi_gpio_platform_data eeprom_spi_gpio_data = { @@ -60,7 +50,7 @@ static struct platform_device digsy_mtc_eeprom = { }; static struct gpiod_lookup_table eeprom_spi_gpiod_table = { - .dev_id = "spi_gpio", + .dev_id = "spi_gpio.1", .table = { GPIO_LOOKUP("gpio@b00", GPIO_EEPROM_CLK, "sck", GPIO_ACTIVE_HIGH), @@ -70,18 +60,19 @@ static struct gpiod_lookup_table eeprom_spi_gpiod_table = { "miso", GPIO_ACTIVE_HIGH), GPIO_LOOKUP("gpio@b00", GPIO_EEPROM_CS, "cs", GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("gpio@b00", GPIO_EEPROM_OE, + "select", GPIO_ACTIVE_LOW), { }, }, }; static struct spi_board_info digsy_mtc_eeprom_info[] __initdata = { { - .modalias = "93xx46", + .modalias = "eeprom-93xx46", .max_speed_hz = 1000000, .bus_num = EE_SPI_BUS_NUM, .chip_select = 0, .mode = SPI_MODE_0, - .platform_data = &digsy_mtc_eeprom_data, }, }; @@ -89,15 +80,18 @@ static int __init digsy_mtc_eeprom_devices_init(void) { int ret; - ret = gpio_request_one(GPIO_EEPROM_OE, GPIOF_OUT_INIT_HIGH, - "93xx46 EEPROMs OE"); - if (ret) { - pr_err("can't request gpio 
%d\n", GPIO_EEPROM_OE); - return ret; - } gpiod_add_lookup_table(&eeprom_spi_gpiod_table); spi_register_board_info(digsy_mtc_eeprom_info, ARRAY_SIZE(digsy_mtc_eeprom_info)); - return platform_device_register(&digsy_mtc_eeprom); + + ret = device_add_software_node(&digsy_mtc_eeprom.dev, &digsy_mtc_spi_node); + if (ret) + return ret; + + ret = platform_device_register(&digsy_mtc_eeprom); + if (ret) + device_remove_software_node(&digsy_mtc_eeprom.dev); + + return ret; } device_initcall(digsy_mtc_eeprom_devices_init); diff --git a/drivers/misc/eeprom/ee1004.c b/drivers/misc/eeprom/ee1004.c index a1acd77130f2..e13f9fdd9d7b 100644 --- a/drivers/misc/eeprom/ee1004.c +++ b/drivers/misc/eeprom/ee1004.c @@ -9,12 +9,14 @@ * Copyright (C) 2008 Wolfram Sang, Pengutronix */ +#include <linux/device.h> #include <linux/i2c.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/mutex.h> +#include <linux/nvmem-provider.h> /* * DDR4 memory modules use special EEPROMs following the Jedec EE1004 @@ -31,6 +33,7 @@ * over performance. */ +#define EE1004_MAX_BUSSES 8 #define EE1004_ADDR_SET_PAGE 0x36 #define EE1004_NUM_PAGES 2 #define EE1004_PAGE_SIZE 256 @@ -42,23 +45,45 @@ * from page selection to end of read. 
*/ static DEFINE_MUTEX(ee1004_bus_lock); -static struct i2c_client *ee1004_set_page[EE1004_NUM_PAGES]; -static unsigned int ee1004_dev_count; -static int ee1004_current_page; + +static struct ee1004_bus_data { + struct i2c_adapter *adap; + struct i2c_client *set_page[EE1004_NUM_PAGES]; + unsigned int dev_count; + int current_page; +} ee1004_bus_data[EE1004_MAX_BUSSES]; static const struct i2c_device_id ee1004_ids[] = { - { "ee1004", 0 }, + { "ee1004" }, { } }; MODULE_DEVICE_TABLE(i2c, ee1004_ids); /*-------------------------------------------------------------------------*/ -static int ee1004_get_current_page(void) +static struct ee1004_bus_data *ee1004_get_bus_data(struct i2c_adapter *adap) +{ + int i; + + for (i = 0; i < EE1004_MAX_BUSSES; i++) + if (ee1004_bus_data[i].adap == adap) + return ee1004_bus_data + i; + + /* If not existent yet, create new entry */ + for (i = 0; i < EE1004_MAX_BUSSES; i++) + if (!ee1004_bus_data[i].adap) { + ee1004_bus_data[i].adap = adap; + return ee1004_bus_data + i; + } + + return NULL; +} + +static int ee1004_get_current_page(struct ee1004_bus_data *bd) { int err; - err = i2c_smbus_read_byte(ee1004_set_page[0]); + err = i2c_smbus_read_byte(bd->set_page[0]); if (err == -ENXIO) { /* Nack means page 1 is selected */ return 1; @@ -72,28 +97,29 @@ static int ee1004_get_current_page(void) return 0; } -static int ee1004_set_current_page(struct device *dev, int page) +static int ee1004_set_current_page(struct i2c_client *client, int page) { + struct ee1004_bus_data *bd = i2c_get_clientdata(client); int ret; - if (page == ee1004_current_page) + if (page == bd->current_page) return 0; /* Data is ignored */ - ret = i2c_smbus_write_byte(ee1004_set_page[page], 0x00); + ret = i2c_smbus_write_byte(bd->set_page[page], 0x00); /* * Don't give up just yet. Some memory modules will select the page * but not ack the command. Check which page is selected now. 
*/ - if (ret == -ENXIO && ee1004_get_current_page() == page) + if (ret == -ENXIO && ee1004_get_current_page(bd) == page) ret = 0; if (ret < 0) { - dev_err(dev, "Failed to select page %d (%d)\n", page, ret); + dev_err(&client->dev, "Failed to select page %d (%d)\n", page, ret); return ret; } - dev_dbg(dev, "Selected page %d\n", page); - ee1004_current_page = page; + dev_dbg(&client->dev, "Selected page %d\n", page); + bd->current_page = page; return 0; } @@ -106,7 +132,7 @@ static ssize_t ee1004_eeprom_read(struct i2c_client *client, char *buf, page = offset >> EE1004_PAGE_SHIFT; offset &= (1 << EE1004_PAGE_SHIFT) - 1; - status = ee1004_set_current_page(&client->dev, page); + status = ee1004_set_current_page(client, page); if (status) return status; @@ -120,13 +146,17 @@ static ssize_t ee1004_eeprom_read(struct i2c_client *client, char *buf, return i2c_smbus_read_i2c_block_data_or_emulated(client, offset, count, buf); } -static ssize_t eeprom_read(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, - char *buf, loff_t off, size_t count) +static int ee1004_read(void *priv, unsigned int off, void *val, size_t count) { - struct i2c_client *client = kobj_to_i2c_client(kobj); - size_t requested = count; - int ret = 0; + struct i2c_client *client = priv; + char *buf = val; + int ret; + + if (unlikely(!count)) + return count; + + if (off + count > EE1004_EEPROM_SIZE) + return -EINVAL; /* * Read data from chip, protecting against concurrent access to @@ -136,95 +166,174 @@ static ssize_t eeprom_read(struct file *filp, struct kobject *kobj, while (count) { ret = ee1004_eeprom_read(client, buf, off, count); - if (ret < 0) - goto out; + if (ret < 0) { + mutex_unlock(&ee1004_bus_lock); + return ret; + } buf += ret; off += ret; count -= ret; } -out: + mutex_unlock(&ee1004_bus_lock); - return ret < 0 ? 
ret : requested; + return 0; } -static BIN_ATTR_RO(eeprom, EE1004_EEPROM_SIZE); +static void ee1004_probe_temp_sensor(struct i2c_client *client) +{ + struct i2c_board_info info = { .type = "jc42" }; + unsigned short addr = 0x18 | (client->addr & 7); + unsigned short addr_list[] = { addr, I2C_CLIENT_END }; + u8 data[2]; + int ret; -static struct bin_attribute *ee1004_attrs[] = { - &bin_attr_eeprom, - NULL -}; + /* byte 14, bit 7 is set if temp sensor is present */ + ret = ee1004_eeprom_read(client, data, 14, 1); + if (ret != 1) + return; + + if (!(data[0] & BIT(7))) { + /* + * If the SPD data suggests that there is no temperature + * sensor, it may still be there for SPD revision 1.0. + * See SPD Annex L, Revision 1 and 2, for details. + * Check DIMM type and SPD revision; if it is a DDR4 + * with SPD revision 1.0, check the thermal sensor address + * and instantiate the jc42 driver if a chip is found at + * that address. + * It is not necessary to check if there is a chip at the + * temperature sensor address since i2c_new_scanned_device() + * will do that and return silently if no chip is found. 
+ */ + ret = ee1004_eeprom_read(client, data, 1, 2); + if (ret != 2 || data[0] != 0x10 || data[1] != 0x0c) + return; + } + i2c_new_scanned_device(client->adapter, &info, addr_list, NULL); +} -BIN_ATTRIBUTE_GROUPS(ee1004); +static void ee1004_cleanup(int idx, struct ee1004_bus_data *bd) +{ + if (--bd->dev_count == 0) { + while (--idx >= 0) + i2c_unregister_device(bd->set_page[idx]); + memset(bd, 0, sizeof(struct ee1004_bus_data)); + } +} -static void ee1004_cleanup(int idx) +static void ee1004_cleanup_bus_data(void *data) { - if (--ee1004_dev_count == 0) - while (--idx >= 0) { - i2c_unregister_device(ee1004_set_page[idx]); - ee1004_set_page[idx] = NULL; - } + struct ee1004_bus_data *bd = data; + + /* Remove page select clients if this is the last device */ + mutex_lock(&ee1004_bus_lock); + ee1004_cleanup(EE1004_NUM_PAGES, bd); + mutex_unlock(&ee1004_bus_lock); } -static int ee1004_probe(struct i2c_client *client) +static int ee1004_init_bus_data(struct i2c_client *client) { + struct ee1004_bus_data *bd; int err, cnr = 0; - /* Make sure we can operate on this adapter */ - if (!i2c_check_functionality(client->adapter, - I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_READ_I2C_BLOCK) && - !i2c_check_functionality(client->adapter, - I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_READ_BYTE_DATA)) - return -EPFNOSUPPORT; + bd = ee1004_get_bus_data(client->adapter); + if (!bd) + return dev_err_probe(&client->dev, -ENOSPC, "Only %d busses supported", + EE1004_MAX_BUSSES); - /* Use 2 dummy devices for page select command */ - mutex_lock(&ee1004_bus_lock); - if (++ee1004_dev_count == 1) { + i2c_set_clientdata(client, bd); + + if (++bd->dev_count == 1) { + /* Use 2 dummy devices for page select command */ for (cnr = 0; cnr < EE1004_NUM_PAGES; cnr++) { struct i2c_client *cl; cl = i2c_new_dummy_device(client->adapter, EE1004_ADDR_SET_PAGE + cnr); if (IS_ERR(cl)) { err = PTR_ERR(cl); - goto err_clients; + goto err_out; } - ee1004_set_page[cnr] = cl; + + bd->set_page[cnr] = cl; } /* Remember current 
page to avoid unneeded page select */ - err = ee1004_get_current_page(); + err = ee1004_get_current_page(bd); if (err < 0) - goto err_clients; + goto err_out; + dev_dbg(&client->dev, "Currently selected page: %d\n", err); - ee1004_current_page = err; - } else if (client->adapter != ee1004_set_page[0]->adapter) { - dev_err(&client->dev, - "Driver only supports devices on a single I2C bus\n"); - err = -EOPNOTSUPP; - goto err_clients; + bd->current_page = err; } - mutex_unlock(&ee1004_bus_lock); - - dev_info(&client->dev, - "%u byte EE1004-compliant SPD EEPROM, read-only\n", - EE1004_EEPROM_SIZE); return 0; - err_clients: - ee1004_cleanup(cnr); - mutex_unlock(&ee1004_bus_lock); +err_out: + ee1004_cleanup(cnr, bd); return err; } -static void ee1004_remove(struct i2c_client *client) +static int ee1004_probe(struct i2c_client *client) { - /* Remove page select clients if this is the last device */ + struct nvmem_config config = { + .dev = &client->dev, + .name = dev_name(&client->dev), + .id = NVMEM_DEVID_NONE, + .owner = THIS_MODULE, + .type = NVMEM_TYPE_EEPROM, + .read_only = true, + .root_only = false, + .reg_read = ee1004_read, + .size = EE1004_EEPROM_SIZE, + .word_size = 1, + .stride = 1, + .priv = client, + .compat = true, + .base_dev = &client->dev, + }; + struct nvmem_device *ndev; + int err; + + /* Make sure we can operate on this adapter */ + if (!i2c_check_functionality(client->adapter, + I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_READ_I2C_BLOCK) && + !i2c_check_functionality(client->adapter, + I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_READ_BYTE_DATA)) + return -EPFNOSUPPORT; + + err = i2c_smbus_read_byte(client); + if (err < 0) + return -ENODEV; + mutex_lock(&ee1004_bus_lock); - ee1004_cleanup(EE1004_NUM_PAGES); + + err = ee1004_init_bus_data(client); + if (err < 0) { + mutex_unlock(&ee1004_bus_lock); + return err; + } + + ee1004_probe_temp_sensor(client); + mutex_unlock(&ee1004_bus_lock); + + err = devm_add_action_or_reset(&client->dev, ee1004_cleanup_bus_data, + 
i2c_get_clientdata(client)); + if (err < 0) + return err; + + ndev = devm_nvmem_register(&client->dev, &config); + if (IS_ERR(ndev)) + return PTR_ERR(ndev); + + dev_info(&client->dev, + "%u byte EE1004-compliant SPD EEPROM, read-only\n", + EE1004_EEPROM_SIZE); + + return 0; } /*-------------------------------------------------------------------------*/ @@ -232,10 +341,8 @@ static void ee1004_remove(struct i2c_client *client) static struct i2c_driver ee1004_driver = { .driver = { .name = "ee1004", - .dev_groups = ee1004_groups, }, .probe = ee1004_probe, - .remove = ee1004_remove, .id_table = ee1004_ids, }; module_i2c_driver(ee1004_driver); diff --git a/drivers/misc/eeprom/eeprom.c b/drivers/misc/eeprom/eeprom.c deleted file mode 100644 index ccb7c2f7ee2f..000000000000 --- a/drivers/misc/eeprom/eeprom.c +++ /dev/null @@ -1,214 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright (C) 1998, 1999 Frodo Looijaard <frodol@dds.nl> and - * Philip Edelbrock <phil@netroedge.com> - * Copyright (C) 2003 Greg Kroah-Hartman <greg@kroah.com> - * Copyright (C) 2003 IBM Corp. 
- * Copyright (C) 2004 Jean Delvare <jdelvare@suse.de> - */ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/device.h> -#include <linux/capability.h> -#include <linux/jiffies.h> -#include <linux/i2c.h> -#include <linux/mutex.h> - -/* Addresses to scan */ -static const unsigned short normal_i2c[] = { 0x50, 0x51, 0x52, 0x53, 0x54, - 0x55, 0x56, 0x57, I2C_CLIENT_END }; - - -/* Size of EEPROM in bytes */ -#define EEPROM_SIZE 256 - -/* possible types of eeprom devices */ -enum eeprom_nature { - UNKNOWN, - VAIO, -}; - -/* Each client has this additional data */ -struct eeprom_data { - struct mutex update_lock; - u8 valid; /* bitfield, bit!=0 if slice is valid */ - unsigned long last_updated[8]; /* In jiffies, 8 slices */ - u8 data[EEPROM_SIZE]; /* Register values */ - enum eeprom_nature nature; -}; - - -static void eeprom_update_client(struct i2c_client *client, u8 slice) -{ - struct eeprom_data *data = i2c_get_clientdata(client); - int i; - - mutex_lock(&data->update_lock); - - if (!(data->valid & (1 << slice)) || - time_after(jiffies, data->last_updated[slice] + 300 * HZ)) { - dev_dbg(&client->dev, "Starting eeprom update, slice %u\n", slice); - - if (i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_READ_I2C_BLOCK)) { - for (i = slice << 5; i < (slice + 1) << 5; i += 32) - if (i2c_smbus_read_i2c_block_data(client, i, - 32, data->data + i) - != 32) - goto exit; - } else { - for (i = slice << 5; i < (slice + 1) << 5; i += 2) { - int word = i2c_smbus_read_word_data(client, i); - if (word < 0) - goto exit; - data->data[i] = word & 0xff; - data->data[i + 1] = word >> 8; - } - } - data->last_updated[slice] = jiffies; - data->valid |= (1 << slice); - } -exit: - mutex_unlock(&data->update_lock); -} - -static ssize_t eeprom_read(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, - char *buf, loff_t off, size_t count) -{ - struct i2c_client *client = kobj_to_i2c_client(kobj); - struct eeprom_data *data = 
i2c_get_clientdata(client); - u8 slice; - - /* Only refresh slices which contain requested bytes */ - for (slice = off >> 5; slice <= (off + count - 1) >> 5; slice++) - eeprom_update_client(client, slice); - - /* Hide Vaio private settings to regular users: - - BIOS passwords: bytes 0x00 to 0x0f - - UUID: bytes 0x10 to 0x1f - - Serial number: 0xc0 to 0xdf */ - if (data->nature == VAIO && !capable(CAP_SYS_ADMIN)) { - int i; - - for (i = 0; i < count; i++) { - if ((off + i <= 0x1f) || - (off + i >= 0xc0 && off + i <= 0xdf)) - buf[i] = 0; - else - buf[i] = data->data[off + i]; - } - } else { - memcpy(buf, &data->data[off], count); - } - - return count; -} - -static const struct bin_attribute eeprom_attr = { - .attr = { - .name = "eeprom", - .mode = S_IRUGO, - }, - .size = EEPROM_SIZE, - .read = eeprom_read, -}; - -/* Return 0 if detection is successful, -ENODEV otherwise */ -static int eeprom_detect(struct i2c_client *client, struct i2c_board_info *info) -{ - struct i2c_adapter *adapter = client->adapter; - - /* EDID EEPROMs are often 24C00 EEPROMs, which answer to all - addresses 0x50-0x57, but we only care about 0x50. So decline - attaching to addresses >= 0x51 on DDC buses */ - if (!(adapter->class & I2C_CLASS_SPD) && client->addr >= 0x51) - return -ENODEV; - - /* There are four ways we can read the EEPROM data: - (1) I2C block reads (faster, but unsupported by most adapters) - (2) Word reads (128% overhead) - (3) Consecutive byte reads (88% overhead, unsafe) - (4) Regular byte data reads (265% overhead) - The third and fourth methods are not implemented by this driver - because all known adapters support one of the first two. 
*/ - if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_READ_WORD_DATA) - && !i2c_check_functionality(adapter, I2C_FUNC_SMBUS_READ_I2C_BLOCK)) - return -ENODEV; - - strscpy(info->type, "eeprom", I2C_NAME_SIZE); - - return 0; -} - -static int eeprom_probe(struct i2c_client *client) -{ - struct i2c_adapter *adapter = client->adapter; - struct eeprom_data *data; - - data = devm_kzalloc(&client->dev, sizeof(struct eeprom_data), - GFP_KERNEL); - if (!data) - return -ENOMEM; - - memset(data->data, 0xff, EEPROM_SIZE); - i2c_set_clientdata(client, data); - mutex_init(&data->update_lock); - data->nature = UNKNOWN; - - /* Detect the Vaio nature of EEPROMs. - We use the "PCG-" or "VGN-" prefix as the signature. */ - if (client->addr == 0x57 - && i2c_check_functionality(adapter, I2C_FUNC_SMBUS_READ_BYTE_DATA)) { - char name[4]; - - name[0] = i2c_smbus_read_byte_data(client, 0x80); - name[1] = i2c_smbus_read_byte_data(client, 0x81); - name[2] = i2c_smbus_read_byte_data(client, 0x82); - name[3] = i2c_smbus_read_byte_data(client, 0x83); - - if (!memcmp(name, "PCG-", 4) || !memcmp(name, "VGN-", 4)) { - dev_info(&client->dev, "Vaio EEPROM detected, " - "enabling privacy protection\n"); - data->nature = VAIO; - } - } - - /* Let the users know they are using deprecated driver */ - dev_notice(&client->dev, - "eeprom driver is deprecated, please use at24 instead\n"); - - /* create the sysfs eeprom file */ - return sysfs_create_bin_file(&client->dev.kobj, &eeprom_attr); -} - -static void eeprom_remove(struct i2c_client *client) -{ - sysfs_remove_bin_file(&client->dev.kobj, &eeprom_attr); -} - -static const struct i2c_device_id eeprom_id[] = { - { "eeprom", 0 }, - { } -}; - -static struct i2c_driver eeprom_driver = { - .driver = { - .name = "eeprom", - }, - .probe = eeprom_probe, - .remove = eeprom_remove, - .id_table = eeprom_id, - - .class = I2C_CLASS_DDC | I2C_CLASS_SPD, - .detect = eeprom_detect, - .address_list = normal_i2c, -}; - -module_i2c_driver(eeprom_driver); - 
-MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl> and " - "Philip Edelbrock <phil@netroedge.com> and " - "Greg Kroah-Hartman <greg@kroah.com>"); -MODULE_DESCRIPTION("I2C EEPROM driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/misc/eeprom/eeprom_93cx6.c b/drivers/misc/eeprom/eeprom_93cx6.c index 9627294fe3e9..e6f0e0fc1ca2 100644 --- a/drivers/misc/eeprom/eeprom_93cx6.c +++ b/drivers/misc/eeprom/eeprom_93cx6.c @@ -8,6 +8,7 @@ * Supported chipsets: 93c46 & 93c66. */ +#include <linux/bits.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/delay.h> @@ -102,7 +103,7 @@ static void eeprom_93cx6_write_bits(struct eeprom_93cx6 *eeprom, /* * Check if this bit needs to be set. */ - eeprom->reg_data_in = !!(data & (1 << (i - 1))); + eeprom->reg_data_in = !!(data & BIT(i - 1)); /* * Write the bit to the eeprom register. @@ -152,7 +153,7 @@ static void eeprom_93cx6_read_bits(struct eeprom_93cx6 *eeprom, * Read if the bit has been set. */ if (eeprom->reg_data_out) - buf |= (1 << (i - 1)); + buf |= BIT(i - 1); eeprom_93cx6_pulse_low(eeprom); } @@ -186,6 +187,11 @@ void eeprom_93cx6_read(struct eeprom_93cx6 *eeprom, const u8 word, eeprom_93cx6_write_bits(eeprom, command, PCI_EEPROM_WIDTH_OPCODE + eeprom->width); + if (has_quirk_extra_read_cycle(eeprom)) { + eeprom_93cx6_pulse_high(eeprom); + eeprom_93cx6_pulse_low(eeprom); + } + /* * Read the requested 16 bits. */ @@ -252,6 +258,11 @@ void eeprom_93cx6_readb(struct eeprom_93cx6 *eeprom, const u8 byte, eeprom_93cx6_write_bits(eeprom, command, PCI_EEPROM_WIDTH_OPCODE + eeprom->width + 1); + if (has_quirk_extra_read_cycle(eeprom)) { + eeprom_93cx6_pulse_high(eeprom); + eeprom_93cx6_pulse_low(eeprom); + } + /* * Read the requested 8 bits. 
*/ diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c index b630625b3024..9cae6f530679 100644 --- a/drivers/misc/eeprom/eeprom_93xx46.c +++ b/drivers/misc/eeprom/eeprom_93xx46.c @@ -5,20 +5,42 @@ * (C) 2011 DENX Software Engineering, Anatolij Gustschin <agust@denx.de> */ +#include <linux/array_size.h> +#include <linux/bits.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/gpio/consumer.h> -#include <linux/kernel.h> +#include <linux/kstrtox.h> #include <linux/log2.h> +#include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/mutex.h> -#include <linux/of.h> -#include <linux/of_device.h> -#include <linux/of_gpio.h> +#include <linux/property.h> #include <linux/slab.h> #include <linux/spi/spi.h> +#include <linux/string_choices.h> + #include <linux/nvmem-provider.h> -#include <linux/eeprom_93xx46.h> + +struct eeprom_93xx46_platform_data { + unsigned char flags; +#define EE_ADDR8 0x01 /* 8 bit addr. cfg */ +#define EE_ADDR16 0x02 /* 16 bit addr. cfg */ +#define EE_READONLY 0x08 /* forbid writing */ +#define EE_SIZE1K 0x10 /* 1 kb of data, that is a 93xx46 */ +#define EE_SIZE2K 0x20 /* 2 kb of data, that is a 93xx56 */ +#define EE_SIZE4K 0x40 /* 4 kb of data, that is a 93xx66 */ + + unsigned int quirks; +/* Single word read transfers only; no sequential read. */ +#define EEPROM_93XX46_QUIRK_SINGLE_WORD_READ (1 << 0) +/* Instructions such as EWEN are (addrlen + 2) in length. */ +#define EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH (1 << 1) +/* Add extra cycle after address during a read */ +#define EEPROM_93XX46_QUIRK_EXTRA_READ_CYCLE BIT(2) + + struct gpio_desc *select; +}; #define OP_START 0x4 #define OP_WRITE (OP_START | 0x1) @@ -97,15 +119,14 @@ static int eeprom_93xx46_read(void *priv, unsigned int off, mutex_lock(&edev->lock); - if (edev->pdata->prepare) - edev->pdata->prepare(edev); + gpiod_set_value_cansleep(edev->pdata->select, 1); /* The opcode in front of the address is three bits. 
*/ bits = edev->addrlen + 3; while (count) { struct spi_message m; - struct spi_transfer t[2] = { { 0 } }; + struct spi_transfer t[2] = {}; u16 cmd_addr = OP_READ << edev->addrlen; size_t nbytes = count; @@ -127,25 +148,23 @@ static int eeprom_93xx46_read(void *priv, unsigned int off, bits += 1; } - spi_message_init(&m); - t[0].tx_buf = (char *)&cmd_addr; t[0].len = 2; t[0].bits_per_word = bits; - spi_message_add_tail(&t[0], &m); t[1].rx_buf = buf; t[1].len = count; t[1].bits_per_word = 8; - spi_message_add_tail(&t[1], &m); + + spi_message_init_with_transfers(&m, t, ARRAY_SIZE(t)); err = spi_sync(edev->spi, &m); /* have to wait at least Tcsl ns */ ndelay(250); if (err) { - dev_err(&edev->spi->dev, "read %zu bytes at %d: err. %d\n", - nbytes, (int)off, err); + dev_err(&edev->spi->dev, "read %zu bytes at %u: err. %d\n", + nbytes, off, err); break; } @@ -154,8 +173,7 @@ static int eeprom_93xx46_read(void *priv, unsigned int off, count -= nbytes; } - if (edev->pdata->finish) - edev->pdata->finish(edev); + gpiod_set_value_cansleep(edev->pdata->select, 0); mutex_unlock(&edev->lock); @@ -165,7 +183,7 @@ static int eeprom_93xx46_read(void *priv, unsigned int off, static int eeprom_93xx46_ew(struct eeprom_93xx46_dev *edev, int is_on) { struct spi_message m; - struct spi_transfer t; + struct spi_transfer t = {}; int bits, ret; u16 cmd_addr; @@ -183,31 +201,27 @@ static int eeprom_93xx46_ew(struct eeprom_93xx46_dev *edev, int is_on) bits += 2; } - dev_dbg(&edev->spi->dev, "ew%s cmd 0x%04x, %d bits\n", - is_on ? 
"en" : "ds", cmd_addr, bits); - - spi_message_init(&m); - memset(&t, 0, sizeof(t)); + dev_dbg(&edev->spi->dev, "ew %s cmd 0x%04x, %d bits\n", + str_enable_disable(is_on), cmd_addr, bits); t.tx_buf = &cmd_addr; t.len = 2; t.bits_per_word = bits; - spi_message_add_tail(&t, &m); + + spi_message_init_with_transfers(&m, &t, 1); mutex_lock(&edev->lock); - if (edev->pdata->prepare) - edev->pdata->prepare(edev); + gpiod_set_value_cansleep(edev->pdata->select, 1); ret = spi_sync(edev->spi, &m); /* have to wait at least Tcsl ns */ ndelay(250); if (ret) - dev_err(&edev->spi->dev, "erase/write %sable error %d\n", - is_on ? "en" : "dis", ret); + dev_err(&edev->spi->dev, "erase/write %s error %d\n", + str_enable_disable(is_on), ret); - if (edev->pdata->finish) - edev->pdata->finish(edev); + gpiod_set_value_cansleep(edev->pdata->select, 0); mutex_unlock(&edev->lock); return ret; @@ -215,10 +229,10 @@ static int eeprom_93xx46_ew(struct eeprom_93xx46_dev *edev, int is_on) static ssize_t eeprom_93xx46_write_word(struct eeprom_93xx46_dev *edev, - const char *buf, unsigned off) + const char *buf, unsigned int off) { struct spi_message m; - struct spi_transfer t[2]; + struct spi_transfer t[2] = {}; int bits, data_len, ret; u16 cmd_addr; @@ -240,18 +254,15 @@ eeprom_93xx46_write_word(struct eeprom_93xx46_dev *edev, dev_dbg(&edev->spi->dev, "write cmd 0x%x\n", cmd_addr); - spi_message_init(&m); - memset(t, 0, sizeof(t)); - t[0].tx_buf = (char *)&cmd_addr; t[0].len = 2; t[0].bits_per_word = bits; - spi_message_add_tail(&t[0], &m); t[1].tx_buf = buf; t[1].len = data_len; t[1].bits_per_word = 8; - spi_message_add_tail(&t[1], &m); + + spi_message_init_with_transfers(&m, t, ARRAY_SIZE(t)); ret = spi_sync(edev->spi, &m); /* have to wait program cycle time Twc ms */ @@ -264,7 +275,8 @@ static int eeprom_93xx46_write(void *priv, unsigned int off, { struct eeprom_93xx46_dev *edev = priv; char *buf = val; - int i, ret, step = 1; + int ret, step = 1; + unsigned int i; if (unlikely(off >= 
edev->size)) return -EFBIG; @@ -286,20 +298,17 @@ static int eeprom_93xx46_write(void *priv, unsigned int off, mutex_lock(&edev->lock); - if (edev->pdata->prepare) - edev->pdata->prepare(edev); + gpiod_set_value_cansleep(edev->pdata->select, 1); for (i = 0; i < count; i += step) { ret = eeprom_93xx46_write_word(edev, &buf[i], off + i); if (ret) { - dev_err(&edev->spi->dev, "write failed at %d: %d\n", - (int)off + i, ret); + dev_err(&edev->spi->dev, "write failed at %u: %d\n", off + i, ret); break; } } - if (edev->pdata->finish) - edev->pdata->finish(edev); + gpiod_set_value_cansleep(edev->pdata->select, 0); mutex_unlock(&edev->lock); @@ -310,9 +319,8 @@ static int eeprom_93xx46_write(void *priv, unsigned int off, static int eeprom_93xx46_eral(struct eeprom_93xx46_dev *edev) { - struct eeprom_93xx46_platform_data *pd = edev->pdata; struct spi_message m; - struct spi_transfer t; + struct spi_transfer t = {}; int bits, ret; u16 cmd_addr; @@ -332,18 +340,15 @@ static int eeprom_93xx46_eral(struct eeprom_93xx46_dev *edev) dev_dbg(&edev->spi->dev, "eral cmd 0x%04x, %d bits\n", cmd_addr, bits); - spi_message_init(&m); - memset(&t, 0, sizeof(t)); - t.tx_buf = &cmd_addr; t.len = 2; t.bits_per_word = bits; - spi_message_add_tail(&t, &m); + + spi_message_init_with_transfers(&m, &t, 1); mutex_lock(&edev->lock); - if (edev->pdata->prepare) - edev->pdata->prepare(edev); + gpiod_set_value_cansleep(edev->pdata->select, 1); ret = spi_sync(edev->spi, &m); if (ret) @@ -351,21 +356,23 @@ static int eeprom_93xx46_eral(struct eeprom_93xx46_dev *edev) /* have to wait erase cycle time Tec ms */ mdelay(6); - if (pd->finish) - pd->finish(edev); + gpiod_set_value_cansleep(edev->pdata->select, 0); mutex_unlock(&edev->lock); return ret; } -static ssize_t eeprom_93xx46_store_erase(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) +static ssize_t erase_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) { struct 
eeprom_93xx46_dev *edev = dev_get_drvdata(dev); - int erase = 0, ret; + bool erase; + int ret; + + ret = kstrtobool(buf, &erase); + if (ret) + return ret; - sscanf(buf, "%d", &erase); if (erase) { ret = eeprom_93xx46_ew(edev, 1); if (ret) @@ -379,21 +386,7 @@ static ssize_t eeprom_93xx46_store_erase(struct device *dev, } return count; } -static DEVICE_ATTR(erase, S_IWUSR, NULL, eeprom_93xx46_store_erase); - -static void select_assert(void *context) -{ - struct eeprom_93xx46_dev *edev = context; - - gpiod_set_value_cansleep(edev->pdata->select, 1); -} - -static void select_deassert(void *context) -{ - struct eeprom_93xx46_dev *edev = context; - - gpiod_set_value_cansleep(edev->pdata->select, 0); -} +static DEVICE_ATTR_WO(erase); static const struct of_device_id eeprom_93xx46_of_table[] = { { .compatible = "eeprom-93xx46", .data = &at93c46_data, }, @@ -423,22 +416,20 @@ static const struct spi_device_id eeprom_93xx46_spi_ids[] = { }; MODULE_DEVICE_TABLE(spi, eeprom_93xx46_spi_ids); -static int eeprom_93xx46_probe_dt(struct spi_device *spi) +static int eeprom_93xx46_probe_fw(struct device *dev) { - const struct of_device_id *of_id = - of_match_device(eeprom_93xx46_of_table, &spi->dev); - struct device_node *np = spi->dev.of_node; + const struct eeprom_93xx46_devtype_data *data; struct eeprom_93xx46_platform_data *pd; u32 tmp; int ret; - pd = devm_kzalloc(&spi->dev, sizeof(*pd), GFP_KERNEL); + pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL); if (!pd) return -ENOMEM; - ret = of_property_read_u32(np, "data-size", &tmp); + ret = device_property_read_u32(dev, "data-size", &tmp); if (ret < 0) { - dev_err(&spi->dev, "data-size property not found\n"); + dev_err(dev, "data-size property not found\n"); return ret; } @@ -447,30 +438,25 @@ static int eeprom_93xx46_probe_dt(struct spi_device *spi) } else if (tmp == 16) { pd->flags |= EE_ADDR16; } else { - dev_err(&spi->dev, "invalid data-size (%d)\n", tmp); + dev_err(dev, "invalid data-size (%d)\n", tmp); return -EINVAL; } - if 
(of_property_read_bool(np, "read-only")) + if (device_property_read_bool(dev, "read-only")) pd->flags |= EE_READONLY; - pd->select = devm_gpiod_get_optional(&spi->dev, "select", - GPIOD_OUT_LOW); + pd->select = devm_gpiod_get_optional(dev, "select", GPIOD_OUT_LOW); if (IS_ERR(pd->select)) return PTR_ERR(pd->select); + gpiod_set_consumer_name(pd->select, "93xx46 EEPROMs OE"); - pd->prepare = select_assert; - pd->finish = select_deassert; - gpiod_direction_output(pd->select, 0); - - if (of_id->data) { - const struct eeprom_93xx46_devtype_data *data = of_id->data; - + data = spi_get_device_match_data(to_spi_device(dev)); + if (data) { pd->quirks = data->quirks; pd->flags |= data->flags; } - spi->dev.platform_data = pd; + dev->platform_data = pd; return 0; } @@ -479,13 +465,12 @@ static int eeprom_93xx46_probe(struct spi_device *spi) { struct eeprom_93xx46_platform_data *pd; struct eeprom_93xx46_dev *edev; + struct device *dev = &spi->dev; int err; - if (spi->dev.of_node) { - err = eeprom_93xx46_probe_dt(spi); - if (err < 0) - return err; - } + err = eeprom_93xx46_probe_fw(dev); + if (err < 0) + return err; pd = spi->dev.platform_data; if (!pd) { @@ -566,7 +551,7 @@ static void eeprom_93xx46_remove(struct spi_device *spi) static struct spi_driver eeprom_93xx46_driver = { .driver = { .name = "93xx46", - .of_match_table = of_match_ptr(eeprom_93xx46_of_table), + .of_match_table = eeprom_93xx46_of_table, }, .probe = eeprom_93xx46_probe, .remove = eeprom_93xx46_remove, @@ -579,5 +564,3 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Driver for 93xx46 EEPROMs"); MODULE_AUTHOR("Anatolij Gustschin <agust@denx.de>"); MODULE_ALIAS("spi:93xx46"); -MODULE_ALIAS("spi:eeprom-93xx46"); -MODULE_ALIAS("spi:93lc46b"); diff --git a/drivers/misc/eeprom/idt_89hpesx.c b/drivers/misc/eeprom/idt_89hpesx.c index 740c06382b83..60c42170d147 100644 --- a/drivers/misc/eeprom/idt_89hpesx.c +++ b/drivers/misc/eeprom/idt_89hpesx.c @@ -61,11 +61,6 @@ MODULE_LICENSE("GPL v2"); 
MODULE_AUTHOR("T-platforms"); /* - * csr_dbgdir - CSR read/write operations Debugfs directory - */ -static struct dentry *csr_dbgdir; - -/* * struct idt_89hpesx_dev - IDT 89HPESx device data structure * @eesize: Size of EEPROM in bytes (calculated from "idt,eecompatible") * @eero: EEPROM Read-only flag @@ -129,7 +124,7 @@ struct idt_smb_seq { struct idt_eeprom_seq { u8 cmd; u8 eeaddr; - u16 memaddr; + __le16 memaddr; u8 data; } __packed; @@ -141,8 +136,8 @@ struct idt_eeprom_seq { */ struct idt_csr_seq { u8 cmd; - u16 csraddr; - u32 data; + __le16 csraddr; + __le32 data; } __packed; /* @@ -847,7 +842,7 @@ err_mutex_unlock: * @count: Number of bytes to write */ static ssize_t eeprom_write(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, + const struct bin_attribute *attr, char *buf, loff_t off, size_t count) { struct idt_89hpesx_dev *pdev; @@ -871,7 +866,7 @@ static ssize_t eeprom_write(struct file *filp, struct kobject *kobj, * @count: Number of bytes to write */ static ssize_t eeprom_read(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, + const struct bin_attribute *attr, char *buf, loff_t off, size_t count) { struct idt_89hpesx_dev *pdev; @@ -905,7 +900,7 @@ static ssize_t idt_dbgfs_csr_write(struct file *filep, const char __user *ubuf, { struct idt_89hpesx_dev *pdev = filep->private_data; char *colon_ch, *csraddr_str, *csrval_str; - int ret, csraddr_len; + int ret; u32 csraddr, csrval; char *buf; @@ -913,15 +908,9 @@ static ssize_t idt_dbgfs_csr_write(struct file *filep, const char __user *ubuf, return 0; /* Copy data from User-space */ - buf = kmalloc(count + 1, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - if (copy_from_user(buf, ubuf, count)) { - ret = -EFAULT; - goto free_buf; - } - buf[count] = 0; + buf = memdup_user_nul(ubuf, count); + if (IS_ERR(buf)) + return PTR_ERR(buf); /* Find position of colon in the buffer */ colon_ch = strnchr(buf, count, ':'); @@ -933,21 +922,16 @@ static ssize_t 
idt_dbgfs_csr_write(struct file *filep, const char __user *ubuf, * no new CSR value */ if (colon_ch != NULL) { - csraddr_len = colon_ch - buf; - csraddr_str = - kmalloc(csraddr_len + 1, GFP_KERNEL); + /* Copy the register address to the substring buffer */ + csraddr_str = kmemdup_nul(buf, colon_ch - buf, GFP_KERNEL); if (csraddr_str == NULL) { ret = -ENOMEM; goto free_buf; } - /* Copy the register address to the substring buffer */ - strncpy(csraddr_str, buf, csraddr_len); - csraddr_str[csraddr_len] = '\0'; /* Register value must follow the colon */ csrval_str = colon_ch + 1; } else /* if (str_colon == NULL) */ { csraddr_str = (char *)buf; /* Just to shut warning up */ - csraddr_len = strnlen(csraddr_str, count); csrval_str = NULL; } @@ -1028,7 +1012,7 @@ static ssize_t idt_dbgfs_csr_read(struct file *filep, char __user *ubuf, * NOTE Size will be changed in compliance with OF node. EEPROM attribute will * be read-only as well if the corresponding flag is specified in OF node. */ -static BIN_ATTR_RW(eeprom, EEPROM_DEF_SIZE); +static const BIN_ATTR_RW(eeprom, EEPROM_DEF_SIZE); /* * csr_dbgfs_ops - CSR debugfs-node read/write operations @@ -1294,14 +1278,15 @@ static int idt_create_sysfs_files(struct idt_89hpesx_dev *pdev) return 0; } - /* Allocate memory for attribute file */ - pdev->ee_file = devm_kmalloc(dev, sizeof(*pdev->ee_file), GFP_KERNEL); + /* + * Allocate memory for attribute file and copy the declared EEPROM attr + * structure to change some of fields + */ + pdev->ee_file = devm_kmemdup(dev, &bin_attr_eeprom, + sizeof(*pdev->ee_file), GFP_KERNEL); if (!pdev->ee_file) return -ENOMEM; - /* Copy the declared EEPROM attr structure to change some of fields */ - memcpy(pdev->ee_file, &bin_attr_eeprom, sizeof(*pdev->ee_file)); - /* In case of read-only EEPROM get rid of write ability */ if (pdev->eero) { pdev->ee_file->attr.mode &= ~0200; @@ -1335,35 +1320,6 @@ static void idt_remove_sysfs_files(struct idt_89hpesx_dev *pdev) } /* - * idt_create_dbgfs_files() - 
create debugfs files - * @pdev: Pointer to the driver data - */ -#define CSRNAME_LEN ((size_t)32) -static void idt_create_dbgfs_files(struct idt_89hpesx_dev *pdev) -{ - struct i2c_client *cli = pdev->client; - char fname[CSRNAME_LEN]; - - /* Create Debugfs directory for CSR file */ - snprintf(fname, CSRNAME_LEN, "%d-%04hx", cli->adapter->nr, cli->addr); - pdev->csr_dir = debugfs_create_dir(fname, csr_dbgdir); - - /* Create Debugfs file for CSR read/write operations */ - debugfs_create_file(cli->name, 0600, pdev->csr_dir, pdev, - &csr_dbgfs_ops); -} - -/* - * idt_remove_dbgfs_files() - remove debugfs files - * @pdev: Pointer to the driver data - */ -static void idt_remove_dbgfs_files(struct idt_89hpesx_dev *pdev) -{ - /* Remove CSR directory and it sysfs-node */ - debugfs_remove_recursive(pdev->csr_dir); -} - -/* * idt_probe() - IDT 89HPESx driver probe() callback method */ static int idt_probe(struct i2c_client *client) @@ -1392,7 +1348,7 @@ static int idt_probe(struct i2c_client *client) goto err_free_pdev; /* Create debugfs files */ - idt_create_dbgfs_files(pdev); + debugfs_create_file(pdev->client->name, 0600, client->debugfs, pdev, &csr_dbgfs_ops); return 0; @@ -1409,9 +1365,6 @@ static void idt_remove(struct i2c_client *client) { struct idt_89hpesx_dev *pdev = i2c_get_clientdata(client); - /* Remove debugfs files first */ - idt_remove_dbgfs_files(pdev); - /* Remove sysfs files */ idt_remove_sysfs_files(pdev); @@ -1436,58 +1389,58 @@ MODULE_DEVICE_TABLE(i2c, ee_ids); * idt_ids - supported IDT 89HPESx devices */ static const struct i2c_device_id idt_ids[] = { - { "89hpes8nt2", 0 }, - { "89hpes12nt3", 0 }, - - { "89hpes24nt6ag2", 0 }, - { "89hpes32nt8ag2", 0 }, - { "89hpes32nt8bg2", 0 }, - { "89hpes12nt12g2", 0 }, - { "89hpes16nt16g2", 0 }, - { "89hpes24nt24g2", 0 }, - { "89hpes32nt24ag2", 0 }, - { "89hpes32nt24bg2", 0 }, - - { "89hpes12n3", 0 }, - { "89hpes12n3a", 0 }, - { "89hpes24n3", 0 }, - { "89hpes24n3a", 0 }, - - { "89hpes32h8", 0 }, - { "89hpes32h8g2", 0 
}, - { "89hpes48h12", 0 }, - { "89hpes48h12g2", 0 }, - { "89hpes48h12ag2", 0 }, - { "89hpes16h16", 0 }, - { "89hpes22h16", 0 }, - { "89hpes22h16g2", 0 }, - { "89hpes34h16", 0 }, - { "89hpes34h16g2", 0 }, - { "89hpes64h16", 0 }, - { "89hpes64h16g2", 0 }, - { "89hpes64h16ag2", 0 }, - - /* { "89hpes3t3", 0 }, // No SMBus-slave iface */ - { "89hpes12t3g2", 0 }, - { "89hpes24t3g2", 0 }, - /* { "89hpes4t4", 0 }, // No SMBus-slave iface */ - { "89hpes16t4", 0 }, - { "89hpes4t4g2", 0 }, - { "89hpes10t4g2", 0 }, - { "89hpes16t4g2", 0 }, - { "89hpes16t4ag2", 0 }, - { "89hpes5t5", 0 }, - { "89hpes6t5", 0 }, - { "89hpes8t5", 0 }, - { "89hpes8t5a", 0 }, - { "89hpes24t6", 0 }, - { "89hpes6t6g2", 0 }, - { "89hpes24t6g2", 0 }, - { "89hpes16t7", 0 }, - { "89hpes32t8", 0 }, - { "89hpes32t8g2", 0 }, - { "89hpes48t12", 0 }, - { "89hpes48t12g2", 0 }, + { "89hpes8nt2" }, + { "89hpes12nt3" }, + + { "89hpes24nt6ag2" }, + { "89hpes32nt8ag2" }, + { "89hpes32nt8bg2" }, + { "89hpes12nt12g2" }, + { "89hpes16nt16g2" }, + { "89hpes24nt24g2" }, + { "89hpes32nt24ag2" }, + { "89hpes32nt24bg2" }, + + { "89hpes12n3" }, + { "89hpes12n3a" }, + { "89hpes24n3" }, + { "89hpes24n3a" }, + + { "89hpes32h8" }, + { "89hpes32h8g2" }, + { "89hpes48h12" }, + { "89hpes48h12g2" }, + { "89hpes48h12ag2" }, + { "89hpes16h16" }, + { "89hpes22h16" }, + { "89hpes22h16g2" }, + { "89hpes34h16" }, + { "89hpes34h16g2" }, + { "89hpes64h16" }, + { "89hpes64h16g2" }, + { "89hpes64h16ag2" }, + + /* { "89hpes3t3" }, // No SMBus-slave iface */ + { "89hpes12t3g2" }, + { "89hpes24t3g2" }, + /* { "89hpes4t4" }, // No SMBus-slave iface */ + { "89hpes16t4" }, + { "89hpes4t4g2" }, + { "89hpes10t4g2" }, + { "89hpes16t4g2" }, + { "89hpes16t4ag2" }, + { "89hpes5t5" }, + { "89hpes6t5" }, + { "89hpes8t5" }, + { "89hpes8t5a" }, + { "89hpes24t6" }, + { "89hpes6t6g2" }, + { "89hpes24t6g2" }, + { "89hpes16t7" }, + { "89hpes32t8" }, + { "89hpes32t8g2" }, + { "89hpes48t12" }, + { "89hpes48t12g2" }, { /* END OF LIST */ } }; MODULE_DEVICE_TABLE(i2c, 
idt_ids); @@ -1560,38 +1513,4 @@ static struct i2c_driver idt_driver = { .remove = idt_remove, .id_table = idt_ids, }; - -/* - * idt_init() - IDT 89HPESx driver init() callback method - */ -static int __init idt_init(void) -{ - int ret; - - /* Create Debugfs directory first */ - if (debugfs_initialized()) - csr_dbgdir = debugfs_create_dir("idt_csr", NULL); - - /* Add new i2c-device driver */ - ret = i2c_add_driver(&idt_driver); - if (ret) { - debugfs_remove_recursive(csr_dbgdir); - return ret; - } - - return 0; -} -module_init(idt_init); - -/* - * idt_exit() - IDT 89HPESx driver exit() callback method - */ -static void __exit idt_exit(void) -{ - /* Discard debugfs directory and all files if any */ - debugfs_remove_recursive(csr_dbgdir); - - /* Unregister i2c-device driver */ - i2c_del_driver(&idt_driver); -} -module_exit(idt_exit); +module_i2c_driver(idt_driver); diff --git a/drivers/misc/eeprom/m24lr.c b/drivers/misc/eeprom/m24lr.c new file mode 100644 index 000000000000..7a9fd45a8e46 --- /dev/null +++ b/drivers/misc/eeprom/m24lr.c @@ -0,0 +1,606 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * m24lr.c - Sysfs control interface for ST M24LR series RFID/NFC chips + * + * Copyright (c) 2025 Abd-Alrhman Masalkhi <abd.masalkhi@gmail.com> + * + * This driver implements both the sysfs-based control interface and EEPROM + * access for STMicroelectronics M24LR series chips (e.g., M24LR04E-R). + * It provides access to control registers for features such as password + * authentication, memory protection, and device configuration. In addition, + * it manages read and write operations to the EEPROM region of the chip. 
+ */ + +#include <linux/device.h> +#include <linux/i2c.h> +#include <linux/module.h> +#include <linux/nvmem-provider.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/regmap.h> + +#define M24LR_WRITE_TIMEOUT 25u +#define M24LR_READ_TIMEOUT (M24LR_WRITE_TIMEOUT) + +/** + * struct m24lr_chip - describes chip-specific sysfs layout + * @sss_len: the length of the sss region + * @page_size: chip-specific limit on the maximum number of bytes allowed + * in a single write operation. + * @eeprom_size: size of the EEPROM in byte + * + * Supports multiple M24LR chip variants (e.g., M24LRxx) by allowing each + * to define its own set of sysfs attributes, depending on its available + * registers and features. + */ +struct m24lr_chip { + unsigned int sss_len; + unsigned int page_size; + unsigned int eeprom_size; +}; + +/** + * struct m24lr - core driver data for M24LR chip control + * @uid: 64 bits unique identifier stored in the device + * @sss_len: the length of the sss region + * @page_size: chip-specific limit on the maximum number of bytes allowed + * in a single write operation. + * @eeprom_size: size of the EEPROM in byte + * @ctl_regmap: regmap interface for accessing the system parameter sector + * @eeprom_regmap: regmap interface for accessing the EEPROM + * @lock: mutex to synchronize operations to the device + * + * Central data structure holding the state and resources used by the + * M24LR device driver. 
+ */ +struct m24lr { + u64 uid; + unsigned int sss_len; + unsigned int page_size; + unsigned int eeprom_size; + struct regmap *ctl_regmap; + struct regmap *eeprom_regmap; + struct mutex lock; /* synchronize operations to the device */ +}; + +static const struct regmap_range m24lr_ctl_vo_ranges[] = { + regmap_reg_range(0, 63), +}; + +static const struct regmap_access_table m24lr_ctl_vo_table = { + .yes_ranges = m24lr_ctl_vo_ranges, + .n_yes_ranges = ARRAY_SIZE(m24lr_ctl_vo_ranges), +}; + +static const struct regmap_config m24lr_ctl_regmap_conf = { + .name = "m24lr_ctl", + .reg_stride = 1, + .reg_bits = 16, + .val_bits = 8, + .disable_locking = false, + .cache_type = REGCACHE_RBTREE,/* Flat can't be used, there's huge gap */ + .volatile_table = &m24lr_ctl_vo_table, +}; + +/* Chip descriptor for M24LR04E-R variant */ +static const struct m24lr_chip m24lr04e_r_chip = { + .page_size = 4, + .eeprom_size = 512, + .sss_len = 4, +}; + +/* Chip descriptor for M24LR16E-R variant */ +static const struct m24lr_chip m24lr16e_r_chip = { + .page_size = 4, + .eeprom_size = 2048, + .sss_len = 16, +}; + +/* Chip descriptor for M24LR64E-R variant */ +static const struct m24lr_chip m24lr64e_r_chip = { + .page_size = 4, + .eeprom_size = 8192, + .sss_len = 64, +}; + +static const struct i2c_device_id m24lr_ids[] = { + { "m24lr04e-r", (kernel_ulong_t)&m24lr04e_r_chip}, + { "m24lr16e-r", (kernel_ulong_t)&m24lr16e_r_chip}, + { "m24lr64e-r", (kernel_ulong_t)&m24lr64e_r_chip}, + { } +}; +MODULE_DEVICE_TABLE(i2c, m24lr_ids); + +static const struct of_device_id m24lr_of_match[] = { + { .compatible = "st,m24lr04e-r", .data = &m24lr04e_r_chip}, + { .compatible = "st,m24lr16e-r", .data = &m24lr16e_r_chip}, + { .compatible = "st,m24lr64e-r", .data = &m24lr64e_r_chip}, + { } +}; +MODULE_DEVICE_TABLE(of, m24lr_of_match); + +/** + * m24lr_regmap_read - read data using regmap with retry on failure + * @regmap: regmap instance for the device + * @buf: buffer to store the read data + * @size: number of 
bytes to read + * @offset: starting register address + * + * Attempts to read a block of data from the device with retries and timeout. + * Some M24LR chips may transiently NACK reads (e.g., during internal write + * cycles), so this function retries with a short sleep until the timeout + * expires. + * + * Returns: + * Number of bytes read on success, + * -ETIMEDOUT if the read fails within the timeout window. + */ +static ssize_t m24lr_regmap_read(struct regmap *regmap, u8 *buf, + size_t size, unsigned int offset) +{ + int err; + unsigned long timeout, read_time; + ssize_t ret = -ETIMEDOUT; + + timeout = jiffies + msecs_to_jiffies(M24LR_READ_TIMEOUT); + do { + read_time = jiffies; + + err = regmap_bulk_read(regmap, offset, buf, size); + if (!err) { + ret = size; + break; + } + + usleep_range(1000, 2000); + } while (time_before(read_time, timeout)); + + return ret; +} + +/** + * m24lr_regmap_write - write data using regmap with retry on failure + * @regmap: regmap instance for the device + * @buf: buffer containing the data to write + * @size: number of bytes to write + * @offset: starting register address + * + * Attempts to write a block of data to the device with retries and a timeout. + * Some M24LR devices may NACK I2C writes while an internal write operation + * is in progress. This function retries the write operation with a short delay + * until it succeeds or the timeout is reached. + * + * Returns: + * Number of bytes written on success, + * -ETIMEDOUT if the write fails within the timeout window. 
+ */ +static ssize_t m24lr_regmap_write(struct regmap *regmap, const u8 *buf, + size_t size, unsigned int offset) +{ + int err; + unsigned long timeout, write_time; + ssize_t ret = -ETIMEDOUT; + + timeout = jiffies + msecs_to_jiffies(M24LR_WRITE_TIMEOUT); + + do { + write_time = jiffies; + + err = regmap_bulk_write(regmap, offset, buf, size); + if (!err) { + ret = size; + break; + } + + usleep_range(1000, 2000); + } while (time_before(write_time, timeout)); + + return ret; +} + +static ssize_t m24lr_read(struct m24lr *m24lr, u8 *buf, size_t size, + unsigned int offset, bool is_eeprom) +{ + struct regmap *regmap; + ssize_t ret; + + if (is_eeprom) + regmap = m24lr->eeprom_regmap; + else + regmap = m24lr->ctl_regmap; + + mutex_lock(&m24lr->lock); + ret = m24lr_regmap_read(regmap, buf, size, offset); + mutex_unlock(&m24lr->lock); + + return ret; +} + +/** + * m24lr_write - write buffer to M24LR device with page alignment handling + * @m24lr: pointer to driver context + * @buf: data buffer to write + * @size: number of bytes to write + * @offset: target register address in the device + * @is_eeprom: true if the write should target the EEPROM, + * false if it should target the system parameters sector. + * + * Writes data to the M24LR device using regmap, split into chunks no larger + * than page_size to respect device-specific write limitations (e.g., page + * size or I2C hold-time concerns). Each chunk is aligned to the page boundary + * defined by page_size. + * + * Returns: + * Total number of bytes written on success, + * A negative error code if any write fails. 
+ */ +static ssize_t m24lr_write(struct m24lr *m24lr, const u8 *buf, size_t size, + unsigned int offset, bool is_eeprom) +{ + unsigned int n, next_sector; + struct regmap *regmap; + ssize_t ret = 0; + ssize_t err; + + if (is_eeprom) + regmap = m24lr->eeprom_regmap; + else + regmap = m24lr->ctl_regmap; + + n = min_t(unsigned int, size, m24lr->page_size); + next_sector = roundup(offset + 1, m24lr->page_size); + if (offset + n > next_sector) + n = next_sector - offset; + + mutex_lock(&m24lr->lock); + while (n) { + err = m24lr_regmap_write(regmap, buf + offset, n, offset); + if (IS_ERR_VALUE(err)) { + if (!ret) + ret = err; + + break; + } + + offset += n; + size -= n; + ret += n; + n = min_t(unsigned int, size, m24lr->page_size); + } + mutex_unlock(&m24lr->lock); + + return ret; +} + +/** + * m24lr_write_pass - Write password to M24LR043-R using secure format + * @m24lr: Pointer to device control structure + * @buf: Input buffer containing hex-encoded password + * @count: Number of bytes in @buf + * @code: Operation code to embed between password copies + * + * This function parses a 4-byte password, encodes it in big-endian format, + * and constructs a 9-byte sequence of the form: + * + * [BE(password), code, BE(password)] + * + * The result is written to register 0x0900 (2304), which is the password + * register in M24LR04E-R chip. 
+ * + * Return: Number of bytes written on success, or negative error code on failure + */ +static ssize_t m24lr_write_pass(struct m24lr *m24lr, const char *buf, + size_t count, u8 code) +{ + __be32 be_pass; + u8 output[9]; + ssize_t ret; + u32 pass; + int err; + + if (!count) + return -EINVAL; + + if (count > 8) + return -EINVAL; + + err = kstrtou32(buf, 16, &pass); + if (err) + return err; + + be_pass = cpu_to_be32(pass); + + memcpy(output, &be_pass, sizeof(be_pass)); + output[4] = code; + memcpy(output + 5, &be_pass, sizeof(be_pass)); + + mutex_lock(&m24lr->lock); + ret = m24lr_regmap_write(m24lr->ctl_regmap, output, 9, 2304); + mutex_unlock(&m24lr->lock); + + return ret; +} + +static ssize_t m24lr_read_reg_le(struct m24lr *m24lr, u64 *val, + unsigned int reg_addr, + unsigned int reg_size) +{ + ssize_t ret; + __le64 input = 0; + + ret = m24lr_read(m24lr, (u8 *)&input, reg_size, reg_addr, false); + if (IS_ERR_VALUE(ret)) + return ret; + + if (ret != reg_size) + return -EINVAL; + + switch (reg_size) { + case 1: + *val = *(u8 *)&input; + break; + case 2: + *val = le16_to_cpu((__le16)input); + break; + case 4: + *val = le32_to_cpu((__le32)input); + break; + case 8: + *val = le64_to_cpu((__le64)input); + break; + default: + return -EINVAL; + } + + return 0; +} + +static int m24lr_nvmem_read(void *priv, unsigned int offset, void *val, + size_t bytes) +{ + ssize_t err; + struct m24lr *m24lr = priv; + + if (!bytes) + return bytes; + + if (offset + bytes > m24lr->eeprom_size) + return -EINVAL; + + err = m24lr_read(m24lr, val, bytes, offset, true); + if (IS_ERR_VALUE(err)) + return err; + + return 0; +} + +static int m24lr_nvmem_write(void *priv, unsigned int offset, void *val, + size_t bytes) +{ + ssize_t err; + struct m24lr *m24lr = priv; + + if (!bytes) + return -EINVAL; + + if (offset + bytes > m24lr->eeprom_size) + return -EINVAL; + + err = m24lr_write(m24lr, val, bytes, offset, true); + if (IS_ERR_VALUE(err)) + return err; + + return 0; +} + +static ssize_t 
m24lr_ctl_sss_read(struct file *filep, struct kobject *kobj, + const struct bin_attribute *attr, char *buf, + loff_t offset, size_t count) +{ + struct m24lr *m24lr = attr->private; + + if (!count) + return count; + + if (size_add(offset, count) > m24lr->sss_len) + return -EINVAL; + + return m24lr_read(m24lr, buf, count, offset, false); +} + +static ssize_t m24lr_ctl_sss_write(struct file *filep, struct kobject *kobj, + const struct bin_attribute *attr, char *buf, + loff_t offset, size_t count) +{ + struct m24lr *m24lr = attr->private; + + if (!count) + return -EINVAL; + + if (size_add(offset, count) > m24lr->sss_len) + return -EINVAL; + + return m24lr_write(m24lr, buf, count, offset, false); +} +static BIN_ATTR(sss, 0600, m24lr_ctl_sss_read, m24lr_ctl_sss_write, 0); + +static ssize_t new_pass_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct m24lr *m24lr = i2c_get_clientdata(to_i2c_client(dev)); + + return m24lr_write_pass(m24lr, buf, count, 7); +} +static DEVICE_ATTR_WO(new_pass); + +static ssize_t unlock_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct m24lr *m24lr = i2c_get_clientdata(to_i2c_client(dev)); + + return m24lr_write_pass(m24lr, buf, count, 9); +} +static DEVICE_ATTR_WO(unlock); + +static ssize_t uid_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct m24lr *m24lr = i2c_get_clientdata(to_i2c_client(dev)); + + return sysfs_emit(buf, "%llx\n", m24lr->uid); +} +static DEVICE_ATTR_RO(uid); + +static ssize_t total_sectors_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct m24lr *m24lr = i2c_get_clientdata(to_i2c_client(dev)); + + return sysfs_emit(buf, "%x\n", m24lr->sss_len); +} +static DEVICE_ATTR_RO(total_sectors); + +static struct attribute *m24lr_ctl_dev_attrs[] = { + &dev_attr_unlock.attr, + &dev_attr_new_pass.attr, + &dev_attr_uid.attr, + &dev_attr_total_sectors.attr, + NULL, +}; + 
+static const struct m24lr_chip *m24lr_get_chip(struct device *dev) +{ + const struct m24lr_chip *ret; + const struct i2c_device_id *id; + + id = i2c_match_id(m24lr_ids, to_i2c_client(dev)); + + if (dev->of_node && of_match_device(m24lr_of_match, dev)) + ret = of_device_get_match_data(dev); + else if (id) + ret = (void *)id->driver_data; + else + ret = acpi_device_get_match_data(dev); + + return ret; +} + +static int m24lr_probe(struct i2c_client *client) +{ + struct regmap_config eeprom_regmap_conf = {0}; + struct nvmem_config nvmem_conf = {0}; + struct device *dev = &client->dev; + struct i2c_client *eeprom_client; + const struct m24lr_chip *chip; + struct regmap *eeprom_regmap; + struct nvmem_device *nvmem; + struct regmap *ctl_regmap; + struct m24lr *m24lr; + u32 regs[2]; + long err; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) + return -EOPNOTSUPP; + + chip = m24lr_get_chip(dev); + if (!chip) + return -ENODEV; + + m24lr = devm_kzalloc(dev, sizeof(struct m24lr), GFP_KERNEL); + if (!m24lr) + return -ENOMEM; + + err = device_property_read_u32_array(dev, "reg", regs, ARRAY_SIZE(regs)); + if (err) + return dev_err_probe(dev, err, "Failed to read 'reg' property\n"); + + /* Create a second I2C client for the eeprom interface */ + eeprom_client = devm_i2c_new_dummy_device(dev, client->adapter, regs[1]); + if (IS_ERR(eeprom_client)) + return dev_err_probe(dev, PTR_ERR(eeprom_client), + "Failed to create dummy I2C client for the EEPROM\n"); + + ctl_regmap = devm_regmap_init_i2c(client, &m24lr_ctl_regmap_conf); + if (IS_ERR(ctl_regmap)) + return dev_err_probe(dev, PTR_ERR(ctl_regmap), + "Failed to init regmap\n"); + + eeprom_regmap_conf.name = "m24lr_eeprom"; + eeprom_regmap_conf.reg_bits = 16; + eeprom_regmap_conf.val_bits = 8; + eeprom_regmap_conf.disable_locking = true; + eeprom_regmap_conf.max_register = chip->eeprom_size - 1; + + eeprom_regmap = devm_regmap_init_i2c(eeprom_client, + &eeprom_regmap_conf); + if (IS_ERR(eeprom_regmap)) + return 
dev_err_probe(dev, PTR_ERR(eeprom_regmap), + "Failed to init regmap\n"); + + mutex_init(&m24lr->lock); + m24lr->sss_len = chip->sss_len; + m24lr->page_size = chip->page_size; + m24lr->eeprom_size = chip->eeprom_size; + m24lr->eeprom_regmap = eeprom_regmap; + m24lr->ctl_regmap = ctl_regmap; + + nvmem_conf.dev = &eeprom_client->dev; + nvmem_conf.owner = THIS_MODULE; + nvmem_conf.type = NVMEM_TYPE_EEPROM; + nvmem_conf.reg_read = m24lr_nvmem_read; + nvmem_conf.reg_write = m24lr_nvmem_write; + nvmem_conf.size = chip->eeprom_size; + nvmem_conf.word_size = 1; + nvmem_conf.stride = 1; + nvmem_conf.priv = m24lr; + + nvmem = devm_nvmem_register(dev, &nvmem_conf); + if (IS_ERR(nvmem)) + return dev_err_probe(dev, PTR_ERR(nvmem), + "Failed to register nvmem\n"); + + i2c_set_clientdata(client, m24lr); + i2c_set_clientdata(eeprom_client, m24lr); + + bin_attr_sss.size = chip->sss_len; + bin_attr_sss.private = m24lr; + err = sysfs_create_bin_file(&dev->kobj, &bin_attr_sss); + if (err) + return dev_err_probe(dev, err, + "Failed to create sss bin file\n"); + + /* test by reading the uid, if success store it */ + err = m24lr_read_reg_le(m24lr, &m24lr->uid, 2324, sizeof(m24lr->uid)); + if (IS_ERR_VALUE(err)) + goto remove_bin_file; + + return 0; + +remove_bin_file: + sysfs_remove_bin_file(&dev->kobj, &bin_attr_sss); + + return err; +} + +static void m24lr_remove(struct i2c_client *client) +{ + sysfs_remove_bin_file(&client->dev.kobj, &bin_attr_sss); +} + +ATTRIBUTE_GROUPS(m24lr_ctl_dev); + +static struct i2c_driver m24lr_driver = { + .driver = { + .name = "m24lr", + .of_match_table = m24lr_of_match, + .dev_groups = m24lr_ctl_dev_groups, + }, + .probe = m24lr_probe, + .remove = m24lr_remove, + .id_table = m24lr_ids, +}; +module_i2c_driver(m24lr_driver); + +MODULE_AUTHOR("Abd-Alrhman Masalkhi"); +MODULE_DESCRIPTION("st m24lr control driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/eeprom/max6875.c b/drivers/misc/eeprom/max6875.c index cb6b1efeafe0..a3e4cada3b51 100644 --- 
a/drivers/misc/eeprom/max6875.c +++ b/drivers/misc/eeprom/max6875.c @@ -104,7 +104,7 @@ exit_up: } static ssize_t max6875_read(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, + const struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct i2c_client *client = kobj_to_i2c_client(kobj); @@ -183,7 +183,7 @@ static void max6875_remove(struct i2c_client *client) } static const struct i2c_device_id max6875_id[] = { - { "max6875", 0 }, + { "max6875" }, { } }; MODULE_DEVICE_TABLE(i2c, max6875_id); diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c index 76511d279aff..ca4c420e4a2f 100644 --- a/drivers/misc/enclosure.c +++ b/drivers/misc/enclosure.c @@ -17,6 +17,7 @@ #include <linux/module.h> #include <linux/mutex.h> #include <linux/slab.h> +#include <linux/string_choices.h> static LIST_HEAD(container_list); static DEFINE_MUTEX(container_list_lock); @@ -592,7 +593,7 @@ static ssize_t get_component_power_status(struct device *cdev, if (ecomp->power_status == -1) return (edev->cb->get_power_status) ? -EIO : -ENOTTY; - return sysfs_emit(buf, "%s\n", ecomp->power_status ? 
"on" : "off"); + return sysfs_emit(buf, "%s\n", str_on_off(ecomp->power_status)); } static ssize_t set_component_power_status(struct device *cdev, diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c index 9666d28037e1..ee652ef01534 100644 --- a/drivers/misc/fastrpc.c +++ b/drivers/misc/fastrpc.c @@ -13,6 +13,7 @@ #include <linux/module.h> #include <linux/of_address.h> #include <linux/of.h> +#include <linux/platform_device.h> #include <linux/sort.h> #include <linux/of_platform.h> #include <linux/rpmsg.h> @@ -26,7 +27,7 @@ #define MDSP_DOMAIN_ID (1) #define SDSP_DOMAIN_ID (2) #define CDSP_DOMAIN_ID (3) -#define FASTRPC_DEV_MAX 4 /* adsp, mdsp, slpi, cdsp*/ +#define GDSP_DOMAIN_ID (4) #define FASTRPC_MAX_SESSIONS 14 #define FASTRPC_MAX_VMIDS 16 #define FASTRPC_ALIGN 128 @@ -104,8 +105,6 @@ #define miscdev_to_fdevice(d) container_of(d, struct fastrpc_device, miscdev) -static const char *domains[FASTRPC_DEV_MAX] = { "adsp", "mdsp", - "sdsp", "cdsp"}; struct fastrpc_phy_page { u64 addr; /* physical address */ u64 size; /* size of contiguous region */ @@ -137,14 +136,14 @@ struct fastrpc_mmap_rsp_msg { }; struct fastrpc_mmap_req_msg { - s32 pgid; + s32 client_id; u32 flags; u64 vaddr; s32 num; }; struct fastrpc_mem_map_req_msg { - s32 pgid; + s32 client_id; s32 fd; s32 offset; u32 flags; @@ -154,20 +153,20 @@ struct fastrpc_mem_map_req_msg { }; struct fastrpc_munmap_req_msg { - s32 pgid; + s32 client_id; u64 vaddr; u64 size; }; struct fastrpc_mem_unmap_req_msg { - s32 pgid; + s32 client_id; s32 fd; u64 vaddrin; u64 len; }; struct fastrpc_msg { - int pid; /* process group id */ + int client_id; /* process client id */ int tid; /* thread id */ u64 ctx; /* invoke caller context */ u32 handle; /* handle to invoke */ @@ -232,7 +231,7 @@ struct fastrpc_invoke_ctx { int nbufs; int retval; int pid; - int tgid; + int client_id; u32 sc; u32 *crc; u64 ctxid; @@ -262,7 +261,6 @@ struct fastrpc_channel_ctx { int domain_id; int sesscount; int vmcount; - u64 perms; struct 
qcom_scm_vmperm vmperms[FASTRPC_MAX_VMIDS]; struct rpmsg_device *rpdev; struct fastrpc_session_ctx session[FASTRPC_MAX_SESSIONS]; @@ -298,7 +296,7 @@ struct fastrpc_user { struct fastrpc_session_ctx *sctx; struct fastrpc_buf *init_mem; - int tgid; + int client_id; int pd; bool is_secure_dev; /* Lock for lists */ @@ -322,11 +320,11 @@ static void fastrpc_free_map(struct kref *ref) perm.vmid = QCOM_SCM_VMID_HLOS; perm.perm = QCOM_SCM_PERM_RWX; - err = qcom_scm_assign_mem(map->phys, map->size, + err = qcom_scm_assign_mem(map->phys, map->len, &src_perms, &perm, 1); if (err) { - dev_err(map->fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d", - map->phys, map->size, err); + dev_err(map->fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d\n", + map->phys, map->len, err); return; } } @@ -362,32 +360,29 @@ static int fastrpc_map_get(struct fastrpc_map *map) static int fastrpc_map_lookup(struct fastrpc_user *fl, int fd, - struct fastrpc_map **ppmap, bool take_ref) + struct fastrpc_map **ppmap) { - struct fastrpc_session_ctx *sess = fl->sctx; struct fastrpc_map *map = NULL; + struct dma_buf *buf; int ret = -ENOENT; + buf = dma_buf_get(fd); + if (IS_ERR(buf)) + return PTR_ERR(buf); + spin_lock(&fl->lock); list_for_each_entry(map, &fl->maps, node) { - if (map->fd != fd) + if (map->fd != fd || map->buf != buf) continue; - if (take_ref) { - ret = fastrpc_map_get(map); - if (ret) { - dev_dbg(sess->dev, "%s: Failed to get map fd=%d ret=%d\n", - __func__, fd, ret); - break; - } - } - *ppmap = map; ret = 0; break; } spin_unlock(&fl->lock); + dma_buf_put(buf); + return ret; } @@ -613,7 +608,7 @@ static struct fastrpc_invoke_ctx *fastrpc_context_alloc( ctx->sc = sc; ctx->retval = -1; ctx->pid = current->pid; - ctx->tgid = user->tgid; + ctx->client_id = user->client_id; ctx->cctx = cctx; init_completion(&ctx->work); INIT_WORK(&ctx->put_work, fastrpc_context_put_wq); @@ -751,15 +746,14 @@ static const struct dma_buf_ops fastrpc_dma_buf_ops = { 
.release = fastrpc_release, }; -static int fastrpc_map_create(struct fastrpc_user *fl, int fd, +static int fastrpc_map_attach(struct fastrpc_user *fl, int fd, u64 len, u32 attr, struct fastrpc_map **ppmap) { struct fastrpc_session_ctx *sess = fl->sctx; struct fastrpc_map *map = NULL; - int err = 0; - - if (!fastrpc_map_lookup(fl, fd, ppmap, true)) - return 0; + struct sg_table *table; + struct scatterlist *sgl = NULL; + int err = 0, sgl_index = 0; map = kzalloc(sizeof(*map), GFP_KERNEL); if (!map) @@ -783,11 +777,12 @@ static int fastrpc_map_create(struct fastrpc_user *fl, int fd, goto attach_err; } - map->table = dma_buf_map_attachment_unlocked(map->attach, DMA_BIDIRECTIONAL); - if (IS_ERR(map->table)) { - err = PTR_ERR(map->table); + table = dma_buf_map_attachment_unlocked(map->attach, DMA_BIDIRECTIONAL); + if (IS_ERR(table)) { + err = PTR_ERR(table); goto map_err; } + map->table = table; if (attr & FASTRPC_ATTR_SECUREMAP) { map->phys = sg_phys(map->table->sgl); @@ -795,7 +790,15 @@ static int fastrpc_map_create(struct fastrpc_user *fl, int fd, map->phys = sg_dma_address(map->table->sgl); map->phys += ((u64)fl->sctx->sid << 32); } - map->size = len; + for_each_sg(map->table->sgl, sgl, map->table->nents, + sgl_index) + map->size += sg_dma_len(sgl); + if (len > map->size) { + dev_dbg(sess->dev, "Bad size passed len 0x%llx map size 0x%llx\n", + len, map->size); + err = -EINVAL; + goto map_err; + } map->va = sg_virt(map->table->sgl); map->len = len; @@ -812,10 +815,10 @@ static int fastrpc_map_create(struct fastrpc_user *fl, int fd, dst_perms[1].vmid = fl->cctx->vmperms[0].vmid; dst_perms[1].perm = QCOM_SCM_PERM_RWX; map->attr = attr; - err = qcom_scm_assign_mem(map->phys, (u64)map->size, &src_perms, dst_perms, 2); + err = qcom_scm_assign_mem(map->phys, (u64)map->len, &src_perms, dst_perms, 2); if (err) { - dev_err(sess->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d", - map->phys, map->size, err); + dev_err(sess->dev, "Failed to assign memory with 
phys 0x%llx size 0x%llx err %d\n", + map->phys, map->len, err); goto map_err; } } @@ -836,6 +839,24 @@ get_err: return err; } +static int fastrpc_map_create(struct fastrpc_user *fl, int fd, + u64 len, u32 attr, struct fastrpc_map **ppmap) +{ + struct fastrpc_session_ctx *sess = fl->sctx; + int err = 0; + + if (!fastrpc_map_lookup(fl, fd, ppmap)) { + if (!fastrpc_map_get(*ppmap)) + return 0; + dev_dbg(sess->dev, "%s: Failed to get map fd=%d\n", + __func__, fd); + } + + err = fastrpc_map_attach(fl, fd, len, attr, ppmap); + + return err; +} + /* * Fastrpc payload buffer with metadata looks like: * @@ -908,8 +929,12 @@ static int fastrpc_create_maps(struct fastrpc_invoke_ctx *ctx) ctx->args[i].length == 0) continue; - err = fastrpc_map_create(ctx->fl, ctx->args[i].fd, - ctx->args[i].length, ctx->args[i].attr, &ctx->maps[i]); + if (i < ctx->nbufs) + err = fastrpc_map_create(ctx->fl, ctx->args[i].fd, + ctx->args[i].length, ctx->args[i].attr, &ctx->maps[i]); + else + err = fastrpc_map_attach(ctx->fl, ctx->args[i].fd, + ctx->args[i].length, ctx->args[i].attr, &ctx->maps[i]); if (err) { dev_err(dev, "Error Creating map %d\n", err); return -EINVAL; @@ -951,10 +976,14 @@ static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx) ctx->msg_sz = pkt_size; - err = fastrpc_buf_alloc(ctx->fl, dev, pkt_size, &ctx->buf); + if (ctx->fl->sctx->sid) + err = fastrpc_buf_alloc(ctx->fl, dev, pkt_size, &ctx->buf); + else + err = fastrpc_remote_heap_alloc(ctx->fl, dev, pkt_size, &ctx->buf); if (err) return err; + memset(ctx->buf->virt, 0, pkt_size); rpra = ctx->buf->virt; list = fastrpc_invoke_buf_start(rpra, ctx->nscalars); pages = fastrpc_phy_page_start(list, ctx->nscalars); @@ -985,7 +1014,7 @@ static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx) mmap_read_lock(current->mm); vma = find_vma(current->mm, ctx->args[i].ptr); if (vma) - pages[i].addr += ctx->args[i].ptr - + pages[i].addr += (ctx->args[i].ptr & PAGE_MASK) - vma->vm_start; 
mmap_read_unlock(current->mm); @@ -1012,8 +1041,8 @@ static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx) (pkt_size - rlen); pages[i].addr = pages[i].addr & PAGE_MASK; - pg_start = (args & PAGE_MASK) >> PAGE_SHIFT; - pg_end = ((args + len - 1) & PAGE_MASK) >> PAGE_SHIFT; + pg_start = (rpra[i].buf.pv & PAGE_MASK) >> PAGE_SHIFT; + pg_end = ((rpra[i].buf.pv + len - 1) & PAGE_MASK) >> PAGE_SHIFT; pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE; args = args + mlen; rlen -= mlen; @@ -1064,6 +1093,7 @@ static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx, struct fastrpc_phy_page *pages; u64 *fdlist; int i, inbufs, outbufs, handles; + int ret = 0; inbufs = REMOTE_SCALARS_INBUFS(ctx->sc); outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc); @@ -1079,22 +1109,26 @@ static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx, u64 len = rpra[i].buf.len; if (!kernel) { - if (copy_to_user((void __user *)dst, src, len)) - return -EFAULT; + if (copy_to_user((void __user *)dst, src, len)) { + ret = -EFAULT; + goto cleanup_fdlist; + } } else { memcpy(dst, src, len); } } } +cleanup_fdlist: + /* Clean up fdlist which is updated by DSP */ for (i = 0; i < FASTRPC_MAX_FDLIST; i++) { if (!fdlist[i]) break; - if (!fastrpc_map_lookup(fl, (int)fdlist[i], &mmap, false)) + if (!fastrpc_map_lookup(fl, (int)fdlist[i], &mmap)) fastrpc_map_put(mmap); } - return 0; + return ret; } static int fastrpc_invoke_send(struct fastrpc_session_ctx *sctx, @@ -1107,11 +1141,11 @@ static int fastrpc_invoke_send(struct fastrpc_session_ctx *sctx, int ret; cctx = fl->cctx; - msg->pid = fl->tgid; + msg->client_id = fl->client_id; msg->tid = current->pid; if (kernel) - msg->pid = 0; + msg->client_id = 0; msg->ctx = ctx->ctxid | fl->pd; msg->handle = handle; @@ -1153,11 +1187,9 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel, if (IS_ERR(ctx)) return PTR_ERR(ctx); - if (ctx->nscalars) { - err = fastrpc_get_args(kernel, ctx); - if (err) - goto bail; - } + err = 
fastrpc_get_args(kernel, ctx); + if (err) + goto bail; /* make sure that all CPU memory writes are seen by DSP */ dma_wmb(); @@ -1176,20 +1208,18 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel, if (err) goto bail; + /* make sure that all memory writes by DSP are seen by CPU */ + dma_rmb(); + /* populate all the output buffers with results */ + err = fastrpc_put_args(ctx, kernel); + if (err) + goto bail; + /* Check the response from remote dsp */ err = ctx->retval; if (err) goto bail; - if (ctx->nscalars) { - /* make sure that all memory writes by DSP are seen by CPU */ - dma_rmb(); - /* populate all the output buffers with results */ - err = fastrpc_put_args(ctx, kernel); - if (err) - goto bail; - } - bail: if (err != -ERESTARTSYS && err != -ETIMEDOUT) { /* We are done with this compute context */ @@ -1222,7 +1252,7 @@ static bool is_session_rejected(struct fastrpc_user *fl, bool unsigned_pd_reques * that does not support unsigned PD offload */ if (!fl->cctx->unsigned_support || !unsigned_pd_request) { - dev_err(&fl->cctx->rpdev->dev, "Error: Untrusted application trying to offload to signed PD"); + dev_err(&fl->cctx->rpdev->dev, "Error: Untrusted application trying to offload to signed PD\n"); return true; } } @@ -1238,8 +1268,9 @@ static int fastrpc_init_create_static_process(struct fastrpc_user *fl, struct fastrpc_phy_page pages[1]; char *name; int err; + bool scm_done = false; struct { - int pgid; + int client_id; u32 namelen; u32 pageslen; } inbuf; @@ -1259,17 +1290,12 @@ static int fastrpc_init_create_static_process(struct fastrpc_user *fl, goto err; } - name = kzalloc(init.namelen, GFP_KERNEL); - if (!name) { - err = -ENOMEM; + name = memdup_user(u64_to_user_ptr(init.name), init.namelen); + if (IS_ERR(name)) { + err = PTR_ERR(name); goto err; } - if (copy_from_user(name, (void __user *)(uintptr_t)init.name, init.namelen)) { - err = -EFAULT; - goto err_name; - } - if (!fl->cctx->remote_heap) { err = fastrpc_remote_heap_alloc(fl, 
fl->sctx->dev, init.memlen, &fl->cctx->remote_heap); @@ -1278,19 +1304,22 @@ static int fastrpc_init_create_static_process(struct fastrpc_user *fl, /* Map if we have any heap VMIDs associated with this ADSP Static Process. */ if (fl->cctx->vmcount) { + u64 src_perms = BIT(QCOM_SCM_VMID_HLOS); + err = qcom_scm_assign_mem(fl->cctx->remote_heap->phys, (u64)fl->cctx->remote_heap->size, - &fl->cctx->perms, + &src_perms, fl->cctx->vmperms, fl->cctx->vmcount); if (err) { - dev_err(fl->sctx->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d", + dev_err(fl->sctx->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d\n", fl->cctx->remote_heap->phys, fl->cctx->remote_heap->size, err); goto err_map; } + scm_done = true; } } - inbuf.pgid = fl->tgid; + inbuf.client_id = fl->client_id; inbuf.namelen = init.namelen; inbuf.pageslen = 0; fl->pd = USER_PD; @@ -1318,19 +1347,25 @@ static int fastrpc_init_create_static_process(struct fastrpc_user *fl, goto err_invoke; kfree(args); + kfree(name); return 0; err_invoke: - if (fl->cctx->vmcount) { - struct qcom_scm_vmperm perm; + if (fl->cctx->vmcount && scm_done) { + u64 src_perms = 0; + struct qcom_scm_vmperm dst_perms; + u32 i; + + for (i = 0; i < fl->cctx->vmcount; i++) + src_perms |= BIT(fl->cctx->vmperms[i].vmid); - perm.vmid = QCOM_SCM_VMID_HLOS; - perm.perm = QCOM_SCM_PERM_RWX; + dst_perms.vmid = QCOM_SCM_VMID_HLOS; + dst_perms.perm = QCOM_SCM_PERM_RWX; err = qcom_scm_assign_mem(fl->cctx->remote_heap->phys, (u64)fl->cctx->remote_heap->size, - &fl->cctx->perms, &perm, 1); + &src_perms, &dst_perms, 1); if (err) - dev_err(fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d", + dev_err(fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d\n", fl->cctx->remote_heap->phys, fl->cctx->remote_heap->size, err); } err_map: @@ -1354,7 +1389,7 @@ static int fastrpc_init_create_process(struct fastrpc_user *fl, int memlen; int err; struct { - int pgid; + int client_id; u32 namelen; 
u32 filelen; u32 pageslen; @@ -1386,7 +1421,7 @@ static int fastrpc_init_create_process(struct fastrpc_user *fl, goto err; } - inbuf.pgid = fl->tgid; + inbuf.client_id = fl->client_id; inbuf.namelen = strlen(current->comm) + 1; inbuf.filelen = init.filelen; inbuf.pageslen = 1; @@ -1460,8 +1495,9 @@ err: } static struct fastrpc_session_ctx *fastrpc_session_alloc( - struct fastrpc_channel_ctx *cctx) + struct fastrpc_user *fl) { + struct fastrpc_channel_ctx *cctx = fl->cctx; struct fastrpc_session_ctx *session = NULL; unsigned long flags; int i; @@ -1471,6 +1507,8 @@ static struct fastrpc_session_ctx *fastrpc_session_alloc( if (!cctx->session[i].used && cctx->session[i].valid) { cctx->session[i].used = true; session = &cctx->session[i]; + /* any non-zero ID will work, session_idx + 1 is the simplest one */ + fl->client_id = i + 1; break; } } @@ -1492,12 +1530,12 @@ static void fastrpc_session_free(struct fastrpc_channel_ctx *cctx, static int fastrpc_release_current_dsp_process(struct fastrpc_user *fl) { struct fastrpc_invoke_args args[1]; - int tgid = 0; + int client_id = 0; u32 sc; - tgid = fl->tgid; - args[0].ptr = (u64)(uintptr_t) &tgid; - args[0].length = sizeof(tgid); + client_id = fl->client_id; + args[0].ptr = (u64)(uintptr_t) &client_id; + args[0].length = sizeof(client_id); args[0].fd = -1; sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_RELEASE, 1, 0); @@ -1570,11 +1608,10 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp) INIT_LIST_HEAD(&fl->maps); INIT_LIST_HEAD(&fl->mmaps); INIT_LIST_HEAD(&fl->user); - fl->tgid = current->tgid; fl->cctx = cctx; fl->is_secure_dev = fdevice->secure; - fl->sctx = fastrpc_session_alloc(cctx); + fl->sctx = fastrpc_session_alloc(fl); if (!fl->sctx) { dev_err(&cctx->rpdev->dev, "No session available\n"); mutex_destroy(&fl->mutex); @@ -1638,11 +1675,11 @@ static int fastrpc_dmabuf_alloc(struct fastrpc_user *fl, char __user *argp) static int fastrpc_init_attach(struct fastrpc_user *fl, int pd) { struct 
fastrpc_invoke_args args[1]; - int tgid = fl->tgid; + int client_id = fl->client_id; u32 sc; - args[0].ptr = (u64)(uintptr_t) &tgid; - args[0].length = sizeof(tgid); + args[0].ptr = (u64)(uintptr_t) &client_id; + args[0].length = sizeof(client_id); args[0].fd = -1; sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_ATTACH, 1, 0); fl->pd = pd; @@ -1686,16 +1723,20 @@ static int fastrpc_get_info_from_dsp(struct fastrpc_user *fl, uint32_t *dsp_attr { struct fastrpc_invoke_args args[2] = { 0 }; - /* Capability filled in userspace */ + /* + * Capability filled in userspace. This carries the information + * about the remoteproc support which is fetched from the remoteproc + * sysfs node by userspace. + */ dsp_attr_buf[0] = 0; + dsp_attr_buf_len -= 1; args[0].ptr = (u64)(uintptr_t)&dsp_attr_buf_len; args[0].length = sizeof(dsp_attr_buf_len); args[0].fd = -1; args[1].ptr = (u64)(uintptr_t)&dsp_attr_buf[1]; - args[1].length = dsp_attr_buf_len; + args[1].length = dsp_attr_buf_len * sizeof(u32); args[1].fd = -1; - fl->pd = USER_PD; return fastrpc_internal_invoke(fl, true, FASTRPC_DSP_UTILITIES_HANDLE, FASTRPC_SCALARS(0, 1, 1), args); @@ -1708,7 +1749,6 @@ static int fastrpc_get_info_from_kernel(struct fastrpc_ioctl_capability *cap, uint32_t attribute_id = cap->attribute_id; uint32_t *dsp_attributes; unsigned long flags; - uint32_t domain = cap->domain; int err; spin_lock_irqsave(&cctx->lock, flags); @@ -1723,10 +1763,10 @@ static int fastrpc_get_info_from_kernel(struct fastrpc_ioctl_capability *cap, if (!dsp_attributes) return -ENOMEM; - err = fastrpc_get_info_from_dsp(fl, dsp_attributes, FASTRPC_MAX_DSP_ATTRIBUTES_LEN); + err = fastrpc_get_info_from_dsp(fl, dsp_attributes, FASTRPC_MAX_DSP_ATTRIBUTES); if (err == DSP_UNSUPPORTED_API) { dev_info(&cctx->rpdev->dev, - "Warning: DSP capabilities not supported on domain: %d\n", domain); + "Warning: DSP capabilities not supported\n"); kfree(dsp_attributes); return -EOPNOTSUPP; } else if (err) { @@ -1754,17 +1794,6 @@ static int 
fastrpc_get_dsp_info(struct fastrpc_user *fl, char __user *argp) return -EFAULT; cap.capability = 0; - if (cap.domain >= FASTRPC_DEV_MAX) { - dev_err(&fl->cctx->rpdev->dev, "Error: Invalid domain id:%d, err:%d\n", - cap.domain, err); - return -ECHRNG; - } - - /* Fastrpc Capablities does not support modem domain */ - if (cap.domain == MDSP_DOMAIN_ID) { - dev_err(&fl->cctx->rpdev->dev, "Error: modem not supported %d\n", err); - return -ECHRNG; - } if (cap.attribute_id >= FASTRPC_MAX_DSP_ATTRIBUTES) { dev_err(&fl->cctx->rpdev->dev, "Error: invalid attribute: %d, err: %d\n", @@ -1776,7 +1805,7 @@ static int fastrpc_get_dsp_info(struct fastrpc_user *fl, char __user *argp) if (err) return err; - if (copy_to_user(argp, &cap.capability, sizeof(cap.capability))) + if (copy_to_user(argp, &cap, sizeof(cap))) return -EFAULT; return 0; @@ -1790,7 +1819,7 @@ static int fastrpc_req_munmap_impl(struct fastrpc_user *fl, struct fastrpc_buf * int err; u32 sc; - req_msg.pgid = fl->tgid; + req_msg.client_id = fl->client_id; req_msg.size = buf->size; req_msg.vaddr = buf->raddr; @@ -1866,13 +1895,17 @@ static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp) return -EINVAL; } - err = fastrpc_buf_alloc(fl, fl->sctx->dev, req.size, &buf); + if (req.flags == ADSP_MMAP_REMOTE_HEAP_ADDR) + err = fastrpc_remote_heap_alloc(fl, dev, req.size, &buf); + else + err = fastrpc_buf_alloc(fl, dev, req.size, &buf); + if (err) { dev_err(dev, "failed to allocate buffer\n"); return err; } - req_msg.pgid = fl->tgid; + req_msg.client_id = fl->client_id; req_msg.flags = req.flags; req_msg.vaddr = req.vaddrin; req_msg.num = sizeof(pages); @@ -1894,7 +1927,8 @@ static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp) &args[0]); if (err) { dev_err(dev, "mmap error (len 0x%08llx)\n", buf->size); - goto err_invoke; + fastrpc_buf_free(buf); + return err; } /* update the buffer to be able to deallocate the memory on the DSP */ @@ -1905,12 +1939,10 @@ static int fastrpc_req_mmap(struct 
fastrpc_user *fl, char __user *argp) /* Add memory to static PD pool, protection thru hypervisor */ if (req.flags == ADSP_MMAP_REMOTE_HEAP_ADDR && fl->cctx->vmcount) { - struct qcom_scm_vmperm perm; + u64 src_perms = BIT(QCOM_SCM_VMID_HLOS); - perm.vmid = QCOM_SCM_VMID_HLOS; - perm.perm = QCOM_SCM_PERM_RWX; - err = qcom_scm_assign_mem(buf->phys, buf->size, - &fl->cctx->perms, &perm, 1); + err = qcom_scm_assign_mem(buf->phys, (u64)buf->size, + &src_perms, fl->cctx->vmperms, fl->cctx->vmcount); if (err) { dev_err(fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d", buf->phys, buf->size, err); @@ -1934,8 +1966,6 @@ static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp) err_assign: fastrpc_req_munmap_impl(fl, buf); -err_invoke: - fastrpc_buf_free(buf); return err; } @@ -1964,7 +1994,7 @@ static int fastrpc_req_mem_unmap_impl(struct fastrpc_user *fl, struct fastrpc_me return -EINVAL; } - req_msg.pgid = fl->tgid; + req_msg.client_id = fl->client_id; req_msg.len = map->len; req_msg.vaddrin = map->raddr; req_msg.fd = map->fd; @@ -1975,11 +2005,13 @@ static int fastrpc_req_mem_unmap_impl(struct fastrpc_user *fl, struct fastrpc_me sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MEM_UNMAP, 1, 0); err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc, &args[0]); - fastrpc_map_put(map); - if (err) + if (err) { dev_err(dev, "unmmap\tpt fd = %d, 0x%09llx error\n", map->fd, map->raddr); + return err; + } + fastrpc_map_put(map); - return err; + return 0; } static int fastrpc_req_mem_unmap(struct fastrpc_user *fl, char __user *argp) @@ -2015,7 +2047,7 @@ static int fastrpc_req_mem_map(struct fastrpc_user *fl, char __user *argp) return err; } - req_msg.pgid = fl->tgid; + req_msg.client_id = fl->client_id; req_msg.fd = req.fd; req_msg.offset = req.offset; req_msg.vaddrin = req.vaddrin; @@ -2028,7 +2060,7 @@ static int fastrpc_req_mem_map(struct fastrpc_user *fl, char __user *argp) args[0].length = sizeof(req_msg); pages.addr = map->phys; - 
pages.size = map->size; + pages.size = map->len; args[1].ptr = (u64) (uintptr_t) &pages; args[1].length = sizeof(pages); @@ -2043,7 +2075,7 @@ static int fastrpc_req_mem_map(struct fastrpc_user *fl, char __user *argp) err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc, &args[0]); if (err) { dev_err(dev, "mem mmap error, fd %d, vaddr %llx, size %lld\n", - req.fd, req.vaddrin, map->size); + req.fd, req.vaddrin, map->len); goto err_invoke; } @@ -2056,7 +2088,7 @@ static int fastrpc_req_mem_map(struct fastrpc_user *fl, char __user *argp) if (copy_to_user((void __user *)argp, &req, sizeof(req))) { /* unmap the memory and release the buffer */ req_unmap.vaddr = (uintptr_t) rsp_msg.vaddr; - req_unmap.length = map->size; + req_unmap.length = map->len; fastrpc_req_mem_unmap_impl(fl, &req_unmap); return -EFAULT; } @@ -2175,7 +2207,7 @@ static int fastrpc_cb_probe(struct platform_device *pdev) return 0; } -static int fastrpc_cb_remove(struct platform_device *pdev) +static void fastrpc_cb_remove(struct platform_device *pdev) { struct fastrpc_channel_ctx *cctx = dev_get_drvdata(pdev->dev.parent); struct fastrpc_session_ctx *sess = dev_get_drvdata(&pdev->dev); @@ -2183,15 +2215,13 @@ static int fastrpc_cb_remove(struct platform_device *pdev) int i; spin_lock_irqsave(&cctx->lock, flags); - for (i = 1; i < FASTRPC_MAX_SESSIONS; i++) { + for (i = 0; i < FASTRPC_MAX_SESSIONS; i++) { if (cctx->session[i].sid == sess->sid) { cctx->session[i].valid = false; cctx->sesscount--; } } spin_unlock_irqrestore(&cctx->lock, flags); - - return 0; } static const struct of_device_id fastrpc_match_table[] = { @@ -2239,6 +2269,22 @@ static int fastrpc_device_register(struct device *dev, struct fastrpc_channel_ct return err; } +static int fastrpc_get_domain_id(const char *domain) +{ + if (!strncmp(domain, "adsp", 4)) + return ADSP_DOMAIN_ID; + else if (!strncmp(domain, "cdsp", 4)) + return CDSP_DOMAIN_ID; + else if (!strncmp(domain, "mdsp", 4)) + return MDSP_DOMAIN_ID; + else if 
(!strncmp(domain, "sdsp", 4)) + return SDSP_DOMAIN_ID; + else if (!strncmp(domain, "gdsp", 4)) + return GDSP_DOMAIN_ID; + + return -EINVAL; +} + static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev) { struct device *rdev = &rpdev->dev; @@ -2254,15 +2300,10 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev) return err; } - for (i = 0; i <= CDSP_DOMAIN_ID; i++) { - if (!strcmp(domains[i], domain)) { - domain_id = i; - break; - } - } + domain_id = fastrpc_get_domain_id(domain); if (domain_id < 0) { - dev_info(rdev, "FastRPC Invalid Domain ID %d\n", domain_id); + dev_info(rdev, "FastRPC Domain %s not supported\n", domain); return -EINVAL; } @@ -2282,13 +2323,26 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev) if (vmcount) { data->vmcount = vmcount; - data->perms = BIT(QCOM_SCM_VMID_HLOS); for (i = 0; i < data->vmcount; i++) { data->vmperms[i].vmid = vmids[i]; data->vmperms[i].perm = QCOM_SCM_PERM_RWX; } } + if (domain_id == SDSP_DOMAIN_ID) { + struct resource res; + u64 src_perms; + + err = of_reserved_mem_region_to_resource(rdev->of_node, 0, &res); + if (!err) { + src_perms = BIT(QCOM_SCM_VMID_HLOS); + + qcom_scm_assign_mem(res.start, resource_size(&res), &src_perms, + data->vmperms, data->vmcount); + } + + } + secure_dsp = !(of_property_read_bool(rdev->of_node, "qcom,non-secure-domain")); data->secure = secure_dsp; @@ -2296,26 +2350,27 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev) case ADSP_DOMAIN_ID: case MDSP_DOMAIN_ID: case SDSP_DOMAIN_ID: - /* Unsigned PD offloading is only supported on CDSP*/ + /* Unsigned PD offloading is only supported on CDSP and GDSP */ data->unsigned_support = false; - err = fastrpc_device_register(rdev, data, secure_dsp, domains[domain_id]); + err = fastrpc_device_register(rdev, data, secure_dsp, domain); if (err) - goto fdev_error; + goto err_free_data; break; case CDSP_DOMAIN_ID: + case GDSP_DOMAIN_ID: data->unsigned_support = true; /* Create both device nodes so that we can allow both Signed 
and Unsigned PD */ - err = fastrpc_device_register(rdev, data, true, domains[domain_id]); + err = fastrpc_device_register(rdev, data, true, domain); if (err) - goto fdev_error; + goto err_free_data; - err = fastrpc_device_register(rdev, data, false, domains[domain_id]); + err = fastrpc_device_register(rdev, data, false, domain); if (err) - goto fdev_error; + goto err_deregister_fdev; break; default: err = -EINVAL; - goto fdev_error; + goto err_free_data; } kref_init(&data->refcount); @@ -2332,17 +2387,17 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev) err = of_platform_populate(rdev->of_node, NULL, NULL, rdev); if (err) - goto populate_error; + goto err_deregister_fdev; return 0; -populate_error: +err_deregister_fdev: if (data->fdevice) misc_deregister(&data->fdevice->miscdev); if (data->secure_fdevice) misc_deregister(&data->secure_fdevice->miscdev); -fdev_error: +err_free_data: kfree(data); return err; } @@ -2470,5 +2525,6 @@ static void fastrpc_exit(void) } module_exit(fastrpc_exit); +MODULE_DESCRIPTION("Qualcomm FastRPC"); MODULE_LICENSE("GPL v2"); -MODULE_IMPORT_NS(DMA_BUF); +MODULE_IMPORT_NS("DMA_BUF"); diff --git a/drivers/misc/gehc-achc.c b/drivers/misc/gehc-achc.c index 4c9c5394da6f..b8fca4d393c6 100644 --- a/drivers/misc/gehc-achc.c +++ b/drivers/misc/gehc-achc.c @@ -65,7 +65,7 @@ static int ezport_start_programming(struct spi_device *spi, struct gpio_desc *re struct spi_transfer release_cs = { }; int ret; - spi_bus_lock(spi->master); + spi_bus_lock(spi->controller); /* assert chip select */ spi_message_init(&msg); @@ -85,16 +85,16 @@ static int ezport_start_programming(struct spi_device *spi, struct gpio_desc *re ret = spi_sync_locked(spi, &msg); fail: - spi_bus_unlock(spi->master); + spi_bus_unlock(spi->controller); return ret; } static void ezport_stop_programming(struct spi_device *spi, struct gpio_desc *reset) { /* reset without asserted chip select to return into normal mode */ - spi_bus_lock(spi->master); + 
spi_bus_lock(spi->controller); ezport_reset(reset); - spi_bus_unlock(spi->master); + spi_bus_unlock(spi->controller); } static int ezport_get_status_register(struct spi_device *spi) diff --git a/drivers/misc/genwqe/card_base.c b/drivers/misc/genwqe/card_base.c index b03010810b89..224a7e97cbea 100644 --- a/drivers/misc/genwqe/card_base.c +++ b/drivers/misc/genwqe/card_base.c @@ -42,7 +42,7 @@ MODULE_VERSION(DRV_VERSION); MODULE_LICENSE("GPL"); static char genwqe_driver_name[] = GENWQE_DEVNAME; -static struct class *class_genwqe; + static struct dentry *debugfs_genwqe; static struct genwqe_dev *genwqe_devices[GENWQE_CARD_NO_MAX]; @@ -105,6 +105,26 @@ static const struct pci_device_id genwqe_device_table[] = { MODULE_DEVICE_TABLE(pci, genwqe_device_table); /** + * genwqe_devnode() - Set default access mode for genwqe devices. + * @dev: Pointer to device (unused) + * @mode: Carrier to pass-back given mode (permissions) + * + * Default mode should be rw for everybody. Do not change default + * device name. + */ +static char *genwqe_devnode(const struct device *dev, umode_t *mode) +{ + if (mode) + *mode = 0666; + return NULL; +} + +static const struct class class_genwqe = { + .name = GENWQE_DEVNAME, + .devnode = genwqe_devnode, +}; + +/** * genwqe_dev_alloc() - Create and prepare a new card descriptor * * Return: Pointer to card descriptor, or ERR_PTR(err) on error @@ -126,7 +146,7 @@ static struct genwqe_dev *genwqe_dev_alloc(void) return ERR_PTR(-ENOMEM); cd->card_idx = i; - cd->class_genwqe = class_genwqe; + cd->class_genwqe = &class_genwqe; cd->debugfs_genwqe = debugfs_genwqe; /* @@ -1340,35 +1360,18 @@ static struct pci_driver genwqe_driver = { }; /** - * genwqe_devnode() - Set default access mode for genwqe devices. - * @dev: Pointer to device (unused) - * @mode: Carrier to pass-back given mode (permissions) - * - * Default mode should be rw for everybody. Do not change default - * device name. 
- */ -static char *genwqe_devnode(const struct device *dev, umode_t *mode) -{ - if (mode) - *mode = 0666; - return NULL; -} - -/** * genwqe_init_module() - Driver registration and initialization */ static int __init genwqe_init_module(void) { int rc; - class_genwqe = class_create(GENWQE_DEVNAME); - if (IS_ERR(class_genwqe)) { + rc = class_register(&class_genwqe); + if (rc) { pr_err("[%s] create class failed\n", __func__); return -ENOMEM; } - class_genwqe->devnode = genwqe_devnode; - debugfs_genwqe = debugfs_create_dir(GENWQE_DEVNAME, NULL); rc = pci_register_driver(&genwqe_driver); @@ -1381,7 +1384,7 @@ static int __init genwqe_init_module(void) err_out0: debugfs_remove(debugfs_genwqe); - class_destroy(class_genwqe); + class_unregister(&class_genwqe); return rc; } @@ -1392,7 +1395,7 @@ static void __exit genwqe_exit_module(void) { pci_unregister_driver(&genwqe_driver); debugfs_remove(debugfs_genwqe); - class_destroy(class_genwqe); + class_unregister(&class_genwqe); } module_init(genwqe_init_module); diff --git a/drivers/misc/genwqe/card_base.h b/drivers/misc/genwqe/card_base.h index 0e902977d35f..d700266f2cd0 100644 --- a/drivers/misc/genwqe/card_base.h +++ b/drivers/misc/genwqe/card_base.h @@ -289,7 +289,7 @@ struct genwqe_dev { /* char device */ dev_t devnum_genwqe; /* major/minor num card */ - struct class *class_genwqe; /* reference to class object */ + const struct class *class_genwqe; /* reference to class object */ struct device *dev; /* for device creation */ struct cdev cdev_genwqe; /* char device for card */ diff --git a/drivers/misc/genwqe/card_ddcb.c b/drivers/misc/genwqe/card_ddcb.c index 500b1feaf1f6..fd7d5cd50d39 100644 --- a/drivers/misc/genwqe/card_ddcb.c +++ b/drivers/misc/genwqe/card_ddcb.c @@ -923,7 +923,7 @@ int __genwqe_execute_raw_ddcb(struct genwqe_dev *cd, } if (cmd->asv_length > DDCB_ASV_LENGTH) { dev_err(&pci_dev->dev, "[%s] err: wrong asv_length of %d\n", - __func__, cmd->asiv_length); + __func__, cmd->asv_length); return -EINVAL; } rc = 
__genwqe_enqueue_ddcb(cd, req, f_flags); diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c index 55fc5b80e649..4441aca2280a 100644 --- a/drivers/misc/genwqe/card_dev.c +++ b/drivers/misc/genwqe/card_dev.c @@ -443,7 +443,7 @@ static int genwqe_mmap(struct file *filp, struct vm_area_struct *vma) if (vsize == 0) return -EINVAL; - if (get_order(vsize) > MAX_ORDER) + if (get_order(vsize) > MAX_PAGE_ORDER) return -ENOMEM; dma_map = kzalloc(sizeof(struct dma_mapping), GFP_KERNEL); diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c index 1c798d6b2dfb..a2c4a9b4f871 100644 --- a/drivers/misc/genwqe/card_utils.c +++ b/drivers/misc/genwqe/card_utils.c @@ -210,7 +210,7 @@ u32 genwqe_crc32(u8 *buff, size_t len, u32 init) void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size, dma_addr_t *dma_handle) { - if (get_order(size) > MAX_ORDER) + if (get_order(size) > MAX_PAGE_ORDER) return NULL; return dma_alloc_coherent(&cd->pci_dev->dev, size, dma_handle, @@ -308,7 +308,7 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl, sgl->write = write; sgl->sgl_size = genwqe_sgl_size(sgl->nr_pages); - if (get_order(sgl->sgl_size) > MAX_ORDER) { + if (get_order(sgl->sgl_size) > MAX_PAGE_ORDER) { dev_err(&pci_dev->dev, "[%s] err: too much memory requested!\n", __func__); return ret; diff --git a/drivers/misc/hi6421v600-irq.c b/drivers/misc/hi6421v600-irq.c index caa3de37698b..5ba40222eb12 100644 --- a/drivers/misc/hi6421v600-irq.c +++ b/drivers/misc/hi6421v600-irq.c @@ -11,7 +11,6 @@ #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/module.h> -#include <linux/of_gpio.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/irqdomain.h> @@ -215,7 +214,6 @@ static void hi6421v600_irq_init(struct hi6421v600_irq *priv) static int hi6421v600_irq_probe(struct platform_device *pdev) { struct device *pmic_dev = pdev->dev.parent; - struct device_node *np = pmic_dev->of_node; 
struct platform_device *pmic_pdev; struct device *dev = &pdev->dev; struct hi6421v600_irq *priv; @@ -244,10 +242,8 @@ static int hi6421v600_irq_probe(struct platform_device *pdev) pmic_pdev = container_of(pmic_dev, struct platform_device, dev); priv->irq = platform_get_irq(pmic_pdev, 0); - if (priv->irq < 0) { - dev_err(dev, "Error %d when getting IRQs\n", priv->irq); + if (priv->irq < 0) return priv->irq; - } platform_set_drvdata(pdev, priv); @@ -257,8 +253,8 @@ static int hi6421v600_irq_probe(struct platform_device *pdev) if (!priv->irqs) return -ENOMEM; - priv->domain = irq_domain_add_simple(np, PMIC_IRQ_LIST_MAX, 0, - &hi6421v600_domain_ops, priv); + priv->domain = irq_domain_create_simple(dev_fwnode(pmic_dev), PMIC_IRQ_LIST_MAX, 0, + &hi6421v600_domain_ops, priv); if (!priv->domain) { dev_err(dev, "Failed to create IRQ domain\n"); return -ENODEV; diff --git a/drivers/misc/hisi_hikey_usb.c b/drivers/misc/hisi_hikey_usb.c index 2165ec35a343..2c6e448a47f1 100644 --- a/drivers/misc/hisi_hikey_usb.c +++ b/drivers/misc/hisi_hikey_usb.c @@ -14,11 +14,11 @@ #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/notifier.h> -#include <linux/of_gpio.h> #include <linux/platform_device.h> #include <linux/property.h> #include <linux/regulator/consumer.h> #include <linux/slab.h> +#include <linux/string_choices.h> #include <linux/usb/role.h> #define DEVICE_DRIVER_NAME "hisi_hikey_usb" @@ -68,7 +68,7 @@ static void hub_power_ctrl(struct hisi_hikey_usb *hisi_hikey_usb, int value) if (ret) dev_err(hisi_hikey_usb->dev, "Can't switch regulator state to %s\n", - value ? 
"enabled" : "disabled"); + str_enabled_disabled(value)); } static void usb_switch_ctrl(struct hisi_hikey_usb *hisi_hikey_usb, @@ -239,7 +239,7 @@ static int hisi_hikey_usb_probe(struct platform_device *pdev) return 0; } -static int hisi_hikey_usb_remove(struct platform_device *pdev) +static void hisi_hikey_usb_remove(struct platform_device *pdev) { struct hisi_hikey_usb *hisi_hikey_usb = platform_get_drvdata(pdev); @@ -251,8 +251,6 @@ static int hisi_hikey_usb_remove(struct platform_device *pdev) } else { hub_power_ctrl(hisi_hikey_usb, HUB_VBUS_POWER_OFF); } - - return 0; } static const struct of_device_id id_table_hisi_hikey_usb[] = { diff --git a/drivers/misc/hmc6352.c b/drivers/misc/hmc6352.c index 759eaeb64307..ff92c6edff6b 100644 --- a/drivers/misc/hmc6352.c +++ b/drivers/misc/hmc6352.c @@ -121,7 +121,7 @@ static void hmc6352_remove(struct i2c_client *client) } static const struct i2c_device_id hmc6352_id[] = { - { "hmc6352", 0 }, + { "hmc6352" }, { } }; diff --git a/drivers/misc/hpilo.c b/drivers/misc/hpilo.c index 2fde8d63c5fe..04bd34c8c506 100644 --- a/drivers/misc/hpilo.c +++ b/drivers/misc/hpilo.c @@ -25,7 +25,9 @@ #include <linux/slab.h> #include "hpilo.h" -static struct class *ilo_class; +static const struct class ilo_class = { + .name = "iLO", +}; static unsigned int ilo_major; static unsigned int max_ccb = 16; static char ilo_hwdev[MAX_ILO_DEV]; @@ -746,7 +748,7 @@ static void ilo_remove(struct pci_dev *pdev) minor = MINOR(ilo_hw->cdev.dev); for (i = minor; i < minor + max_ccb; i++) - device_destroy(ilo_class, MKDEV(ilo_major, i)); + device_destroy(&ilo_class, MKDEV(ilo_major, i)); cdev_del(&ilo_hw->cdev); ilo_disable_interrupts(ilo_hw); @@ -768,7 +770,7 @@ static void ilo_remove(struct pci_dev *pdev) static int ilo_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { - int devnum, minor, start, error = 0; + int devnum, slot, start, error = 0; struct ilo_hwinfo *ilo_hw; if (pci_match_id(ilo_blacklist, pdev)) { @@ -837,11 +839,11 @@ static 
int ilo_probe(struct pci_dev *pdev, goto remove_isr; } - for (minor = 0 ; minor < max_ccb; minor++) { + for (slot = 0; slot < max_ccb; slot++) { struct device *dev; - dev = device_create(ilo_class, &pdev->dev, - MKDEV(ilo_major, minor), NULL, - "hpilo!d%dccb%d", devnum, minor); + dev = device_create(&ilo_class, &pdev->dev, + MKDEV(ilo_major, start + slot), NULL, + "hpilo!d%dccb%d", devnum, slot); if (IS_ERR(dev)) dev_err(&pdev->dev, "Could not create files\n"); } @@ -882,11 +884,9 @@ static int __init ilo_init(void) int error; dev_t dev; - ilo_class = class_create("iLO"); - if (IS_ERR(ilo_class)) { - error = PTR_ERR(ilo_class); + error = class_register(&ilo_class); + if (error) goto out; - } error = alloc_chrdev_region(&dev, 0, MAX_OPEN, ILO_NAME); if (error) @@ -902,7 +902,7 @@ static int __init ilo_init(void) chr_remove: unregister_chrdev_region(dev, MAX_OPEN); class_destroy: - class_destroy(ilo_class); + class_unregister(&ilo_class); out: return error; } @@ -911,7 +911,7 @@ static void __exit ilo_exit(void) { pci_unregister_driver(&ilo_driver); unregister_chrdev_region(MKDEV(ilo_major, 0), MAX_OPEN); - class_destroy(ilo_class); + class_unregister(&ilo_class); } MODULE_VERSION("1.5.0"); diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c index 5867af9f592c..824c5b664985 100644 --- a/drivers/misc/ibmasm/ibmasmfs.c +++ b/drivers/misc/ibmasm/ibmasmfs.c @@ -94,16 +94,14 @@ static int ibmasmfs_init_fs_context(struct fs_context *fc) static const struct super_operations ibmasmfs_s_ops = { .statfs = simple_statfs, - .drop_inode = generic_delete_inode, + .drop_inode = inode_just_drop, }; -static const struct file_operations *ibmasmfs_dir_ops = &simple_dir_operations; - static struct file_system_type ibmasmfs_type = { .owner = THIS_MODULE, .name = "ibmasmfs", .init_fs_context = ibmasmfs_init_fs_context, - .kill_sb = kill_litter_super, + .kill_sb = kill_anon_super, }; MODULE_ALIAS_FS("ibmasmfs"); @@ -122,7 +120,7 @@ static int 
ibmasmfs_fill_super(struct super_block *sb, struct fs_context *fc) return -ENOMEM; root->i_op = &simple_dir_inode_operations; - root->i_fop = ibmasmfs_dir_ops; + root->i_fop = &simple_dir_operations; sb->s_root = d_make_root(root); if (!sb->s_root) @@ -139,12 +137,12 @@ static struct inode *ibmasmfs_make_inode(struct super_block *sb, int mode) if (ret) { ret->i_ino = get_next_ino(); ret->i_mode = mode; - ret->i_atime = ret->i_mtime = inode_set_ctime_current(ret); + simple_inode_init_ts(ret); } return ret; } -static struct dentry *ibmasmfs_create_file(struct dentry *parent, +static int ibmasmfs_create_file(struct dentry *parent, const char *name, const struct file_operations *fops, void *data, @@ -155,19 +153,20 @@ static struct dentry *ibmasmfs_create_file(struct dentry *parent, dentry = d_alloc_name(parent, name); if (!dentry) - return NULL; + return -ENOMEM; inode = ibmasmfs_make_inode(parent->d_sb, S_IFREG | mode); if (!inode) { dput(dentry); - return NULL; + return -ENOMEM; } inode->i_fop = fops; inode->i_private = data; - d_add(dentry, inode); - return dentry; + d_make_persistent(dentry, inode); + dput(dentry); + return 0; } static struct dentry *ibmasmfs_create_dir(struct dentry *parent, @@ -187,10 +186,11 @@ static struct dentry *ibmasmfs_create_dir(struct dentry *parent, } inode->i_op = &simple_dir_inode_operations; - inode->i_fop = ibmasmfs_dir_ops; + inode->i_fop = &simple_dir_operations; - d_add(dentry, inode); - return dentry; + d_make_persistent(dentry, inode); + dput(dentry); + return dentry; // borrowed } int ibmasmfs_register(void) @@ -525,15 +525,9 @@ static ssize_t remote_settings_file_write(struct file *file, const char __user * if (*offset != 0) return 0; - buff = kzalloc (count + 1, GFP_KERNEL); - if (!buff) - return -ENOMEM; - - - if (copy_from_user(buff, ubuff, count)) { - kfree(buff); - return -EFAULT; - } + buff = memdup_user_nul(ubuff, count); + if (IS_ERR(buff)) + return PTR_ERR(buff); value = simple_strtoul(buff, NULL, 10); writel(value, 
address); diff --git a/drivers/misc/ibmvmc.c b/drivers/misc/ibmvmc.c index 2101eb12bcba..e5f935b5249d 100644 --- a/drivers/misc/ibmvmc.c +++ b/drivers/misc/ibmvmc.c @@ -1124,7 +1124,7 @@ static ssize_t ibmvmc_write(struct file *file, const char *buffer, goto out; inode = file_inode(file); - inode->i_mtime = inode_set_ctime_current(inode); + inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); mark_inode_dirty(inode); dev_dbg(adapter->dev, "write: file = 0x%lx, count = 0x%lx\n", @@ -1249,9 +1249,7 @@ static long ibmvmc_ioctl_sethmcid(struct ibmvmc_file_session *session, return -EIO; } - /* Make sure buffer is NULL terminated before trying to print it */ - memset(print_buffer, 0, HMC_ID_LEN + 1); - strncpy(print_buffer, hmc->hmc_id, HMC_ID_LEN); + strscpy(print_buffer, hmc->hmc_id, sizeof(print_buffer)); pr_info("ibmvmc: sethmcid: Set HMC ID: \"%s\"\n", print_buffer); memcpy(buffer->real_addr_local, hmc->hmc_id, HMC_ID_LEN); diff --git a/drivers/misc/ics932s401.c b/drivers/misc/ics932s401.c index ee6296b98078..4cdb1087838f 100644 --- a/drivers/misc/ics932s401.c +++ b/drivers/misc/ics932s401.c @@ -95,7 +95,7 @@ static int ics932s401_detect(struct i2c_client *client, static void ics932s401_remove(struct i2c_client *client); static const struct i2c_device_id ics932s401_id[] = { - { "ics932s401", 0 }, + { "ics932s401" }, { } }; MODULE_DEVICE_TABLE(i2c, ics932s401_id); diff --git a/drivers/misc/isl29003.c b/drivers/misc/isl29003.c index ebf0635aee64..9f26db467a81 100644 --- a/drivers/misc/isl29003.c +++ b/drivers/misc/isl29003.c @@ -449,7 +449,7 @@ static SIMPLE_DEV_PM_OPS(isl29003_pm_ops, isl29003_suspend, isl29003_resume); #endif /* CONFIG_PM_SLEEP */ static const struct i2c_device_id isl29003_id[] = { - { "isl29003", 0 }, + { "isl29003" }, {} }; MODULE_DEVICE_TABLE(i2c, isl29003_id); diff --git a/drivers/misc/isl29020.c b/drivers/misc/isl29020.c index c5976fa8c825..c288aeec16c0 100644 --- a/drivers/misc/isl29020.c +++ b/drivers/misc/isl29020.c @@ -68,7 +68,7 
@@ static ssize_t als_lux_input_data_show(struct device *dev, if (val < 0) return val; lux = ((((1 << (2 * (val & 3))))*1000) * ret_val) / 65536; - return sprintf(buf, "%ld\n", lux); + return sprintf(buf, "%lu\n", lux); } static ssize_t als_sensing_range_store(struct device *dev, @@ -177,7 +177,7 @@ static void isl29020_remove(struct i2c_client *client) } static const struct i2c_device_id isl29020_id[] = { - { "isl29020", 0 }, + { "isl29020" }, { } }; diff --git a/drivers/misc/keba/Kconfig b/drivers/misc/keba/Kconfig new file mode 100644 index 000000000000..d6d47197a963 --- /dev/null +++ b/drivers/misc/keba/Kconfig @@ -0,0 +1,26 @@ +# SPDX-License-Identifier: GPL-2.0 +config KEBA_CP500 + tristate "KEBA CP500 system FPGA support" + depends on X86_64 || ARM64 || COMPILE_TEST + depends on PCI + depends on I2C + select AUXILIARY_BUS + help + This driver supports the KEBA CP500 system FPGA, which is used in + KEBA CP500 devices. It registers all sub devices present on the CP500 + system FPGA as separate devices. A driver is needed for each sub + device. + + This driver can also be built as a module. If so, the module will be + called cp500. + +config KEBA_LAN9252 + tristate "KEBA CP500 LAN9252 configuration" + depends on SPI + depends on KEBA_CP500 || COMPILE_TEST + help + This driver is used for updating the configuration of the LAN9252 + controller on KEBA CP500 devices. + + This driver can also be built as a module. If so, the module will be + called lan9252. 
diff --git a/drivers/misc/keba/Makefile b/drivers/misc/keba/Makefile new file mode 100644 index 000000000000..05e9efcad54f --- /dev/null +++ b/drivers/misc/keba/Makefile @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_KEBA_CP500) += cp500.o +obj-$(CONFIG_KEBA_LAN9252) += lan9252.o diff --git a/drivers/misc/keba/cp500.c b/drivers/misc/keba/cp500.c new file mode 100644 index 000000000000..d0c6113dcff3 --- /dev/null +++ b/drivers/misc/keba/cp500.c @@ -0,0 +1,989 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) KEBA Industrial Automation Gmbh 2024 + * + * Driver for KEBA system FPGA + * + * The KEBA system FPGA implements various devices. This driver registers + * auxiliary devices for every device within the FPGA. + */ + +#include <linux/device.h> +#include <linux/i2c.h> +#include <linux/misc/keba.h> +#include <linux/module.h> +#include <linux/mtd/partitions.h> +#include <linux/nvmem-consumer.h> +#include <linux/nvmem-provider.h> +#include <linux/pci.h> +#include <linux/spi/flash.h> +#include <linux/spi/spi.h> + +#define CP500 "cp500" + +#define PCI_VENDOR_ID_KEBA 0xCEBA +#define PCI_DEVICE_ID_KEBA_CP035 0x2706 +#define PCI_DEVICE_ID_KEBA_CP505 0x2703 +#define PCI_DEVICE_ID_KEBA_CP520 0x2696 + +#define CP500_SYS_BAR 0 +#define CP500_ECM_BAR 1 + +/* BAR 0 registers */ +#define CP500_VERSION_REG 0x00 +#define CP500_RECONFIG_REG 0x11 /* upper 8-bits of STARTUP register */ +#define CP500_PRESENT_REG 0x20 +#define CP500_AXI_REG 0x40 + +/* Bits in BUILD_REG */ +#define CP500_BUILD_TEST 0x8000 /* FPGA test version */ + +/* Bits in RECONFIG_REG */ +#define CP500_RECFG_REQ 0x01 /* reconfigure FPGA on next reset */ + +/* Bits in PRESENT_REG */ +#define CP500_PRESENT_FAN0 0x01 + +/* MSIX */ +#define CP500_AXI_MSIX 3 +#define CP500_RFB_UART_MSIX 4 +#define CP500_DEBUG_UART_MSIX 5 +#define CP500_SI1_UART_MSIX 6 +#define CP500_NUM_MSIX 8 +#define CP500_NUM_MSIX_NO_MMI 2 +#define CP500_NUM_MSIX_NO_AXI 3 + +/* EEPROM */ +#define CP500_EEPROM_DA_OFFSET 
0x016F +#define CP500_EEPROM_DA_ESC_TYPE_MASK 0x01 +#define CP500_EEPROM_ESC_LAN9252 0x00 +#define CP500_EEPROM_ESC_ET1100 0x01 +#define CP500_EEPROM_CPU_NAME "cpu_eeprom" +#define CP500_EEPROM_CPU_OFFSET 0 +#define CP500_EEPROM_CPU_SIZE 3072 +#define CP500_EEPROM_USER_NAME "user_eeprom" +#define CP500_EEPROM_USER_OFFSET 3072 +#define CP500_EEPROM_USER_SIZE 1024 + +/* SPI flash running at full speed */ +#define CP500_FLASH_HZ (33 * 1000 * 1000) + +/* LAN9252 */ +#define CP500_LAN9252_HZ (10 * 1000 * 1000) + +#define CP500_IS_CP035(dev) ((dev)->pci_dev->device == PCI_DEVICE_ID_KEBA_CP035) +#define CP500_IS_CP505(dev) ((dev)->pci_dev->device == PCI_DEVICE_ID_KEBA_CP505) +#define CP500_IS_CP520(dev) ((dev)->pci_dev->device == PCI_DEVICE_ID_KEBA_CP520) + +struct cp500_dev_info { + off_t offset; + size_t size; + unsigned int msix; +}; + +struct cp500_devs { + struct cp500_dev_info startup; + struct cp500_dev_info spi; + struct cp500_dev_info i2c; + struct cp500_dev_info fan; + struct cp500_dev_info batt; + struct cp500_dev_info uart0_rfb; + struct cp500_dev_info uart1_dbg; + struct cp500_dev_info uart2_si1; +}; + +/* list of devices within FPGA of CP035 family (CP035, CP056, CP057) */ +static struct cp500_devs cp035_devices = { + .startup = { 0x0000, SZ_4K }, + .spi = { 0x1000, SZ_4K }, + .i2c = { 0x4000, SZ_4K }, + .fan = { 0x9000, SZ_4K }, + .batt = { 0xA000, SZ_4K }, + .uart0_rfb = { 0xB000, SZ_4K, CP500_RFB_UART_MSIX }, + .uart2_si1 = { 0xD000, SZ_4K, CP500_SI1_UART_MSIX }, +}; + +/* list of devices within FPGA of CP505 family (CP503, CP505, CP507) */ +static struct cp500_devs cp505_devices = { + .startup = { 0x0000, SZ_4K }, + .spi = { 0x4000, SZ_4K }, + .i2c = { 0x5000, SZ_4K }, + .fan = { 0x9000, SZ_4K }, + .batt = { 0xA000, SZ_4K }, + .uart0_rfb = { 0xB000, SZ_4K, CP500_RFB_UART_MSIX }, + .uart2_si1 = { 0xD000, SZ_4K, CP500_SI1_UART_MSIX }, +}; + +/* list of devices within FPGA of CP520 family (CP520, CP530) */ +static struct cp500_devs cp520_devices = { + 
.startup = { 0x0000, SZ_4K }, + .spi = { 0x4000, SZ_4K }, + .i2c = { 0x5000, SZ_4K }, + .fan = { 0x8000, SZ_4K }, + .batt = { 0x9000, SZ_4K }, + .uart0_rfb = { 0xC000, SZ_4K, CP500_RFB_UART_MSIX }, + .uart1_dbg = { 0xD000, SZ_4K, CP500_DEBUG_UART_MSIX }, +}; + +struct cp500_nvmem { + struct nvmem_device *base_nvmem; + unsigned int offset; + struct nvmem_device *nvmem; +}; + +struct cp500 { + struct pci_dev *pci_dev; + struct cp500_devs *devs; + int msix_num; + struct { + int major; + int minor; + int build; + } version; + struct notifier_block nvmem_notifier; + atomic_t nvmem_notified; + + /* system FPGA BAR */ + resource_size_t sys_hwbase; + struct keba_spi_auxdev *spi; + struct keba_i2c_auxdev *i2c; + struct keba_fan_auxdev *fan; + struct keba_batt_auxdev *batt; + struct keba_uart_auxdev *uart0_rfb; + struct keba_uart_auxdev *uart1_dbg; + struct keba_uart_auxdev *uart2_si1; + + /* ECM EtherCAT BAR */ + resource_size_t ecm_hwbase; + + /* NVMEM devices */ + struct cp500_nvmem nvmem_cpu; + struct cp500_nvmem nvmem_user; + + void __iomem *system_startup_addr; +}; + +/* I2C devices */ +#define CP500_EEPROM_ADDR 0x50 +static struct i2c_board_info cp500_i2c_info[] = { + { /* temperature sensor */ + I2C_BOARD_INFO("emc1403", 0x4c), + }, + { /* + * CPU EEPROM + * CP035 family: CPU board + * CP505 family: bridge board + * CP520 family: carrier board + */ + I2C_BOARD_INFO("24c32", CP500_EEPROM_ADDR), + }, + { /* interface board EEPROM */ + I2C_BOARD_INFO("24c32", CP500_EEPROM_ADDR + 1), + }, + { /* + * EEPROM (optional) + * CP505 family: CPU board + * CP520 family: MMI board + */ + I2C_BOARD_INFO("24c32", CP500_EEPROM_ADDR + 2), + }, + { /* extension module 0 EEPROM (optional) */ + I2C_BOARD_INFO("24c32", CP500_EEPROM_ADDR + 3), + }, + { /* extension module 1 EEPROM (optional) */ + I2C_BOARD_INFO("24c32", CP500_EEPROM_ADDR + 4), + }, + { /* extension module 2 EEPROM (optional) */ + I2C_BOARD_INFO("24c32", CP500_EEPROM_ADDR + 5), + }, + { /* extension module 3 EEPROM 
(optional) */ + I2C_BOARD_INFO("24c32", CP500_EEPROM_ADDR + 6), + } +}; + +/* SPI devices */ +static struct mtd_partition cp500_partitions[] = { + { + .name = "system-flash-parts", + .size = MTDPART_SIZ_FULL, + .offset = 0, + .mask_flags = 0 + } +}; +static const struct flash_platform_data cp500_w25q32 = { + .type = "w25q32", + .name = "system-flash", + .parts = cp500_partitions, + .nr_parts = ARRAY_SIZE(cp500_partitions), +}; +static const struct flash_platform_data cp500_m25p16 = { + .type = "m25p16", + .name = "system-flash", + .parts = cp500_partitions, + .nr_parts = ARRAY_SIZE(cp500_partitions), +}; +static struct spi_board_info cp500_spi_info[] = { + { /* system FPGA configuration bitstream flash */ + .modalias = "m25p80", + .platform_data = &cp500_m25p16, + .max_speed_hz = CP500_FLASH_HZ, + .chip_select = 0, + .mode = SPI_MODE_3, + }, { /* LAN9252 EtherCAT slave controller */ + .modalias = "lan9252", + .platform_data = NULL, + .max_speed_hz = CP500_LAN9252_HZ, + .chip_select = 1, + .mode = SPI_MODE_3, + } +}; + +static ssize_t cp500_get_fpga_version(struct cp500 *cp500, char *buf, + size_t max_len) +{ + int n; + + if (CP500_IS_CP035(cp500)) + n = scnprintf(buf, max_len, "CP035"); + else if (CP500_IS_CP505(cp500)) + n = scnprintf(buf, max_len, "CP505"); + else + n = scnprintf(buf, max_len, "CP500"); + + n += scnprintf(buf + n, max_len - n, "_FPGA_%d.%02d", + cp500->version.major, cp500->version.minor); + + /* test versions have test bit set */ + if (cp500->version.build & CP500_BUILD_TEST) + n += scnprintf(buf + n, max_len - n, "Test%d", + cp500->version.build & ~CP500_BUILD_TEST); + + n += scnprintf(buf + n, max_len - n, "\n"); + + return n; +} + +static ssize_t version_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct cp500 *cp500 = dev_get_drvdata(dev); + + return cp500_get_fpga_version(cp500, buf, PAGE_SIZE); +} +static DEVICE_ATTR_RO(version); + +static ssize_t keep_cfg_show(struct device *dev, struct device_attribute 
*attr, + char *buf) +{ + struct cp500 *cp500 = dev_get_drvdata(dev); + unsigned long keep_cfg = 1; + + /* + * FPGA configuration stream is kept during reset when RECONFIG bit is + * zero + */ + if (ioread8(cp500->system_startup_addr + CP500_RECONFIG_REG) & + CP500_RECFG_REQ) + keep_cfg = 0; + + return sysfs_emit(buf, "%lu\n", keep_cfg); +} + +static ssize_t keep_cfg_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct cp500 *cp500 = dev_get_drvdata(dev); + unsigned long keep_cfg; + + if (kstrtoul(buf, 10, &keep_cfg) < 0) + return -EINVAL; + + /* + * In normal operation "keep_cfg" is "1". This means that the FPGA keeps + * its configuration stream during a reset. + * In case of a firmware update of the FPGA, the configuration stream + * needs to be reloaded. This can be done without a powercycle by + * writing a "0" into the "keep_cfg" attribute. After a reset/reboot th + * new configuration stream will be loaded. + */ + if (keep_cfg) + iowrite8(0, cp500->system_startup_addr + CP500_RECONFIG_REG); + else + iowrite8(CP500_RECFG_REQ, + cp500->system_startup_addr + CP500_RECONFIG_REG); + + return count; +} +static DEVICE_ATTR_RW(keep_cfg); + +static struct attribute *cp500_attrs[] = { + &dev_attr_version.attr, + &dev_attr_keep_cfg.attr, + NULL +}; +ATTRIBUTE_GROUPS(cp500); + +static void cp500_i2c_release(struct device *dev) +{ + struct keba_i2c_auxdev *i2c = + container_of(dev, struct keba_i2c_auxdev, auxdev.dev); + + kfree(i2c); +} + +static int cp500_register_i2c(struct cp500 *cp500) +{ + int ret; + + cp500->i2c = kzalloc(sizeof(*cp500->i2c), GFP_KERNEL); + if (!cp500->i2c) + return -ENOMEM; + + cp500->i2c->auxdev.name = "i2c"; + cp500->i2c->auxdev.id = 0; + cp500->i2c->auxdev.dev.release = cp500_i2c_release; + cp500->i2c->auxdev.dev.parent = &cp500->pci_dev->dev; + cp500->i2c->io = (struct resource) { + /* I2C register area */ + .start = (resource_size_t) cp500->sys_hwbase + + cp500->devs->i2c.offset, + .end = 
(resource_size_t) cp500->sys_hwbase + + cp500->devs->i2c.offset + + cp500->devs->i2c.size - 1, + .flags = IORESOURCE_MEM, + }; + cp500->i2c->info_size = ARRAY_SIZE(cp500_i2c_info); + cp500->i2c->info = cp500_i2c_info; + + ret = auxiliary_device_init(&cp500->i2c->auxdev); + if (ret) { + kfree(cp500->i2c); + cp500->i2c = NULL; + + return ret; + } + ret = __auxiliary_device_add(&cp500->i2c->auxdev, "keba"); + if (ret) { + auxiliary_device_uninit(&cp500->i2c->auxdev); + cp500->i2c = NULL; + + return ret; + } + + return 0; +} + +static void cp500_spi_release(struct device *dev) +{ + struct keba_spi_auxdev *spi = + container_of(dev, struct keba_spi_auxdev, auxdev.dev); + + kfree(spi); +} + +static int cp500_register_spi(struct cp500 *cp500, u8 esc_type) +{ + int info_size; + int ret; + + cp500->spi = kzalloc(sizeof(*cp500->spi), GFP_KERNEL); + if (!cp500->spi) + return -ENOMEM; + + if (CP500_IS_CP035(cp500)) + cp500_spi_info[0].platform_data = &cp500_w25q32; + if (esc_type == CP500_EEPROM_ESC_LAN9252) + info_size = ARRAY_SIZE(cp500_spi_info); + else + info_size = ARRAY_SIZE(cp500_spi_info) - 1; + + cp500->spi->auxdev.name = "spi"; + cp500->spi->auxdev.id = 0; + cp500->spi->auxdev.dev.release = cp500_spi_release; + cp500->spi->auxdev.dev.parent = &cp500->pci_dev->dev; + cp500->spi->io = (struct resource) { + /* SPI register area */ + .start = (resource_size_t) cp500->sys_hwbase + + cp500->devs->spi.offset, + .end = (resource_size_t) cp500->sys_hwbase + + cp500->devs->spi.offset + + cp500->devs->spi.size - 1, + .flags = IORESOURCE_MEM, + }; + cp500->spi->info_size = info_size; + cp500->spi->info = cp500_spi_info; + + ret = auxiliary_device_init(&cp500->spi->auxdev); + if (ret) { + kfree(cp500->spi); + cp500->spi = NULL; + + return ret; + } + ret = __auxiliary_device_add(&cp500->spi->auxdev, "keba"); + if (ret) { + auxiliary_device_uninit(&cp500->spi->auxdev); + cp500->spi = NULL; + + return ret; + } + + return 0; +} + +static void cp500_fan_release(struct device *dev) +{ + 
struct keba_fan_auxdev *fan = + container_of(dev, struct keba_fan_auxdev, auxdev.dev); + + kfree(fan); +} + +static int cp500_register_fan(struct cp500 *cp500) +{ + int ret; + + cp500->fan = kzalloc(sizeof(*cp500->fan), GFP_KERNEL); + if (!cp500->fan) + return -ENOMEM; + + cp500->fan->auxdev.name = "fan"; + cp500->fan->auxdev.id = 0; + cp500->fan->auxdev.dev.release = cp500_fan_release; + cp500->fan->auxdev.dev.parent = &cp500->pci_dev->dev; + cp500->fan->io = (struct resource) { + /* fan register area */ + .start = (resource_size_t) cp500->sys_hwbase + + cp500->devs->fan.offset, + .end = (resource_size_t) cp500->sys_hwbase + + cp500->devs->fan.offset + + cp500->devs->fan.size - 1, + .flags = IORESOURCE_MEM, + }; + + ret = auxiliary_device_init(&cp500->fan->auxdev); + if (ret) { + kfree(cp500->fan); + cp500->fan = NULL; + + return ret; + } + ret = __auxiliary_device_add(&cp500->fan->auxdev, "keba"); + if (ret) { + auxiliary_device_uninit(&cp500->fan->auxdev); + cp500->fan = NULL; + + return ret; + } + + return 0; +} + +static void cp500_batt_release(struct device *dev) +{ + struct keba_batt_auxdev *fan = + container_of(dev, struct keba_batt_auxdev, auxdev.dev); + + kfree(fan); +} + +static int cp500_register_batt(struct cp500 *cp500) +{ + int ret; + + cp500->batt = kzalloc(sizeof(*cp500->batt), GFP_KERNEL); + if (!cp500->batt) + return -ENOMEM; + + cp500->batt->auxdev.name = "batt"; + cp500->batt->auxdev.id = 0; + cp500->batt->auxdev.dev.release = cp500_batt_release; + cp500->batt->auxdev.dev.parent = &cp500->pci_dev->dev; + cp500->batt->io = (struct resource) { + /* battery register area */ + .start = (resource_size_t) cp500->sys_hwbase + + cp500->devs->batt.offset, + .end = (resource_size_t) cp500->sys_hwbase + + cp500->devs->batt.offset + + cp500->devs->batt.size - 1, + .flags = IORESOURCE_MEM, + }; + + ret = auxiliary_device_init(&cp500->batt->auxdev); + if (ret) { + kfree(cp500->batt); + cp500->batt = NULL; + + return ret; + } + ret = 
__auxiliary_device_add(&cp500->batt->auxdev, "keba"); + if (ret) { + auxiliary_device_uninit(&cp500->batt->auxdev); + cp500->batt = NULL; + + return ret; + } + + return 0; +} + +static void cp500_uart_release(struct device *dev) +{ + struct keba_uart_auxdev *uart = + container_of(dev, struct keba_uart_auxdev, auxdev.dev); + + kfree(uart); +} + +static int cp500_register_uart(struct cp500 *cp500, + struct keba_uart_auxdev **uart, const char *name, + struct cp500_dev_info *info, unsigned int irq) +{ + int ret; + + *uart = kzalloc(sizeof(**uart), GFP_KERNEL); + if (!*uart) + return -ENOMEM; + + (*uart)->auxdev.name = name; + (*uart)->auxdev.id = 0; + (*uart)->auxdev.dev.release = cp500_uart_release; + (*uart)->auxdev.dev.parent = &cp500->pci_dev->dev; + (*uart)->io = (struct resource) { + /* UART register area */ + .start = (resource_size_t) cp500->sys_hwbase + info->offset, + .end = (resource_size_t) cp500->sys_hwbase + info->offset + + info->size - 1, + .flags = IORESOURCE_MEM, + }; + (*uart)->irq = irq; + + ret = auxiliary_device_init(&(*uart)->auxdev); + if (ret) { + kfree(*uart); + *uart = NULL; + + return ret; + } + ret = __auxiliary_device_add(&(*uart)->auxdev, "keba"); + if (ret) { + auxiliary_device_uninit(&(*uart)->auxdev); + *uart = NULL; + + return ret; + } + + return 0; +} + +static int cp500_nvmem_read(void *priv, unsigned int offset, void *val, + size_t bytes) +{ + struct cp500_nvmem *nvmem = priv; + int ret; + + ret = nvmem_device_read(nvmem->base_nvmem, nvmem->offset + offset, + bytes, val); + if (ret != bytes) + return ret; + + return 0; +} + +static int cp500_nvmem_write(void *priv, unsigned int offset, void *val, + size_t bytes) +{ + struct cp500_nvmem *nvmem = priv; + int ret; + + ret = nvmem_device_write(nvmem->base_nvmem, nvmem->offset + offset, + bytes, val); + if (ret != bytes) + return ret; + + return 0; +} + +static int cp500_nvmem_register(struct cp500 *cp500, + struct nvmem_device *base_nvmem) +{ + struct device *dev = 
&cp500->pci_dev->dev; + struct nvmem_config nvmem_config = {}; + struct nvmem_device *tmp; + + /* + * The main EEPROM of CP500 devices is logically split into two EEPROMs. + * The first logical EEPROM with 3 kB contains the type label which is + * programmed during production of the device. The second logical EEPROM + * with 1 kB is not programmed during production and can be used for + * arbitrary user data. + */ + + nvmem_config.dev = dev; + nvmem_config.owner = THIS_MODULE; + nvmem_config.id = NVMEM_DEVID_NONE; + nvmem_config.type = NVMEM_TYPE_EEPROM; + nvmem_config.root_only = true; + nvmem_config.reg_read = cp500_nvmem_read; + nvmem_config.reg_write = cp500_nvmem_write; + + cp500->nvmem_cpu.base_nvmem = base_nvmem; + cp500->nvmem_cpu.offset = CP500_EEPROM_CPU_OFFSET; + nvmem_config.name = CP500_EEPROM_CPU_NAME; + nvmem_config.size = CP500_EEPROM_CPU_SIZE; + nvmem_config.priv = &cp500->nvmem_cpu; + tmp = nvmem_register(&nvmem_config); + if (IS_ERR(tmp)) + return PTR_ERR(tmp); + cp500->nvmem_cpu.nvmem = tmp; + + cp500->nvmem_user.base_nvmem = base_nvmem; + cp500->nvmem_user.offset = CP500_EEPROM_USER_OFFSET; + nvmem_config.name = CP500_EEPROM_USER_NAME; + nvmem_config.size = CP500_EEPROM_USER_SIZE; + nvmem_config.priv = &cp500->nvmem_user; + tmp = nvmem_register(&nvmem_config); + if (IS_ERR(tmp)) { + nvmem_unregister(cp500->nvmem_cpu.nvmem); + cp500->nvmem_cpu.nvmem = NULL; + + return PTR_ERR(tmp); + } + cp500->nvmem_user.nvmem = tmp; + + return 0; +} + +static void cp500_nvmem_unregister(struct cp500 *cp500) +{ + int notified; + + if (cp500->nvmem_user.nvmem) { + nvmem_unregister(cp500->nvmem_user.nvmem); + cp500->nvmem_user.nvmem = NULL; + } + if (cp500->nvmem_cpu.nvmem) { + nvmem_unregister(cp500->nvmem_cpu.nvmem); + cp500->nvmem_cpu.nvmem = NULL; + } + + /* CPU and user nvmem use the same base_nvmem, put only once */ + notified = atomic_read(&cp500->nvmem_notified); + if (notified) + nvmem_device_put(cp500->nvmem_cpu.base_nvmem); +} + +static int 
cp500_nvmem_match(struct device *dev, const void *data) +{ + const struct cp500 *cp500 = data; + struct i2c_client *client; + + /* match only CPU EEPROM below the cp500 device */ + dev = dev->parent; + client = i2c_verify_client(dev); + if (!client || client->addr != CP500_EEPROM_ADDR) + return 0; + while ((dev = dev->parent)) + if (dev == &cp500->pci_dev->dev) + return 1; + + return 0; +} + +static int cp500_nvmem(struct notifier_block *nb, unsigned long action, + void *data) +{ + struct nvmem_device *nvmem; + struct cp500 *cp500; + struct device *dev; + int notified; + u8 esc_type; + int ret; + + if (action != NVMEM_ADD) + return NOTIFY_DONE; + cp500 = container_of(nb, struct cp500, nvmem_notifier); + dev = &cp500->pci_dev->dev; + + /* process CPU EEPROM content only once */ + notified = atomic_read(&cp500->nvmem_notified); + if (notified) + return NOTIFY_DONE; + nvmem = nvmem_device_find(cp500, cp500_nvmem_match); + if (IS_ERR_OR_NULL(nvmem)) + return NOTIFY_DONE; + if (!atomic_try_cmpxchg_relaxed(&cp500->nvmem_notified, ¬ified, 1)) { + nvmem_device_put(nvmem); + + return NOTIFY_DONE; + } + + ret = cp500_nvmem_register(cp500, nvmem); + if (ret) + return ret; + + ret = nvmem_device_read(nvmem, CP500_EEPROM_DA_OFFSET, sizeof(esc_type), + (void *)&esc_type); + if (ret != sizeof(esc_type)) { + dev_warn(dev, "Failed to read device assembly!\n"); + + return NOTIFY_DONE; + } + esc_type &= CP500_EEPROM_DA_ESC_TYPE_MASK; + + if (cp500_register_spi(cp500, esc_type)) + dev_warn(dev, "Failed to register SPI!\n"); + + return NOTIFY_OK; +} + +static void cp500_register_auxiliary_devs(struct cp500 *cp500) +{ + struct device *dev = &cp500->pci_dev->dev; + u8 present = ioread8(cp500->system_startup_addr + CP500_PRESENT_REG); + + if (cp500_register_i2c(cp500)) + dev_warn(dev, "Failed to register I2C!\n"); + if (present & CP500_PRESENT_FAN0) + if (cp500_register_fan(cp500)) + dev_warn(dev, "Failed to register fan!\n"); + if (cp500_register_batt(cp500)) + dev_warn(dev, "Failed to 
register battery!\n"); + if (cp500->devs->uart0_rfb.size && + cp500->devs->uart0_rfb.msix < cp500->msix_num) { + int irq = pci_irq_vector(cp500->pci_dev, + cp500->devs->uart0_rfb.msix); + + if (cp500_register_uart(cp500, &cp500->uart0_rfb, "rs485-uart", + &cp500->devs->uart0_rfb, irq)) + dev_warn(dev, "Failed to register RFB UART!\n"); + } + if (cp500->devs->uart1_dbg.size && + cp500->devs->uart1_dbg.msix < cp500->msix_num) { + int irq = pci_irq_vector(cp500->pci_dev, + cp500->devs->uart1_dbg.msix); + + if (cp500_register_uart(cp500, &cp500->uart1_dbg, "rs232-uart", + &cp500->devs->uart1_dbg, irq)) + dev_warn(dev, "Failed to register debug UART!\n"); + } + if (cp500->devs->uart2_si1.size && + cp500->devs->uart2_si1.msix < cp500->msix_num) { + int irq = pci_irq_vector(cp500->pci_dev, + cp500->devs->uart2_si1.msix); + + if (cp500_register_uart(cp500, &cp500->uart2_si1, "uart", + &cp500->devs->uart2_si1, irq)) + dev_warn(dev, "Failed to register SI1 UART!\n"); + } +} + +static void cp500_unregister_dev(struct auxiliary_device *auxdev) +{ + auxiliary_device_delete(auxdev); + auxiliary_device_uninit(auxdev); +} + +static void cp500_unregister_auxiliary_devs(struct cp500 *cp500) +{ + if (cp500->spi) { + cp500_unregister_dev(&cp500->spi->auxdev); + cp500->spi = NULL; + } + if (cp500->i2c) { + cp500_unregister_dev(&cp500->i2c->auxdev); + cp500->i2c = NULL; + } + if (cp500->fan) { + cp500_unregister_dev(&cp500->fan->auxdev); + cp500->fan = NULL; + } + if (cp500->batt) { + cp500_unregister_dev(&cp500->batt->auxdev); + cp500->batt = NULL; + } + if (cp500->uart0_rfb) { + cp500_unregister_dev(&cp500->uart0_rfb->auxdev); + cp500->uart0_rfb = NULL; + } + if (cp500->uart1_dbg) { + cp500_unregister_dev(&cp500->uart1_dbg->auxdev); + cp500->uart1_dbg = NULL; + } + if (cp500->uart2_si1) { + cp500_unregister_dev(&cp500->uart2_si1->auxdev); + cp500->uart2_si1 = NULL; + } +} + +static irqreturn_t cp500_axi_handler(int irq, void *dev) +{ + struct cp500 *cp500 = dev; + u32 axi_address = 
ioread32(cp500->system_startup_addr + CP500_AXI_REG); + + /* + * FPGA signals AXI response error, print AXI address to indicate which + * IP core was affected + */ + dev_err(&cp500->pci_dev->dev, "AXI response error at 0x%08x\n", + axi_address); + + return IRQ_HANDLED; +} + +static int cp500_enable(struct cp500 *cp500) +{ + int axi_irq = -1; + int ret; + + if (cp500->msix_num > CP500_NUM_MSIX_NO_AXI) { + axi_irq = pci_irq_vector(cp500->pci_dev, CP500_AXI_MSIX); + ret = request_irq(axi_irq, cp500_axi_handler, 0, + CP500, cp500); + if (ret != 0) { + dev_err(&cp500->pci_dev->dev, + "Failed to register AXI response error!\n"); + return ret; + } + } + + return 0; +} + +static void cp500_disable(struct cp500 *cp500) +{ + int axi_irq; + + if (cp500->msix_num > CP500_NUM_MSIX_NO_AXI) { + axi_irq = pci_irq_vector(cp500->pci_dev, CP500_AXI_MSIX); + free_irq(axi_irq, cp500); + } +} + +static int cp500_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) +{ + struct device *dev = &pci_dev->dev; + struct resource startup; + struct cp500 *cp500; + u32 cp500_vers; + char buf[64]; + int ret; + + cp500 = devm_kzalloc(dev, sizeof(*cp500), GFP_KERNEL); + if (!cp500) + return -ENOMEM; + cp500->pci_dev = pci_dev; + cp500->sys_hwbase = pci_resource_start(pci_dev, CP500_SYS_BAR); + cp500->ecm_hwbase = pci_resource_start(pci_dev, CP500_ECM_BAR); + if (!cp500->sys_hwbase || !cp500->ecm_hwbase) + return -ENODEV; + + if (CP500_IS_CP035(cp500)) + cp500->devs = &cp035_devices; + else if (CP500_IS_CP505(cp500)) + cp500->devs = &cp505_devices; + else if (CP500_IS_CP520(cp500)) + cp500->devs = &cp520_devices; + else + return -ENODEV; + + ret = pci_enable_device(pci_dev); + if (ret) + return ret; + pci_set_master(pci_dev); + + startup = *pci_resource_n(pci_dev, CP500_SYS_BAR); + startup.end = startup.start + cp500->devs->startup.size - 1; + cp500->system_startup_addr = devm_ioremap_resource(&pci_dev->dev, + &startup); + if (IS_ERR(cp500->system_startup_addr)) { + ret = 
PTR_ERR(cp500->system_startup_addr); + goto out_disable; + } + + cp500->msix_num = pci_alloc_irq_vectors(pci_dev, CP500_NUM_MSIX_NO_MMI, + CP500_NUM_MSIX, PCI_IRQ_MSIX); + if (cp500->msix_num < CP500_NUM_MSIX_NO_MMI) { + dev_err(&pci_dev->dev, + "Hardware does not support enough MSI-X interrupts\n"); + ret = -ENODEV; + goto out_disable; + } + + cp500_vers = ioread32(cp500->system_startup_addr + CP500_VERSION_REG); + cp500->version.major = (cp500_vers & 0xff); + cp500->version.minor = (cp500_vers >> 8) & 0xff; + cp500->version.build = (cp500_vers >> 16) & 0xffff; + cp500_get_fpga_version(cp500, buf, sizeof(buf)); + + dev_info(&pci_dev->dev, "FPGA version %s", buf); + + pci_set_drvdata(pci_dev, cp500); + + cp500->nvmem_notifier.notifier_call = cp500_nvmem; + ret = nvmem_register_notifier(&cp500->nvmem_notifier); + if (ret != 0) + goto out_free_irq; + + ret = cp500_enable(cp500); + if (ret != 0) + goto out_unregister_nvmem; + + cp500_register_auxiliary_devs(cp500); + + return 0; + +out_unregister_nvmem: + nvmem_unregister_notifier(&cp500->nvmem_notifier); +out_free_irq: + pci_free_irq_vectors(pci_dev); +out_disable: + pci_clear_master(pci_dev); + pci_disable_device(pci_dev); + + return ret; +} + +static void cp500_remove(struct pci_dev *pci_dev) +{ + struct cp500 *cp500 = pci_get_drvdata(pci_dev); + + /* + * unregister CPU and user nvmem and put base_nvmem before parent + * auxiliary device of base_nvmem is unregistered + */ + nvmem_unregister_notifier(&cp500->nvmem_notifier); + cp500_nvmem_unregister(cp500); + + cp500_unregister_auxiliary_devs(cp500); + + cp500_disable(cp500); + + pci_set_drvdata(pci_dev, 0); + + pci_free_irq_vectors(pci_dev); + + pci_clear_master(pci_dev); + pci_disable_device(pci_dev); +} + +static struct pci_device_id cp500_ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_KEBA, PCI_DEVICE_ID_KEBA_CP035) }, + { PCI_DEVICE(PCI_VENDOR_ID_KEBA, PCI_DEVICE_ID_KEBA_CP505) }, + { PCI_DEVICE(PCI_VENDOR_ID_KEBA, PCI_DEVICE_ID_KEBA_CP520) }, + { } +}; 
+MODULE_DEVICE_TABLE(pci, cp500_ids); + +static struct pci_driver cp500_driver = { + .name = CP500, + .id_table = cp500_ids, + .probe = cp500_probe, + .remove = cp500_remove, + .dev_groups = cp500_groups, +}; +module_pci_driver(cp500_driver); + +MODULE_AUTHOR("Gerhard Engleder <eg@keba.com>"); +MODULE_DESCRIPTION("KEBA CP500 system FPGA driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/keba/lan9252.c b/drivers/misc/keba/lan9252.c new file mode 100644 index 000000000000..fc54afd1d05b --- /dev/null +++ b/drivers/misc/keba/lan9252.c @@ -0,0 +1,359 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) KEBA Industrial Automation Gmbh 2024 + * + * Driver for LAN9252 on KEBA CP500 devices + * + * This driver is used for updating the configuration of the LAN9252 controller + * on KEBA CP500 devices. The LAN9252 is connected over SPI, which is also named + * PDI. + */ + +#include <linux/spi/spi.h> +#include <linux/mii.h> + +/* SPI commands */ +#define LAN9252_SPI_READ 0x3 +#define LAN9252_SPI_WRITE 0x2 + +struct lan9252_read_cmd { + u8 cmd; + u8 addr_0; + u8 addr_1; +} __packed; + +struct lan9252_write_cmd { + u8 cmd; + u8 addr_0; + u8 addr_1; + u32 data; +} __packed; + +/* byte test register */ +#define LAN9252_BYTE_TEST 0x64 +#define LAN9252_BYTE_TEST_VALUE 0x87654321 + +/* hardware configuration register */ +#define LAN9252_HW_CFG 0x74 +#define LAN9252_HW_CFG_READY 0x08000000 + +/* EtherCAT CSR interface data register */ +#define LAN9252_ECAT_CSR_DATA 0x300 + +/* EtherCAT CSR interface command register */ +#define LAN9252_ECAT_CSR_CMD 0x304 +#define LAN9252_ECAT_CSR_BUSY 0x80000000 +#define LAN9252_ECAT_CSR_READ 0x40000000 + +/* EtherCAT slave controller MII register */ +#define LAN9252_ESC_MII 0x510 +#define LAN9252_ESC_MII_BUSY 0x8000 +#define LAN9252_ESC_MII_CMD_ERR 0x4000 +#define LAN9252_ESC_MII_READ_ERR 0x2000 +#define LAN9252_ESC_MII_ERR_MASK (LAN9252_ESC_MII_CMD_ERR | \ + LAN9252_ESC_MII_READ_ERR) +#define LAN9252_ESC_MII_WRITE 0x0200 +#define 
LAN9252_ESC_MII_READ 0x0100 + +/* EtherCAT slave controller PHY address register */ +#define LAN9252_ESC_PHY_ADDR 0x512 + +/* EtherCAT slave controller PHY register address register */ +#define LAN9252_ESC_PHY_REG_ADDR 0x513 + +/* EtherCAT slave controller PHY data register */ +#define LAN9252_ESC_PHY_DATA 0x514 + +/* EtherCAT slave controller PDI access state register */ +#define LAN9252_ESC_MII_PDI 0x517 +#define LAN9252_ESC_MII_ACCESS_PDI 0x01 +#define LAN9252_ESC_MII_ACCESS_ECAT 0x00 + +/* PHY address */ +#define PHY_ADDRESS 2 + +#define SPI_RETRY_COUNT 10 +#define SPI_WAIT_US 100 +#define SPI_CSR_WAIT_US 500 + +static int lan9252_spi_read(struct spi_device *spi, u16 addr, u32 *data) +{ + struct lan9252_read_cmd cmd; + + cmd.cmd = LAN9252_SPI_READ; + cmd.addr_0 = (addr >> 8) & 0xFF; + cmd.addr_1 = addr & 0xFF; + + return spi_write_then_read(spi, (u8 *)&cmd, + sizeof(struct lan9252_read_cmd), + (u8 *)data, sizeof(u32)); +} + +static int lan9252_spi_write(struct spi_device *spi, u16 addr, u32 data) +{ + struct lan9252_write_cmd cmd; + + cmd.cmd = LAN9252_SPI_WRITE; + cmd.addr_0 = (addr >> 8) & 0xFF; + cmd.addr_1 = addr & 0xFF; + cmd.data = data; + + return spi_write(spi, (u8 *)&cmd, sizeof(struct lan9252_write_cmd)); +} + +static bool lan9252_init(struct spi_device *spi) +{ + u32 data; + int ret; + + ret = lan9252_spi_read(spi, LAN9252_BYTE_TEST, &data); + if (ret || data != LAN9252_BYTE_TEST_VALUE) + return false; + + ret = lan9252_spi_read(spi, LAN9252_HW_CFG, &data); + if (ret || !(data & LAN9252_HW_CFG_READY)) + return false; + + return true; +} + +static u8 lan9252_esc_get_size(u16 addr) +{ + if (addr == LAN9252_ESC_MII || addr == LAN9252_ESC_PHY_DATA) + return 2; + + return 1; +} + +static int lan9252_esc_wait(struct spi_device *spi) +{ + ktime_t timeout = ktime_add_us(ktime_get(), SPI_WAIT_US); + u32 data; + int ret; + + /* wait while CSR command is busy */ + for (;;) { + ret = lan9252_spi_read(spi, LAN9252_ECAT_CSR_CMD, &data); + if (ret) + return ret; + 
if (!(data & LAN9252_ECAT_CSR_BUSY)) + return 0; + + if (ktime_compare(ktime_get(), timeout) > 0) { + ret = lan9252_spi_read(spi, LAN9252_ECAT_CSR_CMD, &data); + if (ret) + return ret; + break; + } + } + + return (!(data & LAN9252_ECAT_CSR_BUSY)) ? 0 : -ETIMEDOUT; +} + +static int lan9252_esc_read(struct spi_device *spi, u16 addr, u32 *data) +{ + u32 csr_cmd; + u8 size; + int ret; + + size = lan9252_esc_get_size(addr); + csr_cmd = LAN9252_ECAT_CSR_BUSY | LAN9252_ECAT_CSR_READ; + csr_cmd |= (size << 16) | addr; + ret = lan9252_spi_write(spi, LAN9252_ECAT_CSR_CMD, csr_cmd); + if (ret) + return ret; + + ret = lan9252_esc_wait(spi); + if (ret) + return ret; + + ret = lan9252_spi_read(spi, LAN9252_ECAT_CSR_DATA, data); + if (ret) + return ret; + + return 0; +} + +static int lan9252_esc_write(struct spi_device *spi, u16 addr, u32 data) +{ + u32 csr_cmd; + u8 size; + int ret; + + ret = lan9252_spi_write(spi, LAN9252_ECAT_CSR_DATA, data); + if (ret) + return ret; + + size = lan9252_esc_get_size(addr); + csr_cmd = LAN9252_ECAT_CSR_BUSY; + csr_cmd |= (size << 16) | addr; + ret = lan9252_spi_write(spi, LAN9252_ECAT_CSR_CMD, csr_cmd); + if (ret) + return ret; + + ret = lan9252_esc_wait(spi); + if (ret) + return ret; + + return 0; +} + +static int lan9252_access_mii(struct spi_device *spi, bool access) +{ + u32 data; + + if (access) + data = LAN9252_ESC_MII_ACCESS_PDI; + else + data = LAN9252_ESC_MII_ACCESS_ECAT; + + return lan9252_esc_write(spi, LAN9252_ESC_MII_PDI, data); +} + +static int lan9252_mii_wait(struct spi_device *spi) +{ + ktime_t timeout = ktime_add_us(ktime_get(), SPI_CSR_WAIT_US); + u32 data; + int ret; + + /* wait while MII control state machine is busy */ + for (;;) { + ret = lan9252_esc_read(spi, LAN9252_ESC_MII, &data); + if (ret) + return ret; + if (data & LAN9252_ESC_MII_ERR_MASK) + return -EIO; + if (!(data & LAN9252_ESC_MII_BUSY)) + return 0; + + if (ktime_compare(ktime_get(), timeout) > 0) { + ret = lan9252_esc_read(spi, LAN9252_ESC_MII, &data); + if 
(ret) + return ret; + if (data & LAN9252_ESC_MII_ERR_MASK) + return -EIO; + break; + } + } + + return (!(data & LAN9252_ESC_MII_BUSY)) ? 0 : -ETIMEDOUT; +} + +static int lan9252_mii_read(struct spi_device *spi, u8 phy_addr, u8 reg_addr, + u32 *data) +{ + int ret; + + ret = lan9252_esc_write(spi, LAN9252_ESC_PHY_ADDR, phy_addr); + if (ret) + return ret; + ret = lan9252_esc_write(spi, LAN9252_ESC_PHY_REG_ADDR, reg_addr); + if (ret) + return ret; + + ret = lan9252_esc_write(spi, LAN9252_ESC_MII, LAN9252_ESC_MII_READ); + if (ret) + return ret; + + ret = lan9252_mii_wait(spi); + if (ret) + return ret; + + return lan9252_esc_read(spi, LAN9252_ESC_PHY_DATA, data); +} + +static int lan9252_mii_write(struct spi_device *spi, u8 phy_addr, u8 reg_addr, + u32 data) +{ + int ret; + + ret = lan9252_esc_write(spi, LAN9252_ESC_PHY_ADDR, phy_addr); + if (ret) + return ret; + ret = lan9252_esc_write(spi, LAN9252_ESC_PHY_REG_ADDR, reg_addr); + if (ret) + return ret; + ret = lan9252_esc_write(spi, LAN9252_ESC_PHY_DATA, data); + if (ret) + return ret; + + ret = lan9252_esc_write(spi, LAN9252_ESC_MII, LAN9252_ESC_MII_WRITE); + if (ret) + return ret; + + return lan9252_mii_wait(spi); +} + +static int lan9252_probe(struct spi_device *spi) +{ + u32 data; + int retry = SPI_RETRY_COUNT; + int ret; + + /* execute specified initialization sequence */ + while (retry && !lan9252_init(spi)) + retry--; + if (retry == 0) { + dev_err(&spi->dev, + "Can't initialize LAN9252 SPI communication!"); + return -EIO; + } + + /* enable access to MII management for PDI */ + ret = lan9252_access_mii(spi, true); + if (ret) { + dev_err(&spi->dev, "Can't enable access to MII management!"); + return ret; + } + + /* + * check PHY configuration and configure if necessary + * - full duplex + * - auto negotiation disabled + * - 100 Mbps + */ + ret = lan9252_mii_read(spi, PHY_ADDRESS, MII_BMCR, &data); + if (ret) { + dev_err(&spi->dev, "Can't read LAN9252 configuration!"); + goto out; + } + if (!(data & BMCR_FULLDPLX) || 
(data & BMCR_ANENABLE) || + !(data & BMCR_SPEED100)) { + /* + */ + data &= ~(BMCR_ANENABLE); + data |= (BMCR_FULLDPLX | BMCR_SPEED100); + ret = lan9252_mii_write(spi, PHY_ADDRESS, MII_BMCR, data); + if (ret) + dev_err(&spi->dev, + "Can't write LAN9252 configuration!"); + } + + dev_info(&spi->dev, "LAN9252 PHY configuration"); + +out: + /* disable access to MII management for PDI */ + lan9252_access_mii(spi, false); + + return ret; +} + +static const struct spi_device_id lan9252_id[] = { + {"lan9252"}, + {} +}; +MODULE_DEVICE_TABLE(spi, lan9252_id); + +static struct spi_driver lan9252_driver = { + .driver = { + .name = "lan9252", + }, + .probe = lan9252_probe, + .id_table = lan9252_id, +}; +module_spi_driver(lan9252_driver); + +MODULE_AUTHOR("Petar Bojanic <boja@keba.com>"); +MODULE_AUTHOR("Gerhard Engleder <eg@keba.com>"); +MODULE_DESCRIPTION("KEBA LAN9252 driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c index 88b91ad8e541..0cf31164b470 100644 --- a/drivers/misc/kgdbts.c +++ b/drivers/misc/kgdbts.c @@ -95,6 +95,7 @@ #include <linux/kallsyms.h> #include <asm/sections.h> +#include <asm/rwonce.h> #define v1printk(a...) do { \ if (verbose) \ @@ -126,7 +127,6 @@ static int final_ack; static int force_hwbrks; static int hwbreaks_ok; static int hw_break_val; -static int hw_break_val2; static int cont_instead_of_sstep; static unsigned long cont_thread_id; static unsigned long sstep_thread_id; @@ -284,7 +284,7 @@ static void hw_rem_access_break(char *arg) static void hw_break_val_access(void) { - hw_break_val2 = hw_break_val; + READ_ONCE(hw_break_val); } static void hw_break_val_write(void) diff --git a/drivers/misc/lan966x_pci.c b/drivers/misc/lan966x_pci.c new file mode 100644 index 000000000000..9c79b58137e5 --- /dev/null +++ b/drivers/misc/lan966x_pci.c @@ -0,0 +1,215 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Microchip LAN966x PCI driver + * + * Copyright (c) 2024 Microchip Technology Inc. and its subsidiaries. 
+ * + * Authors: + * Clément Léger <clement.leger@bootlin.com> + * Hervé Codina <herve.codina@bootlin.com> + */ + +#include <linux/device.h> +#include <linux/irq.h> +#include <linux/irqdomain.h> +#include <linux/module.h> +#include <linux/of_platform.h> +#include <linux/pci.h> +#include <linux/pci_ids.h> +#include <linux/slab.h> + +/* Embedded dtbo symbols created by cmd_wrap_S_dtb in scripts/Makefile.lib */ +extern char __dtbo_lan966x_pci_begin[]; +extern char __dtbo_lan966x_pci_end[]; + +struct pci_dev_intr_ctrl { + struct pci_dev *pci_dev; + struct irq_domain *irq_domain; + int irq; +}; + +static int pci_dev_irq_domain_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw) +{ + irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_simple_irq); + return 0; +} + +static const struct irq_domain_ops pci_dev_irq_domain_ops = { + .map = pci_dev_irq_domain_map, + .xlate = irq_domain_xlate_onecell, +}; + +static irqreturn_t pci_dev_irq_handler(int irq, void *data) +{ + struct pci_dev_intr_ctrl *intr_ctrl = data; + int ret; + + ret = generic_handle_domain_irq(intr_ctrl->irq_domain, 0); + return ret ? 
IRQ_NONE : IRQ_HANDLED; +} + +static struct pci_dev_intr_ctrl *pci_dev_create_intr_ctrl(struct pci_dev *pdev) +{ + struct pci_dev_intr_ctrl *intr_ctrl __free(kfree) = NULL; + struct fwnode_handle *fwnode; + int ret; + + fwnode = dev_fwnode(&pdev->dev); + if (!fwnode) + return ERR_PTR(-ENODEV); + + intr_ctrl = kmalloc(sizeof(*intr_ctrl), GFP_KERNEL); + if (!intr_ctrl) + return ERR_PTR(-ENOMEM); + + intr_ctrl->pci_dev = pdev; + + intr_ctrl->irq_domain = irq_domain_create_linear(fwnode, 1, &pci_dev_irq_domain_ops, + intr_ctrl); + if (!intr_ctrl->irq_domain) { + pci_err(pdev, "Failed to create irqdomain\n"); + return ERR_PTR(-ENOMEM); + } + + ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_INTX); + if (ret < 0) { + pci_err(pdev, "Unable alloc irq vector (%d)\n", ret); + goto err_remove_domain; + } + intr_ctrl->irq = pci_irq_vector(pdev, 0); + ret = request_irq(intr_ctrl->irq, pci_dev_irq_handler, IRQF_SHARED, + pci_name(pdev), intr_ctrl); + if (ret) { + pci_err(pdev, "Unable to request irq %d (%d)\n", intr_ctrl->irq, ret); + goto err_free_irq_vector; + } + + return_ptr(intr_ctrl); + +err_free_irq_vector: + pci_free_irq_vectors(pdev); +err_remove_domain: + irq_domain_remove(intr_ctrl->irq_domain); + return ERR_PTR(ret); +} + +static void pci_dev_remove_intr_ctrl(struct pci_dev_intr_ctrl *intr_ctrl) +{ + free_irq(intr_ctrl->irq, intr_ctrl); + pci_free_irq_vectors(intr_ctrl->pci_dev); + irq_dispose_mapping(irq_find_mapping(intr_ctrl->irq_domain, 0)); + irq_domain_remove(intr_ctrl->irq_domain); + kfree(intr_ctrl); +} + +static void devm_pci_dev_remove_intr_ctrl(void *intr_ctrl) +{ + pci_dev_remove_intr_ctrl(intr_ctrl); +} + +static int devm_pci_dev_create_intr_ctrl(struct pci_dev *pdev) +{ + struct pci_dev_intr_ctrl *intr_ctrl; + + intr_ctrl = pci_dev_create_intr_ctrl(pdev); + if (IS_ERR(intr_ctrl)) + return PTR_ERR(intr_ctrl); + + return devm_add_action_or_reset(&pdev->dev, devm_pci_dev_remove_intr_ctrl, intr_ctrl); +} + +struct lan966x_pci { + struct device *dev; + int 
ovcs_id; +}; + +static int lan966x_pci_load_overlay(struct lan966x_pci *data) +{ + u32 dtbo_size = __dtbo_lan966x_pci_end - __dtbo_lan966x_pci_begin; + void *dtbo_start = __dtbo_lan966x_pci_begin; + + return of_overlay_fdt_apply(dtbo_start, dtbo_size, &data->ovcs_id, dev_of_node(data->dev)); +} + +static void lan966x_pci_unload_overlay(struct lan966x_pci *data) +{ + of_overlay_remove(&data->ovcs_id); +} + +static int lan966x_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct device *dev = &pdev->dev; + struct lan966x_pci *data; + int ret; + + /* + * On ACPI system, fwnode can point to the ACPI node. + * This driver needs an of_node to be used as the device-tree overlay + * target. This of_node should be set by the PCI core if it succeeds in + * creating it (CONFIG_PCI_DYNAMIC_OF_NODES feature). + * Check here for the validity of this of_node. + */ + if (!dev_of_node(dev)) + return dev_err_probe(dev, -EINVAL, "Missing of_node for device\n"); + + /* Need to be done before devm_pci_dev_create_intr_ctrl. + * It allocates an IRQ and so pdev->irq is updated. 
+ */ + ret = pcim_enable_device(pdev); + if (ret) + return ret; + + ret = devm_pci_dev_create_intr_ctrl(pdev); + if (ret) + return ret; + + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + pci_set_drvdata(pdev, data); + data->dev = dev; + + ret = lan966x_pci_load_overlay(data); + if (ret) + return ret; + + pci_set_master(pdev); + + ret = of_platform_default_populate(dev_of_node(dev), NULL, dev); + if (ret) + goto err_unload_overlay; + + return 0; + +err_unload_overlay: + lan966x_pci_unload_overlay(data); + return ret; +} + +static void lan966x_pci_remove(struct pci_dev *pdev) +{ + struct lan966x_pci *data = pci_get_drvdata(pdev); + + of_platform_depopulate(data->dev); + + lan966x_pci_unload_overlay(data); +} + +static struct pci_device_id lan966x_pci_ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_EFAR, 0x9660) }, + { } +}; +MODULE_DEVICE_TABLE(pci, lan966x_pci_ids); + +static struct pci_driver lan966x_pci_driver = { + .name = "mchp_lan966x_pci", + .id_table = lan966x_pci_ids, + .probe = lan966x_pci_probe, + .remove = lan966x_pci_remove, +}; +module_pci_driver(lan966x_pci_driver); + +MODULE_AUTHOR("Herve Codina <herve.codina@bootlin.com>"); +MODULE_DESCRIPTION("Microchip LAN966x PCI driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/lan966x_pci.dtso b/drivers/misc/lan966x_pci.dtso new file mode 100644 index 000000000000..7b196b0a0eb6 --- /dev/null +++ b/drivers/misc/lan966x_pci.dtso @@ -0,0 +1,177 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2022 Microchip UNG + */ + +#include <dt-bindings/clock/microchip,lan966x.h> +#include <dt-bindings/gpio/gpio.h> +#include <dt-bindings/interrupt-controller/irq.h> +#include <dt-bindings/mfd/atmel-flexcom.h> +#include <dt-bindings/phy/phy-lan966x-serdes.h> + +/dts-v1/; +/plugin/; + +/ { + fragment@0 { + target-path = ""; + + /* + * These properties allow to avoid a dtc warnings. + * The real interrupt controller is the PCI device itself. 
It + * is the node on which the device tree overlay will be applied. + * This node has those properties. + */ + #interrupt-cells = <1>; + interrupt-controller; + + __overlay__ { + #address-cells = <3>; + #size-cells = <2>; + + cpu_clk: clock-600000000 { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <600000000>; /* CPU clock = 600MHz */ + }; + + ddr_clk: clock-30000000 { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <30000000>; /* Fabric clock = 30MHz */ + }; + + sys_clk: clock-15625000 { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <15625000>; /* System clock = 15.625MHz */ + }; + + pci-ep-bus@0 { + compatible = "simple-bus"; + #address-cells = <1>; + #size-cells = <1>; + + /* + * map @0xe2000000 (32MB) to BAR0 (CPU) + * map @0xe0000000 (16MB) to BAR1 (AMBA) + */ + ranges = <0xe2000000 0x00 0x00 0x00 0x2000000 + 0xe0000000 0x01 0x00 0x00 0x1000000>; + + oic: oic@e00c0120 { + compatible = "microchip,lan966x-oic"; + #interrupt-cells = <2>; + interrupt-controller; + interrupts = <0>; /* PCI INTx assigned interrupt */ + reg = <0xe00c0120 0x190>; + }; + + cpu_ctrl: syscon@e00c0000 { + compatible = "microchip,lan966x-cpu-syscon", "syscon"; + reg = <0xe00c0000 0xa8>; + }; + + reset: reset@e200400c { + compatible = "microchip,lan966x-switch-reset"; + reg = <0xe200400c 0x4>, <0xe00c0000 0xa8>; + reg-names = "gcb","cpu"; + #reset-cells = <1>; + cpu-syscon = <&cpu_ctrl>; + }; + + gpio: pinctrl@e2004064 { + compatible = "microchip,lan966x-pinctrl"; + reg = <0xe2004064 0xb4>, + <0xe2010024 0x138>; + resets = <&reset 0>; + reset-names = "switch"; + gpio-controller; + #gpio-cells = <2>; + gpio-ranges = <&gpio 0 0 78>; + interrupt-parent = <&oic>; + interrupt-controller; + interrupts = <17 IRQ_TYPE_LEVEL_HIGH>; + #interrupt-cells = <2>; + + tod_pins: tod_pins { + pins = "GPIO_36"; + function = "ptpsync_1"; + }; + + fc0_a_pins: fcb4-i2c-pins { + /* RXD, TXD */ + pins = "GPIO_9", "GPIO_10"; + function = 
"fc0_a"; + }; + + }; + + serdes: serdes@e202c000 { + compatible = "microchip,lan966x-serdes"; + reg = <0xe202c000 0x9c>, + <0xe2004010 0x4>; + #phy-cells = <2>; + }; + + mdio1: mdio@e200413c { + #address-cells = <1>; + #size-cells = <0>; + compatible = "microchip,lan966x-miim"; + reg = <0xe200413c 0x24>, + <0xe2010020 0x4>; + + resets = <&reset 0>; + reset-names = "switch"; + + lan966x_phy0: ethernet-lan966x_phy@1 { + reg = <1>; + }; + + lan966x_phy1: ethernet-lan966x_phy@2 { + reg = <2>; + }; + }; + + switch: switch@e0000000 { + compatible = "microchip,lan966x-switch"; + reg = <0xe0000000 0x0100000>, + <0xe2000000 0x0800000>; + reg-names = "cpu", "gcb"; + + interrupt-parent = <&oic>; + interrupts = <12 IRQ_TYPE_LEVEL_HIGH>, + <9 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "xtr", "ana"; + + resets = <&reset 0>; + reset-names = "switch"; + + pinctrl-names = "default"; + pinctrl-0 = <&tod_pins>; + + ethernet-ports { + #address-cells = <1>; + #size-cells = <0>; + + port0: port@0 { + phy-handle = <&lan966x_phy0>; + + reg = <0>; + phy-mode = "gmii"; + phys = <&serdes 0 CU(0)>; + }; + + port1: port@1 { + phy-handle = <&lan966x_phy1>; + + reg = <1>; + phy-mode = "gmii"; + phys = <&serdes 1 CU(1)>; + }; + }; + }; + }; + }; + }; +}; diff --git a/drivers/misc/lattice-ecp3-config.c b/drivers/misc/lattice-ecp3-config.c index bac4df2e5231..93949df3bcff 100644 --- a/drivers/misc/lattice-ecp3-config.c +++ b/drivers/misc/lattice-ecp3-config.c @@ -11,7 +11,7 @@ #include <linux/spi/spi.h> #include <linux/platform_device.h> #include <linux/delay.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #define FIRMWARE_NAME "lattice-ecp3.bit" diff --git a/drivers/misc/lis3lv02d/Kconfig b/drivers/misc/lis3lv02d/Kconfig index bb2fec4b5880..9d546a42a563 100644 --- a/drivers/misc/lis3lv02d/Kconfig +++ b/drivers/misc/lis3lv02d/Kconfig @@ -4,13 +4,13 @@ # config SENSORS_LIS3_SPI - tristate "STMicroeletronics LIS3LV02Dx three-axis digital accelerometer (SPI)" + tristate "STMicroelectronics 
LIS3LV02Dx three-axis digital accelerometer (SPI)" depends on !ACPI && SPI_MASTER && INPUT select SENSORS_LIS3LV02D help This driver provides support for the LIS3LV02Dx accelerometer connected via SPI. The accelerometer data is readable via - /sys/devices/platform/lis3lv02d. + /sys/devices/faux/lis3lv02d. This driver also provides an absolute input class device, allowing the laptop to act as a pinball machine-esque joystick. @@ -20,13 +20,13 @@ config SENSORS_LIS3_SPI is called lis3lv02d_spi. config SENSORS_LIS3_I2C - tristate "STMicroeletronics LIS3LV02Dx three-axis digital accelerometer (I2C)" + tristate "STMicroelectronics LIS3LV02Dx three-axis digital accelerometer (I2C)" depends on I2C && INPUT select SENSORS_LIS3LV02D help This driver provides support for the LIS3LV02Dx accelerometer connected via I2C. The accelerometer data is readable via - /sys/devices/platform/lis3lv02d. + /sys/devices/faux/lis3lv02d. This driver also provides an absolute input class device, allowing the device to act as a pinball machine-esque joystick. 
diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c index 299d316f1bda..1a634ac1a241 100644 --- a/drivers/misc/lis3lv02d/lis3lv02d.c +++ b/drivers/misc/lis3lv02d/lis3lv02d.c @@ -12,9 +12,9 @@ #include <linux/kernel.h> #include <linux/sched/signal.h> #include <linux/dmi.h> +#include <linux/minmax.h> #include <linux/module.h> #include <linux/types.h> -#include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/input.h> #include <linux/delay.h> @@ -26,7 +26,7 @@ #include <linux/miscdevice.h> #include <linux/pm_runtime.h> #include <linux/atomic.h> -#include <linux/of_device.h> +#include <linux/of.h> #include "lis3lv02d.h" #define DRIVER_NAME "lis3lv02d" @@ -230,7 +230,7 @@ static int lis3lv02d_get_pwron_wait(struct lis3lv02d *lis3) return 0; } - dev_err(&lis3->pdev->dev, "Error unknown odrs-index: %d\n", odr_idx); + dev_err(&lis3->fdev->dev, "Error unknown odrs-index: %d\n", odr_idx); return -ENXIO; } @@ -630,10 +630,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf, schedule(); } - if (data < 255) - byte_data = data; - else - byte_data = 255; + byte_data = min(data, 255); /* make sure we are not going into copy_to_user() with * TASK_INTERRUPTIBLE state */ @@ -669,7 +666,6 @@ static int lis3lv02d_misc_fasync(int fd, struct file *file, int on) static const struct file_operations lis3lv02d_misc_fops = { .owner = THIS_MODULE, - .llseek = no_llseek, .read = lis3lv02d_misc_read, .open = lis3lv02d_misc_open, .release = lis3lv02d_misc_release, @@ -695,7 +691,7 @@ int lis3lv02d_joystick_enable(struct lis3lv02d *lis3) input_dev->phys = DRIVER_NAME "/input0"; input_dev->id.bustype = BUS_HOST; input_dev->id.vendor = 0; - input_dev->dev.parent = &lis3->pdev->dev; + input_dev->dev.parent = &lis3->fdev->dev; input_dev->open = lis3lv02d_joystick_open; input_dev->close = lis3lv02d_joystick_close; @@ -856,32 +852,27 @@ static DEVICE_ATTR(position, S_IRUGO, lis3lv02d_position_show, NULL); static 
DEVICE_ATTR(rate, S_IRUGO | S_IWUSR, lis3lv02d_rate_show, lis3lv02d_rate_set); -static struct attribute *lis3lv02d_attributes[] = { +static struct attribute *lis3lv02d_attrs[] = { &dev_attr_selftest.attr, &dev_attr_position.attr, &dev_attr_rate.attr, NULL }; - -static const struct attribute_group lis3lv02d_attribute_group = { - .attrs = lis3lv02d_attributes -}; - +ATTRIBUTE_GROUPS(lis3lv02d); static int lis3lv02d_add_fs(struct lis3lv02d *lis3) { - lis3->pdev = platform_device_register_simple(DRIVER_NAME, -1, NULL, 0); - if (IS_ERR(lis3->pdev)) - return PTR_ERR(lis3->pdev); + lis3->fdev = faux_device_create_with_groups(DRIVER_NAME, NULL, NULL, lis3lv02d_groups); + if (!lis3->fdev) + return -ENODEV; - platform_set_drvdata(lis3->pdev, lis3); - return sysfs_create_group(&lis3->pdev->dev.kobj, &lis3lv02d_attribute_group); + faux_device_set_drvdata(lis3->fdev, lis3); + return 0; } void lis3lv02d_remove_fs(struct lis3lv02d *lis3) { - sysfs_remove_group(&lis3->pdev->dev.kobj, &lis3lv02d_attribute_group); - platform_device_unregister(lis3->pdev); + faux_device_destroy(lis3->fdev); if (lis3->pm_dev) { /* Barrier after the sysfs remove */ pm_runtime_barrier(lis3->pm_dev); @@ -1038,7 +1029,7 @@ int lis3lv02d_init_dt(struct lis3lv02d *lis3) pdata->wakeup_flags |= LIS3_WAKEUP_Z_LO; if (of_property_read_bool(np, "st,wakeup-z-hi")) pdata->wakeup_flags |= LIS3_WAKEUP_Z_HI; - if (of_get_property(np, "st,wakeup-threshold", &val)) + if (!of_property_read_u32(np, "st,wakeup-threshold", &val)) pdata->wakeup_thresh = val; if (of_property_read_bool(np, "st,wakeup2-x-lo")) @@ -1053,7 +1044,7 @@ int lis3lv02d_init_dt(struct lis3lv02d *lis3) pdata->wakeup_flags2 |= LIS3_WAKEUP_Z_LO; if (of_property_read_bool(np, "st,wakeup2-z-hi")) pdata->wakeup_flags2 |= LIS3_WAKEUP_Z_HI; - if (of_get_property(np, "st,wakeup2-threshold", &val)) + if (!of_property_read_u32(np, "st,wakeup2-threshold", &val)) pdata->wakeup_thresh2 = val; if (!of_property_read_u32(np, "st,highpass-cutoff-hz", &val)) { diff 
--git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h index 195bd2fd2eb5..989a49e57554 100644 --- a/drivers/misc/lis3lv02d/lis3lv02d.h +++ b/drivers/misc/lis3lv02d/lis3lv02d.h @@ -5,7 +5,7 @@ * Copyright (C) 2007-2008 Yan Burman * Copyright (C) 2008-2009 Eric Piel */ -#include <linux/platform_device.h> +#include <linux/device/faux.h> #include <linux/input.h> #include <linux/regulator/consumer.h> #include <linux/miscdevice.h> @@ -282,7 +282,7 @@ struct lis3lv02d { */ struct input_dev *idev; /* input device */ - struct platform_device *pdev; /* platform device */ + struct faux_device *fdev; /* faux device */ struct regulator_bulk_data regulators[2]; atomic_t count; /* interrupt count after last read */ union axis_conversion ac; /* hw -> logical axis */ diff --git a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c index 3882e97e96a7..15119584473c 100644 --- a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c +++ b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c @@ -150,6 +150,7 @@ static int lis3lv02d_i2c_probe(struct i2c_client *client) lis3_dev.init = lis3_i2c_init; lis3_dev.read = lis3_i2c_read; lis3_dev.write = lis3_i2c_write; + lis3_dev.reg_ctrl = lis3_reg_ctrl; lis3_dev.irq = client->irq; lis3_dev.ac = lis3lv02d_axis_map; lis3_dev.pm_dev = &client->dev; @@ -197,8 +198,14 @@ static int lis3lv02d_i2c_suspend(struct device *dev) struct i2c_client *client = to_i2c_client(dev); struct lis3lv02d *lis3 = i2c_get_clientdata(client); - if (!lis3->pdata || !lis3->pdata->wakeup_flags) + /* Turn on for wakeup if turned off by runtime suspend */ + if (lis3->pdata && lis3->pdata->wakeup_flags) { + if (pm_runtime_suspended(dev)) + lis3lv02d_poweron(lis3); + /* For non wakeup turn off if not already turned off by runtime suspend */ + } else if (!pm_runtime_suspended(dev)) lis3lv02d_poweroff(lis3); + return 0; } @@ -207,13 +214,12 @@ static int lis3lv02d_i2c_resume(struct device *dev) struct i2c_client *client = to_i2c_client(dev); struct 
lis3lv02d *lis3 = i2c_get_clientdata(client); - /* - * pm_runtime documentation says that devices should always - * be powered on at resume. Pm_runtime turns them off after system - * wide resume is complete. - */ - if (!lis3->pdata || !lis3->pdata->wakeup_flags || - pm_runtime_suspended(dev)) + /* Turn back off if turned on for wakeup and runtime suspended*/ + if (lis3->pdata && lis3->pdata->wakeup_flags) { + if (pm_runtime_suspended(dev)) + lis3lv02d_poweroff(lis3); + /* For non wakeup turn back on if not runtime suspended */ + } else if (!pm_runtime_suspended(dev)) lis3lv02d_poweron(lis3); return 0; diff --git a/drivers/misc/lkdtm/Makefile b/drivers/misc/lkdtm/Makefile index 95ef971b5e1c..03ebe33185f9 100644 --- a/drivers/misc/lkdtm/Makefile +++ b/drivers/misc/lkdtm/Makefile @@ -8,18 +8,14 @@ lkdtm-$(CONFIG_LKDTM) += perms.o lkdtm-$(CONFIG_LKDTM) += refcount.o lkdtm-$(CONFIG_LKDTM) += rodata_objcopy.o lkdtm-$(CONFIG_LKDTM) += usercopy.o -lkdtm-$(CONFIG_LKDTM) += stackleak.o +lkdtm-$(CONFIG_LKDTM) += kstack_erase.o lkdtm-$(CONFIG_LKDTM) += cfi.o lkdtm-$(CONFIG_LKDTM) += fortify.o lkdtm-$(CONFIG_PPC_64S_HASH_MMU) += powerpc.o KASAN_SANITIZE_stackleak.o := n -KASAN_SANITIZE_rodata.o := n -KCSAN_SANITIZE_rodata.o := n -KCOV_INSTRUMENT_rodata.o := n -OBJECT_FILES_NON_STANDARD_rodata.o := y -CFLAGS_REMOVE_rodata.o += $(CC_FLAGS_LTO) $(RETHUNK_CFLAGS) +CFLAGS_REMOVE_rodata.o += $(CC_FLAGS_LTO) $(RETHUNK_CFLAGS) $(CC_FLAGS_CFI) OBJCOPYFLAGS := OBJCOPYFLAGS_rodata_objcopy.o := \ diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c index c66cc05a68c4..376047beea3d 100644 --- a/drivers/misc/lkdtm/bugs.c +++ b/drivers/misc/lkdtm/bugs.c @@ -6,12 +6,14 @@ * test source files. 
*/ #include "lkdtm.h" +#include <linux/cpu.h> #include <linux/list.h> #include <linux/sched.h> #include <linux/sched/signal.h> #include <linux/sched/task_stack.h> -#include <linux/uaccess.h> #include <linux/slab.h> +#include <linux/stop_machine.h> +#include <linux/uaccess.h> #if IS_ENABLED(CONFIG_X86_32) && !IS_ENABLED(CONFIG_UML) #include <asm/desc.h> @@ -73,6 +75,31 @@ static void lkdtm_PANIC(void) panic("dumptest"); } +static int panic_stop_irqoff_fn(void *arg) +{ + atomic_t *v = arg; + + /* + * As stop_machine() disables interrupts, all CPUs within this function + * have interrupts disabled and cannot take a regular IPI. + * + * The last CPU which enters here will trigger a panic, and as all CPUs + * cannot take a regular IPI, we'll only be able to stop secondaries if + * smp_send_stop() or crash_smp_send_stop() uses an NMI. + */ + if (atomic_inc_return(v) == num_online_cpus()) + panic("panic stop irqoff test"); + + for (;;) + cpu_relax(); +} + +static void lkdtm_PANIC_STOP_IRQOFF(void) +{ + atomic_t v = ATOMIC_INIT(0); + stop_machine(panic_stop_irqoff_fn, &v, cpu_online_mask); +} + static void lkdtm_BUG(void) { BUG(); @@ -259,6 +286,35 @@ static void lkdtm_HARDLOCKUP(void) cpu_relax(); } +static void __lkdtm_SMP_CALL_LOCKUP(void *unused) +{ + for (;;) + cpu_relax(); +} + +static void lkdtm_SMP_CALL_LOCKUP(void) +{ + unsigned int cpu, target; + + cpus_read_lock(); + + cpu = get_cpu(); + target = cpumask_any_but(cpu_online_mask, cpu); + + if (target >= nr_cpu_ids) { + pr_err("FAIL: no other online CPUs\n"); + goto out_put_cpus; + } + + smp_call_function_single(target, __lkdtm_SMP_CALL_LOCKUP, NULL, 1); + + pr_err("FAIL: did not hang\n"); + +out_put_cpus: + put_cpu(); + cpus_read_unlock(); +} + static void lkdtm_SPINLOCKUP(void) { /* Must be called twice to trigger. 
*/ @@ -267,10 +323,11 @@ static void lkdtm_SPINLOCKUP(void) __release(&lock_me_up); } -static void lkdtm_HUNG_TASK(void) +static void __noreturn lkdtm_HUNG_TASK(void) { set_current_state(TASK_UNINTERRUPTIBLE); schedule(); + BUG(); } static volatile unsigned int huge = INT_MAX - 2; @@ -388,8 +445,8 @@ static void lkdtm_FAM_BOUNDS(void) pr_err("FAIL: survived access of invalid flexible array member index!\n"); - if (!__has_attribute(__counted_by__)) - pr_warn("This is expected since this %s was built a compiler supporting __counted_by\n", + if (!IS_ENABLED(CONFIG_CC_HAS_COUNTED_BY)) + pr_warn("This is expected since this %s was built with a compiler that does not support __counted_by\n", lkdtm_kernel_info); else if (IS_ENABLED(CONFIG_UBSAN_BOUNDS)) pr_expected_config(CONFIG_UBSAN_TRAP); @@ -638,6 +695,7 @@ static noinline void lkdtm_CORRUPT_PAC(void) static struct crashtype crashtypes[] = { CRASHTYPE(PANIC), + CRASHTYPE(PANIC_STOP_IRQOFF), CRASHTYPE(BUG), CRASHTYPE(WARNING), CRASHTYPE(WARNING_MESSAGE), @@ -651,6 +709,7 @@ static struct crashtype crashtypes[] = { CRASHTYPE(UNALIGNED_LOAD_STORE_WRITE), CRASHTYPE(SOFTLOCKUP), CRASHTYPE(HARDLOCKUP), + CRASHTYPE(SMP_CALL_LOCKUP), CRASHTYPE(SPINLOCKUP), CRASHTYPE(HUNG_TASK), CRASHTYPE(OVERFLOW_SIGNED), diff --git a/drivers/misc/lkdtm/cfi.c b/drivers/misc/lkdtm/cfi.c index fc28714ae3a6..c3971f7caa65 100644 --- a/drivers/misc/lkdtm/cfi.c +++ b/drivers/misc/lkdtm/cfi.c @@ -43,7 +43,7 @@ static void lkdtm_CFI_FORWARD_PROTO(void) lkdtm_indirect_call((void *)lkdtm_increment_int); pr_err("FAIL: survived mismatched prototype function call!\n"); - pr_expected_config(CONFIG_CFI_CLANG); + pr_expected_config(CONFIG_CFI); } /* @@ -68,12 +68,20 @@ static void lkdtm_CFI_FORWARD_PROTO(void) #define no_pac_addr(addr) \ ((__force __typeof__(addr))((uintptr_t)(addr) | PAGE_OFFSET)) +#ifdef CONFIG_RISCV +/* https://github.com/riscv-non-isa/riscv-elf-psabi-doc/blob/master/riscv-cc.adoc#frame-pointer-convention */ +#define FRAME_RA_OFFSET (-1) 
+#else +#define FRAME_RA_OFFSET 1 +#endif + /* The ultimate ROP gadget. */ static noinline __no_ret_protection void set_return_addr_unchecked(unsigned long *expected, unsigned long *addr) { /* Use of volatile is to make sure final write isn't seen as a dead store. */ - unsigned long * volatile *ret_addr = (unsigned long **)__builtin_frame_address(0) + 1; + unsigned long * volatile *ret_addr = + (unsigned long **)__builtin_frame_address(0) + FRAME_RA_OFFSET; /* Make sure we've found the right place on the stack before writing it. */ if (no_pac_addr(*ret_addr) == expected) @@ -88,7 +96,8 @@ static noinline void set_return_addr(unsigned long *expected, unsigned long *addr) { /* Use of volatile is to make sure final write isn't seen as a dead store. */ - unsigned long * volatile *ret_addr = (unsigned long **)__builtin_frame_address(0) + 1; + unsigned long * volatile *ret_addr = + (unsigned long **)__builtin_frame_address(0) + FRAME_RA_OFFSET; /* Make sure we've found the right place on the stack before writing it. */ if (no_pac_addr(*ret_addr) == expected) diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c index 0772e4a4757e..5732fd59a227 100644 --- a/drivers/misc/lkdtm/core.c +++ b/drivers/misc/lkdtm/core.c @@ -153,12 +153,17 @@ static const struct crashtype *find_crashtype(const char *name) /* * This is forced noinline just so it distinctly shows up in the stackdump * which makes validation of expected lkdtm crashes easier. + * + * NOTE: having a valid return value helps prevent the compiler from doing + * tail call optimizations and taking this out of the stack trace. 
*/ -static noinline void lkdtm_do_action(const struct crashtype *crashtype) +static noinline int lkdtm_do_action(const struct crashtype *crashtype) { if (WARN_ON(!crashtype || !crashtype->func)) - return; + return -EINVAL; crashtype->func(); + + return 0; } static int lkdtm_register_cpoint(struct crashpoint *crashpoint, @@ -167,10 +172,8 @@ static int lkdtm_register_cpoint(struct crashpoint *crashpoint, int ret; /* If this doesn't have a symbol, just call immediately. */ - if (!crashpoint->kprobe.symbol_name) { - lkdtm_do_action(crashtype); - return 0; - } + if (!crashpoint->kprobe.symbol_name) + return lkdtm_do_action(crashtype); if (lkdtm_kprobe != NULL) unregister_kprobe(lkdtm_kprobe); @@ -216,7 +219,7 @@ static int lkdtm_kprobe_handler(struct kprobe *kp, struct pt_regs *regs) spin_unlock_irqrestore(&crash_count_lock, flags); if (do_it) - lkdtm_do_action(lkdtm_crashtype); + return lkdtm_do_action(lkdtm_crashtype); return 0; } @@ -303,6 +306,7 @@ static ssize_t direct_entry(struct file *f, const char __user *user_buf, { const struct crashtype *crashtype; char *buf; + int err; if (count >= PAGE_SIZE) return -EINVAL; @@ -326,9 +330,11 @@ static ssize_t direct_entry(struct file *f, const char __user *user_buf, return -EINVAL; pr_info("Performing direct entry %s\n", crashtype->name); - lkdtm_do_action(crashtype); + err = lkdtm_do_action(crashtype); *off += count; + if (err) + return err; return count; } diff --git a/drivers/misc/lkdtm/fortify.c b/drivers/misc/lkdtm/fortify.c index 015927665678..00ed2147113e 100644 --- a/drivers/misc/lkdtm/fortify.c +++ b/drivers/misc/lkdtm/fortify.c @@ -44,6 +44,9 @@ static void lkdtm_FORTIFY_STR_MEMBER(void) char *src; src = kmalloc(size, GFP_KERNEL); + if (!src) + return; + strscpy(src, "over ten bytes", size); size = strlen(src) + 1; @@ -109,6 +112,9 @@ static void lkdtm_FORTIFY_MEM_MEMBER(void) char *src; src = kmalloc(size, GFP_KERNEL); + if (!src) + return; + strscpy(src, "over ten bytes", size); size = strlen(src) + 1; diff 
--git a/drivers/misc/lkdtm/heap.c b/drivers/misc/lkdtm/heap.c index 0ce4cbf6abda..c1a05b935894 100644 --- a/drivers/misc/lkdtm/heap.c +++ b/drivers/misc/lkdtm/heap.c @@ -4,6 +4,7 @@ * page allocation and slab allocations. */ #include "lkdtm.h" +#include <linux/kfence.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/sched.h> @@ -47,7 +48,7 @@ static void lkdtm_VMALLOC_LINEAR_OVERFLOW(void) * correctly. * * This should get caught by either memory tagging, KASan, or by using - * CONFIG_SLUB_DEBUG=y and slub_debug=ZF (or CONFIG_SLUB_DEBUG_ON=y). + * CONFIG_SLUB_DEBUG=y and slab_debug=ZF (or CONFIG_SLUB_DEBUG_ON=y). */ static void lkdtm_SLAB_LINEAR_OVERFLOW(void) { @@ -132,6 +133,64 @@ static void lkdtm_READ_AFTER_FREE(void) kfree(val); } +static void lkdtm_KFENCE_READ_AFTER_FREE(void) +{ + int *base, val, saw; + unsigned long timeout, resched_after; + size_t len = 1024; + /* + * The slub allocator will use the either the first word or + * the middle of the allocation to store the free pointer, + * depending on configurations. Store in the second word to + * avoid running into the freelist. + */ + size_t offset = sizeof(*base); + + /* + * 100x the sample interval should be more than enough to ensure we get + * a KFENCE allocation eventually. + */ + timeout = jiffies + msecs_to_jiffies(100 * kfence_sample_interval); + /* + * Especially for non-preemption kernels, ensure the allocation-gate + * timer can catch up: after @resched_after, every failed allocation + * attempt yields, to ensure the allocation-gate timer is scheduled. 
+ */ + resched_after = jiffies + msecs_to_jiffies(kfence_sample_interval); + do { + base = kmalloc(len, GFP_KERNEL); + if (!base) { + pr_err("FAIL: Unable to allocate kfence memory!\n"); + return; + } + + if (is_kfence_address(base)) { + val = 0x12345678; + base[offset] = val; + pr_info("Value in memory before free: %x\n", base[offset]); + + kfree(base); + + pr_info("Attempting bad read from freed memory\n"); + saw = base[offset]; + if (saw != val) { + /* Good! Poisoning happened, so declare a win. */ + pr_info("Memory correctly poisoned (%x)\n", saw); + } else { + pr_err("FAIL: Memory was not poisoned!\n"); + pr_expected_config_param(CONFIG_INIT_ON_FREE_DEFAULT_ON, "init_on_free"); + } + return; + } + + kfree(base); + if (time_after(jiffies, resched_after)) + cond_resched(); + } while (time_before(jiffies, timeout)); + + pr_err("FAIL: kfence memory never allocated!\n"); +} + static void lkdtm_WRITE_BUDDY_AFTER_FREE(void) { unsigned long p = __get_free_page(GFP_KERNEL); @@ -296,23 +355,12 @@ static void lkdtm_SLAB_FREE_PAGE(void) free_page(p); } -/* - * We have constructors to keep the caches distinctly separated without - * needing to boot with "slab_nomerge". 
- */ -static void ctor_double_free(void *region) -{ } -static void ctor_a(void *region) -{ } -static void ctor_b(void *region) -{ } - void __init lkdtm_heap_init(void) { double_free_cache = kmem_cache_create("lkdtm-heap-double_free", - 64, 0, 0, ctor_double_free); - a_cache = kmem_cache_create("lkdtm-heap-a", 64, 0, 0, ctor_a); - b_cache = kmem_cache_create("lkdtm-heap-b", 64, 0, 0, ctor_b); + 64, 0, SLAB_NO_MERGE, NULL); + a_cache = kmem_cache_create("lkdtm-heap-a", 64, 0, SLAB_NO_MERGE, NULL); + b_cache = kmem_cache_create("lkdtm-heap-b", 64, 0, SLAB_NO_MERGE, NULL); } void __exit lkdtm_heap_exit(void) @@ -327,6 +375,7 @@ static struct crashtype crashtypes[] = { CRASHTYPE(VMALLOC_LINEAR_OVERFLOW), CRASHTYPE(WRITE_AFTER_FREE), CRASHTYPE(READ_AFTER_FREE), + CRASHTYPE(KFENCE_READ_AFTER_FREE), CRASHTYPE(WRITE_BUDDY_AFTER_FREE), CRASHTYPE(READ_BUDDY_AFTER_FREE), CRASHTYPE(SLAB_INIT_ON_ALLOC), diff --git a/drivers/misc/lkdtm/stackleak.c b/drivers/misc/lkdtm/kstack_erase.c index f1d022160913..4fd9b0bfb874 100644 --- a/drivers/misc/lkdtm/stackleak.c +++ b/drivers/misc/lkdtm/kstack_erase.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* * This code tests that the current task stack is properly erased (filled - * with STACKLEAK_POISON). + * with KSTACK_ERASE_POISON). * * Authors: * Alexander Popov <alex.popov@linux.com> @@ -9,9 +9,9 @@ */ #include "lkdtm.h" -#include <linux/stackleak.h> +#include <linux/kstack_erase.h> -#if defined(CONFIG_GCC_PLUGIN_STACKLEAK) +#if defined(CONFIG_KSTACK_ERASE) /* * Check that stackleak tracks the lowest stack pointer and erases the stack * below this as expected. 
@@ -85,7 +85,7 @@ static void noinstr check_stackleak_irqoff(void) while (poison_low > task_stack_low) { poison_low -= sizeof(unsigned long); - if (*(unsigned long *)poison_low == STACKLEAK_POISON) + if (*(unsigned long *)poison_low == KSTACK_ERASE_POISON) continue; instrumentation_begin(); @@ -96,7 +96,7 @@ static void noinstr check_stackleak_irqoff(void) } instrumentation_begin(); - pr_info("stackleak stack usage:\n" + pr_info("kstack erase stack usage:\n" " high offset: %lu bytes\n" " current: %lu bytes\n" " lowest: %lu bytes\n" @@ -121,7 +121,7 @@ out: instrumentation_end(); } -static void lkdtm_STACKLEAK_ERASING(void) +static void lkdtm_KSTACK_ERASE(void) { unsigned long flags; @@ -129,19 +129,19 @@ static void lkdtm_STACKLEAK_ERASING(void) check_stackleak_irqoff(); local_irq_restore(flags); } -#else /* defined(CONFIG_GCC_PLUGIN_STACKLEAK) */ -static void lkdtm_STACKLEAK_ERASING(void) +#else /* defined(CONFIG_KSTACK_ERASE) */ +static void lkdtm_KSTACK_ERASE(void) { - if (IS_ENABLED(CONFIG_HAVE_ARCH_STACKLEAK)) { - pr_err("XFAIL: stackleak is not enabled (CONFIG_GCC_PLUGIN_STACKLEAK=n)\n"); + if (IS_ENABLED(CONFIG_HAVE_ARCH_KSTACK_ERASE)) { + pr_err("XFAIL: stackleak is not enabled (CONFIG_KSTACK_ERASE=n)\n"); } else { - pr_err("XFAIL: stackleak is not supported on this arch (HAVE_ARCH_STACKLEAK=n)\n"); + pr_err("XFAIL: stackleak is not supported on this arch (HAVE_ARCH_KSTACK_ERASE=n)\n"); } } -#endif /* defined(CONFIG_GCC_PLUGIN_STACKLEAK) */ +#endif /* defined(CONFIG_KSTACK_ERASE) */ static struct crashtype crashtypes[] = { - CRASHTYPE(STACKLEAK_ERASING), + CRASHTYPE(KSTACK_ERASE), }; struct crashtype_category stackleak_crashtypes = { diff --git a/drivers/misc/lkdtm/perms.c b/drivers/misc/lkdtm/perms.c index b93404d65650..e1f5e9abb301 100644 --- a/drivers/misc/lkdtm/perms.c +++ b/drivers/misc/lkdtm/perms.c @@ -9,6 +9,7 @@ #include <linux/vmalloc.h> #include <linux/mman.h> #include <linux/uaccess.h> +#include <linux/objtool.h> #include <asm/cacheflush.h> 
#include <asm/sections.h> @@ -29,6 +30,13 @@ static const unsigned long rodata = 0xAA55AA55; static unsigned long ro_after_init __ro_after_init = 0x55AA5500; /* + * This is a pointer to do_nothing() which is initialized at runtime rather + * than build time to avoid objtool IBT validation warnings caused by an + * inlined unrolled memcpy() in execute_location(). + */ +static void __ro_after_init *do_nothing_ptr; + +/* * This just returns to the caller. It is designed to be copied into * non-executable memory regions. */ @@ -61,17 +69,16 @@ static void *setup_function_descriptor(func_desc_t *fdesc, void *dst) return fdesc; } -static noinline void execute_location(void *dst, bool write) +static noinline __nocfi void execute_location(void *dst, bool write) { void (*func)(void); func_desc_t fdesc; - void *do_nothing_text = dereference_function_descriptor(do_nothing); - pr_info("attempting ok execution at %px\n", do_nothing_text); + pr_info("attempting ok execution at %px\n", do_nothing_ptr); do_nothing(); if (write == CODE_WRITE) { - memcpy(dst, do_nothing_text, EXEC_SIZE); + memcpy(dst, do_nothing_ptr, EXEC_SIZE); flush_icache_range((unsigned long)dst, (unsigned long)dst + EXEC_SIZE); } @@ -80,6 +87,10 @@ static noinline void execute_location(void *dst, bool write) func(); pr_err("FAIL: func returned\n"); } +/* + * Explicitly doing the wrong thing for testing. 
+ */ +ANNOTATE_NOCFI_SYM(execute_location); static void execute_user_location(void *dst) { @@ -267,6 +278,8 @@ static void lkdtm_ACCESS_NULL(void) void __init lkdtm_perms_init(void) { + do_nothing_ptr = dereference_function_descriptor(do_nothing); + /* Make sure we can write to __ro_after_init values during __init */ ro_after_init |= 0xAA; } diff --git a/drivers/misc/lkdtm/refcount.c b/drivers/misc/lkdtm/refcount.c index 5cd488f54cfa..8f744bee6fbd 100644 --- a/drivers/misc/lkdtm/refcount.c +++ b/drivers/misc/lkdtm/refcount.c @@ -182,6 +182,21 @@ static void lkdtm_REFCOUNT_SUB_AND_TEST_NEGATIVE(void) check_negative(&neg, 3); } +/* + * A refcount_sub_and_test() by zero when the counter is at zero should act like + * refcount_sub_and_test() above when going negative. + */ +static void lkdtm_REFCOUNT_SUB_AND_TEST_ZERO(void) +{ + refcount_t neg = REFCOUNT_INIT(0); + + pr_info("attempting bad refcount_sub_and_test() at zero\n"); + if (refcount_sub_and_test(0, &neg)) + pr_warn("Weird: refcount_sub_and_test() reported zero\n"); + + check_negative(&neg, 0); +} + static void check_from_zero(refcount_t *ref) { switch (refcount_read(ref)) { @@ -400,6 +415,7 @@ static struct crashtype crashtypes[] = { CRASHTYPE(REFCOUNT_DEC_NEGATIVE), CRASHTYPE(REFCOUNT_DEC_AND_TEST_NEGATIVE), CRASHTYPE(REFCOUNT_SUB_AND_TEST_NEGATIVE), + CRASHTYPE(REFCOUNT_SUB_AND_TEST_ZERO), CRASHTYPE(REFCOUNT_INC_ZERO), CRASHTYPE(REFCOUNT_ADD_ZERO), CRASHTYPE(REFCOUNT_INC_SATURATED), diff --git a/drivers/misc/mchp_pci1xxxx/Kconfig b/drivers/misc/mchp_pci1xxxx/Kconfig index 4abb47de7219..64e457581fb4 100644 --- a/drivers/misc/mchp_pci1xxxx/Kconfig +++ b/drivers/misc/mchp_pci1xxxx/Kconfig @@ -2,6 +2,7 @@ config GP_PCI1XXXX tristate "Microchip PCI1XXXX PCIe to GPIO Expander + OTP/EEPROM manager" depends on PCI depends on GPIOLIB + depends on NVMEM_SYSFS select GPIOLIB_IRQCHIP select AUXILIARY_BUS help diff --git a/drivers/misc/mchp_pci1xxxx/Makefile b/drivers/misc/mchp_pci1xxxx/Makefile index 
fc4615cfe28b..ae31251dab37 100644 --- a/drivers/misc/mchp_pci1xxxx/Makefile +++ b/drivers/misc/mchp_pci1xxxx/Makefile @@ -1 +1 @@ -obj-$(CONFIG_GP_PCI1XXXX) := mchp_pci1xxxx_gp.o mchp_pci1xxxx_gpio.o +obj-$(CONFIG_GP_PCI1XXXX) := mchp_pci1xxxx_gp.o mchp_pci1xxxx_gpio.o mchp_pci1xxxx_otpe2p.o diff --git a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c index 32af2b14ff34..34c9be437432 100644 --- a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c +++ b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c @@ -69,8 +69,10 @@ static int gp_aux_bus_probe(struct pci_dev *pdev, const struct pci_device_id *id aux_bus->aux_device_wrapper[1] = kzalloc(sizeof(*aux_bus->aux_device_wrapper[1]), GFP_KERNEL); - if (!aux_bus->aux_device_wrapper[1]) - return -ENOMEM; + if (!aux_bus->aux_device_wrapper[1]) { + retval = -ENOMEM; + goto err_aux_dev_add_0; + } retval = ida_alloc(&gp_client_ida, GFP_KERNEL); if (retval < 0) @@ -111,6 +113,7 @@ static int gp_aux_bus_probe(struct pci_dev *pdev, const struct pci_device_id *id err_aux_dev_add_1: auxiliary_device_uninit(&aux_bus->aux_device_wrapper[1]->aux_dev); + goto err_aux_dev_add_0; err_aux_dev_init_1: ida_free(&gp_client_ida, aux_bus->aux_device_wrapper[1]->aux_dev.id); @@ -120,6 +123,7 @@ err_ida_alloc_1: err_aux_dev_add_0: auxiliary_device_uninit(&aux_bus->aux_device_wrapper[0]->aux_dev); + goto err_ret; err_aux_dev_init_0: ida_free(&gp_client_ida, aux_bus->aux_device_wrapper[0]->aux_dev.id); @@ -127,6 +131,7 @@ err_aux_dev_init_0: err_ida_alloc_0: kfree(aux_bus->aux_device_wrapper[0]); +err_ret: return retval; } diff --git a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c index e616e3ec2b42..8eddbaa1fccd 100644 --- a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c +++ b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c @@ -7,12 +7,14 @@ #include <linux/gpio/driver.h> #include <linux/bio.h> #include <linux/mutex.h> +#include <linux/pci.h> #include 
<linux/kthread.h> #include <linux/interrupt.h> #include "mchp_pci1xxxx_gp.h" #define PCI1XXXX_NR_PINS 93 +#define PCI_DEV_REV_OFFSET 0x08 #define PERI_GEN_RESET 0 #define OUT_EN_OFFSET(x) ((((x) / 32) * 4) + 0x400) #define INP_EN_OFFSET(x) ((((x) / 32) * 4) + 0x400 + 0x10) @@ -37,11 +39,30 @@ struct pci1xxxx_gpio { struct auxiliary_device *aux_dev; void __iomem *reg_base; + raw_spinlock_t wa_lock; struct gpio_chip gpio; spinlock_t lock; + u32 gpio_wake_mask[3]; int irq_base; + u8 dev_rev; }; +static int pci1xxxx_gpio_get_device_revision(struct pci1xxxx_gpio *priv) +{ + struct device *parent = priv->aux_dev->dev.parent; + struct pci_dev *pcidev = to_pci_dev(parent); + int ret; + u32 val; + + ret = pci_read_config_dword(pcidev, PCI_DEV_REV_OFFSET, &val); + if (ret) + return ret; + + priv->dev_rev = val; + + return 0; +} + static int pci1xxxx_gpio_get_direction(struct gpio_chip *gpio, unsigned int nr) { struct pci1xxxx_gpio *priv = gpiochip_get_data(gpio); @@ -114,8 +135,7 @@ static int pci1xxxx_gpio_direction_output(struct gpio_chip *gpio, return 0; } -static void pci1xxxx_gpio_set(struct gpio_chip *gpio, - unsigned int nr, int val) +static int pci1xxxx_gpio_set(struct gpio_chip *gpio, unsigned int nr, int val) { struct pci1xxxx_gpio *priv = gpiochip_get_data(gpio); unsigned long flags; @@ -123,6 +143,8 @@ static void pci1xxxx_gpio_set(struct gpio_chip *gpio, spin_lock_irqsave(&priv->lock, flags); pci1xxx_assign_bit(priv->reg_base, OUT_OFFSET(nr), (nr % 32), val); spin_unlock_irqrestore(&priv->lock, flags); + + return 0; } static int pci1xxxx_gpio_set_config(struct gpio_chip *gpio, unsigned int offset, @@ -147,8 +169,11 @@ static int pci1xxxx_gpio_set_config(struct gpio_chip *gpio, unsigned int offset, case PIN_CONFIG_DRIVE_OPEN_DRAIN: pci1xxx_assign_bit(priv->reg_base, OPENDRAIN_OFFSET(offset), (offset % 32), true); break; + case PIN_CONFIG_DRIVE_PUSH_PULL: + pci1xxx_assign_bit(priv->reg_base, OPENDRAIN_OFFSET(offset), (offset % 32), false); + break; default: - ret 
= -EOPNOTSUPP; + ret = -ENOTSUPP; break; } spin_unlock_irqrestore(&priv->lock, flags); @@ -164,7 +189,7 @@ static void pci1xxxx_gpio_irq_ack(struct irq_data *data) unsigned long flags; spin_lock_irqsave(&priv->lock, flags); - pci1xxx_assign_bit(priv->reg_base, INTR_STAT_OFFSET(gpio), (gpio % 32), true); + writel(BIT(gpio % 32), priv->reg_base + INTR_STAT_OFFSET(gpio)); spin_unlock_irqrestore(&priv->lock, flags); } @@ -249,11 +274,28 @@ static int pci1xxxx_gpio_set_type(struct irq_data *data, unsigned int trigger_ty return true; } +static int pci1xxxx_gpio_set_wake(struct irq_data *data, unsigned int enable) +{ + struct gpio_chip *chip = irq_data_get_irq_chip_data(data); + struct pci1xxxx_gpio *priv = gpiochip_get_data(chip); + unsigned int gpio = irqd_to_hwirq(data); + unsigned int bitpos = gpio % 32; + unsigned int bank = gpio / 32; + + if (enable) + priv->gpio_wake_mask[bank] |= (1 << bitpos); + else + priv->gpio_wake_mask[bank] &= ~(1 << bitpos); + + return 0; +} + static irqreturn_t pci1xxxx_gpio_irq_handler(int irq, void *dev_id) { struct pci1xxxx_gpio *priv = dev_id; struct gpio_chip *gc = &priv->gpio; unsigned long int_status = 0; + unsigned long wa_flags; unsigned long flags; u8 pincount; int bit; @@ -277,7 +319,9 @@ static irqreturn_t pci1xxxx_gpio_irq_handler(int irq, void *dev_id) writel(BIT(bit), priv->reg_base + INTR_STATUS_OFFSET(gpiobank)); spin_unlock_irqrestore(&priv->lock, flags); irq = irq_find_mapping(gc->irq.domain, (bit + (gpiobank * 32))); + raw_spin_lock_irqsave(&priv->wa_lock, wa_flags); generic_handle_irq(irq); + raw_spin_unlock_irqrestore(&priv->wa_lock, wa_flags); } } spin_lock_irqsave(&priv->lock, flags); @@ -293,6 +337,7 @@ static const struct irq_chip pci1xxxx_gpio_irqchip = { .irq_mask = pci1xxxx_gpio_irq_mask, .irq_unmask = pci1xxxx_gpio_irq_unmask, .irq_set_type = pci1xxxx_gpio_set_type, + .irq_set_wake = pci1xxxx_gpio_set_wake, .flags = IRQCHIP_IMMUTABLE, GPIOCHIP_IRQ_RESOURCE_HELPERS, }; @@ -300,32 +345,83 @@ static const struct 
irq_chip pci1xxxx_gpio_irqchip = { static int pci1xxxx_gpio_suspend(struct device *dev) { struct pci1xxxx_gpio *priv = dev_get_drvdata(dev); + struct device *parent = priv->aux_dev->dev.parent; + struct pci_dev *pcidev = to_pci_dev(parent); + unsigned int gpio_bank_base; + unsigned int wake_mask; + unsigned int gpiobank; unsigned long flags; + for (gpiobank = 0; gpiobank < 3; gpiobank++) { + wake_mask = priv->gpio_wake_mask[gpiobank]; + + if (wake_mask) { + gpio_bank_base = gpiobank * 32; + + pci1xxx_assign_bit(priv->reg_base, + PIO_PCI_CTRL_REG_OFFSET, 0, true); + writel(~wake_mask, priv->reg_base + + WAKEMASK_OFFSET(gpio_bank_base)); + } + } + spin_lock_irqsave(&priv->lock, flags); pci1xxx_assign_bit(priv->reg_base, PIO_GLOBAL_CONFIG_OFFSET, 16, true); pci1xxx_assign_bit(priv->reg_base, PIO_GLOBAL_CONFIG_OFFSET, 17, false); pci1xxx_assign_bit(priv->reg_base, PERI_GEN_RESET, 16, true); + + if (priv->dev_rev >= 0xC0) + pci1xxx_assign_bit(priv->reg_base, PERI_GEN_RESET, 17, true); + spin_unlock_irqrestore(&priv->lock, flags); + device_set_wakeup_enable(&pcidev->dev, true); + pci_wake_from_d3(pcidev, true); + return 0; } static int pci1xxxx_gpio_resume(struct device *dev) { struct pci1xxxx_gpio *priv = dev_get_drvdata(dev); + struct device *parent = priv->aux_dev->dev.parent; + struct pci_dev *pcidev = to_pci_dev(parent); + unsigned int gpio_bank_base; + unsigned int wake_mask; + unsigned int gpiobank; unsigned long flags; + for (gpiobank = 0; gpiobank < 3; gpiobank++) { + wake_mask = priv->gpio_wake_mask[gpiobank]; + + if (wake_mask) { + gpio_bank_base = gpiobank * 32; + + writel(wake_mask, priv->reg_base + + INTR_STAT_OFFSET(gpio_bank_base)); + pci1xxx_assign_bit(priv->reg_base, + PIO_PCI_CTRL_REG_OFFSET, 0, false); + writel(0xffffffff, priv->reg_base + + WAKEMASK_OFFSET(gpio_bank_base)); + } + } + spin_lock_irqsave(&priv->lock, flags); pci1xxx_assign_bit(priv->reg_base, PIO_GLOBAL_CONFIG_OFFSET, 17, true); pci1xxx_assign_bit(priv->reg_base, 
PIO_GLOBAL_CONFIG_OFFSET, 16, false); pci1xxx_assign_bit(priv->reg_base, PERI_GEN_RESET, 16, false); + + if (priv->dev_rev >= 0xC0) + pci1xxx_assign_bit(priv->reg_base, PERI_GEN_RESET, 17, false); + spin_unlock_irqrestore(&priv->lock, flags); + pci_wake_from_d3(pcidev, false); + return 0; } @@ -405,6 +501,10 @@ static int pci1xxxx_gpio_probe(struct auxiliary_device *aux_dev, if (retval < 0) return retval; + retval = pci1xxxx_gpio_get_device_revision(priv); + if (retval) + return retval; + dev_set_drvdata(&aux_dev->dev, priv); return devm_gpiochip_add_data(&aux_dev->dev, &priv->gpio, priv); diff --git a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c new file mode 100644 index 000000000000..a2ed477e0370 --- /dev/null +++ b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c @@ -0,0 +1,441 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2022-2023 Microchip Technology Inc. +// PCI1xxxx OTP/EEPROM driver + +#include <linux/auxiliary_bus.h> +#include <linux/device.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/nvmem-provider.h> + +#include "mchp_pci1xxxx_gp.h" + +#define AUX_DRIVER_NAME "PCI1xxxxOTPE2P" +#define EEPROM_NAME "pci1xxxx_eeprom" +#define OTP_NAME "pci1xxxx_otp" + +#define PERI_PF3_SYSTEM_REG_ADDR_BASE 0x2000 +#define PERI_PF3_SYSTEM_REG_LENGTH 0x4000 + +#define EEPROM_SIZE_BYTES 8192 +#define OTP_SIZE_BYTES 8192 + +#define CONFIG_REG_ADDR_BASE 0 +#define EEPROM_REG_ADDR_BASE 0x0E00 +#define OTP_REG_ADDR_BASE 0x1000 + +#define MMAP_OTP_OFFSET(x) (OTP_REG_ADDR_BASE + (x)) +#define MMAP_EEPROM_OFFSET(x) (EEPROM_REG_ADDR_BASE + (x)) +#define MMAP_CFG_OFFSET(x) (CONFIG_REG_ADDR_BASE + (x)) + +#define EEPROM_CMD_REG 0x00 +#define EEPROM_DATA_REG 0x04 + +#define EEPROM_CMD_EPC_WRITE (BIT(29) | BIT(28)) +#define EEPROM_CMD_EPC_TIMEOUT_BIT BIT(17) +#define EEPROM_CMD_EPC_BUSY_BIT BIT(31) + +#define STATUS_READ_DELAY_US 1 +#define STATUS_READ_TIMEOUT_US 20000 + +#define 
OTP_ADDR_HIGH_OFFSET 0x04 +#define OTP_ADDR_LOW_OFFSET 0x08 +#define OTP_PRGM_DATA_OFFSET 0x10 +#define OTP_PRGM_MODE_OFFSET 0x14 +#define OTP_RD_DATA_OFFSET 0x18 +#define OTP_FUNC_CMD_OFFSET 0x20 +#define OTP_CMD_GO_OFFSET 0x28 +#define OTP_PASS_FAIL_OFFSET 0x2C +#define OTP_STATUS_OFFSET 0x30 + +#define OTP_FUNC_RD_BIT BIT(0) +#define OTP_FUNC_PGM_BIT BIT(1) +#define OTP_CMD_GO_BIT BIT(0) +#define OTP_STATUS_BUSY_BIT BIT(0) +#define OTP_PGM_MODE_BYTE_BIT BIT(0) +#define OTP_FAIL_BIT BIT(0) + +#define OTP_PWR_DN_BIT BIT(0) +#define OTP_PWR_DN_OFFSET 0x00 + +#define CFG_SYS_LOCK_OFFSET 0xA0 +#define CFG_SYS_LOCK_PF3 BIT(5) + +#define BYTE_LOW (GENMASK(7, 0)) +#define BYTE_HIGH (GENMASK(12, 8)) + +struct pci1xxxx_otp_eeprom_device { + struct auxiliary_device *pdev; + void __iomem *reg_base; + struct nvmem_config nvmem_config_eeprom; + struct nvmem_device *nvmem_eeprom; + struct nvmem_config nvmem_config_otp; + struct nvmem_device *nvmem_otp; +}; + +static int set_sys_lock(struct pci1xxxx_otp_eeprom_device *priv) +{ + void __iomem *sys_lock = priv->reg_base + + MMAP_CFG_OFFSET(CFG_SYS_LOCK_OFFSET); + u8 data; + + writel(CFG_SYS_LOCK_PF3, sys_lock); + data = readl(sys_lock); + if (data != CFG_SYS_LOCK_PF3) + return -EPERM; + + return 0; +} + +static void release_sys_lock(struct pci1xxxx_otp_eeprom_device *priv) +{ + void __iomem *sys_lock = priv->reg_base + + MMAP_CFG_OFFSET(CFG_SYS_LOCK_OFFSET); + writel(0, sys_lock); +} + +static bool is_eeprom_responsive(struct pci1xxxx_otp_eeprom_device *priv) +{ + void __iomem *rb = priv->reg_base; + u32 regval; + int ret; + + writel(EEPROM_CMD_EPC_TIMEOUT_BIT, + rb + MMAP_EEPROM_OFFSET(EEPROM_CMD_REG)); + writel(EEPROM_CMD_EPC_BUSY_BIT, + rb + MMAP_EEPROM_OFFSET(EEPROM_CMD_REG)); + + /* Wait for the EPC_BUSY bit to get cleared or timeout bit to get set*/ + ret = read_poll_timeout(readl, regval, !(regval & EEPROM_CMD_EPC_BUSY_BIT), + STATUS_READ_DELAY_US, STATUS_READ_TIMEOUT_US, + true, rb + MMAP_EEPROM_OFFSET(EEPROM_CMD_REG)); + 
+ /* Return failure if either of software or hardware timeouts happen */ + if (ret < 0 || (!ret && (regval & EEPROM_CMD_EPC_TIMEOUT_BIT))) + return false; + + return true; +} + +static int pci1xxxx_eeprom_read(void *priv_t, unsigned int off, + void *buf_t, size_t count) +{ + struct pci1xxxx_otp_eeprom_device *priv = priv_t; + void __iomem *rb = priv->reg_base; + char *buf = buf_t; + u32 regval; + u32 byte; + int ret; + + if (off >= priv->nvmem_config_eeprom.size) + return -EFAULT; + + if ((off + count) > priv->nvmem_config_eeprom.size) + count = priv->nvmem_config_eeprom.size - off; + + ret = set_sys_lock(priv); + if (ret) + return ret; + + for (byte = 0; byte < count; byte++) { + writel(EEPROM_CMD_EPC_BUSY_BIT | (off + byte), rb + + MMAP_EEPROM_OFFSET(EEPROM_CMD_REG)); + + ret = read_poll_timeout(readl, regval, + !(regval & EEPROM_CMD_EPC_BUSY_BIT), + STATUS_READ_DELAY_US, + STATUS_READ_TIMEOUT_US, true, + rb + MMAP_EEPROM_OFFSET(EEPROM_CMD_REG)); + if (ret < 0 || (!ret && (regval & EEPROM_CMD_EPC_TIMEOUT_BIT))) { + ret = -EIO; + goto error; + } + + buf[byte] = readl(rb + MMAP_EEPROM_OFFSET(EEPROM_DATA_REG)); + } +error: + release_sys_lock(priv); + return ret; +} + +static int pci1xxxx_eeprom_write(void *priv_t, unsigned int off, + void *value_t, size_t count) +{ + struct pci1xxxx_otp_eeprom_device *priv = priv_t; + void __iomem *rb = priv->reg_base; + char *value = value_t; + u32 regval; + u32 byte; + int ret; + + if (off >= priv->nvmem_config_eeprom.size) + return -EFAULT; + + if ((off + count) > priv->nvmem_config_eeprom.size) + count = priv->nvmem_config_eeprom.size - off; + + ret = set_sys_lock(priv); + if (ret) + return ret; + + for (byte = 0; byte < count; byte++) { + writel(*(value + byte), rb + MMAP_EEPROM_OFFSET(EEPROM_DATA_REG)); + regval = EEPROM_CMD_EPC_TIMEOUT_BIT | EEPROM_CMD_EPC_WRITE | + (off + byte); + writel(regval, rb + MMAP_EEPROM_OFFSET(EEPROM_CMD_REG)); + writel(EEPROM_CMD_EPC_BUSY_BIT | regval, + rb + MMAP_EEPROM_OFFSET(EEPROM_CMD_REG)); + 
+ ret = read_poll_timeout(readl, regval, + !(regval & EEPROM_CMD_EPC_BUSY_BIT), + STATUS_READ_DELAY_US, + STATUS_READ_TIMEOUT_US, true, + rb + MMAP_EEPROM_OFFSET(EEPROM_CMD_REG)); + if (ret < 0 || (!ret && (regval & EEPROM_CMD_EPC_TIMEOUT_BIT))) { + ret = -EIO; + goto error; + } + } +error: + release_sys_lock(priv); + return ret; +} + +static void otp_device_set_address(struct pci1xxxx_otp_eeprom_device *priv, + u16 address) +{ + u16 lo, hi; + + lo = address & BYTE_LOW; + hi = (address & BYTE_HIGH) >> 8; + writew(lo, priv->reg_base + MMAP_OTP_OFFSET(OTP_ADDR_LOW_OFFSET)); + writew(hi, priv->reg_base + MMAP_OTP_OFFSET(OTP_ADDR_HIGH_OFFSET)); +} + +static int pci1xxxx_otp_read(void *priv_t, unsigned int off, + void *buf_t, size_t count) +{ + struct pci1xxxx_otp_eeprom_device *priv = priv_t; + void __iomem *rb = priv->reg_base; + char *buf = buf_t; + u32 regval; + u32 byte; + int ret; + u8 data; + + if (off >= priv->nvmem_config_otp.size) + return -EFAULT; + + if ((off + count) > priv->nvmem_config_otp.size) + count = priv->nvmem_config_otp.size - off; + + ret = set_sys_lock(priv); + if (ret) + return ret; + + for (byte = 0; byte < count; byte++) { + otp_device_set_address(priv, (u16)(off + byte)); + data = readl(rb + MMAP_OTP_OFFSET(OTP_FUNC_CMD_OFFSET)); + writel(data | OTP_FUNC_RD_BIT, + rb + MMAP_OTP_OFFSET(OTP_FUNC_CMD_OFFSET)); + data = readl(rb + MMAP_OTP_OFFSET(OTP_CMD_GO_OFFSET)); + writel(data | OTP_CMD_GO_BIT, + rb + MMAP_OTP_OFFSET(OTP_CMD_GO_OFFSET)); + + ret = read_poll_timeout(readl, regval, + !(regval & OTP_STATUS_BUSY_BIT), + STATUS_READ_DELAY_US, + STATUS_READ_TIMEOUT_US, true, + rb + MMAP_OTP_OFFSET(OTP_STATUS_OFFSET)); + + data = readl(rb + MMAP_OTP_OFFSET(OTP_PASS_FAIL_OFFSET)); + if (ret < 0 || data & OTP_FAIL_BIT) { + ret = -EIO; + goto error; + } + + buf[byte] = readl(rb + MMAP_OTP_OFFSET(OTP_RD_DATA_OFFSET)); + } +error: + release_sys_lock(priv); + return ret; +} + +static int pci1xxxx_otp_write(void *priv_t, unsigned int off, + void *value_t, 
size_t count) +{ + struct pci1xxxx_otp_eeprom_device *priv = priv_t; + void __iomem *rb = priv->reg_base; + char *value = value_t; + u32 regval; + u32 byte; + int ret; + u8 data; + + if (off >= priv->nvmem_config_otp.size) + return -EFAULT; + + if ((off + count) > priv->nvmem_config_otp.size) + count = priv->nvmem_config_otp.size - off; + + ret = set_sys_lock(priv); + if (ret) + return ret; + + for (byte = 0; byte < count; byte++) { + otp_device_set_address(priv, (u16)(off + byte)); + + /* + * Set OTP_PGM_MODE_BYTE command bit in OTP_PRGM_MODE register + * to enable Byte programming + */ + data = readl(rb + MMAP_OTP_OFFSET(OTP_PRGM_MODE_OFFSET)); + writel(data | OTP_PGM_MODE_BYTE_BIT, + rb + MMAP_OTP_OFFSET(OTP_PRGM_MODE_OFFSET)); + writel(*(value + byte), rb + MMAP_OTP_OFFSET(OTP_PRGM_DATA_OFFSET)); + data = readl(rb + MMAP_OTP_OFFSET(OTP_FUNC_CMD_OFFSET)); + writel(data | OTP_FUNC_PGM_BIT, + rb + MMAP_OTP_OFFSET(OTP_FUNC_CMD_OFFSET)); + data = readl(rb + MMAP_OTP_OFFSET(OTP_CMD_GO_OFFSET)); + writel(data | OTP_CMD_GO_BIT, + rb + MMAP_OTP_OFFSET(OTP_CMD_GO_OFFSET)); + + ret = read_poll_timeout(readl, regval, + !(regval & OTP_STATUS_BUSY_BIT), + STATUS_READ_DELAY_US, + STATUS_READ_TIMEOUT_US, true, + rb + MMAP_OTP_OFFSET(OTP_STATUS_OFFSET)); + + data = readl(rb + MMAP_OTP_OFFSET(OTP_PASS_FAIL_OFFSET)); + if (ret < 0 || data & OTP_FAIL_BIT) { + ret = -EIO; + goto error; + } + } +error: + release_sys_lock(priv); + return ret; +} + +static int pci1xxxx_otp_eeprom_probe(struct auxiliary_device *aux_dev, + const struct auxiliary_device_id *id) +{ + struct auxiliary_device_wrapper *aux_dev_wrapper; + struct pci1xxxx_otp_eeprom_device *priv; + struct gp_aux_data_type *pdata; + int ret; + u8 data; + + aux_dev_wrapper = container_of(aux_dev, struct auxiliary_device_wrapper, + aux_dev); + pdata = &aux_dev_wrapper->gp_aux_data; + if (!pdata) + return -EINVAL; + + priv = devm_kzalloc(&aux_dev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->pdev = 
aux_dev; + + if (!devm_request_mem_region(&aux_dev->dev, pdata->region_start + + PERI_PF3_SYSTEM_REG_ADDR_BASE, + PERI_PF3_SYSTEM_REG_LENGTH, + aux_dev->name)) + return -ENOMEM; + + priv->reg_base = devm_ioremap(&aux_dev->dev, pdata->region_start + + PERI_PF3_SYSTEM_REG_ADDR_BASE, + PERI_PF3_SYSTEM_REG_LENGTH); + if (!priv->reg_base) + return -ENOMEM; + + ret = set_sys_lock(priv); + if (ret) + return ret; + + /* Set OTP_PWR_DN to 0 to make OTP Operational */ + data = readl(priv->reg_base + MMAP_OTP_OFFSET(OTP_PWR_DN_OFFSET)); + writel(data & ~OTP_PWR_DN_BIT, + priv->reg_base + MMAP_OTP_OFFSET(OTP_PWR_DN_OFFSET)); + + dev_set_drvdata(&aux_dev->dev, priv); + + if (is_eeprom_responsive(priv)) { + priv->nvmem_config_eeprom.type = NVMEM_TYPE_EEPROM; + priv->nvmem_config_eeprom.name = EEPROM_NAME; + priv->nvmem_config_eeprom.id = NVMEM_DEVID_AUTO; + priv->nvmem_config_eeprom.dev = &aux_dev->dev; + priv->nvmem_config_eeprom.owner = THIS_MODULE; + priv->nvmem_config_eeprom.reg_read = pci1xxxx_eeprom_read; + priv->nvmem_config_eeprom.reg_write = pci1xxxx_eeprom_write; + priv->nvmem_config_eeprom.priv = priv; + priv->nvmem_config_eeprom.stride = 1; + priv->nvmem_config_eeprom.word_size = 1; + priv->nvmem_config_eeprom.size = EEPROM_SIZE_BYTES; + + priv->nvmem_eeprom = devm_nvmem_register(&aux_dev->dev, + &priv->nvmem_config_eeprom); + if (IS_ERR(priv->nvmem_eeprom)) + return PTR_ERR(priv->nvmem_eeprom); + } + + release_sys_lock(priv); + + priv->nvmem_config_otp.type = NVMEM_TYPE_OTP; + priv->nvmem_config_otp.name = OTP_NAME; + priv->nvmem_config_otp.id = NVMEM_DEVID_AUTO; + priv->nvmem_config_otp.dev = &aux_dev->dev; + priv->nvmem_config_otp.owner = THIS_MODULE; + priv->nvmem_config_otp.reg_read = pci1xxxx_otp_read; + priv->nvmem_config_otp.reg_write = pci1xxxx_otp_write; + priv->nvmem_config_otp.priv = priv; + priv->nvmem_config_otp.stride = 1; + priv->nvmem_config_otp.word_size = 1; + priv->nvmem_config_otp.size = OTP_SIZE_BYTES; + + priv->nvmem_otp = 
devm_nvmem_register(&aux_dev->dev, + &priv->nvmem_config_otp); + if (IS_ERR(priv->nvmem_otp)) + return PTR_ERR(priv->nvmem_otp); + + return ret; +} + +static void pci1xxxx_otp_eeprom_remove(struct auxiliary_device *aux_dev) +{ + struct pci1xxxx_otp_eeprom_device *priv; + void __iomem *sys_lock; + + priv = dev_get_drvdata(&aux_dev->dev); + sys_lock = priv->reg_base + MMAP_CFG_OFFSET(CFG_SYS_LOCK_OFFSET); + writel(CFG_SYS_LOCK_PF3, sys_lock); + + /* Shut down OTP */ + writel(OTP_PWR_DN_BIT, + priv->reg_base + MMAP_OTP_OFFSET(OTP_PWR_DN_OFFSET)); + + writel(0, sys_lock); +} + +static const struct auxiliary_device_id pci1xxxx_otp_eeprom_auxiliary_id_table[] = { + {.name = "mchp_pci1xxxx_gp.gp_otp_e2p"}, + {}, +}; +MODULE_DEVICE_TABLE(auxiliary, pci1xxxx_otp_eeprom_auxiliary_id_table); + +static struct auxiliary_driver pci1xxxx_otp_eeprom_driver = { + .driver = { + .name = AUX_DRIVER_NAME, + }, + .probe = pci1xxxx_otp_eeprom_probe, + .remove = pci1xxxx_otp_eeprom_remove, + .id_table = pci1xxxx_otp_eeprom_auxiliary_id_table +}; +module_auxiliary_driver(pci1xxxx_otp_eeprom_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Kumaravel Thiagarajan <kumaravel.thiagarajan@microchip.com>"); +MODULE_AUTHOR("Tharun Kumar P <tharunkumar.pasumarthi@microchip.com>"); +MODULE_AUTHOR("Vaibhaav Ram T.L <vaibhaavram.tl@microchip.com>"); +MODULE_DESCRIPTION("Microchip Technology Inc. PCI1xxxx OTP EEPROM Programmer"); diff --git a/drivers/misc/mei/Kconfig b/drivers/misc/mei/Kconfig index 37db142de413..f4eb307cd35e 100644 --- a/drivers/misc/mei/Kconfig +++ b/drivers/misc/mei/Kconfig @@ -3,6 +3,7 @@ config INTEL_MEI tristate "Intel Management Engine Interface" depends on X86 && PCI + default X86_64 || MATOM help The Intel Management Engine (Intel ME) provides Manageability, Security and Media services for system containing Intel chipsets. 
@@ -11,10 +12,11 @@ config INTEL_MEI For more information see <https://software.intel.com/en-us/manageability/> +if INTEL_MEI + config INTEL_MEI_ME tristate "ME Enabled Intel Chipsets" - select INTEL_MEI - depends on X86 && PCI + default y help MEI support for ME Enabled Intel chipsets. @@ -38,8 +40,6 @@ config INTEL_MEI_ME config INTEL_MEI_TXE tristate "Intel Trusted Execution Environment with ME Interface" - select INTEL_MEI - depends on X86 && PCI help MEI Support for Trusted Execution Environment device on Intel SoCs @@ -48,10 +48,8 @@ config INTEL_MEI_TXE config INTEL_MEI_GSC tristate "Intel MEI GSC embedded device" - depends on INTEL_MEI depends on INTEL_MEI_ME - depends on X86 && PCI - depends on DRM_I915 + depends on DRM_I915 || DRM_XE help Intel auxiliary driver for GSC devices embedded in Intel graphics devices. @@ -60,6 +58,44 @@ config INTEL_MEI_GSC tasks such as graphics card firmware update and security tasks. +config INTEL_MEI_VSC_HW + tristate "Intel visual sensing controller device transport driver" + depends on ACPI && SPI + depends on GPIOLIB || COMPILE_TEST + help + Intel SPI transport driver between host and Intel visual sensing + controller (IVSC) device. + + This driver can also be built as a module. If so, the module + will be called mei-vsc-hw. + +config INTEL_MEI_VSC + tristate "Intel visual sensing controller device with ME interface" + depends on INTEL_MEI_VSC_HW + help + Intel MEI over SPI driver for Intel visual sensing controller + (IVSC) device embedded in IA platform. It supports camera sharing + between IVSC for context sensing and IPU for typical media usage. + Select this config should enable transport layer for IVSC device. + + This driver can also be built as a module. If so, the module + will be called mei-vsc. + +config INTEL_MEI_LB + tristate "Intel Late Binding (LB) support on ME Interface" + depends on INTEL_MEI_ME + depends on DRM_XE + help + Enable support for Intel Late Binding (LB) via the MEI interface. 
+ + Late Binding is a method for applying firmware updates at runtime, + allowing the Intel Xe driver to load firmware payloads such as + fan controller or voltage regulator. These firmware updates are + authenticated and versioned, and do not require firmware flashing + or system reboot. + source "drivers/misc/mei/hdcp/Kconfig" source "drivers/misc/mei/pxp/Kconfig" source "drivers/misc/mei/gsc_proxy/Kconfig" + +endif diff --git a/drivers/misc/mei/Makefile b/drivers/misc/mei/Makefile index 14aee253ae48..a203ed766b33 100644 --- a/drivers/misc/mei/Makefile +++ b/drivers/misc/mei/Makefile @@ -31,3 +31,11 @@ CFLAGS_mei-trace.o = -I$(src) obj-$(CONFIG_INTEL_MEI_HDCP) += hdcp/ obj-$(CONFIG_INTEL_MEI_PXP) += pxp/ obj-$(CONFIG_INTEL_MEI_GSC_PROXY) += gsc_proxy/ +obj-$(CONFIG_INTEL_MEI_LB) += mei_lb.o + +obj-$(CONFIG_INTEL_MEI_VSC_HW) += mei-vsc-hw.o +mei-vsc-hw-y := vsc-tp.o +mei-vsc-hw-y += vsc-fw-loader.o + +obj-$(CONFIG_INTEL_MEI_VSC) += mei-vsc.o +mei-vsc-y := platform-vsc.o diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c index b8b716faf192..e6a1d3534663 100644 --- a/drivers/misc/mei/bus-fixup.c +++ b/drivers/misc/mei/bus-fixup.c @@ -80,6 +80,8 @@ static void whitelist(struct mei_cl_device *cldev) cldev->do_match = 1; } +#define MKHI_SEND_MAX_TIMEOUT_MSEC 4000 + #define OSTYPE_LINUX 2 struct mei_os_ver { __le16 build; @@ -128,7 +130,7 @@ static int mei_osver(struct mei_cl_device *cldev) os_ver = (struct mei_os_ver *)fwcaps->data; os_ver->os_type = OSTYPE_LINUX; - return __mei_cl_send(cldev->cl, buf, size, 0, mode); + return __mei_cl_send_timeout(cldev->cl, buf, size, 0, mode, MKHI_SEND_MAX_TIMEOUT_MSEC); } #define MKHI_FWVER_BUF_LEN (sizeof(struct mkhi_msg_hdr) + \ @@ -148,8 +150,8 @@ static int mei_fwver(struct mei_cl_device *cldev) req.hdr.group_id = MKHI_GEN_GROUP_ID; req.hdr.command = MKHI_GEN_GET_FW_VERSION_CMD; - ret = __mei_cl_send(cldev->cl, (u8 *)&req, sizeof(req), 0, - MEI_CL_IO_TX_BLOCKING); + ret = __mei_cl_send_timeout(cldev->cl, 
(u8 *)&req, sizeof(req), 0, + MEI_CL_IO_TX_BLOCKING, MKHI_SEND_MAX_TIMEOUT_MSEC); if (ret < 0) { dev_info(&cldev->dev, "Could not send ReqFWVersion cmd ret = %d\n", ret); return ret; @@ -184,6 +186,7 @@ static int mei_fwver(struct mei_cl_device *cldev) cldev->bus->fw_ver[i].hotfix = fwver->ver[i].hotfix; cldev->bus->fw_ver[i].buildno = fwver->ver[i].buildno; } + cldev->bus->fw_ver_received = 1; return ret; } @@ -237,8 +240,11 @@ static void mei_gsc_mkhi_ver(struct mei_cl_device *cldev) { int ret; - /* No need to enable the client if nothing is needed from it */ - if (!cldev->bus->fw_f_fw_ver_supported) + /* + * No need to enable the client if nothing is needed from it. + * No need to fill in version if it is already filled in by the fix address client. + */ + if (!cldev->bus->fw_f_fw_ver_supported || cldev->bus->fw_ver_received) return; ret = mei_cldev_enable(cldev); @@ -380,7 +386,7 @@ static int mei_nfc_if_version(struct mei_cl *cl, ret = __mei_cl_send(cl, (u8 *)&cmd, sizeof(cmd), 0, MEI_CL_IO_TX_BLOCKING); if (ret < 0) { - dev_err(bus->dev, "Could not send IF version cmd ret = %d\n", ret); + dev_err(&bus->dev, "Could not send IF version cmd ret = %d\n", ret); return ret; } @@ -395,14 +401,14 @@ static int mei_nfc_if_version(struct mei_cl *cl, bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length, &vtag, 0, 0); if (bytes_recv < 0 || (size_t)bytes_recv < if_version_length) { - dev_err(bus->dev, "Could not read IF version ret = %d\n", bytes_recv); + dev_err(&bus->dev, "Could not read IF version ret = %d\n", bytes_recv); ret = -EIO; goto err; } memcpy(ver, reply->data, sizeof(*ver)); - dev_info(bus->dev, "NFC MEI VERSION: IVN 0x%x Vendor ID 0x%x Type 0x%x\n", + dev_info(&bus->dev, "NFC MEI VERSION: IVN 0x%x Vendor ID 0x%x Type 0x%x\n", ver->fw_ivn, ver->vendor_id, ver->radio_type); err: @@ -457,14 +463,14 @@ static void mei_nfc(struct mei_cl_device *cldev) if (IS_ERR(cl)) { ret = PTR_ERR(cl); cl = NULL; - dev_err(bus->dev, "nfc hook alloc failed %d\n", ret); 
+ dev_err(&cldev->dev, "nfc hook alloc failed %d\n", ret); goto out; } me_cl = mei_me_cl_by_uuid(bus, &mei_nfc_info_guid); if (!me_cl) { ret = -ENOTTY; - dev_err(bus->dev, "Cannot find nfc info %d\n", ret); + dev_err(&cldev->dev, "Cannot find nfc info %d\n", ret); goto out; } @@ -490,13 +496,13 @@ static void mei_nfc(struct mei_cl_device *cldev) goto disconnect; } - dev_dbg(bus->dev, "nfc radio %s\n", radio_name); + dev_dbg(&cldev->dev, "nfc radio %s\n", radio_name); strscpy(cldev->name, radio_name, sizeof(cldev->name)); disconnect: mutex_lock(&bus->device_lock); if (mei_cl_disconnect(cl) < 0) - dev_err(bus->dev, "Can't disconnect the NFC INFO ME\n"); + dev_err(&cldev->dev, "Can't disconnect the NFC INFO ME\n"); mei_cl_flush_queues(cl, NULL); @@ -509,7 +515,7 @@ out: if (ret) cldev->do_match = 0; - dev_dbg(bus->dev, "end of fixup match = %d\n", cldev->do_match); + dev_dbg(&cldev->dev, "end of fixup match = %d\n", cldev->do_match); } /** @@ -555,8 +561,8 @@ static struct mei_fixup { MEI_FIXUP(MEI_UUID_NFC_HCI, mei_nfc), MEI_FIXUP(MEI_UUID_WD, mei_wd), MEI_FIXUP(MEI_UUID_MKHIF_FIX, mei_mkhi_fix), - MEI_FIXUP(MEI_UUID_IGSC_MKHI, mei_gsc_mkhi_ver), MEI_FIXUP(MEI_UUID_IGSC_MKHI_FIX, mei_gsc_mkhi_fix_ver), + MEI_FIXUP(MEI_UUID_IGSC_MKHI, mei_gsc_mkhi_ver), MEI_FIXUP(MEI_UUID_HDCP, whitelist), MEI_FIXUP(MEI_UUID_ANY, vt_support), MEI_FIXUP(MEI_UUID_PAVP, pxp_is_ready), diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c index 33ec6424dfee..2c810ab12e62 100644 --- a/drivers/misc/mei/bus.c +++ b/drivers/misc/mei/bus.c @@ -19,7 +19,7 @@ #include "mei_dev.h" #include "client.h" -#define to_mei_cl_driver(d) container_of(d, struct mei_cl_driver, driver) +#define to_mei_cl_driver(d) container_of_const(d, struct mei_cl_driver, driver) /** * __mei_cl_send - internal client send (write) @@ -145,8 +145,8 @@ out: * @cl: host client * @buf: buffer to receive * @length: buffer length - * @mode: io mode * @vtag: virtual tag + * @mode: io mode * @timeout: recv timeout, 0 for 
infinite timeout * * Return: read size in bytes of < 0 on error @@ -257,7 +257,7 @@ out: } /** - * mei_cldev_send_vtag - me device send with vtag (write) + * mei_cldev_send_vtag - me device send with vtag (write) * * @cldev: me client device * @buf: buffer to send @@ -279,6 +279,29 @@ ssize_t mei_cldev_send_vtag(struct mei_cl_device *cldev, const u8 *buf, EXPORT_SYMBOL_GPL(mei_cldev_send_vtag); /** + * mei_cldev_send_vtag_timeout - me device send with vtag and timeout (write) + * + * @cldev: me client device + * @buf: buffer to send + * @length: buffer length + * @vtag: virtual tag + * @timeout: send timeout in milliseconds, 0 for infinite timeout + * + * Return: + * * written size in bytes + * * < 0 on error + */ + +ssize_t mei_cldev_send_vtag_timeout(struct mei_cl_device *cldev, const u8 *buf, + size_t length, u8 vtag, unsigned long timeout) +{ + struct mei_cl *cl = cldev->cl; + + return __mei_cl_send_timeout(cl, buf, length, vtag, MEI_CL_IO_TX_BLOCKING, timeout); +} +EXPORT_SYMBOL_GPL(mei_cldev_send_vtag_timeout); + +/** * mei_cldev_recv_vtag - client receive with vtag (read) * * @cldev: me client device @@ -301,29 +324,49 @@ ssize_t mei_cldev_recv_vtag(struct mei_cl_device *cldev, u8 *buf, size_t length, EXPORT_SYMBOL_GPL(mei_cldev_recv_vtag); /** - * mei_cldev_recv_nonblock_vtag - non block client receive with vtag (read) + * mei_cldev_recv_timeout - client receive with timeout (read) + * + * @cldev: me client device + * @buf: buffer to receive + * @length: buffer length + * @timeout: send timeout in milliseconds, 0 for infinite timeout + * + * Return: + * * read size in bytes + * * < 0 on error + */ +ssize_t mei_cldev_recv_timeout(struct mei_cl_device *cldev, u8 *buf, size_t length, + unsigned long timeout) +{ + return mei_cldev_recv_vtag_timeout(cldev, buf, length, NULL, timeout); +} +EXPORT_SYMBOL_GPL(mei_cldev_recv_timeout); + +/** + * mei_cldev_recv_vtag_timeout - client receive with vtag (read) * * @cldev: me client device * @buf: buffer to receive * 
@length: buffer length * @vtag: virtual tag + * @timeout: recv timeout in milliseconds, 0 for infinite timeout * * Return: * * read size in bytes - * * -EAGAIN if function will block. - * * < 0 on other error + * * < 0 on error */ -ssize_t mei_cldev_recv_nonblock_vtag(struct mei_cl_device *cldev, u8 *buf, - size_t length, u8 *vtag) + +ssize_t mei_cldev_recv_vtag_timeout(struct mei_cl_device *cldev, u8 *buf, size_t length, + u8 *vtag, unsigned long timeout) { struct mei_cl *cl = cldev->cl; - return __mei_cl_recv(cl, buf, length, vtag, MEI_CL_IO_RX_NONBLOCK, 0); + return __mei_cl_recv(cl, buf, length, vtag, 0, timeout); } -EXPORT_SYMBOL_GPL(mei_cldev_recv_nonblock_vtag); +EXPORT_SYMBOL_GPL(mei_cldev_recv_vtag_timeout); /** - * mei_cldev_send - me device send (write) + * mei_cldev_send - me device send (write) * * @cldev: me client device * @buf: buffer to send @@ -340,36 +383,38 @@ ssize_t mei_cldev_send(struct mei_cl_device *cldev, const u8 *buf, size_t length EXPORT_SYMBOL_GPL(mei_cldev_send); /** - * mei_cldev_recv - client receive (read) + * mei_cldev_send_timeout - me device send with timeout (write) * * @cldev: me client device - * @buf: buffer to receive + * @buf: buffer to send * @length: buffer length + * @timeout: send timeout in milliseconds, 0 for infinite timeout * - * Return: read size in bytes of < 0 on error + * Return: + * * written size in bytes + * * < 0 on error */ -ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length) +ssize_t mei_cldev_send_timeout(struct mei_cl_device *cldev, const u8 *buf, size_t length, + unsigned long timeout) { - return mei_cldev_recv_vtag(cldev, buf, length, NULL); + return mei_cldev_send_vtag_timeout(cldev, buf, length, 0, timeout); } -EXPORT_SYMBOL_GPL(mei_cldev_recv); +EXPORT_SYMBOL_GPL(mei_cldev_send_timeout); /** - * mei_cldev_recv_nonblock - non block client receive (read) + * mei_cldev_recv - client receive (read) * * @cldev: me client device * @buf: buffer to receive * @length: buffer length * 
* Return: read size in bytes of < 0 on error - * -EAGAIN if function will block. */ -ssize_t mei_cldev_recv_nonblock(struct mei_cl_device *cldev, u8 *buf, - size_t length) +ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length) { - return mei_cldev_recv_nonblock_vtag(cldev, buf, length, NULL); + return mei_cldev_recv_vtag(cldev, buf, length, NULL); } -EXPORT_SYMBOL_GPL(mei_cldev_recv_nonblock); +EXPORT_SYMBOL_GPL(mei_cldev_recv); /** * mei_cl_bus_rx_work - dispatch rx event for a bus device @@ -557,30 +602,30 @@ void mei_cldev_set_drvdata(struct mei_cl_device *cldev, void *data) EXPORT_SYMBOL_GPL(mei_cldev_set_drvdata); /** - * mei_cldev_uuid - return uuid of the underlying me client + * mei_cldev_ver - return protocol version of the underlying me client * * @cldev: mei client device * - * Return: me client uuid + * Return: me client protocol version */ -const uuid_le *mei_cldev_uuid(const struct mei_cl_device *cldev) +u8 mei_cldev_ver(const struct mei_cl_device *cldev) { - return mei_me_cl_uuid(cldev->me_cl); + return mei_me_cl_ver(cldev->me_cl); } -EXPORT_SYMBOL_GPL(mei_cldev_uuid); +EXPORT_SYMBOL_GPL(mei_cldev_ver); /** - * mei_cldev_ver - return protocol version of the underlying me client + * mei_cldev_mtu - max message that client can send and receive * * @cldev: mei client device * - * Return: me client protocol version + * Return: mtu or 0 if client is not connected */ -u8 mei_cldev_ver(const struct mei_cl_device *cldev) +size_t mei_cldev_mtu(const struct mei_cl_device *cldev) { - return mei_me_cl_ver(cldev->me_cl); + return mei_cl_mtu(cldev->cl); } -EXPORT_SYMBOL_GPL(mei_cldev_ver); +EXPORT_SYMBOL_GPL(mei_cldev_mtu); /** * mei_cldev_enabled - check whether the device is enabled @@ -605,7 +650,7 @@ EXPORT_SYMBOL_GPL(mei_cldev_enabled); */ static bool mei_cl_bus_module_get(struct mei_cl_device *cldev) { - return try_module_get(cldev->bus->dev->driver->owner); + return try_module_get(cldev->bus->parent->driver->owner); } /** @@ -615,7 
+660,7 @@ static bool mei_cl_bus_module_get(struct mei_cl_device *cldev) */ static void mei_cl_bus_module_put(struct mei_cl_device *cldev) { - module_put(cldev->bus->dev->driver->owner); + module_put(cldev->bus->parent->driver->owner); } /** @@ -782,7 +827,7 @@ int mei_cldev_enable(struct mei_cl_device *cldev) ret = mei_cl_connect(cl, cldev->me_cl, NULL); if (ret < 0) { - dev_err(&cldev->dev, "cannot connect\n"); + dev_dbg(&cldev->dev, "cannot connect\n"); mei_cl_bus_vtag_free(cldev); } @@ -843,14 +888,14 @@ int mei_cldev_disable(struct mei_cl_device *cldev) mei_cl_bus_vtag_free(cldev); if (!mei_cl_is_connected(cl)) { - dev_dbg(bus->dev, "Already disconnected\n"); + dev_dbg(&cldev->dev, "Already disconnected\n"); err = 0; goto out; } err = mei_cl_disconnect(cl); if (err < 0) - dev_err(bus->dev, "Could not disconnect from the ME client\n"); + dev_err(&cldev->dev, "Could not disconnect from the ME client\n"); out: /* Flush queues and remove any pending read unless we have mapped DMA */ @@ -903,7 +948,7 @@ ssize_t mei_cldev_send_gsc_command(struct mei_cl_device *cldev, cl = cldev->cl; bus = cldev->bus; - dev_dbg(bus->dev, "client_id %u, fence_id %u\n", client_id, fence_id); + dev_dbg(&cldev->dev, "client_id %u, fence_id %u\n", client_id, fence_id); if (!bus->hbm_f_gsc_supported) return -EOPNOTSUPP; @@ -951,11 +996,11 @@ ssize_t mei_cldev_send_gsc_command(struct mei_cl_device *cldev, /* send the message to GSC */ ret = __mei_cl_send(cl, (u8 *)ext_hdr, buf_sz, 0, MEI_CL_IO_SGL); if (ret < 0) { - dev_err(bus->dev, "__mei_cl_send failed, returned %zd\n", ret); + dev_err(&cldev->dev, "__mei_cl_send failed, returned %zd\n", ret); goto end; } if (ret != buf_sz) { - dev_err(bus->dev, "__mei_cl_send returned %zd instead of expected %zd\n", + dev_err(&cldev->dev, "__mei_cl_send returned %zd instead of expected %zd\n", ret, buf_sz); ret = -EIO; goto end; @@ -965,7 +1010,7 @@ ssize_t mei_cldev_send_gsc_command(struct mei_cl_device *cldev, ret = __mei_cl_recv(cl, (u8 *)&rx_msg, 
sizeof(rx_msg), NULL, MEI_CL_IO_SGL, 0); if (ret != sizeof(rx_msg)) { - dev_err(bus->dev, "__mei_cl_recv returned %zd instead of expected %zd\n", + dev_err(&cldev->dev, "__mei_cl_recv returned %zd instead of expected %zd\n", ret, sizeof(rx_msg)); if (ret >= 0) ret = -EIO; @@ -974,13 +1019,13 @@ ssize_t mei_cldev_send_gsc_command(struct mei_cl_device *cldev, /* check rx_msg.client_id and rx_msg.fence_id match the ones we send */ if (rx_msg.client_id != client_id || rx_msg.fence_id != fence_id) { - dev_err(bus->dev, "received client_id/fence_id %u/%u instead of %u/%u sent\n", + dev_err(&cldev->dev, "received client_id/fence_id %u/%u instead of %u/%u sent\n", rx_msg.client_id, rx_msg.fence_id, client_id, fence_id); ret = -EFAULT; goto end; } - dev_dbg(bus->dev, "gsc command: successfully written %u bytes\n", rx_msg.written); + dev_dbg(&cldev->dev, "gsc command: successfully written %u bytes\n", rx_msg.written); ret = rx_msg.written; end: @@ -1040,7 +1085,7 @@ struct mei_cl_device_id *mei_cl_device_find(const struct mei_cl_device *cldev, * * Return: 1 if matching device was found 0 otherwise */ -static int mei_cl_device_match(struct device *dev, struct device_driver *drv) +static int mei_cl_device_match(struct device *dev, const struct device_driver *drv) { const struct mei_cl_device *cldev = to_mei_cl_device(dev); const struct mei_cl_driver *cldrv = to_mei_cl_driver(drv); @@ -1124,7 +1169,7 @@ static ssize_t name_show(struct device *dev, struct device_attribute *a, { struct mei_cl_device *cldev = to_mei_cl_device(dev); - return scnprintf(buf, PAGE_SIZE, "%s", cldev->name); + return sysfs_emit(buf, "%s", cldev->name); } static DEVICE_ATTR_RO(name); @@ -1134,7 +1179,7 @@ static ssize_t uuid_show(struct device *dev, struct device_attribute *a, struct mei_cl_device *cldev = to_mei_cl_device(dev); const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl); - return sprintf(buf, "%pUl", uuid); + return sysfs_emit(buf, "%pUl", uuid); } static DEVICE_ATTR_RO(uuid); @@ -1144,7 +1189,7 
@@ static ssize_t version_show(struct device *dev, struct device_attribute *a, struct mei_cl_device *cldev = to_mei_cl_device(dev); u8 version = mei_me_cl_ver(cldev->me_cl); - return sprintf(buf, "%02X", version); + return sysfs_emit(buf, "%02X", version); } static DEVICE_ATTR_RO(version); @@ -1155,8 +1200,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *a, const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl); u8 version = mei_me_cl_ver(cldev->me_cl); - return scnprintf(buf, PAGE_SIZE, "mei:%s:%pUl:%02X:", - cldev->name, uuid, version); + return sysfs_emit(buf, "mei:%s:%pUl:%02X:", cldev->name, uuid, version); } static DEVICE_ATTR_RO(modalias); @@ -1166,7 +1210,7 @@ static ssize_t max_conn_show(struct device *dev, struct device_attribute *a, struct mei_cl_device *cldev = to_mei_cl_device(dev); u8 maxconn = mei_me_cl_max_conn(cldev->me_cl); - return sprintf(buf, "%d", maxconn); + return sysfs_emit(buf, "%d", maxconn); } static DEVICE_ATTR_RO(max_conn); @@ -1176,7 +1220,7 @@ static ssize_t fixed_show(struct device *dev, struct device_attribute *a, struct mei_cl_device *cldev = to_mei_cl_device(dev); u8 fixed = mei_me_cl_fixed(cldev->me_cl); - return sprintf(buf, "%d", fixed); + return sysfs_emit(buf, "%d", fixed); } static DEVICE_ATTR_RO(fixed); @@ -1186,7 +1230,7 @@ static ssize_t vtag_show(struct device *dev, struct device_attribute *a, struct mei_cl_device *cldev = to_mei_cl_device(dev); bool vt = mei_me_cl_vt(cldev->me_cl); - return sprintf(buf, "%d", vt); + return sysfs_emit(buf, "%d", vt); } static DEVICE_ATTR_RO(vtag); @@ -1196,7 +1240,7 @@ static ssize_t max_len_show(struct device *dev, struct device_attribute *a, struct mei_cl_device *cldev = to_mei_cl_device(dev); u32 maxlen = mei_me_cl_max_len(cldev->me_cl); - return sprintf(buf, "%u", maxlen); + return sysfs_emit(buf, "%u", maxlen); } static DEVICE_ATTR_RO(max_len); @@ -1243,7 +1287,7 @@ static int mei_cl_device_uevent(const struct device *dev, struct kobj_uevent_env return 0; 
} -static struct bus_type mei_cl_bus_type = { +static const struct bus_type mei_cl_bus_type = { .name = "mei", .dev_groups = mei_cldev_groups, .match = mei_cl_device_match, @@ -1254,25 +1298,35 @@ static struct bus_type mei_cl_bus_type = { static struct mei_device *mei_dev_bus_get(struct mei_device *bus) { - if (bus) - get_device(bus->dev); + if (bus) { + get_device(&bus->dev); + get_device(bus->parent); + } return bus; } static void mei_dev_bus_put(struct mei_device *bus) { - if (bus) - put_device(bus->dev); + if (bus) { + put_device(bus->parent); + put_device(&bus->dev); + } } static void mei_cl_bus_dev_release(struct device *dev) { struct mei_cl_device *cldev = to_mei_cl_device(dev); + struct mei_device *mdev = cldev->cl->dev; + struct mei_cl *cl; mei_cl_flush_queues(cldev->cl, NULL); mei_me_cl_put(cldev->me_cl); mei_dev_bus_put(cldev->bus); + + list_for_each_entry(cl, &mdev->file_list, link) + WARN_ON(cl == cldev->cl); + kfree(cldev->cl); kfree(cldev); } @@ -1291,7 +1345,7 @@ static const struct device_type mei_cl_device_type = { static inline void mei_cl_bus_set_name(struct mei_cl_device *cldev) { dev_set_name(&cldev->dev, "%s-%pUl", - dev_name(cldev->bus->dev), + dev_name(cldev->bus->parent), mei_me_cl_uuid(cldev->me_cl)); } @@ -1301,7 +1355,7 @@ static inline void mei_cl_bus_set_name(struct mei_cl_device *cldev) * @bus: mei device * @me_cl: me client * - * Return: allocated device structur or NULL on allocation failure + * Return: allocated device structure or NULL on allocation failure */ static struct mei_cl_device *mei_cl_bus_dev_alloc(struct mei_device *bus, struct mei_me_client *me_cl) @@ -1320,7 +1374,7 @@ static struct mei_cl_device *mei_cl_bus_dev_alloc(struct mei_device *bus, } device_initialize(&cldev->dev); - cldev->dev.parent = bus->dev; + cldev->dev.parent = bus->parent; cldev->dev.bus = &mei_cl_bus_type; cldev->dev.type = &mei_cl_device_type; cldev->bus = mei_dev_bus_get(bus); @@ -1329,6 +1383,7 @@ static struct mei_cl_device 
*mei_cl_bus_dev_alloc(struct mei_device *bus, mei_cl_bus_set_name(cldev); cldev->is_added = 0; INIT_LIST_HEAD(&cldev->bus_list); + device_enable_async_suspend(&cldev->dev); return cldev; } @@ -1360,13 +1415,13 @@ static bool mei_cl_bus_dev_setup(struct mei_device *bus, * * @cldev: me client device * - * Return: 0 on success; < 0 on failre + * Return: 0 on success; < 0 on failure */ static int mei_cl_bus_dev_add(struct mei_cl_device *cldev) { int ret; - dev_dbg(cldev->bus->dev, "adding %pUL:%02X\n", + dev_dbg(&cldev->dev, "adding %pUL:%02X\n", mei_me_cl_uuid(cldev->me_cl), mei_me_cl_ver(cldev->me_cl)); ret = device_add(&cldev->dev); @@ -1454,7 +1509,7 @@ static void mei_cl_bus_dev_init(struct mei_device *bus, WARN_ON(!mutex_is_locked(&bus->cl_bus_lock)); - dev_dbg(bus->dev, "initializing %pUl", mei_me_cl_uuid(me_cl)); + dev_dbg(&bus->dev, "initializing %pUl", mei_me_cl_uuid(me_cl)); if (me_cl->bus_added) return; @@ -1505,7 +1560,7 @@ static void mei_cl_bus_rescan(struct mei_device *bus) } mutex_unlock(&bus->cl_bus_lock); - dev_dbg(bus->dev, "rescan end"); + dev_dbg(&bus->dev, "rescan end"); } void mei_cl_bus_rescan_work(struct work_struct *work) diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c index 5c19097266fe..5dc665515263 100644 --- a/drivers/misc/mei/client.c +++ b/drivers/misc/mei/client.c @@ -48,9 +48,9 @@ struct mei_me_client *mei_me_cl_get(struct mei_me_client *me_cl) /** * mei_me_cl_release - free me client * - * Locking: called under "dev->device_lock" lock - * * @ref: me_client refcount + * + * Locking: called under "dev->device_lock" lock */ static void mei_me_cl_release(struct kref *ref) { @@ -63,9 +63,9 @@ static void mei_me_cl_release(struct kref *ref) /** * mei_me_cl_put - decrease me client refcount and free client if necessary * - * Locking: called under "dev->device_lock" lock - * * @me_cl: me client + * + * Locking: called under "dev->device_lock" lock */ void mei_me_cl_put(struct mei_me_client *me_cl) { @@ -262,7 +262,7 @@ 
void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid) { struct mei_me_client *me_cl; - dev_dbg(dev->dev, "remove %pUl\n", uuid); + dev_dbg(&dev->dev, "remove %pUl\n", uuid); down_write(&dev->me_clients_rwsem); me_cl = __mei_me_cl_by_uuid(dev, uuid); @@ -272,28 +272,6 @@ void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid) } /** - * mei_me_cl_rm_by_uuid_id - remove all me clients matching client id - * - * @dev: the device structure - * @uuid: me client uuid - * @id: me client id - * - * Locking: called under "dev->device_lock" lock - */ -void mei_me_cl_rm_by_uuid_id(struct mei_device *dev, const uuid_le *uuid, u8 id) -{ - struct mei_me_client *me_cl; - - dev_dbg(dev->dev, "remove %pUl %d\n", uuid, id); - - down_write(&dev->me_clients_rwsem); - me_cl = __mei_me_cl_by_uuid_id(dev, uuid, id); - __mei_me_cl_del(dev, me_cl); - mei_me_cl_put(me_cl); - up_write(&dev->me_clients_rwsem); -} - -/** * mei_me_cl_rm_all - remove all me clients * * @dev: the device structure @@ -321,7 +299,7 @@ void mei_io_cb_free(struct mei_cl_cb *cb) return; list_del(&cb->list); - kfree(cb->buf.data); + kvfree(cb->buf.data); kfree(cb->ext_hdr); kfree(cb); } @@ -329,10 +307,10 @@ void mei_io_cb_free(struct mei_cl_cb *cb) /** * mei_tx_cb_enqueue - queue tx callback * - * Locking: called under "dev->device_lock" lock - * * @cb: mei callback struct * @head: an instance of list to queue on + * + * Locking: called under "dev->device_lock" lock */ static inline void mei_tx_cb_enqueue(struct mei_cl_cb *cb, struct list_head *head) @@ -344,9 +322,9 @@ static inline void mei_tx_cb_enqueue(struct mei_cl_cb *cb, /** * mei_tx_cb_dequeue - dequeue tx callback * - * Locking: called under "dev->device_lock" lock - * * @cb: mei callback struct to dequeue and free + * + * Locking: called under "dev->device_lock" lock */ static inline void mei_tx_cb_dequeue(struct mei_cl_cb *cb) { @@ -359,10 +337,10 @@ static inline void mei_tx_cb_dequeue(struct mei_cl_cb *cb) /** * 
mei_cl_set_read_by_fp - set pending_read flag to vtag struct for given fp * - * Locking: called under "dev->device_lock" lock - * * @cl: mei client * @fp: pointer to file structure + * + * Locking: called under "dev->device_lock" lock */ static void mei_cl_set_read_by_fp(const struct mei_cl *cl, const struct file *fp) @@ -497,7 +475,7 @@ struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length, if (length == 0) return cb; - cb->buf.data = kmalloc(roundup(length, MEI_SLOT_SIZE), GFP_KERNEL); + cb->buf.data = kvmalloc(roundup(length, MEI_SLOT_SIZE), GFP_KERNEL); if (!cb->buf.data) { mei_io_cb_free(cb); return NULL; @@ -657,12 +635,12 @@ int mei_cl_link(struct mei_cl *cl) id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX); if (id >= MEI_CLIENTS_MAX) { - dev_err(dev->dev, "id exceeded %d", MEI_CLIENTS_MAX); + dev_err(&dev->dev, "id exceeded %d", MEI_CLIENTS_MAX); return -EMFILE; } if (dev->open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) { - dev_err(dev->dev, "open_handle_count exceeded %d", + dev_err(&dev->dev, "open_handle_count exceeded %d", MEI_MAX_OPEN_HANDLE_COUNT); return -EMFILE; } @@ -731,9 +709,8 @@ void mei_host_client_init(struct mei_device *dev) schedule_work(&dev->bus_rescan_work); - pm_runtime_mark_last_busy(dev->dev); - dev_dbg(dev->dev, "rpm: autosuspend\n"); - pm_request_autosuspend(dev->dev); + dev_dbg(&dev->dev, "rpm: autosuspend\n"); + pm_request_autosuspend(dev->parent); } /** @@ -746,12 +723,12 @@ bool mei_hbuf_acquire(struct mei_device *dev) { if (mei_pg_state(dev) == MEI_PG_ON || mei_pg_in_transition(dev)) { - dev_dbg(dev->dev, "device is in pg\n"); + dev_dbg(&dev->dev, "device is in pg\n"); return false; } if (!dev->hbuf_is_ready) { - dev_dbg(dev->dev, "hbuf is not ready\n"); + dev_dbg(&dev->dev, "hbuf is not ready\n"); return false; } @@ -1003,9 +980,9 @@ int mei_cl_disconnect(struct mei_cl *cl) return 0; } - rets = pm_runtime_get(dev->dev); + rets = pm_runtime_get(dev->parent); if (rets < 0 && rets != -EINPROGRESS) { - 
pm_runtime_put_noidle(dev->dev); + pm_runtime_put_noidle(dev->parent); cl_err(dev, cl, "rpm: get failed %d\n", rets); return rets; } @@ -1013,8 +990,7 @@ int mei_cl_disconnect(struct mei_cl *cl) rets = __mei_cl_disconnect(cl); cl_dbg(dev, cl, "rpm: autosuspend\n"); - pm_runtime_mark_last_busy(dev->dev); - pm_runtime_put_autosuspend(dev->dev); + pm_runtime_put_autosuspend(dev->parent); return rets; } @@ -1140,9 +1116,9 @@ int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl, goto nortpm; } - rets = pm_runtime_get(dev->dev); + rets = pm_runtime_get(dev->parent); if (rets < 0 && rets != -EINPROGRESS) { - pm_runtime_put_noidle(dev->dev); + pm_runtime_put_noidle(dev->parent); cl_err(dev, cl, "rpm: get failed %d\n", rets); goto nortpm; } @@ -1189,8 +1165,7 @@ int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl, rets = cl->status; out: cl_dbg(dev, cl, "rpm: autosuspend\n"); - pm_runtime_mark_last_busy(dev->dev); - pm_runtime_put_autosuspend(dev->dev); + pm_runtime_put_autosuspend(dev->parent); mei_io_cb_free(cb); @@ -1539,9 +1514,9 @@ int mei_cl_notify_request(struct mei_cl *cl, if (!mei_cl_is_connected(cl)) return -ENODEV; - rets = pm_runtime_get(dev->dev); + rets = pm_runtime_get(dev->parent); if (rets < 0 && rets != -EINPROGRESS) { - pm_runtime_put_noidle(dev->dev); + pm_runtime_put_noidle(dev->parent); cl_err(dev, cl, "rpm: get failed %d\n", rets); return rets; } @@ -1576,8 +1551,7 @@ int mei_cl_notify_request(struct mei_cl *cl, out: cl_dbg(dev, cl, "rpm: autosuspend\n"); - pm_runtime_mark_last_busy(dev->dev); - pm_runtime_put_autosuspend(dev->dev); + pm_runtime_put_autosuspend(dev->parent); mei_io_cb_free(cb); return rets; @@ -1705,9 +1679,9 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp) mei_cl_set_read_by_fp(cl, fp); - rets = pm_runtime_get(dev->dev); + rets = pm_runtime_get(dev->parent); if (rets < 0 && rets != -EINPROGRESS) { - pm_runtime_put_noidle(dev->dev); + pm_runtime_put_noidle(dev->parent); 
cl_err(dev, cl, "rpm: get failed %d\n", rets); goto nortpm; } @@ -1724,8 +1698,7 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp) out: cl_dbg(dev, cl, "rpm: autosuspend\n"); - pm_runtime_mark_last_busy(dev->dev); - pm_runtime_put_autosuspend(dev->dev); + pm_runtime_put_autosuspend(dev->parent); nortpm: if (rets) mei_io_cb_free(cb); @@ -1994,9 +1967,9 @@ ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, unsigned long time blocking = cb->blocking; data = buf->data; - rets = pm_runtime_get(dev->dev); + rets = pm_runtime_get(dev->parent); if (rets < 0 && rets != -EINPROGRESS) { - pm_runtime_put_noidle(dev->dev); + pm_runtime_put_noidle(dev->parent); cl_err(dev, cl, "rpm: get failed %zd\n", rets); goto free; } @@ -2011,7 +1984,7 @@ ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, unsigned long time mei_hdr = mei_msg_hdr_init(cb); if (IS_ERR(mei_hdr)) { - rets = -PTR_ERR(mei_hdr); + rets = PTR_ERR(mei_hdr); mei_hdr = NULL; goto err; } @@ -2032,7 +2005,7 @@ ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, unsigned long time hbuf_slots = mei_hbuf_empty_slots(dev); if (hbuf_slots < 0) { - rets = -EOVERFLOW; + buf_len = -EOVERFLOW; goto out; } @@ -2114,8 +2087,7 @@ out: rets = buf_len; err: cl_dbg(dev, cl, "rpm: autosuspend\n"); - pm_runtime_mark_last_busy(dev->dev); - pm_runtime_put_autosuspend(dev->dev); + pm_runtime_put_autosuspend(dev->parent); free: mei_io_cb_free(cb); @@ -2138,12 +2110,10 @@ void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb) case MEI_FOP_WRITE: mei_tx_cb_dequeue(cb); cl->writing_state = MEI_WRITE_COMPLETE; - if (waitqueue_active(&cl->tx_wait)) { + if (waitqueue_active(&cl->tx_wait)) wake_up_interruptible(&cl->tx_wait); - } else { - pm_runtime_mark_last_busy(dev->dev); - pm_request_autosuspend(dev->dev); - } + else + pm_request_autosuspend(dev->parent); break; case MEI_FOP_READ: @@ -2273,7 +2243,7 @@ int mei_cl_irq_dma_unmap(struct mei_cl *cl, struct mei_cl_cb *cb, static 
int mei_cl_dma_alloc(struct mei_cl *cl, u8 buf_id, size_t size) { - cl->dma.vaddr = dmam_alloc_coherent(cl->dev->dev, size, + cl->dma.vaddr = dmam_alloc_coherent(&cl->dev->dev, size, &cl->dma.daddr, GFP_KERNEL); if (!cl->dma.vaddr) return -ENOMEM; @@ -2287,7 +2257,7 @@ static int mei_cl_dma_alloc(struct mei_cl *cl, u8 buf_id, size_t size) static void mei_cl_dma_free(struct mei_cl *cl) { cl->dma.buffer_id = 0; - dmam_free_coherent(cl->dev->dev, + dmam_free_coherent(&cl->dev->dev, cl->dma.size, cl->dma.vaddr, cl->dma.daddr); cl->dma.size = 0; cl->dma.vaddr = NULL; @@ -2343,16 +2313,16 @@ int mei_cl_dma_alloc_and_map(struct mei_cl *cl, const struct file *fp, return -EPROTO; } - rets = pm_runtime_get(dev->dev); + rets = pm_runtime_get(dev->parent); if (rets < 0 && rets != -EINPROGRESS) { - pm_runtime_put_noidle(dev->dev); + pm_runtime_put_noidle(dev->parent); cl_err(dev, cl, "rpm: get failed %d\n", rets); return rets; } rets = mei_cl_dma_alloc(cl, buffer_id, size); if (rets) { - pm_runtime_put_noidle(dev->dev); + pm_runtime_put_noidle(dev->parent); return rets; } @@ -2388,8 +2358,7 @@ out: mei_cl_dma_free(cl); cl_dbg(dev, cl, "rpm: autosuspend\n"); - pm_runtime_mark_last_busy(dev->dev); - pm_runtime_put_autosuspend(dev->dev); + pm_runtime_put_autosuspend(dev->parent); mei_io_cb_free(cb); return rets; @@ -2428,9 +2397,9 @@ int mei_cl_dma_unmap(struct mei_cl *cl, const struct file *fp) if (!cl->dma_mapped) return -EPROTO; - rets = pm_runtime_get(dev->dev); + rets = pm_runtime_get(dev->parent); if (rets < 0 && rets != -EINPROGRESS) { - pm_runtime_put_noidle(dev->dev); + pm_runtime_put_noidle(dev->parent); cl_err(dev, cl, "rpm: get failed %d\n", rets); return rets; } @@ -2466,8 +2435,7 @@ int mei_cl_dma_unmap(struct mei_cl *cl, const struct file *fp) mei_cl_dma_free(cl); out: cl_dbg(dev, cl, "rpm: autosuspend\n"); - pm_runtime_mark_last_busy(dev->dev); - pm_runtime_put_autosuspend(dev->dev); + pm_runtime_put_autosuspend(dev->parent); mei_io_cb_free(cb); return rets; diff 
--git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h index 9052860bcfe0..031114478bcb 100644 --- a/drivers/misc/mei/client.h +++ b/drivers/misc/mei/client.h @@ -29,8 +29,6 @@ struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id); struct mei_me_client *mei_me_cl_by_uuid_id(struct mei_device *dev, const uuid_le *uuid, u8 client_id); void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid); -void mei_me_cl_rm_by_uuid_id(struct mei_device *dev, - const uuid_le *uuid, u8 id); void mei_me_cl_rm_all(struct mei_device *dev); /** @@ -277,12 +275,12 @@ int mei_cl_dma_unmap(struct mei_cl *cl, const struct file *fp); #define MEI_CL_PRM(cl) (cl)->host_client_id, mei_cl_me_id(cl) #define cl_dbg(dev, cl, format, arg...) \ - dev_dbg((dev)->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg) + dev_dbg(&(dev)->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg) #define cl_warn(dev, cl, format, arg...) \ - dev_warn((dev)->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg) + dev_warn(&(dev)->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg) #define cl_err(dev, cl, format, arg...) 
\ - dev_err((dev)->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg) + dev_err(&(dev)->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg) #endif /* _MEI_CLIENT_H_ */ diff --git a/drivers/misc/mei/dma-ring.c b/drivers/misc/mei/dma-ring.c index ef56f849b251..6277c4a5b0fd 100644 --- a/drivers/misc/mei/dma-ring.c +++ b/drivers/misc/mei/dma-ring.c @@ -30,7 +30,7 @@ static int mei_dmam_dscr_alloc(struct mei_device *dev, if (dscr->vaddr) return 0; - dscr->vaddr = dmam_alloc_coherent(dev->dev, dscr->size, &dscr->daddr, + dscr->vaddr = dmam_alloc_coherent(dev->parent, dscr->size, &dscr->daddr, GFP_KERNEL); if (!dscr->vaddr) return -ENOMEM; @@ -50,7 +50,7 @@ static void mei_dmam_dscr_free(struct mei_device *dev, if (!dscr->vaddr) return; - dmam_free_coherent(dev->dev, dscr->size, dscr->vaddr, dscr->daddr); + dmam_free_coherent(dev->parent, dscr->size, dscr->vaddr, dscr->daddr); dscr->vaddr = NULL; } @@ -124,6 +124,8 @@ void mei_dma_ring_reset(struct mei_device *dev) * @buf: data buffer * @offset: offset in slots. * @n: number of slots to copy. + * + * Return: number of bytes copied */ static size_t mei_dma_copy_from(struct mei_device *dev, unsigned char *buf, u32 offset, u32 n) @@ -144,6 +146,8 @@ static size_t mei_dma_copy_from(struct mei_device *dev, unsigned char *buf, * @buf: data buffer * @offset: offset in slots. * @n: number of slots to copy. + * + * Return: number of bytes copied */ static size_t mei_dma_copy_to(struct mei_device *dev, unsigned char *buf, u32 offset, u32 n) @@ -161,7 +165,7 @@ static size_t mei_dma_copy_to(struct mei_device *dev, unsigned char *buf, /** * mei_dma_ring_read() - read data from the ring * @dev: mei device - * @buf: buffer to read into: may be NULL in case of droping the data. + * @buf: buffer to read into: may be NULL in case of dropping the data. * @len: length to read. 
*/ void mei_dma_ring_read(struct mei_device *dev, unsigned char *buf, u32 len) @@ -173,7 +177,7 @@ void mei_dma_ring_read(struct mei_device *dev, unsigned char *buf, u32 len) if (WARN_ON(!ctrl)) return; - dev_dbg(dev->dev, "reading from dma %u bytes\n", len); + dev_dbg(&dev->dev, "reading from dma %u bytes\n", len); if (!len) return; @@ -250,7 +254,7 @@ void mei_dma_ring_write(struct mei_device *dev, unsigned char *buf, u32 len) if (WARN_ON(!ctrl)) return; - dev_dbg(dev->dev, "writing to dma %u bytes\n", len); + dev_dbg(&dev->dev, "writing to dma %u bytes\n", len); hbuf_depth = mei_dma_ring_hbuf_depth(dev); wr_idx = READ_ONCE(ctrl->hbuf_wr_idx) & (hbuf_depth - 1); slots = mei_data2slots(len); diff --git a/drivers/misc/mei/gsc-me.c b/drivers/misc/mei/gsc-me.c index e63cabd0818d..93cba090ea08 100644 --- a/drivers/misc/mei/gsc-me.c +++ b/drivers/misc/mei/gsc-me.c @@ -106,11 +106,15 @@ static int mei_gsc_probe(struct auxiliary_device *aux_dev, } } + ret = mei_register(dev, device); + if (ret) + goto deinterrupt; + pm_runtime_get_noresume(device); pm_runtime_set_active(device); pm_runtime_enable(device); - /* Continue to char device setup in spite of firmware handshake failure. + /* Continue in spite of firmware handshake failure. * In order to provide access to the firmware status registers to the user * space via sysfs. 
*/ @@ -120,18 +124,12 @@ static int mei_gsc_probe(struct auxiliary_device *aux_dev, pm_runtime_set_autosuspend_delay(device, MEI_GSC_RPM_TIMEOUT); pm_runtime_use_autosuspend(device); - ret = mei_register(dev, device); - if (ret) - goto register_err; - pm_runtime_put_noidle(device); return 0; -register_err: - mei_stop(dev); +deinterrupt: if (!mei_me_hw_use_polling(hw)) devm_free_irq(device, hw->irq, dev); - err: dev_err(device, "probe failed: %d\n", ret); dev_set_drvdata(device, NULL); @@ -144,9 +142,6 @@ static void mei_gsc_remove(struct auxiliary_device *aux_dev) struct mei_me_hw *hw; dev = dev_get_drvdata(&aux_dev->dev); - if (!dev) - return; - hw = to_me_hw(dev); mei_stop(dev); @@ -155,22 +150,19 @@ static void mei_gsc_remove(struct auxiliary_device *aux_dev) if (mei_me_hw_use_polling(hw)) kthread_stop(hw->polling_thread); - mei_deregister(dev); - pm_runtime_disable(&aux_dev->dev); mei_disable_interrupts(dev); if (!mei_me_hw_use_polling(hw)) devm_free_irq(&aux_dev->dev, hw->irq, dev); + + mei_deregister(dev); } static int __maybe_unused mei_gsc_pm_suspend(struct device *device) { struct mei_device *dev = dev_get_drvdata(device); - if (!dev) - return -ENODEV; - mei_stop(dev); mei_disable_interrupts(dev); @@ -186,9 +178,6 @@ static int __maybe_unused mei_gsc_pm_resume(struct device *device) int err; struct mei_me_hw *hw; - if (!dev) - return -ENODEV; - hw = to_me_hw(dev); aux_dev = to_auxiliary_dev(device); adev = auxiliary_dev_to_mei_aux_dev(aux_dev); @@ -211,8 +200,6 @@ static int __maybe_unused mei_gsc_pm_runtime_idle(struct device *device) { struct mei_device *dev = dev_get_drvdata(device); - if (!dev) - return -ENODEV; if (mei_write_is_idle(dev)) pm_runtime_autosuspend(device); @@ -225,9 +212,6 @@ static int __maybe_unused mei_gsc_pm_runtime_suspend(struct device *device) struct mei_me_hw *hw; int ret; - if (!dev) - return -ENODEV; - mutex_lock(&dev->device_lock); if (mei_write_is_idle(dev)) { @@ -252,9 +236,6 @@ static int __maybe_unused 
mei_gsc_pm_runtime_resume(struct device *device) struct mei_me_hw *hw; irqreturn_t irq_ret; - if (!dev) - return -ENODEV; - mutex_lock(&dev->device_lock); hw = to_me_hw(dev); @@ -269,7 +250,7 @@ static int __maybe_unused mei_gsc_pm_runtime_resume(struct device *device) irq_ret = mei_me_irq_thread_handler(1, dev); if (irq_ret != IRQ_HANDLED) - dev_err(dev->dev, "thread handler fail %d\n", irq_ret); + dev_err(&dev->dev, "thread handler fail %d\n", irq_ret); return 0; } @@ -293,6 +274,10 @@ static const struct auxiliary_device_id mei_gsc_id_table[] = { .driver_data = MEI_ME_GSCFI_CFG, }, { + .name = "xe.mei-gscfi", + .driver_data = MEI_ME_GSCFI_CFG, + }, + { /* sentinel */ } }; @@ -312,4 +297,6 @@ module_auxiliary_driver(mei_gsc_driver); MODULE_AUTHOR("Intel Corporation"); MODULE_ALIAS("auxiliary:i915.mei-gsc"); MODULE_ALIAS("auxiliary:i915.mei-gscfi"); +MODULE_ALIAS("auxiliary:xe.mei-gscfi"); +MODULE_DESCRIPTION("Intel(R) Graphics System Controller"); MODULE_LICENSE("GPL"); diff --git a/drivers/misc/mei/gsc_proxy/Kconfig b/drivers/misc/mei/gsc_proxy/Kconfig index 5f68d9f3d691..ac78b9d1eccd 100644 --- a/drivers/misc/mei/gsc_proxy/Kconfig +++ b/drivers/misc/mei/gsc_proxy/Kconfig @@ -3,7 +3,7 @@ # config INTEL_MEI_GSC_PROXY tristate "Intel GSC Proxy services of ME Interface" - select INTEL_MEI_ME + depends on INTEL_MEI_ME depends on DRM_I915 help MEI Support for GSC Proxy Services on Intel platforms. diff --git a/drivers/misc/mei/gsc_proxy/mei_gsc_proxy.c b/drivers/misc/mei/gsc_proxy/mei_gsc_proxy.c index be52b113aea9..f52fe23a6c0b 100644 --- a/drivers/misc/mei/gsc_proxy/mei_gsc_proxy.c +++ b/drivers/misc/mei/gsc_proxy/mei_gsc_proxy.c @@ -17,8 +17,8 @@ #include <linux/slab.h> #include <linux/uuid.h> #include <drm/drm_connector.h> -#include <drm/i915_component.h> -#include <drm/i915_gsc_proxy_mei_interface.h> +#include <drm/intel/i915_component.h> +#include <drm/intel/i915_gsc_proxy_mei_interface.h> /** * mei_gsc_proxy_send - Sends a proxy message to ME FW. 
@@ -96,7 +96,8 @@ static const struct component_master_ops mei_component_master_ops = { * * The function checks if the device is pci device and * Intel VGA adapter, the subcomponent is SW Proxy - * and the parent of MEI PCI and the parent of VGA are the same PCH device. + * and the VGA is on the bus 0 reserved for built-in devices + * to reject discrete GFX. * * @dev: master device * @subcomponent: subcomponent to match (I915_COMPONENT_SWPROXY) @@ -123,7 +124,8 @@ static int mei_gsc_proxy_component_match(struct device *dev, int subcomponent, if (subcomponent != I915_COMPONENT_GSC_PROXY) return 0; - return component_compare_dev(dev->parent, ((struct device *)data)->parent); + /* Only built-in GFX */ + return (pdev->bus->number == 0); } static int mei_gsc_proxy_probe(struct mei_cl_device *cldev, @@ -146,7 +148,7 @@ static int mei_gsc_proxy_probe(struct mei_cl_device *cldev, } component_match_add_typed(&cldev->dev, &master_match, - mei_gsc_proxy_component_match, cldev->dev.parent); + mei_gsc_proxy_component_match, NULL); if (IS_ERR_OR_NULL(master_match)) { ret = -ENOMEM; goto err_exit; diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c index 12a62a911e42..ccd9df5d1c7d 100644 --- a/drivers/misc/mei/hbm.c +++ b/drivers/misc/mei/hbm.c @@ -91,6 +91,8 @@ static int mei_cl_conn_status_to_errno(enum mei_cl_connect_status status) * @dev: mei device * @hdr: mei header * @data: payload + * + * Return: >=0 on success, <0 on error */ static inline int mei_hbm_write_message(struct mei_device *dev, struct mei_msg_hdr *hdr, @@ -111,7 +113,7 @@ void mei_hbm_idle(struct mei_device *dev) } /** - * mei_hbm_reset - reset hbm counters and book keeping data structurs + * mei_hbm_reset - reset hbm counters and book keeping data structures * * @dev: the device structure */ @@ -237,7 +239,7 @@ int mei_hbm_start_wait(struct mei_device *dev) if (ret == 0 && (dev->hbm_state <= MEI_HBM_STARTING)) { dev->hbm_state = MEI_HBM_IDLE; - dev_err(dev->dev, "waiting for mei start failed\n"); + 
dev_err(&dev->dev, "waiting for mei start failed\n"); return -ETIME; } return 0; @@ -269,8 +271,7 @@ int mei_hbm_start_req(struct mei_device *dev) dev->hbm_state = MEI_HBM_IDLE; ret = mei_hbm_write_message(dev, &mei_hdr, &req); if (ret) { - dev_err(dev->dev, "version message write failed: ret = %d\n", - ret); + dev_err(&dev->dev, "version message write failed: ret = %d\n", ret); return ret; } @@ -310,8 +311,7 @@ static int mei_hbm_dma_setup_req(struct mei_device *dev) ret = mei_hbm_write_message(dev, &mei_hdr, &req); if (ret) { - dev_err(dev->dev, "dma setup request write failed: ret = %d.\n", - ret); + dev_err(&dev->dev, "dma setup request write failed: ret = %d.\n", ret); return ret; } @@ -349,8 +349,7 @@ static int mei_hbm_capabilities_req(struct mei_device *dev) ret = mei_hbm_write_message(dev, &mei_hdr, &req); if (ret) { - dev_err(dev->dev, - "capabilities request write failed: ret = %d.\n", ret); + dev_err(&dev->dev, "capabilities request write failed: ret = %d.\n", ret); return ret; } @@ -384,8 +383,7 @@ static int mei_hbm_enum_clients_req(struct mei_device *dev) ret = mei_hbm_write_message(dev, &mei_hdr, &req); if (ret) { - dev_err(dev->dev, "enumeration request write failed: ret = %d.\n", - ret); + dev_err(&dev->dev, "enumeration request write failed: ret = %d.\n", ret); return ret; } dev->hbm_state = MEI_HBM_ENUM_CLIENTS; @@ -441,7 +439,7 @@ static int mei_hbm_add_cl_resp(struct mei_device *dev, u8 addr, u8 status) struct hbm_add_client_response resp; int ret; - dev_dbg(dev->dev, "adding client response\n"); + dev_dbg(&dev->dev, "adding client response\n"); mei_hbm_hdr(&mei_hdr, sizeof(resp)); @@ -452,8 +450,7 @@ static int mei_hbm_add_cl_resp(struct mei_device *dev, u8 addr, u8 status) ret = mei_hbm_write_message(dev, &mei_hdr, &resp); if (ret) - dev_err(dev->dev, "add client response write failed: ret = %d\n", - ret); + dev_err(&dev->dev, "add client response write failed: ret = %d\n", ret); return ret; } @@ -508,7 +505,7 @@ int 
mei_hbm_cl_notify_req(struct mei_device *dev, ret = mei_hbm_write_message(dev, &mei_hdr, &req); if (ret) - dev_err(dev->dev, "notify request failed: ret = %d\n", ret); + cl_err(dev, cl, "notify request failed: ret = %d\n", ret); return ret; } @@ -624,7 +621,7 @@ int mei_hbm_cl_dma_map_req(struct mei_device *dev, struct mei_cl *cl) ret = mei_hbm_write_message(dev, &mei_hdr, &req); if (ret) - dev_err(dev->dev, "dma map request failed: ret = %d\n", ret); + cl_err(dev, cl, "dma map request failed: ret = %d\n", ret); return ret; } @@ -652,7 +649,7 @@ int mei_hbm_cl_dma_unmap_req(struct mei_device *dev, struct mei_cl *cl) ret = mei_hbm_write_message(dev, &mei_hdr, &req); if (ret) - dev_err(dev->dev, "dma unmap request failed: ret = %d\n", ret); + cl_err(dev, cl, "dma unmap request failed: ret = %d\n", ret); return ret; } @@ -677,10 +674,10 @@ static void mei_hbm_cl_dma_map_res(struct mei_device *dev, return; if (res->status) { - dev_err(dev->dev, "cl dma map failed %d\n", res->status); + cl_err(dev, cl, "cl dma map failed %d\n", res->status); cl->status = -EFAULT; } else { - dev_dbg(dev->dev, "cl dma map succeeded\n"); + cl_dbg(dev, cl, "cl dma map succeeded\n"); cl->dma_mapped = 1; cl->status = 0; } @@ -707,10 +704,10 @@ static void mei_hbm_cl_dma_unmap_res(struct mei_device *dev, return; if (res->status) { - dev_err(dev->dev, "cl dma unmap failed %d\n", res->status); + cl_err(dev, cl, "cl dma unmap failed %d\n", res->status); cl->status = -EFAULT; } else { - dev_dbg(dev->dev, "cl dma unmap succeeded\n"); + cl_dbg(dev, cl, "cl dma unmap succeeded\n"); cl->dma_mapped = 0; cl->status = 0; } @@ -750,7 +747,7 @@ static int mei_hbm_prop_req(struct mei_device *dev, unsigned long start_idx) ret = mei_hbm_write_message(dev, &mei_hdr, &req); if (ret) { - dev_err(dev->dev, "properties request write failed: ret = %d\n", + dev_err(&dev->dev, "properties request write failed: ret = %d\n", ret); return ret; } @@ -786,7 +783,7 @@ int mei_hbm_pg(struct mei_device *dev, u8 pg_cmd) ret = 
mei_hbm_write_message(dev, &mei_hdr, &req); if (ret) - dev_err(dev->dev, "power gate command write failed.\n"); + dev_err(&dev->dev, "power gate command write failed.\n"); return ret; } EXPORT_SYMBOL_GPL(mei_hbm_pg); @@ -845,7 +842,7 @@ static int mei_hbm_add_single_tx_flow_ctrl_creds(struct mei_device *dev, me_cl = mei_me_cl_by_id(dev, fctrl->me_addr); if (!me_cl) { - dev_err(dev->dev, "no such me client %d\n", fctrl->me_addr); + dev_err(&dev->dev, "no such me client %d\n", fctrl->me_addr); return -ENOENT; } @@ -855,7 +852,7 @@ static int mei_hbm_add_single_tx_flow_ctrl_creds(struct mei_device *dev, } me_cl->tx_flow_ctrl_creds++; - dev_dbg(dev->dev, "recv flow ctrl msg ME %d (single) creds = %d.\n", + dev_dbg(&dev->dev, "recv flow ctrl msg ME %d (single) creds = %d.\n", fctrl->me_addr, me_cl->tx_flow_ctrl_creds); rets = 0; @@ -907,7 +904,7 @@ int mei_hbm_cl_disconnect_req(struct mei_device *dev, struct mei_cl *cl) } /** - * mei_hbm_cl_disconnect_rsp - sends disconnect respose to the FW + * mei_hbm_cl_disconnect_rsp - sends disconnect response to the FW * * @dev: the device structure * @cl: a client to disconnect from @@ -1083,7 +1080,7 @@ static int mei_hbm_pg_enter_res(struct mei_device *dev) { if (mei_pg_state(dev) != MEI_PG_OFF || dev->pg_event != MEI_PG_EVENT_WAIT) { - dev_err(dev->dev, "hbm: pg entry response: state mismatch [%s, %d]\n", + dev_err(&dev->dev, "hbm: pg entry response: state mismatch [%s, %d]\n", mei_pg_state_str(mei_pg_state(dev)), dev->pg_event); return -EPROTO; } @@ -1101,7 +1098,7 @@ static int mei_hbm_pg_enter_res(struct mei_device *dev) */ void mei_hbm_pg_resume(struct mei_device *dev) { - pm_request_resume(dev->dev); + pm_request_resume(dev->parent); } EXPORT_SYMBOL_GPL(mei_hbm_pg_resume); @@ -1117,7 +1114,7 @@ static int mei_hbm_pg_exit_res(struct mei_device *dev) if (mei_pg_state(dev) != MEI_PG_ON || (dev->pg_event != MEI_PG_EVENT_WAIT && dev->pg_event != MEI_PG_EVENT_IDLE)) { - dev_err(dev->dev, "hbm: pg exit response: state mismatch 
[%s, %d]\n", + dev_err(&dev->dev, "hbm: pg exit response: state mismatch [%s, %d]\n", mei_pg_state_str(mei_pg_state(dev)), dev->pg_event); return -EPROTO; } @@ -1274,19 +1271,19 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) * hbm is put to idle during system reset */ if (dev->hbm_state == MEI_HBM_IDLE) { - dev_dbg(dev->dev, "hbm: state is idle ignore spurious messages\n"); + dev_dbg(&dev->dev, "hbm: state is idle ignore spurious messages\n"); return 0; } switch (mei_msg->hbm_cmd) { case HOST_START_RES_CMD: - dev_dbg(dev->dev, "hbm: start: response message received.\n"); + dev_dbg(&dev->dev, "hbm: start: response message received.\n"); dev->init_clients_timer = 0; version_res = (struct hbm_host_version_response *)mei_msg; - dev_dbg(dev->dev, "HBM VERSION: DRIVER=%02d:%02d DEVICE=%02d:%02d\n", + dev_dbg(&dev->dev, "HBM VERSION: DRIVER=%02d:%02d DEVICE=%02d:%02d\n", HBM_MAJOR_VERSION, HBM_MINOR_VERSION, version_res->me_max_version.major_version, version_res->me_max_version.minor_version); @@ -1302,11 +1299,11 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) } if (!mei_hbm_version_is_supported(dev)) { - dev_warn(dev->dev, "hbm: start: version mismatch - stopping the driver.\n"); + dev_warn(&dev->dev, "hbm: start: version mismatch - stopping the driver.\n"); dev->hbm_state = MEI_HBM_STOPPED; if (mei_hbm_stop_req(dev)) { - dev_err(dev->dev, "hbm: start: failed to send stop request\n"); + dev_err(&dev->dev, "hbm: start: failed to send stop request\n"); return -EIO; } break; @@ -1318,10 +1315,10 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) dev->hbm_state != MEI_HBM_STARTING) { if (dev->dev_state == MEI_DEV_POWER_DOWN || dev->dev_state == MEI_DEV_POWERING_DOWN) { - dev_dbg(dev->dev, "hbm: start: on shutdown, ignoring\n"); + dev_dbg(&dev->dev, "hbm: start: on shutdown, ignoring\n"); return 0; } - dev_err(dev->dev, "hbm: start: state mismatch, [%d, %d]\n", + dev_err(&dev->dev, "hbm: start: state 
mismatch, [%d, %d]\n", dev->dev_state, dev->hbm_state); return -EPROTO; } @@ -1335,7 +1332,7 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) if (dev->hbm_f_dr_supported) { if (mei_dmam_ring_alloc(dev)) - dev_info(dev->dev, "running w/o dma ring\n"); + dev_info(&dev->dev, "running w/o dma ring\n"); if (mei_dma_ring_is_allocated(dev)) { if (mei_hbm_dma_setup_req(dev)) return -EIO; @@ -1355,7 +1352,7 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) break; case MEI_HBM_CAPABILITIES_RES_CMD: - dev_dbg(dev->dev, "hbm: capabilities response: message received.\n"); + dev_dbg(&dev->dev, "hbm: capabilities response: message received.\n"); dev->init_clients_timer = 0; @@ -1363,10 +1360,10 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) dev->hbm_state != MEI_HBM_CAP_SETUP) { if (dev->dev_state == MEI_DEV_POWER_DOWN || dev->dev_state == MEI_DEV_POWERING_DOWN) { - dev_dbg(dev->dev, "hbm: capabilities response: on shutdown, ignoring\n"); + dev_dbg(&dev->dev, "hbm: capabilities response: on shutdown, ignoring\n"); return 0; } - dev_err(dev->dev, "hbm: capabilities response: state mismatch, [%d, %d]\n", + dev_err(&dev->dev, "hbm: capabilities response: state mismatch, [%d, %d]\n", dev->dev_state, dev->hbm_state); return -EPROTO; } @@ -1382,7 +1379,7 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) if (dev->hbm_f_dr_supported) { if (mei_dmam_ring_alloc(dev)) - dev_info(dev->dev, "running w/o dma ring\n"); + dev_info(&dev->dev, "running w/o dma ring\n"); if (mei_dma_ring_is_allocated(dev)) { if (mei_hbm_dma_setup_req(dev)) return -EIO; @@ -1398,7 +1395,7 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) break; case MEI_HBM_DMA_SETUP_RES_CMD: - dev_dbg(dev->dev, "hbm: dma setup response: message received.\n"); + dev_dbg(&dev->dev, "hbm: dma setup response: message received.\n"); dev->init_clients_timer = 0; @@ -1406,10 +1403,10 @@ int mei_hbm_dispatch(struct 
mei_device *dev, struct mei_msg_hdr *hdr) dev->hbm_state != MEI_HBM_DR_SETUP) { if (dev->dev_state == MEI_DEV_POWER_DOWN || dev->dev_state == MEI_DEV_POWERING_DOWN) { - dev_dbg(dev->dev, "hbm: dma setup response: on shutdown, ignoring\n"); + dev_dbg(&dev->dev, "hbm: dma setup response: on shutdown, ignoring\n"); return 0; } - dev_err(dev->dev, "hbm: dma setup response: state mismatch, [%d, %d]\n", + dev_err(&dev->dev, "hbm: dma setup response: state mismatch, [%d, %d]\n", dev->dev_state, dev->hbm_state); return -EPROTO; } @@ -1420,9 +1417,9 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) u8 status = dma_setup_res->status; if (status == MEI_HBMS_NOT_ALLOWED) { - dev_dbg(dev->dev, "hbm: dma setup not allowed\n"); + dev_dbg(&dev->dev, "hbm: dma setup not allowed\n"); } else { - dev_info(dev->dev, "hbm: dma setup response: failure = %d %s\n", + dev_info(&dev->dev, "hbm: dma setup response: failure = %d %s\n", status, mei_hbm_status_str(status)); } @@ -1435,38 +1432,38 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) break; case CLIENT_CONNECT_RES_CMD: - dev_dbg(dev->dev, "hbm: client connect response: message received.\n"); + dev_dbg(&dev->dev, "hbm: client connect response: message received.\n"); mei_hbm_cl_res(dev, cl_cmd, MEI_FOP_CONNECT); break; case CLIENT_DISCONNECT_RES_CMD: - dev_dbg(dev->dev, "hbm: client disconnect response: message received.\n"); + dev_dbg(&dev->dev, "hbm: client disconnect response: message received.\n"); mei_hbm_cl_res(dev, cl_cmd, MEI_FOP_DISCONNECT); break; case MEI_FLOW_CONTROL_CMD: - dev_dbg(dev->dev, "hbm: client flow control response: message received.\n"); + dev_dbg(&dev->dev, "hbm: client flow control response: message received.\n"); fctrl = (struct hbm_flow_control *)mei_msg; mei_hbm_cl_tx_flow_ctrl_creds_res(dev, fctrl); break; case MEI_PG_ISOLATION_ENTRY_RES_CMD: - dev_dbg(dev->dev, "hbm: power gate isolation entry response received\n"); + dev_dbg(&dev->dev, "hbm: power gate 
isolation entry response received\n"); ret = mei_hbm_pg_enter_res(dev); if (ret) return ret; break; case MEI_PG_ISOLATION_EXIT_REQ_CMD: - dev_dbg(dev->dev, "hbm: power gate isolation exit request received\n"); + dev_dbg(&dev->dev, "hbm: power gate isolation exit request received\n"); ret = mei_hbm_pg_exit_res(dev); if (ret) return ret; break; case HOST_CLIENT_PROPERTIES_RES_CMD: - dev_dbg(dev->dev, "hbm: properties response: message received.\n"); + dev_dbg(&dev->dev, "hbm: properties response: message received.\n"); dev->init_clients_timer = 0; @@ -1474,10 +1471,10 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) dev->hbm_state != MEI_HBM_CLIENT_PROPERTIES) { if (dev->dev_state == MEI_DEV_POWER_DOWN || dev->dev_state == MEI_DEV_POWERING_DOWN) { - dev_dbg(dev->dev, "hbm: properties response: on shutdown, ignoring\n"); + dev_dbg(&dev->dev, "hbm: properties response: on shutdown, ignoring\n"); return 0; } - dev_err(dev->dev, "hbm: properties response: state mismatch, [%d, %d]\n", + dev_err(&dev->dev, "hbm: properties response: state mismatch, [%d, %d]\n", dev->dev_state, dev->hbm_state); return -EPROTO; } @@ -1485,10 +1482,10 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) props_res = (struct hbm_props_response *)mei_msg; if (props_res->status == MEI_HBMS_CLIENT_NOT_FOUND) { - dev_dbg(dev->dev, "hbm: properties response: %d CLIENT_NOT_FOUND\n", + dev_dbg(&dev->dev, "hbm: properties response: %d CLIENT_NOT_FOUND\n", props_res->me_addr); } else if (props_res->status) { - dev_err(dev->dev, "hbm: properties response: wrong status = %d %s\n", + dev_err(&dev->dev, "hbm: properties response: wrong status = %d %s\n", props_res->status, mei_hbm_status_str(props_res->status)); return -EPROTO; @@ -1503,7 +1500,7 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) break; case HOST_ENUM_RES_CMD: - dev_dbg(dev->dev, "hbm: enumeration response: message received\n"); + dev_dbg(&dev->dev, "hbm: enumeration 
response: message received\n"); dev->init_clients_timer = 0; @@ -1517,10 +1514,10 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) dev->hbm_state != MEI_HBM_ENUM_CLIENTS) { if (dev->dev_state == MEI_DEV_POWER_DOWN || dev->dev_state == MEI_DEV_POWERING_DOWN) { - dev_dbg(dev->dev, "hbm: enumeration response: on shutdown, ignoring\n"); + dev_dbg(&dev->dev, "hbm: enumeration response: on shutdown, ignoring\n"); return 0; } - dev_err(dev->dev, "hbm: enumeration response: state mismatch, [%d, %d]\n", + dev_err(&dev->dev, "hbm: enumeration response: state mismatch, [%d, %d]\n", dev->dev_state, dev->hbm_state); return -EPROTO; } @@ -1534,77 +1531,77 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) break; case HOST_STOP_RES_CMD: - dev_dbg(dev->dev, "hbm: stop response: message received\n"); + dev_dbg(&dev->dev, "hbm: stop response: message received\n"); dev->init_clients_timer = 0; if (dev->hbm_state != MEI_HBM_STOPPED) { - dev_err(dev->dev, "hbm: stop response: state mismatch, [%d, %d]\n", + dev_err(&dev->dev, "hbm: stop response: state mismatch, [%d, %d]\n", dev->dev_state, dev->hbm_state); return -EPROTO; } mei_set_devstate(dev, MEI_DEV_POWER_DOWN); - dev_info(dev->dev, "hbm: stop response: resetting.\n"); + dev_info(&dev->dev, "hbm: stop response: resetting.\n"); /* force the reset */ return -EPROTO; case CLIENT_DISCONNECT_REQ_CMD: - dev_dbg(dev->dev, "hbm: disconnect request: message received\n"); + dev_dbg(&dev->dev, "hbm: disconnect request: message received\n"); disconnect_req = (struct hbm_client_connect_request *)mei_msg; mei_hbm_fw_disconnect_req(dev, disconnect_req); break; case ME_STOP_REQ_CMD: - dev_dbg(dev->dev, "hbm: stop request: message received\n"); + dev_dbg(&dev->dev, "hbm: stop request: message received\n"); dev->hbm_state = MEI_HBM_STOPPED; if (mei_hbm_stop_req(dev)) { - dev_err(dev->dev, "hbm: stop request: failed to send stop request\n"); + dev_err(&dev->dev, "hbm: stop request: failed to send stop 
request\n"); return -EIO; } break; case MEI_HBM_ADD_CLIENT_REQ_CMD: - dev_dbg(dev->dev, "hbm: add client request received\n"); + dev_dbg(&dev->dev, "hbm: add client request received\n"); /* * after the host receives the enum_resp * message clients may be added or removed */ if (dev->hbm_state <= MEI_HBM_ENUM_CLIENTS || dev->hbm_state >= MEI_HBM_STOPPED) { - dev_err(dev->dev, "hbm: add client: state mismatch, [%d, %d]\n", + dev_err(&dev->dev, "hbm: add client: state mismatch, [%d, %d]\n", dev->dev_state, dev->hbm_state); return -EPROTO; } add_cl_req = (struct hbm_add_client_request *)mei_msg; ret = mei_hbm_fw_add_cl_req(dev, add_cl_req); if (ret) { - dev_err(dev->dev, "hbm: add client: failed to send response %d\n", + dev_err(&dev->dev, "hbm: add client: failed to send response %d\n", ret); return -EIO; } - dev_dbg(dev->dev, "hbm: add client request processed\n"); + dev_dbg(&dev->dev, "hbm: add client request processed\n"); break; case MEI_HBM_NOTIFY_RES_CMD: - dev_dbg(dev->dev, "hbm: notify response received\n"); + dev_dbg(&dev->dev, "hbm: notify response received\n"); mei_hbm_cl_res(dev, cl_cmd, notify_res_to_fop(cl_cmd)); break; case MEI_HBM_NOTIFICATION_CMD: - dev_dbg(dev->dev, "hbm: notification\n"); + dev_dbg(&dev->dev, "hbm: notification\n"); mei_hbm_cl_notify(dev, cl_cmd); break; case MEI_HBM_CLIENT_DMA_MAP_RES_CMD: - dev_dbg(dev->dev, "hbm: client dma map response: message received.\n"); + dev_dbg(&dev->dev, "hbm: client dma map response: message received.\n"); client_dma_res = (struct hbm_client_dma_response *)mei_msg; mei_hbm_cl_dma_map_res(dev, client_dma_res); break; case MEI_HBM_CLIENT_DMA_UNMAP_RES_CMD: - dev_dbg(dev->dev, "hbm: client dma unmap response: message received.\n"); + dev_dbg(&dev->dev, "hbm: client dma unmap response: message received.\n"); client_dma_res = (struct hbm_client_dma_response *)mei_msg; mei_hbm_cl_dma_unmap_res(dev, client_dma_res); break; diff --git a/drivers/misc/mei/hdcp/Kconfig b/drivers/misc/mei/hdcp/Kconfig index 
54e1c9526909..631dd9651d7c 100644 --- a/drivers/misc/mei/hdcp/Kconfig +++ b/drivers/misc/mei/hdcp/Kconfig @@ -3,8 +3,8 @@ # config INTEL_MEI_HDCP tristate "Intel HDCP2.2 services of ME Interface" - select INTEL_MEI_ME - depends on DRM_I915 + depends on INTEL_MEI_ME + depends on DRM_I915 || DRM_XE help MEI Support for HDCP2.2 Services on Intel platforms. diff --git a/drivers/misc/mei/hdcp/mei_hdcp.c b/drivers/misc/mei/hdcp/mei_hdcp.c index 51359cc5ece9..323f10620d90 100644 --- a/drivers/misc/mei/hdcp/mei_hdcp.c +++ b/drivers/misc/mei/hdcp/mei_hdcp.c @@ -17,13 +17,14 @@ */ #include <linux/module.h> +#include <linux/pci.h> #include <linux/slab.h> #include <linux/mei.h> #include <linux/mei_cl_bus.h> #include <linux/component.h> #include <drm/drm_connector.h> -#include <drm/i915_component.h> -#include <drm/i915_hdcp_interface.h> +#include <drm/intel/i915_component.h> +#include <drm/intel/i915_hdcp_interface.h> #include "mei_hdcp.h" @@ -781,9 +782,18 @@ static int mei_hdcp_component_match(struct device *dev, int subcomponent, void *data) { struct device *base = data; + struct pci_dev *pdev; - if (!dev->driver || strcmp(dev->driver->name, "i915") || - subcomponent != I915_COMPONENT_HDCP) + if (!dev_is_pci(dev)) + return 0; + + pdev = to_pci_dev(dev); + + if (pdev->class != (PCI_CLASS_DISPLAY_VGA << 8) || + pdev->vendor != PCI_VENDOR_ID_INTEL) + return 0; + + if (subcomponent != I915_COMPONENT_HDCP) return 0; base = base->parent; diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h index bdc65d50b945..a4f75dc36929 100644 --- a/drivers/misc/mei/hw-me-regs.h +++ b/drivers/misc/mei/hw-me-regs.h @@ -112,6 +112,15 @@ #define MEI_DEV_ID_RPL_S 0x7A68 /* Raptor Lake Point S */ #define MEI_DEV_ID_MTL_M 0x7E70 /* Meteor Lake Point M */ +#define MEI_DEV_ID_ARL_S 0x7F68 /* Arrow Lake Point S */ +#define MEI_DEV_ID_ARL_H 0x7770 /* Arrow Lake Point H */ + +#define MEI_DEV_ID_LNL_M 0xA870 /* Lunar Lake Point M */ + +#define MEI_DEV_ID_PTL_H 0xE370 /* Panther Lake H 
*/ +#define MEI_DEV_ID_PTL_P 0xE470 /* Panther Lake P */ + +#define MEI_DEV_ID_WCL_P 0x4D70 /* Wildcat Lake P */ /* * MEI HW Section @@ -123,6 +132,9 @@ # define PCI_CFG_HFS_1_OPMODE_MSK 0xf0000 /* OP MODE Mask: SPS <= 4.0 */ # define PCI_CFG_HFS_1_OPMODE_SPS 0xf0000 /* SPS SKU : SPS <= 4.0 */ #define PCI_CFG_HFS_2 0x48 +# define PCI_CFG_HFS_2_PM_CMOFF_TO_CMX_ERROR 0x1000000 /* CMoff->CMx wake after an error */ +# define PCI_CFG_HFS_2_PM_CM_RESET_ERROR 0x5000000 /* CME reset due to exception */ +# define PCI_CFG_HFS_2_PM_EVENT_MASK 0xf000000 #define PCI_CFG_HFS_3 0x60 # define PCI_CFG_HFS_3_FW_SKU_MSK 0x00000070 # define PCI_CFG_HFS_3_FW_SKU_IGN 0x00000000 diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c index da4ef0b51954..d4612c659784 100644 --- a/drivers/misc/mei/hw-me.c +++ b/drivers/misc/mei/hw-me.c @@ -84,7 +84,7 @@ static inline u32 mei_me_mecsr_read(const struct mei_device *dev) u32 reg; reg = mei_me_reg_read(to_me_hw(dev), ME_CSR_HA); - trace_mei_reg_read(dev->dev, "ME_CSR_HA", ME_CSR_HA, reg); + trace_mei_reg_read(&dev->dev, "ME_CSR_HA", ME_CSR_HA, reg); return reg; } @@ -101,7 +101,7 @@ static inline u32 mei_hcsr_read(const struct mei_device *dev) u32 reg; reg = mei_me_reg_read(to_me_hw(dev), H_CSR); - trace_mei_reg_read(dev->dev, "H_CSR", H_CSR, reg); + trace_mei_reg_read(&dev->dev, "H_CSR", H_CSR, reg); return reg; } @@ -114,7 +114,7 @@ static inline u32 mei_hcsr_read(const struct mei_device *dev) */ static inline void mei_hcsr_write(struct mei_device *dev, u32 reg) { - trace_mei_reg_write(dev->dev, "H_CSR", H_CSR, reg); + trace_mei_reg_write(&dev->dev, "H_CSR", H_CSR, reg); mei_me_reg_write(to_me_hw(dev), H_CSR, reg); } @@ -156,7 +156,7 @@ static inline u32 mei_me_d0i3c_read(const struct mei_device *dev) u32 reg; reg = mei_me_reg_read(to_me_hw(dev), H_D0I3C); - trace_mei_reg_read(dev->dev, "H_D0I3C", H_D0I3C, reg); + trace_mei_reg_read(&dev->dev, "H_D0I3C", H_D0I3C, reg); return reg; } @@ -169,7 +169,7 @@ static inline u32 
mei_me_d0i3c_read(const struct mei_device *dev) */ static inline void mei_me_d0i3c_write(struct mei_device *dev, u32 reg) { - trace_mei_reg_write(dev->dev, "H_D0I3C", H_D0I3C, reg); + trace_mei_reg_write(&dev->dev, "H_D0I3C", H_D0I3C, reg); mei_me_reg_write(to_me_hw(dev), H_D0I3C, reg); } @@ -189,7 +189,7 @@ static int mei_me_trc_status(struct mei_device *dev, u32 *trc) return -EOPNOTSUPP; *trc = mei_me_reg_read(hw, ME_TRC); - trace_mei_reg_read(dev->dev, "ME_TRC", ME_TRC, *trc); + trace_mei_reg_read(&dev->dev, "ME_TRC", ME_TRC, *trc); return 0; } @@ -217,7 +217,7 @@ static int mei_me_fw_status(struct mei_device *dev, for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) { ret = hw->read_fws(dev, fw_src->status[i], &fw_status->status[i]); - trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_X", + trace_mei_pci_cfg_read(&dev->dev, "PCI_CFG_HFS_X", fw_src->status[i], fw_status->status[i]); if (ret) @@ -251,7 +251,7 @@ static int mei_me_hw_config(struct mei_device *dev) reg = 0; hw->read_fws(dev, PCI_CFG_HFS_1, ®); - trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg); + trace_mei_pci_cfg_read(&dev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg); hw->d0i3_supported = ((reg & PCI_CFG_HFS_1_D0I3_MSK) == PCI_CFG_HFS_1_D0I3_MSK); @@ -443,16 +443,27 @@ static void mei_gsc_pxp_check(struct mei_device *dev) struct mei_me_hw *hw = to_me_hw(dev); u32 fwsts5 = 0; - if (dev->pxp_mode == MEI_DEV_PXP_DEFAULT) + if (!kind_is_gsc(dev) && !kind_is_gscfi(dev)) return; hw->read_fws(dev, PCI_CFG_HFS_5, &fwsts5); - trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_5", PCI_CFG_HFS_5, fwsts5); + trace_mei_pci_cfg_read(&dev->dev, "PCI_CFG_HFS_5", PCI_CFG_HFS_5, fwsts5); + if ((fwsts5 & GSC_CFG_HFS_5_BOOT_TYPE_MSK) == GSC_CFG_HFS_5_BOOT_TYPE_PXP) { - dev_dbg(dev->dev, "pxp mode is ready 0x%08x\n", fwsts5); + if (dev->gsc_reset_to_pxp == MEI_DEV_RESET_TO_PXP_DEFAULT) + dev->gsc_reset_to_pxp = MEI_DEV_RESET_TO_PXP_PERFORMED; + } else { + dev->gsc_reset_to_pxp = 
MEI_DEV_RESET_TO_PXP_DEFAULT; + } + + if (dev->pxp_mode == MEI_DEV_PXP_DEFAULT) + return; + + if ((fwsts5 & GSC_CFG_HFS_5_BOOT_TYPE_MSK) == GSC_CFG_HFS_5_BOOT_TYPE_PXP) { + dev_dbg(&dev->dev, "pxp mode is ready 0x%08x\n", fwsts5); dev->pxp_mode = MEI_DEV_PXP_READY; } else { - dev_dbg(dev->dev, "pxp mode is not ready 0x%08x\n", fwsts5); + dev_dbg(&dev->dev, "pxp mode is not ready 0x%08x\n", fwsts5); } } @@ -471,7 +482,7 @@ static int mei_me_hw_ready_wait(struct mei_device *dev) dev->timeouts.hw_ready); mutex_lock(&dev->device_lock); if (!dev->recvd_hw_ready) { - dev_err(dev->dev, "wait hw ready failed\n"); + dev_err(&dev->dev, "wait hw ready failed\n"); return -ETIME; } @@ -492,9 +503,12 @@ static int mei_me_hw_start(struct mei_device *dev) { int ret = mei_me_hw_ready_wait(dev); + if ((kind_is_gsc(dev) || kind_is_gscfi(dev)) && + dev->gsc_reset_to_pxp == MEI_DEV_RESET_TO_PXP_PERFORMED) + dev->gsc_reset_to_pxp = MEI_DEV_RESET_TO_PXP_DONE; if (ret) return ret; - dev_dbg(dev->dev, "hw is ready\n"); + dev_dbg(&dev->dev, "hw is ready\n"); mei_me_host_set_ready(dev); return ret; @@ -594,14 +608,14 @@ static int mei_me_hbuf_write(struct mei_device *dev, return -EINVAL; if (!data && data_len) { - dev_err(dev->dev, "wrong parameters null data with data_len = %zu\n", data_len); + dev_err(&dev->dev, "wrong parameters null data with data_len = %zu\n", data_len); return -EINVAL; } - dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM((struct mei_msg_hdr *)hdr)); + dev_dbg(&dev->dev, MEI_HDR_FMT, MEI_HDR_PRM((struct mei_msg_hdr *)hdr)); empty_slots = mei_hbuf_empty_slots(dev); - dev_dbg(dev->dev, "empty slots = %d.\n", empty_slots); + dev_dbg(&dev->dev, "empty slots = %d.\n", empty_slots); if (empty_slots < 0) return -EOVERFLOW; @@ -656,7 +670,7 @@ static int mei_me_count_full_read_slots(struct mei_device *dev) if (filled_slots > buffer_depth) return -EOVERFLOW; - dev_dbg(dev->dev, "filled_slots =%08x\n", filled_slots); + dev_dbg(&dev->dev, "filled_slots =%08x\n", filled_slots); return 
(int)filled_slots; } @@ -698,11 +712,11 @@ static void mei_me_pg_set(struct mei_device *dev) u32 reg; reg = mei_me_reg_read(hw, H_HPG_CSR); - trace_mei_reg_read(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg); + trace_mei_reg_read(&dev->dev, "H_HPG_CSR", H_HPG_CSR, reg); reg |= H_HPG_CSR_PGI; - trace_mei_reg_write(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg); + trace_mei_reg_write(&dev->dev, "H_HPG_CSR", H_HPG_CSR, reg); mei_me_reg_write(hw, H_HPG_CSR, reg); } @@ -717,13 +731,13 @@ static void mei_me_pg_unset(struct mei_device *dev) u32 reg; reg = mei_me_reg_read(hw, H_HPG_CSR); - trace_mei_reg_read(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg); + trace_mei_reg_read(&dev->dev, "H_HPG_CSR", H_HPG_CSR, reg); WARN(!(reg & H_HPG_CSR_PGI), "PGI is not set\n"); reg |= H_HPG_CSR_PGIHEXR; - trace_mei_reg_write(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg); + trace_mei_reg_write(&dev->dev, "H_HPG_CSR", H_HPG_CSR, reg); mei_me_reg_write(hw, H_HPG_CSR, reg); } @@ -855,7 +869,7 @@ static bool mei_me_pg_is_enabled(struct mei_device *dev) return true; notsupported: - dev_dbg(dev->dev, "pg: not supported: d0i3 = %d HGP = %d hbm version %d.%d ?= %d.%d\n", + dev_dbg(&dev->dev, "pg: not supported: d0i3 = %d HGP = %d hbm version %d.%d ?= %d.%d\n", hw->d0i3_supported, !!(reg & ME_PGIC_HRA), dev->version.major_version, @@ -924,7 +938,7 @@ static int mei_me_d0i3_enter_sync(struct mei_device *dev) reg = mei_me_d0i3c_read(dev); if (reg & H_D0I3C_I3) { /* we are in d0i3, nothing to do */ - dev_dbg(dev->dev, "d0i3 set not needed\n"); + dev_dbg(&dev->dev, "d0i3 set not needed\n"); ret = 0; goto on; } @@ -953,7 +967,7 @@ static int mei_me_d0i3_enter_sync(struct mei_device *dev) reg = mei_me_d0i3_set(dev, true); if (!(reg & H_D0I3C_CIP)) { - dev_dbg(dev->dev, "d0i3 enter wait not needed\n"); + dev_dbg(&dev->dev, "d0i3 enter wait not needed\n"); ret = 0; goto on; } @@ -977,7 +991,7 @@ on: hw->pg_state = MEI_PG_ON; out: dev->pg_event = MEI_PG_EVENT_IDLE; - dev_dbg(dev->dev, "d0i3 enter ret = %d\n", ret); + 
dev_dbg(&dev->dev, "d0i3 enter ret = %d\n", ret); return ret; } @@ -999,7 +1013,7 @@ static int mei_me_d0i3_enter(struct mei_device *dev) reg = mei_me_d0i3c_read(dev); if (reg & H_D0I3C_I3) { /* we are in d0i3, nothing to do */ - dev_dbg(dev->dev, "already d0i3 : set not needed\n"); + dev_dbg(&dev->dev, "already d0i3 : set not needed\n"); goto on; } @@ -1007,7 +1021,7 @@ static int mei_me_d0i3_enter(struct mei_device *dev) on: hw->pg_state = MEI_PG_ON; dev->pg_event = MEI_PG_EVENT_IDLE; - dev_dbg(dev->dev, "d0i3 enter\n"); + dev_dbg(&dev->dev, "d0i3 enter\n"); return 0; } @@ -1029,14 +1043,14 @@ static int mei_me_d0i3_exit_sync(struct mei_device *dev) reg = mei_me_d0i3c_read(dev); if (!(reg & H_D0I3C_I3)) { /* we are not in d0i3, nothing to do */ - dev_dbg(dev->dev, "d0i3 exit not needed\n"); + dev_dbg(&dev->dev, "d0i3 exit not needed\n"); ret = 0; goto off; } reg = mei_me_d0i3_unset(dev); if (!(reg & H_D0I3C_CIP)) { - dev_dbg(dev->dev, "d0i3 exit wait not needed\n"); + dev_dbg(&dev->dev, "d0i3 exit wait not needed\n"); ret = 0; goto off; } @@ -1061,7 +1075,7 @@ off: out: dev->pg_event = MEI_PG_EVENT_IDLE; - dev_dbg(dev->dev, "d0i3 exit ret = %d\n", ret); + dev_dbg(&dev->dev, "d0i3 exit ret = %d\n", ret); return ret; } @@ -1104,7 +1118,7 @@ static void mei_me_d0i3_intr(struct mei_device *dev, u32 intr_source) * force H_RDY because it could be * wiped off during PG */ - dev_dbg(dev->dev, "d0i3 set host ready\n"); + dev_dbg(&dev->dev, "d0i3 set host ready\n"); mei_me_host_set_ready(dev); } } else { @@ -1120,7 +1134,7 @@ static void mei_me_d0i3_intr(struct mei_device *dev, u32 intr_source) * we got here because of HW initiated exit from D0i3. * Start runtime pm resume sequence to exit low power state. 
*/ - dev_dbg(dev->dev, "d0i3 want resume\n"); + dev_dbg(&dev->dev, "d0i3 want resume\n"); mei_hbm_pg_resume(dev); } } @@ -1200,7 +1214,7 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable) } } - pm_runtime_set_active(dev->dev); + pm_runtime_set_active(dev->parent); hcsr = mei_hcsr_read(dev); /* H_RST may be found lit before reset is started, @@ -1209,7 +1223,7 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable) * we need to clean H_RST bit to start a successful reset sequence. */ if ((hcsr & H_RST) == H_RST) { - dev_warn(dev->dev, "H_RST is set = 0x%08X", hcsr); + dev_warn(&dev->dev, "H_RST is set = 0x%08X", hcsr); hcsr &= ~H_RST; mei_hcsr_set(dev, hcsr); hcsr = mei_hcsr_read(dev); @@ -1230,10 +1244,10 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable) hcsr = mei_hcsr_read(dev); if ((hcsr & H_RST) == 0) - dev_warn(dev->dev, "H_RST is not set = 0x%08X", hcsr); + dev_warn(&dev->dev, "H_RST is not set = 0x%08X", hcsr); if ((hcsr & H_RDY) == H_RDY) - dev_warn(dev->dev, "H_RDY is not cleared 0x%08X", hcsr); + dev_warn(&dev->dev, "H_RDY is not cleared 0x%08X", hcsr); if (!intr_enable) { mei_me_hw_reset_release(dev); @@ -1263,7 +1277,7 @@ irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id) if (!me_intr_src(hcsr)) return IRQ_NONE; - dev_dbg(dev->dev, "interrupt source 0x%08X\n", me_intr_src(hcsr)); + dev_dbg(&dev->dev, "interrupt source 0x%08X\n", me_intr_src(hcsr)); /* disable interrupts on device */ me_intr_disable(dev, hcsr); @@ -1289,7 +1303,7 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id) u32 hcsr; int rets = 0; - dev_dbg(dev->dev, "function called after ISR to handle the interrupt processing.\n"); + dev_dbg(&dev->dev, "function called after ISR to handle the interrupt processing.\n"); /* initialize our complete list */ mutex_lock(&dev->device_lock); @@ -1300,8 +1314,13 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id) /* check if ME wants a reset */ if 
(!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) { - dev_warn(dev->dev, "FW not ready: resetting: dev_state = %d pxp = %d\n", - dev->dev_state, dev->pxp_mode); + if (kind_is_gsc(dev) || kind_is_gscfi(dev)) { + dev_dbg(&dev->dev, "FW not ready: resetting: dev_state = %d\n", + dev->dev_state); + } else { + dev_warn(&dev->dev, "FW not ready: resetting: dev_state = %d\n", + dev->dev_state); + } if (dev->dev_state == MEI_DEV_POWERING_DOWN || dev->dev_state == MEI_DEV_POWER_DOWN) mei_cl_all_disconnect(dev); @@ -1318,18 +1337,29 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id) /* check if we need to start the dev */ if (!mei_host_is_ready(dev)) { if (mei_hw_is_ready(dev)) { - dev_dbg(dev->dev, "we need to start the dev.\n"); - dev->recvd_hw_ready = true; - wake_up(&dev->wait_hw_ready); + /* synchronized by dev mutex */ + if (waitqueue_active(&dev->wait_hw_ready)) { + dev_dbg(&dev->dev, "we need to start the dev.\n"); + dev->recvd_hw_ready = true; + wake_up(&dev->wait_hw_ready); + } else if (dev->dev_state != MEI_DEV_UNINITIALIZED && + dev->dev_state != MEI_DEV_POWERING_DOWN && + dev->dev_state != MEI_DEV_POWER_DOWN) { + dev_dbg(&dev->dev, "Force link reset.\n"); + schedule_work(&dev->reset_work); + } else { + dev_dbg(&dev->dev, "Ignore this interrupt in state = %d\n", + dev->dev_state); + } } else { - dev_dbg(dev->dev, "Spurious Interrupt\n"); + dev_dbg(&dev->dev, "Spurious Interrupt\n"); } goto end; } /* check slots available for reading */ slots = mei_count_full_read_slots(dev); while (slots > 0) { - dev_dbg(dev->dev, "slots to read = %08x\n", slots); + dev_dbg(&dev->dev, "slots to read = %08x\n", slots); rets = mei_irq_read_handler(dev, &cmpl_list, &slots); /* There is a race between ME write and interrupt delivery: * Not all data is always available immediately after the @@ -1339,7 +1369,7 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id) break; if (rets) { - dev_err(dev->dev, "mei_irq_read_handler ret = %d, state = %d.\n", + 
dev_err(&dev->dev, "mei_irq_read_handler ret = %d, state = %d.\n", rets, dev->dev_state); if (dev->dev_state != MEI_DEV_RESETTING && dev->dev_state != MEI_DEV_DISABLED && @@ -1366,7 +1396,7 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id) mei_irq_compl_handler(dev, &cmpl_list); end: - dev_dbg(dev->dev, "interrupt thread end ret = %d\n", rets); + dev_dbg(&dev->dev, "interrupt thread end ret = %d\n", rets); mei_me_intr_enable(dev); mutex_unlock(&dev->device_lock); return IRQ_HANDLED; @@ -1379,6 +1409,8 @@ EXPORT_SYMBOL_GPL(mei_me_irq_thread_handler); /** * mei_me_polling_thread - interrupt register polling thread * + * @_dev: mei device + * * The thread monitors the interrupt source register and calls * mei_me_irq_thread_handler() to handle the firmware * input. @@ -1388,8 +1420,6 @@ EXPORT_SYMBOL_GPL(mei_me_irq_thread_handler); * time increases yet again by MEI_POLLING_TIMEOUT_ACTIVE * up to MEI_POLLING_TIMEOUT_IDLE. * - * @_dev: mei device - * * Return: always 0 */ int mei_me_polling_thread(void *_dev) @@ -1398,7 +1428,7 @@ int mei_me_polling_thread(void *_dev) irqreturn_t irq_ret; long polling_timeout = MEI_POLLING_TIMEOUT_ACTIVE; - dev_dbg(dev->dev, "kernel thread is running\n"); + dev_dbg(&dev->dev, "kernel thread is running\n"); while (!kthread_should_stop()) { struct mei_me_hw *hw = to_me_hw(dev); u32 hcsr; @@ -1415,7 +1445,7 @@ int mei_me_polling_thread(void *_dev) polling_timeout = MEI_POLLING_TIMEOUT_ACTIVE; irq_ret = mei_me_irq_thread_handler(1, dev); if (irq_ret != IRQ_HANDLED) - dev_err(dev->dev, "irq_ret %d\n", irq_ret); + dev_err(&dev->dev, "irq_ret %d\n", irq_ret); } else { /* * Increase timeout by MEI_POLLING_TIMEOUT_ACTIVE @@ -1468,12 +1498,12 @@ static const struct mei_hw_ops mei_me_hw_ops = { /** * mei_me_fw_type_nm() - check for nm sku * + * @pdev: pci device + * * Read ME FW Status register to check for the Node Manager (NM) Firmware. * The NM FW is only signaled in PCI function 0. * __Note__: Deprecated by PCH8 and newer. 
* - * @pdev: pci device - * * Return: true in case of NM firmware */ static bool mei_me_fw_type_nm(const struct pci_dev *pdev) @@ -1494,12 +1524,12 @@ static bool mei_me_fw_type_nm(const struct pci_dev *pdev) /** * mei_me_fw_type_sps_4() - check for sps 4.0 sku * + * @pdev: pci device + * * Read ME FW Status register to check for SPS Firmware. * The SPS FW is only signaled in the PCI function 0. * __Note__: Deprecated by SPS 5.0 and newer. * - * @pdev: pci device - * * Return: true in case of SPS firmware */ static bool mei_me_fw_type_sps_4(const struct pci_dev *pdev) @@ -1519,11 +1549,11 @@ static bool mei_me_fw_type_sps_4(const struct pci_dev *pdev) /** * mei_me_fw_type_sps_ign() - check for sps or ign sku * + * @pdev: pci device + * * Read ME FW Status register to check for SPS or IGN Firmware. * The SPS/IGN FW is only signaled in pci function 0 * - * @pdev: pci device - * * Return: true in case of SPS/IGN firmware */ static bool mei_me_fw_type_sps_ign(const struct pci_dev *pdev) @@ -1749,7 +1779,7 @@ struct mei_device *mei_me_dev_init(struct device *parent, struct mei_me_hw *hw; int i; - dev = devm_kzalloc(parent, sizeof(*dev) + sizeof(*hw), GFP_KERNEL); + dev = kzalloc(sizeof(*dev) + sizeof(*hw), GFP_KERNEL); if (!dev) return NULL; diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h index 95cf830b7c7b..204b92af6c47 100644 --- a/drivers/misc/mei/hw-me.h +++ b/drivers/misc/mei/hw-me.h @@ -102,10 +102,14 @@ static inline bool mei_me_hw_use_polling(const struct mei_me_hw *hw) * @MEI_ME_PCH12_SPS_CFG: Platform Controller Hub Gen12 5.0 and newer * servers platforms with quirk for * SPS firmware exclusion. + * @MEI_ME_PCH12_SPS_ITOUCH_CFG: Platform Controller Hub Gen12 + * client platforms (iTouch) * @MEI_ME_PCH15_CFG: Platform Controller Hub Gen15 and newer * @MEI_ME_PCH15_SPS_CFG: Platform Controller Hub Gen15 and newer * servers platforms with quirk for * SPS firmware exclusion. 
+ * @MEI_ME_GSC_CFG: Graphics System Controller + * @MEI_ME_GSCFI_CFG: Graphics System Controller Firmware Interface * @MEI_ME_NUM_CFG: Upper Sentinel. */ enum mei_cfg_idx { diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c index 5d0f68b95c29..e4688c391027 100644 --- a/drivers/misc/mei/hw-txe.c +++ b/drivers/misc/mei/hw-txe.c @@ -160,7 +160,7 @@ static bool mei_txe_aliveness_set(struct mei_device *dev, u32 req) struct mei_txe_hw *hw = to_txe_hw(dev); bool do_req = hw->aliveness != req; - dev_dbg(dev->dev, "Aliveness current=%d request=%d\n", + dev_dbg(&dev->dev, "Aliveness current=%d request=%d\n", hw->aliveness, req); if (do_req) { dev->pg_event = MEI_PG_EVENT_WAIT; @@ -227,7 +227,7 @@ static int mei_txe_aliveness_poll(struct mei_device *dev, u32 expected) hw->aliveness = mei_txe_aliveness_get(dev); if (hw->aliveness == expected) { dev->pg_event = MEI_PG_EVENT_IDLE; - dev_dbg(dev->dev, "aliveness settled after %lld usecs\n", + dev_dbg(&dev->dev, "aliveness settled after %lld usecs\n", ktime_to_us(ktime_sub(ktime_get(), start))); return 0; } @@ -235,7 +235,7 @@ static int mei_txe_aliveness_poll(struct mei_device *dev, u32 expected) } while (ktime_before(ktime_get(), stop)); dev->pg_event = MEI_PG_EVENT_IDLE; - dev_err(dev->dev, "aliveness timed out\n"); + dev_err(&dev->dev, "aliveness timed out\n"); return -ETIME; } @@ -270,10 +270,10 @@ static int mei_txe_aliveness_wait(struct mei_device *dev, u32 expected) ret = hw->aliveness == expected ? 
0 : -ETIME; if (ret) - dev_warn(dev->dev, "aliveness timed out = %ld aliveness = %d event = %d\n", + dev_warn(&dev->dev, "aliveness timed out = %ld aliveness = %d event = %d\n", err, hw->aliveness, dev->pg_event); else - dev_dbg(dev->dev, "aliveness settled after = %d msec aliveness = %d event = %d\n", + dev_dbg(&dev->dev, "aliveness settled after = %d msec aliveness = %d event = %d\n", jiffies_to_msecs(timeout - err), hw->aliveness, dev->pg_event); @@ -438,7 +438,7 @@ static void mei_txe_intr_enable(struct mei_device *dev) */ static void mei_txe_synchronize_irq(struct mei_device *dev) { - struct pci_dev *pdev = to_pci_dev(dev->dev); + struct pci_dev *pdev = to_pci_dev(dev->parent); synchronize_irq(pdev->irq); } @@ -464,7 +464,7 @@ static bool mei_txe_pending_interrupts(struct mei_device *dev) TXE_INTR_OUT_DB)); if (ret) { - dev_dbg(dev->dev, + dev_dbg(&dev->dev, "Pending Interrupts InReady=%01d Readiness=%01d, Aliveness=%01d, OutDoor=%01d\n", !!(hw->intr_cause & TXE_INTR_IN_READY), !!(hw->intr_cause & TXE_INTR_READINESS), @@ -612,7 +612,7 @@ static int mei_txe_readiness_wait(struct mei_device *dev) msecs_to_jiffies(SEC_RESET_WAIT_TIMEOUT)); mutex_lock(&dev->device_lock); if (!dev->recvd_hw_ready) { - dev_err(dev->dev, "wait for readiness failed\n"); + dev_err(&dev->dev, "wait for readiness failed\n"); return -ETIME; } @@ -638,7 +638,7 @@ static int mei_txe_fw_status(struct mei_device *dev, struct mei_fw_status *fw_status) { const struct mei_fw_status *fw_src = &mei_txe_fw_sts; - struct pci_dev *pdev = to_pci_dev(dev->dev); + struct pci_dev *pdev = to_pci_dev(dev->parent); int ret; int i; @@ -649,7 +649,7 @@ static int mei_txe_fw_status(struct mei_device *dev, for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) { ret = pci_read_config_dword(pdev, fw_src->status[i], &fw_status->status[i]); - trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HSF_X", + trace_mei_pci_cfg_read(&dev->dev, "PCI_CFG_HSF_X", fw_src->status[i], fw_status->status[i]); if (ret) @@ -677,7 
+677,7 @@ static int mei_txe_hw_config(struct mei_device *dev) hw->aliveness = mei_txe_aliveness_get(dev); hw->readiness = mei_txe_readiness_get(dev); - dev_dbg(dev->dev, "aliveness_resp = 0x%08x, readiness = 0x%08x.\n", + dev_dbg(&dev->dev, "aliveness_resp = 0x%08x, readiness = 0x%08x.\n", hw->aliveness, hw->readiness); return 0; @@ -708,7 +708,7 @@ static int mei_txe_write(struct mei_device *dev, if (WARN_ON(!hdr || !data || hdr_len & 0x3)) return -EINVAL; - dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM((struct mei_msg_hdr *)hdr)); + dev_dbg(&dev->dev, MEI_HDR_FMT, MEI_HDR_PRM((struct mei_msg_hdr *)hdr)); dw_cnt = mei_data2slots(hdr_len + data_len); if (dw_cnt > slots) @@ -724,7 +724,7 @@ static int mei_txe_write(struct mei_device *dev, char fw_sts_str[MEI_FW_STATUS_STR_SZ]; mei_fw_status_str(dev, fw_sts_str, MEI_FW_STATUS_STR_SZ); - dev_err(dev->dev, "Input is not ready %s\n", fw_sts_str); + dev_err(&dev->dev, "Input is not ready %s\n", fw_sts_str); return -EAGAIN; } @@ -828,13 +828,13 @@ static int mei_txe_read(struct mei_device *dev, reg_buf = (u32 *)buf; rem = len & 0x3; - dev_dbg(dev->dev, "buffer-length = %lu buf[0]0x%08X\n", + dev_dbg(&dev->dev, "buffer-length = %lu buf[0]0x%08X\n", len, mei_txe_out_data_read(dev, 0)); for (i = 0; i < len / MEI_SLOT_SIZE; i++) { /* skip header: index starts from 1 */ reg = mei_txe_out_data_read(dev, i + 1); - dev_dbg(dev->dev, "buf[%d] = 0x%08X\n", i, reg); + dev_dbg(&dev->dev, "buf[%d] = 0x%08X\n", i, reg); *reg_buf++ = reg; } @@ -879,7 +879,7 @@ static int mei_txe_hw_reset(struct mei_device *dev, bool intr_enable) */ if (aliveness_req != hw->aliveness) if (mei_txe_aliveness_poll(dev, aliveness_req) < 0) { - dev_err(dev->dev, "wait for aliveness settle failed ... bailing out\n"); + dev_err(&dev->dev, "wait for aliveness settle failed ... 
bailing out\n"); return -EIO; } @@ -889,7 +889,7 @@ static int mei_txe_hw_reset(struct mei_device *dev, bool intr_enable) if (aliveness_req) { mei_txe_aliveness_set(dev, 0); if (mei_txe_aliveness_poll(dev, 0) < 0) { - dev_err(dev->dev, "wait for aliveness failed ... bailing out\n"); + dev_err(&dev->dev, "wait for aliveness failed ... bailing out\n"); return -EIO; } } @@ -921,7 +921,7 @@ static int mei_txe_hw_start(struct mei_device *dev) ret = mei_txe_readiness_wait(dev); if (ret < 0) { - dev_err(dev->dev, "waiting for readiness failed\n"); + dev_err(&dev->dev, "waiting for readiness failed\n"); return ret; } @@ -937,11 +937,11 @@ static int mei_txe_hw_start(struct mei_device *dev) ret = mei_txe_aliveness_set_sync(dev, 1); if (ret < 0) { - dev_err(dev->dev, "wait for aliveness failed ... bailing out\n"); + dev_err(&dev->dev, "wait for aliveness failed ... bailing out\n"); return ret; } - pm_runtime_set_active(dev->dev); + pm_runtime_set_active(dev->parent); /* enable input ready interrupts: * SEC_IPC_HOST_INT_MASK.IPC_INPUT_READY_INT_MASK @@ -1049,7 +1049,7 @@ irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id) s32 slots; int rets = 0; - dev_dbg(dev->dev, "irq thread: Interrupt Registers HHISR|HISR|SEC=%02X|%04X|%02X\n", + dev_dbg(&dev->dev, "irq thread: Interrupt Registers HHISR|HISR|SEC=%02X|%04X|%02X\n", mei_txe_br_reg_read(hw, HHISR_REG), mei_txe_br_reg_read(hw, HISR_REG), mei_txe_sec_reg_read_silent(hw, SEC_IPC_HOST_INT_STATUS_REG)); @@ -1059,7 +1059,7 @@ irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id) mutex_lock(&dev->device_lock); INIT_LIST_HEAD(&cmpl_list); - if (pci_dev_msi_enabled(to_pci_dev(dev->dev))) + if (pci_dev_msi_enabled(to_pci_dev(dev->parent))) mei_txe_check_and_ack_intrs(dev, true); /* show irq events */ @@ -1073,17 +1073,17 @@ irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id) * or TXE driver resetting the HECI interface. 
*/ if (test_and_clear_bit(TXE_INTR_READINESS_BIT, &hw->intr_cause)) { - dev_dbg(dev->dev, "Readiness Interrupt was received...\n"); + dev_dbg(&dev->dev, "Readiness Interrupt was received...\n"); /* Check if SeC is going through reset */ if (mei_txe_readiness_is_sec_rdy(hw->readiness)) { - dev_dbg(dev->dev, "we need to start the dev.\n"); + dev_dbg(&dev->dev, "we need to start the dev.\n"); dev->recvd_hw_ready = true; } else { dev->recvd_hw_ready = false; if (dev->dev_state != MEI_DEV_RESETTING) { - dev_warn(dev->dev, "FW not ready: resetting.\n"); + dev_warn(&dev->dev, "FW not ready: resetting.\n"); schedule_work(&dev->reset_work); goto end; @@ -1100,7 +1100,7 @@ irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id) if (test_and_clear_bit(TXE_INTR_ALIVENESS_BIT, &hw->intr_cause)) { /* Clear the interrupt cause */ - dev_dbg(dev->dev, + dev_dbg(&dev->dev, "Aliveness Interrupt: Status: %d\n", hw->aliveness); dev->pg_event = MEI_PG_EVENT_RECEIVED; if (waitqueue_active(&hw->wait_aliveness_resp)) @@ -1118,7 +1118,7 @@ irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id) if (rets && (dev->dev_state != MEI_DEV_RESETTING && dev->dev_state != MEI_DEV_POWER_DOWN)) { - dev_err(dev->dev, + dev_err(&dev->dev, "mei_irq_read_handler ret = %d.\n", rets); schedule_work(&dev->reset_work); @@ -1136,7 +1136,7 @@ irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id) dev->hbuf_is_ready = mei_hbuf_is_ready(dev); rets = mei_irq_write_handler(dev, &cmpl_list); if (rets && rets != -EMSGSIZE) - dev_err(dev->dev, "mei_irq_write_handler ret = %d.\n", + dev_err(&dev->dev, "mei_irq_write_handler ret = %d.\n", rets); dev->hbuf_is_ready = mei_hbuf_is_ready(dev); } @@ -1144,7 +1144,7 @@ irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id) mei_irq_compl_handler(dev, &cmpl_list); end: - dev_dbg(dev->dev, "interrupt thread end ret = %d\n", rets); + dev_dbg(&dev->dev, "interrupt thread end ret = %d\n", rets); mutex_unlock(&dev->device_lock); @@ -1197,7 +1197,7 @@ struct 
mei_device *mei_txe_dev_init(struct pci_dev *pdev) struct mei_device *dev; struct mei_txe_hw *hw; - dev = devm_kzalloc(&pdev->dev, sizeof(*dev) + sizeof(*hw), GFP_KERNEL); + dev = kzalloc(sizeof(*dev) + sizeof(*hw), GFP_KERNEL); if (!dev) return NULL; @@ -1209,48 +1209,3 @@ struct mei_device *mei_txe_dev_init(struct pci_dev *pdev) return dev; } - -/** - * mei_txe_setup_satt2 - SATT2 configuration for DMA support. - * - * @dev: the device structure - * @addr: physical address start of the range - * @range: physical range size - * - * Return: 0 on success an error code otherwise - */ -int mei_txe_setup_satt2(struct mei_device *dev, phys_addr_t addr, u32 range) -{ - struct mei_txe_hw *hw = to_txe_hw(dev); - - u32 lo32 = lower_32_bits(addr); - u32 hi32 = upper_32_bits(addr); - u32 ctrl; - - /* SATT is limited to 36 Bits */ - if (hi32 & ~0xF) - return -EINVAL; - - /* SATT has to be 16Byte aligned */ - if (lo32 & 0xF) - return -EINVAL; - - /* SATT range has to be 4Bytes aligned */ - if (range & 0x4) - return -EINVAL; - - /* SATT is limited to 32 MB range*/ - if (range > SATT_RANGE_MAX) - return -EINVAL; - - ctrl = SATT2_CTRL_VALID_MSK; - ctrl |= hi32 << SATT2_CTRL_BR_BASE_ADDR_REG_SHIFT; - - mei_txe_br_reg_write(hw, SATT2_SAP_SIZE_REG, range); - mei_txe_br_reg_write(hw, SATT2_BRG_BA_LSB_REG, lo32); - mei_txe_br_reg_write(hw, SATT2_CTRL_REG, ctrl); - dev_dbg(dev->dev, "SATT2: SAP_SIZE_OFFSET=0x%08X, BRG_BA_LSB_OFFSET=0x%08X, CTRL_OFFSET=0x%08X\n", - range, lo32, ctrl); - - return 0; -} diff --git a/drivers/misc/mei/hw-txe.h b/drivers/misc/mei/hw-txe.h index 96511b04bf88..6790e646895d 100644 --- a/drivers/misc/mei/hw-txe.h +++ b/drivers/misc/mei/hw-txe.h @@ -59,7 +59,5 @@ irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id); int mei_txe_aliveness_set_sync(struct mei_device *dev, u32 req); -int mei_txe_setup_satt2(struct mei_device *dev, phys_addr_t addr, u32 range); - #endif /* _MEI_HW_TXE_H_ */ diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h index 
e910302fcd1f..3771aa09c592 100644 --- a/drivers/misc/mei/hw.h +++ b/drivers/misc/mei/hw.h @@ -27,6 +27,8 @@ #define MKHI_RCV_TIMEOUT 500 /* receive timeout in msec */ #define MKHI_RCV_TIMEOUT_SLOW 10000 /* receive timeout in msec, slow FW */ +#define MEI_LINK_RESET_WAIT_TIMEOUT_MSEC 500 /* Max wait timeout for link reset, in msec */ + /* * FW page size for DMA allocations */ @@ -247,12 +249,10 @@ enum mei_ext_hdr_type { * struct mei_ext_hdr - extend header descriptor (TLV) * @type: enum mei_ext_hdr_type * @length: length excluding descriptor - * @data: the extended header payload */ struct mei_ext_hdr { u8 type; u8 length; - u8 data[]; } __packed; /** @@ -429,7 +429,7 @@ struct mei_bus_message { } __packed; /** - * struct hbm_cl_cmd - client specific host bus command + * struct mei_hbm_cl_cmd - client specific host bus command * CONNECT, DISCONNECT, and FlOW CONTROL * * @hbm_cmd: bus message command header @@ -733,7 +733,7 @@ struct hbm_dma_setup_response { } __packed; /** - * struct mei_dma_ring_ctrl - dma ring control block + * struct hbm_dma_ring_ctrl - dma ring control block * * @hbuf_wr_idx: host circular buffer write index in slots * @reserved1: reserved for alignment @@ -806,8 +806,8 @@ struct hbm_client_dma_map_request { } __packed; /** - * struct hbm_client_dma_unmap_request - * client dma unmap request from the host to the firmware + * struct hbm_client_dma_unmap_request - client dma unmap request + * from the host to the firmware * * @hbm_cmd: bus message command header * @status: unmap status @@ -822,8 +822,8 @@ struct hbm_client_dma_unmap_request { } __packed; /** - * struct hbm_client_dma_response - * client dma unmap response from the firmware to the host + * struct hbm_client_dma_response - client dma unmap response + * from the firmware to the host * * @hbm_cmd: bus message command header * @status: command status diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c index bac8852aad51..b789c4d9c709 100644 --- a/drivers/misc/mei/init.c +++ 
b/drivers/misc/mei/init.c @@ -109,8 +109,13 @@ int mei_reset(struct mei_device *dev) char fw_sts_str[MEI_FW_STATUS_STR_SZ]; mei_fw_status_str(dev, fw_sts_str, MEI_FW_STATUS_STR_SZ); - dev_warn(dev->dev, "unexpected reset: dev_state = %s fw status = %s\n", - mei_dev_state_str(state), fw_sts_str); + if (kind_is_gsc(dev) || kind_is_gscfi(dev)) { + dev_dbg(&dev->dev, "unexpected reset: dev_state = %s fw status = %s\n", + mei_dev_state_str(state), fw_sts_str); + } else { + dev_warn(&dev->dev, "unexpected reset: dev_state = %s fw status = %s\n", + mei_dev_state_str(state), fw_sts_str); + } } mei_clear_interrupts(dev); @@ -128,7 +133,7 @@ int mei_reset(struct mei_device *dev) dev->reset_count++; if (dev->reset_count > MEI_MAX_CONSEC_RESET) { - dev_err(dev->dev, "reset: reached maximal consecutive resets: disabling the device\n"); + dev_err(&dev->dev, "reset: reached maximal consecutive resets: disabling the device\n"); mei_set_devstate(dev, MEI_DEV_DISABLED); return -ENODEV; } @@ -142,36 +147,42 @@ int mei_reset(struct mei_device *dev) mei_hbm_reset(dev); + /* clean stale FW version */ + dev->fw_ver_received = 0; + memset(dev->rd_msg_hdr, 0, sizeof(dev->rd_msg_hdr)); if (ret) { - dev_err(dev->dev, "hw_reset failed ret = %d\n", ret); + dev_err(&dev->dev, "hw_reset failed ret = %d\n", ret); return ret; } if (state == MEI_DEV_POWER_DOWN) { - dev_dbg(dev->dev, "powering down: end of reset\n"); + dev_dbg(&dev->dev, "powering down: end of reset\n"); mei_set_devstate(dev, MEI_DEV_DISABLED); return 0; } ret = mei_hw_start(dev); if (ret) { - dev_err(dev->dev, "hw_start failed ret = %d\n", ret); + char fw_sts_str[MEI_FW_STATUS_STR_SZ]; + + mei_fw_status_str(dev, fw_sts_str, MEI_FW_STATUS_STR_SZ); + dev_err(&dev->dev, "hw_start failed ret = %d fw status = %s\n", ret, fw_sts_str); return ret; } if (dev->dev_state != MEI_DEV_RESETTING) { - dev_dbg(dev->dev, "wrong state = %d on link start\n", dev->dev_state); + dev_dbg(&dev->dev, "wrong state = %d on link start\n", dev->dev_state); 
return 0; } - dev_dbg(dev->dev, "link is established start sending messages.\n"); + dev_dbg(&dev->dev, "link is established start sending messages.\n"); mei_set_devstate(dev, MEI_DEV_INIT_CLIENTS); ret = mei_hbm_start_req(dev); if (ret) { - dev_err(dev->dev, "hbm_start failed ret = %d\n", ret); + dev_err(&dev->dev, "hbm_start failed ret = %d\n", ret); mei_set_devstate(dev, MEI_DEV_RESETTING); return ret; } @@ -200,7 +211,7 @@ int mei_start(struct mei_device *dev) if (ret) goto err; - dev_dbg(dev->dev, "reset in start the mei device.\n"); + dev_dbg(&dev->dev, "reset in start the mei device.\n"); dev->reset_count = 0; do { @@ -208,27 +219,27 @@ int mei_start(struct mei_device *dev) ret = mei_reset(dev); if (ret == -ENODEV || dev->dev_state == MEI_DEV_DISABLED) { - dev_err(dev->dev, "reset failed ret = %d", ret); + dev_err(&dev->dev, "reset failed ret = %d", ret); goto err; } } while (ret); if (mei_hbm_start_wait(dev)) { - dev_err(dev->dev, "HBM haven't started"); + dev_err(&dev->dev, "HBM haven't started"); goto err; } if (!mei_hbm_version_is_supported(dev)) { - dev_dbg(dev->dev, "MEI start failed.\n"); + dev_dbg(&dev->dev, "MEI start failed.\n"); goto err; } - dev_dbg(dev->dev, "link layer has been established.\n"); + dev_dbg(&dev->dev, "link layer has been established.\n"); mutex_unlock(&dev->device_lock); return 0; err: - dev_err(dev->dev, "link layer initialization failed.\n"); + dev_err(&dev->dev, "link layer initialization failed.\n"); mei_set_devstate(dev, MEI_DEV_DISABLED); mutex_unlock(&dev->device_lock); return -ENODEV; @@ -256,7 +267,7 @@ int mei_restart(struct mei_device *dev) mutex_unlock(&dev->device_lock); if (err == -ENODEV || dev->dev_state == MEI_DEV_DISABLED) { - dev_err(dev->dev, "device disabled = %d\n", err); + dev_err(&dev->dev, "device disabled = %d\n", err); return -ENODEV; } @@ -285,7 +296,7 @@ static void mei_reset_work(struct work_struct *work) mutex_unlock(&dev->device_lock); if (dev->dev_state == MEI_DEV_DISABLED) { - dev_err(dev->dev, 
"device disabled = %d\n", ret); + dev_err(&dev->dev, "device disabled = %d\n", ret); return; } @@ -296,7 +307,7 @@ static void mei_reset_work(struct work_struct *work) void mei_stop(struct mei_device *dev) { - dev_dbg(dev->dev, "stopping the device.\n"); + dev_dbg(&dev->dev, "stopping the device.\n"); mutex_lock(&dev->device_lock); mei_set_devstate(dev, MEI_DEV_POWERING_DOWN); @@ -337,7 +348,7 @@ bool mei_write_is_idle(struct mei_device *dev) list_empty(&dev->write_list) && list_empty(&dev->write_waiting_list)); - dev_dbg(dev->dev, "write pg: is idle[%d] state=%s ctrl=%01d write=%01d wwait=%01d\n", + dev_dbg(&dev->dev, "write pg: is idle[%d] state=%s ctrl=%01d write=%01d wwait=%01d\n", idle, mei_dev_state_str(dev->dev_state), list_empty(&dev->ctrl_wr_list), @@ -352,12 +363,12 @@ EXPORT_SYMBOL_GPL(mei_write_is_idle); * mei_device_init - initialize mei_device structure * * @dev: the mei device - * @device: the device structure + * @parent: the parent device * @slow_fw: configure longer timeouts as FW is slow * @hw_ops: hw operations */ void mei_device_init(struct mei_device *dev, - struct device *device, + struct device *parent, bool slow_fw, const struct mei_hw_ops *hw_ops) { @@ -371,7 +382,8 @@ void mei_device_init(struct mei_device *dev, init_waitqueue_head(&dev->wait_hw_ready); init_waitqueue_head(&dev->wait_pg); init_waitqueue_head(&dev->wait_hbm_start); - dev->dev_state = MEI_DEV_INITIALIZING; + dev->dev_state = MEI_DEV_UNINITIALIZED; + init_waitqueue_head(&dev->wait_dev_state); dev->reset_count = 0; INIT_LIST_HEAD(&dev->write_list); @@ -388,6 +400,7 @@ void mei_device_init(struct mei_device *dev, dev->open_handle_count = 0; dev->pxp_mode = MEI_DEV_PXP_DEFAULT; + dev->gsc_reset_to_pxp = MEI_DEV_RESET_TO_PXP_DEFAULT; /* * Reserving the first client ID @@ -397,7 +410,7 @@ void mei_device_init(struct mei_device *dev, dev->pg_event = MEI_PG_EVENT_IDLE; dev->ops = hw_ops; - dev->dev = device; + dev->parent = parent; dev->timeouts.hw_ready = 
mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT); dev->timeouts.connect = MEI_CONNECT_TIMEOUT; @@ -413,6 +426,6 @@ void mei_device_init(struct mei_device *dev, dev->timeouts.hbm = mei_secs_to_jiffies(MEI_HBM_TIMEOUT); dev->timeouts.mkhi_recv = msecs_to_jiffies(MKHI_RCV_TIMEOUT); } + dev->timeouts.link_reset_wait = msecs_to_jiffies(MEI_LINK_RESET_WAIT_TIMEOUT_MSEC); } EXPORT_SYMBOL_GPL(mei_device_init); - diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c index 0a0e984e5673..3f210413fd32 100644 --- a/drivers/misc/mei/interrupt.c +++ b/drivers/misc/mei/interrupt.c @@ -35,7 +35,7 @@ void mei_irq_compl_handler(struct mei_device *dev, struct list_head *cmpl_list) cl = cb->cl; list_del_init(&cb->list); - dev_dbg(dev->dev, "completing call back.\n"); + cl_dbg(dev, cl, "completing call back.\n"); mei_cl_complete(cl, cb); } } @@ -72,11 +72,11 @@ static void mei_irq_discard_msg(struct mei_device *dev, struct mei_msg_hdr *hdr, discard_len = 0; } /* - * no need to check for size as it is guarantied + * no need to check for size as it is guaranteed * that length fits into rd_msg_buf */ mei_read_slots(dev, dev->rd_msg_buf, discard_len); - dev_dbg(dev->dev, "discarding message " MEI_HDR_FMT "\n", + dev_dbg(&dev->dev, "discarding message " MEI_HDR_FMT "\n", MEI_HDR_PRM(hdr)); } @@ -133,7 +133,7 @@ static int mei_cl_irq_read_msg(struct mei_cl *cl, break; case MEI_EXT_HDR_GSC: gsc_f2h = (struct mei_ext_hdr_gsc_f2h *)ext; - cb->ext_hdr = kzalloc(sizeof(*gsc_f2h), GFP_KERNEL); + cb->ext_hdr = (struct mei_ext_hdr *)kzalloc(sizeof(*gsc_f2h), GFP_KERNEL); if (!cb->ext_hdr) { cb->status = -ENOMEM; goto discard; @@ -229,8 +229,7 @@ static int mei_cl_irq_read_msg(struct mei_cl *cl, cl_dbg(dev, cl, "completed read length = %zu\n", cb->buf_idx); list_move_tail(&cb->list, cmpl_list); } else { - pm_runtime_mark_last_busy(dev->dev); - pm_request_autosuspend(dev->dev); + pm_request_autosuspend(dev->parent); } return 0; @@ -310,8 +309,7 @@ static int mei_cl_irq_read(struct mei_cl 
*cl, struct mei_cl_cb *cb, return ret; } - pm_runtime_mark_last_busy(dev->dev); - pm_request_autosuspend(dev->dev); + pm_request_autosuspend(dev->parent); list_move_tail(&cb->list, &cl->rd_pending); @@ -373,21 +371,21 @@ int mei_irq_read_handler(struct mei_device *dev, dev->rd_msg_hdr[0] = mei_read_hdr(dev); dev->rd_msg_hdr_count = 1; (*slots)--; - dev_dbg(dev->dev, "slots =%08x.\n", *slots); + dev_dbg(&dev->dev, "slots =%08x.\n", *slots); ret = hdr_is_valid(dev->rd_msg_hdr[0]); if (ret) { - dev_err(dev->dev, "corrupted message header 0x%08X\n", + dev_err(&dev->dev, "corrupted message header 0x%08X\n", dev->rd_msg_hdr[0]); goto end; } } mei_hdr = (struct mei_msg_hdr *)dev->rd_msg_hdr; - dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr)); + dev_dbg(&dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr)); if (mei_slots2data(*slots) < mei_hdr->length) { - dev_err(dev->dev, "less data available than length=%08x.\n", + dev_err(&dev->dev, "less data available than length=%08x.\n", *slots); /* we can't read the message */ ret = -ENODATA; @@ -402,18 +400,18 @@ int mei_irq_read_handler(struct mei_device *dev, dev->rd_msg_hdr[1] = mei_read_hdr(dev); dev->rd_msg_hdr_count++; (*slots)--; - dev_dbg(dev->dev, "extended header is %08x\n", dev->rd_msg_hdr[1]); + dev_dbg(&dev->dev, "extended header is %08x\n", dev->rd_msg_hdr[1]); } meta_hdr = ((struct mei_ext_meta_hdr *)&dev->rd_msg_hdr[1]); if (check_add_overflow((u32)sizeof(*meta_hdr), mei_slots2data(meta_hdr->size), &hdr_size_ext)) { - dev_err(dev->dev, "extended message size too big %d\n", + dev_err(&dev->dev, "extended message size too big %d\n", meta_hdr->size); return -EBADMSG; } if (hdr_size_left < hdr_size_ext) { - dev_err(dev->dev, "corrupted message header len %d\n", + dev_err(&dev->dev, "corrupted message header len %d\n", mei_hdr->length); return -EBADMSG; } @@ -422,7 +420,7 @@ int mei_irq_read_handler(struct mei_device *dev, ext_hdr_end = meta_hdr->size + 2; for (i = dev->rd_msg_hdr_count; i < ext_hdr_end; i++) { 
dev->rd_msg_hdr[i] = mei_read_hdr(dev); - dev_dbg(dev->dev, "extended header %d is %08x\n", i, + dev_dbg(&dev->dev, "extended header %d is %08x\n", i, dev->rd_msg_hdr[i]); dev->rd_msg_hdr_count++; (*slots)--; @@ -431,7 +429,7 @@ int mei_irq_read_handler(struct mei_device *dev, if (mei_hdr->dma_ring) { if (hdr_size_left != sizeof(dev->rd_msg_hdr[ext_hdr_end])) { - dev_err(dev->dev, "corrupted message header len %d\n", + dev_err(&dev->dev, "corrupted message header len %d\n", mei_hdr->length); return -EBADMSG; } @@ -446,8 +444,7 @@ int mei_irq_read_handler(struct mei_device *dev, if (hdr_is_hbm(mei_hdr)) { ret = mei_hbm_dispatch(dev, mei_hdr); if (ret) { - dev_dbg(dev->dev, "mei_hbm_dispatch failed ret = %d\n", - ret); + dev_dbg(&dev->dev, "mei_hbm_dispatch failed ret = %d\n", ret); goto end; } goto reset_slots; @@ -474,7 +471,7 @@ int mei_irq_read_handler(struct mei_device *dev, ret = 0; goto reset_slots; } - dev_err(dev->dev, "no destination client found 0x%08X\n", dev->rd_msg_hdr[0]); + dev_err(&dev->dev, "no destination client found 0x%08X\n", dev->rd_msg_hdr[0]); ret = -EBADMSG; goto end; @@ -485,7 +482,7 @@ reset_slots: *slots = mei_count_full_read_slots(dev); if (*slots == -EOVERFLOW) { /* overflow - reset */ - dev_err(dev->dev, "resetting due to slots overflow.\n"); + dev_err(&dev->dev, "resetting due to slots overflow.\n"); /* set the event since message has been read */ ret = -ERANGE; goto end; @@ -525,7 +522,7 @@ int mei_irq_write_handler(struct mei_device *dev, struct list_head *cmpl_list) return -EMSGSIZE; /* complete all waiting for write CB */ - dev_dbg(dev->dev, "complete all waiting for write cb.\n"); + dev_dbg(&dev->dev, "complete all waiting for write cb.\n"); list_for_each_entry_safe(cb, next, &dev->write_waiting_list, list) { cl = cb->cl; @@ -537,7 +534,7 @@ int mei_irq_write_handler(struct mei_device *dev, struct list_head *cmpl_list) } /* complete control write list CB */ - dev_dbg(dev->dev, "complete control write list cb.\n"); + 
dev_dbg(&dev->dev, "complete control write list cb.\n"); list_for_each_entry_safe(cb, next, &dev->ctrl_wr_list, list) { cl = cb->cl; switch (cb->fop_type) { @@ -591,7 +588,7 @@ int mei_irq_write_handler(struct mei_device *dev, struct list_head *cmpl_list) } /* complete write list CB */ - dev_dbg(dev->dev, "complete write list cb.\n"); + dev_dbg(&dev->dev, "complete write list cb.\n"); list_for_each_entry_safe(cb, next, &dev->write_list, list) { cl = cb->cl; ret = mei_cl_irq_write(cl, cb, cmpl_list); @@ -626,9 +623,9 @@ static void mei_connect_timeout(struct mei_cl *cl) /** * mei_schedule_stall_timer - re-arm stall_timer work * - * Schedule stall timer - * * @dev: the device structure + * + * Schedule stall timer */ void mei_schedule_stall_timer(struct mei_device *dev) { @@ -656,7 +653,7 @@ void mei_timer(struct work_struct *work) if (dev->init_clients_timer) { if (--dev->init_clients_timer == 0) { - dev_err(dev->dev, "timer: init clients timeout hbm_state = %d.\n", + dev_err(&dev->dev, "timer: init clients timeout hbm_state = %d.\n", dev->hbm_state); mei_reset(dev); goto out; @@ -672,7 +669,7 @@ void mei_timer(struct work_struct *work) list_for_each_entry(cl, &dev->file_list, link) { if (cl->timer_count) { if (--cl->timer_count == 0) { - dev_err(dev->dev, "timer: connect/disconnect timeout.\n"); + dev_err(&dev->dev, "timer: connect/disconnect timeout.\n"); mei_connect_timeout(cl); goto out; } diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c index 51876da3fd65..6f26d5160788 100644 --- a/drivers/misc/mei/main.c +++ b/drivers/misc/mei/main.c @@ -27,7 +27,10 @@ #include "mei_dev.h" #include "client.h" -static struct class *mei_class; +static const struct class mei_class = { + .name = "mei", +}; + static dev_t mei_devt; #define MEI_MAX_DEVS MINORMASK static DEFINE_MUTEX(mei_minor_lock); @@ -48,12 +51,15 @@ static int mei_open(struct inode *inode, struct file *file) int err; - dev = container_of(inode->i_cdev, struct mei_device, cdev); + dev = 
idr_find(&mei_idr, iminor(inode)); + if (!dev) + return -ENODEV; + get_device(&dev->dev); mutex_lock(&dev->device_lock); if (dev->dev_state != MEI_DEV_ENABLED) { - dev_dbg(dev->dev, "dev_state != MEI_ENABLED dev_state = %s\n", + dev_dbg(&dev->dev, "dev_state != MEI_ENABLED dev_state = %s\n", mei_dev_state_str(dev->dev_state)); err = -ENODEV; goto err_unlock; @@ -74,6 +80,7 @@ static int mei_open(struct inode *inode, struct file *file) err_unlock: mutex_unlock(&dev->device_lock); + put_device(&dev->dev); return err; } @@ -149,6 +156,7 @@ out: file->private_data = NULL; mutex_unlock(&dev->device_lock); + put_device(&dev->dev); return rets; } @@ -253,7 +261,7 @@ copy_buffer: length = min_t(size_t, length, cb->buf_idx - *offset); if (copy_to_user(ubuf, cb->buf.data + *offset, length)) { - dev_dbg(dev->dev, "failed to copy data to userland\n"); + cl_dbg(dev, cl, "failed to copy data to userland\n"); rets = -EFAULT; goto free; } @@ -326,7 +334,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf, } if (!mei_cl_is_connected(cl)) { - cl_err(dev, cl, "is not connected"); + cl_dbg(dev, cl, "is not connected"); rets = -ENODEV; goto out; } @@ -376,7 +384,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf, rets = copy_from_user(cb->buf.data, ubuf, length); if (rets) { - dev_dbg(dev->dev, "failed to copy data from userland\n"); + cl_dbg(dev, cl, "failed to copy data from userland\n"); rets = -EFAULT; mei_io_cb_free(cb); goto out; @@ -415,10 +423,11 @@ static int mei_ioctl_connect_client(struct file *file, cl->state != MEI_FILE_DISCONNECTED) return -EBUSY; +retry: /* find ME client we're trying to connect to */ me_cl = mei_me_cl_by_uuid(dev, in_client_uuid); if (!me_cl) { - dev_dbg(dev->dev, "Cannot connect to FW Client UUID = %pUl\n", + cl_dbg(dev, cl, "Cannot connect to FW Client UUID = %pUl\n", in_client_uuid); rets = -ENOTTY; goto end; @@ -428,27 +437,46 @@ static int mei_ioctl_connect_client(struct file *file, bool forbidden = 
dev->override_fixed_address ? !dev->allow_fixed_address : !dev->hbm_f_fa_supported; if (forbidden) { - dev_dbg(dev->dev, "Connection forbidden to FW Client UUID = %pUl\n", + cl_dbg(dev, cl, "Connection forbidden to FW Client UUID = %pUl\n", in_client_uuid); rets = -ENOTTY; goto end; } } - dev_dbg(dev->dev, "Connect to FW Client ID = %d\n", - me_cl->client_id); - dev_dbg(dev->dev, "FW Client - Protocol Version = %d\n", - me_cl->props.protocol_version); - dev_dbg(dev->dev, "FW Client - Max Msg Len = %d\n", - me_cl->props.max_msg_length); + cl_dbg(dev, cl, "Connect to FW Client ID = %d\n", me_cl->client_id); + cl_dbg(dev, cl, "FW Client - Protocol Version = %d\n", me_cl->props.protocol_version); + cl_dbg(dev, cl, "FW Client - Max Msg Len = %d\n", me_cl->props.max_msg_length); /* prepare the output buffer */ client->max_msg_length = me_cl->props.max_msg_length; client->protocol_version = me_cl->props.protocol_version; - dev_dbg(dev->dev, "Can connect?\n"); + cl_dbg(dev, cl, "Can connect?\n"); rets = mei_cl_connect(cl, me_cl, file); + if (rets && cl->status == -EFAULT && + (dev->dev_state == MEI_DEV_RESETTING || + dev->dev_state == MEI_DEV_INIT_CLIENTS)) { + /* in link reset, wait for it completion */ + mutex_unlock(&dev->device_lock); + rets = wait_event_interruptible_timeout(dev->wait_dev_state, + dev->dev_state == MEI_DEV_ENABLED, + dev->timeouts.link_reset_wait); + mutex_lock(&dev->device_lock); + if (rets < 0) { + if (signal_pending(current)) + rets = -EINTR; + goto end; + } + if (dev->dev_state != MEI_DEV_ENABLED) { + rets = -ETIME; + goto end; + } + mei_me_cl_put(me_cl); + goto retry; + } + end: mei_me_cl_put(me_cl); return rets; @@ -457,11 +485,11 @@ end: /** * mei_vt_support_check - check if client support vtags * - * Locking: called under "dev->device_lock" lock - * * @dev: mei_device * @uuid: client UUID * + * Locking: called under "dev->device_lock" lock + * * Return: * 0 - supported * -ENOTTY - no such client @@ -477,7 +505,7 @@ static int 
mei_vt_support_check(struct mei_device *dev, const uuid_le *uuid) me_cl = mei_me_cl_by_uuid(dev, uuid); if (!me_cl) { - dev_dbg(dev->dev, "Cannot connect to FW Client UUID = %pUl\n", + dev_dbg(&dev->dev, "Cannot connect to FW Client UUID = %pUl\n", uuid); return -ENOTTY; } @@ -512,19 +540,19 @@ static int mei_ioctl_connect_vtag(struct file *file, cl = file->private_data; dev = cl->dev; - dev_dbg(dev->dev, "FW Client %pUl vtag %d\n", in_client_uuid, vtag); + cl_dbg(dev, cl, "FW Client %pUl vtag %d\n", in_client_uuid, vtag); switch (cl->state) { case MEI_FILE_DISCONNECTED: if (mei_cl_vtag_by_fp(cl, file) != vtag) { - dev_err(dev->dev, "reconnect with different vtag\n"); + cl_err(dev, cl, "reconnect with different vtag\n"); return -EINVAL; } break; case MEI_FILE_INITIALIZING: /* malicious connect from another thread may push vtag */ if (!IS_ERR(mei_cl_fp_by_vtag(cl, vtag))) { - dev_err(dev->dev, "vtag already filled\n"); + cl_err(dev, cl, "vtag already filled\n"); return -EINVAL; } @@ -543,7 +571,7 @@ static int mei_ioctl_connect_vtag(struct file *file, continue; /* replace cl with acquired one */ - dev_dbg(dev->dev, "replacing with existing cl\n"); + cl_dbg(dev, cl, "replacing with existing cl\n"); mei_cl_unlink(cl); kfree(cl); file->private_data = pos; @@ -584,8 +612,8 @@ static int mei_ioctl_connect_vtag(struct file *file, } /** - * mei_ioctl_client_notify_request - - * propagate event notification request to client + * mei_ioctl_client_notify_request - propagate event notification + * request to client * * @file: pointer to file structure * @request: 0 - disable, 1 - enable @@ -641,7 +669,7 @@ static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data) struct mei_cl *cl = file->private_data; struct mei_connect_client_data conn; struct mei_connect_client_data_vtag conn_vtag; - const uuid_le *cl_uuid; + uuid_le cl_uuid; struct mei_client *props; u8 vtag; u32 notify_get, notify_req; @@ -653,7 +681,7 @@ static long mei_ioctl(struct file *file, 
unsigned int cmd, unsigned long data) dev = cl->dev; - dev_dbg(dev->dev, "IOCTL cmd = 0x%x", cmd); + cl_dbg(dev, cl, "IOCTL cmd = 0x%x", cmd); mutex_lock(&dev->device_lock); if (dev->dev_state != MEI_DEV_ENABLED) { @@ -663,30 +691,30 @@ static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data) switch (cmd) { case IOCTL_MEI_CONNECT_CLIENT: - dev_dbg(dev->dev, ": IOCTL_MEI_CONNECT_CLIENT.\n"); + cl_dbg(dev, cl, "IOCTL_MEI_CONNECT_CLIENT\n"); if (copy_from_user(&conn, (char __user *)data, sizeof(conn))) { - dev_dbg(dev->dev, "failed to copy data from userland\n"); + cl_dbg(dev, cl, "failed to copy data from userland\n"); rets = -EFAULT; goto out; } - cl_uuid = &conn.in_client_uuid; + cl_uuid = conn.in_client_uuid; props = &conn.out_client_properties; vtag = 0; - rets = mei_vt_support_check(dev, cl_uuid); + rets = mei_vt_support_check(dev, &cl_uuid); if (rets == -ENOTTY) goto out; if (!rets) - rets = mei_ioctl_connect_vtag(file, cl_uuid, props, + rets = mei_ioctl_connect_vtag(file, &cl_uuid, props, vtag); else - rets = mei_ioctl_connect_client(file, cl_uuid, props); + rets = mei_ioctl_connect_client(file, &cl_uuid, props); if (rets) goto out; /* if all is ok, copying the data back to user. 
*/ if (copy_to_user((char __user *)data, &conn, sizeof(conn))) { - dev_dbg(dev->dev, "failed to copy data to userland\n"); + cl_dbg(dev, cl, "failed to copy data to userland\n"); rets = -EFAULT; goto out; } @@ -694,39 +722,39 @@ static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data) break; case IOCTL_MEI_CONNECT_CLIENT_VTAG: - dev_dbg(dev->dev, "IOCTL_MEI_CONNECT_CLIENT_VTAG\n"); + cl_dbg(dev, cl, "IOCTL_MEI_CONNECT_CLIENT_VTAG\n"); if (copy_from_user(&conn_vtag, (char __user *)data, sizeof(conn_vtag))) { - dev_dbg(dev->dev, "failed to copy data from userland\n"); + cl_dbg(dev, cl, "failed to copy data from userland\n"); rets = -EFAULT; goto out; } - cl_uuid = &conn_vtag.connect.in_client_uuid; + cl_uuid = conn_vtag.connect.in_client_uuid; props = &conn_vtag.out_client_properties; vtag = conn_vtag.connect.vtag; - rets = mei_vt_support_check(dev, cl_uuid); + rets = mei_vt_support_check(dev, &cl_uuid); if (rets == -EOPNOTSUPP) - dev_dbg(dev->dev, "FW Client %pUl does not support vtags\n", - cl_uuid); + cl_dbg(dev, cl, "FW Client %pUl does not support vtags\n", + &cl_uuid); if (rets) goto out; if (!vtag) { - dev_dbg(dev->dev, "vtag can't be zero\n"); + cl_dbg(dev, cl, "vtag can't be zero\n"); rets = -EINVAL; goto out; } - rets = mei_ioctl_connect_vtag(file, cl_uuid, props, vtag); + rets = mei_ioctl_connect_vtag(file, &cl_uuid, props, vtag); if (rets) goto out; /* if all is ok, copying the data back to user. 
*/ if (copy_to_user((char __user *)data, &conn_vtag, sizeof(conn_vtag))) { - dev_dbg(dev->dev, "failed to copy data to userland\n"); + cl_dbg(dev, cl, "failed to copy data to userland\n"); rets = -EFAULT; goto out; } @@ -734,10 +762,10 @@ static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data) break; case IOCTL_MEI_NOTIFY_SET: - dev_dbg(dev->dev, ": IOCTL_MEI_NOTIFY_SET.\n"); + cl_dbg(dev, cl, "IOCTL_MEI_NOTIFY_SET\n"); if (copy_from_user(¬ify_req, (char __user *)data, sizeof(notify_req))) { - dev_dbg(dev->dev, "failed to copy data from userland\n"); + cl_dbg(dev, cl, "failed to copy data from userland\n"); rets = -EFAULT; goto out; } @@ -745,15 +773,15 @@ static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data) break; case IOCTL_MEI_NOTIFY_GET: - dev_dbg(dev->dev, ": IOCTL_MEI_NOTIFY_GET.\n"); + cl_dbg(dev, cl, "IOCTL_MEI_NOTIFY_GET\n"); rets = mei_ioctl_client_notify_get(file, ¬ify_get); if (rets) goto out; - dev_dbg(dev->dev, "copy connect data to user\n"); + cl_dbg(dev, cl, "copy connect data to user\n"); if (copy_to_user((char __user *)data, ¬ify_get, sizeof(notify_get))) { - dev_dbg(dev->dev, "failed to copy data to userland\n"); + cl_dbg(dev, cl, "failed to copy data to userland\n"); rets = -EFAULT; goto out; @@ -1115,7 +1143,12 @@ void mei_set_devstate(struct mei_device *dev, enum mei_dev_state state) dev->dev_state = state; - clsdev = class_find_device_by_devt(mei_class, dev->cdev.dev); + wake_up_interruptible_all(&dev->wait_dev_state); + + if (!dev->cdev) + return; + + clsdev = class_find_device_by_devt(&mei_class, dev->cdev->dev); if (clsdev) { sysfs_notify(&clsdev->kobj, NULL, "dev_state"); put_device(clsdev); @@ -1173,7 +1206,6 @@ static const struct file_operations mei_fops = { .poll = mei_poll, .fsync = mei_fsync, .fasync = mei_fasync, - .llseek = no_llseek }; /** @@ -1192,7 +1224,7 @@ static int mei_minor_get(struct mei_device *dev) if (ret >= 0) dev->minor = ret; else if (ret == -ENOSPC) - 
dev_err(dev->dev, "too many mei devices\n"); + dev_err(&dev->dev, "too many mei devices\n"); mutex_unlock(&mei_minor_lock); return ret; @@ -1201,56 +1233,82 @@ static int mei_minor_get(struct mei_device *dev) /** * mei_minor_free - mark device minor number as free * - * @dev: device pointer + * @minor: minor number to free */ -static void mei_minor_free(struct mei_device *dev) +static void mei_minor_free(int minor) { mutex_lock(&mei_minor_lock); - idr_remove(&mei_idr, dev->minor); + idr_remove(&mei_idr, minor); mutex_unlock(&mei_minor_lock); } +static void mei_device_release(struct device *dev) +{ + kfree(dev_get_drvdata(dev)); +} + int mei_register(struct mei_device *dev, struct device *parent) { - struct device *clsdev; /* class device */ int ret, devno; + int minor; ret = mei_minor_get(dev); if (ret < 0) return ret; + minor = dev->minor; + /* Fill in the data structures */ devno = MKDEV(MAJOR(mei_devt), dev->minor); - cdev_init(&dev->cdev, &mei_fops); - dev->cdev.owner = parent->driver->owner; + + device_initialize(&dev->dev); + dev->dev.devt = devno; + dev->dev.class = &mei_class; + dev->dev.parent = parent; + dev->dev.groups = mei_groups; + dev->dev.release = mei_device_release; + dev_set_drvdata(&dev->dev, dev); + + dev->cdev = cdev_alloc(); + if (!dev->cdev) { + ret = -ENOMEM; + goto err; + } + dev->cdev->ops = &mei_fops; + dev->cdev->owner = parent->driver->owner; + cdev_set_parent(dev->cdev, &dev->dev.kobj); /* Add the device */ - ret = cdev_add(&dev->cdev, devno, 1); + ret = cdev_add(dev->cdev, devno, 1); if (ret) { - dev_err(parent, "unable to add device %d:%d\n", + dev_err(parent, "unable to add cdev for device %d:%d\n", MAJOR(mei_devt), dev->minor); - goto err_dev_add; + goto err_del_cdev; } - clsdev = device_create_with_groups(mei_class, parent, devno, - dev, mei_groups, - "mei%d", dev->minor); + ret = dev_set_name(&dev->dev, "mei%d", dev->minor); + if (ret) { + dev_err(parent, "unable to set name to device %d:%d ret = %d\n", + MAJOR(mei_devt), 
dev->minor, ret); + goto err_del_cdev; + } - if (IS_ERR(clsdev)) { - dev_err(parent, "unable to create device %d:%d\n", - MAJOR(mei_devt), dev->minor); - ret = PTR_ERR(clsdev); - goto err_dev_create; + ret = device_add(&dev->dev); + if (ret) { + dev_err(parent, "unable to add device %d:%d ret = %d\n", + MAJOR(mei_devt), dev->minor, ret); + goto err_del_cdev; } - mei_dbgfs_register(dev, dev_name(clsdev)); + mei_dbgfs_register(dev, dev_name(&dev->dev)); return 0; -err_dev_create: - cdev_del(&dev->cdev); -err_dev_add: - mei_minor_free(dev); +err_del_cdev: + cdev_del(dev->cdev); +err: + put_device(&dev->dev); + mei_minor_free(minor); return ret; } EXPORT_SYMBOL_GPL(mei_register); @@ -1258,15 +1316,16 @@ EXPORT_SYMBOL_GPL(mei_register); void mei_deregister(struct mei_device *dev) { int devno; + int minor = dev->minor; - devno = dev->cdev.dev; - cdev_del(&dev->cdev); + devno = dev->cdev->dev; + cdev_del(dev->cdev); mei_dbgfs_deregister(dev); - device_destroy(mei_class, devno); + device_destroy(&mei_class, devno); - mei_minor_free(dev); + mei_minor_free(minor); } EXPORT_SYMBOL_GPL(mei_deregister); @@ -1274,12 +1333,9 @@ static int __init mei_init(void) { int ret; - mei_class = class_create("mei"); - if (IS_ERR(mei_class)) { - pr_err("couldn't create class\n"); - ret = PTR_ERR(mei_class); - goto err; - } + ret = class_register(&mei_class); + if (ret) + return ret; ret = alloc_chrdev_region(&mei_devt, 0, MEI_MAX_DEVS, "mei"); if (ret < 0) { @@ -1298,15 +1354,14 @@ static int __init mei_init(void) err_chrdev: unregister_chrdev_region(mei_devt, MEI_MAX_DEVS); err_class: - class_destroy(mei_class); -err: + class_unregister(&mei_class); return ret; } static void __exit mei_exit(void) { unregister_chrdev_region(mei_devt, MEI_MAX_DEVS); - class_destroy(mei_class); + class_unregister(&mei_class); mei_cl_bus_exit(); } diff --git a/drivers/misc/mei/mei-trace.h b/drivers/misc/mei/mei-trace.h index fe46ff2b9d69..5312edbf5190 100644 --- a/drivers/misc/mei/mei-trace.h +++ 
b/drivers/misc/mei/mei-trace.h @@ -26,7 +26,7 @@ TRACE_EVENT(mei_reg_read, __field(u32, val) ), TP_fast_assign( - __assign_str(dev, dev_name(dev)); + __assign_str(dev); __entry->reg = reg; __entry->offs = offs; __entry->val = val; @@ -45,7 +45,7 @@ TRACE_EVENT(mei_reg_write, __field(u32, val) ), TP_fast_assign( - __assign_str(dev, dev_name(dev)); + __assign_str(dev); __entry->reg = reg; __entry->offs = offs; __entry->val = val; @@ -64,7 +64,7 @@ TRACE_EVENT(mei_pci_cfg_read, __field(u32, val) ), TP_fast_assign( - __assign_str(dev, dev_name(dev)); + __assign_str(dev); __entry->reg = reg; __entry->offs = offs; __entry->val = val; diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h index 895011b7a0bf..0bf8d552c3ea 100644 --- a/drivers/misc/mei/mei_dev.h +++ b/drivers/misc/mei/mei_dev.h @@ -57,7 +57,8 @@ enum file_state { /* MEI device states */ enum mei_dev_state { - MEI_DEV_INITIALIZING = 0, + MEI_DEV_UNINITIALIZED = 0, + MEI_DEV_INITIALIZING, MEI_DEV_INIT_CLIENTS, MEI_DEV_ENABLED, MEI_DEV_RESETTING, @@ -70,9 +71,9 @@ enum mei_dev_state { /** * enum mei_dev_pxp_mode - MEI PXP mode state * - * @MEI_DEV_PXP_DEFAULT: PCH based device, no initailization required + * @MEI_DEV_PXP_DEFAULT: PCH based device, no initialization required * @MEI_DEV_PXP_INIT: device requires initialization, send setup message to firmware - * @MEI_DEV_PXP_SETUP: device is in setup stage, waiting for firmware repsonse + * @MEI_DEV_PXP_SETUP: device is in setup stage, waiting for firmware response * @MEI_DEV_PXP_READY: device initialized */ enum mei_dev_pxp_mode { @@ -82,6 +83,19 @@ enum mei_dev_pxp_mode { MEI_DEV_PXP_READY = 3, }; +/** + * enum mei_dev_reset_to_pxp - reset to PXP mode performed + * + * @MEI_DEV_RESET_TO_PXP_DEFAULT: before reset + * @MEI_DEV_RESET_TO_PXP_PERFORMED: reset performed + * @MEI_DEV_RESET_TO_PXP_DONE: reset processed + */ +enum mei_dev_reset_to_pxp { + MEI_DEV_RESET_TO_PXP_DEFAULT = 0, + MEI_DEV_RESET_TO_PXP_PERFORMED = 1, + MEI_DEV_RESET_TO_PXP_DONE = 
2, +}; + const char *mei_dev_state_str(int state); enum mei_file_transaction_states { @@ -452,13 +466,15 @@ struct mei_dev_timeouts { unsigned int d0i3; /* D0i3 set/unset max response time, in jiffies */ unsigned long hbm; /* HBM operation timeout, in jiffies */ unsigned long mkhi_recv; /* receive timeout, in jiffies */ + unsigned long link_reset_wait; /* link reset wait timeout, in jiffies */ }; /** * struct mei_device - MEI private device struct * - * @dev : device on a bus - * @cdev : character device + * @parent : device on a bus + * @dev : device object + * @cdev : character device pointer * @minor : minor number allocated for device * * @write_list : write pending list @@ -481,6 +497,7 @@ struct mei_dev_timeouts { * * @reset_count : number of consecutive resets * @dev_state : device state + * @wait_dev_state: wait queue for device state change * @hbm_state : state of host bus message protocol * @pxp_mode : PXP device mode * @init_clients_timer : HBM init handshake timeout @@ -512,6 +529,7 @@ struct mei_dev_timeouts { * @fw_ver : FW versions * * @fw_f_fw_ver_supported : fw feature: fw version supported + * @fw_ver_received : fw version received * * @me_clients_rwsem: rw lock over me_clients list * @me_clients : list of FW clients @@ -533,12 +551,15 @@ struct mei_dev_timeouts { * * @dbgfs_dir : debugfs mei root directory * + * @gsc_reset_to_pxp : state of reset to the PXP mode + * * @ops: : hw specific operations * @hw : hw specific data */ struct mei_device { - struct device *dev; - struct cdev cdev; + struct device *parent; + struct device dev; + struct cdev *cdev; int minor; struct list_head write_list; @@ -566,6 +587,7 @@ struct mei_device { */ unsigned long reset_count; enum mei_dev_state dev_state; + wait_queue_head_t wait_dev_state; enum mei_hbm_state hbm_state; enum mei_dev_pxp_mode pxp_mode; u16 init_clients_timer; @@ -604,6 +626,7 @@ struct mei_device { struct mei_fw_version fw_ver[MEI_MAX_FW_VER_BLOCKS]; unsigned int fw_f_fw_ver_supported:1; + 
unsigned int fw_ver_received:1; struct rw_semaphore me_clients_rwsem; struct list_head me_clients; @@ -628,6 +651,8 @@ struct mei_device { struct dentry *dbgfs_dir; #endif /* CONFIG_DEBUG_FS */ + enum mei_dev_reset_to_pxp gsc_reset_to_pxp; + const struct mei_hw_ops *ops; char hw[] __aligned(sizeof(void *)); }; @@ -678,7 +703,7 @@ static inline u32 mei_slots2data(int slots) * mei init function prototypes */ void mei_device_init(struct mei_device *dev, - struct device *device, + struct device *parent, bool slow_fw, const struct mei_hw_ops *hw_ops); int mei_reset(struct mei_device *dev); @@ -872,5 +897,29 @@ static inline ssize_t mei_fw_status_str(struct mei_device *dev, return ret; } +/** + * kind_is_gsc - checks whether the device is gsc + * + * @dev: the device structure + * + * Return: whether the device is gsc + */ +static inline bool kind_is_gsc(struct mei_device *dev) +{ + /* check kind for NULL because it may be not set, like at the fist call to hw_start */ + return dev->kind && (strcmp(dev->kind, "gsc") == 0); +} +/** + * kind_is_gscfi - checks whether the device is gscfi + * + * @dev: the device structure + * + * Return: whether the device is gscfi + */ +static inline bool kind_is_gscfi(struct mei_device *dev) +{ + /* check kind for NULL because it may be not set, like at the fist call to hw_start */ + return dev->kind && (strcmp(dev->kind, "gscfi") == 0); +} #endif diff --git a/drivers/misc/mei/mei_lb.c b/drivers/misc/mei/mei_lb.c new file mode 100644 index 000000000000..78717ee8ac9a --- /dev/null +++ b/drivers/misc/mei/mei_lb.c @@ -0,0 +1,311 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2025 Intel Corporation + */ + +#include <linux/component.h> +#include <linux/mei_cl_bus.h> +#include <linux/module.h> +#include <linux/overflow.h> +#include <linux/pci.h> +#include <linux/slab.h> +#include <linux/uuid.h> + +#include <drm/intel/i915_component.h> +#include <drm/intel/intel_lb_mei_interface.h> + +#include "mkhi.h" + +/** + * DOC: Late Binding 
Firmware Update/Upload + * + * Late Binding is a firmware update/upload mechanism that allows configuration + * payloads to be securely delivered and applied at runtime, rather than + * being embedded in the system firmware image (e.g., IFWI or SPI flash). + * + * This mechanism is used to update device-level configuration such as: + * - Fan controller + * - Voltage regulator (VR) + * + * Key Characteristics: + * --------------------- + * - Runtime Delivery: + * Firmware blobs are loaded by the host driver (e.g., Xe KMD) + * after the GPU or SoC has booted. + * + * - Secure and Authenticated: + * All payloads are signed and verified by the authentication firmware. + * + * - No Firmware Flashing Required: + * Updates are applied in volatile memory and do not require SPI flash + * modification or system reboot. + * + * - Re-entrant: + * Multiple updates of the same or different types can be applied + * sequentially within a single boot session. + * + * - Version Controlled: + * Each payload includes version and security version number (SVN) + * metadata to support anti-rollback enforcement. + * + * Upload Flow: + * ------------ + * 1. Host driver (KMD or user-space tool) loads the late binding firmware. + * 2. Firmware is passed to the MEI interface and forwarded to + * authentication firmware. + * 3. Authentication firmware authenticates the payload and extracts + * command and data arrays. + * 4. Authentication firmware delivers the configuration to PUnit/PCODE. + * 5. Status is returned back to the host via MEI. + */ + +#define INTEL_LB_CMD 0x12 +#define INTEL_LB_RSP (INTEL_LB_CMD | 0x80) + +#define INTEL_LB_SEND_TIMEOUT_MSEC 3000 +#define INTEL_LB_RECV_TIMEOUT_MSEC 3000 + +/** + * struct mei_lb_req - Late Binding request structure + * @header: MKHI message header (see struct mkhi_msg_hdr) + * @type: Type of the Late Binding payload + * @flags: Flags to be passed to the authentication firmware (e.g. 
%INTEL_LB_FLAGS_IS_PERSISTENT) + * @reserved: Reserved for future use by authentication firmware, must be set to 0 + * @payload_size: Size of the payload data in bytes + * @payload: Payload data to be sent to the authentication firmware + */ +struct mei_lb_req { + struct mkhi_msg_hdr header; + __le32 type; + __le32 flags; + __le32 reserved[2]; + __le32 payload_size; + u8 payload[] __counted_by(payload_size); +} __packed; + +/** + * struct mei_lb_rsp - Late Binding response structure + * @header: MKHI message header (see struct mkhi_msg_hdr) + * @type: Type of the Late Binding payload + * @reserved: Reserved for future use by authentication firmware, must be set to 0 + * @status: Status returned by authentication firmware (see &enum intel_lb_status) + */ +struct mei_lb_rsp { + struct mkhi_msg_hdr header; + __le32 type; + __le32 reserved[2]; + __le32 status; +} __packed; + +static bool mei_lb_check_response(const struct device *dev, ssize_t bytes, + struct mei_lb_rsp *rsp) +{ + /* + * Received message size may be smaller than the full message size when + * reply contains only MKHI header with result field set to the error code. + * Check the header size and content first to output exact error, if needed, + * and then process to the whole message. 
+ */ + if (bytes < sizeof(rsp->header)) { + dev_err(dev, "Received less than header size from the firmware: %zd < %zu\n", + bytes, sizeof(rsp->header)); + return false; + } + if (rsp->header.group_id != MKHI_GROUP_ID_GFX) { + dev_err(dev, "Mismatch group id: 0x%x instead of 0x%x\n", + rsp->header.group_id, MKHI_GROUP_ID_GFX); + return false; + } + if (rsp->header.command != INTEL_LB_RSP) { + dev_err(dev, "Mismatch command: 0x%x instead of 0x%x\n", + rsp->header.command, INTEL_LB_RSP); + return false; + } + if (rsp->header.result) { + dev_err(dev, "Error in result: 0x%x\n", rsp->header.result); + return false; + } + if (bytes < sizeof(*rsp)) { + dev_err(dev, "Received less than message size from the firmware: %zd < %zu\n", + bytes, sizeof(*rsp)); + return false; + } + + return true; +} + +static int mei_lb_push_payload(struct device *dev, u32 type, u32 flags, + const void *payload, size_t payload_size) +{ + struct mei_cl_device *cldev; + struct mei_lb_req *req = NULL; + struct mei_lb_rsp rsp; + size_t req_size; + ssize_t bytes; + int ret; + + cldev = to_mei_cl_device(dev); + + ret = mei_cldev_enable(cldev); + if (ret) { + dev_dbg(dev, "Failed to enable firmware client. %d\n", ret); + return ret; + } + + req_size = struct_size(req, payload, payload_size); + if (req_size > mei_cldev_mtu(cldev)) { + dev_err(dev, "Payload is too big: %zu\n", payload_size); + ret = -EMSGSIZE; + goto end; + } + + req = kmalloc(req_size, GFP_KERNEL); + if (!req) { + ret = -ENOMEM; + goto end; + } + + req->header.group_id = MKHI_GROUP_ID_GFX; + req->header.command = INTEL_LB_CMD; + req->type = cpu_to_le32(type); + req->flags = cpu_to_le32(flags); + req->reserved[0] = 0; + req->reserved[1] = 0; + req->payload_size = cpu_to_le32(payload_size); + memcpy(req->payload, payload, payload_size); + + bytes = mei_cldev_send_timeout(cldev, (u8 *)req, req_size, + INTEL_LB_SEND_TIMEOUT_MSEC); + if (bytes < 0) { + dev_err(dev, "Failed to send late binding request to firmware. 
%zd\n", bytes); + ret = bytes; + goto end; + } + + bytes = mei_cldev_recv_timeout(cldev, (u8 *)&rsp, sizeof(rsp), + INTEL_LB_RECV_TIMEOUT_MSEC); + if (bytes < 0) { + dev_err(dev, "Failed to receive late binding reply from MEI firmware. %zd\n", + bytes); + ret = bytes; + goto end; + } + if (!mei_lb_check_response(dev, bytes, &rsp)) { + dev_err(dev, "Bad response from the firmware. header: %02x %02x %02x %02x\n", + rsp.header.group_id, rsp.header.command, + rsp.header.reserved, rsp.header.result); + ret = -EPROTO; + goto end; + } + + dev_dbg(dev, "status = %u\n", le32_to_cpu(rsp.status)); + ret = (int)le32_to_cpu(rsp.status); +end: + mei_cldev_disable(cldev); + kfree(req); + return ret; +} + +static const struct intel_lb_component_ops mei_lb_ops = { + .push_payload = mei_lb_push_payload, +}; + +static int mei_lb_component_master_bind(struct device *dev) +{ + return component_bind_all(dev, (void *)&mei_lb_ops); +} + +static void mei_lb_component_master_unbind(struct device *dev) +{ + component_unbind_all(dev, (void *)&mei_lb_ops); +} + +static const struct component_master_ops mei_lb_component_master_ops = { + .bind = mei_lb_component_master_bind, + .unbind = mei_lb_component_master_unbind, +}; + +static int mei_lb_component_match(struct device *dev, int subcomponent, + void *data) +{ + /* + * This function checks if requester is Intel %PCI_CLASS_DISPLAY_VGA or + * %PCI_CLASS_DISPLAY_OTHER device, and checks if the requester is the + * grand parent of mei_if i.e. 
late bind MEI device + */ + struct device *base = data; + struct pci_dev *pdev; + + if (!dev) + return 0; + + if (!dev_is_pci(dev)) + return 0; + + pdev = to_pci_dev(dev); + + if (pdev->vendor != PCI_VENDOR_ID_INTEL) + return 0; + + if (pdev->class != (PCI_CLASS_DISPLAY_VGA << 8) && + pdev->class != (PCI_CLASS_DISPLAY_OTHER << 8)) + return 0; + + if (subcomponent != INTEL_COMPONENT_LB) + return 0; + + base = base->parent; + if (!base) /* mei device */ + return 0; + + base = base->parent; /* pci device */ + + return !!base && dev == base; +} + +static int mei_lb_probe(struct mei_cl_device *cldev, + const struct mei_cl_device_id *id) +{ + struct component_match *master_match = NULL; + int ret; + + component_match_add_typed(&cldev->dev, &master_match, + mei_lb_component_match, &cldev->dev); + if (IS_ERR_OR_NULL(master_match)) + return -ENOMEM; + + ret = component_master_add_with_match(&cldev->dev, + &mei_lb_component_master_ops, + master_match); + if (ret < 0) + dev_err(&cldev->dev, "Failed to add late binding master component. 
%d\n", ret); + + return ret; +} + +static void mei_lb_remove(struct mei_cl_device *cldev) +{ + component_master_del(&cldev->dev, &mei_lb_component_master_ops); +} + +#define MEI_GUID_MKHI UUID_LE(0xe2c2afa2, 0x3817, 0x4d19, \ + 0x9d, 0x95, 0x6, 0xb1, 0x6b, 0x58, 0x8a, 0x5d) + +static const struct mei_cl_device_id mei_lb_tbl[] = { + { .uuid = MEI_GUID_MKHI, .version = MEI_CL_VERSION_ANY }, + { } +}; +MODULE_DEVICE_TABLE(mei, mei_lb_tbl); + +static struct mei_cl_driver mei_lb_driver = { + .id_table = mei_lb_tbl, + .name = "mei_lb", + .probe = mei_lb_probe, + .remove = mei_lb_remove, +}; + +module_mei_cl_driver(mei_lb_driver); + +MODULE_AUTHOR("Intel Corporation"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("MEI Late Binding Firmware Update/Upload"); diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index 676d566f38dd..73cad914be9f 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c @@ -116,9 +116,18 @@ static const struct pci_device_id mei_me_pci_tbl[] = { {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_P, MEI_ME_PCH15_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_N, MEI_ME_PCH15_CFG)}, - {MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_SPS_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_MTL_M, MEI_ME_PCH15_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ARL_S, MEI_ME_PCH15_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ARL_H, MEI_ME_PCH15_CFG)}, + + {MEI_PCI_DEVICE(MEI_DEV_ID_LNL_M, MEI_ME_PCH15_CFG)}, + + {MEI_PCI_DEVICE(MEI_DEV_ID_PTL_H, MEI_ME_PCH15_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_PTL_P, MEI_ME_PCH15_CFG)}, + + {MEI_PCI_DEVICE(MEI_DEV_ID_WCL_P, MEI_ME_PCH15_CFG)}, /* required last entry */ {0, } @@ -136,7 +145,7 @@ static inline void mei_me_unset_pm_domain(struct mei_device *dev) {} static int mei_me_read_fws(const struct mei_device *dev, int where, u32 *val) { - struct pci_dev *pdev = to_pci_dev(dev->dev); + struct pci_dev *pdev = to_pci_dev(dev->parent); return pci_read_config_dword(pdev, where, val); } @@ -214,6 +223,10 @@ 
static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw->mem_addr = pcim_iomap_table(pdev)[0]; hw->read_fws = mei_me_read_fws; + err = mei_register(dev, &pdev->dev); + if (err) + goto end; + pci_enable_msi(pdev); hw->irq = pdev->irq; @@ -228,22 +241,18 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) { dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n", pdev->irq); - goto end; + goto deregister; } if (mei_start(dev)) { dev_err(&pdev->dev, "init hw failure.\n"); err = -ENODEV; - goto release_irq; + goto deregister; } pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_ME_RPM_TIMEOUT); pm_runtime_use_autosuspend(&pdev->dev); - err = mei_register(dev, &pdev->dev); - if (err) - goto stop; - pci_set_drvdata(pdev, dev); /* @@ -273,12 +282,11 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; -stop: - mei_stop(dev); -release_irq: +deregister: mei_cancel_work(dev); mei_disable_interrupts(dev); free_irq(pdev->irq, dev); + mei_deregister(dev); end: dev_err(&pdev->dev, "initialization failed.\n"); return err; @@ -295,11 +303,7 @@ end: */ static void mei_me_shutdown(struct pci_dev *pdev) { - struct mei_device *dev; - - dev = pci_get_drvdata(pdev); - if (!dev) - return; + struct mei_device *dev = pci_get_drvdata(pdev); dev_dbg(&pdev->dev, "shutdown\n"); mei_stop(dev); @@ -320,11 +324,7 @@ static void mei_me_shutdown(struct pci_dev *pdev) */ static void mei_me_remove(struct pci_dev *pdev) { - struct mei_device *dev; - - dev = pci_get_drvdata(pdev); - if (!dev) - return; + struct mei_device *dev = pci_get_drvdata(pdev); if (mei_pg_is_enabled(dev)) pm_runtime_get_noresume(&pdev->dev); @@ -353,9 +353,6 @@ static int mei_me_pci_suspend(struct device *device) struct pci_dev *pdev = to_pci_dev(device); struct mei_device *dev = pci_get_drvdata(pdev); - if (!dev) - return -ENODEV; - dev_dbg(&pdev->dev, "suspend\n"); mei_stop(dev); @@ -371,14 +368,10 @@ static int 
mei_me_pci_suspend(struct device *device) static int mei_me_pci_resume(struct device *device) { struct pci_dev *pdev = to_pci_dev(device); - struct mei_device *dev; + struct mei_device *dev = pci_get_drvdata(pdev); unsigned int irqflags; int err; - dev = pci_get_drvdata(pdev); - if (!dev) - return -ENODEV; - pci_enable_msi(pdev); irqflags = pci_dev_msi_enabled(pdev) ? IRQF_ONESHOT : IRQF_SHARED; @@ -396,8 +389,10 @@ static int mei_me_pci_resume(struct device *device) } err = mei_restart(dev); - if (err) + if (err) { + free_irq(pdev->irq, dev); return err; + } /* Start timer if stopped in suspend */ schedule_delayed_work(&dev->timer_work, HZ); @@ -419,13 +414,10 @@ static void mei_me_pci_complete(struct device *device) #ifdef CONFIG_PM static int mei_me_pm_runtime_idle(struct device *device) { - struct mei_device *dev; + struct mei_device *dev = dev_get_drvdata(device); dev_dbg(device, "rpm: me: runtime_idle\n"); - dev = dev_get_drvdata(device); - if (!dev) - return -ENODEV; if (mei_write_is_idle(dev)) pm_runtime_autosuspend(device); @@ -434,15 +426,11 @@ static int mei_me_pm_runtime_idle(struct device *device) static int mei_me_pm_runtime_suspend(struct device *device) { - struct mei_device *dev; + struct mei_device *dev = dev_get_drvdata(device); int ret; dev_dbg(device, "rpm: me: runtime suspend\n"); - dev = dev_get_drvdata(device); - if (!dev) - return -ENODEV; - mutex_lock(&dev->device_lock); if (mei_write_is_idle(dev)) @@ -462,15 +450,11 @@ static int mei_me_pm_runtime_suspend(struct device *device) static int mei_me_pm_runtime_resume(struct device *device) { - struct mei_device *dev; + struct mei_device *dev = dev_get_drvdata(device); int ret; dev_dbg(device, "rpm: me: runtime resume\n"); - dev = dev_get_drvdata(device); - if (!dev) - return -ENODEV; - mutex_lock(&dev->device_lock); ret = mei_me_pg_exit_sync(dev); @@ -492,7 +476,7 @@ static int mei_me_pm_runtime_resume(struct device *device) */ static inline void mei_me_set_pm_domain(struct mei_device *dev) { 
- struct pci_dev *pdev = to_pci_dev(dev->dev); + struct pci_dev *pdev = to_pci_dev(dev->parent); if (pdev->dev.bus && pdev->dev.bus->pm) { dev->pg_domain.ops = *pdev->dev.bus->pm; @@ -513,7 +497,7 @@ static inline void mei_me_set_pm_domain(struct mei_device *dev) static inline void mei_me_unset_pm_domain(struct mei_device *dev) { /* stop using pm callbacks if any */ - dev_pm_domain_set(dev->dev, NULL); + dev_pm_domain_set(dev->parent, NULL); } static const struct dev_pm_ops mei_me_pm_ops = { diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c index fa20d9a27813..98d1bc2c7f4b 100644 --- a/drivers/misc/mei/pci-txe.c +++ b/drivers/misc/mei/pci-txe.c @@ -87,6 +87,10 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw = to_txe_hw(dev); hw->mem_addr = pcim_iomap_table(pdev); + err = mei_register(dev, &pdev->dev); + if (err) + goto end; + pci_enable_msi(pdev); /* clear spurious interrupts */ @@ -106,22 +110,18 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) { dev_err(&pdev->dev, "mei: request_threaded_irq failure. 
irq = %d\n", pdev->irq); - goto end; + goto deregister; } if (mei_start(dev)) { dev_err(&pdev->dev, "init hw failure.\n"); err = -ENODEV; - goto release_irq; + goto deregister; } pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_TXI_RPM_TIMEOUT); pm_runtime_use_autosuspend(&pdev->dev); - err = mei_register(dev, &pdev->dev); - if (err) - goto stop; - pci_set_drvdata(pdev, dev); /* @@ -144,12 +144,11 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; -stop: - mei_stop(dev); -release_irq: +deregister: mei_cancel_work(dev); mei_disable_interrupts(dev); free_irq(pdev->irq, dev); + mei_deregister(dev); end: dev_err(&pdev->dev, "initialization failed.\n"); return err; @@ -166,11 +165,7 @@ end: */ static void mei_txe_shutdown(struct pci_dev *pdev) { - struct mei_device *dev; - - dev = pci_get_drvdata(pdev); - if (!dev) - return; + struct mei_device *dev = pci_get_drvdata(pdev); dev_dbg(&pdev->dev, "shutdown\n"); mei_stop(dev); @@ -191,13 +186,7 @@ static void mei_txe_shutdown(struct pci_dev *pdev) */ static void mei_txe_remove(struct pci_dev *pdev) { - struct mei_device *dev; - - dev = pci_get_drvdata(pdev); - if (!dev) { - dev_err(&pdev->dev, "mei: dev == NULL\n"); - return; - } + struct mei_device *dev = pci_get_drvdata(pdev); pm_runtime_get_noresume(&pdev->dev); @@ -218,9 +207,6 @@ static int mei_txe_pci_suspend(struct device *device) struct pci_dev *pdev = to_pci_dev(device); struct mei_device *dev = pci_get_drvdata(pdev); - if (!dev) - return -ENODEV; - dev_dbg(&pdev->dev, "suspend\n"); mei_stop(dev); @@ -236,13 +222,9 @@ static int mei_txe_pci_suspend(struct device *device) static int mei_txe_pci_resume(struct device *device) { struct pci_dev *pdev = to_pci_dev(device); - struct mei_device *dev; + struct mei_device *dev = pci_get_drvdata(pdev); int err; - dev = pci_get_drvdata(pdev); - if (!dev) - return -ENODEV; - pci_enable_msi(pdev); mei_clear_interrupts(dev); @@ -273,13 +255,10 @@ static int mei_txe_pci_resume(struct 
device *device) #ifdef CONFIG_PM static int mei_txe_pm_runtime_idle(struct device *device) { - struct mei_device *dev; + struct mei_device *dev = dev_get_drvdata(device); dev_dbg(device, "rpm: txe: runtime_idle\n"); - dev = dev_get_drvdata(device); - if (!dev) - return -ENODEV; if (mei_write_is_idle(dev)) pm_runtime_autosuspend(device); @@ -287,15 +266,11 @@ static int mei_txe_pm_runtime_idle(struct device *device) } static int mei_txe_pm_runtime_suspend(struct device *device) { - struct mei_device *dev; + struct mei_device *dev = dev_get_drvdata(device); int ret; dev_dbg(device, "rpm: txe: runtime suspend\n"); - dev = dev_get_drvdata(device); - if (!dev) - return -ENODEV; - mutex_lock(&dev->device_lock); if (mei_write_is_idle(dev)) @@ -317,15 +292,11 @@ static int mei_txe_pm_runtime_suspend(struct device *device) static int mei_txe_pm_runtime_resume(struct device *device) { - struct mei_device *dev; + struct mei_device *dev = dev_get_drvdata(device); int ret; dev_dbg(device, "rpm: txe: runtime resume\n"); - dev = dev_get_drvdata(device); - if (!dev) - return -ENODEV; - mutex_lock(&dev->device_lock); mei_enable_interrupts(dev); @@ -349,7 +320,7 @@ static int mei_txe_pm_runtime_resume(struct device *device) */ static inline void mei_txe_set_pm_domain(struct mei_device *dev) { - struct pci_dev *pdev = to_pci_dev(dev->dev); + struct pci_dev *pdev = to_pci_dev(dev->parent); if (pdev->dev.bus && pdev->dev.bus->pm) { dev->pg_domain.ops = *pdev->dev.bus->pm; @@ -370,7 +341,7 @@ static inline void mei_txe_set_pm_domain(struct mei_device *dev) static inline void mei_txe_unset_pm_domain(struct mei_device *dev) { /* stop using pm callbacks if any */ - dev_pm_domain_set(dev->dev, NULL); + dev_pm_domain_set(dev->parent, NULL); } static const struct dev_pm_ops mei_txe_pm_ops = { diff --git a/drivers/misc/mei/platform-vsc.c b/drivers/misc/mei/platform-vsc.c new file mode 100644 index 000000000000..9787b9cee71c --- /dev/null +++ b/drivers/misc/mei/platform-vsc.c @@ -0,0 +1,459 @@ 
+// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2023, Intel Corporation. + * Intel Visual Sensing Controller Interface Linux driver + */ + +#include <linux/align.h> +#include <linux/cache.h> +#include <linux/cleanup.h> +#include <linux/iopoll.h> +#include <linux/list.h> +#include <linux/mei.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/overflow.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/timekeeping.h> +#include <linux/types.h> + +#include <asm-generic/bug.h> +#include <linux/unaligned.h> + +#include "mei_dev.h" +#include "vsc-tp.h" + +#define MEI_VSC_DRV_NAME "intel_vsc" + +#define MEI_VSC_MAX_MSG_SIZE 512 + +#define MEI_VSC_POLL_DELAY_US (100 * USEC_PER_MSEC) +#define MEI_VSC_POLL_TIMEOUT_US (400 * USEC_PER_MSEC) + +#define mei_dev_to_vsc_hw(dev) ((struct mei_vsc_hw *)((dev)->hw)) + +struct mei_vsc_host_timestamp { + u64 realtime; + u64 boottime; +}; + +struct mei_vsc_hw { + struct vsc_tp *tp; + + bool fw_ready; + bool host_ready; + + atomic_t write_lock_cnt; + + u32 rx_len; + u32 rx_hdr; + + /* buffer for tx */ + char tx_buf[MEI_VSC_MAX_MSG_SIZE + sizeof(struct mei_msg_hdr)] ____cacheline_aligned; + /* buffer for rx */ + char rx_buf[MEI_VSC_MAX_MSG_SIZE + sizeof(struct mei_msg_hdr)] ____cacheline_aligned; +}; + +static int mei_vsc_read_helper(struct mei_vsc_hw *hw, u8 *buf, + u32 max_len) +{ + struct mei_vsc_host_timestamp ts = { + .realtime = ktime_to_ns(ktime_get_real()), + .boottime = ktime_to_ns(ktime_get_boottime()), + }; + + return vsc_tp_xfer(hw->tp, VSC_TP_CMD_READ, &ts, sizeof(ts), + buf, max_len); +} + +static int mei_vsc_write_helper(struct mei_vsc_hw *hw, u8 *buf, u32 len) +{ + u8 status; + + return vsc_tp_xfer(hw->tp, VSC_TP_CMD_WRITE, buf, len, &status, + sizeof(status)); +} + +static int mei_vsc_fw_status(struct mei_device *mei_dev, + struct mei_fw_status *fw_status) +{ + if (!fw_status) + return -EINVAL; + + fw_status->count = 0; + + return 0; +} + +static 
inline enum mei_pg_state mei_vsc_pg_state(struct mei_device *mei_dev) +{ + return MEI_PG_OFF; +} + +static void mei_vsc_intr_enable(struct mei_device *mei_dev) +{ + struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev); + + vsc_tp_intr_enable(hw->tp); +} + +static void mei_vsc_intr_disable(struct mei_device *mei_dev) +{ + struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev); + + vsc_tp_intr_disable(hw->tp); +} + +/* mei framework requires this ops */ +static void mei_vsc_intr_clear(struct mei_device *mei_dev) +{ +} + +/* wait for pending irq handler */ +static void mei_vsc_synchronize_irq(struct mei_device *mei_dev) +{ + struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev); + + vsc_tp_intr_synchronize(hw->tp); +} + +static int mei_vsc_hw_config(struct mei_device *mei_dev) +{ + return 0; +} + +static bool mei_vsc_host_is_ready(struct mei_device *mei_dev) +{ + struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev); + + return hw->host_ready; +} + +static bool mei_vsc_hw_is_ready(struct mei_device *mei_dev) +{ + struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev); + + return hw->fw_ready; +} + +static int mei_vsc_hw_start(struct mei_device *mei_dev) +{ + struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev); + int ret, rlen; + u8 buf; + + hw->host_ready = true; + + vsc_tp_intr_enable(hw->tp); + + ret = read_poll_timeout(mei_vsc_read_helper, rlen, + rlen >= 0, MEI_VSC_POLL_DELAY_US, + MEI_VSC_POLL_TIMEOUT_US, true, + hw, &buf, sizeof(buf)); + if (ret) { + dev_err(&mei_dev->dev, "wait fw ready failed: %d\n", ret); + return ret; + } + + hw->fw_ready = true; + + return 0; +} + +static bool mei_vsc_hbuf_is_ready(struct mei_device *mei_dev) +{ + struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev); + + return atomic_read(&hw->write_lock_cnt) == 0; +} + +static int mei_vsc_hbuf_empty_slots(struct mei_device *mei_dev) +{ + return MEI_VSC_MAX_MSG_SIZE / MEI_SLOT_SIZE; +} + +static u32 mei_vsc_hbuf_depth(const struct mei_device *mei_dev) +{ + return MEI_VSC_MAX_MSG_SIZE / MEI_SLOT_SIZE; +} + 
+static int mei_vsc_write(struct mei_device *mei_dev, + const void *hdr, size_t hdr_len, + const void *data, size_t data_len) +{ + struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev); + char *buf = hw->tx_buf; + int ret; + + if (WARN_ON(!hdr || !IS_ALIGNED(hdr_len, 4))) + return -EINVAL; + + if (!data || data_len > MEI_VSC_MAX_MSG_SIZE) + return -EINVAL; + + atomic_inc(&hw->write_lock_cnt); + + memcpy(buf, hdr, hdr_len); + memcpy(buf + hdr_len, data, data_len); + + ret = mei_vsc_write_helper(hw, buf, hdr_len + data_len); + + atomic_dec_if_positive(&hw->write_lock_cnt); + + return ret < 0 ? ret : 0; +} + +static inline u32 mei_vsc_read(const struct mei_device *mei_dev) +{ + struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev); + int ret; + + ret = mei_vsc_read_helper(hw, hw->rx_buf, sizeof(hw->rx_buf)); + if (ret < 0 || ret < sizeof(u32)) + return 0; + hw->rx_len = ret; + + hw->rx_hdr = get_unaligned_le32(hw->rx_buf); + + return hw->rx_hdr; +} + +static int mei_vsc_count_full_read_slots(struct mei_device *mei_dev) +{ + return MEI_VSC_MAX_MSG_SIZE / MEI_SLOT_SIZE; +} + +static int mei_vsc_read_slots(struct mei_device *mei_dev, unsigned char *buf, + unsigned long len) +{ + struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev); + struct mei_msg_hdr *hdr; + + hdr = (struct mei_msg_hdr *)&hw->rx_hdr; + if (len != hdr->length || hdr->length + sizeof(*hdr) != hw->rx_len) + return -EINVAL; + + memcpy(buf, hw->rx_buf + sizeof(*hdr), len); + + return 0; +} + +static bool mei_vsc_pg_in_transition(struct mei_device *mei_dev) +{ + return mei_dev->pg_event >= MEI_PG_EVENT_WAIT && + mei_dev->pg_event <= MEI_PG_EVENT_INTR_WAIT; +} + +static bool mei_vsc_pg_is_enabled(struct mei_device *mei_dev) +{ + return false; +} + +static int mei_vsc_hw_reset(struct mei_device *mei_dev, bool intr_enable) +{ + struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev); + + vsc_tp_reset(hw->tp); + + if (!intr_enable) + return 0; + + return vsc_tp_init(hw->tp, mei_dev->parent); +} + +static const struct 
mei_hw_ops mei_vsc_hw_ops = { + .fw_status = mei_vsc_fw_status, + .pg_state = mei_vsc_pg_state, + + .host_is_ready = mei_vsc_host_is_ready, + .hw_is_ready = mei_vsc_hw_is_ready, + .hw_reset = mei_vsc_hw_reset, + .hw_config = mei_vsc_hw_config, + .hw_start = mei_vsc_hw_start, + + .pg_in_transition = mei_vsc_pg_in_transition, + .pg_is_enabled = mei_vsc_pg_is_enabled, + + .intr_clear = mei_vsc_intr_clear, + .intr_enable = mei_vsc_intr_enable, + .intr_disable = mei_vsc_intr_disable, + .synchronize_irq = mei_vsc_synchronize_irq, + + .hbuf_free_slots = mei_vsc_hbuf_empty_slots, + .hbuf_is_ready = mei_vsc_hbuf_is_ready, + .hbuf_depth = mei_vsc_hbuf_depth, + .write = mei_vsc_write, + + .rdbuf_full_slots = mei_vsc_count_full_read_slots, + .read_hdr = mei_vsc_read, + .read = mei_vsc_read_slots, +}; + +static void mei_vsc_event_cb(void *context) +{ + struct mei_device *mei_dev = context; + struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev); + struct list_head cmpl_list; + s32 slots; + int ret; + + if (mei_dev->dev_state == MEI_DEV_RESETTING || + mei_dev->dev_state == MEI_DEV_INITIALIZING) + return; + + INIT_LIST_HEAD(&cmpl_list); + + guard(mutex)(&mei_dev->device_lock); + + while (vsc_tp_need_read(hw->tp)) { + /* check slots available for reading */ + slots = mei_count_full_read_slots(mei_dev); + + ret = mei_irq_read_handler(mei_dev, &cmpl_list, &slots); + if (ret) { + if (ret != -ENODATA) { + if (mei_dev->dev_state != MEI_DEV_RESETTING && + mei_dev->dev_state != MEI_DEV_POWER_DOWN) + schedule_work(&mei_dev->reset_work); + } + + return; + } + } + + mei_dev->hbuf_is_ready = mei_hbuf_is_ready(mei_dev); + ret = mei_irq_write_handler(mei_dev, &cmpl_list); + if (ret) + dev_err(&mei_dev->dev, "dispatch write request failed: %d\n", ret); + + mei_dev->hbuf_is_ready = mei_hbuf_is_ready(mei_dev); + mei_irq_compl_handler(mei_dev, &cmpl_list); +} + +static int mei_vsc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct mei_device *mei_dev; + struct 
mei_vsc_hw *hw; + struct vsc_tp *tp; + int ret; + + tp = *(struct vsc_tp **)dev_get_platdata(dev); + if (!tp) + return dev_err_probe(dev, -ENODEV, "no platform data\n"); + + mei_dev = kzalloc(size_add(sizeof(*mei_dev), sizeof(*hw)), GFP_KERNEL); + if (!mei_dev) + return -ENOMEM; + + mei_device_init(mei_dev, dev, false, &mei_vsc_hw_ops); + + mei_dev->fw_f_fw_ver_supported = 0; + mei_dev->kind = "ivsc"; + + hw = mei_dev_to_vsc_hw(mei_dev); + atomic_set(&hw->write_lock_cnt, 0); + hw->tp = tp; + + platform_set_drvdata(pdev, mei_dev); + + vsc_tp_register_event_cb(tp, mei_vsc_event_cb, mei_dev); + + ret = mei_register(mei_dev, dev); + if (ret) + goto err; + + ret = mei_start(mei_dev); + if (ret) { + dev_err_probe(dev, ret, "init hw failed\n"); + goto err; + } + + pm_runtime_enable(mei_dev->parent); + + return 0; + +err: + mei_cancel_work(mei_dev); + + vsc_tp_register_event_cb(tp, NULL, NULL); + + mei_disable_interrupts(mei_dev); + + mei_deregister(mei_dev); + + return ret; +} + +static void mei_vsc_remove(struct platform_device *pdev) +{ + struct mei_device *mei_dev = platform_get_drvdata(pdev); + struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev); + + pm_runtime_disable(mei_dev->parent); + + mei_stop(mei_dev); + + vsc_tp_register_event_cb(hw->tp, NULL, NULL); + + mei_disable_interrupts(mei_dev); + + mei_deregister(mei_dev); +} + +static int mei_vsc_suspend(struct device *dev) +{ + struct mei_device *mei_dev; + int ret = 0; + + mei_dev = dev_get_drvdata(dev); + if (!mei_dev) + return -ENODEV; + + mutex_lock(&mei_dev->device_lock); + + if (!mei_write_is_idle(mei_dev)) + ret = -EAGAIN; + + mutex_unlock(&mei_dev->device_lock); + + return ret; +} + +static int mei_vsc_resume(struct device *dev) +{ + struct mei_device *mei_dev; + + mei_dev = dev_get_drvdata(dev); + if (!mei_dev) + return -ENODEV; + + return 0; +} + +static DEFINE_SIMPLE_DEV_PM_OPS(mei_vsc_pm_ops, mei_vsc_suspend, mei_vsc_resume); + +static const struct platform_device_id mei_vsc_id_table[] = { + { 
MEI_VSC_DRV_NAME }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(platform, mei_vsc_id_table); + +static struct platform_driver mei_vsc_drv = { + .probe = mei_vsc_probe, + .remove = mei_vsc_remove, + .id_table = mei_vsc_id_table, + .driver = { + .name = MEI_VSC_DRV_NAME, + .pm = &mei_vsc_pm_ops, + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + }, +}; +module_platform_driver(mei_vsc_drv); + +MODULE_AUTHOR("Wentong Wu <wentong.wu@intel.com>"); +MODULE_AUTHOR("Zhifeng Wang <zhifeng.wang@intel.com>"); +MODULE_DESCRIPTION("Intel Visual Sensing Controller Interface"); +MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS("VSC_TP"); diff --git a/drivers/misc/mei/pxp/Kconfig b/drivers/misc/mei/pxp/Kconfig index 4029b96afc04..aa2dece4a927 100644 --- a/drivers/misc/mei/pxp/Kconfig +++ b/drivers/misc/mei/pxp/Kconfig @@ -1,11 +1,10 @@ - # SPDX-License-Identifier: GPL-2.0 # Copyright (c) 2020, Intel Corporation. All rights reserved. # config INTEL_MEI_PXP tristate "Intel PXP services of ME Interface" - select INTEL_MEI_ME - depends on DRM_I915 + depends on INTEL_MEI_ME + depends on DRM_I915 || DRM_XE help MEI Support for PXP Services on Intel platforms. diff --git a/drivers/misc/mei/pxp/mei_pxp.c b/drivers/misc/mei/pxp/mei_pxp.c index 3bf560bbdee0..2820d389c88e 100644 --- a/drivers/misc/mei/pxp/mei_pxp.c +++ b/drivers/misc/mei/pxp/mei_pxp.c @@ -11,39 +11,80 @@ * negotiation messages to ME FW command payloads and vice versa. 
*/ +#include <linux/delay.h> #include <linux/module.h> +#include <linux/pci.h> #include <linux/slab.h> #include <linux/mei.h> #include <linux/mei_cl_bus.h> #include <linux/component.h> #include <drm/drm_connector.h> -#include <drm/i915_component.h> -#include <drm/i915_pxp_tee_interface.h> +#include <drm/intel/i915_component.h> +#include <drm/intel/i915_pxp_tee_interface.h> #include "mei_pxp.h" +static inline int mei_pxp_reenable(const struct device *dev, struct mei_cl_device *cldev) +{ + int ret; + + dev_warn(dev, "Trying to reset the channel...\n"); + ret = mei_cldev_disable(cldev); + if (ret < 0) + dev_warn(dev, "mei_cldev_disable failed. %d\n", ret); + /* + * Explicitly ignoring disable failure, + * enable may fix the states and succeed + */ + ret = mei_cldev_enable(cldev); + if (ret < 0) + dev_err(dev, "mei_cldev_enable failed. %d\n", ret); + return ret; +} + /** * mei_pxp_send_message() - Sends a PXP message to ME FW. * @dev: device corresponding to the mei_cl_device * @message: a message buffer to send * @size: size of the message - * Return: 0 on Success, <0 on Failure + * @timeout_ms: timeout in milliseconds, zero means wait indefinitely. + * + * Returns: 0 on Success, <0 on Failure with the following defined failures. + * -ENODEV: Client was not connected. + * Caller may attempt to try again immediately. + * -ENOMEM: Internal memory allocation failure experienced. + * Caller may sleep to allow kernel reclaim before retrying. + * -EINTR : Calling thread received a signal. Caller may choose + * to abandon with the same thread id. + * -ETIME : Request is timed out. + * Caller may attempt to try again immediately. 
*/ static int -mei_pxp_send_message(struct device *dev, const void *message, size_t size) +mei_pxp_send_message(struct device *dev, const void *message, size_t size, unsigned long timeout_ms) { struct mei_cl_device *cldev; ssize_t byte; + int ret; if (!dev || !message) return -EINVAL; cldev = to_mei_cl_device(dev); - /* temporary drop const qualifier till the API is fixed */ - byte = mei_cldev_send(cldev, (u8 *)message, size); + byte = mei_cldev_send_timeout(cldev, message, size, timeout_ms); if (byte < 0) { dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte); + switch (byte) { + case -ENOMEM: + fallthrough; + case -ENODEV: + fallthrough; + case -ETIME: + ret = mei_pxp_reenable(dev, cldev); + if (ret) + byte = ret; + break; + } return byte; } @@ -55,23 +96,53 @@ mei_pxp_send_message(struct device *dev, const void *message, size_t size) * @dev: device corresponding to the mei_cl_device * @buffer: a message buffer to contain the received message * @size: size of the buffer - * Return: bytes sent on Success, <0 on Failure + * @timeout_ms: timeout in milliseconds, zero means wait indefinitely. + * + * Returns: number of bytes send on Success, <0 on Failure with the following defined failures. + * -ENODEV: Client was not connected. + * Caller may attempt to try again from send immediately. + * -ENOMEM: Internal memory allocation failure experienced. + * Caller may sleep to allow kernel reclaim before retrying. + * -EINTR : Calling thread received a signal. Caller will need to repeat calling + * (with a different owning thread) to retrieve existing unclaimed response + * (and may discard it). + * -ETIME : Request is timed out. + * Caller may attempt to try again from send immediately. 
*/ static int -mei_pxp_receive_message(struct device *dev, void *buffer, size_t size) +mei_pxp_receive_message(struct device *dev, void *buffer, size_t size, unsigned long timeout_ms) { struct mei_cl_device *cldev; ssize_t byte; + bool retry = false; + int ret; if (!dev || !buffer) return -EINVAL; cldev = to_mei_cl_device(dev); - byte = mei_cldev_recv(cldev, buffer, size); +retry: + byte = mei_cldev_recv_timeout(cldev, buffer, size, timeout_ms); if (byte < 0) { dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte); - return byte; + switch (byte) { + case -ENOMEM: + /* Retry the read when pages are reclaimed */ + msleep(20); + if (!retry) { + retry = true; + goto retry; + } + fallthrough; + case -ENODEV: + fallthrough; + case -ETIME: + ret = mei_pxp_reenable(dev, cldev); + if (ret) + byte = ret; + break; + } } return byte; @@ -155,12 +226,24 @@ static int mei_pxp_component_match(struct device *dev, int subcomponent, void *data) { struct device *base = data; + struct pci_dev *pdev; if (!dev) return 0; - if (!dev->driver || strcmp(dev->driver->name, "i915") || - subcomponent != I915_COMPONENT_PXP) + if (!dev_is_pci(dev)) + return 0; + + pdev = to_pci_dev(dev); + + if (pdev->vendor != PCI_VENDOR_ID_INTEL) + return 0; + + if (pdev->class != (PCI_CLASS_DISPLAY_VGA << 8) && + pdev->class != (PCI_CLASS_DISPLAY_OTHER << 8)) + return 0; + + if (subcomponent != I915_COMPONENT_PXP) return 0; base = base->parent; diff --git a/drivers/misc/mei/vsc-fw-loader.c b/drivers/misc/mei/vsc-fw-loader.c new file mode 100644 index 000000000000..43abefa806e1 --- /dev/null +++ b/drivers/misc/mei/vsc-fw-loader.c @@ -0,0 +1,776 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2023, Intel Corporation. 
+ * Intel Visual Sensing Controller Transport Layer Linux driver + */ + +#include <linux/acpi.h> +#include <linux/align.h> +#include <linux/bitfield.h> +#include <linux/bits.h> +#include <linux/cleanup.h> +#include <linux/firmware.h> +#include <linux/sizes.h> +#include <linux/slab.h> +#include <linux/string_helpers.h> +#include <linux/types.h> + +#include <linux/unaligned.h> + +#include "vsc-tp.h" + +#define VSC_MAGIC_NUM 0x49505343 /* IPSC */ +#define VSC_MAGIC_FW 0x49574653 /* IWFS */ +#define VSC_MAGIC_FILE 0x46564353 /* FVCS */ + +#define VSC_ADDR_BASE 0xE0030000 +#define VSC_EFUSE_ADDR (VSC_ADDR_BASE + 0x038) +#define VSC_STRAP_ADDR (VSC_ADDR_BASE + 0x100) + +#define VSC_MAINSTEPPING_VERSION_MASK GENMASK(7, 4) +#define VSC_MAINSTEPPING_VERSION_A 0 + +#define VSC_SUBSTEPPING_VERSION_MASK GENMASK(3, 0) +#define VSC_SUBSTEPPING_VERSION_0 0 +#define VSC_SUBSTEPPING_VERSION_1 2 + +#define VSC_BOOT_IMG_OPTION_MASK GENMASK(15, 0) + +#define VSC_SKU_CFG_LOCATION 0x5001A000 +#define VSC_SKU_MAX_SIZE 4100u + +#define VSC_ACE_IMG_CNT 2 +#define VSC_CSI_IMG_CNT 4 +#define VSC_IMG_CNT_MAX 6 + +#define VSC_ROM_PKG_SIZE 256u +#define VSC_FW_PKG_SIZE 512u + +#define VSC_IMAGE_DIR "intel/vsc/" + +#define VSC_CSI_IMAGE_NAME VSC_IMAGE_DIR "ivsc_fw.bin" +#define VSC_ACE_IMAGE_NAME_FMT VSC_IMAGE_DIR "ivsc_pkg_%s_0.bin" +#define VSC_CFG_IMAGE_NAME_FMT VSC_IMAGE_DIR "ivsc_skucfg_%s_0_1.bin" + +#define VSC_IMAGE_PATH_MAX_LEN 64 + +#define VSC_SENSOR_NAME_MAX_LEN 16 + +/* command id */ +enum { + VSC_CMD_QUERY = 0, + VSC_CMD_DL_SET = 1, + VSC_CMD_DL_START = 2, + VSC_CMD_DL_CONT = 3, + VSC_CMD_DUMP_MEM = 4, + VSC_CMD_GET_CONT = 8, + VSC_CMD_CAM_BOOT = 10, +}; + +/* command ack token */ +enum { + VSC_TOKEN_BOOTLOADER_REQ = 1, + VSC_TOKEN_DUMP_RESP = 4, + VSC_TOKEN_ERROR = 7, +}; + +/* image type */ +enum { + VSC_IMG_BOOTLOADER_TYPE = 1, + VSC_IMG_CSI_EM7D_TYPE, + VSC_IMG_CSI_SEM_TYPE, + VSC_IMG_CSI_RUNTIME_TYPE, + VSC_IMG_ACE_VISION_TYPE, + VSC_IMG_ACE_CFG_TYPE, + VSC_IMG_SKU_CFG_TYPE, 
+}; + +/* image fragments */ +enum { + VSC_IMG_BOOTLOADER_FRAG, + VSC_IMG_CSI_SEM_FRAG, + VSC_IMG_CSI_RUNTIME_FRAG, + VSC_IMG_ACE_VISION_FRAG, + VSC_IMG_ACE_CFG_FRAG, + VSC_IMG_CSI_EM7D_FRAG, + VSC_IMG_SKU_CFG_FRAG, + VSC_IMG_FRAG_MAX +}; + +struct vsc_rom_cmd { + __le32 magic; + __u8 cmd_id; + union { + /* download start */ + struct { + __u8 img_type; + __le16 option; + __le32 img_len; + __le32 img_loc; + __le32 crc; + DECLARE_FLEX_ARRAY(__u8, res); + } __packed dl_start; + /* download set */ + struct { + __u8 option; + __le16 img_cnt; + DECLARE_FLEX_ARRAY(__le32, payload); + } __packed dl_set; + /* download continue */ + struct { + __u8 end_flag; + __le16 len; + /* 8 is the offset of payload */ + __u8 payload[VSC_ROM_PKG_SIZE - 8]; + } __packed dl_cont; + /* dump memory */ + struct { + __u8 res; + __le16 len; + __le32 addr; + DECLARE_FLEX_ARRAY(__u8, payload); + } __packed dump_mem; + /* 5 is the offset of padding */ + __u8 padding[VSC_ROM_PKG_SIZE - 5]; + } data; +}; + +struct vsc_rom_cmd_ack { + __le32 magic; + __u8 token; + __u8 type; + __u8 res[2]; + __u8 payload[]; +}; + +struct vsc_fw_cmd { + __le32 magic; + __u8 cmd_id; + union { + struct { + __le16 option; + __u8 img_type; + __le32 img_len; + __le32 img_loc; + __le32 crc; + DECLARE_FLEX_ARRAY(__u8, res); + } __packed dl_start; + struct { + __le16 option; + __u8 img_cnt; + DECLARE_FLEX_ARRAY(__le32, payload); + } __packed dl_set; + struct { + __le32 addr; + __u8 len; + DECLARE_FLEX_ARRAY(__u8, payload); + } __packed dump_mem; + struct { + __u8 resv[3]; + __le32 crc; + DECLARE_FLEX_ARRAY(__u8, payload); + } __packed boot; + /* 5 is the offset of padding */ + __u8 padding[VSC_FW_PKG_SIZE - 5]; + } data; +}; + +struct vsc_img { + __le32 magic; + __le32 option; + __le32 image_count; + __le32 image_location[VSC_IMG_CNT_MAX]; +}; + +struct vsc_fw_sign { + __le32 magic; + __le32 image_size; + __u8 image[]; +}; + +struct vsc_image_code_data { + /* fragment index */ + u8 frag_index; + /* image type */ + u8 
image_type; +}; + +struct vsc_img_frag { + u8 type; + u32 location; + const u8 *data; + u32 size; +}; + +/** + * struct vsc_fw_loader - represent vsc firmware loader + * @dev: device used to request firmware + * @tp: transport layer used with the firmware loader + * @csi: CSI image + * @ace: ACE image + * @cfg: config image + * @tx_buf: tx buffer + * @rx_buf: rx buffer + * @option: command option + * @count: total image count + * @sensor_name: camera sensor name + * @frags: image fragments + */ +struct vsc_fw_loader { + struct device *dev; + struct vsc_tp *tp; + + const struct firmware *csi; + const struct firmware *ace; + const struct firmware *cfg; + + void *tx_buf; + void *rx_buf; + + u16 option; + u16 count; + + char sensor_name[VSC_SENSOR_NAME_MAX_LEN]; + + struct vsc_img_frag frags[VSC_IMG_FRAG_MAX]; +}; + +static inline u32 vsc_sum_crc(void *data, size_t size) +{ + u32 crc = 0; + size_t i; + + for (i = 0; i < size; i++) + crc += *((u8 *)data + i); + + return crc; +} + +/* get sensor name to construct image name */ +static int vsc_get_sensor_name(struct vsc_fw_loader *fw_loader, + struct device *dev) +{ + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER }; + union acpi_object obj = { + .integer.type = ACPI_TYPE_INTEGER, + .integer.value = 1, + }; + struct acpi_object_list arg_list = { + .count = 1, + .pointer = &obj, + }; + union acpi_object *ret_obj; + acpi_handle handle; + acpi_status status; + int ret = 0; + + handle = ACPI_HANDLE(dev); + if (!handle) + return -EINVAL; + + status = acpi_evaluate_object(handle, "SID", &arg_list, &buffer); + if (ACPI_FAILURE(status)) { + dev_err(dev, "can't evaluate SID method: %d\n", status); + return -ENODEV; + } + + ret_obj = buffer.pointer; + if (!ret_obj) { + dev_err(dev, "can't locate ACPI buffer\n"); + return -ENODEV; + } + + if (ret_obj->type != ACPI_TYPE_STRING) { + dev_err(dev, "found non-string entry\n"); + ret = -ENODEV; + goto out_free_buff; + } + + /* string length excludes trailing NUL */ + if 
(ret_obj->string.length >= sizeof(fw_loader->sensor_name)) { + dev_err(dev, "sensor name buffer too small\n"); + ret = -EINVAL; + goto out_free_buff; + } + + memcpy(fw_loader->sensor_name, ret_obj->string.pointer, + ret_obj->string.length); + + string_lower(fw_loader->sensor_name, fw_loader->sensor_name); + +out_free_buff: + ACPI_FREE(buffer.pointer); + + return ret; +} + +static int vsc_identify_silicon(struct vsc_fw_loader *fw_loader) +{ + struct vsc_rom_cmd_ack *ack = fw_loader->rx_buf; + struct vsc_rom_cmd *cmd = fw_loader->tx_buf; + u8 version, sub_version; + int ret; + + /* identify stepping information */ + cmd->magic = cpu_to_le32(VSC_MAGIC_NUM); + cmd->cmd_id = VSC_CMD_DUMP_MEM; + cmd->data.dump_mem.addr = cpu_to_le32(VSC_EFUSE_ADDR); + cmd->data.dump_mem.len = cpu_to_le16(sizeof(__le32)); + ret = vsc_tp_rom_xfer(fw_loader->tp, cmd, ack, VSC_ROM_PKG_SIZE); + if (ret || ack->token == VSC_TOKEN_ERROR) { + dev_err(fw_loader->dev, "CMD_DUMP_MEM error %d token %d\n", ret, ack->token); + return ret ?: -EINVAL; + } + + cmd->magic = cpu_to_le32(VSC_MAGIC_NUM); + cmd->cmd_id = VSC_CMD_GET_CONT; + ret = vsc_tp_rom_xfer(fw_loader->tp, cmd, ack, VSC_ROM_PKG_SIZE); + if (ret || ack->token != VSC_TOKEN_DUMP_RESP) { + dev_err(fw_loader->dev, "CMD_GETCONT error %d token %d\n", ret, ack->token); + return ret ?: -EINVAL; + } + + version = FIELD_GET(VSC_MAINSTEPPING_VERSION_MASK, ack->payload[0]); + sub_version = FIELD_GET(VSC_SUBSTEPPING_VERSION_MASK, ack->payload[0]); + + if (version != VSC_MAINSTEPPING_VERSION_A) { + dev_err(fw_loader->dev, "mainstepping mismatch expected %d got %d\n", + VSC_MAINSTEPPING_VERSION_A, version); + return -EINVAL; + } + + if (sub_version != VSC_SUBSTEPPING_VERSION_0 && + sub_version != VSC_SUBSTEPPING_VERSION_1) { + dev_err(fw_loader->dev, "substepping %d is out of supported range %d - %d\n", + sub_version, VSC_SUBSTEPPING_VERSION_0, VSC_SUBSTEPPING_VERSION_1); + return -EINVAL; + } + + dev_info(fw_loader->dev, "silicon stepping version is 
%u:%u\n", + version, sub_version); + + /* identify strap information */ + cmd->magic = cpu_to_le32(VSC_MAGIC_NUM); + cmd->cmd_id = VSC_CMD_DUMP_MEM; + cmd->data.dump_mem.addr = cpu_to_le32(VSC_STRAP_ADDR); + cmd->data.dump_mem.len = cpu_to_le16(sizeof(__le32)); + ret = vsc_tp_rom_xfer(fw_loader->tp, cmd, ack, VSC_ROM_PKG_SIZE); + if (ret) + return ret; + if (ack->token == VSC_TOKEN_ERROR) + return -EINVAL; + + cmd->magic = cpu_to_le32(VSC_MAGIC_NUM); + cmd->cmd_id = VSC_CMD_GET_CONT; + ret = vsc_tp_rom_xfer(fw_loader->tp, cmd, ack, VSC_ROM_PKG_SIZE); + if (ret) + return ret; + if (ack->token != VSC_TOKEN_DUMP_RESP) + return -EINVAL; + + return 0; +} + +static int vsc_identify_csi_image(struct vsc_fw_loader *fw_loader) +{ + const struct firmware *image; + struct vsc_fw_sign *sign; + struct vsc_img *img; + unsigned int i; + int ret; + + ret = request_firmware(&image, VSC_CSI_IMAGE_NAME, fw_loader->dev); + if (ret) + return ret; + + img = (struct vsc_img *)image->data; + if (!img) { + ret = -ENOENT; + goto err_release_image; + } + + if (le32_to_cpu(img->magic) != VSC_MAGIC_FILE) { + ret = -EINVAL; + goto err_release_image; + } + + if (le32_to_cpu(img->image_count) != VSC_CSI_IMG_CNT) { + ret = -EINVAL; + goto err_release_image; + } + fw_loader->count += le32_to_cpu(img->image_count) - 1; + + fw_loader->option = + FIELD_GET(VSC_BOOT_IMG_OPTION_MASK, le32_to_cpu(img->option)); + + sign = (struct vsc_fw_sign *) + (img->image_location + le32_to_cpu(img->image_count)); + + for (i = 0; i < VSC_CSI_IMG_CNT; i++) { + /* mapping from CSI image index to image code data */ + static const struct vsc_image_code_data csi_image_map[] = { + { VSC_IMG_BOOTLOADER_FRAG, VSC_IMG_BOOTLOADER_TYPE }, + { VSC_IMG_CSI_SEM_FRAG, VSC_IMG_CSI_SEM_TYPE }, + { VSC_IMG_CSI_RUNTIME_FRAG, VSC_IMG_CSI_RUNTIME_TYPE }, + { VSC_IMG_CSI_EM7D_FRAG, VSC_IMG_CSI_EM7D_TYPE }, + }; + struct vsc_img_frag *frag; + + if ((u8 *)sign + sizeof(*sign) > image->data + image->size) { + ret = -EINVAL; + goto 
err_release_image; + } + + if (le32_to_cpu(sign->magic) != VSC_MAGIC_FW) { + ret = -EINVAL; + goto err_release_image; + } + + if (!le32_to_cpu(img->image_location[i])) { + ret = -EINVAL; + goto err_release_image; + } + + frag = &fw_loader->frags[csi_image_map[i].frag_index]; + + frag->data = sign->image; + frag->size = le32_to_cpu(sign->image_size); + frag->location = le32_to_cpu(img->image_location[i]); + frag->type = csi_image_map[i].image_type; + + sign = (struct vsc_fw_sign *) + (sign->image + le32_to_cpu(sign->image_size)); + } + + fw_loader->csi = image; + + return 0; + +err_release_image: + release_firmware(image); + + return ret; +} + +static int vsc_identify_ace_image(struct vsc_fw_loader *fw_loader) +{ + char path[VSC_IMAGE_PATH_MAX_LEN]; + const struct firmware *image; + struct vsc_fw_sign *sign; + struct vsc_img *img; + unsigned int i; + int ret; + + snprintf(path, sizeof(path), VSC_ACE_IMAGE_NAME_FMT, + fw_loader->sensor_name); + + ret = request_firmware(&image, path, fw_loader->dev); + if (ret) + return ret; + + img = (struct vsc_img *)image->data; + if (!img) { + ret = -ENOENT; + goto err_release_image; + } + + if (le32_to_cpu(img->magic) != VSC_MAGIC_FILE) { + ret = -EINVAL; + goto err_release_image; + } + + if (le32_to_cpu(img->image_count) != VSC_ACE_IMG_CNT) { + ret = -EINVAL; + goto err_release_image; + } + fw_loader->count += le32_to_cpu(img->image_count); + + sign = (struct vsc_fw_sign *) + (img->image_location + le32_to_cpu(img->image_count)); + + for (i = 0; i < VSC_ACE_IMG_CNT; i++) { + /* mapping from ACE image index to image code data */ + static const struct vsc_image_code_data ace_image_map[] = { + { VSC_IMG_ACE_VISION_FRAG, VSC_IMG_ACE_VISION_TYPE }, + { VSC_IMG_ACE_CFG_FRAG, VSC_IMG_ACE_CFG_TYPE }, + }; + struct vsc_img_frag *frag, *last_frag; + u8 frag_index; + + if ((u8 *)sign + sizeof(*sign) > image->data + image->size) { + ret = -EINVAL; + goto err_release_image; + } + + if (le32_to_cpu(sign->magic) != VSC_MAGIC_FW) { + ret = 
-EINVAL; + goto err_release_image; + } + + frag_index = ace_image_map[i].frag_index; + frag = &fw_loader->frags[frag_index]; + + frag->data = sign->image; + frag->size = le32_to_cpu(sign->image_size); + frag->location = le32_to_cpu(img->image_location[i]); + frag->type = ace_image_map[i].image_type; + + if (!frag->location) { + last_frag = &fw_loader->frags[frag_index - 1]; + frag->location = + ALIGN(last_frag->location + last_frag->size, SZ_4K); + } + + sign = (struct vsc_fw_sign *) + (sign->image + le32_to_cpu(sign->image_size)); + } + + fw_loader->ace = image; + + return 0; + +err_release_image: + release_firmware(image); + + return ret; +} + +static int vsc_identify_cfg_image(struct vsc_fw_loader *fw_loader) +{ + struct vsc_img_frag *frag = &fw_loader->frags[VSC_IMG_SKU_CFG_FRAG]; + char path[VSC_IMAGE_PATH_MAX_LEN]; + const struct firmware *image; + u32 size; + int ret; + + snprintf(path, sizeof(path), VSC_CFG_IMAGE_NAME_FMT, + fw_loader->sensor_name); + + ret = request_firmware(&image, path, fw_loader->dev); + if (ret) + return ret; + + /* identify image size */ + if (image->size <= sizeof(u32) || image->size > VSC_SKU_MAX_SIZE) { + ret = -EINVAL; + goto err_release_image; + } + + size = le32_to_cpu(*((__le32 *)image->data)) + sizeof(u32); + if (image->size != size) { + ret = -EINVAL; + goto err_release_image; + } + + frag->data = image->data; + frag->size = image->size; + frag->type = VSC_IMG_SKU_CFG_TYPE; + frag->location = VSC_SKU_CFG_LOCATION; + + fw_loader->cfg = image; + + return 0; + +err_release_image: + release_firmware(image); + + return ret; +} + +static int vsc_download_bootloader(struct vsc_fw_loader *fw_loader) +{ + struct vsc_img_frag *frag = &fw_loader->frags[VSC_IMG_BOOTLOADER_FRAG]; + struct vsc_rom_cmd_ack *ack = fw_loader->rx_buf; + struct vsc_rom_cmd *cmd = fw_loader->tx_buf; + u32 len, c_len; + size_t remain; + const u8 *p; + int ret; + + cmd->magic = cpu_to_le32(VSC_MAGIC_NUM); + cmd->cmd_id = VSC_CMD_QUERY; + ret = 
vsc_tp_rom_xfer(fw_loader->tp, cmd, ack, VSC_ROM_PKG_SIZE); + if (ret) + return ret; + if (ack->token != VSC_TOKEN_DUMP_RESP && + ack->token != VSC_TOKEN_BOOTLOADER_REQ) + return -EINVAL; + + cmd->magic = cpu_to_le32(VSC_MAGIC_NUM); + cmd->cmd_id = VSC_CMD_DL_START; + cmd->data.dl_start.option = cpu_to_le16(fw_loader->option); + cmd->data.dl_start.img_type = frag->type; + cmd->data.dl_start.img_len = cpu_to_le32(frag->size); + cmd->data.dl_start.img_loc = cpu_to_le32(frag->location); + + c_len = offsetof(struct vsc_rom_cmd, data.dl_start.crc); + cmd->data.dl_start.crc = cpu_to_le32(vsc_sum_crc(cmd, c_len)); + + ret = vsc_tp_rom_xfer(fw_loader->tp, cmd, NULL, VSC_ROM_PKG_SIZE); + if (ret) + return ret; + + p = frag->data; + remain = frag->size; + + /* download image data */ + while (remain > 0) { + len = min(remain, sizeof(cmd->data.dl_cont.payload)); + + cmd->magic = cpu_to_le32(VSC_MAGIC_NUM); + cmd->cmd_id = VSC_CMD_DL_CONT; + cmd->data.dl_cont.len = cpu_to_le16(len); + cmd->data.dl_cont.end_flag = remain == len; + memcpy(cmd->data.dl_cont.payload, p, len); + + ret = vsc_tp_rom_xfer(fw_loader->tp, cmd, NULL, VSC_ROM_PKG_SIZE); + if (ret) + return ret; + + p += len; + remain -= len; + } + + return 0; +} + +static int vsc_download_firmware(struct vsc_fw_loader *fw_loader) +{ + struct vsc_fw_cmd *cmd = fw_loader->tx_buf; + unsigned int i, index = 0; + u32 c_len; + int ret; + + cmd->magic = cpu_to_le32(VSC_MAGIC_NUM); + cmd->cmd_id = VSC_CMD_DL_SET; + cmd->data.dl_set.img_cnt = cpu_to_le16(fw_loader->count); + put_unaligned_le16(fw_loader->option, &cmd->data.dl_set.option); + + for (i = VSC_IMG_CSI_SEM_FRAG; i <= VSC_IMG_CSI_EM7D_FRAG; i++) { + struct vsc_img_frag *frag = &fw_loader->frags[i]; + + cmd->data.dl_set.payload[index++] = cpu_to_le32(frag->location); + cmd->data.dl_set.payload[index++] = cpu_to_le32(frag->size); + } + + c_len = offsetof(struct vsc_fw_cmd, data.dl_set.payload[index]); + cmd->data.dl_set.payload[index] = cpu_to_le32(vsc_sum_crc(cmd, c_len)); 
+ + ret = vsc_tp_rom_xfer(fw_loader->tp, cmd, NULL, VSC_FW_PKG_SIZE); + if (ret) + return ret; + + for (i = VSC_IMG_CSI_SEM_FRAG; i < VSC_IMG_FRAG_MAX; i++) { + struct vsc_img_frag *frag = &fw_loader->frags[i]; + const u8 *p; + u32 remain; + + cmd->magic = cpu_to_le32(VSC_MAGIC_NUM); + cmd->cmd_id = VSC_CMD_DL_START; + cmd->data.dl_start.img_type = frag->type; + cmd->data.dl_start.img_len = cpu_to_le32(frag->size); + cmd->data.dl_start.img_loc = cpu_to_le32(frag->location); + put_unaligned_le16(fw_loader->option, &cmd->data.dl_start.option); + + c_len = offsetof(struct vsc_fw_cmd, data.dl_start.crc); + cmd->data.dl_start.crc = cpu_to_le32(vsc_sum_crc(cmd, c_len)); + + ret = vsc_tp_rom_xfer(fw_loader->tp, cmd, NULL, VSC_FW_PKG_SIZE); + if (ret) + return ret; + + p = frag->data; + remain = frag->size; + + /* download image data */ + while (remain > 0) { + u32 len = min(remain, VSC_FW_PKG_SIZE); + + memcpy(fw_loader->tx_buf, p, len); + memset(fw_loader->tx_buf + len, 0, VSC_FW_PKG_SIZE - len); + + ret = vsc_tp_rom_xfer(fw_loader->tp, fw_loader->tx_buf, + NULL, VSC_FW_PKG_SIZE); + if (ret) + break; + + p += len; + remain -= len; + } + } + + cmd->magic = cpu_to_le32(VSC_MAGIC_NUM); + cmd->cmd_id = VSC_CMD_CAM_BOOT; + + c_len = offsetof(struct vsc_fw_cmd, data.dl_start.crc); + cmd->data.boot.crc = cpu_to_le32(vsc_sum_crc(cmd, c_len)); + + return vsc_tp_rom_xfer(fw_loader->tp, cmd, NULL, VSC_FW_PKG_SIZE); +} + +/** + * vsc_tp_init - init vsc_tp + * @tp: vsc_tp device handle + * @dev: device node for mei vsc device + * Return: 0 in case of success, negative value in case of error + */ +int vsc_tp_init(struct vsc_tp *tp, struct device *dev) +{ + struct vsc_fw_loader *fw_loader __free(kfree) = NULL; + void *tx_buf __free(kfree) = NULL; + void *rx_buf __free(kfree) = NULL; + int ret; + + fw_loader = kzalloc(sizeof(*fw_loader), GFP_KERNEL); + if (!fw_loader) + return -ENOMEM; + + tx_buf = kzalloc(VSC_FW_PKG_SIZE, GFP_KERNEL); + if (!tx_buf) + return -ENOMEM; + + rx_buf = 
kzalloc(VSC_FW_PKG_SIZE, GFP_KERNEL); + if (!rx_buf) + return -ENOMEM; + + fw_loader->tx_buf = tx_buf; + fw_loader->rx_buf = rx_buf; + + fw_loader->tp = tp; + fw_loader->dev = dev; + + ret = vsc_get_sensor_name(fw_loader, dev); + if (ret) + return ret; + + ret = vsc_identify_silicon(fw_loader); + if (ret) + return ret; + + ret = vsc_identify_csi_image(fw_loader); + if (ret) + return ret; + + ret = vsc_identify_ace_image(fw_loader); + if (ret) + goto err_release_csi; + + ret = vsc_identify_cfg_image(fw_loader); + if (ret) + goto err_release_ace; + + ret = vsc_download_bootloader(fw_loader); + if (!ret) + ret = vsc_download_firmware(fw_loader); + + release_firmware(fw_loader->cfg); + +err_release_ace: + release_firmware(fw_loader->ace); + +err_release_csi: + release_firmware(fw_loader->csi); + + return ret; +} +EXPORT_SYMBOL_NS_GPL(vsc_tp_init, "VSC_TP"); diff --git a/drivers/misc/mei/vsc-tp.c b/drivers/misc/mei/vsc-tp.c new file mode 100644 index 000000000000..5ecf99883996 --- /dev/null +++ b/drivers/misc/mei/vsc-tp.c @@ -0,0 +1,577 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2023, Intel Corporation. 
+ * Intel Visual Sensing Controller Transport Layer Linux driver + */ + +#include <linux/acpi.h> +#include <linux/cleanup.h> +#include <linux/crc32.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/interrupt.h> +#include <linux/iopoll.h> +#include <linux/irq.h> +#include <linux/irqreturn.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/platform_device.h> +#include <linux/spi/spi.h> +#include <linux/types.h> +#include <linux/workqueue.h> + +#include "vsc-tp.h" + +#define VSC_TP_RESET_PIN_TOGGLE_INTERVAL_MS 20 +#define VSC_TP_ROM_BOOTUP_DELAY_MS 10 +#define VSC_TP_ROM_XFER_POLL_TIMEOUT_US (500 * USEC_PER_MSEC) +#define VSC_TP_ROM_XFER_POLL_DELAY_US (20 * USEC_PER_MSEC) +#define VSC_TP_WAIT_FW_POLL_TIMEOUT (2 * HZ) +#define VSC_TP_WAIT_FW_POLL_DELAY_US (20 * USEC_PER_MSEC) +#define VSC_TP_MAX_XFER_COUNT 5 + +#define VSC_TP_PACKET_SYNC 0x31 +#define VSC_TP_CRC_SIZE sizeof(u32) +#define VSC_TP_MAX_MSG_SIZE 2048 +/* SPI xfer timeout size */ +#define VSC_TP_XFER_TIMEOUT_BYTES 700 +#define VSC_TP_PACKET_PADDING_SIZE 1 +#define VSC_TP_PACKET_SIZE(pkt) \ + (sizeof(struct vsc_tp_packet_hdr) + le16_to_cpu((pkt)->hdr.len) + VSC_TP_CRC_SIZE) +#define VSC_TP_MAX_PACKET_SIZE \ + (sizeof(struct vsc_tp_packet_hdr) + VSC_TP_MAX_MSG_SIZE + VSC_TP_CRC_SIZE) +#define VSC_TP_MAX_XFER_SIZE \ + (VSC_TP_MAX_PACKET_SIZE + VSC_TP_XFER_TIMEOUT_BYTES) +#define VSC_TP_NEXT_XFER_LEN(len, offset) \ + (len + sizeof(struct vsc_tp_packet_hdr) + VSC_TP_CRC_SIZE - offset + VSC_TP_PACKET_PADDING_SIZE) + +struct vsc_tp_packet_hdr { + __u8 sync; + __u8 cmd; + __le16 len; + __le32 seq; +}; + +struct vsc_tp_packet { + struct vsc_tp_packet_hdr hdr; + __u8 buf[VSC_TP_MAX_XFER_SIZE - sizeof(struct vsc_tp_packet_hdr)]; +}; + +struct vsc_tp { + /* do the actual data transfer */ + struct spi_device *spi; + + /* bind with mei framework */ + struct platform_device *pdev; + + struct gpio_desc *wakeuphost; + struct gpio_desc *resetfw; + struct gpio_desc *wakeupfw; + + /* 
command sequence number */
	u32 seq;

	/* command buffer */
	struct vsc_tp_packet *tx_buf;
	struct vsc_tp_packet *rx_buf;

	atomic_t assert_cnt;
	wait_queue_head_t xfer_wait;
	struct work_struct event_work;

	vsc_tp_event_cb_t event_notify;
	void *event_notify_context;
	struct mutex event_notify_mutex; /* protects event_notify + context */
	struct mutex mutex; /* protects command download */
};

/* GPIO resources */
static const struct acpi_gpio_params wakeuphost_gpio = { 0, 0, false };
static const struct acpi_gpio_params wakeuphostint_gpio = { 1, 0, false };
static const struct acpi_gpio_params resetfw_gpio = { 2, 0, false };
static const struct acpi_gpio_params wakeupfw = { 3, 0, false };

static const struct acpi_gpio_mapping vsc_tp_acpi_gpios[] = {
	{ "wakeuphost-gpios", &wakeuphost_gpio, 1 },
	{ "wakeuphostint-gpios", &wakeuphostint_gpio, 1 },
	{ "resetfw-gpios", &resetfw_gpio, 1 },
	{ "wakeupfw-gpios", &wakeupfw, 1 },
	{}
};

/*
 * Firmware-to-host interrupt handler: records the assertion, wakes any
 * thread blocked in vsc_tp_wakeup_request(), and defers the registered
 * event callback to the workqueue so it runs outside hard-irq context.
 */
static irqreturn_t vsc_tp_isr(int irq, void *data)
{
	struct vsc_tp *tp = data;

	atomic_inc(&tp->assert_cnt);

	wake_up(&tp->xfer_wait);

	schedule_work(&tp->event_work);

	return IRQ_HANDLED;
}

/* Workqueue body: invoke the event callback under event_notify_mutex. */
static void vsc_tp_event_work(struct work_struct *work)
{
	struct vsc_tp *tp = container_of(work, struct vsc_tp, event_work);

	guard(mutex)(&tp->event_notify_mutex);

	if (tp->event_notify)
		tp->event_notify(tp->event_notify_context);
}

/*
 * wakeup firmware and wait for response
 *
 * Drives the wakeupfw line low, waits for the firmware interrupt to bump
 * assert_cnt, then polls the wakeuphost GPIO until it reads non-zero.
 * Returns 0 on success, -ETIMEDOUT if the interrupt never arrives.
 *
 * NOTE(review): read_poll_timeout() takes its timeout in microseconds, but
 * VSC_TP_WAIT_FW_POLL_TIMEOUT is defined as (2 * HZ), i.e. jiffies — confirm
 * the intended poll duration.
 */
static int vsc_tp_wakeup_request(struct vsc_tp *tp)
{
	int ret;

	gpiod_set_value_cansleep(tp->wakeupfw, 0);

	ret = wait_event_timeout(tp->xfer_wait,
				 atomic_read(&tp->assert_cnt),
				 VSC_TP_WAIT_FW_POLL_TIMEOUT);
	if (!ret)
		return -ETIMEDOUT;

	return read_poll_timeout(gpiod_get_value_cansleep, ret, ret,
				 VSC_TP_WAIT_FW_POLL_DELAY_US,
				 VSC_TP_WAIT_FW_POLL_TIMEOUT, false,
				 tp->wakeuphost);
}

/* Undo vsc_tp_wakeup_request(): consume one assertion, deassert wakeupfw. */
static void vsc_tp_wakeup_release(struct vsc_tp *tp)
{
	atomic_dec_if_positive(&tp->assert_cnt);

	gpiod_set_value_cansleep(tp->wakeupfw, 1);
}

/* Single full-duplex SPI transfer of @len bytes (ibuf may be NULL). */
static int vsc_tp_dev_xfer(struct vsc_tp *tp, void *obuf, void *ibuf, size_t len)
{
	struct spi_message msg = { 0 };
	struct spi_transfer xfer = {
		.tx_buf = obuf,
		.rx_buf = ibuf,
		.len = len,
	};

	spi_message_init_with_transfers(&msg, &xfer, 1);

	return spi_sync_locked(tp->spi, &msg);
}

/*
 * Send @pkt and incrementally parse the streamed response into @ibuf.
 *
 * The firmware's reply may arrive split across several SPI transfers, so the
 * parser keeps a running state:
 *   - @synced: whether the VSC_TP_PACKET_SYNC byte has been located;
 *   - @offset: bytes of the logical response consumed so far
 *     (header, then payload, then trailing CRC);
 *   - @dst/@dst_len: current destination being filled (&ack header, then
 *     @ibuf payload, then &recv_crc);
 *   - @crc: running CRC32 over header + payload, compared against the
 *     received trailer at the end.
 * Up to VSC_TP_MAX_XFER_COUNT transfers are attempted before giving up.
 */
static int vsc_tp_xfer_helper(struct vsc_tp *tp, struct vsc_tp_packet *pkt,
			      void *ibuf, u16 ilen)
{
	int ret, offset = 0, cpy_len, src_len, dst_len = sizeof(struct vsc_tp_packet_hdr);
	int next_xfer_len = VSC_TP_PACKET_SIZE(pkt) + VSC_TP_XFER_TIMEOUT_BYTES;
	u8 *src, *crc_src, *rx_buf = (u8 *)tp->rx_buf;
	int count_down = VSC_TP_MAX_XFER_COUNT;
	u32 recv_crc = 0, crc = ~0;
	struct vsc_tp_packet_hdr ack;
	u8 *dst = (u8 *)&ack;
	bool synced = false;

	do {
		ret = vsc_tp_dev_xfer(tp, pkt, rx_buf, next_xfer_len);
		if (ret)
			return ret;
		/* only the first transfer carries the request; send zeros after */
		memset(pkt, 0, VSC_TP_MAX_XFER_SIZE);

		if (synced) {
			src = rx_buf;
			src_len = next_xfer_len;
		} else {
			/* hunt for the sync byte marking the response start */
			src = memchr(rx_buf, VSC_TP_PACKET_SYNC, next_xfer_len);
			if (!src)
				continue;
			synced = true;
			src_len = next_xfer_len - (src - rx_buf);
		}

		/* traverse received data */
		while (src_len > 0) {
			cpy_len = min(src_len, dst_len);
			memcpy(dst, src, cpy_len);
			crc_src = src;
			src += cpy_len;
			src_len -= cpy_len;
			dst += cpy_len;
			dst_len -= cpy_len;

			if (offset < sizeof(ack)) {
				/* still filling the response header */
				offset += cpy_len;
				crc = crc32(crc, crc_src, cpy_len);

				if (!src_len)
					continue;

				/* header complete: next fill payload or CRC */
				if (le16_to_cpu(ack.len)) {
					dst = ibuf;
					dst_len = min(ilen, le16_to_cpu(ack.len));
				} else {
					dst = (u8 *)&recv_crc;
					dst_len = sizeof(recv_crc);
				}
			} else if (offset < sizeof(ack) + le16_to_cpu(ack.len)) {
				/* filling the payload */
				offset += cpy_len;
				crc = crc32(crc, crc_src, cpy_len);

				if (src_len) {
					/* payload longer than ibuf: drain excess */
					int remain = sizeof(ack) + le16_to_cpu(ack.len) - offset;

					cpy_len = min(src_len, remain);
					offset += cpy_len;
					crc = crc32(crc, src, cpy_len);
					src += cpy_len;
					src_len -= cpy_len;
+ if (src_len) { + dst = (u8 *)&recv_crc; + dst_len = sizeof(recv_crc); + continue; + } + } + next_xfer_len = VSC_TP_NEXT_XFER_LEN(le16_to_cpu(ack.len), offset); + } else if (offset < sizeof(ack) + le16_to_cpu(ack.len) + VSC_TP_CRC_SIZE) { + offset += cpy_len; + + if (src_len) { + /* terminate the traverse */ + next_xfer_len = 0; + break; + } + next_xfer_len = VSC_TP_NEXT_XFER_LEN(le16_to_cpu(ack.len), offset); + } + } + } while (next_xfer_len > 0 && --count_down); + + if (next_xfer_len > 0) + return -EAGAIN; + + if (~recv_crc != crc || le32_to_cpu(ack.seq) != tp->seq) { + dev_err(&tp->spi->dev, "recv crc or seq error\n"); + return -EINVAL; + } + + if (ack.cmd == VSC_TP_CMD_ACK || ack.cmd == VSC_TP_CMD_NACK || + ack.cmd == VSC_TP_CMD_BUSY) { + dev_err(&tp->spi->dev, "recv cmd ack error\n"); + return -EAGAIN; + } + + return min(le16_to_cpu(ack.len), ilen); +} + +/** + * vsc_tp_xfer - transfer data to firmware + * @tp: vsc_tp device handle + * @cmd: the command to be sent to the device + * @obuf: the tx buffer to be sent to the device + * @olen: the length of tx buffer + * @ibuf: the rx buffer to receive from the device + * @ilen: the length of rx buffer + * Return: the length of received data in case of success, + * otherwise negative value + */ +int vsc_tp_xfer(struct vsc_tp *tp, u8 cmd, const void *obuf, size_t olen, + void *ibuf, size_t ilen) +{ + struct vsc_tp_packet *pkt = tp->tx_buf; + u32 crc; + int ret; + + if (!obuf || !ibuf || olen > VSC_TP_MAX_MSG_SIZE) + return -EINVAL; + + guard(mutex)(&tp->mutex); + + pkt->hdr.sync = VSC_TP_PACKET_SYNC; + pkt->hdr.cmd = cmd; + pkt->hdr.len = cpu_to_le16(olen); + pkt->hdr.seq = cpu_to_le32(++tp->seq); + memcpy(pkt->buf, obuf, olen); + + crc = ~crc32(~0, (u8 *)pkt, sizeof(pkt) + olen); + memcpy(pkt->buf + olen, &crc, sizeof(crc)); + + ret = vsc_tp_wakeup_request(tp); + if (unlikely(ret)) + dev_err(&tp->spi->dev, "wakeup firmware failed ret: %d\n", ret); + else + ret = vsc_tp_xfer_helper(tp, pkt, ibuf, ilen); + + 
vsc_tp_wakeup_release(tp);

	return ret;
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_xfer, "VSC_TP");

/**
 * vsc_tp_rom_xfer - transfer data to rom code
 * @tp: vsc_tp device handle
 * @obuf: the data buffer to be sent to the device
 * @ibuf: the buffer to receive data from the device
 * @len: the length of tx buffer and rx buffer
 * Return: 0 in case of success, negative value in case of error
 */
int vsc_tp_rom_xfer(struct vsc_tp *tp, const void *obuf, void *ibuf, size_t len)
{
	size_t words = len / sizeof(__be32);
	int ret;

	/* ROM protocol works in whole 32-bit words only */
	if (len % sizeof(__be32) || len > VSC_TP_MAX_MSG_SIZE)
		return -EINVAL;

	guard(mutex)(&tp->mutex);

	/* rom xfer is big endian */
	cpu_to_be32_array((__be32 *)tp->tx_buf, obuf, words);

	/* wait until the wakeuphost GPIO reads 0 before issuing the command */
	ret = read_poll_timeout(gpiod_get_value_cansleep, ret,
				!ret, VSC_TP_ROM_XFER_POLL_DELAY_US,
				VSC_TP_ROM_XFER_POLL_TIMEOUT_US, false,
				tp->wakeuphost);
	if (ret) {
		dev_err(&tp->spi->dev, "wait rom failed ret: %d\n", ret);
		return ret;
	}

	/* rx_buf is only used (and converted back) when the caller wants data */
	ret = vsc_tp_dev_xfer(tp, tp->tx_buf, ibuf ? tp->rx_buf : NULL, len);
	if (ret)
		return ret;

	if (ibuf)
		be32_to_cpu_array(ibuf, (__be32 *)tp->rx_buf, words);

	return ret;
}

/**
 * vsc_tp_reset - reset vsc transport layer
 * @tp: vsc_tp device handle
 *
 * Holds the firmware in reset via the resetfw GPIO, releases it, waits for
 * the ROM to boot, and clears any stale wakeup state.
 */
void vsc_tp_reset(struct vsc_tp *tp)
{
	disable_irq(tp->spi->irq);

	/* toggle reset pin */
	gpiod_set_value_cansleep(tp->resetfw, 0);
	msleep(VSC_TP_RESET_PIN_TOGGLE_INTERVAL_MS);
	gpiod_set_value_cansleep(tp->resetfw, 1);

	/* wait for ROM */
	msleep(VSC_TP_ROM_BOOTUP_DELAY_MS);

	/*
	 * Set default host wakeup pin to non-active
	 * to avoid unexpected host irq interrupt.
+ */ + gpiod_set_value_cansleep(tp->wakeupfw, 1); + + atomic_set(&tp->assert_cnt, 0); +} +EXPORT_SYMBOL_NS_GPL(vsc_tp_reset, "VSC_TP"); + +/** + * vsc_tp_need_read - check if device has data to sent + * @tp: vsc_tp device handle + * Return: true if device has data to sent, otherwise false + */ +bool vsc_tp_need_read(struct vsc_tp *tp) +{ + if (!atomic_read(&tp->assert_cnt)) + return false; + if (!gpiod_get_value_cansleep(tp->wakeuphost)) + return false; + if (!gpiod_get_value_cansleep(tp->wakeupfw)) + return false; + + return true; +} +EXPORT_SYMBOL_NS_GPL(vsc_tp_need_read, "VSC_TP"); + +/** + * vsc_tp_register_event_cb - register a callback function to receive event + * @tp: vsc_tp device handle + * @event_cb: callback function + * @context: execution context of event callback + * Return: 0 in case of success, negative value in case of error + */ +int vsc_tp_register_event_cb(struct vsc_tp *tp, vsc_tp_event_cb_t event_cb, + void *context) +{ + guard(mutex)(&tp->event_notify_mutex); + + tp->event_notify = event_cb; + tp->event_notify_context = context; + + return 0; +} +EXPORT_SYMBOL_NS_GPL(vsc_tp_register_event_cb, "VSC_TP"); + +/** + * vsc_tp_intr_synchronize - synchronize vsc_tp interrupt + * @tp: vsc_tp device handle + */ +void vsc_tp_intr_synchronize(struct vsc_tp *tp) +{ + synchronize_irq(tp->spi->irq); +} +EXPORT_SYMBOL_NS_GPL(vsc_tp_intr_synchronize, "VSC_TP"); + +/** + * vsc_tp_intr_enable - enable vsc_tp interrupt + * @tp: vsc_tp device handle + */ +void vsc_tp_intr_enable(struct vsc_tp *tp) +{ + enable_irq(tp->spi->irq); +} +EXPORT_SYMBOL_NS_GPL(vsc_tp_intr_enable, "VSC_TP"); + +/** + * vsc_tp_intr_disable - disable vsc_tp interrupt + * @tp: vsc_tp device handle + */ +void vsc_tp_intr_disable(struct vsc_tp *tp) +{ + disable_irq(tp->spi->irq); +} +EXPORT_SYMBOL_NS_GPL(vsc_tp_intr_disable, "VSC_TP"); + +static int vsc_tp_match_any(struct acpi_device *adev, void *data) +{ + struct acpi_device **__adev = data; + + *__adev = adev; + + return 1; +} + +static 
int vsc_tp_probe(struct spi_device *spi) +{ + struct vsc_tp *tp; + struct platform_device_info pinfo = { + .name = "intel_vsc", + .data = &tp, + .size_data = sizeof(tp), + .id = PLATFORM_DEVID_NONE, + }; + struct device *dev = &spi->dev; + struct platform_device *pdev; + struct acpi_device *adev; + int ret; + + tp = devm_kzalloc(dev, sizeof(*tp), GFP_KERNEL); + if (!tp) + return -ENOMEM; + + tp->tx_buf = devm_kzalloc(dev, sizeof(*tp->tx_buf), GFP_KERNEL); + if (!tp->tx_buf) + return -ENOMEM; + + tp->rx_buf = devm_kzalloc(dev, sizeof(*tp->rx_buf), GFP_KERNEL); + if (!tp->rx_buf) + return -ENOMEM; + + ret = devm_acpi_dev_add_driver_gpios(dev, vsc_tp_acpi_gpios); + if (ret) + return ret; + + tp->wakeuphost = devm_gpiod_get(dev, "wakeuphostint", GPIOD_IN); + if (IS_ERR(tp->wakeuphost)) + return PTR_ERR(tp->wakeuphost); + + tp->resetfw = devm_gpiod_get(dev, "resetfw", GPIOD_OUT_HIGH); + if (IS_ERR(tp->resetfw)) + return PTR_ERR(tp->resetfw); + + tp->wakeupfw = devm_gpiod_get(dev, "wakeupfw", GPIOD_OUT_HIGH); + if (IS_ERR(tp->wakeupfw)) + return PTR_ERR(tp->wakeupfw); + + atomic_set(&tp->assert_cnt, 0); + init_waitqueue_head(&tp->xfer_wait); + tp->spi = spi; + + irq_set_status_flags(spi->irq, IRQ_DISABLE_UNLAZY); + ret = request_threaded_irq(spi->irq, NULL, vsc_tp_isr, + IRQF_TRIGGER_FALLING | IRQF_ONESHOT, + dev_name(dev), tp); + if (ret) + return ret; + + mutex_init(&tp->mutex); + mutex_init(&tp->event_notify_mutex); + INIT_WORK(&tp->event_work, vsc_tp_event_work); + + /* only one child acpi device */ + ret = acpi_dev_for_each_child(ACPI_COMPANION(dev), + vsc_tp_match_any, &adev); + if (!ret) { + ret = -ENODEV; + goto err_destroy_lock; + } + + pinfo.fwnode = acpi_fwnode_handle(adev); + pdev = platform_device_register_full(&pinfo); + if (IS_ERR(pdev)) { + ret = PTR_ERR(pdev); + goto err_destroy_lock; + } + + tp->pdev = pdev; + spi_set_drvdata(spi, tp); + + return 0; + +err_destroy_lock: + free_irq(spi->irq, tp); + + cancel_work_sync(&tp->event_work); + 
mutex_destroy(&tp->event_notify_mutex); + mutex_destroy(&tp->mutex); + + return ret; +} + +/* Note this is also used for shutdown */ +static void vsc_tp_remove(struct spi_device *spi) +{ + struct vsc_tp *tp = spi_get_drvdata(spi); + + platform_device_unregister(tp->pdev); + + free_irq(spi->irq, tp); + + cancel_work_sync(&tp->event_work); + mutex_destroy(&tp->event_notify_mutex); + mutex_destroy(&tp->mutex); +} + +static const struct acpi_device_id vsc_tp_acpi_ids[] = { + { "INTC1009" }, /* Raptor Lake */ + { "INTC1058" }, /* Tiger Lake */ + { "INTC1094" }, /* Alder Lake */ + { "INTC10D0" }, /* Meteor Lake */ + {} +}; +MODULE_DEVICE_TABLE(acpi, vsc_tp_acpi_ids); + +static struct spi_driver vsc_tp_driver = { + .probe = vsc_tp_probe, + .remove = vsc_tp_remove, + .shutdown = vsc_tp_remove, + .driver = { + .name = "vsc-tp", + .acpi_match_table = vsc_tp_acpi_ids, + }, +}; +module_spi_driver(vsc_tp_driver); + +MODULE_AUTHOR("Wentong Wu <wentong.wu@intel.com>"); +MODULE_AUTHOR("Zhifeng Wang <zhifeng.wang@intel.com>"); +MODULE_DESCRIPTION("Intel Visual Sensing Controller Transport Layer"); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/mei/vsc-tp.h b/drivers/misc/mei/vsc-tp.h new file mode 100644 index 000000000000..f9513ddc3e40 --- /dev/null +++ b/drivers/misc/mei/vsc-tp.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2023, Intel Corporation. + * Intel Visual Sensing Controller Transport Layer Linux driver + */ + +#ifndef _VSC_TP_H_ +#define _VSC_TP_H_ + +#include <linux/types.h> + +#define VSC_TP_CMD_WRITE 0x01 +#define VSC_TP_CMD_READ 0x02 + +#define VSC_TP_CMD_ACK 0x10 +#define VSC_TP_CMD_NACK 0x11 +#define VSC_TP_CMD_BUSY 0x12 + +struct vsc_tp; + +/** + * typedef vsc_event_cb_t - event callback function signature + * @context: the execution context of who registered this callback + * + * The callback function is called in interrupt context and the data + * payload is only valid during the call. 
If the user needs access + * the data payload later, it must copy the payload. + */ +typedef void (*vsc_tp_event_cb_t)(void *context); + +int vsc_tp_rom_xfer(struct vsc_tp *tp, const void *obuf, void *ibuf, + size_t len); + +int vsc_tp_xfer(struct vsc_tp *tp, u8 cmd, const void *obuf, size_t olen, + void *ibuf, size_t ilen); + +int vsc_tp_register_event_cb(struct vsc_tp *tp, vsc_tp_event_cb_t event_cb, + void *context); + +void vsc_tp_intr_enable(struct vsc_tp *tp); +void vsc_tp_intr_disable(struct vsc_tp *tp); +void vsc_tp_intr_synchronize(struct vsc_tp *tp); + +void vsc_tp_reset(struct vsc_tp *tp); + +bool vsc_tp_need_read(struct vsc_tp *tp); + +int vsc_tp_init(struct vsc_tp *tp, struct device *dev); + +#endif diff --git a/drivers/misc/mrvl_cn10k_dpi.c b/drivers/misc/mrvl_cn10k_dpi.c new file mode 100644 index 000000000000..7d5433121ff6 --- /dev/null +++ b/drivers/misc/mrvl_cn10k_dpi.c @@ -0,0 +1,676 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell Octeon CN10K DPI driver + * + * Copyright (C) 2024 Marvell. 
+ * + */ + +#include <linux/bitfield.h> +#include <linux/compat.h> +#include <linux/delay.h> +#include <linux/miscdevice.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/irq.h> +#include <linux/interrupt.h> + +#include <uapi/misc/mrvl_cn10k_dpi.h> + +/* PCI device IDs */ +#define PCI_DEVID_MRVL_CN10K_DPI_PF 0xA080 +#define PCI_SUBDEVID_MRVL_CN10K_DPI_PF 0xB900 + +/* PCI BAR Number */ +#define PCI_DPI_CFG_BAR 0 + +/* MSI-X interrupts */ +#define DPI_MAX_REQQ_INT 0x20 +#define DPI_MAX_CC_INT 0x40 + +/* MBOX MSI-X interrupt vector index */ +#define DPI_MBOX_PF_VF_INT_IDX 0x75 + +#define DPI_MAX_IRQS (DPI_MBOX_PF_VF_INT_IDX + 1) + +#define DPI_MAX_VFS 0x20 + +#define DPI_MAX_ENG_FIFO_SZ 0x20 +#define DPI_MAX_ENG_MOLR 0x400 + +#define DPI_DMA_IDS_DMA_NPA_PF_FUNC(x) FIELD_PREP(GENMASK_ULL(31, 16), x) +#define DPI_DMA_IDS_INST_STRM(x) FIELD_PREP(GENMASK_ULL(47, 40), x) +#define DPI_DMA_IDS_DMA_STRM(x) FIELD_PREP(GENMASK_ULL(39, 32), x) +#define DPI_DMA_ENG_EN_MOLR(x) FIELD_PREP(GENMASK_ULL(41, 32), x) +#define DPI_EBUS_PORTX_CFG_MPS(x) FIELD_PREP(GENMASK(6, 4), x) +#define DPI_DMA_IDS_DMA_SSO_PF_FUNC(x) FIELD_PREP(GENMASK(15, 0), x) +#define DPI_DMA_IDS2_INST_AURA(x) FIELD_PREP(GENMASK(19, 0), x) +#define DPI_DMA_IBUFF_CSIZE_CSIZE(x) FIELD_PREP(GENMASK(13, 0), x) +#define DPI_EBUS_PORTX_CFG_MRRS(x) FIELD_PREP(GENMASK(2, 0), x) +#define DPI_ENG_BUF_BLKS(x) FIELD_PREP(GENMASK(5, 0), x) +#define DPI_DMA_CONTROL_DMA_ENB GENMASK_ULL(53, 48) + +#define DPI_DMA_CONTROL_O_MODE BIT_ULL(14) +#define DPI_DMA_CONTROL_LDWB BIT_ULL(32) +#define DPI_DMA_CONTROL_WQECSMODE1 BIT_ULL(37) +#define DPI_DMA_CONTROL_ZBWCSEN BIT_ULL(39) +#define DPI_DMA_CONTROL_WQECSOFF(ofst) (((u64)ofst) << 40) +#define DPI_DMA_CONTROL_WQECSDIS BIT_ULL(47) +#define DPI_DMA_CONTROL_PKT_EN BIT_ULL(56) +#define DPI_DMA_IBUFF_CSIZE_NPA_FREE BIT(16) + +#define DPI_CTL_EN BIT_ULL(0) +#define DPI_DMA_CC_INT BIT_ULL(0) +#define DPI_DMA_QRST BIT_ULL(0) + +#define DPI_REQQ_INT_INSTRFLT BIT_ULL(0) 
+#define DPI_REQQ_INT_RDFLT BIT_ULL(1) +#define DPI_REQQ_INT_WRFLT BIT_ULL(2) +#define DPI_REQQ_INT_CSFLT BIT_ULL(3) +#define DPI_REQQ_INT_INST_DBO BIT_ULL(4) +#define DPI_REQQ_INT_INST_ADDR_NULL BIT_ULL(5) +#define DPI_REQQ_INT_INST_FILL_INVAL BIT_ULL(6) +#define DPI_REQQ_INT_INSTR_PSN BIT_ULL(7) + +#define DPI_REQQ_INT \ + (DPI_REQQ_INT_INSTRFLT | \ + DPI_REQQ_INT_RDFLT | \ + DPI_REQQ_INT_WRFLT | \ + DPI_REQQ_INT_CSFLT | \ + DPI_REQQ_INT_INST_DBO | \ + DPI_REQQ_INT_INST_ADDR_NULL | \ + DPI_REQQ_INT_INST_FILL_INVAL | \ + DPI_REQQ_INT_INSTR_PSN) + +#define DPI_PF_RAS_EBI_DAT_PSN BIT_ULL(0) +#define DPI_PF_RAS_NCB_DAT_PSN BIT_ULL(1) +#define DPI_PF_RAS_NCB_CMD_PSN BIT_ULL(2) + +#define DPI_PF_RAS_INT \ + (DPI_PF_RAS_EBI_DAT_PSN | \ + DPI_PF_RAS_NCB_DAT_PSN | \ + DPI_PF_RAS_NCB_CMD_PSN) + +/* Message fields in word_l of DPI mailbox structure */ +#define DPI_MBOX_VFID(msg) FIELD_GET(GENMASK_ULL(7, 0), msg) +#define DPI_MBOX_CMD(msg) FIELD_GET(GENMASK_ULL(11, 8), msg) +#define DPI_MBOX_CBUF_SIZE(msg) FIELD_GET(GENMASK_ULL(27, 12), msg) +#define DPI_MBOX_CBUF_AURA(msg) FIELD_GET(GENMASK_ULL(47, 28), msg) +#define DPI_MBOX_SSO_PFFUNC(msg) FIELD_GET(GENMASK_ULL(63, 48), msg) + +/* Message fields in word_h of DPI mailbox structure */ +#define DPI_MBOX_NPA_PFFUNC(msg) FIELD_GET(GENMASK_ULL(15, 0), msg) +#define DPI_MBOX_WQES_COMPL(msg) FIELD_GET(GENMASK_ULL(16, 16), msg) +#define DPI_MBOX_WQES_OFFSET(msg) FIELD_GET(GENMASK_ULL(23, 17), msg) + +#define DPI_DMAX_IBUFF_CSIZE(x) (0x0ULL | ((x) << 11)) +#define DPI_DMAX_IDS(x) (0x18ULL | ((x) << 11)) +#define DPI_DMAX_IDS2(x) (0x20ULL | ((x) << 11)) +#define DPI_DMAX_QRST(x) (0x30ULL | ((x) << 11)) + +#define DPI_CTL 0x10010ULL +#define DPI_DMA_CONTROL 0x10018ULL +#define DPI_PF_RAS 0x10308ULL +#define DPI_PF_RAS_ENA_W1C 0x10318ULL +#define DPI_MBOX_VF_PF_INT 0x16300ULL +#define DPI_MBOX_VF_PF_INT_W1S 0x16308ULL +#define DPI_MBOX_VF_PF_INT_ENA_W1C 0x16310ULL +#define DPI_MBOX_VF_PF_INT_ENA_W1S 0x16318ULL + +#define 
DPI_DMA_ENGX_EN(x) (0x10040ULL | ((x) << 3)) +#define DPI_ENGX_BUF(x) (0x100C0ULL | ((x) << 3)) +#define DPI_EBUS_PORTX_CFG(x) (0x10100ULL | ((x) << 3)) +#define DPI_DMA_CCX_INT(x) (0x11000ULL | ((x) << 3)) +#define DPI_DMA_CCX_INT_ENA_W1C(x) (0x11800ULL | ((x) << 3)) +#define DPI_REQQX_INT(x) (0x12C00ULL | ((x) << 5)) +#define DPI_REQQX_INT_ENA_W1C(x) (0x13800ULL | ((x) << 5)) +#define DPI_MBOX_PF_VF_DATA0(x) (0x16000ULL | ((x) << 4)) +#define DPI_MBOX_PF_VF_DATA1(x) (0x16008ULL | ((x) << 4)) + +#define DPI_WCTL_FIF_THR 0x17008ULL + +#define DPI_EBUS_MAX_PORTS 2 + +#define DPI_EBUS_MRRS_MIN 128 +#define DPI_EBUS_MRRS_MAX 1024 +#define DPI_EBUS_MPS_MIN 128 +#define DPI_EBUS_MPS_MAX 1024 +#define DPI_WCTL_FIFO_THRESHOLD 0x30 + +#define DPI_QUEUE_OPEN 0x1 +#define DPI_QUEUE_CLOSE 0x2 +#define DPI_REG_DUMP 0x3 +#define DPI_GET_REG_CFG 0x4 +#define DPI_QUEUE_OPEN_V2 0x5 + +enum dpi_mbox_rsp_type { + DPI_MBOX_TYPE_CMD, + DPI_MBOX_TYPE_RSP_ACK, + DPI_MBOX_TYPE_RSP_NACK, +}; + +struct dpivf_config { + u32 aura; + u16 csize; + u16 sso_pf_func; + u16 npa_pf_func; +}; + +struct dpipf_vf { + struct dpivf_config vf_config; + bool setup_done; + u8 this_vfid; +}; + +/* DPI device mailbox */ +struct dpi_mbox { + struct work_struct work; + /* lock to serialize mbox requests */ + struct mutex lock; + struct dpipf *pf; + u8 __iomem *pf_vf_data_reg; + u8 __iomem *vf_pf_data_reg; +}; + +struct dpipf { + struct miscdevice miscdev; + void __iomem *reg_base; + struct pci_dev *pdev; + struct dpipf_vf vf[DPI_MAX_VFS]; + /* Mailbox to talk to VFs */ + struct dpi_mbox *mbox[DPI_MAX_VFS]; +}; + +struct dpi_mbox_message { + uint64_t word_l; + uint64_t word_h; +}; + +static inline void dpi_reg_write(struct dpipf *dpi, u64 offset, u64 val) +{ + writeq(val, dpi->reg_base + offset); +} + +static inline u64 dpi_reg_read(struct dpipf *dpi, u64 offset) +{ + return readq(dpi->reg_base + offset); +} + +static void dpi_wqe_cs_offset(struct dpipf *dpi, u8 offset) +{ + u64 reg; + + reg = dpi_reg_read(dpi, 
DPI_DMA_CONTROL); + reg &= ~DPI_DMA_CONTROL_WQECSDIS; + reg |= DPI_DMA_CONTROL_ZBWCSEN | DPI_DMA_CONTROL_WQECSMODE1; + reg |= DPI_DMA_CONTROL_WQECSOFF(offset); + dpi_reg_write(dpi, DPI_DMA_CONTROL, reg); +} + +static int dpi_queue_init(struct dpipf *dpi, struct dpipf_vf *dpivf, u8 vf) +{ + u16 sso_pf_func = dpivf->vf_config.sso_pf_func; + u16 npa_pf_func = dpivf->vf_config.npa_pf_func; + u16 csize = dpivf->vf_config.csize; + u32 aura = dpivf->vf_config.aura; + unsigned long timeout; + u64 reg; + + dpi_reg_write(dpi, DPI_DMAX_QRST(vf), DPI_DMA_QRST); + + /* Wait for a maximum of 3 sec */ + timeout = jiffies + msecs_to_jiffies(3000); + while (!time_after(jiffies, timeout)) { + reg = dpi_reg_read(dpi, DPI_DMAX_QRST(vf)); + if (!(reg & DPI_DMA_QRST)) + break; + + /* Reset would take time for the request cache to drain */ + usleep_range(500, 1000); + } + + if (reg & DPI_DMA_QRST) { + dev_err(&dpi->pdev->dev, "Queue reset failed\n"); + return -EBUSY; + } + + dpi_reg_write(dpi, DPI_DMAX_IDS2(vf), 0); + dpi_reg_write(dpi, DPI_DMAX_IDS(vf), 0); + + reg = DPI_DMA_IBUFF_CSIZE_CSIZE(csize) | DPI_DMA_IBUFF_CSIZE_NPA_FREE; + dpi_reg_write(dpi, DPI_DMAX_IBUFF_CSIZE(vf), reg); + + reg = dpi_reg_read(dpi, DPI_DMAX_IDS2(vf)); + reg |= DPI_DMA_IDS2_INST_AURA(aura); + dpi_reg_write(dpi, DPI_DMAX_IDS2(vf), reg); + + reg = dpi_reg_read(dpi, DPI_DMAX_IDS(vf)); + reg |= DPI_DMA_IDS_DMA_NPA_PF_FUNC(npa_pf_func); + reg |= DPI_DMA_IDS_DMA_SSO_PF_FUNC(sso_pf_func); + reg |= DPI_DMA_IDS_DMA_STRM(vf + 1); + reg |= DPI_DMA_IDS_INST_STRM(vf + 1); + dpi_reg_write(dpi, DPI_DMAX_IDS(vf), reg); + + return 0; +} + +static void dpi_queue_fini(struct dpipf *dpi, u8 vf) +{ + dpi_reg_write(dpi, DPI_DMAX_QRST(vf), DPI_DMA_QRST); + + /* Reset IDS and IDS2 registers */ + dpi_reg_write(dpi, DPI_DMAX_IDS2(vf), 0); + dpi_reg_write(dpi, DPI_DMAX_IDS(vf), 0); +} + +static irqreturn_t dpi_mbox_intr_handler(int irq, void *data) +{ + struct dpipf *dpi = data; + u64 reg; + u32 vf; + + reg = dpi_reg_read(dpi, 
DPI_MBOX_VF_PF_INT); + if (reg) { + for (vf = 0; vf < pci_num_vf(dpi->pdev); vf++) { + if (reg & BIT_ULL(vf)) + schedule_work(&dpi->mbox[vf]->work); + } + dpi_reg_write(dpi, DPI_MBOX_VF_PF_INT, reg); + } + + return IRQ_HANDLED; +} + +static int queue_config(struct dpipf *dpi, struct dpipf_vf *dpivf, struct dpi_mbox_message *msg) +{ + int ret = 0; + + switch (DPI_MBOX_CMD(msg->word_l)) { + case DPI_QUEUE_OPEN: + case DPI_QUEUE_OPEN_V2: + dpivf->vf_config.aura = DPI_MBOX_CBUF_AURA(msg->word_l); + dpivf->vf_config.csize = DPI_MBOX_CMD(msg->word_l) == DPI_QUEUE_OPEN ? + DPI_MBOX_CBUF_SIZE(msg->word_l) >> 3 : + DPI_MBOX_CBUF_SIZE(msg->word_l); + dpivf->vf_config.sso_pf_func = DPI_MBOX_SSO_PFFUNC(msg->word_l); + dpivf->vf_config.npa_pf_func = DPI_MBOX_NPA_PFFUNC(msg->word_h); + ret = dpi_queue_init(dpi, dpivf, DPI_MBOX_VFID(msg->word_l)); + if (!ret) { + if (DPI_MBOX_WQES_COMPL(msg->word_h)) + dpi_wqe_cs_offset(dpi, DPI_MBOX_WQES_OFFSET(msg->word_h)); + dpivf->setup_done = true; + } + break; + case DPI_QUEUE_CLOSE: + memset(&dpivf->vf_config, 0, sizeof(struct dpivf_config)); + dpi_queue_fini(dpi, DPI_MBOX_VFID(msg->word_l)); + dpivf->setup_done = false; + break; + default: + return -EINVAL; + } + + return ret; +} + +static void dpi_pfvf_mbox_work(struct work_struct *work) +{ + struct dpi_mbox *mbox = container_of(work, struct dpi_mbox, work); + struct dpi_mbox_message msg; + struct dpipf_vf *dpivf; + struct dpipf *dpi; + int vfid, ret; + + dpi = mbox->pf; + memset(&msg, 0, sizeof(msg)); + + mutex_lock(&mbox->lock); + msg.word_l = readq(mbox->vf_pf_data_reg); + if (msg.word_l == (u64)-1) + goto exit; + + vfid = DPI_MBOX_VFID(msg.word_l); + if (vfid >= pci_num_vf(dpi->pdev)) + goto exit; + + dpivf = &dpi->vf[vfid]; + msg.word_h = readq(mbox->pf_vf_data_reg); + + ret = queue_config(dpi, dpivf, &msg); + if (ret < 0) + writeq(DPI_MBOX_TYPE_RSP_NACK, mbox->pf_vf_data_reg); + else + writeq(DPI_MBOX_TYPE_RSP_ACK, mbox->pf_vf_data_reg); +exit: + mutex_unlock(&mbox->lock); +} + 
+/* Setup registers for a PF mailbox */ +static void dpi_setup_mbox_regs(struct dpipf *dpi, int vf) +{ + struct dpi_mbox *mbox = dpi->mbox[vf]; + + mbox->pf_vf_data_reg = dpi->reg_base + DPI_MBOX_PF_VF_DATA0(vf); + mbox->vf_pf_data_reg = dpi->reg_base + DPI_MBOX_PF_VF_DATA1(vf); +} + +static int dpi_pfvf_mbox_setup(struct dpipf *dpi) +{ + int vf; + + for (vf = 0; vf < DPI_MAX_VFS; vf++) { + dpi->mbox[vf] = devm_kzalloc(&dpi->pdev->dev, sizeof(*dpi->mbox[vf]), GFP_KERNEL); + + if (!dpi->mbox[vf]) + return -ENOMEM; + + mutex_init(&dpi->mbox[vf]->lock); + INIT_WORK(&dpi->mbox[vf]->work, dpi_pfvf_mbox_work); + dpi->mbox[vf]->pf = dpi; + dpi_setup_mbox_regs(dpi, vf); + } + + return 0; +} + +static void dpi_pfvf_mbox_destroy(struct dpipf *dpi) +{ + unsigned int vf; + + for (vf = 0; vf < DPI_MAX_VFS; vf++) { + if (work_pending(&dpi->mbox[vf]->work)) + cancel_work_sync(&dpi->mbox[vf]->work); + + dpi->mbox[vf] = NULL; + } +} + +static void dpi_init(struct dpipf *dpi) +{ + unsigned int engine, port; + u8 mrrs_val, mps_val; + u64 reg; + + for (engine = 0; engine < DPI_MAX_ENGINES; engine++) { + if (engine == 4 || engine == 5) + reg = DPI_ENG_BUF_BLKS(16); + else + reg = DPI_ENG_BUF_BLKS(8); + + dpi_reg_write(dpi, DPI_ENGX_BUF(engine), reg); + } + + reg = DPI_DMA_CONTROL_ZBWCSEN | DPI_DMA_CONTROL_PKT_EN | DPI_DMA_CONTROL_LDWB | + DPI_DMA_CONTROL_O_MODE | DPI_DMA_CONTROL_DMA_ENB; + + dpi_reg_write(dpi, DPI_DMA_CONTROL, reg); + dpi_reg_write(dpi, DPI_CTL, DPI_CTL_EN); + + mrrs_val = 2; /* 512B */ + mps_val = 1; /* 256B */ + + for (port = 0; port < DPI_EBUS_MAX_PORTS; port++) { + reg = dpi_reg_read(dpi, DPI_EBUS_PORTX_CFG(port)); + reg &= ~(DPI_EBUS_PORTX_CFG_MRRS(7) | DPI_EBUS_PORTX_CFG_MPS(7)); + reg |= DPI_EBUS_PORTX_CFG_MPS(mps_val) | DPI_EBUS_PORTX_CFG_MRRS(mrrs_val); + dpi_reg_write(dpi, DPI_EBUS_PORTX_CFG(port), reg); + } + + dpi_reg_write(dpi, DPI_WCTL_FIF_THR, DPI_WCTL_FIFO_THRESHOLD); +} + +static void dpi_fini(struct dpipf *dpi) +{ + unsigned int engine; + + for 
(engine = 0; engine < DPI_MAX_ENGINES; engine++) + dpi_reg_write(dpi, DPI_ENGX_BUF(engine), 0); + + dpi_reg_write(dpi, DPI_DMA_CONTROL, 0); + dpi_reg_write(dpi, DPI_CTL, 0); +} + +static void dpi_free_irq_vectors(void *pdev) +{ + pci_free_irq_vectors((struct pci_dev *)pdev); +} + +static int dpi_irq_init(struct dpipf *dpi) +{ + struct pci_dev *pdev = dpi->pdev; + struct device *dev = &pdev->dev; + int i, ret; + + /* Clear all RAS interrupts */ + dpi_reg_write(dpi, DPI_PF_RAS, DPI_PF_RAS_INT); + + /* Clear all RAS interrupt enable bits */ + dpi_reg_write(dpi, DPI_PF_RAS_ENA_W1C, DPI_PF_RAS_INT); + + for (i = 0; i < DPI_MAX_REQQ_INT; i++) { + dpi_reg_write(dpi, DPI_REQQX_INT(i), DPI_REQQ_INT); + dpi_reg_write(dpi, DPI_REQQX_INT_ENA_W1C(i), DPI_REQQ_INT); + } + + for (i = 0; i < DPI_MAX_CC_INT; i++) { + dpi_reg_write(dpi, DPI_DMA_CCX_INT(i), DPI_DMA_CC_INT); + dpi_reg_write(dpi, DPI_DMA_CCX_INT_ENA_W1C(i), DPI_DMA_CC_INT); + } + + ret = pci_alloc_irq_vectors(pdev, DPI_MAX_IRQS, DPI_MAX_IRQS, PCI_IRQ_MSIX); + if (ret != DPI_MAX_IRQS) { + dev_err(dev, "DPI: Failed to alloc %d msix irqs\n", DPI_MAX_IRQS); + return ret; + } + + ret = devm_add_action_or_reset(dev, dpi_free_irq_vectors, pdev); + if (ret) { + dev_err(dev, "DPI: Failed to add irq free action\n"); + return ret; + } + + ret = devm_request_irq(dev, pci_irq_vector(pdev, DPI_MBOX_PF_VF_INT_IDX), + dpi_mbox_intr_handler, 0, "dpi-mbox", dpi); + if (ret) { + dev_err(dev, "DPI: request_irq failed for mbox; err=%d\n", ret); + return ret; + } + + dpi_reg_write(dpi, DPI_MBOX_VF_PF_INT_ENA_W1S, GENMASK_ULL(31, 0)); + + return 0; +} + +static int dpi_mps_mrrs_config(struct dpipf *dpi, void __user *arg) +{ + struct dpi_mps_mrrs_cfg cfg; + u8 mrrs_val, mps_val; + u64 reg; + + if (copy_from_user(&cfg, arg, sizeof(struct dpi_mps_mrrs_cfg))) + return -EFAULT; + + if (cfg.max_read_req_sz < DPI_EBUS_MRRS_MIN || cfg.max_read_req_sz > DPI_EBUS_MRRS_MAX || + !is_power_of_2(cfg.max_read_req_sz)) + return -EINVAL; + + if 
(cfg.max_payload_sz < DPI_EBUS_MPS_MIN || cfg.max_payload_sz > DPI_EBUS_MPS_MAX || + !is_power_of_2(cfg.max_payload_sz)) + return -EINVAL; + + if (cfg.port >= DPI_EBUS_MAX_PORTS) + return -EINVAL; + + /* Make sure reserved fields are set to 0 */ + if (cfg.reserved) + return -EINVAL; + + mrrs_val = fls(cfg.max_read_req_sz >> 8); + mps_val = fls(cfg.max_payload_sz >> 8); + + reg = dpi_reg_read(dpi, DPI_EBUS_PORTX_CFG(cfg.port)); + reg &= ~(DPI_EBUS_PORTX_CFG_MRRS(0x7) | DPI_EBUS_PORTX_CFG_MPS(0x7)); + reg |= DPI_EBUS_PORTX_CFG_MPS(mps_val) | DPI_EBUS_PORTX_CFG_MRRS(mrrs_val); + dpi_reg_write(dpi, DPI_EBUS_PORTX_CFG(cfg.port), reg); + + return 0; +} + +static int dpi_engine_config(struct dpipf *dpi, void __user *arg) +{ + struct dpi_engine_cfg cfg; + unsigned int engine; + u8 *eng_buf; + u64 reg; + + if (copy_from_user(&cfg, arg, sizeof(struct dpi_engine_cfg))) + return -EFAULT; + + /* Make sure reserved fields are set to 0 */ + if (cfg.reserved) + return -EINVAL; + + eng_buf = (u8 *)&cfg.fifo_mask; + + for (engine = 0; engine < DPI_MAX_ENGINES; engine++) { + if (eng_buf[engine] > DPI_MAX_ENG_FIFO_SZ) + return -EINVAL; + dpi_reg_write(dpi, DPI_ENGX_BUF(engine), eng_buf[engine]); + + if (cfg.update_molr) { + if (cfg.molr[engine] > DPI_MAX_ENG_MOLR) + return -EINVAL; + reg = DPI_DMA_ENG_EN_MOLR(cfg.molr[engine]); + dpi_reg_write(dpi, DPI_DMA_ENGX_EN(engine), reg); + } else { + /* Make sure unused fields are set to 0 */ + if (cfg.molr[engine]) + return -EINVAL; + } + } + + return 0; +} + +static long dpi_dev_ioctl(struct file *fptr, unsigned int cmd, unsigned long data) +{ + void __user *arg = (void __user *)data; + struct dpipf *dpi; + int ret; + + dpi = container_of(fptr->private_data, struct dpipf, miscdev); + + switch (cmd) { + case DPI_MPS_MRRS_CFG: + ret = dpi_mps_mrrs_config(dpi, arg); + break; + case DPI_ENGINE_CFG: + ret = dpi_engine_config(dpi, arg); + break; + default: + ret = -ENOTTY; + break; + } + + return ret; +} + +static const struct file_operations 
dpi_device_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = dpi_dev_ioctl, + .compat_ioctl = compat_ptr_ioctl, +}; + +static int dpi_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct device *dev = &pdev->dev; + struct dpipf *dpi; + int ret; + + dpi = devm_kzalloc(dev, sizeof(*dpi), GFP_KERNEL); + if (!dpi) + return -ENOMEM; + + dpi->pdev = pdev; + + ret = pcim_enable_device(pdev); + if (ret) { + dev_err(dev, "DPI: Failed to enable PCI device\n"); + return ret; + } + + ret = pcim_iomap_regions(pdev, BIT(0) | BIT(4), KBUILD_MODNAME); + if (ret) { + dev_err(dev, "DPI: Failed to request MMIO region\n"); + return ret; + } + + dpi->reg_base = pcim_iomap_table(pdev)[PCI_DPI_CFG_BAR]; + + /* Initialize global PF registers */ + dpi_init(dpi); + + /* Setup PF-VF mailbox */ + ret = dpi_pfvf_mbox_setup(dpi); + if (ret) { + dev_err(dev, "DPI: Failed to setup pf-vf mbox\n"); + goto err_dpi_fini; + } + + /* Register interrupts */ + ret = dpi_irq_init(dpi); + if (ret) { + dev_err(dev, "DPI: Failed to initialize irq vectors\n"); + goto err_dpi_mbox_free; + } + + pci_set_drvdata(pdev, dpi); + dpi->miscdev.minor = MISC_DYNAMIC_MINOR; + dpi->miscdev.name = KBUILD_MODNAME; + dpi->miscdev.fops = &dpi_device_fops; + dpi->miscdev.parent = dev; + + ret = misc_register(&dpi->miscdev); + if (ret) { + dev_err(dev, "DPI: Failed to register misc device\n"); + goto err_dpi_mbox_free; + } + + return 0; + +err_dpi_mbox_free: + dpi_pfvf_mbox_destroy(dpi); +err_dpi_fini: + dpi_fini(dpi); + return ret; +} + +static void dpi_remove(struct pci_dev *pdev) +{ + struct dpipf *dpi = pci_get_drvdata(pdev); + + misc_deregister(&dpi->miscdev); + pci_sriov_configure_simple(pdev, 0); + dpi_pfvf_mbox_destroy(dpi); + dpi_fini(dpi); + pci_set_drvdata(pdev, NULL); +} + +static const struct pci_device_id dpi_id_table[] = { + { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_MRVL_CN10K_DPI_PF, + PCI_VENDOR_ID_CAVIUM, PCI_SUBDEVID_MRVL_CN10K_DPI_PF) }, + { 0, } /* end of table */ +}; + +static 
struct pci_driver dpi_driver = { + .name = KBUILD_MODNAME, + .id_table = dpi_id_table, + .probe = dpi_probe, + .remove = dpi_remove, + .sriov_configure = pci_sriov_configure_simple, +}; + +module_pci_driver(dpi_driver); +MODULE_DEVICE_TABLE(pci, dpi_id_table); +MODULE_AUTHOR("Marvell."); +MODULE_DESCRIPTION("Marvell Octeon CN10K DPI Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/nsm.c b/drivers/misc/nsm.c new file mode 100644 index 000000000000..ef7b32742340 --- /dev/null +++ b/drivers/misc/nsm.c @@ -0,0 +1,505 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Amazon Nitro Secure Module driver. + * + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * The Nitro Secure Module implements commands via CBOR over virtio. + * This driver exposes raw message ioctls on /dev/nsm that user + * space can use to issue these commands. + */ + +#include <linux/file.h> +#include <linux/fs.h> +#include <linux/interrupt.h> +#include <linux/hw_random.h> +#include <linux/miscdevice.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/uaccess.h> +#include <linux/uio.h> +#include <linux/virtio_config.h> +#include <linux/virtio_ids.h> +#include <linux/virtio.h> +#include <linux/wait.h> +#include <uapi/linux/nsm.h> + +/* Timeout for NSM virtqueue response in milliseconds. 
*/ +#define NSM_DEFAULT_TIMEOUT_MSECS (120000) /* 2 minutes */ + +/* Maximum length input data */ +struct nsm_data_req { + u32 len; + u8 data[NSM_REQUEST_MAX_SIZE]; +}; + +/* Maximum length output data */ +struct nsm_data_resp { + u32 len; + u8 data[NSM_RESPONSE_MAX_SIZE]; +}; + +/* Full NSM request/response message */ +struct nsm_msg { + struct nsm_data_req req; + struct nsm_data_resp resp; +}; + +struct nsm { + struct virtio_device *vdev; + struct virtqueue *vq; + struct mutex lock; + struct completion cmd_done; + struct miscdevice misc; + struct hwrng hwrng; + struct work_struct misc_init; + struct nsm_msg msg; +}; + +/* NSM device ID */ +static const struct virtio_device_id id_table[] = { + { VIRTIO_ID_NITRO_SEC_MOD, VIRTIO_DEV_ANY_ID }, + { 0 }, +}; + +static struct nsm *file_to_nsm(struct file *file) +{ + return container_of(file->private_data, struct nsm, misc); +} + +static struct nsm *hwrng_to_nsm(struct hwrng *rng) +{ + return container_of(rng, struct nsm, hwrng); +} + +#define CBOR_TYPE_MASK 0xE0 +#define CBOR_TYPE_MAP 0xA0 +#define CBOR_TYPE_TEXT 0x60 +#define CBOR_TYPE_ARRAY 0x40 +#define CBOR_HEADER_SIZE_SHORT 1 + +#define CBOR_SHORT_SIZE_MAX_VALUE 23 +#define CBOR_LONG_SIZE_U8 24 +#define CBOR_LONG_SIZE_U16 25 +#define CBOR_LONG_SIZE_U32 26 +#define CBOR_LONG_SIZE_U64 27 + +static bool cbor_object_is_array(const u8 *cbor_object, size_t cbor_object_size) +{ + if (cbor_object_size == 0 || cbor_object == NULL) + return false; + + return (cbor_object[0] & CBOR_TYPE_MASK) == CBOR_TYPE_ARRAY; +} + +static int cbor_object_get_array(u8 *cbor_object, size_t cbor_object_size, u8 **cbor_array) +{ + u8 cbor_short_size; + void *array_len_p; + u64 array_len; + u64 array_offset; + + if (!cbor_object_is_array(cbor_object, cbor_object_size)) + return -EFAULT; + + cbor_short_size = (cbor_object[0] & 0x1F); + + /* Decoding byte array length */ + array_offset = CBOR_HEADER_SIZE_SHORT; + if (cbor_short_size >= CBOR_LONG_SIZE_U8) + array_offset += BIT(cbor_short_size - 
CBOR_LONG_SIZE_U8); + + if (cbor_object_size < array_offset) + return -EFAULT; + + array_len_p = &cbor_object[1]; + + switch (cbor_short_size) { + case CBOR_SHORT_SIZE_MAX_VALUE: /* short encoding */ + array_len = cbor_short_size; + break; + case CBOR_LONG_SIZE_U8: + array_len = *(u8 *)array_len_p; + break; + case CBOR_LONG_SIZE_U16: + array_len = be16_to_cpup((__be16 *)array_len_p); + break; + case CBOR_LONG_SIZE_U32: + array_len = be32_to_cpup((__be32 *)array_len_p); + break; + case CBOR_LONG_SIZE_U64: + array_len = be64_to_cpup((__be64 *)array_len_p); + break; + } + + if (cbor_object_size < array_offset) + return -EFAULT; + + if (cbor_object_size - array_offset < array_len) + return -EFAULT; + + if (array_len > INT_MAX) + return -EFAULT; + + *cbor_array = cbor_object + array_offset; + return array_len; +} + +/* Copy the request of a raw message to kernel space */ +static int fill_req_raw(struct nsm *nsm, struct nsm_data_req *req, + struct nsm_raw *raw) +{ + /* Verify the user input size. */ + if (raw->request.len > sizeof(req->data)) + return -EMSGSIZE; + + /* Copy the request payload */ + if (copy_from_user(req->data, u64_to_user_ptr(raw->request.addr), + raw->request.len)) + return -EFAULT; + + req->len = raw->request.len; + + return 0; +} + +/* Copy the response of a raw message back to user-space */ +static int parse_resp_raw(struct nsm *nsm, struct nsm_data_resp *resp, + struct nsm_raw *raw) +{ + /* Truncate any message that does not fit. 
*/ + raw->response.len = min_t(u64, raw->response.len, resp->len); + + /* Copy the response content to user space */ + if (copy_to_user(u64_to_user_ptr(raw->response.addr), + resp->data, raw->response.len)) + return -EFAULT; + + return 0; +} + +/* Virtqueue interrupt handler */ +static void nsm_vq_callback(struct virtqueue *vq) +{ + struct nsm *nsm = vq->vdev->priv; + + complete(&nsm->cmd_done); +} + +/* Forward a message to the NSM device and wait for the response from it */ +static int nsm_sendrecv_msg_locked(struct nsm *nsm) +{ + struct device *dev = &nsm->vdev->dev; + struct scatterlist sg_in, sg_out; + struct nsm_msg *msg = &nsm->msg; + struct virtqueue *vq = nsm->vq; + unsigned int len; + void *queue_buf; + bool kicked; + int rc; + + /* Initialize scatter-gather lists with request and response buffers. */ + sg_init_one(&sg_out, msg->req.data, msg->req.len); + sg_init_one(&sg_in, msg->resp.data, sizeof(msg->resp.data)); + + init_completion(&nsm->cmd_done); + /* Add the request buffer (read by the device). */ + rc = virtqueue_add_outbuf(vq, &sg_out, 1, msg->req.data, GFP_KERNEL); + if (rc) + return rc; + + /* Add the response buffer (written by the device). */ + rc = virtqueue_add_inbuf(vq, &sg_in, 1, msg->resp.data, GFP_KERNEL); + if (rc) + goto cleanup; + + kicked = virtqueue_kick(vq); + if (!kicked) { + /* Cannot kick the virtqueue. */ + rc = -EIO; + goto cleanup; + } + + /* If the kick succeeded, wait for the device's response. 
*/ + if (!wait_for_completion_io_timeout(&nsm->cmd_done, + msecs_to_jiffies(NSM_DEFAULT_TIMEOUT_MSECS))) { + rc = -ETIMEDOUT; + goto cleanup; + } + + queue_buf = virtqueue_get_buf(vq, &len); + if (!queue_buf || (queue_buf != msg->req.data)) { + dev_err(dev, "wrong request buffer."); + rc = -ENODATA; + goto cleanup; + } + + queue_buf = virtqueue_get_buf(vq, &len); + if (!queue_buf || (queue_buf != msg->resp.data)) { + dev_err(dev, "wrong response buffer."); + rc = -ENODATA; + goto cleanup; + } + + msg->resp.len = len; + + rc = 0; + +cleanup: + if (rc) { + /* Clean the virtqueue. */ + while (virtqueue_get_buf(vq, &len) != NULL) + ; + } + + return rc; +} + +static int fill_req_get_random(struct nsm *nsm, struct nsm_data_req *req) +{ + /* + * 69 # text(9) + * 47657452616E646F6D # "GetRandom" + */ + const u8 request[] = { CBOR_TYPE_TEXT + strlen("GetRandom"), + 'G', 'e', 't', 'R', 'a', 'n', 'd', 'o', 'm' }; + + memcpy(req->data, request, sizeof(request)); + req->len = sizeof(request); + + return 0; +} + +static int parse_resp_get_random(struct nsm *nsm, struct nsm_data_resp *resp, + void *out, size_t max) +{ + /* + * A1 # map(1) + * 69 # text(9) - Name of field + * 47657452616E646F6D # "GetRandom" + * A1 # map(1) - The field itself + * 66 # text(6) + * 72616E646F6D # "random" + * # The rest of the response is random data + */ + const u8 response[] = { CBOR_TYPE_MAP + 1, + CBOR_TYPE_TEXT + strlen("GetRandom"), + 'G', 'e', 't', 'R', 'a', 'n', 'd', 'o', 'm', + CBOR_TYPE_MAP + 1, + CBOR_TYPE_TEXT + strlen("random"), + 'r', 'a', 'n', 'd', 'o', 'm' }; + struct device *dev = &nsm->vdev->dev; + u8 *rand_data = NULL; + u8 *resp_ptr = resp->data; + u64 resp_len = resp->len; + int rc; + + if ((resp->len < sizeof(response) + 1) || + (memcmp(resp_ptr, response, sizeof(response)) != 0)) { + dev_err(dev, "Invalid response for GetRandom"); + return -EFAULT; + } + + resp_ptr += sizeof(response); + resp_len -= sizeof(response); + + rc = cbor_object_get_array(resp_ptr, resp_len, 
&rand_data); + if (rc < 0) { + dev_err(dev, "GetRandom: Invalid CBOR encoding\n"); + return rc; + } + + rc = min_t(size_t, rc, max); + memcpy(out, rand_data, rc); + + return rc; +} + +/* + * HwRNG implementation + */ +static int nsm_rng_read(struct hwrng *rng, void *data, size_t max, bool wait) +{ + struct nsm *nsm = hwrng_to_nsm(rng); + struct device *dev = &nsm->vdev->dev; + int rc = 0; + + /* NSM always needs to wait for a response */ + if (!wait) + return 0; + + mutex_lock(&nsm->lock); + + rc = fill_req_get_random(nsm, &nsm->msg.req); + if (rc != 0) + goto out; + + rc = nsm_sendrecv_msg_locked(nsm); + if (rc != 0) + goto out; + + rc = parse_resp_get_random(nsm, &nsm->msg.resp, data, max); + if (rc < 0) + goto out; + + dev_dbg(dev, "RNG: returning rand bytes = %d", rc); +out: + mutex_unlock(&nsm->lock); + return rc; +} + +static long nsm_dev_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + void __user *argp = u64_to_user_ptr((u64)arg); + struct nsm *nsm = file_to_nsm(file); + struct nsm_raw raw; + int r = 0; + + if (cmd != NSM_IOCTL_RAW) + return -EINVAL; + + if (_IOC_SIZE(cmd) != sizeof(raw)) + return -EINVAL; + + /* Copy user argument struct to kernel argument struct */ + r = -EFAULT; + if (copy_from_user(&raw, argp, _IOC_SIZE(cmd))) + goto out; + + mutex_lock(&nsm->lock); + + /* Convert kernel argument struct to device request */ + r = fill_req_raw(nsm, &nsm->msg.req, &raw); + if (r) + goto out; + + /* Send message to NSM and read reply */ + r = nsm_sendrecv_msg_locked(nsm); + if (r) + goto out; + + /* Parse device response into kernel argument struct */ + r = parse_resp_raw(nsm, &nsm->msg.resp, &raw); + if (r) + goto out; + + /* Copy kernel argument struct back to user argument struct */ + r = -EFAULT; + if (copy_to_user(argp, &raw, sizeof(raw))) + goto out; + + r = 0; + +out: + mutex_unlock(&nsm->lock); + return r; +} + +static int nsm_device_init_vq(struct virtio_device *vdev) +{ + struct virtqueue *vq = virtio_find_single_vq(vdev, + 
nsm_vq_callback, "nsm.vq.0"); + struct nsm *nsm = vdev->priv; + + if (IS_ERR(vq)) + return PTR_ERR(vq); + + nsm->vq = vq; + + return 0; +} + +static const struct file_operations nsm_dev_fops = { + .unlocked_ioctl = nsm_dev_ioctl, + .compat_ioctl = compat_ptr_ioctl, +}; + +/* Handler for probing the NSM device */ +static int nsm_device_probe(struct virtio_device *vdev) +{ + struct device *dev = &vdev->dev; + struct nsm *nsm; + int rc; + + nsm = devm_kzalloc(&vdev->dev, sizeof(*nsm), GFP_KERNEL); + if (!nsm) + return -ENOMEM; + + vdev->priv = nsm; + nsm->vdev = vdev; + + rc = nsm_device_init_vq(vdev); + if (rc) { + dev_err(dev, "queue failed to initialize: %d.\n", rc); + goto err_init_vq; + } + + mutex_init(&nsm->lock); + + /* Register as hwrng provider */ + nsm->hwrng = (struct hwrng) { + .read = nsm_rng_read, + .name = "nsm-hwrng", + .quality = 1000, + }; + + rc = hwrng_register(&nsm->hwrng); + if (rc) { + dev_err(dev, "RNG initialization error: %d.\n", rc); + goto err_hwrng; + } + + /* Register /dev/nsm device node */ + nsm->misc = (struct miscdevice) { + .minor = MISC_DYNAMIC_MINOR, + .name = "nsm", + .fops = &nsm_dev_fops, + .mode = 0666, + }; + + rc = misc_register(&nsm->misc); + if (rc) { + dev_err(dev, "misc device registration error: %d.\n", rc); + goto err_misc; + } + + return 0; + +err_misc: + hwrng_unregister(&nsm->hwrng); +err_hwrng: + vdev->config->del_vqs(vdev); +err_init_vq: + return rc; +} + +/* Handler for removing the NSM device */ +static void nsm_device_remove(struct virtio_device *vdev) +{ + struct nsm *nsm = vdev->priv; + + hwrng_unregister(&nsm->hwrng); + + vdev->config->del_vqs(vdev); + misc_deregister(&nsm->misc); +} + +/* NSM device configuration structure */ +static struct virtio_driver virtio_nsm_driver = { + .feature_table = 0, + .feature_table_size = 0, + .feature_table_legacy = 0, + .feature_table_size_legacy = 0, + .driver.name = KBUILD_MODNAME, + .id_table = id_table, + .probe = nsm_device_probe, + .remove = nsm_device_remove, +}; + 
+module_virtio_driver(virtio_nsm_driver); +MODULE_DEVICE_TABLE(virtio, id_table); +MODULE_DESCRIPTION("Virtio NSM driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/ntsync.c b/drivers/misc/ntsync.c new file mode 100644 index 000000000000..9087f045e362 --- /dev/null +++ b/drivers/misc/ntsync.c @@ -0,0 +1,1209 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * ntsync.c - Kernel driver for NT synchronization primitives + * + * Copyright (C) 2024 Elizabeth Figura <zfigura@codeweavers.com> + */ + +#include <linux/anon_inodes.h> +#include <linux/atomic.h> +#include <linux/file.h> +#include <linux/fs.h> +#include <linux/hrtimer.h> +#include <linux/ktime.h> +#include <linux/miscdevice.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/overflow.h> +#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <uapi/linux/ntsync.h> + +#define NTSYNC_NAME "ntsync" + +enum ntsync_type { + NTSYNC_TYPE_SEM, + NTSYNC_TYPE_MUTEX, + NTSYNC_TYPE_EVENT, +}; + +/* + * Individual synchronization primitives are represented by + * struct ntsync_obj, and each primitive is backed by a file. + * + * The whole namespace is represented by a struct ntsync_device also + * backed by a file. + * + * Both rely on struct file for reference counting. Individual + * ntsync_obj objects take a reference to the device when created. + * Wait operations take a reference to each object being waited on for + * the duration of the wait. + */ + +struct ntsync_obj { + spinlock_t lock; + int dev_locked; + + enum ntsync_type type; + + struct file *file; + struct ntsync_device *dev; + + /* The following fields are protected by the object lock. 
*/ + union { + struct { + __u32 count; + __u32 max; + } sem; + struct { + __u32 count; + pid_t owner; + bool ownerdead; + } mutex; + struct { + bool manual; + bool signaled; + } event; + } u; + + /* + * any_waiters is protected by the object lock, but all_waiters is + * protected by the device wait_all_lock. + */ + struct list_head any_waiters; + struct list_head all_waiters; + + /* + * Hint describing how many tasks are queued on this object in a + * wait-all operation. + * + * Any time we do a wake, we may need to wake "all" waiters as well as + * "any" waiters. In order to atomically wake "all" waiters, we must + * lock all of the objects, and that means grabbing the wait_all_lock + * below (and, due to lock ordering rules, before locking this object). + * However, wait-all is a rare operation, and grabbing the wait-all + * lock for every wake would create unnecessary contention. + * Therefore we first check whether all_hint is zero, and, if it is, + * we skip trying to wake "all" waiters. + * + * Since wait requests must originate from user-space threads, we're + * limited here by PID_MAX_LIMIT, so there's no risk of overflow. + */ + atomic_t all_hint; +}; + +struct ntsync_q_entry { + struct list_head node; + struct ntsync_q *q; + struct ntsync_obj *obj; + __u32 index; +}; + +struct ntsync_q { + struct task_struct *task; + __u32 owner; + + /* + * Protected via atomic_try_cmpxchg(). Only the thread that wins the + * compare-and-swap may actually change object states and wake this + * task. + */ + atomic_t signaled; + + bool all; + bool ownerdead; + __u32 count; + struct ntsync_q_entry entries[]; +}; + +struct ntsync_device { + /* + * Wait-all operations must atomically grab all objects, and be totally + * ordered with respect to each other and wait-any operations. + * If one thread is trying to acquire several objects, another thread + * cannot touch the object at the same time. 
+ * + * This device-wide lock is used to serialize wait-for-all + * operations, and operations on an object that is involved in a + * wait-for-all. + */ + struct mutex wait_all_lock; + + struct file *file; +}; + +/* + * Single objects are locked using obj->lock. + * + * Multiple objects are 'locked' while holding dev->wait_all_lock. + * In this case however, individual objects are not locked by holding + * obj->lock, but by setting obj->dev_locked. + * + * This means that in order to lock a single object, the sequence is slightly + * more complicated than usual. Specifically it needs to check obj->dev_locked + * after acquiring obj->lock, if set, it needs to drop the lock and acquire + * dev->wait_all_lock in order to serialize against the multi-object operation. + */ + +static void dev_lock_obj(struct ntsync_device *dev, struct ntsync_obj *obj) +{ + lockdep_assert_held(&dev->wait_all_lock); + lockdep_assert(obj->dev == dev); + spin_lock(&obj->lock); + /* + * By setting obj->dev_locked inside obj->lock, it is ensured that + * anyone holding obj->lock must see the value. + */ + obj->dev_locked = 1; + spin_unlock(&obj->lock); +} + +static void dev_unlock_obj(struct ntsync_device *dev, struct ntsync_obj *obj) +{ + lockdep_assert_held(&dev->wait_all_lock); + lockdep_assert(obj->dev == dev); + spin_lock(&obj->lock); + obj->dev_locked = 0; + spin_unlock(&obj->lock); +} + +static void obj_lock(struct ntsync_obj *obj) +{ + struct ntsync_device *dev = obj->dev; + + for (;;) { + spin_lock(&obj->lock); + if (likely(!obj->dev_locked)) + break; + + spin_unlock(&obj->lock); + mutex_lock(&dev->wait_all_lock); + spin_lock(&obj->lock); + /* + * obj->dev_locked should be set and released under the same + * wait_all_lock section, since we now own this lock, it should + * be clear. 
+ */ + lockdep_assert(!obj->dev_locked); + spin_unlock(&obj->lock); + mutex_unlock(&dev->wait_all_lock); + } +} + +static void obj_unlock(struct ntsync_obj *obj) +{ + spin_unlock(&obj->lock); +} + +static bool ntsync_lock_obj(struct ntsync_device *dev, struct ntsync_obj *obj) +{ + bool all; + + obj_lock(obj); + all = atomic_read(&obj->all_hint); + if (unlikely(all)) { + obj_unlock(obj); + mutex_lock(&dev->wait_all_lock); + dev_lock_obj(dev, obj); + } + + return all; +} + +static void ntsync_unlock_obj(struct ntsync_device *dev, struct ntsync_obj *obj, bool all) +{ + if (all) { + dev_unlock_obj(dev, obj); + mutex_unlock(&dev->wait_all_lock); + } else { + obj_unlock(obj); + } +} + +#define ntsync_assert_held(obj) \ + lockdep_assert((lockdep_is_held(&(obj)->lock) != LOCK_STATE_NOT_HELD) || \ + ((lockdep_is_held(&(obj)->dev->wait_all_lock) != LOCK_STATE_NOT_HELD) && \ + (obj)->dev_locked)) + +static bool is_signaled(struct ntsync_obj *obj, __u32 owner) +{ + ntsync_assert_held(obj); + + switch (obj->type) { + case NTSYNC_TYPE_SEM: + return !!obj->u.sem.count; + case NTSYNC_TYPE_MUTEX: + if (obj->u.mutex.owner && obj->u.mutex.owner != owner) + return false; + return obj->u.mutex.count < UINT_MAX; + case NTSYNC_TYPE_EVENT: + return obj->u.event.signaled; + } + + WARN(1, "bad object type %#x\n", obj->type); + return false; +} + +/* + * "locked_obj" is an optional pointer to an object which is already locked and + * should not be locked again. This is necessary so that changing an object's + * state and waking it can be a single atomic operation. 
+ */ +static void try_wake_all(struct ntsync_device *dev, struct ntsync_q *q, + struct ntsync_obj *locked_obj) +{ + __u32 count = q->count; + bool can_wake = true; + int signaled = -1; + __u32 i; + + lockdep_assert_held(&dev->wait_all_lock); + if (locked_obj) + lockdep_assert(locked_obj->dev_locked); + + for (i = 0; i < count; i++) { + if (q->entries[i].obj != locked_obj) + dev_lock_obj(dev, q->entries[i].obj); + } + + for (i = 0; i < count; i++) { + if (!is_signaled(q->entries[i].obj, q->owner)) { + can_wake = false; + break; + } + } + + if (can_wake && atomic_try_cmpxchg(&q->signaled, &signaled, 0)) { + for (i = 0; i < count; i++) { + struct ntsync_obj *obj = q->entries[i].obj; + + switch (obj->type) { + case NTSYNC_TYPE_SEM: + obj->u.sem.count--; + break; + case NTSYNC_TYPE_MUTEX: + if (obj->u.mutex.ownerdead) + q->ownerdead = true; + obj->u.mutex.ownerdead = false; + obj->u.mutex.count++; + obj->u.mutex.owner = q->owner; + break; + case NTSYNC_TYPE_EVENT: + if (!obj->u.event.manual) + obj->u.event.signaled = false; + break; + } + } + wake_up_process(q->task); + } + + for (i = 0; i < count; i++) { + if (q->entries[i].obj != locked_obj) + dev_unlock_obj(dev, q->entries[i].obj); + } +} + +static void try_wake_all_obj(struct ntsync_device *dev, struct ntsync_obj *obj) +{ + struct ntsync_q_entry *entry; + + lockdep_assert_held(&dev->wait_all_lock); + lockdep_assert(obj->dev_locked); + + list_for_each_entry(entry, &obj->all_waiters, node) + try_wake_all(dev, entry->q, obj); +} + +static void try_wake_any_sem(struct ntsync_obj *sem) +{ + struct ntsync_q_entry *entry; + + ntsync_assert_held(sem); + lockdep_assert(sem->type == NTSYNC_TYPE_SEM); + + list_for_each_entry(entry, &sem->any_waiters, node) { + struct ntsync_q *q = entry->q; + int signaled = -1; + + if (!sem->u.sem.count) + break; + + if (atomic_try_cmpxchg(&q->signaled, &signaled, entry->index)) { + sem->u.sem.count--; + wake_up_process(q->task); + } + } +} + +static void try_wake_any_mutex(struct ntsync_obj 
*mutex) +{ + struct ntsync_q_entry *entry; + + ntsync_assert_held(mutex); + lockdep_assert(mutex->type == NTSYNC_TYPE_MUTEX); + + list_for_each_entry(entry, &mutex->any_waiters, node) { + struct ntsync_q *q = entry->q; + int signaled = -1; + + if (mutex->u.mutex.count == UINT_MAX) + break; + if (mutex->u.mutex.owner && mutex->u.mutex.owner != q->owner) + continue; + + if (atomic_try_cmpxchg(&q->signaled, &signaled, entry->index)) { + if (mutex->u.mutex.ownerdead) + q->ownerdead = true; + mutex->u.mutex.ownerdead = false; + mutex->u.mutex.count++; + mutex->u.mutex.owner = q->owner; + wake_up_process(q->task); + } + } +} + +static void try_wake_any_event(struct ntsync_obj *event) +{ + struct ntsync_q_entry *entry; + + ntsync_assert_held(event); + lockdep_assert(event->type == NTSYNC_TYPE_EVENT); + + list_for_each_entry(entry, &event->any_waiters, node) { + struct ntsync_q *q = entry->q; + int signaled = -1; + + if (!event->u.event.signaled) + break; + + if (atomic_try_cmpxchg(&q->signaled, &signaled, entry->index)) { + if (!event->u.event.manual) + event->u.event.signaled = false; + wake_up_process(q->task); + } + } +} + +/* + * Actually change the semaphore state, returning -EOVERFLOW if it is made + * invalid. 
+ */ +static int release_sem_state(struct ntsync_obj *sem, __u32 count) +{ + __u32 sum; + + ntsync_assert_held(sem); + + if (check_add_overflow(sem->u.sem.count, count, &sum) || + sum > sem->u.sem.max) + return -EOVERFLOW; + + sem->u.sem.count = sum; + return 0; +} + +static int ntsync_sem_release(struct ntsync_obj *sem, void __user *argp) +{ + struct ntsync_device *dev = sem->dev; + __u32 __user *user_args = argp; + __u32 prev_count; + __u32 args; + bool all; + int ret; + + if (copy_from_user(&args, argp, sizeof(args))) + return -EFAULT; + + if (sem->type != NTSYNC_TYPE_SEM) + return -EINVAL; + + all = ntsync_lock_obj(dev, sem); + + prev_count = sem->u.sem.count; + ret = release_sem_state(sem, args); + if (!ret) { + if (all) + try_wake_all_obj(dev, sem); + try_wake_any_sem(sem); + } + + ntsync_unlock_obj(dev, sem, all); + + if (!ret && put_user(prev_count, user_args)) + ret = -EFAULT; + + return ret; +} + +/* + * Actually change the mutex state, returning -EPERM if not the owner. + */ +static int unlock_mutex_state(struct ntsync_obj *mutex, + const struct ntsync_mutex_args *args) +{ + ntsync_assert_held(mutex); + + if (mutex->u.mutex.owner != args->owner) + return -EPERM; + + if (!--mutex->u.mutex.count) + mutex->u.mutex.owner = 0; + return 0; +} + +static int ntsync_mutex_unlock(struct ntsync_obj *mutex, void __user *argp) +{ + struct ntsync_mutex_args __user *user_args = argp; + struct ntsync_device *dev = mutex->dev; + struct ntsync_mutex_args args; + __u32 prev_count; + bool all; + int ret; + + if (copy_from_user(&args, argp, sizeof(args))) + return -EFAULT; + if (!args.owner) + return -EINVAL; + + if (mutex->type != NTSYNC_TYPE_MUTEX) + return -EINVAL; + + all = ntsync_lock_obj(dev, mutex); + + prev_count = mutex->u.mutex.count; + ret = unlock_mutex_state(mutex, &args); + if (!ret) { + if (all) + try_wake_all_obj(dev, mutex); + try_wake_any_mutex(mutex); + } + + ntsync_unlock_obj(dev, mutex, all); + + if (!ret && put_user(prev_count, &user_args->count)) + ret 
= -EFAULT; + + return ret; +} + +/* + * Actually change the mutex state to mark its owner as dead, + * returning -EPERM if not the owner. + */ +static int kill_mutex_state(struct ntsync_obj *mutex, __u32 owner) +{ + ntsync_assert_held(mutex); + + if (mutex->u.mutex.owner != owner) + return -EPERM; + + mutex->u.mutex.ownerdead = true; + mutex->u.mutex.owner = 0; + mutex->u.mutex.count = 0; + return 0; +} + +static int ntsync_mutex_kill(struct ntsync_obj *mutex, void __user *argp) +{ + struct ntsync_device *dev = mutex->dev; + __u32 owner; + bool all; + int ret; + + if (get_user(owner, (__u32 __user *)argp)) + return -EFAULT; + if (!owner) + return -EINVAL; + + if (mutex->type != NTSYNC_TYPE_MUTEX) + return -EINVAL; + + all = ntsync_lock_obj(dev, mutex); + + ret = kill_mutex_state(mutex, owner); + if (!ret) { + if (all) + try_wake_all_obj(dev, mutex); + try_wake_any_mutex(mutex); + } + + ntsync_unlock_obj(dev, mutex, all); + + return ret; +} + +static int ntsync_event_set(struct ntsync_obj *event, void __user *argp, bool pulse) +{ + struct ntsync_device *dev = event->dev; + __u32 prev_state; + bool all; + + if (event->type != NTSYNC_TYPE_EVENT) + return -EINVAL; + + all = ntsync_lock_obj(dev, event); + + prev_state = event->u.event.signaled; + event->u.event.signaled = true; + if (all) + try_wake_all_obj(dev, event); + try_wake_any_event(event); + if (pulse) + event->u.event.signaled = false; + + ntsync_unlock_obj(dev, event, all); + + if (put_user(prev_state, (__u32 __user *)argp)) + return -EFAULT; + + return 0; +} + +static int ntsync_event_reset(struct ntsync_obj *event, void __user *argp) +{ + struct ntsync_device *dev = event->dev; + __u32 prev_state; + bool all; + + if (event->type != NTSYNC_TYPE_EVENT) + return -EINVAL; + + all = ntsync_lock_obj(dev, event); + + prev_state = event->u.event.signaled; + event->u.event.signaled = false; + + ntsync_unlock_obj(dev, event, all); + + if (put_user(prev_state, (__u32 __user *)argp)) + return -EFAULT; + + return 0; +} 
+ +static int ntsync_sem_read(struct ntsync_obj *sem, void __user *argp) +{ + struct ntsync_sem_args __user *user_args = argp; + struct ntsync_device *dev = sem->dev; + struct ntsync_sem_args args; + bool all; + + if (sem->type != NTSYNC_TYPE_SEM) + return -EINVAL; + + all = ntsync_lock_obj(dev, sem); + + args.count = sem->u.sem.count; + args.max = sem->u.sem.max; + + ntsync_unlock_obj(dev, sem, all); + + if (copy_to_user(user_args, &args, sizeof(args))) + return -EFAULT; + return 0; +} + +static int ntsync_mutex_read(struct ntsync_obj *mutex, void __user *argp) +{ + struct ntsync_mutex_args __user *user_args = argp; + struct ntsync_device *dev = mutex->dev; + struct ntsync_mutex_args args; + bool all; + int ret; + + if (mutex->type != NTSYNC_TYPE_MUTEX) + return -EINVAL; + + all = ntsync_lock_obj(dev, mutex); + + args.count = mutex->u.mutex.count; + args.owner = mutex->u.mutex.owner; + ret = mutex->u.mutex.ownerdead ? -EOWNERDEAD : 0; + + ntsync_unlock_obj(dev, mutex, all); + + if (copy_to_user(user_args, &args, sizeof(args))) + return -EFAULT; + return ret; +} + +static int ntsync_event_read(struct ntsync_obj *event, void __user *argp) +{ + struct ntsync_event_args __user *user_args = argp; + struct ntsync_device *dev = event->dev; + struct ntsync_event_args args; + bool all; + + if (event->type != NTSYNC_TYPE_EVENT) + return -EINVAL; + + all = ntsync_lock_obj(dev, event); + + args.manual = event->u.event.manual; + args.signaled = event->u.event.signaled; + + ntsync_unlock_obj(dev, event, all); + + if (copy_to_user(user_args, &args, sizeof(args))) + return -EFAULT; + return 0; +} + +static void ntsync_free_obj(struct ntsync_obj *obj) +{ + fput(obj->dev->file); + kfree(obj); +} + +static int ntsync_obj_release(struct inode *inode, struct file *file) +{ + ntsync_free_obj(file->private_data); + return 0; +} + +static long ntsync_obj_ioctl(struct file *file, unsigned int cmd, + unsigned long parm) +{ + struct ntsync_obj *obj = file->private_data; + void __user *argp 
= (void __user *)parm; + + switch (cmd) { + case NTSYNC_IOC_SEM_RELEASE: + return ntsync_sem_release(obj, argp); + case NTSYNC_IOC_SEM_READ: + return ntsync_sem_read(obj, argp); + case NTSYNC_IOC_MUTEX_UNLOCK: + return ntsync_mutex_unlock(obj, argp); + case NTSYNC_IOC_MUTEX_KILL: + return ntsync_mutex_kill(obj, argp); + case NTSYNC_IOC_MUTEX_READ: + return ntsync_mutex_read(obj, argp); + case NTSYNC_IOC_EVENT_SET: + return ntsync_event_set(obj, argp, false); + case NTSYNC_IOC_EVENT_RESET: + return ntsync_event_reset(obj, argp); + case NTSYNC_IOC_EVENT_PULSE: + return ntsync_event_set(obj, argp, true); + case NTSYNC_IOC_EVENT_READ: + return ntsync_event_read(obj, argp); + default: + return -ENOIOCTLCMD; + } +} + +static const struct file_operations ntsync_obj_fops = { + .owner = THIS_MODULE, + .release = ntsync_obj_release, + .unlocked_ioctl = ntsync_obj_ioctl, + .compat_ioctl = compat_ptr_ioctl, +}; + +static struct ntsync_obj *ntsync_alloc_obj(struct ntsync_device *dev, + enum ntsync_type type) +{ + struct ntsync_obj *obj; + + obj = kzalloc(sizeof(*obj), GFP_KERNEL); + if (!obj) + return NULL; + obj->type = type; + obj->dev = dev; + get_file(dev->file); + spin_lock_init(&obj->lock); + INIT_LIST_HEAD(&obj->any_waiters); + INIT_LIST_HEAD(&obj->all_waiters); + atomic_set(&obj->all_hint, 0); + + return obj; +} + +static int ntsync_obj_get_fd(struct ntsync_obj *obj) +{ + FD_PREPARE(fdf, O_CLOEXEC, + anon_inode_getfile("ntsync", &ntsync_obj_fops, obj, O_RDWR)); + if (fdf.err) + return fdf.err; + obj->file = fd_prepare_file(fdf); + return fd_publish(fdf); +} + +static int ntsync_create_sem(struct ntsync_device *dev, void __user *argp) +{ + struct ntsync_sem_args args; + struct ntsync_obj *sem; + int fd; + + if (copy_from_user(&args, argp, sizeof(args))) + return -EFAULT; + + if (args.count > args.max) + return -EINVAL; + + sem = ntsync_alloc_obj(dev, NTSYNC_TYPE_SEM); + if (!sem) + return -ENOMEM; + sem->u.sem.count = args.count; + sem->u.sem.max = args.max; + fd = 
ntsync_obj_get_fd(sem); + if (fd < 0) + ntsync_free_obj(sem); + + return fd; +} + +static int ntsync_create_mutex(struct ntsync_device *dev, void __user *argp) +{ + struct ntsync_mutex_args args; + struct ntsync_obj *mutex; + int fd; + + if (copy_from_user(&args, argp, sizeof(args))) + return -EFAULT; + + if (!args.owner != !args.count) + return -EINVAL; + + mutex = ntsync_alloc_obj(dev, NTSYNC_TYPE_MUTEX); + if (!mutex) + return -ENOMEM; + mutex->u.mutex.count = args.count; + mutex->u.mutex.owner = args.owner; + fd = ntsync_obj_get_fd(mutex); + if (fd < 0) + ntsync_free_obj(mutex); + + return fd; +} + +static int ntsync_create_event(struct ntsync_device *dev, void __user *argp) +{ + struct ntsync_event_args args; + struct ntsync_obj *event; + int fd; + + if (copy_from_user(&args, argp, sizeof(args))) + return -EFAULT; + + event = ntsync_alloc_obj(dev, NTSYNC_TYPE_EVENT); + if (!event) + return -ENOMEM; + event->u.event.manual = args.manual; + event->u.event.signaled = args.signaled; + fd = ntsync_obj_get_fd(event); + if (fd < 0) + ntsync_free_obj(event); + + return fd; +} + +static struct ntsync_obj *get_obj(struct ntsync_device *dev, int fd) +{ + struct file *file = fget(fd); + struct ntsync_obj *obj; + + if (!file) + return NULL; + + if (file->f_op != &ntsync_obj_fops) { + fput(file); + return NULL; + } + + obj = file->private_data; + if (obj->dev != dev) { + fput(file); + return NULL; + } + + return obj; +} + +static void put_obj(struct ntsync_obj *obj) +{ + fput(obj->file); +} + +static int ntsync_schedule(const struct ntsync_q *q, const struct ntsync_wait_args *args) +{ + ktime_t timeout = ns_to_ktime(args->timeout); + clockid_t clock = CLOCK_MONOTONIC; + ktime_t *timeout_ptr; + int ret = 0; + + timeout_ptr = (args->timeout == U64_MAX ? 
NULL : &timeout); + + if (args->flags & NTSYNC_WAIT_REALTIME) + clock = CLOCK_REALTIME; + + do { + if (signal_pending(current)) { + ret = -ERESTARTSYS; + break; + } + + set_current_state(TASK_INTERRUPTIBLE); + if (atomic_read(&q->signaled) != -1) { + ret = 0; + break; + } + ret = schedule_hrtimeout_range_clock(timeout_ptr, 0, HRTIMER_MODE_ABS, clock); + } while (ret < 0); + __set_current_state(TASK_RUNNING); + + return ret; +} + +/* + * Allocate and initialize the ntsync_q structure, but do not queue us yet. + */ +static int setup_wait(struct ntsync_device *dev, + const struct ntsync_wait_args *args, bool all, + struct ntsync_q **ret_q) +{ + int fds[NTSYNC_MAX_WAIT_COUNT + 1]; + const __u32 count = args->count; + size_t size = array_size(count, sizeof(fds[0])); + struct ntsync_q *q; + __u32 total_count; + __u32 i, j; + + if (args->pad || (args->flags & ~NTSYNC_WAIT_REALTIME)) + return -EINVAL; + + if (size >= sizeof(fds)) + return -EINVAL; + + total_count = count; + if (args->alert) + total_count++; + + if (copy_from_user(fds, u64_to_user_ptr(args->objs), size)) + return -EFAULT; + if (args->alert) + fds[count] = args->alert; + + q = kmalloc(struct_size(q, entries, total_count), GFP_KERNEL); + if (!q) + return -ENOMEM; + q->task = current; + q->owner = args->owner; + atomic_set(&q->signaled, -1); + q->all = all; + q->ownerdead = false; + q->count = count; + + for (i = 0; i < total_count; i++) { + struct ntsync_q_entry *entry = &q->entries[i]; + struct ntsync_obj *obj = get_obj(dev, fds[i]); + + if (!obj) + goto err; + + if (all) { + /* Check that the objects are all distinct. 
*/ + for (j = 0; j < i; j++) { + if (obj == q->entries[j].obj) { + put_obj(obj); + goto err; + } + } + } + + entry->obj = obj; + entry->q = q; + entry->index = i; + } + + *ret_q = q; + return 0; + +err: + for (j = 0; j < i; j++) + put_obj(q->entries[j].obj); + kfree(q); + return -EINVAL; +} + +static void try_wake_any_obj(struct ntsync_obj *obj) +{ + switch (obj->type) { + case NTSYNC_TYPE_SEM: + try_wake_any_sem(obj); + break; + case NTSYNC_TYPE_MUTEX: + try_wake_any_mutex(obj); + break; + case NTSYNC_TYPE_EVENT: + try_wake_any_event(obj); + break; + } +} + +static int ntsync_wait_any(struct ntsync_device *dev, void __user *argp) +{ + struct ntsync_wait_args args; + __u32 i, total_count; + struct ntsync_q *q; + int signaled; + bool all; + int ret; + + if (copy_from_user(&args, argp, sizeof(args))) + return -EFAULT; + + ret = setup_wait(dev, &args, false, &q); + if (ret < 0) + return ret; + + total_count = args.count; + if (args.alert) + total_count++; + + /* queue ourselves */ + + for (i = 0; i < total_count; i++) { + struct ntsync_q_entry *entry = &q->entries[i]; + struct ntsync_obj *obj = entry->obj; + + all = ntsync_lock_obj(dev, obj); + list_add_tail(&entry->node, &obj->any_waiters); + ntsync_unlock_obj(dev, obj, all); + } + + /* + * Check if we are already signaled. + * + * Note that the API requires that normal objects are checked before + * the alert event. Hence we queue the alert event last, and check + * objects in order. 
+ */ + + for (i = 0; i < total_count; i++) { + struct ntsync_obj *obj = q->entries[i].obj; + + if (atomic_read(&q->signaled) != -1) + break; + + all = ntsync_lock_obj(dev, obj); + try_wake_any_obj(obj); + ntsync_unlock_obj(dev, obj, all); + } + + /* sleep */ + + ret = ntsync_schedule(q, &args); + + /* and finally, unqueue */ + + for (i = 0; i < total_count; i++) { + struct ntsync_q_entry *entry = &q->entries[i]; + struct ntsync_obj *obj = entry->obj; + + all = ntsync_lock_obj(dev, obj); + list_del(&entry->node); + ntsync_unlock_obj(dev, obj, all); + + put_obj(obj); + } + + signaled = atomic_read(&q->signaled); + if (signaled != -1) { + struct ntsync_wait_args __user *user_args = argp; + + /* even if we caught a signal, we need to communicate success */ + ret = q->ownerdead ? -EOWNERDEAD : 0; + + if (put_user(signaled, &user_args->index)) + ret = -EFAULT; + } else if (!ret) { + ret = -ETIMEDOUT; + } + + kfree(q); + return ret; +} + +static int ntsync_wait_all(struct ntsync_device *dev, void __user *argp) +{ + struct ntsync_wait_args args; + struct ntsync_q *q; + int signaled; + __u32 i; + int ret; + + if (copy_from_user(&args, argp, sizeof(args))) + return -EFAULT; + + ret = setup_wait(dev, &args, true, &q); + if (ret < 0) + return ret; + + /* queue ourselves */ + + mutex_lock(&dev->wait_all_lock); + + for (i = 0; i < args.count; i++) { + struct ntsync_q_entry *entry = &q->entries[i]; + struct ntsync_obj *obj = entry->obj; + + atomic_inc(&obj->all_hint); + + /* + * obj->all_waiters is protected by dev->wait_all_lock rather + * than obj->lock, so there is no need to acquire obj->lock + * here. 
+ */ + list_add_tail(&entry->node, &obj->all_waiters); + } + if (args.alert) { + struct ntsync_q_entry *entry = &q->entries[args.count]; + struct ntsync_obj *obj = entry->obj; + + dev_lock_obj(dev, obj); + list_add_tail(&entry->node, &obj->any_waiters); + dev_unlock_obj(dev, obj); + } + + /* check if we are already signaled */ + + try_wake_all(dev, q, NULL); + + mutex_unlock(&dev->wait_all_lock); + + /* + * Check if the alert event is signaled, making sure to do so only + * after checking if the other objects are signaled. + */ + + if (args.alert) { + struct ntsync_obj *obj = q->entries[args.count].obj; + + if (atomic_read(&q->signaled) == -1) { + bool all = ntsync_lock_obj(dev, obj); + try_wake_any_obj(obj); + ntsync_unlock_obj(dev, obj, all); + } + } + + /* sleep */ + + ret = ntsync_schedule(q, &args); + + /* and finally, unqueue */ + + mutex_lock(&dev->wait_all_lock); + + for (i = 0; i < args.count; i++) { + struct ntsync_q_entry *entry = &q->entries[i]; + struct ntsync_obj *obj = entry->obj; + + /* + * obj->all_waiters is protected by dev->wait_all_lock rather + * than obj->lock, so there is no need to acquire it here. + */ + list_del(&entry->node); + + atomic_dec(&obj->all_hint); + + put_obj(obj); + } + + mutex_unlock(&dev->wait_all_lock); + + if (args.alert) { + struct ntsync_q_entry *entry = &q->entries[args.count]; + struct ntsync_obj *obj = entry->obj; + bool all; + + all = ntsync_lock_obj(dev, obj); + list_del(&entry->node); + ntsync_unlock_obj(dev, obj, all); + + put_obj(obj); + } + + signaled = atomic_read(&q->signaled); + if (signaled != -1) { + struct ntsync_wait_args __user *user_args = argp; + + /* even if we caught a signal, we need to communicate success */ + ret = q->ownerdead ? 
-EOWNERDEAD : 0; + + if (put_user(signaled, &user_args->index)) + ret = -EFAULT; + } else if (!ret) { + ret = -ETIMEDOUT; + } + + kfree(q); + return ret; +} + +static int ntsync_char_open(struct inode *inode, struct file *file) +{ + struct ntsync_device *dev; + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) + return -ENOMEM; + + mutex_init(&dev->wait_all_lock); + + file->private_data = dev; + dev->file = file; + return nonseekable_open(inode, file); +} + +static int ntsync_char_release(struct inode *inode, struct file *file) +{ + struct ntsync_device *dev = file->private_data; + + kfree(dev); + + return 0; +} + +static long ntsync_char_ioctl(struct file *file, unsigned int cmd, + unsigned long parm) +{ + struct ntsync_device *dev = file->private_data; + void __user *argp = (void __user *)parm; + + switch (cmd) { + case NTSYNC_IOC_CREATE_EVENT: + return ntsync_create_event(dev, argp); + case NTSYNC_IOC_CREATE_MUTEX: + return ntsync_create_mutex(dev, argp); + case NTSYNC_IOC_CREATE_SEM: + return ntsync_create_sem(dev, argp); + case NTSYNC_IOC_WAIT_ALL: + return ntsync_wait_all(dev, argp); + case NTSYNC_IOC_WAIT_ANY: + return ntsync_wait_any(dev, argp); + default: + return -ENOIOCTLCMD; + } +} + +static const struct file_operations ntsync_fops = { + .owner = THIS_MODULE, + .open = ntsync_char_open, + .release = ntsync_char_release, + .unlocked_ioctl = ntsync_char_ioctl, + .compat_ioctl = compat_ptr_ioctl, +}; + +static struct miscdevice ntsync_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = NTSYNC_NAME, + .fops = &ntsync_fops, + .mode = 0666, +}; + +module_misc_device(ntsync_misc); + +MODULE_AUTHOR("Elizabeth Figura <zfigura@codeweavers.com>"); +MODULE_DESCRIPTION("Kernel driver for NT synchronization primitives"); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/ocxl/afu_irq.c b/drivers/misc/ocxl/afu_irq.c index a06920b7e049..f6b821fc274c 100644 --- a/drivers/misc/ocxl/afu_irq.c +++ b/drivers/misc/ocxl/afu_irq.c @@ -57,7 +57,7 @@ 
EXPORT_SYMBOL_GPL(ocxl_irq_set_handler); static irqreturn_t afu_irq_handler(int virq, void *data) { - struct afu_irq *irq = (struct afu_irq *) data; + struct afu_irq *irq = data; trace_ocxl_afu_irq_receive(virq); @@ -203,7 +203,7 @@ u64 ocxl_afu_irq_get_addr(struct ocxl_context *ctx, int irq_id) mutex_lock(&ctx->irq_lock); irq = idr_find(&ctx->irq_idr, irq_id); if (irq) { - xd = irq_get_handler_data(irq->virq); + xd = irq_get_chip_data(irq->virq); addr = xd ? xd->trig_page : 0; } mutex_unlock(&ctx->irq_lock); diff --git a/drivers/misc/ocxl/context.c b/drivers/misc/ocxl/context.c index 7f83116ae11a..cded7d1caf32 100644 --- a/drivers/misc/ocxl/context.c +++ b/drivers/misc/ocxl/context.c @@ -55,7 +55,7 @@ EXPORT_SYMBOL_GPL(ocxl_context_alloc); */ static void xsl_fault_error(void *data, u64 addr, u64 dsisr) { - struct ocxl_context *ctx = (struct ocxl_context *) data; + struct ocxl_context *ctx = data; mutex_lock(&ctx->xsl_error_lock); ctx->xsl_error.addr = addr; diff --git a/drivers/misc/ocxl/file.c b/drivers/misc/ocxl/file.c index 6e63f060e4cc..7eb74711ac96 100644 --- a/drivers/misc/ocxl/file.c +++ b/drivers/misc/ocxl/file.c @@ -14,7 +14,6 @@ #define OCXL_NUM_MINORS 256 /* Total to reserve */ static dev_t ocxl_dev; -static struct class *ocxl_class; static DEFINE_MUTEX(minors_idr_lock); static struct idr minors_idr; @@ -185,7 +184,7 @@ static irqreturn_t irq_handler(void *private) { struct eventfd_ctx *ev_ctx = private; - eventfd_signal(ev_ctx, 1); + eventfd_signal(ev_ctx); return IRQ_HANDLED; } @@ -509,6 +508,16 @@ static void ocxl_file_make_invisible(struct ocxl_file_info *info) cdev_del(&info->cdev); } +static char *ocxl_devnode(const struct device *dev, umode_t *mode) +{ + return kasprintf(GFP_KERNEL, "ocxl/%s", dev_name(dev)); +} + +static const struct class ocxl_class = { + .name = "ocxl", + .devnode = ocxl_devnode, +}; + int ocxl_file_register_afu(struct ocxl_afu *afu) { int minor; @@ -529,7 +538,7 @@ int ocxl_file_register_afu(struct ocxl_afu *afu) 
info->dev.parent = &fn->dev; info->dev.devt = MKDEV(MAJOR(ocxl_dev), minor); - info->dev.class = ocxl_class; + info->dev.class = &ocxl_class; info->dev.release = info_release; info->afu = afu; @@ -584,11 +593,6 @@ void ocxl_file_unregister_afu(struct ocxl_afu *afu) device_unregister(&info->dev); } -static char *ocxl_devnode(const struct device *dev, umode_t *mode) -{ - return kasprintf(GFP_KERNEL, "ocxl/%s", dev_name(dev)); -} - int ocxl_file_init(void) { int rc; @@ -601,20 +605,19 @@ int ocxl_file_init(void) return rc; } - ocxl_class = class_create("ocxl"); - if (IS_ERR(ocxl_class)) { + rc = class_register(&ocxl_class); + if (rc) { pr_err("Unable to create ocxl class\n"); unregister_chrdev_region(ocxl_dev, OCXL_NUM_MINORS); - return PTR_ERR(ocxl_class); + return rc; } - ocxl_class->devnode = ocxl_devnode; return 0; } void ocxl_file_exit(void) { - class_destroy(ocxl_class); + class_unregister(&ocxl_class); unregister_chrdev_region(ocxl_dev, OCXL_NUM_MINORS); idr_destroy(&minors_idr); } diff --git a/drivers/misc/ocxl/link.c b/drivers/misc/ocxl/link.c index c06c699c0e7b..03402203cacd 100644 --- a/drivers/misc/ocxl/link.c +++ b/drivers/misc/ocxl/link.c @@ -188,7 +188,7 @@ ack: static irqreturn_t xsl_fault_handler(int irq, void *data) { - struct ocxl_link *link = (struct ocxl_link *) data; + struct ocxl_link *link = data; struct spa *spa = link->spa; u64 dsisr, dar, pe_handle; struct pe_data *pe_data; @@ -483,7 +483,7 @@ static void release_xsl(struct kref *ref) void ocxl_link_release(struct pci_dev *dev, void *link_handle) { - struct ocxl_link *link = (struct ocxl_link *) link_handle; + struct ocxl_link *link = link_handle; mutex_lock(&links_list_lock); kref_put(&link->ref, release_xsl); @@ -540,7 +540,7 @@ int ocxl_link_add_pe(void *link_handle, int pasid, u32 pidr, u32 tidr, void (*xsl_err_cb)(void *data, u64 addr, u64 dsisr), void *xsl_err_data) { - struct ocxl_link *link = (struct ocxl_link *) link_handle; + struct ocxl_link *link = link_handle; struct spa *spa = 
link->spa; struct ocxl_process_element *pe; int pe_handle, rc = 0; @@ -630,7 +630,7 @@ EXPORT_SYMBOL_GPL(ocxl_link_add_pe); int ocxl_link_update_pe(void *link_handle, int pasid, __u16 tid) { - struct ocxl_link *link = (struct ocxl_link *) link_handle; + struct ocxl_link *link = link_handle; struct spa *spa = link->spa; struct ocxl_process_element *pe; int pe_handle, rc; @@ -666,7 +666,7 @@ int ocxl_link_update_pe(void *link_handle, int pasid, __u16 tid) int ocxl_link_remove_pe(void *link_handle, int pasid) { - struct ocxl_link *link = (struct ocxl_link *) link_handle; + struct ocxl_link *link = link_handle; struct spa *spa = link->spa; struct ocxl_process_element *pe; struct pe_data *pe_data; @@ -752,7 +752,7 @@ EXPORT_SYMBOL_GPL(ocxl_link_remove_pe); int ocxl_link_irq_alloc(void *link_handle, int *hw_irq) { - struct ocxl_link *link = (struct ocxl_link *) link_handle; + struct ocxl_link *link = link_handle; int irq; if (atomic_dec_if_positive(&link->irq_available) < 0) @@ -771,7 +771,7 @@ EXPORT_SYMBOL_GPL(ocxl_link_irq_alloc); void ocxl_link_free_irq(void *link_handle, int hw_irq) { - struct ocxl_link *link = (struct ocxl_link *) link_handle; + struct ocxl_link *link = link_handle; xive_native_free_irq(hw_irq); atomic_inc(&link->irq_available); diff --git a/drivers/misc/ocxl/main.c b/drivers/misc/ocxl/main.c index ef73cf35dda2..658974143c3c 100644 --- a/drivers/misc/ocxl/main.c +++ b/drivers/misc/ocxl/main.c @@ -7,7 +7,7 @@ static int __init init_ocxl(void) { - int rc = 0; + int rc; if (!tlbie_capable) return -EINVAL; diff --git a/drivers/misc/ocxl/ocxl_internal.h b/drivers/misc/ocxl/ocxl_internal.h index 10125a22d5a5..d2028d6c6f08 100644 --- a/drivers/misc/ocxl/ocxl_internal.h +++ b/drivers/misc/ocxl/ocxl_internal.h @@ -97,8 +97,6 @@ struct ocxl_process_element { __be32 software_state; }; -int ocxl_create_cdev(struct ocxl_afu *afu); -void ocxl_destroy_cdev(struct ocxl_afu *afu); int ocxl_file_register_afu(struct ocxl_afu *afu); void 
ocxl_file_unregister_afu(struct ocxl_afu *afu); diff --git a/drivers/misc/ocxl/sysfs.c b/drivers/misc/ocxl/sysfs.c index 405180d47d9b..1b6a86f17b6c 100644 --- a/drivers/misc/ocxl/sysfs.c +++ b/drivers/misc/ocxl/sysfs.c @@ -16,7 +16,7 @@ static ssize_t global_mmio_size_show(struct device *device, { struct ocxl_afu *afu = to_afu(device); - return scnprintf(buf, PAGE_SIZE, "%d\n", + return sysfs_emit(buf, "%d\n", afu->config.global_mmio_size); } @@ -26,7 +26,7 @@ static ssize_t pp_mmio_size_show(struct device *device, { struct ocxl_afu *afu = to_afu(device); - return scnprintf(buf, PAGE_SIZE, "%d\n", + return sysfs_emit(buf, "%d\n", afu->config.pp_mmio_stride); } @@ -36,7 +36,7 @@ static ssize_t afu_version_show(struct device *device, { struct ocxl_afu *afu = to_afu(device); - return scnprintf(buf, PAGE_SIZE, "%hhu:%hhu\n", + return sysfs_emit(buf, "%hhu:%hhu\n", afu->config.version_major, afu->config.version_minor); } @@ -47,7 +47,7 @@ static ssize_t contexts_show(struct device *device, { struct ocxl_afu *afu = to_afu(device); - return scnprintf(buf, PAGE_SIZE, "%d/%d\n", + return sysfs_emit(buf, "%d/%d\n", afu->pasid_count, afu->pasid_max); } @@ -61,9 +61,9 @@ static ssize_t reload_on_reset_show(struct device *device, int val; if (ocxl_config_get_reset_reload(pci_dev, &val)) - return scnprintf(buf, PAGE_SIZE, "unavailable\n"); + return sysfs_emit(buf, "unavailable\n"); - return scnprintf(buf, PAGE_SIZE, "%d\n", val); + return sysfs_emit(buf, "%d\n", val); } static ssize_t reload_on_reset_store(struct device *device, @@ -94,7 +94,7 @@ static struct device_attribute afu_attrs[] = { }; static ssize_t global_mmio_read(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, char *buf, + const struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct ocxl_afu *afu = to_afu(kobj_to_dev(kobj)); @@ -125,7 +125,7 @@ static const struct vm_operations_struct global_mmio_vmops = { }; static int global_mmio_mmap(struct file *filp, struct 
kobject *kobj, - struct bin_attribute *bin_attr, + const struct bin_attribute *bin_attr, struct vm_area_struct *vma) { struct ocxl_afu *afu = to_afu(kobj_to_dev(kobj)); diff --git a/drivers/misc/open-dice.c b/drivers/misc/open-dice.c index 8aea2d070a40..24c29e0f00ef 100644 --- a/drivers/misc/open-dice.c +++ b/drivers/misc/open-dice.c @@ -140,7 +140,6 @@ static int __init open_dice_probe(struct platform_device *pdev) return -ENOMEM; *drvdata = (struct open_dice_drvdata){ - .lock = __MUTEX_INITIALIZER(drvdata->lock), .rmem = rmem, .misc = (struct miscdevice){ .parent = dev, @@ -150,6 +149,7 @@ static int __init open_dice_probe(struct platform_device *pdev) .mode = 0600, }, }; + mutex_init(&drvdata->lock); /* Index overflow check not needed, misc_register() will fail. */ snprintf(drvdata->name, sizeof(drvdata->name), DRIVER_NAME"%u", dev_idx++); @@ -165,12 +165,11 @@ static int __init open_dice_probe(struct platform_device *pdev) return 0; } -static int open_dice_remove(struct platform_device *pdev) +static void open_dice_remove(struct platform_device *pdev) { struct open_dice_drvdata *drvdata = platform_get_drvdata(pdev); misc_deregister(&drvdata->misc); - return 0; } static const struct of_device_id open_dice_of_match[] = { @@ -202,5 +201,6 @@ static void __exit open_dice_exit(void) module_init(open_dice_init); module_exit(open_dice_exit); +MODULE_DESCRIPTION("Driver for Open Profile for DICE."); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("David Brazdil <dbrazdil@google.com>"); diff --git a/drivers/misc/pch_phub.c b/drivers/misc/pch_phub.c index 8d2b7135738e..7bee179841bc 100644 --- a/drivers/misc/pch_phub.c +++ b/drivers/misc/pch_phub.c @@ -483,7 +483,7 @@ static int pch_phub_write_gbe_mac_addr(struct pch_phub_reg *chip, u8 *data) } static ssize_t pch_phub_bin_read(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, char *buf, + const struct bin_attribute *attr, char *buf, loff_t off, size_t count) { unsigned int rom_signature; @@ -553,7 +553,7 @@ 
return_err_nomutex: } static ssize_t pch_phub_bin_write(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, + const struct bin_attribute *attr, char *buf, loff_t off, size_t count) { int err; diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c index ed4d0ef5e5c3..1c0fd185114f 100644 --- a/drivers/misc/pci_endpoint_test.c +++ b/drivers/misc/pci_endpoint_test.c @@ -7,6 +7,7 @@ */ #include <linux/crc32.h> +#include <linux/cleanup.h> #include <linux/delay.h> #include <linux/fs.h> #include <linux/io.h> @@ -27,20 +28,17 @@ #define DRV_MODULE_NAME "pci-endpoint-test" -#define IRQ_TYPE_UNDEFINED -1 -#define IRQ_TYPE_LEGACY 0 -#define IRQ_TYPE_MSI 1 -#define IRQ_TYPE_MSIX 2 - #define PCI_ENDPOINT_TEST_MAGIC 0x0 #define PCI_ENDPOINT_TEST_COMMAND 0x4 -#define COMMAND_RAISE_LEGACY_IRQ BIT(0) +#define COMMAND_RAISE_INTX_IRQ BIT(0) #define COMMAND_RAISE_MSI_IRQ BIT(1) #define COMMAND_RAISE_MSIX_IRQ BIT(2) #define COMMAND_READ BIT(3) #define COMMAND_WRITE BIT(4) #define COMMAND_COPY BIT(5) +#define COMMAND_ENABLE_DOORBELL BIT(6) +#define COMMAND_DISABLE_DOORBELL BIT(7) #define PCI_ENDPOINT_TEST_STATUS 0x8 #define STATUS_READ_SUCCESS BIT(0) @@ -52,6 +50,11 @@ #define STATUS_IRQ_RAISED BIT(6) #define STATUS_SRC_ADDR_INVALID BIT(7) #define STATUS_DST_ADDR_INVALID BIT(8) +#define STATUS_DOORBELL_SUCCESS BIT(9) +#define STATUS_DOORBELL_ENABLE_SUCCESS BIT(10) +#define STATUS_DOORBELL_ENABLE_FAIL BIT(11) +#define STATUS_DOORBELL_DISABLE_SUCCESS BIT(12) +#define STATUS_DOORBELL_DISABLE_FAIL BIT(13) #define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR 0x0c #define PCI_ENDPOINT_TEST_UPPER_SRC_ADDR 0x10 @@ -66,11 +69,23 @@ #define PCI_ENDPOINT_TEST_IRQ_NUMBER 0x28 #define PCI_ENDPOINT_TEST_FLAGS 0x2c + #define FLAG_USE_DMA BIT(0) +#define PCI_ENDPOINT_TEST_CAPS 0x30 +#define CAP_UNALIGNED_ACCESS BIT(0) +#define CAP_MSI BIT(1) +#define CAP_MSIX BIT(2) +#define CAP_INTX BIT(3) + +#define PCI_ENDPOINT_TEST_DB_BAR 0x34 +#define PCI_ENDPOINT_TEST_DB_OFFSET 
0x38 +#define PCI_ENDPOINT_TEST_DB_DATA 0x3c + #define PCI_DEVICE_ID_TI_AM654 0xb00c #define PCI_DEVICE_ID_TI_J7200 0xb00f #define PCI_DEVICE_ID_TI_AM64 0xb010 +#define PCI_DEVICE_ID_TI_J721S2 0xb013 #define PCI_DEVICE_ID_LS1088A 0x80c0 #define PCI_DEVICE_ID_IMX8 0x0808 @@ -81,20 +96,15 @@ #define PCI_DEVICE_ID_RENESAS_R8A774B1 0x002b #define PCI_DEVICE_ID_RENESAS_R8A774C0 0x002d #define PCI_DEVICE_ID_RENESAS_R8A774E1 0x0025 +#define PCI_DEVICE_ID_RENESAS_R8A779F0 0x0031 + +#define PCI_DEVICE_ID_ROCKCHIP_RK3588 0x3588 static DEFINE_IDA(pci_endpoint_test_ida); #define to_endpoint_test(priv) container_of((priv), struct pci_endpoint_test, \ miscdev) -static bool no_msi; -module_param(no_msi, bool, 0444); -MODULE_PARM_DESC(no_msi, "Disable MSI interrupt in pci_endpoint_test"); - -static int irq_type = IRQ_TYPE_MSI; -module_param(irq_type, int, 0444); -MODULE_PARM_DESC(irq_type, "IRQ mode selection in pci_endpoint_test (0 - Legacy, 1 - MSI, 2 - MSI-X)"); - enum pci_barno { BAR_0, BAR_1, @@ -102,6 +112,7 @@ enum pci_barno { BAR_3, BAR_4, BAR_5, + NO_BAR = -1, }; struct pci_endpoint_test { @@ -117,13 +128,13 @@ struct pci_endpoint_test { struct miscdevice miscdev; enum pci_barno test_reg_bar; size_t alignment; + u32 ep_caps; const char *name; }; struct pci_endpoint_test_data { enum pci_barno test_reg_bar; size_t alignment; - int irq_type; }; static inline u32 pci_endpoint_test_readl(struct pci_endpoint_test *test, @@ -138,18 +149,6 @@ static inline void pci_endpoint_test_writel(struct pci_endpoint_test *test, writel(value, test->base + offset); } -static inline u32 pci_endpoint_test_bar_readl(struct pci_endpoint_test *test, - int bar, int offset) -{ - return readl(test->bar[bar] + offset); -} - -static inline void pci_endpoint_test_bar_writel(struct pci_endpoint_test *test, - int bar, u32 offset, u32 value) -{ - writel(value, test->bar[bar] + offset); -} - static irqreturn_t pci_endpoint_test_irqhandler(int irq, void *dev_id) { struct pci_endpoint_test *test = dev_id; @@ 
-169,108 +168,186 @@ static void pci_endpoint_test_free_irq_vectors(struct pci_endpoint_test *test) struct pci_dev *pdev = test->pdev; pci_free_irq_vectors(pdev); - test->irq_type = IRQ_TYPE_UNDEFINED; + test->irq_type = PCITEST_IRQ_TYPE_UNDEFINED; } -static bool pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test, +static int pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test, int type) { - int irq = -1; + int irq; struct pci_dev *pdev = test->pdev; struct device *dev = &pdev->dev; - bool res = true; switch (type) { - case IRQ_TYPE_LEGACY: - irq = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY); - if (irq < 0) + case PCITEST_IRQ_TYPE_INTX: + irq = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_INTX); + if (irq < 0) { dev_err(dev, "Failed to get Legacy interrupt\n"); + return irq; + } + break; - case IRQ_TYPE_MSI: + case PCITEST_IRQ_TYPE_MSI: irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI); - if (irq < 0) + if (irq < 0) { dev_err(dev, "Failed to get MSI interrupts\n"); + return irq; + } + break; - case IRQ_TYPE_MSIX: + case PCITEST_IRQ_TYPE_MSIX: irq = pci_alloc_irq_vectors(pdev, 1, 2048, PCI_IRQ_MSIX); - if (irq < 0) + if (irq < 0) { dev_err(dev, "Failed to get MSI-X interrupts\n"); + return irq; + } + break; default: dev_err(dev, "Invalid IRQ type selected\n"); - } - - if (irq < 0) { - irq = 0; - res = false; + return -EINVAL; } test->irq_type = type; test->num_irqs = irq; - return res; + return 0; } static void pci_endpoint_test_release_irq(struct pci_endpoint_test *test) { int i; struct pci_dev *pdev = test->pdev; - struct device *dev = &pdev->dev; for (i = 0; i < test->num_irqs; i++) - devm_free_irq(dev, pci_irq_vector(pdev, i), test); + free_irq(pci_irq_vector(pdev, i), test); test->num_irqs = 0; } -static bool pci_endpoint_test_request_irq(struct pci_endpoint_test *test) +static int pci_endpoint_test_request_irq(struct pci_endpoint_test *test) { int i; - int err; + int ret; struct pci_dev *pdev = test->pdev; struct device 
*dev = &pdev->dev; for (i = 0; i < test->num_irqs; i++) { - err = devm_request_irq(dev, pci_irq_vector(pdev, i), - pci_endpoint_test_irqhandler, - IRQF_SHARED, test->name, test); - if (err) + ret = request_irq(pci_irq_vector(pdev, i), + pci_endpoint_test_irqhandler, IRQF_SHARED, + test->name, test); + if (ret) goto fail; } - return true; + return 0; fail: - switch (irq_type) { - case IRQ_TYPE_LEGACY: + switch (test->irq_type) { + case PCITEST_IRQ_TYPE_INTX: dev_err(dev, "Failed to request IRQ %d for Legacy\n", pci_irq_vector(pdev, i)); break; - case IRQ_TYPE_MSI: + case PCITEST_IRQ_TYPE_MSI: dev_err(dev, "Failed to request IRQ %d for MSI %d\n", pci_irq_vector(pdev, i), i + 1); break; - case IRQ_TYPE_MSIX: + case PCITEST_IRQ_TYPE_MSIX: dev_err(dev, "Failed to request IRQ %d for MSI-X %d\n", pci_irq_vector(pdev, i), i + 1); break; } - return false; + test->num_irqs = i; + pci_endpoint_test_release_irq(test); + + return ret; +} + +static const u32 bar_test_pattern[] = { + 0xA0A0A0A0, + 0xA1A1A1A1, + 0xA2A2A2A2, + 0xA3A3A3A3, + 0xA4A4A4A4, + 0xA5A5A5A5, +}; + +static int pci_endpoint_test_bar_memcmp(struct pci_endpoint_test *test, + enum pci_barno barno, + resource_size_t offset, void *write_buf, + void *read_buf, int size) +{ + memset(write_buf, bar_test_pattern[barno], size); + memcpy_toio(test->bar[barno] + offset, write_buf, size); + + memcpy_fromio(read_buf, test->bar[barno] + offset, size); + + return memcmp(write_buf, read_buf, size); } -static bool pci_endpoint_test_bar(struct pci_endpoint_test *test, +static int pci_endpoint_test_bar(struct pci_endpoint_test *test, enum pci_barno barno) { - int j; - u32 val; - int size; + resource_size_t bar_size, offset = 0; + void *write_buf __free(kfree) = NULL; + void *read_buf __free(kfree) = NULL; struct pci_dev *pdev = test->pdev; + int buf_size; + + bar_size = pci_resource_len(pdev, barno); + if (!bar_size) + return -ENODATA; if (!test->bar[barno]) - return false; + return -ENOMEM; + + if (barno == test->test_reg_bar) 
+ bar_size = 0x4; + + /* + * Allocate a buffer of max size 1MB, and reuse that buffer while + * iterating over the whole BAR size (which might be much larger). + */ + buf_size = min(SZ_1M, bar_size); + + write_buf = kmalloc(buf_size, GFP_KERNEL); + if (!write_buf) + return -ENOMEM; + + read_buf = kmalloc(buf_size, GFP_KERNEL); + if (!read_buf) + return -ENOMEM; + + while (offset < bar_size) { + if (pci_endpoint_test_bar_memcmp(test, barno, offset, write_buf, + read_buf, buf_size)) + return -EIO; + offset += buf_size; + } + + return 0; +} + +static u32 bar_test_pattern_with_offset(enum pci_barno barno, int offset) +{ + u32 val; + + /* Keep the BAR pattern in the top byte. */ + val = bar_test_pattern[barno] & 0xff000000; + /* Store the (partial) offset in the remaining bytes. */ + val |= offset & 0x00ffffff; + + return val; +} + +static void pci_endpoint_test_bars_write_bar(struct pci_endpoint_test *test, + enum pci_barno barno) +{ + struct pci_dev *pdev = test->pdev; + int j, size; size = pci_resource_len(pdev, barno); @@ -278,42 +355,96 @@ static bool pci_endpoint_test_bar(struct pci_endpoint_test *test, size = 0x4; for (j = 0; j < size; j += 4) - pci_endpoint_test_bar_writel(test, barno, j, 0xA0A0A0A0); + writel_relaxed(bar_test_pattern_with_offset(barno, j), + test->bar[barno] + j); +} + +static int pci_endpoint_test_bars_read_bar(struct pci_endpoint_test *test, + enum pci_barno barno) +{ + struct pci_dev *pdev = test->pdev; + struct device *dev = &pdev->dev; + int j, size; + u32 val; + + size = pci_resource_len(pdev, barno); + + if (barno == test->test_reg_bar) + size = 0x4; for (j = 0; j < size; j += 4) { - val = pci_endpoint_test_bar_readl(test, barno, j); - if (val != 0xA0A0A0A0) - return false; + u32 expected = bar_test_pattern_with_offset(barno, j); + + val = readl_relaxed(test->bar[barno] + j); + if (val != expected) { + dev_err(dev, + "BAR%d incorrect data at offset: %#x, got: %#x expected: %#x\n", + barno, j, val, expected); + return -EIO; + } } - return 
true; + return 0; } -static bool pci_endpoint_test_legacy_irq(struct pci_endpoint_test *test) +static int pci_endpoint_test_bars(struct pci_endpoint_test *test) +{ + enum pci_barno bar; + int ret; + + /* Write all BARs in order (without reading). */ + for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) + if (test->bar[bar]) + pci_endpoint_test_bars_write_bar(test, bar); + + /* + * Read all BARs in order (without writing). + * If there is an address translation issue on the EP, writing one BAR + * might have overwritten another BAR. Ensure that this is not the case. + * (Reading back the BAR directly after writing can not detect this.) + */ + for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) { + if (test->bar[bar]) { + ret = pci_endpoint_test_bars_read_bar(test, bar); + if (ret) + return ret; + } + } + + return 0; +} + +static int pci_endpoint_test_intx_irq(struct pci_endpoint_test *test) { u32 val; pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, - IRQ_TYPE_LEGACY); + PCITEST_IRQ_TYPE_INTX); pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 0); pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND, - COMMAND_RAISE_LEGACY_IRQ); + COMMAND_RAISE_INTX_IRQ); val = wait_for_completion_timeout(&test->irq_raised, msecs_to_jiffies(1000)); if (!val) - return false; + return -ETIMEDOUT; - return true; + return 0; } -static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test, +static int pci_endpoint_test_msi_irq(struct pci_endpoint_test *test, u16 msi_num, bool msix) { - u32 val; struct pci_dev *pdev = test->pdev; + u32 val; + int irq; + + irq = pci_irq_vector(pdev, msi_num - 1); + if (irq < 0) + return irq; pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, - msix ? IRQ_TYPE_MSIX : IRQ_TYPE_MSI); + msix ? PCITEST_IRQ_TYPE_MSIX : + PCITEST_IRQ_TYPE_MSI); pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, msi_num); pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND, msix ? 
COMMAND_RAISE_MSIX_IRQ : @@ -321,9 +452,12 @@ static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test, val = wait_for_completion_timeout(&test->irq_raised, msecs_to_jiffies(1000)); if (!val) - return false; + return -ETIMEDOUT; - return pci_irq_vector(pdev, msi_num - 1) == test->last_irq; + if (irq != test->last_irq) + return -EIO; + + return 0; } static int pci_endpoint_test_validate_xfer_params(struct device *dev, @@ -342,11 +476,10 @@ static int pci_endpoint_test_validate_xfer_params(struct device *dev, return 0; } -static bool pci_endpoint_test_copy(struct pci_endpoint_test *test, +static int pci_endpoint_test_copy(struct pci_endpoint_test *test, unsigned long arg) { struct pci_endpoint_test_xfer_param param; - bool ret = false; void *src_addr; void *dst_addr; u32 flags = 0; @@ -365,17 +498,17 @@ static bool pci_endpoint_test_copy(struct pci_endpoint_test *test, int irq_type = test->irq_type; u32 src_crc32; u32 dst_crc32; - int err; + int ret; - err = copy_from_user(¶m, (void __user *)arg, sizeof(param)); - if (err) { + ret = copy_from_user(¶m, (void __user *)arg, sizeof(param)); + if (ret) { dev_err(dev, "Failed to get transfer param\n"); - return false; + return -EFAULT; } - err = pci_endpoint_test_validate_xfer_params(dev, ¶m, alignment); - if (err) - return false; + ret = pci_endpoint_test_validate_xfer_params(dev, ¶m, alignment); + if (ret) + return ret; size = param.size; @@ -383,24 +516,24 @@ static bool pci_endpoint_test_copy(struct pci_endpoint_test *test, if (use_dma) flags |= FLAG_USE_DMA; - if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) { + if (irq_type < PCITEST_IRQ_TYPE_INTX || + irq_type > PCITEST_IRQ_TYPE_MSIX) { dev_err(dev, "Invalid IRQ type option\n"); - goto err; + return -EINVAL; } orig_src_addr = kzalloc(size + alignment, GFP_KERNEL); if (!orig_src_addr) { dev_err(dev, "Failed to allocate source buffer\n"); - ret = false; - goto err; + return -ENOMEM; } get_random_bytes(orig_src_addr, size + alignment); 
orig_src_phys_addr = dma_map_single(dev, orig_src_addr, size + alignment, DMA_TO_DEVICE); - if (dma_mapping_error(dev, orig_src_phys_addr)) { + ret = dma_mapping_error(dev, orig_src_phys_addr); + if (ret) { dev_err(dev, "failed to map source buffer address\n"); - ret = false; goto err_src_phys_addr; } @@ -424,15 +557,15 @@ static bool pci_endpoint_test_copy(struct pci_endpoint_test *test, orig_dst_addr = kzalloc(size + alignment, GFP_KERNEL); if (!orig_dst_addr) { dev_err(dev, "Failed to allocate destination address\n"); - ret = false; + ret = -ENOMEM; goto err_dst_addr; } orig_dst_phys_addr = dma_map_single(dev, orig_dst_addr, size + alignment, DMA_FROM_DEVICE); - if (dma_mapping_error(dev, orig_dst_phys_addr)) { + ret = dma_mapping_error(dev, orig_dst_phys_addr); + if (ret) { dev_err(dev, "failed to map destination buffer address\n"); - ret = false; goto err_dst_phys_addr; } @@ -465,8 +598,8 @@ static bool pci_endpoint_test_copy(struct pci_endpoint_test *test, DMA_FROM_DEVICE); dst_crc32 = crc32_le(~0, dst_addr, size); - if (dst_crc32 == src_crc32) - ret = true; + if (dst_crc32 != src_crc32) + ret = -EIO; err_dst_phys_addr: kfree(orig_dst_addr); @@ -477,16 +610,13 @@ err_dst_addr: err_src_phys_addr: kfree(orig_src_addr); - -err: return ret; } -static bool pci_endpoint_test_write(struct pci_endpoint_test *test, +static int pci_endpoint_test_write(struct pci_endpoint_test *test, unsigned long arg) { struct pci_endpoint_test_xfer_param param; - bool ret = false; u32 flags = 0; bool use_dma; u32 reg; @@ -501,17 +631,17 @@ static bool pci_endpoint_test_write(struct pci_endpoint_test *test, int irq_type = test->irq_type; size_t size; u32 crc32; - int err; + int ret; - err = copy_from_user(¶m, (void __user *)arg, sizeof(param)); - if (err != 0) { + ret = copy_from_user(¶m, (void __user *)arg, sizeof(param)); + if (ret) { dev_err(dev, "Failed to get transfer param\n"); - return false; + return -EFAULT; } - err = pci_endpoint_test_validate_xfer_params(dev, ¶m, alignment); 
- if (err) - return false; + ret = pci_endpoint_test_validate_xfer_params(dev, ¶m, alignment); + if (ret) + return ret; size = param.size; @@ -519,25 +649,25 @@ static bool pci_endpoint_test_write(struct pci_endpoint_test *test, if (use_dma) flags |= FLAG_USE_DMA; - if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) { + if (irq_type < PCITEST_IRQ_TYPE_INTX || + irq_type > PCITEST_IRQ_TYPE_MSIX) { dev_err(dev, "Invalid IRQ type option\n"); - goto err; + return -EINVAL; } orig_addr = kzalloc(size + alignment, GFP_KERNEL); if (!orig_addr) { dev_err(dev, "Failed to allocate address\n"); - ret = false; - goto err; + return -ENOMEM; } get_random_bytes(orig_addr, size + alignment); orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment, DMA_TO_DEVICE); - if (dma_mapping_error(dev, orig_phys_addr)) { + ret = dma_mapping_error(dev, orig_phys_addr); + if (ret) { dev_err(dev, "failed to map source buffer address\n"); - ret = false; goto err_phys_addr; } @@ -570,24 +700,21 @@ static bool pci_endpoint_test_write(struct pci_endpoint_test *test, wait_for_completion(&test->irq_raised); reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS); - if (reg & STATUS_READ_SUCCESS) - ret = true; + if (!(reg & STATUS_READ_SUCCESS)) + ret = -EIO; dma_unmap_single(dev, orig_phys_addr, size + alignment, DMA_TO_DEVICE); err_phys_addr: kfree(orig_addr); - -err: return ret; } -static bool pci_endpoint_test_read(struct pci_endpoint_test *test, +static int pci_endpoint_test_read(struct pci_endpoint_test *test, unsigned long arg) { struct pci_endpoint_test_xfer_param param; - bool ret = false; u32 flags = 0; bool use_dma; size_t size; @@ -601,17 +728,17 @@ static bool pci_endpoint_test_read(struct pci_endpoint_test *test, size_t alignment = test->alignment; int irq_type = test->irq_type; u32 crc32; - int err; + int ret; - err = copy_from_user(¶m, (void __user *)arg, sizeof(param)); - if (err) { + ret = copy_from_user(¶m, (void __user *)arg, sizeof(param)); + if (ret) { 
dev_err(dev, "Failed to get transfer param\n"); - return false; + return -EFAULT; } - err = pci_endpoint_test_validate_xfer_params(dev, ¶m, alignment); - if (err) - return false; + ret = pci_endpoint_test_validate_xfer_params(dev, ¶m, alignment); + if (ret) + return ret; size = param.size; @@ -619,23 +746,23 @@ static bool pci_endpoint_test_read(struct pci_endpoint_test *test, if (use_dma) flags |= FLAG_USE_DMA; - if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) { + if (irq_type < PCITEST_IRQ_TYPE_INTX || + irq_type > PCITEST_IRQ_TYPE_MSIX) { dev_err(dev, "Invalid IRQ type option\n"); - goto err; + return -EINVAL; } orig_addr = kzalloc(size + alignment, GFP_KERNEL); if (!orig_addr) { dev_err(dev, "Failed to allocate destination address\n"); - ret = false; - goto err; + return -ENOMEM; } orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment, DMA_FROM_DEVICE); - if (dma_mapping_error(dev, orig_phys_addr)) { + ret = dma_mapping_error(dev, orig_phys_addr); + if (ret) { dev_err(dev, "failed to map source buffer address\n"); - ret = false; goto err_phys_addr; } @@ -667,50 +794,131 @@ static bool pci_endpoint_test_read(struct pci_endpoint_test *test, DMA_FROM_DEVICE); crc32 = crc32_le(~0, addr, size); - if (crc32 == pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CHECKSUM)) - ret = true; + if (crc32 != pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CHECKSUM)) + ret = -EIO; err_phys_addr: kfree(orig_addr); -err: return ret; } -static bool pci_endpoint_test_clear_irq(struct pci_endpoint_test *test) +static int pci_endpoint_test_clear_irq(struct pci_endpoint_test *test) { pci_endpoint_test_release_irq(test); pci_endpoint_test_free_irq_vectors(test); - return true; + + return 0; } -static bool pci_endpoint_test_set_irq(struct pci_endpoint_test *test, +static int pci_endpoint_test_set_irq(struct pci_endpoint_test *test, int req_irq_type) { struct pci_dev *pdev = test->pdev; struct device *dev = &pdev->dev; + int ret; - if (req_irq_type < IRQ_TYPE_LEGACY 
|| req_irq_type > IRQ_TYPE_MSIX) { + if (req_irq_type < PCITEST_IRQ_TYPE_INTX || + req_irq_type > PCITEST_IRQ_TYPE_AUTO) { dev_err(dev, "Invalid IRQ type option\n"); - return false; + return -EINVAL; + } + + if (req_irq_type == PCITEST_IRQ_TYPE_AUTO) { + if (test->ep_caps & CAP_MSI) + req_irq_type = PCITEST_IRQ_TYPE_MSI; + else if (test->ep_caps & CAP_MSIX) + req_irq_type = PCITEST_IRQ_TYPE_MSIX; + else if (test->ep_caps & CAP_INTX) + req_irq_type = PCITEST_IRQ_TYPE_INTX; + else + /* fallback to MSI if no caps defined */ + req_irq_type = PCITEST_IRQ_TYPE_MSI; } if (test->irq_type == req_irq_type) - return true; + return 0; pci_endpoint_test_release_irq(test); pci_endpoint_test_free_irq_vectors(test); - if (!pci_endpoint_test_alloc_irq_vectors(test, req_irq_type)) - goto err; + ret = pci_endpoint_test_alloc_irq_vectors(test, req_irq_type); + if (ret) + return ret; - if (!pci_endpoint_test_request_irq(test)) - goto err; + ret = pci_endpoint_test_request_irq(test); + if (ret) { + pci_endpoint_test_free_irq_vectors(test); + return ret; + } - return true; + return 0; +} -err: - pci_endpoint_test_free_irq_vectors(test); - return false; +static int pci_endpoint_test_doorbell(struct pci_endpoint_test *test) +{ + struct pci_dev *pdev = test->pdev; + struct device *dev = &pdev->dev; + int irq_type = test->irq_type; + enum pci_barno bar; + u32 data, status; + u32 addr; + int left; + + if (irq_type < PCITEST_IRQ_TYPE_INTX || + irq_type > PCITEST_IRQ_TYPE_MSIX) { + dev_err(dev, "Invalid IRQ type\n"); + return -EINVAL; + } + + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type); + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1); + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND, + COMMAND_ENABLE_DOORBELL); + + left = wait_for_completion_timeout(&test->irq_raised, msecs_to_jiffies(1000)); + + status = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS); + if (!left || (status & STATUS_DOORBELL_ENABLE_FAIL)) { + dev_err(dev, "Failed to 
enable doorbell\n"); + return -EINVAL; + } + + data = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_DB_DATA); + addr = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_DB_OFFSET); + bar = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_DB_BAR); + + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type); + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1); + + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_STATUS, 0); + + bar = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_DB_BAR); + + writel(data, test->bar[bar] + addr); + + left = wait_for_completion_timeout(&test->irq_raised, msecs_to_jiffies(1000)); + + status = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS); + + if (!left || !(status & STATUS_DOORBELL_SUCCESS)) + dev_err(dev, "Failed to trigger doorbell in endpoint\n"); + + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND, + COMMAND_DISABLE_DOORBELL); + + wait_for_completion_timeout(&test->irq_raised, msecs_to_jiffies(1000)); + + status |= pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS); + + if (status & STATUS_DOORBELL_DISABLE_FAIL) { + dev_err(dev, "Failed to disable doorbell\n"); + return -EINVAL; + } + + if (!(status & STATUS_DOORBELL_SUCCESS)) + return -EINVAL; + + return 0; } static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd, @@ -729,14 +937,17 @@ static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd, switch (cmd) { case PCITEST_BAR: bar = arg; - if (bar > BAR_5) + if (bar <= NO_BAR || bar > BAR_5) goto ret; if (is_am654_pci_dev(pdev) && bar == BAR_0) goto ret; ret = pci_endpoint_test_bar(test, bar); break; - case PCITEST_LEGACY_IRQ: - ret = pci_endpoint_test_legacy_irq(test); + case PCITEST_BARS: + ret = pci_endpoint_test_bars(test); + break; + case PCITEST_INTX_IRQ: + ret = pci_endpoint_test_intx_irq(test); break; case PCITEST_MSI: case PCITEST_MSIX: @@ -755,11 +966,14 @@ static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd, ret = 
pci_endpoint_test_set_irq(test, arg); break; case PCITEST_GET_IRQTYPE: - ret = irq_type; + ret = test->irq_type; break; case PCITEST_CLEAR_IRQ: ret = pci_endpoint_test_clear_irq(test); break; + case PCITEST_DOORBELL: + ret = pci_endpoint_test_doorbell(test); + break; } ret: @@ -772,12 +986,25 @@ static const struct file_operations pci_endpoint_test_fops = { .unlocked_ioctl = pci_endpoint_test_ioctl, }; +static void pci_endpoint_test_get_capabilities(struct pci_endpoint_test *test) +{ + struct pci_dev *pdev = test->pdev; + struct device *dev = &pdev->dev; + + test->ep_caps = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CAPS); + dev_dbg(dev, "PCI_ENDPOINT_TEST_CAPS: %#x\n", test->ep_caps); + + /* CAP_UNALIGNED_ACCESS is set if the EP can do unaligned access */ + if (test->ep_caps & CAP_UNALIGNED_ACCESS) + test->alignment = 0; +} + static int pci_endpoint_test_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { - int err; + int ret; int id; - char name[24]; + char name[29]; enum pci_barno bar; void __iomem *base; struct device *dev = &pdev->dev; @@ -793,50 +1020,35 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev, if (!test) return -ENOMEM; - test->test_reg_bar = 0; - test->alignment = 0; test->pdev = pdev; - test->irq_type = IRQ_TYPE_UNDEFINED; - - if (no_msi) - irq_type = IRQ_TYPE_LEGACY; + test->irq_type = PCITEST_IRQ_TYPE_UNDEFINED; data = (struct pci_endpoint_test_data *)ent->driver_data; if (data) { test_reg_bar = data->test_reg_bar; test->test_reg_bar = test_reg_bar; test->alignment = data->alignment; - irq_type = data->irq_type; } init_completion(&test->irq_raised); mutex_init(&test->mutex); - if ((dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48)) != 0) && - dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) { - dev_err(dev, "Cannot set DMA mask\n"); - return -EINVAL; - } + dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48)); - err = pci_enable_device(pdev); - if (err) { + ret = pci_enable_device(pdev); + if (ret) { 
dev_err(dev, "Cannot enable PCI device\n"); - return err; + return ret; } - err = pci_request_regions(pdev, DRV_MODULE_NAME); - if (err) { + ret = pci_request_regions(pdev, DRV_MODULE_NAME); + if (ret) { dev_err(dev, "Cannot obtain PCI resources\n"); goto err_disable_pdev; } pci_set_master(pdev); - if (!pci_endpoint_test_alloc_irq_vectors(test, irq_type)) { - err = -EINVAL; - goto err_disable_irq; - } - for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) { if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) { base = pci_ioremap_bar(pdev, bar); @@ -850,7 +1062,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev, test->base = test->bar[test_reg_bar]; if (!test->base) { - err = -ENOMEM; + ret = -ENOMEM; dev_err(dev, "Cannot perform PCI test without BAR%d\n", test_reg_bar); goto err_iounmap; @@ -858,9 +1070,9 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev, pci_set_drvdata(pdev, test); - id = ida_simple_get(&pci_endpoint_test_ida, 0, 0, GFP_KERNEL); + id = ida_alloc(&pci_endpoint_test_ida, GFP_KERNEL); if (id < 0) { - err = id; + ret = id; dev_err(dev, "Unable to get id\n"); goto err_iounmap; } @@ -868,27 +1080,24 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev, snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id); test->name = kstrdup(name, GFP_KERNEL); if (!test->name) { - err = -ENOMEM; + ret = -ENOMEM; goto err_ida_remove; } - if (!pci_endpoint_test_request_irq(test)) { - err = -EINVAL; - goto err_kfree_test_name; - } + pci_endpoint_test_get_capabilities(test); misc_device = &test->miscdev; misc_device->minor = MISC_DYNAMIC_MINOR; misc_device->name = kstrdup(name, GFP_KERNEL); if (!misc_device->name) { - err = -ENOMEM; - goto err_release_irq; + ret = -ENOMEM; + goto err_kfree_test_name; } misc_device->parent = &pdev->dev; misc_device->fops = &pci_endpoint_test_fops; - err = misc_register(misc_device); - if (err) { + ret = misc_register(misc_device); + if (ret) { dev_err(dev, "Failed to register device\n"); goto err_kfree_name; } @@ 
-898,14 +1107,11 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev, err_kfree_name: kfree(misc_device->name); -err_release_irq: - pci_endpoint_test_release_irq(test); - err_kfree_test_name: kfree(test->name); err_ida_remove: - ida_simple_remove(&pci_endpoint_test_ida, id); + ida_free(&pci_endpoint_test_ida, id); err_iounmap: for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) { @@ -913,14 +1119,12 @@ err_iounmap: pci_iounmap(pdev, test->bar[bar]); } -err_disable_irq: - pci_endpoint_test_free_irq_vectors(test); pci_release_regions(pdev); err_disable_pdev: pci_disable_device(pdev); - return err; + return ret; } static void pci_endpoint_test_remove(struct pci_dev *pdev) @@ -941,7 +1145,7 @@ static void pci_endpoint_test_remove(struct pci_dev *pdev) misc_deregister(&test->miscdev); kfree(misc_device->name); kfree(test->name); - ida_simple_remove(&pci_endpoint_test_ida, id); + ida_free(&pci_endpoint_test_ida, id); for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) { if (test->bar[bar]) pci_iounmap(pdev, test->bar[bar]); @@ -954,20 +1158,25 @@ static void pci_endpoint_test_remove(struct pci_dev *pdev) static const struct pci_endpoint_test_data default_data = { .test_reg_bar = BAR_0, .alignment = SZ_4K, - .irq_type = IRQ_TYPE_MSI, }; static const struct pci_endpoint_test_data am654_data = { .test_reg_bar = BAR_2, .alignment = SZ_64K, - .irq_type = IRQ_TYPE_MSI, }; static const struct pci_endpoint_test_data j721e_data = { .alignment = 256, - .irq_type = IRQ_TYPE_MSI, }; +static const struct pci_endpoint_test_data rk3588_data = { + .alignment = SZ_64K, +}; + +/* + * If the controller's Vendor/Device ID are programmable, you may be able to + * use one of the existing entries for testing instead of adding a new one. 
+ */ static const struct pci_device_id pci_endpoint_test_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x), .driver_data = (kernel_ulong_t)&default_data, @@ -990,6 +1199,9 @@ static const struct pci_device_id pci_endpoint_test_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774B1),}, { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774C0),}, { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774E1),}, + { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A779F0), + .driver_data = (kernel_ulong_t)&default_data, + }, { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721E), .driver_data = (kernel_ulong_t)&j721e_data, }, @@ -999,6 +1211,12 @@ static const struct pci_device_id pci_endpoint_test_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM64), .driver_data = (kernel_ulong_t)&j721e_data, }, + { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721S2), + .driver_data = (kernel_ulong_t)&j721e_data, + }, + { PCI_DEVICE(PCI_VENDOR_ID_ROCKCHIP, PCI_DEVICE_ID_ROCKCHIP_RK3588), + .driver_data = (kernel_ulong_t)&rk3588_data, + }, { } }; MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl); diff --git a/drivers/misc/phantom.c b/drivers/misc/phantom.c index 7966a6b8b5b3..701db2c5859b 100644 --- a/drivers/misc/phantom.c +++ b/drivers/misc/phantom.c @@ -35,9 +35,12 @@ #define PHB_NOT_OH 2 static DEFINE_MUTEX(phantom_mutex); -static struct class *phantom_class; static int phantom_major; +static const struct class phantom_class = { + .name = "phantom", +}; + struct phantom_device { unsigned int opened; void __iomem *caddr; @@ -276,7 +279,6 @@ static const struct file_operations phantom_file_ops = { .unlocked_ioctl = phantom_ioctl, .compat_ioctl = phantom_compat_ioctl, .poll = phantom_poll, - .llseek = no_llseek, }; static irqreturn_t phantom_isr(int irq, void *data) @@ -403,7 +405,7 @@ static int phantom_probe(struct pci_dev *pdev, goto err_irq; } - if (IS_ERR(device_create(phantom_class, &pdev->dev, + if 
(IS_ERR(device_create(&phantom_class, &pdev->dev, MKDEV(phantom_major, minor), NULL, "phantom%u", minor))) dev_err(&pdev->dev, "can't create device\n"); @@ -436,7 +438,7 @@ static void phantom_remove(struct pci_dev *pdev) struct phantom_device *pht = pci_get_drvdata(pdev); unsigned int minor = MINOR(pht->cdev.dev); - device_destroy(phantom_class, MKDEV(phantom_major, minor)); + device_destroy(&phantom_class, MKDEV(phantom_major, minor)); cdev_del(&pht->cdev); @@ -503,13 +505,12 @@ static int __init phantom_init(void) int retval; dev_t dev; - phantom_class = class_create("phantom"); - if (IS_ERR(phantom_class)) { - retval = PTR_ERR(phantom_class); + retval = class_register(&phantom_class); + if (retval) { printk(KERN_ERR "phantom: can't register phantom class\n"); goto err; } - retval = class_create_file(phantom_class, &class_attr_version.attr); + retval = class_create_file(&phantom_class, &class_attr_version.attr); if (retval) { printk(KERN_ERR "phantom: can't create sysfs version file\n"); goto err_class; @@ -535,9 +536,9 @@ static int __init phantom_init(void) err_unchr: unregister_chrdev_region(dev, PHANTOM_MAX_MINORS); err_attr: - class_remove_file(phantom_class, &class_attr_version.attr); + class_remove_file(&phantom_class, &class_attr_version.attr); err_class: - class_destroy(phantom_class); + class_unregister(&phantom_class); err: return retval; } @@ -548,8 +549,8 @@ static void __exit phantom_exit(void) unregister_chrdev_region(MKDEV(phantom_major, 0), PHANTOM_MAX_MINORS); - class_remove_file(phantom_class, &class_attr_version.attr); - class_destroy(phantom_class); + class_remove_file(&phantom_class, &class_attr_version.attr); + class_unregister(&phantom_class); pr_debug("phantom: module successfully removed\n"); } diff --git a/drivers/misc/pvpanic/pvpanic-mmio.c b/drivers/misc/pvpanic/pvpanic-mmio.c index eb97167c03fb..f3f2113a54a7 100644 --- a/drivers/misc/pvpanic/pvpanic-mmio.c +++ b/drivers/misc/pvpanic/pvpanic-mmio.c @@ -7,16 +7,15 @@ * Copyright (C) 
2021 Oracle. */ +#include <linux/device.h> +#include <linux/err.h> #include <linux/io.h> -#include <linux/kernel.h> +#include <linux/ioport.h> #include <linux/kexec.h> #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/types.h> -#include <linux/slab.h> - -#include <uapi/misc/pvpanic.h> #include "pvpanic.h" @@ -24,52 +23,9 @@ MODULE_AUTHOR("Hu Tao <hutao@cn.fujitsu.com>"); MODULE_DESCRIPTION("pvpanic-mmio device driver"); MODULE_LICENSE("GPL"); -static ssize_t capability_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct pvpanic_instance *pi = dev_get_drvdata(dev); - - return sysfs_emit(buf, "%x\n", pi->capability); -} -static DEVICE_ATTR_RO(capability); - -static ssize_t events_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct pvpanic_instance *pi = dev_get_drvdata(dev); - - return sysfs_emit(buf, "%x\n", pi->events); -} - -static ssize_t events_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct pvpanic_instance *pi = dev_get_drvdata(dev); - unsigned int tmp; - int err; - - err = kstrtouint(buf, 16, &tmp); - if (err) - return err; - - if ((tmp & pi->capability) != tmp) - return -EINVAL; - - pi->events = tmp; - - return count; -} -static DEVICE_ATTR_RW(events); - -static struct attribute *pvpanic_mmio_dev_attrs[] = { - &dev_attr_capability.attr, - &dev_attr_events.attr, - NULL -}; -ATTRIBUTE_GROUPS(pvpanic_mmio_dev); - static int pvpanic_mmio_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct pvpanic_instance *pi; struct resource *res; void __iomem *base; @@ -92,18 +48,7 @@ static int pvpanic_mmio_probe(struct platform_device *pdev) return -EINVAL; } - pi = devm_kmalloc(dev, sizeof(*pi), GFP_KERNEL); - if (!pi) - return -ENOMEM; - - pi->base = base; - pi->capability = PVPANIC_PANICKED | PVPANIC_CRASH_LOADED; - - /* initialize capability by RDPT */ - pi->capability &= 
ioread8(base); - pi->events = pi->capability; - - return devm_pvpanic_probe(dev, pi); + return devm_pvpanic_probe(dev, base); } static const struct of_device_id pvpanic_mmio_match[] = { @@ -123,7 +68,7 @@ static struct platform_driver pvpanic_mmio_driver = { .name = "pvpanic-mmio", .of_match_table = pvpanic_mmio_match, .acpi_match_table = pvpanic_device_ids, - .dev_groups = pvpanic_mmio_dev_groups, + .dev_groups = pvpanic_dev_groups, }, .probe = pvpanic_mmio_probe, }; diff --git a/drivers/misc/pvpanic/pvpanic-pci.c b/drivers/misc/pvpanic/pvpanic-pci.c index 07eddb5ea30f..b21598a18f6d 100644 --- a/drivers/misc/pvpanic/pvpanic-pci.c +++ b/drivers/misc/pvpanic/pvpanic-pci.c @@ -5,68 +5,21 @@ * Copyright (C) 2021 Oracle. */ -#include <linux/kernel.h> +#include <linux/errno.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/types.h> -#include <linux/slab.h> - -#include <uapi/misc/pvpanic.h> #include "pvpanic.h" -#define PCI_VENDOR_ID_REDHAT 0x1b36 #define PCI_DEVICE_ID_REDHAT_PVPANIC 0x0011 MODULE_AUTHOR("Mihai Carabas <mihai.carabas@oracle.com>"); MODULE_DESCRIPTION("pvpanic device driver"); MODULE_LICENSE("GPL"); -static ssize_t capability_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct pvpanic_instance *pi = dev_get_drvdata(dev); - - return sysfs_emit(buf, "%x\n", pi->capability); -} -static DEVICE_ATTR_RO(capability); - -static ssize_t events_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct pvpanic_instance *pi = dev_get_drvdata(dev); - - return sysfs_emit(buf, "%x\n", pi->events); -} - -static ssize_t events_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct pvpanic_instance *pi = dev_get_drvdata(dev); - unsigned int tmp; - int err; - - err = kstrtouint(buf, 16, &tmp); - if (err) - return err; - - if ((tmp & pi->capability) != tmp) - return -EINVAL; - - pi->events = tmp; - - return count; -} -static DEVICE_ATTR_RW(events); - -static 
struct attribute *pvpanic_pci_dev_attrs[] = { - &dev_attr_capability.attr, - &dev_attr_events.attr, - NULL -}; -ATTRIBUTE_GROUPS(pvpanic_pci_dev); - static int pvpanic_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { - struct pvpanic_instance *pi; void __iomem *base; int ret; @@ -78,18 +31,7 @@ static int pvpanic_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e if (!base) return -ENOMEM; - pi = devm_kmalloc(&pdev->dev, sizeof(*pi), GFP_KERNEL); - if (!pi) - return -ENOMEM; - - pi->base = base; - pi->capability = PVPANIC_PANICKED | PVPANIC_CRASH_LOADED; - - /* initlize capability by RDPT */ - pi->capability &= ioread8(base); - pi->events = pi->capability; - - return devm_pvpanic_probe(&pdev->dev, pi); + return devm_pvpanic_probe(&pdev->dev, base); } static const struct pci_device_id pvpanic_pci_id_tbl[] = { @@ -102,8 +44,6 @@ static struct pci_driver pvpanic_pci_driver = { .name = "pvpanic-pci", .id_table = pvpanic_pci_id_tbl, .probe = pvpanic_pci_probe, - .driver = { - .dev_groups = pvpanic_pci_dev_groups, - }, + .dev_groups = pvpanic_dev_groups, }; module_pci_driver(pvpanic_pci_driver); diff --git a/drivers/misc/pvpanic/pvpanic.c b/drivers/misc/pvpanic/pvpanic.c index 049a12006348..17c0eb549463 100644 --- a/drivers/misc/pvpanic/pvpanic.c +++ b/drivers/misc/pvpanic/pvpanic.c @@ -7,16 +7,22 @@ * Copyright (C) 2021 Oracle. 
*/ +#include <linux/device.h> +#include <linux/errno.h> +#include <linux/gfp_types.h> #include <linux/io.h> -#include <linux/kernel.h> #include <linux/kexec.h> +#include <linux/kstrtox.h> +#include <linux/limits.h> +#include <linux/list.h> #include <linux/mod_devicetable.h> #include <linux/module.h> -#include <linux/platform_device.h> #include <linux/panic_notifier.h> +#include <linux/platform_device.h> +#include <linux/reboot.h> +#include <linux/spinlock.h> +#include <linux/sysfs.h> #include <linux/types.h> -#include <linux/cdev.h> -#include <linux/list.h> #include <uapi/misc/pvpanic.h> @@ -26,6 +32,14 @@ MODULE_AUTHOR("Mihai Carabas <mihai.carabas@oracle.com>"); MODULE_DESCRIPTION("pvpanic device driver"); MODULE_LICENSE("GPL"); +struct pvpanic_instance { + void __iomem *base; + unsigned int capability; + unsigned int events; + struct sys_off_handler *sys_off; + struct list_head list; +}; + static struct list_head pvpanic_list; static spinlock_t pvpanic_lock; @@ -66,6 +80,39 @@ static struct notifier_block pvpanic_panic_nb = { .priority = INT_MAX, }; +static int pvpanic_sys_off(struct sys_off_data *data) +{ + pvpanic_send_event(PVPANIC_SHUTDOWN); + + return NOTIFY_DONE; +} + +static void pvpanic_synchronize_sys_off_handler(struct device *dev, struct pvpanic_instance *pi) +{ + /* The kernel core has logic to fall back to system halt if no + * sys_off_handler is registered. + * When the pvpanic sys_off_handler is disabled via sysfs the kernel + * should use that fallback logic, so the handler needs to be unregistered. 
+ */ + + struct sys_off_handler *sys_off; + + if (!(pi->events & PVPANIC_SHUTDOWN) == !pi->sys_off) + return; + + if (!pi->sys_off) { + sys_off = register_sys_off_handler(SYS_OFF_MODE_POWER_OFF, SYS_OFF_PRIO_LOW, + pvpanic_sys_off, NULL); + if (IS_ERR(sys_off)) + dev_warn(dev, "Could not register sys_off_handler: %pe\n", sys_off); + else + pi->sys_off = sys_off; + } else { + unregister_sys_off_handler(pi->sys_off); + pi->sys_off = NULL; + } +} + static void pvpanic_remove(void *param) { struct pvpanic_instance *pi_cur, *pi_next; @@ -79,13 +126,83 @@ static void pvpanic_remove(void *param) } } spin_unlock(&pvpanic_lock); + + unregister_sys_off_handler(pi->sys_off); +} + +static ssize_t capability_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct pvpanic_instance *pi = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%x\n", pi->capability); } +static DEVICE_ATTR_RO(capability); -int devm_pvpanic_probe(struct device *dev, struct pvpanic_instance *pi) +static ssize_t events_show(struct device *dev, struct device_attribute *attr, char *buf) { - if (!pi || !pi->base) + struct pvpanic_instance *pi = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%x\n", pi->events); +} + +static ssize_t events_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct pvpanic_instance *pi = dev_get_drvdata(dev); + unsigned int tmp; + int err; + + err = kstrtouint(buf, 16, &tmp); + if (err) + return err; + + if ((tmp & pi->capability) != tmp) return -EINVAL; + pi->events = tmp; + pvpanic_synchronize_sys_off_handler(dev, pi); + + return count; +} +static DEVICE_ATTR_RW(events); + +static struct attribute *pvpanic_dev_attrs[] = { + &dev_attr_capability.attr, + &dev_attr_events.attr, + NULL +}; + +static const struct attribute_group pvpanic_dev_group = { + .attrs = pvpanic_dev_attrs, +}; + +const struct attribute_group *pvpanic_dev_groups[] = { + &pvpanic_dev_group, + NULL +}; +EXPORT_SYMBOL_GPL(pvpanic_dev_groups); + 
+int devm_pvpanic_probe(struct device *dev, void __iomem *base) +{ + struct pvpanic_instance *pi; + + if (!base) + return -EINVAL; + + pi = devm_kmalloc(dev, sizeof(*pi), GFP_KERNEL); + if (!pi) + return -ENOMEM; + + pi->base = base; + pi->capability = PVPANIC_PANICKED | PVPANIC_CRASH_LOADED | PVPANIC_SHUTDOWN; + + /* initlize capability by RDPT */ + pi->capability &= ioread8(base); + pi->events = pi->capability; + + pi->sys_off = NULL; + pvpanic_synchronize_sys_off_handler(dev, pi); + spin_lock(&pvpanic_lock); list_add(&pi->list, &pvpanic_list); spin_unlock(&pvpanic_lock); diff --git a/drivers/misc/pvpanic/pvpanic.h b/drivers/misc/pvpanic/pvpanic.h index 493545951754..a42fa760eed5 100644 --- a/drivers/misc/pvpanic/pvpanic.h +++ b/drivers/misc/pvpanic/pvpanic.h @@ -8,13 +8,12 @@ #ifndef PVPANIC_H_ #define PVPANIC_H_ -struct pvpanic_instance { - void __iomem *base; - unsigned int capability; - unsigned int events; - struct list_head list; -}; +#include <linux/compiler_types.h> -int devm_pvpanic_probe(struct device *dev, struct pvpanic_instance *pi); +struct attribute_group; +struct device; + +int devm_pvpanic_probe(struct device *dev, void __iomem *base); +extern const struct attribute_group *pvpanic_dev_groups[]; #endif /* PVPANIC_H_ */ diff --git a/drivers/misc/qcom-coincell.c b/drivers/misc/qcom-coincell.c index 54d4f6ee8888..3c57f7429147 100644 --- a/drivers/misc/qcom-coincell.c +++ b/drivers/misc/qcom-coincell.c @@ -8,7 +8,6 @@ #include <linux/slab.h> #include <linux/of.h> #include <linux/regmap.h> -#include <linux/of_device.h> #include <linux/platform_device.h> struct qcom_coincell { diff --git a/drivers/misc/rp1/Kconfig b/drivers/misc/rp1/Kconfig new file mode 100644 index 000000000000..5232e70d3079 --- /dev/null +++ b/drivers/misc/rp1/Kconfig @@ -0,0 +1,20 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# RaspberryPi RP1 misc device +# + +config MISC_RP1 + tristate "RaspberryPi RP1 misc device" + depends on OF_IRQ && OF_OVERLAY && PCI_MSI && PCI_QUIRKS + 
select PCI_DYNAMIC_OF_NODES + help + Support the RP1 peripheral chip found on Raspberry Pi 5 board. + + This device supports several sub-devices including e.g. Ethernet + controller, USB controller, I2C, SPI and UART. + + The driver is responsible for enabling the DT node once the PCIe + endpoint has been configured, and handling interrupts. + + This driver uses an overlay to load other drivers to support for + RP1 internal sub-devices. diff --git a/drivers/misc/rp1/Makefile b/drivers/misc/rp1/Makefile new file mode 100644 index 000000000000..508b4cb05627 --- /dev/null +++ b/drivers/misc/rp1/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_MISC_RP1) += rp1-pci.o +rp1-pci-objs := rp1_pci.o rp1-pci.dtbo.o diff --git a/drivers/misc/rp1/rp1-pci.dtso b/drivers/misc/rp1/rp1-pci.dtso new file mode 100644 index 000000000000..eea826b36e02 --- /dev/null +++ b/drivers/misc/rp1/rp1-pci.dtso @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) + +/* + * The dts overlay is included from the dts directory so + * it can be possible to check it with CHECK_DTBS while + * also compile it from the driver source directory. + */ + +/dts-v1/; +/plugin/; + +/ { + fragment@0 { + target-path=""; + __overlay__ { + compatible = "pci1de4,1"; + #address-cells = <3>; + #size-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + + #include "arm64/broadcom/rp1-common.dtsi" + }; + }; +}; diff --git a/drivers/misc/rp1/rp1_pci.c b/drivers/misc/rp1/rp1_pci.c new file mode 100644 index 000000000000..a342bcc6164b --- /dev/null +++ b/drivers/misc/rp1/rp1_pci.c @@ -0,0 +1,336 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018-2025 Raspberry Pi Ltd. + * + * All rights reserved. 
+ */ + +#include <linux/err.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/irqdomain.h> +#include <linux/module.h> +#include <linux/msi.h> +#include <linux/of_platform.h> +#include <linux/pci.h> +#include <linux/platform_device.h> + +#define RP1_HW_IRQ_MASK GENMASK(5, 0) + +#define REG_SET 0x800 +#define REG_CLR 0xc00 + +/* MSI-X CFG registers start at 0x8 */ +#define MSIX_CFG(x) (0x8 + (4 * (x))) + +#define MSIX_CFG_IACK_EN BIT(3) +#define MSIX_CFG_IACK BIT(2) +#define MSIX_CFG_ENABLE BIT(0) + +/* Address map */ +#define RP1_PCIE_APBS_BASE 0x108000 + +/* Interrupts */ +#define RP1_INT_END 61 + +/* Embedded dtbo symbols created by cmd_wrap_S_dtb in scripts/Makefile.lib */ +extern char __dtbo_rp1_pci_begin[]; +extern char __dtbo_rp1_pci_end[]; + +struct rp1_dev { + struct pci_dev *pdev; + struct irq_domain *domain; + struct irq_data *pcie_irqds[64]; + void __iomem *bar1; + int ovcs_id; /* overlay changeset id */ + bool level_triggered_irq[RP1_INT_END]; +}; + +static void msix_cfg_set(struct rp1_dev *rp1, unsigned int hwirq, u32 value) +{ + iowrite32(value, rp1->bar1 + RP1_PCIE_APBS_BASE + REG_SET + MSIX_CFG(hwirq)); +} + +static void msix_cfg_clr(struct rp1_dev *rp1, unsigned int hwirq, u32 value) +{ + iowrite32(value, rp1->bar1 + RP1_PCIE_APBS_BASE + REG_CLR + MSIX_CFG(hwirq)); +} + +static void rp1_mask_irq(struct irq_data *irqd) +{ + struct rp1_dev *rp1 = irqd->domain->host_data; + struct irq_data *pcie_irqd = rp1->pcie_irqds[irqd->hwirq]; + + pci_msi_mask_irq(pcie_irqd); +} + +static void rp1_unmask_irq(struct irq_data *irqd) +{ + struct rp1_dev *rp1 = irqd->domain->host_data; + struct irq_data *pcie_irqd = rp1->pcie_irqds[irqd->hwirq]; + + pci_msi_unmask_irq(pcie_irqd); +} + +static int rp1_irq_set_type(struct irq_data *irqd, unsigned int type) +{ + struct rp1_dev *rp1 = irqd->domain->host_data; + unsigned int hwirq = (unsigned int)irqd->hwirq; + + switch (type) { + case IRQ_TYPE_LEVEL_HIGH: + 
dev_dbg(&rp1->pdev->dev, "MSIX IACK EN for IRQ %u\n", hwirq); + msix_cfg_set(rp1, hwirq, MSIX_CFG_IACK_EN); + rp1->level_triggered_irq[hwirq] = true; + break; + case IRQ_TYPE_EDGE_RISING: + msix_cfg_clr(rp1, hwirq, MSIX_CFG_IACK_EN); + rp1->level_triggered_irq[hwirq] = false; + break; + default: + return -EINVAL; + } + + return 0; +} + +static struct irq_chip rp1_irq_chip = { + .name = "rp1_irq_chip", + .irq_mask = rp1_mask_irq, + .irq_unmask = rp1_unmask_irq, + .irq_set_type = rp1_irq_set_type, +}; + +static void rp1_chained_handle_irq(struct irq_desc *desc) +{ + unsigned int hwirq = desc->irq_data.hwirq & RP1_HW_IRQ_MASK; + struct rp1_dev *rp1 = irq_desc_get_handler_data(desc); + struct irq_chip *chip = irq_desc_get_chip(desc); + unsigned int virq; + + chained_irq_enter(chip, desc); + + virq = irq_find_mapping(rp1->domain, hwirq); + generic_handle_irq(virq); + if (rp1->level_triggered_irq[hwirq]) + msix_cfg_set(rp1, hwirq, MSIX_CFG_IACK); + + chained_irq_exit(chip, desc); +} + +static int rp1_irq_xlate(struct irq_domain *d, struct device_node *node, + const u32 *intspec, unsigned int intsize, + unsigned long *out_hwirq, unsigned int *out_type) +{ + struct rp1_dev *rp1 = d->host_data; + struct irq_data *pcie_irqd; + unsigned long hwirq; + int pcie_irq; + int ret; + + ret = irq_domain_xlate_twocell(d, node, intspec, intsize, + &hwirq, out_type); + if (ret) + return ret; + + pcie_irq = pci_irq_vector(rp1->pdev, hwirq); + pcie_irqd = irq_get_irq_data(pcie_irq); + rp1->pcie_irqds[hwirq] = pcie_irqd; + *out_hwirq = hwirq; + + return 0; +} + +static int rp1_irq_activate(struct irq_domain *d, struct irq_data *irqd, + bool reserve) +{ + struct rp1_dev *rp1 = d->host_data; + + msix_cfg_set(rp1, (unsigned int)irqd->hwirq, MSIX_CFG_ENABLE); + + return 0; +} + +static void rp1_irq_deactivate(struct irq_domain *d, struct irq_data *irqd) +{ + struct rp1_dev *rp1 = d->host_data; + + msix_cfg_clr(rp1, (unsigned int)irqd->hwirq, MSIX_CFG_ENABLE); +} + +static const struct 
irq_domain_ops rp1_domain_ops = { + .xlate = rp1_irq_xlate, + .activate = rp1_irq_activate, + .deactivate = rp1_irq_deactivate, +}; + +static void rp1_unregister_interrupts(struct pci_dev *pdev) +{ + struct rp1_dev *rp1 = pci_get_drvdata(pdev); + int irq, i; + + if (rp1->domain) { + for (i = 0; i < RP1_INT_END; i++) { + irq = irq_find_mapping(rp1->domain, i); + irq_dispose_mapping(irq); + } + + irq_domain_remove(rp1->domain); + } + + pci_free_irq_vectors(pdev); +} + +static int rp1_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + u32 dtbo_size = __dtbo_rp1_pci_end - __dtbo_rp1_pci_begin; + void *dtbo_start = __dtbo_rp1_pci_begin; + struct device *dev = &pdev->dev; + struct device_node *rp1_node; + bool skip_ovl = true; + struct rp1_dev *rp1; + int err = 0; + int i; + + /* + * Either use rp1_nexus node if already present in DT, or + * set a flag to load it from overlay at runtime + */ + rp1_node = of_find_node_by_name(NULL, "rp1_nexus"); + if (!rp1_node) { + rp1_node = dev_of_node(dev); + skip_ovl = false; + } + + if (!rp1_node) { + dev_err(dev, "Missing of_node for device\n"); + err = -EINVAL; + goto err_put_node; + } + + rp1 = devm_kzalloc(&pdev->dev, sizeof(*rp1), GFP_KERNEL); + if (!rp1) { + err = -ENOMEM; + goto err_put_node; + } + + rp1->pdev = pdev; + + if (pci_resource_len(pdev, 1) <= 0x10000) { + dev_err(&pdev->dev, + "Not initialized - is the firmware running?\n"); + err = -EINVAL; + goto err_put_node; + } + + err = pcim_enable_device(pdev); + if (err < 0) { + err = dev_err_probe(&pdev->dev, err, + "Enabling PCI device has failed"); + goto err_put_node; + } + + rp1->bar1 = pcim_iomap(pdev, 1, 0); + if (!rp1->bar1) { + dev_err(&pdev->dev, "Cannot map PCI BAR\n"); + err = -EIO; + goto err_put_node; + } + + pci_set_master(pdev); + + err = pci_alloc_irq_vectors(pdev, RP1_INT_END, RP1_INT_END, + PCI_IRQ_MSIX); + if (err < 0) { + err = dev_err_probe(&pdev->dev, err, + "Failed to allocate MSI-X vectors\n"); + goto err_put_node; + } else if (err != 
RP1_INT_END) { + dev_err(&pdev->dev, "Cannot allocate enough interrupts\n"); + err = -EINVAL; + goto err_put_node; + } + + pci_set_drvdata(pdev, rp1); + rp1->domain = irq_domain_add_linear(rp1_node, RP1_INT_END, + &rp1_domain_ops, rp1); + if (!rp1->domain) { + dev_err(&pdev->dev, "Error creating IRQ domain\n"); + err = -ENOMEM; + goto err_unregister_interrupts; + } + + for (i = 0; i < RP1_INT_END; i++) { + unsigned int irq = irq_create_mapping(rp1->domain, i); + + if (!irq) { + dev_err(&pdev->dev, "Failed to create IRQ mapping\n"); + err = -EINVAL; + goto err_unregister_interrupts; + } + + irq_set_chip_and_handler(irq, &rp1_irq_chip, handle_level_irq); + irq_set_probe(irq); + irq_set_chained_handler_and_data(pci_irq_vector(pdev, i), + rp1_chained_handle_irq, rp1); + } + + if (!skip_ovl) { + err = of_overlay_fdt_apply(dtbo_start, dtbo_size, &rp1->ovcs_id, + rp1_node); + if (err) + goto err_unregister_interrupts; + } + + err = of_platform_default_populate(rp1_node, NULL, dev); + if (err) { + dev_err_probe(&pdev->dev, err, "Error populating devicetree\n"); + goto err_unload_overlay; + } + + if (skip_ovl) + of_node_put(rp1_node); + + return 0; + +err_unload_overlay: + of_overlay_remove(&rp1->ovcs_id); +err_unregister_interrupts: + rp1_unregister_interrupts(pdev); +err_put_node: + if (skip_ovl) + of_node_put(rp1_node); + + return err; +} + +static void rp1_remove(struct pci_dev *pdev) +{ + struct rp1_dev *rp1 = pci_get_drvdata(pdev); + struct device *dev = &pdev->dev; + + of_platform_depopulate(dev); + of_overlay_remove(&rp1->ovcs_id); + rp1_unregister_interrupts(pdev); +} + +static const struct pci_device_id dev_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_RPI, PCI_DEVICE_ID_RPI_RP1_C0), }, + { } +}; +MODULE_DEVICE_TABLE(pci, dev_id_table); + +static struct pci_driver rp1_driver = { + .name = KBUILD_MODNAME, + .id_table = dev_id_table, + .probe = rp1_probe, + .remove = rp1_remove, +}; + +module_pci_driver(rp1_driver); + +MODULE_AUTHOR("Phil Elwell 
<phil@raspberrypi.com>"); +MODULE_AUTHOR("Andrea della Porta <andrea.porta@suse.com>"); +MODULE_DESCRIPTION("RaspberryPi RP1 misc device"); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/rpmb-core.c b/drivers/misc/rpmb-core.c new file mode 100644 index 000000000000..2d653926cdbb --- /dev/null +++ b/drivers/misc/rpmb-core.c @@ -0,0 +1,226 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright(c) 2015 - 2019 Intel Corporation. All rights reserved. + * Copyright(c) 2021 - 2024 Linaro Ltd. + */ +#include <linux/device.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/rpmb.h> +#include <linux/slab.h> + +static DEFINE_IDA(rpmb_ida); + +/** + * rpmb_dev_get() - increase rpmb device ref counter + * @rdev: rpmb device + */ +struct rpmb_dev *rpmb_dev_get(struct rpmb_dev *rdev) +{ + if (rdev) + get_device(&rdev->dev); + return rdev; +} +EXPORT_SYMBOL_GPL(rpmb_dev_get); + +/** + * rpmb_dev_put() - decrease rpmb device ref counter + * @rdev: rpmb device + */ +void rpmb_dev_put(struct rpmb_dev *rdev) +{ + if (rdev) + put_device(&rdev->dev); +} +EXPORT_SYMBOL_GPL(rpmb_dev_put); + +/** + * rpmb_route_frames() - route rpmb frames to rpmb device + * @rdev: rpmb device + * @req: rpmb request frames + * @req_len: length of rpmb request frames in bytes + * @rsp: rpmb response frames + * @rsp_len: length of rpmb response frames in bytes + * + * Returns: < 0 on failure + */ +int rpmb_route_frames(struct rpmb_dev *rdev, u8 *req, + unsigned int req_len, u8 *rsp, unsigned int rsp_len) +{ + if (!req || !req_len || !rsp || !rsp_len) + return -EINVAL; + + return rdev->descr.route_frames(rdev->dev.parent, req, req_len, + rsp, rsp_len); +} +EXPORT_SYMBOL_GPL(rpmb_route_frames); + +static void rpmb_dev_release(struct device *dev) +{ + struct rpmb_dev *rdev = to_rpmb_dev(dev); + + ida_free(&rpmb_ida, rdev->id); + kfree(rdev->descr.dev_id); + kfree(rdev); +} + +static struct class rpmb_class = 
{ + .name = "rpmb", + .dev_release = rpmb_dev_release, +}; + +/** + * rpmb_dev_find_device() - return first matching rpmb device + * @start: rpmb device to begin with + * @data: data for the match function + * @match: the matching function + * + * Iterate over registered RPMB devices, and call @match() for each passing + * it the RPMB device and @data. + * + * The return value of @match() is checked for each call. If it returns + * anything other 0, break and return the found RPMB device. + * + * It's the callers responsibility to call rpmb_dev_put() on the returned + * device, when it's done with it. + * + * Returns: a matching rpmb device or NULL on failure + */ +struct rpmb_dev *rpmb_dev_find_device(const void *data, + const struct rpmb_dev *start, + int (*match)(struct device *dev, + const void *data)) +{ + struct device *dev; + const struct device *start_dev = NULL; + + if (start) + start_dev = &start->dev; + dev = class_find_device(&rpmb_class, start_dev, data, match); + + return dev ? to_rpmb_dev(dev) : NULL; +} +EXPORT_SYMBOL_GPL(rpmb_dev_find_device); + +int rpmb_interface_register(struct class_interface *intf) +{ + intf->class = &rpmb_class; + + return class_interface_register(intf); +} +EXPORT_SYMBOL_GPL(rpmb_interface_register); + +void rpmb_interface_unregister(struct class_interface *intf) +{ + class_interface_unregister(intf); +} +EXPORT_SYMBOL_GPL(rpmb_interface_unregister); + +/** + * rpmb_dev_unregister() - unregister RPMB partition from the RPMB subsystem + * @rdev: the rpmb device to unregister + * + * This function should be called from the release function of the + * underlying device used when the RPMB device was registered. 
+ * + * Returns: < 0 on failure + */ +int rpmb_dev_unregister(struct rpmb_dev *rdev) +{ + if (!rdev) + return -EINVAL; + + device_del(&rdev->dev); + + rpmb_dev_put(rdev); + + return 0; +} +EXPORT_SYMBOL_GPL(rpmb_dev_unregister); + +/** + * rpmb_dev_register - register RPMB partition with the RPMB subsystem + * @dev: storage device of the rpmb device + * @descr: RPMB device description + * + * While registering the RPMB partition extract needed device information + * while needed resources are available. + * + * Returns: a pointer to a 'struct rpmb_dev' or an ERR_PTR on failure + */ +struct rpmb_dev *rpmb_dev_register(struct device *dev, + struct rpmb_descr *descr) +{ + struct rpmb_dev *rdev; + int ret; + + if (!dev || !descr || !descr->route_frames || !descr->dev_id || + !descr->dev_id_len) + return ERR_PTR(-EINVAL); + + rdev = kzalloc(sizeof(*rdev), GFP_KERNEL); + if (!rdev) + return ERR_PTR(-ENOMEM); + rdev->descr = *descr; + rdev->descr.dev_id = kmemdup(descr->dev_id, descr->dev_id_len, + GFP_KERNEL); + if (!rdev->descr.dev_id) { + ret = -ENOMEM; + goto err_free_rdev; + } + + ret = ida_alloc(&rpmb_ida, GFP_KERNEL); + if (ret < 0) + goto err_free_dev_id; + rdev->id = ret; + + dev_set_name(&rdev->dev, "rpmb%d", rdev->id); + rdev->dev.class = &rpmb_class; + rdev->dev.parent = dev; + + ret = device_register(&rdev->dev); + if (ret) { + put_device(&rdev->dev); + return ERR_PTR(ret); + } + + dev_dbg(&rdev->dev, "registered device\n"); + + return rdev; + +err_free_dev_id: + kfree(rdev->descr.dev_id); +err_free_rdev: + kfree(rdev); + return ERR_PTR(ret); +} +EXPORT_SYMBOL_GPL(rpmb_dev_register); + +static int __init rpmb_init(void) +{ + int ret; + + ret = class_register(&rpmb_class); + if (ret) { + pr_err("couldn't create class\n"); + return ret; + } + ida_init(&rpmb_ida); + return 0; +} + +static void __exit rpmb_exit(void) +{ + ida_destroy(&rpmb_ida); + class_unregister(&rpmb_class); +} + +subsys_initcall(rpmb_init); +module_exit(rpmb_exit); + +MODULE_AUTHOR("Jens 
Wiklander <jens.wiklander@linaro.org>"); +MODULE_DESCRIPTION("RPMB class"); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/sgi-gru/gru.h b/drivers/misc/sgi-gru/gru.h index 3ad76cd18b4b..6ae045037219 100644 --- a/drivers/misc/sgi-gru/gru.h +++ b/drivers/misc/sgi-gru/gru.h @@ -30,9 +30,7 @@ /* * Size used to map GRU GSeg */ -#if defined(CONFIG_IA64) -#define GRU_GSEG_PAGESIZE (256 * 1024UL) -#elif defined(CONFIG_X86_64) +#if defined(CONFIG_X86_64) #define GRU_GSEG_PAGESIZE (256 * 1024UL) /* ZZZ 2MB ??? */ #else #error "Unsupported architecture" diff --git a/drivers/misc/sgi-gru/gru_instructions.h b/drivers/misc/sgi-gru/gru_instructions.h index 04d5170ac149..da5eb9edf9ec 100644 --- a/drivers/misc/sgi-gru/gru_instructions.h +++ b/drivers/misc/sgi-gru/gru_instructions.h @@ -29,17 +29,7 @@ extern void gru_wait_abort_proc(void *cb); * Architecture dependent functions */ -#if defined(CONFIG_IA64) -#include <linux/compiler.h> -#include <asm/intrinsics.h> -#define __flush_cache(p) ia64_fc((unsigned long)p) -/* Use volatile on IA64 to ensure ordering via st4.rel */ -#define gru_ordered_store_ulong(p, v) \ - do { \ - barrier(); \ - *((volatile unsigned long *)(p)) = v; /* force st.rel */ \ - } while (0) -#elif defined(CONFIG_X86_64) +#if defined(CONFIG_X86_64) #include <asm/cacheflush.h> #define __flush_cache(p) clflush(p) #define gru_ordered_store_ulong(p, v) \ diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c index 629edb6486de..3557d78ee47a 100644 --- a/drivers/misc/sgi-gru/grufault.c +++ b/drivers/misc/sgi-gru/grufault.c @@ -227,7 +227,7 @@ static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr, if (unlikely(pmd_none(*pmdp))) goto err; #ifdef CONFIG_X86_64 - if (unlikely(pmd_large(*pmdp))) + if (unlikely(pmd_leaf(*pmdp))) pte = ptep_get((pte_t *)pmdp); else #endif diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c index a3d659c11cc4..e755690c9805 100644 --- a/drivers/misc/sgi-gru/grufile.c +++ 
b/drivers/misc/sgi-gru/grufile.c @@ -337,72 +337,6 @@ static unsigned long gru_chiplet_cpu_to_mmr(int chiplet, int cpu, int *corep) return mmr; } -#ifdef CONFIG_IA64 - -static int gru_irq_count[GRU_CHIPLETS_PER_BLADE]; - -static void gru_noop(struct irq_data *d) -{ -} - -static struct irq_chip gru_chip[GRU_CHIPLETS_PER_BLADE] = { - [0 ... GRU_CHIPLETS_PER_BLADE - 1] { - .irq_mask = gru_noop, - .irq_unmask = gru_noop, - .irq_ack = gru_noop - } -}; - -static int gru_chiplet_setup_tlb_irq(int chiplet, char *irq_name, - irq_handler_t irq_handler, int cpu, int blade) -{ - unsigned long mmr; - int irq = IRQ_GRU + chiplet; - int ret, core; - - mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core); - if (mmr == 0) - return 0; - - if (gru_irq_count[chiplet] == 0) { - gru_chip[chiplet].name = irq_name; - ret = irq_set_chip(irq, &gru_chip[chiplet]); - if (ret) { - printk(KERN_ERR "%s: set_irq_chip failed, errno=%d\n", - GRU_DRIVER_ID_STR, -ret); - return ret; - } - - ret = request_irq(irq, irq_handler, 0, irq_name, NULL); - if (ret) { - printk(KERN_ERR "%s: request_irq failed, errno=%d\n", - GRU_DRIVER_ID_STR, -ret); - return ret; - } - } - gru_irq_count[chiplet]++; - - return 0; -} - -static void gru_chiplet_teardown_tlb_irq(int chiplet, int cpu, int blade) -{ - unsigned long mmr; - int core, irq = IRQ_GRU + chiplet; - - if (gru_irq_count[chiplet] == 0) - return; - - mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core); - if (mmr == 0) - return; - - if (--gru_irq_count[chiplet] == 0) - free_irq(irq, NULL); -} - -#elif defined CONFIG_X86_64 - static int gru_chiplet_setup_tlb_irq(int chiplet, char *irq_name, irq_handler_t irq_handler, int cpu, int blade) { @@ -447,8 +381,6 @@ static void gru_chiplet_teardown_tlb_irq(int chiplet, int cpu, int blade) } } -#endif - static void gru_teardown_tlb_irqs(void) { int blade; @@ -514,12 +446,8 @@ static int __init gru_init(void) if (!gru_supported()) return 0; -#if defined CONFIG_IA64 - gru_start_paddr = 0xd000000000UL; /* ZZZZZZZZZZZZZZZZZZZ 
fixme */ -#else gru_start_paddr = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG) & 0x7fffffffffffUL; -#endif gru_start_vaddr = __va(gru_start_paddr); gru_end_paddr = gru_start_paddr + GRU_MAX_BLADES * GRU_SIZE; printk(KERN_INFO "GRU space: 0x%lx - 0x%lx\n", diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c index 1d75d5e540bc..695316a83b01 100644 --- a/drivers/misc/sgi-gru/gruhandles.c +++ b/drivers/misc/sgi-gru/gruhandles.c @@ -11,16 +11,10 @@ #include "grutables.h" /* 10 sec */ -#ifdef CONFIG_IA64 -#include <asm/processor.h> -#define GRU_OPERATION_TIMEOUT (((cycles_t) local_cpu_data->itc_freq)*10) -#define CLKS2NSEC(c) ((c) *1000000000 / local_cpu_data->itc_freq) -#else #include <linux/sync_core.h> #include <asm/tsc.h> #define GRU_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000) #define CLKS2NSEC(c) ((c) * 1000000 / tsc_khz) -#endif /* Extract the status field from a kernel handle */ #define GET_MSEG_HANDLE_STATUS(h) (((*(unsigned long *)(h)) >> 16) & 3) diff --git a/drivers/misc/sgi-gru/grukservices.c b/drivers/misc/sgi-gru/grukservices.c index 37e804bbb1f2..205945ce9e86 100644 --- a/drivers/misc/sgi-gru/grukservices.c +++ b/drivers/misc/sgi-gru/grukservices.c @@ -258,7 +258,6 @@ static int gru_get_cpu_resources(int dsr_bytes, void **cb, void **dsr) int lcpu; BUG_ON(dsr_bytes > GRU_NUM_KERNEL_DSR_BYTES); - preempt_disable(); bs = gru_lock_kernel_context(-1); lcpu = uv_blade_processor_id(); *cb = bs->kernel_cb + lcpu * GRU_HANDLE_STRIDE; @@ -272,7 +271,6 @@ static int gru_get_cpu_resources(int dsr_bytes, void **cb, void **dsr) static void gru_free_cpu_resources(void *cb, void *dsr) { gru_unlock_kernel_context(uv_numa_blade_id()); - preempt_enable(); } /* diff --git a/drivers/misc/sgi-gru/grumain.c b/drivers/misc/sgi-gru/grumain.c index 4eb4b9455139..3036c15f3689 100644 --- a/drivers/misc/sgi-gru/grumain.c +++ b/drivers/misc/sgi-gru/grumain.c @@ -41,16 +41,12 @@ struct device *grudev = &gru_device; */ int gru_cpu_fault_map_id(void) 
{ -#ifdef CONFIG_IA64 - return uv_blade_processor_id() % GRU_NUM_TFM; -#else int cpu = smp_processor_id(); int id, core; core = uv_cpu_core_number(cpu); id = core + UV_MAX_INT_CORES * uv_cpu_socket_number(cpu); return id; -#endif } /*--------- ASID Management ------------------------------------------- @@ -941,10 +937,8 @@ vm_fault_t gru_fault(struct vm_fault *vmf) again: mutex_lock(>s->ts_ctxlock); - preempt_disable(); if (gru_check_context_placement(gts)) { - preempt_enable(); mutex_unlock(>s->ts_ctxlock); gru_unload_context(gts, 1); return VM_FAULT_NOPAGE; @@ -953,7 +947,6 @@ again: if (!gts->ts_gru) { STAT(load_user_context); if (!gru_assign_gru_context(gts)) { - preempt_enable(); mutex_unlock(>s->ts_ctxlock); set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(GRU_ASSIGN_DELAY); /* true hack ZZZ */ @@ -969,7 +962,6 @@ again: vma->vm_page_prot); } - preempt_enable(); mutex_unlock(>s->ts_ctxlock); return VM_FAULT_NOPAGE; diff --git a/drivers/misc/sgi-gru/grutlbpurge.c b/drivers/misc/sgi-gru/grutlbpurge.c index 10921cd2608d..1107dd3e2e9f 100644 --- a/drivers/misc/sgi-gru/grutlbpurge.c +++ b/drivers/misc/sgi-gru/grutlbpurge.c @@ -65,7 +65,6 @@ static struct gru_tlb_global_handle *get_lock_tgh_handle(struct gru_state struct gru_tlb_global_handle *tgh; int n; - preempt_disable(); if (uv_numa_blade_id() == gru->gs_blade_id) n = get_on_blade_tgh(gru); else @@ -79,7 +78,6 @@ static struct gru_tlb_global_handle *get_lock_tgh_handle(struct gru_state static void get_unlock_tgh_handle(struct gru_tlb_global_handle *tgh) { unlock_tgh_handle(tgh); - preempt_enable(); } /* diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h index f1336f43d3bd..3185711beb07 100644 --- a/drivers/misc/sgi-xp/xp.h +++ b/drivers/misc/sgi-xp/xp.h @@ -16,7 +16,7 @@ #include <linux/mutex.h> -#if defined CONFIG_X86_UV || defined CONFIG_IA64_SGI_UV +#if defined CONFIG_X86_UV #include <asm/uv/uv.h> #endif diff --git a/drivers/misc/sgi-xp/xp_uv.c b/drivers/misc/sgi-xp/xp_uv.c index 
19fc7076af27..3faa7eadf679 100644 --- a/drivers/misc/sgi-xp/xp_uv.c +++ b/drivers/misc/sgi-xp/xp_uv.c @@ -18,8 +18,6 @@ #include <asm/uv/uv_hub.h> #if defined CONFIG_X86_64 #include <asm/uv/bios.h> -#elif defined CONFIG_IA64_SGI_UV -#include <asm/sn/sn_sal.h> #endif #include "../sgi-gru/grukservices.h" #include "xp.h" @@ -99,17 +97,6 @@ xp_expand_memprotect_uv(unsigned long phys_addr, unsigned long size) "UV_MEMPROT_ALLOW_RW) failed, ret=%d\n", ret); return xpBiosError; } - -#elif defined CONFIG_IA64_SGI_UV - u64 nasid_array; - - ret = sn_change_memprotect(phys_addr, size, SN_MEMPROT_ACCESS_CLASS_1, - &nasid_array); - if (ret != 0) { - dev_err(xp, "sn_change_memprotect(,, " - "SN_MEMPROT_ACCESS_CLASS_1,) failed ret=%d\n", ret); - return xpSalError; - } #else #error not a supported configuration #endif @@ -129,17 +116,6 @@ xp_restrict_memprotect_uv(unsigned long phys_addr, unsigned long size) "UV_MEMPROT_RESTRICT_ACCESS) failed, ret=%d\n", ret); return xpBiosError; } - -#elif defined CONFIG_IA64_SGI_UV - u64 nasid_array; - - ret = sn_change_memprotect(phys_addr, size, SN_MEMPROT_ACCESS_CLASS_0, - &nasid_array); - if (ret != 0) { - dev_err(xp, "sn_change_memprotect(,, " - "SN_MEMPROT_ACCESS_CLASS_0,) failed ret=%d\n", ret); - return xpSalError; - } #else #error not a supported configuration #endif diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c index 6da509d692bb..9fe816bf3957 100644 --- a/drivers/misc/sgi-xp/xpc_main.c +++ b/drivers/misc/sgi-xp/xpc_main.c @@ -93,7 +93,7 @@ int xpc_disengage_timelimit = XPC_DISENGAGE_DEFAULT_TIMELIMIT; static int xpc_disengage_min_timelimit; /* = 0 */ static int xpc_disengage_max_timelimit = 120; -static struct ctl_table xpc_sys_xpc_hb[] = { +static const struct ctl_table xpc_sys_xpc_hb[] = { { .procname = "hb_interval", .data = &xpc_hb_interval, @@ -110,9 +110,8 @@ static struct ctl_table xpc_sys_xpc_hb[] = { .proc_handler = proc_dointvec_minmax, .extra1 = &xpc_hb_check_min_interval, .extra2 = 
&xpc_hb_check_max_interval}, - {} }; -static struct ctl_table xpc_sys_xpc[] = { +static const struct ctl_table xpc_sys_xpc[] = { { .procname = "disengage_timelimit", .data = &xpc_disengage_timelimit, @@ -121,7 +120,6 @@ static struct ctl_table xpc_sys_xpc[] = { .proc_handler = proc_dointvec_minmax, .extra1 = &xpc_disengage_min_timelimit, .extra2 = &xpc_disengage_max_timelimit}, - {} }; static struct ctl_table_header *xpc_sysctl; @@ -166,7 +164,8 @@ struct xpc_arch_operations xpc_arch_ops; static void xpc_timeout_partition_disengage(struct timer_list *t) { - struct xpc_partition *part = from_timer(part, t, disengage_timer); + struct xpc_partition *part = timer_container_of(part, t, + disengage_timer); DBUG_ON(time_is_after_jiffies(part->disengage_timeout)); @@ -204,7 +203,7 @@ xpc_start_hb_beater(void) static void xpc_stop_hb_beater(void) { - del_timer_sync(&xpc_hb_timer); + timer_delete_sync(&xpc_hb_timer); xpc_arch_ops.heartbeat_exit(); } @@ -1155,36 +1154,6 @@ xpc_die_deactivate(void) static int xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args) { -#ifdef CONFIG_IA64 /* !!! temporary kludge */ - switch (event) { - case DIE_MACHINE_RESTART: - case DIE_MACHINE_HALT: - xpc_die_deactivate(); - break; - - case DIE_KDEBUG_ENTER: - /* Should lack of heartbeat be ignored by other partitions? */ - if (!xpc_kdebug_ignore) - break; - - fallthrough; - case DIE_MCA_MONARCH_ENTER: - case DIE_INIT_MONARCH_ENTER: - xpc_arch_ops.offline_heartbeat(); - break; - - case DIE_KDEBUG_LEAVE: - /* Is lack of heartbeat being ignored by other partitions? 
*/ - if (!xpc_kdebug_ignore) - break; - - fallthrough; - case DIE_MCA_MONARCH_LEAVE: - case DIE_INIT_MONARCH_LEAVE: - xpc_arch_ops.online_heartbeat(); - break; - } -#else struct die_args *die_args = _die_args; switch (event) { @@ -1206,7 +1175,6 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args) default: xpc_die_deactivate(); } -#endif return NOTIFY_DONE; } diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c index 1999d02923de..d0467010558c 100644 --- a/drivers/misc/sgi-xp/xpc_partition.c +++ b/drivers/misc/sgi-xp/xpc_partition.c @@ -291,7 +291,7 @@ static int __xpc_partition_disengaged(struct xpc_partition *part, /* Cancel the timer function if not called from it */ if (!from_timer) - del_timer_sync(&part->disengage_timer); + timer_delete_sync(&part->disengage_timer); DBUG_ON(part->act_state != XPC_P_AS_DEACTIVATING && part->act_state != XPC_P_AS_INACTIVE); diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c index fff522d347e3..2f03a7080d96 100644 --- a/drivers/misc/sgi-xp/xpc_uv.c +++ b/drivers/misc/sgi-xp/xpc_uv.c @@ -24,34 +24,12 @@ #include <linux/slab.h> #include <linux/numa.h> #include <asm/uv/uv_hub.h> -#if defined CONFIG_X86_64 #include <asm/uv/bios.h> #include <asm/uv/uv_irq.h> -#elif defined CONFIG_IA64_SGI_UV -#include <asm/sn/intr.h> -#include <asm/sn/sn_sal.h> -#endif #include "../sgi-gru/gru.h" #include "../sgi-gru/grukservices.h" #include "xpc.h" -#if defined CONFIG_IA64_SGI_UV -struct uv_IO_APIC_route_entry { - __u64 vector : 8, - delivery_mode : 3, - dest_mode : 1, - delivery_status : 1, - polarity : 1, - __reserved_1 : 1, - trigger : 1, - mask : 1, - __reserved_2 : 15, - dest : 32; -}; - -#define sn_partition_id 0 -#endif - static struct xpc_heartbeat_uv *xpc_heartbeat_uv; #define XPC_ACTIVATE_MSG_SIZE_UV (1 * GRU_CACHE_LINE_BYTES) @@ -113,7 +91,6 @@ xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name) { int mmr_pnode = 
uv_blade_to_pnode(mq->mmr_blade); -#if defined CONFIG_X86_64 mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset, UV_AFFINITY_CPU); if (mq->irq < 0) @@ -121,40 +98,13 @@ xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name) mq->mmr_value = uv_read_global_mmr64(mmr_pnode, mq->mmr_offset); -#elif defined CONFIG_IA64_SGI_UV - if (strcmp(irq_name, XPC_ACTIVATE_IRQ_NAME) == 0) - mq->irq = SGI_XPC_ACTIVATE; - else if (strcmp(irq_name, XPC_NOTIFY_IRQ_NAME) == 0) - mq->irq = SGI_XPC_NOTIFY; - else - return -EINVAL; - - mq->mmr_value = (unsigned long)cpu_physical_id(cpu) << 32 | mq->irq; - uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mq->mmr_value); -#else - #error not a supported configuration -#endif - return 0; } static void xpc_release_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq) { -#if defined CONFIG_X86_64 uv_teardown_irq(mq->irq); - -#elif defined CONFIG_IA64_SGI_UV - int mmr_pnode; - unsigned long mmr_value; - - mmr_pnode = uv_blade_to_pnode(mq->mmr_blade); - mmr_value = 1UL << 16; - - uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mmr_value); -#else - #error not a supported configuration -#endif } static int @@ -162,17 +112,6 @@ xpc_gru_mq_watchlist_alloc_uv(struct xpc_gru_mq_uv *mq) { int ret; -#if defined CONFIG_IA64_SGI_UV - int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade); - - ret = sn_mq_watchlist_alloc(mmr_pnode, (void *)uv_gpa(mq->address), - mq->order, &mq->mmr_offset); - if (ret < 0) { - dev_err(xpc_part, "sn_mq_watchlist_alloc() failed, ret=%d\n", - ret); - return -EBUSY; - } -#elif defined CONFIG_X86_64 ret = uv_bios_mq_watchlist_alloc(uv_gpa(mq->address), mq->order, &mq->mmr_offset); if (ret < 0) { @@ -180,9 +119,6 @@ xpc_gru_mq_watchlist_alloc_uv(struct xpc_gru_mq_uv *mq) "ret=%d\n", ret); return ret; } -#else - #error not a supported configuration -#endif mq->watchlist_num = ret; return 0; @@ -194,15 +130,8 @@ xpc_gru_mq_watchlist_free_uv(struct xpc_gru_mq_uv *mq) int ret; int mmr_pnode = 
uv_blade_to_pnode(mq->mmr_blade); -#if defined CONFIG_X86_64 ret = uv_bios_mq_watchlist_free(mmr_pnode, mq->watchlist_num); BUG_ON(ret != BIOS_STATUS_SUCCESS); -#elif defined CONFIG_IA64_SGI_UV - ret = sn_mq_watchlist_free(mmr_pnode, mq->watchlist_num); - BUG_ON(ret != SALRET_OK); -#else - #error not a supported configuration -#endif } static struct xpc_gru_mq_uv * @@ -786,7 +715,6 @@ xpc_get_partition_rsvd_page_pa_uv(void *buf, u64 *cookie, unsigned long *rp_pa, s64 status; enum xp_retval ret; -#if defined CONFIG_X86_64 status = uv_bios_reserved_page_pa((u64)buf, cookie, (u64 *)rp_pa, (u64 *)len); if (status == BIOS_STATUS_SUCCESS) @@ -796,19 +724,6 @@ xpc_get_partition_rsvd_page_pa_uv(void *buf, u64 *cookie, unsigned long *rp_pa, else ret = xpBiosError; -#elif defined CONFIG_IA64_SGI_UV - status = sn_partition_reserved_page_pa((u64)buf, cookie, rp_pa, len); - if (status == SALRET_OK) - ret = xpSuccess; - else if (status == SALRET_MORE_PASSES) - ret = xpNeedMoreInfo; - else - ret = xpSalError; - -#else - #error not a supported configuration -#endif - return ret; } diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c index 61209739dc43..c98ff8aa221c 100644 --- a/drivers/misc/sram.c +++ b/drivers/misc/sram.c @@ -10,8 +10,8 @@ #include <linux/genalloc.h> #include <linux/io.h> #include <linux/list_sort.h> +#include <linux/of.h> #include <linux/of_address.h> -#include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/regmap.h> #include <linux/slab.h> @@ -23,12 +23,13 @@ #define SRAM_GRANULARITY 32 static ssize_t sram_read(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, + const struct bin_attribute *attr, char *buf, loff_t pos, size_t count) { struct sram_partition *part; - part = container_of(attr, struct sram_partition, battr); + /* Cast away the const as the attribute is part of a larger structure */ + part = (struct sram_partition *)container_of(attr, struct sram_partition, battr); mutex_lock(&part->lock); 
memcpy_fromio(buf, part->base + pos, count); @@ -38,12 +39,13 @@ static ssize_t sram_read(struct file *filp, struct kobject *kobj, } static ssize_t sram_write(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, + const struct bin_attribute *attr, char *buf, loff_t pos, size_t count) { struct sram_partition *part; - part = container_of(attr, struct sram_partition, battr); + /* Cast away the const as the attribute is part of a larger structure */ + part = (struct sram_partition *)container_of(attr, struct sram_partition, battr); mutex_lock(&part->lock); memcpy_toio(part->base + pos, buf, count); @@ -164,8 +166,8 @@ static void sram_free_partitions(struct sram_dev *sram) static int sram_reserve_cmp(void *priv, const struct list_head *a, const struct list_head *b) { - struct sram_reserve *ra = list_entry(a, struct sram_reserve, list); - struct sram_reserve *rb = list_entry(b, struct sram_reserve, list); + const struct sram_reserve *ra = list_entry(a, struct sram_reserve, list); + const struct sram_reserve *rb = list_entry(b, struct sram_reserve, list); return ra->start - rb->start; } @@ -435,7 +437,7 @@ err_free_partitions: return ret; } -static int sram_remove(struct platform_device *pdev) +static void sram_remove(struct platform_device *pdev) { struct sram_dev *sram = platform_get_drvdata(pdev); @@ -443,8 +445,6 @@ static int sram_remove(struct platform_device *pdev) if (sram->pool && gen_pool_avail(sram->pool) < gen_pool_size(sram->pool)) dev_err(sram->dev, "removed while SRAM allocated\n"); - - return 0; } static struct platform_driver sram_driver = { diff --git a/drivers/misc/ti-st/Kconfig b/drivers/misc/ti-st/Kconfig deleted file mode 100644 index 1503a6496f63..000000000000 --- a/drivers/misc/ti-st/Kconfig +++ /dev/null @@ -1,19 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -# -# TI's shared transport line discipline and the protocol -# drivers (BT, FM and GPS) -# -menu "Texas Instruments shared transport line discipline" -config TI_ST - 
tristate "Shared transport core driver" - depends on NET && TTY - depends on GPIOLIB || COMPILE_TEST - select FW_LOADER - help - This enables the shared transport core driver for TI - BT / FM and GPS combo chips. This enables protocol drivers - to register themselves with core and send data, the responses - are returned to relevant protocol drivers based on their - packet types. - -endmenu diff --git a/drivers/misc/ti-st/Makefile b/drivers/misc/ti-st/Makefile deleted file mode 100644 index 93393100952e..000000000000 --- a/drivers/misc/ti-st/Makefile +++ /dev/null @@ -1,7 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -# -# Makefile for TI's shared transport line discipline -# and its protocol drivers (BT, FM, GPS) -# -obj-$(CONFIG_TI_ST) += st_drv.o -st_drv-objs := st_core.o st_kim.o st_ll.o diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c deleted file mode 100644 index 01d2257deea4..000000000000 --- a/drivers/misc/ti-st/st_core.c +++ /dev/null @@ -1,923 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Shared Transport Line discipline driver Core - * This hooks up ST KIM driver and ST LL driver - * Copyright (C) 2009-2010 Texas Instruments - * Author: Pavan Savoy <pavan_savoy@ti.com> - */ - -#define pr_fmt(fmt) "(stc): " fmt -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/tty.h> - -#include <linux/seq_file.h> -#include <linux/skbuff.h> - -#include <linux/ti_wilink_st.h> - -extern void st_kim_recv(void *, const unsigned char *, long); -void st_int_recv(void *, const unsigned char *, long); -/* - * function pointer pointing to either, - * st_kim_recv during registration to receive fw download responses - * st_int_recv after registration to receive proto stack responses - */ -static void (*st_recv) (void *, const unsigned char *, long); - -/********************************************************************/ -static void add_channel_to_table(struct st_data_s *st_gdata, - struct st_proto_s *new_proto) -{ - 
pr_info("%s: id %d\n", __func__, new_proto->chnl_id); - /* list now has the channel id as index itself */ - st_gdata->list[new_proto->chnl_id] = new_proto; - st_gdata->is_registered[new_proto->chnl_id] = true; -} - -static void remove_channel_from_table(struct st_data_s *st_gdata, - struct st_proto_s *proto) -{ - pr_info("%s: id %d\n", __func__, proto->chnl_id); -/* st_gdata->list[proto->chnl_id] = NULL; */ - st_gdata->is_registered[proto->chnl_id] = false; -} - -/* - * called from KIM during firmware download. - * - * This is a wrapper function to tty->ops->write_room. - * It returns number of free space available in - * uart tx buffer. - */ -int st_get_uart_wr_room(struct st_data_s *st_gdata) -{ - if (unlikely(st_gdata == NULL || st_gdata->tty == NULL)) { - pr_err("tty unavailable to perform write"); - return -1; - } - - return tty_write_room(st_gdata->tty); -} - -/* - * can be called in from - * -- KIM (during fw download) - * -- ST Core (during st_write) - * - * This is the internal write function - a wrapper - * to tty->ops->write - */ -int st_int_write(struct st_data_s *st_gdata, - const unsigned char *data, int count) -{ - struct tty_struct *tty; - if (unlikely(st_gdata == NULL || st_gdata->tty == NULL)) { - pr_err("tty unavailable to perform write"); - return -EINVAL; - } - tty = st_gdata->tty; -#ifdef VERBOSE - print_hex_dump(KERN_DEBUG, "<out<", DUMP_PREFIX_NONE, - 16, 1, data, count, 0); -#endif - return tty->ops->write(tty, data, count); - -} - -/* - * push the skb received to relevant - * protocol stacks - */ -static void st_send_frame(unsigned char chnl_id, struct st_data_s *st_gdata) -{ - pr_debug(" %s(prot:%d) ", __func__, chnl_id); - - if (unlikely - (st_gdata == NULL || st_gdata->rx_skb == NULL - || st_gdata->is_registered[chnl_id] == false)) { - pr_err("chnl_id %d not registered, no data to send?", - chnl_id); - kfree_skb(st_gdata->rx_skb); - return; - } - /* - * this cannot fail - * this shouldn't take long - * - should be just skb_queue_tail 
for the - * protocol stack driver - */ - if (likely(st_gdata->list[chnl_id]->recv != NULL)) { - if (unlikely - (st_gdata->list[chnl_id]->recv - (st_gdata->list[chnl_id]->priv_data, st_gdata->rx_skb) - != 0)) { - pr_err(" proto stack %d's ->recv failed", chnl_id); - kfree_skb(st_gdata->rx_skb); - return; - } - } else { - pr_err(" proto stack %d's ->recv null", chnl_id); - kfree_skb(st_gdata->rx_skb); - } - return; -} - -/* - * st_reg_complete - to call registration complete callbacks - * of all protocol stack drivers - * This function is being called with spin lock held, protocol drivers are - * only expected to complete their waits and do nothing more than that. - */ -static void st_reg_complete(struct st_data_s *st_gdata, int err) -{ - unsigned char i = 0; - pr_info(" %s ", __func__); - for (i = 0; i < ST_MAX_CHANNELS; i++) { - if (likely(st_gdata != NULL && - st_gdata->is_registered[i] == true && - st_gdata->list[i]->reg_complete_cb != NULL)) { - st_gdata->list[i]->reg_complete_cb - (st_gdata->list[i]->priv_data, err); - pr_info("protocol %d's cb sent %d\n", i, err); - if (err) { /* cleanup registered protocol */ - st_gdata->is_registered[i] = false; - if (st_gdata->protos_registered) - st_gdata->protos_registered--; - } - } - } -} - -static inline int st_check_data_len(struct st_data_s *st_gdata, - unsigned char chnl_id, int len) -{ - int room = skb_tailroom(st_gdata->rx_skb); - - pr_debug("len %d room %d", len, room); - - if (!len) { - /* - * Received packet has only packet header and - * has zero length payload. So, ask ST CORE to - * forward the packet to protocol driver (BT/FM/GPS) - */ - st_send_frame(chnl_id, st_gdata); - - } else if (len > room) { - /* - * Received packet's payload length is larger. - * We can't accommodate it in created skb. - */ - pr_err("Data length is too large len %d room %d", len, - room); - kfree_skb(st_gdata->rx_skb); - } else { - /* - * Packet header has non-zero payload length and - * we have enough space in created skb. 
Lets read - * payload data */ - st_gdata->rx_state = ST_W4_DATA; - st_gdata->rx_count = len; - return len; - } - - /* Change ST state to continue to process next packet */ - st_gdata->rx_state = ST_W4_PACKET_TYPE; - st_gdata->rx_skb = NULL; - st_gdata->rx_count = 0; - st_gdata->rx_chnl = 0; - - return 0; -} - -/* - * st_wakeup_ack - internal function for action when wake-up ack - * received - */ -static inline void st_wakeup_ack(struct st_data_s *st_gdata, - unsigned char cmd) -{ - struct sk_buff *waiting_skb; - unsigned long flags = 0; - - spin_lock_irqsave(&st_gdata->lock, flags); - /* - * de-Q from waitQ and Q in txQ now that the - * chip is awake - */ - while ((waiting_skb = skb_dequeue(&st_gdata->tx_waitq))) - skb_queue_tail(&st_gdata->txq, waiting_skb); - - /* state forwarded to ST LL */ - st_ll_sleep_state(st_gdata, (unsigned long)cmd); - spin_unlock_irqrestore(&st_gdata->lock, flags); - - /* wake up to send the recently copied skbs from waitQ */ - st_tx_wakeup(st_gdata); -} - -/* - * st_int_recv - ST's internal receive function. - * Decodes received RAW data and forwards to corresponding - * client drivers (Bluetooth,FM,GPS..etc). - * This can receive various types of packets, - * HCI-Events, ACL, SCO, 4 types of HCI-LL PM packets - * CH-8 packets from FM, CH-9 packets from GPS cores. - */ -void st_int_recv(void *disc_data, - const unsigned char *data, long count) -{ - char *ptr; - struct st_proto_s *proto; - unsigned short payload_len = 0; - int len = 0; - unsigned char type = 0; - unsigned char *plen; - struct st_data_s *st_gdata = (struct st_data_s *)disc_data; - unsigned long flags; - - ptr = (char *)data; - /* tty_receive sent null ? 
*/ - if (unlikely(ptr == NULL) || (st_gdata == NULL)) { - pr_err(" received null from TTY "); - return; - } - - pr_debug("count %ld rx_state %ld" - "rx_count %ld", count, st_gdata->rx_state, - st_gdata->rx_count); - - spin_lock_irqsave(&st_gdata->lock, flags); - /* Decode received bytes here */ - while (count) { - if (st_gdata->rx_count) { - len = min_t(unsigned int, st_gdata->rx_count, count); - skb_put_data(st_gdata->rx_skb, ptr, len); - st_gdata->rx_count -= len; - count -= len; - ptr += len; - - if (st_gdata->rx_count) - continue; - - /* Check ST RX state machine , where are we? */ - switch (st_gdata->rx_state) { - /* Waiting for complete packet ? */ - case ST_W4_DATA: - pr_debug("Complete pkt received"); - /* - * Ask ST CORE to forward - * the packet to protocol driver - */ - st_send_frame(st_gdata->rx_chnl, st_gdata); - - st_gdata->rx_state = ST_W4_PACKET_TYPE; - st_gdata->rx_skb = NULL; - continue; - /* parse the header to know details */ - case ST_W4_HEADER: - proto = st_gdata->list[st_gdata->rx_chnl]; - plen = - &st_gdata->rx_skb->data - [proto->offset_len_in_hdr]; - pr_debug("plen pointing to %x\n", *plen); - if (proto->len_size == 1) /* 1 byte len field */ - payload_len = *(unsigned char *)plen; - else if (proto->len_size == 2) - payload_len = - __le16_to_cpu(*(unsigned short *)plen); - else - pr_info("%s: invalid length " - "for id %d\n", - __func__, proto->chnl_id); - st_check_data_len(st_gdata, proto->chnl_id, - payload_len); - pr_debug("off %d, pay len %d\n", - proto->offset_len_in_hdr, payload_len); - continue; - } /* end of switch rx_state */ - } - - /* end of if rx_count */ - - /* - * Check first byte of packet and identify module - * owner (BT/FM/GPS) - */ - switch (*ptr) { - case LL_SLEEP_IND: - case LL_SLEEP_ACK: - case LL_WAKE_UP_IND: - pr_debug("PM packet"); - /* - * this takes appropriate action based on - * sleep state received -- - */ - st_ll_sleep_state(st_gdata, *ptr); - /* - * if WAKEUP_IND collides copy from waitq to txq - * and assume 
chip awake - */ - spin_unlock_irqrestore(&st_gdata->lock, flags); - if (st_ll_getstate(st_gdata) == ST_LL_AWAKE) - st_wakeup_ack(st_gdata, LL_WAKE_UP_ACK); - spin_lock_irqsave(&st_gdata->lock, flags); - - ptr++; - count--; - continue; - case LL_WAKE_UP_ACK: - pr_debug("PM packet"); - - spin_unlock_irqrestore(&st_gdata->lock, flags); - /* wake up ack received */ - st_wakeup_ack(st_gdata, *ptr); - spin_lock_irqsave(&st_gdata->lock, flags); - - ptr++; - count--; - continue; - /* Unknown packet? */ - default: - type = *ptr; - - /* - * Default case means non-HCILL packets, - * possibilities are packets for: - * (a) valid protocol - Supported Protocols within - * the ST_MAX_CHANNELS. - * (b) registered protocol - Checked by - * "st_gdata->list[type] == NULL)" are supported - * protocols only. - * Rules out any invalid protocol and - * unregistered protocols with channel ID < 16. - */ - - if ((type >= ST_MAX_CHANNELS) || - (st_gdata->list[type] == NULL)) { - pr_err("chip/interface misbehavior: " - "dropping frame starting " - "with 0x%02x\n", type); - goto done; - } - - st_gdata->rx_skb = alloc_skb( - st_gdata->list[type]->max_frame_size, - GFP_ATOMIC); - if (st_gdata->rx_skb == NULL) { - pr_err("out of memory: dropping\n"); - goto done; - } - - skb_reserve(st_gdata->rx_skb, - st_gdata->list[type]->reserve); - /* next 2 required for BT only */ - st_gdata->rx_skb->cb[0] = type; /*pkt_type*/ - st_gdata->rx_skb->cb[1] = 0; /*incoming*/ - st_gdata->rx_chnl = *ptr; - st_gdata->rx_state = ST_W4_HEADER; - st_gdata->rx_count = st_gdata->list[type]->hdr_len; - pr_debug("rx_count %ld\n", st_gdata->rx_count); - } - ptr++; - count--; - } -done: - spin_unlock_irqrestore(&st_gdata->lock, flags); - pr_debug("done %s", __func__); - return; -} - -/* - * st_int_dequeue - internal de-Q function. - * If the previous data set was not written - * completely, return that skb which has the pending data. - * In normal cases, return top of txq. 
- */ -static struct sk_buff *st_int_dequeue(struct st_data_s *st_gdata) -{ - struct sk_buff *returning_skb; - - pr_debug("%s", __func__); - if (st_gdata->tx_skb != NULL) { - returning_skb = st_gdata->tx_skb; - st_gdata->tx_skb = NULL; - return returning_skb; - } - return skb_dequeue(&st_gdata->txq); -} - -/* - * st_int_enqueue - internal Q-ing function. - * Will either Q the skb to txq or the tx_waitq - * depending on the ST LL state. - * If the chip is asleep, then Q it onto waitq and - * wakeup the chip. - * txq and waitq needs protection since the other contexts - * may be sending data, waking up chip. - */ -static void st_int_enqueue(struct st_data_s *st_gdata, struct sk_buff *skb) -{ - unsigned long flags = 0; - - pr_debug("%s", __func__); - spin_lock_irqsave(&st_gdata->lock, flags); - - switch (st_ll_getstate(st_gdata)) { - case ST_LL_AWAKE: - pr_debug("ST LL is AWAKE, sending normally"); - skb_queue_tail(&st_gdata->txq, skb); - break; - case ST_LL_ASLEEP_TO_AWAKE: - skb_queue_tail(&st_gdata->tx_waitq, skb); - break; - case ST_LL_AWAKE_TO_ASLEEP: - pr_err("ST LL is illegal state(%ld)," - "purging received skb.", st_ll_getstate(st_gdata)); - kfree_skb(skb); - break; - case ST_LL_ASLEEP: - skb_queue_tail(&st_gdata->tx_waitq, skb); - st_ll_wakeup(st_gdata); - break; - default: - pr_err("ST LL is illegal state(%ld)," - "purging received skb.", st_ll_getstate(st_gdata)); - kfree_skb(skb); - break; - } - - spin_unlock_irqrestore(&st_gdata->lock, flags); - pr_debug("done %s", __func__); - return; -} - -/* - * internal wakeup function - * called from either - * - TTY layer when write's finished - * - st_write (in context of the protocol stack) - */ -static void work_fn_write_wakeup(struct work_struct *work) -{ - struct st_data_s *st_gdata = container_of(work, struct st_data_s, - work_write_wakeup); - - st_tx_wakeup((void *)st_gdata); -} -void st_tx_wakeup(struct st_data_s *st_data) -{ - struct sk_buff *skb; - unsigned long flags; /* for irq save flags */ - 
pr_debug("%s", __func__); - /* check for sending & set flag sending here */ - if (test_and_set_bit(ST_TX_SENDING, &st_data->tx_state)) { - pr_debug("ST already sending"); - /* keep sending */ - set_bit(ST_TX_WAKEUP, &st_data->tx_state); - return; - /* TX_WAKEUP will be checked in another - * context - */ - } - do { /* come back if st_tx_wakeup is set */ - /* woke-up to write */ - clear_bit(ST_TX_WAKEUP, &st_data->tx_state); - while ((skb = st_int_dequeue(st_data))) { - int len; - spin_lock_irqsave(&st_data->lock, flags); - /* enable wake-up from TTY */ - set_bit(TTY_DO_WRITE_WAKEUP, &st_data->tty->flags); - len = st_int_write(st_data, skb->data, skb->len); - skb_pull(skb, len); - /* if skb->len = len as expected, skb->len=0 */ - if (skb->len) { - /* would be the next skb to be sent */ - st_data->tx_skb = skb; - spin_unlock_irqrestore(&st_data->lock, flags); - break; - } - kfree_skb(skb); - spin_unlock_irqrestore(&st_data->lock, flags); - } - /* if wake-up is set in another context- restart sending */ - } while (test_bit(ST_TX_WAKEUP, &st_data->tx_state)); - - /* clear flag sending */ - clear_bit(ST_TX_SENDING, &st_data->tx_state); -} - -/********************************************************************/ -/* functions called from ST KIM -*/ -void kim_st_list_protocols(struct st_data_s *st_gdata, void *buf) -{ - seq_printf(buf, "[%d]\nBT=%c\nFM=%c\nGPS=%c\n", - st_gdata->protos_registered, - st_gdata->is_registered[0x04] == true ? 'R' : 'U', - st_gdata->is_registered[0x08] == true ? 'R' : 'U', - st_gdata->is_registered[0x09] == true ? 
'R' : 'U'); -} - -/********************************************************************/ -/* - * functions called from protocol stack drivers - * to be EXPORT-ed - */ -long st_register(struct st_proto_s *new_proto) -{ - struct st_data_s *st_gdata; - long err = 0; - unsigned long flags = 0; - - st_kim_ref(&st_gdata, 0); - if (st_gdata == NULL || new_proto == NULL || new_proto->recv == NULL - || new_proto->reg_complete_cb == NULL) { - pr_err("gdata/new_proto/recv or reg_complete_cb not ready"); - return -EINVAL; - } - - if (new_proto->chnl_id >= ST_MAX_CHANNELS) { - pr_err("chnl_id %d not supported", new_proto->chnl_id); - return -EPROTONOSUPPORT; - } - - if (st_gdata->is_registered[new_proto->chnl_id] == true) { - pr_err("chnl_id %d already registered", new_proto->chnl_id); - return -EALREADY; - } - - /* can be from process context only */ - spin_lock_irqsave(&st_gdata->lock, flags); - - if (test_bit(ST_REG_IN_PROGRESS, &st_gdata->st_state)) { - pr_info(" ST_REG_IN_PROGRESS:%d ", new_proto->chnl_id); - /* fw download in progress */ - - add_channel_to_table(st_gdata, new_proto); - st_gdata->protos_registered++; - new_proto->write = st_write; - - set_bit(ST_REG_PENDING, &st_gdata->st_state); - spin_unlock_irqrestore(&st_gdata->lock, flags); - return -EINPROGRESS; - } else if (st_gdata->protos_registered == ST_EMPTY) { - pr_info(" chnl_id list empty :%d ", new_proto->chnl_id); - set_bit(ST_REG_IN_PROGRESS, &st_gdata->st_state); - st_recv = st_kim_recv; - - /* enable the ST LL - to set default chip state */ - st_ll_enable(st_gdata); - - /* release lock previously held - re-locked below */ - spin_unlock_irqrestore(&st_gdata->lock, flags); - - /* - * this may take a while to complete - * since it involves BT fw download - */ - err = st_kim_start(st_gdata->kim_data); - if (err != 0) { - clear_bit(ST_REG_IN_PROGRESS, &st_gdata->st_state); - if ((st_gdata->protos_registered != ST_EMPTY) && - (test_bit(ST_REG_PENDING, &st_gdata->st_state))) { - pr_err(" KIM failure complete 
callback "); - spin_lock_irqsave(&st_gdata->lock, flags); - st_reg_complete(st_gdata, err); - spin_unlock_irqrestore(&st_gdata->lock, flags); - clear_bit(ST_REG_PENDING, &st_gdata->st_state); - } - return -EINVAL; - } - - spin_lock_irqsave(&st_gdata->lock, flags); - - clear_bit(ST_REG_IN_PROGRESS, &st_gdata->st_state); - st_recv = st_int_recv; - - /* - * this is where all pending registration - * are signalled to be complete by calling callback functions - */ - if ((st_gdata->protos_registered != ST_EMPTY) && - (test_bit(ST_REG_PENDING, &st_gdata->st_state))) { - pr_debug(" call reg complete callback "); - st_reg_complete(st_gdata, 0); - } - clear_bit(ST_REG_PENDING, &st_gdata->st_state); - - /* - * check for already registered once more, - * since the above check is old - */ - if (st_gdata->is_registered[new_proto->chnl_id] == true) { - pr_err(" proto %d already registered ", - new_proto->chnl_id); - spin_unlock_irqrestore(&st_gdata->lock, flags); - return -EALREADY; - } - - add_channel_to_table(st_gdata, new_proto); - st_gdata->protos_registered++; - new_proto->write = st_write; - spin_unlock_irqrestore(&st_gdata->lock, flags); - return err; - } - /* if fw is already downloaded & new stack registers protocol */ - else { - add_channel_to_table(st_gdata, new_proto); - st_gdata->protos_registered++; - new_proto->write = st_write; - - /* lock already held before entering else */ - spin_unlock_irqrestore(&st_gdata->lock, flags); - return err; - } -} -EXPORT_SYMBOL_GPL(st_register); - -/* - * to unregister a protocol - - * to be called from protocol stack driver - */ -long st_unregister(struct st_proto_s *proto) -{ - long err = 0; - unsigned long flags = 0; - struct st_data_s *st_gdata; - - pr_debug("%s: %d ", __func__, proto->chnl_id); - - st_kim_ref(&st_gdata, 0); - if (!st_gdata || proto->chnl_id >= ST_MAX_CHANNELS) { - pr_err(" chnl_id %d not supported", proto->chnl_id); - return -EPROTONOSUPPORT; - } - - spin_lock_irqsave(&st_gdata->lock, flags); - - if 
(st_gdata->is_registered[proto->chnl_id] == false) { - pr_err(" chnl_id %d not registered", proto->chnl_id); - spin_unlock_irqrestore(&st_gdata->lock, flags); - return -EPROTONOSUPPORT; - } - - if (st_gdata->protos_registered) - st_gdata->protos_registered--; - - remove_channel_from_table(st_gdata, proto); - spin_unlock_irqrestore(&st_gdata->lock, flags); - - if ((st_gdata->protos_registered == ST_EMPTY) && - (!test_bit(ST_REG_PENDING, &st_gdata->st_state))) { - pr_info(" all chnl_ids unregistered "); - - /* stop traffic on tty */ - if (st_gdata->tty) { - tty_ldisc_flush(st_gdata->tty); - stop_tty(st_gdata->tty); - } - - /* all chnl_ids now unregistered */ - st_kim_stop(st_gdata->kim_data); - /* disable ST LL */ - st_ll_disable(st_gdata); - } - return err; -} - -/* - * called in protocol stack drivers - * via the write function pointer - */ -long st_write(struct sk_buff *skb) -{ - struct st_data_s *st_gdata; - long len; - - st_kim_ref(&st_gdata, 0); - if (unlikely(skb == NULL || st_gdata == NULL - || st_gdata->tty == NULL)) { - pr_err("data/tty unavailable to perform write"); - return -EINVAL; - } - - pr_debug("%d to be written", skb->len); - len = skb->len; - - /* st_ll to decide where to enqueue the skb */ - st_int_enqueue(st_gdata, skb); - /* wake up */ - st_tx_wakeup(st_gdata); - - /* return number of bytes written */ - return len; -} - -/* for protocols making use of shared transport */ -EXPORT_SYMBOL_GPL(st_unregister); - -/********************************************************************/ -/* - * functions called from TTY layer - */ -static int st_tty_open(struct tty_struct *tty) -{ - struct st_data_s *st_gdata; - pr_info("%s ", __func__); - - st_kim_ref(&st_gdata, 0); - st_gdata->tty = tty; - tty->disc_data = st_gdata; - - /* don't do an wakeup for now */ - clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); - - /* mem already allocated - */ - tty->receive_room = 65536; - /* Flush any pending characters in the driver and discipline. 
*/ - tty_ldisc_flush(tty); - tty_driver_flush_buffer(tty); - /* - * signal to UIM via KIM that - - * installation of N_TI_WL ldisc is complete - */ - st_kim_complete(st_gdata->kim_data); - pr_debug("done %s", __func__); - - return 0; -} - -static void st_tty_close(struct tty_struct *tty) -{ - unsigned char i; - unsigned long flags; - struct st_data_s *st_gdata = tty->disc_data; - - pr_info("%s ", __func__); - - /* - * TODO: - * if a protocol has been registered & line discipline - * un-installed for some reason - what should be done ? - */ - spin_lock_irqsave(&st_gdata->lock, flags); - for (i = ST_BT; i < ST_MAX_CHANNELS; i++) { - if (st_gdata->is_registered[i] == true) - pr_err("%d not un-registered", i); - st_gdata->list[i] = NULL; - st_gdata->is_registered[i] = false; - } - st_gdata->protos_registered = 0; - spin_unlock_irqrestore(&st_gdata->lock, flags); - /* - * signal to UIM via KIM that - - * N_TI_WL ldisc is un-installed - */ - st_kim_complete(st_gdata->kim_data); - st_gdata->tty = NULL; - /* Flush any pending characters in the driver and discipline. 
*/ - tty_ldisc_flush(tty); - tty_driver_flush_buffer(tty); - - spin_lock_irqsave(&st_gdata->lock, flags); - /* empty out txq and tx_waitq */ - skb_queue_purge(&st_gdata->txq); - skb_queue_purge(&st_gdata->tx_waitq); - /* reset the TTY Rx states of ST */ - st_gdata->rx_count = 0; - st_gdata->rx_state = ST_W4_PACKET_TYPE; - kfree_skb(st_gdata->rx_skb); - st_gdata->rx_skb = NULL; - spin_unlock_irqrestore(&st_gdata->lock, flags); - - pr_debug("%s: done ", __func__); -} - -static void st_tty_receive(struct tty_struct *tty, const unsigned char *data, - const char *tty_flags, int count) -{ -#ifdef VERBOSE - print_hex_dump(KERN_DEBUG, ">in>", DUMP_PREFIX_NONE, - 16, 1, data, count, 0); -#endif - - /* - * if fw download is in progress then route incoming data - * to KIM for validation - */ - st_recv(tty->disc_data, data, count); - pr_debug("done %s", __func__); -} - -/* - * wake-up function called in from the TTY layer - * inside the internal wakeup function will be called - */ -static void st_tty_wakeup(struct tty_struct *tty) -{ - struct st_data_s *st_gdata = tty->disc_data; - pr_debug("%s ", __func__); - /* don't do an wakeup for now */ - clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); - - /* - * schedule the internal wakeup instead of calling directly to - * avoid lockup (port->lock needed in tty->ops->write is - * already taken here - */ - schedule_work(&st_gdata->work_write_wakeup); -} - -static void st_tty_flush_buffer(struct tty_struct *tty) -{ - struct st_data_s *st_gdata = tty->disc_data; - pr_debug("%s ", __func__); - - kfree_skb(st_gdata->tx_skb); - st_gdata->tx_skb = NULL; - - tty_driver_flush_buffer(tty); - return; -} - -static struct tty_ldisc_ops st_ldisc_ops = { - .num = N_TI_WL, - .name = "n_st", - .open = st_tty_open, - .close = st_tty_close, - .receive_buf = st_tty_receive, - .write_wakeup = st_tty_wakeup, - .flush_buffer = st_tty_flush_buffer, - .owner = THIS_MODULE -}; - -/********************************************************************/ -int 
st_core_init(struct st_data_s **core_data) -{ - struct st_data_s *st_gdata; - long err; - - err = tty_register_ldisc(&st_ldisc_ops); - if (err) { - pr_err("error registering %d line discipline %ld", - N_TI_WL, err); - return err; - } - pr_debug("registered n_shared line discipline"); - - st_gdata = kzalloc(sizeof(struct st_data_s), GFP_KERNEL); - if (!st_gdata) { - pr_err("memory allocation failed"); - err = -ENOMEM; - goto err_unreg_ldisc; - } - - /* Initialize ST TxQ and Tx waitQ queue head. All BT/FM/GPS module skb's - * will be pushed in this queue for actual transmission. - */ - skb_queue_head_init(&st_gdata->txq); - skb_queue_head_init(&st_gdata->tx_waitq); - - /* Locking used in st_int_enqueue() to avoid multiple execution */ - spin_lock_init(&st_gdata->lock); - - err = st_ll_init(st_gdata); - if (err) { - pr_err("error during st_ll initialization(%ld)", err); - goto err_free_gdata; - } - - INIT_WORK(&st_gdata->work_write_wakeup, work_fn_write_wakeup); - - *core_data = st_gdata; - return 0; -err_free_gdata: - kfree(st_gdata); -err_unreg_ldisc: - tty_unregister_ldisc(&st_ldisc_ops); - return err; -} - -void st_core_exit(struct st_data_s *st_gdata) -{ - long err; - /* internal module cleanup */ - err = st_ll_deinit(st_gdata); - if (err) - pr_err("error during deinit of ST LL %ld", err); - - if (st_gdata != NULL) { - /* Free ST Tx Qs and skbs */ - skb_queue_purge(&st_gdata->txq); - skb_queue_purge(&st_gdata->tx_waitq); - kfree_skb(st_gdata->rx_skb); - kfree_skb(st_gdata->tx_skb); - /* TTY ldisc cleanup */ - tty_unregister_ldisc(&st_ldisc_ops); - /* free the global data pointer */ - kfree(st_gdata); - } -} diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c deleted file mode 100644 index f2f6cab97c08..000000000000 --- a/drivers/misc/ti-st/st_kim.c +++ /dev/null @@ -1,846 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Shared Transport Line discipline driver Core - * Init Manager module responsible for GPIO control - * and firmware 
download - * Copyright (C) 2009-2010 Texas Instruments - * Author: Pavan Savoy <pavan_savoy@ti.com> - */ - -#define pr_fmt(fmt) "(stk) :" fmt -#include <linux/platform_device.h> -#include <linux/jiffies.h> -#include <linux/firmware.h> -#include <linux/delay.h> -#include <linux/wait.h> -#include <linux/gpio.h> -#include <linux/debugfs.h> -#include <linux/seq_file.h> -#include <linux/sched.h> -#include <linux/sysfs.h> -#include <linux/tty.h> - -#include <linux/skbuff.h> -#include <linux/ti_wilink_st.h> -#include <linux/module.h> - -#define MAX_ST_DEVICES 3 /* Imagine 1 on each UART for now */ -static struct platform_device *st_kim_devices[MAX_ST_DEVICES]; - -/**********************************************************************/ -/* internal functions */ - -/* - * st_get_plat_device - - * function which returns the reference to the platform device - * requested by id. As of now only 1 such device exists (id=0) - * the context requesting for reference can get the id to be - * requested by a. The protocol driver which is registering or - * b. the tty device which is opened. - */ -static struct platform_device *st_get_plat_device(int id) -{ - return st_kim_devices[id]; -} - -/* - * validate_firmware_response - - * function to return whether the firmware response was proper - * in case of error don't complete so that waiting for proper - * response times out - */ -static void validate_firmware_response(struct kim_data_s *kim_gdata) -{ - struct sk_buff *skb = kim_gdata->rx_skb; - if (!skb) - return; - - /* - * these magic numbers are the position in the response buffer which - * allows us to distinguish whether the response is for the read - * version info. 
command - */ - if (skb->data[2] == 0x01 && skb->data[3] == 0x01 && - skb->data[4] == 0x10 && skb->data[5] == 0x00) { - /* fw version response */ - memcpy(kim_gdata->resp_buffer, - kim_gdata->rx_skb->data, - kim_gdata->rx_skb->len); - kim_gdata->rx_state = ST_W4_PACKET_TYPE; - kim_gdata->rx_skb = NULL; - kim_gdata->rx_count = 0; - } else if (unlikely(skb->data[5] != 0)) { - pr_err("no proper response during fw download"); - pr_err("data6 %x", skb->data[5]); - kfree_skb(skb); - return; /* keep waiting for the proper response */ - } - /* becos of all the script being downloaded */ - complete_all(&kim_gdata->kim_rcvd); - kfree_skb(skb); -} - -/* - * check for data len received inside kim_int_recv - * most often hit the last case to update state to waiting for data - */ -static inline int kim_check_data_len(struct kim_data_s *kim_gdata, int len) -{ - register int room = skb_tailroom(kim_gdata->rx_skb); - - pr_debug("len %d room %d", len, room); - - if (!len) { - validate_firmware_response(kim_gdata); - } else if (len > room) { - /* - * Received packet's payload length is larger. - * We can't accommodate it in created skb. - */ - pr_err("Data length is too large len %d room %d", len, - room); - kfree_skb(kim_gdata->rx_skb); - } else { - /* - * Packet header has non-zero payload length and - * we have enough space in created skb. 
Lets read - * payload data */ - kim_gdata->rx_state = ST_W4_DATA; - kim_gdata->rx_count = len; - return len; - } - - /* - * Change ST LL state to continue to process next - * packet - */ - kim_gdata->rx_state = ST_W4_PACKET_TYPE; - kim_gdata->rx_skb = NULL; - kim_gdata->rx_count = 0; - - return 0; -} - -/* - * kim_int_recv - receive function called during firmware download - * firmware download responses on different UART drivers - * have been observed to come in bursts of different - * tty_receive and hence the logic - */ -static void kim_int_recv(struct kim_data_s *kim_gdata, - const unsigned char *data, long count) -{ - const unsigned char *ptr; - int len = 0; - unsigned char *plen; - - pr_debug("%s", __func__); - /* Decode received bytes here */ - ptr = data; - if (unlikely(ptr == NULL)) { - pr_err(" received null from TTY "); - return; - } - - while (count) { - if (kim_gdata->rx_count) { - len = min_t(unsigned int, kim_gdata->rx_count, count); - skb_put_data(kim_gdata->rx_skb, ptr, len); - kim_gdata->rx_count -= len; - count -= len; - ptr += len; - - if (kim_gdata->rx_count) - continue; - - /* Check ST RX state machine , where are we? */ - switch (kim_gdata->rx_state) { - /* Waiting for complete packet ? */ - case ST_W4_DATA: - pr_debug("Complete pkt received"); - validate_firmware_response(kim_gdata); - kim_gdata->rx_state = ST_W4_PACKET_TYPE; - kim_gdata->rx_skb = NULL; - continue; - /* Waiting for Bluetooth event header ? */ - case ST_W4_HEADER: - plen = - (unsigned char *)&kim_gdata->rx_skb->data[1]; - pr_debug("event hdr: plen 0x%02x\n", *plen); - kim_check_data_len(kim_gdata, *plen); - continue; - } /* end of switch */ - } /* end of if rx_state */ - switch (*ptr) { - /* Bluetooth event packet? 
*/ - case 0x04: - kim_gdata->rx_state = ST_W4_HEADER; - kim_gdata->rx_count = 2; - break; - default: - pr_info("unknown packet"); - ptr++; - count--; - continue; - } - ptr++; - count--; - kim_gdata->rx_skb = - alloc_skb(1024+8, GFP_ATOMIC); - if (!kim_gdata->rx_skb) { - pr_err("can't allocate mem for new packet"); - kim_gdata->rx_state = ST_W4_PACKET_TYPE; - kim_gdata->rx_count = 0; - return; - } - skb_reserve(kim_gdata->rx_skb, 8); - kim_gdata->rx_skb->cb[0] = 4; - kim_gdata->rx_skb->cb[1] = 0; - - } - return; -} - -static long read_local_version(struct kim_data_s *kim_gdata, char *bts_scr_name) -{ - unsigned short version = 0, chip = 0, min_ver = 0, maj_ver = 0; - static const char read_ver_cmd[] = { 0x01, 0x01, 0x10, 0x00 }; - long timeout; - - pr_debug("%s", __func__); - - reinit_completion(&kim_gdata->kim_rcvd); - if (4 != st_int_write(kim_gdata->core_data, read_ver_cmd, 4)) { - pr_err("kim: couldn't write 4 bytes"); - return -EIO; - } - - timeout = wait_for_completion_interruptible_timeout( - &kim_gdata->kim_rcvd, msecs_to_jiffies(CMD_RESP_TIME)); - if (timeout <= 0) { - pr_err(" waiting for ver info- timed out or received signal"); - return timeout ? 
-ERESTARTSYS : -ETIMEDOUT; - } - reinit_completion(&kim_gdata->kim_rcvd); - /* - * the positions 12 & 13 in the response buffer provide with the - * chip, major & minor numbers - */ - - version = - MAKEWORD(kim_gdata->resp_buffer[12], - kim_gdata->resp_buffer[13]); - chip = (version & 0x7C00) >> 10; - min_ver = (version & 0x007F); - maj_ver = (version & 0x0380) >> 7; - - if (version & 0x8000) - maj_ver |= 0x0008; - - sprintf(bts_scr_name, "ti-connectivity/TIInit_%d.%d.%d.bts", - chip, maj_ver, min_ver); - - /* to be accessed later via sysfs entry */ - kim_gdata->version.full = version; - kim_gdata->version.chip = chip; - kim_gdata->version.maj_ver = maj_ver; - kim_gdata->version.min_ver = min_ver; - - pr_info("%s", bts_scr_name); - return 0; -} - -static void skip_change_remote_baud(unsigned char **ptr, long *len) -{ - unsigned char *nxt_action, *cur_action; - cur_action = *ptr; - - nxt_action = cur_action + sizeof(struct bts_action) + - ((struct bts_action *) cur_action)->size; - - if (((struct bts_action *) nxt_action)->type != ACTION_WAIT_EVENT) { - pr_err("invalid action after change remote baud command"); - } else { - *ptr = *ptr + sizeof(struct bts_action) + - ((struct bts_action *)cur_action)->size; - *len = *len - (sizeof(struct bts_action) + - ((struct bts_action *)cur_action)->size); - /* warn user on not commenting these in firmware */ - pr_warn("skipping the wait event of change remote baud"); - } -} - -/* - * download_firmware - - * internal function which parses through the .bts firmware - * script file intreprets SEND, DELAY actions only as of now - */ -static long download_firmware(struct kim_data_s *kim_gdata) -{ - long err = 0; - long len = 0; - unsigned char *ptr = NULL; - unsigned char *action_ptr = NULL; - unsigned char bts_scr_name[40] = { 0 }; /* 40 char long bts scr name? 
*/ - int wr_room_space; - int cmd_size; - unsigned long timeout; - - err = read_local_version(kim_gdata, bts_scr_name); - if (err != 0) { - pr_err("kim: failed to read local ver"); - return err; - } - err = - request_firmware(&kim_gdata->fw_entry, bts_scr_name, - &kim_gdata->kim_pdev->dev); - if (unlikely((err != 0) || (kim_gdata->fw_entry->data == NULL) || - (kim_gdata->fw_entry->size == 0))) { - pr_err(" request_firmware failed(errno %ld) for %s", err, - bts_scr_name); - return -EINVAL; - } - ptr = (void *)kim_gdata->fw_entry->data; - len = kim_gdata->fw_entry->size; - /* - * bts_header to remove out magic number and - * version - */ - ptr += sizeof(struct bts_header); - len -= sizeof(struct bts_header); - - while (len > 0 && ptr) { - pr_debug(" action size %d, type %d ", - ((struct bts_action *)ptr)->size, - ((struct bts_action *)ptr)->type); - - switch (((struct bts_action *)ptr)->type) { - case ACTION_SEND_COMMAND: /* action send */ - pr_debug("S"); - action_ptr = &(((struct bts_action *)ptr)->data[0]); - if (unlikely - (((struct hci_command *)action_ptr)->opcode == - 0xFF36)) { - /* - * ignore remote change - * baud rate HCI VS command - */ - pr_warn("change remote baud" - " rate command in firmware"); - skip_change_remote_baud(&ptr, &len); - break; - } - /* - * Make sure we have enough free space in uart - * tx buffer to write current firmware command - */ - cmd_size = ((struct bts_action *)ptr)->size; - timeout = jiffies + msecs_to_jiffies(CMD_WR_TIME); - do { - wr_room_space = - st_get_uart_wr_room(kim_gdata->core_data); - if (wr_room_space < 0) { - pr_err("Unable to get free " - "space info from uart tx buffer"); - release_firmware(kim_gdata->fw_entry); - return wr_room_space; - } - mdelay(1); /* wait 1ms before checking room */ - } while ((wr_room_space < cmd_size) && - time_before(jiffies, timeout)); - - /* Timeout happened ? 
*/ - if (time_after_eq(jiffies, timeout)) { - pr_err("Timeout while waiting for free " - "free space in uart tx buffer"); - release_firmware(kim_gdata->fw_entry); - return -ETIMEDOUT; - } - /* - * reinit completion before sending for the - * relevant wait - */ - reinit_completion(&kim_gdata->kim_rcvd); - - /* - * Free space found in uart buffer, call st_int_write - * to send current firmware command to the uart tx - * buffer. - */ - err = st_int_write(kim_gdata->core_data, - ((struct bts_action_send *)action_ptr)->data, - ((struct bts_action *)ptr)->size); - if (unlikely(err < 0)) { - release_firmware(kim_gdata->fw_entry); - return err; - } - /* - * Check number of bytes written to the uart tx buffer - * and requested command write size - */ - if (err != cmd_size) { - pr_err("Number of bytes written to uart " - "tx buffer are not matching with " - "requested cmd write size"); - release_firmware(kim_gdata->fw_entry); - return -EIO; - } - break; - case ACTION_WAIT_EVENT: /* wait */ - pr_debug("W"); - err = wait_for_completion_interruptible_timeout( - &kim_gdata->kim_rcvd, - msecs_to_jiffies(CMD_RESP_TIME)); - if (err <= 0) { - pr_err("response timeout/signaled during fw download "); - /* timed out */ - release_firmware(kim_gdata->fw_entry); - return err ? 
-ERESTARTSYS : -ETIMEDOUT; - } - reinit_completion(&kim_gdata->kim_rcvd); - break; - case ACTION_DELAY: /* sleep */ - pr_info("sleep command in scr"); - action_ptr = &(((struct bts_action *)ptr)->data[0]); - mdelay(((struct bts_action_delay *)action_ptr)->msec); - break; - } - len = - len - (sizeof(struct bts_action) + - ((struct bts_action *)ptr)->size); - ptr = - ptr + sizeof(struct bts_action) + - ((struct bts_action *)ptr)->size; - } - /* fw download complete */ - release_firmware(kim_gdata->fw_entry); - return 0; -} - -/**********************************************************************/ -/* functions called from ST core */ -/* called from ST Core, when REG_IN_PROGRESS (registration in progress) - * can be because of - * 1. response to read local version - * 2. during send/recv's of firmware download - */ -void st_kim_recv(void *disc_data, const unsigned char *data, long count) -{ - struct st_data_s *st_gdata = (struct st_data_s *)disc_data; - struct kim_data_s *kim_gdata = st_gdata->kim_data; - - /* - * proceed to gather all data and distinguish read fw version response - * from other fw responses when data gathering is complete - */ - kim_int_recv(kim_gdata, data, count); - return; -} - -/* - * to signal completion of line discipline installation - * called from ST Core, upon tty_open - */ -void st_kim_complete(void *kim_data) -{ - struct kim_data_s *kim_gdata = (struct kim_data_s *)kim_data; - complete(&kim_gdata->ldisc_installed); -} - -/* - * st_kim_start - called from ST Core upon 1st registration - * This involves toggling the chip enable gpio, reading - * the firmware version from chip, forming the fw file name - * based on the chip version, requesting the fw, parsing it - * and perform download(send/recv). 
- */ -long st_kim_start(void *kim_data) -{ - long err = 0; - long retry = POR_RETRY_COUNT; - struct ti_st_plat_data *pdata; - struct kim_data_s *kim_gdata = (struct kim_data_s *)kim_data; - - pr_info(" %s", __func__); - pdata = kim_gdata->kim_pdev->dev.platform_data; - - do { - /* platform specific enabling code here */ - if (pdata->chip_enable) - pdata->chip_enable(kim_gdata); - - /* Configure BT nShutdown to HIGH state */ - gpio_set_value_cansleep(kim_gdata->nshutdown, GPIO_LOW); - mdelay(5); /* FIXME: a proper toggle */ - gpio_set_value_cansleep(kim_gdata->nshutdown, GPIO_HIGH); - mdelay(100); - /* re-initialize the completion */ - reinit_completion(&kim_gdata->ldisc_installed); - /* send notification to UIM */ - kim_gdata->ldisc_install = 1; - pr_info("ldisc_install = 1"); - sysfs_notify(&kim_gdata->kim_pdev->dev.kobj, - NULL, "install"); - /* wait for ldisc to be installed */ - err = wait_for_completion_interruptible_timeout( - &kim_gdata->ldisc_installed, msecs_to_jiffies(LDISC_TIME)); - if (!err) { - /* - * ldisc installation timeout, - * flush uart, power cycle BT_EN - */ - pr_err("ldisc installation timeout"); - err = st_kim_stop(kim_gdata); - continue; - } else { - /* ldisc installed now */ - pr_info("line discipline installed"); - err = download_firmware(kim_gdata); - if (err != 0) { - /* - * ldisc installed but fw download failed, - * flush uart & power cycle BT_EN - */ - pr_err("download firmware failed"); - err = st_kim_stop(kim_gdata); - continue; - } else { /* on success don't retry */ - break; - } - } - } while (retry--); - return err; -} - -/* - * st_kim_stop - stop communication with chip. - * This can be called from ST Core/KIM, on the- - * (a) last un-register when chip need not be powered there-after, - * (b) upon failure to either install ldisc or download firmware. - * The function is responsible to (a) notify UIM about un-installation, - * (b) flush UART if the ldisc was installed. - * (c) reset BT_EN - pull down nshutdown at the end. 
- * (d) invoke platform's chip disabling routine. - */ -long st_kim_stop(void *kim_data) -{ - long err = 0; - struct kim_data_s *kim_gdata = (struct kim_data_s *)kim_data; - struct ti_st_plat_data *pdata = - kim_gdata->kim_pdev->dev.platform_data; - struct tty_struct *tty = kim_gdata->core_data->tty; - - reinit_completion(&kim_gdata->ldisc_installed); - - if (tty) { /* can be called before ldisc is installed */ - /* Flush any pending characters in the driver and discipline. */ - tty_ldisc_flush(tty); - tty_driver_flush_buffer(tty); - } - - /* send uninstall notification to UIM */ - pr_info("ldisc_install = 0"); - kim_gdata->ldisc_install = 0; - sysfs_notify(&kim_gdata->kim_pdev->dev.kobj, NULL, "install"); - - /* wait for ldisc to be un-installed */ - err = wait_for_completion_interruptible_timeout( - &kim_gdata->ldisc_installed, msecs_to_jiffies(LDISC_TIME)); - if (!err) { /* timeout */ - pr_err(" timed out waiting for ldisc to be un-installed"); - err = -ETIMEDOUT; - } - - /* By default configure BT nShutdown to LOW state */ - gpio_set_value_cansleep(kim_gdata->nshutdown, GPIO_LOW); - mdelay(1); - gpio_set_value_cansleep(kim_gdata->nshutdown, GPIO_HIGH); - mdelay(1); - gpio_set_value_cansleep(kim_gdata->nshutdown, GPIO_LOW); - - /* platform specific disable */ - if (pdata->chip_disable) - pdata->chip_disable(kim_gdata); - return err; -} - -/**********************************************************************/ -/* functions called from subsystems */ -/* called when debugfs entry is read from */ - -static int version_show(struct seq_file *s, void *unused) -{ - struct kim_data_s *kim_gdata = (struct kim_data_s *)s->private; - seq_printf(s, "%04X %d.%d.%d\n", kim_gdata->version.full, - kim_gdata->version.chip, kim_gdata->version.maj_ver, - kim_gdata->version.min_ver); - return 0; -} - -static int list_show(struct seq_file *s, void *unused) -{ - struct kim_data_s *kim_gdata = (struct kim_data_s *)s->private; - kim_st_list_protocols(kim_gdata->core_data, s); - return 
0; -} - -static ssize_t show_install(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct kim_data_s *kim_data = dev_get_drvdata(dev); - return sprintf(buf, "%d\n", kim_data->ldisc_install); -} - -#ifdef DEBUG -static ssize_t store_dev_name(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) -{ - struct kim_data_s *kim_data = dev_get_drvdata(dev); - pr_debug("storing dev name >%s<", buf); - strncpy(kim_data->dev_name, buf, count); - pr_debug("stored dev name >%s<", kim_data->dev_name); - return count; -} - -static ssize_t store_baud_rate(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) -{ - struct kim_data_s *kim_data = dev_get_drvdata(dev); - pr_debug("storing baud rate >%s<", buf); - sscanf(buf, "%ld", &kim_data->baud_rate); - pr_debug("stored baud rate >%ld<", kim_data->baud_rate); - return count; -} -#endif /* if DEBUG */ - -static ssize_t show_dev_name(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct kim_data_s *kim_data = dev_get_drvdata(dev); - return sprintf(buf, "%s\n", kim_data->dev_name); -} - -static ssize_t show_baud_rate(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct kim_data_s *kim_data = dev_get_drvdata(dev); - return sprintf(buf, "%d\n", kim_data->baud_rate); -} - -static ssize_t show_flow_cntrl(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct kim_data_s *kim_data = dev_get_drvdata(dev); - return sprintf(buf, "%d\n", kim_data->flow_cntrl); -} - -/* structures specific for sysfs entries */ -static struct kobj_attribute ldisc_install = -__ATTR(install, 0444, (void *)show_install, NULL); - -static struct kobj_attribute uart_dev_name = -#ifdef DEBUG /* TODO: move this to debug-fs if possible */ -__ATTR(dev_name, 0644, (void *)show_dev_name, (void *)store_dev_name); -#else -__ATTR(dev_name, 0444, (void *)show_dev_name, NULL); -#endif - -static struct kobj_attribute uart_baud_rate = 
-#ifdef DEBUG /* TODO: move to debugfs */ -__ATTR(baud_rate, 0644, (void *)show_baud_rate, (void *)store_baud_rate); -#else -__ATTR(baud_rate, 0444, (void *)show_baud_rate, NULL); -#endif - -static struct kobj_attribute uart_flow_cntrl = -__ATTR(flow_cntrl, 0444, (void *)show_flow_cntrl, NULL); - -static struct attribute *uim_attrs[] = { - &ldisc_install.attr, - &uart_dev_name.attr, - &uart_baud_rate.attr, - &uart_flow_cntrl.attr, - NULL, -}; - -static const struct attribute_group uim_attr_grp = { - .attrs = uim_attrs, -}; - -/* - * st_kim_ref - reference the core's data - * This references the per-ST platform device in the arch/xx/ - * board-xx.c file. - * This would enable multiple such platform devices to exist - * on a given platform - */ -void st_kim_ref(struct st_data_s **core_data, int id) -{ - struct platform_device *pdev; - struct kim_data_s *kim_gdata; - /* get kim_gdata reference from platform device */ - pdev = st_get_plat_device(id); - if (!pdev) - goto err; - kim_gdata = platform_get_drvdata(pdev); - if (!kim_gdata) - goto err; - - *core_data = kim_gdata->core_data; - return; -err: - *core_data = NULL; -} - -DEFINE_SHOW_ATTRIBUTE(version); -DEFINE_SHOW_ATTRIBUTE(list); - -/**********************************************************************/ -/* functions called from platform device driver subsystem - * need to have a relevant platform device entry in the platform's - * board-*.c file - */ - -static struct dentry *kim_debugfs_dir; -static int kim_probe(struct platform_device *pdev) -{ - struct kim_data_s *kim_gdata; - struct ti_st_plat_data *pdata = pdev->dev.platform_data; - int err; - - if ((pdev->id != -1) && (pdev->id < MAX_ST_DEVICES)) { - /* multiple devices could exist */ - st_kim_devices[pdev->id] = pdev; - } else { - /* platform's sure about existence of 1 device */ - st_kim_devices[0] = pdev; - } - - kim_gdata = kzalloc(sizeof(struct kim_data_s), GFP_KERNEL); - if (!kim_gdata) { - pr_err("no mem to allocate"); - return -ENOMEM; - } - 
platform_set_drvdata(pdev, kim_gdata); - - err = st_core_init(&kim_gdata->core_data); - if (err != 0) { - pr_err(" ST core init failed"); - err = -EIO; - goto err_core_init; - } - /* refer to itself */ - kim_gdata->core_data->kim_data = kim_gdata; - - /* Claim the chip enable nShutdown gpio from the system */ - kim_gdata->nshutdown = pdata->nshutdown_gpio; - err = gpio_request(kim_gdata->nshutdown, "kim"); - if (unlikely(err)) { - pr_err(" gpio %d request failed ", kim_gdata->nshutdown); - goto err_sysfs_group; - } - - /* Configure nShutdown GPIO as output=0 */ - err = gpio_direction_output(kim_gdata->nshutdown, 0); - if (unlikely(err)) { - pr_err(" unable to configure gpio %d", kim_gdata->nshutdown); - goto err_sysfs_group; - } - /* get reference of pdev for request_firmware */ - kim_gdata->kim_pdev = pdev; - init_completion(&kim_gdata->kim_rcvd); - init_completion(&kim_gdata->ldisc_installed); - - err = sysfs_create_group(&pdev->dev.kobj, &uim_attr_grp); - if (err) { - pr_err("failed to create sysfs entries"); - goto err_sysfs_group; - } - - /* copying platform data */ - strncpy(kim_gdata->dev_name, pdata->dev_name, UART_DEV_NAME_LEN); - kim_gdata->flow_cntrl = pdata->flow_cntrl; - kim_gdata->baud_rate = pdata->baud_rate; - pr_info("sysfs entries created\n"); - - kim_debugfs_dir = debugfs_create_dir("ti-st", NULL); - - debugfs_create_file("version", S_IRUGO, kim_debugfs_dir, - kim_gdata, &version_fops); - debugfs_create_file("protocols", S_IRUGO, kim_debugfs_dir, - kim_gdata, &list_fops); - return 0; - -err_sysfs_group: - st_core_exit(kim_gdata->core_data); - -err_core_init: - kfree(kim_gdata); - - return err; -} - -static int kim_remove(struct platform_device *pdev) -{ - /* free the GPIOs requested */ - struct ti_st_plat_data *pdata = pdev->dev.platform_data; - struct kim_data_s *kim_gdata; - - kim_gdata = platform_get_drvdata(pdev); - - /* - * Free the Bluetooth/FM/GPIO - * nShutdown gpio from the system - */ - gpio_free(pdata->nshutdown_gpio); - 
pr_info("nshutdown GPIO Freed"); - - debugfs_remove_recursive(kim_debugfs_dir); - sysfs_remove_group(&pdev->dev.kobj, &uim_attr_grp); - pr_info("sysfs entries removed"); - - kim_gdata->kim_pdev = NULL; - st_core_exit(kim_gdata->core_data); - - kfree(kim_gdata); - kim_gdata = NULL; - return 0; -} - -static int kim_suspend(struct platform_device *pdev, pm_message_t state) -{ - struct ti_st_plat_data *pdata = pdev->dev.platform_data; - - if (pdata->suspend) - return pdata->suspend(pdev, state); - - return 0; -} - -static int kim_resume(struct platform_device *pdev) -{ - struct ti_st_plat_data *pdata = pdev->dev.platform_data; - - if (pdata->resume) - return pdata->resume(pdev); - - return 0; -} - -/**********************************************************************/ -/* entry point for ST KIM module, called in from ST Core */ -static struct platform_driver kim_platform_driver = { - .probe = kim_probe, - .remove = kim_remove, - .suspend = kim_suspend, - .resume = kim_resume, - .driver = { - .name = "kim", - }, -}; - -module_platform_driver(kim_platform_driver); - -MODULE_AUTHOR("Pavan Savoy <pavan_savoy@ti.com>"); -MODULE_DESCRIPTION("Shared Transport Driver for TI BT/FM/GPS combo chips "); -MODULE_LICENSE("GPL"); diff --git a/drivers/misc/ti-st/st_ll.c b/drivers/misc/ti-st/st_ll.c deleted file mode 100644 index 07406140d277..000000000000 --- a/drivers/misc/ti-st/st_ll.c +++ /dev/null @@ -1,156 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Shared Transport driver - * HCI-LL module responsible for TI proprietary HCI_LL protocol - * Copyright (C) 2009-2010 Texas Instruments - * Author: Pavan Savoy <pavan_savoy@ti.com> - */ - -#define pr_fmt(fmt) "(stll) :" fmt -#include <linux/skbuff.h> -#include <linux/module.h> -#include <linux/platform_device.h> -#include <linux/ti_wilink_st.h> - -/**********************************************************************/ -/* internal functions */ -static void send_ll_cmd(struct st_data_s *st_data, - unsigned char cmd) -{ 
- - pr_debug("%s: writing %x", __func__, cmd); - st_int_write(st_data, &cmd, 1); - return; -} - -static void ll_device_want_to_sleep(struct st_data_s *st_data) -{ - struct kim_data_s *kim_data; - struct ti_st_plat_data *pdata; - - pr_debug("%s", __func__); - /* sanity check */ - if (st_data->ll_state != ST_LL_AWAKE) - pr_err("ERR hcill: ST_LL_GO_TO_SLEEP_IND" - "in state %ld", st_data->ll_state); - - send_ll_cmd(st_data, LL_SLEEP_ACK); - /* update state */ - st_data->ll_state = ST_LL_ASLEEP; - - /* communicate to platform about chip asleep */ - kim_data = st_data->kim_data; - pdata = kim_data->kim_pdev->dev.platform_data; - if (pdata->chip_asleep) - pdata->chip_asleep(NULL); -} - -static void ll_device_want_to_wakeup(struct st_data_s *st_data) -{ - struct kim_data_s *kim_data; - struct ti_st_plat_data *pdata; - - /* diff actions in diff states */ - switch (st_data->ll_state) { - case ST_LL_ASLEEP: - send_ll_cmd(st_data, LL_WAKE_UP_ACK); /* send wake_ack */ - break; - case ST_LL_ASLEEP_TO_AWAKE: - /* duplicate wake_ind */ - pr_err("duplicate wake_ind while waiting for Wake ack"); - break; - case ST_LL_AWAKE: - /* duplicate wake_ind */ - pr_err("duplicate wake_ind already AWAKE"); - break; - case ST_LL_AWAKE_TO_ASLEEP: - /* duplicate wake_ind */ - pr_err("duplicate wake_ind"); - break; - } - /* update state */ - st_data->ll_state = ST_LL_AWAKE; - - /* communicate to platform about chip wakeup */ - kim_data = st_data->kim_data; - pdata = kim_data->kim_pdev->dev.platform_data; - if (pdata->chip_awake) - pdata->chip_awake(NULL); -} - -/**********************************************************************/ -/* functions invoked by ST Core */ - -/* called when ST Core wants to - * enable ST LL */ -void st_ll_enable(struct st_data_s *ll) -{ - ll->ll_state = ST_LL_AWAKE; -} - -/* called when ST Core /local module wants to - * disable ST LL */ -void st_ll_disable(struct st_data_s *ll) -{ - ll->ll_state = ST_LL_INVALID; -} - -/* called when ST Core wants to update the state 
*/ -void st_ll_wakeup(struct st_data_s *ll) -{ - if (likely(ll->ll_state != ST_LL_AWAKE)) { - send_ll_cmd(ll, LL_WAKE_UP_IND); /* WAKE_IND */ - ll->ll_state = ST_LL_ASLEEP_TO_AWAKE; - } else { - /* don't send the duplicate wake_indication */ - pr_err(" Chip already AWAKE "); - } -} - -/* called when ST Core wants the state */ -unsigned long st_ll_getstate(struct st_data_s *ll) -{ - pr_debug(" returning state %ld", ll->ll_state); - return ll->ll_state; -} - -/* called from ST Core, when a PM related packet arrives */ -unsigned long st_ll_sleep_state(struct st_data_s *st_data, - unsigned char cmd) -{ - switch (cmd) { - case LL_SLEEP_IND: /* sleep ind */ - pr_debug("sleep indication recvd"); - ll_device_want_to_sleep(st_data); - break; - case LL_SLEEP_ACK: /* sleep ack */ - pr_err("sleep ack rcvd: host shouldn't"); - break; - case LL_WAKE_UP_IND: /* wake ind */ - pr_debug("wake indication recvd"); - ll_device_want_to_wakeup(st_data); - break; - case LL_WAKE_UP_ACK: /* wake ack */ - pr_debug("wake ack rcvd"); - st_data->ll_state = ST_LL_AWAKE; - break; - default: - pr_err(" unknown input/state "); - return -EINVAL; - } - return 0; -} - -/* Called from ST CORE to initialize ST LL */ -long st_ll_init(struct st_data_s *ll) -{ - /* set state to invalid */ - ll->ll_state = ST_LL_INVALID; - return 0; -} - -/* Called from ST CORE to de-initialize ST LL */ -long st_ll_deinit(struct st_data_s *ll) -{ - return 0; -} diff --git a/drivers/misc/ti_fpc202.c b/drivers/misc/ti_fpc202.c new file mode 100644 index 000000000000..7964e46c7448 --- /dev/null +++ b/drivers/misc/ti_fpc202.c @@ -0,0 +1,435 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * ti_fpc202.c - FPC202 Dual Port Controller driver + * + * Copyright (C) 2024 Bootlin + * + */ + +#include <linux/cleanup.h> +#include <linux/err.h> +#include <linux/i2c.h> +#include <linux/i2c-atr.h> +#include <linux/gpio/consumer.h> +#include <linux/gpio/driver.h> +#include <linux/module.h> + +#define FPC202_NUM_PORTS 2 +#define 
FPC202_ALIASES_PER_PORT 2 + +/* + * GPIO: port mapping + * + * 0: P0_S0_IN_A + * 1: P0_S1_IN_A + * 2: P1_S0_IN_A + * 3: P1_S1_IN_A + * 4: P0_S0_IN_B + * ... + * 8: P0_S0_IN_C + * ... + * 12: P0_S0_OUT_A + * ... + * 16: P0_S0_OUT_B + * ... + * 19: P1_S1_OUT_B + * + */ + +#define FPC202_GPIO_COUNT 20 +#define FPC202_GPIO_P0_S0_IN_B 4 +#define FPC202_GPIO_P0_S0_OUT_A 12 + +#define FPC202_REG_IN_A_INT 0x6 +#define FPC202_REG_IN_C_IN_B 0x7 +#define FPC202_REG_OUT_A_OUT_B 0x8 + +#define FPC202_REG_OUT_A_OUT_B_VAL 0xa + +#define FPC202_REG_MOD_DEV(port, dev) (0xb4 + ((port) * 4) + (dev)) +#define FPC202_REG_AUX_DEV(port, dev) (0xb6 + ((port) * 4) + (dev)) + +/* + * The FPC202 doesn't support turning off address translation on a single port. + * So just set an invalid I2C address as the translation target when no client + * address is attached. + */ +#define FPC202_REG_DEV_INVALID 0 + +/* Even aliases are assigned to device 0 and odd aliases to device 1 */ +#define fpc202_dev_num_from_alias(alias) ((alias) % 2) + +struct fpc202_priv { + struct i2c_client *client; + struct i2c_atr *atr; + struct gpio_desc *en_gpio; + struct gpio_chip gpio; + + /* Lock REG_MOD/AUX_DEV and addr_caches during attach/detach */ + struct mutex reg_dev_lock; + + /* Cached device addresses for both ports and their devices */ + u8 addr_caches[2][2]; + + /* Keep track of which ports were probed */ + DECLARE_BITMAP(probed_ports, FPC202_NUM_PORTS); +}; + +static void fpc202_fill_alias_table(struct i2c_client *client, u16 *aliases, int port_id) +{ + u16 first_alias; + int i; + + /* + * There is a predefined list of aliases for each FPC202 I2C + * self-address. This allows daisy-chained FPC202 units to + * automatically take on different sets of aliases. + * Each port of an FPC202 unit is assigned two aliases from this list. 
+ */ + first_alias = 0x10 + 4 * port_id + 8 * ((u16)client->addr - 2); + + for (i = 0; i < FPC202_ALIASES_PER_PORT; i++) + aliases[i] = first_alias + i; +} + +static int fpc202_gpio_get_dir(int offset) +{ + return offset < FPC202_GPIO_P0_S0_OUT_A ? GPIO_LINE_DIRECTION_IN : GPIO_LINE_DIRECTION_OUT; +} + +static int fpc202_read(struct fpc202_priv *priv, u8 reg) +{ + int val; + + val = i2c_smbus_read_byte_data(priv->client, reg); + return val; +} + +static int fpc202_write(struct fpc202_priv *priv, u8 reg, u8 value) +{ + return i2c_smbus_write_byte_data(priv->client, reg, value); +} + +static void fpc202_set_enable(struct fpc202_priv *priv, int enable) +{ + if (!priv->en_gpio) + return; + + gpiod_set_value(priv->en_gpio, enable); +} + +static int fpc202_gpio_set(struct gpio_chip *chip, unsigned int offset, + int value) +{ + struct fpc202_priv *priv = gpiochip_get_data(chip); + int ret; + u8 val; + + ret = fpc202_read(priv, FPC202_REG_OUT_A_OUT_B_VAL); + if (ret < 0) { + dev_err(&priv->client->dev, "Failed to set GPIO %d value! 
err %d\n", offset, ret); + return ret; + } + + val = (u8)ret; + + if (value) + val |= BIT(offset - FPC202_GPIO_P0_S0_OUT_A); + else + val &= ~BIT(offset - FPC202_GPIO_P0_S0_OUT_A); + + return fpc202_write(priv, FPC202_REG_OUT_A_OUT_B_VAL, val); +} + +static int fpc202_gpio_get(struct gpio_chip *chip, unsigned int offset) +{ + struct fpc202_priv *priv = gpiochip_get_data(chip); + u8 reg, bit; + int ret; + + if (offset < FPC202_GPIO_P0_S0_IN_B) { + reg = FPC202_REG_IN_A_INT; + bit = BIT(4 + offset); + } else if (offset < FPC202_GPIO_P0_S0_OUT_A) { + reg = FPC202_REG_IN_C_IN_B; + bit = BIT(offset - FPC202_GPIO_P0_S0_IN_B); + } else { + reg = FPC202_REG_OUT_A_OUT_B_VAL; + bit = BIT(offset - FPC202_GPIO_P0_S0_OUT_A); + } + + ret = fpc202_read(priv, reg); + if (ret < 0) + return ret; + + return !!(((u8)ret) & bit); +} + +static int fpc202_gpio_direction_input(struct gpio_chip *chip, unsigned int offset) +{ + if (fpc202_gpio_get_dir(offset) == GPIO_LINE_DIRECTION_OUT) + return -EINVAL; + + return 0; +} + +static int fpc202_gpio_direction_output(struct gpio_chip *chip, unsigned int offset, + int value) +{ + struct fpc202_priv *priv = gpiochip_get_data(chip); + int ret; + u8 val; + + if (fpc202_gpio_get_dir(offset) == GPIO_LINE_DIRECTION_IN) + return -EINVAL; + + fpc202_gpio_set(chip, offset, value); + + ret = fpc202_read(priv, FPC202_REG_OUT_A_OUT_B); + if (ret < 0) + return ret; + + val = (u8)ret | BIT(offset - FPC202_GPIO_P0_S0_OUT_A); + + return fpc202_write(priv, FPC202_REG_OUT_A_OUT_B, val); +} + +/* + * Set the translation table entry associated with a port and device number. + * + * Each downstream port of the FPC202 has two fixed aliases corresponding to + * device numbers 0 and 1. If one of these aliases is found in an incoming I2C + * transfer, it will be translated to the address given by the corresponding + * translation table entry. 
+ */ +static int fpc202_write_dev_addr(struct fpc202_priv *priv, u32 port_id, int dev_num, u16 addr) +{ + int ret, reg_mod, reg_aux; + u8 val; + + guard(mutex)(&priv->reg_dev_lock); + + reg_mod = FPC202_REG_MOD_DEV(port_id, dev_num); + reg_aux = FPC202_REG_AUX_DEV(port_id, dev_num); + val = addr & 0x7f; + + ret = fpc202_write(priv, reg_mod, val); + if (ret) + return ret; + + /* + * The FPC202 datasheet is unclear about the role of the AUX registers. + * Empirically, writing to them as well seems to be necessary for + * address translation to function properly. + */ + ret = fpc202_write(priv, reg_aux, val); + + priv->addr_caches[port_id][dev_num] = val; + + return ret; +} + +static int fpc202_attach_addr(struct i2c_atr *atr, u32 chan_id, + u16 addr, u16 alias) +{ + struct fpc202_priv *priv = i2c_atr_get_driver_data(atr); + + dev_dbg(&priv->client->dev, "attaching address 0x%02x to alias 0x%02x\n", addr, alias); + + return fpc202_write_dev_addr(priv, chan_id, fpc202_dev_num_from_alias(alias), addr); +} + +static void fpc202_detach_addr(struct i2c_atr *atr, u32 chan_id, + u16 addr) +{ + struct fpc202_priv *priv = i2c_atr_get_driver_data(atr); + int dev_num, reg_mod, val; + + for (dev_num = 0; dev_num < 2; dev_num++) { + reg_mod = FPC202_REG_MOD_DEV(chan_id, dev_num); + + mutex_lock(&priv->reg_dev_lock); + + val = priv->addr_caches[chan_id][dev_num]; + + mutex_unlock(&priv->reg_dev_lock); + + if (val < 0) { + dev_err(&priv->client->dev, "failed to read register 0x%x while detaching address 0x%02x\n", + reg_mod, addr); + return; + } + + if (val == (addr & 0x7f)) { + fpc202_write_dev_addr(priv, chan_id, dev_num, FPC202_REG_DEV_INVALID); + return; + } + } +} + +static const struct i2c_atr_ops fpc202_atr_ops = { + .attach_addr = fpc202_attach_addr, + .detach_addr = fpc202_detach_addr, +}; + +static int fpc202_probe_port(struct fpc202_priv *priv, struct device_node *i2c_handle, int port_id) +{ + u16 aliases[FPC202_ALIASES_PER_PORT] = { }; + struct device *dev = 
&priv->client->dev; + struct i2c_atr_adap_desc desc = { }; + int ret = 0; + + desc.chan_id = port_id; + desc.parent = dev; + desc.bus_handle = of_fwnode_handle(i2c_handle); + desc.num_aliases = FPC202_ALIASES_PER_PORT; + + fpc202_fill_alias_table(priv->client, aliases, port_id); + desc.aliases = aliases; + + ret = i2c_atr_add_adapter(priv->atr, &desc); + if (ret) + return ret; + + set_bit(port_id, priv->probed_ports); + + ret = fpc202_write_dev_addr(priv, port_id, 0, FPC202_REG_DEV_INVALID); + if (ret) + return ret; + + return fpc202_write_dev_addr(priv, port_id, 1, FPC202_REG_DEV_INVALID); +} + +static void fpc202_remove_port(struct fpc202_priv *priv, int port_id) +{ + i2c_atr_del_adapter(priv->atr, port_id); + clear_bit(port_id, priv->probed_ports); +} + +static int fpc202_probe(struct i2c_client *client) +{ + struct device *dev = &client->dev; + struct device_node *i2c_handle; + struct fpc202_priv *priv; + int ret, port_id; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + mutex_init(&priv->reg_dev_lock); + + priv->client = client; + i2c_set_clientdata(client, priv); + + priv->en_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_HIGH); + if (IS_ERR(priv->en_gpio)) { + ret = PTR_ERR(priv->en_gpio); + dev_err(dev, "failed to fetch enable GPIO! 
err %d\n", ret); + goto destroy_mutex; + } + + priv->gpio.label = "gpio-fpc202"; + priv->gpio.base = -1; + priv->gpio.direction_input = fpc202_gpio_direction_input; + priv->gpio.direction_output = fpc202_gpio_direction_output; + priv->gpio.set = fpc202_gpio_set; + priv->gpio.get = fpc202_gpio_get; + priv->gpio.ngpio = FPC202_GPIO_COUNT; + priv->gpio.parent = dev; + priv->gpio.owner = THIS_MODULE; + + ret = gpiochip_add_data(&priv->gpio, priv); + if (ret) { + priv->gpio.parent = NULL; + dev_err(dev, "failed to add gpiochip err %d\n", ret); + goto disable_gpio; + } + + priv->atr = i2c_atr_new(client->adapter, dev, &fpc202_atr_ops, 2, 0); + if (IS_ERR(priv->atr)) { + ret = PTR_ERR(priv->atr); + dev_err(dev, "failed to create i2c atr err %d\n", ret); + goto disable_gpio; + } + + i2c_atr_set_driver_data(priv->atr, priv); + + bitmap_zero(priv->probed_ports, FPC202_NUM_PORTS); + + for_each_child_of_node(dev->of_node, i2c_handle) { + ret = of_property_read_u32(i2c_handle, "reg", &port_id); + if (ret) { + if (ret == -EINVAL) + continue; + + dev_err(dev, "failed to read 'reg' property of child node, err %d\n", ret); + goto unregister_chans; + } + + if (port_id > FPC202_NUM_PORTS) { + dev_err(dev, "port ID %d is out of range!\n", port_id); + ret = -EINVAL; + goto unregister_chans; + } + + ret = fpc202_probe_port(priv, i2c_handle, port_id); + if (ret) { + dev_err(dev, "Failed to probe port %d, err %d\n", port_id, ret); + goto unregister_chans; + } + } + + goto out; + +unregister_chans: + for_each_set_bit(port_id, priv->probed_ports, FPC202_NUM_PORTS) + fpc202_remove_port(priv, port_id); + + i2c_atr_delete(priv->atr); +disable_gpio: + fpc202_set_enable(priv, 0); + gpiochip_remove(&priv->gpio); +destroy_mutex: + mutex_destroy(&priv->reg_dev_lock); +out: + return ret; +} + +static void fpc202_remove(struct i2c_client *client) +{ + struct fpc202_priv *priv = i2c_get_clientdata(client); + int port_id; + + for_each_set_bit(port_id, priv->probed_ports, FPC202_NUM_PORTS) + 
fpc202_remove_port(priv, port_id); + + mutex_destroy(&priv->reg_dev_lock); + + i2c_atr_delete(priv->atr); + + fpc202_set_enable(priv, 0); + gpiochip_remove(&priv->gpio); +} + +static const struct of_device_id fpc202_of_match[] = { + { .compatible = "ti,fpc202" }, + {} +}; +MODULE_DEVICE_TABLE(of, fpc202_of_match); + +static struct i2c_driver fpc202_driver = { + .driver = { + .name = "fpc202", + .of_match_table = fpc202_of_match, + }, + .probe = fpc202_probe, + .remove = fpc202_remove, +}; + +module_i2c_driver(fpc202_driver); + +MODULE_AUTHOR("Romain Gantois <romain.gantois@bootlin.com>"); +MODULE_DESCRIPTION("TI FPC202 Dual Port Controller driver"); +MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS("I2C_ATR"); diff --git a/drivers/misc/tifm_7xx1.c b/drivers/misc/tifm_7xx1.c index 7dd86a9858ab..1d54680d6ed2 100644 --- a/drivers/misc/tifm_7xx1.c +++ b/drivers/misc/tifm_7xx1.c @@ -229,7 +229,7 @@ static int __maybe_unused tifm_7xx1_resume(struct device *dev_d) struct pci_dev *dev = to_pci_dev(dev_d); struct tifm_adapter *fm = pci_get_drvdata(dev); int rc; - unsigned long timeout; + unsigned long time_left; unsigned int good_sockets = 0, bad_sockets = 0; unsigned long flags; /* Maximum number of entries is 4 */ @@ -265,8 +265,8 @@ static int __maybe_unused tifm_7xx1_resume(struct device *dev_d) if (good_sockets) { fm->finish_me = &finish_resume; spin_unlock_irqrestore(&fm->lock, flags); - timeout = wait_for_completion_timeout(&finish_resume, HZ); - dev_dbg(&dev->dev, "wait returned %lu\n", timeout); + time_left = wait_for_completion_timeout(&finish_resume, HZ); + dev_dbg(&dev->dev, "wait returned %lu\n", time_left); writel(TIFM_IRQ_FIFOMASK(good_sockets) | TIFM_IRQ_CARDMASK(good_sockets), fm->addr + FM_CLEAR_INTERRUPT_ENABLE); diff --git a/drivers/misc/tifm_core.c b/drivers/misc/tifm_core.c index eee9b6581604..12355d34e193 100644 --- a/drivers/misc/tifm_core.c +++ b/drivers/misc/tifm_core.c @@ -38,11 +38,11 @@ static int tifm_dev_match(struct tifm_dev *sock, struct 
tifm_device_id *id) return 0; } -static int tifm_bus_match(struct device *dev, struct device_driver *drv) +static int tifm_bus_match(struct device *dev, const struct device_driver *drv) { struct tifm_dev *sock = container_of(dev, struct tifm_dev, dev); - struct tifm_driver *fm_drv = container_of(drv, struct tifm_driver, - driver); + const struct tifm_driver *fm_drv = container_of_const(drv, struct tifm_driver, + driver); struct tifm_device_id *ids = fm_drv->id_table; if (ids) { @@ -148,7 +148,7 @@ static struct attribute *tifm_dev_attrs[] = { }; ATTRIBUTE_GROUPS(tifm_dev); -static struct bus_type tifm_bus_type = { +static const struct bus_type tifm_bus_type = { .name = "tifm", .dev_groups = tifm_dev_groups, .match = tifm_bus_match, @@ -166,7 +166,7 @@ static void tifm_free(struct device *dev) kfree(fm); } -static struct class tifm_adapter_class = { +static const struct class tifm_adapter_class = { .name = "tifm_adapter", .dev_release = tifm_free }; diff --git a/drivers/misc/tps6594-esm.c b/drivers/misc/tps6594-esm.c index 05e2c151e632..2fbd3fbdf713 100644 --- a/drivers/misc/tps6594-esm.c +++ b/drivers/misc/tps6594-esm.c @@ -56,8 +56,7 @@ static int tps6594_esm_probe(struct platform_device *pdev) for (i = 0; i < pdev->num_resources; i++) { irq = platform_get_irq_byname(pdev, pdev->resource[i].name); if (irq < 0) - return dev_err_probe(dev, irq, "Failed to get %s irq\n", - pdev->resource[i].name); + return irq; ret = devm_request_threaded_irq(dev, irq, NULL, tps6594_esm_isr, IRQF_ONESHOT, @@ -82,7 +81,7 @@ static int tps6594_esm_probe(struct platform_device *pdev) return 0; } -static int tps6594_esm_remove(struct platform_device *pdev) +static void tps6594_esm_remove(struct platform_device *pdev) { struct tps6594 *tps = dev_get_drvdata(pdev->dev.parent); struct device *dev = &pdev->dev; @@ -103,8 +102,6 @@ static int tps6594_esm_remove(struct platform_device *pdev) out: pm_runtime_put_sync(dev); pm_runtime_disable(dev); - - return ret; } static int 
tps6594_esm_suspend(struct device *dev) diff --git a/drivers/misc/tps6594-pfsm.c b/drivers/misc/tps6594-pfsm.c index 5223d1580807..44fa81d6cec2 100644 --- a/drivers/misc/tps6594-pfsm.c +++ b/drivers/misc/tps6594-pfsm.c @@ -1,6 +1,12 @@ // SPDX-License-Identifier: GPL-2.0 /* - * PFSM (Pre-configurable Finite State Machine) driver for TI TPS6594/TPS6593/LP8764 PMICs + * PFSM (Pre-configurable Finite State Machine) driver for the following + * PMICs: + * - LP8764 + * - TPS65224 + * - TPS652G1 + * - TPS6594 + * - TPS6593 * * Copyright (C) 2023 BayLibre Incorporated - https://www.baylibre.com/ */ @@ -39,10 +45,12 @@ * * @miscdev: misc device infos * @regmap: regmap for accessing the device registers + * @chip_id: chip identifier of the device */ struct tps6594_pfsm { struct miscdevice miscdev; struct regmap *regmap; + unsigned long chip_id; }; static ssize_t tps6594_pfsm_read(struct file *f, char __user *buf, @@ -133,21 +141,29 @@ static long tps6594_pfsm_ioctl(struct file *f, unsigned int cmd, unsigned long a struct tps6594_pfsm *pfsm = TPS6594_FILE_TO_PFSM(f); struct pmic_state_opt state_opt; void __user *argp = (void __user *)arg; + unsigned int regmap_reg, mask; int ret = -ENOIOCTLCMD; switch (cmd) { case PMIC_GOTO_STANDBY: - /* Disable LP mode */ - ret = regmap_clear_bits(pfsm->regmap, TPS6594_REG_RTC_CTRL_2, - TPS6594_BIT_LP_STANDBY_SEL); - if (ret) - return ret; + /* Disable LP mode on TPS6594 Family PMIC */ + if (pfsm->chip_id != TPS65224 && pfsm->chip_id != TPS652G1) { + ret = regmap_clear_bits(pfsm->regmap, TPS6594_REG_RTC_CTRL_2, + TPS6594_BIT_LP_STANDBY_SEL); + + if (ret) + return ret; + } /* Force trigger */ ret = regmap_write_bits(pfsm->regmap, TPS6594_REG_FSM_I2C_TRIGGERS, TPS6594_BIT_TRIGGER_I2C(0), TPS6594_BIT_TRIGGER_I2C(0)); break; case PMIC_GOTO_LP_STANDBY: + /* TPS65224/TPS652G1 does not support LP STANDBY */ + if (pfsm->chip_id == TPS65224 || pfsm->chip_id == TPS652G1) + return ret; + /* Enable LP mode */ ret = regmap_set_bits(pfsm->regmap, 
TPS6594_REG_RTC_CTRL_2, TPS6594_BIT_LP_STANDBY_SEL); @@ -169,6 +185,10 @@ static long tps6594_pfsm_ioctl(struct file *f, unsigned int cmd, unsigned long a TPS6594_BIT_NSLEEP1B | TPS6594_BIT_NSLEEP2B); break; case PMIC_SET_MCU_ONLY_STATE: + /* TPS65224/TPS652G1 does not support MCU_ONLY_STATE */ + if (pfsm->chip_id == TPS65224 || pfsm->chip_id == TPS652G1) + return ret; + if (copy_from_user(&state_opt, argp, sizeof(state_opt))) return -EFAULT; @@ -192,14 +212,20 @@ static long tps6594_pfsm_ioctl(struct file *f, unsigned int cmd, unsigned long a return -EFAULT; /* Configure wake-up destination */ + if (pfsm->chip_id == TPS65224 || pfsm->chip_id == TPS652G1) { + regmap_reg = TPS65224_REG_STARTUP_CTRL; + mask = TPS65224_MASK_STARTUP_DEST; + } else { + regmap_reg = TPS6594_REG_RTC_CTRL_2; + mask = TPS6594_MASK_STARTUP_DEST; + } + if (state_opt.mcu_only_startup_dest) - ret = regmap_write_bits(pfsm->regmap, TPS6594_REG_RTC_CTRL_2, - TPS6594_MASK_STARTUP_DEST, - TPS6594_STARTUP_DEST_MCU_ONLY); + ret = regmap_write_bits(pfsm->regmap, regmap_reg, + mask, TPS6594_STARTUP_DEST_MCU_ONLY); else - ret = regmap_write_bits(pfsm->regmap, TPS6594_REG_RTC_CTRL_2, - TPS6594_MASK_STARTUP_DEST, - TPS6594_STARTUP_DEST_ACTIVE); + ret = regmap_write_bits(pfsm->regmap, regmap_reg, + mask, TPS6594_STARTUP_DEST_ACTIVE); if (ret) return ret; @@ -210,8 +236,14 @@ static long tps6594_pfsm_ioctl(struct file *f, unsigned int cmd, unsigned long a return ret; /* Modify NSLEEP1-2 bits */ - ret = regmap_clear_bits(pfsm->regmap, TPS6594_REG_FSM_NSLEEP_TRIGGERS, - TPS6594_BIT_NSLEEP2B); + if (pfsm->chip_id == TPS65224 || pfsm->chip_id == TPS652G1) + ret = regmap_clear_bits(pfsm->regmap, + TPS6594_REG_FSM_NSLEEP_TRIGGERS, + TPS6594_BIT_NSLEEP1B); + else + ret = regmap_clear_bits(pfsm->regmap, + TPS6594_REG_FSM_NSLEEP_TRIGGERS, + TPS6594_BIT_NSLEEP2B); break; } @@ -260,14 +292,17 @@ static int tps6594_pfsm_probe(struct platform_device *pdev) pfsm->miscdev.minor = MISC_DYNAMIC_MINOR; pfsm->miscdev.name = 
devm_kasprintf(dev, GFP_KERNEL, "pfsm-%ld-0x%02x", tps->chip_id, tps->reg); + if (!pfsm->miscdev.name) + return -ENOMEM; + pfsm->miscdev.fops = &tps6594_pfsm_fops; pfsm->miscdev.parent = dev->parent; + pfsm->chip_id = tps->chip_id; for (i = 0 ; i < pdev->num_resources ; i++) { irq = platform_get_irq_byname(pdev, pdev->resource[i].name); if (irq < 0) - return dev_err_probe(dev, irq, "Failed to get %s irq\n", - pdev->resource[i].name); + return irq; ret = devm_request_threaded_irq(dev, irq, NULL, tps6594_pfsm_isr, IRQF_ONESHOT, @@ -281,13 +316,11 @@ static int tps6594_pfsm_probe(struct platform_device *pdev) return misc_register(&pfsm->miscdev); } -static int tps6594_pfsm_remove(struct platform_device *pdev) +static void tps6594_pfsm_remove(struct platform_device *pdev) { struct tps6594_pfsm *pfsm = platform_get_drvdata(pdev); misc_deregister(&pfsm->miscdev); - - return 0; } static struct platform_driver tps6594_pfsm_driver = { diff --git a/drivers/misc/tsl2550.c b/drivers/misc/tsl2550.c index a3bc2823143e..1a7796ab3fad 100644 --- a/drivers/misc/tsl2550.c +++ b/drivers/misc/tsl2550.c @@ -185,10 +185,10 @@ static ssize_t tsl2550_store_power_state(struct device *dev, { struct i2c_client *client = to_i2c_client(dev); struct tsl2550_data *data = i2c_get_clientdata(client); - unsigned long val = simple_strtoul(buf, NULL, 10); + unsigned long val; int ret; - if (val > 1) + if (kstrtoul(buf, 10, &val) || val > 1) return -EINVAL; mutex_lock(&data->update_lock); @@ -217,10 +217,10 @@ static ssize_t tsl2550_store_operating_mode(struct device *dev, { struct i2c_client *client = to_i2c_client(dev); struct tsl2550_data *data = i2c_get_clientdata(client); - unsigned long val = simple_strtoul(buf, NULL, 10); + unsigned long val; int ret; - if (val > 1) + if (kstrtoul(buf, 10, &val) || val > 1) return -EINVAL; if (data->power_state == 0) @@ -420,7 +420,7 @@ static SIMPLE_DEV_PM_OPS(tsl2550_pm_ops, tsl2550_suspend, tsl2550_resume); #endif /* CONFIG_PM_SLEEP */ static const struct 
i2c_device_id tsl2550_id[] = { - { "tsl2550", 0 }, + { "tsl2550" }, { } }; MODULE_DEVICE_TABLE(i2c, tsl2550_id); diff --git a/drivers/misc/uacce/uacce.c b/drivers/misc/uacce/uacce.c index 930c252753a0..42e7d2a2a90c 100644 --- a/drivers/misc/uacce/uacce.c +++ b/drivers/misc/uacce/uacce.c @@ -7,10 +7,13 @@ #include <linux/slab.h> #include <linux/uacce.h> -static struct class *uacce_class; static dev_t uacce_devt; static DEFINE_XARRAY_ALLOC(uacce_xa); +static const struct class uacce_class = { + .name = UACCE_NAME, +}; + /* * If the parent driver or the device disappears, the queue state is invalid and * ops are not usable anymore. @@ -462,40 +465,6 @@ static void uacce_release(struct device *dev) kfree(uacce); } -static unsigned int uacce_enable_sva(struct device *parent, unsigned int flags) -{ - int ret; - - if (!(flags & UACCE_DEV_SVA)) - return flags; - - flags &= ~UACCE_DEV_SVA; - - ret = iommu_dev_enable_feature(parent, IOMMU_DEV_FEAT_IOPF); - if (ret) { - dev_err(parent, "failed to enable IOPF feature! ret = %pe\n", ERR_PTR(ret)); - return flags; - } - - ret = iommu_dev_enable_feature(parent, IOMMU_DEV_FEAT_SVA); - if (ret) { - dev_err(parent, "failed to enable SVA feature! 
ret = %pe\n", ERR_PTR(ret)); - iommu_dev_disable_feature(parent, IOMMU_DEV_FEAT_IOPF); - return flags; - } - - return flags | UACCE_DEV_SVA; -} - -static void uacce_disable_sva(struct uacce_device *uacce) -{ - if (!(uacce->flags & UACCE_DEV_SVA)) - return; - - iommu_dev_disable_feature(uacce->parent, IOMMU_DEV_FEAT_SVA); - iommu_dev_disable_feature(uacce->parent, IOMMU_DEV_FEAT_IOPF); -} - /** * uacce_alloc() - alloc an accelerator * @parent: pointer of uacce parent device @@ -515,8 +484,6 @@ struct uacce_device *uacce_alloc(struct device *parent, if (!uacce) return ERR_PTR(-ENOMEM); - flags = uacce_enable_sva(parent, flags); - uacce->parent = parent; uacce->flags = flags; uacce->ops = interface->ops; @@ -530,7 +497,7 @@ struct uacce_device *uacce_alloc(struct device *parent, mutex_init(&uacce->mutex); device_initialize(&uacce->dev); uacce->dev.devt = MKDEV(MAJOR(uacce_devt), uacce->dev_id); - uacce->dev.class = uacce_class; + uacce->dev.class = &uacce_class; uacce->dev.groups = uacce_dev_groups; uacce->dev.parent = uacce->parent; uacce->dev.release = uacce_release; @@ -539,7 +506,6 @@ struct uacce_device *uacce_alloc(struct device *parent, return uacce; err_with_uacce: - uacce_disable_sva(uacce); kfree(uacce); return ERR_PTR(ret); } @@ -602,9 +568,6 @@ void uacce_remove(struct uacce_device *uacce) unmap_mapping_range(q->mapping, 0, 0, 1); } - /* disable sva now since no opened queues */ - uacce_disable_sva(uacce); - if (uacce->cdev) cdev_device_del(uacce->cdev, &uacce->dev); xa_erase(&uacce_xa, uacce->dev_id); @@ -623,13 +586,13 @@ static int __init uacce_init(void) { int ret; - uacce_class = class_create(UACCE_NAME); - if (IS_ERR(uacce_class)) - return PTR_ERR(uacce_class); + ret = class_register(&uacce_class); + if (ret) + return ret; ret = alloc_chrdev_region(&uacce_devt, 0, MINORMASK, UACCE_NAME); if (ret) - class_destroy(uacce_class); + class_unregister(&uacce_class); return ret; } @@ -637,7 +600,7 @@ static int __init uacce_init(void) static __exit void 
uacce_exit(void) { unregister_chrdev_region(uacce_devt, MINORMASK); - class_destroy(uacce_class); + class_unregister(&uacce_class); } subsys_initcall(uacce_init); diff --git a/drivers/misc/vcpu_stall_detector.c b/drivers/misc/vcpu_stall_detector.c index 53b5506080e1..26166357b255 100644 --- a/drivers/misc/vcpu_stall_detector.c +++ b/drivers/misc/vcpu_stall_detector.c @@ -13,7 +13,6 @@ #include <linux/module.h> #include <linux/nmi.h> #include <linux/of.h> -#include <linux/of_device.h> #include <linux/param.h> #include <linux/percpu.h> #include <linux/platform_device.h> @@ -33,6 +32,7 @@ struct vcpu_stall_detect_config { u32 clock_freq_hz; u32 stall_timeout_sec; + int ppi_irq; void __iomem *membase; struct platform_device *dev; @@ -78,6 +78,12 @@ vcpu_stall_detect_timer_fn(struct hrtimer *hrtimer) return HRTIMER_RESTART; } +static irqreturn_t vcpu_stall_detector_irq(int irq, void *dev) +{ + panic("vCPU stall detector"); + return IRQ_HANDLED; +} + static int start_stall_detector_cpu(unsigned int cpu) { u32 ticks, ping_timeout_ms; @@ -105,8 +111,7 @@ static int start_stall_detector_cpu(unsigned int cpu) ping_timeout_ms = vcpu_stall_config.stall_timeout_sec * MSEC_PER_SEC / 2; - hrtimer_init(vcpu_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); - vcpu_hrtimer->function = vcpu_stall_detect_timer_fn; + hrtimer_setup(vcpu_hrtimer, vcpu_stall_detect_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL); vcpu_stall_detector->is_initialized = true; hrtimer_start(vcpu_hrtimer, ms_to_ktime(ping_timeout_ms), @@ -133,7 +138,7 @@ static int stop_stall_detector_cpu(unsigned int cpu) static int vcpu_stall_detect_probe(struct platform_device *pdev) { - int ret; + int ret, irq; struct resource *r; void __iomem *membase; u32 clock_freq_hz = VCPU_STALL_DEFAULT_CLOCK_HZ; @@ -170,9 +175,22 @@ static int vcpu_stall_detect_probe(struct platform_device *pdev) vcpu_stall_config = (struct vcpu_stall_detect_config) { .membase = membase, .clock_freq_hz = clock_freq_hz, - .stall_timeout_sec = 
stall_timeout_sec + .stall_timeout_sec = stall_timeout_sec, + .ppi_irq = -1, }; + irq = platform_get_irq_optional(pdev, 0); + if (irq > 0) { + ret = request_percpu_irq(irq, + vcpu_stall_detector_irq, + "vcpu_stall_detector", + vcpu_stall_detectors); + if (ret) + goto err; + + vcpu_stall_config.ppi_irq = irq; + } + ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "virt/vcpu_stall_detector:online", start_stall_detector_cpu, @@ -185,19 +203,24 @@ static int vcpu_stall_detect_probe(struct platform_device *pdev) vcpu_stall_config.hp_online = ret; return 0; err: + if (vcpu_stall_config.ppi_irq > 0) + free_percpu_irq(vcpu_stall_config.ppi_irq, + vcpu_stall_detectors); return ret; } -static int vcpu_stall_detect_remove(struct platform_device *pdev) +static void vcpu_stall_detect_remove(struct platform_device *pdev) { int cpu; cpuhp_remove_state(vcpu_stall_config.hp_online); + if (vcpu_stall_config.ppi_irq > 0) + free_percpu_irq(vcpu_stall_config.ppi_irq, + vcpu_stall_detectors); + for_each_possible_cpu(cpu) stop_stall_detector_cpu(cpu); - - return 0; } static const struct of_device_id vcpu_stall_detect_of_match[] = { diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c index 9ce9b9e0e9b6..cc1d18b3df5c 100644 --- a/drivers/misc/vmw_balloon.c +++ b/drivers/misc/vmw_balloon.c @@ -380,16 +380,7 @@ struct vmballoon { /** * @shrinker: shrinker interface that is used to avoid over-inflation. */ - struct shrinker shrinker; - - /** - * @shrinker_registered: whether the shrinker was registered. - * - * The shrinker interface does not handle gracefully the removal of - * shrinker that was not registered before. This indication allows to - * simplify the unregistration process. 
- */ - bool shrinker_registered; + struct shrinker *shrinker; }; static struct vmballoon balloon; @@ -1568,29 +1559,27 @@ static unsigned long vmballoon_shrinker_count(struct shrinker *shrinker, static void vmballoon_unregister_shrinker(struct vmballoon *b) { - if (b->shrinker_registered) - unregister_shrinker(&b->shrinker); - b->shrinker_registered = false; + shrinker_free(b->shrinker); + b->shrinker = NULL; } static int vmballoon_register_shrinker(struct vmballoon *b) { - int r; - /* Do nothing if the shrinker is not enabled */ if (!vmwballoon_shrinker_enable) return 0; - b->shrinker.scan_objects = vmballoon_shrinker_scan; - b->shrinker.count_objects = vmballoon_shrinker_count; - b->shrinker.seeks = DEFAULT_SEEKS; + b->shrinker = shrinker_alloc(0, "vmw-balloon"); + if (!b->shrinker) + return -ENOMEM; - r = register_shrinker(&b->shrinker, "vmw-balloon"); + b->shrinker->scan_objects = vmballoon_shrinker_scan; + b->shrinker->count_objects = vmballoon_shrinker_count; + b->shrinker->private_data = b; - if (r == 0) - b->shrinker_registered = true; + shrinker_register(b->shrinker); - return r; + return 0; } /* @@ -1748,7 +1737,7 @@ static int vmballoon_migratepage(struct balloon_dev_info *b_dev_info, { unsigned long status, flags; struct vmballoon *b; - int ret; + int ret = 0; b = container_of(b_dev_info, struct vmballoon, b_dev_info); @@ -1789,8 +1778,7 @@ static int vmballoon_migratepage(struct balloon_dev_info *b_dev_info, * @pages_lock . We keep holding @comm_lock since we will need it in a * second. */ - balloon_page_delete(page); - + balloon_page_finalize(page); put_page(page); /* Inflate */ @@ -1808,17 +1796,15 @@ static int vmballoon_migratepage(struct balloon_dev_info *b_dev_info, * A failure happened. While we can deflate the page we just * inflated, this deflation can also encounter an error. Instead * we will decrease the size of the balloon to reflect the - * change and report failure. + * change. 
*/ atomic64_dec(&b->size); - ret = -EBUSY; } else { /* * Success. Take a reference for the page, and we will add it to * the list after acquiring the lock. */ get_page(newpage); - ret = MIGRATEPAGE_SUCCESS; } /* Update the balloon list under the @pages_lock */ @@ -1829,7 +1815,7 @@ static int vmballoon_migratepage(struct balloon_dev_info *b_dev_info, * If we succeed just insert it to the list and update the statistics * under the lock. */ - if (ret == MIGRATEPAGE_SUCCESS) { + if (status == VMW_BALLOON_SUCCESS) { balloon_page_insert(&b->b_dev_info, newpage); __count_vm_event(BALLOON_MIGRATE); } @@ -1883,7 +1869,7 @@ static int __init vmballoon_init(void) error = vmballoon_register_shrinker(&balloon); if (error) - goto fail; + return error; /* * Initialization of compaction must be done after the call to @@ -1905,9 +1891,6 @@ static int __init vmballoon_init(void) vmballoon_debugfs_init(&balloon); return 0; -fail: - vmballoon_unregister_shrinker(&balloon); - return error; } /* diff --git a/drivers/misc/vmw_vmci/vmci_context.c b/drivers/misc/vmw_vmci/vmci_context.c index f22b44827e92..8069d271ed81 100644 --- a/drivers/misc/vmw_vmci/vmci_context.c +++ b/drivers/misc/vmw_vmci/vmci_context.c @@ -251,6 +251,8 @@ static int ctx_fire_notification(u32 context_id, u32 priv_flags) ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, VMCI_CONTEXT_RESOURCE_ID); ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr); + memset((char*)&ev + sizeof(ev.msg.hdr), 0, + ev.msg.hdr.payload_size); ev.msg.event_data.event = VMCI_EVENT_CTX_REMOVED; ev.payload.context_id = context_id; @@ -269,28 +271,6 @@ static int ctx_fire_notification(u32 context_id, u32 priv_flags) } /* - * Returns the current number of pending datagrams. The call may - * also serve as a synchronization point for the datagram queue, - * as no enqueue operations can occur concurrently. 
- */ -int vmci_ctx_pending_datagrams(u32 cid, u32 *pending) -{ - struct vmci_ctx *context; - - context = vmci_ctx_get(cid); - if (context == NULL) - return VMCI_ERROR_INVALID_ARGS; - - spin_lock(&context->lock); - if (pending) - *pending = context->pending_datagrams; - spin_unlock(&context->lock); - vmci_ctx_put(context); - - return VMCI_SUCCESS; -} - -/* * Queues a VMCI datagram for the appropriate target VM context. */ int vmci_ctx_enqueue_datagram(u32 cid, struct vmci_datagram *dg) @@ -992,38 +972,6 @@ int vmci_ctx_dbell_destroy(u32 context_id, struct vmci_handle handle) } /* - * Unregisters all doorbell handles that were previously - * registered with vmci_ctx_dbell_create. - */ -int vmci_ctx_dbell_destroy_all(u32 context_id) -{ - struct vmci_ctx *context; - struct vmci_handle handle; - - if (context_id == VMCI_INVALID_ID) - return VMCI_ERROR_INVALID_ARGS; - - context = vmci_ctx_get(context_id); - if (context == NULL) - return VMCI_ERROR_NOT_FOUND; - - spin_lock(&context->lock); - do { - struct vmci_handle_arr *arr = context->doorbell_array; - handle = vmci_handle_arr_remove_tail(arr); - } while (!vmci_handle_is_invalid(handle)); - do { - struct vmci_handle_arr *arr = context->pending_doorbell_array; - handle = vmci_handle_arr_remove_tail(arr); - } while (!vmci_handle_is_invalid(handle)); - spin_unlock(&context->lock); - - vmci_ctx_put(context); - - return VMCI_SUCCESS; -} - -/* * Registers a notification of a doorbell handle initiated by the * specified source context. The notification of doorbells are * subject to the same isolation rules as datagram delivery. 
To diff --git a/drivers/misc/vmw_vmci/vmci_context.h b/drivers/misc/vmw_vmci/vmci_context.h index 4db8701c9781..083effa08102 100644 --- a/drivers/misc/vmw_vmci/vmci_context.h +++ b/drivers/misc/vmw_vmci/vmci_context.h @@ -98,7 +98,7 @@ struct vmci_ctx_chkpt_buf_info { }; /* - * VMCINotificationReceiveInfo: Used to recieve pending notifications + * VMCINotificationReceiveInfo: Used to receive pending notifications * for doorbells and queue pairs. */ struct vmci_ctx_notify_recv_info { @@ -132,7 +132,6 @@ bool vmci_ctx_supports_host_qp(struct vmci_ctx *context); int vmci_ctx_enqueue_datagram(u32 cid, struct vmci_datagram *dg); int vmci_ctx_dequeue_datagram(struct vmci_ctx *context, size_t *max_size, struct vmci_datagram **dg); -int vmci_ctx_pending_datagrams(u32 cid, u32 *pending); struct vmci_ctx *vmci_ctx_get(u32 cid); void vmci_ctx_put(struct vmci_ctx *context); bool vmci_ctx_exists(u32 cid); @@ -153,7 +152,6 @@ void vmci_ctx_unset_notify(struct vmci_ctx *context); int vmci_ctx_dbell_create(u32 context_id, struct vmci_handle handle); int vmci_ctx_dbell_destroy(u32 context_id, struct vmci_handle handle); -int vmci_ctx_dbell_destroy_all(u32 context_id); int vmci_ctx_notify_dbell(u32 cid, struct vmci_handle handle, u32 src_priv_flags); diff --git a/drivers/misc/vmw_vmci/vmci_datagram.c b/drivers/misc/vmw_vmci/vmci_datagram.c index f50d22882476..3964d9e5a39b 100644 --- a/drivers/misc/vmw_vmci/vmci_datagram.c +++ b/drivers/misc/vmw_vmci/vmci_datagram.c @@ -224,8 +224,8 @@ static int dg_dispatch_as_host(u32 context_id, struct vmci_datagram *dg) return VMCI_ERROR_NO_MEM; } - dg_info = kmalloc(sizeof(*dg_info) + - (size_t) dg->payload_size, GFP_ATOMIC); + dg_info = kmalloc(struct_size(dg_info, msg_payload, dg->payload_size), + GFP_ATOMIC); if (!dg_info) { atomic_dec(&delayed_dg_host_queue_size); vmci_resource_put(resource); @@ -234,7 +234,8 @@ static int dg_dispatch_as_host(u32 context_id, struct vmci_datagram *dg) dg_info->in_dg_host_queue = true; dg_info->entry = 
dst_entry; - memcpy(&dg_info->msg, dg, dg_size); + dg_info->msg = *dg; + memcpy(&dg_info->msg_payload, dg + 1, dg->payload_size); INIT_WORK(&dg_info->work, dg_delayed_dispatch); schedule_work(&dg_info->work); @@ -377,7 +378,8 @@ int vmci_datagram_invoke_guest_handler(struct vmci_datagram *dg) dg_info->in_dg_host_queue = false; dg_info->entry = dst_entry; - memcpy(&dg_info->msg, dg, VMCI_DG_SIZE(dg)); + dg_info->msg = *dg; + memcpy(&dg_info->msg_payload, dg + 1, dg->payload_size); INIT_WORK(&dg_info->work, dg_delayed_dispatch); schedule_work(&dg_info->work); diff --git a/drivers/misc/vmw_vmci/vmci_doorbell.c b/drivers/misc/vmw_vmci/vmci_doorbell.c index fa8a7fce4481..53eeb9e6cb56 100644 --- a/drivers/misc/vmw_vmci/vmci_doorbell.c +++ b/drivers/misc/vmw_vmci/vmci_doorbell.c @@ -258,23 +258,6 @@ static int dbell_unlink(struct vmci_handle handle) } /* - * Notify another guest or the host. We send a datagram down to the - * host via the hypervisor with the notification info. - */ -static int dbell_notify_as_guest(struct vmci_handle handle, u32 priv_flags) -{ - struct vmci_doorbell_notify_msg notify_msg; - - notify_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, - VMCI_DOORBELL_NOTIFY); - notify_msg.hdr.src = VMCI_ANON_SRC_HANDLE; - notify_msg.hdr.payload_size = sizeof(notify_msg) - VMCI_DG_HEADERSIZE; - notify_msg.handle = handle; - - return vmci_send_datagram(¬ify_msg.hdr); -} - -/* * Calls the specified callback in a delayed context. */ static void dbell_delayed_dispatch(struct work_struct *work) @@ -566,39 +549,3 @@ int vmci_doorbell_destroy(struct vmci_handle handle) return VMCI_SUCCESS; } EXPORT_SYMBOL_GPL(vmci_doorbell_destroy); - -/* - * vmci_doorbell_notify() - Ring the doorbell (and hide in the bushes). - * @dst: The handlle identifying the doorbell resource - * @priv_flags: Priviledge flags. - * - * Generates a notification on the doorbell identified by the - * handle. 
For host side generation of notifications, the caller - * can specify what the privilege of the calling side is. - */ -int vmci_doorbell_notify(struct vmci_handle dst, u32 priv_flags) -{ - int retval; - enum vmci_route route; - struct vmci_handle src; - - if (vmci_handle_is_invalid(dst) || - (priv_flags & ~VMCI_PRIVILEGE_ALL_FLAGS)) - return VMCI_ERROR_INVALID_ARGS; - - src = VMCI_INVALID_HANDLE; - retval = vmci_route(&src, &dst, false, &route); - if (retval < VMCI_SUCCESS) - return retval; - - if (VMCI_ROUTE_AS_HOST == route) - return vmci_ctx_notify_dbell(VMCI_HOST_CONTEXT_ID, - dst, priv_flags); - - if (VMCI_ROUTE_AS_GUEST == route) - return dbell_notify_as_guest(dst, priv_flags); - - pr_warn("Unknown route (%d) for doorbell\n", route); - return VMCI_ERROR_DST_UNREACHABLE; -} -EXPORT_SYMBOL_GPL(vmci_doorbell_notify); diff --git a/drivers/misc/vmw_vmci/vmci_event.c b/drivers/misc/vmw_vmci/vmci_event.c index 5d7ac07623c2..9a41ab65378d 100644 --- a/drivers/misc/vmw_vmci/vmci_event.c +++ b/drivers/misc/vmw_vmci/vmci_event.c @@ -9,6 +9,7 @@ #include <linux/vmw_vmci_api.h> #include <linux/list.h> #include <linux/module.h> +#include <linux/nospec.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/rculist.h> @@ -86,9 +87,12 @@ static void event_deliver(struct vmci_event_msg *event_msg) { struct vmci_subscription *cur; struct list_head *subscriber_list; + u32 sanitized_event, max_vmci_event; rcu_read_lock(); - subscriber_list = &subscriber_array[event_msg->event_data.event]; + max_vmci_event = ARRAY_SIZE(subscriber_array); + sanitized_event = array_index_nospec(event_msg->event_data.event, max_vmci_event); + subscriber_list = &subscriber_array[sanitized_event]; list_for_each_entry_rcu(cur, subscriber_list, node) { cur->callback(cur->id, &event_msg->event_data, cur->callback_data); diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c index 4f8d962bb5b2..476af89e751b 100644 --- a/drivers/misc/vmw_vmci/vmci_guest.c +++ 
b/drivers/misc/vmw_vmci/vmci_guest.c @@ -625,7 +625,8 @@ static int vmci_guest_probe_device(struct pci_dev *pdev, if (!vmci_dev) { dev_err(&pdev->dev, "Can't allocate memory for VMCI device\n"); - return -ENOMEM; + error = -ENOMEM; + goto err_unmap_mmio_base; } vmci_dev->dev = &pdev->dev; @@ -642,7 +643,8 @@ static int vmci_guest_probe_device(struct pci_dev *pdev, if (!vmci_dev->tx_buffer) { dev_err(&pdev->dev, "Can't allocate memory for datagram tx buffer\n"); - return -ENOMEM; + error = -ENOMEM; + goto err_unmap_mmio_base; } vmci_dev->data_buffer = dma_alloc_coherent(&pdev->dev, VMCI_DMA_DG_BUFFER_SIZE, @@ -787,8 +789,7 @@ static int vmci_guest_probe_device(struct pci_dev *pdev, error = pci_alloc_irq_vectors(pdev, num_irq_vectors, num_irq_vectors, PCI_IRQ_MSIX); if (error < 0) { - error = pci_alloc_irq_vectors(pdev, 1, 1, - PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY); + error = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES); if (error < 0) goto err_unsubscribe_event; } else { @@ -893,6 +894,10 @@ err_free_notification_bitmap: err_free_data_buffers: vmci_free_dg_buffers(vmci_dev); +err_unmap_mmio_base: + if (mmio_base != NULL) + pci_iounmap(pdev, mmio_base); + /* The rest are managed resources and will be freed by PCI core */ return error; } diff --git a/drivers/misc/vmw_vmci/vmci_handle_array.c b/drivers/misc/vmw_vmci/vmci_handle_array.c index de7fee7ead1b..681b3500125a 100644 --- a/drivers/misc/vmw_vmci/vmci_handle_array.c +++ b/drivers/misc/vmw_vmci/vmci_handle_array.c @@ -8,12 +8,6 @@ #include <linux/slab.h> #include "vmci_handle_array.h" -static size_t handle_arr_calc_size(u32 capacity) -{ - return VMCI_HANDLE_ARRAY_HEADER_SIZE + - capacity * sizeof(struct vmci_handle); -} - struct vmci_handle_arr *vmci_handle_arr_create(u32 capacity, u32 max_capacity) { struct vmci_handle_arr *array; @@ -25,7 +19,7 @@ struct vmci_handle_arr *vmci_handle_arr_create(u32 capacity, u32 max_capacity) capacity = min((u32)VMCI_HANDLE_ARRAY_DEFAULT_CAPACITY, max_capacity); - 
array = kmalloc(handle_arr_calc_size(capacity), GFP_ATOMIC); + array = kmalloc(struct_size(array, entries, capacity), GFP_ATOMIC); if (!array) return NULL; @@ -51,8 +45,8 @@ int vmci_handle_arr_append_entry(struct vmci_handle_arr **array_ptr, struct vmci_handle_arr *new_array; u32 capacity_bump = min(array->max_capacity - array->capacity, array->capacity); - size_t new_size = handle_arr_calc_size(array->capacity + - capacity_bump); + size_t new_size = struct_size(array, entries, + size_add(array->capacity, capacity_bump)); if (array->size >= array->max_capacity) return VMCI_ERROR_NO_MEM; diff --git a/drivers/misc/vmw_vmci/vmci_handle_array.h b/drivers/misc/vmw_vmci/vmci_handle_array.h index 96193f85be5b..27a38b97e8a8 100644 --- a/drivers/misc/vmw_vmci/vmci_handle_array.h +++ b/drivers/misc/vmw_vmci/vmci_handle_array.h @@ -17,17 +17,11 @@ struct vmci_handle_arr { u32 max_capacity; u32 size; u32 pad; - struct vmci_handle entries[]; + struct vmci_handle entries[] __counted_by(capacity); }; -#define VMCI_HANDLE_ARRAY_HEADER_SIZE \ - offsetof(struct vmci_handle_arr, entries) /* Select a default capacity that results in a 64 byte sized array */ #define VMCI_HANDLE_ARRAY_DEFAULT_CAPACITY 6 -/* Make sure that the max array size can be expressed by a u32 */ -#define VMCI_HANDLE_ARRAY_MAX_CAPACITY \ - ((U32_MAX - VMCI_HANDLE_ARRAY_HEADER_SIZE - 1) / \ - sizeof(struct vmci_handle)) struct vmci_handle_arr *vmci_handle_arr_create(u32 capacity, u32 max_capacity); void vmci_handle_arr_destroy(struct vmci_handle_arr *array); diff --git a/drivers/misc/vmw_vmci/vmci_host.c b/drivers/misc/vmw_vmci/vmci_host.c index abe79f6fd2a7..b64944367ac5 100644 --- a/drivers/misc/vmw_vmci/vmci_host.c +++ b/drivers/misc/vmw_vmci/vmci_host.c @@ -227,6 +227,7 @@ static int drv_cp_harray_to_user(void __user *user_buf_uva, static int vmci_host_setup_notify(struct vmci_ctx *context, unsigned long uva) { + struct page *page; int retval; if (context->notify_page) { @@ -243,13 +244,11 @@ static int 
vmci_host_setup_notify(struct vmci_ctx *context, /* * Lock physical page backing a given user VA. */ - retval = get_user_pages_fast(uva, 1, FOLL_WRITE, &context->notify_page); - if (retval != 1) { - context->notify_page = NULL; + retval = get_user_pages_fast(uva, 1, FOLL_WRITE, &page); + if (retval != 1) return VMCI_ERROR_GENERIC; - } - if (context->notify_page == NULL) - return VMCI_ERROR_UNAVAILABLE; + + context->notify_page = page; /* * Map the locked page and set up notify pointer. diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c index 73d71c4ec139..b88ac144ad32 100644 --- a/drivers/misc/vmw_vmci/vmci_queue_pair.c +++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c @@ -3023,139 +3023,6 @@ s64 vmci_qpair_consume_buf_ready(const struct vmci_qp *qpair) EXPORT_SYMBOL_GPL(vmci_qpair_consume_buf_ready); /* - * vmci_qpair_enqueue() - Throw data on the queue. - * @qpair: Pointer to the queue pair struct. - * @buf: Pointer to buffer containing data - * @buf_size: Length of buffer. - * @buf_type: Buffer type (Unused). - * - * This is the client interface for enqueueing data into the queue. - * Returns number of bytes enqueued or < 0 on error. - */ -ssize_t vmci_qpair_enqueue(struct vmci_qp *qpair, - const void *buf, - size_t buf_size, - int buf_type) -{ - ssize_t result; - struct iov_iter from; - struct kvec v = {.iov_base = (void *)buf, .iov_len = buf_size}; - - if (!qpair || !buf) - return VMCI_ERROR_INVALID_ARGS; - - iov_iter_kvec(&from, ITER_SOURCE, &v, 1, buf_size); - - qp_lock(qpair); - - do { - result = qp_enqueue_locked(qpair->produce_q, - qpair->consume_q, - qpair->produce_q_size, - &from); - - if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && - !qp_wait_for_ready_queue(qpair)) - result = VMCI_ERROR_WOULD_BLOCK; - - } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY); - - qp_unlock(qpair); - - return result; -} -EXPORT_SYMBOL_GPL(vmci_qpair_enqueue); - -/* - * vmci_qpair_dequeue() - Get data from the queue. 
- * @qpair: Pointer to the queue pair struct. - * @buf: Pointer to buffer for the data - * @buf_size: Length of buffer. - * @buf_type: Buffer type (Unused). - * - * This is the client interface for dequeueing data from the queue. - * Returns number of bytes dequeued or < 0 on error. - */ -ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair, - void *buf, - size_t buf_size, - int buf_type) -{ - ssize_t result; - struct iov_iter to; - struct kvec v = {.iov_base = buf, .iov_len = buf_size}; - - if (!qpair || !buf) - return VMCI_ERROR_INVALID_ARGS; - - iov_iter_kvec(&to, ITER_DEST, &v, 1, buf_size); - - qp_lock(qpair); - - do { - result = qp_dequeue_locked(qpair->produce_q, - qpair->consume_q, - qpair->consume_q_size, - &to, true); - - if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && - !qp_wait_for_ready_queue(qpair)) - result = VMCI_ERROR_WOULD_BLOCK; - - } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY); - - qp_unlock(qpair); - - return result; -} -EXPORT_SYMBOL_GPL(vmci_qpair_dequeue); - -/* - * vmci_qpair_peek() - Peek at the data in the queue. - * @qpair: Pointer to the queue pair struct. - * @buf: Pointer to buffer for the data - * @buf_size: Length of buffer. - * @buf_type: Buffer type (Unused on Linux). - * - * This is the client interface for peeking into a queue. (I.e., - * copy data from the queue without updating the head pointer.) - * Returns number of bytes dequeued or < 0 on error. 
- */ -ssize_t vmci_qpair_peek(struct vmci_qp *qpair, - void *buf, - size_t buf_size, - int buf_type) -{ - struct iov_iter to; - struct kvec v = {.iov_base = buf, .iov_len = buf_size}; - ssize_t result; - - if (!qpair || !buf) - return VMCI_ERROR_INVALID_ARGS; - - iov_iter_kvec(&to, ITER_DEST, &v, 1, buf_size); - - qp_lock(qpair); - - do { - result = qp_dequeue_locked(qpair->produce_q, - qpair->consume_q, - qpair->consume_q_size, - &to, false); - - if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && - !qp_wait_for_ready_queue(qpair)) - result = VMCI_ERROR_WOULD_BLOCK; - - } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY); - - qp_unlock(qpair); - - return result; -} -EXPORT_SYMBOL_GPL(vmci_qpair_peek); - -/* * vmci_qpair_enquev() - Throw data on the queue using iov. * @qpair: Pointer to the queue pair struct. * @iov: Pointer to buffer containing data diff --git a/drivers/misc/vmw_vmci/vmci_resource.c b/drivers/misc/vmw_vmci/vmci_resource.c index 692daa9eff34..19c9d2cdd277 100644 --- a/drivers/misc/vmw_vmci/vmci_resource.c +++ b/drivers/misc/vmw_vmci/vmci_resource.c @@ -144,7 +144,8 @@ void vmci_resource_remove(struct vmci_resource *resource) spin_lock(&vmci_resource_table.lock); hlist_for_each_entry(r, &vmci_resource_table.entries[idx], node) { - if (vmci_handle_is_equal(r->handle, resource->handle)) { + if (vmci_handle_is_equal(r->handle, resource->handle) && + resource->type == r->type) { hlist_del_init_rcu(&r->node); break; } diff --git a/drivers/misc/xilinx_sdfec.c b/drivers/misc/xilinx_sdfec.c index 270ff4c5971a..3135ba3a58ee 100644 --- a/drivers/misc/xilinx_sdfec.c +++ b/drivers/misc/xilinx_sdfec.c @@ -15,7 +15,8 @@ #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/module.h> -#include <linux/of_platform.h> +#include <linux/of.h> +#include <linux/platform_device.h> #include <linux/poll.h> #include <linux/slab.h> #include <linux/clk.h> @@ -1347,7 +1348,6 @@ static int xsdfec_probe(struct platform_device *pdev) { struct xsdfec_dev *xsdfec; struct 
device *dev; - struct resource *res; int err; bool irq_enabled = true; @@ -1363,8 +1363,7 @@ static int xsdfec_probe(struct platform_device *pdev) return err; dev = xsdfec->dev; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - xsdfec->regs = devm_ioremap_resource(dev, res); + xsdfec->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(xsdfec->regs)) { err = PTR_ERR(xsdfec->regs); goto err_xsdfec_dev; @@ -1421,7 +1420,7 @@ err_xsdfec_dev: return err; } -static int xsdfec_remove(struct platform_device *pdev) +static void xsdfec_remove(struct platform_device *pdev) { struct xsdfec_dev *xsdfec; @@ -1429,7 +1428,6 @@ static int xsdfec_remove(struct platform_device *pdev) misc_deregister(&xsdfec->miscdev); ida_free(&dev_nrs, xsdfec->dev_id); xsdfec_disable_all_clks(&xsdfec->clks); - return 0; } static const struct of_device_id xsdfec_of_match[] = { @@ -1446,7 +1444,7 @@ static struct platform_driver xsdfec_driver = { .of_match_table = xsdfec_of_match, }, .probe = xsdfec_probe, - .remove = xsdfec_remove, + .remove = xsdfec_remove, }; module_platform_driver(xsdfec_driver); diff --git a/drivers/misc/xilinx_tmr_inject.c b/drivers/misc/xilinx_tmr_inject.c index d96f6d7cd109..6284606ffb9f 100644 --- a/drivers/misc/xilinx_tmr_inject.c +++ b/drivers/misc/xilinx_tmr_inject.c @@ -11,7 +11,9 @@ #include <asm/xilinx_mb_manager.h> #include <linux/module.h> -#include <linux/of_device.h> +#include <linux/of.h> +#include <linux/debugfs.h> +#include <linux/platform_device.h> #include <linux/fault-inject.h> /* TMR Inject Register offsets */ @@ -142,11 +144,10 @@ static int xtmr_inject_probe(struct platform_device *pdev) return 0; } -static int xtmr_inject_remove(struct platform_device *pdev) +static void xtmr_inject_remove(struct platform_device *pdev) { debugfs_remove_recursive(dbgfs_root); dbgfs_root = NULL; - return 0; } static const struct of_device_id xtmr_inject_of_match[] = { diff --git a/drivers/misc/xilinx_tmr_manager.c b/drivers/misc/xilinx_tmr_manager.c index 
0ef55e06d3a0..03912a90fd95 100644 --- a/drivers/misc/xilinx_tmr_manager.c +++ b/drivers/misc/xilinx_tmr_manager.c @@ -15,7 +15,8 @@ #include <asm/xilinx_mb_manager.h> #include <linux/module.h> -#include <linux/of_device.h> +#include <linux/of.h> +#include <linux/platform_device.h> /* TMR Manager Register offsets */ #define XTMR_MANAGER_CR_OFFSET 0x0 @@ -170,8 +171,7 @@ static int xtmr_manager_probe(struct platform_device *pdev) if (!xtmr_manager) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - xtmr_manager->regs = devm_ioremap_resource(&pdev->dev, res); + xtmr_manager->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(xtmr_manager->regs)) return PTR_ERR(xtmr_manager->regs); |
