Diffstat (limited to 'drivers/char')
175 files changed, 7719 insertions, 6701 deletions
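The adi.c hunk below drops the driver's custom open() hook, which set FMODE_UNSIGNED_OFFSET on the struct file, in favour of the fop_flags field in struct file_operations. A minimal sketch of the resulting pattern (the example_read/example_write handlers are hypothetical placeholders, not the real adi callbacks):

#include <linux/fs.h>

/* Sketch only: declare unsigned offsets via fop_flags instead of
 * setting FMODE_UNSIGNED_OFFSET from an open() callback.
 */
static const struct file_operations example_fops = {
	.owner     = THIS_MODULE,
	.llseek    = default_llseek,
	.read      = example_read,   /* hypothetical read handler */
	.write     = example_write,  /* hypothetical write handler */
	.fop_flags = FOP_UNSIGNED_OFFSET,
};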
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 625af75833fc..d2cfc584e202 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -237,7 +237,8 @@ config APPLICOM
 
 config SONYPI
 	tristate "Sony Vaio Programmable I/O Control Device support"
-	depends on X86_32 && PCI && INPUT
+	depends on X86_32 && PCI && INPUT && HAS_IOPORT
+	depends on ACPI_EC || !ACPI
 	help
 	  This driver enables access to the Sony Programmable I/O Control
 	  Device which can be found in many (all ?) Sony Vaio laptops.
@@ -348,7 +349,7 @@ config DEVPORT
 	  device is similar to /dev/mem, but for I/O ports.
 
 config HPET
-	bool "HPET - High Precision Event Timer" if (X86 || IA64)
+	bool "HPET - High Precision Event Timer" if X86
 	default n
 	depends on ACPI
 	help
@@ -377,7 +378,7 @@ config HPET_MMAP_DEFAULT
 
 config HANGCHECK_TIMER
 	tristate "Hangcheck timer"
-	depends on X86 || IA64 || PPC64 || S390
+	depends on X86 || PPC64 || S390
 	help
 	  The hangcheck-timer module detects when the system has gone
 	  out to lunch past a certain margin.  It can reboot the system
@@ -403,7 +404,7 @@ config TELCLOCK
 	  configuration of the telecom clock configuration settings.  This
 	  device is used for hardware synchronization across the ATCA backplane
 	  fabric.  Upon loading, the driver exports a sysfs directory,
-	  /sys/devices/platform/telco_clock, with a number of files for
+	  /sys/devices/faux/telco_clock, with a number of files for
 	  controlling the behavior of this hardware.
 
 source "drivers/s390/char/Kconfig"
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index c5f532e412f1..1291369b9126 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -6,9 +6,9 @@ obj-y += mem.o random.o
 obj-$(CONFIG_TTY_PRINTK)	+= ttyprintk.o
 obj-y				+= misc.o
+obj-$(CONFIG_TEST_MISC_MINOR)	+= misc_minor_kunit.o
 obj-$(CONFIG_ATARI_DSP56K)	+= dsp56k.o
 obj-$(CONFIG_VIRTIO_CONSOLE)	+= virtio_console.o
-obj-$(CONFIG_MSPEC)		+= mspec.o
 obj-$(CONFIG_UV_MMTIMER)	+= uv_mmtimer.o
 obj-$(CONFIG_IBM_BSR)		+= bsr.o
diff --git a/drivers/char/adi.c b/drivers/char/adi.c
index 751d7cc0da1b..0849d933a2d5 100644
--- a/drivers/char/adi.c
+++ b/drivers/char/adi.c
@@ -14,12 +14,6 @@
 
 #define MAX_BUF_SZ	PAGE_SIZE
 
-static int adi_open(struct inode *inode, struct file *file)
-{
-	file->f_mode |= FMODE_UNSIGNED_OFFSET;
-	return 0;
-}
-
 static int read_mcd_tag(unsigned long addr)
 {
 	long err;
@@ -86,8 +80,8 @@ static ssize_t adi_read(struct file *file, char __user *buf,
 			bytes_read += ver_buf_sz;
 			ver_buf_idx = 0;
-			ver_buf_sz = min(count - bytes_read,
-					 (size_t)MAX_BUF_SZ);
+			ver_buf_sz = min_t(size_t, count - bytes_read,
+					   MAX_BUF_SZ);
 		}
 	}
 
@@ -137,7 +131,7 @@ static ssize_t adi_write(struct file *file, const char __user *buf,
 	ssize_t ret;
 	int i;
 
-	if (count <= 0)
+	if (count == 0)
 		return -EINVAL;
 
 	ver_buf_sz = min_t(size_t, count, MAX_BUF_SZ);
@@ -163,7 +157,7 @@ static ssize_t adi_write(struct file *file, const char __user *buf,
 		}
 
 		bytes_written += ver_buf_sz;
-		ver_buf_sz = min(count - bytes_written, (size_t)MAX_BUF_SZ);
+		ver_buf_sz = min_t(size_t, count - bytes_written, MAX_BUF_SZ);
 	} while (bytes_written < count);
 
 	(*offp) += bytes_written;
@@ -196,7 +190,6 @@ static loff_t adi_llseek(struct file *file, loff_t offset, int whence)
 
 	if (offset != file->f_pos) {
 		file->f_pos = offset;
-		file->f_version = 0;
 		ret = offset;
 	}
 
@@ -206,9 +199,9 @@ static loff_t adi_llseek(struct file *file, loff_t offset, int whence)
 static const struct file_operations adi_fops = {
 	.owner		= THIS_MODULE,
 	.llseek		= adi_llseek,
-	.open		= adi_open,
 	.read		= adi_read,
 	.write		= adi_write,
+	.fop_flags	= FOP_UNSIGNED_OFFSET,
 };
 
 static struct miscdevice adi_miscdev = {
diff --git a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig
index 4f501e4842ab..c47eb7bf06d4 100644
--- a/drivers/char/agp/Kconfig
+++ b/drivers/char/agp/Kconfig
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 menuconfig AGP
 	tristate "/dev/agpgart (AGP Support)"
-	depends on ALPHA || IA64 || PARISC || PPC || X86
+	depends on ALPHA || PARISC || PPC || X86
 	depends on PCI
 	help
 	  AGP (Accelerated Graphics Port) is a bus system mainly used to
@@ -109,20 +109,6 @@ config AGP_VIA
 	  This option gives you AGP support for the GLX component of
 	  X on VIA MVP3/Apollo Pro chipsets.
 
-config AGP_I460
-	tristate "Intel 460GX chipset support"
-	depends on AGP && IA64
-	help
-	  This option gives you AGP GART support for the Intel 460GX chipset
-	  for IA64 processors.
-
-config AGP_HP_ZX1
-	tristate "HP ZX1 chipset AGP support"
-	depends on AGP && IA64
-	help
-	  This option gives you AGP GART support for the HP ZX1 chipset
-	  for IA64 processors.
-
 config AGP_PARISC
 	tristate "HP Quicksilver AGP support"
 	depends on AGP && PARISC && 64BIT && IOMMU_SBA
diff --git a/drivers/char/agp/Makefile b/drivers/char/agp/Makefile
index 90ed8c789e48..43b09cf193bb 100644
--- a/drivers/char/agp/Makefile
+++ b/drivers/char/agp/Makefile
@@ -1,12 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 agpgart-y := backend.o generic.o isoch.o
 
-ifeq ($(CONFIG_DRM_LEGACY),y)
-agpgart-$(CONFIG_COMPAT) += compat_ioctl.o
-agpgart-y += frontend.o
-endif
-
-
 obj-$(CONFIG_AGP)		+= agpgart.o
 obj-$(CONFIG_AGP_ALI)		+= ali-agp.o
 obj-$(CONFIG_AGP_ATI)		+= ati-agp.o
@@ -14,9 +8,7 @@ obj-$(CONFIG_AGP_AMD) += amd-k7-agp.o
 obj-$(CONFIG_AGP_AMD64)		+= amd64-agp.o
 obj-$(CONFIG_AGP_ALPHA_CORE)	+= alpha-agp.o
 obj-$(CONFIG_AGP_EFFICEON)	+= efficeon-agp.o
-obj-$(CONFIG_AGP_HP_ZX1)	+= hp-agp.o
 obj-$(CONFIG_AGP_PARISC)	+= parisc-agp.o
-obj-$(CONFIG_AGP_I460)	+= i460-agp.o
 obj-$(CONFIG_AGP_INTEL)		+= intel-agp.o
 obj-$(CONFIG_INTEL_GTT)		+= intel-gtt.o
 obj-$(CONFIG_AGP_NVIDIA)	+= nvidia-agp.o
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h
index 8771dcc9b8e2..67d7be800a7c 100644
--- a/drivers/char/agp/agp.h
+++ b/drivers/char/agp/agp.h
@@ -138,7 +138,6 @@ struct agp_bridge_data {
 	unsigned long gart_bus_addr;
 	unsigned long gatt_bus_addr;
 	u32 mode;
-	enum chipset_type type;
 	unsigned long *key_list;
 	atomic_t current_memory_agp;
 	atomic_t agp_in_use;
@@ -185,15 +184,6 @@ void agp_put_bridge(struct agp_bridge_data *bridge);
 int agp_add_bridge(struct agp_bridge_data *bridge);
 void agp_remove_bridge(struct agp_bridge_data *bridge);
 
-/* Frontend routines. */
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-int agp_frontend_initialize(void);
-void agp_frontend_cleanup(void);
-#else
-static inline int agp_frontend_initialize(void) { return 0; }
-static inline void agp_frontend_cleanup(void) {}
-#endif
-
 /* Generic routines.
*/ void agp_generic_enable(struct agp_bridge_data *bridge, u32 mode); int agp_generic_create_gatt_table(struct agp_bridge_data *bridge); diff --git a/drivers/char/agp/ali-agp.c b/drivers/char/agp/ali-agp.c index 760d9a931289..2eaab502ec29 100644 --- a/drivers/char/agp/ali-agp.c +++ b/drivers/char/agp/ali-agp.c @@ -418,5 +418,6 @@ module_init(agp_ali_init); module_exit(agp_ali_cleanup); MODULE_AUTHOR("Dave Jones"); +MODULE_DESCRIPTION("ALi AGPGART routines"); MODULE_LICENSE("GPL and additional rights"); diff --git a/drivers/char/agp/alpha-agp.c b/drivers/char/agp/alpha-agp.c index c9bf2c219841..e1763ecb8111 100644 --- a/drivers/char/agp/alpha-agp.c +++ b/drivers/char/agp/alpha-agp.c @@ -149,7 +149,7 @@ struct agp_bridge_driver alpha_core_agp_driver = { struct agp_bridge_data *alpha_bridge; -int __init +static int __init alpha_core_agp_setup(void) { alpha_agp_info *agp = alpha_mv.agp_info(); @@ -217,4 +217,5 @@ module_init(agp_alpha_core_init); module_exit(agp_alpha_core_cleanup); MODULE_AUTHOR("Jeff Wiedemeier <Jeff.Wiedemeier@hp.com>"); +MODULE_DESCRIPTION("Alpha AGP support"); MODULE_LICENSE("GPL and additional rights"); diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c index 55397ba765d2..795c8c9ff680 100644 --- a/drivers/char/agp/amd-k7-agp.c +++ b/drivers/char/agp/amd-k7-agp.c @@ -549,4 +549,5 @@ static void __exit agp_amdk7_cleanup(void) module_init(agp_amdk7_init); module_exit(agp_amdk7_cleanup); +MODULE_DESCRIPTION("AMD K7 AGPGART routines"); MODULE_LICENSE("GPL and additional rights"); diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c index ce8651436609..2505df1f4e69 100644 --- a/drivers/char/agp/amd64-agp.c +++ b/drivers/char/agp/amd64-agp.c @@ -16,7 +16,7 @@ #include <linux/mmzone.h> #include <asm/page.h> /* PAGE_SIZE */ #include <asm/e820/api.h> -#include <asm/amd_nb.h> +#include <asm/amd/nb.h> #include <asm/gart.h> #include "agp.h" @@ -720,11 +720,6 @@ static const struct pci_device_id agp_amd64_pci_table[] = { MODULE_DEVICE_TABLE(pci, agp_amd64_pci_table); -static const struct pci_device_id agp_amd64_pci_promisc_table[] = { - { PCI_DEVICE_CLASS(0, 0) }, - { } -}; - static DEFINE_SIMPLE_DEV_PM_OPS(agp_amd64_pm_ops, NULL, agp_amd64_resume); static struct pci_driver agp_amd64_pci_driver = { @@ -739,6 +734,7 @@ static struct pci_driver agp_amd64_pci_driver = { /* Not static due to IOMMU code calling it early. 
*/ int __init agp_amd64_init(void) { + struct pci_dev *pdev = NULL; int err = 0; if (agp_off) @@ -767,9 +763,13 @@ int __init agp_amd64_init(void) } /* Look for any AGP bridge */ - agp_amd64_pci_driver.id_table = agp_amd64_pci_promisc_table; - err = driver_attach(&agp_amd64_pci_driver.driver); - if (err == 0 && agp_bridges_found == 0) { + for_each_pci_dev(pdev) + if (pci_find_capability(pdev, PCI_CAP_ID_AGP)) + pci_add_dynid(&agp_amd64_pci_driver, + pdev->vendor, pdev->device, + pdev->subsystem_vendor, + pdev->subsystem_device, 0, 0, 0); + if (agp_bridges_found == 0) { pci_unregister_driver(&agp_amd64_pci_driver); err = -ENODEV; } @@ -802,4 +802,5 @@ module_exit(agp_amd64_cleanup); MODULE_AUTHOR("Dave Jones, Andi Kleen"); module_param(agp_try_unsupported, bool, 0); +MODULE_DESCRIPTION("GART driver for the AMD Opteron/Athlon64 on-CPU northbridge"); MODULE_LICENSE("GPL"); diff --git a/drivers/char/agp/ati-agp.c b/drivers/char/agp/ati-agp.c index 3c1fce48aabe..f7871afe08cf 100644 --- a/drivers/char/agp/ati-agp.c +++ b/drivers/char/agp/ati-agp.c @@ -572,5 +572,6 @@ module_init(agp_ati_init); module_exit(agp_ati_cleanup); MODULE_AUTHOR("Dave Jones"); +MODULE_DESCRIPTION("ATi AGPGART routines"); MODULE_LICENSE("GPL and additional rights"); diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c index 0e19c600db53..1776afd3ee07 100644 --- a/drivers/char/agp/backend.c +++ b/drivers/char/agp/backend.c @@ -293,13 +293,6 @@ int agp_add_bridge(struct agp_bridge_data *bridge) } if (list_empty(&agp_bridges)) { - error = agp_frontend_initialize(); - if (error) { - dev_info(&bridge->dev->dev, - "agp_frontend_initialize() failed\n"); - goto frontend_err; - } - dev_info(&bridge->dev->dev, "AGP aperture is %dM @ 0x%lx\n", bridge->driver->fetch_size(), bridge->gart_bus_addr); @@ -308,8 +301,6 @@ int agp_add_bridge(struct agp_bridge_data *bridge) list_add(&bridge->list, &agp_bridges); return 0; -frontend_err: - agp_backend_cleanup(bridge); err_out: module_put(bridge->driver->owner); err_put_bridge: @@ -323,8 +314,6 @@ void agp_remove_bridge(struct agp_bridge_data *bridge) { agp_backend_cleanup(bridge); list_del(&bridge->list); - if (list_empty(&agp_bridges)) - agp_frontend_cleanup(); module_put(bridge->driver->owner); } EXPORT_SYMBOL_GPL(agp_remove_bridge); diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c deleted file mode 100644 index 52ffe1706ce0..000000000000 --- a/drivers/char/agp/compat_ioctl.c +++ /dev/null @@ -1,291 +0,0 @@ -/* - * AGPGART driver frontend compatibility ioctls - * Copyright (C) 2004 Silicon Graphics, Inc. - * Copyright (C) 2002-2003 Dave Jones - * Copyright (C) 1999 Jeff Hartmann - * Copyright (C) 1999 Precision Insight, Inc. - * Copyright (C) 1999 Xi Graphics, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE - * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#include <linux/kernel.h> -#include <linux/pci.h> -#include <linux/fs.h> -#include <linux/agpgart.h> -#include <linux/slab.h> -#include <linux/uaccess.h> -#include "agp.h" -#include "compat_ioctl.h" - -static int compat_agpioc_info_wrap(struct agp_file_private *priv, void __user *arg) -{ - struct agp_info32 userinfo; - struct agp_kern_info kerninfo; - - agp_copy_info(agp_bridge, &kerninfo); - - userinfo.version.major = kerninfo.version.major; - userinfo.version.minor = kerninfo.version.minor; - userinfo.bridge_id = kerninfo.device->vendor | - (kerninfo.device->device << 16); - userinfo.agp_mode = kerninfo.mode; - userinfo.aper_base = (compat_long_t)kerninfo.aper_base; - userinfo.aper_size = kerninfo.aper_size; - userinfo.pg_total = userinfo.pg_system = kerninfo.max_memory; - userinfo.pg_used = kerninfo.current_memory; - - if (copy_to_user(arg, &userinfo, sizeof(userinfo))) - return -EFAULT; - - return 0; -} - -static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg) -{ - struct agp_region32 ureserve; - struct agp_region kreserve; - struct agp_client *client; - struct agp_file_private *client_priv; - - DBG(""); - if (copy_from_user(&ureserve, arg, sizeof(ureserve))) - return -EFAULT; - - if ((unsigned) ureserve.seg_count >= ~0U/sizeof(struct agp_segment32)) - return -EFAULT; - - kreserve.pid = ureserve.pid; - kreserve.seg_count = ureserve.seg_count; - - client = agp_find_client_by_pid(kreserve.pid); - - if (kreserve.seg_count == 0) { - /* remove a client */ - client_priv = agp_find_private(kreserve.pid); - - if (client_priv != NULL) { - set_bit(AGP_FF_IS_CLIENT, &client_priv->access_flags); - set_bit(AGP_FF_IS_VALID, &client_priv->access_flags); - } - if (client == NULL) { - /* client is already removed */ - return 0; - } - return agp_remove_client(kreserve.pid); - } else { - struct agp_segment32 *usegment; - struct agp_segment *ksegment; - int seg; - - if (ureserve.seg_count >= 16384) - return -EINVAL; - - usegment = kmalloc_array(ureserve.seg_count, - sizeof(*usegment), - GFP_KERNEL); - if (!usegment) - return -ENOMEM; - - ksegment = kmalloc_array(kreserve.seg_count, - sizeof(*ksegment), - GFP_KERNEL); - if (!ksegment) { - kfree(usegment); - return -ENOMEM; - } - - if (copy_from_user(usegment, (void __user *) ureserve.seg_list, - sizeof(*usegment) * ureserve.seg_count)) { - kfree(usegment); - kfree(ksegment); - return -EFAULT; - } - - for (seg = 0; seg < ureserve.seg_count; seg++) { - ksegment[seg].pg_start = usegment[seg].pg_start; - ksegment[seg].pg_count = usegment[seg].pg_count; - ksegment[seg].prot = usegment[seg].prot; - } - - kfree(usegment); - kreserve.seg_list = ksegment; - - if (client == NULL) { - /* Create the client and add the segment */ - client = agp_create_client(kreserve.pid); - - if (client == NULL) { - kfree(ksegment); - return -ENOMEM; - } - client_priv = agp_find_private(kreserve.pid); - - if (client_priv != NULL) { - set_bit(AGP_FF_IS_CLIENT, &client_priv->access_flags); - set_bit(AGP_FF_IS_VALID, &client_priv->access_flags); - } - } 
- return agp_create_segment(client, &kreserve); - } - /* Will never really happen */ - return -EINVAL; -} - -static int compat_agpioc_allocate_wrap(struct agp_file_private *priv, void __user *arg) -{ - struct agp_memory *memory; - struct agp_allocate32 alloc; - - DBG(""); - if (copy_from_user(&alloc, arg, sizeof(alloc))) - return -EFAULT; - - memory = agp_allocate_memory_wrap(alloc.pg_count, alloc.type); - - if (memory == NULL) - return -ENOMEM; - - alloc.key = memory->key; - alloc.physical = memory->physical; - - if (copy_to_user(arg, &alloc, sizeof(alloc))) { - agp_free_memory_wrap(memory); - return -EFAULT; - } - return 0; -} - -static int compat_agpioc_bind_wrap(struct agp_file_private *priv, void __user *arg) -{ - struct agp_bind32 bind_info; - struct agp_memory *memory; - - DBG(""); - if (copy_from_user(&bind_info, arg, sizeof(bind_info))) - return -EFAULT; - - memory = agp_find_mem_by_key(bind_info.key); - - if (memory == NULL) - return -EINVAL; - - return agp_bind_memory(memory, bind_info.pg_start); -} - -static int compat_agpioc_unbind_wrap(struct agp_file_private *priv, void __user *arg) -{ - struct agp_memory *memory; - struct agp_unbind32 unbind; - - DBG(""); - if (copy_from_user(&unbind, arg, sizeof(unbind))) - return -EFAULT; - - memory = agp_find_mem_by_key(unbind.key); - - if (memory == NULL) - return -EINVAL; - - return agp_unbind_memory(memory); -} - -long compat_agp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) -{ - struct agp_file_private *curr_priv = file->private_data; - int ret_val = -ENOTTY; - - mutex_lock(&(agp_fe.agp_mutex)); - - if ((agp_fe.current_controller == NULL) && - (cmd != AGPIOC_ACQUIRE32)) { - ret_val = -EINVAL; - goto ioctl_out; - } - if ((agp_fe.backend_acquired != true) && - (cmd != AGPIOC_ACQUIRE32)) { - ret_val = -EBUSY; - goto ioctl_out; - } - if (cmd != AGPIOC_ACQUIRE32) { - if (!(test_bit(AGP_FF_IS_CONTROLLER, &curr_priv->access_flags))) { - ret_val = -EPERM; - goto ioctl_out; - } - /* Use the original pid of the controller, - * in case it's threaded */ - - if (agp_fe.current_controller->pid != curr_priv->my_pid) { - ret_val = -EBUSY; - goto ioctl_out; - } - } - - switch (cmd) { - case AGPIOC_INFO32: - ret_val = compat_agpioc_info_wrap(curr_priv, (void __user *) arg); - break; - - case AGPIOC_ACQUIRE32: - ret_val = agpioc_acquire_wrap(curr_priv); - break; - - case AGPIOC_RELEASE32: - ret_val = agpioc_release_wrap(curr_priv); - break; - - case AGPIOC_SETUP32: - ret_val = agpioc_setup_wrap(curr_priv, (void __user *) arg); - break; - - case AGPIOC_RESERVE32: - ret_val = compat_agpioc_reserve_wrap(curr_priv, (void __user *) arg); - break; - - case AGPIOC_PROTECT32: - ret_val = agpioc_protect_wrap(curr_priv); - break; - - case AGPIOC_ALLOCATE32: - ret_val = compat_agpioc_allocate_wrap(curr_priv, (void __user *) arg); - break; - - case AGPIOC_DEALLOCATE32: - ret_val = agpioc_deallocate_wrap(curr_priv, (int) arg); - break; - - case AGPIOC_BIND32: - ret_val = compat_agpioc_bind_wrap(curr_priv, (void __user *) arg); - break; - - case AGPIOC_UNBIND32: - ret_val = compat_agpioc_unbind_wrap(curr_priv, (void __user *) arg); - break; - - case AGPIOC_CHIPSET_FLUSH32: - break; - } - -ioctl_out: - DBG("ioctl returns %d\n", ret_val); - mutex_unlock(&(agp_fe.agp_mutex)); - return ret_val; -} - diff --git a/drivers/char/agp/compat_ioctl.h b/drivers/char/agp/compat_ioctl.h deleted file mode 100644 index f30e0fd97963..000000000000 --- a/drivers/char/agp/compat_ioctl.h +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Copyright (C) 1999 Jeff Hartmann - * 
Copyright (C) 1999 Precision Insight, Inc. - * Copyright (C) 1999 Xi Graphics, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE - * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#ifndef _AGP_COMPAT_IOCTL_H -#define _AGP_COMPAT_IOCTL_H - -#include <linux/compat.h> -#include <linux/agpgart.h> - -#define AGPIOC_INFO32 _IOR (AGPIOC_BASE, 0, compat_uptr_t) -#define AGPIOC_ACQUIRE32 _IO (AGPIOC_BASE, 1) -#define AGPIOC_RELEASE32 _IO (AGPIOC_BASE, 2) -#define AGPIOC_SETUP32 _IOW (AGPIOC_BASE, 3, compat_uptr_t) -#define AGPIOC_RESERVE32 _IOW (AGPIOC_BASE, 4, compat_uptr_t) -#define AGPIOC_PROTECT32 _IOW (AGPIOC_BASE, 5, compat_uptr_t) -#define AGPIOC_ALLOCATE32 _IOWR(AGPIOC_BASE, 6, compat_uptr_t) -#define AGPIOC_DEALLOCATE32 _IOW (AGPIOC_BASE, 7, compat_int_t) -#define AGPIOC_BIND32 _IOW (AGPIOC_BASE, 8, compat_uptr_t) -#define AGPIOC_UNBIND32 _IOW (AGPIOC_BASE, 9, compat_uptr_t) -#define AGPIOC_CHIPSET_FLUSH32 _IO (AGPIOC_BASE, 10) - -struct agp_info32 { - struct agp_version version; /* version of the driver */ - u32 bridge_id; /* bridge vendor/device */ - u32 agp_mode; /* mode info of bridge */ - compat_long_t aper_base; /* base of aperture */ - compat_size_t aper_size; /* size of aperture */ - compat_size_t pg_total; /* max pages (swap + system) */ - compat_size_t pg_system; /* max pages (system) */ - compat_size_t pg_used; /* current pages used */ -}; - -/* - * The "prot" down below needs still a "sleep" flag somehow ... 
- */ -struct agp_segment32 { - compat_off_t pg_start; /* starting page to populate */ - compat_size_t pg_count; /* number of pages */ - compat_int_t prot; /* prot flags for mmap */ -}; - -struct agp_region32 { - compat_pid_t pid; /* pid of process */ - compat_size_t seg_count; /* number of segments */ - struct agp_segment32 *seg_list; -}; - -struct agp_allocate32 { - compat_int_t key; /* tag of allocation */ - compat_size_t pg_count; /* number of pages */ - u32 type; /* 0 == normal, other devspec */ - u32 physical; /* device specific (some devices - * need a phys address of the - * actual page behind the gatt - * table) */ -}; - -struct agp_bind32 { - compat_int_t key; /* tag of allocation */ - compat_off_t pg_start; /* starting page to populate */ -}; - -struct agp_unbind32 { - compat_int_t key; /* tag of allocation */ - u32 priority; /* priority for paging out */ -}; - -extern struct agp_front_data agp_fe; - -int agpioc_acquire_wrap(struct agp_file_private *priv); -int agpioc_release_wrap(struct agp_file_private *priv); -int agpioc_protect_wrap(struct agp_file_private *priv); -int agpioc_setup_wrap(struct agp_file_private *priv, void __user *arg); -int agpioc_deallocate_wrap(struct agp_file_private *priv, int arg); -struct agp_file_private *agp_find_private(pid_t pid); -struct agp_client *agp_create_client(pid_t id); -int agp_remove_client(pid_t id); -int agp_create_segment(struct agp_client *client, struct agp_region *region); -void agp_free_memory_wrap(struct agp_memory *memory); -struct agp_memory *agp_allocate_memory_wrap(size_t pg_count, u32 type); -struct agp_memory *agp_find_mem_by_key(int key); -struct agp_client *agp_find_client_by_pid(pid_t id); - -#endif /* _AGP_COMPAT_H */ diff --git a/drivers/char/agp/efficeon-agp.c b/drivers/char/agp/efficeon-agp.c index f28d42319269..0d25bbdc7e6a 100644 --- a/drivers/char/agp/efficeon-agp.c +++ b/drivers/char/agp/efficeon-agp.c @@ -465,4 +465,5 @@ module_init(agp_efficeon_init); module_exit(agp_efficeon_cleanup); MODULE_AUTHOR("Carlos Puchol <cpglinux@puchol.com>"); +MODULE_DESCRIPTION("Transmeta's Efficeon AGPGART driver"); MODULE_LICENSE("GPL and additional rights"); diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c deleted file mode 100644 index 321118a9cfa5..000000000000 --- a/drivers/char/agp/frontend.c +++ /dev/null @@ -1,1068 +0,0 @@ -/* - * AGPGART driver frontend - * Copyright (C) 2004 Silicon Graphics, Inc. - * Copyright (C) 2002-2003 Dave Jones - * Copyright (C) 1999 Jeff Hartmann - * Copyright (C) 1999 Precision Insight, Inc. - * Copyright (C) 1999 Xi Graphics, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE - * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#include <linux/types.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/mman.h> -#include <linux/pci.h> -#include <linux/miscdevice.h> -#include <linux/agp_backend.h> -#include <linux/agpgart.h> -#include <linux/slab.h> -#include <linux/mm.h> -#include <linux/fs.h> -#include <linux/sched.h> -#include <linux/uaccess.h> - -#include "agp.h" -#include "compat_ioctl.h" - -struct agp_front_data agp_fe; - -struct agp_memory *agp_find_mem_by_key(int key) -{ - struct agp_memory *curr; - - if (agp_fe.current_controller == NULL) - return NULL; - - curr = agp_fe.current_controller->pool; - - while (curr != NULL) { - if (curr->key == key) - break; - curr = curr->next; - } - - DBG("key=%d -> mem=%p", key, curr); - return curr; -} - -static void agp_remove_from_pool(struct agp_memory *temp) -{ - struct agp_memory *prev; - struct agp_memory *next; - - /* Check to see if this is even in the memory pool */ - - DBG("mem=%p", temp); - if (agp_find_mem_by_key(temp->key) != NULL) { - next = temp->next; - prev = temp->prev; - - if (prev != NULL) { - prev->next = next; - if (next != NULL) - next->prev = prev; - - } else { - /* This is the first item on the list */ - if (next != NULL) - next->prev = NULL; - - agp_fe.current_controller->pool = next; - } - } -} - -/* - * Routines for managing each client's segment list - - * These routines handle adding and removing segments - * to each auth'ed client. - */ - -static struct -agp_segment_priv *agp_find_seg_in_client(const struct agp_client *client, - unsigned long offset, - int size, pgprot_t page_prot) -{ - struct agp_segment_priv *seg; - int i; - off_t pg_start; - size_t pg_count; - - pg_start = offset / 4096; - pg_count = size / 4096; - seg = *(client->segments); - - for (i = 0; i < client->num_segments; i++) { - if ((seg[i].pg_start == pg_start) && - (seg[i].pg_count == pg_count) && - (pgprot_val(seg[i].prot) == pgprot_val(page_prot))) { - return seg + i; - } - } - - return NULL; -} - -static void agp_remove_seg_from_client(struct agp_client *client) -{ - DBG("client=%p", client); - - if (client->segments != NULL) { - if (*(client->segments) != NULL) { - DBG("Freeing %p from client %p", *(client->segments), client); - kfree(*(client->segments)); - } - DBG("Freeing %p from client %p", client->segments, client); - kfree(client->segments); - client->segments = NULL; - } -} - -static void agp_add_seg_to_client(struct agp_client *client, - struct agp_segment_priv ** seg, int num_segments) -{ - struct agp_segment_priv **prev_seg; - - prev_seg = client->segments; - - if (prev_seg != NULL) - agp_remove_seg_from_client(client); - - DBG("Adding seg %p (%d segments) to client %p", seg, num_segments, client); - client->num_segments = num_segments; - client->segments = seg; -} - -static pgprot_t agp_convert_mmap_flags(int prot) -{ - unsigned long prot_bits; - - prot_bits = calc_vm_prot_bits(prot, 0) | VM_SHARED; - return vm_get_page_prot(prot_bits); -} - -int agp_create_segment(struct agp_client *client, struct agp_region *region) -{ - struct agp_segment_priv **ret_seg; - struct agp_segment_priv *seg; - struct agp_segment *user_seg; - size_t i; - - seg = kzalloc((sizeof(struct agp_segment_priv) * region->seg_count), GFP_KERNEL); - if (seg == NULL) { - 
kfree(region->seg_list); - region->seg_list = NULL; - return -ENOMEM; - } - user_seg = region->seg_list; - - for (i = 0; i < region->seg_count; i++) { - seg[i].pg_start = user_seg[i].pg_start; - seg[i].pg_count = user_seg[i].pg_count; - seg[i].prot = agp_convert_mmap_flags(user_seg[i].prot); - } - kfree(region->seg_list); - region->seg_list = NULL; - - ret_seg = kmalloc(sizeof(void *), GFP_KERNEL); - if (ret_seg == NULL) { - kfree(seg); - return -ENOMEM; - } - *ret_seg = seg; - agp_add_seg_to_client(client, ret_seg, region->seg_count); - return 0; -} - -/* End - Routines for managing each client's segment list */ - -/* This function must only be called when current_controller != NULL */ -static void agp_insert_into_pool(struct agp_memory * temp) -{ - struct agp_memory *prev; - - prev = agp_fe.current_controller->pool; - - if (prev != NULL) { - prev->prev = temp; - temp->next = prev; - } - agp_fe.current_controller->pool = temp; -} - - -/* File private list routines */ - -struct agp_file_private *agp_find_private(pid_t pid) -{ - struct agp_file_private *curr; - - curr = agp_fe.file_priv_list; - - while (curr != NULL) { - if (curr->my_pid == pid) - return curr; - curr = curr->next; - } - - return NULL; -} - -static void agp_insert_file_private(struct agp_file_private * priv) -{ - struct agp_file_private *prev; - - prev = agp_fe.file_priv_list; - - if (prev != NULL) - prev->prev = priv; - priv->next = prev; - agp_fe.file_priv_list = priv; -} - -static void agp_remove_file_private(struct agp_file_private * priv) -{ - struct agp_file_private *next; - struct agp_file_private *prev; - - next = priv->next; - prev = priv->prev; - - if (prev != NULL) { - prev->next = next; - - if (next != NULL) - next->prev = prev; - - } else { - if (next != NULL) - next->prev = NULL; - - agp_fe.file_priv_list = next; - } -} - -/* End - File flag list routines */ - -/* - * Wrappers for agp_free_memory & agp_allocate_memory - * These make sure that internal lists are kept updated. 
- */ -void agp_free_memory_wrap(struct agp_memory *memory) -{ - agp_remove_from_pool(memory); - agp_free_memory(memory); -} - -struct agp_memory *agp_allocate_memory_wrap(size_t pg_count, u32 type) -{ - struct agp_memory *memory; - - memory = agp_allocate_memory(agp_bridge, pg_count, type); - if (memory == NULL) - return NULL; - - agp_insert_into_pool(memory); - return memory; -} - -/* Routines for managing the list of controllers - - * These routines manage the current controller, and the list of - * controllers - */ - -static struct agp_controller *agp_find_controller_by_pid(pid_t id) -{ - struct agp_controller *controller; - - controller = agp_fe.controllers; - - while (controller != NULL) { - if (controller->pid == id) - return controller; - controller = controller->next; - } - - return NULL; -} - -static struct agp_controller *agp_create_controller(pid_t id) -{ - struct agp_controller *controller; - - controller = kzalloc(sizeof(struct agp_controller), GFP_KERNEL); - if (controller == NULL) - return NULL; - - controller->pid = id; - return controller; -} - -static int agp_insert_controller(struct agp_controller *controller) -{ - struct agp_controller *prev_controller; - - prev_controller = agp_fe.controllers; - controller->next = prev_controller; - - if (prev_controller != NULL) - prev_controller->prev = controller; - - agp_fe.controllers = controller; - - return 0; -} - -static void agp_remove_all_clients(struct agp_controller *controller) -{ - struct agp_client *client; - struct agp_client *temp; - - client = controller->clients; - - while (client) { - struct agp_file_private *priv; - - temp = client; - agp_remove_seg_from_client(temp); - priv = agp_find_private(temp->pid); - - if (priv != NULL) { - clear_bit(AGP_FF_IS_VALID, &priv->access_flags); - clear_bit(AGP_FF_IS_CLIENT, &priv->access_flags); - } - client = client->next; - kfree(temp); - } -} - -static void agp_remove_all_memory(struct agp_controller *controller) -{ - struct agp_memory *memory; - struct agp_memory *temp; - - memory = controller->pool; - - while (memory) { - temp = memory; - memory = memory->next; - agp_free_memory_wrap(temp); - } -} - -static int agp_remove_controller(struct agp_controller *controller) -{ - struct agp_controller *prev_controller; - struct agp_controller *next_controller; - - prev_controller = controller->prev; - next_controller = controller->next; - - if (prev_controller != NULL) { - prev_controller->next = next_controller; - if (next_controller != NULL) - next_controller->prev = prev_controller; - - } else { - if (next_controller != NULL) - next_controller->prev = NULL; - - agp_fe.controllers = next_controller; - } - - agp_remove_all_memory(controller); - agp_remove_all_clients(controller); - - if (agp_fe.current_controller == controller) { - agp_fe.current_controller = NULL; - agp_fe.backend_acquired = false; - agp_backend_release(agp_bridge); - } - kfree(controller); - return 0; -} - -static void agp_controller_make_current(struct agp_controller *controller) -{ - struct agp_client *clients; - - clients = controller->clients; - - while (clients != NULL) { - struct agp_file_private *priv; - - priv = agp_find_private(clients->pid); - - if (priv != NULL) { - set_bit(AGP_FF_IS_VALID, &priv->access_flags); - set_bit(AGP_FF_IS_CLIENT, &priv->access_flags); - } - clients = clients->next; - } - - agp_fe.current_controller = controller; -} - -static void agp_controller_release_current(struct agp_controller *controller, - struct agp_file_private *controller_priv) -{ - struct agp_client *clients; - - 
clear_bit(AGP_FF_IS_VALID, &controller_priv->access_flags); - clients = controller->clients; - - while (clients != NULL) { - struct agp_file_private *priv; - - priv = agp_find_private(clients->pid); - - if (priv != NULL) - clear_bit(AGP_FF_IS_VALID, &priv->access_flags); - - clients = clients->next; - } - - agp_fe.current_controller = NULL; - agp_fe.used_by_controller = false; - agp_backend_release(agp_bridge); -} - -/* - * Routines for managing client lists - - * These routines are for managing the list of auth'ed clients. - */ - -static struct agp_client -*agp_find_client_in_controller(struct agp_controller *controller, pid_t id) -{ - struct agp_client *client; - - if (controller == NULL) - return NULL; - - client = controller->clients; - - while (client != NULL) { - if (client->pid == id) - return client; - client = client->next; - } - - return NULL; -} - -static struct agp_controller *agp_find_controller_for_client(pid_t id) -{ - struct agp_controller *controller; - - controller = agp_fe.controllers; - - while (controller != NULL) { - if ((agp_find_client_in_controller(controller, id)) != NULL) - return controller; - controller = controller->next; - } - - return NULL; -} - -struct agp_client *agp_find_client_by_pid(pid_t id) -{ - struct agp_client *temp; - - if (agp_fe.current_controller == NULL) - return NULL; - - temp = agp_find_client_in_controller(agp_fe.current_controller, id); - return temp; -} - -static void agp_insert_client(struct agp_client *client) -{ - struct agp_client *prev_client; - - prev_client = agp_fe.current_controller->clients; - client->next = prev_client; - - if (prev_client != NULL) - prev_client->prev = client; - - agp_fe.current_controller->clients = client; - agp_fe.current_controller->num_clients++; -} - -struct agp_client *agp_create_client(pid_t id) -{ - struct agp_client *new_client; - - new_client = kzalloc(sizeof(struct agp_client), GFP_KERNEL); - if (new_client == NULL) - return NULL; - - new_client->pid = id; - agp_insert_client(new_client); - return new_client; -} - -int agp_remove_client(pid_t id) -{ - struct agp_client *client; - struct agp_client *prev_client; - struct agp_client *next_client; - struct agp_controller *controller; - - controller = agp_find_controller_for_client(id); - if (controller == NULL) - return -EINVAL; - - client = agp_find_client_in_controller(controller, id); - if (client == NULL) - return -EINVAL; - - prev_client = client->prev; - next_client = client->next; - - if (prev_client != NULL) { - prev_client->next = next_client; - if (next_client != NULL) - next_client->prev = prev_client; - - } else { - if (next_client != NULL) - next_client->prev = NULL; - controller->clients = next_client; - } - - controller->num_clients--; - agp_remove_seg_from_client(client); - kfree(client); - return 0; -} - -/* End - Routines for managing client lists */ - -/* File Operations */ - -static int agp_mmap(struct file *file, struct vm_area_struct *vma) -{ - unsigned int size, current_size; - unsigned long offset; - struct agp_client *client; - struct agp_file_private *priv = file->private_data; - struct agp_kern_info kerninfo; - - mutex_lock(&(agp_fe.agp_mutex)); - - if (agp_fe.backend_acquired != true) - goto out_eperm; - - if (!(test_bit(AGP_FF_IS_VALID, &priv->access_flags))) - goto out_eperm; - - agp_copy_info(agp_bridge, &kerninfo); - size = vma->vm_end - vma->vm_start; - current_size = kerninfo.aper_size; - current_size = current_size * 0x100000; - offset = vma->vm_pgoff << PAGE_SHIFT; - DBG("%lx:%lx", offset, offset+size); - - if 
(test_bit(AGP_FF_IS_CLIENT, &priv->access_flags)) { - if ((size + offset) > current_size) - goto out_inval; - - client = agp_find_client_by_pid(current->pid); - - if (client == NULL) - goto out_eperm; - - if (!agp_find_seg_in_client(client, offset, size, vma->vm_page_prot)) - goto out_inval; - - DBG("client vm_ops=%p", kerninfo.vm_ops); - if (kerninfo.vm_ops) { - vma->vm_ops = kerninfo.vm_ops; - } else if (io_remap_pfn_range(vma, vma->vm_start, - (kerninfo.aper_base + offset) >> PAGE_SHIFT, - size, - pgprot_writecombine(vma->vm_page_prot))) { - goto out_again; - } - mutex_unlock(&(agp_fe.agp_mutex)); - return 0; - } - - if (test_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags)) { - if (size != current_size) - goto out_inval; - - DBG("controller vm_ops=%p", kerninfo.vm_ops); - if (kerninfo.vm_ops) { - vma->vm_ops = kerninfo.vm_ops; - } else if (io_remap_pfn_range(vma, vma->vm_start, - kerninfo.aper_base >> PAGE_SHIFT, - size, - pgprot_writecombine(vma->vm_page_prot))) { - goto out_again; - } - mutex_unlock(&(agp_fe.agp_mutex)); - return 0; - } - -out_eperm: - mutex_unlock(&(agp_fe.agp_mutex)); - return -EPERM; - -out_inval: - mutex_unlock(&(agp_fe.agp_mutex)); - return -EINVAL; - -out_again: - mutex_unlock(&(agp_fe.agp_mutex)); - return -EAGAIN; -} - -static int agp_release(struct inode *inode, struct file *file) -{ - struct agp_file_private *priv = file->private_data; - - mutex_lock(&(agp_fe.agp_mutex)); - - DBG("priv=%p", priv); - - if (test_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags)) { - struct agp_controller *controller; - - controller = agp_find_controller_by_pid(priv->my_pid); - - if (controller != NULL) { - if (controller == agp_fe.current_controller) - agp_controller_release_current(controller, priv); - agp_remove_controller(controller); - controller = NULL; - } - } - - if (test_bit(AGP_FF_IS_CLIENT, &priv->access_flags)) - agp_remove_client(priv->my_pid); - - agp_remove_file_private(priv); - kfree(priv); - file->private_data = NULL; - mutex_unlock(&(agp_fe.agp_mutex)); - return 0; -} - -static int agp_open(struct inode *inode, struct file *file) -{ - int minor = iminor(inode); - struct agp_file_private *priv; - struct agp_client *client; - - if (minor != AGPGART_MINOR) - return -ENXIO; - - mutex_lock(&(agp_fe.agp_mutex)); - - priv = kzalloc(sizeof(struct agp_file_private), GFP_KERNEL); - if (priv == NULL) { - mutex_unlock(&(agp_fe.agp_mutex)); - return -ENOMEM; - } - - set_bit(AGP_FF_ALLOW_CLIENT, &priv->access_flags); - priv->my_pid = current->pid; - - if (capable(CAP_SYS_RAWIO)) - /* Root priv, can be controller */ - set_bit(AGP_FF_ALLOW_CONTROLLER, &priv->access_flags); - - client = agp_find_client_by_pid(current->pid); - - if (client != NULL) { - set_bit(AGP_FF_IS_CLIENT, &priv->access_flags); - set_bit(AGP_FF_IS_VALID, &priv->access_flags); - } - file->private_data = (void *) priv; - agp_insert_file_private(priv); - DBG("private=%p, client=%p", priv, client); - - mutex_unlock(&(agp_fe.agp_mutex)); - - return 0; -} - -static int agpioc_info_wrap(struct agp_file_private *priv, void __user *arg) -{ - struct agp_info userinfo; - struct agp_kern_info kerninfo; - - agp_copy_info(agp_bridge, &kerninfo); - - memset(&userinfo, 0, sizeof(userinfo)); - userinfo.version.major = kerninfo.version.major; - userinfo.version.minor = kerninfo.version.minor; - userinfo.bridge_id = kerninfo.device->vendor | - (kerninfo.device->device << 16); - userinfo.agp_mode = kerninfo.mode; - userinfo.aper_base = kerninfo.aper_base; - userinfo.aper_size = kerninfo.aper_size; - userinfo.pg_total = 
userinfo.pg_system = kerninfo.max_memory; - userinfo.pg_used = kerninfo.current_memory; - - if (copy_to_user(arg, &userinfo, sizeof(struct agp_info))) - return -EFAULT; - - return 0; -} - -int agpioc_acquire_wrap(struct agp_file_private *priv) -{ - struct agp_controller *controller; - - DBG(""); - - if (!(test_bit(AGP_FF_ALLOW_CONTROLLER, &priv->access_flags))) - return -EPERM; - - if (agp_fe.current_controller != NULL) - return -EBUSY; - - if (!agp_bridge) - return -ENODEV; - - if (atomic_read(&agp_bridge->agp_in_use)) - return -EBUSY; - - atomic_inc(&agp_bridge->agp_in_use); - - agp_fe.backend_acquired = true; - - controller = agp_find_controller_by_pid(priv->my_pid); - - if (controller != NULL) { - agp_controller_make_current(controller); - } else { - controller = agp_create_controller(priv->my_pid); - - if (controller == NULL) { - agp_fe.backend_acquired = false; - agp_backend_release(agp_bridge); - return -ENOMEM; - } - agp_insert_controller(controller); - agp_controller_make_current(controller); - } - - set_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags); - set_bit(AGP_FF_IS_VALID, &priv->access_flags); - return 0; -} - -int agpioc_release_wrap(struct agp_file_private *priv) -{ - DBG(""); - agp_controller_release_current(agp_fe.current_controller, priv); - return 0; -} - -int agpioc_setup_wrap(struct agp_file_private *priv, void __user *arg) -{ - struct agp_setup mode; - - DBG(""); - if (copy_from_user(&mode, arg, sizeof(struct agp_setup))) - return -EFAULT; - - agp_enable(agp_bridge, mode.agp_mode); - return 0; -} - -static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg) -{ - struct agp_region reserve; - struct agp_client *client; - struct agp_file_private *client_priv; - - DBG(""); - if (copy_from_user(&reserve, arg, sizeof(struct agp_region))) - return -EFAULT; - - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment)) - return -EFAULT; - - client = agp_find_client_by_pid(reserve.pid); - - if (reserve.seg_count == 0) { - /* remove a client */ - client_priv = agp_find_private(reserve.pid); - - if (client_priv != NULL) { - set_bit(AGP_FF_IS_CLIENT, &client_priv->access_flags); - set_bit(AGP_FF_IS_VALID, &client_priv->access_flags); - } - if (client == NULL) { - /* client is already removed */ - return 0; - } - return agp_remove_client(reserve.pid); - } else { - struct agp_segment *segment; - - if (reserve.seg_count >= 16384) - return -EINVAL; - - segment = kmalloc((sizeof(struct agp_segment) * reserve.seg_count), - GFP_KERNEL); - - if (segment == NULL) - return -ENOMEM; - - if (copy_from_user(segment, (void __user *) reserve.seg_list, - sizeof(struct agp_segment) * reserve.seg_count)) { - kfree(segment); - return -EFAULT; - } - reserve.seg_list = segment; - - if (client == NULL) { - /* Create the client and add the segment */ - client = agp_create_client(reserve.pid); - - if (client == NULL) { - kfree(segment); - return -ENOMEM; - } - client_priv = agp_find_private(reserve.pid); - - if (client_priv != NULL) { - set_bit(AGP_FF_IS_CLIENT, &client_priv->access_flags); - set_bit(AGP_FF_IS_VALID, &client_priv->access_flags); - } - } - return agp_create_segment(client, &reserve); - } - /* Will never really happen */ - return -EINVAL; -} - -int agpioc_protect_wrap(struct agp_file_private *priv) -{ - DBG(""); - /* This function is not currently implemented */ - return -EINVAL; -} - -static int agpioc_allocate_wrap(struct agp_file_private *priv, void __user *arg) -{ - struct agp_memory *memory; - struct agp_allocate alloc; - - DBG(""); - if 
(copy_from_user(&alloc, arg, sizeof(struct agp_allocate))) - return -EFAULT; - - if (alloc.type >= AGP_USER_TYPES) - return -EINVAL; - - memory = agp_allocate_memory_wrap(alloc.pg_count, alloc.type); - - if (memory == NULL) - return -ENOMEM; - - alloc.key = memory->key; - alloc.physical = memory->physical; - - if (copy_to_user(arg, &alloc, sizeof(struct agp_allocate))) { - agp_free_memory_wrap(memory); - return -EFAULT; - } - return 0; -} - -int agpioc_deallocate_wrap(struct agp_file_private *priv, int arg) -{ - struct agp_memory *memory; - - DBG(""); - memory = agp_find_mem_by_key(arg); - - if (memory == NULL) - return -EINVAL; - - agp_free_memory_wrap(memory); - return 0; -} - -static int agpioc_bind_wrap(struct agp_file_private *priv, void __user *arg) -{ - struct agp_bind bind_info; - struct agp_memory *memory; - - DBG(""); - if (copy_from_user(&bind_info, arg, sizeof(struct agp_bind))) - return -EFAULT; - - memory = agp_find_mem_by_key(bind_info.key); - - if (memory == NULL) - return -EINVAL; - - return agp_bind_memory(memory, bind_info.pg_start); -} - -static int agpioc_unbind_wrap(struct agp_file_private *priv, void __user *arg) -{ - struct agp_memory *memory; - struct agp_unbind unbind; - - DBG(""); - if (copy_from_user(&unbind, arg, sizeof(struct agp_unbind))) - return -EFAULT; - - memory = agp_find_mem_by_key(unbind.key); - - if (memory == NULL) - return -EINVAL; - - return agp_unbind_memory(memory); -} - -static long agp_ioctl(struct file *file, - unsigned int cmd, unsigned long arg) -{ - struct agp_file_private *curr_priv = file->private_data; - int ret_val = -ENOTTY; - - DBG("priv=%p, cmd=%x", curr_priv, cmd); - mutex_lock(&(agp_fe.agp_mutex)); - - if ((agp_fe.current_controller == NULL) && - (cmd != AGPIOC_ACQUIRE)) { - ret_val = -EINVAL; - goto ioctl_out; - } - if ((agp_fe.backend_acquired != true) && - (cmd != AGPIOC_ACQUIRE)) { - ret_val = -EBUSY; - goto ioctl_out; - } - if (cmd != AGPIOC_ACQUIRE) { - if (!(test_bit(AGP_FF_IS_CONTROLLER, &curr_priv->access_flags))) { - ret_val = -EPERM; - goto ioctl_out; - } - /* Use the original pid of the controller, - * in case it's threaded */ - - if (agp_fe.current_controller->pid != curr_priv->my_pid) { - ret_val = -EBUSY; - goto ioctl_out; - } - } - - switch (cmd) { - case AGPIOC_INFO: - ret_val = agpioc_info_wrap(curr_priv, (void __user *) arg); - break; - - case AGPIOC_ACQUIRE: - ret_val = agpioc_acquire_wrap(curr_priv); - break; - - case AGPIOC_RELEASE: - ret_val = agpioc_release_wrap(curr_priv); - break; - - case AGPIOC_SETUP: - ret_val = agpioc_setup_wrap(curr_priv, (void __user *) arg); - break; - - case AGPIOC_RESERVE: - ret_val = agpioc_reserve_wrap(curr_priv, (void __user *) arg); - break; - - case AGPIOC_PROTECT: - ret_val = agpioc_protect_wrap(curr_priv); - break; - - case AGPIOC_ALLOCATE: - ret_val = agpioc_allocate_wrap(curr_priv, (void __user *) arg); - break; - - case AGPIOC_DEALLOCATE: - ret_val = agpioc_deallocate_wrap(curr_priv, (int) arg); - break; - - case AGPIOC_BIND: - ret_val = agpioc_bind_wrap(curr_priv, (void __user *) arg); - break; - - case AGPIOC_UNBIND: - ret_val = agpioc_unbind_wrap(curr_priv, (void __user *) arg); - break; - - case AGPIOC_CHIPSET_FLUSH: - break; - } - -ioctl_out: - DBG("ioctl returns %d\n", ret_val); - mutex_unlock(&(agp_fe.agp_mutex)); - return ret_val; -} - -static const struct file_operations agp_fops = -{ - .owner = THIS_MODULE, - .llseek = no_llseek, - .unlocked_ioctl = agp_ioctl, -#ifdef CONFIG_COMPAT - .compat_ioctl = compat_agp_ioctl, -#endif - .mmap = agp_mmap, - .open = 
agp_open, - .release = agp_release, -}; - -static struct miscdevice agp_miscdev = -{ - .minor = AGPGART_MINOR, - .name = "agpgart", - .fops = &agp_fops -}; - -int agp_frontend_initialize(void) -{ - memset(&agp_fe, 0, sizeof(struct agp_front_data)); - mutex_init(&(agp_fe.agp_mutex)); - - if (misc_register(&agp_miscdev)) { - printk(KERN_ERR PFX "unable to get minor: %d\n", AGPGART_MINOR); - return -EIO; - } - return 0; -} - -void agp_frontend_cleanup(void) -{ - misc_deregister(&agp_miscdev); -} diff --git a/drivers/char/agp/hp-agp.c b/drivers/char/agp/hp-agp.c deleted file mode 100644 index 84d9adbb62f6..000000000000 --- a/drivers/char/agp/hp-agp.c +++ /dev/null @@ -1,550 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * HP zx1 AGPGART routines. - * - * (c) Copyright 2002, 2003 Hewlett-Packard Development Company, L.P. - * Bjorn Helgaas <bjorn.helgaas@hp.com> - */ - -#include <linux/acpi.h> -#include <linux/module.h> -#include <linux/pci.h> -#include <linux/init.h> -#include <linux/agp_backend.h> -#include <linux/log2.h> -#include <linux/slab.h> - -#include <asm/acpi-ext.h> - -#include "agp.h" - -#define HP_ZX1_IOC_OFFSET 0x1000 /* ACPI reports SBA, we want IOC */ - -/* HP ZX1 IOC registers */ -#define HP_ZX1_IBASE 0x300 -#define HP_ZX1_IMASK 0x308 -#define HP_ZX1_PCOM 0x310 -#define HP_ZX1_TCNFG 0x318 -#define HP_ZX1_PDIR_BASE 0x320 - -#define HP_ZX1_IOVA_BASE GB(1UL) -#define HP_ZX1_IOVA_SIZE GB(1UL) -#define HP_ZX1_GART_SIZE (HP_ZX1_IOVA_SIZE / 2) -#define HP_ZX1_SBA_IOMMU_COOKIE 0x0000badbadc0ffeeUL - -#define HP_ZX1_PDIR_VALID_BIT 0x8000000000000000UL -#define HP_ZX1_IOVA_TO_PDIR(va) ((va - hp_private.iova_base) >> hp_private.io_tlb_shift) - -#define AGP8X_MODE_BIT 3 -#define AGP8X_MODE (1 << AGP8X_MODE_BIT) - -/* AGP bridge need not be PCI device, but DRM thinks it is. */ -static struct pci_dev fake_bridge_dev; - -static int hp_zx1_gart_found; - -static struct aper_size_info_fixed hp_zx1_sizes[] = -{ - {0, 0, 0}, /* filled in by hp_zx1_fetch_size() */ -}; - -static struct gatt_mask hp_zx1_masks[] = -{ - {.mask = HP_ZX1_PDIR_VALID_BIT, .type = 0} -}; - -static struct _hp_private { - volatile u8 __iomem *ioc_regs; - volatile u8 __iomem *lba_regs; - int lba_cap_offset; - u64 *io_pdir; // PDIR for entire IOVA - u64 *gatt; // PDIR just for GART (subset of above) - u64 gatt_entries; - u64 iova_base; - u64 gart_base; - u64 gart_size; - u64 io_pdir_size; - int io_pdir_owner; // do we own it, or share it with sba_iommu? - int io_page_size; - int io_tlb_shift; - int io_tlb_ps; // IOC ps config - int io_pages_per_kpage; -} hp_private; - -static int __init hp_zx1_ioc_shared(void) -{ - struct _hp_private *hp = &hp_private; - - printk(KERN_INFO PFX "HP ZX1 IOC: IOPDIR shared with sba_iommu\n"); - - /* - * IOC already configured by sba_iommu module; just use - * its setup. 
We assume: - * - IOVA space is 1Gb in size - * - first 512Mb is IOMMU, second 512Mb is GART - */ - hp->io_tlb_ps = readq(hp->ioc_regs+HP_ZX1_TCNFG); - switch (hp->io_tlb_ps) { - case 0: hp->io_tlb_shift = 12; break; - case 1: hp->io_tlb_shift = 13; break; - case 2: hp->io_tlb_shift = 14; break; - case 3: hp->io_tlb_shift = 16; break; - default: - printk(KERN_ERR PFX "Invalid IOTLB page size " - "configuration 0x%x\n", hp->io_tlb_ps); - hp->gatt = NULL; - hp->gatt_entries = 0; - return -ENODEV; - } - hp->io_page_size = 1 << hp->io_tlb_shift; - hp->io_pages_per_kpage = PAGE_SIZE / hp->io_page_size; - - hp->iova_base = readq(hp->ioc_regs+HP_ZX1_IBASE) & ~0x1; - hp->gart_base = hp->iova_base + HP_ZX1_IOVA_SIZE - HP_ZX1_GART_SIZE; - - hp->gart_size = HP_ZX1_GART_SIZE; - hp->gatt_entries = hp->gart_size / hp->io_page_size; - - hp->io_pdir = phys_to_virt(readq(hp->ioc_regs+HP_ZX1_PDIR_BASE)); - hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)]; - - if (hp->gatt[0] != HP_ZX1_SBA_IOMMU_COOKIE) { - /* Normal case when no AGP device in system */ - hp->gatt = NULL; - hp->gatt_entries = 0; - printk(KERN_ERR PFX "No reserved IO PDIR entry found; " - "GART disabled\n"); - return -ENODEV; - } - - return 0; -} - -static int __init -hp_zx1_ioc_owner (void) -{ - struct _hp_private *hp = &hp_private; - - printk(KERN_INFO PFX "HP ZX1 IOC: IOPDIR dedicated to GART\n"); - - /* - * Select an IOV page size no larger than system page size. - */ - if (PAGE_SIZE >= KB(64)) { - hp->io_tlb_shift = 16; - hp->io_tlb_ps = 3; - } else if (PAGE_SIZE >= KB(16)) { - hp->io_tlb_shift = 14; - hp->io_tlb_ps = 2; - } else if (PAGE_SIZE >= KB(8)) { - hp->io_tlb_shift = 13; - hp->io_tlb_ps = 1; - } else { - hp->io_tlb_shift = 12; - hp->io_tlb_ps = 0; - } - hp->io_page_size = 1 << hp->io_tlb_shift; - hp->io_pages_per_kpage = PAGE_SIZE / hp->io_page_size; - - hp->iova_base = HP_ZX1_IOVA_BASE; - hp->gart_size = HP_ZX1_GART_SIZE; - hp->gart_base = hp->iova_base + HP_ZX1_IOVA_SIZE - hp->gart_size; - - hp->gatt_entries = hp->gart_size / hp->io_page_size; - hp->io_pdir_size = (HP_ZX1_IOVA_SIZE / hp->io_page_size) * sizeof(u64); - - return 0; -} - -static int __init -hp_zx1_ioc_init (u64 hpa) -{ - struct _hp_private *hp = &hp_private; - - hp->ioc_regs = ioremap(hpa, 1024); - if (!hp->ioc_regs) - return -ENOMEM; - - /* - * If the IOTLB is currently disabled, we can take it over. - * Otherwise, we have to share with sba_iommu. 
- */ - hp->io_pdir_owner = (readq(hp->ioc_regs+HP_ZX1_IBASE) & 0x1) == 0; - - if (hp->io_pdir_owner) - return hp_zx1_ioc_owner(); - - return hp_zx1_ioc_shared(); -} - -static int -hp_zx1_lba_find_capability (volatile u8 __iomem *hpa, int cap) -{ - u16 status; - u8 pos, id; - int ttl = 48; - - status = readw(hpa+PCI_STATUS); - if (!(status & PCI_STATUS_CAP_LIST)) - return 0; - pos = readb(hpa+PCI_CAPABILITY_LIST); - while (ttl-- && pos >= 0x40) { - pos &= ~3; - id = readb(hpa+pos+PCI_CAP_LIST_ID); - if (id == 0xff) - break; - if (id == cap) - return pos; - pos = readb(hpa+pos+PCI_CAP_LIST_NEXT); - } - return 0; -} - -static int __init -hp_zx1_lba_init (u64 hpa) -{ - struct _hp_private *hp = &hp_private; - int cap; - - hp->lba_regs = ioremap(hpa, 256); - if (!hp->lba_regs) - return -ENOMEM; - - hp->lba_cap_offset = hp_zx1_lba_find_capability(hp->lba_regs, PCI_CAP_ID_AGP); - - cap = readl(hp->lba_regs+hp->lba_cap_offset) & 0xff; - if (cap != PCI_CAP_ID_AGP) { - printk(KERN_ERR PFX "Invalid capability ID 0x%02x at 0x%x\n", - cap, hp->lba_cap_offset); - iounmap(hp->lba_regs); - return -ENODEV; - } - - return 0; -} - -static int -hp_zx1_fetch_size(void) -{ - int size; - - size = hp_private.gart_size / MB(1); - hp_zx1_sizes[0].size = size; - agp_bridge->current_size = (void *) &hp_zx1_sizes[0]; - return size; -} - -static int -hp_zx1_configure (void) -{ - struct _hp_private *hp = &hp_private; - - agp_bridge->gart_bus_addr = hp->gart_base; - agp_bridge->capndx = hp->lba_cap_offset; - agp_bridge->mode = readl(hp->lba_regs+hp->lba_cap_offset+PCI_AGP_STATUS); - - if (hp->io_pdir_owner) { - writel(virt_to_phys(hp->io_pdir), hp->ioc_regs+HP_ZX1_PDIR_BASE); - readl(hp->ioc_regs+HP_ZX1_PDIR_BASE); - writel(hp->io_tlb_ps, hp->ioc_regs+HP_ZX1_TCNFG); - readl(hp->ioc_regs+HP_ZX1_TCNFG); - writel((unsigned int)(~(HP_ZX1_IOVA_SIZE-1)), hp->ioc_regs+HP_ZX1_IMASK); - readl(hp->ioc_regs+HP_ZX1_IMASK); - writel(hp->iova_base|1, hp->ioc_regs+HP_ZX1_IBASE); - readl(hp->ioc_regs+HP_ZX1_IBASE); - writel(hp->iova_base|ilog2(HP_ZX1_IOVA_SIZE), hp->ioc_regs+HP_ZX1_PCOM); - readl(hp->ioc_regs+HP_ZX1_PCOM); - } - - return 0; -} - -static void -hp_zx1_cleanup (void) -{ - struct _hp_private *hp = &hp_private; - - if (hp->ioc_regs) { - if (hp->io_pdir_owner) { - writeq(0, hp->ioc_regs+HP_ZX1_IBASE); - readq(hp->ioc_regs+HP_ZX1_IBASE); - } - iounmap(hp->ioc_regs); - } - if (hp->lba_regs) - iounmap(hp->lba_regs); -} - -static void -hp_zx1_tlbflush (struct agp_memory *mem) -{ - struct _hp_private *hp = &hp_private; - - writeq(hp->gart_base | ilog2(hp->gart_size), hp->ioc_regs+HP_ZX1_PCOM); - readq(hp->ioc_regs+HP_ZX1_PCOM); -} - -static int -hp_zx1_create_gatt_table (struct agp_bridge_data *bridge) -{ - struct _hp_private *hp = &hp_private; - int i; - - if (hp->io_pdir_owner) { - hp->io_pdir = (u64 *) __get_free_pages(GFP_KERNEL, - get_order(hp->io_pdir_size)); - if (!hp->io_pdir) { - printk(KERN_ERR PFX "Couldn't allocate contiguous " - "memory for I/O PDIR\n"); - hp->gatt = NULL; - hp->gatt_entries = 0; - return -ENOMEM; - } - memset(hp->io_pdir, 0, hp->io_pdir_size); - - hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)]; - } - - for (i = 0; i < hp->gatt_entries; i++) { - hp->gatt[i] = (unsigned long) agp_bridge->scratch_page; - } - - return 0; -} - -static int -hp_zx1_free_gatt_table (struct agp_bridge_data *bridge) -{ - struct _hp_private *hp = &hp_private; - - if (hp->io_pdir_owner) - free_pages((unsigned long) hp->io_pdir, - get_order(hp->io_pdir_size)); - else - hp->gatt[0] = HP_ZX1_SBA_IOMMU_COOKIE; - return 
0; -} - -static int -hp_zx1_insert_memory (struct agp_memory *mem, off_t pg_start, int type) -{ - struct _hp_private *hp = &hp_private; - int i, k; - off_t j, io_pg_start; - int io_pg_count; - - if (type != mem->type || - agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type)) { - return -EINVAL; - } - - io_pg_start = hp->io_pages_per_kpage * pg_start; - io_pg_count = hp->io_pages_per_kpage * mem->page_count; - if ((io_pg_start + io_pg_count) > hp->gatt_entries) { - return -EINVAL; - } - - j = io_pg_start; - while (j < (io_pg_start + io_pg_count)) { - if (hp->gatt[j]) { - return -EBUSY; - } - j++; - } - - if (!mem->is_flushed) { - global_cache_flush(); - mem->is_flushed = true; - } - - for (i = 0, j = io_pg_start; i < mem->page_count; i++) { - unsigned long paddr; - - paddr = page_to_phys(mem->pages[i]); - for (k = 0; - k < hp->io_pages_per_kpage; - k++, j++, paddr += hp->io_page_size) { - hp->gatt[j] = HP_ZX1_PDIR_VALID_BIT | paddr; - } - } - - agp_bridge->driver->tlb_flush(mem); - return 0; -} - -static int -hp_zx1_remove_memory (struct agp_memory *mem, off_t pg_start, int type) -{ - struct _hp_private *hp = &hp_private; - int i, io_pg_start, io_pg_count; - - if (type != mem->type || - agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type)) { - return -EINVAL; - } - - io_pg_start = hp->io_pages_per_kpage * pg_start; - io_pg_count = hp->io_pages_per_kpage * mem->page_count; - for (i = io_pg_start; i < io_pg_count + io_pg_start; i++) { - hp->gatt[i] = agp_bridge->scratch_page; - } - - agp_bridge->driver->tlb_flush(mem); - return 0; -} - -static unsigned long -hp_zx1_mask_memory (struct agp_bridge_data *bridge, dma_addr_t addr, int type) -{ - return HP_ZX1_PDIR_VALID_BIT | addr; -} - -static void -hp_zx1_enable (struct agp_bridge_data *bridge, u32 mode) -{ - struct _hp_private *hp = &hp_private; - u32 command; - - command = readl(hp->lba_regs+hp->lba_cap_offset+PCI_AGP_STATUS); - command = agp_collect_device_status(bridge, mode, command); - command |= 0x00000100; - - writel(command, hp->lba_regs+hp->lba_cap_offset+PCI_AGP_COMMAND); - - agp_device_command(command, (mode & AGP8X_MODE) != 0); -} - -const struct agp_bridge_driver hp_zx1_driver = { - .owner = THIS_MODULE, - .size_type = FIXED_APER_SIZE, - .configure = hp_zx1_configure, - .fetch_size = hp_zx1_fetch_size, - .cleanup = hp_zx1_cleanup, - .tlb_flush = hp_zx1_tlbflush, - .mask_memory = hp_zx1_mask_memory, - .masks = hp_zx1_masks, - .agp_enable = hp_zx1_enable, - .cache_flush = global_cache_flush, - .create_gatt_table = hp_zx1_create_gatt_table, - .free_gatt_table = hp_zx1_free_gatt_table, - .insert_memory = hp_zx1_insert_memory, - .remove_memory = hp_zx1_remove_memory, - .alloc_by_type = agp_generic_alloc_by_type, - .free_by_type = agp_generic_free_by_type, - .agp_alloc_page = agp_generic_alloc_page, - .agp_alloc_pages = agp_generic_alloc_pages, - .agp_destroy_page = agp_generic_destroy_page, - .agp_destroy_pages = agp_generic_destroy_pages, - .agp_type_to_mask_type = agp_generic_type_to_mask_type, - .cant_use_aperture = true, -}; - -static int __init -hp_zx1_setup (u64 ioc_hpa, u64 lba_hpa) -{ - struct agp_bridge_data *bridge; - int error = 0; - - error = hp_zx1_ioc_init(ioc_hpa); - if (error) - goto fail; - - error = hp_zx1_lba_init(lba_hpa); - if (error) - goto fail; - - bridge = agp_alloc_bridge(); - if (!bridge) { - error = -ENOMEM; - goto fail; - } - bridge->driver = &hp_zx1_driver; - - fake_bridge_dev.vendor = PCI_VENDOR_ID_HP; - fake_bridge_dev.device = PCI_DEVICE_ID_HP_PCIX_LBA; - bridge->dev = &fake_bridge_dev; - - 
error = agp_add_bridge(bridge); - fail: - if (error) - hp_zx1_cleanup(); - return error; -} - -static acpi_status __init -zx1_gart_probe (acpi_handle obj, u32 depth, void *context, void **ret) -{ - acpi_handle handle, parent; - acpi_status status; - struct acpi_device_info *info; - u64 lba_hpa, sba_hpa, length; - int match; - - status = hp_acpi_csr_space(obj, &lba_hpa, &length); - if (ACPI_FAILURE(status)) - return AE_OK; /* keep looking for another bridge */ - - /* Look for an enclosing IOC scope and find its CSR space */ - handle = obj; - do { - status = acpi_get_object_info(handle, &info); - if (ACPI_SUCCESS(status) && (info->valid & ACPI_VALID_HID)) { - /* TBD check _CID also */ - match = (strcmp(info->hardware_id.string, "HWP0001") == 0); - kfree(info); - if (match) { - status = hp_acpi_csr_space(handle, &sba_hpa, &length); - if (ACPI_SUCCESS(status)) - break; - else { - printk(KERN_ERR PFX "Detected HP ZX1 " - "AGP LBA but no IOC.\n"); - return AE_OK; - } - } - } - - status = acpi_get_parent(handle, &parent); - handle = parent; - } while (ACPI_SUCCESS(status)); - - if (ACPI_FAILURE(status)) - return AE_OK; /* found no enclosing IOC */ - - if (hp_zx1_setup(sba_hpa + HP_ZX1_IOC_OFFSET, lba_hpa)) - return AE_OK; - - printk(KERN_INFO PFX "Detected HP ZX1 %s AGP chipset " - "(ioc=%llx, lba=%llx)\n", (char *)context, - sba_hpa + HP_ZX1_IOC_OFFSET, lba_hpa); - - hp_zx1_gart_found = 1; - return AE_CTRL_TERMINATE; /* we only support one bridge; quit looking */ -} - -static int __init -agp_hp_init (void) -{ - if (agp_off) - return -EINVAL; - - acpi_get_devices("HWP0003", zx1_gart_probe, "HWP0003", NULL); - if (hp_zx1_gart_found) - return 0; - - acpi_get_devices("HWP0007", zx1_gart_probe, "HWP0007", NULL); - if (hp_zx1_gart_found) - return 0; - - return -ENODEV; -} - -static void __exit -agp_hp_cleanup (void) -{ -} - -module_init(agp_hp_init); -module_exit(agp_hp_cleanup); - -MODULE_LICENSE("GPL and additional rights"); diff --git a/drivers/char/agp/i460-agp.c b/drivers/char/agp/i460-agp.c deleted file mode 100644 index 15b240ea4848..000000000000 --- a/drivers/char/agp/i460-agp.c +++ /dev/null @@ -1,659 +0,0 @@ -/* - * For documentation on the i460 AGP interface, see Chapter 7 (AGP Subsystem) of - * the "Intel 460GTX Chipset Software Developer's Manual": - * http://www.intel.com/design/archives/itanium/downloads/248704.htm - */ -/* - * 460GX support by Chris Ahna <christopher.j.ahna@intel.com> - * Clean up & simplification by David Mosberger-Tang <davidm@hpl.hp.com> - */ -#include <linux/module.h> -#include <linux/pci.h> -#include <linux/init.h> -#include <linux/string.h> -#include <linux/slab.h> -#include <linux/agp_backend.h> -#include <linux/log2.h> - -#include "agp.h" - -#define INTEL_I460_BAPBASE 0x98 -#define INTEL_I460_GXBCTL 0xa0 -#define INTEL_I460_AGPSIZ 0xa2 -#define INTEL_I460_ATTBASE 0xfe200000 -#define INTEL_I460_GATT_VALID (1UL << 24) -#define INTEL_I460_GATT_COHERENT (1UL << 25) - -/* - * The i460 can operate with large (4MB) pages, but there is no sane way to support this - * within the current kernel/DRM environment, so we disable the relevant code for now. - * See also comments in ia64_alloc_page()... 
- */ -#define I460_LARGE_IO_PAGES 0 - -#if I460_LARGE_IO_PAGES -# define I460_IO_PAGE_SHIFT i460.io_page_shift -#else -# define I460_IO_PAGE_SHIFT 12 -#endif - -#define I460_IOPAGES_PER_KPAGE (PAGE_SIZE >> I460_IO_PAGE_SHIFT) -#define I460_KPAGES_PER_IOPAGE (1 << (I460_IO_PAGE_SHIFT - PAGE_SHIFT)) -#define I460_SRAM_IO_DISABLE (1 << 4) -#define I460_BAPBASE_ENABLE (1 << 3) -#define I460_AGPSIZ_MASK 0x7 -#define I460_4M_PS (1 << 1) - -/* Control bits for Out-Of-GART coherency and Burst Write Combining */ -#define I460_GXBCTL_OOG (1UL << 0) -#define I460_GXBCTL_BWC (1UL << 2) - -/* - * gatt_table entries are 32-bits wide on the i460; the generic code ought to declare the - * gatt_table and gatt_table_real pointers a "void *"... - */ -#define RD_GATT(index) readl((u32 *) i460.gatt + (index)) -#define WR_GATT(index, val) writel((val), (u32 *) i460.gatt + (index)) -/* - * The 460 spec says we have to read the last location written to make sure that all - * writes have taken effect - */ -#define WR_FLUSH_GATT(index) RD_GATT(index) - -static unsigned long i460_mask_memory (struct agp_bridge_data *bridge, - dma_addr_t addr, int type); - -static struct { - void *gatt; /* ioremap'd GATT area */ - - /* i460 supports multiple GART page sizes, so GART pageshift is dynamic: */ - u8 io_page_shift; - - /* BIOS configures chipset to one of 2 possible apbase values: */ - u8 dynamic_apbase; - - /* structure for tracking partial use of 4MB GART pages: */ - struct lp_desc { - unsigned long *alloced_map; /* bitmap of kernel-pages in use */ - int refcount; /* number of kernel pages using the large page */ - u64 paddr; /* physical address of large page */ - struct page *page; /* page pointer */ - } *lp_desc; -} i460; - -static const struct aper_size_info_8 i460_sizes[3] = -{ - /* - * The 32GB aperture is only available with a 4M GART page size. Due to the - * dynamic GART page size, we can't figure out page_order or num_entries until - * runtime. - */ - {32768, 0, 0, 4}, - {1024, 0, 0, 2}, - {256, 0, 0, 1} -}; - -static struct gatt_mask i460_masks[] = -{ - { - .mask = INTEL_I460_GATT_VALID | INTEL_I460_GATT_COHERENT, - .type = 0 - } -}; - -static int i460_fetch_size (void) -{ - int i; - u8 temp; - struct aper_size_info_8 *values; - - /* Determine the GART page size */ - pci_read_config_byte(agp_bridge->dev, INTEL_I460_GXBCTL, &temp); - i460.io_page_shift = (temp & I460_4M_PS) ? 
22 : 12; - pr_debug("i460_fetch_size: io_page_shift=%d\n", i460.io_page_shift); - - if (i460.io_page_shift != I460_IO_PAGE_SHIFT) { - printk(KERN_ERR PFX - "I/O (GART) page-size %luKB doesn't match expected " - "size %luKB\n", - 1UL << (i460.io_page_shift - 10), - 1UL << (I460_IO_PAGE_SHIFT)); - return 0; - } - - values = A_SIZE_8(agp_bridge->driver->aperture_sizes); - - pci_read_config_byte(agp_bridge->dev, INTEL_I460_AGPSIZ, &temp); - - /* Exit now if the IO drivers for the GART SRAMS are turned off */ - if (temp & I460_SRAM_IO_DISABLE) { - printk(KERN_ERR PFX "GART SRAMS disabled on 460GX chipset\n"); - printk(KERN_ERR PFX "AGPGART operation not possible\n"); - return 0; - } - - /* Make sure we don't try to create an 2 ^ 23 entry GATT */ - if ((i460.io_page_shift == 0) && ((temp & I460_AGPSIZ_MASK) == 4)) { - printk(KERN_ERR PFX "We can't have a 32GB aperture with 4KB GART pages\n"); - return 0; - } - - /* Determine the proper APBASE register */ - if (temp & I460_BAPBASE_ENABLE) - i460.dynamic_apbase = INTEL_I460_BAPBASE; - else - i460.dynamic_apbase = AGP_APBASE; - - for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) { - /* - * Dynamically calculate the proper num_entries and page_order values for - * the define aperture sizes. Take care not to shift off the end of - * values[i].size. - */ - values[i].num_entries = (values[i].size << 8) >> (I460_IO_PAGE_SHIFT - 12); - values[i].page_order = ilog2((sizeof(u32)*values[i].num_entries) >> PAGE_SHIFT); - } - - for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) { - /* Neglect control bits when matching up size_value */ - if ((temp & I460_AGPSIZ_MASK) == values[i].size_value) { - agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + i); - agp_bridge->aperture_size_idx = i; - return values[i].size; - } - } - - return 0; -} - -/* There isn't anything to do here since 460 has no GART TLB. */ -static void i460_tlb_flush (struct agp_memory *mem) -{ - return; -} - -/* - * This utility function is needed to prevent corruption of the control bits - * which are stored along with the aperture size in 460's AGPSIZ register - */ -static void i460_write_agpsiz (u8 size_value) -{ - u8 temp; - - pci_read_config_byte(agp_bridge->dev, INTEL_I460_AGPSIZ, &temp); - pci_write_config_byte(agp_bridge->dev, INTEL_I460_AGPSIZ, - ((temp & ~I460_AGPSIZ_MASK) | size_value)); -} - -static void i460_cleanup (void) -{ - struct aper_size_info_8 *previous_size; - - previous_size = A_SIZE_8(agp_bridge->previous_size); - i460_write_agpsiz(previous_size->size_value); - - if (I460_IO_PAGE_SHIFT > PAGE_SHIFT) - kfree(i460.lp_desc); -} - -static int i460_configure (void) -{ - union { - u32 small[2]; - u64 large; - } temp; - size_t size; - u8 scratch; - struct aper_size_info_8 *current_size; - - temp.large = 0; - - current_size = A_SIZE_8(agp_bridge->current_size); - i460_write_agpsiz(current_size->size_value); - - /* - * Do the necessary rigmarole to read all eight bytes of APBASE. - * This has to be done since the AGP aperture can be above 4GB on - * 460 based systems. 
- */ - pci_read_config_dword(agp_bridge->dev, i460.dynamic_apbase, &(temp.small[0])); - pci_read_config_dword(agp_bridge->dev, i460.dynamic_apbase + 4, &(temp.small[1])); - - /* Clear BAR control bits */ - agp_bridge->gart_bus_addr = temp.large & ~((1UL << 3) - 1); - - pci_read_config_byte(agp_bridge->dev, INTEL_I460_GXBCTL, &scratch); - pci_write_config_byte(agp_bridge->dev, INTEL_I460_GXBCTL, - (scratch & 0x02) | I460_GXBCTL_OOG | I460_GXBCTL_BWC); - - /* - * Initialize partial allocation trackers if a GART page is bigger than a kernel - * page. - */ - if (I460_IO_PAGE_SHIFT > PAGE_SHIFT) { - size = current_size->num_entries * sizeof(i460.lp_desc[0]); - i460.lp_desc = kzalloc(size, GFP_KERNEL); - if (!i460.lp_desc) - return -ENOMEM; - } - return 0; -} - -static int i460_create_gatt_table (struct agp_bridge_data *bridge) -{ - int page_order, num_entries, i; - void *temp; - - /* - * Load up the fixed address of the GART SRAMS which hold our GATT table. - */ - temp = agp_bridge->current_size; - page_order = A_SIZE_8(temp)->page_order; - num_entries = A_SIZE_8(temp)->num_entries; - - i460.gatt = ioremap(INTEL_I460_ATTBASE, PAGE_SIZE << page_order); - if (!i460.gatt) { - printk(KERN_ERR PFX "ioremap failed\n"); - return -ENOMEM; - } - - /* These are no good, the should be removed from the agp_bridge strucure... */ - agp_bridge->gatt_table_real = NULL; - agp_bridge->gatt_table = NULL; - agp_bridge->gatt_bus_addr = 0; - - for (i = 0; i < num_entries; ++i) - WR_GATT(i, 0); - WR_FLUSH_GATT(i - 1); - return 0; -} - -static int i460_free_gatt_table (struct agp_bridge_data *bridge) -{ - int num_entries, i; - void *temp; - - temp = agp_bridge->current_size; - - num_entries = A_SIZE_8(temp)->num_entries; - - for (i = 0; i < num_entries; ++i) - WR_GATT(i, 0); - WR_FLUSH_GATT(num_entries - 1); - - iounmap(i460.gatt); - return 0; -} - -/* - * The following functions are called when the I/O (GART) page size is smaller than - * PAGE_SIZE. 
- */ - -static int i460_insert_memory_small_io_page (struct agp_memory *mem, - off_t pg_start, int type) -{ - unsigned long paddr, io_pg_start, io_page_size; - int i, j, k, num_entries; - void *temp; - - pr_debug("i460_insert_memory_small_io_page(mem=%p, pg_start=%ld, type=%d, paddr0=0x%lx)\n", - mem, pg_start, type, page_to_phys(mem->pages[0])); - - if (type >= AGP_USER_TYPES || mem->type >= AGP_USER_TYPES) - return -EINVAL; - - io_pg_start = I460_IOPAGES_PER_KPAGE * pg_start; - - temp = agp_bridge->current_size; - num_entries = A_SIZE_8(temp)->num_entries; - - if ((io_pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count) > num_entries) { - printk(KERN_ERR PFX "Looks like we're out of AGP memory\n"); - return -EINVAL; - } - - j = io_pg_start; - while (j < (io_pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count)) { - if (!PGE_EMPTY(agp_bridge, RD_GATT(j))) { - pr_debug("i460_insert_memory_small_io_page: GATT[%d]=0x%x is busy\n", - j, RD_GATT(j)); - return -EBUSY; - } - j++; - } - - io_page_size = 1UL << I460_IO_PAGE_SHIFT; - for (i = 0, j = io_pg_start; i < mem->page_count; i++) { - paddr = page_to_phys(mem->pages[i]); - for (k = 0; k < I460_IOPAGES_PER_KPAGE; k++, j++, paddr += io_page_size) - WR_GATT(j, i460_mask_memory(agp_bridge, paddr, mem->type)); - } - WR_FLUSH_GATT(j - 1); - return 0; -} - -static int i460_remove_memory_small_io_page(struct agp_memory *mem, - off_t pg_start, int type) -{ - int i; - - pr_debug("i460_remove_memory_small_io_page(mem=%p, pg_start=%ld, type=%d)\n", - mem, pg_start, type); - - pg_start = I460_IOPAGES_PER_KPAGE * pg_start; - - for (i = pg_start; i < (pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count); i++) - WR_GATT(i, 0); - WR_FLUSH_GATT(i - 1); - return 0; -} - -#if I460_LARGE_IO_PAGES - -/* - * These functions are called when the I/O (GART) page size exceeds PAGE_SIZE. - * - * This situation is interesting since AGP memory allocations that are smaller than a - * single GART page are possible. The i460.lp_desc array tracks partial allocation of the - * large GART pages to work around this issue. - * - * i460.lp_desc[pg_num].refcount tracks the number of kernel pages in use within GART page - * pg_num. i460.lp_desc[pg_num].paddr is the physical address of the large page and - * i460.lp_desc[pg_num].alloced_map is a bitmap of kernel pages that are in use (allocated). 
- */ - -static int i460_alloc_large_page (struct lp_desc *lp) -{ - unsigned long order = I460_IO_PAGE_SHIFT - PAGE_SHIFT; - size_t map_size; - - lp->page = alloc_pages(GFP_KERNEL, order); - if (!lp->page) { - printk(KERN_ERR PFX "Couldn't alloc 4M GART page...\n"); - return -ENOMEM; - } - - map_size = ((I460_KPAGES_PER_IOPAGE + BITS_PER_LONG - 1) & -BITS_PER_LONG)/8; - lp->alloced_map = kzalloc(map_size, GFP_KERNEL); - if (!lp->alloced_map) { - __free_pages(lp->page, order); - printk(KERN_ERR PFX "Out of memory, we're in trouble...\n"); - return -ENOMEM; - } - - lp->paddr = page_to_phys(lp->page); - lp->refcount = 0; - atomic_add(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp); - return 0; -} - -static void i460_free_large_page (struct lp_desc *lp) -{ - kfree(lp->alloced_map); - lp->alloced_map = NULL; - - __free_pages(lp->page, I460_IO_PAGE_SHIFT - PAGE_SHIFT); - atomic_sub(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp); -} - -static int i460_insert_memory_large_io_page (struct agp_memory *mem, - off_t pg_start, int type) -{ - int i, start_offset, end_offset, idx, pg, num_entries; - struct lp_desc *start, *end, *lp; - void *temp; - - if (type >= AGP_USER_TYPES || mem->type >= AGP_USER_TYPES) - return -EINVAL; - - temp = agp_bridge->current_size; - num_entries = A_SIZE_8(temp)->num_entries; - - /* Figure out what pg_start means in terms of our large GART pages */ - start = &i460.lp_desc[pg_start / I460_KPAGES_PER_IOPAGE]; - end = &i460.lp_desc[(pg_start + mem->page_count - 1) / I460_KPAGES_PER_IOPAGE]; - start_offset = pg_start % I460_KPAGES_PER_IOPAGE; - end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_IOPAGE; - - if (end > i460.lp_desc + num_entries) { - printk(KERN_ERR PFX "Looks like we're out of AGP memory\n"); - return -EINVAL; - } - - /* Check if the requested region of the aperture is free */ - for (lp = start; lp <= end; ++lp) { - if (!lp->alloced_map) - continue; /* OK, the entire large page is available... */ - - for (idx = ((lp == start) ? start_offset : 0); - idx < ((lp == end) ? (end_offset + 1) : I460_KPAGES_PER_IOPAGE); - idx++) - { - if (test_bit(idx, lp->alloced_map)) - return -EBUSY; - } - } - - for (lp = start, i = 0; lp <= end; ++lp) { - if (!lp->alloced_map) { - /* Allocate new GART pages... */ - if (i460_alloc_large_page(lp) < 0) - return -ENOMEM; - pg = lp - i460.lp_desc; - WR_GATT(pg, i460_mask_memory(agp_bridge, - lp->paddr, 0)); - WR_FLUSH_GATT(pg); - } - - for (idx = ((lp == start) ? start_offset : 0); - idx < ((lp == end) ? (end_offset + 1) : I460_KPAGES_PER_IOPAGE); - idx++, i++) - { - mem->pages[i] = lp->page; - __set_bit(idx, lp->alloced_map); - ++lp->refcount; - } - } - return 0; -} - -static int i460_remove_memory_large_io_page (struct agp_memory *mem, - off_t pg_start, int type) -{ - int i, pg, start_offset, end_offset, idx, num_entries; - struct lp_desc *start, *end, *lp; - void *temp; - - temp = agp_bridge->current_size; - num_entries = A_SIZE_8(temp)->num_entries; - - /* Figure out what pg_start means in terms of our large GART pages */ - start = &i460.lp_desc[pg_start / I460_KPAGES_PER_IOPAGE]; - end = &i460.lp_desc[(pg_start + mem->page_count - 1) / I460_KPAGES_PER_IOPAGE]; - start_offset = pg_start % I460_KPAGES_PER_IOPAGE; - end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_IOPAGE; - - for (i = 0, lp = start; lp <= end; ++lp) { - for (idx = ((lp == start) ? start_offset : 0); - idx < ((lp == end) ? 
(end_offset + 1) : I460_KPAGES_PER_IOPAGE); - idx++, i++) - { - mem->pages[i] = NULL; - __clear_bit(idx, lp->alloced_map); - --lp->refcount; - } - - /* Free GART pages if they are unused */ - if (lp->refcount == 0) { - pg = lp - i460.lp_desc; - WR_GATT(pg, 0); - WR_FLUSH_GATT(pg); - i460_free_large_page(lp); - } - } - return 0; -} - -/* Wrapper routines to call the approriate {small_io_page,large_io_page} function */ - -static int i460_insert_memory (struct agp_memory *mem, - off_t pg_start, int type) -{ - if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) - return i460_insert_memory_small_io_page(mem, pg_start, type); - else - return i460_insert_memory_large_io_page(mem, pg_start, type); -} - -static int i460_remove_memory (struct agp_memory *mem, - off_t pg_start, int type) -{ - if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) - return i460_remove_memory_small_io_page(mem, pg_start, type); - else - return i460_remove_memory_large_io_page(mem, pg_start, type); -} - -/* - * If the I/O (GART) page size is bigger than the kernel page size, we don't want to - * allocate memory until we know where it is to be bound in the aperture (a - * multi-kernel-page alloc might fit inside of an already allocated GART page). - * - * Let's just hope nobody counts on the allocated AGP memory being there before bind time - * (I don't think current drivers do)... - */ -static struct page *i460_alloc_page (struct agp_bridge_data *bridge) -{ - void *page; - - if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) { - page = agp_generic_alloc_page(agp_bridge); - } else - /* Returning NULL would cause problems */ - /* AK: really dubious code. */ - page = (void *)~0UL; - return page; -} - -static void i460_destroy_page (struct page *page, int flags) -{ - if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) { - agp_generic_destroy_page(page, flags); - } -} - -#endif /* I460_LARGE_IO_PAGES */ - -static unsigned long i460_mask_memory (struct agp_bridge_data *bridge, - dma_addr_t addr, int type) -{ - /* Make sure the returned address is a valid GATT entry */ - return bridge->driver->masks[0].mask - | (((addr & ~((1 << I460_IO_PAGE_SHIFT) - 1)) & 0xfffff000) >> 12); -} - -const struct agp_bridge_driver intel_i460_driver = { - .owner = THIS_MODULE, - .aperture_sizes = i460_sizes, - .size_type = U8_APER_SIZE, - .num_aperture_sizes = 3, - .configure = i460_configure, - .fetch_size = i460_fetch_size, - .cleanup = i460_cleanup, - .tlb_flush = i460_tlb_flush, - .mask_memory = i460_mask_memory, - .masks = i460_masks, - .agp_enable = agp_generic_enable, - .cache_flush = global_cache_flush, - .create_gatt_table = i460_create_gatt_table, - .free_gatt_table = i460_free_gatt_table, -#if I460_LARGE_IO_PAGES - .insert_memory = i460_insert_memory, - .remove_memory = i460_remove_memory, - .agp_alloc_page = i460_alloc_page, - .agp_destroy_page = i460_destroy_page, -#else - .insert_memory = i460_insert_memory_small_io_page, - .remove_memory = i460_remove_memory_small_io_page, - .agp_alloc_page = agp_generic_alloc_page, - .agp_alloc_pages = agp_generic_alloc_pages, - .agp_destroy_page = agp_generic_destroy_page, - .agp_destroy_pages = agp_generic_destroy_pages, -#endif - .alloc_by_type = agp_generic_alloc_by_type, - .free_by_type = agp_generic_free_by_type, - .agp_type_to_mask_type = agp_generic_type_to_mask_type, - .cant_use_aperture = true, -}; - -static int agp_intel_i460_probe(struct pci_dev *pdev, - const struct pci_device_id *ent) -{ - struct agp_bridge_data *bridge; - u8 cap_ptr; - - cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); - if (!cap_ptr) - return -ENODEV; - - bridge = 
agp_alloc_bridge(); - if (!bridge) - return -ENOMEM; - - bridge->driver = &intel_i460_driver; - bridge->dev = pdev; - bridge->capndx = cap_ptr; - - printk(KERN_INFO PFX "Detected Intel 460GX chipset\n"); - - pci_set_drvdata(pdev, bridge); - return agp_add_bridge(bridge); -} - -static void agp_intel_i460_remove(struct pci_dev *pdev) -{ - struct agp_bridge_data *bridge = pci_get_drvdata(pdev); - - agp_remove_bridge(bridge); - agp_put_bridge(bridge); -} - -static struct pci_device_id agp_intel_i460_pci_table[] = { - { - .class = (PCI_CLASS_BRIDGE_HOST << 8), - .class_mask = ~0, - .vendor = PCI_VENDOR_ID_INTEL, - .device = PCI_DEVICE_ID_INTEL_84460GX, - .subvendor = PCI_ANY_ID, - .subdevice = PCI_ANY_ID, - }, - { } -}; - -MODULE_DEVICE_TABLE(pci, agp_intel_i460_pci_table); - -static struct pci_driver agp_intel_i460_pci_driver = { - .name = "agpgart-intel-i460", - .id_table = agp_intel_i460_pci_table, - .probe = agp_intel_i460_probe, - .remove = agp_intel_i460_remove, -}; - -static int __init agp_intel_i460_init(void) -{ - if (agp_off) - return -EINVAL; - return pci_register_driver(&agp_intel_i460_pci_driver); -} - -static void __exit agp_intel_i460_cleanup(void) -{ - pci_unregister_driver(&agp_intel_i460_pci_driver); -} - -module_init(agp_intel_i460_init); -module_exit(agp_intel_i460_cleanup); - -MODULE_AUTHOR("Chris Ahna <Christopher.J.Ahna@intel.com>"); -MODULE_LICENSE("GPL and additional rights"); diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c index c518b3a9db04..3111e320b2c5 100644 --- a/drivers/char/agp/intel-agp.c +++ b/drivers/char/agp/intel-agp.c @@ -12,7 +12,7 @@ #include <asm/smp.h> #include "agp.h" #include "intel-agp.h" -#include <drm/intel-gtt.h> +#include <drm/intel/intel-gtt.h> static int intel_fetch_size(void) { @@ -920,4 +920,5 @@ module_init(agp_intel_init); module_exit(agp_intel_cleanup); MODULE_AUTHOR("Dave Jones, Various @Intel"); +MODULE_DESCRIPTION("Intel AGPGART routines"); MODULE_LICENSE("GPL and additional rights"); diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index bf6716ff863b..bcc26785175d 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -25,7 +25,7 @@ #include <asm/smp.h> #include "agp.h" #include "intel-agp.h" -#include <drm/intel-gtt.h> +#include <drm/intel/intel-gtt.h> #include <asm/set_memory.h> /* @@ -53,6 +53,7 @@ struct intel_gtt_driver { * of the mmio register file, that's done in the generic code. */ void (*cleanup)(void); void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags); + dma_addr_t (*read_entry)(unsigned int entry, bool *is_present, bool *is_local); /* Flags is a more or less chipset specific opaque value. * For chipsets that need to support old ums (non-gem) code, this * needs to be identical to the various supported agp memory types! 
*/ @@ -336,6 +337,19 @@ static void i810_write_entry(dma_addr_t addr, unsigned int entry, writel_relaxed(addr | pte_flags, intel_private.gtt + entry); } +static dma_addr_t i810_read_entry(unsigned int entry, + bool *is_present, bool *is_local) +{ + u32 val; + + val = readl(intel_private.gtt + entry); + + *is_present = val & I810_PTE_VALID; + *is_local = val & I810_PTE_LOCAL; + + return val & ~0xfff; +} + static resource_size_t intel_gtt_stolen_size(void) { u16 gmch_ctrl; @@ -741,6 +755,19 @@ static void i830_write_entry(dma_addr_t addr, unsigned int entry, writel_relaxed(addr | pte_flags, intel_private.gtt + entry); } +static dma_addr_t i830_read_entry(unsigned int entry, + bool *is_present, bool *is_local) +{ + u32 val; + + val = readl(intel_private.gtt + entry); + + *is_present = val & I810_PTE_VALID; + *is_local = false; + + return val & ~0xfff; +} + bool intel_gmch_enable_gtt(void) { u8 __iomem *reg; @@ -878,6 +905,13 @@ void intel_gmch_gtt_insert_sg_entries(struct sg_table *st, } EXPORT_SYMBOL(intel_gmch_gtt_insert_sg_entries); +dma_addr_t intel_gmch_gtt_read_entry(unsigned int pg, + bool *is_present, bool *is_local) +{ + return intel_private.driver->read_entry(pg, is_present, is_local); +} +EXPORT_SYMBOL(intel_gmch_gtt_read_entry); + #if IS_ENABLED(CONFIG_AGP_INTEL) static void intel_gmch_gtt_insert_pages(unsigned int first_entry, unsigned int num_entries, @@ -1126,6 +1160,19 @@ static void i965_write_entry(dma_addr_t addr, writel_relaxed(addr | pte_flags, intel_private.gtt + entry); } +static dma_addr_t i965_read_entry(unsigned int entry, + bool *is_present, bool *is_local) +{ + u64 val; + + val = readl(intel_private.gtt + entry); + + *is_present = val & I810_PTE_VALID; + *is_local = false; + + return ((val & 0xf0) << 28) | (val & ~0xfff); +} + static int i9xx_setup(void) { phys_addr_t reg_addr; @@ -1187,6 +1234,7 @@ static const struct intel_gtt_driver i81x_gtt_driver = { .cleanup = i810_cleanup, .check_flags = i830_check_flags, .write_entry = i810_write_entry, + .read_entry = i810_read_entry, }; static const struct intel_gtt_driver i8xx_gtt_driver = { .gen = 2, @@ -1194,6 +1242,7 @@ static const struct intel_gtt_driver i8xx_gtt_driver = { .setup = i830_setup, .cleanup = i830_cleanup, .write_entry = i830_write_entry, + .read_entry = i830_read_entry, .dma_mask_size = 32, .check_flags = i830_check_flags, .chipset_flush = i830_chipset_flush, @@ -1205,6 +1254,7 @@ static const struct intel_gtt_driver i915_gtt_driver = { .cleanup = i9xx_cleanup, /* i945 is the last gpu to need phys mem (for overlay and cursors). 
*/ .write_entry = i830_write_entry, + .read_entry = i830_read_entry, .dma_mask_size = 32, .check_flags = i830_check_flags, .chipset_flush = i9xx_chipset_flush, @@ -1215,6 +1265,7 @@ static const struct intel_gtt_driver g33_gtt_driver = { .setup = i9xx_setup, .cleanup = i9xx_cleanup, .write_entry = i965_write_entry, + .read_entry = i965_read_entry, .dma_mask_size = 36, .check_flags = i830_check_flags, .chipset_flush = i9xx_chipset_flush, @@ -1225,6 +1276,7 @@ static const struct intel_gtt_driver pineview_gtt_driver = { .setup = i9xx_setup, .cleanup = i9xx_cleanup, .write_entry = i965_write_entry, + .read_entry = i965_read_entry, .dma_mask_size = 36, .check_flags = i830_check_flags, .chipset_flush = i9xx_chipset_flush, @@ -1235,6 +1287,7 @@ static const struct intel_gtt_driver i965_gtt_driver = { .setup = i9xx_setup, .cleanup = i9xx_cleanup, .write_entry = i965_write_entry, + .read_entry = i965_read_entry, .dma_mask_size = 36, .check_flags = i830_check_flags, .chipset_flush = i9xx_chipset_flush, @@ -1244,6 +1297,7 @@ static const struct intel_gtt_driver g4x_gtt_driver = { .setup = i9xx_setup, .cleanup = i9xx_cleanup, .write_entry = i965_write_entry, + .read_entry = i965_read_entry, .dma_mask_size = 36, .check_flags = i830_check_flags, .chipset_flush = i9xx_chipset_flush, @@ -1254,6 +1308,7 @@ static const struct intel_gtt_driver ironlake_gtt_driver = { .setup = i9xx_setup, .cleanup = i9xx_cleanup, .write_entry = i965_write_entry, + .read_entry = i965_read_entry, .dma_mask_size = 36, .check_flags = i830_check_flags, .chipset_flush = i9xx_chipset_flush, @@ -1461,4 +1516,5 @@ void intel_gmch_remove(void) EXPORT_SYMBOL(intel_gmch_remove); MODULE_AUTHOR("Dave Jones, Various @Intel"); +MODULE_DESCRIPTION("Intel GTT (Graphics Translation Table) routines"); MODULE_LICENSE("GPL and additional rights"); diff --git a/drivers/char/agp/nvidia-agp.c b/drivers/char/agp/nvidia-agp.c index dbcbc06cc202..4787391bb6b4 100644 --- a/drivers/char/agp/nvidia-agp.c +++ b/drivers/char/agp/nvidia-agp.c @@ -11,6 +11,7 @@ #include <linux/page-flags.h> #include <linux/mm.h> #include <linux/jiffies.h> +#include <asm/msr.h> #include "agp.h" /* NVIDIA registers */ @@ -462,6 +463,7 @@ static void __exit agp_nvidia_cleanup(void) module_init(agp_nvidia_init); module_exit(agp_nvidia_cleanup); +MODULE_DESCRIPTION("Nvidia AGPGART routines"); MODULE_LICENSE("GPL and additional rights"); MODULE_AUTHOR("NVIDIA Corporation"); diff --git a/drivers/char/agp/parisc-agp.c b/drivers/char/agp/parisc-agp.c index 514f9f287a78..93a48070b2a1 100644 --- a/drivers/char/agp/parisc-agp.c +++ b/drivers/char/agp/parisc-agp.c @@ -38,7 +38,7 @@ static struct _parisc_agp_info { int lba_cap_offset; - u64 *gatt; + __le64 *gatt; u64 gatt_entries; u64 gart_base; @@ -104,7 +104,7 @@ parisc_agp_create_gatt_table(struct agp_bridge_data *bridge) int i; for (i = 0; i < info->gatt_entries; i++) { - info->gatt[i] = (unsigned long)agp_bridge->scratch_page; + info->gatt[i] = cpu_to_le64(agp_bridge->scratch_page); } return 0; @@ -158,9 +158,9 @@ parisc_agp_insert_memory(struct agp_memory *mem, off_t pg_start, int type) for (k = 0; k < info->io_pages_per_kpage; k++, j++, paddr += info->io_page_size) { - info->gatt[j] = + info->gatt[j] = cpu_to_le64( parisc_agp_mask_memory(agp_bridge, - paddr, type); + paddr, type)); asm_io_fdc(&info->gatt[j]); } } @@ -184,7 +184,7 @@ parisc_agp_remove_memory(struct agp_memory *mem, off_t pg_start, int type) io_pg_start = info->io_pages_per_kpage * pg_start; io_pg_count = info->io_pages_per_kpage * mem->page_count; for (i = 
io_pg_start; i < io_pg_count + io_pg_start; i++) { - info->gatt[i] = agp_bridge->scratch_page; + info->gatt[i] = cpu_to_le64(agp_bridge->scratch_page); } agp_bridge->driver->tlb_flush(mem); @@ -204,7 +204,8 @@ parisc_agp_mask_memory(struct agp_bridge_data *bridge, dma_addr_t addr, pa |= (ci >> PAGE_SHIFT) & 0xff;/* move CI (8 bits) into lowest byte */ pa |= SBA_PDIR_VALID_BIT; /* set "valid" bit */ - return cpu_to_le64(pa); + /* return native (big-endian) PDIR entry */ + return pa; } static void @@ -251,7 +252,8 @@ static int __init agp_ioc_init(void __iomem *ioc_regs) { struct _parisc_agp_info *info = &parisc_agp_info; - u64 iova_base, *io_pdir, io_tlb_ps; + u64 iova_base, io_tlb_ps; + __le64 *io_pdir; int io_tlb_shift; printk(KERN_INFO DRVPFX "IO PDIR shared with sba_iommu\n"); @@ -394,8 +396,6 @@ find_quicksilver(struct device *dev, void *data) static int __init parisc_agp_init(void) { - extern struct sba_device *sba_list; - int err = -1; struct parisc_device *sba = NULL, *lba = NULL; struct lba_device *lbadev = NULL; @@ -432,4 +432,5 @@ out: module_init(parisc_agp_init); MODULE_AUTHOR("Kyle McMartin <kyle@parisc-linux.org>"); +MODULE_DESCRIPTION("HP Quicksilver AGP GART routines"); MODULE_LICENSE("GPL"); diff --git a/drivers/char/agp/sis-agp.c b/drivers/char/agp/sis-agp.c index 484bb101c53b..a0deb97cedb0 100644 --- a/drivers/char/agp/sis-agp.c +++ b/drivers/char/agp/sis-agp.c @@ -433,4 +433,5 @@ module_param(agp_sis_force_delay, bool, 0); MODULE_PARM_DESC(agp_sis_force_delay,"forces sis delay hack"); module_param(agp_sis_agp_spec, int, 0); MODULE_PARM_DESC(agp_sis_agp_spec,"0=force sis init, 1=force generic agp3 init, default: autodetect"); +MODULE_DESCRIPTION("SiS AGPGART routines"); MODULE_LICENSE("GPL and additional rights"); diff --git a/drivers/char/agp/sworks-agp.c b/drivers/char/agp/sworks-agp.c index b91da5998dd7..0ab7562d17c9 100644 --- a/drivers/char/agp/sworks-agp.c +++ b/drivers/char/agp/sworks-agp.c @@ -564,5 +564,6 @@ static void __exit agp_serverworks_cleanup(void) module_init(agp_serverworks_init); module_exit(agp_serverworks_cleanup); +MODULE_DESCRIPTION("Serverworks AGPGART routines"); MODULE_LICENSE("GPL and additional rights"); diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c index 62de7f4ba864..b8d7115b8c9e 100644 --- a/drivers/char/agp/uninorth-agp.c +++ b/drivers/char/agp/uninorth-agp.c @@ -3,6 +3,7 @@ * UniNorth AGPGART routines. 
*/ #include <linux/module.h> +#include <linux/of.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/init.h> @@ -725,4 +726,5 @@ MODULE_PARM_DESC(aperture, "\t\tDefault: " DEFAULT_APERTURE_STRING "M"); MODULE_AUTHOR("Ben Herrenschmidt & Paul Mackerras"); +MODULE_DESCRIPTION("Apple UniNorth & U3 AGP support"); MODULE_LICENSE("GPL"); diff --git a/drivers/char/agp/via-agp.c b/drivers/char/agp/via-agp.c index bc5140af2dcb..8b19a5d1a09b 100644 --- a/drivers/char/agp/via-agp.c +++ b/drivers/char/agp/via-agp.c @@ -575,5 +575,6 @@ static void __exit agp_via_cleanup(void) module_init(agp_via_init); module_exit(agp_via_cleanup); +MODULE_DESCRIPTION("VIA AGPGART routines"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Dave Jones"); diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c index e795390b070f..4aa5d1c76f83 100644 --- a/drivers/char/apm-emulation.c +++ b/drivers/char/apm-emulation.c @@ -141,22 +141,11 @@ static struct apm_queue kapmd_queue; static DEFINE_MUTEX(state_lock); -static const char driver_version[] = "1.13"; /* no spaces */ - - - -/* - * Compatibility cruft until the IPAQ people move over to the new - * interface. - */ -static void __apm_get_power_status(struct apm_power_info *info) -{ -} /* * This allows machines to provide their own "apm get power status" function. */ -void (*apm_get_power_status)(struct apm_power_info *) = __apm_get_power_status; +void (*apm_get_power_status)(struct apm_power_info *); EXPORT_SYMBOL(apm_get_power_status); @@ -435,6 +424,8 @@ static struct miscdevice apm_device = { */ static int proc_apm_show(struct seq_file *m, void *v) { + static const char driver_version[] = "1.13"; /* no spaces */ + struct apm_power_info info; char *units; diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c index 69314532f38c..c138c468f3a4 100644 --- a/drivers/char/applicom.c +++ b/drivers/char/applicom.c @@ -111,7 +111,6 @@ static irqreturn_t ac_interrupt(int, void *); static const struct file_operations ac_fops = { .owner = THIS_MODULE, - .llseek = no_llseek, .read = ac_read, .write = ac_write, .unlocked_ioctl = ac_ioctl, @@ -836,7 +835,10 @@ static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg) ret = -ENOTTY; break; } - Dummy = readb(apbs[IndexCard].RamIO + VERS); + + if (cmd != 6) + Dummy = readb(apbs[IndexCard].RamIO + VERS); + kfree(adgl); mutex_unlock(&ac_mutex); return ret; diff --git a/drivers/char/bsr.c b/drivers/char/bsr.c index 12143854aeac..837109ef6766 100644 --- a/drivers/char/bsr.c +++ b/drivers/char/bsr.c @@ -6,11 +6,10 @@ * Author: Sonny Rao <sonnyrao@us.ibm.com> */ +#include <linux/device.h> #include <linux/kernel.h> #include <linux/of.h> #include <linux/of_address.h> -#include <linux/of_device.h> -#include <linux/of_platform.h> #include <linux/fs.h> #include <linux/module.h> #include <linux/cdev.h> @@ -343,5 +342,6 @@ static void __exit bsr_exit(void) module_init(bsr_init); module_exit(bsr_exit); +MODULE_DESCRIPTION("IBM POWER Barrier Synchronization Register Driver"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Sonny Rao <sonnyrao@us.ibm.com>"); diff --git a/drivers/char/ds1620.c b/drivers/char/ds1620.c index cf89a9631107..44a1cdbd4bfb 100644 --- a/drivers/char/ds1620.c +++ b/drivers/char/ds1620.c @@ -353,7 +353,6 @@ static const struct file_operations ds1620_fops = { .open = ds1620_open, .read = ds1620_read, .unlocked_ioctl = ds1620_unlocked_ioctl, - .llseek = no_llseek, }; static struct miscdevice ds1620_miscdev = { @@ -421,4 +420,5 @@ static void __exit ds1620_exit(void) module_init(ds1620_init); 
module_exit(ds1620_exit); +MODULE_DESCRIPTION("Dallas Semiconductor DS1620 thermometer driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/char/dsp56k.c b/drivers/char/dsp56k.c index bda27e595da1..1c2c8439797c 100644 --- a/drivers/char/dsp56k.c +++ b/drivers/char/dsp56k.c @@ -530,5 +530,6 @@ static void __exit dsp56k_cleanup_driver(void) } module_exit(dsp56k_cleanup_driver); +MODULE_DESCRIPTION("Atari DSP56001 Device Driver"); MODULE_LICENSE("GPL"); MODULE_FIRMWARE("dsp56k/bootstrap.bin"); diff --git a/drivers/char/dtlk.c b/drivers/char/dtlk.c index 6946c1cad9f6..16618079298a 100644 --- a/drivers/char/dtlk.c +++ b/drivers/char/dtlk.c @@ -107,7 +107,6 @@ static const struct file_operations dtlk_fops = .unlocked_ioctl = dtlk_ioctl, .open = dtlk_open, .release = dtlk_release, - .llseek = no_llseek, }; /* local prototypes */ @@ -244,11 +243,11 @@ static __poll_t dtlk_poll(struct file *file, poll_table * wait) poll_wait(file, &dtlk_process_list, wait); if (dtlk_has_indexing && dtlk_readable()) { - del_timer(&dtlk_timer); + timer_delete(&dtlk_timer); mask = EPOLLIN | EPOLLRDNORM; } if (dtlk_writeable()) { - del_timer(&dtlk_timer); + timer_delete(&dtlk_timer); mask |= EPOLLOUT | EPOLLWRNORM; } /* there are no exception conditions */ @@ -323,7 +322,7 @@ static int dtlk_release(struct inode *inode, struct file *file) } TRACE_RET; - del_timer_sync(&dtlk_timer); + timer_delete_sync(&dtlk_timer); return 0; } @@ -660,4 +659,5 @@ static char dtlk_write_tts(char ch) return 0; } +MODULE_DESCRIPTION("RC Systems DoubleTalk PC speech card driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/char/hangcheck-timer.c b/drivers/char/hangcheck-timer.c index 4181bcc1c796..231cbf7b300f 100644 --- a/drivers/char/hangcheck-timer.c +++ b/drivers/char/hangcheck-timer.c @@ -69,7 +69,8 @@ MODULE_VERSION(VERSION_STR); static int __init hangcheck_parse_tick(char *str) { int par; - if (get_option(&str,&par)) + + if (get_option(&str, &par)) hangcheck_tick = par; return 1; } @@ -77,7 +78,8 @@ static int __init hangcheck_parse_tick(char *str) static int __init hangcheck_parse_margin(char *str) { int par; - if (get_option(&str,&par)) + + if (get_option(&str, &par)) hangcheck_margin = par; return 1; } @@ -85,7 +87,8 @@ static int __init hangcheck_parse_margin(char *str) static int __init hangcheck_parse_reboot(char *str) { int par; - if (get_option(&str,&par)) + + if (get_option(&str, &par)) hangcheck_reboot = par; return 1; } @@ -93,7 +96,8 @@ static int __init hangcheck_parse_reboot(char *str) static int __init hangcheck_parse_dump_tasks(char *str) { int par; - if (get_option(&str,&par)) + + if (get_option(&str, &par)) hangcheck_dump_tasks = par; return 1; } @@ -126,23 +130,23 @@ static void hangcheck_fire(struct timer_list *unused) if (tsc_diff > hangcheck_tsc_margin) { if (hangcheck_dump_tasks) { - printk(KERN_CRIT "Hangcheck: Task state:\n"); + pr_crit("Hangcheck: Task state:\n"); #ifdef CONFIG_MAGIC_SYSRQ handle_sysrq('t'); #endif /* CONFIG_MAGIC_SYSRQ */ } if (hangcheck_reboot) { - printk(KERN_CRIT "Hangcheck: hangcheck is restarting the machine.\n"); + pr_crit("Hangcheck: hangcheck is restarting the machine.\n"); emergency_restart(); } else { - printk(KERN_CRIT "Hangcheck: hangcheck value past margin!\n"); + pr_crit("Hangcheck: hangcheck value past margin!\n"); } } #if 0 /* * Enable to investigate delays in detail */ - printk("Hangcheck: called %Ld ns since last time (%Ld ns overshoot)\n", + pr_debug("Hangcheck: called %lld ns since last time (%lld ns overshoot)\n", tsc_diff, tsc_diff - hangcheck_tick*TIMER_FREQ); 
#endif mod_timer(&hangcheck_ticktock, jiffies + (hangcheck_tick*HZ)); @@ -152,7 +156,7 @@ static void hangcheck_fire(struct timer_list *unused) static int __init hangcheck_init(void) { - printk("Hangcheck: starting hangcheck timer %s (tick is %d seconds, margin is %d seconds).\n", + pr_debug("Hangcheck: starting hangcheck timer %s (tick is %d seconds, margin is %d seconds).\n", VERSION_STR, hangcheck_tick, hangcheck_margin); hangcheck_tsc_margin = (unsigned long long)hangcheck_margin + hangcheck_tick; @@ -167,8 +171,8 @@ static int __init hangcheck_init(void) static void __exit hangcheck_exit(void) { - del_timer_sync(&hangcheck_ticktock); - printk("Hangcheck: Stopped hangcheck timer.\n"); + timer_delete_sync(&hangcheck_ticktock); + pr_debug("Hangcheck: Stopped hangcheck timer.\n"); } module_init(hangcheck_init); diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c index ee71376f174b..4f5ccd3a1f56 100644 --- a/drivers/char/hpet.c +++ b/drivers/char/hpet.c @@ -64,25 +64,6 @@ static DEFINE_MUTEX(hpet_mutex); /* replaces BKL */ static u32 hpet_nhpet, hpet_max_freq = HPET_USER_FREQ; -/* This clocksource driver currently only works on ia64 */ -#ifdef CONFIG_IA64 -static void __iomem *hpet_mctr; - -static u64 read_hpet(struct clocksource *cs) -{ - return (u64)read_counter((void __iomem *)hpet_mctr); -} - -static struct clocksource clocksource_hpet = { - .name = "hpet", - .rating = 250, - .read = read_hpet, - .mask = CLOCKSOURCE_MASK(64), - .flags = CLOCK_SOURCE_IS_CONTINUOUS, -}; -static struct clocksource *hpet_clocksource; -#endif - /* A lock for concurrent access by app and isr hpet activity. */ static DEFINE_SPINLOCK(hpet_lock); @@ -106,12 +87,11 @@ struct hpets { struct hpets *hp_next; struct hpet __iomem *hp_hpet; unsigned long hp_hpet_phys; - struct clocksource *hp_clocksource; unsigned long long hp_tick_freq; unsigned long hp_delta; unsigned int hp_ntimer; unsigned int hp_which; - struct hpet_dev hp_dev[]; + struct hpet_dev hp_dev[] __counted_by(hp_ntimer); }; static struct hpets *hpets; @@ -182,6 +162,7 @@ static irqreturn_t hpet_interrupt(int irq, void *data) static void hpet_timer_set_irq(struct hpet_dev *devp) { + const unsigned int nr_irqs = irq_get_nr_irqs(); unsigned long v; int irq, gsi; struct hpet_timer __iomem *timer; @@ -289,8 +270,13 @@ hpet_read(struct file *file, char __user *buf, size_t count, loff_t * ppos) if (!devp->hd_ireqfreq) return -EIO; - if (count < sizeof(unsigned long)) - return -EINVAL; + if (in_compat_syscall()) { + if (count < sizeof(compat_ulong_t)) + return -EINVAL; + } else { + if (count < sizeof(unsigned long)) + return -EINVAL; + } add_wait_queue(&devp->hd_waitqueue, &wait); @@ -314,9 +300,16 @@ hpet_read(struct file *file, char __user *buf, size_t count, loff_t * ppos) schedule(); } - retval = put_user(data, (unsigned long __user *)buf); - if (!retval) - retval = sizeof(unsigned long); + if (in_compat_syscall()) { + retval = put_user(data, (compat_ulong_t __user *)buf); + if (!retval) + retval = sizeof(compat_ulong_t); + } else { + retval = put_user(data, (unsigned long __user *)buf); + if (!retval) + retval = sizeof(unsigned long); + } + out: __set_current_state(TASK_RUNNING); remove_wait_queue(&devp->hd_waitqueue, &wait); @@ -671,12 +664,24 @@ struct compat_hpet_info { unsigned short hi_timer; }; +/* 32-bit types would lead to different command codes which should be + * translated into 64-bit ones before passed to hpet_ioctl_common + */ +#define COMPAT_HPET_INFO _IOR('h', 0x03, struct compat_hpet_info) +#define COMPAT_HPET_IRQFREQ _IOW('h', 0x6, 
compat_ulong_t) + static long hpet_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct hpet_info info; int err; + if (cmd == COMPAT_HPET_INFO) + cmd = HPET_INFO; + + if (cmd == COMPAT_HPET_IRQFREQ) + cmd = HPET_IRQFREQ; + mutex_lock(&hpet_mutex); err = hpet_ioctl_common(file->private_data, cmd, arg, &info); mutex_unlock(&hpet_mutex); @@ -696,7 +701,6 @@ hpet_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) static const struct file_operations hpet_fops = { .owner = THIS_MODULE, - .llseek = no_llseek, .read = hpet_read, .poll = hpet_poll, .unlocked_ioctl = hpet_ioctl, @@ -720,7 +724,7 @@ static int hpet_is_known(struct hpet_data *hdp) return 0; } -static struct ctl_table hpet_table[] = { +static const struct ctl_table hpet_table[] = { { .procname = "max-user-freq", .data = &hpet_max_freq, @@ -728,7 +732,6 @@ static struct ctl_table hpet_table[] = { .mode = 0644, .proc_handler = proc_dointvec, }, - {} }; static struct ctl_table_header *sysctl_header; @@ -805,7 +808,7 @@ int hpet_alloc(struct hpet_data *hdp) struct hpets *hpetp; struct hpet __iomem *hpet; static struct hpets *last; - unsigned long period; + u32 period; unsigned long long temp; u32 remainder; @@ -862,11 +865,11 @@ int hpet_alloc(struct hpet_data *hdp) do_div(temp, period); hpetp->hp_tick_freq = temp; /* ticks per second */ - printk(KERN_INFO "hpet%d: at MMIO 0x%lx, IRQ%s", + printk(KERN_INFO "hpet%u: at MMIO 0x%lx, IRQ%s", hpetp->hp_which, hdp->hd_phys_address, - hpetp->hp_ntimer > 1 ? "s" : ""); + str_plural(hpetp->hp_ntimer)); for (i = 0; i < hpetp->hp_ntimer; i++) - printk(KERN_CONT "%s %d", i > 0 ? "," : "", hdp->hd_irq[i]); + printk(KERN_CONT "%s %u", i > 0 ? "," : "", hdp->hd_irq[i]); printk(KERN_CONT "\n"); temp = hpetp->hp_tick_freq; @@ -907,17 +910,6 @@ int hpet_alloc(struct hpet_data *hdp) hpetp->hp_delta = hpet_calibrate(hpetp); -/* This clocksource driver currently only works on ia64 */ -#ifdef CONFIG_IA64 - if (!hpet_clocksource) { - hpet_mctr = (void __iomem *)&hpetp->hp_hpet->hpet_mc; - clocksource_hpet.archdata.fsys_mmio = hpet_mctr; - clocksource_register_hz(&clocksource_hpet, hpetp->hp_tick_freq); - hpetp->hp_clocksource = &clocksource_hpet; - hpet_clocksource = &clocksource_hpet; - } -#endif - return 0; } @@ -1031,8 +1023,7 @@ static int __init hpet_init(void) result = acpi_bus_register_driver(&hpet_acpi_driver); if (result < 0) { - if (sysctl_header) - unregister_sysctl_table(sysctl_header); + unregister_sysctl_table(sysctl_header); misc_deregister(&hpet_misc); return result; } diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index 8de74dcfa18c..492a2a61a65b 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -37,7 +37,7 @@ config HW_RANDOM_TIMERIOMEM config HW_RANDOM_INTEL tristate "Intel HW Random Number Generator support" - depends on (X86 || IA64 || COMPILE_TEST) && PCI + depends on (X86 || COMPILE_TEST) && PCI default HW_RANDOM help This driver provides kernel-side support for the Random Number @@ -50,7 +50,7 @@ config HW_RANDOM_INTEL config HW_RANDOM_AMD tristate "AMD HW Random Number Generator support" - depends on (X86 || PPC_MAPLE || COMPILE_TEST) + depends on (X86 || COMPILE_TEST) depends on PCI && HAS_IOPORT_MAP default HW_RANDOM help @@ -62,9 +62,22 @@ config HW_RANDOM_AMD If unsure, say Y. 
+config HW_RANDOM_AIROHA + tristate "Airoha True HW Random Number Generator support" + depends on ARCH_AIROHA || COMPILE_TEST + default HW_RANDOM + help + This driver provides kernel-side support for the True Random Number + Generator hardware found on Airoha SoC. + + To compile this driver as a module, choose M here: the + module will be called airoha-rng. + + If unsure, say Y. + config HW_RANDOM_ATMEL tristate "Atmel Random Number Generator support" - depends on (ARCH_AT91 || COMPILE_TEST) + depends on (ARCH_MICROCHIP || COMPILE_TEST) default HW_RANDOM help This driver provides kernel-side support for the Random Number @@ -99,9 +112,22 @@ config HW_RANDOM_BCM2835 If unsure, say Y. +config HW_RANDOM_BCM74110 + tristate "Broadcom BCM74110 Random Number Generator support" + depends on ARCH_BRCMSTB || COMPILE_TEST + default HW_RANDOM + help + This driver provides kernel-side support for the Random Number + Generator hardware found on the Broadcom BCM74110 SoCs. + + To compile this driver as a module, choose M here: the + module will be called bcm74110-rng + + If unsure, say Y. + config HW_RANDOM_IPROC_RNG200 tristate "Broadcom iProc/STB RNG200 support" - depends on ARCH_BCM_IPROC || ARCH_BCM2835 || ARCH_BRCMSTB || COMPILE_TEST + depends on ARCH_BCM_IPROC || ARCH_BCM2835 || ARCH_BCMBCA || ARCH_BRCMSTB || COMPILE_TEST default HW_RANDOM help This driver provides kernel-side support for the RNG200 @@ -286,6 +312,7 @@ config HW_RANDOM_INGENIC_TRNG config HW_RANDOM_NOMADIK tristate "ST-Ericsson Nomadik Random Number Generator support" depends on ARCH_NOMADIK || COMPILE_TEST + depends on ARM_AMBA default HW_RANDOM help This driver provides kernel-side support for the Random Number @@ -508,10 +535,10 @@ config HW_RANDOM_NPCM If unsure, say Y. config HW_RANDOM_KEYSTONE + tristate "TI Keystone NETCP SA Hardware random number generator" depends on ARCH_KEYSTONE || COMPILE_TEST depends on HAS_IOMEM && OF default HW_RANDOM - tristate "TI Keystone NETCP SA Hardware random number generator" help This option enables Keystone's hardware random generator. @@ -553,15 +580,15 @@ config HW_RANDOM_ARM_SMCCC_TRNG module will be called arm_smccc_trng. config HW_RANDOM_CN10K - tristate "Marvell CN10K Random Number Generator support" - depends on HW_RANDOM && PCI && (ARM64 || (64BIT && COMPILE_TEST)) - default HW_RANDOM - help - This driver provides support for the True Random Number - generator available in Marvell CN10K SoCs. + tristate "Marvell CN10K Random Number Generator support" + depends on HW_RANDOM && PCI && (ARM64 || (64BIT && COMPILE_TEST)) + default HW_RANDOM if ARCH_THUNDER + help + This driver provides support for the True Random Number + generator available in Marvell CN10K SoCs. - To compile this driver as a module, choose M here. - The module will be called cn10k_rng. If unsure, say Y. + To compile this driver as a module, choose M here. + The module will be called cn10k_rng. If unsure, say Y. config HW_RANDOM_JH7110 tristate "StarFive JH7110 Random Number Generator support" @@ -573,6 +600,21 @@ config HW_RANDOM_JH7110 To compile this driver as a module, choose M here. The module will be called jh7110-trng. +config HW_RANDOM_ROCKCHIP + tristate "Rockchip True Random Number Generator" + depends on HW_RANDOM && (ARCH_ROCKCHIP || COMPILE_TEST) + depends on HAS_IOMEM + default HW_RANDOM + help + This driver provides kernel-side support for the True Random Number + Generator hardware found on some Rockchip SoCs like RK3566, RK3568 + or RK3588. 
+ + To compile this driver as a module, choose M here: the + module will be called rockchip-rng. + + If unsure, say Y. + endif # HW_RANDOM config UML_RANDOM diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile index 32549a1186dc..b9132b3f5d21 100644 --- a/drivers/char/hw_random/Makefile +++ b/drivers/char/hw_random/Makefile @@ -8,6 +8,7 @@ rng-core-y := core.o obj-$(CONFIG_HW_RANDOM_TIMERIOMEM) += timeriomem-rng.o obj-$(CONFIG_HW_RANDOM_INTEL) += intel-rng.o obj-$(CONFIG_HW_RANDOM_AMD) += amd-rng.o +obj-$(CONFIG_HW_RANDOM_AIROHA) += airoha-trng.o obj-$(CONFIG_HW_RANDOM_ATMEL) += atmel-rng.o obj-$(CONFIG_HW_RANDOM_BA431) += ba431-rng.o obj-$(CONFIG_HW_RANDOM_GEODE) += geode-rng.o @@ -31,6 +32,7 @@ obj-$(CONFIG_HW_RANDOM_POWERNV) += powernv-rng.o obj-$(CONFIG_HW_RANDOM_HISI) += hisi-rng.o obj-$(CONFIG_HW_RANDOM_HISTB) += histb-rng.o obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o +obj-$(CONFIG_HW_RANDOM_BCM74110) += bcm74110-rng.o obj-$(CONFIG_HW_RANDOM_IPROC_RNG200) += iproc-rng200.o obj-$(CONFIG_HW_RANDOM_ST) += st-rng.o obj-$(CONFIG_HW_RANDOM_XGENE) += xgene-rng.o @@ -48,4 +50,5 @@ obj-$(CONFIG_HW_RANDOM_XIPHERA) += xiphera-trng.o obj-$(CONFIG_HW_RANDOM_ARM_SMCCC_TRNG) += arm_smccc_trng.o obj-$(CONFIG_HW_RANDOM_CN10K) += cn10k-rng.o obj-$(CONFIG_HW_RANDOM_POLARFIRE_SOC) += mpfs-rng.o +obj-$(CONFIG_HW_RANDOM_ROCKCHIP) += rockchip-rng.o obj-$(CONFIG_HW_RANDOM_JH7110) += jh7110-trng.o diff --git a/drivers/char/hw_random/airoha-trng.c b/drivers/char/hw_random/airoha-trng.c new file mode 100644 index 000000000000..1dbfa9505c21 --- /dev/null +++ b/drivers/char/hw_random/airoha-trng.c @@ -0,0 +1,243 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2024 Christian Marangi */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mod_devicetable.h> +#include <linux/bitfield.h> +#include <linux/delay.h> +#include <linux/hw_random.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/platform_device.h> + +#define TRNG_IP_RDY 0x800 +#define CNT_TRANS GENMASK(15, 8) +#define SAMPLE_RDY BIT(0) +#define TRNG_NS_SEK_AND_DAT_EN 0x804 +#define RNG_EN BIT(31) /* referenced as ring_en */ +#define RAW_DATA_EN BIT(16) +#define TRNG_HEALTH_TEST_SW_RST 0x808 +#define SW_RST BIT(0) /* Active High */ +#define TRNG_INTR_EN 0x818 +#define INTR_MASK BIT(16) +#define CONTINUOUS_HEALTH_INITR_EN BIT(2) +#define SW_STARTUP_INITR_EN BIT(1) +#define RST_STARTUP_INITR_EN BIT(0) +/* Notice that Health Test are done only out of Reset and with RNG_EN */ +#define TRNG_HEALTH_TEST_STATUS 0x824 +#define CONTINUOUS_HEALTH_AP_TEST_FAIL BIT(23) +#define CONTINUOUS_HEALTH_RC_TEST_FAIL BIT(22) +#define SW_STARTUP_TEST_DONE BIT(21) +#define SW_STARTUP_AP_TEST_FAIL BIT(20) +#define SW_STARTUP_RC_TEST_FAIL BIT(19) +#define RST_STARTUP_TEST_DONE BIT(18) +#define RST_STARTUP_AP_TEST_FAIL BIT(17) +#define RST_STARTUP_RC_TEST_FAIL BIT(16) +#define RAW_DATA_VALID BIT(7) + +#define TRNG_RAW_DATA_OUT 0x828 + +#define TRNG_CNT_TRANS_VALID 0x80 +#define BUSY_LOOP_SLEEP 10 +#define BUSY_LOOP_TIMEOUT (BUSY_LOOP_SLEEP * 10000) + +struct airoha_trng { + void __iomem *base; + struct hwrng rng; + struct device *dev; + + struct completion rng_op_done; +}; + +static int airoha_trng_irq_mask(struct airoha_trng *trng) +{ + u32 val; + + val = readl(trng->base + TRNG_INTR_EN); + val |= INTR_MASK; + writel(val, trng->base + TRNG_INTR_EN); + + return 0; +} + +static int airoha_trng_irq_unmask(struct airoha_trng *trng) +{ + u32 val; + + val = readl(trng->base + 
TRNG_INTR_EN); + val &= ~INTR_MASK; + writel(val, trng->base + TRNG_INTR_EN); + + return 0; +} + +static int airoha_trng_init(struct hwrng *rng) +{ + struct airoha_trng *trng = container_of(rng, struct airoha_trng, rng); + int ret; + u32 val; + + val = readl(trng->base + TRNG_NS_SEK_AND_DAT_EN); + val |= RNG_EN; + writel(val, trng->base + TRNG_NS_SEK_AND_DAT_EN); + + /* Set out of SW Reset */ + airoha_trng_irq_unmask(trng); + writel(0, trng->base + TRNG_HEALTH_TEST_SW_RST); + + ret = wait_for_completion_timeout(&trng->rng_op_done, BUSY_LOOP_TIMEOUT); + if (ret <= 0) { + dev_err(trng->dev, "Timeout waiting for Health Check\n"); + airoha_trng_irq_mask(trng); + return -ENODEV; + } + + /* Check if Health Test Failed */ + val = readl(trng->base + TRNG_HEALTH_TEST_STATUS); + if (val & (RST_STARTUP_AP_TEST_FAIL | RST_STARTUP_RC_TEST_FAIL)) { + dev_err(trng->dev, "Health Check fail: %s test fail\n", + val & RST_STARTUP_AP_TEST_FAIL ? "AP" : "RC"); + return -ENODEV; + } + + /* Check if IP is ready */ + ret = readl_poll_timeout(trng->base + TRNG_IP_RDY, val, + val & SAMPLE_RDY, 10, 1000); + if (ret < 0) { + dev_err(trng->dev, "Timeout waiting for IP ready"); + return -ENODEV; + } + + /* CNT_TRANS must be 0x80 for IP to be considered ready */ + ret = readl_poll_timeout(trng->base + TRNG_IP_RDY, val, + FIELD_GET(CNT_TRANS, val) == TRNG_CNT_TRANS_VALID, + 10, 1000); + if (ret < 0) { + dev_err(trng->dev, "Timeout waiting for IP ready"); + return -ENODEV; + } + + return 0; +} + +static void airoha_trng_cleanup(struct hwrng *rng) +{ + struct airoha_trng *trng = container_of(rng, struct airoha_trng, rng); + u32 val; + + val = readl(trng->base + TRNG_NS_SEK_AND_DAT_EN); + val &= ~RNG_EN; + writel(val, trng->base + TRNG_NS_SEK_AND_DAT_EN); + + /* Put it in SW Reset */ + writel(SW_RST, trng->base + TRNG_HEALTH_TEST_SW_RST); +} + +static int airoha_trng_read(struct hwrng *rng, void *buf, size_t max, bool wait) +{ + struct airoha_trng *trng = container_of(rng, struct airoha_trng, rng); + u32 *data = buf; + u32 status; + int ret; + + ret = readl_poll_timeout(trng->base + TRNG_HEALTH_TEST_STATUS, status, + status & RAW_DATA_VALID, 10, 1000); + if (ret < 0) { + dev_err(trng->dev, "Timeout waiting for TRNG RAW Data valid\n"); + return ret; + } + + *data = readl(trng->base + TRNG_RAW_DATA_OUT); + + return 4; +} + +static irqreturn_t airoha_trng_irq(int irq, void *priv) +{ + struct airoha_trng *trng = (struct airoha_trng *)priv; + + airoha_trng_irq_mask(trng); + /* Just complete the task, we will read the value later */ + complete(&trng->rng_op_done); + + return IRQ_HANDLED; +} + +static int airoha_trng_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct airoha_trng *trng; + int irq, ret; + u32 val; + + trng = devm_kzalloc(dev, sizeof(*trng), GFP_KERNEL); + if (!trng) + return -ENOMEM; + + trng->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(trng->base)) + return PTR_ERR(trng->base); + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + airoha_trng_irq_mask(trng); + ret = devm_request_irq(&pdev->dev, irq, airoha_trng_irq, 0, + pdev->name, (void *)trng); + if (ret) { + dev_err(dev, "Can't get interrupt working.\n"); + return ret; + } + + init_completion(&trng->rng_op_done); + + /* Enable interrupt for SW reset Health Check */ + val = readl(trng->base + TRNG_INTR_EN); + val |= RST_STARTUP_INITR_EN; + writel(val, trng->base + TRNG_INTR_EN); + + /* Set output to raw data */ + val = readl(trng->base + TRNG_NS_SEK_AND_DAT_EN); + val |= RAW_DATA_EN; + writel(val, 
trng->base + TRNG_NS_SEK_AND_DAT_EN); + + /* Put it in SW Reset */ + writel(SW_RST, trng->base + TRNG_HEALTH_TEST_SW_RST); + + trng->dev = dev; + trng->rng.name = pdev->name; + trng->rng.init = airoha_trng_init; + trng->rng.cleanup = airoha_trng_cleanup; + trng->rng.read = airoha_trng_read; + + ret = devm_hwrng_register(dev, &trng->rng); + if (ret) { + dev_err(dev, "failed to register rng device: %d\n", ret); + return ret; + } + + return 0; +} + +static const struct of_device_id airoha_trng_of_match[] = { + { .compatible = "airoha,en7581-trng", }, + {}, +}; +MODULE_DEVICE_TABLE(of, airoha_trng_of_match); + +static struct platform_driver airoha_trng_driver = { + .driver = { + .name = "airoha-trng", + .of_match_table = airoha_trng_of_match, + }, + .probe = airoha_trng_probe, +}; + +module_platform_driver(airoha_trng_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Christian Marangi <ansuelsmth@gmail.com>"); +MODULE_DESCRIPTION("Airoha True Random Number Generator driver"); diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c index 86162a13681e..9a24d19236dc 100644 --- a/drivers/char/hw_random/amd-rng.c +++ b/drivers/char/hw_random/amd-rng.c @@ -143,8 +143,10 @@ static int __init amd_rng_mod_init(void) found: err = pci_read_config_dword(pdev, 0x58, &pmbase); - if (err) + if (err) { + err = pcibios_err_to_errno(err); goto put_dev; + } pmbase &= 0x0000FF00; if (pmbase == 0) { diff --git a/drivers/char/hw_random/arm_smccc_trng.c b/drivers/char/hw_random/arm_smccc_trng.c index 7e954341b09f..dcb8e7f37f25 100644 --- a/drivers/char/hw_random/arm_smccc_trng.c +++ b/drivers/char/hw_random/arm_smccc_trng.c @@ -118,4 +118,5 @@ module_platform_driver(smccc_trng_driver); MODULE_ALIAS("platform:smccc_trng"); MODULE_AUTHOR("Andre Przywara"); +MODULE_DESCRIPTION("Arm SMCCC TRNG firmware interface support"); MODULE_LICENSE("GPL"); diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c index a37367ebcbac..6ed24be3481d 100644 --- a/drivers/char/hw_random/atmel-rng.c +++ b/drivers/char/hw_random/atmel-rng.c @@ -37,6 +37,7 @@ struct atmel_trng { struct clk *clk; void __iomem *base; struct hwrng rng; + struct device *dev; bool has_half_rate; }; @@ -59,9 +60,9 @@ static int atmel_trng_read(struct hwrng *rng, void *buf, size_t max, u32 *data = buf; int ret; - ret = pm_runtime_get_sync((struct device *)trng->rng.priv); + ret = pm_runtime_get_sync(trng->dev); if (ret < 0) { - pm_runtime_put_sync((struct device *)trng->rng.priv); + pm_runtime_put_sync(trng->dev); return ret; } @@ -79,8 +80,7 @@ static int atmel_trng_read(struct hwrng *rng, void *buf, size_t max, ret = 4; out: - pm_runtime_mark_last_busy((struct device *)trng->rng.priv); - pm_runtime_put_sync_autosuspend((struct device *)trng->rng.priv); + pm_runtime_put_sync_autosuspend(trng->dev); return ret; } @@ -134,9 +134,9 @@ static int atmel_trng_probe(struct platform_device *pdev) return -ENODEV; trng->has_half_rate = data->has_half_rate; + trng->dev = &pdev->dev; trng->rng.name = pdev->name; trng->rng.read = atmel_trng_read; - trng->rng.priv = (unsigned long)&pdev->dev; platform_set_drvdata(pdev, trng); #ifndef CONFIG_PM @@ -161,15 +161,13 @@ static int atmel_trng_probe(struct platform_device *pdev) return ret; } -static int atmel_trng_remove(struct platform_device *pdev) +static void atmel_trng_remove(struct platform_device *pdev) { struct atmel_trng *trng = platform_get_drvdata(pdev); atmel_trng_cleanup(trng); pm_runtime_disable(&pdev->dev); pm_runtime_set_suspended(&pdev->dev); - - return 0; } 
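The atmel-rng hunk above is the first of several in this series (mtk-rng, npcm-rng and ks-sa-rng follow the same recipe later on) that stop smuggling a struct device * through the unsigned long rng->priv field and instead keep the device pointer in the driver's own state, reached via container_of(). A minimal sketch of the resulting shape; foo_trng and its lone data register are hypothetical, not any driver in this series:

    #include <linux/hw_random.h>
    #include <linux/io.h>
    #include <linux/pm_runtime.h>

    struct foo_trng {
    	struct device *dev;	/* was: (struct device *)rng->priv */
    	void __iomem *base;
    	struct hwrng rng;
    };

    static int foo_trng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
    {
    	struct foo_trng *trng = container_of(rng, struct foo_trng, rng);
    	int ret;

    	if (max < sizeof(u32))
    		return 0;

    	ret = pm_runtime_resume_and_get(trng->dev);
    	if (ret < 0)
    		return ret;

    	*(u32 *)buf = readl(trng->base);	/* hypothetical data register at offset 0 */
    	pm_runtime_put_sync_autosuspend(trng->dev);

    	return sizeof(u32);
    }

No cast of an unsigned long back to a pointer is needed, and rng->priv stays free for drivers that still want it.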
static int __maybe_unused atmel_trng_runtime_suspend(struct device *dev) diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c index e19b0f9f48b9..6d6ac409efcf 100644 --- a/drivers/char/hw_random/bcm2835-rng.c +++ b/drivers/char/hw_random/bcm2835-rng.c @@ -70,7 +70,7 @@ static int bcm2835_rng_read(struct hwrng *rng, void *buf, size_t max, while ((rng_readl(priv, RNG_STATUS) >> 24) == 0) { if (!wait) return 0; - hwrng_msleep(rng, 1000); + hwrng_yield(rng); } num_words = rng_readl(priv, RNG_STATUS) >> 24; @@ -94,8 +94,10 @@ static int bcm2835_rng_init(struct hwrng *rng) return ret; ret = reset_control_reset(priv->reset); - if (ret) + if (ret) { + clk_disable_unprepare(priv->clk); return ret; + } if (priv->mask_interrupts) { /* mask the interrupt */ @@ -136,12 +138,11 @@ static const struct of_device_id bcm2835_rng_of_match[] = { { .compatible = "brcm,bcm6368-rng"}, {}, }; +MODULE_DEVICE_TABLE(of, bcm2835_rng_of_match); static int bcm2835_rng_probe(struct platform_device *pdev) { - const struct bcm2835_rng_of_data *of_data; struct device *dev = &pdev->dev; - const struct of_device_id *rng_id; struct bcm2835_rng_priv *priv; int err; @@ -149,8 +150,6 @@ static int bcm2835_rng_probe(struct platform_device *pdev) if (!priv) return -ENOMEM; - platform_set_drvdata(pdev, priv); - /* map peripheral */ priv->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->base)) @@ -171,12 +170,10 @@ static int bcm2835_rng_probe(struct platform_device *pdev) priv->rng.cleanup = bcm2835_rng_cleanup; if (dev_of_node(dev)) { - rng_id = of_match_node(bcm2835_rng_of_match, dev->of_node); - if (!rng_id) - return -EINVAL; + const struct bcm2835_rng_of_data *of_data; /* Check for rng init function, execute it */ - of_data = rng_id->data; + of_data = of_device_get_match_data(dev); if (of_data) priv->mask_interrupts = of_data->mask_interrupts; } @@ -191,8 +188,6 @@ static int bcm2835_rng_probe(struct platform_device *pdev) return err; } -MODULE_DEVICE_TABLE(of, bcm2835_rng_of_match); - static const struct platform_device_id bcm2835_rng_devtype[] = { { .name = "bcm2835-rng" }, { .name = "bcm63xx-rng" }, diff --git a/drivers/char/hw_random/bcm74110-rng.c b/drivers/char/hw_random/bcm74110-rng.c new file mode 100644 index 000000000000..5c64148e91f1 --- /dev/null +++ b/drivers/char/hw_random/bcm74110-rng.c @@ -0,0 +1,125 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2024 Broadcom + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/mod_devicetable.h> +#include <linux/kernel.h> +#include <linux/io.h> +#include <linux/delay.h> +#include <linux/platform_device.h> +#include <linux/random.h> +#include <linux/hw_random.h> + +#define HOST_REV_ID 0x00 +#define HOST_FIFO_DEPTH 0x04 +#define HOST_FIFO_COUNT 0x08 +#define HOST_FIFO_THRESHOLD 0x0c +#define HOST_FIFO_DATA 0x10 + +#define HOST_FIFO_COUNT_MASK 0xffff + +/* Delay range in microseconds */ +#define FIFO_DELAY_MIN_US 3 +#define FIFO_DELAY_MAX_US 7 +#define FIFO_DELAY_MAX_COUNT 10 + +struct bcm74110_priv { + void __iomem *base; +}; + +static inline int bcm74110_rng_fifo_count(void __iomem *mem) +{ + return readl_relaxed(mem) & HOST_FIFO_COUNT_MASK; +} + +static int bcm74110_rng_read(struct hwrng *rng, void *buf, size_t max, + bool wait) +{ + struct bcm74110_priv *priv = (struct bcm74110_priv *)rng->priv; + void __iomem *fc_addr = priv->base + HOST_FIFO_COUNT; + void __iomem *fd_addr = priv->base + HOST_FIFO_DATA; + unsigned underrun_count = 0; + u32 max_words = max / 
sizeof(u32); + u32 num_words; + unsigned i; + + /* + * We need to check how many words are available in the RNG FIFO. If + * there aren't any, we need to wait for some to become available. + */ + while ((num_words = bcm74110_rng_fifo_count(fc_addr)) == 0) { + if (!wait) + return 0; + /* + * As a precaution, limit how long we wait. If the FIFO doesn't + * refill within the allotted time, return 0 (=no data) to the + * caller. + */ + if (likely(underrun_count < FIFO_DELAY_MAX_COUNT)) + usleep_range(FIFO_DELAY_MIN_US, FIFO_DELAY_MAX_US); + else + return 0; + underrun_count++; + } + if (num_words > max_words) + num_words = max_words; + + /* Bail early if we run out of random numbers unexpectedly */ + for (i = 0; i < num_words && bcm74110_rng_fifo_count(fc_addr) > 0; i++) + ((u32 *)buf)[i] = readl_relaxed(fd_addr); + + return i * sizeof(u32); +} + +static struct hwrng bcm74110_hwrng = { + .read = bcm74110_rng_read, +}; + +static int bcm74110_rng_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct bcm74110_priv *priv; + int rc; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + bcm74110_hwrng.name = pdev->name; + bcm74110_hwrng.priv = (unsigned long)priv; + + priv->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(priv->base)) + return PTR_ERR(priv->base); + + rc = devm_hwrng_register(dev, &bcm74110_hwrng); + if (rc) + dev_err(dev, "hwrng registration failed (%d)\n", rc); + else + dev_info(dev, "hwrng registered\n"); + + return rc; +} + +static const struct of_device_id bcm74110_rng_match[] = { + { .compatible = "brcm,bcm74110-rng", }, + {}, +}; +MODULE_DEVICE_TABLE(of, bcm74110_rng_match); + +static struct platform_driver bcm74110_rng_driver = { + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = bcm74110_rng_match, + }, + .probe = bcm74110_rng_probe, +}; +module_platform_driver(bcm74110_rng_driver); + +MODULE_AUTHOR("Markus Mayer <mmayer@broadcom.com>"); +MODULE_DESCRIPTION("BCM 74110 Random Number Generator (RNG) driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/char/hw_random/cavium-rng-vf.c b/drivers/char/hw_random/cavium-rng-vf.c index c99c54cd99c6..c1b8918b2292 100644 --- a/drivers/char/hw_random/cavium-rng-vf.c +++ b/drivers/char/hw_random/cavium-rng-vf.c @@ -266,4 +266,5 @@ static struct pci_driver cavium_rng_vf_driver = { module_pci_driver(cavium_rng_vf_driver); MODULE_AUTHOR("Omer Khaliq <okhaliq@caviumnetworks.com>"); +MODULE_DESCRIPTION("Cavium ThunderX Random Number Generator VF support"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/char/hw_random/cavium-rng.c b/drivers/char/hw_random/cavium-rng.c index b96579222408..d9d7b6038c06 100644 --- a/drivers/char/hw_random/cavium-rng.c +++ b/drivers/char/hw_random/cavium-rng.c @@ -88,4 +88,5 @@ static struct pci_driver cavium_rng_pf_driver = { module_pci_driver(cavium_rng_pf_driver); MODULE_AUTHOR("Omer Khaliq <okhaliq@caviumnetworks.com>"); +MODULE_DESCRIPTION("Cavium ThunderX Random Number Generator support"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/char/hw_random/cctrng.c b/drivers/char/hw_random/cctrng.c index 1abbff04a015..a5be9258037f 100644 --- a/drivers/char/hw_random/cctrng.c +++ b/drivers/char/hw_random/cctrng.c @@ -98,7 +98,6 @@ static void cc_trng_pm_put_suspend(struct device *dev) { int rc = 0; - pm_runtime_mark_last_busy(dev); rc = pm_runtime_put_autosuspend(dev); if (rc) dev_err(dev, "pm_runtime_put_autosuspend returned %x\n", rc); @@ -560,7 +559,7 @@ post_pm_err: return rc; } -static int cctrng_remove(struct 
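The new bcm74110 driver is a compact illustration of the contract a hwrng ->read() callback has with the core: return the number of bytes actually written, return 0 when no data is available and either wait is false or the driver gives up after a bounded delay, and return a negative errno only for real errors. A sketch of that contract; tiny_rng, its "ready" bit and the 0x10 data offset are invented for illustration, not taken from any driver here:

    #include <linux/bits.h>
    #include <linux/delay.h>
    #include <linux/hw_random.h>
    #include <linux/io.h>

    struct tiny_rng {
    	void __iomem *base;
    	struct hwrng rng;
    };

    static int tiny_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
    {
    	struct tiny_rng *priv = container_of(rng, struct tiny_rng, rng);

    	if (max < sizeof(u32))
    		return 0;

    	if (!(readl(priv->base) & BIT(0))) {		/* hypothetical "data ready" bit */
    		if (!wait)
    			return 0;			/* caller does not want to block */
    		usleep_range(10, 20);			/* bounded wait, as bcm74110 does */
    		if (!(readl(priv->base) & BIT(0)))
    			return 0;			/* still nothing, give up */
    	}

    	*(u32 *)buf = readl(priv->base + 0x10);
    	return sizeof(u32);				/* never more than 'max' */
    }

Registration is then a single devm_hwrng_register(dev, &priv->rng) in probe, exactly as both new drivers in this section do; the core.c hunk further down adds a WARN_ON_ONCE() for callbacks that report more bytes than they were asked for.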
platform_device *pdev) +static void cctrng_remove(struct platform_device *pdev) { struct cctrng_drvdata *drvdata = platform_get_drvdata(pdev); struct device *dev = &pdev->dev; @@ -570,8 +569,6 @@ static int cctrng_remove(struct platform_device *pdev) cc_trng_pm_fini(drvdata); dev_info(dev, "ARM cctrng device terminated\n"); - - return 0; } static int __maybe_unused cctrng_suspend(struct device *dev) @@ -624,6 +621,7 @@ static int __maybe_unused cctrng_resume(struct device *dev) /* wait for Cryptocell reset completion */ if (!cctrng_wait_for_reset_completion(drvdata)) { dev_err(dev, "Cryptocell reset not completed"); + clk_disable_unprepare(drvdata->clk); return -EBUSY; } diff --git a/drivers/char/hw_random/cn10k-rng.c b/drivers/char/hw_random/cn10k-rng.c index 31935316a160..3b4e78182e14 100644 --- a/drivers/char/hw_random/cn10k-rng.c +++ b/drivers/char/hw_random/cn10k-rng.c @@ -188,7 +188,7 @@ static int cn10k_rng_probe(struct pci_dev *pdev, const struct pci_device_id *id) rng->reg_base = pcim_iomap(pdev, 0, 0); if (!rng->reg_base) - return dev_err_probe(&pdev->dev, -ENOMEM, "Error while mapping CSRs, exiting\n"); + return -ENOMEM; rng->ops.name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "cn10k-rng-%s", dev_name(&pdev->dev)); diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index e3598ec9cfca..96d7fe41b373 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c @@ -23,10 +23,13 @@ #include <linux/sched.h> #include <linux/sched/signal.h> #include <linux/slab.h> +#include <linux/string.h> #include <linux/uaccess.h> #define RNG_MODULE_NAME "hw_random" +#define RNG_BUFFER_SIZE (SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES) + static struct hwrng *current_rng; /* the current rng has been explicitly chosen by user via sysfs */ static int cur_rng_set_by_user; @@ -58,20 +61,7 @@ static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size, static size_t rng_buffer_size(void) { - return SMP_CACHE_BYTES < 32 ? 
32 : SMP_CACHE_BYTES; -} - -static void add_early_randomness(struct hwrng *rng) -{ - int bytes_read; - - mutex_lock(&reading_mutex); - bytes_read = rng_get_data(rng, rng_fillbuf, 32, 0); - mutex_unlock(&reading_mutex); - if (bytes_read > 0) { - size_t entropy = bytes_read * 8 * rng->quality / 1024; - add_hwgenerator_randomness(rng_fillbuf, bytes_read, entropy, false); - } + return RNG_BUFFER_SIZE; } static inline void cleanup_rng(struct kref *kref) @@ -171,7 +161,6 @@ static int hwrng_init(struct hwrng *rng) reinit_completion(&rng->cleanup_done); skip_init: - rng->quality = min_t(u16, min_t(u16, default_quality, 1024), rng->quality ?: 1024); current_quality = rng->quality; /* obsolete */ return 0; @@ -192,8 +181,15 @@ static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size, int present; BUG_ON(!mutex_is_locked(&reading_mutex)); - if (rng->read) - return rng->read(rng, (void *)buffer, size, wait); + if (rng->read) { + int err; + + err = rng->read(rng, buffer, size, wait); + if (WARN_ON_ONCE(err > 0 && err > size)) + err = size; + + return err; + } if (rng->data_present) present = rng->data_present(rng, wait); @@ -209,6 +205,7 @@ static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size, static ssize_t rng_dev_read(struct file *filp, char __user *buf, size_t size, loff_t *offp) { + u8 buffer[RNG_BUFFER_SIZE]; ssize_t ret = 0; int err = 0; int bytes_read, len; @@ -236,34 +233,37 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf, if (bytes_read < 0) { err = bytes_read; goto out_unlock_reading; + } else if (bytes_read == 0 && + (filp->f_flags & O_NONBLOCK)) { + err = -EAGAIN; + goto out_unlock_reading; } + data_avail = bytes_read; } - if (!data_avail) { - if (filp->f_flags & O_NONBLOCK) { - err = -EAGAIN; - goto out_unlock_reading; - } - } else { - len = data_avail; + len = data_avail; + if (len) { if (len > size) len = size; data_avail -= len; - if (copy_to_user(buf + ret, rng_buffer + data_avail, - len)) { + memcpy(buffer, rng_buffer + data_avail, len); + } + mutex_unlock(&reading_mutex); + put_rng(rng); + + if (len) { + if (copy_to_user(buf + ret, buffer, len)) { err = -EFAULT; - goto out_unlock_reading; + goto out; } size -= len; ret += len; } - mutex_unlock(&reading_mutex); - put_rng(rng); if (need_resched()) schedule_timeout_interruptible(1); @@ -274,6 +274,7 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf, } } out: + memzero_explicit(buffer, sizeof(buffer)); return ret ? : err; out_unlock_reading: @@ -332,15 +333,17 @@ static ssize_t rng_current_store(struct device *dev, const char *buf, size_t len) { int err; - struct hwrng *rng, *old_rng, *new_rng; + struct hwrng *rng, *new_rng; err = mutex_lock_interruptible(&rng_mutex); if (err) return -ERESTARTSYS; - old_rng = current_rng; if (sysfs_streq(buf, "")) { err = enable_best_rng(); + } else if (sysfs_streq(buf, "none")) { + cur_rng_set_by_user = 1; + drop_current_rng(); } else { list_for_each_entry(rng, &rng_list, list) { if (sysfs_streq(rng->name, buf)) { @@ -354,11 +357,8 @@ static ssize_t rng_current_store(struct device *dev, new_rng = get_current_rng_nolock(); mutex_unlock(&rng_mutex); - if (new_rng) { - if (new_rng != old_rng) - add_early_randomness(new_rng); + if (new_rng) put_rng(new_rng); - } return err ? : len; } @@ -374,7 +374,7 @@ static ssize_t rng_current_show(struct device *dev, if (IS_ERR(rng)) return PTR_ERR(rng); - ret = snprintf(buf, PAGE_SIZE, "%s\n", rng ? rng->name : "none"); + ret = sysfs_emit(buf, "%s\n", rng ? 
rng->name : "none"); put_rng(rng); return ret; @@ -395,7 +395,7 @@ static ssize_t rng_available_show(struct device *dev, strlcat(buf, rng->name, PAGE_SIZE); strlcat(buf, " ", PAGE_SIZE); } - strlcat(buf, "\n", PAGE_SIZE); + strlcat(buf, "none\n", PAGE_SIZE); mutex_unlock(&rng_mutex); return strlen(buf); @@ -479,16 +479,6 @@ static struct attribute *rng_dev_attrs[] = { ATTRIBUTE_GROUPS(rng_dev); -static void __exit unregister_miscdev(void) -{ - misc_deregister(&rng_miscdev); -} - -static int __init register_miscdev(void) -{ - return misc_register(&rng_miscdev); -} - static int hwrng_fillfn(void *unused) { size_t entropy, entropy_credit = 0; /* in 1/1024 of a bit */ @@ -536,7 +526,6 @@ int hwrng_register(struct hwrng *rng) { int err = -EINVAL; struct hwrng *tmp; - bool is_new_current = false; if (!rng->name || (!rng->data_read && !rng->read)) goto out; @@ -555,8 +544,11 @@ int hwrng_register(struct hwrng *rng) complete(&rng->cleanup_done); init_completion(&rng->dying); - if (!current_rng || - (!cur_rng_set_by_user && rng->quality > current_rng->quality)) { + /* Adjust quality field to always have a proper value */ + rng->quality = min3(default_quality, 1024, rng->quality ?: 1024); + + if (!cur_rng_set_by_user && + (!current_rng || rng->quality > current_rng->quality)) { /* * Set new rng as current as the new rng source * provides better entropy quality and was not @@ -565,25 +557,8 @@ int hwrng_register(struct hwrng *rng) err = set_current_rng(rng); if (err) goto out_unlock; - /* to use current_rng in add_early_randomness() we need - * to take a ref - */ - is_new_current = true; - kref_get(&rng->ref); } mutex_unlock(&rng_mutex); - if (is_new_current || !rng->init) { - /* - * Use a new device's input to add some randomness to - * the system. If this rng device isn't going to be - * used right away, its init function hasn't been - * called yet by set_current_rng(); so only use the - * randomness from devices that don't need an init callback - */ - add_early_randomness(rng); - } - if (is_new_current) - put_rng(rng); return 0; out_unlock: mutex_unlock(&rng_mutex); @@ -594,12 +569,11 @@ EXPORT_SYMBOL_GPL(hwrng_register); void hwrng_unregister(struct hwrng *rng) { - struct hwrng *old_rng, *new_rng; + struct hwrng *new_rng; int err; mutex_lock(&rng_mutex); - old_rng = current_rng; list_del(&rng->list); complete_all(&rng->dying); if (current_rng == rng) { @@ -618,11 +592,8 @@ void hwrng_unregister(struct hwrng *rng) } else mutex_unlock(&rng_mutex); - if (new_rng) { - if (old_rng != new_rng) - add_early_randomness(new_rng); + if (new_rng) put_rng(new_rng); - } wait_for_completion(&rng->cleanup_done); } @@ -678,6 +649,12 @@ long hwrng_msleep(struct hwrng *rng, unsigned int msecs) } EXPORT_SYMBOL_GPL(hwrng_msleep); +long hwrng_yield(struct hwrng *rng) +{ + return wait_for_completion_interruptible_timeout(&rng->dying, 1); +} +EXPORT_SYMBOL_GPL(hwrng_yield); + static int __init hwrng_modinit(void) { int ret; @@ -693,7 +670,7 @@ static int __init hwrng_modinit(void) return -ENOMEM; } - ret = register_miscdev(); + ret = misc_register(&rng_miscdev); if (ret) { kfree(rng_fillbuf); kfree(rng_buffer); @@ -710,7 +687,7 @@ static void __exit hwrng_modexit(void) kfree(rng_fillbuf); mutex_unlock(&rng_mutex); - unregister_miscdev(); + misc_deregister(&rng_miscdev); } fs_initcall(hwrng_modinit); /* depends on misc_register() */ diff --git a/drivers/char/hw_random/exynos-trng.c b/drivers/char/hw_random/exynos-trng.c index 30207b7ac5f4..02e207c09e81 100644 --- a/drivers/char/hw_random/exynos-trng.c +++ 
b/drivers/char/hw_random/exynos-trng.c @@ -10,6 +10,7 @@ * Krzysztof Kozłowski <krzk@kernel.org> */ +#include <linux/arm-smccc.h> #include <linux/clk.h> #include <linux/crypto.h> #include <linux/delay.h> @@ -22,46 +23,69 @@ #include <linux/mod_devicetable.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> - -#define EXYNOS_TRNG_CLKDIV (0x0) - -#define EXYNOS_TRNG_CTRL (0x20) -#define EXYNOS_TRNG_CTRL_RNGEN BIT(31) - -#define EXYNOS_TRNG_POST_CTRL (0x30) -#define EXYNOS_TRNG_ONLINE_CTRL (0x40) -#define EXYNOS_TRNG_ONLINE_STAT (0x44) -#define EXYNOS_TRNG_ONLINE_MAXCHI2 (0x48) -#define EXYNOS_TRNG_FIFO_CTRL (0x50) -#define EXYNOS_TRNG_FIFO_0 (0x80) -#define EXYNOS_TRNG_FIFO_1 (0x84) -#define EXYNOS_TRNG_FIFO_2 (0x88) -#define EXYNOS_TRNG_FIFO_3 (0x8c) -#define EXYNOS_TRNG_FIFO_4 (0x90) -#define EXYNOS_TRNG_FIFO_5 (0x94) -#define EXYNOS_TRNG_FIFO_6 (0x98) -#define EXYNOS_TRNG_FIFO_7 (0x9c) -#define EXYNOS_TRNG_FIFO_LEN (8) -#define EXYNOS_TRNG_CLOCK_RATE (500000) - +#include <linux/property.h> + +#define EXYNOS_TRNG_CLKDIV 0x0 + +#define EXYNOS_TRNG_CTRL 0x20 +#define EXYNOS_TRNG_CTRL_RNGEN BIT(31) + +#define EXYNOS_TRNG_POST_CTRL 0x30 +#define EXYNOS_TRNG_ONLINE_CTRL 0x40 +#define EXYNOS_TRNG_ONLINE_STAT 0x44 +#define EXYNOS_TRNG_ONLINE_MAXCHI2 0x48 +#define EXYNOS_TRNG_FIFO_CTRL 0x50 +#define EXYNOS_TRNG_FIFO_0 0x80 +#define EXYNOS_TRNG_FIFO_1 0x84 +#define EXYNOS_TRNG_FIFO_2 0x88 +#define EXYNOS_TRNG_FIFO_3 0x8c +#define EXYNOS_TRNG_FIFO_4 0x90 +#define EXYNOS_TRNG_FIFO_5 0x94 +#define EXYNOS_TRNG_FIFO_6 0x98 +#define EXYNOS_TRNG_FIFO_7 0x9c +#define EXYNOS_TRNG_FIFO_LEN 8 +#define EXYNOS_TRNG_CLOCK_RATE 500000 + +/* Driver feature flags */ +#define EXYNOS_SMC BIT(0) + +#define EXYNOS_SMC_CALL_VAL(func_num) \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_32, \ + ARM_SMCCC_OWNER_SIP, \ + func_num) + +/* SMC command for DTRNG access */ +#define SMC_CMD_RANDOM EXYNOS_SMC_CALL_VAL(0x1012) + +/* SMC_CMD_RANDOM: arguments */ +#define HWRNG_INIT 0x0 +#define HWRNG_EXIT 0x1 +#define HWRNG_GET_DATA 0x2 +#define HWRNG_RESUME 0x3 + +/* SMC_CMD_RANDOM: return values */ +#define HWRNG_RET_OK 0x0 +#define HWRNG_RET_RETRY_ERROR 0x2 + +#define HWRNG_MAX_TRIES 100 struct exynos_trng_dev { - struct device *dev; - void __iomem *mem; - struct clk *clk; - struct hwrng rng; + struct device *dev; + void __iomem *mem; + struct clk *clk; /* operating clock */ + struct clk *pclk; /* bus clock */ + struct hwrng rng; + unsigned long flags; }; -static int exynos_trng_do_read(struct hwrng *rng, void *data, size_t max, - bool wait) +static int exynos_trng_do_read_reg(struct hwrng *rng, void *data, size_t max, + bool wait) { - struct exynos_trng_dev *trng; + struct exynos_trng_dev *trng = (struct exynos_trng_dev *)rng->priv; int val; max = min_t(size_t, max, (EXYNOS_TRNG_FIFO_LEN * 4)); - - trng = (struct exynos_trng_dev *)rng->priv; - writel_relaxed(max * 8, trng->mem + EXYNOS_TRNG_FIFO_CTRL); val = readl_poll_timeout(trng->mem + EXYNOS_TRNG_FIFO_CTRL, val, val == 0, 200, 1000000); @@ -73,7 +97,40 @@ static int exynos_trng_do_read(struct hwrng *rng, void *data, size_t max, return max; } -static int exynos_trng_init(struct hwrng *rng) +static int exynos_trng_do_read_smc(struct hwrng *rng, void *data, size_t max, + bool wait) +{ + struct arm_smccc_res res; + unsigned int copied = 0; + u32 *buf = data; + int tries = 0; + + while (copied < max) { + arm_smccc_smc(SMC_CMD_RANDOM, HWRNG_GET_DATA, 0, 0, 0, 0, 0, 0, + &res); + switch (res.a0) { + case HWRNG_RET_OK: + *buf++ = res.a2; + *buf++ = 
res.a3; + copied += 8; + tries = 0; + break; + case HWRNG_RET_RETRY_ERROR: + if (!wait) + return copied; + if (++tries >= HWRNG_MAX_TRIES) + return copied; + cond_resched(); + break; + default: + return -EIO; + } + } + + return copied; +} + +static int exynos_trng_init_reg(struct hwrng *rng) { struct exynos_trng_dev *trng = (struct exynos_trng_dev *)rng->priv; unsigned long sss_rate; @@ -87,7 +144,7 @@ static int exynos_trng_init(struct hwrng *rng) */ val = sss_rate / (EXYNOS_TRNG_CLOCK_RATE * 2); if (val > 0x7fff) { - dev_err(trng->dev, "clock divider too large: %d", val); + dev_err(trng->dev, "clock divider too large: %d\n", val); return -ERANGE; } val = val << 1; @@ -106,6 +163,24 @@ static int exynos_trng_init(struct hwrng *rng) return 0; } +static int exynos_trng_init_smc(struct hwrng *rng) +{ + struct exynos_trng_dev *trng = (struct exynos_trng_dev *)rng->priv; + struct arm_smccc_res res; + int ret = 0; + + arm_smccc_smc(SMC_CMD_RANDOM, HWRNG_INIT, 0, 0, 0, 0, 0, 0, &res); + if (res.a0 != HWRNG_RET_OK) { + dev_err(trng->dev, "SMC command for TRNG init failed (%d)\n", + (int)res.a0); + ret = -EIO; + } + if ((int)res.a0 == -1) + dev_info(trng->dev, "Make sure LDFW is loaded by your BL\n"); + + return ret; +} + static int exynos_trng_probe(struct platform_device *pdev) { struct exynos_trng_dev *trng; @@ -115,21 +190,29 @@ static int exynos_trng_probe(struct platform_device *pdev) if (!trng) return ret; + platform_set_drvdata(pdev, trng); + trng->dev = &pdev->dev; + + trng->flags = (unsigned long)device_get_match_data(&pdev->dev); + trng->rng.name = devm_kstrdup(&pdev->dev, dev_name(&pdev->dev), GFP_KERNEL); if (!trng->rng.name) return ret; - trng->rng.init = exynos_trng_init; - trng->rng.read = exynos_trng_do_read; - trng->rng.priv = (unsigned long) trng; + trng->rng.priv = (unsigned long)trng; - platform_set_drvdata(pdev, trng); - trng->dev = &pdev->dev; + if (trng->flags & EXYNOS_SMC) { + trng->rng.init = exynos_trng_init_smc; + trng->rng.read = exynos_trng_do_read_smc; + } else { + trng->rng.init = exynos_trng_init_reg; + trng->rng.read = exynos_trng_do_read_reg; - trng->mem = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(trng->mem)) - return PTR_ERR(trng->mem); + trng->mem = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(trng->mem)) + return PTR_ERR(trng->mem); + } pm_runtime_enable(&pdev->dev); ret = pm_runtime_resume_and_get(&pdev->dev); @@ -138,32 +221,30 @@ static int exynos_trng_probe(struct platform_device *pdev) goto err_pm_get; } - trng->clk = devm_clk_get(&pdev->dev, "secss"); + trng->clk = devm_clk_get_enabled(&pdev->dev, "secss"); if (IS_ERR(trng->clk)) { - ret = PTR_ERR(trng->clk); - dev_err(&pdev->dev, "Could not get clock.\n"); + ret = dev_err_probe(&pdev->dev, PTR_ERR(trng->clk), + "Could not get clock\n"); goto err_clock; } - ret = clk_prepare_enable(trng->clk); - if (ret) { - dev_err(&pdev->dev, "Could not enable the clk.\n"); + trng->pclk = devm_clk_get_optional_enabled(&pdev->dev, "pclk"); + if (IS_ERR(trng->pclk)) { + ret = dev_err_probe(&pdev->dev, PTR_ERR(trng->pclk), + "Could not get pclk\n"); goto err_clock; } ret = devm_hwrng_register(&pdev->dev, &trng->rng); if (ret) { dev_err(&pdev->dev, "Could not register hwrng device.\n"); - goto err_register; + goto err_clock; } dev_info(&pdev->dev, "Exynos True Random Number Generator.\n"); return 0; -err_register: - clk_disable_unprepare(trng->clk); - err_clock: pm_runtime_put_noidle(&pdev->dev); @@ -173,20 +254,33 @@ err_pm_get: return ret; } -static int exynos_trng_remove(struct platform_device 
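Since the new exynos850 path drives the DTRNG entirely through a SiP SMC call, it is worth spelling out what SMC_CMD_RANDOM encodes; the value follows directly from the standard SMCCC field layout behind ARM_SMCCC_CALL_VAL (a worked example, not new code):

    SMC_CMD_RANDOM = (1    << 31)	/* fast call */
                   | (0    << 30)	/* SMC32 calling convention */
                   | (0x02 << 24)	/* ARM_SMCCC_OWNER_SIP */
                   |  0x1012		/* function number */
                   = 0x82001012

Each successful HWRNG_GET_DATA call returns two 32-bit words of entropy in res.a2 and res.a3, which is why the read loop above advances the output buffer by 8 bytes per SMC.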
*pdev) +static void exynos_trng_remove(struct platform_device *pdev) { - struct exynos_trng_dev *trng = platform_get_drvdata(pdev); + struct exynos_trng_dev *trng = platform_get_drvdata(pdev); + + if (trng->flags & EXYNOS_SMC) { + struct arm_smccc_res res; - clk_disable_unprepare(trng->clk); + arm_smccc_smc(SMC_CMD_RANDOM, HWRNG_EXIT, 0, 0, 0, 0, 0, 0, + &res); + } pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); - - return 0; } static int exynos_trng_suspend(struct device *dev) { + struct exynos_trng_dev *trng = dev_get_drvdata(dev); + struct arm_smccc_res res; + + if (trng->flags & EXYNOS_SMC) { + arm_smccc_smc(SMC_CMD_RANDOM, HWRNG_EXIT, 0, 0, 0, 0, 0, 0, + &res); + if (res.a0 != HWRNG_RET_OK) + return -EIO; + } + pm_runtime_put_sync(dev); return 0; @@ -194,6 +288,7 @@ static int exynos_trng_suspend(struct device *dev) static int exynos_trng_resume(struct device *dev) { + struct exynos_trng_dev *trng = dev_get_drvdata(dev); int ret; ret = pm_runtime_resume_and_get(dev); @@ -202,15 +297,32 @@ static int exynos_trng_resume(struct device *dev) return ret; } + if (trng->flags & EXYNOS_SMC) { + struct arm_smccc_res res; + + arm_smccc_smc(SMC_CMD_RANDOM, HWRNG_RESUME, 0, 0, 0, 0, 0, 0, + &res); + if (res.a0 != HWRNG_RET_OK) + return -EIO; + + arm_smccc_smc(SMC_CMD_RANDOM, HWRNG_INIT, 0, 0, 0, 0, 0, 0, + &res); + if (res.a0 != HWRNG_RET_OK) + return -EIO; + } + return 0; } static DEFINE_SIMPLE_DEV_PM_OPS(exynos_trng_pm_ops, exynos_trng_suspend, - exynos_trng_resume); + exynos_trng_resume); static const struct of_device_id exynos_trng_dt_match[] = { { .compatible = "samsung,exynos5250-trng", + }, { + .compatible = "samsung,exynos850-trng", + .data = (void *)EXYNOS_SMC, }, { }, }; @@ -227,6 +339,7 @@ static struct platform_driver exynos_trng_driver = { }; module_platform_driver(exynos_trng_driver); + MODULE_AUTHOR("Łukasz Stelmach"); MODULE_DESCRIPTION("H/W TRNG driver for Exynos chips"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/char/hw_random/geode-rng.c b/drivers/char/hw_random/geode-rng.c index 12fbe8091831..159baf00a867 100644 --- a/drivers/char/hw_random/geode-rng.c +++ b/drivers/char/hw_random/geode-rng.c @@ -58,7 +58,8 @@ struct amd_geode_priv { static int geode_rng_data_read(struct hwrng *rng, u32 *data) { - void __iomem *mem = (void __iomem *)rng->priv; + struct amd_geode_priv *priv = (struct amd_geode_priv *)rng->priv; + void __iomem *mem = priv->membase; *data = readl(mem + GEODE_RNG_DATA_REG); @@ -67,7 +68,8 @@ static int geode_rng_data_read(struct hwrng *rng, u32 *data) static int geode_rng_data_present(struct hwrng *rng, int wait) { - void __iomem *mem = (void __iomem *)rng->priv; + struct amd_geode_priv *priv = (struct amd_geode_priv *)rng->priv; + void __iomem *mem = priv->membase; int data, i; for (i = 0; i < 20; i++) { diff --git a/drivers/char/hw_random/hisi-rng.c b/drivers/char/hw_random/hisi-rng.c index 96438f85cafa..4e501d5c121f 100644 --- a/drivers/char/hw_random/hisi-rng.c +++ b/drivers/char/hw_random/hisi-rng.c @@ -79,8 +79,6 @@ static int hisi_rng_probe(struct platform_device *pdev) if (!rng) return -ENOMEM; - platform_set_drvdata(pdev, rng); - rng->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(rng->base)) return PTR_ERR(rng->base); @@ -91,10 +89,8 @@ static int hisi_rng_probe(struct platform_device *pdev) rng->rng.read = hisi_rng_read; ret = devm_hwrng_register(&pdev->dev, &rng->rng); - if (ret) { - dev_err(&pdev->dev, "failed to register hwrng\n"); - return ret; - } + if (ret) + return dev_err_probe(&pdev->dev, ret, "failed to 
register hwrng\n"); return 0; } diff --git a/drivers/char/hw_random/histb-rng.c b/drivers/char/hw_random/histb-rng.c index f652e1135e4b..1b91e88cc4c0 100644 --- a/drivers/char/hw_random/histb-rng.c +++ b/drivers/char/hw_random/histb-rng.c @@ -89,7 +89,7 @@ depth_show(struct device *dev, struct device_attribute *attr, char *buf) struct histb_rng_priv *priv = dev_get_drvdata(dev); void __iomem *base = priv->base; - return sprintf(buf, "%d\n", histb_rng_get_depth(base)); + return sprintf(buf, "%u\n", histb_rng_get_depth(base)); } static ssize_t diff --git a/drivers/char/hw_random/imx-rngc.c b/drivers/char/hw_random/imx-rngc.c index e4b385b01b11..241664a9b5d9 100644 --- a/drivers/char/hw_random/imx-rngc.c +++ b/drivers/char/hw_random/imx-rngc.c @@ -13,6 +13,8 @@ #include <linux/clk.h> #include <linux/err.h> #include <linux/platform_device.h> +#include <linux/pm.h> +#include <linux/pm_runtime.h> #include <linux/interrupt.h> #include <linux/hw_random.h> #include <linux/completion.h> @@ -51,8 +53,9 @@ #define RNGC_ERROR_STATUS_STAT_ERR 0x00000008 -#define RNGC_TIMEOUT 3000 /* 3 sec */ - +#define RNGC_SELFTEST_TIMEOUT 2500 /* us */ +#define RNGC_SEED_TIMEOUT 200 /* ms */ +#define RNGC_PM_TIMEOUT 500 /* ms */ static bool self_test = true; module_param(self_test, bool, 0); @@ -110,7 +113,8 @@ static int imx_rngc_self_test(struct imx_rngc *rngc) cmd = readl(rngc->base + RNGC_COMMAND); writel(cmd | RNGC_CMD_SELF_TEST, rngc->base + RNGC_COMMAND); - ret = wait_for_completion_timeout(&rngc->rng_op_done, msecs_to_jiffies(RNGC_TIMEOUT)); + ret = wait_for_completion_timeout(&rngc->rng_op_done, + usecs_to_jiffies(RNGC_SELFTEST_TIMEOUT)); imx_rngc_irq_mask_clear(rngc); if (!ret) return -ETIMEDOUT; @@ -122,7 +126,11 @@ static int imx_rngc_read(struct hwrng *rng, void *data, size_t max, bool wait) { struct imx_rngc *rngc = container_of(rng, struct imx_rngc, rng); unsigned int status; - int retval = 0; + int err, retval = 0; + + err = pm_runtime_resume_and_get(rngc->dev); + if (err) + return err; while (max >= sizeof(u32)) { status = readl(rngc->base + RNGC_STATUS); @@ -140,6 +148,8 @@ static int imx_rngc_read(struct hwrng *rng, void *data, size_t max, bool wait) max -= sizeof(u32); } } + pm_runtime_mark_last_busy(rngc->dev); + pm_runtime_put(rngc->dev); return retval ? 
retval : -EIO; } @@ -168,7 +178,11 @@ static int imx_rngc_init(struct hwrng *rng) { struct imx_rngc *rngc = container_of(rng, struct imx_rngc, rng); u32 cmd, ctrl; - int ret; + int ret, err; + + err = pm_runtime_resume_and_get(rngc->dev); + if (err) + return err; /* clear error */ cmd = readl(rngc->base + RNGC_COMMAND); @@ -182,17 +196,18 @@ static int imx_rngc_init(struct hwrng *rng) cmd = readl(rngc->base + RNGC_COMMAND); writel(cmd | RNGC_CMD_SEED, rngc->base + RNGC_COMMAND); - ret = wait_for_completion_timeout(&rngc->rng_op_done, msecs_to_jiffies(RNGC_TIMEOUT)); + ret = wait_for_completion_timeout(&rngc->rng_op_done, + msecs_to_jiffies(RNGC_SEED_TIMEOUT)); if (!ret) { - ret = -ETIMEDOUT; - goto err; + err = -ETIMEDOUT; + goto out; } } while (rngc->err_reg == RNGC_ERROR_STATUS_STAT_ERR); if (rngc->err_reg) { - ret = -EIO; - goto err; + err = -EIO; + goto out; } /* @@ -203,23 +218,29 @@ static int imx_rngc_init(struct hwrng *rng) ctrl |= RNGC_CTRL_AUTO_SEED; writel(ctrl, rngc->base + RNGC_CONTROL); +out: /* * if initialisation was successful, we keep the interrupt * unmasked until imx_rngc_cleanup is called * we mask the interrupt ourselves if we return an error */ - return 0; + if (err) + imx_rngc_irq_mask_clear(rngc); -err: - imx_rngc_irq_mask_clear(rngc); - return ret; + pm_runtime_put(rngc->dev); + return err; } static void imx_rngc_cleanup(struct hwrng *rng) { struct imx_rngc *rngc = container_of(rng, struct imx_rngc, rng); + int err; - imx_rngc_irq_mask_clear(rngc); + err = pm_runtime_resume_and_get(rngc->dev); + if (!err) { + imx_rngc_irq_mask_clear(rngc); + pm_runtime_put(rngc->dev); + } } static int __init imx_rngc_probe(struct platform_device *pdev) @@ -238,7 +259,7 @@ static int __init imx_rngc_probe(struct platform_device *pdev) if (IS_ERR(rngc->base)) return PTR_ERR(rngc->base); - rngc->clk = devm_clk_get_enabled(&pdev->dev, NULL); + rngc->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(rngc->clk)) return dev_err_probe(&pdev->dev, PTR_ERR(rngc->clk), "Cannot get rng_clk\n"); @@ -246,14 +267,18 @@ static int __init imx_rngc_probe(struct platform_device *pdev) if (irq < 0) return irq; + clk_prepare_enable(rngc->clk); + ver_id = readl(rngc->base + RNGC_VER_ID); rng_type = FIELD_GET(RNG_TYPE, ver_id); /* * This driver supports only RNGC and RNGB. (There's a different * driver for RNGA.) 
*/ - if (rng_type != RNGC_TYPE_RNGC && rng_type != RNGC_TYPE_RNGB) + if (rng_type != RNGC_TYPE_RNGC && rng_type != RNGC_TYPE_RNGB) { + clk_disable_unprepare(rngc->clk); return -ENODEV; + } init_completion(&rngc->rng_op_done); @@ -270,15 +295,24 @@ static int __init imx_rngc_probe(struct platform_device *pdev) ret = devm_request_irq(&pdev->dev, irq, imx_rngc_irq, 0, pdev->name, (void *)rngc); - if (ret) + if (ret) { + clk_disable_unprepare(rngc->clk); return dev_err_probe(&pdev->dev, ret, "Can't get interrupt working.\n"); + } if (self_test) { ret = imx_rngc_self_test(rngc); - if (ret) + if (ret) { + clk_disable_unprepare(rngc->clk); return dev_err_probe(&pdev->dev, ret, "self test failed\n"); + } } + pm_runtime_set_autosuspend_delay(&pdev->dev, RNGC_PM_TIMEOUT); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + devm_pm_runtime_enable(&pdev->dev); + ret = devm_hwrng_register(&pdev->dev, &rngc->rng); if (ret) return dev_err_probe(&pdev->dev, ret, "hwrng registration failed\n"); @@ -308,7 +342,10 @@ static int imx_rngc_resume(struct device *dev) return 0; } -static DEFINE_SIMPLE_DEV_PM_OPS(imx_rngc_pm_ops, imx_rngc_suspend, imx_rngc_resume); +static const struct dev_pm_ops imx_rngc_pm_ops = { + SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) + RUNTIME_PM_OPS(imx_rngc_suspend, imx_rngc_resume, NULL) +}; static const struct of_device_id imx_rngc_dt_ids[] = { { .compatible = "fsl,imx25-rngb" }, @@ -319,7 +356,7 @@ MODULE_DEVICE_TABLE(of, imx_rngc_dt_ids); static struct platform_driver imx_rngc_driver = { .driver = { .name = KBUILD_MODNAME, - .pm = pm_sleep_ptr(&imx_rngc_pm_ops), + .pm = pm_ptr(&imx_rngc_pm_ops), .of_match_table = imx_rngc_dt_ids, }, }; diff --git a/drivers/char/hw_random/ingenic-rng.c b/drivers/char/hw_random/ingenic-rng.c index 4f18c3fa5427..bbfd662d25a6 100644 --- a/drivers/char/hw_random/ingenic-rng.c +++ b/drivers/char/hw_random/ingenic-rng.c @@ -11,7 +11,7 @@ #include <linux/io.h> #include <linux/iopoll.h> #include <linux/module.h> -#include <linux/of_device.h> +#include <linux/of.h> #include <linux/platform_device.h> #include <linux/slab.h> @@ -114,15 +114,13 @@ static int ingenic_rng_probe(struct platform_device *pdev) return 0; } -static int ingenic_rng_remove(struct platform_device *pdev) +static void ingenic_rng_remove(struct platform_device *pdev) { struct ingenic_rng *priv = platform_get_drvdata(pdev); hwrng_unregister(&priv->rng); writel(0, priv->base + RNG_REG_ERNG_OFFSET); - - return 0; } static const struct of_device_id ingenic_rng_of_match[] = { diff --git a/drivers/char/hw_random/jh7110-trng.c b/drivers/char/hw_random/jh7110-trng.c index 38474d48a25e..9776f4daa044 100644 --- a/drivers/char/hw_random/jh7110-trng.c +++ b/drivers/char/hw_random/jh7110-trng.c @@ -300,7 +300,7 @@ static int starfive_trng_probe(struct platform_device *pdev) ret = devm_request_irq(&pdev->dev, irq, starfive_trng_irq, 0, pdev->name, (void *)trng); if (ret) - return dev_err_probe(&pdev->dev, irq, + return dev_err_probe(&pdev->dev, ret, "Failed to register interrupt handler\n"); trng->hclk = devm_clk_get(&pdev->dev, "hclk"); @@ -369,8 +369,12 @@ static int __maybe_unused starfive_trng_resume(struct device *dev) return 0; } -static DEFINE_SIMPLE_DEV_PM_OPS(starfive_trng_pm_ops, starfive_trng_suspend, - starfive_trng_resume); +static const struct dev_pm_ops starfive_trng_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(starfive_trng_suspend, + starfive_trng_resume) + SET_RUNTIME_PM_OPS(starfive_trng_suspend, + starfive_trng_resume, NULL) +}; static 
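The imx-rngc conversion above lands on a common runtime-PM idiom: the runtime callbacks do the actual clock gating, and system sleep is expressed purely as forcing the device into runtime suspend. A hedged sketch of that shape for a hypothetical foo_rngc driver (the clk handling mirrors what imx-rngc does, but the names are invented):

    #include <linux/clk.h>
    #include <linux/device.h>
    #include <linux/pm_runtime.h>

    struct foo_rngc {
    	struct clk *clk;
    };

    static int foo_rngc_runtime_suspend(struct device *dev)
    {
    	struct foo_rngc *rngc = dev_get_drvdata(dev);

    	clk_disable_unprepare(rngc->clk);
    	return 0;
    }

    static int foo_rngc_runtime_resume(struct device *dev)
    {
    	struct foo_rngc *rngc = dev_get_drvdata(dev);

    	return clk_prepare_enable(rngc->clk);
    }

    static const struct dev_pm_ops foo_rngc_pm_ops = {
    	SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
    	RUNTIME_PM_OPS(foo_rngc_runtime_suspend, foo_rngc_runtime_resume, NULL)
    };

With the pm_runtime_set_autosuspend_delay() / pm_runtime_use_autosuspend() / pm_runtime_set_active() / devm_pm_runtime_enable() sequence seen in probe above, the read and init paths only need to bracket hardware access with pm_runtime_resume_and_get() and pm_runtime_put().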
const struct of_device_id trng_dt_ids[] __maybe_unused = { { .compatible = "starfive,jh7110-trng" }, diff --git a/drivers/char/hw_random/ks-sa-rng.c b/drivers/char/hw_random/ks-sa-rng.c index 2f2f21f1b659..9e408144a10c 100644 --- a/drivers/char/hw_random/ks-sa-rng.c +++ b/drivers/char/hw_random/ks-sa-rng.c @@ -81,7 +81,6 @@ struct trng_regs { }; struct ks_sa_rng { - struct device *dev; struct hwrng rng; struct clk *clk; struct regmap *regmap_cfg; @@ -113,8 +112,7 @@ static unsigned int refill_delay_ns(unsigned long clk_rate) static int ks_sa_rng_init(struct hwrng *rng) { u32 value; - struct device *dev = (struct device *)rng->priv; - struct ks_sa_rng *ks_sa_rng = dev_get_drvdata(dev); + struct ks_sa_rng *ks_sa_rng = container_of(rng, struct ks_sa_rng, rng); unsigned long clk_rate = clk_get_rate(ks_sa_rng->clk); /* Enable RNG module */ @@ -153,8 +151,7 @@ static int ks_sa_rng_init(struct hwrng *rng) static void ks_sa_rng_cleanup(struct hwrng *rng) { - struct device *dev = (struct device *)rng->priv; - struct ks_sa_rng *ks_sa_rng = dev_get_drvdata(dev); + struct ks_sa_rng *ks_sa_rng = container_of(rng, struct ks_sa_rng, rng); /* Disable RNG */ writel(0, &ks_sa_rng->reg_rng->control); @@ -164,8 +161,7 @@ static void ks_sa_rng_cleanup(struct hwrng *rng) static int ks_sa_rng_data_read(struct hwrng *rng, u32 *data) { - struct device *dev = (struct device *)rng->priv; - struct ks_sa_rng *ks_sa_rng = dev_get_drvdata(dev); + struct ks_sa_rng *ks_sa_rng = container_of(rng, struct ks_sa_rng, rng); /* Read random data */ data[0] = readl(&ks_sa_rng->reg_rng->output_l); @@ -179,8 +175,7 @@ static int ks_sa_rng_data_read(struct hwrng *rng, u32 *data) static int ks_sa_rng_data_present(struct hwrng *rng, int wait) { - struct device *dev = (struct device *)rng->priv; - struct ks_sa_rng *ks_sa_rng = dev_get_drvdata(dev); + struct ks_sa_rng *ks_sa_rng = container_of(rng, struct ks_sa_rng, rng); u64 now = ktime_get_ns(); u32 ready; @@ -217,7 +212,6 @@ static int ks_sa_rng_probe(struct platform_device *pdev) if (!ks_sa_rng) return -ENOMEM; - ks_sa_rng->dev = dev; ks_sa_rng->rng = (struct hwrng) { .name = "ks_sa_hwrng", .init = ks_sa_rng_init, @@ -225,7 +219,6 @@ static int ks_sa_rng_probe(struct platform_device *pdev) .data_present = ks_sa_rng_data_present, .cleanup = ks_sa_rng_cleanup, }; - ks_sa_rng->rng.priv = (unsigned long)dev; ks_sa_rng->reg_rng = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(ks_sa_rng->reg_rng)) @@ -235,30 +228,27 @@ static int ks_sa_rng_probe(struct platform_device *pdev) syscon_regmap_lookup_by_phandle(dev->of_node, "ti,syscon-sa-cfg"); - if (IS_ERR(ks_sa_rng->regmap_cfg)) { - dev_err(dev, "syscon_node_to_regmap failed\n"); - return -EINVAL; - } + if (IS_ERR(ks_sa_rng->regmap_cfg)) + return dev_err_probe(dev, -EINVAL, "syscon_node_to_regmap failed\n"); + + ks_sa_rng->clk = devm_clk_get_enabled(dev, NULL); + if (IS_ERR(ks_sa_rng->clk)) + return dev_err_probe(dev, PTR_ERR(ks_sa_rng->clk), "Failed to get clock\n"); pm_runtime_enable(dev); ret = pm_runtime_resume_and_get(dev); if (ret < 0) { - dev_err(dev, "Failed to enable SA power-domain\n"); pm_runtime_disable(dev); - return ret; + return dev_err_probe(dev, ret, "Failed to enable SA power-domain\n"); } - platform_set_drvdata(pdev, ks_sa_rng); - return devm_hwrng_register(&pdev->dev, &ks_sa_rng->rng); } -static int ks_sa_rng_remove(struct platform_device *pdev) +static void ks_sa_rng_remove(struct platform_device *pdev) { pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); - - return 0; } static const struct 
of_device_id ks_sa_rng_dt_match[] = { diff --git a/drivers/char/hw_random/meson-rng.c b/drivers/char/hw_random/meson-rng.c index a4eb8e35f13d..75225eb9fef6 100644 --- a/drivers/char/hw_random/meson-rng.c +++ b/drivers/char/hw_random/meson-rng.c @@ -13,12 +13,23 @@ #include <linux/types.h> #include <linux/of.h> #include <linux/clk.h> +#include <linux/iopoll.h> -#define RNG_DATA 0x00 +#define RNG_DATA 0x00 +#define RNG_S4_DATA 0x08 +#define RNG_S4_CFG 0x00 + +#define RUN_BIT BIT(0) +#define SEED_READY_STS_BIT BIT(31) + +struct meson_rng_priv { + int (*read)(struct hwrng *rng, void *buf, size_t max, bool wait); +}; struct meson_rng_data { void __iomem *base; struct hwrng rng; + struct device *dev; }; static int meson_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) @@ -31,16 +42,62 @@ static int meson_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) return sizeof(u32); } +static int meson_rng_wait_status(void __iomem *cfg_addr, int bit) +{ + u32 status = 0; + int ret; + + ret = readl_relaxed_poll_timeout_atomic(cfg_addr, + status, !(status & bit), + 10, 10000); + if (ret) + return -EBUSY; + + return 0; +} + +static int meson_s4_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) +{ + struct meson_rng_data *data = + container_of(rng, struct meson_rng_data, rng); + + void __iomem *cfg_addr = data->base + RNG_S4_CFG; + int err; + + writel_relaxed(readl_relaxed(cfg_addr) | SEED_READY_STS_BIT, cfg_addr); + + err = meson_rng_wait_status(cfg_addr, SEED_READY_STS_BIT); + if (err) { + dev_err(data->dev, "Seed isn't ready, try again\n"); + return err; + } + + err = meson_rng_wait_status(cfg_addr, RUN_BIT); + if (err) { + dev_err(data->dev, "Can't get random number, try again\n"); + return err; + } + + *(u32 *)buf = readl_relaxed(data->base + RNG_S4_DATA); + + return sizeof(u32); +} + static int meson_rng_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct meson_rng_data *data; struct clk *core_clk; + const struct meson_rng_priv *priv; data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; + priv = device_get_match_data(&pdev->dev); + if (!priv) + return -ENODEV; + data->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(data->base)) return PTR_ERR(data->base); @@ -51,13 +108,30 @@ static int meson_rng_probe(struct platform_device *pdev) "Failed to get core clock\n"); data->rng.name = pdev->name; - data->rng.read = meson_rng_read; + data->rng.read = priv->read; + + data->dev = &pdev->dev; return devm_hwrng_register(dev, &data->rng); } +static const struct meson_rng_priv meson_rng_priv = { + .read = meson_rng_read, +}; + +static const struct meson_rng_priv meson_rng_priv_s4 = { + .read = meson_s4_rng_read, +}; + static const struct of_device_id meson_rng_of_match[] = { - { .compatible = "amlogic,meson-rng", }, + { + .compatible = "amlogic,meson-rng", + .data = (void *)&meson_rng_priv, + }, + { + .compatible = "amlogic,meson-s4-rng", + .data = (void *)&meson_rng_priv_s4, + }, {}, }; MODULE_DEVICE_TABLE(of, meson_rng_of_match); diff --git a/drivers/char/hw_random/mpfs-rng.c b/drivers/char/hw_random/mpfs-rng.c index c6972734ae62..0994024daa70 100644 --- a/drivers/char/hw_random/mpfs-rng.c +++ b/drivers/char/hw_random/mpfs-rng.c @@ -79,8 +79,6 @@ static int mpfs_rng_probe(struct platform_device *pdev) rng_priv->rng.read = mpfs_rng_read; rng_priv->rng.name = pdev->name; - platform_set_drvdata(pdev, rng_priv); - ret = devm_hwrng_register(&pdev->dev, &rng_priv->rng); if (ret) return dev_err_probe(&pdev->dev, ret, 
"Failed to register MPFS hwrng\n"); diff --git a/drivers/char/hw_random/mtk-rng.c b/drivers/char/hw_random/mtk-rng.c index aa993753ab12..5808d09d12c4 100644 --- a/drivers/char/hw_random/mtk-rng.c +++ b/drivers/char/hw_random/mtk-rng.c @@ -36,6 +36,7 @@ struct mtk_rng { void __iomem *base; struct clk *clk; struct hwrng rng; + struct device *dev; }; static int mtk_rng_init(struct hwrng *rng) @@ -85,7 +86,7 @@ static int mtk_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) struct mtk_rng *priv = to_mtk_rng(rng); int retval = 0; - pm_runtime_get_sync((struct device *)priv->rng.priv); + pm_runtime_get_sync(priv->dev); while (max >= sizeof(u32)) { if (!mtk_rng_wait_ready(rng, wait)) @@ -97,8 +98,7 @@ static int mtk_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) max -= sizeof(u32); } - pm_runtime_mark_last_busy((struct device *)priv->rng.priv); - pm_runtime_put_sync_autosuspend((struct device *)priv->rng.priv); + pm_runtime_put_sync_autosuspend(priv->dev); return retval || !wait ? retval : -EIO; } @@ -112,13 +112,13 @@ static int mtk_rng_probe(struct platform_device *pdev) if (!priv) return -ENOMEM; + priv->dev = &pdev->dev; priv->rng.name = pdev->name; #ifndef CONFIG_PM priv->rng.init = mtk_rng_init; priv->rng.cleanup = mtk_rng_cleanup; #endif priv->rng.read = mtk_rng_read; - priv->rng.priv = (unsigned long)&pdev->dev; priv->rng.quality = 900; priv->clk = devm_clk_get(&pdev->dev, "rng"); @@ -142,7 +142,9 @@ static int mtk_rng_probe(struct platform_device *pdev) dev_set_drvdata(&pdev->dev, priv); pm_runtime_set_autosuspend_delay(&pdev->dev, RNG_AUTOSUSPEND_TIMEOUT); pm_runtime_use_autosuspend(&pdev->dev); - pm_runtime_enable(&pdev->dev); + ret = devm_pm_runtime_enable(&pdev->dev); + if (ret) + return ret; dev_info(&pdev->dev, "registered RNG driver\n"); diff --git a/drivers/char/hw_random/mxc-rnga.c b/drivers/char/hw_random/mxc-rnga.c index 008763c988ed..e3fcb8bcc29b 100644 --- a/drivers/char/hw_random/mxc-rnga.c +++ b/drivers/char/hw_random/mxc-rnga.c @@ -131,7 +131,7 @@ static void mxc_rnga_cleanup(struct hwrng *rng) __raw_writel(ctrl & ~RNGA_CONTROL_GO, mxc_rng->mem + RNGA_CONTROL); } -static int __init mxc_rnga_probe(struct platform_device *pdev) +static int mxc_rnga_probe(struct platform_device *pdev) { int err; struct mxc_rng *mxc_rng; @@ -147,44 +147,32 @@ static int __init mxc_rnga_probe(struct platform_device *pdev) mxc_rng->rng.data_present = mxc_rnga_data_present; mxc_rng->rng.data_read = mxc_rnga_data_read; - mxc_rng->clk = devm_clk_get(&pdev->dev, NULL); + mxc_rng->clk = devm_clk_get_enabled(&pdev->dev, NULL); if (IS_ERR(mxc_rng->clk)) { dev_err(&pdev->dev, "Could not get rng_clk!\n"); return PTR_ERR(mxc_rng->clk); } - err = clk_prepare_enable(mxc_rng->clk); - if (err) - return err; - mxc_rng->mem = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(mxc_rng->mem)) { err = PTR_ERR(mxc_rng->mem); - goto err_ioremap; + return err; } err = hwrng_register(&mxc_rng->rng); if (err) { dev_err(&pdev->dev, "MXC RNGA registering failed (%d)\n", err); - goto err_ioremap; + return err; } return 0; - -err_ioremap: - clk_disable_unprepare(mxc_rng->clk); - return err; } -static int __exit mxc_rnga_remove(struct platform_device *pdev) +static void mxc_rnga_remove(struct platform_device *pdev) { struct mxc_rng *mxc_rng = platform_get_drvdata(pdev); hwrng_unregister(&mxc_rng->rng); - - clk_disable_unprepare(mxc_rng->clk); - - return 0; } static const struct of_device_id mxc_rnga_of_match[] = { @@ -199,10 +187,11 @@ static struct platform_driver mxc_rnga_driver = { .name = 
"mxc_rnga", .of_match_table = mxc_rnga_of_match, }, - .remove = __exit_p(mxc_rnga_remove), + .probe = mxc_rnga_probe, + .remove = mxc_rnga_remove, }; -module_platform_driver_probe(mxc_rnga_driver, mxc_rnga_probe); +module_platform_driver(mxc_rnga_driver); MODULE_AUTHOR("Freescale Semiconductor, Inc."); MODULE_DESCRIPTION("H/W RNGA driver for i.MX"); diff --git a/drivers/char/hw_random/n2-drv.c b/drivers/char/hw_random/n2-drv.c index 73e408146420..ea6d5599242f 100644 --- a/drivers/char/hw_random/n2-drv.c +++ b/drivers/char/hw_random/n2-drv.c @@ -14,7 +14,8 @@ #include <linux/hw_random.h> #include <linux/of.h> -#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/property.h> #include <asm/hypervisor.h> @@ -28,7 +29,7 @@ static char version[] = DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; -MODULE_AUTHOR("David S. Miller (davem@davemloft.net)"); +MODULE_AUTHOR("David S. Miller <davem@davemloft.net>"); MODULE_DESCRIPTION("Niagara2 RNG driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_MODULE_VERSION); @@ -695,20 +696,15 @@ static void n2rng_driver_version(void) static const struct of_device_id n2rng_match[]; static int n2rng_probe(struct platform_device *op) { - const struct of_device_id *match; int err = -ENOMEM; struct n2rng *np; - match = of_match_device(n2rng_match, &op->dev); - if (!match) - return -EINVAL; - n2rng_driver_version(); np = devm_kzalloc(&op->dev, sizeof(*np), GFP_KERNEL); if (!np) goto out; np->op = op; - np->data = (struct n2rng_template *)match->data; + np->data = (struct n2rng_template *)device_get_match_data(&op->dev); INIT_DELAYED_WORK(&np->work, n2rng_work); @@ -785,7 +781,7 @@ out: return err; } -static int n2rng_remove(struct platform_device *op) +static void n2rng_remove(struct platform_device *op) { struct n2rng *np = platform_get_drvdata(op); @@ -794,8 +790,6 @@ static int n2rng_remove(struct platform_device *op) cancel_delayed_work_sync(&np->work); sun4v_hvapi_unregister(HV_GRP_RNG); - - return 0; } static struct n2rng_template n2_template = { diff --git a/drivers/char/hw_random/n2rng.h b/drivers/char/hw_random/n2rng.h index 9a870f5dc371..7612f15a261f 100644 --- a/drivers/char/hw_random/n2rng.h +++ b/drivers/char/hw_random/n2rng.h @@ -48,7 +48,7 @@ #define HV_RNG_NUM_CONTROL 4 -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ extern unsigned long sun4v_rng_get_diag_ctl(void); extern unsigned long sun4v_rng_ctl_read_v1(unsigned long ctl_regs_ra, unsigned long *state, @@ -147,6 +147,6 @@ struct n2rng { #define N2RNG_BUSY_LIMIT 100 #define N2RNG_HCHECK_LIMIT 100 -#endif /* !(__ASSEMBLY__) */ +#endif /* !(__ASSEMBLER__) */ #endif /* _N2RNG_H */ diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c index 8c6a40d6ce3d..f2a2aa7a531c 100644 --- a/drivers/char/hw_random/nomadik-rng.c +++ b/drivers/char/hw_random/nomadik-rng.c @@ -78,7 +78,6 @@ MODULE_DEVICE_TABLE(amba, nmk_rng_ids); static struct amba_driver nmk_rng_driver = { .drv = { - .owner = THIS_MODULE, .name = "rng", }, .probe = nmk_rng_probe, @@ -88,4 +87,5 @@ static struct amba_driver nmk_rng_driver = { module_amba_driver(nmk_rng_driver); +MODULE_DESCRIPTION("ST-Ericsson Nomadik Random Number Generator"); MODULE_LICENSE("GPL"); diff --git a/drivers/char/hw_random/npcm-rng.c b/drivers/char/hw_random/npcm-rng.c index 8a304b754217..40d6e29dea03 100644 --- a/drivers/char/hw_random/npcm-rng.c +++ b/drivers/char/hw_random/npcm-rng.c @@ -32,6 +32,7 @@ struct npcm_rng { void __iomem *base; struct hwrng rng; + struct device *dev; u32 
clkp; }; @@ -57,7 +58,7 @@ static int npcm_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) int retval = 0; int ready; - pm_runtime_get_sync((struct device *)priv->rng.priv); + pm_runtime_get_sync(priv->dev); while (max) { if (wait) { @@ -79,8 +80,7 @@ static int npcm_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) max--; } - pm_runtime_mark_last_busy((struct device *)priv->rng.priv); - pm_runtime_put_sync_autosuspend((struct device *)priv->rng.priv); + pm_runtime_put_sync_autosuspend(priv->dev); return retval || !wait ? retval : -EIO; } @@ -109,7 +109,7 @@ static int npcm_rng_probe(struct platform_device *pdev) #endif priv->rng.name = pdev->name; priv->rng.read = npcm_rng_read; - priv->rng.priv = (unsigned long)&pdev->dev; + priv->dev = &pdev->dev; priv->clkp = (u32)(uintptr_t)of_device_get_match_data(&pdev->dev); writel(NPCM_RNG_M1ROSEL, priv->base + NPCM_RNGMODE_REG); @@ -126,15 +126,13 @@ static int npcm_rng_probe(struct platform_device *pdev) return 0; } -static int npcm_rng_remove(struct platform_device *pdev) +static void npcm_rng_remove(struct platform_device *pdev) { struct npcm_rng *priv = platform_get_drvdata(pdev); devm_hwrng_unregister(&pdev->dev, &priv->rng); pm_runtime_disable(&pdev->dev); pm_runtime_set_suspended(&pdev->dev); - - return 0; } #ifdef CONFIG_PM diff --git a/drivers/char/hw_random/octeon-rng.c b/drivers/char/hw_random/octeon-rng.c index 8561a09b4681..412f54405036 100644 --- a/drivers/char/hw_random/octeon-rng.c +++ b/drivers/char/hw_random/octeon-rng.c @@ -33,7 +33,7 @@ static int octeon_rng_init(struct hwrng *rng) ctl.u64 = 0; ctl.s.ent_en = 1; /* Enable the entropy source. */ ctl.s.rng_en = 1; /* Enable the RNG hardware. */ - cvmx_write_csr((__force u64)p->control_status, ctl.u64); + cvmx_write_csr((unsigned long)p->control_status, ctl.u64); return 0; } @@ -44,14 +44,14 @@ static void octeon_rng_cleanup(struct hwrng *rng) ctl.u64 = 0; /* Disable everything. 
*/ - cvmx_write_csr((__force u64)p->control_status, ctl.u64); + cvmx_write_csr((unsigned long)p->control_status, ctl.u64); } static int octeon_rng_data_read(struct hwrng *rng, u32 *data) { struct octeon_rng *p = container_of(rng, struct octeon_rng, ops); - *data = cvmx_read64_uint32((__force u64)p->result); + *data = cvmx_read64_uint32((unsigned long)p->result); return sizeof(u32); } diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c index be03f76a2a80..5e8b50f15db7 100644 --- a/drivers/char/hw_random/omap-rng.c +++ b/drivers/char/hw_random/omap-rng.c @@ -509,7 +509,7 @@ err_ioremap: return ret; } -static int omap_rng_remove(struct platform_device *pdev) +static void omap_rng_remove(struct platform_device *pdev) { struct omap_rng_dev *priv = platform_get_drvdata(pdev); @@ -521,8 +521,6 @@ static int omap_rng_remove(struct platform_device *pdev) clk_disable_unprepare(priv->clk); clk_disable_unprepare(priv->clk_reg); - - return 0; } static int __maybe_unused omap_rng_suspend(struct device *dev) @@ -566,4 +564,5 @@ static struct platform_driver omap_rng_driver = { module_platform_driver(omap_rng_driver); MODULE_ALIAS("platform:omap_rng"); MODULE_AUTHOR("Deepak Saxena (and others)"); +MODULE_DESCRIPTION("RNG driver for TI OMAP CPU family"); MODULE_LICENSE("GPL"); diff --git a/drivers/char/hw_random/omap3-rom-rng.c b/drivers/char/hw_random/omap3-rom-rng.c index 18dc46b1b58e..aa71f61c3dc9 100644 --- a/drivers/char/hw_random/omap3-rom-rng.c +++ b/drivers/char/hw_random/omap3-rom-rng.c @@ -56,7 +56,6 @@ static int omap3_rom_rng_read(struct hwrng *rng, void *data, size_t max, bool w) else r = 4; - pm_runtime_mark_last_busy(ddata->dev); pm_runtime_put_autosuspend(ddata->dev); return r; @@ -178,4 +177,5 @@ module_platform_driver(omap3_rom_rng_driver); MODULE_ALIAS("platform:omap3-rom-rng"); MODULE_AUTHOR("Juha Yrjola"); MODULE_AUTHOR("Pali Rohár <pali@kernel.org>"); +MODULE_DESCRIPTION("RNG driver for TI OMAP3 CPU family"); MODULE_LICENSE("GPL"); diff --git a/drivers/char/hw_random/rockchip-rng.c b/drivers/char/hw_random/rockchip-rng.c new file mode 100644 index 000000000000..6e3ed4b85605 --- /dev/null +++ b/drivers/char/hw_random/rockchip-rng.c @@ -0,0 +1,492 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * rockchip-rng.c True Random Number Generator driver for Rockchip SoCs + * + * Copyright (c) 2018, Fuzhou Rockchip Electronics Co., Ltd. + * Copyright (c) 2022, Aurelien Jarno + * Copyright (c) 2025, Collabora Ltd. + * Authors: + * Lin Jinhan <troy.lin@rock-chips.com> + * Aurelien Jarno <aurelien@aurel32.net> + * Nicolas Frattaroli <nicolas.frattaroli@collabora.com> + */ +#include <linux/clk.h> +#include <linux/hw_random.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/reset.h> +#include <linux/slab.h> + +#define RK_RNG_AUTOSUSPEND_DELAY 100 +#define RK_RNG_MAX_BYTE 32 +#define RK_RNG_POLL_PERIOD_US 100 +#define RK_RNG_POLL_TIMEOUT_US 10000 + +/* + * TRNG collects osc ring output bit every RK_RNG_SAMPLE_CNT time. The value is + * a tradeoff between speed and quality and has been adjusted to get a quality + * of ~900 (~87.5% of FIPS 140-2 successes). 
+ */ +#define RK_RNG_SAMPLE_CNT 1000 + +/* after how many bytes of output TRNGv1 implementations should be reseeded */ +#define RK_TRNG_V1_AUTO_RESEED_CNT 16000 + +/* TRNG registers from RK3568 TRM-Part2, section 5.4.1 */ +#define TRNG_RST_CTL 0x0004 +#define TRNG_RNG_CTL 0x0400 +#define TRNG_RNG_CTL_LEN_64_BIT (0x00 << 4) +#define TRNG_RNG_CTL_LEN_128_BIT (0x01 << 4) +#define TRNG_RNG_CTL_LEN_192_BIT (0x02 << 4) +#define TRNG_RNG_CTL_LEN_256_BIT (0x03 << 4) +#define TRNG_RNG_CTL_OSC_RING_SPEED_0 (0x00 << 2) +#define TRNG_RNG_CTL_OSC_RING_SPEED_1 (0x01 << 2) +#define TRNG_RNG_CTL_OSC_RING_SPEED_2 (0x02 << 2) +#define TRNG_RNG_CTL_OSC_RING_SPEED_3 (0x03 << 2) +#define TRNG_RNG_CTL_MASK GENMASK(15, 0) +#define TRNG_RNG_CTL_ENABLE BIT(1) +#define TRNG_RNG_CTL_START BIT(0) +#define TRNG_RNG_SAMPLE_CNT 0x0404 +#define TRNG_RNG_DOUT 0x0410 + +/* + * TRNG V1 register definitions + * The TRNG V1 IP is a stand-alone TRNG implementation (not part of a crypto IP) + * and can be found in the Rockchip RK3588 SoC + */ +#define TRNG_V1_CTRL 0x0000 +#define TRNG_V1_CTRL_NOP 0x00 +#define TRNG_V1_CTRL_RAND 0x01 +#define TRNG_V1_CTRL_SEED 0x02 + +#define TRNG_V1_STAT 0x0004 +#define TRNG_V1_STAT_SEEDED BIT(9) +#define TRNG_V1_STAT_GENERATING BIT(30) +#define TRNG_V1_STAT_RESEEDING BIT(31) + +#define TRNG_V1_MODE 0x0008 +#define TRNG_V1_MODE_128_BIT (0x00 << 3) +#define TRNG_V1_MODE_256_BIT (0x01 << 3) + +/* Interrupt Enable register; unused because polling is faster */ +#define TRNG_V1_IE 0x0010 +#define TRNG_V1_IE_GLBL_EN BIT(31) +#define TRNG_V1_IE_SEED_DONE_EN BIT(1) +#define TRNG_V1_IE_RAND_RDY_EN BIT(0) + +#define TRNG_V1_ISTAT 0x0014 +#define TRNG_V1_ISTAT_RAND_RDY BIT(0) + +/* RAND0 ~ RAND7 */ +#define TRNG_V1_RAND0 0x0020 +#define TRNG_V1_RAND7 0x003C + +/* Auto Reseed Register */ +#define TRNG_V1_AUTO_RQSTS 0x0060 + +#define TRNG_V1_VERSION 0x00F0 +#define TRNG_v1_VERSION_CODE 0x46bc +/* end of TRNG_V1 register definitions */ + +/* + * RKRNG register definitions + * The RKRNG IP is a stand-alone TRNG implementation (not part of a crypto IP) + * and can be found in the Rockchip RK3576, Rockchip RK3562 and Rockchip RK3528 + * SoCs. It can either output true randomness (TRNG) or "deterministic" + * randomness derived from hashing the true entropy (DRNG). This driver + * implementation uses just the true entropy, and leaves stretching the entropy + * up to Linux. 
+ */ +#define RKRNG_CFG 0x0000 +#define RKRNG_CTRL 0x0010 +#define RKRNG_CTRL_REQ_TRNG BIT(4) +#define RKRNG_STATE 0x0014 +#define RKRNG_STATE_TRNG_RDY BIT(4) +#define RKRNG_TRNG_DATA0 0x0050 +#define RKRNG_TRNG_DATA1 0x0054 +#define RKRNG_TRNG_DATA2 0x0058 +#define RKRNG_TRNG_DATA3 0x005C +#define RKRNG_TRNG_DATA4 0x0060 +#define RKRNG_TRNG_DATA5 0x0064 +#define RKRNG_TRNG_DATA6 0x0068 +#define RKRNG_TRNG_DATA7 0x006C +#define RKRNG_READ_LEN 32 + +/* Before removing this assert, give rk3588_rng_read an upper bound of 32 */ +static_assert(RK_RNG_MAX_BYTE <= (TRNG_V1_RAND7 + 4 - TRNG_V1_RAND0), + "You raised RK_RNG_MAX_BYTE and broke rk3588-rng, congrats."); + +struct rk_rng { + struct hwrng rng; + void __iomem *base; + int clk_num; + struct clk_bulk_data *clk_bulks; + const struct rk_rng_soc_data *soc_data; + struct device *dev; +}; + +struct rk_rng_soc_data { + int (*rk_rng_init)(struct hwrng *rng); + int (*rk_rng_read)(struct hwrng *rng, void *buf, size_t max, bool wait); + void (*rk_rng_cleanup)(struct hwrng *rng); + unsigned short quality; + bool reset_optional; +}; + +/* The mask in the upper 16 bits determines the bits that are updated */ +static void rk_rng_write_ctl(struct rk_rng *rng, u32 val, u32 mask) +{ + writel((mask << 16) | val, rng->base + TRNG_RNG_CTL); +} + +static inline void rk_rng_writel(struct rk_rng *rng, u32 val, u32 offset) +{ + writel(val, rng->base + offset); +} + +static inline u32 rk_rng_readl(struct rk_rng *rng, u32 offset) +{ + return readl(rng->base + offset); +} + +static int rk_rng_enable_clks(struct rk_rng *rk_rng) +{ + int ret; + /* start clocks */ + ret = clk_bulk_prepare_enable(rk_rng->clk_num, rk_rng->clk_bulks); + if (ret < 0) { + dev_err(rk_rng->dev, "Failed to enable clocks: %d\n", ret); + return ret; + } + + return 0; +} + +static int rk3568_rng_init(struct hwrng *rng) +{ + struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng); + int ret; + + ret = rk_rng_enable_clks(rk_rng); + if (ret < 0) + return ret; + + /* set the sample period */ + writel(RK_RNG_SAMPLE_CNT, rk_rng->base + TRNG_RNG_SAMPLE_CNT); + + /* set osc ring speed and enable it */ + rk_rng_write_ctl(rk_rng, TRNG_RNG_CTL_LEN_256_BIT | + TRNG_RNG_CTL_OSC_RING_SPEED_0 | + TRNG_RNG_CTL_ENABLE, + TRNG_RNG_CTL_MASK); + + return 0; +} + +static void rk3568_rng_cleanup(struct hwrng *rng) +{ + struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng); + + /* stop TRNG */ + rk_rng_write_ctl(rk_rng, 0, TRNG_RNG_CTL_MASK); + + /* stop clocks */ + clk_bulk_disable_unprepare(rk_rng->clk_num, rk_rng->clk_bulks); +} + +static int rk3568_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) +{ + struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng); + size_t to_read = min_t(size_t, max, RK_RNG_MAX_BYTE); + u32 reg; + int ret = 0; + + ret = pm_runtime_resume_and_get(rk_rng->dev); + if (ret < 0) + return ret; + + /* Start collecting random data */ + rk_rng_write_ctl(rk_rng, TRNG_RNG_CTL_START, TRNG_RNG_CTL_START); + + ret = readl_poll_timeout(rk_rng->base + TRNG_RNG_CTL, reg, + !(reg & TRNG_RNG_CTL_START), + RK_RNG_POLL_PERIOD_US, + RK_RNG_POLL_TIMEOUT_US); + if (ret < 0) + goto out; + + /* Read random data stored in the registers */ + memcpy_fromio(buf, rk_rng->base + TRNG_RNG_DOUT, to_read); +out: + pm_runtime_put_sync_autosuspend(rk_rng->dev); + + return (ret < 0) ? 
ret : to_read; +} + +static int rk3576_rng_init(struct hwrng *rng) +{ + struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng); + + return rk_rng_enable_clks(rk_rng); +} + +static int rk3576_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) +{ + struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng); + size_t to_read = min_t(size_t, max, RKRNG_READ_LEN); + int ret = 0; + u32 val; + + ret = pm_runtime_resume_and_get(rk_rng->dev); + if (ret < 0) + return ret; + + rk_rng_writel(rk_rng, RKRNG_CTRL_REQ_TRNG | (RKRNG_CTRL_REQ_TRNG << 16), + RKRNG_CTRL); + + if (readl_poll_timeout(rk_rng->base + RKRNG_STATE, val, + (val & RKRNG_STATE_TRNG_RDY), RK_RNG_POLL_PERIOD_US, + RK_RNG_POLL_TIMEOUT_US)) { + dev_err(rk_rng->dev, "timed out waiting for data\n"); + ret = -ETIMEDOUT; + goto out; + } + + rk_rng_writel(rk_rng, RKRNG_STATE_TRNG_RDY, RKRNG_STATE); + + memcpy_fromio(buf, rk_rng->base + RKRNG_TRNG_DATA0, to_read); + +out: + pm_runtime_put_sync_autosuspend(rk_rng->dev); + + return (ret < 0) ? ret : to_read; +} + +static int rk3588_rng_init(struct hwrng *rng) +{ + struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng); + u32 version, status, mask, istat; + int ret; + + ret = rk_rng_enable_clks(rk_rng); + if (ret < 0) + return ret; + + version = rk_rng_readl(rk_rng, TRNG_V1_VERSION); + if (version != TRNG_v1_VERSION_CODE) { + dev_err(rk_rng->dev, + "wrong trng version, expected = %08x, actual = %08x\n", + TRNG_V1_VERSION, version); + ret = -EFAULT; + goto err_disable_clk; + } + + mask = TRNG_V1_STAT_SEEDED | TRNG_V1_STAT_GENERATING | + TRNG_V1_STAT_RESEEDING; + if (readl_poll_timeout(rk_rng->base + TRNG_V1_STAT, status, + (status & mask) == TRNG_V1_STAT_SEEDED, + RK_RNG_POLL_PERIOD_US, RK_RNG_POLL_TIMEOUT_US) < 0) { + dev_err(rk_rng->dev, "timed out waiting for hwrng to reseed\n"); + ret = -ETIMEDOUT; + goto err_disable_clk; + } + + /* + * clear ISTAT flag, downstream advises to do this to avoid + * auto-reseeding "on power on" + */ + istat = rk_rng_readl(rk_rng, TRNG_V1_ISTAT); + rk_rng_writel(rk_rng, istat, TRNG_V1_ISTAT); + + /* auto reseed after RK_TRNG_V1_AUTO_RESEED_CNT bytes */ + rk_rng_writel(rk_rng, RK_TRNG_V1_AUTO_RESEED_CNT / 16, TRNG_V1_AUTO_RQSTS); + + return 0; +err_disable_clk: + clk_bulk_disable_unprepare(rk_rng->clk_num, rk_rng->clk_bulks); + return ret; +} + +static void rk3588_rng_cleanup(struct hwrng *rng) +{ + struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng); + + clk_bulk_disable_unprepare(rk_rng->clk_num, rk_rng->clk_bulks); +} + +static int rk3588_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) +{ + struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng); + size_t to_read = min_t(size_t, max, RK_RNG_MAX_BYTE); + int ret = 0; + u32 reg; + + ret = pm_runtime_resume_and_get(rk_rng->dev); + if (ret < 0) + return ret; + + /* Clear ISTAT, even without interrupts enabled, this will be updated */ + reg = rk_rng_readl(rk_rng, TRNG_V1_ISTAT); + rk_rng_writel(rk_rng, reg, TRNG_V1_ISTAT); + + /* generate 256 bits of random data */ + rk_rng_writel(rk_rng, TRNG_V1_MODE_256_BIT, TRNG_V1_MODE); + rk_rng_writel(rk_rng, TRNG_V1_CTRL_RAND, TRNG_V1_CTRL); + + ret = readl_poll_timeout_atomic(rk_rng->base + TRNG_V1_ISTAT, reg, + (reg & TRNG_V1_ISTAT_RAND_RDY), 0, + RK_RNG_POLL_TIMEOUT_US); + if (ret < 0) + goto out; + + /* Read random data that's in registers TRNG_V1_RAND0 through RAND7 */ + memcpy_fromio(buf, rk_rng->base + TRNG_V1_RAND0, to_read); + +out: + /* Clear ISTAT */ + rk_rng_writel(rk_rng, reg, TRNG_V1_ISTAT); + /* close the 
TRNG */ + rk_rng_writel(rk_rng, TRNG_V1_CTRL_NOP, TRNG_V1_CTRL); + + pm_runtime_put_sync_autosuspend(rk_rng->dev); + + return (ret < 0) ? ret : to_read; +} + +static const struct rk_rng_soc_data rk3568_soc_data = { + .rk_rng_init = rk3568_rng_init, + .rk_rng_read = rk3568_rng_read, + .rk_rng_cleanup = rk3568_rng_cleanup, + .quality = 900, + .reset_optional = false, +}; + +static const struct rk_rng_soc_data rk3576_soc_data = { + .rk_rng_init = rk3576_rng_init, + .rk_rng_read = rk3576_rng_read, + .rk_rng_cleanup = rk3588_rng_cleanup, + .quality = 999, /* as determined by actual testing */ + .reset_optional = true, +}; + +static const struct rk_rng_soc_data rk3588_soc_data = { + .rk_rng_init = rk3588_rng_init, + .rk_rng_read = rk3588_rng_read, + .rk_rng_cleanup = rk3588_rng_cleanup, + .quality = 999, /* as determined by actual testing */ + .reset_optional = true, +}; + +static int rk_rng_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct reset_control *rst; + struct rk_rng *rk_rng; + int ret; + + rk_rng = devm_kzalloc(dev, sizeof(*rk_rng), GFP_KERNEL); + if (!rk_rng) + return -ENOMEM; + + rk_rng->soc_data = of_device_get_match_data(dev); + rk_rng->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(rk_rng->base)) + return PTR_ERR(rk_rng->base); + + rk_rng->clk_num = devm_clk_bulk_get_all(dev, &rk_rng->clk_bulks); + if (rk_rng->clk_num < 0) + return dev_err_probe(dev, rk_rng->clk_num, + "Failed to get clks property\n"); + + if (rk_rng->soc_data->reset_optional) + rst = devm_reset_control_array_get_optional_exclusive(dev); + else + rst = devm_reset_control_array_get_exclusive(dev); + + if (rst) { + if (IS_ERR(rst)) + return dev_err_probe(dev, PTR_ERR(rst), "Failed to get reset property\n"); + + reset_control_assert(rst); + udelay(2); + reset_control_deassert(rst); + } + + platform_set_drvdata(pdev, rk_rng); + + rk_rng->rng.name = dev_driver_string(dev); + if (!IS_ENABLED(CONFIG_PM)) { + rk_rng->rng.init = rk_rng->soc_data->rk_rng_init; + rk_rng->rng.cleanup = rk_rng->soc_data->rk_rng_cleanup; + } + rk_rng->rng.read = rk_rng->soc_data->rk_rng_read; + rk_rng->dev = dev; + rk_rng->rng.quality = rk_rng->soc_data->quality; + + pm_runtime_set_autosuspend_delay(dev, RK_RNG_AUTOSUSPEND_DELAY); + pm_runtime_use_autosuspend(dev); + ret = devm_pm_runtime_enable(dev); + if (ret) + return dev_err_probe(dev, ret, "Runtime pm activation failed.\n"); + + ret = devm_hwrng_register(dev, &rk_rng->rng); + if (ret) + return dev_err_probe(dev, ret, "Failed to register Rockchip hwrng\n"); + + return 0; +} + +static int __maybe_unused rk_rng_runtime_suspend(struct device *dev) +{ + struct rk_rng *rk_rng = dev_get_drvdata(dev); + + rk_rng->soc_data->rk_rng_cleanup(&rk_rng->rng); + + return 0; +} + +static int __maybe_unused rk_rng_runtime_resume(struct device *dev) +{ + struct rk_rng *rk_rng = dev_get_drvdata(dev); + + return rk_rng->soc_data->rk_rng_init(&rk_rng->rng); +} + +static const struct dev_pm_ops rk_rng_pm_ops = { + SET_RUNTIME_PM_OPS(rk_rng_runtime_suspend, + rk_rng_runtime_resume, NULL) + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) +}; + +static const struct of_device_id rk_rng_dt_match[] = { + { .compatible = "rockchip,rk3568-rng", .data = (void *)&rk3568_soc_data }, + { .compatible = "rockchip,rk3576-rng", .data = (void *)&rk3576_soc_data }, + { .compatible = "rockchip,rk3588-rng", .data = (void *)&rk3588_soc_data }, + { /* sentinel */ }, +}; + +MODULE_DEVICE_TABLE(of, rk_rng_dt_match); + +static struct platform_driver 
rk_rng_driver = { + .driver = { + .name = "rockchip-rng", + .pm = &rk_rng_pm_ops, + .of_match_table = rk_rng_dt_match, + }, + .probe = rk_rng_probe, +}; + +module_platform_driver(rk_rng_driver); + +MODULE_DESCRIPTION("Rockchip True Random Number Generator driver"); +MODULE_AUTHOR("Lin Jinhan <troy.lin@rock-chips.com>"); +MODULE_AUTHOR("Aurelien Jarno <aurelien@aurel32.net>"); +MODULE_AUTHOR("Daniel Golle <daniel@makrotopia.org>"); +MODULE_AUTHOR("Nicolas Frattaroli <nicolas.frattaroli@collabora.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/char/hw_random/s390-trng.c b/drivers/char/hw_random/s390-trng.c index d27e32e9bfee..3024d5e9fd61 100644 --- a/drivers/char/hw_random/s390-trng.c +++ b/drivers/char/hw_random/s390-trng.c @@ -9,8 +9,7 @@ * Author(s): Harald Freudenberger <freude@de.ibm.com> */ -#define KMSG_COMPONENT "trng" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "trng: " fmt #include <linux/hw_random.h> #include <linux/kernel.h> diff --git a/drivers/char/hw_random/st-rng.c b/drivers/char/hw_random/st-rng.c index 6e9dfac9fc9f..23749817d83c 100644 --- a/drivers/char/hw_random/st-rng.c +++ b/drivers/char/hw_random/st-rng.c @@ -121,4 +121,5 @@ static struct platform_driver st_rng_driver = { module_platform_driver(st_rng_driver); MODULE_AUTHOR("Pankaj Dev <pankaj.dev@st.com>"); +MODULE_DESCRIPTION("ST Microelectronics HW Random Number Generator"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/char/hw_random/stm32-rng.c b/drivers/char/hw_random/stm32-rng.c index efb6a9f9a11b..9a8c00586ab0 100644 --- a/drivers/char/hw_random/stm32-rng.c +++ b/drivers/char/hw_random/stm32-rng.c @@ -4,6 +4,7 @@ */ #include <linux/clk.h> +#include <linux/clk-provider.h> #include <linux/delay.h> #include <linux/hw_random.h> #include <linux/io.h> @@ -17,198 +18,587 @@ #include <linux/reset.h> #include <linux/slab.h> -#define RNG_CR 0x00 -#define RNG_CR_RNGEN BIT(2) -#define RNG_CR_CED BIT(5) - -#define RNG_SR 0x04 -#define RNG_SR_SEIS BIT(6) -#define RNG_SR_CEIS BIT(5) -#define RNG_SR_DRDY BIT(0) +#define RNG_CR 0x00 +#define RNG_CR_RNGEN BIT(2) +#define RNG_CR_CED BIT(5) +#define RNG_CR_CONFIG1 GENMASK(11, 8) +#define RNG_CR_NISTC BIT(12) +#define RNG_CR_CONFIG2 GENMASK(15, 13) +#define RNG_CR_CLKDIV_SHIFT 16 +#define RNG_CR_CLKDIV GENMASK(19, 16) +#define RNG_CR_CONFIG3 GENMASK(25, 20) +#define RNG_CR_CONDRST BIT(30) +#define RNG_CR_CONFLOCK BIT(31) +#define RNG_CR_ENTROPY_SRC_MASK (RNG_CR_CONFIG1 | RNG_CR_NISTC | RNG_CR_CONFIG2 | RNG_CR_CONFIG3) +#define RNG_CR_CONFIG_MASK (RNG_CR_ENTROPY_SRC_MASK | RNG_CR_CED | RNG_CR_CLKDIV) + +#define RNG_SR 0x04 +#define RNG_SR_DRDY BIT(0) +#define RNG_SR_CECS BIT(1) +#define RNG_SR_SECS BIT(2) +#define RNG_SR_CEIS BIT(5) +#define RNG_SR_SEIS BIT(6) + +#define RNG_DR 0x08 + +#define RNG_NSCR 0x0C +#define RNG_NSCR_MASK GENMASK(17, 0) + +#define RNG_HTCR 0x10 + +#define RNG_NB_RECOVER_TRIES 3 + +struct stm32_rng_data { + uint max_clock_rate; + uint nb_clock; + u32 cr; + u32 nscr; + u32 htcr; + bool has_cond_reset; +}; -#define RNG_DR 0x08 +/** + * struct stm32_rng_config - RNG configuration data + * + * @cr: RNG configuration. 0 means default hardware RNG configuration + * @nscr: Noise sources control configuration. + * @htcr: Health tests configuration. 
+ */ +struct stm32_rng_config { + u32 cr; + u32 nscr; + u32 htcr; +}; struct stm32_rng_private { struct hwrng rng; + struct device *dev; void __iomem *base; - struct clk *clk; + struct clk_bulk_data *clk_bulk; struct reset_control *rst; + struct stm32_rng_config pm_conf; + const struct stm32_rng_data *data; bool ced; + bool lock_conf; }; +/* + * Extracts from the STM32 RNG specification when RNG supports CONDRST. + * + * When a noise source (or seed) error occurs, the RNG stops generating + * random numbers and sets to “1” both SEIS and SECS bits to indicate + * that a seed error occurred. (...) + * + * 1. Software reset by writing CONDRST at 1 and at 0 (see bitfield + * description for details). This step is needed only if SECS is set. + * Indeed, when SEIS is set and SECS is cleared it means RNG performed + * the reset automatically (auto-reset). + * 2. If SECS was set in step 1 (no auto-reset) wait for CONDRST + * to be cleared in the RNG_CR register, then confirm that SEIS is + * cleared in the RNG_SR register. Otherwise just clear SEIS bit in + * the RNG_SR register. + * 3. If SECS was set in step 1 (no auto-reset) wait for SECS to be + * cleared by RNG. The random number generation is now back to normal. + */ +static int stm32_rng_conceal_seed_error_cond_reset(struct stm32_rng_private *priv) +{ + struct device *dev = priv->dev; + u32 sr = readl_relaxed(priv->base + RNG_SR); + u32 cr = readl_relaxed(priv->base + RNG_CR); + int err; + + if (sr & RNG_SR_SECS) { + /* Conceal by resetting the subsystem (step 1.) */ + writel_relaxed(cr | RNG_CR_CONDRST, priv->base + RNG_CR); + writel_relaxed(cr & ~RNG_CR_CONDRST, priv->base + RNG_CR); + } else { + /* RNG auto-reset (step 2.) */ + writel_relaxed(sr & ~RNG_SR_SEIS, priv->base + RNG_SR); + goto end; + } + + err = readl_relaxed_poll_timeout_atomic(priv->base + RNG_CR, cr, !(cr & RNG_CR_CONDRST), 10, + 100000); + if (err) { + dev_err(dev, "%s: timeout %x\n", __func__, sr); + return err; + } + + /* Check SEIS is cleared (step 2.) */ + if (readl_relaxed(priv->base + RNG_SR) & RNG_SR_SEIS) + return -EINVAL; + + err = readl_relaxed_poll_timeout_atomic(priv->base + RNG_SR, sr, !(sr & RNG_SR_SECS), 10, + 100000); + if (err) { + dev_err(dev, "%s: timeout %x\n", __func__, sr); + return err; + } + +end: + return 0; +} + +/* + * Extracts from the STM32 RNG specification, when CONDRST is not supported + * + * When a noise source (or seed) error occurs, the RNG stops generating + * random numbers and sets to “1” both SEIS and SECS bits to indicate + * that a seed error occurred. (...) + * + * The following sequence shall be used to fully recover from a seed + * error after the RNG initialization: + * 1. Clear the SEIS bit by writing it to “0”. + * 2. Read out 12 words from the RNG_DR register, and discard each of + * them in order to clean the pipeline. + * 3. Confirm that SEIS is still cleared. Random number generation is + * back to normal. 
+ */ +static int stm32_rng_conceal_seed_error_sw_reset(struct stm32_rng_private *priv) +{ + unsigned int i = 0; + u32 sr = readl_relaxed(priv->base + RNG_SR); + + writel_relaxed(sr & ~RNG_SR_SEIS, priv->base + RNG_SR); + + for (i = 12; i != 0; i--) + (void)readl_relaxed(priv->base + RNG_DR); + + if (readl_relaxed(priv->base + RNG_SR) & RNG_SR_SEIS) + return -EINVAL; + + return 0; +} + +static int stm32_rng_conceal_seed_error(struct hwrng *rng) +{ + struct stm32_rng_private *priv = container_of(rng, struct stm32_rng_private, rng); + + dev_dbg(priv->dev, "Concealing seed error\n"); + + if (priv->data->has_cond_reset) + return stm32_rng_conceal_seed_error_cond_reset(priv); + else + return stm32_rng_conceal_seed_error_sw_reset(priv); +}; + + static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait) { - struct stm32_rng_private *priv = - container_of(rng, struct stm32_rng_private, rng); + struct stm32_rng_private *priv = container_of(rng, struct stm32_rng_private, rng); + unsigned int i = 0; + int retval = 0, err = 0; u32 sr; - int retval = 0; - pm_runtime_get_sync((struct device *) priv->rng.priv); + retval = pm_runtime_resume_and_get(priv->dev); + if (retval) + return retval; + + if (readl_relaxed(priv->base + RNG_SR) & RNG_SR_SEIS) + stm32_rng_conceal_seed_error(rng); while (max >= sizeof(u32)) { sr = readl_relaxed(priv->base + RNG_SR); - /* Manage timeout which is based on timer and take */ - /* care of initial delay time when enabling rng */ + /* + * Manage timeout which is based on timer and take + * care of initial delay time when enabling the RNG. + */ if (!sr && wait) { - int err; - err = readl_relaxed_poll_timeout_atomic(priv->base + RNG_SR, sr, sr, 10, 50000); - if (err) - dev_err((struct device *)priv->rng.priv, - "%s: timeout %x!\n", __func__, sr); + if (err) { + dev_err(priv->dev, "%s: timeout %x!\n", __func__, sr); + break; + } + } else if (!sr) { + /* The FIFO is being filled up */ + break; } - /* If error detected or data not ready... */ if (sr != RNG_SR_DRDY) { - if (WARN_ONCE(sr & (RNG_SR_SEIS | RNG_SR_CEIS), - "bad RNG status - %x\n", sr)) + if (sr & RNG_SR_SEIS) { + err = stm32_rng_conceal_seed_error(rng); + i++; + if (err && i > RNG_NB_RECOVER_TRIES) { + dev_err(priv->dev, "Couldn't recover from seed error\n"); + retval = -ENOTRECOVERABLE; + goto exit_rpm; + } + + continue; + } + + if (WARN_ONCE((sr & RNG_SR_CEIS), "RNG clock too slow - %x\n", sr)) writel_relaxed(0, priv->base + RNG_SR); - break; } + /* Late seed error case: DR being 0 is an error status */ *(u32 *)data = readl_relaxed(priv->base + RNG_DR); + if (!*(u32 *)data) { + err = stm32_rng_conceal_seed_error(rng); + i++; + if (err && i > RNG_NB_RECOVER_TRIES) { + dev_err(priv->dev, "Couldn't recover from seed error"); + retval = -ENOTRECOVERABLE; + goto exit_rpm; + } + + continue; + } + i = 0; retval += sizeof(u32); data += sizeof(u32); max -= sizeof(u32); } - pm_runtime_mark_last_busy((struct device *) priv->rng.priv); - pm_runtime_put_sync_autosuspend((struct device *) priv->rng.priv); +exit_rpm: + pm_runtime_put_sync_autosuspend(priv->dev); return retval || !wait ? 
retval : -EIO; } +static uint stm32_rng_clock_freq_restrain(struct hwrng *rng) +{ + struct stm32_rng_private *priv = + container_of(rng, struct stm32_rng_private, rng); + unsigned long clock_rate = 0; + uint clock_div = 0; + + clock_rate = clk_get_rate(priv->clk_bulk[0].clk); + + /* + * Get the exponent to apply on the CLKDIV field in RNG_CR register + * No need to handle the case when clock-div > 0xF as it is physically + * impossible + */ + while ((clock_rate >> clock_div) > priv->data->max_clock_rate) + clock_div++; + + pr_debug("RNG clk rate : %lu\n", clk_get_rate(priv->clk_bulk[0].clk) >> clock_div); + + return clock_div; +} + static int stm32_rng_init(struct hwrng *rng) { struct stm32_rng_private *priv = container_of(rng, struct stm32_rng_private, rng); int err; + u32 reg; - err = clk_prepare_enable(priv->clk); + err = clk_bulk_prepare_enable(priv->data->nb_clock, priv->clk_bulk); if (err) return err; - if (priv->ced) - writel_relaxed(RNG_CR_RNGEN, priv->base + RNG_CR); - else - writel_relaxed(RNG_CR_RNGEN | RNG_CR_CED, - priv->base + RNG_CR); - /* clear error indicators */ writel_relaxed(0, priv->base + RNG_SR); + reg = readl_relaxed(priv->base + RNG_CR); + + /* + * Keep default RNG configuration if none was specified. + * 0 is an invalid value as it disables all entropy sources. + */ + if (priv->data->has_cond_reset && priv->data->cr) { + uint clock_div = stm32_rng_clock_freq_restrain(rng); + + reg &= ~RNG_CR_CONFIG_MASK; + reg |= RNG_CR_CONDRST | (priv->data->cr & RNG_CR_ENTROPY_SRC_MASK) | + (clock_div << RNG_CR_CLKDIV_SHIFT); + if (priv->ced) + reg &= ~RNG_CR_CED; + else + reg |= RNG_CR_CED; + writel_relaxed(reg, priv->base + RNG_CR); + + /* Health tests and noise control registers */ + writel_relaxed(priv->data->htcr, priv->base + RNG_HTCR); + writel_relaxed(priv->data->nscr & RNG_NSCR_MASK, priv->base + RNG_NSCR); + + reg &= ~RNG_CR_CONDRST; + reg |= RNG_CR_RNGEN; + if (priv->lock_conf) + reg |= RNG_CR_CONFLOCK; + + writel_relaxed(reg, priv->base + RNG_CR); + + err = readl_relaxed_poll_timeout_atomic(priv->base + RNG_CR, reg, + (!(reg & RNG_CR_CONDRST)), + 10, 50000); + if (err) { + clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk); + dev_err(priv->dev, "%s: timeout %x!\n", __func__, reg); + return -EINVAL; + } + } else { + /* Handle all RNG versions by checking if conditional reset should be set */ + if (priv->data->has_cond_reset) + reg |= RNG_CR_CONDRST; + + if (priv->ced) + reg &= ~RNG_CR_CED; + else + reg |= RNG_CR_CED; + + writel_relaxed(reg, priv->base + RNG_CR); + + if (priv->data->has_cond_reset) + reg &= ~RNG_CR_CONDRST; + + reg |= RNG_CR_RNGEN; + + writel_relaxed(reg, priv->base + RNG_CR); + } + + err = readl_relaxed_poll_timeout_atomic(priv->base + RNG_SR, reg, + reg & RNG_SR_DRDY, + 10, 100000); + if (err || (reg & ~RNG_SR_DRDY)) { + clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk); + dev_err(priv->dev, "%s: timeout:%x SR: %x!\n", __func__, err, reg); + + return -EINVAL; + } + + clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk); + return 0; } -static void stm32_rng_cleanup(struct hwrng *rng) +static void stm32_rng_remove(struct platform_device *ofdev) { - struct stm32_rng_private *priv = - container_of(rng, struct stm32_rng_private, rng); + pm_runtime_disable(&ofdev->dev); +} - writel_relaxed(0, priv->base + RNG_CR); - clk_disable_unprepare(priv->clk); +static int __maybe_unused stm32_rng_runtime_suspend(struct device *dev) +{ + struct stm32_rng_private *priv = dev_get_drvdata(dev); + u32 reg; + + reg = 
readl_relaxed(priv->base + RNG_CR); + reg &= ~RNG_CR_RNGEN; + writel_relaxed(reg, priv->base + RNG_CR); + + clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk); + + return 0; } -static int stm32_rng_probe(struct platform_device *ofdev) +static int __maybe_unused stm32_rng_suspend(struct device *dev) { - struct device *dev = &ofdev->dev; - struct device_node *np = ofdev->dev.of_node; - struct stm32_rng_private *priv; - struct resource res; + struct stm32_rng_private *priv = dev_get_drvdata(dev); int err; - priv = devm_kzalloc(dev, sizeof(struct stm32_rng_private), GFP_KERNEL); - if (!priv) - return -ENOMEM; - - err = of_address_to_resource(np, 0, &res); + err = clk_bulk_prepare_enable(priv->data->nb_clock, priv->clk_bulk); if (err) return err; - priv->base = devm_ioremap_resource(dev, &res); - if (IS_ERR(priv->base)) - return PTR_ERR(priv->base); - - priv->clk = devm_clk_get(&ofdev->dev, NULL); - if (IS_ERR(priv->clk)) - return PTR_ERR(priv->clk); - - priv->rst = devm_reset_control_get(&ofdev->dev, NULL); - if (!IS_ERR(priv->rst)) { - reset_control_assert(priv->rst); - udelay(2); - reset_control_deassert(priv->rst); + if (priv->data->has_cond_reset) { + priv->pm_conf.nscr = readl_relaxed(priv->base + RNG_NSCR); + priv->pm_conf.htcr = readl_relaxed(priv->base + RNG_HTCR); } - priv->ced = of_property_read_bool(np, "clock-error-detect"); + /* Do not save that RNG is enabled as it will be handled at resume */ + priv->pm_conf.cr = readl_relaxed(priv->base + RNG_CR) & ~RNG_CR_RNGEN; - dev_set_drvdata(dev, priv); + writel_relaxed(priv->pm_conf.cr, priv->base + RNG_CR); - priv->rng.name = dev_driver_string(dev); -#ifndef CONFIG_PM - priv->rng.init = stm32_rng_init; - priv->rng.cleanup = stm32_rng_cleanup; -#endif - priv->rng.read = stm32_rng_read; - priv->rng.priv = (unsigned long) dev; - priv->rng.quality = 900; - - pm_runtime_set_autosuspend_delay(dev, 100); - pm_runtime_use_autosuspend(dev); - pm_runtime_enable(dev); - - return devm_hwrng_register(dev, &priv->rng); -} - -static int stm32_rng_remove(struct platform_device *ofdev) -{ - pm_runtime_disable(&ofdev->dev); + clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk); return 0; } -#ifdef CONFIG_PM -static int stm32_rng_runtime_suspend(struct device *dev) +static int __maybe_unused stm32_rng_runtime_resume(struct device *dev) { struct stm32_rng_private *priv = dev_get_drvdata(dev); + int err; + u32 reg; - stm32_rng_cleanup(&priv->rng); + err = clk_bulk_prepare_enable(priv->data->nb_clock, priv->clk_bulk); + if (err) + return err; + + /* Clean error indications */ + writel_relaxed(0, priv->base + RNG_SR); + + reg = readl_relaxed(priv->base + RNG_CR); + reg |= RNG_CR_RNGEN; + writel_relaxed(reg, priv->base + RNG_CR); return 0; } -static int stm32_rng_runtime_resume(struct device *dev) +static int __maybe_unused stm32_rng_resume(struct device *dev) { struct stm32_rng_private *priv = dev_get_drvdata(dev); + int err; + u32 reg; + + err = clk_bulk_prepare_enable(priv->data->nb_clock, priv->clk_bulk); + if (err) + return err; + + /* Clean error indications */ + writel_relaxed(0, priv->base + RNG_SR); + + if (priv->data->has_cond_reset) { + /* + * Correct configuration in bits [29:4] must be set in the same + * access that set RNG_CR_CONDRST bit. Else config setting is + * not taken into account. CONFIGLOCK bit must also be unset but + * it is not handled at the moment. 
+ */ + writel_relaxed(priv->pm_conf.cr | RNG_CR_CONDRST, priv->base + RNG_CR); + + writel_relaxed(priv->pm_conf.nscr, priv->base + RNG_NSCR); + writel_relaxed(priv->pm_conf.htcr, priv->base + RNG_HTCR); + + reg = readl_relaxed(priv->base + RNG_CR); + reg |= RNG_CR_RNGEN; + reg &= ~RNG_CR_CONDRST; + writel_relaxed(reg, priv->base + RNG_CR); + + err = readl_relaxed_poll_timeout_atomic(priv->base + RNG_CR, reg, + reg & ~RNG_CR_CONDRST, 10, 100000); + + if (err) { + clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk); + dev_err(priv->dev, "%s: timeout:%x CR: %x!\n", __func__, err, reg); + return -EINVAL; + } + } else { + reg = priv->pm_conf.cr; + reg |= RNG_CR_RNGEN; + writel_relaxed(reg, priv->base + RNG_CR); + } + + clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk); - return stm32_rng_init(&priv->rng); + return 0; } -#endif -static const struct dev_pm_ops stm32_rng_pm_ops = { +static const struct dev_pm_ops __maybe_unused stm32_rng_pm_ops = { SET_RUNTIME_PM_OPS(stm32_rng_runtime_suspend, stm32_rng_runtime_resume, NULL) - SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, - pm_runtime_force_resume) + SET_SYSTEM_SLEEP_PM_OPS(stm32_rng_suspend, + stm32_rng_resume) }; +static const struct stm32_rng_data stm32mp25_rng_data = { + .has_cond_reset = true, + .max_clock_rate = 48000000, + .nb_clock = 2, + .cr = 0x00F00D00, + .nscr = 0x2B5BB, + .htcr = 0x969D, +}; + +static const struct stm32_rng_data stm32mp13_rng_data = { + .has_cond_reset = true, + .max_clock_rate = 48000000, + .nb_clock = 1, + .cr = 0x00F00D00, + .nscr = 0x2B5BB, + .htcr = 0x969D, +}; + +static const struct stm32_rng_data stm32_rng_data = { + .has_cond_reset = false, + .max_clock_rate = 48000000, + .nb_clock = 1, +}; static const struct of_device_id stm32_rng_match[] = { { + .compatible = "st,stm32mp25-rng", + .data = &stm32mp25_rng_data, + }, + { + .compatible = "st,stm32mp13-rng", + .data = &stm32mp13_rng_data, + }, + { .compatible = "st,stm32-rng", + .data = &stm32_rng_data, }, {}, }; MODULE_DEVICE_TABLE(of, stm32_rng_match); +static int stm32_rng_probe(struct platform_device *ofdev) +{ + struct device *dev = &ofdev->dev; + struct device_node *np = ofdev->dev.of_node; + struct stm32_rng_private *priv; + struct resource *res; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->base = devm_platform_get_and_ioremap_resource(ofdev, 0, &res); + if (IS_ERR(priv->base)) + return PTR_ERR(priv->base); + + priv->rst = devm_reset_control_get(&ofdev->dev, NULL); + if (!IS_ERR(priv->rst)) { + reset_control_assert(priv->rst); + udelay(2); + reset_control_deassert(priv->rst); + } + + priv->ced = of_property_read_bool(np, "clock-error-detect"); + priv->lock_conf = of_property_read_bool(np, "st,rng-lock-conf"); + priv->dev = dev; + + priv->data = of_device_get_match_data(dev); + if (!priv->data) + return -ENODEV; + + dev_set_drvdata(dev, priv); + + priv->rng.name = dev_driver_string(dev); + priv->rng.init = stm32_rng_init; + priv->rng.read = stm32_rng_read; + priv->rng.quality = 900; + + if (!priv->data->nb_clock || priv->data->nb_clock > 2) + return -EINVAL; + + ret = devm_clk_bulk_get_all(dev, &priv->clk_bulk); + if (ret != priv->data->nb_clock) + return dev_err_probe(dev, -EINVAL, "Failed to get clocks: %d\n", ret); + + if (priv->data->nb_clock == 2) { + const char *id = priv->clk_bulk[1].id; + struct clk *clk = priv->clk_bulk[1].clk; + + if (!priv->clk_bulk[0].id || !priv->clk_bulk[1].id) + return dev_err_probe(dev, -EINVAL, "Missing clock name\n"); + + if 
(strcmp(priv->clk_bulk[0].id, "core")) { + priv->clk_bulk[1].id = priv->clk_bulk[0].id; + priv->clk_bulk[1].clk = priv->clk_bulk[0].clk; + priv->clk_bulk[0].id = id; + priv->clk_bulk[0].clk = clk; + } + } + + pm_runtime_set_autosuspend_delay(dev, 100); + pm_runtime_use_autosuspend(dev); + pm_runtime_enable(dev); + + return devm_hwrng_register(dev, &priv->rng); +} + static struct platform_driver stm32_rng_driver = { .driver = { .name = "stm32-rng", - .pm = &stm32_rng_pm_ops, + .pm = pm_ptr(&stm32_rng_pm_ops), .of_match_table = stm32_rng_match, }, .probe = stm32_rng_probe, diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c index 3db9d868efb1..e61f06393209 100644 --- a/drivers/char/hw_random/timeriomem-rng.c +++ b/drivers/char/hw_random/timeriomem-rng.c @@ -150,10 +150,9 @@ static int timeriomem_rng_probe(struct platform_device *pdev) priv->rng_ops.quality = pdata->quality; } - priv->period = ns_to_ktime(period * NSEC_PER_USEC); + priv->period = us_to_ktime(period); init_completion(&priv->completion); - hrtimer_init(&priv->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); - priv->timer.function = timeriomem_rng_trigger; + hrtimer_setup(&priv->timer, timeriomem_rng_trigger, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); priv->rng_ops.name = dev_name(&pdev->dev); priv->rng_ops.read = timeriomem_rng_read; @@ -174,13 +173,11 @@ static int timeriomem_rng_probe(struct platform_device *pdev) return 0; } -static int timeriomem_rng_remove(struct platform_device *pdev) +static void timeriomem_rng_remove(struct platform_device *pdev) { struct timeriomem_rng_private *priv = platform_get_drvdata(pdev); hrtimer_cancel(&priv->timer); - - return 0; } static const struct of_device_id timeriomem_rng_match[] = { diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c index e41a84e6b4b5..dd998f4fe4f2 100644 --- a/drivers/char/hw_random/virtio-rng.c +++ b/drivers/char/hw_random/virtio-rng.c @@ -135,7 +135,7 @@ static int probe_common(struct virtio_device *vdev) if (!vi) return -ENOMEM; - vi->index = index = ida_simple_get(&rng_index_ida, 0, 0, GFP_KERNEL); + vi->index = index = ida_alloc(&rng_index_ida, GFP_KERNEL); if (index < 0) { err = index; goto err_ida; @@ -166,7 +166,7 @@ static int probe_common(struct virtio_device *vdev) return 0; err_find: - ida_simple_remove(&rng_index_ida, index); + ida_free(&rng_index_ida, index); err_ida: kfree(vi); return err; @@ -184,7 +184,7 @@ static void remove_common(struct virtio_device *vdev) hwrng_unregister(&vi->hwrng); virtio_reset_device(vdev); vdev->config->del_vqs(vdev); - ida_simple_remove(&rng_index_ida, vi->index); + ida_free(&rng_index_ida, vi->index); kfree(vi); } @@ -208,7 +208,6 @@ static void virtrng_scan(struct virtio_device *vdev) vi->hwrng_register_done = true; } -#ifdef CONFIG_PM_SLEEP static int virtrng_freeze(struct virtio_device *vdev) { remove_common(vdev); @@ -238,7 +237,6 @@ static int virtrng_restore(struct virtio_device *vdev) return err; } -#endif static const struct virtio_device_id id_table[] = { { VIRTIO_ID_RNG, VIRTIO_DEV_ANY_ID }, @@ -247,15 +245,12 @@ static const struct virtio_device_id id_table[] = { static struct virtio_driver virtio_rng_driver = { .driver.name = KBUILD_MODNAME, - .driver.owner = THIS_MODULE, .id_table = id_table, .probe = virtrng_probe, .remove = virtrng_remove, .scan = virtrng_scan, -#ifdef CONFIG_PM_SLEEP - .freeze = virtrng_freeze, - .restore = virtrng_restore, -#endif + .freeze = pm_sleep_ptr(virtrng_freeze), + .restore = pm_sleep_ptr(virtrng_restore), }; 
module_virtio_driver(virtio_rng_driver); diff --git a/drivers/char/hw_random/xgene-rng.c b/drivers/char/hw_random/xgene-rng.c index 99f4e86ac3e9..709a36507145 100644 --- a/drivers/char/hw_random/xgene-rng.c +++ b/drivers/char/hw_random/xgene-rng.c @@ -88,12 +88,12 @@ struct xgene_rng_dev { static void xgene_rng_expired_timer(struct timer_list *t) { - struct xgene_rng_dev *ctx = from_timer(ctx, t, failure_timer); + struct xgene_rng_dev *ctx = timer_container_of(ctx, t, failure_timer); /* Clear failure counter as timer expired */ disable_irq(ctx->irq); ctx->failure_cnt = 0; - del_timer(&ctx->failure_timer); + timer_delete(&ctx->failure_timer); enable_irq(ctx->irq); } @@ -321,7 +321,6 @@ static int xgene_rng_probe(struct platform_device *pdev) return -ENOMEM; ctx->dev = &pdev->dev; - platform_set_drvdata(pdev, ctx); ctx->csr_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(ctx->csr_base)) @@ -358,15 +357,13 @@ static int xgene_rng_probe(struct platform_device *pdev) return 0; } -static int xgene_rng_remove(struct platform_device *pdev) +static void xgene_rng_remove(struct platform_device *pdev) { int rc; rc = device_init_wakeup(&pdev->dev, 0); if (rc) dev_err(&pdev->dev, "RNG init wakeup failed error %d\n", rc); - - return 0; } static const struct of_device_id xgene_rng_of_match[] = { @@ -378,7 +375,7 @@ MODULE_DEVICE_TABLE(of, xgene_rng_of_match); static struct platform_driver xgene_rng_driver = { .probe = xgene_rng_probe, - .remove = xgene_rng_remove, + .remove = xgene_rng_remove, .driver = { .name = "xgene-rng", .of_match_table = xgene_rng_of_match, diff --git a/drivers/char/hw_random/xiphera-trng.c b/drivers/char/hw_random/xiphera-trng.c index 2c586d1fe8a9..4af64f76c8d6 100644 --- a/drivers/char/hw_random/xiphera-trng.c +++ b/drivers/char/hw_random/xiphera-trng.c @@ -121,8 +121,6 @@ static int xiphera_trng_probe(struct platform_device *pdev) return ret; } - platform_set_drvdata(pdev, trng); - return 0; } diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig index f4adc6feb3b2..92bed266d07c 100644 --- a/drivers/char/ipmi/Kconfig +++ b/drivers/char/ipmi/Kconfig @@ -84,6 +84,13 @@ config IPMI_IPMB bus, and it also supports direct messaging on the bus using IPMB direct messages. This module requires I2C support. +config IPMI_LS2K + bool 'Loongson-2K IPMI interface' + depends on LOONGARCH + select MFD_LS2K_BMC_CORE + help + Provides a driver for Loongson-2K IPMI interfaces. 
+ config IPMI_POWERNV depends on PPC_POWERNV tristate 'POWERNV (OPAL firmware) IPMI interface' diff --git a/drivers/char/ipmi/Makefile b/drivers/char/ipmi/Makefile index cb6138b8ded9..4ea450a82242 100644 --- a/drivers/char/ipmi/Makefile +++ b/drivers/char/ipmi/Makefile @@ -5,13 +5,11 @@ ipmi_si-y := ipmi_si_intf.o ipmi_kcs_sm.o ipmi_smic_sm.o ipmi_bt_sm.o \ ipmi_si_hotmod.o ipmi_si_hardcode.o ipmi_si_platform.o \ - ipmi_si_port_io.o ipmi_si_mem_io.o -ifdef CONFIG_PCI -ipmi_si-y += ipmi_si_pci.o -endif -ifdef CONFIG_PARISC -ipmi_si-y += ipmi_si_parisc.o -endif + ipmi_si_mem_io.o +ipmi_si-$(CONFIG_HAS_IOPORT) += ipmi_si_port_io.o +ipmi_si-$(CONFIG_PCI) += ipmi_si_pci.o +ipmi_si-$(CONFIG_IPMI_LS2K) += ipmi_si_ls2k.o +ipmi_si-$(CONFIG_PARISC) += ipmi_si_parisc.o obj-$(CONFIG_IPMI_HANDLER) += ipmi_msghandler.o obj-$(CONFIG_IPMI_DEVICE_INTERFACE) += ipmi_devintf.o diff --git a/drivers/char/ipmi/bt-bmc.c b/drivers/char/ipmi/bt-bmc.c index 7450904e330a..a179d4797011 100644 --- a/drivers/char/ipmi/bt-bmc.c +++ b/drivers/char/ipmi/bt-bmc.c @@ -347,7 +347,7 @@ static const struct file_operations bt_bmc_fops = { static void poll_timer(struct timer_list *t) { - struct bt_bmc *bt_bmc = from_timer(bt_bmc, t, poll_timer); + struct bt_bmc *bt_bmc = timer_container_of(bt_bmc, t, poll_timer); bt_bmc->poll_timer.expires += msecs_to_jiffies(500); wake_up(&bt_bmc->queue); @@ -459,14 +459,13 @@ static int bt_bmc_probe(struct platform_device *pdev) return 0; } -static int bt_bmc_remove(struct platform_device *pdev) +static void bt_bmc_remove(struct platform_device *pdev) { struct bt_bmc *bt_bmc = dev_get_drvdata(&pdev->dev); misc_deregister(&bt_bmc->miscdev); if (bt_bmc->irq < 0) - del_timer_sync(&bt_bmc->poll_timer); - return 0; + timer_delete_sync(&bt_bmc->poll_timer); } static const struct of_device_id bt_bmc_match[] = { diff --git a/drivers/char/ipmi/ipmb_dev_int.c b/drivers/char/ipmi/ipmb_dev_int.c index 49100845fcb7..ee2bdc7ed0da 100644 --- a/drivers/char/ipmi/ipmb_dev_int.c +++ b/drivers/char/ipmi/ipmb_dev_int.c @@ -321,6 +321,9 @@ static int ipmb_probe(struct i2c_client *client) ipmb_dev->miscdev.name = devm_kasprintf(&client->dev, GFP_KERNEL, "%s%d", "ipmb-", client->adapter->nr); + if (!ipmb_dev->miscdev.name) + return -ENOMEM; + ipmb_dev->miscdev.fops = &ipmb_fops; ipmb_dev->miscdev.parent = &client->dev; ret = misc_register(&ipmb_dev->miscdev); @@ -350,16 +353,18 @@ static void ipmb_remove(struct i2c_client *client) } static const struct i2c_device_id ipmb_id[] = { - { "ipmb-dev", 0 }, - {}, + { "ipmb-dev" }, + {} }; MODULE_DEVICE_TABLE(i2c, ipmb_id); +#ifdef CONFIG_ACPI static const struct acpi_device_id acpi_ipmb_id[] = { { "IPMB0001", 0 }, {}, }; MODULE_DEVICE_TABLE(acpi, acpi_ipmb_id); +#endif static struct i2c_driver ipmb_driver = { .driver = { diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c index 332082e02ea5..e6ba35b71f10 100644 --- a/drivers/char/ipmi/ipmi_devintf.c +++ b/drivers/char/ipmi/ipmi_devintf.c @@ -122,12 +122,9 @@ out: static int ipmi_release(struct inode *inode, struct file *file) { struct ipmi_file_private *priv = file->private_data; - int rv; struct ipmi_recv_msg *msg, *next; - rv = ipmi_destroy_user(priv->user); - if (rv) - return rv; + ipmi_destroy_user(priv->user); list_for_each_entry_safe(msg, next, &priv->recv_msgs, link) ipmi_free_recv_msg(msg); diff --git a/drivers/char/ipmi/ipmi_ipmb.c b/drivers/char/ipmi/ipmi_ipmb.c index 4e335832fc26..3a51e58b2487 100644 --- a/drivers/char/ipmi/ipmi_ipmb.c +++ b/drivers/char/ipmi/ipmi_ipmb.c @@ -404,8 
+404,7 @@ static void ipmi_ipmb_shutdown(void *send_info) ipmi_ipmb_stop_thread(iidev); } -static void ipmi_ipmb_sender(void *send_info, - struct ipmi_smi_msg *msg) +static int ipmi_ipmb_sender(void *send_info, struct ipmi_smi_msg *msg) { struct ipmi_ipmb_dev *iidev = send_info; unsigned long flags; @@ -417,6 +416,7 @@ static void ipmi_ipmb_sender(void *send_info, spin_unlock_irqrestore(&iidev->lock, flags); up(&iidev->wake_thread); + return IPMI_CC_NO_ERROR; } static void ipmi_ipmb_request_events(void *send_info) @@ -561,8 +561,8 @@ MODULE_DEVICE_TABLE(of, of_ipmi_ipmb_match); #endif static const struct i2c_device_id ipmi_ipmb_id[] = { - { DEVICE_NAME, 0 }, - {}, + { DEVICE_NAME }, + {} }; MODULE_DEVICE_TABLE(i2c, ipmi_ipmb_id); diff --git a/drivers/char/ipmi/ipmi_kcs_sm.c b/drivers/char/ipmi/ipmi_kcs_sm.c index ecfcb50302f6..efda90dcf5b3 100644 --- a/drivers/char/ipmi/ipmi_kcs_sm.c +++ b/drivers/char/ipmi/ipmi_kcs_sm.c @@ -122,10 +122,10 @@ struct si_sm_data { unsigned long error0_timeout; }; -static unsigned int init_kcs_data_with_state(struct si_sm_data *kcs, - struct si_sm_io *io, enum kcs_states state) +static unsigned int init_kcs_data(struct si_sm_data *kcs, + struct si_sm_io *io) { - kcs->state = state; + kcs->state = KCS_IDLE; kcs->io = io; kcs->write_pos = 0; kcs->write_count = 0; @@ -140,12 +140,6 @@ static unsigned int init_kcs_data_with_state(struct si_sm_data *kcs, return 2; } -static unsigned int init_kcs_data(struct si_sm_data *kcs, - struct si_sm_io *io) -{ - return init_kcs_data_with_state(kcs, io, KCS_IDLE); -} - static inline unsigned char read_status(struct si_sm_data *kcs) { return kcs->io->inputb(kcs->io, 1); @@ -276,7 +270,7 @@ static int start_kcs_transaction(struct si_sm_data *kcs, unsigned char *data, if (size > MAX_KCS_WRITE_SIZE) return IPMI_REQ_LEN_EXCEEDED_ERR; - if (kcs->state != KCS_IDLE) { + if ((kcs->state != KCS_IDLE) && (kcs->state != KCS_HOSED)) { dev_warn(kcs->io->dev, "KCS in invalid state %d\n", kcs->state); return IPMI_NOT_IN_MY_STATE_ERR; } @@ -501,7 +495,7 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time) } if (kcs->state == KCS_HOSED) { - init_kcs_data_with_state(kcs, kcs->io, KCS_ERROR0); + init_kcs_data(kcs, kcs->io); return SI_SM_HOSED; } diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index 186f1fee7534..3f48fc6ab596 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -27,7 +27,6 @@ #include <linux/ipmi_smi.h> #include <linux/notifier.h> #include <linux/init.h> -#include <linux/proc_fs.h> #include <linux/rcupdate.h> #include <linux/interrupt.h> #include <linux/moduleparam.h> @@ -39,17 +38,22 @@ #define IPMI_DRIVER_VERSION "39.2" -static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void); +static struct ipmi_recv_msg *ipmi_alloc_recv_msg(struct ipmi_user *user); +static void ipmi_set_recv_msg_user(struct ipmi_recv_msg *msg, + struct ipmi_user *user); static int ipmi_init_msghandler(void); -static void smi_recv_tasklet(struct tasklet_struct *t); +static void smi_work(struct work_struct *t); static void handle_new_recv_msgs(struct ipmi_smi *intf); static void need_waiter(struct ipmi_smi *intf); static int handle_one_recv_msg(struct ipmi_smi *intf, struct ipmi_smi_msg *msg); +static void intf_free(struct kref *ref); static bool initialized; static bool drvregistered; +static struct timer_list ipmi_timer; + /* Numbers in this enumerator should be mapped to ipmi_panic_event_str */ enum ipmi_panic_event_op { IPMI_SEND_PANIC_EVENT_NONE, @@ -180,14 
+184,8 @@ MODULE_PARM_DESC(max_msgs_per_user, struct ipmi_user { struct list_head link; - /* - * Set to NULL when the user is destroyed, a pointer to myself - * so srcu_dereference can be used on it. - */ - struct ipmi_user *self; - struct srcu_struct release_barrier; - struct kref refcount; + refcount_t destroyed; /* The upper layer that handles receive messages. */ const struct ipmi_user_hndl *handler; @@ -200,30 +198,8 @@ struct ipmi_user { bool gets_events; atomic_t nr_msgs; - - /* Free must run in process context for RCU cleanup. */ - struct work_struct remove_work; }; -static struct workqueue_struct *remove_work_wq; - -static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index) - __acquires(user->release_barrier) -{ - struct ipmi_user *ruser; - - *index = srcu_read_lock(&user->release_barrier); - ruser = srcu_dereference(user->self, &user->release_barrier); - if (!ruser) - srcu_read_unlock(&user->release_barrier, *index); - return ruser; -} - -static void release_ipmi_user(struct ipmi_user *user, int index) -{ - srcu_read_unlock(&user->release_barrier, index); -} - struct cmd_rcvr { struct list_head link; @@ -327,6 +303,8 @@ struct bmc_device { }; #define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev) +static struct workqueue_struct *bmc_remove_work_wq; + static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc, struct ipmi_device_id *id, bool *guid_set, guid_t *guid); @@ -451,14 +429,14 @@ struct ipmi_smi { struct list_head link; /* - * The list of upper layers that are using me. seq_lock write - * protects this. Read protection is with srcu. + * The list of upper layers that are using me. */ struct list_head users; - struct srcu_struct users_srcu; + struct mutex users_mutex; atomic_t nr_users; struct device_attribute nr_users_devattr; struct device_attribute nr_msgs_devattr; + struct device_attribute maintenance_mode_devattr; /* Used for wake ups at startup. */ @@ -491,20 +469,27 @@ struct ipmi_smi { * interface to match them up with their responses. A routine * is called periodically to time the items in this list. */ - spinlock_t seq_lock; + struct mutex seq_lock; struct seq_table seq_table[IPMI_IPMB_NUM_SEQ]; int curr_seq; /* - * Messages queued for delivery. If delivery fails (out of memory - * for instance), They will stay in here to be processed later in a - * periodic timer interrupt. The tasklet is for handling received - * messages directly from the handler. + * Messages queued for deliver to the user. + */ + struct mutex user_msgs_mutex; + struct list_head user_msgs; + + /* + * Messages queued for processing. If processing fails (out + * of memory for instance), They will stay in here to be + * processed later in a periodic timer interrupt. The + * workqueue is for handling received messages directly from + * the handler. */ spinlock_t waiting_rcv_msgs_lock; struct list_head waiting_rcv_msgs; atomic_t watchdog_pretimeouts_to_deliver; - struct tasklet_struct recv_tasklet; + struct work_struct smi_work; spinlock_t xmit_msgs_lock; struct list_head xmit_msgs; @@ -522,10 +507,9 @@ struct ipmi_smi { * Events that were queues because no one was there to receive * them. */ - spinlock_t events_lock; /* For dealing with event stuff. */ + struct mutex events_mutex; /* For dealing with event stuff. */ struct list_head waiting_events; unsigned int waiting_events_count; /* How many events in queue? */ - char delivering_events; char event_msg_printed; /* How many users are waiting for events? 
*/ @@ -560,7 +544,11 @@ struct ipmi_smi { /* For handling of maintenance mode. */ int maintenance_mode; - bool maintenance_mode_enable; + +#define IPMI_MAINTENANCE_MODE_STATE_OFF 0 +#define IPMI_MAINTENANCE_MODE_STATE_FIRMWARE 1 +#define IPMI_MAINTENANCE_MODE_STATE_RESET 2 + int maintenance_mode_state; int auto_maintenance_timeout; spinlock_t maintenance_mode_lock; /* Used in a timer... */ @@ -611,8 +599,31 @@ static void __ipmi_bmc_unregister(struct ipmi_smi *intf); static int __ipmi_bmc_register(struct ipmi_smi *intf, struct ipmi_device_id *id, bool guid_set, guid_t *guid, int intf_num); -static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id); +static int __scan_channels(struct ipmi_smi *intf, + struct ipmi_device_id *id, bool rescan); + +static void free_ipmi_user(struct kref *ref) +{ + struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount); + struct module *owner; + + owner = user->intf->owner; + kref_put(&user->intf->refcount, intf_free); + module_put(owner); + vfree(user); +} + +static void release_ipmi_user(struct ipmi_user *user) +{ + kref_put(&user->refcount, free_ipmi_user); +} +static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user) +{ + if (!kref_get_unless_zero(&user->refcount)) + return NULL; + return user; +} /* * The driver model view of the IPMI messaging driver. @@ -630,9 +641,6 @@ static DEFINE_MUTEX(ipmidriver_mutex); static LIST_HEAD(ipmi_interfaces); static DEFINE_MUTEX(ipmi_interfaces_mutex); -#define ipmi_interfaces_mutex_held() \ - lockdep_is_held(&ipmi_interfaces_mutex) -static struct srcu_struct ipmi_interfaces_srcu; /* * List of watchers that want to know when smi's are added and deleted. @@ -698,27 +706,20 @@ static void free_smi_msg_list(struct list_head *q) } } -static void clean_up_interface_data(struct ipmi_smi *intf) +static void intf_free(struct kref *ref) { + struct ipmi_smi *intf = container_of(ref, struct ipmi_smi, refcount); int i; struct cmd_rcvr *rcvr, *rcvr2; - struct list_head list; - - tasklet_kill(&intf->recv_tasklet); free_smi_msg_list(&intf->waiting_rcv_msgs); free_recv_msg_list(&intf->waiting_events); /* * Wholesale remove all the entries from the list in the - * interface and wait for RCU to know that none are in use. + * interface. No need for locks, this is single-threaded. 
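The acquire/release helpers introduced above follow the standard kref lifecycle idiom: a lookup only succeeds while the count is still non-zero, and the release callback recovers the containing object with container_of() and frees it exactly once, after the last reference is dropped. A self-contained sketch of that idiom, with obj/obj_release/obj_acquire as made-up names rather than anything from the patch:

	#include <linux/kernel.h>
	#include <linux/kref.h>
	#include <linux/slab.h>

	struct obj {
		struct kref refcount;
		/* ... payload ... */
	};

	static void obj_release(struct kref *ref)
	{
		struct obj *o = container_of(ref, struct obj, refcount);

		kfree(o);	/* runs once, when the last reference goes away */
	}

	static struct obj *obj_acquire(struct obj *o)
	{
		/* refuse to hand out a reference to an object already on its way out */
		return kref_get_unless_zero(&o->refcount) ? o : NULL;
	}

	static void obj_put(struct obj *o)
	{
		kref_put(&o->refcount, obj_release);
	}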
*/ - mutex_lock(&intf->cmd_rcvrs_mutex); - INIT_LIST_HEAD(&list); - list_splice_init_rcu(&intf->cmd_rcvrs, &list, synchronize_rcu); - mutex_unlock(&intf->cmd_rcvrs_mutex); - - list_for_each_entry_safe(rcvr, rcvr2, &list, link) + list_for_each_entry_safe(rcvr, rcvr2, &intf->cmd_rcvrs, link) kfree(rcvr); for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) { @@ -726,20 +727,17 @@ static void clean_up_interface_data(struct ipmi_smi *intf) && (intf->seq_table[i].recv_msg)) ipmi_free_recv_msg(intf->seq_table[i].recv_msg); } -} -static void intf_free(struct kref *ref) -{ - struct ipmi_smi *intf = container_of(ref, struct ipmi_smi, refcount); - - clean_up_interface_data(intf); kfree(intf); } int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher) { struct ipmi_smi *intf; - int index, rv; + unsigned int count = 0, i; + int *interfaces = NULL; + struct device **devices = NULL; + int rv = 0; /* * Make sure the driver is actually initialized, this handles @@ -753,20 +751,53 @@ int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher) list_add(&watcher->link, &smi_watchers); - index = srcu_read_lock(&ipmi_interfaces_srcu); - list_for_each_entry_rcu(intf, &ipmi_interfaces, link, - lockdep_is_held(&smi_watchers_mutex)) { - int intf_num = READ_ONCE(intf->intf_num); + /* + * Build an array of ipmi interfaces and fill it in, and + * another array of the devices. We can't call the callback + * with ipmi_interfaces_mutex held. smi_watchers_mutex will + * keep things in order for the user. + */ + mutex_lock(&ipmi_interfaces_mutex); + list_for_each_entry(intf, &ipmi_interfaces, link) + count++; + if (count > 0) { + interfaces = kmalloc_array(count, sizeof(*interfaces), + GFP_KERNEL); + if (!interfaces) { + rv = -ENOMEM; + } else { + devices = kmalloc_array(count, sizeof(*devices), + GFP_KERNEL); + if (!devices) { + kfree(interfaces); + interfaces = NULL; + rv = -ENOMEM; + } + } + count = 0; + } + if (interfaces) { + list_for_each_entry(intf, &ipmi_interfaces, link) { + int intf_num = READ_ONCE(intf->intf_num); - if (intf_num == -1) - continue; - watcher->new_smi(intf_num, intf->si_dev); + if (intf_num == -1) + continue; + devices[count] = intf->si_dev; + interfaces[count++] = intf_num; + } + } + mutex_unlock(&ipmi_interfaces_mutex); + + if (interfaces) { + for (i = 0; i < count; i++) + watcher->new_smi(interfaces[i], devices[i]); + kfree(interfaces); + kfree(devices); } - srcu_read_unlock(&ipmi_interfaces_srcu, index); mutex_unlock(&smi_watchers_mutex); - return 0; + return rv; } EXPORT_SYMBOL(ipmi_smi_watcher_register); @@ -779,22 +810,17 @@ int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher) } EXPORT_SYMBOL(ipmi_smi_watcher_unregister); -/* - * Must be called with smi_watchers_mutex held. - */ static void call_smi_watchers(int i, struct device *dev) { struct ipmi_smi_watcher *w; - mutex_lock(&smi_watchers_mutex); list_for_each_entry(w, &smi_watchers, link) { if (try_module_get(w->owner)) { w->new_smi(i, dev); module_put(w->owner); } } - mutex_unlock(&smi_watchers_mutex); } static int @@ -939,20 +965,15 @@ static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) * risk. At this moment, simply skip it in that case. */ ipmi_free_recv_msg(msg); - atomic_dec(&msg->user->nr_msgs); } else { - int index; - struct ipmi_user *user = acquire_ipmi_user(msg->user, &index); - - if (user) { - atomic_dec(&user->nr_msgs); - user->handler->ipmi_recv_hndl(msg, user->handler_data); - release_ipmi_user(user, index); - } else { - /* User went away, give up. 
*/ - ipmi_free_recv_msg(msg); - rv = -EINVAL; - } + /* + * Deliver it in smi_work. The message will hold a + * refcount to the user. + */ + mutex_lock(&intf->user_msgs_mutex); + list_add_tail(&msg->link, &intf->user_msgs); + mutex_unlock(&intf->user_msgs_mutex); + queue_work(system_wq, &intf->smi_work); } return rv; @@ -1104,12 +1125,11 @@ static int intf_find_seq(struct ipmi_smi *intf, struct ipmi_recv_msg **recv_msg) { int rv = -ENODEV; - unsigned long flags; if (seq >= IPMI_IPMB_NUM_SEQ) return -EINVAL; - spin_lock_irqsave(&intf->seq_lock, flags); + mutex_lock(&intf->seq_lock); if (intf->seq_table[seq].inuse) { struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg; @@ -1122,7 +1142,7 @@ static int intf_find_seq(struct ipmi_smi *intf, rv = 0; } } - spin_unlock_irqrestore(&intf->seq_lock, flags); + mutex_unlock(&intf->seq_lock); return rv; } @@ -1133,14 +1153,13 @@ static int intf_start_seq_timer(struct ipmi_smi *intf, long msgid) { int rv = -ENODEV; - unsigned long flags; unsigned char seq; unsigned long seqid; GET_SEQ_FROM_MSGID(msgid, seq, seqid); - spin_lock_irqsave(&intf->seq_lock, flags); + mutex_lock(&intf->seq_lock); /* * We do this verification because the user can be deleted * while a message is outstanding. @@ -1151,7 +1170,7 @@ static int intf_start_seq_timer(struct ipmi_smi *intf, ent->timeout = ent->orig_timeout; rv = 0; } - spin_unlock_irqrestore(&intf->seq_lock, flags); + mutex_unlock(&intf->seq_lock); return rv; } @@ -1162,7 +1181,6 @@ static int intf_err_seq(struct ipmi_smi *intf, unsigned int err) { int rv = -ENODEV; - unsigned long flags; unsigned char seq; unsigned long seqid; struct ipmi_recv_msg *msg = NULL; @@ -1170,7 +1188,7 @@ static int intf_err_seq(struct ipmi_smi *intf, GET_SEQ_FROM_MSGID(msgid, seq, seqid); - spin_lock_irqsave(&intf->seq_lock, flags); + mutex_lock(&intf->seq_lock); /* * We do this verification because the user can be deleted * while a message is outstanding. 
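The reworked deliver_response() above no longer calls the user's handler directly; it parks the message on a per-interface list and lets the interface's work item drain it in process context. A minimal sketch of that hand-off pattern, under made-up names (msg_sink, sink_push) rather than the driver's own:

	#include <linux/list.h>
	#include <linux/mutex.h>
	#include <linux/workqueue.h>

	struct msg_sink {
		struct mutex lock;
		struct list_head msgs;		/* entries chained via their ->link */
		struct work_struct work;	/* drains msgs in process context */
	};

	static void sink_push(struct msg_sink *sink, struct list_head *msg_link)
	{
		mutex_lock(&sink->lock);
		list_add_tail(msg_link, &sink->msgs);
		mutex_unlock(&sink->lock);

		/* cheap to call from the queuing path; the work item runs later */
		queue_work(system_wq, &sink->work);
	}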
@@ -1184,7 +1202,7 @@ static int intf_err_seq(struct ipmi_smi *intf, msg = ent->recv_msg; rv = 0; } - spin_unlock_irqrestore(&intf->seq_lock, flags); + mutex_unlock(&intf->seq_lock); if (msg) deliver_err_response(intf, msg, err); @@ -1192,23 +1210,13 @@ static int intf_err_seq(struct ipmi_smi *intf, return rv; } -static void free_user_work(struct work_struct *work) -{ - struct ipmi_user *user = container_of(work, struct ipmi_user, - remove_work); - - cleanup_srcu_struct(&user->release_barrier); - vfree(user); -} - int ipmi_create_user(unsigned int if_num, const struct ipmi_user_hndl *handler, void *handler_data, struct ipmi_user **user) { - unsigned long flags; - struct ipmi_user *new_user; - int rv, index; + struct ipmi_user *new_user = NULL; + int rv = 0; struct ipmi_smi *intf; /* @@ -1230,30 +1238,31 @@ int ipmi_create_user(unsigned int if_num, if (rv) return rv; - new_user = vzalloc(sizeof(*new_user)); - if (!new_user) - return -ENOMEM; - - index = srcu_read_lock(&ipmi_interfaces_srcu); - list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { + mutex_lock(&ipmi_interfaces_mutex); + list_for_each_entry(intf, &ipmi_interfaces, link) { if (intf->intf_num == if_num) goto found; } /* Not found, return an error */ rv = -EINVAL; - goto out_kfree; + goto out_unlock; found: + if (intf->in_shutdown) { + rv = -ENODEV; + goto out_unlock; + } + if (atomic_add_return(1, &intf->nr_users) > max_users) { rv = -EBUSY; goto out_kfree; } - INIT_WORK(&new_user->remove_work, free_user_work); - - rv = init_srcu_struct(&new_user->release_barrier); - if (rv) + new_user = vzalloc(sizeof(*new_user)); + if (!new_user) { + rv = -ENOMEM; goto out_kfree; + } if (!try_module_get(intf->owner)) { rv = -ENODEV; @@ -1265,86 +1274,68 @@ int ipmi_create_user(unsigned int if_num, atomic_set(&new_user->nr_msgs, 0); kref_init(&new_user->refcount); + refcount_set(&new_user->destroyed, 1); + kref_get(&new_user->refcount); /* Destroy owns a refcount. */ new_user->handler = handler; new_user->handler_data = handler_data; new_user->intf = intf; new_user->gets_events = false; - rcu_assign_pointer(new_user->self, new_user); - spin_lock_irqsave(&intf->seq_lock, flags); - list_add_rcu(&new_user->link, &intf->users); - spin_unlock_irqrestore(&intf->seq_lock, flags); + mutex_lock(&intf->users_mutex); + mutex_lock(&intf->seq_lock); + list_add(&new_user->link, &intf->users); + mutex_unlock(&intf->seq_lock); + mutex_unlock(&intf->users_mutex); + if (handler->ipmi_watchdog_pretimeout) /* User wants pretimeouts, so make sure to watch for them. 
*/ smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG); - srcu_read_unlock(&ipmi_interfaces_srcu, index); - *user = new_user; - return 0; out_kfree: - atomic_dec(&intf->nr_users); - srcu_read_unlock(&ipmi_interfaces_srcu, index); - vfree(new_user); + if (rv) { + atomic_dec(&intf->nr_users); + vfree(new_user); + } else { + *user = new_user; + } +out_unlock: + mutex_unlock(&ipmi_interfaces_mutex); return rv; } EXPORT_SYMBOL(ipmi_create_user); int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data) { - int rv, index; + int rv = -EINVAL; struct ipmi_smi *intf; - index = srcu_read_lock(&ipmi_interfaces_srcu); - list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { - if (intf->intf_num == if_num) - goto found; + mutex_lock(&ipmi_interfaces_mutex); + list_for_each_entry(intf, &ipmi_interfaces, link) { + if (intf->intf_num == if_num) { + if (!intf->handlers->get_smi_info) + rv = -ENOTTY; + else + rv = intf->handlers->get_smi_info(intf->send_info, data); + break; + } } - srcu_read_unlock(&ipmi_interfaces_srcu, index); - - /* Not found, return an error */ - return -EINVAL; - -found: - if (!intf->handlers->get_smi_info) - rv = -ENOTTY; - else - rv = intf->handlers->get_smi_info(intf->send_info, data); - srcu_read_unlock(&ipmi_interfaces_srcu, index); + mutex_unlock(&ipmi_interfaces_mutex); return rv; } EXPORT_SYMBOL(ipmi_get_smi_info); -static void free_user(struct kref *ref) -{ - struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount); - - /* SRCU cleanup must happen in task context. */ - queue_work(remove_work_wq, &user->remove_work); -} - +/* Must be called with intf->users_mutex held. */ static void _ipmi_destroy_user(struct ipmi_user *user) { struct ipmi_smi *intf = user->intf; int i; - unsigned long flags; struct cmd_rcvr *rcvr; struct cmd_rcvr *rcvrs = NULL; - struct module *owner; + struct ipmi_recv_msg *msg, *msg2; - if (!acquire_ipmi_user(user, &i)) { - /* - * The user has already been cleaned up, just make sure - * nothing is using it and return. - */ - synchronize_srcu(&user->release_barrier); + if (!refcount_dec_if_one(&user->destroyed)) return; - } - - rcu_assign_pointer(user->self, NULL); - release_ipmi_user(user, i); - - synchronize_srcu(&user->release_barrier); if (user->handler->shutdown) user->handler->shutdown(user->handler_data); @@ -1355,11 +1346,11 @@ static void _ipmi_destroy_user(struct ipmi_user *user) if (user->gets_events) atomic_dec(&intf->event_waiters); - /* Remove the user from the interface's sequence table. */ - spin_lock_irqsave(&intf->seq_lock, flags); - list_del_rcu(&user->link); + /* Remove the user from the interface's list and sequence table. */ + list_del(&user->link); atomic_dec(&intf->nr_users); + mutex_lock(&intf->seq_lock); for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) { if (intf->seq_table[i].inuse && (intf->seq_table[i].recv_msg->user == user)) { @@ -1368,13 +1359,13 @@ static void _ipmi_destroy_user(struct ipmi_user *user) ipmi_free_recv_msg(intf->seq_table[i].recv_msg); } } - spin_unlock_irqrestore(&intf->seq_lock, flags); + mutex_unlock(&intf->seq_lock); /* * Remove the user from the command receiver's table. First * we build a list of everything (not using the standard link, * since other things may be using it till we do - * synchronize_srcu()) then free everything in that list. + * synchronize_rcu()) then free everything in that list. 
*/ mutex_lock(&intf->cmd_rcvrs_mutex); list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link, @@ -1386,25 +1377,33 @@ static void _ipmi_destroy_user(struct ipmi_user *user) } } mutex_unlock(&intf->cmd_rcvrs_mutex); - synchronize_rcu(); while (rcvrs) { rcvr = rcvrs; rcvrs = rcvr->next; kfree(rcvr); } - owner = intf->owner; - kref_put(&intf->refcount, intf_free); - module_put(owner); + mutex_lock(&intf->user_msgs_mutex); + list_for_each_entry_safe(msg, msg2, &intf->user_msgs, link) { + if (msg->user != user) + continue; + list_del(&msg->link); + ipmi_free_recv_msg(msg); + } + mutex_unlock(&intf->user_msgs_mutex); + + release_ipmi_user(user); } -int ipmi_destroy_user(struct ipmi_user *user) +void ipmi_destroy_user(struct ipmi_user *user) { - _ipmi_destroy_user(user); + struct ipmi_smi *intf = user->intf; - kref_put(&user->refcount, free_user); + mutex_lock(&intf->users_mutex); + _ipmi_destroy_user(user); + mutex_unlock(&intf->users_mutex); - return 0; + kref_put(&user->refcount, free_ipmi_user); } EXPORT_SYMBOL(ipmi_destroy_user); @@ -1413,9 +1412,9 @@ int ipmi_get_version(struct ipmi_user *user, unsigned char *minor) { struct ipmi_device_id id; - int rv, index; + int rv; - user = acquire_ipmi_user(user, &index); + user = acquire_ipmi_user(user); if (!user) return -ENODEV; @@ -1424,7 +1423,7 @@ int ipmi_get_version(struct ipmi_user *user, *major = ipmi_version_major(&id); *minor = ipmi_version_minor(&id); } - release_ipmi_user(user, index); + release_ipmi_user(user); return rv; } @@ -1434,9 +1433,9 @@ int ipmi_set_my_address(struct ipmi_user *user, unsigned int channel, unsigned char address) { - int index, rv = 0; + int rv = 0; - user = acquire_ipmi_user(user, &index); + user = acquire_ipmi_user(user); if (!user) return -ENODEV; @@ -1446,7 +1445,7 @@ int ipmi_set_my_address(struct ipmi_user *user, channel = array_index_nospec(channel, IPMI_MAX_CHANNELS); user->intf->addrinfo[channel].address = address; } - release_ipmi_user(user, index); + release_ipmi_user(user); return rv; } @@ -1456,9 +1455,9 @@ int ipmi_get_my_address(struct ipmi_user *user, unsigned int channel, unsigned char *address) { - int index, rv = 0; + int rv = 0; - user = acquire_ipmi_user(user, &index); + user = acquire_ipmi_user(user); if (!user) return -ENODEV; @@ -1468,7 +1467,7 @@ int ipmi_get_my_address(struct ipmi_user *user, channel = array_index_nospec(channel, IPMI_MAX_CHANNELS); *address = user->intf->addrinfo[channel].address; } - release_ipmi_user(user, index); + release_ipmi_user(user); return rv; } @@ -1478,9 +1477,9 @@ int ipmi_set_my_LUN(struct ipmi_user *user, unsigned int channel, unsigned char LUN) { - int index, rv = 0; + int rv = 0; - user = acquire_ipmi_user(user, &index); + user = acquire_ipmi_user(user); if (!user) return -ENODEV; @@ -1490,7 +1489,7 @@ int ipmi_set_my_LUN(struct ipmi_user *user, channel = array_index_nospec(channel, IPMI_MAX_CHANNELS); user->intf->addrinfo[channel].lun = LUN & 0x3; } - release_ipmi_user(user, index); + release_ipmi_user(user); return rv; } @@ -1500,9 +1499,9 @@ int ipmi_get_my_LUN(struct ipmi_user *user, unsigned int channel, unsigned char *address) { - int index, rv = 0; + int rv = 0; - user = acquire_ipmi_user(user, &index); + user = acquire_ipmi_user(user); if (!user) return -ENODEV; @@ -1512,7 +1511,7 @@ int ipmi_get_my_LUN(struct ipmi_user *user, channel = array_index_nospec(channel, IPMI_MAX_CHANNELS); *address = user->intf->addrinfo[channel].lun; } - release_ipmi_user(user, index); + release_ipmi_user(user); return rv; } @@ -1520,17 +1519,17 @@ 
EXPORT_SYMBOL(ipmi_get_my_LUN); int ipmi_get_maintenance_mode(struct ipmi_user *user) { - int mode, index; + int mode; unsigned long flags; - user = acquire_ipmi_user(user, &index); + user = acquire_ipmi_user(user); if (!user) return -ENODEV; spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags); mode = user->intf->maintenance_mode; spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags); - release_ipmi_user(user, index); + release_ipmi_user(user); return mode; } @@ -1539,17 +1538,24 @@ EXPORT_SYMBOL(ipmi_get_maintenance_mode); static void maintenance_mode_update(struct ipmi_smi *intf) { if (intf->handlers->set_maintenance_mode) + /* + * Lower level drivers only care about firmware mode + * as it affects their timing. They don't care about + * reset, which disables all commands for a while. + */ intf->handlers->set_maintenance_mode( - intf->send_info, intf->maintenance_mode_enable); + intf->send_info, + (intf->maintenance_mode_state == + IPMI_MAINTENANCE_MODE_STATE_FIRMWARE)); } int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode) { - int rv = 0, index; + int rv = 0; unsigned long flags; struct ipmi_smi *intf = user->intf; - user = acquire_ipmi_user(user, &index); + user = acquire_ipmi_user(user); if (!user) return -ENODEV; @@ -1557,16 +1563,17 @@ int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode) if (intf->maintenance_mode != mode) { switch (mode) { case IPMI_MAINTENANCE_MODE_AUTO: - intf->maintenance_mode_enable - = (intf->auto_maintenance_timeout > 0); + /* Just leave it alone. */ break; case IPMI_MAINTENANCE_MODE_OFF: - intf->maintenance_mode_enable = false; + intf->maintenance_mode_state = + IPMI_MAINTENANCE_MODE_STATE_OFF; break; case IPMI_MAINTENANCE_MODE_ON: - intf->maintenance_mode_enable = true; + intf->maintenance_mode_state = + IPMI_MAINTENANCE_MODE_STATE_FIRMWARE; break; default: @@ -1579,7 +1586,7 @@ int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode) } out_unlock: spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags); - release_ipmi_user(user, index); + release_ipmi_user(user); return rv; } @@ -1587,19 +1594,17 @@ EXPORT_SYMBOL(ipmi_set_maintenance_mode); int ipmi_set_gets_events(struct ipmi_user *user, bool val) { - unsigned long flags; struct ipmi_smi *intf = user->intf; struct ipmi_recv_msg *msg, *msg2; struct list_head msgs; - int index; - user = acquire_ipmi_user(user, &index); + user = acquire_ipmi_user(user); if (!user) return -ENODEV; INIT_LIST_HEAD(&msgs); - spin_lock_irqsave(&intf->events_lock, flags); + mutex_lock(&intf->events_mutex); if (user->gets_events == val) goto out; @@ -1612,13 +1617,6 @@ int ipmi_set_gets_events(struct ipmi_user *user, bool val) atomic_dec(&intf->event_waiters); } - if (intf->delivering_events) - /* - * Another thread is delivering events for this, so - * let it handle any new events. - */ - goto out; - /* Deliver any queued events. 
*/ while (user->gets_events && !list_empty(&intf->waiting_events)) { list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link) @@ -1629,22 +1627,15 @@ int ipmi_set_gets_events(struct ipmi_user *user, bool val) intf->event_msg_printed = 0; } - intf->delivering_events = 1; - spin_unlock_irqrestore(&intf->events_lock, flags); - list_for_each_entry_safe(msg, msg2, &msgs, link) { - msg->user = user; - kref_get(&user->refcount); + ipmi_set_recv_msg_user(msg, user); deliver_local_response(intf, msg); } - - spin_lock_irqsave(&intf->events_lock, flags); - intf->delivering_events = 0; } out: - spin_unlock_irqrestore(&intf->events_lock, flags); - release_ipmi_user(user, index); + mutex_unlock(&intf->events_mutex); + release_ipmi_user(user); return 0; } @@ -1689,9 +1680,9 @@ int ipmi_register_for_cmd(struct ipmi_user *user, { struct ipmi_smi *intf = user->intf; struct cmd_rcvr *rcvr; - int rv = 0, index; + int rv = 0; - user = acquire_ipmi_user(user, &index); + user = acquire_ipmi_user(user); if (!user) return -ENODEV; @@ -1721,7 +1712,7 @@ out_unlock: if (rv) kfree(rcvr); out_release: - release_ipmi_user(user, index); + release_ipmi_user(user); return rv; } @@ -1735,9 +1726,9 @@ int ipmi_unregister_for_cmd(struct ipmi_user *user, struct ipmi_smi *intf = user->intf; struct cmd_rcvr *rcvr; struct cmd_rcvr *rcvrs = NULL; - int i, rv = -ENOENT, index; + int i, rv = -ENOENT; - user = acquire_ipmi_user(user, &index); + user = acquire_ipmi_user(user); if (!user) return -ENODEV; @@ -1760,7 +1751,7 @@ int ipmi_unregister_for_cmd(struct ipmi_user *user, } mutex_unlock(&intf->cmd_rcvrs_mutex); synchronize_rcu(); - release_ipmi_user(user, index); + release_ipmi_user(user); while (rcvrs) { smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS); rcvr = rcvrs; @@ -1884,13 +1875,12 @@ static void smi_send(struct ipmi_smi *intf, const struct ipmi_smi_handlers *handlers, struct ipmi_smi_msg *smi_msg, int priority) { - int run_to_completion = intf->run_to_completion; + int run_to_completion = READ_ONCE(intf->run_to_completion); unsigned long flags = 0; if (!run_to_completion) spin_lock_irqsave(&intf->xmit_msgs_lock, flags); smi_msg = smi_add_send_msg(intf, smi_msg, priority); - if (!run_to_completion) spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags); @@ -1943,14 +1933,20 @@ static int i_ipmi_req_sysintf(struct ipmi_smi *intf, if (is_maintenance_mode_cmd(msg)) { unsigned long flags; + int newst; + + if (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST) + newst = IPMI_MAINTENANCE_MODE_STATE_FIRMWARE; + else + newst = IPMI_MAINTENANCE_MODE_STATE_RESET; spin_lock_irqsave(&intf->maintenance_mode_lock, flags); - intf->auto_maintenance_timeout - = maintenance_mode_timeout_ms; + intf->auto_maintenance_timeout = maintenance_mode_timeout_ms; if (!intf->maintenance_mode - && !intf->maintenance_mode_enable) { - intf->maintenance_mode_enable = true; + && intf->maintenance_mode_state < newst) { + intf->maintenance_mode_state = newst; maintenance_mode_update(intf); + mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES); } spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags); @@ -1964,7 +1960,7 @@ static int i_ipmi_req_sysintf(struct ipmi_smi *intf, smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3); smi_msg->data[1] = msg->cmd; smi_msg->msgid = msgid; - smi_msg->user_data = recv_msg; + smi_msg->recv_msg = recv_msg; if (msg->data_len > 0) memcpy(&smi_msg->data[2], msg->data, msg->data_len); smi_msg->data_size = msg->data_len + 2; @@ -2045,12 +2041,9 @@ static int i_ipmi_req_ipmb(struct ipmi_smi *intf, * Save the 
receive message so we can use it * to deliver the response. */ - smi_msg->user_data = recv_msg; + smi_msg->recv_msg = recv_msg; } else { - /* It's a command, so get a sequence for it. */ - unsigned long flags; - - spin_lock_irqsave(&intf->seq_lock, flags); + mutex_lock(&intf->seq_lock); if (is_maintenance_mode_cmd(msg)) intf->ipmb_maintenance_mode_timeout = @@ -2108,7 +2101,7 @@ static int i_ipmi_req_ipmb(struct ipmi_smi *intf, * to be correct. */ out_err: - spin_unlock_irqrestore(&intf->seq_lock, flags); + mutex_unlock(&intf->seq_lock); } return rv; @@ -2161,7 +2154,7 @@ static int i_ipmi_req_ipmb_direct(struct ipmi_smi *intf, memcpy(smi_msg->data + 4, msg->data, msg->data_len); smi_msg->data_size = msg->data_len + 4; - smi_msg->user_data = recv_msg; + smi_msg->recv_msg = recv_msg; return 0; } @@ -2224,12 +2217,9 @@ static int i_ipmi_req_lan(struct ipmi_smi *intf, * Save the receive message so we can use it * to deliver the response. */ - smi_msg->user_data = recv_msg; + smi_msg->recv_msg = recv_msg; } else { - /* It's a command, so get a sequence for it. */ - unsigned long flags; - - spin_lock_irqsave(&intf->seq_lock, flags); + mutex_lock(&intf->seq_lock); /* * Create a sequence number with a 1 second @@ -2278,7 +2268,7 @@ static int i_ipmi_req_lan(struct ipmi_smi *intf, * to be correct. */ out_err: - spin_unlock_irqrestore(&intf->seq_lock, flags); + mutex_unlock(&intf->seq_lock); } return rv; @@ -2306,24 +2296,21 @@ static int i_ipmi_request(struct ipmi_user *user, { struct ipmi_smi_msg *smi_msg; struct ipmi_recv_msg *recv_msg; + int run_to_completion = READ_ONCE(intf->run_to_completion); int rv = 0; - if (user) { - if (atomic_add_return(1, &user->nr_msgs) > max_msgs_per_user) { - /* Decrement will happen at the end of the routine. */ - rv = -EBUSY; - goto out; - } - } - - if (supplied_recv) + if (supplied_recv) { recv_msg = supplied_recv; - else { - recv_msg = ipmi_alloc_recv_msg(); - if (recv_msg == NULL) { - rv = -ENOMEM; - goto out; + recv_msg->user = user; + if (user) { + atomic_inc(&user->nr_msgs); + /* The put happens when the message is freed. */ + kref_get(&user->refcount); } + } else { + recv_msg = ipmi_alloc_recv_msg(user); + if (IS_ERR(recv_msg)) + return PTR_ERR(recv_msg); } recv_msg->user_msg_data = user_msg_data; @@ -2334,21 +2321,22 @@ static int i_ipmi_request(struct ipmi_user *user, if (smi_msg == NULL) { if (!supplied_recv) ipmi_free_recv_msg(recv_msg); - rv = -ENOMEM; - goto out; + return -ENOMEM; } } - rcu_read_lock(); + if (!run_to_completion) + mutex_lock(&intf->users_mutex); + if (intf->maintenance_mode_state == IPMI_MAINTENANCE_MODE_STATE_RESET) { + /* No messages while the BMC is in reset. */ + rv = -EBUSY; + goto out_err; + } if (intf->in_shutdown) { rv = -ENODEV; goto out_err; } - recv_msg->user = user; - if (user) - /* The put happens when the message is freed. 
*/ - kref_get(&user->refcount); recv_msg->msgid = msgid; /* * Store the message to send in the receive message so timeout @@ -2377,19 +2365,19 @@ static int i_ipmi_request(struct ipmi_user *user, if (rv) { out_err: - ipmi_free_smi_msg(smi_msg); - ipmi_free_recv_msg(recv_msg); + if (!supplied_smi) + ipmi_free_smi_msg(smi_msg); + if (!supplied_recv) + ipmi_free_recv_msg(recv_msg); } else { dev_dbg(intf->si_dev, "Send: %*ph\n", smi_msg->data_size, smi_msg->data); smi_send(intf, intf->handlers, smi_msg, priority); } - rcu_read_unlock(); + if (!run_to_completion) + mutex_unlock(&intf->users_mutex); -out: - if (rv && user) - atomic_dec(&user->nr_msgs); return rv; } @@ -2416,12 +2404,12 @@ int ipmi_request_settime(struct ipmi_user *user, unsigned int retry_time_ms) { unsigned char saddr = 0, lun = 0; - int rv, index; + int rv; if (!user) return -EINVAL; - user = acquire_ipmi_user(user, &index); + user = acquire_ipmi_user(user); if (!user) return -ENODEV; @@ -2440,7 +2428,7 @@ int ipmi_request_settime(struct ipmi_user *user, retries, retry_time_ms); - release_ipmi_user(user, index); + release_ipmi_user(user); return rv; } EXPORT_SYMBOL(ipmi_request_settime); @@ -2455,12 +2443,12 @@ int ipmi_request_supply_msgs(struct ipmi_user *user, int priority) { unsigned char saddr = 0, lun = 0; - int rv, index; + int rv; if (!user) return -EINVAL; - user = acquire_ipmi_user(user, &index); + user = acquire_ipmi_user(user); if (!user) return -ENODEV; @@ -2479,7 +2467,7 @@ int ipmi_request_supply_msgs(struct ipmi_user *user, lun, -1, 0); - release_ipmi_user(user, index); + release_ipmi_user(user); return rv; } EXPORT_SYMBOL(ipmi_request_supply_msgs); @@ -2640,6 +2628,12 @@ retry_bmc_lock: (bmc->dyn_id_set && time_is_after_jiffies(bmc->dyn_id_expiry))) goto out_noprocessing; + /* Don't allow sysfs access when in maintenance mode. */ + if (intf->maintenance_mode_state) { + rv = -EBUSY; + goto out_noprocessing; + } + prev_guid_set = bmc->dyn_guid_set; __get_guid(intf); @@ -2675,7 +2669,7 @@ retry_bmc_lock: if (__ipmi_bmc_register(intf, &id, guid_set, &guid, intf_num)) need_waiter(intf); /* Retry later on an error. */ else - __scan_channels(intf, &id); + __scan_channels(intf, &id, false); if (!intf_set) { @@ -2695,7 +2689,7 @@ retry_bmc_lock: goto out_noprocessing; } else if (memcmp(&bmc->fetch_id, &bmc->id, sizeof(bmc->id))) /* Version info changes, scan the channels again. */ - __scan_channels(intf, &bmc->fetch_id); + __scan_channels(intf, &bmc->fetch_id, true); bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY; @@ -3053,7 +3047,7 @@ static void cleanup_bmc_work(struct work_struct *work) int id = bmc->pdev.id; /* Unregister overwrites id */ platform_device_unregister(&bmc->pdev); - ida_simple_remove(&ipmi_bmc_ida, id); + ida_free(&ipmi_bmc_ida, id); } static void @@ -3066,7 +3060,7 @@ cleanup_bmc_device(struct kref *ref) * with removing the device attributes while reading a device * attribute. 
*/ - queue_work(remove_work_wq, &bmc->remove_work); + queue_work(bmc_remove_work_wq, &bmc->remove_work); } /* @@ -3169,7 +3163,7 @@ static int __ipmi_bmc_register(struct ipmi_smi *intf, bmc->pdev.name = "ipmi_bmc"; - rv = ida_simple_get(&ipmi_bmc_ida, 0, 0, GFP_KERNEL); + rv = ida_alloc(&ipmi_bmc_ida, GFP_KERNEL); if (rv < 0) { kfree(bmc); goto out; @@ -3424,8 +3418,6 @@ channel_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) intf->channels_ready = true; wake_up(&intf->waitq); } else { - intf->channel_list = intf->wchannels + set; - intf->channels_ready = true; rv = send_channel_info_cmd(intf, intf->curr_channel); } @@ -3447,10 +3439,21 @@ channel_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) /* * Must be holding intf->bmc_reg_mutex to call this. */ -static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id) +static int __scan_channels(struct ipmi_smi *intf, + struct ipmi_device_id *id, + bool rescan) { int rv; + if (rescan) { + /* Clear channels_ready to force channels rescan. */ + intf->channels_ready = false; + } + + /* Skip channel scan if channels are already marked ready */ + if (intf->channels_ready) + return 0; + if (ipmi_version_major(id) > 1 || (ipmi_version_major(id) == 1 && ipmi_version_minor(id) >= 5)) { @@ -3522,20 +3525,32 @@ static ssize_t nr_msgs_show(struct device *dev, char *buf) { struct ipmi_smi *intf = container_of(attr, - struct ipmi_smi, nr_msgs_devattr); + struct ipmi_smi, nr_msgs_devattr); struct ipmi_user *user; - int index; unsigned int count = 0; - index = srcu_read_lock(&intf->users_srcu); - list_for_each_entry_rcu(user, &intf->users, link) + mutex_lock(&intf->users_mutex); + list_for_each_entry(user, &intf->users, link) count += atomic_read(&user->nr_msgs); - srcu_read_unlock(&intf->users_srcu, index); + mutex_unlock(&intf->users_mutex); return sysfs_emit(buf, "%u\n", count); } static DEVICE_ATTR_RO(nr_msgs); +static ssize_t maintenance_mode_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct ipmi_smi *intf = container_of(attr, + struct ipmi_smi, + maintenance_mode_devattr); + + return sysfs_emit(buf, "%u %d\n", intf->maintenance_mode_state, + intf->auto_maintenance_timeout); +} +static DEVICE_ATTR_RO(maintenance_mode); + static void redo_bmc_reg(struct work_struct *work) { struct ipmi_smi *intf = container_of(work, struct ipmi_smi, @@ -3571,12 +3586,6 @@ int ipmi_add_smi(struct module *owner, if (!intf) return -ENOMEM; - rv = init_srcu_struct(&intf->users_srcu); - if (rv) { - kfree(intf); - return rv; - } - intf->owner = owner; intf->bmc = &intf->tmp_bmc; INIT_LIST_HEAD(&intf->bmc->intfs); @@ -3593,11 +3602,14 @@ int ipmi_add_smi(struct module *owner, } if (slave_addr != 0) intf->addrinfo[0].address = slave_addr; + INIT_LIST_HEAD(&intf->user_msgs); + mutex_init(&intf->user_msgs_mutex); INIT_LIST_HEAD(&intf->users); + mutex_init(&intf->users_mutex); atomic_set(&intf->nr_users, 0); intf->handlers = handlers; intf->send_info = send_info; - spin_lock_init(&intf->seq_lock); + mutex_init(&intf->seq_lock); for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) { intf->seq_table[j].inuse = 0; intf->seq_table[j].seqid = 0; @@ -3605,13 +3617,12 @@ int ipmi_add_smi(struct module *owner, intf->curr_seq = 0; spin_lock_init(&intf->waiting_rcv_msgs_lock); INIT_LIST_HEAD(&intf->waiting_rcv_msgs); - tasklet_setup(&intf->recv_tasklet, - smi_recv_tasklet); + INIT_WORK(&intf->smi_work, smi_work); atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0); spin_lock_init(&intf->xmit_msgs_lock); INIT_LIST_HEAD(&intf->xmit_msgs); 
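
The hunk above replaces the receive tasklet with a work item because smi_work() now takes mutexes (seq_lock, users_mutex, user_msgs_mutex) and may sleep, which is not permitted in a tasklet's softirq context. A rough, self-contained sketch of the INIT_WORK()/queue_work()/cancel_work_sync() lifecycle this relies on, using placeholder names (demo_ctx, demo_*) rather than the driver's:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct demo_ctx {
        struct work_struct work;
        int pending;                    /* example state touched by the worker */
};

static void demo_work_fn(struct work_struct *work)
{
        struct demo_ctx *ctx = container_of(work, struct demo_ctx, work);

        /* Runs in process context, so sleeping locks are fine here. */
        ctx->pending = 0;
}

static void demo_init(struct demo_ctx *ctx)
{
        INIT_WORK(&ctx->work, demo_work_fn);
}

static void demo_kick(struct demo_ctx *ctx)
{
        ctx->pending = 1;
        queue_work(system_wq, &ctx->work);      /* replaces tasklet_schedule() */
}

static void demo_teardown(struct demo_ctx *ctx)
{
        cancel_work_sync(&ctx->work);           /* replaces tasklet_kill() */
}

queue_work() may be called from atomic context, so the call sites that previously used tasklet_schedule() can stay where they are; only the handler itself gains the right to sleep.
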
INIT_LIST_HEAD(&intf->hp_xmit_msgs); - spin_lock_init(&intf->events_lock); + mutex_init(&intf->events_mutex); spin_lock_init(&intf->watch_lock); atomic_set(&intf->event_waiters, 0); intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME; @@ -3624,12 +3635,16 @@ int ipmi_add_smi(struct module *owner, for (i = 0; i < IPMI_NUM_STATS; i++) atomic_set(&intf->stats[i], 0); + /* + * Grab the watchers mutex so we can deliver the new interface + * without races. + */ + mutex_lock(&smi_watchers_mutex); mutex_lock(&ipmi_interfaces_mutex); /* Look for a hole in the numbers. */ i = 0; link = &ipmi_interfaces; - list_for_each_entry_rcu(tintf, &ipmi_interfaces, link, - ipmi_interfaces_mutex_held()) { + list_for_each_entry(tintf, &ipmi_interfaces, link) { if (tintf->intf_num != i) { link = &tintf->link; break; @@ -3638,9 +3653,9 @@ int ipmi_add_smi(struct module *owner, } /* Add the new interface in numeric order. */ if (i == 0) - list_add_rcu(&intf->link, &ipmi_interfaces); + list_add(&intf->link, &ipmi_interfaces); else - list_add_tail_rcu(&intf->link, link); + list_add_tail(&intf->link, link); rv = handlers->start_processing(send_info, intf); if (rv) @@ -3653,7 +3668,7 @@ int ipmi_add_smi(struct module *owner, } mutex_lock(&intf->bmc_reg_mutex); - rv = __scan_channels(intf, &id); + rv = __scan_channels(intf, &id, false); mutex_unlock(&intf->bmc_reg_mutex); if (rv) goto out_err_bmc_reg; @@ -3672,18 +3687,22 @@ int ipmi_add_smi(struct module *owner, goto out_err_bmc_reg; } - /* - * Keep memory order straight for RCU readers. Make - * sure everything else is committed to memory before - * setting intf_num to mark the interface valid. - */ - smp_wmb(); + intf->maintenance_mode_devattr = dev_attr_maintenance_mode; + sysfs_attr_init(&intf->maintenance_mode_devattr.attr); + rv = device_create_file(intf->si_dev, &intf->maintenance_mode_devattr); + if (rv) { + device_remove_file(intf->si_dev, &intf->nr_users_devattr); + goto out_err_bmc_reg; + } + intf->intf_num = i; mutex_unlock(&ipmi_interfaces_mutex); /* After this point the interface is legal to use. */ call_smi_watchers(i, intf->si_dev); + mutex_unlock(&smi_watchers_mutex); + return 0; out_err_bmc_reg: @@ -3692,10 +3711,9 @@ int ipmi_add_smi(struct module *owner, if (intf->handlers->shutdown) intf->handlers->shutdown(intf->send_info); out_err: - list_del_rcu(&intf->link); + list_del(&intf->link); mutex_unlock(&ipmi_interfaces_mutex); - synchronize_srcu(&ipmi_interfaces_srcu); - cleanup_srcu_struct(&intf->users_srcu); + mutex_unlock(&smi_watchers_mutex); kref_put(&intf->refcount, intf_free); return rv; @@ -3761,20 +3779,30 @@ static void cleanup_smi_msgs(struct ipmi_smi *intf) void ipmi_unregister_smi(struct ipmi_smi *intf) { struct ipmi_smi_watcher *w; - int intf_num, index; + int intf_num; if (!intf) return; + intf_num = intf->intf_num; mutex_lock(&ipmi_interfaces_mutex); + cancel_work_sync(&intf->smi_work); + /* smi_work() can no longer be in progress after this. */ + intf->intf_num = -1; intf->in_shutdown = true; - list_del_rcu(&intf->link); + list_del(&intf->link); mutex_unlock(&ipmi_interfaces_mutex); - synchronize_srcu(&ipmi_interfaces_srcu); - /* At this point no users can be added to the interface. */ + /* + * At this point no users can be added to the interface and no + * new messages can be sent. 
+ */ + + if (intf->handlers->shutdown) + intf->handlers->shutdown(intf->send_info); + device_remove_file(intf->si_dev, &intf->maintenance_mode_devattr); device_remove_file(intf->si_dev, &intf->nr_msgs_devattr); device_remove_file(intf->si_dev, &intf->nr_users_devattr); @@ -3787,24 +3815,19 @@ void ipmi_unregister_smi(struct ipmi_smi *intf) w->smi_gone(intf_num); mutex_unlock(&smi_watchers_mutex); - index = srcu_read_lock(&intf->users_srcu); + mutex_lock(&intf->users_mutex); while (!list_empty(&intf->users)) { - struct ipmi_user *user = - container_of(list_next_rcu(&intf->users), - struct ipmi_user, link); + struct ipmi_user *user = list_first_entry(&intf->users, + struct ipmi_user, link); _ipmi_destroy_user(user); } - srcu_read_unlock(&intf->users_srcu, index); - - if (intf->handlers->shutdown) - intf->handlers->shutdown(intf->send_info); + mutex_unlock(&intf->users_mutex); cleanup_smi_msgs(intf); ipmi_bmc_unregister(intf); - cleanup_srcu_struct(&intf->users_srcu); kref_put(&intf->refcount, intf_free); } EXPORT_SYMBOL(ipmi_unregister_smi); @@ -3882,7 +3905,7 @@ static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf, unsigned char chan; struct ipmi_user *user = NULL; struct ipmi_ipmb_addr *ipmb_addr; - struct ipmi_recv_msg *recv_msg; + struct ipmi_recv_msg *recv_msg = NULL; if (msg->rsp_size < 10) { /* Message not big enough, just ignore it. */ @@ -3903,9 +3926,8 @@ static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf, rcvr = find_cmd_rcvr(intf, netfn, cmd, chan); if (rcvr) { user = rcvr->user; - kref_get(&user->refcount); - } else - user = NULL; + recv_msg = ipmi_alloc_recv_msg(user); + } rcu_read_unlock(); if (user == NULL) { @@ -3929,58 +3951,47 @@ static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf, dev_dbg(intf->si_dev, "Invalid command: %*ph\n", msg->data_size, msg->data); - rcu_read_lock(); - if (!intf->in_shutdown) { - smi_send(intf, intf->handlers, msg, 0); - /* - * We used the message, so return the value - * that causes it to not be freed or - * queued. - */ - rv = -1; - } - rcu_read_unlock(); - } else { - recv_msg = ipmi_alloc_recv_msg(); - if (!recv_msg) { - /* - * We couldn't allocate memory for the - * message, so requeue it for handling - * later. - */ - rv = 1; - kref_put(&user->refcount, free_user); - } else { - /* Extract the source address from the data. */ - ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr; - ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE; - ipmb_addr->slave_addr = msg->rsp[6]; - ipmb_addr->lun = msg->rsp[7] & 3; - ipmb_addr->channel = msg->rsp[3] & 0xf; + smi_send(intf, intf->handlers, msg, 0); + /* + * We used the message, so return the value that + * causes it to not be freed or queued. + */ + rv = -1; + } else if (!IS_ERR(recv_msg)) { + /* Extract the source address from the data. */ + ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr; + ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE; + ipmb_addr->slave_addr = msg->rsp[6]; + ipmb_addr->lun = msg->rsp[7] & 3; + ipmb_addr->channel = msg->rsp[3] & 0xf; - /* - * Extract the rest of the message information - * from the IPMB header. - */ - recv_msg->user = user; - recv_msg->recv_type = IPMI_CMD_RECV_TYPE; - recv_msg->msgid = msg->rsp[7] >> 2; - recv_msg->msg.netfn = msg->rsp[4] >> 2; - recv_msg->msg.cmd = msg->rsp[8]; - recv_msg->msg.data = recv_msg->msg_data; + /* + * Extract the rest of the message information + * from the IPMB header. 
+ */ + recv_msg->recv_type = IPMI_CMD_RECV_TYPE; + recv_msg->msgid = msg->rsp[7] >> 2; + recv_msg->msg.netfn = msg->rsp[4] >> 2; + recv_msg->msg.cmd = msg->rsp[8]; + recv_msg->msg.data = recv_msg->msg_data; - /* - * We chop off 10, not 9 bytes because the checksum - * at the end also needs to be removed. - */ - recv_msg->msg.data_len = msg->rsp_size - 10; - memcpy(recv_msg->msg_data, &msg->rsp[9], - msg->rsp_size - 10); - if (deliver_response(intf, recv_msg)) - ipmi_inc_stat(intf, unhandled_commands); - else - ipmi_inc_stat(intf, handled_commands); - } + /* + * We chop off 10, not 9 bytes because the checksum + * at the end also needs to be removed. + */ + recv_msg->msg.data_len = msg->rsp_size - 10; + memcpy(recv_msg->msg_data, &msg->rsp[9], + msg->rsp_size - 10); + if (deliver_response(intf, recv_msg)) + ipmi_inc_stat(intf, unhandled_commands); + else + ipmi_inc_stat(intf, handled_commands); + } else { + /* + * We couldn't allocate memory for the message, so + * requeue it for handling later. + */ + rv = 1; } return rv; @@ -3993,7 +4004,7 @@ static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf, int rv = 0; struct ipmi_user *user = NULL; struct ipmi_ipmb_direct_addr *daddr; - struct ipmi_recv_msg *recv_msg; + struct ipmi_recv_msg *recv_msg = NULL; unsigned char netfn = msg->rsp[0] >> 2; unsigned char cmd = msg->rsp[3]; @@ -4002,9 +4013,8 @@ static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf, rcvr = find_cmd_rcvr(intf, netfn, cmd, 0); if (rcvr) { user = rcvr->user; - kref_get(&user->refcount); - } else - user = NULL; + recv_msg = ipmi_alloc_recv_msg(user); + } rcu_read_unlock(); if (user == NULL) { @@ -4020,55 +4030,44 @@ static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf, msg->data[4] = IPMI_INVALID_CMD_COMPLETION_CODE; msg->data_size = 5; - rcu_read_lock(); - if (!intf->in_shutdown) { - smi_send(intf, intf->handlers, msg, 0); - /* - * We used the message, so return the value - * that causes it to not be freed or - * queued. - */ - rv = -1; - } - rcu_read_unlock(); - } else { - recv_msg = ipmi_alloc_recv_msg(); - if (!recv_msg) { - /* - * We couldn't allocate memory for the - * message, so requeue it for handling - * later. - */ - rv = 1; - kref_put(&user->refcount, free_user); - } else { - /* Extract the source address from the data. */ - daddr = (struct ipmi_ipmb_direct_addr *)&recv_msg->addr; - daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE; - daddr->channel = 0; - daddr->slave_addr = msg->rsp[1]; - daddr->rs_lun = msg->rsp[0] & 3; - daddr->rq_lun = msg->rsp[2] & 3; + smi_send(intf, intf->handlers, msg, 0); + /* + * We used the message, so return the value that + * causes it to not be freed or queued. + */ + rv = -1; + } else if (!IS_ERR(recv_msg)) { + /* Extract the source address from the data. */ + daddr = (struct ipmi_ipmb_direct_addr *)&recv_msg->addr; + daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE; + daddr->channel = 0; + daddr->slave_addr = msg->rsp[1]; + daddr->rs_lun = msg->rsp[0] & 3; + daddr->rq_lun = msg->rsp[2] & 3; - /* - * Extract the rest of the message information - * from the IPMB header. 
- */ - recv_msg->user = user; - recv_msg->recv_type = IPMI_CMD_RECV_TYPE; - recv_msg->msgid = (msg->rsp[2] >> 2); - recv_msg->msg.netfn = msg->rsp[0] >> 2; - recv_msg->msg.cmd = msg->rsp[3]; - recv_msg->msg.data = recv_msg->msg_data; - - recv_msg->msg.data_len = msg->rsp_size - 4; - memcpy(recv_msg->msg_data, msg->rsp + 4, - msg->rsp_size - 4); - if (deliver_response(intf, recv_msg)) - ipmi_inc_stat(intf, unhandled_commands); - else - ipmi_inc_stat(intf, handled_commands); - } + /* + * Extract the rest of the message information + * from the IPMB header. + */ + recv_msg->recv_type = IPMI_CMD_RECV_TYPE; + recv_msg->msgid = (msg->rsp[2] >> 2); + recv_msg->msg.netfn = msg->rsp[0] >> 2; + recv_msg->msg.cmd = msg->rsp[3]; + recv_msg->msg.data = recv_msg->msg_data; + + recv_msg->msg.data_len = msg->rsp_size - 4; + memcpy(recv_msg->msg_data, msg->rsp + 4, + msg->rsp_size - 4); + if (deliver_response(intf, recv_msg)) + ipmi_inc_stat(intf, unhandled_commands); + else + ipmi_inc_stat(intf, handled_commands); + } else { + /* + * We couldn't allocate memory for the message, so + * requeue it for handling later. + */ + rv = 1; } return rv; @@ -4080,7 +4079,7 @@ static int handle_ipmb_direct_rcv_rsp(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg; struct ipmi_ipmb_direct_addr *daddr; - recv_msg = msg->user_data; + recv_msg = msg->recv_msg; if (recv_msg == NULL) { dev_warn(intf->si_dev, "IPMI direct message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n"); @@ -4182,7 +4181,7 @@ static int handle_lan_get_msg_cmd(struct ipmi_smi *intf, unsigned char chan; struct ipmi_user *user = NULL; struct ipmi_lan_addr *lan_addr; - struct ipmi_recv_msg *recv_msg; + struct ipmi_recv_msg *recv_msg = NULL; if (msg->rsp_size < 12) { /* Message not big enough, just ignore it. */ @@ -4203,63 +4202,76 @@ static int handle_lan_get_msg_cmd(struct ipmi_smi *intf, rcvr = find_cmd_rcvr(intf, netfn, cmd, chan); if (rcvr) { user = rcvr->user; - kref_get(&user->refcount); - } else - user = NULL; + recv_msg = ipmi_alloc_recv_msg(user); + } rcu_read_unlock(); if (user == NULL) { - /* We didn't find a user, just give up. */ + /* We didn't find a user, just give up and return an error. */ ipmi_inc_stat(intf, unhandled_commands); + msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); + msg->data[1] = IPMI_SEND_MSG_CMD; + msg->data[2] = chan; + msg->data[3] = msg->rsp[4]; /* handle */ + msg->data[4] = msg->rsp[8]; /* rsSWID */ + msg->data[5] = ((netfn + 1) << 2) | (msg->rsp[9] & 0x3); + msg->data[6] = ipmb_checksum(&msg->data[3], 3); + msg->data[7] = msg->rsp[5]; /* rqSWID */ + /* rqseq/lun */ + msg->data[8] = (msg->rsp[9] & 0xfc) | (msg->rsp[6] & 0x3); + msg->data[9] = cmd; + msg->data[10] = IPMI_INVALID_CMD_COMPLETION_CODE; + msg->data[11] = ipmb_checksum(&msg->data[7], 4); + msg->data_size = 12; + + dev_dbg(intf->si_dev, "Invalid command: %*ph\n", + msg->data_size, msg->data); + + smi_send(intf, intf->handlers, msg, 0); /* - * Don't do anything with these messages, just allow - * them to be freed. + * We used the message, so return the value that + * causes it to not be freed or queued. */ - rv = 0; - } else { - recv_msg = ipmi_alloc_recv_msg(); - if (!recv_msg) { - /* - * We couldn't allocate memory for the - * message, so requeue it for handling later. - */ - rv = 1; - kref_put(&user->refcount, free_user); - } else { - /* Extract the source address from the data. 
*/ - lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr; - lan_addr->addr_type = IPMI_LAN_ADDR_TYPE; - lan_addr->session_handle = msg->rsp[4]; - lan_addr->remote_SWID = msg->rsp[8]; - lan_addr->local_SWID = msg->rsp[5]; - lan_addr->lun = msg->rsp[9] & 3; - lan_addr->channel = msg->rsp[3] & 0xf; - lan_addr->privilege = msg->rsp[3] >> 4; + rv = -1; + } else if (!IS_ERR(recv_msg)) { + /* Extract the source address from the data. */ + lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr; + lan_addr->addr_type = IPMI_LAN_ADDR_TYPE; + lan_addr->session_handle = msg->rsp[4]; + lan_addr->remote_SWID = msg->rsp[8]; + lan_addr->local_SWID = msg->rsp[5]; + lan_addr->lun = msg->rsp[9] & 3; + lan_addr->channel = msg->rsp[3] & 0xf; + lan_addr->privilege = msg->rsp[3] >> 4; - /* - * Extract the rest of the message information - * from the IPMB header. - */ - recv_msg->user = user; - recv_msg->recv_type = IPMI_CMD_RECV_TYPE; - recv_msg->msgid = msg->rsp[9] >> 2; - recv_msg->msg.netfn = msg->rsp[6] >> 2; - recv_msg->msg.cmd = msg->rsp[10]; - recv_msg->msg.data = recv_msg->msg_data; + /* + * Extract the rest of the message information + * from the IPMB header. + */ + recv_msg->recv_type = IPMI_CMD_RECV_TYPE; + recv_msg->msgid = msg->rsp[9] >> 2; + recv_msg->msg.netfn = msg->rsp[6] >> 2; + recv_msg->msg.cmd = msg->rsp[10]; + recv_msg->msg.data = recv_msg->msg_data; - /* - * We chop off 12, not 11 bytes because the checksum - * at the end also needs to be removed. - */ - recv_msg->msg.data_len = msg->rsp_size - 12; - memcpy(recv_msg->msg_data, &msg->rsp[11], - msg->rsp_size - 12); - if (deliver_response(intf, recv_msg)) - ipmi_inc_stat(intf, unhandled_commands); - else - ipmi_inc_stat(intf, handled_commands); - } + /* + * We chop off 12, not 11 bytes because the checksum + * at the end also needs to be removed. + */ + recv_msg->msg.data_len = msg->rsp_size - 12; + memcpy(recv_msg->msg_data, &msg->rsp[11], + msg->rsp_size - 12); + if (deliver_response(intf, recv_msg)) + ipmi_inc_stat(intf, unhandled_commands); + else + ipmi_inc_stat(intf, handled_commands); + } else { + /* + * We couldn't allocate memory for the message, so + * requeue it for handling later. + */ + rv = 1; } return rv; @@ -4281,7 +4293,7 @@ static int handle_oem_get_msg_cmd(struct ipmi_smi *intf, unsigned char chan; struct ipmi_user *user = NULL; struct ipmi_system_interface_addr *smi_addr; - struct ipmi_recv_msg *recv_msg; + struct ipmi_recv_msg *recv_msg = NULL; /* * We expect the OEM SW to perform error checking @@ -4310,9 +4322,8 @@ static int handle_oem_get_msg_cmd(struct ipmi_smi *intf, rcvr = find_cmd_rcvr(intf, netfn, cmd, chan); if (rcvr) { user = rcvr->user; - kref_get(&user->refcount); - } else - user = NULL; + recv_msg = ipmi_alloc_recv_msg(user); + } rcu_read_unlock(); if (user == NULL) { @@ -4325,48 +4336,42 @@ static int handle_oem_get_msg_cmd(struct ipmi_smi *intf, */ rv = 0; - } else { - recv_msg = ipmi_alloc_recv_msg(); - if (!recv_msg) { - /* - * We couldn't allocate memory for the - * message, so requeue it for handling - * later. - */ - rv = 1; - kref_put(&user->refcount, free_user); - } else { - /* - * OEM Messages are expected to be delivered via - * the system interface to SMS software. 
We might - * need to visit this again depending on OEM - * requirements - */ - smi_addr = ((struct ipmi_system_interface_addr *) - &recv_msg->addr); - smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; - smi_addr->channel = IPMI_BMC_CHANNEL; - smi_addr->lun = msg->rsp[0] & 3; - - recv_msg->user = user; - recv_msg->user_msg_data = NULL; - recv_msg->recv_type = IPMI_OEM_RECV_TYPE; - recv_msg->msg.netfn = msg->rsp[0] >> 2; - recv_msg->msg.cmd = msg->rsp[1]; - recv_msg->msg.data = recv_msg->msg_data; + } else if (!IS_ERR(recv_msg)) { + /* + * OEM Messages are expected to be delivered via + * the system interface to SMS software. We might + * need to visit this again depending on OEM + * requirements + */ + smi_addr = ((struct ipmi_system_interface_addr *) + &recv_msg->addr); + smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; + smi_addr->channel = IPMI_BMC_CHANNEL; + smi_addr->lun = msg->rsp[0] & 3; + + recv_msg->user_msg_data = NULL; + recv_msg->recv_type = IPMI_OEM_RECV_TYPE; + recv_msg->msg.netfn = msg->rsp[0] >> 2; + recv_msg->msg.cmd = msg->rsp[1]; + recv_msg->msg.data = recv_msg->msg_data; - /* - * The message starts at byte 4 which follows the - * Channel Byte in the "GET MESSAGE" command - */ - recv_msg->msg.data_len = msg->rsp_size - 4; - memcpy(recv_msg->msg_data, &msg->rsp[4], - msg->rsp_size - 4); - if (deliver_response(intf, recv_msg)) - ipmi_inc_stat(intf, unhandled_commands); - else - ipmi_inc_stat(intf, handled_commands); - } + /* + * The message starts at byte 4 which follows the + * Channel Byte in the "GET MESSAGE" command + */ + recv_msg->msg.data_len = msg->rsp_size - 4; + memcpy(recv_msg->msg_data, &msg->rsp[4], + msg->rsp_size - 4); + if (deliver_response(intf, recv_msg)) + ipmi_inc_stat(intf, unhandled_commands); + else + ipmi_inc_stat(intf, handled_commands); + } else { + /* + * We couldn't allocate memory for the message, so + * requeue it for handling later. + */ + rv = 1; } return rv; @@ -4396,8 +4401,7 @@ static int handle_read_event_rsp(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg, *recv_msg2; struct list_head msgs; struct ipmi_user *user; - int rv = 0, deliver_count = 0, index; - unsigned long flags; + int rv = 0, deliver_count = 0; if (msg->rsp_size < 19) { /* Message is too small to be an IPMB event. */ @@ -4412,7 +4416,7 @@ static int handle_read_event_rsp(struct ipmi_smi *intf, INIT_LIST_HEAD(&msgs); - spin_lock_irqsave(&intf->events_lock, flags); + mutex_lock(&intf->events_mutex); ipmi_inc_stat(intf, events); @@ -4420,18 +4424,20 @@ static int handle_read_event_rsp(struct ipmi_smi *intf, * Allocate and fill in one message for every user that is * getting events. 
*/ - index = srcu_read_lock(&intf->users_srcu); - list_for_each_entry_rcu(user, &intf->users, link) { + mutex_lock(&intf->users_mutex); + list_for_each_entry(user, &intf->users, link) { if (!user->gets_events) continue; - recv_msg = ipmi_alloc_recv_msg(); - if (!recv_msg) { - rcu_read_unlock(); + recv_msg = ipmi_alloc_recv_msg(user); + if (IS_ERR(recv_msg)) { + mutex_unlock(&intf->users_mutex); list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) { + user = recv_msg->user; list_del(&recv_msg->link); ipmi_free_recv_msg(recv_msg); + kref_put(&user->refcount, free_ipmi_user); } /* * We couldn't allocate memory for the @@ -4445,11 +4451,9 @@ static int handle_read_event_rsp(struct ipmi_smi *intf, deliver_count++; copy_event_into_recv_msg(recv_msg, msg); - recv_msg->user = user; - kref_get(&user->refcount); list_add_tail(&recv_msg->link, &msgs); } - srcu_read_unlock(&intf->users_srcu, index); + mutex_unlock(&intf->users_mutex); if (deliver_count) { /* Now deliver all the messages. */ @@ -4462,8 +4466,8 @@ static int handle_read_event_rsp(struct ipmi_smi *intf, * No one to receive the message, put it in queue if there's * not already too many things in the queue. */ - recv_msg = ipmi_alloc_recv_msg(); - if (!recv_msg) { + recv_msg = ipmi_alloc_recv_msg(NULL); + if (IS_ERR(recv_msg)) { /* * We couldn't allocate memory for the * message, so requeue it for handling @@ -4487,7 +4491,7 @@ static int handle_read_event_rsp(struct ipmi_smi *intf, } out: - spin_unlock_irqrestore(&intf->events_lock, flags); + mutex_unlock(&intf->events_mutex); return rv; } @@ -4498,7 +4502,7 @@ static int handle_bmc_rsp(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg; struct ipmi_system_interface_addr *smi_addr; - recv_msg = msg->user_data; + recv_msg = msg->recv_msg; if (recv_msg == NULL) { dev_warn(intf->si_dev, "IPMI SMI message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n"); @@ -4539,9 +4543,10 @@ static int handle_one_recv_msg(struct ipmi_smi *intf, if (msg->rsp_size < 2) { /* Message is too small to be correct. */ - dev_warn(intf->si_dev, - "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n", - (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size); + dev_warn_ratelimited(intf->si_dev, + "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n", + (msg->data[0] >> 2) | 1, + msg->data[1], msg->rsp_size); return_unspecified: /* Generate an error response for the message. */ @@ -4571,14 +4576,14 @@ return_unspecified: } else if ((msg->data_size >= 2) && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2)) && (msg->data[1] == IPMI_SEND_MSG_CMD) - && (msg->user_data == NULL)) { + && (msg->recv_msg == NULL)) { - if (intf->in_shutdown) + if (intf->in_shutdown || intf->run_to_completion) goto out; /* * This is the local response to a command send, start - * the timer for these. The user_data will not be + * the timer for these. The recv_msg will not be * NULL if this is a response send, and we will let * response sends just go through. */ @@ -4617,10 +4622,10 @@ return_unspecified: * The NetFN and Command in the response is not even * marginally correct. 
*/ - dev_warn(intf->si_dev, - "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n", - (msg->data[0] >> 2) | 1, msg->data[1], - msg->rsp[0] >> 2, msg->rsp[1]); + dev_warn_ratelimited(intf->si_dev, + "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n", + (msg->data[0] >> 2) | 1, msg->data[1], + msg->rsp[0] >> 2, msg->rsp[1]); goto return_unspecified; } @@ -4638,13 +4643,16 @@ return_unspecified: requeue = handle_ipmb_direct_rcv_rsp(intf, msg); } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2)) && (msg->rsp[1] == IPMI_SEND_MSG_CMD) - && (msg->user_data != NULL)) { + && (msg->recv_msg != NULL)) { /* * It's a response to a response we sent. For this we * deliver a send message response to the user. */ struct ipmi_recv_msg *recv_msg; + if (intf->run_to_completion) + goto out; + chan = msg->data[2] & 0x0f; if (chan >= IPMI_MAX_CHANNELS) /* Invalid channel number */ @@ -4652,7 +4660,7 @@ return_unspecified: cc = msg->rsp[2]; process_response_response: - recv_msg = msg->user_data; + recv_msg = msg->recv_msg; requeue = 0; if (!recv_msg) @@ -4667,6 +4675,9 @@ process_response_response: && (msg->rsp[1] == IPMI_GET_MSG_CMD)) { struct ipmi_channel *chans; + if (intf->run_to_completion) + goto out; + /* It's from the receive queue. */ chan = msg->rsp[3] & 0xf; if (chan >= IPMI_MAX_CHANNELS) { @@ -4741,6 +4752,9 @@ process_response_response: } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2)) && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) { /* It's an asynchronous event. */ + if (intf->run_to_completion) + goto out; + requeue = handle_read_event_rsp(intf, msg); } else { /* It's a response from the local BMC. */ @@ -4756,10 +4770,10 @@ process_response_response: */ static void handle_new_recv_msgs(struct ipmi_smi *intf) { - struct ipmi_smi_msg *smi_msg; - unsigned long flags = 0; - int rv; - int run_to_completion = intf->run_to_completion; + struct ipmi_smi_msg *smi_msg; + unsigned long flags = 0; + int rv; + int run_to_completion = READ_ONCE(intf->run_to_completion); /* See if any waiting messages need to be processed. */ if (!run_to_completion) @@ -4779,7 +4793,7 @@ static void handle_new_recv_msgs(struct ipmi_smi *intf) * To preserve message order, quit if we * can't handle a message. Add the message * back at the head, this is safe because this - * tasklet is the only thing that pulls the + * workqueue is the only thing that pulls the * messages. */ list_add(&smi_msg->link, &intf->waiting_rcv_msgs); @@ -4793,31 +4807,16 @@ static void handle_new_recv_msgs(struct ipmi_smi *intf) } if (!run_to_completion) spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags); - - /* - * If the pretimout count is non-zero, decrement one from it and - * deliver pretimeouts to all the users. - */ - if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) { - struct ipmi_user *user; - int index; - - index = srcu_read_lock(&intf->users_srcu); - list_for_each_entry_rcu(user, &intf->users, link) { - if (user->handler->ipmi_watchdog_pretimeout) - user->handler->ipmi_watchdog_pretimeout( - user->handler_data); - } - srcu_read_unlock(&intf->users_srcu, index); - } } -static void smi_recv_tasklet(struct tasklet_struct *t) +static void smi_work(struct work_struct *t) { unsigned long flags = 0; /* keep us warning-free. 
*/ - struct ipmi_smi *intf = from_tasklet(intf, t, recv_tasklet); - int run_to_completion = intf->run_to_completion; + struct ipmi_smi *intf = from_work(intf, t, smi_work); + int run_to_completion = READ_ONCE(intf->run_to_completion); struct ipmi_smi_msg *newmsg = NULL; + struct ipmi_recv_msg *msg, *msg2; + int cc; /* * Start the next message if available. @@ -4826,9 +4825,7 @@ static void smi_recv_tasklet(struct tasklet_struct *t) * because the lower layer is allowed to hold locks while calling * message delivery. */ - - rcu_read_lock(); - +restart: if (!run_to_completion) spin_lock_irqsave(&intf->xmit_msgs_lock, flags); if (intf->curr_msg == NULL && !intf->in_shutdown) { @@ -4846,15 +4843,64 @@ static void smi_recv_tasklet(struct tasklet_struct *t) intf->curr_msg = newmsg; } } - if (!run_to_completion) spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags); - if (newmsg) - intf->handlers->sender(intf->send_info, newmsg); - rcu_read_unlock(); + if (newmsg) { + cc = intf->handlers->sender(intf->send_info, newmsg); + if (cc) { + if (newmsg->recv_msg) + deliver_err_response(intf, + newmsg->recv_msg, cc); + else + ipmi_free_smi_msg(newmsg); + goto restart; + } + } handle_new_recv_msgs(intf); + + /* Nothing below applies during panic time. */ + if (run_to_completion) + return; + + /* + * If the pretimout count is non-zero, decrement one from it and + * deliver pretimeouts to all the users. + */ + if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) { + struct ipmi_user *user; + + mutex_lock(&intf->users_mutex); + list_for_each_entry(user, &intf->users, link) { + if (user->handler->ipmi_watchdog_pretimeout) + user->handler->ipmi_watchdog_pretimeout( + user->handler_data); + } + mutex_unlock(&intf->users_mutex); + } + + /* + * Freeing the message can cause a user to be released, which + * can then cause the interface to be freed. Make sure that + * doesn't happen until we are ready. + */ + kref_get(&intf->refcount); + + mutex_lock(&intf->user_msgs_mutex); + list_for_each_entry_safe(msg, msg2, &intf->user_msgs, link) { + struct ipmi_user *user = msg->user; + + list_del(&msg->link); + + if (refcount_read(&user->destroyed) == 0) + ipmi_free_recv_msg(msg); + else + user->handler->ipmi_recv_hndl(msg, user->handler_data); + } + mutex_unlock(&intf->user_msgs_mutex); + + kref_put(&intf->refcount, intf_free); } /* Handle a new message from the lower layer. */ @@ -4862,11 +4908,11 @@ void ipmi_smi_msg_received(struct ipmi_smi *intf, struct ipmi_smi_msg *msg) { unsigned long flags = 0; /* keep us warning-free. */ - int run_to_completion = intf->run_to_completion; + int run_to_completion = READ_ONCE(intf->run_to_completion); /* * To preserve message order, we keep a queue and deliver from - * a tasklet. + * a workqueue. 
*/ if (!run_to_completion) spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags); @@ -4887,9 +4933,9 @@ void ipmi_smi_msg_received(struct ipmi_smi *intf, spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags); if (run_to_completion) - smi_recv_tasklet(&intf->recv_tasklet); + smi_work(&intf->smi_work); else - tasklet_schedule(&intf->recv_tasklet); + queue_work(system_wq, &intf->smi_work); } EXPORT_SYMBOL(ipmi_smi_msg_received); @@ -4899,7 +4945,7 @@ void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf) return; atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1); - tasklet_schedule(&intf->recv_tasklet); + queue_work(system_wq, &intf->smi_work); } EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout); @@ -4928,8 +4974,7 @@ smi_from_recv_msg(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg, static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent, struct list_head *timeouts, unsigned long timeout_period, - int slot, unsigned long *flags, - bool *need_timer) + int slot, bool *need_timer) { struct ipmi_recv_msg *msg; @@ -4981,7 +5026,7 @@ static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent, return; } - spin_unlock_irqrestore(&intf->seq_lock, *flags); + mutex_unlock(&intf->seq_lock); /* * Send the new message. We send with a zero @@ -5002,7 +5047,7 @@ static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent, } else ipmi_free_smi_msg(smi_msg); - spin_lock_irqsave(&intf->seq_lock, *flags); + mutex_lock(&intf->seq_lock); } } @@ -5029,7 +5074,7 @@ static bool ipmi_timeout_handler(struct ipmi_smi *intf, * list. */ INIT_LIST_HEAD(&timeouts); - spin_lock_irqsave(&intf->seq_lock, flags); + mutex_lock(&intf->seq_lock); if (intf->ipmb_maintenance_mode_timeout) { if (intf->ipmb_maintenance_mode_timeout <= timeout_period) intf->ipmb_maintenance_mode_timeout = 0; @@ -5039,8 +5084,8 @@ static bool ipmi_timeout_handler(struct ipmi_smi *intf, for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) check_msg_timeout(intf, &intf->seq_table[i], &timeouts, timeout_period, i, - &flags, &need_timer); - spin_unlock_irqrestore(&intf->seq_lock, flags); + &need_timer); + mutex_unlock(&intf->seq_lock); list_for_each_entry_safe(msg, msg2, &timeouts, link) deliver_err_response(intf, msg, IPMI_TIMEOUT_COMPLETION_CODE); @@ -5060,7 +5105,9 @@ static bool ipmi_timeout_handler(struct ipmi_smi *intf, -= timeout_period; if (!intf->maintenance_mode && (intf->auto_maintenance_timeout <= 0)) { - intf->maintenance_mode_enable = false; + intf->maintenance_mode_state = + IPMI_MAINTENANCE_MODE_STATE_OFF; + intf->auto_maintenance_timeout = 0; maintenance_mode_update(intf); } } @@ -5068,7 +5115,7 @@ static bool ipmi_timeout_handler(struct ipmi_smi *intf, flags); } - tasklet_schedule(&intf->recv_tasklet); + queue_work(system_wq, &intf->smi_work); return need_timer; } @@ -5076,28 +5123,28 @@ static bool ipmi_timeout_handler(struct ipmi_smi *intf, static void ipmi_request_event(struct ipmi_smi *intf) { /* No event requests when in maintenance mode. 
*/ - if (intf->maintenance_mode_enable) + if (intf->maintenance_mode_state) return; if (!intf->in_shutdown) intf->handlers->request_events(intf->send_info); } -static struct timer_list ipmi_timer; - static atomic_t stop_operation; -static void ipmi_timeout(struct timer_list *unused) +static void ipmi_timeout_work(struct work_struct *work) { + if (atomic_read(&stop_operation)) + return; + struct ipmi_smi *intf; bool need_timer = false; - int index; if (atomic_read(&stop_operation)) return; - index = srcu_read_lock(&ipmi_interfaces_srcu); - list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { + mutex_lock(&ipmi_interfaces_mutex); + list_for_each_entry(intf, &ipmi_interfaces, link) { if (atomic_read(&intf->event_waiters)) { intf->ticks_to_req_ev--; if (intf->ticks_to_req_ev == 0) { @@ -5106,15 +5153,27 @@ static void ipmi_timeout(struct timer_list *unused) } need_timer = true; } + if (intf->maintenance_mode_state) + need_timer = true; need_timer |= ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME); } - srcu_read_unlock(&ipmi_interfaces_srcu, index); + mutex_unlock(&ipmi_interfaces_mutex); if (need_timer) mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES); } +static DECLARE_WORK(ipmi_timer_work, ipmi_timeout_work); + +static void ipmi_timeout(struct timer_list *unused) +{ + if (atomic_read(&stop_operation)) + return; + + queue_work(system_wq, &ipmi_timer_work); +} + static void need_waiter(struct ipmi_smi *intf) { /* Racy, but worst case we start the timer twice. */ @@ -5139,7 +5198,7 @@ struct ipmi_smi_msg *ipmi_alloc_smi_msg(void) rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC); if (rv) { rv->done = free_smi_msg; - rv->user_data = NULL; + rv->recv_msg = NULL; rv->type = IPMI_SMI_MSG_TYPE_NORMAL; atomic_inc(&smi_msg_inuse_count); } @@ -5155,27 +5214,51 @@ static void free_recv_msg(struct ipmi_recv_msg *msg) kfree(msg); } -static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void) +static struct ipmi_recv_msg *ipmi_alloc_recv_msg(struct ipmi_user *user) { struct ipmi_recv_msg *rv; + if (user) { + if (atomic_add_return(1, &user->nr_msgs) > max_msgs_per_user) { + atomic_dec(&user->nr_msgs); + return ERR_PTR(-EBUSY); + } + } + rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC); - if (rv) { - rv->user = NULL; - rv->done = free_recv_msg; - atomic_inc(&recv_msg_inuse_count); + if (!rv) { + if (user) + atomic_dec(&user->nr_msgs); + return ERR_PTR(-ENOMEM); } + + rv->user = user; + rv->done = free_recv_msg; + if (user) + kref_get(&user->refcount); + atomic_inc(&recv_msg_inuse_count); return rv; } void ipmi_free_recv_msg(struct ipmi_recv_msg *msg) { - if (msg->user && !oops_in_progress) - kref_put(&msg->user->refcount, free_user); + if (msg->user && !oops_in_progress) { + atomic_dec(&msg->user->nr_msgs); + kref_put(&msg->user->refcount, free_ipmi_user); + } msg->done(msg); } EXPORT_SYMBOL(ipmi_free_recv_msg); +static void ipmi_set_recv_msg_user(struct ipmi_recv_msg *msg, + struct ipmi_user *user) +{ + WARN_ON_ONCE(msg->user); /* User should not be set. */ + msg->user = user; + atomic_inc(&user->nr_msgs); + kref_get(&user->refcount); +} + static atomic_t panic_done_count = ATOMIC_INIT(0); static void dummy_smi_done_handler(struct ipmi_smi_msg *msg) @@ -5191,9 +5274,9 @@ static void dummy_recv_done_handler(struct ipmi_recv_msg *msg) /* * Inside a panic, send a message and wait for a response. 
*/ -static void ipmi_panic_request_and_wait(struct ipmi_smi *intf, - struct ipmi_addr *addr, - struct kernel_ipmi_msg *msg) +static void _ipmi_panic_request_and_wait(struct ipmi_smi *intf, + struct ipmi_addr *addr, + struct kernel_ipmi_msg *msg) { struct ipmi_smi_msg smi_msg; struct ipmi_recv_msg recv_msg; @@ -5223,6 +5306,15 @@ static void ipmi_panic_request_and_wait(struct ipmi_smi *intf, ipmi_poll(intf); } +void ipmi_panic_request_and_wait(struct ipmi_user *user, + struct ipmi_addr *addr, + struct kernel_ipmi_msg *msg) +{ + user->intf->run_to_completion = 1; + _ipmi_panic_request_and_wait(user->intf, addr, msg); +} +EXPORT_SYMBOL(ipmi_panic_request_and_wait); + static void event_receiver_fetcher(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) { @@ -5291,7 +5383,7 @@ static void send_panic_events(struct ipmi_smi *intf, char *str) } /* Send the event announcing the panic. */ - ipmi_panic_request_and_wait(intf, &addr, &msg); + _ipmi_panic_request_and_wait(intf, &addr, &msg); /* * On every interface, dump a bunch of OEM event holding the @@ -5327,7 +5419,7 @@ static void send_panic_events(struct ipmi_smi *intf, char *str) msg.data = NULL; msg.data_len = 0; intf->null_user_handler = device_id_fetcher; - ipmi_panic_request_and_wait(intf, &addr, &msg); + _ipmi_panic_request_and_wait(intf, &addr, &msg); if (intf->local_event_generator) { /* Request the event receiver from the local MC. */ @@ -5336,7 +5428,7 @@ static void send_panic_events(struct ipmi_smi *intf, char *str) msg.data = NULL; msg.data_len = 0; intf->null_user_handler = event_receiver_fetcher; - ipmi_panic_request_and_wait(intf, &addr, &msg); + _ipmi_panic_request_and_wait(intf, &addr, &msg); } intf->null_user_handler = NULL; @@ -5377,23 +5469,18 @@ static void send_panic_events(struct ipmi_smi *intf, char *str) j = 0; while (*p) { - int size = strlen(p); + int size = strnlen(p, 11); - if (size > 11) - size = 11; data[0] = 0; data[1] = 0; data[2] = 0xf0; /* OEM event without timestamp. */ data[3] = intf->addrinfo[0].address; data[4] = j++; /* sequence # */ - /* - * Always give 11 bytes, so strncpy will fill - * it with zeroes for me. - */ - strncpy(data+5, p, 11); + + memcpy_and_pad(data+5, 11, p, size, '\0'); p += size; - ipmi_panic_request_and_wait(intf, &addr, &msg); + _ipmi_panic_request_and_wait(intf, &addr, &msg); } } @@ -5411,7 +5498,7 @@ static int panic_event(struct notifier_block *this, has_panicked = 1; /* For every registered interface, set it to run to completion. */ - list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { + list_for_each_entry(intf, &ipmi_interfaces, link) { if (!intf->handlers || intf->intf_num == -1) /* Interface is not ready. 
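In the panic-event loop above, strncpy() is replaced by strnlen() plus memcpy_and_pad(): the string is copied in at most 11-byte chunks and each chunk is explicitly NUL-padded to the full field width, rather than relying on strncpy()'s implicit padding. The idiom in isolation, with the surrounding event bookkeeping elided (fill_oem_chunk() is an illustrative wrapper, not a function in the patch):

#include <linux/string.h>

static const char *fill_oem_chunk(unsigned char data[16], const char *p)
{
	size_t size = strnlen(p, 11);	/* at most 11 payload bytes per event */

	memcpy_and_pad(data + 5, 11, p, size, '\0');
	return p + size;		/* caller loops until *p == '\0' */
}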
*/ continue; @@ -5441,7 +5528,7 @@ static int panic_event(struct notifier_block *this, intf->handlers->set_run_to_completion(intf->send_info, 1); - list_for_each_entry_rcu(user, &intf->users, link) { + list_for_each_entry(user, &intf->users, link) { if (user->handler->ipmi_panic_handler) user->handler->ipmi_panic_handler( user->handler_data); @@ -5486,15 +5573,11 @@ static int ipmi_init_msghandler(void) if (initialized) goto out; - rv = init_srcu_struct(&ipmi_interfaces_srcu); - if (rv) - goto out; - - remove_work_wq = create_singlethread_workqueue("ipmi-msghandler-remove-wq"); - if (!remove_work_wq) { + bmc_remove_work_wq = create_singlethread_workqueue("ipmi-msghandler-remove-wq"); + if (!bmc_remove_work_wq) { pr_err("unable to create ipmi-msghandler-remove-wq workqueue"); rv = -ENOMEM; - goto out_wq; + goto out; } timer_setup(&ipmi_timer, ipmi_timeout, 0); @@ -5504,9 +5587,6 @@ static int ipmi_init_msghandler(void) initialized = true; -out_wq: - if (rv) - cleanup_srcu_struct(&ipmi_interfaces_srcu); out: mutex_unlock(&ipmi_interfaces_mutex); return rv; @@ -5530,7 +5610,7 @@ static void __exit cleanup_ipmi(void) int count; if (initialized) { - destroy_workqueue(remove_work_wq); + destroy_workqueue(bmc_remove_work_wq); atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block); @@ -5546,7 +5626,8 @@ static void __exit cleanup_ipmi(void) * here. */ atomic_set(&stop_operation, 1); - del_timer_sync(&ipmi_timer); + timer_delete_sync(&ipmi_timer); + cancel_work_sync(&ipmi_timer_work); initialized = false; @@ -5557,8 +5638,6 @@ static void __exit cleanup_ipmi(void) count = atomic_read(&recv_msg_inuse_count); if (count != 0) pr_warn("recv message count %d at exit\n", count); - - cleanup_srcu_struct(&ipmi_interfaces_srcu); } if (drvregistered) driver_unregister(&ipmidriver.driver); diff --git a/drivers/char/ipmi/ipmi_powernv.c b/drivers/char/ipmi/ipmi_powernv.c index da22a8cbe68e..52a1130defe5 100644 --- a/drivers/char/ipmi/ipmi_powernv.c +++ b/drivers/char/ipmi/ipmi_powernv.c @@ -51,7 +51,7 @@ static void send_error_reply(struct ipmi_smi_powernv *smi, ipmi_smi_msg_received(smi->intf, msg); } -static void ipmi_powernv_send(void *send_info, struct ipmi_smi_msg *msg) +static int ipmi_powernv_send(void *send_info, struct ipmi_smi_msg *msg) { struct ipmi_smi_powernv *smi = send_info; struct opal_ipmi_msg *opal_msg; @@ -93,18 +93,19 @@ static void ipmi_powernv_send(void *send_info, struct ipmi_smi_msg *msg) smi->interface_id, opal_msg, size); rc = opal_ipmi_send(smi->interface_id, opal_msg, size); pr_devel("%s: -> %d\n", __func__, rc); - - if (!rc) { - smi->cur_msg = msg; - spin_unlock_irqrestore(&smi->msg_lock, flags); - return; + if (rc) { + comp = IPMI_ERR_UNSPECIFIED; + goto err_unlock; } - comp = IPMI_ERR_UNSPECIFIED; + smi->cur_msg = msg; + spin_unlock_irqrestore(&smi->msg_lock, flags); + return IPMI_CC_NO_ERROR; + err_unlock: spin_unlock_irqrestore(&smi->msg_lock, flags); err: - send_error_reply(smi, msg, comp); + return comp; } static int ipmi_powernv_recv(struct ipmi_smi_powernv *smi) @@ -281,15 +282,13 @@ err_free: return rc; } -static int ipmi_powernv_remove(struct platform_device *pdev) +static void ipmi_powernv_remove(struct platform_device *pdev) { struct ipmi_smi_powernv *smi = dev_get_drvdata(&pdev->dev); ipmi_unregister_smi(smi->intf); free_irq(smi->irq, smi); irq_dispose_mapping(smi->irq); - - return 0; } static const struct of_device_id ipmi_powernv_match[] = { @@ -304,7 +303,7 @@ static struct platform_driver powernv_ipmi_driver = { .of_match_table = ipmi_powernv_match, }, 
.probe = ipmi_powernv_probe, - .remove = ipmi_powernv_remove, + .remove = ipmi_powernv_remove, }; diff --git a/drivers/char/ipmi/ipmi_poweroff.c b/drivers/char/ipmi/ipmi_poweroff.c index 870659d91db2..e63c316d8aaa 100644 --- a/drivers/char/ipmi/ipmi_poweroff.c +++ b/drivers/char/ipmi/ipmi_poweroff.c @@ -650,13 +650,12 @@ static struct ipmi_smi_watcher smi_watcher = { #ifdef CONFIG_PROC_FS #include <linux/sysctl.h> -static struct ctl_table ipmi_table[] = { +static const struct ctl_table ipmi_table[] = { { .procname = "poweroff_powercycle", .data = &poweroff_powercycle, .maxlen = sizeof(poweroff_powercycle), .mode = 0644, .proc_handler = proc_dointvec }, - { } }; static struct ctl_table_header *ipmi_table_header; @@ -700,8 +699,6 @@ static int __init ipmi_poweroff_init(void) #ifdef MODULE static void __exit ipmi_poweroff_cleanup(void) { - int rv; - #ifdef CONFIG_PROC_FS unregister_sysctl_table(ipmi_table_header); #endif @@ -709,9 +706,7 @@ static void __exit ipmi_poweroff_cleanup(void) ipmi_smi_watcher_unregister(&smi_watcher); if (ready) { - rv = ipmi_destroy_user(ipmi_user); - if (rv) - pr_err("could not cleanup the IPMI user: 0x%x\n", rv); + ipmi_destroy_user(ipmi_user); pm_power_off = old_poweroff_func; } } diff --git a/drivers/char/ipmi/ipmi_si.h b/drivers/char/ipmi/ipmi_si.h index a7ead2a4c753..687835b53da5 100644 --- a/drivers/char/ipmi/ipmi_si.h +++ b/drivers/char/ipmi/ipmi_si.h @@ -26,6 +26,14 @@ enum si_type { /* Array is defined in the ipmi_si_intf.c */ extern const char *const si_to_str[]; +struct ipmi_match_info { + enum si_type type; +}; + +extern const struct ipmi_match_info ipmi_kcs_si_info; +extern const struct ipmi_match_info ipmi_smic_si_info; +extern const struct ipmi_match_info ipmi_bt_si_info; + enum ipmi_addr_space { IPMI_IO_ADDR_SPACE, IPMI_MEM_ADDR_SPACE }; @@ -64,7 +72,7 @@ struct si_sm_io { void (*irq_cleanup)(struct si_sm_io *io); u8 slave_addr; - enum si_type si_type; + const struct ipmi_match_info *si_info; struct device *dev; }; @@ -93,6 +101,13 @@ void ipmi_si_pci_shutdown(void); static inline void ipmi_si_pci_init(void) { } static inline void ipmi_si_pci_shutdown(void) { } #endif +#ifdef CONFIG_IPMI_LS2K +void ipmi_si_ls2k_init(void); +void ipmi_si_ls2k_shutdown(void); +#else +static inline void ipmi_si_ls2k_init(void) { } +static inline void ipmi_si_ls2k_shutdown(void) { } +#endif #ifdef CONFIG_PARISC void ipmi_si_parisc_init(void); void ipmi_si_parisc_shutdown(void); diff --git a/drivers/char/ipmi/ipmi_si_hardcode.c b/drivers/char/ipmi/ipmi_si_hardcode.c index ed5e91b1e040..0c92fa3eee88 100644 --- a/drivers/char/ipmi/ipmi_si_hardcode.c +++ b/drivers/char/ipmi/ipmi_si_hardcode.c @@ -80,10 +80,10 @@ static void __init ipmi_hardcode_init_one(const char *si_type_str, } p.regsize = regsizes[i]; + p.regspacing = regspacings[i]; p.slave_addr = slave_addrs[i]; p.addr_source = SI_HARDCODED; p.regshift = regshifts[i]; - p.regsize = regsizes[i]; p.addr = addr; p.space = addr_space; diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index 5cd031f3fc97..5459ffdde8dc 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c @@ -53,6 +53,7 @@ #define SI_TIMEOUT_JIFFIES (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY) #define SI_SHORT_TIMEOUT_USEC 250 /* .25ms when the SM request a short timeout */ +#define SI_TIMEOUT_HOSED (HZ) /* 1 second when in hosed state. 
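The new SI_TIMEOUT_HOSED define (and the SI_HOSED state introduced just below) gives the interface a roughly one-second back-off once the state machine reports SI_SM_HOSED: instead of retrying immediately, the driver parks, rearms its timer, and probes the BMC again from the timer. A rough sketch of that back-off, condensed from the hunks that follow (enter_hosed() and hosed_tick() are illustrative helper names; smi_mod_timer() and start_get_flags() are the driver's own):

static void enter_hosed(struct smi_info *smi_info)
{
	smi_info->si_state = SI_HOSED;				/* stop normal processing */
	smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_HOSED);	/* retry in ~1s */
}

static void hosed_tick(struct smi_info *smi_info)
{
	/* A harmless Get Message Flags tells us whether the BMC is back. */
	if (smi_info->si_state == SI_HOSED)
		start_get_flags(smi_info);
}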
*/ enum si_intf_state { SI_NORMAL, @@ -61,7 +62,8 @@ enum si_intf_state { SI_CLEARING_FLAGS, SI_GETTING_MESSAGES, SI_CHECKING_ENABLES, - SI_SETTING_ENABLES + SI_SETTING_ENABLES, + SI_HOSED /* FIXME - add watchdog stuff. */ }; @@ -73,6 +75,10 @@ enum si_intf_state { /* 'invalid' to allow a firmware-specified interface to be disabled */ const char *const si_to_str[] = { "invalid", "kcs", "smic", "bt", NULL }; +const struct ipmi_match_info ipmi_kcs_si_info = { .type = SI_KCS }; +const struct ipmi_match_info ipmi_smic_si_info = { .type = SI_SMIC }; +const struct ipmi_match_info ipmi_bt_si_info = { .type = SI_BT }; + static bool initialized; /* @@ -269,8 +275,7 @@ void debug_timestamp(struct smi_info *smi_info, char *msg) struct timespec64 t; ktime_get_ts64(&t); - dev_dbg(smi_info->io.dev, "**%s: %lld.%9.9ld\n", - msg, t.tv_sec, t.tv_nsec); + dev_dbg(smi_info->io.dev, "**%s: %ptSp\n", msg, &t); } #else #define debug_timestamp(smi_info, x) @@ -309,7 +314,7 @@ static void return_hosed_msg(struct smi_info *smi_info, int cCode) static enum si_sm_result start_next_msg(struct smi_info *smi_info) { - int rv; + int rv; if (!smi_info->waiting_msg) { smi_info->curr_msg = NULL; @@ -386,6 +391,17 @@ static void start_clear_flags(struct smi_info *smi_info) smi_info->si_state = SI_CLEARING_FLAGS; } +static void start_get_flags(struct smi_info *smi_info) +{ + unsigned char msg[2]; + + msg[0] = (IPMI_NETFN_APP_REQUEST << 2); + msg[1] = IPMI_GET_MSG_FLAGS_CMD; + + start_new_msg(smi_info, msg, 2); + smi_info->si_state = SI_GETTING_FLAGS; +} + static void start_getting_msg_queue(struct smi_info *smi_info) { smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); @@ -692,7 +708,7 @@ static void handle_transaction_done(struct smi_info *smi_info) break; } enables = current_global_enables(smi_info, 0, &irq_on); - if (smi_info->io.si_type == SI_BT) + if (smi_info->io.si_info->type == SI_BT) /* BT has its own interrupt enable bit. */ check_bt_irq(smi_info, irq_on); if (enables != (msg[3] & GLOBAL_ENABLES_MASK)) { @@ -738,6 +754,8 @@ static void handle_transaction_done(struct smi_info *smi_info) } break; } + case SI_HOSED: /* Shouldn't happen. */ + break; } } @@ -752,6 +770,10 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info, enum si_sm_result si_sm_result; restart: + if (smi_info->si_state == SI_HOSED) + /* Just in case, hosed state is only left from the timeout. */ + return SI_SM_HOSED; + /* * There used to be a loop here that waited a little while * (around 25us) before giving up. That turned out to be @@ -775,18 +797,20 @@ restart: /* * Do the before return_hosed_msg, because that - * releases the lock. + * releases the lock. We just disable operations for + * a while and retry in hosed state. */ - smi_info->si_state = SI_NORMAL; + smi_info->si_state = SI_HOSED; if (smi_info->curr_msg != NULL) { /* * If we were handling a user message, format * a response to send to the upper layer to * tell it about the error. */ - return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED); + return_hosed_msg(smi_info, IPMI_BUS_ERR); } - goto restart; + smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_HOSED); + goto out; } /* @@ -794,8 +818,6 @@ restart: * this if there is not yet an upper layer to handle anything. */ if (si_sm_result == SI_SM_ATTN || smi_info->got_attn) { - unsigned char msg[2]; - if (smi_info->si_state != SI_NORMAL) { /* * We got an ATTN, but we are doing something else. @@ -813,11 +835,7 @@ restart: * interrupts work with the SMI, that's not really * possible. 
*/ - msg[0] = (IPMI_NETFN_APP_REQUEST << 2); - msg[1] = IPMI_GET_MSG_FLAGS_CMD; - - start_new_msg(smi_info, msg, 2); - smi_info->si_state = SI_GETTING_FLAGS; + start_get_flags(smi_info); goto restart; } } @@ -859,7 +877,7 @@ restart: if (si_sm_result == SI_SM_IDLE && smi_info->timer_running) { /* Ok it if fails, the timer will just go off. */ - if (del_timer(&smi_info->si_timer)) + if (timer_delete(&smi_info->si_timer)) smi_info->timer_running = false; } @@ -890,27 +908,29 @@ static void flush_messages(void *send_info) * mode. This means we are single-threaded, no need for locks. */ result = smi_event_handler(smi_info, 0); - while (result != SI_SM_IDLE) { + while (result != SI_SM_IDLE && result != SI_SM_HOSED) { udelay(SI_SHORT_TIMEOUT_USEC); result = smi_event_handler(smi_info, SI_SHORT_TIMEOUT_USEC); } } -static void sender(void *send_info, - struct ipmi_smi_msg *msg) +static int sender(void *send_info, struct ipmi_smi_msg *msg) { struct smi_info *smi_info = send_info; unsigned long flags; debug_timestamp(smi_info, "Enqueue"); + if (smi_info->si_state == SI_HOSED) + return IPMI_BUS_ERR; + if (smi_info->run_to_completion) { /* * If we are running to completion, start it. Upper * layer will call flush_messages to clear it out. */ smi_info->waiting_msg = msg; - return; + return IPMI_CC_NO_ERROR; } spin_lock_irqsave(&smi_info->si_lock, flags); @@ -925,6 +945,7 @@ static void sender(void *send_info, smi_info->waiting_msg = msg; check_start_timer_thread(smi_info); spin_unlock_irqrestore(&smi_info->si_lock, flags); + return IPMI_CC_NO_ERROR; } static void set_run_to_completion(void *send_info, bool i_run_to_completion) @@ -1072,7 +1093,8 @@ static void set_need_watch(void *send_info, unsigned int watch_mask) static void smi_timeout(struct timer_list *t) { - struct smi_info *smi_info = from_timer(smi_info, t, si_timer); + struct smi_info *smi_info = timer_container_of(smi_info, t, + si_timer); enum si_sm_result smi_result; unsigned long flags; unsigned long jiffies_now; @@ -1082,6 +1104,10 @@ static void smi_timeout(struct timer_list *t) spin_lock_irqsave(&(smi_info->si_lock), flags); debug_timestamp(smi_info, "Timer"); + if (smi_info->si_state == SI_HOSED) + /* Try something to see if the BMC is now operational. */ + start_get_flags(smi_info); + jiffies_now = jiffies; time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies) * SI_USEC_PER_JIFFY); @@ -1091,14 +1117,11 @@ static void smi_timeout(struct timer_list *t) /* Running with interrupts, only do long timeouts. */ timeout = jiffies + SI_TIMEOUT_JIFFIES; smi_inc_stat(smi_info, long_timeouts); - goto do_mod_timer; - } - - /* - * If the state machine asks for a short delay, then shorten - * the timer timeout. - */ - if (smi_result == SI_SM_CALL_WITH_DELAY) { + } else if (smi_result == SI_SM_CALL_WITH_DELAY) { + /* + * If the state machine asks for a short delay, then shorten + * the timer timeout. + */ smi_inc_stat(smi_info, short_timeouts); timeout = jiffies + 1; } else { @@ -1106,7 +1129,6 @@ static void smi_timeout(struct timer_list *t) timeout = jiffies + SI_TIMEOUT_JIFFIES; } -do_mod_timer: if (smi_result != SI_SM_IDLE) smi_mod_timer(smi_info, timeout); else @@ -1119,7 +1141,7 @@ irqreturn_t ipmi_si_irq_handler(int irq, void *data) struct smi_info *smi_info = data; unsigned long flags; - if (smi_info->io.si_type == SI_BT) + if (smi_info->io.si_info->type == SI_BT) /* We need to clear the IRQ flag for the BT interface. 
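smi_timeout() above also switches from from_timer() to timer_container_of(), the renamed helper that recovers the enclosing structure from the struct timer_list pointer handed to a timer callback. Minimal usage, with an illustrative structure in place of struct smi_info:

#include <linux/timer.h>

struct my_dev {
	struct timer_list si_timer;
};

static void my_timeout(struct timer_list *t)
{
	/* Same semantics as the old from_timer(): container_of() on the timer. */
	struct my_dev *dev = timer_container_of(dev, t, si_timer);

	/* ... handle the timeout for dev ... */
}

static void my_dev_init(struct my_dev *dev)
{
	timer_setup(&dev->si_timer, my_timeout, 0);
}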
*/ smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG, IPMI_BT_INTMASK_CLEAR_IRQ_BIT @@ -1164,7 +1186,7 @@ static int smi_start_processing(void *send_info, * The BT interface is efficient enough to not need a thread, * and there is no need for a thread if we have interrupts. */ - else if ((new_smi->io.si_type != SI_BT) && (!new_smi->io.irq)) + else if (new_smi->io.si_info->type != SI_BT && !new_smi->io.irq) enable = 1; if (enable) { @@ -1235,7 +1257,7 @@ MODULE_PARM_DESC(kipmid_max_busy_us, void ipmi_irq_finish_setup(struct si_sm_io *io) { - if (io->si_type == SI_BT) + if (io->si_info->type == SI_BT) /* Enable the interrupt in the BT interface. */ io->outputb(io, IPMI_BT_INTMASK_REG, IPMI_BT_INTMASK_ENABLE_IRQ_BIT); @@ -1243,7 +1265,7 @@ void ipmi_irq_finish_setup(struct si_sm_io *io) void ipmi_irq_start_cleanup(struct si_sm_io *io) { - if (io->si_type == SI_BT) + if (io->si_info->type == SI_BT) /* Disable the interrupt in the BT interface. */ io->outputb(io, IPMI_BT_INTMASK_REG, 0); } @@ -1614,7 +1636,7 @@ static ssize_t type_show(struct device *dev, { struct smi_info *smi_info = dev_get_drvdata(dev); - return sysfs_emit(buf, "%s\n", si_to_str[smi_info->io.si_type]); + return sysfs_emit(buf, "%s\n", si_to_str[smi_info->io.si_info->type]); } static DEVICE_ATTR_RO(type); @@ -1649,7 +1671,7 @@ static ssize_t params_show(struct device *dev, return sysfs_emit(buf, "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n", - si_to_str[smi_info->io.si_type], + si_to_str[smi_info->io.si_info->type], addr_space_to_str[smi_info->io.addr_space], smi_info->io.addr_data, smi_info->io.regspacing, @@ -1803,7 +1825,7 @@ setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info) { struct ipmi_device_id *id = &smi_info->device_id; if (id->manufacturer_id == DELL_IANA_MFR_ID && - smi_info->io.si_type == SI_BT) + smi_info->io.si_info->type == SI_BT) register_xaction_notifier(&dell_poweredge_bt_xaction_notifier); } @@ -1839,7 +1861,7 @@ static inline void stop_timer_and_thread(struct smi_info *smi_info) } smi_info->timer_can_start = false; - del_timer_sync(&smi_info->si_timer); + timer_delete_sync(&smi_info->si_timer); } static struct smi_info *find_dup_si(struct smi_info *info) @@ -1882,7 +1904,8 @@ int ipmi_si_add_smi(struct si_sm_io *io) } if (!io->io_setup) { - if (io->addr_space == IPMI_IO_ADDR_SPACE) { + if (IS_ENABLED(CONFIG_HAS_IOPORT) && + io->addr_space == IPMI_IO_ADDR_SPACE) { io->io_setup = ipmi_si_port_setup; } else if (io->addr_space == IPMI_MEM_ADDR_SPACE) { io->io_setup = ipmi_si_mem_setup; @@ -1906,13 +1929,13 @@ int ipmi_si_add_smi(struct si_sm_io *io) /* We prefer ACPI over SMBIOS. 
*/ dev_info(dup->io.dev, "Removing SMBIOS-specified %s state machine in favor of ACPI\n", - si_to_str[new_smi->io.si_type]); + si_to_str[new_smi->io.si_info->type]); cleanup_one_si(dup); } else { dev_info(new_smi->io.dev, "%s-specified %s state machine: duplicate\n", ipmi_addr_src_to_str(new_smi->io.addr_source), - si_to_str[new_smi->io.si_type]); + si_to_str[new_smi->io.si_info->type]); rv = -EBUSY; kfree(new_smi); goto out_err; @@ -1921,7 +1944,7 @@ int ipmi_si_add_smi(struct si_sm_io *io) pr_info("Adding %s-specified %s state machine\n", ipmi_addr_src_to_str(new_smi->io.addr_source), - si_to_str[new_smi->io.si_type]); + si_to_str[new_smi->io.si_info->type]); list_add_tail(&new_smi->link, &smi_infos); @@ -1944,12 +1967,12 @@ static int try_smi_init(struct smi_info *new_smi) pr_info("Trying %s-specified %s state machine at %s address 0x%lx, slave address 0x%x, irq %d\n", ipmi_addr_src_to_str(new_smi->io.addr_source), - si_to_str[new_smi->io.si_type], + si_to_str[new_smi->io.si_info->type], addr_space_to_str[new_smi->io.addr_space], new_smi->io.addr_data, new_smi->io.slave_addr, new_smi->io.irq); - switch (new_smi->io.si_type) { + switch (new_smi->io.si_info->type) { case SI_KCS: new_smi->handlers = &kcs_smi_handlers; break; @@ -2072,7 +2095,7 @@ static int try_smi_init(struct smi_info *new_smi) smi_num++; dev_info(new_smi->io.dev, "IPMI %s interface initialized\n", - si_to_str[new_smi->io.si_type]); + si_to_str[new_smi->io.si_info->type]); WARN_ON(new_smi->io.dev->init_name != NULL); @@ -2090,10 +2113,18 @@ static int try_smi_init(struct smi_info *new_smi) return rv; } +/* + * Devices in the same address space at the same address are the same. + */ +static bool __init ipmi_smi_info_same(struct smi_info *e1, struct smi_info *e2) +{ + return (e1->io.addr_space == e2->io.addr_space && + e1->io.addr_data == e2->io.addr_data); +} + static int __init init_ipmi_si(void) { - struct smi_info *e; - enum ipmi_addr_src type = SI_INVALID; + struct smi_info *e, *e2; if (initialized) return 0; @@ -2106,45 +2137,77 @@ static int __init init_ipmi_si(void) ipmi_si_pci_init(); + ipmi_si_ls2k_init(); + ipmi_si_parisc_init(); - /* We prefer devices with interrupts, but in the case of a machine - with multiple BMCs we assume that there will be several instances - of a given type so if we succeed in registering a type then also - try to register everything else of the same type */ mutex_lock(&smi_infos_lock); + + /* + * Scan through all the devices. We prefer devices with + * interrupts, so go through those first in case there are any + * duplicates that don't have the interrupt set. + */ list_for_each_entry(e, &smi_infos, link) { - /* Try to register a device if it has an IRQ and we either - haven't successfully registered a device yet or this - device has the same type as one we successfully registered */ - if (e->io.irq && (!type || e->io.addr_source == type)) { - if (!try_smi_init(e)) { - type = e->io.addr_source; + bool dup = false; + + /* Register ones with interrupts first. */ + if (!e->io.irq) + continue; + + /* + * Go through the ones we have already seen to see if this + * is a dup. + */ + list_for_each_entry(e2, &smi_infos, link) { + if (e2 == e) + break; + if (e2->io.irq && ipmi_smi_info_same(e, e2)) { + dup = true; + break; } } + if (!dup) + try_smi_init(e); } - /* type will only have been set if we successfully registered an si */ - if (type) - goto skip_fallback_noirq; + /* + * Now try devices without interrupts. 
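The reworked init_ipmi_si() scan no longer stops at the first address source that registers successfully; it walks every entry, filters duplicates with ipmi_smi_info_same(), and handles interrupt-capable entries first so a later interrupt-less duplicate of the same device is skipped. The duplicate test itself is just an address comparison; same_device() below is a renamed copy of that helper, shown only for illustration:

/* Two table entries describe the same device if they share an address. */
static bool same_device(const struct smi_info *a, const struct smi_info *b)
{
	return a->io.addr_space == b->io.addr_space &&
	       a->io.addr_data == b->io.addr_data;
}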
+ */ + list_for_each_entry(e, &smi_infos, link) { + bool dup = false; - /* Fall back to the preferred device */ + if (e->io.irq) + continue; - list_for_each_entry(e, &smi_infos, link) { - if (!e->io.irq && (!type || e->io.addr_source == type)) { - if (!try_smi_init(e)) { - type = e->io.addr_source; + /* + * Go through the ones we have already seen to see if + * this is a dup. We have already looked at the ones + * with interrupts. + */ + list_for_each_entry(e2, &smi_infos, link) { + if (!e2->io.irq) + continue; + if (ipmi_smi_info_same(e, e2)) { + dup = true; + break; } } + list_for_each_entry(e2, &smi_infos, link) { + if (e2 == e) + break; + if (ipmi_smi_info_same(e, e2)) { + dup = true; + break; + } + } + if (!dup) + try_smi_init(e); } -skip_fallback_noirq: initialized = true; mutex_unlock(&smi_infos_lock); - if (type) - return 0; - mutex_lock(&smi_infos_lock); if (unload_when_empty && list_empty(&smi_infos)) { mutex_unlock(&smi_infos_lock); @@ -2266,7 +2329,7 @@ struct device *ipmi_si_remove_by_data(int addr_space, enum si_type si_type, list_for_each_entry_safe(e, tmp_e, &smi_infos, link) { if (e->io.addr_space != addr_space) continue; - if (e->io.si_type != si_type) + if (e->io.si_info->type != si_type) continue; if (e->io.addr_data == addr) { dev = get_device(e->io.dev); @@ -2287,6 +2350,8 @@ static void cleanup_ipmi_si(void) ipmi_si_pci_shutdown(); + ipmi_si_ls2k_shutdown(); + ipmi_si_parisc_shutdown(); ipmi_si_platform_shutdown(); diff --git a/drivers/char/ipmi/ipmi_si_ls2k.c b/drivers/char/ipmi/ipmi_si_ls2k.c new file mode 100644 index 000000000000..45442c257efd --- /dev/null +++ b/drivers/char/ipmi/ipmi_si_ls2k.c @@ -0,0 +1,189 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Driver for Loongson-2K BMC IPMI interface + * + * Copyright (C) 2024-2025 Loongson Technology Corporation Limited. 
+ * + * Authors: + * Chong Qiao <qiaochong@loongson.cn> + * Binbin Zhou <zhoubinbin@loongson.cn> + */ + +#include <linux/bitfield.h> +#include <linux/ioport.h> +#include <linux/module.h> +#include <linux/types.h> + +#include "ipmi_si.h" + +#define LS2K_KCS_FIFO_IBFH 0x0 +#define LS2K_KCS_FIFO_IBFT 0x1 +#define LS2K_KCS_FIFO_OBFH 0x2 +#define LS2K_KCS_FIFO_OBFT 0x3 + +/* KCS registers */ +#define LS2K_KCS_REG_STS 0x4 +#define LS2K_KCS_REG_DATA_OUT 0x5 +#define LS2K_KCS_REG_DATA_IN 0x6 +#define LS2K_KCS_REG_CMD 0x8 + +#define LS2K_KCS_CMD_DATA 0xa +#define LS2K_KCS_VERSION 0xb +#define LS2K_KCS_WR_REQ 0xc +#define LS2K_KCS_WR_ACK 0x10 + +#define LS2K_KCS_STS_OBF BIT(0) +#define LS2K_KCS_STS_IBF BIT(1) +#define LS2K_KCS_STS_SMS_ATN BIT(2) +#define LS2K_KCS_STS_CMD BIT(3) + +#define LS2K_KCS_DATA_MASK (LS2K_KCS_STS_OBF | LS2K_KCS_STS_IBF | LS2K_KCS_STS_CMD) + +static bool ls2k_registered; + +static unsigned char ls2k_mem_inb_v0(const struct si_sm_io *io, unsigned int offset) +{ + void __iomem *addr = io->addr; + int reg_offset; + + if (offset & BIT(0)) { + reg_offset = LS2K_KCS_REG_STS; + } else { + writeb(readb(addr + LS2K_KCS_REG_STS) & ~LS2K_KCS_STS_OBF, addr + LS2K_KCS_REG_STS); + reg_offset = LS2K_KCS_REG_DATA_OUT; + } + + return readb(addr + reg_offset); +} + +static unsigned char ls2k_mem_inb_v1(const struct si_sm_io *io, unsigned int offset) +{ + void __iomem *addr = io->addr; + unsigned char inb = 0, cmd; + bool obf, ibf; + + obf = readb(addr + LS2K_KCS_FIFO_OBFH) ^ readb(addr + LS2K_KCS_FIFO_OBFT); + ibf = readb(addr + LS2K_KCS_FIFO_IBFH) ^ readb(addr + LS2K_KCS_FIFO_IBFT); + cmd = readb(addr + LS2K_KCS_CMD_DATA); + + if (offset & BIT(0)) { + inb = readb(addr + LS2K_KCS_REG_STS) & ~LS2K_KCS_DATA_MASK; + inb |= FIELD_PREP(LS2K_KCS_STS_OBF, obf) + | FIELD_PREP(LS2K_KCS_STS_IBF, ibf) + | FIELD_PREP(LS2K_KCS_STS_CMD, cmd); + } else { + inb = readb(addr + LS2K_KCS_REG_DATA_OUT); + writeb(readb(addr + LS2K_KCS_FIFO_OBFH), addr + LS2K_KCS_FIFO_OBFT); + } + + return inb; +} + +static void ls2k_mem_outb_v0(const struct si_sm_io *io, unsigned int offset, + unsigned char val) +{ + void __iomem *addr = io->addr; + unsigned char sts = readb(addr + LS2K_KCS_REG_STS); + int reg_offset; + + if (sts & LS2K_KCS_STS_IBF) + return; + + if (offset & BIT(0)) { + reg_offset = LS2K_KCS_REG_CMD; + sts |= LS2K_KCS_STS_CMD; + } else { + reg_offset = LS2K_KCS_REG_DATA_IN; + sts &= ~LS2K_KCS_STS_CMD; + } + + writew(val, addr + reg_offset); + writeb(sts | LS2K_KCS_STS_IBF, addr + LS2K_KCS_REG_STS); + writel(readl(addr + LS2K_KCS_WR_REQ) + 1, addr + LS2K_KCS_WR_REQ); +} + +static void ls2k_mem_outb_v1(const struct si_sm_io *io, unsigned int offset, + unsigned char val) +{ + void __iomem *addr = io->addr; + unsigned char ibfh, ibft; + int reg_offset; + + ibfh = readb(addr + LS2K_KCS_FIFO_IBFH); + ibft = readb(addr + LS2K_KCS_FIFO_IBFT); + + if (ibfh ^ ibft) + return; + + reg_offset = (offset & BIT(0)) ? LS2K_KCS_REG_CMD : LS2K_KCS_REG_DATA_IN; + writew(val, addr + reg_offset); + + writeb(offset & BIT(0), addr + LS2K_KCS_CMD_DATA); + writeb(!ibft, addr + LS2K_KCS_FIFO_IBFH); + writel(readl(addr + LS2K_KCS_WR_REQ) + 1, addr + LS2K_KCS_WR_REQ); +} + +static void ls2k_mem_cleanup(struct si_sm_io *io) +{ + if (io->addr) + iounmap(io->addr); +} + +static int ipmi_ls2k_mem_setup(struct si_sm_io *io) +{ + unsigned char version; + + io->addr = ioremap(io->addr_data, io->regspacing); + if (!io->addr) + return -EIO; + + version = readb(io->addr + LS2K_KCS_VERSION); + + io->inputb = version ? 
ls2k_mem_inb_v1 : ls2k_mem_inb_v0; + io->outputb = version ? ls2k_mem_outb_v1 : ls2k_mem_outb_v0; + io->io_cleanup = ls2k_mem_cleanup; + + return 0; +} + +static int ipmi_ls2k_probe(struct platform_device *pdev) +{ + struct si_sm_io io; + + memset(&io, 0, sizeof(io)); + + io.si_info = &ipmi_kcs_si_info; + io.io_setup = ipmi_ls2k_mem_setup; + io.addr_data = pdev->resource[0].start; + io.regspacing = resource_size(&pdev->resource[0]); + io.dev = &pdev->dev; + + dev_dbg(&pdev->dev, "addr 0x%lx, spacing %d.\n", io.addr_data, io.regspacing); + + return ipmi_si_add_smi(&io); +} + +static void ipmi_ls2k_remove(struct platform_device *pdev) +{ + ipmi_si_remove_by_dev(&pdev->dev); +} + +struct platform_driver ipmi_ls2k_platform_driver = { + .driver = { + .name = "ls2k-ipmi-si", + }, + .probe = ipmi_ls2k_probe, + .remove = ipmi_ls2k_remove, +}; + +void ipmi_si_ls2k_init(void) +{ + platform_driver_register(&ipmi_ls2k_platform_driver); + ls2k_registered = true; +} + +void ipmi_si_ls2k_shutdown(void) +{ + if (ls2k_registered) + platform_driver_unregister(&ipmi_ls2k_platform_driver); +} diff --git a/drivers/char/ipmi/ipmi_si_parisc.c b/drivers/char/ipmi/ipmi_si_parisc.c index 2be2967f6b5f..3b0a70d9adbb 100644 --- a/drivers/char/ipmi/ipmi_si_parisc.c +++ b/drivers/char/ipmi/ipmi_si_parisc.c @@ -13,7 +13,7 @@ static int __init ipmi_parisc_probe(struct parisc_device *dev) memset(&io, 0, sizeof(io)); - io.si_type = SI_KCS; + io.si_info = &ipmi_kcs_si_info; io.addr_source = SI_DEVICETREE; io.addr_space = IPMI_MEM_ADDR_SPACE; io.addr_data = dev->hpa.start; diff --git a/drivers/char/ipmi/ipmi_si_pci.c b/drivers/char/ipmi/ipmi_si_pci.c index 74fa2055868b..17f72763322d 100644 --- a/drivers/char/ipmi/ipmi_si_pci.c +++ b/drivers/char/ipmi/ipmi_si_pci.c @@ -23,30 +23,32 @@ MODULE_PARM_DESC(trypci, static int ipmi_pci_probe_regspacing(struct si_sm_io *io) { - if (io->si_type == SI_KCS) { - unsigned char status; - int regspacing; - - io->regsize = DEFAULT_REGSIZE; - io->regshift = 0; - - /* detect 1, 4, 16byte spacing */ - for (regspacing = DEFAULT_REGSPACING; regspacing <= 16;) { - io->regspacing = regspacing; - if (io->io_setup(io)) { - dev_err(io->dev, "Could not setup I/O space\n"); - return DEFAULT_REGSPACING; - } - /* write invalid cmd */ - io->outputb(io, 1, 0x10); - /* read status back */ - status = io->inputb(io, 1); - io->io_cleanup(io); - if (status) - return regspacing; - regspacing *= 4; + unsigned char status; + int regspacing; + + if (io->si_info->type != SI_KCS) + return DEFAULT_REGSPACING; + + io->regsize = DEFAULT_REGSIZE; + io->regshift = 0; + + /* detect 1, 4, 16byte spacing */ + for (regspacing = DEFAULT_REGSPACING; regspacing <= 16;) { + io->regspacing = regspacing; + if (io->io_setup(io)) { + dev_err(io->dev, "Could not setup I/O space\n"); + return DEFAULT_REGSPACING; } + /* write invalid cmd */ + io->outputb(io, 1, 0x10); + /* read status back */ + status = io->inputb(io, 1); + io->io_cleanup(io); + if (status) + return regspacing; + regspacing *= 4; } + return DEFAULT_REGSPACING; } @@ -74,15 +76,15 @@ static int ipmi_pci_probe(struct pci_dev *pdev, switch (pdev->class) { case PCI_CLASS_SERIAL_IPMI_SMIC: - io.si_type = SI_SMIC; + io.si_info = &ipmi_smic_si_info; break; case PCI_CLASS_SERIAL_IPMI_KCS: - io.si_type = SI_KCS; + io.si_info = &ipmi_kcs_si_info; break; case PCI_CLASS_SERIAL_IPMI_BT: - io.si_type = SI_BT; + io.si_info = &ipmi_bt_si_info; break; default: @@ -97,6 +99,9 @@ static int ipmi_pci_probe(struct pci_dev *pdev, } if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) { + if 
(!IS_ENABLED(CONFIG_HAS_IOPORT)) + return -ENXIO; + io.addr_space = IPMI_IO_ADDR_SPACE; io.io_setup = ipmi_si_port_setup; } else { @@ -115,7 +120,7 @@ static int ipmi_pci_probe(struct pci_dev *pdev, if (io.irq) io.irq_setup = ipmi_std_irq_setup; - dev_info(&pdev->dev, "%pR regsize %d spacing %d irq %d\n", + dev_info(&pdev->dev, "%pR regsize %u spacing %u irq %d\n", &pdev->resource[0], io.regsize, io.regspacing, io.irq); return ipmi_si_add_smi(&io); diff --git a/drivers/char/ipmi/ipmi_si_platform.c b/drivers/char/ipmi/ipmi_si_platform.c index c3d8ac7873ba..fb6e359ae494 100644 --- a/drivers/char/ipmi/ipmi_si_platform.c +++ b/drivers/char/ipmi/ipmi_si_platform.c @@ -11,10 +11,11 @@ #include <linux/types.h> #include <linux/module.h> -#include <linux/of_device.h> -#include <linux/of_platform.h> +#include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> +#include <linux/platform_device.h> +#include <linux/property.h> #include <linux/acpi.h> #include "ipmi_si.h" #include "ipmi_dmi.h" @@ -162,9 +163,13 @@ static int platform_ipmi_probe(struct platform_device *pdev) switch (type) { case SI_KCS: + io.si_info = &ipmi_kcs_si_info; + break; case SI_SMIC: + io.si_info = &ipmi_smic_si_info; + break; case SI_BT: - io.si_type = type; + io.si_info = &ipmi_bt_si_info; break; case SI_TYPE_INVALID: /* User disabled this in hardcode. */ return -ENODEV; @@ -212,19 +217,15 @@ static int platform_ipmi_probe(struct platform_device *pdev) #ifdef CONFIG_OF static const struct of_device_id of_ipmi_match[] = { - { .type = "ipmi", .compatible = "ipmi-kcs", - .data = (void *)(unsigned long) SI_KCS }, - { .type = "ipmi", .compatible = "ipmi-smic", - .data = (void *)(unsigned long) SI_SMIC }, - { .type = "ipmi", .compatible = "ipmi-bt", - .data = (void *)(unsigned long) SI_BT }, - {}, + { .type = "ipmi", .compatible = "ipmi-kcs", .data = &ipmi_kcs_si_info }, + { .type = "ipmi", .compatible = "ipmi-smic", .data = &ipmi_smic_si_info }, + { .type = "ipmi", .compatible = "ipmi-bt", .data = &ipmi_bt_si_info }, + {} }; MODULE_DEVICE_TABLE(of, of_ipmi_match); static int of_ipmi_probe(struct platform_device *pdev) { - const struct of_device_id *match; struct si_sm_io io; struct resource resource; const __be32 *regsize, *regspacing, *regshift; @@ -237,10 +238,6 @@ static int of_ipmi_probe(struct platform_device *pdev) dev_info(&pdev->dev, "probing via device tree\n"); - match = of_match_device(of_ipmi_match, &pdev->dev); - if (!match) - return -ENODEV; - if (!of_device_is_available(np)) return -EINVAL; @@ -269,7 +266,7 @@ static int of_ipmi_probe(struct platform_device *pdev) } memset(&io, 0, sizeof(io)); - io.si_type = (unsigned long) match->data; + io.si_info = device_get_match_data(&pdev->dev); io.addr_source = SI_DEVICETREE; io.irq_setup = ipmi_std_irq_setup; @@ -300,7 +297,7 @@ static int find_slave_address(struct si_sm_io *io, int slave_addr) { #ifdef CONFIG_IPMI_DMI_DECODE if (!slave_addr) - slave_addr = ipmi_dmi_get_slave_addr(io->si_type, + slave_addr = ipmi_dmi_get_slave_addr(io->si_info->type, io->addr_space, io->addr_data); #endif @@ -339,13 +336,13 @@ static int acpi_ipmi_probe(struct platform_device *pdev) switch (tmp) { case 1: - io.si_type = SI_KCS; + io.si_info = &ipmi_kcs_si_info; break; case 2: - io.si_type = SI_SMIC; + io.si_info = &ipmi_smic_si_info; break; case 3: - io.si_type = SI_BT; + io.si_info = &ipmi_bt_si_info; break; case 4: /* SSIF, just ignore */ return -ENODEV; @@ -409,11 +406,9 @@ static int ipmi_probe(struct platform_device *pdev) return platform_ipmi_probe(pdev); } -static int 
ipmi_remove(struct platform_device *pdev) +static void ipmi_remove(struct platform_device *pdev) { ipmi_si_remove_by_dev(&pdev->dev); - - return 0; } static int pdev_match_name(struct device *dev, const void *data) diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c index df8dd50b4cbe..ef1582a029f4 100644 --- a/drivers/char/ipmi/ipmi_ssif.c +++ b/drivers/char/ipmi/ipmi_ssif.c @@ -481,8 +481,6 @@ static int ipmi_ssif_thread(void *data) /* Wait for something to do */ result = wait_for_completion_interruptible( &ssif_info->wake_thread); - if (ssif_info->stopping) - break; if (result == -ERESTARTSYS) continue; init_completion(&ssif_info->wake_thread); @@ -541,7 +539,8 @@ static void start_resend(struct ssif_info *ssif_info); static void retry_timeout(struct timer_list *t) { - struct ssif_info *ssif_info = from_timer(ssif_info, t, retry_timer); + struct ssif_info *ssif_info = timer_container_of(ssif_info, t, + retry_timer); unsigned long oflags, *flags; bool waiting, resend; @@ -565,7 +564,8 @@ static void retry_timeout(struct timer_list *t) static void watch_timeout(struct timer_list *t) { - struct ssif_info *ssif_info = from_timer(ssif_info, t, watch_timer); + struct ssif_info *ssif_info = timer_container_of(ssif_info, t, + watch_timer); unsigned long oflags, *flags; if (ssif_info->stopping) @@ -599,7 +599,7 @@ static void ssif_alert(struct i2c_client *client, enum i2c_alert_protocol type, flags = ipmi_ssif_lock_cond(ssif_info, &oflags); if (ssif_info->waiting_alert) { ssif_info->waiting_alert = false; - del_timer(&ssif_info->retry_timer); + timer_delete(&ssif_info->retry_timer); do_get = true; } else if (ssif_info->curr_msg) { ssif_info->got_alert = true; @@ -980,7 +980,7 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result, ipmi_ssif_unlock_cond(ssif_info, flags); start_get(ssif_info); } else { - /* Wait a jiffie then request the next message */ + /* Wait a jiffy then request the next message */ ssif_info->waiting_alert = true; ssif_info->retries_left = SSIF_RECV_RETRIES; if (!ssif_info->stopping) @@ -1068,8 +1068,7 @@ static void start_next_msg(struct ssif_info *ssif_info, unsigned long *flags) } } -static void sender(void *send_info, - struct ipmi_smi_msg *msg) +static int sender(void *send_info, struct ipmi_smi_msg *msg) { struct ssif_info *ssif_info = send_info; unsigned long oflags, *flags; @@ -1084,11 +1083,10 @@ static void sender(void *send_info, struct timespec64 t; ktime_get_real_ts64(&t); - dev_dbg(&ssif_info->client->dev, - "**Enqueue %02x %02x: %lld.%6.6ld\n", - msg->data[0], msg->data[1], - (long long)t.tv_sec, (long)t.tv_nsec / NSEC_PER_USEC); + dev_dbg(&ssif_info->client->dev, "**Enqueue %02x %02x: %ptSp\n", + msg->data[0], msg->data[1], &t); } + return IPMI_CC_NO_ERROR; } static int get_smi_info(void *send_info, struct ipmi_smi_info *data) @@ -1268,12 +1266,10 @@ static void shutdown_ssif(void *send_info) schedule_timeout(1); ssif_info->stopping = true; - del_timer_sync(&ssif_info->watch_timer); - del_timer_sync(&ssif_info->retry_timer); - if (ssif_info->thread) { - complete(&ssif_info->wake_thread); + timer_delete_sync(&ssif_info->watch_timer); + timer_delete_sync(&ssif_info->retry_timer); + if (ssif_info->thread) kthread_stop(ssif_info->thread); - } } static void ssif_remove(struct i2c_client *client) @@ -1368,8 +1364,20 @@ static int ssif_detect(struct i2c_client *client, struct i2c_board_info *info) rv = do_cmd(client, 2, msg, &len, resp); if (rv) rv = -ENODEV; - else + else { + if (len < 3) { + rv = -ENODEV; + } else { + 
struct ipmi_device_id id; + + rv = ipmi_demangle_device_id(resp[0] >> 2, resp[1], + resp + 2, len - 2, &id); + if (rv) + rv = -ENODEV; /* Error means a BMC probably isn't there. */ + } + if (!rv && info) strscpy(info->type, DEVICE_NAME, I2C_NAME_SIZE); + } kfree(resp); return rv; } @@ -1704,6 +1712,16 @@ static int ssif_probe(struct i2c_client *client) ipmi_addr_src_to_str(ssif_info->addr_source), client->addr, client->adapter->name, slave_addr); + /* + * Send a get device id command and validate its response to + * make sure a valid BMC is there. + */ + rv = ssif_detect(client, NULL); + if (rv) { + dev_err(&client->dev, "Not present\n"); + goto out; + } + /* Now check for system interface capabilities */ msg[0] = IPMI_NETFN_APP_REQUEST << 2; msg[1] = IPMI_GET_SYSTEM_INTERFACE_CAPABILITIES_CMD; @@ -1945,7 +1963,7 @@ static int new_ssif_client(int addr, char *adapter_name, } } - strncpy(addr_info->binfo.type, DEVICE_NAME, + strscpy(addr_info->binfo.type, DEVICE_NAME, sizeof(addr_info->binfo.type)); addr_info->binfo.addr = addr; addr_info->binfo.platform_data = addr_info; @@ -2049,7 +2067,7 @@ static int dmi_ipmi_probe(struct platform_device *pdev) #endif static const struct i2c_device_id ssif_id[] = { - { DEVICE_NAME, 0 }, + { DEVICE_NAME }, { } }; MODULE_DEVICE_TABLE(i2c, ssif_id); @@ -2071,7 +2089,7 @@ static int ssif_platform_probe(struct platform_device *dev) return dmi_ipmi_probe(dev); } -static int ssif_platform_remove(struct platform_device *dev) +static void ssif_platform_remove(struct platform_device *dev) { struct ssif_addr_info *addr_info = dev_get_drvdata(&dev->dev); @@ -2079,13 +2097,13 @@ static int ssif_platform_remove(struct platform_device *dev) list_del(&addr_info->link); kfree(addr_info); mutex_unlock(&ssif_infos_mutex); - return 0; } static const struct platform_device_id ssif_plat_ids[] = { { "dmi-ipmi-ssif", 0 }, { } }; +MODULE_DEVICE_TABLE(platform, ssif_plat_ids); static struct platform_driver ipmi_driver = { .driver = { diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c index 9a459257489f..a013ddbf1466 100644 --- a/drivers/char/ipmi/ipmi_watchdog.c +++ b/drivers/char/ipmi/ipmi_watchdog.c @@ -150,7 +150,7 @@ static char preaction[16] = "pre_none"; static unsigned char preop_val = WDOG_PREOP_NONE; static char preop[16] = "preop_none"; -static DEFINE_SPINLOCK(ipmi_read_lock); +static DEFINE_MUTEX(ipmi_read_mutex); static char data_to_read; static DECLARE_WAIT_QUEUE_HEAD(read_q); static struct fasync_struct *fasync_q; @@ -363,7 +363,7 @@ static int __ipmi_set_timeout(struct ipmi_smi_msg *smi_msg, { struct kernel_ipmi_msg msg; unsigned char data[6]; - int rv; + int rv = 0; struct ipmi_system_interface_addr addr; int hbnow = 0; @@ -405,14 +405,18 @@ static int __ipmi_set_timeout(struct ipmi_smi_msg *smi_msg, msg.cmd = IPMI_WDOG_SET_TIMER; msg.data = data; msg.data_len = sizeof(data); - rv = ipmi_request_supply_msgs(watchdog_user, - (struct ipmi_addr *) &addr, - 0, - &msg, - NULL, - smi_msg, - recv_msg, - 1); + if (smi_msg) + rv = ipmi_request_supply_msgs(watchdog_user, + (struct ipmi_addr *) &addr, + 0, + &msg, + NULL, + smi_msg, + recv_msg, + 1); + else + ipmi_panic_request_and_wait(watchdog_user, + (struct ipmi_addr *) &addr, &msg); if (rv) pr_warn("set timeout error: %d\n", rv); else if (send_heartbeat_now) @@ -431,9 +435,7 @@ static int _ipmi_set_timeout(int do_heartbeat) atomic_set(&msg_tofree, 2); - rv = __ipmi_set_timeout(&smi_msg, - &recv_msg, - &send_heartbeat_now); + rv = __ipmi_set_timeout(&smi_msg, &recv_msg, 
&send_heartbeat_now); if (rv) { atomic_set(&msg_tofree, 0); return rv; @@ -460,27 +462,10 @@ static int ipmi_set_timeout(int do_heartbeat) return rv; } -static atomic_t panic_done_count = ATOMIC_INIT(0); - -static void panic_smi_free(struct ipmi_smi_msg *msg) -{ - atomic_dec(&panic_done_count); -} -static void panic_recv_free(struct ipmi_recv_msg *msg) -{ - atomic_dec(&panic_done_count); -} - -static struct ipmi_smi_msg panic_halt_heartbeat_smi_msg = - INIT_IPMI_SMI_MSG(panic_smi_free); -static struct ipmi_recv_msg panic_halt_heartbeat_recv_msg = - INIT_IPMI_RECV_MSG(panic_recv_free); - static void panic_halt_ipmi_heartbeat(void) { struct kernel_ipmi_msg msg; struct ipmi_system_interface_addr addr; - int rv; /* * Don't reset the timer if we have the timer turned off, that @@ -497,24 +482,10 @@ static void panic_halt_ipmi_heartbeat(void) msg.cmd = IPMI_WDOG_RESET_TIMER; msg.data = NULL; msg.data_len = 0; - atomic_add(2, &panic_done_count); - rv = ipmi_request_supply_msgs(watchdog_user, - (struct ipmi_addr *) &addr, - 0, - &msg, - NULL, - &panic_halt_heartbeat_smi_msg, - &panic_halt_heartbeat_recv_msg, - 1); - if (rv) - atomic_sub(2, &panic_done_count); + ipmi_panic_request_and_wait(watchdog_user, (struct ipmi_addr *) &addr, + &msg); } -static struct ipmi_smi_msg panic_halt_smi_msg = - INIT_IPMI_SMI_MSG(panic_smi_free); -static struct ipmi_recv_msg panic_halt_recv_msg = - INIT_IPMI_RECV_MSG(panic_recv_free); - /* * Special call, doesn't claim any locks. This is only to be called * at panic or halt time, in run-to-completion mode, when the caller @@ -526,22 +497,13 @@ static void panic_halt_ipmi_set_timeout(void) int send_heartbeat_now; int rv; - /* Wait for the messages to be free. */ - while (atomic_read(&panic_done_count) != 0) - ipmi_poll_interface(watchdog_user); - atomic_add(2, &panic_done_count); - rv = __ipmi_set_timeout(&panic_halt_smi_msg, - &panic_halt_recv_msg, - &send_heartbeat_now); + rv = __ipmi_set_timeout(NULL, NULL, &send_heartbeat_now); if (rv) { - atomic_sub(2, &panic_done_count); pr_warn("Unable to extend the watchdog timeout\n"); } else { if (send_heartbeat_now) panic_halt_ipmi_heartbeat(); } - while (atomic_read(&panic_done_count) != 0) - ipmi_poll_interface(watchdog_user); } static int __ipmi_heartbeat(void) @@ -793,7 +755,7 @@ static ssize_t ipmi_read(struct file *file, * Reading returns if the pretimeout has gone off, and it only does * it once per pretimeout. 
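The watchdog pretimeout plumbing below moves from the ipmi_read_lock spinlock to ipmi_read_mutex. That works because every path taking the lock now runs in sleepable context (the pretimeout handler is called from the message handler's work item rather than a tasklet), and it keeps the classic pattern of dropping the lock around schedule(). The wait loop in isolation, assuming the same data_to_read/read_q globals (wait_for_pretimeout() is an illustrative wrapper):

#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/wait.h>

static DEFINE_MUTEX(ipmi_read_mutex);
static DECLARE_WAIT_QUEUE_HEAD(read_q);
static char data_to_read;

static int wait_for_pretimeout(void)
{
	DECLARE_WAITQUEUE(wait, current);
	int rv = 0;

	mutex_lock(&ipmi_read_mutex);
	add_wait_queue(&read_q, &wait);
	while (!data_to_read && !signal_pending(current)) {
		set_current_state(TASK_INTERRUPTIBLE);
		mutex_unlock(&ipmi_read_mutex);	/* never sleep holding the lock */
		schedule();
		mutex_lock(&ipmi_read_mutex);
	}
	remove_wait_queue(&read_q, &wait);
	__set_current_state(TASK_RUNNING);

	if (signal_pending(current))
		rv = -ERESTARTSYS;
	else
		data_to_read = 0;
	mutex_unlock(&ipmi_read_mutex);
	return rv;
}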
*/ - spin_lock_irq(&ipmi_read_lock); + mutex_lock(&ipmi_read_mutex); if (!data_to_read) { if (file->f_flags & O_NONBLOCK) { rv = -EAGAIN; @@ -804,9 +766,9 @@ static ssize_t ipmi_read(struct file *file, add_wait_queue(&read_q, &wait); while (!data_to_read && !signal_pending(current)) { set_current_state(TASK_INTERRUPTIBLE); - spin_unlock_irq(&ipmi_read_lock); + mutex_unlock(&ipmi_read_mutex); schedule(); - spin_lock_irq(&ipmi_read_lock); + mutex_lock(&ipmi_read_mutex); } remove_wait_queue(&read_q, &wait); @@ -818,7 +780,7 @@ static ssize_t ipmi_read(struct file *file, data_to_read = 0; out: - spin_unlock_irq(&ipmi_read_lock); + mutex_unlock(&ipmi_read_mutex); if (rv == 0) { if (copy_to_user(buf, &data_to_read, 1)) @@ -856,10 +818,10 @@ static __poll_t ipmi_poll(struct file *file, poll_table *wait) poll_wait(file, &read_q, wait); - spin_lock_irq(&ipmi_read_lock); + mutex_lock(&ipmi_read_mutex); if (data_to_read) mask |= (EPOLLIN | EPOLLRDNORM); - spin_unlock_irq(&ipmi_read_lock); + mutex_unlock(&ipmi_read_mutex); return mask; } @@ -903,7 +865,6 @@ static const struct file_operations ipmi_wdog_fops = { .open = ipmi_open, .release = ipmi_close, .fasync = ipmi_fasync, - .llseek = no_llseek, }; static struct miscdevice ipmi_wdog_miscdev = { @@ -933,13 +894,11 @@ static void ipmi_wdog_pretimeout_handler(void *handler_data) if (atomic_inc_and_test(&preop_panic_excl)) panic("Watchdog pre-timeout"); } else if (preop_val == WDOG_PREOP_GIVE_DATA) { - unsigned long flags; - - spin_lock_irqsave(&ipmi_read_lock, flags); + mutex_lock(&ipmi_read_mutex); data_to_read = 1; wake_up_interruptible(&read_q); kill_fasync(&fasync_q, SIGIO, POLL_IN); - spin_unlock_irqrestore(&ipmi_read_lock, flags); + mutex_unlock(&ipmi_read_mutex); } } @@ -1065,7 +1024,6 @@ static void ipmi_register_watchdog(int ipmi_intf) static void ipmi_unregister_watchdog(int ipmi_intf) { - int rv; struct ipmi_user *loc_user = watchdog_user; if (!loc_user) @@ -1090,9 +1048,7 @@ static void ipmi_unregister_watchdog(int ipmi_intf) mutex_lock(&ipmi_watchdog_mutex); /* Disconnect from IPMI. */ - rv = ipmi_destroy_user(loc_user); - if (rv) - pr_warn("error unlinking from IPMI: %d\n", rv); + ipmi_destroy_user(loc_user); /* If it comes back, restart it properly. 
*/ ipmi_start_timer_on_heartbeat = 1; @@ -1190,14 +1146,8 @@ static struct ipmi_smi_watcher smi_watcher = { .smi_gone = ipmi_smi_gone }; -static int action_op(const char *inval, char *outval) +static int action_op_set_val(const char *inval) { - if (outval) - strcpy(outval, action); - - if (!inval) - return 0; - if (strcmp(inval, "reset") == 0) action_val = WDOG_TIMEOUT_RESET; else if (strcmp(inval, "none") == 0) @@ -1208,18 +1158,26 @@ static int action_op(const char *inval, char *outval) action_val = WDOG_TIMEOUT_POWER_DOWN; else return -EINVAL; - strcpy(action, inval); return 0; } -static int preaction_op(const char *inval, char *outval) +static int action_op(const char *inval, char *outval) { + int rv; + if (outval) - strcpy(outval, preaction); + strcpy(outval, action); if (!inval) return 0; + rv = action_op_set_val(inval); + if (!rv) + strcpy(action, inval); + return rv; +} +static int preaction_op_set_val(const char *inval) +{ if (strcmp(inval, "pre_none") == 0) preaction_val = WDOG_PRETIMEOUT_NONE; else if (strcmp(inval, "pre_smi") == 0) @@ -1232,18 +1190,26 @@ static int preaction_op(const char *inval, char *outval) preaction_val = WDOG_PRETIMEOUT_MSG_INT; else return -EINVAL; - strcpy(preaction, inval); return 0; } -static int preop_op(const char *inval, char *outval) +static int preaction_op(const char *inval, char *outval) { + int rv; + if (outval) - strcpy(outval, preop); + strcpy(outval, preaction); if (!inval) return 0; + rv = preaction_op_set_val(inval); + if (!rv) + strcpy(preaction, inval); + return 0; +} +static int preop_op_set_val(const char *inval) +{ if (strcmp(inval, "preop_none") == 0) preop_val = WDOG_PREOP_NONE; else if (strcmp(inval, "preop_panic") == 0) @@ -1252,7 +1218,22 @@ static int preop_op(const char *inval, char *outval) preop_val = WDOG_PREOP_GIVE_DATA; else return -EINVAL; - strcpy(preop, inval); + return 0; +} + +static int preop_op(const char *inval, char *outval) +{ + int rv; + + if (outval) + strcpy(outval, preop); + + if (!inval) + return 0; + + rv = preop_op_set_val(inval); + if (!rv) + strcpy(preop, inval); return 0; } @@ -1289,18 +1270,18 @@ static int __init ipmi_wdog_init(void) { int rv; - if (action_op(action, NULL)) { + if (action_op_set_val(action)) { action_op("reset", NULL); pr_info("Unknown action '%s', defaulting to reset\n", action); } - if (preaction_op(preaction, NULL)) { + if (preaction_op_set_val(preaction)) { preaction_op("pre_none", NULL); pr_info("Unknown preaction '%s', defaulting to none\n", preaction); } - if (preop_op(preop, NULL)) { + if (preop_op_set_val(preop)) { preop_op("preop_none", NULL); pr_info("Unknown preop '%s', defaulting to none\n", preop); } diff --git a/drivers/char/ipmi/kcs_bmc_aspeed.c b/drivers/char/ipmi/kcs_bmc_aspeed.c index 72640da55380..a13a3470c17a 100644 --- a/drivers/char/ipmi/kcs_bmc_aspeed.c +++ b/drivers/char/ipmi/kcs_bmc_aspeed.c @@ -428,7 +428,7 @@ static void aspeed_kcs_irq_mask_update(struct kcs_bmc_device *kcs_bmc, u8 mask, if (rc == -ETIMEDOUT) mod_timer(&priv->obe.timer, jiffies + OBE_POLL_PERIOD); } else { - del_timer(&priv->obe.timer); + timer_delete(&priv->obe.timer); } } @@ -641,7 +641,7 @@ static int aspeed_kcs_probe(struct platform_device *pdev) return 0; } -static int aspeed_kcs_remove(struct platform_device *pdev) +static void aspeed_kcs_remove(struct platform_device *pdev) { struct aspeed_kcs_bmc *priv = platform_get_drvdata(pdev); struct kcs_bmc_device *kcs_bmc = &priv->kcs_bmc; @@ -655,9 +655,7 @@ static int aspeed_kcs_remove(struct platform_device *pdev) 
spin_lock_irq(&priv->obe.lock); priv->obe.remove = true; spin_unlock_irq(&priv->obe.lock); - del_timer_sync(&priv->obe.timer); - - return 0; + timer_delete_sync(&priv->obe.timer); } static const struct of_device_id ast_kcs_bmc_match[] = { diff --git a/drivers/char/ipmi/kcs_bmc_npcm7xx.c b/drivers/char/ipmi/kcs_bmc_npcm7xx.c index 7961fec56476..4808a61bf273 100644 --- a/drivers/char/ipmi/kcs_bmc_npcm7xx.c +++ b/drivers/char/ipmi/kcs_bmc_npcm7xx.c @@ -218,7 +218,7 @@ static int npcm7xx_kcs_probe(struct platform_device *pdev) return 0; } -static int npcm7xx_kcs_remove(struct platform_device *pdev) +static void npcm7xx_kcs_remove(struct platform_device *pdev) { struct npcm7xx_kcs_bmc *priv = platform_get_drvdata(pdev); struct kcs_bmc_device *kcs_bmc = &priv->kcs_bmc; @@ -227,8 +227,6 @@ static int npcm7xx_kcs_remove(struct platform_device *pdev) npcm7xx_kcs_enable_channel(kcs_bmc, false); npcm7xx_kcs_irq_mask_update(kcs_bmc, (KCS_BMC_EVENT_TYPE_IBF | KCS_BMC_EVENT_TYPE_OBE), 0); - - return 0; } static const struct of_device_id npcm_kcs_bmc_match[] = { @@ -243,7 +241,7 @@ static struct platform_driver npcm_kcs_bmc_driver = { .of_match_table = npcm_kcs_bmc_match, }, .probe = npcm7xx_kcs_probe, - .remove = npcm7xx_kcs_remove, + .remove = npcm7xx_kcs_remove, }; module_platform_driver(npcm_kcs_bmc_driver); diff --git a/drivers/char/ipmi/ssif_bmc.c b/drivers/char/ipmi/ssif_bmc.c index 56346fb32872..7a52e3ea49ed 100644 --- a/drivers/char/ipmi/ssif_bmc.c +++ b/drivers/char/ipmi/ssif_bmc.c @@ -177,13 +177,15 @@ static ssize_t ssif_bmc_write(struct file *file, const char __user *buf, size_t unsigned long flags; ssize_t ret; - if (count > sizeof(struct ipmi_ssif_msg)) + if (count < sizeof(msg.len) || + count > sizeof(struct ipmi_ssif_msg)) return -EINVAL; if (copy_from_user(&msg, buf, count)) return -EFAULT; - if (!msg.len || count < sizeof_field(struct ipmi_ssif_msg, len) + msg.len) + if (!msg.len || msg.len > IPMI_SSIF_PAYLOAD_MAX || + count < sizeof_field(struct ipmi_ssif_msg, len) + msg.len) return -EINVAL; spin_lock_irqsave(&ssif_bmc->lock, flags); @@ -207,7 +209,7 @@ static ssize_t ssif_bmc_write(struct file *file, const char __user *buf, size_t if (ret) goto exit; - del_timer(&ssif_bmc->response_timer); + timer_delete(&ssif_bmc->response_timer); ssif_bmc->response_timer_inited = false; memcpy(&ssif_bmc->response, &msg, count); @@ -290,13 +292,13 @@ static void complete_response(struct ssif_bmc_ctx *ssif_bmc) ssif_bmc->nbytes_processed = 0; ssif_bmc->remain_len = 0; ssif_bmc->busy = false; - memset(&ssif_bmc->part_buf, 0, sizeof(struct ssif_part_buffer)); wake_up_all(&ssif_bmc->wait_queue); } static void response_timeout(struct timer_list *t) { - struct ssif_bmc_ctx *ssif_bmc = from_timer(ssif_bmc, t, response_timer); + struct ssif_bmc_ctx *ssif_bmc = timer_container_of(ssif_bmc, t, + response_timer); unsigned long flags; spin_lock_irqsave(&ssif_bmc->lock, flags); @@ -742,9 +744,11 @@ static void on_stop_event(struct ssif_bmc_ctx *ssif_bmc, u8 *val) ssif_bmc->aborting = true; } } else if (ssif_bmc->state == SSIF_RES_SENDING) { - if (ssif_bmc->is_singlepart_read || ssif_bmc->block_num == 0xFF) + if (ssif_bmc->is_singlepart_read || ssif_bmc->block_num == 0xFF) { + memset(&ssif_bmc->part_buf, 0, sizeof(struct ssif_part_buffer)); /* Invalidate response buffer to denote it is sent */ complete_response(ssif_bmc); + } ssif_bmc->state = SSIF_READY; } @@ -850,8 +854,8 @@ static const struct of_device_id ssif_bmc_match[] = { MODULE_DEVICE_TABLE(of, ssif_bmc_match); static const struct i2c_device_id 
ssif_bmc_id[] = { - { DEVICE_NAME, 0 }, - { }, + { DEVICE_NAME }, + { } }; MODULE_DEVICE_TABLE(i2c, ssif_bmc_id); diff --git a/drivers/char/lp.c b/drivers/char/lp.c index 2f171d14b9b5..24417a00dfe9 100644 --- a/drivers/char/lp.c +++ b/drivers/char/lp.c @@ -1016,7 +1016,6 @@ static struct parport_driver lp_driver = { .name = "lp", .match_port = lp_attach, .detach = lp_detach, - .devmodel = true, }; static int __init lp_init(void) @@ -1123,4 +1122,5 @@ module_init(lp_init_module); module_exit(lp_cleanup_module); MODULE_ALIAS_CHARDEV_MAJOR(LP_MAJOR); +MODULE_DESCRIPTION("Generic parallel printer driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/char/mem.c b/drivers/char/mem.c index 1052b0f2d4cf..52039fae1594 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c @@ -31,10 +31,6 @@ #include <linux/uaccess.h> #include <linux/security.h> -#ifdef CONFIG_IA64 -# include <linux/efi.h> -#endif - #define DEVMEM_MINOR 1 #define DEVPORT_MINOR 4 @@ -65,29 +61,11 @@ static inline int page_is_allowed(unsigned long pfn) { return devmem_is_allowed(pfn); } -static inline int range_is_allowed(unsigned long pfn, unsigned long size) -{ - u64 from = ((u64)pfn) << PAGE_SHIFT; - u64 to = from + size; - u64 cursor = from; - - while (cursor < to) { - if (!devmem_is_allowed(pfn)) - return 0; - cursor += PAGE_SIZE; - pfn++; - } - return 1; -} #else static inline int page_is_allowed(unsigned long pfn) { return 1; } -static inline int range_is_allowed(unsigned long pfn, unsigned long size) -{ - return 1; -} #endif static inline bool should_stop_iteration(void) @@ -277,13 +255,6 @@ int __weak phys_mem_access_prot_allowed(struct file *file, #ifdef pgprot_noncached static int uncached_access(struct file *file, phys_addr_t addr) { -#if defined(CONFIG_IA64) - /* - * On ia64, we ignore O_DSYNC because we cannot tolerate memory - * attribute aliases. - */ - return !(efi_mem_attributes(addr) & EFI_MEMORY_WB); -#else /* * Accessing memory above the top the kernel knows about or through a * file pointer @@ -292,7 +263,6 @@ static int uncached_access(struct file *file, phys_addr_t addr) if (file->f_flags & O_DSYNC) return 1; return addr >= __pa(high_memory); -#endif } #endif @@ -334,13 +304,13 @@ static unsigned zero_mmap_capabilities(struct file *file) } /* can't do an in-place private mapping if there's no MMU */ -static inline int private_mapping_ok(struct vm_area_struct *vma) +static inline int private_mapping_ok(struct vm_area_desc *desc) { - return is_nommu_shared_mapping(vma->vm_flags); + return is_nommu_shared_mapping(desc->vm_flags); } #else -static inline int private_mapping_ok(struct vm_area_struct *vma) +static inline int private_mapping_ok(struct vm_area_desc *desc) { return 1; } @@ -352,49 +322,53 @@ static const struct vm_operations_struct mmap_mem_ops = { #endif }; -static int mmap_mem(struct file *file, struct vm_area_struct *vma) +static int mmap_filter_error(int err) +{ + return -EAGAIN; +} + +static int mmap_mem_prepare(struct vm_area_desc *desc) { - size_t size = vma->vm_end - vma->vm_start; - phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT; + struct file *file = desc->file; + const size_t size = vma_desc_size(desc); + const phys_addr_t offset = (phys_addr_t)desc->pgoff << PAGE_SHIFT; /* Does it even fit in phys_addr_t? */ - if (offset >> PAGE_SHIFT != vma->vm_pgoff) + if (offset >> PAGE_SHIFT != desc->pgoff) return -EINVAL; /* It's illegal to wrap around the end of the physical address space. 
*/ if (offset + (phys_addr_t)size - 1 < offset) return -EINVAL; - if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size)) + if (!valid_mmap_phys_addr_range(desc->pgoff, size)) return -EINVAL; - if (!private_mapping_ok(vma)) + if (!private_mapping_ok(desc)) return -ENOSYS; - if (!range_is_allowed(vma->vm_pgoff, size)) + if (!range_is_allowed(desc->pgoff, size)) return -EPERM; - if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size, - &vma->vm_page_prot)) + if (!phys_mem_access_prot_allowed(file, desc->pgoff, size, + &desc->page_prot)) return -EINVAL; - vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff, - size, - vma->vm_page_prot); + desc->page_prot = phys_mem_access_prot(file, desc->pgoff, + size, + desc->page_prot); - vma->vm_ops = &mmap_mem_ops; + desc->vm_ops = &mmap_mem_ops; + + /* Remap-pfn-range will mark the range VM_IO. */ + mmap_action_remap_full(desc, desc->pgoff); + /* We filter remap errors to -EAGAIN. */ + desc->action.error_hook = mmap_filter_error; - /* Remap-pfn-range will mark the range VM_IO */ - if (remap_pfn_range(vma, - vma->vm_start, - vma->vm_pgoff, - size, - vma->vm_page_prot)) { - return -EAGAIN; - } return 0; } +#ifdef CONFIG_DEVPORT static ssize_t read_port(struct file *file, char __user *buf, size_t count, loff_t *ppos) { @@ -436,6 +410,7 @@ static ssize_t write_port(struct file *file, const char __user *buf, *ppos = i; return tmp-buf; } +#endif static ssize_t read_null(struct file *file, char __user *buf, size_t count, loff_t *ppos) @@ -529,38 +504,64 @@ static ssize_t read_zero(struct file *file, char __user *buf, return cleared; } -static int mmap_zero(struct file *file, struct vm_area_struct *vma) +static int mmap_zero_private_success(const struct vm_area_struct *vma) +{ + /* + * This is a highly unique situation where we mark a MAP_PRIVATE mapping + * of /dev/zero anonymous, despite it not being. + */ + vma_set_anonymous((struct vm_area_struct *)vma); + + return 0; +} + +static int mmap_zero_prepare(struct vm_area_desc *desc) { #ifndef CONFIG_MMU return -ENOSYS; #endif - if (vma->vm_flags & VM_SHARED) - return shmem_zero_setup(vma); - vma_set_anonymous(vma); + if (desc->vm_flags & VM_SHARED) + return shmem_zero_setup_desc(desc); + + desc->action.success_hook = mmap_zero_private_success; return 0; } +#ifndef CONFIG_MMU +static unsigned long get_unmapped_area_zero(struct file *file, + unsigned long addr, unsigned long len, + unsigned long pgoff, unsigned long flags) +{ + return -ENOSYS; +} +#else static unsigned long get_unmapped_area_zero(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { -#ifdef CONFIG_MMU if (flags & MAP_SHARED) { /* - * mmap_zero() will call shmem_zero_setup() to create a file, - * so use shmem's get_unmapped_area in case it can be huge; - * and pass NULL for file as in mmap.c's get_unmapped_area(), - * so as not to confuse shmem with our handle on "/dev/zero". + * mmap_zero_prepare() will call shmem_zero_setup() to create a + * file, so use shmem's get_unmapped_area in case it can be + * huge; and pass NULL for file as in mmap.c's + * get_unmapped_area(), so as not to confuse shmem with our + * handle on "/dev/zero". 
*/ return shmem_get_unmapped_area(NULL, addr, len, pgoff, flags); } - /* Otherwise flags & MAP_PRIVATE: with no shmem object beneath it */ - return current->mm->get_unmapped_area(file, addr, len, pgoff, flags); + /* + * Otherwise flags & MAP_PRIVATE: with no shmem object beneath it, + * attempt to map aligned to huge page size if possible, otherwise we + * fall back to system page size mappings. + */ +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + return thp_get_unmapped_area(file, addr, len, pgoff, flags); #else - return -ENOSYS; + return mm_get_unmapped_area(file, addr, len, pgoff, flags); #endif } +#endif /* CONFIG_MMU */ static ssize_t write_full(struct file *file, const char __user *buf, size_t count, loff_t *ppos) @@ -640,18 +641,20 @@ static int open_port(struct inode *inode, struct file *filp) #define full_lseek null_lseek #define write_zero write_null #define write_iter_zero write_iter_null +#define splice_write_zero splice_write_null #define open_mem open_port static const struct file_operations __maybe_unused mem_fops = { .llseek = memory_lseek, .read = read_mem, .write = write_mem, - .mmap = mmap_mem, + .mmap_prepare = mmap_mem_prepare, .open = open_mem, #ifndef CONFIG_MMU .get_unmapped_area = get_unmapped_area_mem, .mmap_capabilities = memory_mmap_capabilities, #endif + .fop_flags = FOP_UNSIGNED_OFFSET, }; static const struct file_operations null_fops = { @@ -664,12 +667,14 @@ static const struct file_operations null_fops = { .uring_cmd = uring_cmd_null, }; -static const struct file_operations __maybe_unused port_fops = { +#ifdef CONFIG_DEVPORT +static const struct file_operations port_fops = { .llseek = memory_lseek, .read = read_port, .write = write_port, .open = open_port, }; +#endif static const struct file_operations zero_fops = { .llseek = zero_lseek, @@ -677,7 +682,9 @@ static const struct file_operations zero_fops = { .read_iter = read_iter_zero, .read = read_zero, .write_iter = write_iter_zero, - .mmap = mmap_zero, + .splice_read = copy_splice_read, + .splice_write = splice_write_zero, + .mmap_prepare = mmap_zero_prepare, .get_unmapped_area = get_unmapped_area_zero, #ifndef CONFIG_MMU .mmap_capabilities = zero_mmap_capabilities, @@ -688,6 +695,7 @@ static const struct file_operations full_fops = { .llseek = full_lseek, .read_iter = read_iter_zero, .write = write_full, + .splice_read = copy_splice_read, }; static const struct memdev { @@ -697,7 +705,7 @@ static const struct memdev { umode_t mode; } devlist[] = { #ifdef CONFIG_DEVMEM - [DEVMEM_MINOR] = { "mem", &mem_fops, FMODE_UNSIGNED_OFFSET, 0 }, + [DEVMEM_MINOR] = { "mem", &mem_fops, 0, 0 }, #endif [3] = { "null", &null_fops, FMODE_NOWAIT, 0666 }, #ifdef CONFIG_DEVPORT diff --git a/drivers/char/misc.c b/drivers/char/misc.c index 541edc26ec89..726516fb0a3b 100644 --- a/drivers/char/misc.c +++ b/drivers/char/misc.c @@ -58,31 +58,27 @@ static LIST_HEAD(misc_list); static DEFINE_MUTEX(misc_mtx); /* - * Assigned numbers, used for dynamic minors + * Assigned numbers. 
*/ -#define DYNAMIC_MINORS 128 /* like dynamic majors */ static DEFINE_IDA(misc_minors_ida); -static int misc_minor_alloc(void) +static int misc_minor_alloc(int minor) { - int ret; + int ret = 0; - ret = ida_alloc_max(&misc_minors_ida, DYNAMIC_MINORS - 1, GFP_KERNEL); - if (ret >= 0) { - ret = DYNAMIC_MINORS - ret - 1; - } else { + if (minor == MISC_DYNAMIC_MINOR) { + /* allocate free id */ ret = ida_alloc_range(&misc_minors_ida, MISC_DYNAMIC_MINOR + 1, MINORMASK, GFP_KERNEL); + } else { + ret = ida_alloc_range(&misc_minors_ida, minor, minor, GFP_KERNEL); } return ret; } static void misc_minor_free(int minor) { - if (minor < DYNAMIC_MINORS) - ida_free(&misc_minors_ida, DYNAMIC_MINORS - minor - 1); - else if (minor > MISC_DYNAMIC_MINOR) - ida_free(&misc_minors_ida, minor); + ida_free(&misc_minors_ida, minor); } #ifdef CONFIG_PROC_FS @@ -136,7 +132,8 @@ static int misc_open(struct inode *inode, struct file *file) break; } - if (!new_fops) { + /* Only request module for fixed minor code */ + if (!new_fops && minor < MISC_DYNAMIC_MINOR) { mutex_unlock(&misc_mtx); request_module("char-major-%d-%d", MISC_MAJOR, minor); mutex_lock(&misc_mtx); @@ -148,10 +145,11 @@ static int misc_open(struct inode *inode, struct file *file) new_fops = fops_get(iter->fops); break; } - if (!new_fops) - goto fail; } + if (!new_fops) + goto fail; + /* * Place the miscdevice in the file's * private_data so it can be used by the @@ -214,12 +212,18 @@ int misc_register(struct miscdevice *misc) int err = 0; bool is_dynamic = (misc->minor == MISC_DYNAMIC_MINOR); + if (misc->minor > MISC_DYNAMIC_MINOR) { + pr_err("Invalid fixed minor %d for miscdevice '%s'\n", + misc->minor, misc->name); + return -EINVAL; + } + INIT_LIST_HEAD(&misc->list); mutex_lock(&misc_mtx); if (is_dynamic) { - int i = misc_minor_alloc(); + int i = misc_minor_alloc(misc->minor); if (i < 0) { err = -EBUSY; @@ -228,6 +232,7 @@ int misc_register(struct miscdevice *misc) misc->minor = i; } else { struct miscdevice *c; + int i; list_for_each_entry(c, &misc_list, list) { if (c->minor == misc->minor) { @@ -235,6 +240,12 @@ int misc_register(struct miscdevice *misc) goto out; } } + + i = misc_minor_alloc(misc->minor); + if (i < 0) { + err = -EBUSY; + goto out; + } } dev = MKDEV(MISC_MAJOR, misc->minor); @@ -243,8 +254,8 @@ int misc_register(struct miscdevice *misc) device_create_with_groups(&misc_class, misc->parent, dev, misc, misc->groups, "%s", misc->name); if (IS_ERR(misc->this_device)) { + misc_minor_free(misc->minor); if (is_dynamic) { - misc_minor_free(misc->minor); misc->minor = MISC_DYNAMIC_MINOR; } err = PTR_ERR(misc->this_device); @@ -272,13 +283,12 @@ EXPORT_SYMBOL(misc_register); void misc_deregister(struct miscdevice *misc) { - if (WARN_ON(list_empty(&misc->list))) - return; - mutex_lock(&misc_mtx); - list_del(&misc->list); + list_del_init(&misc->list); device_destroy(&misc_class, MKDEV(MISC_MAJOR, misc->minor)); misc_minor_free(misc->minor); + if (misc->minor > MISC_DYNAMIC_MINOR) + misc->minor = MISC_DYNAMIC_MINOR; mutex_unlock(&misc_mtx); } EXPORT_SYMBOL(misc_deregister); @@ -286,15 +296,15 @@ EXPORT_SYMBOL(misc_deregister); static int __init misc_init(void) { int err; - struct proc_dir_entry *ret; + struct proc_dir_entry *misc_proc_file; - ret = proc_create_seq("misc", 0, NULL, &misc_seq_ops); + misc_proc_file = proc_create_seq("misc", 0, NULL, &misc_seq_ops); err = class_register(&misc_class); if (err) goto fail_remove; - err = -EIO; - if (register_chrdev(MISC_MAJOR, "misc", &misc_fops)) + err = __register_chrdev(MISC_MAJOR, 0, MINORMASK + 
1, "misc", &misc_fops); + if (err < 0) goto fail_printk; return 0; @@ -302,7 +312,7 @@ fail_printk: pr_err("unable to get major %d for misc devices\n", MISC_MAJOR); class_unregister(&misc_class); fail_remove: - if (ret) + if (misc_proc_file) remove_proc_entry("misc", NULL); return err; } diff --git a/drivers/char/misc_minor_kunit.c b/drivers/char/misc_minor_kunit.c new file mode 100644 index 000000000000..6fc8b05169c5 --- /dev/null +++ b/drivers/char/misc_minor_kunit.c @@ -0,0 +1,689 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <kunit/test.h> +#include <kunit/test-bug.h> +#include <linux/module.h> +#include <linux/miscdevice.h> +#include <linux/fs.h> +#include <linux/file.h> +#include <linux/init_syscalls.h> + +/* static minor (LCD_MINOR) */ +static struct miscdevice dev_static_minor = { + .minor = LCD_MINOR, + .name = "dev_static_minor", +}; + +/* misc dynamic minor */ +static struct miscdevice dev_misc_dynamic_minor = { + .minor = MISC_DYNAMIC_MINOR, + .name = "dev_misc_dynamic_minor", +}; + +static void kunit_static_minor(struct kunit *test) +{ + int ret; + + ret = misc_register(&dev_static_minor); + KUNIT_EXPECT_EQ(test, 0, ret); + KUNIT_EXPECT_EQ(test, LCD_MINOR, dev_static_minor.minor); + misc_deregister(&dev_static_minor); +} + +static void kunit_misc_dynamic_minor(struct kunit *test) +{ + int ret; + + ret = misc_register(&dev_misc_dynamic_minor); + KUNIT_EXPECT_EQ(test, 0, ret); + misc_deregister(&dev_misc_dynamic_minor); +} + +struct miscdev_test_case { + const char *str; + int minor; +}; + +static struct miscdev_test_case miscdev_test_ranges[] = { + { + .str = "lower static range, top", + .minor = 15, + }, + { + .str = "upper static range, bottom", + .minor = 130, + }, + { + .str = "lower static range, bottom", + .minor = 0, + }, + { + .str = "upper static range, top", + .minor = MISC_DYNAMIC_MINOR - 1, + }, +}; + +KUNIT_ARRAY_PARAM_DESC(miscdev, miscdev_test_ranges, str); + +static int miscdev_find_minors(struct kunit_suite *suite) +{ + int ret; + struct miscdevice miscstat = { + .name = "miscstat", + }; + int i; + + for (i = 15; i >= 0; i--) { + miscstat.minor = i; + ret = misc_register(&miscstat); + if (ret == 0) + break; + } + + if (ret == 0) { + kunit_info(suite, "found misc device minor %d available\n", + miscstat.minor); + miscdev_test_ranges[0].minor = miscstat.minor; + misc_deregister(&miscstat); + } else { + return ret; + } + + for (i = 128; i < MISC_DYNAMIC_MINOR; i++) { + miscstat.minor = i; + ret = misc_register(&miscstat); + if (ret == 0) + break; + } + + if (ret == 0) { + kunit_info(suite, "found misc device minor %d available\n", + miscstat.minor); + miscdev_test_ranges[1].minor = miscstat.minor; + misc_deregister(&miscstat); + } else { + return ret; + } + + for (i = 0; i < miscdev_test_ranges[0].minor; i++) { + miscstat.minor = i; + ret = misc_register(&miscstat); + if (ret == 0) + break; + } + + if (ret == 0) { + kunit_info(suite, "found misc device minor %d available\n", + miscstat.minor); + miscdev_test_ranges[2].minor = miscstat.minor; + misc_deregister(&miscstat); + } else { + return ret; + } + + for (i = MISC_DYNAMIC_MINOR - 1; i > miscdev_test_ranges[1].minor; i--) { + miscstat.minor = i; + ret = misc_register(&miscstat); + if (ret == 0) + break; + } + + if (ret == 0) { + kunit_info(suite, "found misc device minor %d available\n", + miscstat.minor); + miscdev_test_ranges[3].minor = miscstat.minor; + misc_deregister(&miscstat); + } + + return ret; +} + +static bool is_valid_dynamic_minor(int minor) +{ + if (minor < 0) + return false; + return minor > 
MISC_DYNAMIC_MINOR; +} + +static int miscdev_test_open(struct inode *inode, struct file *file) +{ + return 0; +} + +static const struct file_operations miscdev_test_fops = { + .open = miscdev_test_open, +}; + +static void __init miscdev_test_can_open(struct kunit *test, struct miscdevice *misc) +{ + int ret; + struct file *filp; + char *devname; + + devname = kasprintf(GFP_KERNEL, "/dev/%s", misc->name); + ret = init_mknod(devname, S_IFCHR | 0600, + new_encode_dev(MKDEV(MISC_MAJOR, misc->minor))); + if (ret != 0) + KUNIT_FAIL(test, "failed to create node\n"); + + filp = filp_open(devname, O_RDONLY, 0); + if (IS_ERR_OR_NULL(filp)) + KUNIT_FAIL(test, "failed to open misc device: %ld\n", PTR_ERR(filp)); + else + fput(filp); + + init_unlink(devname); + kfree(devname); +} + +static void __init miscdev_test_static_basic(struct kunit *test) +{ + struct miscdevice misc_test = { + .name = "misc_test", + .fops = &miscdev_test_fops, + }; + int ret; + const struct miscdev_test_case *params = test->param_value; + + misc_test.minor = params->minor; + + ret = misc_register(&misc_test); + KUNIT_EXPECT_EQ(test, ret, 0); + KUNIT_EXPECT_EQ(test, misc_test.minor, params->minor); + + if (ret == 0) { + miscdev_test_can_open(test, &misc_test); + misc_deregister(&misc_test); + } +} + +static void __init miscdev_test_dynamic_basic(struct kunit *test) +{ + struct miscdevice misc_test = { + .minor = MISC_DYNAMIC_MINOR, + .name = "misc_test", + .fops = &miscdev_test_fops, + }; + int ret; + + ret = misc_register(&misc_test); + KUNIT_EXPECT_EQ(test, ret, 0); + KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(misc_test.minor)); + + if (ret == 0) { + miscdev_test_can_open(test, &misc_test); + misc_deregister(&misc_test); + } +} + +static void miscdev_test_twice(struct kunit *test) +{ + struct miscdevice misc_test = { + .name = "misc_test", + .fops = &miscdev_test_fops, + }; + int ret; + const struct miscdev_test_case *params = test->param_value; + + misc_test.minor = params->minor; + + ret = misc_register(&misc_test); + KUNIT_EXPECT_EQ(test, ret, 0); + KUNIT_EXPECT_EQ(test, misc_test.minor, params->minor); + if (ret == 0) + misc_deregister(&misc_test); + + ret = misc_register(&misc_test); + KUNIT_EXPECT_EQ(test, ret, 0); + KUNIT_EXPECT_EQ(test, misc_test.minor, params->minor); + if (ret == 0) + misc_deregister(&misc_test); +} + +static void miscdev_test_duplicate_minor(struct kunit *test) +{ + struct miscdevice misc1 = { + .name = "misc1", + .fops = &miscdev_test_fops, + }; + struct miscdevice misc2 = { + .name = "misc2", + .fops = &miscdev_test_fops, + }; + int ret; + const struct miscdev_test_case *params = test->param_value; + + misc1.minor = params->minor; + misc2.minor = params->minor; + + ret = misc_register(&misc1); + KUNIT_EXPECT_EQ(test, ret, 0); + KUNIT_EXPECT_EQ(test, misc1.minor, params->minor); + + ret = misc_register(&misc2); + KUNIT_EXPECT_EQ(test, ret, -EBUSY); + if (ret == 0) + misc_deregister(&misc2); + + misc_deregister(&misc1); +} + +static void miscdev_test_duplicate_name(struct kunit *test) +{ + struct miscdevice misc1 = { + .minor = MISC_DYNAMIC_MINOR, + .name = "misc1", + .fops = &miscdev_test_fops, + }; + struct miscdevice misc2 = { + .minor = MISC_DYNAMIC_MINOR, + .name = "misc1", + .fops = &miscdev_test_fops, + }; + int ret; + + ret = misc_register(&misc1); + KUNIT_EXPECT_EQ(test, ret, 0); + KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(misc1.minor)); + + ret = misc_register(&misc2); + KUNIT_EXPECT_EQ(test, ret, -EEXIST); + if (ret == 0) + misc_deregister(&misc2); + + misc_deregister(&misc1); +} 
+ +/* + * Test that after a duplicate name failure, the reserved minor number is + * freed to be allocated next. + */ +static void miscdev_test_duplicate_name_leak(struct kunit *test) +{ + struct miscdevice misc1 = { + .minor = MISC_DYNAMIC_MINOR, + .name = "misc1", + .fops = &miscdev_test_fops, + }; + struct miscdevice misc2 = { + .minor = MISC_DYNAMIC_MINOR, + .name = "misc1", + .fops = &miscdev_test_fops, + }; + struct miscdevice misc3 = { + .minor = MISC_DYNAMIC_MINOR, + .name = "misc3", + .fops = &miscdev_test_fops, + }; + int ret; + int dyn_minor; + + ret = misc_register(&misc1); + KUNIT_EXPECT_EQ(test, ret, 0); + KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(misc1.minor)); + + /* + * Find out what is the next minor number available. + */ + ret = misc_register(&misc3); + KUNIT_EXPECT_EQ(test, ret, 0); + KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(misc3.minor)); + dyn_minor = misc3.minor; + misc_deregister(&misc3); + misc3.minor = MISC_DYNAMIC_MINOR; + + ret = misc_register(&misc2); + KUNIT_EXPECT_EQ(test, ret, -EEXIST); + if (ret == 0) + misc_deregister(&misc2); + + /* + * Now check that we can still get the same minor we found before. + */ + ret = misc_register(&misc3); + KUNIT_EXPECT_EQ(test, ret, 0); + KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(misc3.minor)); + KUNIT_EXPECT_EQ(test, misc3.minor, dyn_minor); + misc_deregister(&misc3); + + misc_deregister(&misc1); +} + +/* + * Try to register a static minor with a duplicate name. That might not + * deallocate the minor, preventing it from being used again. + */ +static void miscdev_test_duplicate_error(struct kunit *test) +{ + struct miscdevice miscdyn = { + .minor = MISC_DYNAMIC_MINOR, + .name = "name1", + .fops = &miscdev_test_fops, + }; + struct miscdevice miscstat = { + .name = "name1", + .fops = &miscdev_test_fops, + }; + struct miscdevice miscnew = { + .name = "name2", + .fops = &miscdev_test_fops, + }; + int ret; + const struct miscdev_test_case *params = test->param_value; + + miscstat.minor = params->minor; + miscnew.minor = params->minor; + + ret = misc_register(&miscdyn); + KUNIT_EXPECT_EQ(test, ret, 0); + KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(miscdyn.minor)); + + ret = misc_register(&miscstat); + KUNIT_EXPECT_EQ(test, ret, -EEXIST); + if (ret == 0) + misc_deregister(&miscstat); + + ret = misc_register(&miscnew); + KUNIT_EXPECT_EQ(test, ret, 0); + KUNIT_EXPECT_EQ(test, miscnew.minor, params->minor); + if (ret == 0) + misc_deregister(&miscnew); + + misc_deregister(&miscdyn); +} + +static void __init miscdev_test_dynamic_only_range(struct kunit *test) +{ + int ret; + struct miscdevice *miscdev; + const int dynamic_minors = 256; + int i; + + miscdev = kunit_kmalloc_array(test, dynamic_minors, + sizeof(struct miscdevice), + GFP_KERNEL | __GFP_ZERO); + + for (i = 0; i < dynamic_minors; i++) { + miscdev[i].minor = MISC_DYNAMIC_MINOR; + miscdev[i].name = kasprintf(GFP_KERNEL, "misc_test%d", i); + miscdev[i].fops = &miscdev_test_fops; + ret = misc_register(&miscdev[i]); + if (ret != 0) + break; + /* + * This is the bug we are looking for! + * We asked for a dynamic minor and got a minor in the static range space. 
+ */ + if (miscdev[i].minor >= 0 && miscdev[i].minor <= 15) { + KUNIT_FAIL(test, "misc_register allocated minor %d\n", miscdev[i].minor); + i++; + break; + } + KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(miscdev[i].minor)); + } + + for (i--; i >= 0; i--) { + miscdev_test_can_open(test, &miscdev[i]); + misc_deregister(&miscdev[i]); + kfree_const(miscdev[i].name); + } + + KUNIT_EXPECT_EQ(test, ret, 0); +} + +static void __init miscdev_test_collision(struct kunit *test) +{ + int ret; + struct miscdevice *miscdev; + struct miscdevice miscstat = { + .name = "miscstat", + .fops = &miscdev_test_fops, + }; + const int dynamic_minors = 256; + int i; + + miscdev = kunit_kmalloc_array(test, dynamic_minors, + sizeof(struct miscdevice), + GFP_KERNEL | __GFP_ZERO); + + miscstat.minor = miscdev_test_ranges[0].minor; + ret = misc_register(&miscstat); + KUNIT_ASSERT_EQ(test, ret, 0); + KUNIT_EXPECT_EQ(test, miscstat.minor, miscdev_test_ranges[0].minor); + + for (i = 0; i < dynamic_minors; i++) { + miscdev[i].minor = MISC_DYNAMIC_MINOR; + miscdev[i].name = kasprintf(GFP_KERNEL, "misc_test%d", i); + miscdev[i].fops = &miscdev_test_fops; + ret = misc_register(&miscdev[i]); + if (ret != 0) + break; + KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(miscdev[i].minor)); + } + + for (i--; i >= 0; i--) { + miscdev_test_can_open(test, &miscdev[i]); + misc_deregister(&miscdev[i]); + kfree_const(miscdev[i].name); + } + + misc_deregister(&miscstat); + + KUNIT_EXPECT_EQ(test, ret, 0); +} + +static void __init miscdev_test_collision_reverse(struct kunit *test) +{ + int ret; + struct miscdevice *miscdev; + struct miscdevice miscstat = { + .name = "miscstat", + .fops = &miscdev_test_fops, + }; + const int dynamic_minors = 256; + int i; + + miscdev = kunit_kmalloc_array(test, dynamic_minors, + sizeof(struct miscdevice), + GFP_KERNEL | __GFP_ZERO); + + for (i = 0; i < dynamic_minors; i++) { + miscdev[i].minor = MISC_DYNAMIC_MINOR; + miscdev[i].name = kasprintf(GFP_KERNEL, "misc_test%d", i); + miscdev[i].fops = &miscdev_test_fops; + ret = misc_register(&miscdev[i]); + if (ret != 0) + break; + KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(miscdev[i].minor)); + } + + KUNIT_EXPECT_EQ(test, ret, 0); + + miscstat.minor = miscdev_test_ranges[0].minor; + ret = misc_register(&miscstat); + KUNIT_EXPECT_EQ(test, ret, 0); + KUNIT_EXPECT_EQ(test, miscstat.minor, miscdev_test_ranges[0].minor); + if (ret == 0) + misc_deregister(&miscstat); + + for (i--; i >= 0; i--) { + miscdev_test_can_open(test, &miscdev[i]); + misc_deregister(&miscdev[i]); + kfree_const(miscdev[i].name); + } +} + +static void __init miscdev_test_conflict(struct kunit *test) +{ + int ret; + struct miscdevice miscdyn = { + .name = "miscdyn", + .minor = MISC_DYNAMIC_MINOR, + .fops = &miscdev_test_fops, + }; + struct miscdevice miscstat = { + .name = "miscstat", + .fops = &miscdev_test_fops, + }; + + ret = misc_register(&miscdyn); + KUNIT_ASSERT_EQ(test, ret, 0); + KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(miscdyn.minor)); + + /* + * Try to register a static minor with the same minor as the + * dynamic one. 
+ */ + miscstat.minor = miscdyn.minor; + ret = misc_register(&miscstat); + KUNIT_EXPECT_EQ(test, ret, -EINVAL); + if (ret == 0) + misc_deregister(&miscstat); + + miscdev_test_can_open(test, &miscdyn); + + misc_deregister(&miscdyn); +} + +static void __init miscdev_test_conflict_reverse(struct kunit *test) +{ + int ret; + struct miscdevice miscdyn = { + .name = "miscdyn", + .minor = MISC_DYNAMIC_MINOR, + .fops = &miscdev_test_fops, + }; + struct miscdevice miscstat = { + .name = "miscstat", + .fops = &miscdev_test_fops, + }; + + /* + * Find the first available dynamic minor to use it as a static + * minor later on. + */ + ret = misc_register(&miscdyn); + KUNIT_ASSERT_EQ(test, ret, 0); + KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(miscdyn.minor)); + miscstat.minor = miscdyn.minor; + misc_deregister(&miscdyn); + + ret = misc_register(&miscstat); + KUNIT_EXPECT_EQ(test, ret, -EINVAL); + if (ret == 0) + misc_deregister(&miscstat); + + /* + * Try to register a dynamic minor after registering a static minor + * within the dynamic range. It should work but get a different + * minor. + */ + miscdyn.minor = MISC_DYNAMIC_MINOR; + ret = misc_register(&miscdyn); + KUNIT_EXPECT_EQ(test, ret, 0); + KUNIT_EXPECT_EQ(test, miscdyn.minor, miscstat.minor); + KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(miscdyn.minor)); + if (ret == 0) + misc_deregister(&miscdyn); +} + +/* Take minor(> MISC_DYNAMIC_MINOR) as invalid when register miscdevice */ +static void miscdev_test_invalid_input(struct kunit *test) +{ + struct miscdevice misc_test = { + .minor = MISC_DYNAMIC_MINOR + 1, + .name = "misc_test", + .fops = &miscdev_test_fops, + }; + int ret; + + ret = misc_register(&misc_test); + KUNIT_EXPECT_EQ(test, ret, -EINVAL); + if (ret == 0) + misc_deregister(&misc_test); +} + +/* + * Verify if @miscdyn_a can still be registered successfully without + * reinitialization even if its minor ever owned was requested by + * another miscdevice such as @miscdyn_b. 
+ */ +static void miscdev_test_dynamic_reentry(struct kunit *test) +{ + struct miscdevice miscdyn_a = { + .name = "miscdyn_a", + .minor = MISC_DYNAMIC_MINOR, + .fops = &miscdev_test_fops, + }; + struct miscdevice miscdyn_b = { + .name = "miscdyn_b", + .minor = MISC_DYNAMIC_MINOR, + .fops = &miscdev_test_fops, + }; + int ret, minor_a; + + ret = misc_register(&miscdyn_a); + KUNIT_ASSERT_EQ(test, ret, 0); + KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(miscdyn_a.minor)); + minor_a = miscdyn_a.minor; + if (ret != 0) + return; + misc_deregister(&miscdyn_a); + + ret = misc_register(&miscdyn_b); + KUNIT_ASSERT_EQ(test, ret, 0); + KUNIT_EXPECT_EQ(test, miscdyn_b.minor, minor_a); + if (ret != 0) + return; + + ret = misc_register(&miscdyn_a); + KUNIT_ASSERT_EQ(test, ret, 0); + KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(miscdyn_a.minor)); + KUNIT_EXPECT_NE(test, miscdyn_a.minor, miscdyn_b.minor); + if (ret == 0) + misc_deregister(&miscdyn_a); + + misc_deregister(&miscdyn_b); +} + +static struct kunit_case test_cases[] = { + KUNIT_CASE(kunit_static_minor), + KUNIT_CASE(kunit_misc_dynamic_minor), + KUNIT_CASE(miscdev_test_invalid_input), + KUNIT_CASE_PARAM(miscdev_test_twice, miscdev_gen_params), + KUNIT_CASE_PARAM(miscdev_test_duplicate_minor, miscdev_gen_params), + KUNIT_CASE(miscdev_test_duplicate_name), + KUNIT_CASE(miscdev_test_duplicate_name_leak), + KUNIT_CASE_PARAM(miscdev_test_duplicate_error, miscdev_gen_params), + KUNIT_CASE(miscdev_test_dynamic_reentry), + {} +}; + +static struct kunit_suite test_suite = { + .name = "miscdev", + .suite_init = miscdev_find_minors, + .test_cases = test_cases, +}; +kunit_test_suite(test_suite); + +static struct kunit_case __refdata test_init_cases[] = { + KUNIT_CASE_PARAM(miscdev_test_static_basic, miscdev_gen_params), + KUNIT_CASE(miscdev_test_dynamic_basic), + KUNIT_CASE(miscdev_test_dynamic_only_range), + KUNIT_CASE(miscdev_test_collision), + KUNIT_CASE(miscdev_test_collision_reverse), + KUNIT_CASE(miscdev_test_conflict), + KUNIT_CASE(miscdev_test_conflict_reverse), + {} +}; + +static struct kunit_suite test_init_suite = { + .name = "miscdev_init", + .suite_init = miscdev_find_minors, + .test_cases = test_init_cases, +}; +kunit_test_init_section_suite(test_init_suite); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Vimal Agrawal"); +MODULE_AUTHOR("Thadeu Lima de Souza Cascardo <cascardo@igalia.com>"); +MODULE_DESCRIPTION("Test module for misc character devices"); diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c deleted file mode 100644 index b35f651837c8..000000000000 --- a/drivers/char/mspec.c +++ /dev/null @@ -1,295 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2001-2006 Silicon Graphics, Inc. All rights - * reserved. - */ - -/* - * SN Platform Special Memory (mspec) Support - * - * This driver exports the SN special memory (mspec) facility to user - * processes. - * There are two types of memory made available thru this driver: - * uncached and cached. - * - * Uncached are used for memory write combining feature of the ia64 - * cpu. - * - * Cached are used for areas of memory that are used as cached addresses - * on our partition and used as uncached addresses from other partitions. - * Due to a design constraint of the SN2 Shub, you can not have processors - * on the same FSB perform both a cached and uncached reference to the - * same cache line. 
These special memory cached regions prevent the - * kernel from ever dropping in a TLB entry and therefore prevent the - * processor from ever speculating a cache line from this page. - */ - -#include <linux/types.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/init.h> -#include <linux/errno.h> -#include <linux/miscdevice.h> -#include <linux/spinlock.h> -#include <linux/mm.h> -#include <linux/fs.h> -#include <linux/vmalloc.h> -#include <linux/string.h> -#include <linux/slab.h> -#include <linux/numa.h> -#include <linux/refcount.h> -#include <asm/page.h> -#include <linux/atomic.h> -#include <asm/tlbflush.h> -#include <asm/uncached.h> - - -#define CACHED_ID "Cached," -#define UNCACHED_ID "Uncached" -#define REVISION "4.0" -#define MSPEC_BASENAME "mspec" - -/* - * Page types allocated by the device. - */ -enum mspec_page_type { - MSPEC_CACHED = 2, - MSPEC_UNCACHED -}; - -/* - * One of these structures is allocated when an mspec region is mmaped. The - * structure is pointed to by the vma->vm_private_data field in the vma struct. - * This structure is used to record the addresses of the mspec pages. - * This structure is shared by all vma's that are split off from the - * original vma when split_vma()'s are done. - * - * The refcnt is incremented atomically because mm->mmap_lock does not - * protect in fork case where multiple tasks share the vma_data. - */ -struct vma_data { - refcount_t refcnt; /* Number of vmas sharing the data. */ - spinlock_t lock; /* Serialize access to this structure. */ - int count; /* Number of pages allocated. */ - enum mspec_page_type type; /* Type of pages allocated. */ - unsigned long vm_start; /* Original (unsplit) base. */ - unsigned long vm_end; /* Original (unsplit) end. */ - unsigned long maddr[]; /* Array of MSPEC addresses. */ -}; - -/* - * mspec_open - * - * Called when a device mapping is created by a means other than mmap - * (via fork, munmap, etc.). Increments the reference count on the - * underlying mspec data so it is not freed prematurely. - */ -static void -mspec_open(struct vm_area_struct *vma) -{ - struct vma_data *vdata; - - vdata = vma->vm_private_data; - refcount_inc(&vdata->refcnt); -} - -/* - * mspec_close - * - * Called when unmapping a device mapping. Frees all mspec pages - * belonging to all the vma's sharing this vma_data structure. - */ -static void -mspec_close(struct vm_area_struct *vma) -{ - struct vma_data *vdata; - int index, last_index; - unsigned long my_page; - - vdata = vma->vm_private_data; - - if (!refcount_dec_and_test(&vdata->refcnt)) - return; - - last_index = (vdata->vm_end - vdata->vm_start) >> PAGE_SHIFT; - for (index = 0; index < last_index; index++) { - if (vdata->maddr[index] == 0) - continue; - /* - * Clear the page before sticking it back - * into the pool. - */ - my_page = vdata->maddr[index]; - vdata->maddr[index] = 0; - memset((char *)my_page, 0, PAGE_SIZE); - uncached_free_page(my_page, 1); - } - - kvfree(vdata); -} - -/* - * mspec_fault - * - * Creates a mspec page and maps it to user space. 
- */ -static vm_fault_t -mspec_fault(struct vm_fault *vmf) -{ - unsigned long paddr, maddr; - unsigned long pfn; - pgoff_t index = vmf->pgoff; - struct vma_data *vdata = vmf->vma->vm_private_data; - - maddr = (volatile unsigned long) vdata->maddr[index]; - if (maddr == 0) { - maddr = uncached_alloc_page(numa_node_id(), 1); - if (maddr == 0) - return VM_FAULT_OOM; - - spin_lock(&vdata->lock); - if (vdata->maddr[index] == 0) { - vdata->count++; - vdata->maddr[index] = maddr; - } else { - uncached_free_page(maddr, 1); - maddr = vdata->maddr[index]; - } - spin_unlock(&vdata->lock); - } - - paddr = maddr & ~__IA64_UNCACHED_OFFSET; - pfn = paddr >> PAGE_SHIFT; - - return vmf_insert_pfn(vmf->vma, vmf->address, pfn); -} - -static const struct vm_operations_struct mspec_vm_ops = { - .open = mspec_open, - .close = mspec_close, - .fault = mspec_fault, -}; - -/* - * mspec_mmap - * - * Called when mmapping the device. Initializes the vma with a fault handler - * and private data structure necessary to allocate, track, and free the - * underlying pages. - */ -static int -mspec_mmap(struct file *file, struct vm_area_struct *vma, - enum mspec_page_type type) -{ - struct vma_data *vdata; - int pages, vdata_size; - - if (vma->vm_pgoff != 0) - return -EINVAL; - - if ((vma->vm_flags & VM_SHARED) == 0) - return -EINVAL; - - if ((vma->vm_flags & VM_WRITE) == 0) - return -EPERM; - - pages = vma_pages(vma); - vdata_size = sizeof(struct vma_data) + pages * sizeof(long); - vdata = kvzalloc(vdata_size, GFP_KERNEL); - if (!vdata) - return -ENOMEM; - - vdata->vm_start = vma->vm_start; - vdata->vm_end = vma->vm_end; - vdata->type = type; - spin_lock_init(&vdata->lock); - refcount_set(&vdata->refcnt, 1); - vma->vm_private_data = vdata; - - vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP); - if (vdata->type == MSPEC_UNCACHED) - vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - vma->vm_ops = &mspec_vm_ops; - - return 0; -} - -static int -cached_mmap(struct file *file, struct vm_area_struct *vma) -{ - return mspec_mmap(file, vma, MSPEC_CACHED); -} - -static int -uncached_mmap(struct file *file, struct vm_area_struct *vma) -{ - return mspec_mmap(file, vma, MSPEC_UNCACHED); -} - -static const struct file_operations cached_fops = { - .owner = THIS_MODULE, - .mmap = cached_mmap, - .llseek = noop_llseek, -}; - -static struct miscdevice cached_miscdev = { - .minor = MISC_DYNAMIC_MINOR, - .name = "mspec_cached", - .fops = &cached_fops -}; - -static const struct file_operations uncached_fops = { - .owner = THIS_MODULE, - .mmap = uncached_mmap, - .llseek = noop_llseek, -}; - -static struct miscdevice uncached_miscdev = { - .minor = MISC_DYNAMIC_MINOR, - .name = "mspec_uncached", - .fops = &uncached_fops -}; - -/* - * mspec_init - * - * Called at boot time to initialize the mspec facility. 
- */ -static int __init -mspec_init(void) -{ - int ret; - - ret = misc_register(&cached_miscdev); - if (ret) { - printk(KERN_ERR "%s: failed to register device %i\n", - CACHED_ID, ret); - return ret; - } - ret = misc_register(&uncached_miscdev); - if (ret) { - printk(KERN_ERR "%s: failed to register device %i\n", - UNCACHED_ID, ret); - misc_deregister(&cached_miscdev); - return ret; - } - - printk(KERN_INFO "%s %s initialized devices: %s %s\n", - MSPEC_BASENAME, REVISION, CACHED_ID, UNCACHED_ID); - - return 0; -} - -static void __exit -mspec_exit(void) -{ - misc_deregister(&uncached_miscdev); - misc_deregister(&cached_miscdev); -} - -module_init(mspec_init); -module_exit(mspec_exit); - -MODULE_AUTHOR("Silicon Graphics, Inc. <linux-altix@sgi.com>"); -MODULE_DESCRIPTION("Driver for SGI SN special memory operations"); -MODULE_LICENSE("GPL"); diff --git a/drivers/char/mwave/3780i.c b/drivers/char/mwave/3780i.c index 4a8937f80570..90f93cefb21c 100644 --- a/drivers/char/mwave/3780i.c +++ b/drivers/char/mwave/3780i.c @@ -46,6 +46,8 @@ * First release to the public */ +#define pr_fmt(fmt) "3780i: " fmt + #include <linux/kernel.h> #include <linux/unistd.h> #include <linux/delay.h> @@ -75,18 +77,12 @@ unsigned short dsp3780I_ReadMsaCfg(unsigned short usDspBaseIO, unsigned long flags; unsigned short val; - PRINTK_3(TRACE_3780I, - "3780i::dsp3780I_ReadMsaCfg entry usDspBaseIO %x ulMsaAddr %lx\n", - usDspBaseIO, ulMsaAddr); - spin_lock_irqsave(&dsp_lock, flags); OutWordDsp(DSP_MsaAddrLow, (unsigned short) ulMsaAddr); OutWordDsp(DSP_MsaAddrHigh, (unsigned short) (ulMsaAddr >> 16)); val = InWordDsp(DSP_MsaDataDSISHigh); spin_unlock_irqrestore(&dsp_lock, flags); - PRINTK_2(TRACE_3780I, "3780i::dsp3780I_ReadMsaCfg exit val %x\n", val); - return val; } @@ -95,10 +91,6 @@ void dsp3780I_WriteMsaCfg(unsigned short usDspBaseIO, { unsigned long flags; - PRINTK_4(TRACE_3780I, - "3780i::dsp3780i_WriteMsaCfg entry usDspBaseIO %x ulMsaAddr %lx usValue %x\n", - usDspBaseIO, ulMsaAddr, usValue); - spin_lock_irqsave(&dsp_lock, flags); OutWordDsp(DSP_MsaAddrLow, (unsigned short) ulMsaAddr); OutWordDsp(DSP_MsaAddrHigh, (unsigned short) (ulMsaAddr >> 16)); @@ -112,64 +104,18 @@ static void dsp3780I_WriteGenCfg(unsigned short usDspBaseIO, unsigned uIndex, DSP_ISA_SLAVE_CONTROL rSlaveControl; DSP_ISA_SLAVE_CONTROL rSlaveControl_Save; - - PRINTK_4(TRACE_3780I, - "3780i::dsp3780i_WriteGenCfg entry usDspBaseIO %x uIndex %x ucValue %x\n", - usDspBaseIO, uIndex, ucValue); - MKBYTE(rSlaveControl) = InByteDsp(DSP_IsaSlaveControl); - PRINTK_2(TRACE_3780I, - "3780i::dsp3780i_WriteGenCfg rSlaveControl %x\n", - MKBYTE(rSlaveControl)); - rSlaveControl_Save = rSlaveControl; rSlaveControl.ConfigMode = true; - PRINTK_2(TRACE_3780I, - "3780i::dsp3780i_WriteGenCfg entry rSlaveControl+ConfigMode %x\n", - MKBYTE(rSlaveControl)); - OutByteDsp(DSP_IsaSlaveControl, MKBYTE(rSlaveControl)); OutByteDsp(DSP_ConfigAddress, (unsigned char) uIndex); OutByteDsp(DSP_ConfigData, ucValue); OutByteDsp(DSP_IsaSlaveControl, MKBYTE(rSlaveControl_Save)); - - PRINTK_1(TRACE_3780I, "3780i::dsp3780i_WriteGenCfg exit\n"); - - } -#if 0 -unsigned char dsp3780I_ReadGenCfg(unsigned short usDspBaseIO, - unsigned uIndex) -{ - DSP_ISA_SLAVE_CONTROL rSlaveControl; - DSP_ISA_SLAVE_CONTROL rSlaveControl_Save; - unsigned char ucValue; - - - PRINTK_3(TRACE_3780I, - "3780i::dsp3780i_ReadGenCfg entry usDspBaseIO %x uIndex %x\n", - usDspBaseIO, uIndex); - - MKBYTE(rSlaveControl) = InByteDsp(DSP_IsaSlaveControl); - rSlaveControl_Save = rSlaveControl; - rSlaveControl.ConfigMode = 
true; - OutByteDsp(DSP_IsaSlaveControl, MKBYTE(rSlaveControl)); - OutByteDsp(DSP_ConfigAddress, (unsigned char) uIndex); - ucValue = InByteDsp(DSP_ConfigData); - OutByteDsp(DSP_IsaSlaveControl, MKBYTE(rSlaveControl_Save)); - - PRINTK_2(TRACE_3780I, - "3780i::dsp3780i_ReadGenCfg exit ucValue %x\n", ucValue); - - - return ucValue; -} -#endif /* 0 */ - -int dsp3780I_EnableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings, +int dsp3780I_EnableDSP(struct dsp_3780i_config_settings *pSettings, unsigned short *pIrqMap, unsigned short *pDmaMap) { @@ -191,25 +137,13 @@ int dsp3780I_EnableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings, DSP_CLOCK_CONTROL_2 rClockControl2; DSP_ISA_SLAVE_CONTROL rSlaveControl; DSP_HBRIDGE_CONTROL rHBridgeControl; - unsigned short ChipID = 0; unsigned short tval; - - PRINTK_2(TRACE_3780I, - "3780i::dsp3780I_EnableDSP entry pSettings->bDSPEnabled %x\n", - pSettings->bDSPEnabled); - - if (!pSettings->bDSPEnabled) { - PRINTK_ERROR( KERN_ERR "3780i::dsp3780I_EnableDSP: Error: DSP not enabled. Aborting.\n" ); + pr_err("%s: Error: DSP not enabled. Aborting.\n", __func__); return -EIO; } - - PRINTK_2(TRACE_3780I, - "3780i::dsp3780i_EnableDSP entry pSettings->bModemEnabled %x\n", - pSettings->bModemEnabled); - if (pSettings->bModemEnabled) { rUartCfg1.Reserved = rUartCfg2.Reserved = 0; rUartCfg1.IrqActiveLow = pSettings->bUartIrqActiveLow; @@ -282,23 +216,10 @@ int dsp3780I_EnableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings, rSlaveControl.ConfigMode = false; rSlaveControl.Reserved = 0; - PRINTK_4(TRACE_3780I, - "3780i::dsp3780i_EnableDSP usDspBaseIO %x index %x taddr %x\n", - usDspBaseIO, DSP_IsaSlaveControl, - usDspBaseIO + DSP_IsaSlaveControl); - - PRINTK_2(TRACE_3780I, - "3780i::dsp3780i_EnableDSP rSlaveContrl %x\n", - MKWORD(rSlaveControl)); - spin_lock_irqsave(&dsp_lock, flags); OutWordDsp(DSP_IsaSlaveControl, MKWORD(rSlaveControl)); MKWORD(tval) = InWordDsp(DSP_IsaSlaveControl); - PRINTK_2(TRACE_3780I, - "3780i::dsp3780i_EnableDSP rSlaveControl 2 %x\n", tval); - - for (i = 0; i < 11; i++) udelay(2000); @@ -307,10 +228,6 @@ int dsp3780I_EnableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings, MKWORD(tval) = InWordDsp(DSP_IsaSlaveControl); - PRINTK_2(TRACE_3780I, - "3780i::dsp3780i_EnableDSP rSlaveControl 3 %x\n", tval); - - /* Program our general configuration registers */ WriteGenCfg(DSP_HBridgeCfg1Index, MKBYTE(rHBridgeCfg1)); WriteGenCfg(DSP_HBridgeCfg2Index, MKBYTE(rHBridgeCfg2)); @@ -331,10 +248,6 @@ int dsp3780I_EnableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings, rHBridgeControl.IoAutoInc = false; rHBridgeControl.DiagnosticMode = false; - PRINTK_3(TRACE_3780I, - "3780i::dsp3780i_EnableDSP DSP_HBridgeControl %x rHBridgeControl %x\n", - DSP_HBridgeControl, MKWORD(rHBridgeControl)); - OutWordDsp(DSP_HBridgeControl, MKWORD(rHBridgeControl)); spin_unlock_irqrestore(&dsp_lock, flags); WriteMsaCfg(DSP_LBusTimeoutDisable, MKWORD(rLBusTimeoutDisable)); @@ -342,24 +255,17 @@ int dsp3780I_EnableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings, WriteMsaCfg(DSP_ClockControl_2, MKWORD(rClockControl2)); WriteMsaCfg(DSP_ChipReset, MKWORD(rChipReset)); - ChipID = ReadMsaCfg(DSP_ChipID); - - PRINTK_2(TRACE_3780I, - "3780i::dsp3780I_EnableDSP exiting bRC=true, ChipID %x\n", - ChipID); + ReadMsaCfg(DSP_ChipID); return 0; } -int dsp3780I_DisableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings) +int dsp3780I_DisableDSP(struct dsp_3780i_config_settings *pSettings) { unsigned long flags; unsigned short usDspBaseIO = pSettings->usDspBaseIO; DSP_ISA_SLAVE_CONTROL rSlaveControl; - - PRINTK_1(TRACE_3780I, "3780i::dsp3780i_DisableDSP 
entry\n"); - rSlaveControl.ClockControl = 0; rSlaveControl.SoftReset = true; rSlaveControl.ConfigMode = false; @@ -375,29 +281,20 @@ int dsp3780I_DisableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings) udelay(5); - - PRINTK_1(TRACE_3780I, "3780i::dsp3780i_DisableDSP exit\n"); - return 0; } -int dsp3780I_Reset(DSP_3780I_CONFIG_SETTINGS * pSettings) +int dsp3780I_Reset(struct dsp_3780i_config_settings *pSettings) { unsigned long flags; unsigned short usDspBaseIO = pSettings->usDspBaseIO; DSP_BOOT_DOMAIN rBootDomain; DSP_HBRIDGE_CONTROL rHBridgeControl; - - PRINTK_1(TRACE_3780I, "3780i::dsp3780i_Reset entry\n"); - spin_lock_irqsave(&dsp_lock, flags); /* Mask DSP to PC interrupt */ MKWORD(rHBridgeControl) = InWordDsp(DSP_HBridgeControl); - PRINTK_2(TRACE_3780I, "3780i::dsp3780i_Reset rHBridgeControl %x\n", - MKWORD(rHBridgeControl)); - rHBridgeControl.EnableDspInt = false; OutWordDsp(DSP_HBridgeControl, MKWORD(rHBridgeControl)); spin_unlock_irqrestore(&dsp_lock, flags); @@ -408,9 +305,6 @@ int dsp3780I_Reset(DSP_3780I_CONFIG_SETTINGS * pSettings) rBootDomain.NMI = true; rBootDomain.Reserved = 0; - PRINTK_2(TRACE_3780I, "3780i::dsp3780i_Reset rBootDomain %x\n", - MKWORD(rBootDomain)); - WriteMsaCfg(DSP_MspBootDomain, MKWORD(rBootDomain)); /* Reset all the chiplets and then reactivate them */ @@ -419,24 +313,17 @@ int dsp3780I_Reset(DSP_3780I_CONFIG_SETTINGS * pSettings) WriteMsaCfg(DSP_ChipReset, (unsigned short) (~pSettings->usChipletEnable)); - - PRINTK_1(TRACE_3780I, "3780i::dsp3780i_Reset exit bRC=0\n"); - return 0; } -int dsp3780I_Run(DSP_3780I_CONFIG_SETTINGS * pSettings) +int dsp3780I_Run(struct dsp_3780i_config_settings *pSettings) { unsigned long flags; unsigned short usDspBaseIO = pSettings->usDspBaseIO; DSP_BOOT_DOMAIN rBootDomain; DSP_HBRIDGE_CONTROL rHBridgeControl; - - PRINTK_1(TRACE_3780I, "3780i::dsp3780i_Run entry\n"); - - /* Transition the core to a running state */ rBootDomain.ResetCore = true; rBootDomain.Halt = false; @@ -459,15 +346,9 @@ int dsp3780I_Run(DSP_3780I_CONFIG_SETTINGS * pSettings) MKWORD(rHBridgeControl) = InWordDsp(DSP_HBridgeControl); rHBridgeControl.EnableDspInt = true; - PRINTK_2(TRACE_3780I, "3780i::dsp3780i_Run rHBridgeControl %x\n", - MKWORD(rHBridgeControl)); - OutWordDsp(DSP_HBridgeControl, MKWORD(rHBridgeControl)); spin_unlock_irqrestore(&dsp_lock, flags); - - PRINTK_1(TRACE_3780I, "3780i::dsp3780i_Run exit bRC=true\n"); - return 0; } @@ -479,12 +360,6 @@ int dsp3780I_ReadDStore(unsigned short usDspBaseIO, void __user *pvBuffer, unsigned short __user *pusBuffer = pvBuffer; unsigned short val; - - PRINTK_5(TRACE_3780I, - "3780i::dsp3780I_ReadDStore entry usDspBaseIO %x, pusBuffer %p, uCount %x, ulDSPAddr %lx\n", - usDspBaseIO, pusBuffer, uCount, ulDSPAddr); - - /* Set the initial MSA address. 
No adjustments need to be made to data store addresses */ spin_lock_irqsave(&dsp_lock, flags); OutWordDsp(DSP_MsaAddrLow, (unsigned short) ulDSPAddr); @@ -499,17 +374,9 @@ int dsp3780I_ReadDStore(unsigned short usDspBaseIO, void __user *pvBuffer, if(put_user(val, pusBuffer++)) return -EFAULT; - PRINTK_3(TRACE_3780I, - "3780I::dsp3780I_ReadDStore uCount %x val %x\n", - uCount, val); - PaceMsaAccess(usDspBaseIO); } - - PRINTK_1(TRACE_3780I, - "3780I::dsp3780I_ReadDStore exit bRC=true\n"); - return 0; } @@ -521,12 +388,6 @@ int dsp3780I_ReadAndClearDStore(unsigned short usDspBaseIO, unsigned short __user *pusBuffer = pvBuffer; unsigned short val; - - PRINTK_5(TRACE_3780I, - "3780i::dsp3780I_ReadAndDStore entry usDspBaseIO %x, pusBuffer %p, uCount %x, ulDSPAddr %lx\n", - usDspBaseIO, pusBuffer, uCount, ulDSPAddr); - - /* Set the initial MSA address. No adjustments need to be made to data store addresses */ spin_lock_irqsave(&dsp_lock, flags); OutWordDsp(DSP_MsaAddrLow, (unsigned short) ulDSPAddr); @@ -541,17 +402,9 @@ int dsp3780I_ReadAndClearDStore(unsigned short usDspBaseIO, if(put_user(val, pusBuffer++)) return -EFAULT; - PRINTK_3(TRACE_3780I, - "3780I::dsp3780I_ReadAndCleanDStore uCount %x val %x\n", - uCount, val); - PaceMsaAccess(usDspBaseIO); } - - PRINTK_1(TRACE_3780I, - "3780I::dsp3780I_ReadAndClearDStore exit bRC=true\n"); - return 0; } @@ -562,12 +415,6 @@ int dsp3780I_WriteDStore(unsigned short usDspBaseIO, void __user *pvBuffer, unsigned long flags; unsigned short __user *pusBuffer = pvBuffer; - - PRINTK_5(TRACE_3780I, - "3780i::dsp3780D_WriteDStore entry usDspBaseIO %x, pusBuffer %p, uCount %x, ulDSPAddr %lx\n", - usDspBaseIO, pusBuffer, uCount, ulDSPAddr); - - /* Set the initial MSA address. No adjustments need to be made to data store addresses */ spin_lock_irqsave(&dsp_lock, flags); OutWordDsp(DSP_MsaAddrLow, (unsigned short) ulDSPAddr); @@ -583,17 +430,9 @@ int dsp3780I_WriteDStore(unsigned short usDspBaseIO, void __user *pvBuffer, OutWordDsp(DSP_MsaDataDSISHigh, val); spin_unlock_irqrestore(&dsp_lock, flags); - PRINTK_3(TRACE_3780I, - "3780I::dsp3780I_WriteDStore uCount %x val %x\n", - uCount, val); - PaceMsaAccess(usDspBaseIO); } - - PRINTK_1(TRACE_3780I, - "3780I::dsp3780D_WriteDStore exit bRC=true\n"); - return 0; } @@ -604,10 +443,6 @@ int dsp3780I_ReadIStore(unsigned short usDspBaseIO, void __user *pvBuffer, unsigned long flags; unsigned short __user *pusBuffer = pvBuffer; - PRINTK_5(TRACE_3780I, - "3780i::dsp3780I_ReadIStore entry usDspBaseIO %x, pusBuffer %p, uCount %x, ulDSPAddr %lx\n", - usDspBaseIO, pusBuffer, uCount, ulDSPAddr); - /* * Set the initial MSA address. To convert from an instruction store * address to an MSA address @@ -631,17 +466,10 @@ int dsp3780I_ReadIStore(unsigned short usDspBaseIO, void __user *pvBuffer, if(put_user(val_hi, pusBuffer++)) return -EFAULT; - PRINTK_4(TRACE_3780I, - "3780I::dsp3780I_ReadIStore uCount %x val_lo %x val_hi %x\n", - uCount, val_lo, val_hi); - PaceMsaAccess(usDspBaseIO); } - PRINTK_1(TRACE_3780I, - "3780I::dsp3780I_ReadIStore exit bRC=true\n"); - return 0; } @@ -652,11 +480,6 @@ int dsp3780I_WriteIStore(unsigned short usDspBaseIO, void __user *pvBuffer, unsigned long flags; unsigned short __user *pusBuffer = pvBuffer; - PRINTK_5(TRACE_3780I, - "3780i::dsp3780I_WriteIStore entry usDspBaseIO %x, pusBuffer %p, uCount %x, ulDSPAddr %lx\n", - usDspBaseIO, pusBuffer, uCount, ulDSPAddr); - - /* * Set the initial MSA address. 
To convert from an instruction store * address to an MSA address @@ -680,17 +503,9 @@ int dsp3780I_WriteIStore(unsigned short usDspBaseIO, void __user *pvBuffer, OutWordDsp(DSP_MsaDataDSISHigh, val_hi); spin_unlock_irqrestore(&dsp_lock, flags); - PRINTK_4(TRACE_3780I, - "3780I::dsp3780I_WriteIStore uCount %x val_lo %x val_hi %x\n", - uCount, val_lo, val_hi); - PaceMsaAccess(usDspBaseIO); - } - PRINTK_1(TRACE_3780I, - "3780I::dsp3780I_WriteIStore exit bRC=true\n"); - return 0; } @@ -700,12 +515,6 @@ int dsp3780I_GetIPCSource(unsigned short usDspBaseIO, { unsigned long flags; DSP_HBRIDGE_CONTROL rHBridgeControl; - unsigned short temp; - - - PRINTK_3(TRACE_3780I, - "3780i::dsp3780I_GetIPCSource entry usDspBaseIO %x pusIPCSource %p\n", - usDspBaseIO, pusIPCSource); /* * Disable DSP to PC interrupts, read the interrupt register, @@ -717,22 +526,11 @@ int dsp3780I_GetIPCSource(unsigned short usDspBaseIO, OutWordDsp(DSP_HBridgeControl, MKWORD(rHBridgeControl)); *pusIPCSource = InWordDsp(DSP_Interrupt); - temp = (unsigned short) ~(*pusIPCSource); - - PRINTK_3(TRACE_3780I, - "3780i::dsp3780I_GetIPCSource, usIPCSource %x ~ %x\n", - *pusIPCSource, temp); - OutWordDsp(DSP_Interrupt, (unsigned short) ~(*pusIPCSource)); rHBridgeControl.EnableDspInt = true; OutWordDsp(DSP_HBridgeControl, MKWORD(rHBridgeControl)); spin_unlock_irqrestore(&dsp_lock, flags); - - PRINTK_2(TRACE_3780I, - "3780i::dsp3780I_GetIPCSource exit usIPCSource %x\n", - *pusIPCSource); - return 0; } diff --git a/drivers/char/mwave/3780i.h b/drivers/char/mwave/3780i.h index 95164246afd1..53dafceb20e0 100644 --- a/drivers/char/mwave/3780i.h +++ b/drivers/char/mwave/3780i.h @@ -261,7 +261,7 @@ typedef struct { * the only values maintained by the 3780i support layer are the saved UART * registers. */ -typedef struct _DSP_3780I_CONFIG_SETTINGS { +struct dsp_3780i_config_settings { /* Location of base configuration register */ unsigned short usBaseConfigIO; @@ -313,16 +313,16 @@ typedef struct _DSP_3780I_CONFIG_SETTINGS { unsigned char ucSCR; /* Scratch register */ unsigned char ucDLL; /* Divisor latch, low byte */ unsigned char ucDLM; /* Divisor latch, high byte */ -} DSP_3780I_CONFIG_SETTINGS; +}; /* 3780i support functions */ -int dsp3780I_EnableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings, +int dsp3780I_EnableDSP(struct dsp_3780i_config_settings *pSettings, unsigned short *pIrqMap, unsigned short *pDmaMap); -int dsp3780I_DisableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings); -int dsp3780I_Reset(DSP_3780I_CONFIG_SETTINGS * pSettings); -int dsp3780I_Run(DSP_3780I_CONFIG_SETTINGS * pSettings); +int dsp3780I_DisableDSP(struct dsp_3780i_config_settings *pSettings); +int dsp3780I_Reset(struct dsp_3780i_config_settings *pSettings); +int dsp3780I_Run(struct dsp_3780i_config_settings *pSettings); int dsp3780I_ReadDStore(unsigned short usDspBaseIO, void __user *pvBuffer, unsigned uCount, unsigned long ulDSPAddr); int dsp3780I_ReadAndClearDStore(unsigned short usDspBaseIO, diff --git a/drivers/char/mwave/Makefile b/drivers/char/mwave/Makefile index a24fe96e3c96..e56c1a375535 100644 --- a/drivers/char/mwave/Makefile +++ b/drivers/char/mwave/Makefile @@ -8,9 +8,3 @@ obj-$(CONFIG_MWAVE) += mwave.o mwave-y := mwavedd.o smapi.o tp3780i.o 3780i.o - -# To have the mwave driver disable other uarts if necessary -# ccflags-y := -DMWAVE_FUTZ_WITH_OTHER_DEVICES - -# To compile in lots (~20 KiB) of run-time enablable printk()s for debugging: -ccflags-y += -DMW_TRACE diff --git a/drivers/char/mwave/README b/drivers/char/mwave/README index c2a58f428bc8..6224aa814c62 
100644 --- a/drivers/char/mwave/README +++ b/drivers/char/mwave/README @@ -4,16 +4,6 @@ Module options The mwave module takes the following options. Note that these options are not saved by the BIOS and so do not persist after unload and reload. - mwave_debug=value, where value is bitwise OR of trace flags: - 0x0001 mwavedd api tracing - 0x0002 smapi api tracing - 0x0004 3780i tracing - 0x0008 tp3780i tracing - - Tracing only occurs if the driver has been compiled with the - MW_TRACE macro #defined (i.e. let ccflags-y := -DMW_TRACE - in the Makefile). - mwave_3780i_irq=5/7/10/11/15 If the dsp irq has not been setup and stored in bios by the thinkpad configuration utility then this parameter allows the diff --git a/drivers/char/mwave/mwavedd.c b/drivers/char/mwave/mwavedd.c index 11272d605ecd..640a9cb0dd8d 100644 --- a/drivers/char/mwave/mwavedd.c +++ b/drivers/char/mwave/mwavedd.c @@ -46,6 +46,8 @@ * First release to the public */ +#define pr_fmt(fmt) "mwavedd: " fmt + #include <linux/module.h> #include <linux/kernel.h> #include <linux/fs.h> @@ -75,131 +77,62 @@ MODULE_LICENSE("GPL"); * We'll depend on users using the tpctl utility to do that for now */ static DEFINE_MUTEX(mwave_mutex); -int mwave_debug = 0; int mwave_3780i_irq = 0; int mwave_3780i_io = 0; int mwave_uart_irq = 0; int mwave_uart_io = 0; -module_param(mwave_debug, int, 0); module_param_hw(mwave_3780i_irq, int, irq, 0); module_param_hw(mwave_3780i_io, int, ioport, 0); module_param_hw(mwave_uart_irq, int, irq, 0); module_param_hw(mwave_uart_io, int, ioport, 0); -static int mwave_open(struct inode *inode, struct file *file); -static int mwave_close(struct inode *inode, struct file *file); -static long mwave_ioctl(struct file *filp, unsigned int iocmd, - unsigned long ioarg); - -MWAVE_DEVICE_DATA mwave_s_mdd; - -static int mwave_open(struct inode *inode, struct file *file) -{ - unsigned int retval = 0; - - PRINTK_3(TRACE_MWAVE, - "mwavedd::mwave_open, entry inode %p file %p\n", - inode, file); - PRINTK_2(TRACE_MWAVE, - "mwavedd::mwave_open, exit return retval %x\n", retval); - - return retval; -} - -static int mwave_close(struct inode *inode, struct file *file) -{ - unsigned int retval = 0; - - PRINTK_3(TRACE_MWAVE, - "mwavedd::mwave_close, entry inode %p file %p\n", - inode, file); - - PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_close, exit retval %x\n", - retval); - - return retval; -} +struct mwave_device_data mwave_s_mdd; static long mwave_ioctl(struct file *file, unsigned int iocmd, unsigned long ioarg) { unsigned int retval = 0; - pMWAVE_DEVICE_DATA pDrvData = &mwave_s_mdd; + struct mwave_device_data *pDrvData = &mwave_s_mdd; void __user *arg = (void __user *)ioarg; - PRINTK_4(TRACE_MWAVE, - "mwavedd::mwave_ioctl, entry file %p cmd %x arg %x\n", - file, iocmd, (int) ioarg); - switch (iocmd) { case IOCTL_MW_RESET: - PRINTK_1(TRACE_MWAVE, - "mwavedd::mwave_ioctl, IOCTL_MW_RESET" - " calling tp3780I_ResetDSP\n"); mutex_lock(&mwave_mutex); retval = tp3780I_ResetDSP(&pDrvData->rBDData); mutex_unlock(&mwave_mutex); - PRINTK_2(TRACE_MWAVE, - "mwavedd::mwave_ioctl, IOCTL_MW_RESET" - " retval %x from tp3780I_ResetDSP\n", - retval); break; case IOCTL_MW_RUN: - PRINTK_1(TRACE_MWAVE, - "mwavedd::mwave_ioctl, IOCTL_MW_RUN" - " calling tp3780I_StartDSP\n"); mutex_lock(&mwave_mutex); retval = tp3780I_StartDSP(&pDrvData->rBDData); mutex_unlock(&mwave_mutex); - PRINTK_2(TRACE_MWAVE, - "mwavedd::mwave_ioctl, IOCTL_MW_RUN" - " retval %x from tp3780I_StartDSP\n", - retval); break; case IOCTL_MW_DSP_ABILITIES: { - MW_ABILITIES rAbilities; + struct 
mw_abilities rAbilities; - PRINTK_1(TRACE_MWAVE, - "mwavedd::mwave_ioctl," - " IOCTL_MW_DSP_ABILITIES calling" - " tp3780I_QueryAbilities\n"); mutex_lock(&mwave_mutex); retval = tp3780I_QueryAbilities(&pDrvData->rBDData, &rAbilities); mutex_unlock(&mwave_mutex); - PRINTK_2(TRACE_MWAVE, - "mwavedd::mwave_ioctl, IOCTL_MW_DSP_ABILITIES" - " retval %x from tp3780I_QueryAbilities\n", - retval); if (retval == 0) { - if( copy_to_user(arg, &rAbilities, - sizeof(MW_ABILITIES)) ) + if (copy_to_user(arg, &rAbilities, sizeof(rAbilities))) return -EFAULT; } - PRINTK_2(TRACE_MWAVE, - "mwavedd::mwave_ioctl, IOCTL_MW_DSP_ABILITIES" - " exit retval %x\n", - retval); } break; case IOCTL_MW_READ_DATA: case IOCTL_MW_READCLEAR_DATA: { - MW_READWRITE rReadData; + struct mw_readwrite rReadData; unsigned short __user *pusBuffer = NULL; if( copy_from_user(&rReadData, arg, - sizeof(MW_READWRITE)) ) + sizeof(struct mw_readwrite)) ) return -EFAULT; pusBuffer = (unsigned short __user *) (rReadData.pBuf); - PRINTK_4(TRACE_MWAVE, - "mwavedd::mwave_ioctl IOCTL_MW_READ_DATA," - " size %lx, ioarg %lx pusBuffer %p\n", - rReadData.ulDataLength, ioarg, pusBuffer); mutex_lock(&mwave_mutex); retval = tp3780I_ReadWriteDspDStore(&pDrvData->rBDData, iocmd, @@ -211,19 +144,13 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd, break; case IOCTL_MW_READ_INST: { - MW_READWRITE rReadData; + struct mw_readwrite rReadData; unsigned short __user *pusBuffer = NULL; - if( copy_from_user(&rReadData, arg, - sizeof(MW_READWRITE)) ) + if (copy_from_user(&rReadData, arg, sizeof(rReadData))) return -EFAULT; pusBuffer = (unsigned short __user *) (rReadData.pBuf); - PRINTK_4(TRACE_MWAVE, - "mwavedd::mwave_ioctl IOCTL_MW_READ_INST," - " size %lx, ioarg %lx pusBuffer %p\n", - rReadData.ulDataLength / 2, ioarg, - pusBuffer); mutex_lock(&mwave_mutex); retval = tp3780I_ReadWriteDspDStore(&pDrvData->rBDData, iocmd, pusBuffer, @@ -234,19 +161,13 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd, break; case IOCTL_MW_WRITE_DATA: { - MW_READWRITE rWriteData; + struct mw_readwrite rWriteData; unsigned short __user *pusBuffer = NULL; - if( copy_from_user(&rWriteData, arg, - sizeof(MW_READWRITE)) ) + if (copy_from_user(&rWriteData, arg, sizeof(rWriteData))) return -EFAULT; pusBuffer = (unsigned short __user *) (rWriteData.pBuf); - PRINTK_4(TRACE_MWAVE, - "mwavedd::mwave_ioctl IOCTL_MW_WRITE_DATA," - " size %lx, ioarg %lx pusBuffer %p\n", - rWriteData.ulDataLength, ioarg, - pusBuffer); mutex_lock(&mwave_mutex); retval = tp3780I_ReadWriteDspDStore(&pDrvData->rBDData, iocmd, pusBuffer, @@ -257,19 +178,13 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd, break; case IOCTL_MW_WRITE_INST: { - MW_READWRITE rWriteData; + struct mw_readwrite rWriteData; unsigned short __user *pusBuffer = NULL; - if( copy_from_user(&rWriteData, arg, - sizeof(MW_READWRITE)) ) + if (copy_from_user(&rWriteData, arg, sizeof(rWriteData))) return -EFAULT; pusBuffer = (unsigned short __user *)(rWriteData.pBuf); - PRINTK_4(TRACE_MWAVE, - "mwavedd::mwave_ioctl IOCTL_MW_WRITE_INST," - " size %lx, ioarg %lx pusBuffer %p\n", - rWriteData.ulDataLength, ioarg, - pusBuffer); mutex_lock(&mwave_mutex); retval = tp3780I_ReadWriteDspIStore(&pDrvData->rBDData, iocmd, pusBuffer, @@ -283,30 +198,17 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd, unsigned int ipcnum = (unsigned int) ioarg; if (ipcnum >= ARRAY_SIZE(pDrvData->IPCs)) { - PRINTK_ERROR(KERN_ERR_MWAVE - "mwavedd::mwave_ioctl:" - " IOCTL_MW_REGISTER_IPC:" - " Error: Invalid ipcnum 
%x\n", - ipcnum); + pr_err("%s: IOCTL_MW_REGISTER_IPC: Error: Invalid ipcnum %x\n", + __func__, ipcnum); return -EINVAL; } ipcnum = array_index_nospec(ipcnum, ARRAY_SIZE(pDrvData->IPCs)); - PRINTK_3(TRACE_MWAVE, - "mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC" - " ipcnum %x entry usIntCount %x\n", - ipcnum, - pDrvData->IPCs[ipcnum].usIntCount); mutex_lock(&mwave_mutex); pDrvData->IPCs[ipcnum].bIsHere = false; pDrvData->IPCs[ipcnum].bIsEnabled = true; mutex_unlock(&mwave_mutex); - - PRINTK_2(TRACE_MWAVE, - "mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC" - " ipcnum %x exit\n", - ipcnum); } break; @@ -314,28 +216,17 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd, unsigned int ipcnum = (unsigned int) ioarg; if (ipcnum >= ARRAY_SIZE(pDrvData->IPCs)) { - PRINTK_ERROR(KERN_ERR_MWAVE - "mwavedd::mwave_ioctl:" - " IOCTL_MW_GET_IPC: Error:" - " Invalid ipcnum %x\n", ipcnum); + pr_err("%s: IOCTL_MW_GET_IPC: Error: Invalid ipcnum %x\n", __func__, + ipcnum); return -EINVAL; } ipcnum = array_index_nospec(ipcnum, ARRAY_SIZE(pDrvData->IPCs)); - PRINTK_3(TRACE_MWAVE, - "mwavedd::mwave_ioctl IOCTL_MW_GET_IPC" - " ipcnum %x, usIntCount %x\n", - ipcnum, - pDrvData->IPCs[ipcnum].usIntCount); - + mutex_lock(&mwave_mutex); if (pDrvData->IPCs[ipcnum].bIsEnabled == true) { DECLARE_WAITQUEUE(wait, current); - PRINTK_2(TRACE_MWAVE, - "mwavedd::mwave_ioctl, thread for" - " ipc %x going to sleep\n", - ipcnum); add_wait_queue(&pDrvData->IPCs[ipcnum].ipc_wait_queue, &wait); pDrvData->IPCs[ipcnum].bIsHere = true; set_current_state(TASK_INTERRUPTIBLE); @@ -343,31 +234,15 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd, /* the interrupt handler while we were gone */ if (pDrvData->IPCs[ipcnum].usIntCount == 1) { /* first int has occurred (race condition) */ pDrvData->IPCs[ipcnum].usIntCount = 2; /* first int has been handled */ - PRINTK_2(TRACE_MWAVE, - "mwavedd::mwave_ioctl" - " IOCTL_MW_GET_IPC ipcnum %x" - " handling first int\n", - ipcnum); } else { /* either 1st int has not yet occurred, or we have already handled the first int */ schedule(); if (pDrvData->IPCs[ipcnum].usIntCount == 1) { pDrvData->IPCs[ipcnum].usIntCount = 2; } - PRINTK_2(TRACE_MWAVE, - "mwavedd::mwave_ioctl" - " IOCTL_MW_GET_IPC ipcnum %x" - " woke up and returning to" - " application\n", - ipcnum); } pDrvData->IPCs[ipcnum].bIsHere = false; remove_wait_queue(&pDrvData->IPCs[ipcnum].ipc_wait_queue, &wait); set_current_state(TASK_RUNNING); - PRINTK_2(TRACE_MWAVE, - "mwavedd::mwave_ioctl IOCTL_MW_GET_IPC," - " returning thread for ipc %x" - " processing\n", - ipcnum); } mutex_unlock(&mwave_mutex); } @@ -376,16 +251,9 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd, case IOCTL_MW_UNREGISTER_IPC: { unsigned int ipcnum = (unsigned int) ioarg; - PRINTK_2(TRACE_MWAVE, - "mwavedd::mwave_ioctl IOCTL_MW_UNREGISTER_IPC" - " ipcnum %x\n", - ipcnum); if (ipcnum >= ARRAY_SIZE(pDrvData->IPCs)) { - PRINTK_ERROR(KERN_ERR_MWAVE - "mwavedd::mwave_ioctl:" - " IOCTL_MW_UNREGISTER_IPC:" - " Error: Invalid ipcnum %x\n", - ipcnum); + pr_err("%s: IOCTL_MW_UNREGISTER_IPC: Error: Invalid ipcnum %x\n", + __func__, ipcnum); return -EINVAL; } ipcnum = array_index_nospec(ipcnum, @@ -405,35 +273,9 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd, return -ENOTTY; } /* switch */ - PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_ioctl, exit retval %x\n", retval); - return retval; } - -static ssize_t mwave_read(struct file *file, char __user *buf, size_t count, - loff_t * ppos) -{ - PRINTK_5(TRACE_MWAVE, - "mwavedd::mwave_read entry 
file %p, buf %p, count %zx ppos %p\n", - file, buf, count, ppos); - - return -EINVAL; -} - - -static ssize_t mwave_write(struct file *file, const char __user *buf, - size_t count, loff_t * ppos) -{ - PRINTK_5(TRACE_MWAVE, - "mwavedd::mwave_write entry file %p, buf %p," - " count %zx ppos %p\n", - file, buf, count, ppos); - - return -EINVAL; -} - - static int register_serial_portandirq(unsigned int port, int irq) { struct uart_8250_port uart; @@ -446,9 +288,7 @@ static int register_serial_portandirq(unsigned int port, int irq) /* OK */ break; default: - PRINTK_ERROR(KERN_ERR_MWAVE - "mwavedd::register_serial_portandirq:" - " Error: Illegal port %x\n", port ); + pr_err("%s: Error: Illegal port %x\n", __func__, port); return -1; } /* switch */ /* port is okay */ @@ -461,9 +301,7 @@ static int register_serial_portandirq(unsigned int port, int irq) /* OK */ break; default: - PRINTK_ERROR(KERN_ERR_MWAVE - "mwavedd::register_serial_portandirq:" - " Error: Illegal irq %x\n", irq ); + pr_err("%s: Error: Illegal irq %x\n", __func__, irq); return -1; } /* switch */ /* irq is okay */ @@ -478,56 +316,14 @@ static int register_serial_portandirq(unsigned int port, int irq) return serial8250_register_8250_port(&uart); } - static const struct file_operations mwave_fops = { .owner = THIS_MODULE, - .read = mwave_read, - .write = mwave_write, .unlocked_ioctl = mwave_ioctl, - .open = mwave_open, - .release = mwave_close, .llseek = default_llseek, }; - static struct miscdevice mwave_misc_dev = { MWAVE_MINOR, "mwave", &mwave_fops }; -#if 0 /* totally b0rked */ -/* - * sysfs support <paulsch@us.ibm.com> - */ - -struct device mwave_device; - -/* Prevent code redundancy, create a macro for mwave_show_* functions. */ -#define mwave_show_function(attr_name, format_string, field) \ -static ssize_t mwave_show_##attr_name(struct device *dev, struct device_attribute *attr, char *buf) \ -{ \ - DSP_3780I_CONFIG_SETTINGS *pSettings = \ - &mwave_s_mdd.rBDData.rDspSettings; \ - return sprintf(buf, format_string, pSettings->field); \ -} - -/* All of our attributes are read attributes. 
*/ -#define mwave_dev_rd_attr(attr_name, format_string, field) \ - mwave_show_function(attr_name, format_string, field) \ -static DEVICE_ATTR(attr_name, S_IRUGO, mwave_show_##attr_name, NULL) - -mwave_dev_rd_attr (3780i_dma, "%i\n", usDspDma); -mwave_dev_rd_attr (3780i_irq, "%i\n", usDspIrq); -mwave_dev_rd_attr (3780i_io, "%#.4x\n", usDspBaseIO); -mwave_dev_rd_attr (uart_irq, "%i\n", usUartIrq); -mwave_dev_rd_attr (uart_io, "%#.4x\n", usUartBaseIO); - -static struct device_attribute * const mwave_dev_attrs[] = { - &dev_attr_3780i_dma, - &dev_attr_3780i_irq, - &dev_attr_3780i_io, - &dev_attr_uart_irq, - &dev_attr_uart_io, -}; -#endif - /* * mwave_init is called on module load * @@ -536,20 +332,7 @@ static struct device_attribute * const mwave_dev_attrs[] = { */ static void mwave_exit(void) { - pMWAVE_DEVICE_DATA pDrvData = &mwave_s_mdd; - - PRINTK_1(TRACE_MWAVE, "mwavedd::mwave_exit entry\n"); - -#if 0 - for (i = 0; i < pDrvData->nr_registered_attrs; i++) - device_remove_file(&mwave_device, mwave_dev_attrs[i]); - pDrvData->nr_registered_attrs = 0; - - if (pDrvData->device_registered) { - device_unregister(&mwave_device); - pDrvData->device_registered = false; - } -#endif + struct mwave_device_data *pDrvData = &mwave_s_mdd; if ( pDrvData->sLine >= 0 ) { serial8250_unregister_port(pDrvData->sLine); @@ -566,8 +349,6 @@ static void mwave_exit(void) if (pDrvData->bBDInitialized) { tp3780I_Cleanup(&pDrvData->rBDData); } - - PRINTK_1(TRACE_MWAVE, "mwavedd::mwave_exit exit\n"); } module_exit(mwave_exit); @@ -576,11 +357,9 @@ static int __init mwave_init(void) { int i; int retval = 0; - pMWAVE_DEVICE_DATA pDrvData = &mwave_s_mdd; + struct mwave_device_data *pDrvData = &mwave_s_mdd; - PRINTK_1(TRACE_MWAVE, "mwavedd::mwave_init entry\n"); - - memset(&mwave_s_mdd, 0, sizeof(MWAVE_DEVICE_DATA)); + memset(&mwave_s_mdd, 0, sizeof(mwave_s_mdd)); pDrvData->bBDInitialized = false; pDrvData->bResourcesClaimed = false; @@ -597,60 +376,34 @@ static int __init mwave_init(void) } retval = tp3780I_InitializeBoardData(&pDrvData->rBDData); - PRINTK_2(TRACE_MWAVE, - "mwavedd::mwave_init, return from tp3780I_InitializeBoardData" - " retval %x\n", - retval); if (retval) { - PRINTK_ERROR(KERN_ERR_MWAVE - "mwavedd::mwave_init: Error:" - " Failed to initialize board data\n"); + pr_err("%s: Error: Failed to initialize board data\n", __func__); goto cleanup_error; } pDrvData->bBDInitialized = true; retval = tp3780I_CalcResources(&pDrvData->rBDData); - PRINTK_2(TRACE_MWAVE, - "mwavedd::mwave_init, return from tp3780I_CalcResources" - " retval %x\n", - retval); if (retval) { - PRINTK_ERROR(KERN_ERR_MWAVE - "mwavedd:mwave_init: Error:" - " Failed to calculate resources\n"); + pr_err("%s: Error: Failed to calculate resources\n", __func__); goto cleanup_error; } retval = tp3780I_ClaimResources(&pDrvData->rBDData); - PRINTK_2(TRACE_MWAVE, - "mwavedd::mwave_init, return from tp3780I_ClaimResources" - " retval %x\n", - retval); if (retval) { - PRINTK_ERROR(KERN_ERR_MWAVE - "mwavedd:mwave_init: Error:" - " Failed to claim resources\n"); + pr_err("%s: Error: Failed to claim resources\n", __func__); goto cleanup_error; } pDrvData->bResourcesClaimed = true; retval = tp3780I_EnableDSP(&pDrvData->rBDData); - PRINTK_2(TRACE_MWAVE, - "mwavedd::mwave_init, return from tp3780I_EnableDSP" - " retval %x\n", - retval); if (retval) { - PRINTK_ERROR(KERN_ERR_MWAVE - "mwavedd:mwave_init: Error:" - " Failed to enable DSP\n"); + pr_err("%s: Error: Failed to enable DSP\n", __func__); goto cleanup_error; } pDrvData->bDSPEnabled = true; if 
(misc_register(&mwave_misc_dev) < 0) { - PRINTK_ERROR(KERN_ERR_MWAVE - "mwavedd:mwave_init: Error:" - " Failed to register misc device\n"); + pr_err("%s: Error: Failed to register misc device\n", __func__); goto cleanup_error; } pDrvData->bMwaveDevRegistered = true; @@ -660,40 +413,16 @@ static int __init mwave_init(void) pDrvData->rBDData.rDspSettings.usUartIrq ); if (pDrvData->sLine < 0) { - PRINTK_ERROR(KERN_ERR_MWAVE - "mwavedd:mwave_init: Error:" - " Failed to register serial driver\n"); + pr_err("%s: Error: Failed to register serial driver\n", __func__); goto cleanup_error; } /* uart is registered */ -#if 0 - /* sysfs */ - memset(&mwave_device, 0, sizeof (struct device)); - dev_set_name(&mwave_device, "mwave"); - - if (device_register(&mwave_device)) - goto cleanup_error; - pDrvData->device_registered = true; - for (i = 0; i < ARRAY_SIZE(mwave_dev_attrs); i++) { - if(device_create_file(&mwave_device, mwave_dev_attrs[i])) { - PRINTK_ERROR(KERN_ERR_MWAVE - "mwavedd:mwave_init: Error:" - " Failed to create sysfs file %s\n", - mwave_dev_attrs[i]->attr.name); - goto cleanup_error; - } - pDrvData->nr_registered_attrs++; - } -#endif - /* SUCCESS! */ return 0; cleanup_error: - PRINTK_ERROR(KERN_ERR_MWAVE - "mwavedd::mwave_init: Error:" - " Failed to initialize\n"); + pr_err("%s: Error: Failed to initialize\n", __func__); mwave_exit(); /* clean up */ return -EIO; diff --git a/drivers/char/mwave/mwavedd.h b/drivers/char/mwave/mwavedd.h index 21cb09c7bed7..e1da1493eec5 100644 --- a/drivers/char/mwave/mwavedd.h +++ b/drivers/char/mwave/mwavedd.h @@ -56,97 +56,35 @@ #include <linux/uaccess.h> #include <linux/wait.h> -extern int mwave_debug; extern int mwave_3780i_irq; extern int mwave_3780i_io; extern int mwave_uart_irq; extern int mwave_uart_io; -#define PRINTK_ERROR printk -#define KERN_ERR_MWAVE KERN_ERR "mwave: " - -#define TRACE_MWAVE 0x0001 -#define TRACE_SMAPI 0x0002 -#define TRACE_3780I 0x0004 -#define TRACE_TP3780I 0x0008 - -#ifdef MW_TRACE -#define PRINTK_1(f,s) \ - if (f & (mwave_debug)) { \ - printk(s); \ - } - -#define PRINTK_2(f,s,v1) \ - if (f & (mwave_debug)) { \ - printk(s,v1); \ - } - -#define PRINTK_3(f,s,v1,v2) \ - if (f & (mwave_debug)) { \ - printk(s,v1,v2); \ - } - -#define PRINTK_4(f,s,v1,v2,v3) \ - if (f & (mwave_debug)) { \ - printk(s,v1,v2,v3); \ - } - -#define PRINTK_5(f,s,v1,v2,v3,v4) \ - if (f & (mwave_debug)) { \ - printk(s,v1,v2,v3,v4); \ - } - -#define PRINTK_6(f,s,v1,v2,v3,v4,v5) \ - if (f & (mwave_debug)) { \ - printk(s,v1,v2,v3,v4,v5); \ - } - -#define PRINTK_7(f,s,v1,v2,v3,v4,v5,v6) \ - if (f & (mwave_debug)) { \ - printk(s,v1,v2,v3,v4,v5,v6); \ - } - -#define PRINTK_8(f,s,v1,v2,v3,v4,v5,v6,v7) \ - if (f & (mwave_debug)) { \ - printk(s,v1,v2,v3,v4,v5,v6,v7); \ - } - -#else -#define PRINTK_1(f,s) -#define PRINTK_2(f,s,v1) -#define PRINTK_3(f,s,v1,v2) -#define PRINTK_4(f,s,v1,v2,v3) -#define PRINTK_5(f,s,v1,v2,v3,v4) -#define PRINTK_6(f,s,v1,v2,v3,v4,v5) -#define PRINTK_7(f,s,v1,v2,v3,v4,v5,v6) -#define PRINTK_8(f,s,v1,v2,v3,v4,v5,v6,v7) -#endif - - -typedef struct _MWAVE_IPC { +struct mwave_ipc { unsigned short usIntCount; /* 0=none, 1=first, 2=greater than 1st */ bool bIsEnabled; bool bIsHere; /* entry spin lock */ wait_queue_head_t ipc_wait_queue; -} MWAVE_IPC; +}; -typedef struct _MWAVE_DEVICE_DATA { - THINKPAD_BD_DATA rBDData; /* board driver's data area */ +struct mwave_device_data { + struct thinkpad_bd_data rBDData; /* board driver's data area */ unsigned long ulIPCSource_ISR; /* IPC source bits for recently processed intr, set during ISR processing */ 
unsigned long ulIPCSource_DPC; /* IPC source bits for recently processed intr, set during DPC processing */ bool bBDInitialized; bool bResourcesClaimed; bool bDSPEnabled; bool bDSPReset; - MWAVE_IPC IPCs[16]; + struct mwave_ipc IPCs[16]; bool bMwaveDevRegistered; short sLine; int nr_registered_attrs; int device_registered; -} MWAVE_DEVICE_DATA, *pMWAVE_DEVICE_DATA; +}; -extern MWAVE_DEVICE_DATA mwave_s_mdd; +extern struct mwave_device_data mwave_s_mdd; #endif diff --git a/drivers/char/mwave/mwavepub.h b/drivers/char/mwave/mwavepub.h index 60c961ae23b4..280327bdaa38 100644 --- a/drivers/char/mwave/mwavepub.h +++ b/drivers/char/mwave/mwavepub.h @@ -53,7 +53,7 @@ #include <linux/miscdevice.h> -typedef struct _MW_ABILITIES { +struct mw_abilities { unsigned long instr_per_sec; unsigned long data_size; unsigned long inst_size; @@ -63,27 +63,27 @@ typedef struct _MW_ABILITIES { unsigned long component_list[7]; char mwave_os_name[16]; char bios_task_name[16]; -} MW_ABILITIES, *pMW_ABILITIES; +}; -typedef struct _MW_READWRITE { +struct mw_readwrite { unsigned short usDspAddress; /* The dsp address */ unsigned long ulDataLength; /* The size in bytes of the data or user buffer */ void __user *pBuf; /* Input:variable sized buffer */ -} MW_READWRITE, *pMW_READWRITE; +}; #define IOCTL_MW_RESET _IO(MWAVE_MINOR,1) #define IOCTL_MW_RUN _IO(MWAVE_MINOR,2) -#define IOCTL_MW_DSP_ABILITIES _IOR(MWAVE_MINOR,3,MW_ABILITIES) -#define IOCTL_MW_READ_DATA _IOR(MWAVE_MINOR,4,MW_READWRITE) -#define IOCTL_MW_READCLEAR_DATA _IOR(MWAVE_MINOR,5,MW_READWRITE) -#define IOCTL_MW_READ_INST _IOR(MWAVE_MINOR,6,MW_READWRITE) -#define IOCTL_MW_WRITE_DATA _IOW(MWAVE_MINOR,7,MW_READWRITE) -#define IOCTL_MW_WRITE_INST _IOW(MWAVE_MINOR,8,MW_READWRITE) +#define IOCTL_MW_DSP_ABILITIES _IOR(MWAVE_MINOR,3,struct mw_abilities) +#define IOCTL_MW_READ_DATA _IOR(MWAVE_MINOR,4,struct mw_readwrite) +#define IOCTL_MW_READCLEAR_DATA _IOR(MWAVE_MINOR,5,struct mw_readwrite) +#define IOCTL_MW_READ_INST _IOR(MWAVE_MINOR,6,struct mw_readwrite) +#define IOCTL_MW_WRITE_DATA _IOW(MWAVE_MINOR,7,struct mw_readwrite) +#define IOCTL_MW_WRITE_INST _IOW(MWAVE_MINOR,8,struct mw_readwrite) #define IOCTL_MW_REGISTER_IPC _IOW(MWAVE_MINOR,9,int) #define IOCTL_MW_UNREGISTER_IPC _IOW(MWAVE_MINOR,10,int) #define IOCTL_MW_GET_IPC _IOW(MWAVE_MINOR,11,int) -#define IOCTL_MW_TRACE _IOR(MWAVE_MINOR,12,MW_READWRITE) +#define IOCTL_MW_TRACE _IOR(MWAVE_MINOR,12,struct mw_readwrite) #endif diff --git a/drivers/char/mwave/smapi.c b/drivers/char/mwave/smapi.c index f8d79d393b69..df6354b24339 100644 --- a/drivers/char/mwave/smapi.c +++ b/drivers/char/mwave/smapi.c @@ -46,6 +46,8 @@ * First release to the public */ +#define pr_fmt(fmt) "smapi: " fmt + #include <linux/kernel.h> #include <linux/mc146818rtc.h> /* CMOS defines */ #include "smapi.h" @@ -69,10 +71,6 @@ static int smapi_request(unsigned short inBX, unsigned short inCX, unsigned short usSmapiOK = -EIO, *pusSmapiOK = &usSmapiOK; unsigned int inBXCX = (inBX << 16) | inCX; unsigned int inDISI = (inDI << 16) | inSI; - int retval = 0; - - PRINTK_5(TRACE_SMAPI, "inBX %x inCX %x inDI %x inSI %x\n", - inBX, inCX, inDI, inSI); __asm__ __volatile__("movw $0x5380,%%ax\n\t" "movl %7,%%ebx\n\t" @@ -107,10 +105,6 @@ static int smapi_request(unsigned short inBX, unsigned short inCX, :"%eax", "%ebx", "%ecx", "%edx", "%edi", "%esi"); - PRINTK_8(TRACE_SMAPI, - "myoutAX %x myoutBX %x myoutCX %x myoutDX %x myoutDI %x myoutSI %x usSmapiOK %x\n", - myoutAX, myoutBX, myoutCX, myoutDX, myoutDI, myoutSI, - usSmapiOK); *outAX = myoutAX; *outBX = 
myoutBX; *outCX = myoutCX; @@ -118,13 +112,11 @@ static int smapi_request(unsigned short inBX, unsigned short inCX, *outDI = myoutDI; *outSI = myoutSI; - retval = (usSmapiOK == 1) ? 0 : -EIO; - PRINTK_2(TRACE_SMAPI, "smapi::smapi_request exit retval %x\n", retval); - return retval; + return usSmapiOK == 1 ? 0 : -EIO; } -int smapi_query_DSP_cfg(SMAPI_DSP_SETTINGS * pSettings) +int smapi_query_DSP_cfg(struct smapi_dsp_settings *pSettings) { int bRC; unsigned short usAX, usBX, usCX, usDX, usDI, usSI; @@ -134,17 +126,13 @@ int smapi_query_DSP_cfg(SMAPI_DSP_SETTINGS * pSettings) static const unsigned short ausUartBases[] = { 0x03F8, 0x02F8, 0x03E8, 0x02E8 }; - PRINTK_1(TRACE_SMAPI, "smapi::smapi_query_DSP_cfg entry\n"); - bRC = smapi_request(0x1802, 0x0000, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) { - PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_query_DSP_cfg: Error: Could not get DSP Settings. Aborting.\n"); + pr_err("%s: Error: Could not get DSP Settings. Aborting.\n", __func__); return bRC; } - PRINTK_1(TRACE_SMAPI, "smapi::smapi_query_DSP_cfg, smapi_request OK\n"); - pSettings->bDSPPresent = ((usBX & 0x0100) != 0); pSettings->bDSPEnabled = ((usCX & 0x0001) != 0); pSettings->usDspIRQ = usSI & 0x00FF; @@ -154,27 +142,20 @@ int smapi_query_DSP_cfg(SMAPI_DSP_SETTINGS * pSettings) } else { pSettings->usDspBaseIO = 0; } - PRINTK_6(TRACE_SMAPI, - "smapi::smapi_query_DSP_cfg get DSP Settings bDSPPresent %x bDSPEnabled %x usDspIRQ %x usDspDMA %x usDspBaseIO %x\n", - pSettings->bDSPPresent, pSettings->bDSPEnabled, - pSettings->usDspIRQ, pSettings->usDspDMA, - pSettings->usDspBaseIO); /* check for illegal values */ if ( pSettings->usDspBaseIO == 0 ) - PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_query_DSP_cfg: Worry: DSP base I/O address is 0\n"); + pr_err("%s: Worry: DSP base I/O address is 0\n", __func__); if ( pSettings->usDspIRQ == 0 ) - PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_query_DSP_cfg: Worry: DSP IRQ line is 0\n"); + pr_err("%s: Worry: DSP IRQ line is 0\n", __func__); bRC = smapi_request(0x1804, 0x0000, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) { - PRINTK_ERROR("smapi::smapi_query_DSP_cfg: Error: Could not get DSP modem settings. Aborting.\n"); + pr_err("%s: Error: Could not get DSP modem settings. 
Aborting.\n", __func__); return bRC; } - PRINTK_1(TRACE_SMAPI, "smapi::smapi_query_DSP_cfg, smapi_request OK\n"); - pSettings->bModemEnabled = ((usCX & 0x0001) != 0); pSettings->usUartIRQ = usSI & 0x000F; if (((usSI & 0xFF00) >> 8) < ARRAY_SIZE(ausUartBases)) { @@ -183,19 +164,11 @@ int smapi_query_DSP_cfg(SMAPI_DSP_SETTINGS * pSettings) pSettings->usUartBaseIO = 0; } - PRINTK_4(TRACE_SMAPI, - "smapi::smapi_query_DSP_cfg get DSP modem settings bModemEnabled %x usUartIRQ %x usUartBaseIO %x\n", - pSettings->bModemEnabled, - pSettings->usUartIRQ, - pSettings->usUartBaseIO); - /* check for illegal values */ if ( pSettings->usUartBaseIO == 0 ) - PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_query_DSP_cfg: Worry: UART base I/O address is 0\n"); + pr_err("%s: Worry: UART base I/O address is 0\n", __func__); if ( pSettings->usUartIRQ == 0 ) - PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_query_DSP_cfg: Worry: UART IRQ line is 0\n"); - - PRINTK_2(TRACE_SMAPI, "smapi::smapi_query_DSP_cfg exit bRC %x\n", bRC); + pr_err("%s: Worry: UART IRQ line is 0\n", __func__); return bRC; } @@ -218,17 +191,14 @@ int smapi_set_DSP_cfg(void) unsigned short dspio_index = 0, uartio_index = 0; - PRINTK_5(TRACE_SMAPI, - "smapi::smapi_set_DSP_cfg entry mwave_3780i_irq %x mwave_3780i_io %x mwave_uart_irq %x mwave_uart_io %x\n", - mwave_3780i_irq, mwave_3780i_io, mwave_uart_irq, mwave_uart_io); - if (mwave_3780i_io) { for (i = 0; i < ARRAY_SIZE(ausDspBases); i++) { if (mwave_3780i_io == ausDspBases[i]) break; } if (i == ARRAY_SIZE(ausDspBases)) { - PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_3780i_io address %x. Aborting.\n", mwave_3780i_io); + pr_err("%s: Error: Invalid mwave_3780i_io address %x. Aborting.\n", + __func__, mwave_3780i_io); return bRC; } dspio_index = i; @@ -240,7 +210,8 @@ int smapi_set_DSP_cfg(void) break; } if (i == ARRAY_SIZE(ausDspIrqs)) { - PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_3780i_irq %x. Aborting.\n", mwave_3780i_irq); + pr_err("%s: Error: Invalid mwave_3780i_irq %x. Aborting.\n", __func__, + mwave_3780i_irq); return bRC; } } @@ -251,7 +222,8 @@ int smapi_set_DSP_cfg(void) break; } if (i == ARRAY_SIZE(ausUartBases)) { - PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_uart_io address %x. Aborting.\n", mwave_uart_io); + pr_err("%s: Error: Invalid mwave_uart_io address %x. Aborting.\n", __func__, + mwave_uart_io); return bRC; } uartio_index = i; @@ -264,7 +236,8 @@ int smapi_set_DSP_cfg(void) break; } if (i == ARRAY_SIZE(ausUartIrqs)) { - PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_uart_irq %x. Aborting.\n", mwave_uart_irq); + pr_err("%s: Error: Invalid mwave_uart_irq %x. 
Aborting.\n", __func__, + mwave_uart_irq); return bRC; } } @@ -279,46 +252,15 @@ int smapi_set_DSP_cfg(void) if (usBX & 0x0100) { /* serial port A is present */ if (usCX & 1) { /* serial port is enabled */ if ((usSI & 0xFF) == mwave_uart_irq) { -#ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES - PRINTK_ERROR(KERN_ERR_MWAVE - "smapi::smapi_set_DSP_cfg: Serial port A irq %x conflicts with mwave_uart_irq %x\n", usSI & 0xFF, mwave_uart_irq); -#else - PRINTK_3(TRACE_SMAPI, - "smapi::smapi_set_DSP_cfg: Serial port A irq %x conflicts with mwave_uart_irq %x\n", usSI & 0xFF, mwave_uart_irq); -#endif -#ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES - PRINTK_1(TRACE_SMAPI, - "smapi::smapi_set_DSP_cfg Disabling conflicting serial port\n"); - bRC = smapi_request(0x1403, 0x0100, 0, usSI, - &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); - if (bRC) goto exit_smapi_request_error; - bRC = smapi_request(0x1402, 0x0000, 0, 0, - &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); - if (bRC) goto exit_smapi_request_error; -#else + pr_err("%s: Serial port A irq %x conflicts with mwave_uart_irq %x\n", + __func__, usSI & 0xFF, mwave_uart_irq); goto exit_conflict; -#endif } else { if ((usSI >> 8) == uartio_index) { -#ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES - PRINTK_ERROR(KERN_ERR_MWAVE - "smapi::smapi_set_DSP_cfg: Serial port A base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI >> 8], ausUartBases[uartio_index]); -#else - PRINTK_3(TRACE_SMAPI, - "smapi::smapi_set_DSP_cfg: Serial port A base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI >> 8], ausUartBases[uartio_index]); -#endif -#ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES - PRINTK_1(TRACE_SMAPI, - "smapi::smapi_set_DSP_cfg Disabling conflicting serial port A\n"); - bRC = smapi_request (0x1403, 0x0100, 0, usSI, - &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); - if (bRC) goto exit_smapi_request_error; - bRC = smapi_request (0x1402, 0x0000, 0, 0, - &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); - if (bRC) goto exit_smapi_request_error; -#else + pr_err("%s: Serial port A base I/O address %x conflicts with mwave uart I/O %x\n", + __func__, ausUartBases[usSI >> 8], + ausUartBases[uartio_index]); goto exit_conflict; -#endif } } } @@ -332,46 +274,15 @@ int smapi_set_DSP_cfg(void) if (usBX & 0x0100) { /* serial port B is present */ if (usCX & 1) { /* serial port is enabled */ if ((usSI & 0xFF) == mwave_uart_irq) { -#ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES - PRINTK_ERROR(KERN_ERR_MWAVE - "smapi::smapi_set_DSP_cfg: Serial port B irq %x conflicts with mwave_uart_irq %x\n", usSI & 0xFF, mwave_uart_irq); -#else - PRINTK_3(TRACE_SMAPI, - "smapi::smapi_set_DSP_cfg: Serial port B irq %x conflicts with mwave_uart_irq %x\n", usSI & 0xFF, mwave_uart_irq); -#endif -#ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES - PRINTK_1(TRACE_SMAPI, - "smapi::smapi_set_DSP_cfg Disabling conflicting serial port B\n"); - bRC = smapi_request(0x1405, 0x0100, 0, usSI, - &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); - if (bRC) goto exit_smapi_request_error; - bRC = smapi_request(0x1404, 0x0000, 0, 0, - &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); - if (bRC) goto exit_smapi_request_error; -#else + pr_err("%s: Serial port B irq %x conflicts with mwave_uart_irq %x\n", + __func__, usSI & 0xFF, mwave_uart_irq); goto exit_conflict; -#endif } else { if ((usSI >> 8) == uartio_index) { -#ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES - PRINTK_ERROR(KERN_ERR_MWAVE - "smapi::smapi_set_DSP_cfg: Serial port B base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI >> 8], ausUartBases[uartio_index]); -#else - PRINTK_3(TRACE_SMAPI, - 
"smapi::smapi_set_DSP_cfg: Serial port B base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI >> 8], ausUartBases[uartio_index]); -#endif -#ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES - PRINTK_1 (TRACE_SMAPI, - "smapi::smapi_set_DSP_cfg Disabling conflicting serial port B\n"); - bRC = smapi_request (0x1405, 0x0100, 0, usSI, - &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); - if (bRC) goto exit_smapi_request_error; - bRC = smapi_request (0x1404, 0x0000, 0, 0, - &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); - if (bRC) goto exit_smapi_request_error; -#else + pr_err("%s: Serial port B base I/O address %x conflicts with mwave uart I/O %x\n", + __func__, ausUartBases[usSI >> 8], + ausUartBases[uartio_index]); goto exit_conflict; -#endif } } } @@ -387,58 +298,15 @@ int smapi_set_DSP_cfg(void) /* bRC == 0 */ if ((usCX & 0xff) != 0xff) { /* IR port not disabled */ if ((usCX & 0xff) == mwave_uart_irq) { -#ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES - PRINTK_ERROR(KERN_ERR_MWAVE - "smapi::smapi_set_DSP_cfg: IR port irq %x conflicts with mwave_uart_irq %x\n", usCX & 0xff, mwave_uart_irq); -#else - PRINTK_3(TRACE_SMAPI, - "smapi::smapi_set_DSP_cfg: IR port irq %x conflicts with mwave_uart_irq %x\n", usCX & 0xff, mwave_uart_irq); -#endif -#ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES - PRINTK_1(TRACE_SMAPI, - "smapi::smapi_set_DSP_cfg Disabling conflicting IR port\n"); - bRC = smapi_request(0x1701, 0x0100, 0, 0, - &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); - if (bRC) goto exit_smapi_request_error; - bRC = smapi_request(0x1700, 0, 0, 0, - &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); - if (bRC) goto exit_smapi_request_error; - bRC = smapi_request(0x1705, 0x01ff, 0, usSI, - &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); - if (bRC) goto exit_smapi_request_error; - bRC = smapi_request(0x1704, 0x0000, 0, 0, - &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); - if (bRC) goto exit_smapi_request_error; -#else + pr_err("%s: IR port irq %x conflicts with mwave_uart_irq %x\n", + __func__, usCX & 0xff, mwave_uart_irq); goto exit_conflict; -#endif } else { if ((usSI & 0xff) == uartio_index) { -#ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES - PRINTK_ERROR(KERN_ERR_MWAVE - "smapi::smapi_set_DSP_cfg: IR port base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI & 0xff], ausUartBases[uartio_index]); -#else - PRINTK_3(TRACE_SMAPI, - "smapi::smapi_set_DSP_cfg: IR port base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI & 0xff], ausUartBases[uartio_index]); -#endif -#ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES - PRINTK_1(TRACE_SMAPI, - "smapi::smapi_set_DSP_cfg Disabling conflicting IR port\n"); - bRC = smapi_request(0x1701, 0x0100, 0, 0, - &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); - if (bRC) goto exit_smapi_request_error; - bRC = smapi_request(0x1700, 0, 0, 0, - &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); - if (bRC) goto exit_smapi_request_error; - bRC = smapi_request(0x1705, 0x01ff, 0, usSI, - &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); - if (bRC) goto exit_smapi_request_error; - bRC = smapi_request(0x1704, 0x0000, 0, 0, - &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); - if (bRC) goto exit_smapi_request_error; -#else + pr_err("%s: IR port base I/O address %x conflicts with mwave uart I/O %x\n", + __func__, ausUartBases[usSI & 0xff], + ausUartBases[uartio_index]); goto exit_conflict; -#endif } } } @@ -482,7 +350,6 @@ int smapi_set_DSP_cfg(void) if (bRC) goto exit_smapi_request_error; /* normal exit: */ - PRINTK_1(TRACE_SMAPI, "smapi::smapi_set_DSP_cfg exit\n"); return 0; exit_conflict: @@ -490,64 +357,32 @@ exit_conflict: 
return -EIO; exit_smapi_request_error: - PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg exit on smapi_request error bRC %x\n", bRC); + pr_err("%s: exit on smapi_request error bRC %x\n", __func__, bRC); return bRC; } int smapi_set_DSP_power_state(bool bOn) { - int bRC; unsigned short usAX, usBX, usCX, usDX, usDI, usSI; unsigned short usPowerFunction; - PRINTK_2(TRACE_SMAPI, "smapi::smapi_set_DSP_power_state entry bOn %x\n", bOn); - usPowerFunction = (bOn) ? 1 : 0; - bRC = smapi_request(0x4901, 0x0000, 0, usPowerFunction, - &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); - - PRINTK_2(TRACE_SMAPI, "smapi::smapi_set_DSP_power_state exit bRC %x\n", bRC); - - return bRC; + return smapi_request(0x4901, 0x0000, 0, usPowerFunction, &usAX, &usBX, &usCX, &usDX, &usDI, + &usSI); } -#if 0 -static int SmapiQuerySystemID(void) -{ - int bRC = -EIO; - unsigned short usAX = 0xffff, usBX = 0xffff, usCX = 0xffff, - usDX = 0xffff, usDI = 0xffff, usSI = 0xffff; - - printk("smapi::SmapiQUerySystemID entry\n"); - bRC = smapi_request(0x0000, 0, 0, 0, - &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); - - if (bRC == 0) { - printk("AX=%x, BX=%x, CX=%x, DX=%x, DI=%x, SI=%x\n", - usAX, usBX, usCX, usDX, usDI, usSI); - } else { - printk("smapi::SmapiQuerySystemID smapi_request error\n"); - } - - return bRC; -} -#endif /* 0 */ - int smapi_init(void) { int retval = -EIO; unsigned short usSmapiID = 0; unsigned long flags; - PRINTK_1(TRACE_SMAPI, "smapi::smapi_init entry\n"); - spin_lock_irqsave(&rtc_lock, flags); usSmapiID = CMOS_READ(0x7C); usSmapiID |= (CMOS_READ(0x7D) << 8); spin_unlock_irqrestore(&rtc_lock, flags); - PRINTK_2(TRACE_SMAPI, "smapi::smapi_init usSmapiID %x\n", usSmapiID); if (usSmapiID == 0x5349) { spin_lock_irqsave(&rtc_lock, flags); @@ -555,16 +390,13 @@ int smapi_init(void) g_usSmapiPort |= (CMOS_READ(0x7F) << 8); spin_unlock_irqrestore(&rtc_lock, flags); if (g_usSmapiPort == 0) { - PRINTK_ERROR("smapi::smapi_init, ERROR unable to read from SMAPI port\n"); + pr_err("%s: ERROR unable to read from SMAPI port\n", __func__); } else { - PRINTK_2(TRACE_SMAPI, - "smapi::smapi_init, exit true g_usSmapiPort %x\n", - g_usSmapiPort); retval = 0; //SmapiQuerySystemID(); } } else { - PRINTK_ERROR("smapi::smapi_init, ERROR invalid usSmapiID\n"); + pr_err("%s: ERROR invalid usSmapiID\n", __func__); retval = -ENXIO; } diff --git a/drivers/char/mwave/smapi.h b/drivers/char/mwave/smapi.h index ebc206b000b9..e605b16ed23c 100644 --- a/drivers/char/mwave/smapi.h +++ b/drivers/char/mwave/smapi.h @@ -49,7 +49,7 @@ #ifndef _LINUX_SMAPI_H #define _LINUX_SMAPI_H -typedef struct { +struct smapi_dsp_settings { int bDSPPresent; int bDSPEnabled; int bModemEnabled; @@ -65,10 +65,10 @@ typedef struct { unsigned short usSndblstIRQ; unsigned short usSndblstDMA; unsigned short usSndblstBaseIO; -} SMAPI_DSP_SETTINGS; +}; int smapi_init(void); -int smapi_query_DSP_cfg(SMAPI_DSP_SETTINGS * pSettings); +int smapi_query_DSP_cfg(struct smapi_dsp_settings *pSettings); int smapi_set_DSP_cfg(void); int smapi_set_DSP_power_state(bool bOn); diff --git a/drivers/char/mwave/tp3780i.c b/drivers/char/mwave/tp3780i.c index 83eaffeb22c8..7363b0f764e0 100644 --- a/drivers/char/mwave/tp3780i.c +++ b/drivers/char/mwave/tp3780i.c @@ -46,6 +46,8 @@ * First release to the public */ +#define pr_fmt(fmt) "tp3780i: " fmt + #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/ptrace.h> @@ -65,16 +67,14 @@ static unsigned short s_ausThinkpadDmaToField[8] = static unsigned short s_numIrqs = 16, s_numDmas = 8; -static void EnableSRAM(THINKPAD_BD_DATA 
* pBDData) +static void EnableSRAM(struct thinkpad_bd_data *pBDData) { - DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings; + struct dsp_3780i_config_settings *pSettings = &pBDData->rDspSettings; unsigned short usDspBaseIO = pSettings->usDspBaseIO; DSP_GPIO_OUTPUT_DATA_15_8 rGpioOutputData; DSP_GPIO_DRIVER_ENABLE_15_8 rGpioDriverEnable; DSP_GPIO_MODE_15_8 rGpioMode; - PRINTK_1(TRACE_TP3780I, "tp3780i::EnableSRAM, entry\n"); - MKWORD(rGpioMode) = ReadMsaCfg(DSP_GpioModeControl_15_8); rGpioMode.GpioMode10 = 0; WriteMsaCfg(DSP_GpioModeControl_15_8, MKWORD(rGpioMode)); @@ -88,54 +88,31 @@ static void EnableSRAM(THINKPAD_BD_DATA * pBDData) rGpioOutputData.Latch10 = 0; rGpioOutputData.Mask10 = true; WriteMsaCfg(DSP_GpioOutputData_15_8, MKWORD(rGpioOutputData)); - - PRINTK_1(TRACE_TP3780I, "tp3780i::EnableSRAM exit\n"); } static irqreturn_t UartInterrupt(int irq, void *dev_id) { - PRINTK_3(TRACE_TP3780I, - "tp3780i::UartInterrupt entry irq %x dev_id %p\n", irq, dev_id); return IRQ_HANDLED; } static irqreturn_t DspInterrupt(int irq, void *dev_id) { - pMWAVE_DEVICE_DATA pDrvData = &mwave_s_mdd; - DSP_3780I_CONFIG_SETTINGS *pSettings = &pDrvData->rBDData.rDspSettings; + struct mwave_device_data *pDrvData = &mwave_s_mdd; + struct dsp_3780i_config_settings *pSettings = &pDrvData->rBDData.rDspSettings; unsigned short usDspBaseIO = pSettings->usDspBaseIO; unsigned short usIPCSource = 0, usIsolationMask, usPCNum; - PRINTK_3(TRACE_TP3780I, - "tp3780i::DspInterrupt entry irq %x dev_id %p\n", irq, dev_id); - if (dsp3780I_GetIPCSource(usDspBaseIO, &usIPCSource) == 0) { - PRINTK_2(TRACE_TP3780I, - "tp3780i::DspInterrupt, return from dsp3780i_GetIPCSource, usIPCSource %x\n", - usIPCSource); usIsolationMask = 1; for (usPCNum = 1; usPCNum <= 16; usPCNum++) { if (usIPCSource & usIsolationMask) { usIPCSource &= ~usIsolationMask; - PRINTK_3(TRACE_TP3780I, - "tp3780i::DspInterrupt usPCNum %x usIPCSource %x\n", - usPCNum, usIPCSource); if (pDrvData->IPCs[usPCNum - 1].usIntCount == 0) { pDrvData->IPCs[usPCNum - 1].usIntCount = 1; } - PRINTK_2(TRACE_TP3780I, - "tp3780i::DspInterrupt usIntCount %x\n", - pDrvData->IPCs[usPCNum - 1].usIntCount); if (pDrvData->IPCs[usPCNum - 1].bIsEnabled == true) { - PRINTK_2(TRACE_TP3780I, - "tp3780i::DspInterrupt, waking up usPCNum %x\n", - usPCNum - 1); wake_up_interruptible(&pDrvData->IPCs[usPCNum - 1].ipc_wait_queue); - } else { - PRINTK_2(TRACE_TP3780I, - "tp3780i::DspInterrupt, no one waiting for IPC %x\n", - usPCNum - 1); } } if (usIPCSource == 0) @@ -143,56 +120,42 @@ static irqreturn_t DspInterrupt(int irq, void *dev_id) /* try next IPC */ usIsolationMask = usIsolationMask << 1; } - } else { - PRINTK_1(TRACE_TP3780I, - "tp3780i::DspInterrupt, return false from dsp3780i_GetIPCSource\n"); } - PRINTK_1(TRACE_TP3780I, "tp3780i::DspInterrupt exit\n"); return IRQ_HANDLED; } -int tp3780I_InitializeBoardData(THINKPAD_BD_DATA * pBDData) +int tp3780I_InitializeBoardData(struct thinkpad_bd_data *pBDData) { int retval = 0; - DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings; - - - PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_InitializeBoardData entry pBDData %p\n", pBDData); + struct dsp_3780i_config_settings *pSettings = &pBDData->rDspSettings; pBDData->bDSPEnabled = false; pSettings->bInterruptClaimed = false; retval = smapi_init(); if (retval) { - PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_InitializeBoardData: Error: SMAPI is not available on this machine\n"); + pr_err("%s: Error: SMAPI is not available on this machine\n", __func__); } else { if (mwave_3780i_irq 
|| mwave_3780i_io || mwave_uart_irq || mwave_uart_io) { retval = smapi_set_DSP_cfg(); } } - PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_InitializeBoardData exit retval %x\n", retval); - return retval; } -void tp3780I_Cleanup(THINKPAD_BD_DATA *pBDData) +void tp3780I_Cleanup(struct thinkpad_bd_data *pBDData) { - PRINTK_2(TRACE_TP3780I, - "tp3780i::tp3780I_Cleanup entry and exit pBDData %p\n", pBDData); } -int tp3780I_CalcResources(THINKPAD_BD_DATA * pBDData) +int tp3780I_CalcResources(struct thinkpad_bd_data *pBDData) { - SMAPI_DSP_SETTINGS rSmapiInfo; - DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings; - - PRINTK_2(TRACE_TP3780I, - "tp3780i::tp3780I_CalcResources entry pBDData %p\n", pBDData); + struct smapi_dsp_settings rSmapiInfo; + struct dsp_3780i_config_settings *pSettings = &pBDData->rDspSettings; if (smapi_query_DSP_cfg(&rSmapiInfo)) { - PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_CalcResources: Error: Could not query DSP config. Aborting.\n"); + pr_err("%s: Error: Could not query DSP config. Aborting.\n", __func__); return -EIO; } @@ -203,7 +166,7 @@ int tp3780I_CalcResources(THINKPAD_BD_DATA * pBDData) || ( rSmapiInfo.usUartIRQ == 0 ) || ( rSmapiInfo.usUartBaseIO == 0 ) ) { - PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_CalcResources: Error: Illegal resource setting. Aborting.\n"); + pr_err("%s: Error: Illegal resource setting. Aborting.\n", __func__); return -EIO; } @@ -225,41 +188,31 @@ int tp3780I_CalcResources(THINKPAD_BD_DATA * pBDData) pBDData->bShareDspIrq = pBDData->bShareUartIrq = 0; } - PRINTK_1(TRACE_TP3780I, "tp3780i::tp3780I_CalcResources exit\n"); - return 0; } -int tp3780I_ClaimResources(THINKPAD_BD_DATA * pBDData) +int tp3780I_ClaimResources(struct thinkpad_bd_data *pBDData) { int retval = 0; - DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings; + struct dsp_3780i_config_settings *pSettings = &pBDData->rDspSettings; struct resource *pres; - PRINTK_2(TRACE_TP3780I, - "tp3780i::tp3780I_ClaimResources entry pBDData %p\n", pBDData); - pres = request_region(pSettings->usDspBaseIO, 16, "mwave_3780i"); if ( pres == NULL ) retval = -EIO; if (retval) { - PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_ClaimResources: Error: Could not claim I/O region starting at %x\n", pSettings->usDspBaseIO); - retval = -EIO; + pr_err("%s: Error: Could not claim I/O region starting at %x\n", __func__, + pSettings->usDspBaseIO); + return -EIO; } - PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_ClaimResources exit retval %x\n", retval); - return retval; } -int tp3780I_ReleaseResources(THINKPAD_BD_DATA * pBDData) +int tp3780I_ReleaseResources(struct thinkpad_bd_data *pBDData) { - int retval = 0; - DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings; - - PRINTK_2(TRACE_TP3780I, - "tp3780i::tp3780I_ReleaseResources entry pBDData %p\n", pBDData); + struct dsp_3780i_config_settings *pSettings = &pBDData->rDspSettings; release_region(pSettings->usDspBaseIO & (~3), 16); @@ -268,28 +221,23 @@ int tp3780I_ReleaseResources(THINKPAD_BD_DATA * pBDData) pSettings->bInterruptClaimed = false; } - PRINTK_2(TRACE_TP3780I, - "tp3780i::tp3780I_ReleaseResources exit retval %x\n", retval); - - return retval; + return 0; } -int tp3780I_EnableDSP(THINKPAD_BD_DATA * pBDData) +int tp3780I_EnableDSP(struct thinkpad_bd_data *pBDData) { - DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings; + struct dsp_3780i_config_settings *pSettings = &pBDData->rDspSettings; bool bDSPPoweredUp = false, bInterruptAllocated = false; - PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_EnableDSP entry 
pBDData %p\n", pBDData); - if (pBDData->bDSPEnabled) { - PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: DSP already enabled!\n"); + pr_err("%s: Error: DSP already enabled!\n", __func__); goto exit_cleanup; } if (!pSettings->bDSPEnabled) { - PRINTK_ERROR(KERN_ERR_MWAVE "tp3780::tp3780I_EnableDSP: Error: pSettings->bDSPEnabled not set\n"); + pr_err("%s: Error: pSettings->bDSPEnabled not set\n", __func__); goto exit_cleanup; } @@ -299,7 +247,7 @@ int tp3780I_EnableDSP(THINKPAD_BD_DATA * pBDData) || (s_ausThinkpadIrqToField[pSettings->usDspIrq] == 0xFFFF) || (s_ausThinkpadDmaToField[pSettings->usDspDma] == 0xFFFF) ) { - PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: invalid irq %x\n", pSettings->usDspIrq); + pr_err("%s: Error: invalid irq %x\n", __func__, pSettings->usDspIrq); goto exit_cleanup; } @@ -307,7 +255,8 @@ int tp3780I_EnableDSP(THINKPAD_BD_DATA * pBDData) ((pSettings->usDspBaseIO & 0xF00F) != 0) || (pSettings->usDspBaseIO & 0x0FF0) == 0 ) { - PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: Invalid DSP base I/O address %x\n", pSettings->usDspBaseIO); + pr_err("%s: Error: Invalid DSP base I/O address %x\n", __func__, + pSettings->usDspBaseIO); goto exit_cleanup; } @@ -316,7 +265,7 @@ int tp3780I_EnableDSP(THINKPAD_BD_DATA * pBDData) pSettings->usUartIrq >= s_numIrqs || s_ausThinkpadIrqToField[pSettings->usUartIrq] == 0xFFFF ) { - PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: Invalid UART IRQ %x\n", pSettings->usUartIrq); + pr_err("%s: Error: Invalid UART IRQ %x\n", __func__, pSettings->usUartIrq); goto exit_cleanup; } switch (pSettings->usUartBaseIO) { @@ -327,7 +276,8 @@ int tp3780I_EnableDSP(THINKPAD_BD_DATA * pBDData) break; default: - PRINTK_ERROR("tp3780i::tp3780I_EnableDSP: Error: Invalid UART base I/O address %x\n", pSettings->usUartBaseIO); + pr_err("%s: Error: Invalid UART base I/O address %x\n", __func__, + pSettings->usUartBaseIO); goto exit_cleanup; } } @@ -356,33 +306,30 @@ int tp3780I_EnableDSP(THINKPAD_BD_DATA * pBDData) pSettings->usChipletEnable = TP_CFG_ChipletEnable; if (request_irq(pSettings->usUartIrq, &UartInterrupt, 0, "mwave_uart", NULL)) { - PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: Could not get UART IRQ %x\n", pSettings->usUartIrq); + pr_err("%s: Error: Could not get UART IRQ %x\n", __func__, pSettings->usUartIrq); goto exit_cleanup; } else { /* no conflict just release */ free_irq(pSettings->usUartIrq, NULL); } if (request_irq(pSettings->usDspIrq, &DspInterrupt, 0, "mwave_3780i", NULL)) { - PRINTK_ERROR("tp3780i::tp3780I_EnableDSP: Error: Could not get 3780i IRQ %x\n", pSettings->usDspIrq); + pr_err("%s: Error: Could not get 3780i IRQ %x\n", __func__, pSettings->usDspIrq); goto exit_cleanup; } else { - PRINTK_3(TRACE_TP3780I, - "tp3780i::tp3780I_EnableDSP, got interrupt %x bShareDspIrq %x\n", - pSettings->usDspIrq, pBDData->bShareDspIrq); bInterruptAllocated = true; pSettings->bInterruptClaimed = true; } smapi_set_DSP_power_state(false); if (smapi_set_DSP_power_state(true)) { - PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: smapi_set_DSP_power_state(true) failed\n"); + pr_err("%s: Error: smapi_set_DSP_power_state(true) failed\n", __func__); goto exit_cleanup; } else { bDSPPoweredUp = true; } if (dsp3780I_EnableDSP(pSettings, s_ausThinkpadIrqToField, s_ausThinkpadDmaToField)) { - PRINTK_ERROR("tp3780i::tp3780I_EnableDSP: Error: dsp7880I_EnableDSP() failed\n"); + pr_err("%s: Error: dsp7880I_EnableDSP() failed\n", __func__); goto exit_cleanup; } @@ 
-390,12 +337,10 @@ int tp3780I_EnableDSP(THINKPAD_BD_DATA * pBDData) pBDData->bDSPEnabled = true; - PRINTK_1(TRACE_TP3780I, "tp3780i::tp3780I_EnableDSP exit\n"); - return 0; exit_cleanup: - PRINTK_ERROR("tp3780i::tp3780I_EnableDSP: Cleaning up\n"); + pr_err("%s: Cleaning up\n", __func__); if (bDSPPoweredUp) smapi_set_DSP_power_state(false); if (bInterruptAllocated) { @@ -406,12 +351,9 @@ exit_cleanup: } -int tp3780I_DisableDSP(THINKPAD_BD_DATA * pBDData) +int tp3780I_DisableDSP(struct thinkpad_bd_data *pBDData) { - int retval = 0; - DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings; - - PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_DisableDSP entry pBDData %p\n", pBDData); + struct dsp_3780i_config_settings *pSettings = &pBDData->rDspSettings; if (pBDData->bDSPEnabled) { dsp3780I_DisableDSP(&pBDData->rDspSettings); @@ -423,56 +365,38 @@ int tp3780I_DisableDSP(THINKPAD_BD_DATA * pBDData) pBDData->bDSPEnabled = false; } - PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_DisableDSP exit retval %x\n", retval); - - return retval; + return 0; } -int tp3780I_ResetDSP(THINKPAD_BD_DATA * pBDData) +int tp3780I_ResetDSP(struct thinkpad_bd_data *pBDData) { - int retval = 0; - DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings; - - PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_ResetDSP entry pBDData %p\n", - pBDData); + struct dsp_3780i_config_settings *pSettings = &pBDData->rDspSettings; if (dsp3780I_Reset(pSettings) == 0) { EnableSRAM(pBDData); - } else { - retval = -EIO; + return 0; } - - PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_ResetDSP exit retval %x\n", retval); - - return retval; + return -EIO; } -int tp3780I_StartDSP(THINKPAD_BD_DATA * pBDData) +int tp3780I_StartDSP(struct thinkpad_bd_data *pBDData) { - int retval = 0; - DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings; - - PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_StartDSP entry pBDData %p\n", pBDData); + struct dsp_3780i_config_settings *pSettings = &pBDData->rDspSettings; if (dsp3780I_Run(pSettings) == 0) { // @BUG @TBD EnableSRAM(pBDData); } else { - retval = -EIO; + return -EIO; } - PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_StartDSP exit retval %x\n", retval); - - return retval; + return 0; } -int tp3780I_QueryAbilities(THINKPAD_BD_DATA * pBDData, MW_ABILITIES * pAbilities) +int tp3780I_QueryAbilities(struct thinkpad_bd_data *pBDData, struct mw_abilities *pAbilities) { - PRINTK_2(TRACE_TP3780I, - "tp3780i::tp3780I_QueryAbilities entry pBDData %p\n", pBDData); - memset(pAbilities, 0, sizeof(*pAbilities)); /* fill out standard constant fields */ pAbilities->instr_per_sec = pBDData->rDspSettings.uIps; @@ -497,25 +421,17 @@ int tp3780I_QueryAbilities(THINKPAD_BD_DATA * pBDData, MW_ABILITIES * pAbilities memcpy(pAbilities->bios_task_name, TP_ABILITIES_BIOSTASK_NAME, sizeof(TP_ABILITIES_BIOSTASK_NAME)); - PRINTK_1(TRACE_TP3780I, - "tp3780i::tp3780I_QueryAbilities exit retval=SUCCESSFUL\n"); - return 0; } -int tp3780I_ReadWriteDspDStore(THINKPAD_BD_DATA * pBDData, unsigned int uOpcode, +int tp3780I_ReadWriteDspDStore(struct thinkpad_bd_data *pBDData, unsigned int uOpcode, void __user *pvBuffer, unsigned int uCount, unsigned long ulDSPAddr) { - int retval = 0; - DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings; + struct dsp_3780i_config_settings *pSettings = &pBDData->rDspSettings; unsigned short usDspBaseIO = pSettings->usDspBaseIO; bool bRC = 0; - PRINTK_6(TRACE_TP3780I, - "tp3780i::tp3780I_ReadWriteDspDStore entry pBDData %p, uOpcode %x, pvBuffer %p, uCount %x, ulDSPAddr %lx\n", - pBDData, uOpcode, pvBuffer, 
uCount, ulDSPAddr); - if (pBDData->bDSPEnabled) { switch (uOpcode) { case IOCTL_MW_READ_DATA: @@ -532,26 +448,18 @@ int tp3780I_ReadWriteDspDStore(THINKPAD_BD_DATA * pBDData, unsigned int uOpcode, } } - retval = (bRC) ? -EIO : 0; - PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_ReadWriteDspDStore exit retval %x\n", retval); - - return retval; + return bRC ? -EIO : 0; } -int tp3780I_ReadWriteDspIStore(THINKPAD_BD_DATA * pBDData, unsigned int uOpcode, +int tp3780I_ReadWriteDspIStore(struct thinkpad_bd_data *pBDData, unsigned int uOpcode, void __user *pvBuffer, unsigned int uCount, unsigned long ulDSPAddr) { - int retval = 0; - DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings; + struct dsp_3780i_config_settings *pSettings = &pBDData->rDspSettings; unsigned short usDspBaseIO = pSettings->usDspBaseIO; bool bRC = 0; - PRINTK_6(TRACE_TP3780I, - "tp3780i::tp3780I_ReadWriteDspIStore entry pBDData %p, uOpcode %x, pvBuffer %p, uCount %x, ulDSPAddr %lx\n", - pBDData, uOpcode, pvBuffer, uCount, ulDSPAddr); - if (pBDData->bDSPEnabled) { switch (uOpcode) { case IOCTL_MW_READ_INST: @@ -564,11 +472,6 @@ int tp3780I_ReadWriteDspIStore(THINKPAD_BD_DATA * pBDData, unsigned int uOpcode, } } - retval = (bRC) ? -EIO : 0; - - PRINTK_2(TRACE_TP3780I, - "tp3780i::tp3780I_ReadWriteDspIStore exit retval %x\n", retval); - - return retval; + return bRC ? -EIO : 0; } diff --git a/drivers/char/mwave/tp3780i.h b/drivers/char/mwave/tp3780i.h index 8bd976d42fae..c0001a344741 100644 --- a/drivers/char/mwave/tp3780i.h +++ b/drivers/char/mwave/tp3780i.h @@ -75,27 +75,27 @@ #define TP_CFG_PllBypass 0 /* don't bypass */ #define TP_CFG_ChipletEnable 0xFFFF /* Enable all chiplets */ -typedef struct { +struct thinkpad_bd_data { int bDSPEnabled; int bShareDspIrq; int bShareUartIrq; - DSP_3780I_CONFIG_SETTINGS rDspSettings; -} THINKPAD_BD_DATA; + struct dsp_3780i_config_settings rDspSettings; +}; -int tp3780I_InitializeBoardData(THINKPAD_BD_DATA * pBDData); -int tp3780I_CalcResources(THINKPAD_BD_DATA * pBDData); -int tp3780I_ClaimResources(THINKPAD_BD_DATA * pBDData); -int tp3780I_ReleaseResources(THINKPAD_BD_DATA * pBDData); -int tp3780I_EnableDSP(THINKPAD_BD_DATA * pBDData); -int tp3780I_DisableDSP(THINKPAD_BD_DATA * pBDData); -int tp3780I_ResetDSP(THINKPAD_BD_DATA * pBDData); -int tp3780I_StartDSP(THINKPAD_BD_DATA * pBDData); -int tp3780I_QueryAbilities(THINKPAD_BD_DATA * pBDData, MW_ABILITIES * pAbilities); -void tp3780I_Cleanup(THINKPAD_BD_DATA *pBDData); -int tp3780I_ReadWriteDspDStore(THINKPAD_BD_DATA * pBDData, unsigned int uOpcode, +int tp3780I_InitializeBoardData(struct thinkpad_bd_data *pBDData); +int tp3780I_CalcResources(struct thinkpad_bd_data *pBDData); +int tp3780I_ClaimResources(struct thinkpad_bd_data *pBDData); +int tp3780I_ReleaseResources(struct thinkpad_bd_data *pBDData); +int tp3780I_EnableDSP(struct thinkpad_bd_data *pBDData); +int tp3780I_DisableDSP(struct thinkpad_bd_data *pBDData); +int tp3780I_ResetDSP(struct thinkpad_bd_data *pBDData); +int tp3780I_StartDSP(struct thinkpad_bd_data *pBDData); +int tp3780I_QueryAbilities(struct thinkpad_bd_data *pBDData, struct mw_abilities *pAbilities); +void tp3780I_Cleanup(struct thinkpad_bd_data *pBDData); +int tp3780I_ReadWriteDspDStore(struct thinkpad_bd_data *pBDData, unsigned int uOpcode, void __user *pvBuffer, unsigned int uCount, unsigned long ulDSPAddr); -int tp3780I_ReadWriteDspIStore(THINKPAD_BD_DATA * pBDData, unsigned int uOpcode, +int tp3780I_ReadWriteDspIStore(struct thinkpad_bd_data *pBDData, unsigned int uOpcode, void __user *pvBuffer, unsigned 
int uCount, unsigned long ulDSPAddr); diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c index e9f694b36871..9eff426a9286 100644 --- a/drivers/char/nvram.c +++ b/drivers/char/nvram.c @@ -540,6 +540,7 @@ static void __exit nvram_module_exit(void) module_init(nvram_module_init); module_exit(nvram_module_exit); +MODULE_DESCRIPTION("CMOS/NV-RAM driver for Linux"); MODULE_LICENSE("GPL"); MODULE_ALIAS_MISCDEV(NVRAM_MINOR); MODULE_ALIAS("devname:nvram"); diff --git a/drivers/char/nwbutton.c b/drivers/char/nwbutton.c index ea378c0ed549..92cee5717237 100644 --- a/drivers/char/nwbutton.c +++ b/drivers/char/nwbutton.c @@ -241,6 +241,7 @@ static void __exit nwbutton_exit (void) MODULE_AUTHOR("Alex Holden"); +MODULE_DESCRIPTION("NetWinder button driver"); MODULE_LICENSE("GPL"); module_init(nwbutton_init); diff --git a/drivers/char/nwflash.c b/drivers/char/nwflash.c index 0973c2c2b01a..9f52f0306ef7 100644 --- a/drivers/char/nwflash.c +++ b/drivers/char/nwflash.c @@ -618,6 +618,7 @@ static void __exit nwflash_exit(void) iounmap((void *)FLASH_BASE); } +MODULE_DESCRIPTION("NetWinder flash memory driver"); MODULE_LICENSE("GPL"); module_param(flashdebug, bool, 0644); diff --git a/drivers/char/pc8736x_gpio.c b/drivers/char/pc8736x_gpio.c index c39a836ebd15..5f4696813cea 100644 --- a/drivers/char/pc8736x_gpio.c +++ b/drivers/char/pc8736x_gpio.c @@ -235,7 +235,6 @@ static const struct file_operations pc8736x_gpio_fileops = { .open = pc8736x_gpio_open, .write = nsc_gpio_write, .read = nsc_gpio_read, - .llseek = no_llseek, }; static void __init pc8736x_init_shadow(void) diff --git a/drivers/char/powernv-op-panel.c b/drivers/char/powernv-op-panel.c index 3c99696b145e..53467b0a6187 100644 --- a/drivers/char/powernv-op-panel.c +++ b/drivers/char/powernv-op-panel.c @@ -195,12 +195,11 @@ free_oppanel_data: return rc; } -static int oppanel_remove(struct platform_device *pdev) +static void oppanel_remove(struct platform_device *pdev) { misc_deregister(&oppanel_dev); kfree(oppanel_lines); kfree(oppanel_data); - return 0; } static const struct of_device_id oppanel_match[] = { diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c index 4c188e9e477c..d1dfbd8d4d42 100644 --- a/drivers/char/ppdev.c +++ b/drivers/char/ppdev.c @@ -296,28 +296,35 @@ static int register_device(int minor, struct pp_struct *pp) if (!port) { pr_warn("%s: no associated port!\n", name); rc = -ENXIO; - goto err; + goto err_free_name; + } + + index = ida_alloc(&ida_index, GFP_KERNEL); + if (index < 0) { + pr_warn("%s: failed to get index!\n", name); + rc = index; + goto err_put_port; } - index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL); memset(&ppdev_cb, 0, sizeof(ppdev_cb)); ppdev_cb.irq_func = pp_irq; ppdev_cb.flags = (pp->flags & PP_EXCL) ? 
PARPORT_FLAG_EXCL : 0; ppdev_cb.private = pp; pdev = parport_register_dev_model(port, name, &ppdev_cb, index); - parport_put_port(port); if (!pdev) { pr_warn("%s: failed to register device!\n", name); rc = -ENXIO; - ida_simple_remove(&ida_index, index); - goto err; + ida_free(&ida_index, index); + goto err_put_port; } pp->pdev = pdev; pp->index = index; dev_dbg(&pdev->dev, "registered pardevice\n"); -err: +err_put_port: + parport_put_port(port); +err_free_name: kfree(name); return rc; } @@ -750,7 +757,7 @@ static int pp_release(struct inode *inode, struct file *file) if (pp->pdev) { parport_unregister_device(pp->pdev); - ida_simple_remove(&ida_index, pp->index); + ida_free(&ida_index, pp->index); pp->pdev = NULL; pr_debug(CHRDEV "%x: unregistered pardevice\n", minor); } @@ -779,7 +786,6 @@ static const struct class ppdev_class = { static const struct file_operations pp_fops = { .owner = THIS_MODULE, - .llseek = no_llseek, .read = pp_read, .write = pp_write, .poll = pp_poll, @@ -832,7 +838,6 @@ static struct parport_driver pp_driver = { .probe = pp_probe, .match_port = pp_attach, .detach = pp_detach, - .devmodel = true, }; static int __init ppdev_init(void) @@ -875,5 +880,6 @@ static void __exit ppdev_cleanup(void) module_init(ppdev_init); module_exit(ppdev_cleanup); +MODULE_DESCRIPTION("Support for user-space parallel port device drivers"); MODULE_LICENSE("GPL"); MODULE_ALIAS_CHARDEV_MAJOR(PP_MAJOR); diff --git a/drivers/char/random.c b/drivers/char/random.c index 3cb37760dfec..bab03c7c4194 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) /* - * Copyright (C) 2017-2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + * Copyright (C) 2017-2024 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005 * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All rights reserved. * @@ -56,6 +56,11 @@ #include <linux/sched/isolation.h> #include <crypto/chacha.h> #include <crypto/blake2s.h> +#ifdef CONFIG_VDSO_GETRANDOM +#include <vdso/getrandom.h> +#include <vdso/datapage.h> +#include <vdso/vsyscall.h> +#endif #include <asm/archrandom.h> #include <asm/processor.h> #include <asm/irq.h> @@ -254,8 +259,8 @@ static void crng_reseed(struct work_struct *work) u8 key[CHACHA_KEY_SIZE]; /* Immediately schedule the next reseeding, so that it fires sooner rather than later. */ - if (likely(system_unbound_wq)) - queue_delayed_work(system_unbound_wq, &next_reseed, crng_reseed_interval()); + if (likely(system_dfl_wq)) + queue_delayed_work(system_dfl_wq, &next_reseed, crng_reseed_interval()); extract_entropy(key, sizeof(key)); @@ -271,6 +276,22 @@ static void crng_reseed(struct work_struct *work) if (next_gen == ULONG_MAX) ++next_gen; WRITE_ONCE(base_crng.generation, next_gen); +#ifdef CONFIG_VDSO_GETRANDOM + /* base_crng.generation's invalid value is ULONG_MAX, while + * vdso_k_rng_data->generation's invalid value is 0, so add one to the + * former to arrive at the latter. Use smp_store_release so that this + * is ordered with the write above to base_crng.generation. Pairs with + * the smp_rmb() before the syscall in the vDSO code. + * + * Cast to unsigned long for 32-bit architectures, since atomic 64-bit + * operations are not supported on those architectures. This is safe + * because base_crng.generation is a 32-bit value. 
On big-endian + * architectures it will be stored in the upper 32 bits, but that's okay + * because the vDSO side only checks whether the value changed, without + * actually using or interpreting the value. + */ + smp_store_release((unsigned long *)&vdso_k_rng_data->generation, next_gen + 1); +#endif if (!static_branch_likely(&crng_is_ready)) crng_init = CRNG_READY; spin_unlock_irqrestore(&base_crng.lock, flags); @@ -288,11 +309,11 @@ static void crng_reseed(struct work_struct *work) * key value, at index 4, so the state should always be zeroed out * immediately after using in order to maintain forward secrecy. * If the state cannot be erased in a timely manner, then it is - * safer to set the random_data parameter to &chacha_state[4] so - * that this function overwrites it before returning. + * safer to set the random_data parameter to &chacha_state->x[4] + * so that this function overwrites it before returning. */ static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE], - u32 chacha_state[CHACHA_STATE_WORDS], + struct chacha_state *chacha_state, u8 *random_data, size_t random_data_len) { u8 first_block[CHACHA_BLOCK_SIZE]; @@ -300,8 +321,8 @@ static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE], BUG_ON(random_data_len > 32); chacha_init_consts(chacha_state); - memcpy(&chacha_state[4], key, CHACHA_KEY_SIZE); - memset(&chacha_state[12], 0, sizeof(u32) * 4); + memcpy(&chacha_state->x[4], key, CHACHA_KEY_SIZE); + memset(&chacha_state->x[12], 0, sizeof(u32) * 4); chacha20_block(chacha_state, first_block); memcpy(key, first_block, CHACHA_KEY_SIZE); @@ -314,7 +335,7 @@ static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE], * random data. It also returns up to 32 bytes on its own of random data * that may be used; random_data_len may not be greater than 32. */ -static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS], +static void crng_make_state(struct chacha_state *chacha_state, u8 *random_data, size_t random_data_len) { unsigned long flags; @@ -374,7 +395,7 @@ static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS], static void _get_random_bytes(void *buf, size_t len) { - u32 chacha_state[CHACHA_STATE_WORDS]; + struct chacha_state chacha_state; u8 tmp[CHACHA_BLOCK_SIZE]; size_t first_block_len; @@ -382,31 +403,31 @@ static void _get_random_bytes(void *buf, size_t len) return; first_block_len = min_t(size_t, 32, len); - crng_make_state(chacha_state, buf, first_block_len); + crng_make_state(&chacha_state, buf, first_block_len); len -= first_block_len; buf += first_block_len; while (len) { if (len < CHACHA_BLOCK_SIZE) { - chacha20_block(chacha_state, tmp); + chacha20_block(&chacha_state, tmp); memcpy(buf, tmp, len); memzero_explicit(tmp, sizeof(tmp)); break; } - chacha20_block(chacha_state, buf); - if (unlikely(chacha_state[12] == 0)) - ++chacha_state[13]; + chacha20_block(&chacha_state, buf); + if (unlikely(chacha_state.x[12] == 0)) + ++chacha_state.x[13]; len -= CHACHA_BLOCK_SIZE; buf += CHACHA_BLOCK_SIZE; } - memzero_explicit(chacha_state, sizeof(chacha_state)); + chacha_zeroize_state(&chacha_state); } /* * This returns random bytes in arbitrary quantities. The quality of the - * random bytes is good as /dev/urandom. In order to ensure that the + * random bytes is as good as /dev/urandom. In order to ensure that the * randomness provided by this function is okay, the function * wait_for_random_bytes() should be called and return 0 at least once * at any point prior. 
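The random.c hunks above move the generator from a bare u32 chacha_state[CHACHA_STATE_WORDS] array to the struct chacha_state wrapper and retire memzero_explicit() in favour of chacha_zeroize_state(), but the underlying fast-key-erasure scheme is unchanged: the first block produced from a freshly keyed state immediately overwrites the key, so a captured state cannot be rewound to earlier output. A minimal userspace analogue of that scheme is sketched below; struct chacha_state, chacha_block() and fast_key_erasure() here are illustrative stand-ins for the kernel primitives, not code from the patch.

#include <stdint.h>
#include <string.h>

struct chacha_state { uint32_t x[16]; };   /* x[4..11] = key, x[12] = block counter */

/* Placeholder block function: a real ChaCha20 core permutes the state for
 * 20 rounds; only the counter handling is kept so the data flow is visible. */
static void chacha_block(struct chacha_state *st, uint8_t out[64])
{
	memcpy(out, st->x, sizeof(st->x));
	if (++st->x[12] == 0)
		++st->x[13];
}

/* Fast key erasure: key in, replacement key plus up to 32 output bytes out. */
static void fast_key_erasure(uint8_t key[32], struct chacha_state *st,
			     uint8_t *random_data, size_t random_data_len)
{
	uint8_t first_block[64];

	/* The real code also loads the ChaCha constants into x[0..3] first. */
	memcpy(&st->x[4], key, 32);                  /* load key words       */
	memset(&st->x[12], 0, 4 * sizeof(uint32_t)); /* zero counter + nonce */

	chacha_block(st, first_block);
	memcpy(key, first_block, 32);                /* key is replaced now  */
	memcpy(random_data, first_block + 32, random_data_len); /* len <= 32 */
	memset(first_block, 0, sizeof(first_block)); /* kernel: memzero_explicit */
}

Once the caller has drawn everything it needs, the whole state is wiped as well; that final wipe is what the patch now expresses as chacha_zeroize_state(&chacha_state) instead of memzero_explicit() on a raw array.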
@@ -420,7 +441,7 @@ EXPORT_SYMBOL(get_random_bytes); static ssize_t get_random_bytes_user(struct iov_iter *iter) { - u32 chacha_state[CHACHA_STATE_WORDS]; + struct chacha_state chacha_state; u8 block[CHACHA_BLOCK_SIZE]; size_t ret = 0, copied; @@ -432,21 +453,22 @@ static ssize_t get_random_bytes_user(struct iov_iter *iter) * bytes, in case userspace causes copy_to_iter() below to sleep * forever, so that we still retain forward secrecy in that case. */ - crng_make_state(chacha_state, (u8 *)&chacha_state[4], CHACHA_KEY_SIZE); + crng_make_state(&chacha_state, (u8 *)&chacha_state.x[4], + CHACHA_KEY_SIZE); /* * However, if we're doing a read of len <= 32, we don't need to * use chacha_state after, so we can simply return those bytes to * the user directly. */ if (iov_iter_count(iter) <= CHACHA_KEY_SIZE) { - ret = copy_to_iter(&chacha_state[4], CHACHA_KEY_SIZE, iter); + ret = copy_to_iter(&chacha_state.x[4], CHACHA_KEY_SIZE, iter); goto out_zero_chacha; } for (;;) { - chacha20_block(chacha_state, block); - if (unlikely(chacha_state[12] == 0)) - ++chacha_state[13]; + chacha20_block(&chacha_state, block); + if (unlikely(chacha_state.x[12] == 0)) + ++chacha_state.x[13]; copied = copy_to_iter(block, sizeof(block), iter); ret += copied; @@ -463,13 +485,13 @@ static ssize_t get_random_bytes_user(struct iov_iter *iter) memzero_explicit(block, sizeof(block)); out_zero_chacha: - memzero_explicit(chacha_state, sizeof(chacha_state)); + chacha_zeroize_state(&chacha_state); return ret ? ret : -EFAULT; } /* * Batched entropy returns random integers. The quality of the random - * number is good as /dev/urandom. In order to ensure that the randomness + * number is as good as /dev/urandom. In order to ensure that the randomness * provided by this function is okay, the function wait_for_random_bytes() * should be called and return 0 at least once at any point prior. */ @@ -614,7 +636,7 @@ enum { }; static struct { - struct blake2s_state hash; + struct blake2s_ctx hash; spinlock_t lock; unsigned int init_bits; } input_pool = { @@ -679,7 +701,7 @@ static void extract_entropy(void *buf, size_t len) /* next_key = HASHPRF(seed, RDSEED || 0) */ block.counter = 0; - blake2s(next_key, (u8 *)&block, seed, sizeof(next_key), sizeof(block), sizeof(seed)); + blake2s(seed, sizeof(seed), (const u8 *)&block, sizeof(block), next_key, sizeof(next_key)); blake2s_init_key(&input_pool.hash, BLAKE2S_HASH_SIZE, next_key, sizeof(next_key)); spin_unlock_irqrestore(&input_pool.lock, flags); @@ -689,7 +711,7 @@ static void extract_entropy(void *buf, size_t len) i = min_t(size_t, len, BLAKE2S_HASH_SIZE); /* output = HASHPRF(seed, RDSEED || ++counter) */ ++block.counter; - blake2s(buf, (u8 *)&block, seed, i, sizeof(block), sizeof(seed)); + blake2s(seed, sizeof(seed), (const u8 *)&block, sizeof(block), buf, i); len -= i; buf += i; } @@ -702,9 +724,10 @@ static void extract_entropy(void *buf, size_t len) static void __cold _credit_init_bits(size_t bits) { - static struct execute_work set_ready; + static DECLARE_WORK(set_ready, crng_set_ready); unsigned int new, orig, add; unsigned long flags; + int m; if (!bits) return; @@ -718,15 +741,18 @@ static void __cold _credit_init_bits(size_t bits) if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) { crng_reseed(NULL); /* Sets crng_init to CRNG_READY under base_crng.lock. 
*/ - if (static_key_initialized) - execute_in_process_context(crng_set_ready, &set_ready); + if (system_dfl_wq) + queue_work(system_dfl_wq, &set_ready); atomic_notifier_call_chain(&random_ready_notifier, 0, NULL); +#ifdef CONFIG_VDSO_GETRANDOM + WRITE_ONCE(vdso_k_rng_data->is_ready, true); +#endif wake_up_interruptible(&crng_init_wait); kill_fasync(&fasync, SIGIO, POLL_IN); pr_notice("crng init done\n"); - if (urandom_warning.missed) - pr_notice("%d urandom warning(s) missed due to ratelimiting\n", - urandom_warning.missed); + m = ratelimit_state_get_miss(&urandom_warning); + if (m) + pr_notice("%d urandom warning(s) missed due to ratelimiting\n", m); } else if (orig < POOL_EARLY_BITS && new >= POOL_EARLY_BITS) { spin_lock_irqsave(&base_crng.lock, flags); /* Check if crng_init is CRNG_EMPTY, to avoid race with crng_reseed(). */ @@ -768,7 +794,7 @@ static void __cold _credit_init_bits(size_t bits) * * add_bootloader_randomness() is called by bootloader drivers, such as EFI * and device tree, and credits its input depending on whether or not the - * command line option 'random.trust_bootloader'. + * command line option 'random.trust_bootloader' is set. * * add_vmfork_randomness() adds a unique (but not necessarily secret) ID * representing the current instance of a VM to the pool, without crediting, @@ -889,9 +915,8 @@ void __init random_init(void) add_latent_entropy(); /* - * If we were initialized by the cpu or bootloader before jump labels - * are initialized, then we should enable the static branch here, where - * it's guaranteed that jump labels have been initialized. + * If we were initialized by the cpu or bootloader before workqueues + * are initialized, then we should enable the static branch here. */ if (!static_branch_likely(&crng_is_ready) && crng_init >= CRNG_READY) crng_set_ready(NULL); @@ -1270,6 +1295,7 @@ static void __cold try_to_generate_entropy(void) struct entropy_timer_state *stack = PTR_ALIGN((void *)stack_bytes, SMP_CACHE_BYTES); unsigned int i, num_different = 0; unsigned long last = random_get_entropy(); + cpumask_var_t timer_cpus; int cpu = -1; for (i = 0; i < NUM_TRIAL_SAMPLES - 1; ++i) { @@ -1284,13 +1310,15 @@ static void __cold try_to_generate_entropy(void) atomic_set(&stack->samples, 0); timer_setup_on_stack(&stack->timer, entropy_timer, 0); + if (!alloc_cpumask_var(&timer_cpus, GFP_KERNEL)) + goto out; + while (!crng_ready() && !signal_pending(current)) { /* * Check !timer_pending() and then ensure that any previous callback has finished - * executing by checking try_to_del_timer_sync(), before queueing the next one. + * executing by checking timer_delete_sync_try(), before queueing the next one. */ - if (!timer_pending(&stack->timer) && try_to_del_timer_sync(&stack->timer) >= 0) { - struct cpumask timer_cpus; + if (!timer_pending(&stack->timer) && timer_delete_sync_try(&stack->timer) >= 0) { unsigned int num_cpus; /* @@ -1300,19 +1328,19 @@ static void __cold try_to_generate_entropy(void) preempt_disable(); /* Only schedule callbacks on timer CPUs that are online. */ - cpumask_and(&timer_cpus, housekeeping_cpumask(HK_TYPE_TIMER), cpu_online_mask); - num_cpus = cpumask_weight(&timer_cpus); + cpumask_and(timer_cpus, housekeeping_cpumask(HK_TYPE_TIMER), cpu_online_mask); + num_cpus = cpumask_weight(timer_cpus); /* In very bizarre case of misconfiguration, fallback to all online. 
*/ if (unlikely(num_cpus == 0)) { - timer_cpus = *cpu_online_mask; - num_cpus = cpumask_weight(&timer_cpus); + *timer_cpus = *cpu_online_mask; + num_cpus = cpumask_weight(timer_cpus); } /* Basic CPU round-robin, which avoids the current CPU. */ do { - cpu = cpumask_next(cpu, &timer_cpus); + cpu = cpumask_next(cpu, timer_cpus); if (cpu >= nr_cpu_ids) - cpu = cpumask_first(&timer_cpus); + cpu = cpumask_first(timer_cpus); } while (cpu == smp_processor_id() && num_cpus > 1); /* Expiring the timer at `jiffies` means it's the next tick. */ @@ -1328,8 +1356,10 @@ static void __cold try_to_generate_entropy(void) } mix_pool_bytes(&stack->entropy, sizeof(stack->entropy)); - del_timer_sync(&stack->timer); - destroy_timer_on_stack(&stack->timer); + free_cpumask_var(timer_cpus); +out: + timer_delete_sync(&stack->timer); + timer_destroy_on_stack(&stack->timer); } @@ -1364,7 +1394,6 @@ static void __cold try_to_generate_entropy(void) SYSCALL_DEFINE3(getrandom, char __user *, ubuf, size_t, len, unsigned int, flags) { struct iov_iter iter; - struct iovec iov; int ret; if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE)) @@ -1385,7 +1414,7 @@ SYSCALL_DEFINE3(getrandom, char __user *, ubuf, size_t, len, unsigned int, flags return ret; } - ret = import_single_range(ITER_DEST, ubuf, len, &iov, &iter); + ret = import_ubuf(ITER_DEST, ubuf, len, &iter); if (unlikely(ret)) return ret; return get_random_bytes_user(&iter); @@ -1443,7 +1472,7 @@ static ssize_t urandom_read_iter(struct kiocb *kiocb, struct iov_iter *iter) if (!crng_ready()) { if (!ratelimit_disable && maxwarn <= 0) - ++urandom_warning.missed; + ratelimit_state_inc_miss(&urandom_warning); else if (ratelimit_disable || __ratelimit(&urandom_warning)) { --maxwarn; pr_notice("%s: uninitialized urandom read (%zu bytes read)\n", @@ -1491,7 +1520,6 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg) return 0; case RNDADDENTROPY: { struct iov_iter iter; - struct iovec iov; ssize_t ret; int len; @@ -1503,7 +1531,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg) return -EINVAL; if (get_user(len, p++)) return -EFAULT; - ret = import_single_range(ITER_SOURCE, p, len, &iov, &iter); + ret = import_ubuf(ITER_SOURCE, p, len, &iter); if (unlikely(ret)) return ret; ret = write_pool_user(&iter); @@ -1606,7 +1634,7 @@ static u8 sysctl_bootid[UUID_SIZE]; * UUID. The difference is in whether table->data is NULL; if it is, * then a new UUID is generated and returned to the user. */ -static int proc_do_uuid(struct ctl_table *table, int write, void *buf, +static int proc_do_uuid(const struct ctl_table *table, int write, void *buf, size_t *lenp, loff_t *ppos) { u8 tmp_uuid[UUID_SIZE], *uuid; @@ -1637,13 +1665,13 @@ static int proc_do_uuid(struct ctl_table *table, int write, void *buf, } /* The same as proc_dointvec, but writes don't change anything. */ -static int proc_do_rointvec(struct ctl_table *table, int write, void *buf, +static int proc_do_rointvec(const struct ctl_table *table, int write, void *buf, size_t *lenp, loff_t *ppos) { return write ? 
0 : proc_dointvec(table, 0, buf, lenp, ppos); } -static struct ctl_table random_table[] = { +static const struct ctl_table random_table[] = { { .procname = "poolsize", .data = &sysctl_poolsize, @@ -1683,7 +1711,6 @@ static struct ctl_table random_table[] = { .mode = 0444, .proc_handler = proc_do_uuid, }, - { } }; /* diff --git a/drivers/char/scx200_gpio.c b/drivers/char/scx200_gpio.c index 9f701dcba95c..700e6affea6f 100644 --- a/drivers/char/scx200_gpio.c +++ b/drivers/char/scx200_gpio.c @@ -68,7 +68,6 @@ static const struct file_operations scx200_gpio_fileops = { .read = nsc_gpio_read, .open = scx200_gpio_open, .release = scx200_gpio_release, - .llseek = no_llseek, }; static struct cdev scx200_gpio_cdev; /* use 1 cdev for all pins */ diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c index 9211531689b2..677bb5ac950a 100644 --- a/drivers/char/sonypi.c +++ b/drivers/char/sonypi.c @@ -37,6 +37,7 @@ #include <linux/kfifo.h> #include <linux/platform_device.h> #include <linux/gfp.h> +#include <linux/string_choices.h> #include <linux/uaccess.h> #include <asm/io.h> @@ -920,7 +921,7 @@ static ssize_t sonypi_misc_read(struct file *file, char __user *buf, if (ret > 0) { struct inode *inode = file_inode(file); - inode->i_atime = current_time(inode); + inode_set_atime_to_ts(inode, current_time(inode)); } return ret; @@ -1054,7 +1055,6 @@ static const struct file_operations sonypi_misc_fops = { .release = sonypi_misc_release, .fasync = sonypi_misc_fasync, .unlocked_ioctl = sonypi_misc_ioctl, - .llseek = no_llseek, }; static struct miscdevice sonypi_misc_device = { @@ -1269,12 +1269,12 @@ static void sonypi_display_info(void) "compat = %s, mask = 0x%08lx, useinput = %s, acpi = %s\n", sonypi_device.model, verbose, - fnkeyinit ? "on" : "off", - camera ? "on" : "off", - compat ? "on" : "off", + str_on_off(fnkeyinit), + str_on_off(camera), + str_on_off(compat), mask, - useinput ? "on" : "off", - SONYPI_ACPI_ACTIVE ? 
"on" : "off"); + str_on_off(useinput), + str_on_off(SONYPI_ACPI_ACTIVE)); printk(KERN_INFO "sonypi: enabled at irq=%d, port1=0x%x, port2=0x%x\n", sonypi_device.irq, sonypi_device.ioport1, sonypi_device.ioport2); @@ -1408,7 +1408,7 @@ static int sonypi_probe(struct platform_device *dev) return error; } -static int sonypi_remove(struct platform_device *dev) +static void sonypi_remove(struct platform_device *dev) { sonypi_disable(); @@ -1432,8 +1432,6 @@ static int sonypi_remove(struct platform_device *dev) } kfifo_free(&sonypi_device.fifo); - - return 0; } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/char/tlclk.c b/drivers/char/tlclk.c index 896a3550fba9..b381ea7e85d2 100644 --- a/drivers/char/tlclk.c +++ b/drivers/char/tlclk.c @@ -42,11 +42,12 @@ #include <linux/sysfs.h> #include <linux/device.h> #include <linux/miscdevice.h> -#include <linux/platform_device.h> +#include <linux/device/faux.h> #include <asm/io.h> /* inb/outb */ #include <linux/uaccess.h> MODULE_AUTHOR("Sebastien Bouchard <sebastien.bouchard@ca.kontron.com>"); +MODULE_DESCRIPTION("Telecom Clock driver for Intel NetStructure(tm) MPCBL0010"); MODULE_LICENSE("GPL"); /*Hardware Reset of the PLL */ @@ -741,7 +742,7 @@ static ssize_t store_reset (struct device *d, static DEVICE_ATTR(reset, (S_IWUSR|S_IWGRP), NULL, store_reset); -static struct attribute *tlclk_sysfs_entries[] = { +static struct attribute *tlclk_attrs[] = { &dev_attr_current_ref.attr, &dev_attr_telclock_version.attr, &dev_attr_alarms.attr, @@ -765,13 +766,9 @@ static struct attribute *tlclk_sysfs_entries[] = { &dev_attr_reset.attr, NULL }; +ATTRIBUTE_GROUPS(tlclk); -static const struct attribute_group tlclk_attribute_group = { - .name = NULL, /* put in device directory */ - .attrs = tlclk_sysfs_entries, -}; - -static struct platform_device *tlclk_device; +static struct faux_device *tlclk_device; static int __init tlclk_init(void) { @@ -816,24 +813,13 @@ static int __init tlclk_init(void) goto out3; } - tlclk_device = platform_device_register_simple("telco_clock", - -1, NULL, 0); - if (IS_ERR(tlclk_device)) { - printk(KERN_ERR "tlclk: platform_device_register failed.\n"); - ret = PTR_ERR(tlclk_device); + tlclk_device = faux_device_create_with_groups("telco_clock", NULL, NULL, tlclk_groups); + if (!tlclk_device) { + ret = -ENODEV; goto out4; } - ret = sysfs_create_group(&tlclk_device->dev.kobj, - &tlclk_attribute_group); - if (ret) { - printk(KERN_ERR "tlclk: failed to create sysfs device attributes.\n"); - goto out5; - } - return 0; -out5: - platform_device_unregister(tlclk_device); out4: misc_deregister(&tlclk_miscdev); out3: @@ -847,13 +833,12 @@ out1: static void __exit tlclk_cleanup(void) { - sysfs_remove_group(&tlclk_device->dev.kobj, &tlclk_attribute_group); - platform_device_unregister(tlclk_device); + faux_device_destroy(tlclk_device); misc_deregister(&tlclk_miscdev); unregister_chrdev(tlclk_major, "telco_clock"); release_region(TLCLK_BASE, 8); - del_timer_sync(&switchover_timer); + timer_delete_sync(&switchover_timer); kfree(alarm_events); } @@ -871,7 +856,7 @@ static void switchover_timeout(struct timer_list *unused) } /* Alarm processing is done, wake up read task */ - del_timer(&switchover_timer); + timer_delete(&switchover_timer); got_event = 1; wake_up(&wq); } diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig index 927088b2c3d3..8a8f692b6088 100644 --- a/drivers/char/tpm/Kconfig +++ b/drivers/char/tpm/Kconfig @@ -27,6 +27,21 @@ menuconfig TCG_TPM if TCG_TPM +config TCG_TPM2_HMAC + bool "Use HMAC and encrypted transactions on the TPM bus" + 
default n + select CRYPTO_ECDH + select CRYPTO_LIB_AESCFB + select CRYPTO_LIB_SHA256 + select CRYPTO_LIB_UTILS + help + Setting this causes us to deploy a scheme which uses request + and response HMACs in addition to encryption for + communicating with the TPM to prevent or detect bus snooping + and interposer attacks (see tpm-security.rst). Saying Y + here adds some encryption overhead to all kernel to TPM + transactions. + config HW_RANDOM_TPM bool "TPM HW Random Number Generator support" depends on TCG_TPM && HW_RANDOM && !(TCG_TPM=y && HW_RANDOM=m) @@ -148,7 +163,8 @@ config TCG_NSC config TCG_ATMEL tristate "Atmel TPM Interface" - depends on PPC64 || HAS_IOPORT_MAP + depends on HAS_IOPORT_MAP + depends on HAS_IOPORT help If you have a TPM security chip from Atmel say Yes and it will be accessible from within Linux. To compile this driver @@ -156,7 +172,7 @@ config TCG_ATMEL config TCG_INFINEON tristate "Infineon Technologies TPM Interface" - depends on PNP + depends on PNP || COMPILE_TEST help If you have a TPM security chip from Infineon Technologies (either SLD 9630 TT 1.1 or SLB 9635 TT 1.2) say Yes and it @@ -174,6 +190,15 @@ config TCG_IBMVTPM will be accessible from within Linux. To compile this driver as a module, choose M here; the module will be called tpm_ibmvtpm. +config TCG_LOONGSON + tristate "Loongson TPM Interface" + depends on MFD_LOONGSON_SE + help + If you want to make Loongson TPM support available, say Yes and + it will be accessible from within Linux. To compile this + driver as a module, choose M here; the module will be called + tpm_loongson. + config TCG_XEN tristate "XEN TPM Interface" depends on TCG_TPM && XEN @@ -195,6 +220,15 @@ config TCG_CRB from within Linux. To compile this driver as a module, choose M here; the module will be called tpm_crb. +config TCG_ARM_CRB_FFA + tristate "TPM CRB over Arm FF-A Transport" + depends on ARM_FFA_TRANSPORT && TCG_CRB + default TCG_CRB + help + If the Arm FF-A transport is used to access the TPM say Yes. + To compile this driver as a module, choose M here; the module + will be called tpm_crb_ffa. + config TCG_VTPM_PROXY tristate "VTPM Proxy Interface" depends on TCG_TPM @@ -210,5 +244,15 @@ config TCG_FTPM_TEE help This driver proxies for firmware TPM running in TEE. +config TCG_SVSM + tristate "SNP SVSM vTPM interface" + depends on AMD_MEM_ENCRYPT + help + This is a driver for the AMD SVSM vTPM protocol that a SEV-SNP guest + OS can use to discover and talk to a vTPM emulated by the Secure VM + Service Module (SVSM) in the guest context, but at a more privileged + level (usually VMPL0). To compile this driver as a module, choose M + here; the module will be called tpm_svsm. 
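Editor's note: a hedged sketch of what a kernel-to-TPM transaction mentioned in the TCG_TPM2_HMAC help looks like from a consumer's side. With the option enabled, the HMAC and encryption are applied inside the TPM core, so exported helpers such as tpm_get_random() are used unchanged; the consumer function below is hypothetical, while the chip handling mirrors this series (callers now pass a non-NULL chip and drop the reference taken by tpm_default_chip()).

#include <linux/tpm.h>

/* Hypothetical consumer of the default TPM; at most 128 bytes (TPM_MAX_RNG_DATA) per call. */
static int example_tpm_rng(u8 *out, size_t len)
{
	struct tpm_chip *chip = tpm_default_chip();
	int rc;

	if (!chip)
		return -ENODEV;

	rc = tpm_get_random(chip, out, len);	/* HMAC/encryption applied transparently when enabled */
	put_device(&chip->dev);			/* drop the reference taken by tpm_default_chip() */
	return rc;
}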
+ source "drivers/char/tpm/st33zp24/Kconfig" endif # TCG_TPM diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile index 0222b1ddb310..5b5cdc0d32e4 100644 --- a/drivers/char/tpm/Makefile +++ b/drivers/char/tpm/Makefile @@ -15,6 +15,8 @@ tpm-y += tpm-sysfs.o tpm-y += eventlog/common.o tpm-y += eventlog/tpm1.o tpm-y += eventlog/tpm2.o +tpm-y += tpm-buf.o +tpm-y += tpm2-sessions.o tpm-$(CONFIG_ACPI) += tpm_ppi.o eventlog/acpi.o tpm-$(CONFIG_EFI) += eventlog/efi.o @@ -40,5 +42,8 @@ obj-$(CONFIG_TCG_IBMVTPM) += tpm_ibmvtpm.o obj-$(CONFIG_TCG_TIS_ST33ZP24) += st33zp24/ obj-$(CONFIG_TCG_XEN) += xen-tpmfront.o obj-$(CONFIG_TCG_CRB) += tpm_crb.o +obj-$(CONFIG_TCG_ARM_CRB_FFA) += tpm_crb_ffa.o obj-$(CONFIG_TCG_VTPM_PROXY) += tpm_vtpm_proxy.o obj-$(CONFIG_TCG_FTPM_TEE) += tpm_ftpm_tee.o +obj-$(CONFIG_TCG_SVSM) += tpm_svsm.o +obj-$(CONFIG_TCG_LOONGSON) += tpm_loongson.o diff --git a/drivers/char/tpm/eventlog/acpi.c b/drivers/char/tpm/eventlog/acpi.c index bd757d836c5c..cf02ec646f46 100644 --- a/drivers/char/tpm/eventlog/acpi.c +++ b/drivers/char/tpm/eventlog/acpi.c @@ -63,6 +63,11 @@ static bool tpm_is_tpm2_log(void *bios_event_log, u64 len) return n == 0; } +static void tpm_bios_log_free(void *data) +{ + kvfree(data); +} + /* read binary bios log */ int tpm_read_log_acpi(struct tpm_chip *chip) { @@ -136,13 +141,12 @@ int tpm_read_log_acpi(struct tpm_chip *chip) } /* malloc EventLog space */ - log->bios_event_log = devm_kmalloc(&chip->dev, len, GFP_KERNEL); + log->bios_event_log = kvmalloc(len, GFP_KERNEL); if (!log->bios_event_log) return -ENOMEM; log->bios_event_log_end = log->bios_event_log + len; - ret = -EIO; virt = acpi_os_map_iomem(start, len); if (!virt) { dev_warn(&chip->dev, "%s: Failed to map ACPI memory\n", __func__); @@ -162,10 +166,16 @@ int tpm_read_log_acpi(struct tpm_chip *chip) goto err; } + ret = devm_add_action(&chip->dev, tpm_bios_log_free, log->bios_event_log); + if (ret) { + log->bios_event_log = NULL; + goto err; + } + return format; err: - devm_kfree(&chip->dev, log->bios_event_log); + tpm_bios_log_free(log->bios_event_log); log->bios_event_log = NULL; return ret; } diff --git a/drivers/char/tpm/eventlog/common.c b/drivers/char/tpm/eventlog/common.c index 639c3f395a5a..691813d2a5a2 100644 --- a/drivers/char/tpm/eventlog/common.c +++ b/drivers/char/tpm/eventlog/common.c @@ -32,7 +32,7 @@ static int tpm_bios_measurements_open(struct inode *inode, struct tpm_chip *chip; inode_lock(inode); - if (!inode->i_private) { + if (!inode->i_nlink) { inode_unlock(inode); return -ENODEV; } @@ -47,6 +47,8 @@ static int tpm_bios_measurements_open(struct inode *inode, if (!err) { seq = file->private_data; seq->private = chip; + } else { + put_device(&chip->dev); } return err; @@ -103,7 +105,7 @@ static int tpm_read_log(struct tpm_chip *chip) void tpm_bios_log_setup(struct tpm_chip *chip) { const char *name = dev_name(&chip->dev); - unsigned int cnt; + struct dentry *dentry; int log_version; int rc = 0; @@ -115,14 +117,12 @@ void tpm_bios_log_setup(struct tpm_chip *chip) return; log_version = rc; - cnt = 0; - chip->bios_dir[cnt] = securityfs_create_dir(name, NULL); + chip->bios_dir = securityfs_create_dir(name, NULL); /* NOTE: securityfs_create_dir can return ENODEV if securityfs is * compiled out. The caller should ignore the ENODEV return code. 
*/ - if (IS_ERR(chip->bios_dir[cnt])) - goto err; - cnt++; + if (IS_ERR(chip->bios_dir)) + return; chip->bin_log_seqops.chip = chip; if (log_version == EFI_TCG2_EVENT_LOG_FORMAT_TCG_2) @@ -133,14 +133,13 @@ void tpm_bios_log_setup(struct tpm_chip *chip) &tpm1_binary_b_measurements_seqops; - chip->bios_dir[cnt] = + dentry = securityfs_create_file("binary_bios_measurements", - 0440, chip->bios_dir[0], + 0440, chip->bios_dir, (void *)&chip->bin_log_seqops, &tpm_bios_measurements_ops); - if (IS_ERR(chip->bios_dir[cnt])) + if (IS_ERR(dentry)) goto err; - cnt++; if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) { @@ -148,42 +147,23 @@ void tpm_bios_log_setup(struct tpm_chip *chip) chip->ascii_log_seqops.seqops = &tpm1_ascii_b_measurements_seqops; - chip->bios_dir[cnt] = + dentry = securityfs_create_file("ascii_bios_measurements", - 0440, chip->bios_dir[0], + 0440, chip->bios_dir, (void *)&chip->ascii_log_seqops, &tpm_bios_measurements_ops); - if (IS_ERR(chip->bios_dir[cnt])) + if (IS_ERR(dentry)) goto err; - cnt++; } return; err: - chip->bios_dir[cnt] = NULL; tpm_bios_log_teardown(chip); return; } void tpm_bios_log_teardown(struct tpm_chip *chip) { - int i; - struct inode *inode; - - /* securityfs_remove currently doesn't take care of handling sync - * between removal and opening of pseudo files. To handle this, a - * workaround is added by making i_private = NULL here during removal - * and to check it during open(), both within inode_lock()/unlock(). - * This design ensures that open() either safely gets kref or fails. - */ - for (i = (TPM_NUM_EVENT_LOG_FILES - 1); i >= 0; i--) { - if (chip->bios_dir[i]) { - inode = d_inode(chip->bios_dir[i]); - inode_lock(inode); - inode->i_private = NULL; - inode_unlock(inode); - securityfs_remove(chip->bios_dir[i]); - } - } + securityfs_remove(chip->bios_dir); } diff --git a/drivers/char/tpm/eventlog/of.c b/drivers/char/tpm/eventlog/of.c index 930fe43d5daf..92cec9722ee4 100644 --- a/drivers/char/tpm/eventlog/of.c +++ b/drivers/char/tpm/eventlog/of.c @@ -24,16 +24,10 @@ static int tpm_read_log_memory_region(struct tpm_chip *chip) { - struct device_node *node; struct resource res; int rc; - node = of_parse_phandle(chip->dev.parent->of_node, "memory-region", 0); - if (!node) - return -ENODEV; - - rc = of_address_to_resource(node, 0, &res); - of_node_put(node); + rc = of_reserved_mem_region_to_resource(chip->dev.parent->of_node, 0, &res); if (rc) return rc; diff --git a/drivers/char/tpm/eventlog/tpm1.c b/drivers/char/tpm/eventlog/tpm1.c index 12ee42a31c71..e7913b2853d5 100644 --- a/drivers/char/tpm/eventlog/tpm1.c +++ b/drivers/char/tpm/eventlog/tpm1.c @@ -257,11 +257,8 @@ static int tpm1_ascii_bios_measurements_show(struct seq_file *m, void *v) (unsigned char *)(v + sizeof(struct tcpa_event)); eventname = kmalloc(MAX_TEXT_EVENT, GFP_KERNEL); - if (!eventname) { - printk(KERN_ERR "%s: ERROR - No Memory for event name\n ", - __func__); - return -EFAULT; - } + if (!eventname) + return -ENOMEM; /* 1st: PCR */ seq_printf(m, "%2d ", do_endian_conversion(event->pcr_index)); diff --git a/drivers/char/tpm/st33zp24/i2c.c b/drivers/char/tpm/st33zp24/i2c.c index 661574bb0acf..81348487c125 100644 --- a/drivers/char/tpm/st33zp24/i2c.c +++ b/drivers/char/tpm/st33zp24/i2c.c @@ -133,7 +133,7 @@ static void st33zp24_i2c_remove(struct i2c_client *client) } static const struct i2c_device_id st33zp24_i2c_id[] = { - {TPM_ST33_I2C, 0}, + { TPM_ST33_I2C }, {} }; MODULE_DEVICE_TABLE(i2c, st33zp24_i2c_id); @@ -167,7 +167,7 @@ static struct i2c_driver st33zp24_i2c_driver = { 
module_i2c_driver(st33zp24_i2c_driver); -MODULE_AUTHOR("TPM support (TPMsupport@list.st.com)"); +MODULE_AUTHOR("TPM support <TPMsupport@list.st.com>"); MODULE_DESCRIPTION("STM TPM 1.2 I2C ST33 Driver"); MODULE_VERSION("1.3.0"); MODULE_LICENSE("GPL"); diff --git a/drivers/char/tpm/st33zp24/spi.c b/drivers/char/tpm/st33zp24/spi.c index f5811b301d3b..5149231f3de2 100644 --- a/drivers/char/tpm/st33zp24/spi.c +++ b/drivers/char/tpm/st33zp24/spi.c @@ -284,7 +284,7 @@ static struct spi_driver st33zp24_spi_driver = { module_spi_driver(st33zp24_spi_driver); -MODULE_AUTHOR("TPM support (TPMsupport@list.st.com)"); +MODULE_AUTHOR("TPM support <TPMsupport@list.st.com>"); MODULE_DESCRIPTION("STM TPM 1.2 SPI ST33 Driver"); MODULE_VERSION("1.3.0"); MODULE_LICENSE("GPL"); diff --git a/drivers/char/tpm/st33zp24/st33zp24.c b/drivers/char/tpm/st33zp24/st33zp24.c index a5b554cd4778..2ed7815e4899 100644 --- a/drivers/char/tpm/st33zp24/st33zp24.c +++ b/drivers/char/tpm/st33zp24/st33zp24.c @@ -300,7 +300,7 @@ static irqreturn_t tpm_ioserirq_handler(int irq, void *dev_id) * send TPM commands through the I2C bus. */ static int st33zp24_send(struct tpm_chip *chip, unsigned char *buf, - size_t len) + size_t bufsiz, size_t len) { struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev); u32 status, i, size, ordinal; @@ -582,7 +582,7 @@ int st33zp24_pm_resume(struct device *dev) EXPORT_SYMBOL(st33zp24_pm_resume); #endif -MODULE_AUTHOR("TPM support (TPMsupport@list.st.com)"); +MODULE_AUTHOR("TPM support <TPMsupport@list.st.com>"); MODULE_DESCRIPTION("ST33ZP24 TPM 1.2 driver"); MODULE_VERSION("1.3.0"); MODULE_LICENSE("GPL"); diff --git a/drivers/char/tpm/tpm-buf.c b/drivers/char/tpm/tpm-buf.c new file mode 100644 index 000000000000..dc882fc9fa9e --- /dev/null +++ b/drivers/char/tpm/tpm-buf.c @@ -0,0 +1,246 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Handling of TPM command and other buffers. 
+ */ + +#include <linux/tpm_command.h> +#include <linux/module.h> +#include <linux/tpm.h> + +/** + * tpm_buf_init() - Allocate and initialize a TPM command + * @buf: A &tpm_buf + * @tag: TPM_TAG_RQU_COMMAND, TPM2_ST_NO_SESSIONS or TPM2_ST_SESSIONS + * @ordinal: A command ordinal + * + * Return: 0 or -ENOMEM + */ +int tpm_buf_init(struct tpm_buf *buf, u16 tag, u32 ordinal) +{ + buf->data = (u8 *)__get_free_page(GFP_KERNEL); + if (!buf->data) + return -ENOMEM; + + tpm_buf_reset(buf, tag, ordinal); + return 0; +} +EXPORT_SYMBOL_GPL(tpm_buf_init); + +/** + * tpm_buf_reset() - Initialize a TPM command + * @buf: A &tpm_buf + * @tag: TPM_TAG_RQU_COMMAND, TPM2_ST_NO_SESSIONS or TPM2_ST_SESSIONS + * @ordinal: A command ordinal + */ +void tpm_buf_reset(struct tpm_buf *buf, u16 tag, u32 ordinal) +{ + struct tpm_header *head = (struct tpm_header *)buf->data; + + WARN_ON(tag != TPM_TAG_RQU_COMMAND && tag != TPM2_ST_NO_SESSIONS && + tag != TPM2_ST_SESSIONS && tag != 0); + + buf->flags = 0; + buf->length = sizeof(*head); + head->tag = cpu_to_be16(tag); + head->length = cpu_to_be32(sizeof(*head)); + head->ordinal = cpu_to_be32(ordinal); + buf->handles = 0; +} +EXPORT_SYMBOL_GPL(tpm_buf_reset); + +/** + * tpm_buf_init_sized() - Allocate and initialize a sized (TPM2B) buffer + * @buf: A @tpm_buf + * + * Return: 0 or -ENOMEM + */ +int tpm_buf_init_sized(struct tpm_buf *buf) +{ + buf->data = (u8 *)__get_free_page(GFP_KERNEL); + if (!buf->data) + return -ENOMEM; + + tpm_buf_reset_sized(buf); + return 0; +} +EXPORT_SYMBOL_GPL(tpm_buf_init_sized); + +/** + * tpm_buf_reset_sized() - Initialize a sized buffer + * @buf: A &tpm_buf + */ +void tpm_buf_reset_sized(struct tpm_buf *buf) +{ + buf->flags = TPM_BUF_TPM2B; + buf->length = 2; + buf->data[0] = 0; + buf->data[1] = 0; +} +EXPORT_SYMBOL_GPL(tpm_buf_reset_sized); + +void tpm_buf_destroy(struct tpm_buf *buf) +{ + free_page((unsigned long)buf->data); +} +EXPORT_SYMBOL_GPL(tpm_buf_destroy); + +/** + * tpm_buf_length() - Return the number of bytes consumed by the data + * @buf: A &tpm_buf + * + * Return: The number of bytes consumed by the buffer + */ +u32 tpm_buf_length(struct tpm_buf *buf) +{ + return buf->length; +} +EXPORT_SYMBOL_GPL(tpm_buf_length); + +/** + * tpm_buf_append() - Append data to an initialized buffer + * @buf: A &tpm_buf + * @new_data: A data blob + * @new_length: Size of the appended data + */ +void tpm_buf_append(struct tpm_buf *buf, const u8 *new_data, u16 new_length) +{ + /* Return silently if overflow has already happened. 
*/ + if (buf->flags & TPM_BUF_OVERFLOW) + return; + + if ((buf->length + new_length) > PAGE_SIZE) { + WARN(1, "tpm_buf: write overflow\n"); + buf->flags |= TPM_BUF_OVERFLOW; + return; + } + + memcpy(&buf->data[buf->length], new_data, new_length); + buf->length += new_length; + + if (buf->flags & TPM_BUF_TPM2B) + ((__be16 *)buf->data)[0] = cpu_to_be16(buf->length - 2); + else + ((struct tpm_header *)buf->data)->length = cpu_to_be32(buf->length); +} +EXPORT_SYMBOL_GPL(tpm_buf_append); + +void tpm_buf_append_u8(struct tpm_buf *buf, const u8 value) +{ + tpm_buf_append(buf, &value, 1); +} +EXPORT_SYMBOL_GPL(tpm_buf_append_u8); + +void tpm_buf_append_u16(struct tpm_buf *buf, const u16 value) +{ + __be16 value2 = cpu_to_be16(value); + + tpm_buf_append(buf, (u8 *)&value2, 2); +} +EXPORT_SYMBOL_GPL(tpm_buf_append_u16); + +void tpm_buf_append_u32(struct tpm_buf *buf, const u32 value) +{ + __be32 value2 = cpu_to_be32(value); + + tpm_buf_append(buf, (u8 *)&value2, 4); +} +EXPORT_SYMBOL_GPL(tpm_buf_append_u32); + +/** + * tpm_buf_append_handle() - Add a handle + * @chip: &tpm_chip instance + * @buf: &tpm_buf instance + * @handle: a TPM object handle + * + * Add a handle to the buffer, and increase the count tracking the number of + * handles in the command buffer. Works only for command buffers. + */ +void tpm_buf_append_handle(struct tpm_chip *chip, struct tpm_buf *buf, u32 handle) +{ + if (buf->flags & TPM_BUF_TPM2B) { + dev_err(&chip->dev, "Invalid buffer type (TPM2B)\n"); + return; + } + + tpm_buf_append_u32(buf, handle); + buf->handles++; +} + +/** + * tpm_buf_read() - Read from a TPM buffer + * @buf: &tpm_buf instance + * @offset: offset within the buffer + * @count: the number of bytes to read + * @output: the output buffer + */ +static void tpm_buf_read(struct tpm_buf *buf, off_t *offset, size_t count, void *output) +{ + off_t next_offset; + + /* Return silently if overflow has already happened. 
*/ + if (buf->flags & TPM_BUF_BOUNDARY_ERROR) + return; + + next_offset = *offset + count; + if (next_offset > buf->length) { + WARN(1, "tpm_buf: read out of boundary\n"); + buf->flags |= TPM_BUF_BOUNDARY_ERROR; + return; + } + + memcpy(output, &buf->data[*offset], count); + *offset = next_offset; +} + +/** + * tpm_buf_read_u8() - Read 8-bit word from a TPM buffer + * @buf: &tpm_buf instance + * @offset: offset within the buffer + * + * Return: next 8-bit word + */ +u8 tpm_buf_read_u8(struct tpm_buf *buf, off_t *offset) +{ + u8 value = 0; + + tpm_buf_read(buf, offset, sizeof(value), &value); + + return value; +} +EXPORT_SYMBOL_GPL(tpm_buf_read_u8); + +/** + * tpm_buf_read_u16() - Read 16-bit word from a TPM buffer + * @buf: &tpm_buf instance + * @offset: offset within the buffer + * + * Return: next 16-bit word + */ +u16 tpm_buf_read_u16(struct tpm_buf *buf, off_t *offset) +{ + u16 value = 0; + + tpm_buf_read(buf, offset, sizeof(value), &value); + + return be16_to_cpu(value); +} +EXPORT_SYMBOL_GPL(tpm_buf_read_u16); + +/** + * tpm_buf_read_u32() - Read 32-bit word from a TPM buffer + * @buf: &tpm_buf instance + * @offset: offset within the buffer + * + * Return: next 32-bit word + */ +u32 tpm_buf_read_u32(struct tpm_buf *buf, off_t *offset) +{ + u32 value = 0; + + tpm_buf_read(buf, offset, sizeof(value), &value); + + return be32_to_cpu(value); +} +EXPORT_SYMBOL_GPL(tpm_buf_read_u32); + + diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c index ea6b4013bc38..082b910ddf0d 100644 --- a/drivers/char/tpm/tpm-chip.c +++ b/drivers/char/tpm/tpm-chip.c @@ -28,8 +28,13 @@ DEFINE_IDR(dev_nums_idr); static DEFINE_MUTEX(idr_lock); -struct class *tpm_class; -struct class *tpmrm_class; +const struct class tpm_class = { + .name = "tpm", + .shutdown_pre = tpm_class_shutdown, +}; +const struct class tpmrm_class = { + .name = "tpmrm", +}; dev_t tpm_devt; static int tpm_request_locality(struct tpm_chip *chip) @@ -153,6 +158,9 @@ int tpm_try_get_ops(struct tpm_chip *chip) { int rc = -EIO; + if (chip->flags & TPM_CHIP_FLAG_DISABLE) + return rc; + get_device(&chip->dev); down_read(&chip->ops_sem); @@ -160,6 +168,11 @@ int tpm_try_get_ops(struct tpm_chip *chip) goto out_ops; mutex_lock(&chip->tpm_mutex); + + /* tmp_chip_start may issue IO that is denied while suspended */ + if (chip->flags & TPM_CHIP_FLAG_SUSPENDED) + goto out_lock; + rc = tpm_chip_start(chip); if (rc) goto out_lock; @@ -218,42 +231,6 @@ struct tpm_chip *tpm_default_chip(void) EXPORT_SYMBOL_GPL(tpm_default_chip); /** - * tpm_find_get_ops() - find and reserve a TPM chip - * @chip: a &struct tpm_chip instance, %NULL for the default chip - * - * Finds a TPM chip and reserves its class device and operations. The chip must - * be released with tpm_put_ops() after use. - * This function is for internal use only. It supports existing TPM callers - * by accepting NULL, but those callers should be converted to pass in a chip - * directly. - * - * Return: - * A reserved &struct tpm_chip instance. - * %NULL if a chip is not found. - * %NULL if the chip is not available. 
- */ -struct tpm_chip *tpm_find_get_ops(struct tpm_chip *chip) -{ - int rc; - - if (chip) { - if (!tpm_try_get_ops(chip)) - return chip; - return NULL; - } - - chip = tpm_default_chip(); - if (!chip) - return NULL; - rc = tpm_try_get_ops(chip); - /* release additional reference we got from tpm_default_chip() */ - put_device(&chip->dev); - if (rc) - return NULL; - return chip; -} - -/** * tpm_dev_release() - free chip memory and the device number * @dev: the character device for the TPM chip * @@ -269,7 +246,9 @@ static void tpm_dev_release(struct device *dev) kfree(chip->work_space.context_buf); kfree(chip->work_space.session_buf); - kfree(chip->allocated_banks); +#ifdef CONFIG_TCG_TPM2_HMAC + kfree(chip->auth); +#endif kfree(chip); } @@ -289,6 +268,7 @@ int tpm_class_shutdown(struct device *dev) down_write(&chip->ops_sem); if (chip->flags & TPM_CHIP_FLAG_TPM2) { if (!tpm_chip_start(chip)) { + tpm2_end_auth_session(chip); tpm2_shutdown(chip, TPM2_SU_CLEAR); tpm_chip_stop(chip); } @@ -336,7 +316,7 @@ struct tpm_chip *tpm_chip_alloc(struct device *pdev, device_initialize(&chip->dev); - chip->dev.class = tpm_class; + chip->dev.class = &tpm_class; chip->dev.release = tpm_dev_release; chip->dev.parent = pdev; chip->dev.groups = chip->groups; @@ -514,10 +494,6 @@ static int tpm_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait) { struct tpm_chip *chip = container_of(rng, struct tpm_chip, hwrng); - /* Give back zero bytes, as TPM chip has not yet fully resumed: */ - if (chip->flags & TPM_CHIP_FLAG_SUSPENDED) - return 0; - return tpm_get_random(chip, data, max); } @@ -663,6 +639,16 @@ EXPORT_SYMBOL_GPL(tpm_chip_register); */ void tpm_chip_unregister(struct tpm_chip *chip) { +#ifdef CONFIG_TCG_TPM2_HMAC + int rc; + + rc = tpm_try_get_ops(chip); + if (!rc) { + tpm2_end_auth_session(chip); + tpm_put_ops(chip); + } +#endif + tpm_del_legacy_sysfs(chip); if (tpm_is_hwrng_enabled(chip)) hwrng_unregister(&chip->hwrng); diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c index 30b4c288c1bb..f942c0c8e402 100644 --- a/drivers/char/tpm/tpm-dev-common.c +++ b/drivers/char/tpm/tpm-dev-common.c @@ -27,6 +27,9 @@ static ssize_t tpm_dev_transmit(struct tpm_chip *chip, struct tpm_space *space, struct tpm_header *header = (void *)buf; ssize_t ret, len; + if (chip->flags & TPM_CHIP_FLAG_TPM2) + tpm2_end_auth_session(chip); + ret = tpm2_prepare_space(chip, space, buf, bufsiz); /* If the command is not implemented by the TPM, synthesize a * response with a TPM2_RC_COMMAND_CODE return for user-space. @@ -47,6 +50,8 @@ static ssize_t tpm_dev_transmit(struct tpm_chip *chip, struct tpm_space *space, if (!ret) ret = tpm2_commit_space(chip, space, buf, &len); + else + tpm2_flush_space(chip); out_rc: return ret ? 
ret : len; @@ -86,7 +91,7 @@ out: static void user_reader_timeout(struct timer_list *t) { - struct file_priv *priv = from_timer(priv, t, user_read_timer); + struct file_priv *priv = timer_container_of(priv, t, user_read_timer); pr_warn("TPM user space timeout is deprecated (pid=%d)\n", task_tgid_nr(current)); @@ -155,7 +160,7 @@ ssize_t tpm_common_read(struct file *file, char __user *buf, out: if (!priv->response_length) { *off = 0; - del_timer_sync(&priv->user_read_timer); + timer_delete_sync(&priv->user_read_timer); flush_work(&priv->timeout_work); } mutex_unlock(&priv->buffer_mutex); @@ -262,7 +267,7 @@ __poll_t tpm_common_poll(struct file *file, poll_table *wait) void tpm_common_release(struct file *file, struct file_priv *priv) { flush_work(&priv->async_work); - del_timer_sync(&priv->user_read_timer); + timer_delete_sync(&priv->user_read_timer); flush_work(&priv->timeout_work); file->private_data = NULL; priv->response_length = 0; @@ -270,7 +275,8 @@ void tpm_common_release(struct file *file, struct file_priv *priv) int __init tpm_dev_common_init(void) { - tpm_dev_wq = alloc_workqueue("tpm_dev_wq", WQ_MEM_RECLAIM, 0); + tpm_dev_wq = alloc_workqueue("tpm_dev_wq", WQ_MEM_RECLAIM | WQ_PERCPU, + 0); return !tpm_dev_wq ? -ENOMEM : 0; } diff --git a/drivers/char/tpm/tpm-dev.c b/drivers/char/tpm/tpm-dev.c index e2c0baa69fef..97c94b5e9340 100644 --- a/drivers/char/tpm/tpm-dev.c +++ b/drivers/char/tpm/tpm-dev.c @@ -59,7 +59,6 @@ static int tpm_release(struct inode *inode, struct file *file) const struct file_operations tpm_fops = { .owner = THIS_MODULE, - .llseek = no_llseek, .open = tpm_open, .read = tpm_common_read, .write = tpm_common_write, diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c index 586ca10b0d72..f745a098908b 100644 --- a/drivers/char/tpm/tpm-interface.c +++ b/drivers/char/tpm/tpm-interface.c @@ -52,12 +52,43 @@ MODULE_PARM_DESC(suspend_pcr, unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal) { if (chip->flags & TPM_CHIP_FLAG_TPM2) - return tpm2_calc_ordinal_duration(chip, ordinal); + return tpm2_calc_ordinal_duration(ordinal); else return tpm1_calc_ordinal_duration(chip, ordinal); } EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration); +static void tpm_chip_cancel(struct tpm_chip *chip) +{ + if (!chip->ops->cancel) + return; + + chip->ops->cancel(chip); +} + +static u8 tpm_chip_status(struct tpm_chip *chip) +{ + if (!chip->ops->status) + return 0; + + return chip->ops->status(chip); +} + +static bool tpm_chip_req_canceled(struct tpm_chip *chip, u8 status) +{ + if (!chip->ops->req_canceled) + return false; + + return chip->ops->req_canceled(chip, status); +} + +static bool tpm_transmit_completed(u8 status, struct tpm_chip *chip) +{ + u8 status_masked = status & chip->ops->req_complete_mask; + + return status_masked == chip->ops->req_complete_val; +} + static ssize_t tpm_try_transmit(struct tpm_chip *chip, void *buf, size_t bufsiz) { struct tpm_header *header = buf; @@ -82,7 +113,7 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip, void *buf, size_t bufsiz) return -E2BIG; } - rc = chip->ops->send(chip, buf, count); + rc = chip->ops->send(chip, buf, bufsiz, count); if (rc < 0) { if (rc != -EPIPE) dev_err(&chip->dev, @@ -90,8 +121,19 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip, void *buf, size_t bufsiz) return rc; } - /* A sanity check. send() should just return zero on success e.g. - * not the command length. + /* + * Synchronous devices return the response directly during the send() + * call in the same buffer. 
+ */ + if (chip->flags & TPM_CHIP_FLAG_SYNC) { + len = rc; + rc = 0; + goto out_sync; + } + + /* + * A sanity check. send() of asynchronous devices should just return + * zero on success e.g. not the command length. */ if (rc > 0) { dev_warn(&chip->dev, @@ -104,12 +146,11 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip, void *buf, size_t bufsiz) stop = jiffies + tpm_calc_ordinal_duration(chip, ordinal); do { - u8 status = chip->ops->status(chip); - if ((status & chip->ops->req_complete_mask) == - chip->ops->req_complete_val) + u8 status = tpm_chip_status(chip); + if (tpm_transmit_completed(status, chip)) goto out_recv; - if (chip->ops->req_canceled(chip, status)) { + if (tpm_chip_req_canceled(chip, status)) { dev_err(&chip->dev, "Operation Canceled\n"); return -ECANCELED; } @@ -118,7 +159,14 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip, void *buf, size_t bufsiz) rmb(); } while (time_before(jiffies, stop)); - chip->ops->cancel(chip); + /* + * Check for completion one more time, just in case the device reported + * it while the driver was sleeping in the busy loop above. + */ + if (tpm_transmit_completed(tpm_chip_status(chip), chip)) + goto out_recv; + + tpm_chip_cancel(chip); dev_err(&chip->dev, "Operation Timed out\n"); return -ETIME; @@ -127,7 +175,10 @@ out_recv: if (len < 0) { rc = len; dev_err(&chip->dev, "tpm_transmit: tpm_recv: error %d\n", rc); - } else if (len < TPM_HEADER_SIZE || len != be32_to_cpu(header->length)) + return rc; + } +out_sync: + if (len < TPM_HEADER_SIZE || len != be32_to_cpu(header->length)) rc = -EFAULT; return rc ? rc : len; @@ -232,6 +283,7 @@ ssize_t tpm_transmit_cmd(struct tpm_chip *chip, struct tpm_buf *buf, if (len < min_rsp_body_length + TPM_HEADER_SIZE) return -EFAULT; + buf->length = len; return 0; } EXPORT_SYMBOL_GPL(tpm_transmit_cmd); @@ -261,10 +313,13 @@ int tpm_is_tpm2(struct tpm_chip *chip) { int rc; - chip = tpm_find_get_ops(chip); if (!chip) return -ENODEV; + rc = tpm_try_get_ops(chip); + if (rc) + return rc; + rc = (chip->flags & TPM_CHIP_FLAG_TPM2) != 0; tpm_put_ops(chip); @@ -286,10 +341,13 @@ int tpm_pcr_read(struct tpm_chip *chip, u32 pcr_idx, { int rc; - chip = tpm_find_get_ops(chip); if (!chip) return -ENODEV; + rc = tpm_try_get_ops(chip); + if (rc) + return rc; + if (chip->flags & TPM_CHIP_FLAG_TPM2) rc = tpm2_pcr_read(chip, pcr_idx, digest, NULL); else @@ -317,10 +375,13 @@ int tpm_pcr_extend(struct tpm_chip *chip, u32 pcr_idx, int rc; int i; - chip = tpm_find_get_ops(chip); if (!chip) return -ENODEV; + rc = tpm_try_get_ops(chip); + if (rc) + return rc; + for (i = 0; i < chip->nr_allocated_banks; i++) { if (digests[i].alg_id != chip->allocated_banks[i].alg_id) { rc = -EINVAL; @@ -342,31 +403,6 @@ out: } EXPORT_SYMBOL_GPL(tpm_pcr_extend); -/** - * tpm_send - send a TPM command - * @chip: a &struct tpm_chip instance, %NULL for the default chip - * @cmd: a TPM command buffer - * @buflen: the length of the TPM command buffer - * - * Return: same as with tpm_transmit_cmd() - */ -int tpm_send(struct tpm_chip *chip, void *cmd, size_t buflen) -{ - struct tpm_buf buf; - int rc; - - chip = tpm_find_get_ops(chip); - if (!chip) - return -ENODEV; - - buf.data = cmd; - rc = tpm_transmit_cmd(chip, &buf, 0, "attempting to a send a command"); - - tpm_put_ops(chip); - return rc; -} -EXPORT_SYMBOL_GPL(tpm_send); - int tpm_auto_startup(struct tpm_chip *chip) { int rc; @@ -394,6 +430,13 @@ int tpm_pm_suspend(struct device *dev) if (!chip) return -ENODEV; + rc = tpm_try_get_ops(chip); + if (rc) { + /* Can be safely set out of locks, as no 
action cannot race: */ + chip->flags |= TPM_CHIP_FLAG_SUSPENDED; + goto out; + } + if (chip->flags & TPM_CHIP_FLAG_ALWAYS_POWERED) goto suspended; @@ -401,19 +444,19 @@ int tpm_pm_suspend(struct device *dev) !pm_suspend_via_firmware()) goto suspended; - rc = tpm_try_get_ops(chip); - if (!rc) { - if (chip->flags & TPM_CHIP_FLAG_TPM2) - tpm2_shutdown(chip, TPM2_SU_STATE); - else - rc = tpm1_pm_suspend(chip, tpm_suspend_pcr); - - tpm_put_ops(chip); + if (chip->flags & TPM_CHIP_FLAG_TPM2) { + tpm2_end_auth_session(chip); + tpm2_shutdown(chip, TPM2_SU_STATE); + goto suspended; } + rc = tpm1_pm_suspend(chip, tpm_suspend_pcr); + suspended: chip->flags |= TPM_CHIP_FLAG_SUSPENDED; + tpm_put_ops(chip); +out: if (rc) dev_err(dev, "Ignoring error %d while suspending\n", rc); return 0; @@ -458,10 +501,13 @@ int tpm_get_random(struct tpm_chip *chip, u8 *out, size_t max) if (!out || max > TPM_MAX_RNG_DATA) return -EINVAL; - chip = tpm_find_get_ops(chip); if (!chip) return -ENODEV; + rc = tpm_try_get_ops(chip); + if (rc) + return rc; + if (chip->flags & TPM_CHIP_FLAG_TPM2) rc = tpm2_get_random(chip, out, max); else @@ -476,18 +522,15 @@ static int __init tpm_init(void) { int rc; - tpm_class = class_create("tpm"); - if (IS_ERR(tpm_class)) { + rc = class_register(&tpm_class); + if (rc) { pr_err("couldn't create tpm class\n"); - return PTR_ERR(tpm_class); + return rc; } - tpm_class->shutdown_pre = tpm_class_shutdown; - - tpmrm_class = class_create("tpmrm"); - if (IS_ERR(tpmrm_class)) { + rc = class_register(&tpmrm_class); + if (rc) { pr_err("couldn't create tpmrm class\n"); - rc = PTR_ERR(tpmrm_class); goto out_destroy_tpm_class; } @@ -508,9 +551,9 @@ static int __init tpm_init(void) out_unreg_chrdev: unregister_chrdev_region(tpm_devt, 2 * TPM_NUM_DEVICES); out_destroy_tpmrm_class: - class_destroy(tpmrm_class); + class_unregister(&tpmrm_class); out_destroy_tpm_class: - class_destroy(tpm_class); + class_unregister(&tpm_class); return rc; } @@ -518,8 +561,8 @@ out_destroy_tpm_class: static void __exit tpm_exit(void) { idr_destroy(&dev_nums_idr); - class_destroy(tpm_class); - class_destroy(tpmrm_class); + class_unregister(&tpm_class); + class_unregister(&tpmrm_class); unregister_chrdev_region(tpm_devt, 2*TPM_NUM_DEVICES); tpm_dev_common_exit(); } @@ -527,7 +570,7 @@ static void __exit tpm_exit(void) subsys_initcall(tpm_init); module_exit(tpm_exit); -MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)"); +MODULE_AUTHOR("Leendert van Doorn <leendert@watson.ibm.com>"); MODULE_DESCRIPTION("TPM Driver"); MODULE_VERSION("2.0"); MODULE_LICENSE("GPL"); diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c index 54c71473aa29..94231f052ea7 100644 --- a/drivers/char/tpm/tpm-sysfs.c +++ b/drivers/char/tpm/tpm-sysfs.c @@ -309,6 +309,21 @@ static ssize_t tpm_version_major_show(struct device *dev, } static DEVICE_ATTR_RO(tpm_version_major); +#ifdef CONFIG_TCG_TPM2_HMAC +static ssize_t null_name_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct tpm_chip *chip = to_tpm_chip(dev); + int size = TPM2_NAME_SIZE; + + bin2hex(buf, chip->null_key_name, size); + size *= 2; + buf[size++] = '\n'; + return size; +} +static DEVICE_ATTR_RO(null_name); +#endif + static struct attribute *tpm1_dev_attrs[] = { &dev_attr_pubek.attr, &dev_attr_pcrs.attr, @@ -326,6 +341,9 @@ static struct attribute *tpm1_dev_attrs[] = { static struct attribute *tpm2_dev_attrs[] = { &dev_attr_tpm_version_major.attr, +#ifdef CONFIG_TCG_TPM2_HMAC + &dev_attr_null_name.attr, +#endif NULL }; diff --git 
a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h index 460bb85dd142..02c07fef41ba 100644 --- a/drivers/char/tpm/tpm.h +++ b/drivers/char/tpm/tpm.h @@ -28,7 +28,7 @@ #include <linux/tpm_eventlog.h> #ifdef CONFIG_X86 -#include <asm/intel-family.h> +#include <asm/cpu_device_id.h> #endif #define TPM_MINOR 224 /* officially assigned */ @@ -230,8 +230,8 @@ enum tpm2_pt_props { * compiler warnings about stack frame size. */ #define TPM_MAX_RNG_DATA 128 -extern struct class *tpm_class; -extern struct class *tpmrm_class; +extern const struct class tpm_class; +extern const struct class tpmrm_class; extern dev_t tpm_devt; extern const struct file_operations tpm_fops; extern const struct file_operations tpmrm_fops; @@ -267,7 +267,6 @@ static inline void tpm_msleep(unsigned int delay_msec) int tpm_chip_bootstrap(struct tpm_chip *chip); int tpm_chip_start(struct tpm_chip *chip); void tpm_chip_stop(struct tpm_chip *chip); -struct tpm_chip *tpm_find_get_ops(struct tpm_chip *chip); struct tpm_chip *tpm_chip_alloc(struct device *dev, const struct tpm_class_ops *ops); @@ -299,7 +298,7 @@ ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id, ssize_t tpm2_get_pcr_allocation(struct tpm_chip *chip); int tpm2_auto_startup(struct tpm_chip *chip); void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type); -unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal); +unsigned long tpm2_calc_ordinal_duration(u32 ordinal); int tpm2_probe(struct tpm_chip *chip); int tpm2_get_cc_attrs_tbl(struct tpm_chip *chip); int tpm2_find_cc(struct tpm_chip *chip, u32 cc); @@ -312,9 +311,23 @@ int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space, void *buf, size_t *bufsiz); int tpm_devs_add(struct tpm_chip *chip); void tpm_devs_remove(struct tpm_chip *chip); +int tpm2_save_context(struct tpm_chip *chip, u32 handle, u8 *buf, + unsigned int buf_size, unsigned int *offset); +int tpm2_load_context(struct tpm_chip *chip, u8 *buf, + unsigned int *offset, u32 *handle); void tpm_bios_log_setup(struct tpm_chip *chip); void tpm_bios_log_teardown(struct tpm_chip *chip); int tpm_dev_common_init(void); void tpm_dev_common_exit(void); + +#ifdef CONFIG_TCG_TPM2_HMAC +int tpm2_sessions_init(struct tpm_chip *chip); +#else +static inline int tpm2_sessions_init(struct tpm_chip *chip) +{ + return 0; +} +#endif + #endif diff --git a/drivers/char/tpm/tpm1-cmd.c b/drivers/char/tpm/tpm1-cmd.c index cf64c7385105..b49a790f1bd5 100644 --- a/drivers/char/tpm/tpm1-cmd.c +++ b/drivers/char/tpm/tpm1-cmd.c @@ -799,11 +799,6 @@ int tpm1_pm_suspend(struct tpm_chip *chip, u32 tpm_suspend_pcr) */ int tpm1_get_pcr_allocation(struct tpm_chip *chip) { - chip->allocated_banks = kcalloc(1, sizeof(*chip->allocated_banks), - GFP_KERNEL); - if (!chip->allocated_banks) - return -ENOMEM; - chip->allocated_banks[0].alg_id = TPM_ALG_SHA1; chip->allocated_banks[0].digest_size = hash_digest_size[HASH_ALGO_SHA1]; chip->allocated_banks[0].crypto_id = HASH_ALGO_SHA1; diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c index 93545be190a5..3a77be7ebf4a 100644 --- a/drivers/char/tpm/tpm2-cmd.c +++ b/drivers/char/tpm/tpm2-cmd.c @@ -11,10 +11,17 @@ * used by the kernel internally. 
*/ +#include "linux/dev_printk.h" +#include "linux/tpm.h" #include "tpm.h" #include <crypto/hash_info.h> +#include <linux/unaligned.h> -static struct tpm2_hash tpm2_hash_map[] = { +static bool disable_pcr_integrity; +module_param(disable_pcr_integrity, bool, 0444); +MODULE_PARM_DESC(disable_pcr_integrity, "Disable integrity protection of TPM2_PCR_Extend"); + +struct tpm2_hash tpm2_hash_map[] = { {HASH_ALGO_SHA1, TPM_ALG_SHA1}, {HASH_ALGO_SHA256, TPM_ALG_SHA256}, {HASH_ALGO_SHA384, TPM_ALG_SHA384}, @@ -22,122 +29,71 @@ static struct tpm2_hash tpm2_hash_map[] = { {HASH_ALGO_SM3_256, TPM_ALG_SM3_256}, }; +int tpm2_find_hash_alg(unsigned int crypto_id) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(tpm2_hash_map); i++) + if (crypto_id == tpm2_hash_map[i].crypto_id) + return tpm2_hash_map[i].tpm_id; + + return -EINVAL; +} +EXPORT_SYMBOL_GPL(tpm2_find_hash_alg); + int tpm2_get_timeouts(struct tpm_chip *chip) { - /* Fixed timeouts for TPM2 */ chip->timeout_a = msecs_to_jiffies(TPM2_TIMEOUT_A); chip->timeout_b = msecs_to_jiffies(TPM2_TIMEOUT_B); chip->timeout_c = msecs_to_jiffies(TPM2_TIMEOUT_C); chip->timeout_d = msecs_to_jiffies(TPM2_TIMEOUT_D); - - /* PTP spec timeouts */ - chip->duration[TPM_SHORT] = msecs_to_jiffies(TPM2_DURATION_SHORT); - chip->duration[TPM_MEDIUM] = msecs_to_jiffies(TPM2_DURATION_MEDIUM); - chip->duration[TPM_LONG] = msecs_to_jiffies(TPM2_DURATION_LONG); - - /* Key creation commands long timeouts */ - chip->duration[TPM_LONG_LONG] = - msecs_to_jiffies(TPM2_DURATION_LONG_LONG); - chip->flags |= TPM_CHIP_FLAG_HAVE_TIMEOUTS; - return 0; } -/** - * tpm2_ordinal_duration_index() - returns an index to the chip duration table - * @ordinal: TPM command ordinal. - * - * The function returns an index to the chip duration table - * (enum tpm_duration), that describes the maximum amount of - * time the chip could take to return the result for a particular ordinal. - * - * The values of the MEDIUM, and LONG durations are taken - * from the PC Client Profile (PTP) specification (750, 2000 msec) - * - * LONG_LONG is for commands that generates keys which empirically takes - * a longer time on some systems. - * - * Return: - * * TPM_MEDIUM - * * TPM_LONG - * * TPM_LONG_LONG - * * TPM_UNDEFINED +/* + * Contains the maximum durations in milliseconds for TPM2 commands. 
*/ -static u8 tpm2_ordinal_duration_index(u32 ordinal) -{ - switch (ordinal) { - /* Startup */ - case TPM2_CC_STARTUP: /* 144 */ - return TPM_MEDIUM; - - case TPM2_CC_SELF_TEST: /* 143 */ - return TPM_LONG; - - case TPM2_CC_GET_RANDOM: /* 17B */ - return TPM_LONG; - - case TPM2_CC_SEQUENCE_UPDATE: /* 15C */ - return TPM_MEDIUM; - case TPM2_CC_SEQUENCE_COMPLETE: /* 13E */ - return TPM_MEDIUM; - case TPM2_CC_EVENT_SEQUENCE_COMPLETE: /* 185 */ - return TPM_MEDIUM; - case TPM2_CC_HASH_SEQUENCE_START: /* 186 */ - return TPM_MEDIUM; - - case TPM2_CC_VERIFY_SIGNATURE: /* 177 */ - return TPM_LONG_LONG; - - case TPM2_CC_PCR_EXTEND: /* 182 */ - return TPM_MEDIUM; - - case TPM2_CC_HIERARCHY_CONTROL: /* 121 */ - return TPM_LONG; - case TPM2_CC_HIERARCHY_CHANGE_AUTH: /* 129 */ - return TPM_LONG; - - case TPM2_CC_GET_CAPABILITY: /* 17A */ - return TPM_MEDIUM; - - case TPM2_CC_NV_READ: /* 14E */ - return TPM_LONG; - - case TPM2_CC_CREATE_PRIMARY: /* 131 */ - return TPM_LONG_LONG; - case TPM2_CC_CREATE: /* 153 */ - return TPM_LONG_LONG; - case TPM2_CC_CREATE_LOADED: /* 191 */ - return TPM_LONG_LONG; - - default: - return TPM_UNDEFINED; - } -} +static const struct { + unsigned long ordinal; + unsigned long duration; +} tpm2_ordinal_duration_map[] = { + {TPM2_CC_STARTUP, 750}, + {TPM2_CC_SELF_TEST, 3000}, + {TPM2_CC_GET_RANDOM, 2000}, + {TPM2_CC_SEQUENCE_UPDATE, 750}, + {TPM2_CC_SEQUENCE_COMPLETE, 750}, + {TPM2_CC_EVENT_SEQUENCE_COMPLETE, 750}, + {TPM2_CC_HASH_SEQUENCE_START, 750}, + {TPM2_CC_VERIFY_SIGNATURE, 30000}, + {TPM2_CC_PCR_EXTEND, 750}, + {TPM2_CC_HIERARCHY_CONTROL, 2000}, + {TPM2_CC_HIERARCHY_CHANGE_AUTH, 2000}, + {TPM2_CC_GET_CAPABILITY, 750}, + {TPM2_CC_NV_READ, 2000}, + {TPM2_CC_CREATE_PRIMARY, 30000}, + {TPM2_CC_CREATE, 30000}, + {TPM2_CC_CREATE_LOADED, 30000}, +}; /** - * tpm2_calc_ordinal_duration() - calculate the maximum command duration - * @chip: TPM chip to use. + * tpm2_calc_ordinal_duration() - Calculate the maximum command duration * @ordinal: TPM command ordinal. * - * The function returns the maximum amount of time the chip could take - * to return the result for a particular ordinal in jiffies. - * - * Return: A maximal duration time for an ordinal in jiffies. + * Returns the maximum amount of time the chip is expected by kernel to + * take in jiffies. 
*/ -unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal) +unsigned long tpm2_calc_ordinal_duration(u32 ordinal) { - unsigned int index; + int i; - index = tpm2_ordinal_duration_index(ordinal); + for (i = 0; i < ARRAY_SIZE(tpm2_ordinal_duration_map); i++) + if (ordinal == tpm2_ordinal_duration_map[i].ordinal) + return msecs_to_jiffies(tpm2_ordinal_duration_map[i].duration); - if (index != TPM_UNDEFINED) - return chip->duration[index]; - else - return msecs_to_jiffies(TPM2_DURATION_DEFAULT); + return msecs_to_jiffies(TPM2_DURATION_DEFAULT); } - struct tpm2_pcr_read_out { __be32 update_cnt; __be32 pcr_selects_cnt; @@ -216,13 +172,6 @@ out: return rc; } -struct tpm2_null_auth_area { - __be32 handle; - __be16 nonce_size; - u8 attributes; - __be16 auth_size; -} __packed; - /** * tpm2_pcr_extend() - extend a PCR value * @@ -236,24 +185,34 @@ int tpm2_pcr_extend(struct tpm_chip *chip, u32 pcr_idx, struct tpm_digest *digests) { struct tpm_buf buf; - struct tpm2_null_auth_area auth_area; int rc; int i; + if (!disable_pcr_integrity) { + rc = tpm2_start_auth_session(chip); + if (rc) + return rc; + } + rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_PCR_EXTEND); - if (rc) + if (rc) { + if (!disable_pcr_integrity) + tpm2_end_auth_session(chip); return rc; + } - tpm_buf_append_u32(&buf, pcr_idx); - - auth_area.handle = cpu_to_be32(TPM2_RS_PW); - auth_area.nonce_size = 0; - auth_area.attributes = 0; - auth_area.auth_size = 0; + if (!disable_pcr_integrity) { + rc = tpm_buf_append_name(chip, &buf, pcr_idx, NULL); + if (rc) { + tpm_buf_destroy(&buf); + return rc; + } + tpm_buf_append_hmac_session(chip, &buf, 0, NULL, 0); + } else { + tpm_buf_append_handle(chip, &buf, pcr_idx); + tpm_buf_append_auth(chip, &buf, NULL, 0); + } - tpm_buf_append_u32(&buf, sizeof(struct tpm2_null_auth_area)); - tpm_buf_append(&buf, (const unsigned char *)&auth_area, - sizeof(auth_area)); tpm_buf_append_u32(&buf, chip->nr_allocated_banks); for (i = 0; i < chip->nr_allocated_banks; i++) { @@ -262,7 +221,17 @@ int tpm2_pcr_extend(struct tpm_chip *chip, u32 pcr_idx, chip->allocated_banks[i].digest_size); } + if (!disable_pcr_integrity) { + rc = tpm_buf_fill_hmac_session(chip, &buf); + if (rc) { + tpm_buf_destroy(&buf); + return rc; + } + } + rc = tpm_transmit_cmd(chip, &buf, 0, "attempting extend a PCR value"); + if (!disable_pcr_integrity) + rc = tpm_buf_check_hmac_response(chip, &buf, rc); tpm_buf_destroy(&buf); @@ -288,6 +257,7 @@ struct tpm2_get_random_out { int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max) { struct tpm2_get_random_out *out; + struct tpm_header *head; struct tpm_buf buf; u32 recd; u32 num_bytes = max; @@ -295,29 +265,59 @@ int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max) int total = 0; int retries = 5; u8 *dest_ptr = dest; + off_t offset; if (!num_bytes || max > TPM_MAX_RNG_DATA) return -EINVAL; - err = tpm_buf_init(&buf, 0, 0); + err = tpm2_start_auth_session(chip); if (err) return err; + err = tpm_buf_init(&buf, 0, 0); + if (err) { + tpm2_end_auth_session(chip); + return err; + } + do { - tpm_buf_reset(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_GET_RANDOM); + tpm_buf_reset(&buf, TPM2_ST_SESSIONS, TPM2_CC_GET_RANDOM); + if (tpm2_chip_auth(chip)) { + tpm_buf_append_hmac_session(chip, &buf, + TPM2_SA_ENCRYPT | + TPM2_SA_CONTINUE_SESSION, + NULL, 0); + } else { + offset = buf.handles * 4 + TPM_HEADER_SIZE; + head = (struct tpm_header *)buf.data; + if (tpm_buf_length(&buf) == offset) + head->tag = cpu_to_be16(TPM2_ST_NO_SESSIONS); + } tpm_buf_append_u16(&buf, 
num_bytes); + err = tpm_buf_fill_hmac_session(chip, &buf); + if (err) { + tpm_buf_destroy(&buf); + return err; + } + err = tpm_transmit_cmd(chip, &buf, offsetof(struct tpm2_get_random_out, buffer), "attempting get random"); + err = tpm_buf_check_hmac_response(chip, &buf, err); if (err) { if (err > 0) err = -EIO; goto out; } - out = (struct tpm2_get_random_out *) - &buf.data[TPM_HEADER_SIZE]; + head = (struct tpm_header *)buf.data; + offset = TPM_HEADER_SIZE; + /* Skip the parameter size field: */ + if (be16_to_cpu(head->tag) == TPM2_ST_SESSIONS) + offset += 4; + + out = (struct tpm2_get_random_out *)&buf.data[offset]; recd = min_t(u32, be16_to_cpu(out->size), num_bytes); if (tpm_buf_length(&buf) < TPM_HEADER_SIZE + @@ -334,9 +334,11 @@ int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max) } while (retries-- && total < max); tpm_buf_destroy(&buf); + return total ? total : -EIO; out: tpm_buf_destroy(&buf); + tpm2_end_auth_session(chip); return err; } @@ -574,11 +576,9 @@ ssize_t tpm2_get_pcr_allocation(struct tpm_chip *chip) nr_possible_banks = be32_to_cpup( (__be32 *)&buf.data[TPM_HEADER_SIZE + 5]); - - chip->allocated_banks = kcalloc(nr_possible_banks, - sizeof(*chip->allocated_banks), - GFP_KERNEL); - if (!chip->allocated_banks) { + if (nr_possible_banks > TPM2_MAX_PCR_BANKS) { + pr_err("tpm: out of bank capacity: %u > %u\n", + nr_possible_banks, TPM2_MAX_PCR_BANKS); rc = -ENOMEM; goto out; } @@ -759,6 +759,11 @@ int tpm2_auto_startup(struct tpm_chip *chip) rc = 0; } + if (rc) + goto out; + + rc = tpm2_sessions_init(chip); + out: /* * Infineon TPM in field upgrade mode will return no data for the number diff --git a/drivers/char/tpm/tpm2-sessions.c b/drivers/char/tpm/tpm2-sessions.c new file mode 100644 index 000000000000..4149379665c4 --- /dev/null +++ b/drivers/char/tpm/tpm2-sessions.c @@ -0,0 +1,1398 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright (C) 2018 James.Bottomley@HansenPartnership.com + * + * Cryptographic helper routines for handling TPM2 sessions for + * authorization HMAC and request response encryption. + * + * The idea is to ensure that every TPM command is HMAC protected by a + * session, meaning in-flight tampering would be detected and in + * addition all sensitive inputs and responses should be encrypted. + * + * The basic way this works is to use a TPM feature called salted + * sessions where a random secret used in session construction is + * encrypted to the public part of a known TPM key. The problem is we + * have no known keys, so initially a primary Elliptic Curve key is + * derived from the NULL seed (we use EC because most TPMs generate + * these keys much faster than RSA ones). The curve used is NIST_P256 + * because that's now mandated to be present in 'TCG TPM v2.0 + * Provisioning Guidance' + * + * Threat problems: the initial TPM2_CreatePrimary is not (and cannot + * be) session protected, so a clever Man in the Middle could return a + * public key they control to this command and from there intercept + * and decode all subsequent session based transactions. The kernel + * cannot mitigate this threat but, after boot, userspace can get + * proof this has not happened by asking the TPM to certify the NULL + * key. This certification would chain back to the TPM Endorsement + * Certificate and prove the NULL seed primary had not been tampered + * with and thus all sessions must have been cryptographically secure. + * To assist with this, the initial NULL seed public key name is made + * available in a sysfs file. 
+ * + * Use of these functions: + * + * The design is all the crypto, hash and hmac gunk is confined in this + * file and never needs to be seen even by the kernel internal user. To + * the user there's an init function tpm2_sessions_init() that needs to + * be called once per TPM which generates the NULL seed primary key. + * + * These are the usage functions: + * + * tpm2_end_auth_session() kills the session and frees the resources. + * Under normal operation this function is done by + * tpm_buf_check_hmac_response(), so this is only to be used on + * error legs where the latter is not executed. + * tpm_buf_append_name() to add a handle to the buffer. This must be + * used in place of the usual tpm_buf_append_u32() for adding + * handles because handles have to be processed specially when + * calculating the HMAC. In particular, for NV, volatile and + * permanent objects you now need to provide the name. + * tpm_buf_append_hmac_session() which appends the hmac session to the + * buf in the same way tpm_buf_append_auth does(). + * tpm_buf_fill_hmac_session() This calculates the correct hash and + * places it in the buffer. It must be called after the complete + * command buffer is finalized so it can fill in the correct HMAC + * based on the parameters. + * tpm_buf_check_hmac_response() which checks the session response in + * the buffer and calculates what it should be. If there's a + * mismatch it will log a warning and return an error. If + * tpm_buf_append_hmac_session() did not specify + * TPM_SA_CONTINUE_SESSION then the session will be closed (if it + * hasn't been consumed) and the auth structure freed. + */ + +#include "tpm.h" +#include <linux/random.h> +#include <linux/scatterlist.h> +#include <linux/unaligned.h> +#include <crypto/kpp.h> +#include <crypto/ecdh.h> +#include <crypto/sha2.h> +#include <crypto/utils.h> + +/* maximum number of names the TPM must remember for authorization */ +#define AUTH_MAX_NAMES 3 + +#define AES_KEY_BYTES AES_KEYSIZE_128 +#define AES_KEY_BITS (AES_KEY_BYTES*8) + +/* + * This is the structure that carries all the auth information (like + * session handle, nonces, session key and auth) from use to use it is + * designed to be opaque to anything outside. + */ +struct tpm2_auth { + u32 handle; + /* + * This has two meanings: before tpm_buf_fill_hmac_session() + * it marks the offset in the buffer of the start of the + * sessions (i.e. after all the handles). Once the buffer has + * been filled it markes the session number of our auth + * session so we can find it again in the response buffer. + * + * The two cases are distinguished because the first offset + * must always be greater than TPM_HEADER_SIZE and the second + * must be less than or equal to 5. + */ + u32 session; + /* + * the size here is variable and set by the size of our_nonce + * which must be between 16 and the name hash length. we set + * the maximum sha256 size for the greatest protection + */ + u8 our_nonce[SHA256_DIGEST_SIZE]; + u8 tpm_nonce[SHA256_DIGEST_SIZE]; + /* + * the salt is only used across the session command/response + * after that it can be used as a scratch area + */ + union { + u8 salt[EC_PT_SZ]; + /* scratch for key + IV */ + u8 scratch[AES_KEY_BYTES + AES_BLOCK_SIZE]; + }; + /* + * the session key and passphrase are the same size as the + * name digest (sha256 again). The session key is constant + * for the use of the session and the passphrase can change + * with every invocation. 
+ * + * Note: these fields must be adjacent and in this order + * because several HMAC/KDF schemes use the combination of the + * session_key and passphrase. + */ + u8 session_key[SHA256_DIGEST_SIZE]; + u8 passphrase[SHA256_DIGEST_SIZE]; + int passphrase_len; + struct crypto_aes_ctx aes_ctx; + /* saved session attributes: */ + u8 attrs; + __be32 ordinal; + + /* + * memory for three authorization handles. We know them by + * handle, but they are part of the session by name, which + * we must compute and remember + */ + u32 name_h[AUTH_MAX_NAMES]; + u8 name[AUTH_MAX_NAMES][2 + SHA512_DIGEST_SIZE]; +}; + +#ifdef CONFIG_TCG_TPM2_HMAC +/* + * Name Size based on TPM algorithm (assumes no hash bigger than 255) + */ +static int name_size(const u8 *name) +{ + u16 hash_alg = get_unaligned_be16(name); + + switch (hash_alg) { + case TPM_ALG_SHA1: + return SHA1_DIGEST_SIZE + 2; + case TPM_ALG_SHA256: + return SHA256_DIGEST_SIZE + 2; + case TPM_ALG_SHA384: + return SHA384_DIGEST_SIZE + 2; + case TPM_ALG_SHA512: + return SHA512_DIGEST_SIZE + 2; + default: + pr_warn("tpm: unsupported name algorithm: 0x%04x\n", hash_alg); + return -EINVAL; + } +} + +static int tpm2_read_public(struct tpm_chip *chip, u32 handle, void *name) +{ + u32 mso = tpm2_handle_mso(handle); + off_t offset = TPM_HEADER_SIZE; + int rc, name_size_alg; + struct tpm_buf buf; + + if (mso != TPM2_MSO_PERSISTENT && mso != TPM2_MSO_VOLATILE && + mso != TPM2_MSO_NVRAM) { + memcpy(name, &handle, sizeof(u32)); + return sizeof(u32); + } + + rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_READ_PUBLIC); + if (rc) + return rc; + + tpm_buf_append_u32(&buf, handle); + + rc = tpm_transmit_cmd(chip, &buf, 0, "TPM2_ReadPublic"); + if (rc) { + tpm_buf_destroy(&buf); + return tpm_ret_to_err(rc); + } + + /* Skip TPMT_PUBLIC: */ + offset += tpm_buf_read_u16(&buf, &offset); + + /* + * Ensure space for the length field of TPM2B_NAME and hashAlg field of + * TPMT_HA (the extra four bytes). + */ + if (offset + 4 > tpm_buf_length(&buf)) { + tpm_buf_destroy(&buf); + return -EIO; + } + + rc = tpm_buf_read_u16(&buf, &offset); + name_size_alg = name_size(&buf.data[offset]); + + if (name_size_alg < 0) + return name_size_alg; + + if (rc != name_size_alg) { + tpm_buf_destroy(&buf); + return -EIO; + } + + if (offset + rc > tpm_buf_length(&buf)) { + tpm_buf_destroy(&buf); + return -EIO; + } + + memcpy(name, &buf.data[offset], rc); + return name_size_alg; +} +#endif /* CONFIG_TCG_TPM2_HMAC */ + +/** + * tpm_buf_append_name() - add a handle area to the buffer + * @chip: the TPM chip structure + * @buf: The buffer to be appended + * @handle: The handle to be appended + * @name: The name of the handle (may be NULL) + * + * In order to compute session HMACs, we need to know the names of the + * objects pointed to by the handles. For most objects, this is simply + * the actual 4 byte handle or an empty buf (in these cases @name + * should be NULL) but for volatile objects, permanent objects and NV + * areas, the name is defined as the hash (according to the name + * algorithm which should be set to sha256) of the public area to + * which the two byte algorithm id has been appended. For these + * objects, the @name pointer should point to this. If a name is + * required but @name is NULL, then TPM2_ReadPublic() will be called + * on the handle to obtain the name. + * + * As with most tpm_buf operations, success is assumed because failure + * will be caused by an incorrect programming model and indicated by a + * kernel message. 
+ * + * Ends the authorization session on failure. + */ +int tpm_buf_append_name(struct tpm_chip *chip, struct tpm_buf *buf, + u32 handle, u8 *name) +{ +#ifdef CONFIG_TCG_TPM2_HMAC + enum tpm2_mso_type mso = tpm2_handle_mso(handle); + struct tpm2_auth *auth; + u16 name_size_alg; + int slot; + int ret; +#endif + + if (!tpm2_chip_auth(chip)) { + tpm_buf_append_handle(chip, buf, handle); + return 0; + } + +#ifdef CONFIG_TCG_TPM2_HMAC + slot = (tpm_buf_length(buf) - TPM_HEADER_SIZE) / 4; + if (slot >= AUTH_MAX_NAMES) { + dev_err(&chip->dev, "too many handles\n"); + ret = -EIO; + goto err; + } + auth = chip->auth; + if (auth->session != tpm_buf_length(buf)) { + dev_err(&chip->dev, "session state malformed"); + ret = -EIO; + goto err; + } + tpm_buf_append_u32(buf, handle); + auth->session += 4; + + if (mso == TPM2_MSO_PERSISTENT || + mso == TPM2_MSO_VOLATILE || + mso == TPM2_MSO_NVRAM) { + if (!name) { + ret = tpm2_read_public(chip, handle, auth->name[slot]); + if (ret < 0) + goto err; + + name_size_alg = ret; + } + } else { + if (name) { + dev_err(&chip->dev, "handle 0x%08x does not use a name\n", + handle); + ret = -EIO; + goto err; + } + } + + auth->name_h[slot] = handle; + if (name) + memcpy(auth->name[slot], name, name_size_alg); +#endif + return 0; + +#ifdef CONFIG_TCG_TPM2_HMAC +err: + tpm2_end_auth_session(chip); + return tpm_ret_to_err(ret); +#endif +} +EXPORT_SYMBOL_GPL(tpm_buf_append_name); + +void tpm_buf_append_auth(struct tpm_chip *chip, struct tpm_buf *buf, + u8 *passphrase, int passphrase_len) +{ + /* offset tells us where the sessions area begins */ + int offset = buf->handles * 4 + TPM_HEADER_SIZE; + u32 len = 9 + passphrase_len; + + if (tpm_buf_length(buf) != offset) { + /* not the first session so update the existing length */ + len += get_unaligned_be32(&buf->data[offset]); + put_unaligned_be32(len, &buf->data[offset]); + } else { + tpm_buf_append_u32(buf, len); + } + /* auth handle */ + tpm_buf_append_u32(buf, TPM2_RS_PW); + /* nonce */ + tpm_buf_append_u16(buf, 0); + /* attributes */ + tpm_buf_append_u8(buf, 0); + /* passphrase */ + tpm_buf_append_u16(buf, passphrase_len); + tpm_buf_append(buf, passphrase, passphrase_len); +} + +/** + * tpm_buf_append_hmac_session() - Append a TPM session element + * @chip: the TPM chip structure + * @buf: The buffer to be appended + * @attributes: The session attributes + * @passphrase: The session authority (NULL if none) + * @passphrase_len: The length of the session authority (0 if none) + * + * This fills in a session structure in the TPM command buffer, except + * for the HMAC which cannot be computed until the command buffer is + * complete. The type of session is controlled by the @attributes, + * the main ones of which are TPM2_SA_CONTINUE_SESSION which means the + * session won't terminate after tpm_buf_check_hmac_response(), + * TPM2_SA_DECRYPT which means this buffers first parameter should be + * encrypted with a session key and TPM2_SA_ENCRYPT, which means the + * response buffer's first parameter needs to be decrypted (confusing, + * but the defines are written from the point of view of the TPM). + * + * Any session appended by this command must be finalized by calling + * tpm_buf_fill_hmac_session() otherwise the HMAC will be incorrect + * and the TPM will reject the command. + * + * As with most tpm_buf operations, success is assumed because failure + * will be caused by an incorrect programming model and indicated by a + * kernel message. 
+ */ +void tpm_buf_append_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf, + u8 attributes, u8 *passphrase, + int passphrase_len) +{ +#ifdef CONFIG_TCG_TPM2_HMAC + u8 nonce[SHA256_DIGEST_SIZE]; + struct tpm2_auth *auth; + u32 len; +#endif + + if (!tpm2_chip_auth(chip)) { + tpm_buf_append_auth(chip, buf, passphrase, passphrase_len); + return; + } + +#ifdef CONFIG_TCG_TPM2_HMAC + /* The first write to /dev/tpm{rm0} will flush the session. */ + attributes |= TPM2_SA_CONTINUE_SESSION; + + /* + * The Architecture Guide requires us to strip trailing zeros + * before computing the HMAC + */ + while (passphrase && passphrase_len > 0 && passphrase[passphrase_len - 1] == '\0') + passphrase_len--; + + auth = chip->auth; + auth->attrs = attributes; + auth->passphrase_len = passphrase_len; + if (passphrase_len) + memcpy(auth->passphrase, passphrase, passphrase_len); + + if (auth->session != tpm_buf_length(buf)) { + /* we're not the first session */ + len = get_unaligned_be32(&buf->data[auth->session]); + if (4 + len + auth->session != tpm_buf_length(buf)) { + WARN(1, "session length mismatch, cannot append"); + return; + } + + /* add our new session */ + len += 9 + 2 * SHA256_DIGEST_SIZE; + put_unaligned_be32(len, &buf->data[auth->session]); + } else { + tpm_buf_append_u32(buf, 9 + 2 * SHA256_DIGEST_SIZE); + } + + /* random number for our nonce */ + get_random_bytes(nonce, sizeof(nonce)); + memcpy(auth->our_nonce, nonce, sizeof(nonce)); + tpm_buf_append_u32(buf, auth->handle); + /* our new nonce */ + tpm_buf_append_u16(buf, SHA256_DIGEST_SIZE); + tpm_buf_append(buf, nonce, SHA256_DIGEST_SIZE); + tpm_buf_append_u8(buf, auth->attrs); + /* and put a placeholder for the hmac */ + tpm_buf_append_u16(buf, SHA256_DIGEST_SIZE); + tpm_buf_append(buf, nonce, SHA256_DIGEST_SIZE); +#endif +} +EXPORT_SYMBOL_GPL(tpm_buf_append_hmac_session); + +#ifdef CONFIG_TCG_TPM2_HMAC + +static int tpm2_create_primary(struct tpm_chip *chip, u32 hierarchy, + u32 *handle, u8 *name); + +/* + * assume hash sha256 and nonces u, v of size SHA256_DIGEST_SIZE but + * otherwise standard tpm2_KDFa. Note output is in bytes not bits. + */ +static void tpm2_KDFa(u8 *key, u32 key_len, const char *label, u8 *u, + u8 *v, u32 bytes, u8 *out) +{ + u32 counter = 1; + const __be32 bits = cpu_to_be32(bytes * 8); + + while (bytes > 0) { + struct hmac_sha256_ctx hctx; + __be32 c = cpu_to_be32(counter); + + hmac_sha256_init_usingrawkey(&hctx, key, key_len); + hmac_sha256_update(&hctx, (u8 *)&c, sizeof(c)); + hmac_sha256_update(&hctx, label, strlen(label) + 1); + hmac_sha256_update(&hctx, u, SHA256_DIGEST_SIZE); + hmac_sha256_update(&hctx, v, SHA256_DIGEST_SIZE); + hmac_sha256_update(&hctx, (u8 *)&bits, sizeof(bits)); + hmac_sha256_final(&hctx, out); + + bytes -= SHA256_DIGEST_SIZE; + counter++; + out += SHA256_DIGEST_SIZE; + } +} + +/* + * Somewhat of a bastardization of the real KDFe. We're assuming + * we're working with known point sizes for the input parameters and + * the hash algorithm is fixed at sha256. Because we know that the + * point size is 32 bytes like the hash size, there's no need to loop + * in this KDF. 
+ */ +static void tpm2_KDFe(u8 z[EC_PT_SZ], const char *str, u8 *pt_u, u8 *pt_v, + u8 *out) +{ + struct sha256_ctx sctx; + /* + * this should be an iterative counter, but because we know + * we're only taking 32 bytes for the point using a sha256 + * hash which is also 32 bytes, there's only one loop + */ + __be32 c = cpu_to_be32(1); + + sha256_init(&sctx); + /* counter (BE) */ + sha256_update(&sctx, (u8 *)&c, sizeof(c)); + /* secret value */ + sha256_update(&sctx, z, EC_PT_SZ); + /* string including trailing zero */ + sha256_update(&sctx, str, strlen(str)+1); + sha256_update(&sctx, pt_u, EC_PT_SZ); + sha256_update(&sctx, pt_v, EC_PT_SZ); + sha256_final(&sctx, out); +} + +static void tpm_buf_append_salt(struct tpm_buf *buf, struct tpm_chip *chip, + struct tpm2_auth *auth) +{ + struct crypto_kpp *kpp; + struct kpp_request *req; + struct scatterlist s[2], d[1]; + struct ecdh p = {0}; + u8 encoded_key[EC_PT_SZ], *x, *y; + unsigned int buf_len; + + /* secret is two sized points */ + tpm_buf_append_u16(buf, (EC_PT_SZ + 2)*2); + /* + * we cheat here and append uninitialized data to form + * the points. All we care about is getting the two + * co-ordinate pointers, which will be used to overwrite + * the uninitialized data + */ + tpm_buf_append_u16(buf, EC_PT_SZ); + x = &buf->data[tpm_buf_length(buf)]; + tpm_buf_append(buf, encoded_key, EC_PT_SZ); + tpm_buf_append_u16(buf, EC_PT_SZ); + y = &buf->data[tpm_buf_length(buf)]; + tpm_buf_append(buf, encoded_key, EC_PT_SZ); + sg_init_table(s, 2); + sg_set_buf(&s[0], x, EC_PT_SZ); + sg_set_buf(&s[1], y, EC_PT_SZ); + + kpp = crypto_alloc_kpp("ecdh-nist-p256", CRYPTO_ALG_INTERNAL, 0); + if (IS_ERR(kpp)) { + dev_err(&chip->dev, "crypto ecdh allocation failed\n"); + return; + } + + buf_len = crypto_ecdh_key_len(&p); + if (sizeof(encoded_key) < buf_len) { + dev_err(&chip->dev, "salt buffer too small needs %d\n", + buf_len); + goto out; + } + crypto_ecdh_encode_key(encoded_key, buf_len, &p); + /* this generates a random private key */ + crypto_kpp_set_secret(kpp, encoded_key, buf_len); + + /* salt is now the public point of this private key */ + req = kpp_request_alloc(kpp, GFP_KERNEL); + if (!req) + goto out; + kpp_request_set_input(req, NULL, 0); + kpp_request_set_output(req, s, EC_PT_SZ*2); + crypto_kpp_generate_public_key(req); + /* + * we're not done: now we have to compute the shared secret + * which is our private key multiplied by the tpm_key public + * point, we actually only take the x point and discard the y + * point and feed it through KDFe to get the final secret salt + */ + sg_set_buf(&s[0], chip->null_ec_key_x, EC_PT_SZ); + sg_set_buf(&s[1], chip->null_ec_key_y, EC_PT_SZ); + kpp_request_set_input(req, s, EC_PT_SZ*2); + sg_init_one(d, auth->salt, EC_PT_SZ); + kpp_request_set_output(req, d, EC_PT_SZ); + crypto_kpp_compute_shared_secret(req); + kpp_request_free(req); + + /* + * pass the shared secret through KDFe for salt. Note salt + * area is used both for input shared secret and output salt. + * This works because KDFe fully consumes the secret before it + * writes the salt + */ + tpm2_KDFe(auth->salt, "SECRET", x, chip->null_ec_key_x, auth->salt); + + out: + crypto_free_kpp(kpp); +} + +/** + * tpm_buf_fill_hmac_session() - finalize the session HMAC + * @chip: the TPM chip structure + * @buf: The buffer to be appended + * + * This command must not be called until all of the parameters have + * been appended to @buf otherwise the computed HMAC will be + * incorrect. 
+ * + * This function computes and fills in the session HMAC using the + * session key and, if TPM2_SA_DECRYPT was specified, computes the + * encryption key and encrypts the first parameter of the command + * buffer with it. + * + * Ends the authorization session on failure. + */ +int tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf) +{ + u32 cc, handles, val; + struct tpm2_auth *auth = chip->auth; + int i; + struct tpm_header *head = (struct tpm_header *)buf->data; + off_t offset_s = TPM_HEADER_SIZE, offset_p; + u8 *hmac = NULL; + u32 attrs; + u8 cphash[SHA256_DIGEST_SIZE]; + struct sha256_ctx sctx; + struct hmac_sha256_ctx hctx; + int ret; + + if (!auth) { + ret = -EIO; + goto err; + } + + /* save the command code in BE format */ + auth->ordinal = head->ordinal; + + cc = be32_to_cpu(head->ordinal); + + i = tpm2_find_cc(chip, cc); + if (i < 0) { + dev_err(&chip->dev, "command 0x%08x not found\n", cc); + ret = -EIO; + goto err; + } + + attrs = chip->cc_attrs_tbl[i]; + + handles = (attrs >> TPM2_CC_ATTR_CHANDLES) & GENMASK(2, 0); + + /* + * just check the names, it's easy to make mistakes. This + * would happen if someone added a handle via + * tpm_buf_append_u32() instead of tpm_buf_append_name() + */ + for (i = 0; i < handles; i++) { + u32 handle = tpm_buf_read_u32(buf, &offset_s); + + if (auth->name_h[i] != handle) { + dev_err(&chip->dev, "invalid handle 0x%08x\n", handle); + ret = -EIO; + goto err; + } + } + /* point offset_s to the start of the sessions */ + val = tpm_buf_read_u32(buf, &offset_s); + /* point offset_p to the start of the parameters */ + offset_p = offset_s + val; + for (i = 1; offset_s < offset_p; i++) { + u32 handle = tpm_buf_read_u32(buf, &offset_s); + u16 len; + u8 a; + + /* nonce (already in auth) */ + len = tpm_buf_read_u16(buf, &offset_s); + offset_s += len; + + a = tpm_buf_read_u8(buf, &offset_s); + + len = tpm_buf_read_u16(buf, &offset_s); + if (handle == auth->handle && auth->attrs == a) { + hmac = &buf->data[offset_s]; + /* + * save our session number so we know which + * session in the response belongs to us + */ + auth->session = i; + } + + offset_s += len; + } + if (offset_s != offset_p) { + dev_err(&chip->dev, "session length is incorrect\n"); + ret = -EIO; + goto err; + } + if (!hmac) { + dev_err(&chip->dev, "could not find HMAC session\n"); + ret = -EIO; + goto err; + } + + /* encrypt before HMAC */ + if (auth->attrs & TPM2_SA_DECRYPT) { + u16 len; + + /* need key and IV */ + tpm2_KDFa(auth->session_key, SHA256_DIGEST_SIZE + + auth->passphrase_len, "CFB", auth->our_nonce, + auth->tpm_nonce, AES_KEY_BYTES + AES_BLOCK_SIZE, + auth->scratch); + + len = tpm_buf_read_u16(buf, &offset_p); + aes_expandkey(&auth->aes_ctx, auth->scratch, AES_KEY_BYTES); + aescfb_encrypt(&auth->aes_ctx, &buf->data[offset_p], + &buf->data[offset_p], len, + auth->scratch + AES_KEY_BYTES); + /* reset p to beginning of parameters for HMAC */ + offset_p -= 2; + } + + sha256_init(&sctx); + /* ordinal is already BE */ + sha256_update(&sctx, (u8 *)&head->ordinal, sizeof(head->ordinal)); + /* add the handle names */ + for (i = 0; i < handles; i++) { + enum tpm2_mso_type mso = tpm2_handle_mso(auth->name_h[i]); + + if (mso == TPM2_MSO_PERSISTENT || + mso == TPM2_MSO_VOLATILE || + mso == TPM2_MSO_NVRAM) { + ret = name_size(auth->name[i]); + if (ret < 0) + goto err; + + sha256_update(&sctx, auth->name[i], ret); + } else { + __be32 h = cpu_to_be32(auth->name_h[i]); + + sha256_update(&sctx, (u8 *)&h, 4); + } + } + if (offset_s != tpm_buf_length(buf)) + sha256_update(&sctx, 
&buf->data[offset_s], + tpm_buf_length(buf) - offset_s); + sha256_final(&sctx, cphash); + + /* now calculate the hmac */ + hmac_sha256_init_usingrawkey(&hctx, auth->session_key, + sizeof(auth->session_key) + + auth->passphrase_len); + hmac_sha256_update(&hctx, cphash, sizeof(cphash)); + hmac_sha256_update(&hctx, auth->our_nonce, sizeof(auth->our_nonce)); + hmac_sha256_update(&hctx, auth->tpm_nonce, sizeof(auth->tpm_nonce)); + hmac_sha256_update(&hctx, &auth->attrs, 1); + hmac_sha256_final(&hctx, hmac); + return 0; + +err: + tpm2_end_auth_session(chip); + return ret; +} +EXPORT_SYMBOL(tpm_buf_fill_hmac_session); + +/** + * tpm_buf_check_hmac_response() - check the TPM return HMAC for correctness + * @chip: the TPM chip structure + * @buf: the original command buffer (which now contains the response) + * @rc: the return code from tpm_transmit_cmd + * + * If @rc is non zero, @buf may not contain an actual return, so @rc + * is passed through as the return and the session cleaned up and + * de-allocated if required (this is required if + * TPM2_SA_CONTINUE_SESSION was not specified as a session flag). + * + * If @rc is zero, the response HMAC is computed against the returned + * @buf and matched to the TPM one in the session area. If there is a + * mismatch, an error is logged and -EINVAL returned. + * + * The reason for this is that the command issue and HMAC check + * sequence should look like: + * + * rc = tpm_transmit_cmd(...); + * rc = tpm_buf_check_hmac_response(&buf, auth, rc); + * if (rc) + * ... + * + * Which is easily layered into the current contrl flow. + * + * Returns: 0 on success or an error. + */ +int tpm_buf_check_hmac_response(struct tpm_chip *chip, struct tpm_buf *buf, + int rc) +{ + struct tpm_header *head = (struct tpm_header *)buf->data; + struct tpm2_auth *auth = chip->auth; + off_t offset_s, offset_p; + u8 rphash[SHA256_DIGEST_SIZE]; + u32 attrs, cc; + struct sha256_ctx sctx; + struct hmac_sha256_ctx hctx; + u16 tag = be16_to_cpu(head->tag); + int parm_len, len, i, handles; + + if (!auth) + return rc; + + cc = be32_to_cpu(auth->ordinal); + + if (auth->session >= TPM_HEADER_SIZE) { + WARN(1, "tpm session not filled correctly\n"); + goto out; + } + + if (rc != 0) + /* pass non success rc through and close the session */ + goto out; + + rc = -EINVAL; + if (tag != TPM2_ST_SESSIONS) { + dev_err(&chip->dev, "TPM: HMAC response check has no sessions tag\n"); + goto out; + } + + i = tpm2_find_cc(chip, cc); + if (i < 0) + goto out; + attrs = chip->cc_attrs_tbl[i]; + handles = (attrs >> TPM2_CC_ATTR_RHANDLE) & 1; + + /* point to area beyond handles */ + offset_s = TPM_HEADER_SIZE + handles * 4; + parm_len = tpm_buf_read_u32(buf, &offset_s); + offset_p = offset_s; + offset_s += parm_len; + /* skip over any sessions before ours */ + for (i = 0; i < auth->session - 1; i++) { + len = tpm_buf_read_u16(buf, &offset_s); + offset_s += len + 1; + len = tpm_buf_read_u16(buf, &offset_s); + offset_s += len; + } + /* TPM nonce */ + len = tpm_buf_read_u16(buf, &offset_s); + if (offset_s + len > tpm_buf_length(buf)) + goto out; + if (len != SHA256_DIGEST_SIZE) + goto out; + memcpy(auth->tpm_nonce, &buf->data[offset_s], len); + offset_s += len; + attrs = tpm_buf_read_u8(buf, &offset_s); + len = tpm_buf_read_u16(buf, &offset_s); + if (offset_s + len != tpm_buf_length(buf)) + goto out; + if (len != SHA256_DIGEST_SIZE) + goto out; + /* + * offset_s points to the HMAC. 
now calculate comparison, beginning + * with rphash + */ + sha256_init(&sctx); + /* yes, I know this is now zero, but it's what the standard says */ + sha256_update(&sctx, (u8 *)&head->return_code, + sizeof(head->return_code)); + /* ordinal is already BE */ + sha256_update(&sctx, (u8 *)&auth->ordinal, sizeof(auth->ordinal)); + sha256_update(&sctx, &buf->data[offset_p], parm_len); + sha256_final(&sctx, rphash); + + /* now calculate the hmac */ + hmac_sha256_init_usingrawkey(&hctx, auth->session_key, + sizeof(auth->session_key) + + auth->passphrase_len); + hmac_sha256_update(&hctx, rphash, sizeof(rphash)); + hmac_sha256_update(&hctx, auth->tpm_nonce, sizeof(auth->tpm_nonce)); + hmac_sha256_update(&hctx, auth->our_nonce, sizeof(auth->our_nonce)); + hmac_sha256_update(&hctx, &auth->attrs, 1); + /* we're done with the rphash, so put our idea of the hmac there */ + hmac_sha256_final(&hctx, rphash); + if (crypto_memneq(rphash, &buf->data[offset_s], SHA256_DIGEST_SIZE)) { + dev_err(&chip->dev, "TPM: HMAC check failed\n"); + goto out; + } + rc = 0; + + /* now do response decryption */ + if (auth->attrs & TPM2_SA_ENCRYPT) { + /* need key and IV */ + tpm2_KDFa(auth->session_key, SHA256_DIGEST_SIZE + + auth->passphrase_len, "CFB", auth->tpm_nonce, + auth->our_nonce, AES_KEY_BYTES + AES_BLOCK_SIZE, + auth->scratch); + + len = tpm_buf_read_u16(buf, &offset_p); + aes_expandkey(&auth->aes_ctx, auth->scratch, AES_KEY_BYTES); + aescfb_decrypt(&auth->aes_ctx, &buf->data[offset_p], + &buf->data[offset_p], len, + auth->scratch + AES_KEY_BYTES); + } + + out: + if ((auth->attrs & TPM2_SA_CONTINUE_SESSION) == 0) { + if (rc) + /* manually close the session if it wasn't consumed */ + tpm2_flush_context(chip, auth->handle); + + kfree_sensitive(auth); + chip->auth = NULL; + } else { + /* reset for next use */ + auth->session = TPM_HEADER_SIZE; + } + + return rc; +} +EXPORT_SYMBOL(tpm_buf_check_hmac_response); + +/** + * tpm2_end_auth_session() - kill the allocated auth session + * @chip: the TPM chip structure + * + * ends the session started by tpm2_start_auth_session and frees all + * the resources. Under normal conditions, + * tpm_buf_check_hmac_response() will correctly end the session if + * required, so this function is only for use in error legs that will + * bypass the normal invocation of tpm_buf_check_hmac_response(). 
+ */ +void tpm2_end_auth_session(struct tpm_chip *chip) +{ + struct tpm2_auth *auth = chip->auth; + + if (!auth) + return; + + tpm2_flush_context(chip, auth->handle); + kfree_sensitive(auth); + chip->auth = NULL; +} +EXPORT_SYMBOL(tpm2_end_auth_session); + +static int tpm2_parse_start_auth_session(struct tpm2_auth *auth, + struct tpm_buf *buf) +{ + struct tpm_header *head = (struct tpm_header *)buf->data; + u32 tot_len = be32_to_cpu(head->length); + off_t offset = TPM_HEADER_SIZE; + u32 val; + + /* we're starting after the header so adjust the length */ + tot_len -= TPM_HEADER_SIZE; + + /* should have handle plus nonce */ + if (tot_len != 4 + 2 + sizeof(auth->tpm_nonce)) + return -EINVAL; + + auth->handle = tpm_buf_read_u32(buf, &offset); + val = tpm_buf_read_u16(buf, &offset); + if (val != sizeof(auth->tpm_nonce)) + return -EINVAL; + memcpy(auth->tpm_nonce, &buf->data[offset], sizeof(auth->tpm_nonce)); + /* now compute the session key from the nonces */ + tpm2_KDFa(auth->salt, sizeof(auth->salt), "ATH", auth->tpm_nonce, + auth->our_nonce, sizeof(auth->session_key), + auth->session_key); + + return 0; +} + +static int tpm2_load_null(struct tpm_chip *chip, u32 *null_key) +{ + unsigned int offset = 0; /* dummy offset for null seed context */ + u8 name[SHA256_DIGEST_SIZE + 2]; + u32 tmp_null_key; + int rc; + + rc = tpm2_load_context(chip, chip->null_key_context, &offset, + &tmp_null_key); + if (rc != -EINVAL) { + if (!rc) + *null_key = tmp_null_key; + goto err; + } + + /* Try to re-create null key, given the integrity failure: */ + rc = tpm2_create_primary(chip, TPM2_RH_NULL, &tmp_null_key, name); + if (rc) + goto err; + + /* Return null key if the name has not been changed: */ + if (!memcmp(name, chip->null_key_name, sizeof(name))) { + *null_key = tmp_null_key; + return 0; + } + + /* Deduce from the name change TPM interference: */ + dev_err(&chip->dev, "null key integrity check failed\n"); + tpm2_flush_context(chip, tmp_null_key); + +err: + if (rc) { + chip->flags |= TPM_CHIP_FLAG_DISABLE; + rc = -ENODEV; + } + return rc; +} + +/** + * tpm2_start_auth_session() - Create an a HMAC authentication session + * @chip: A TPM chip + * + * Loads the ephemeral key (null seed), and starts an HMAC authenticated + * session. The null seed is flushed before the return. + * + * Returns zero on success, or a POSIX error code. 
+ */ +int tpm2_start_auth_session(struct tpm_chip *chip) +{ + struct tpm2_auth *auth; + struct tpm_buf buf; + u32 null_key; + int rc; + + if (chip->auth) { + dev_dbg_once(&chip->dev, "auth session is active\n"); + return 0; + } + + auth = kzalloc(sizeof(*auth), GFP_KERNEL); + if (!auth) + return -ENOMEM; + + rc = tpm2_load_null(chip, &null_key); + if (rc) + goto out; + + auth->session = TPM_HEADER_SIZE; + + rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_START_AUTH_SESS); + if (rc) + goto out; + + /* salt key handle */ + tpm_buf_append_u32(&buf, null_key); + /* bind key handle */ + tpm_buf_append_u32(&buf, TPM2_RH_NULL); + /* nonce caller */ + get_random_bytes(auth->our_nonce, sizeof(auth->our_nonce)); + tpm_buf_append_u16(&buf, sizeof(auth->our_nonce)); + tpm_buf_append(&buf, auth->our_nonce, sizeof(auth->our_nonce)); + + /* append encrypted salt and squirrel away unencrypted in auth */ + tpm_buf_append_salt(&buf, chip, auth); + /* session type (HMAC, audit or policy) */ + tpm_buf_append_u8(&buf, TPM2_SE_HMAC); + + /* symmetric encryption parameters */ + /* symmetric algorithm */ + tpm_buf_append_u16(&buf, TPM_ALG_AES); + /* bits for symmetric algorithm */ + tpm_buf_append_u16(&buf, AES_KEY_BITS); + /* symmetric algorithm mode (must be CFB) */ + tpm_buf_append_u16(&buf, TPM_ALG_CFB); + /* hash algorithm for session */ + tpm_buf_append_u16(&buf, TPM_ALG_SHA256); + + rc = tpm_ret_to_err(tpm_transmit_cmd(chip, &buf, 0, "StartAuthSession")); + tpm2_flush_context(chip, null_key); + + if (rc == TPM2_RC_SUCCESS) + rc = tpm2_parse_start_auth_session(auth, &buf); + + tpm_buf_destroy(&buf); + + if (rc == TPM2_RC_SUCCESS) { + chip->auth = auth; + return 0; + } + +out: + kfree_sensitive(auth); + return rc; +} +EXPORT_SYMBOL(tpm2_start_auth_session); + +/* + * A mask containing the object attributes for the kernel held null primary key + * used in HMAC encryption. For more information on specific attributes look up + * to "8.3 TPMA_OBJECT (Object Attributes)". 
+ */ +#define TPM2_OA_NULL_KEY ( \ + TPM2_OA_NO_DA | \ + TPM2_OA_FIXED_TPM | \ + TPM2_OA_FIXED_PARENT | \ + TPM2_OA_SENSITIVE_DATA_ORIGIN | \ + TPM2_OA_USER_WITH_AUTH | \ + TPM2_OA_DECRYPT | \ + TPM2_OA_RESTRICTED) + +/** + * tpm2_parse_create_primary() - parse the data returned from TPM_CC_CREATE_PRIMARY + * + * @chip: The TPM the primary was created under + * @buf: The response buffer from the chip + * @handle: pointer to be filled in with the return handle of the primary + * @hierarchy: The hierarchy the primary was created for + * @name: pointer to be filled in with the primary key name + * + * Return: + * * 0 - OK + * * -errno - A system error + * * TPM_RC - A TPM error + */ +static int tpm2_parse_create_primary(struct tpm_chip *chip, struct tpm_buf *buf, + u32 *handle, u32 hierarchy, u8 *name) +{ + struct tpm_header *head = (struct tpm_header *)buf->data; + off_t offset_r = TPM_HEADER_SIZE, offset_t; + u16 len = TPM_HEADER_SIZE; + u32 total_len = be32_to_cpu(head->length); + u32 val, param_len, keyhandle; + + keyhandle = tpm_buf_read_u32(buf, &offset_r); + if (handle) + *handle = keyhandle; + else + tpm2_flush_context(chip, keyhandle); + + param_len = tpm_buf_read_u32(buf, &offset_r); + /* + * param_len doesn't include the header, but all the other + * lengths and offsets do, so add it to parm len to make + * the comparisons easier + */ + param_len += TPM_HEADER_SIZE; + + if (param_len + 8 > total_len) + return -EINVAL; + len = tpm_buf_read_u16(buf, &offset_r); + offset_t = offset_r; + if (name) { + /* + * now we have the public area, compute the name of + * the object + */ + put_unaligned_be16(TPM_ALG_SHA256, name); + sha256(&buf->data[offset_r], len, name + 2); + } + + /* validate the public key */ + val = tpm_buf_read_u16(buf, &offset_t); + + /* key type (must be what we asked for) */ + if (val != TPM_ALG_ECC) + return -EINVAL; + val = tpm_buf_read_u16(buf, &offset_t); + + /* name algorithm */ + if (val != TPM_ALG_SHA256) + return -EINVAL; + val = tpm_buf_read_u32(buf, &offset_t); + + /* object properties */ + if (val != TPM2_OA_NULL_KEY) + return -EINVAL; + + /* auth policy (empty) */ + val = tpm_buf_read_u16(buf, &offset_t); + if (val != 0) + return -EINVAL; + + /* symmetric key parameters */ + val = tpm_buf_read_u16(buf, &offset_t); + if (val != TPM_ALG_AES) + return -EINVAL; + + /* symmetric key length */ + val = tpm_buf_read_u16(buf, &offset_t); + if (val != AES_KEY_BITS) + return -EINVAL; + + /* symmetric encryption scheme */ + val = tpm_buf_read_u16(buf, &offset_t); + if (val != TPM_ALG_CFB) + return -EINVAL; + + /* signing scheme */ + val = tpm_buf_read_u16(buf, &offset_t); + if (val != TPM_ALG_NULL) + return -EINVAL; + + /* ECC Curve */ + val = tpm_buf_read_u16(buf, &offset_t); + if (val != TPM2_ECC_NIST_P256) + return -EINVAL; + + /* KDF Scheme */ + val = tpm_buf_read_u16(buf, &offset_t); + if (val != TPM_ALG_NULL) + return -EINVAL; + + /* extract public key (x and y points) */ + val = tpm_buf_read_u16(buf, &offset_t); + if (val != EC_PT_SZ) + return -EINVAL; + memcpy(chip->null_ec_key_x, &buf->data[offset_t], val); + offset_t += val; + val = tpm_buf_read_u16(buf, &offset_t); + if (val != EC_PT_SZ) + return -EINVAL; + memcpy(chip->null_ec_key_y, &buf->data[offset_t], val); + offset_t += val; + + /* original length of the whole TPM2B */ + offset_r += len; + + /* should have exactly consumed the TPM2B public structure */ + if (offset_t != offset_r) + return -EINVAL; + if (offset_r > param_len) + return -EINVAL; + + /* creation data (skip) */ + len = tpm_buf_read_u16(buf, 
&offset_r); + offset_r += len; + if (offset_r > param_len) + return -EINVAL; + + /* creation digest (must be sha256) */ + len = tpm_buf_read_u16(buf, &offset_r); + offset_r += len; + if (len != SHA256_DIGEST_SIZE || offset_r > param_len) + return -EINVAL; + + /* TPMT_TK_CREATION follows */ + /* tag, must be TPM_ST_CREATION (0x8021) */ + val = tpm_buf_read_u16(buf, &offset_r); + if (val != TPM2_ST_CREATION || offset_r > param_len) + return -EINVAL; + + /* hierarchy */ + val = tpm_buf_read_u32(buf, &offset_r); + if (val != hierarchy || offset_r > param_len) + return -EINVAL; + + /* the ticket digest HMAC (might not be sha256) */ + len = tpm_buf_read_u16(buf, &offset_r); + offset_r += len; + if (offset_r > param_len) + return -EINVAL; + + /* + * finally we have the name, which is a sha256 digest plus a 2 + * byte algorithm type + */ + len = tpm_buf_read_u16(buf, &offset_r); + if (offset_r + len != param_len + 8) + return -EINVAL; + if (len != SHA256_DIGEST_SIZE + 2) + return -EINVAL; + + if (memcmp(chip->null_key_name, &buf->data[offset_r], + SHA256_DIGEST_SIZE + 2) != 0) { + dev_err(&chip->dev, "NULL Seed name comparison failed\n"); + return -EINVAL; + } + + return 0; +} + +/** + * tpm2_create_primary() - create a primary key using a fixed P-256 template + * + * @chip: the TPM chip to create under + * @hierarchy: The hierarchy handle to create under + * @handle: The returned volatile handle on success + * @name: The name of the returned key + * + * For platforms that might not have a persistent primary, this can be + * used to create one quickly on the fly (it uses Elliptic Curve not + * RSA, so even slow TPMs can create one fast). The template uses the + * TCG mandated H one for non-endorsement ECC primaries, i.e. P-256 + * elliptic curve (the only current one all TPM2s are required to + * have) a sha256 name hash and no policy. + * + * Return: + * * 0 - OK + * * -errno - A system error + * * TPM_RC - A TPM error + */ +static int tpm2_create_primary(struct tpm_chip *chip, u32 hierarchy, + u32 *handle, u8 *name) +{ + int rc; + struct tpm_buf buf; + struct tpm_buf template; + + rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_CREATE_PRIMARY); + if (rc) + return rc; + + rc = tpm_buf_init_sized(&template); + if (rc) { + tpm_buf_destroy(&buf); + return rc; + } + + /* + * create the template. 
Note: in order for userspace to + * verify the security of the system, it will have to create + * and certify this NULL primary, meaning all the template + * parameters will have to be identical, so conform exactly to + * the TCG TPM v2.0 Provisioning Guidance for the SRK ECC + * key H template (H has zero size unique points) + */ + + /* key type */ + tpm_buf_append_u16(&template, TPM_ALG_ECC); + + /* name algorithm */ + tpm_buf_append_u16(&template, TPM_ALG_SHA256); + + /* object properties */ + tpm_buf_append_u32(&template, TPM2_OA_NULL_KEY); + + /* sauth policy (empty) */ + tpm_buf_append_u16(&template, 0); + + /* BEGIN parameters: key specific; for ECC*/ + + /* symmetric algorithm */ + tpm_buf_append_u16(&template, TPM_ALG_AES); + + /* bits for symmetric algorithm */ + tpm_buf_append_u16(&template, AES_KEY_BITS); + + /* algorithm mode (must be CFB) */ + tpm_buf_append_u16(&template, TPM_ALG_CFB); + + /* scheme (NULL means any scheme) */ + tpm_buf_append_u16(&template, TPM_ALG_NULL); + + /* ECC Curve ID */ + tpm_buf_append_u16(&template, TPM2_ECC_NIST_P256); + + /* KDF Scheme */ + tpm_buf_append_u16(&template, TPM_ALG_NULL); + + /* unique: key specific; for ECC it is two zero size points */ + tpm_buf_append_u16(&template, 0); + tpm_buf_append_u16(&template, 0); + + /* END parameters */ + + /* primary handle */ + tpm_buf_append_u32(&buf, hierarchy); + tpm_buf_append_empty_auth(&buf, TPM2_RS_PW); + + /* sensitive create size is 4 for two empty buffers */ + tpm_buf_append_u16(&buf, 4); + + /* sensitive create auth data (empty) */ + tpm_buf_append_u16(&buf, 0); + + /* sensitive create sensitive data (empty) */ + tpm_buf_append_u16(&buf, 0); + + /* the public template */ + tpm_buf_append(&buf, template.data, template.length); + tpm_buf_destroy(&template); + + /* outside info (empty) */ + tpm_buf_append_u16(&buf, 0); + + /* creation PCR (none) */ + tpm_buf_append_u32(&buf, 0); + + rc = tpm_transmit_cmd(chip, &buf, 0, + "attempting to create NULL primary"); + + if (rc == TPM2_RC_SUCCESS) + rc = tpm2_parse_create_primary(chip, &buf, handle, hierarchy, + name); + + tpm_buf_destroy(&buf); + + return rc; +} + +static int tpm2_create_null_primary(struct tpm_chip *chip) +{ + u32 null_key; + int rc; + + rc = tpm2_create_primary(chip, TPM2_RH_NULL, &null_key, + chip->null_key_name); + + if (rc == TPM2_RC_SUCCESS) { + unsigned int offset = 0; /* dummy offset for null key context */ + + rc = tpm2_save_context(chip, null_key, chip->null_key_context, + sizeof(chip->null_key_context), &offset); + tpm2_flush_context(chip, null_key); + } + + return rc; +} + +/** + * tpm2_sessions_init() - start of day initialization for the sessions code + * @chip: TPM chip + * + * Derive and context save the null primary and allocate memory in the + * struct tpm_chip for the authorizations. 
+ * + * Return: + * * 0 - OK + * * -errno - A system error + * * TPM_RC - A TPM error + */ +int tpm2_sessions_init(struct tpm_chip *chip) +{ + int rc; + + rc = tpm2_create_null_primary(chip); + if (rc) { + dev_err(&chip->dev, "null key creation failed with %d\n", rc); + return rc; + } + + return rc; +} +#endif /* CONFIG_TCG_TPM2_HMAC */ diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c index ffb35f0154c1..60354cd53b5c 100644 --- a/drivers/char/tpm/tpm2-space.c +++ b/drivers/char/tpm/tpm2-space.c @@ -12,7 +12,7 @@ */ #include <linux/gfp.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include "tpm.h" enum tpm2_handle_types { @@ -68,8 +68,8 @@ void tpm2_del_space(struct tpm_chip *chip, struct tpm_space *space) kfree(space->session_buf); } -static int tpm2_load_context(struct tpm_chip *chip, u8 *buf, - unsigned int *offset, u32 *handle) +int tpm2_load_context(struct tpm_chip *chip, u8 *buf, + unsigned int *offset, u32 *handle) { struct tpm_buf tbuf; struct tpm2_context *ctx; @@ -105,6 +105,9 @@ static int tpm2_load_context(struct tpm_chip *chip, u8 *buf, *handle = 0; tpm_buf_destroy(&tbuf); return -ENOENT; + } else if (tpm2_rc_value(rc) == TPM2_RC_INTEGRITY) { + tpm_buf_destroy(&tbuf); + return -EINVAL; } else if (rc > 0) { dev_warn(&chip->dev, "%s: failed with a TPM error 0x%04X\n", __func__, rc); @@ -119,8 +122,8 @@ static int tpm2_load_context(struct tpm_chip *chip, u8 *buf, return 0; } -static int tpm2_save_context(struct tpm_chip *chip, u32 handle, u8 *buf, - unsigned int buf_size, unsigned int *offset) +int tpm2_save_context(struct tpm_chip *chip, u32 handle, u8 *buf, + unsigned int buf_size, unsigned int *offset) { struct tpm_buf tbuf; unsigned int body_size; @@ -166,6 +169,9 @@ void tpm2_flush_space(struct tpm_chip *chip) struct tpm_space *space = &chip->work_space; int i; + if (!space) + return; + for (i = 0; i < ARRAY_SIZE(space->context_tbl); i++) if (space->context_tbl[i] && ~space->context_tbl[i]) tpm2_flush_context(chip, space->context_tbl[i]); @@ -606,7 +612,7 @@ int tpm_devs_add(struct tpm_chip *chip) device_initialize(&chip->devs); chip->devs.parent = chip->dev.parent; - chip->devs.class = tpmrm_class; + chip->devs.class = &tpmrm_class; /* * Get extra reference on main device to hold on behalf of devs. 
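The long usage comment at the top of tpm2-sessions.c is easiest to follow as code. The sketch below condenses the calling convention that the tpm2_pcr_extend() and tpm2_get_random() hunks follow: start a session, add handles by name, append the HMAC session, fill the HMAC once all parameters are in place, transmit, and verify the response HMAC. The function name, ordinal and parameter layout are illustrative stand-ins, not something added by this patch.

    #include "tpm.h"

    /* Illustrative only: the ordinal and parameter layout are placeholders. */
    static int example_hmac_command(struct tpm_chip *chip, u32 handle,
                                    const u8 *param, u16 param_len)
    {
            struct tpm_buf buf;
            int rc;

            rc = tpm2_start_auth_session(chip);
            if (rc)
                    return rc;

            rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_PCR_EXTEND);
            if (rc) {
                    tpm2_end_auth_session(chip);
                    return rc;
            }

            /* Handles go in via tpm_buf_append_name(), never tpm_buf_append_u32(). */
            rc = tpm_buf_append_name(chip, &buf, handle, NULL);
            if (rc)
                    goto out;       /* append_name already ended the session */

            tpm_buf_append_hmac_session(chip, &buf, 0, NULL, 0);

            /* Command parameters follow the session area. */
            tpm_buf_append_u16(&buf, param_len);
            tpm_buf_append(&buf, param, param_len);

            /* Compute cpHash and the session HMAC now the buffer is complete. */
            rc = tpm_buf_fill_hmac_session(chip, &buf);
            if (rc)
                    goto out;       /* fill_hmac_session already ended the session */

            rc = tpm_transmit_cmd(chip, &buf, 0, "example HMAC-protected command");
            /* Checks rpHash/HMAC; ends the session unless CONTINUE_SESSION was set. */
            rc = tpm_buf_check_hmac_response(chip, &buf, rc);
    out:
            tpm_buf_destroy(&buf);
            return rc;
    }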
diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c index 54a6750a6757..f25faf468bba 100644 --- a/drivers/char/tpm/tpm_atmel.c +++ b/drivers/char/tpm/tpm_atmel.c @@ -15,7 +15,66 @@ */ #include "tpm.h" -#include "tpm_atmel.h" + +struct tpm_atmel_priv { + int region_size; + int have_region; + unsigned long base; + void __iomem *iobase; +}; + +#define atmel_getb(chip, offset) inb(atmel_get_priv(chip)->base + (offset)) +#define atmel_putb(val, chip, offset) \ + outb(val, atmel_get_priv(chip)->base + (offset)) +#define atmel_request_region request_region +#define atmel_release_region release_region +/* Atmel definitions */ +enum tpm_atmel_addr { + TPM_ATMEL_BASE_ADDR_LO = 0x08, + TPM_ATMEL_BASE_ADDR_HI = 0x09 +}; + +static inline int tpm_read_index(int base, int index) +{ + outb(index, base); + return inb(base + 1) & 0xFF; +} + +/* Verify this is a 1.1 Atmel TPM */ +static int atmel_verify_tpm11(void) +{ + /* verify that it is an Atmel part */ + if (tpm_read_index(TPM_ADDR, 4) != 'A' || + tpm_read_index(TPM_ADDR, 5) != 'T' || + tpm_read_index(TPM_ADDR, 6) != 'M' || + tpm_read_index(TPM_ADDR, 7) != 'L') + return 1; + + /* query chip for its version number */ + if (tpm_read_index(TPM_ADDR, 0x00) != 1 || + tpm_read_index(TPM_ADDR, 0x01) != 1) + return 1; + + /* This is an atmel supported part */ + return 0; +} + +/* Determine where to talk to device */ +static void __iomem *atmel_get_base_addr(unsigned long *base, int *region_size) +{ + int lo, hi; + + if (atmel_verify_tpm11() != 0) + return NULL; + + lo = tpm_read_index(TPM_ADDR, TPM_ATMEL_BASE_ADDR_LO); + hi = tpm_read_index(TPM_ADDR, TPM_ATMEL_BASE_ADDR_HI); + + *base = (hi << 8) | lo; + *region_size = 2; + + return ioport_map(*base, *region_size); +} /* write status bits */ enum tpm_atmel_write_status { @@ -89,7 +148,8 @@ static int tpm_atml_recv(struct tpm_chip *chip, u8 *buf, size_t count) return size; } -static int tpm_atml_send(struct tpm_chip *chip, u8 *buf, size_t count) +static int tpm_atml_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz, + size_t count) { struct tpm_atmel_priv *priv = dev_get_drvdata(&chip->dev); int i; @@ -142,7 +202,6 @@ static void atml_plat_remove(void) tpm_chip_unregister(chip); if (priv->have_region) atmel_release_region(priv->base, priv->region_size); - atmel_put_base_addr(priv->iobase); platform_device_unregister(pdev); } @@ -211,7 +270,6 @@ static int __init init_atmel(void) err_unreg_dev: platform_device_unregister(pdev); err_rel_reg: - atmel_put_base_addr(iobase); if (have_region) atmel_release_region(base, region_size); @@ -229,7 +287,7 @@ static void __exit cleanup_atmel(void) module_init(init_atmel); module_exit(cleanup_atmel); -MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)"); +MODULE_AUTHOR("Leendert van Doorn <leendert@watson.ibm.com>"); MODULE_DESCRIPTION("TPM Driver"); MODULE_VERSION("2.0"); MODULE_LICENSE("GPL"); diff --git a/drivers/char/tpm/tpm_atmel.h b/drivers/char/tpm/tpm_atmel.h deleted file mode 100644 index 7ac3f69dcf0f..000000000000 --- a/drivers/char/tpm/tpm_atmel.h +++ /dev/null @@ -1,140 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) 2005 IBM Corporation - * - * Authors: - * Kylene Hall <kjhall@us.ibm.com> - * - * Maintained by: <tpmdd-devel@lists.sourceforge.net> - * - * Device driver for TCG/TCPA TPM (trusted platform module). 
- * Specifications at www.trustedcomputinggroup.org - * - * These difference are required on power because the device must be - * discovered through the device tree and iomap must be used to get - * around the need for holes in the io_page_mask. This does not happen - * automatically because the tpm is not a normal pci device and lives - * under the root node. - */ - -struct tpm_atmel_priv { - int region_size; - int have_region; - unsigned long base; - void __iomem *iobase; -}; - -#ifdef CONFIG_PPC64 - -#include <linux/of.h> - -#define atmel_getb(priv, offset) readb(priv->iobase + offset) -#define atmel_putb(val, priv, offset) writeb(val, priv->iobase + offset) -#define atmel_request_region request_mem_region -#define atmel_release_region release_mem_region - -static inline void atmel_put_base_addr(void __iomem *iobase) -{ - iounmap(iobase); -} - -static void __iomem * atmel_get_base_addr(unsigned long *base, int *region_size) -{ - struct device_node *dn; - unsigned long address, size; - const unsigned int *reg; - int reglen; - int naddrc; - int nsizec; - - dn = of_find_node_by_name(NULL, "tpm"); - - if (!dn) - return NULL; - - if (!of_device_is_compatible(dn, "AT97SC3201")) { - of_node_put(dn); - return NULL; - } - - reg = of_get_property(dn, "reg", ®len); - naddrc = of_n_addr_cells(dn); - nsizec = of_n_size_cells(dn); - - of_node_put(dn); - - - if (naddrc == 2) - address = ((unsigned long) reg[0] << 32) | reg[1]; - else - address = reg[0]; - - if (nsizec == 2) - size = - ((unsigned long) reg[naddrc] << 32) | reg[naddrc + 1]; - else - size = reg[naddrc]; - - *base = address; - *region_size = size; - return ioremap(*base, *region_size); -} -#else -#define atmel_getb(chip, offset) inb(atmel_get_priv(chip)->base + offset) -#define atmel_putb(val, chip, offset) \ - outb(val, atmel_get_priv(chip)->base + offset) -#define atmel_request_region request_region -#define atmel_release_region release_region -/* Atmel definitions */ -enum tpm_atmel_addr { - TPM_ATMEL_BASE_ADDR_LO = 0x08, - TPM_ATMEL_BASE_ADDR_HI = 0x09 -}; - -static inline int tpm_read_index(int base, int index) -{ - outb(index, base); - return inb(base+1) & 0xFF; -} - -/* Verify this is a 1.1 Atmel TPM */ -static int atmel_verify_tpm11(void) -{ - - /* verify that it is an Atmel part */ - if (tpm_read_index(TPM_ADDR, 4) != 'A' || - tpm_read_index(TPM_ADDR, 5) != 'T' || - tpm_read_index(TPM_ADDR, 6) != 'M' || - tpm_read_index(TPM_ADDR, 7) != 'L') - return 1; - - /* query chip for its version number */ - if (tpm_read_index(TPM_ADDR, 0x00) != 1 || - tpm_read_index(TPM_ADDR, 0x01) != 1) - return 1; - - /* This is an atmel supported part */ - return 0; -} - -static inline void atmel_put_base_addr(void __iomem *iobase) -{ -} - -/* Determine where to talk to device */ -static void __iomem * atmel_get_base_addr(unsigned long *base, int *region_size) -{ - int lo, hi; - - if (atmel_verify_tpm11() != 0) - return NULL; - - lo = tpm_read_index(TPM_ADDR, TPM_ATMEL_BASE_ADDR_LO); - hi = tpm_read_index(TPM_ADDR, TPM_ATMEL_BASE_ADDR_HI); - - *base = (hi << 8) | lo; - *region_size = 2; - - return ioport_map(*base, *region_size); -} -#endif diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c index 9eb1a1859012..6c25305c256e 100644 --- a/drivers/char/tpm/tpm_crb.c +++ b/drivers/char/tpm/tpm_crb.c @@ -19,6 +19,7 @@ #ifdef CONFIG_ARM64 #include <linux/arm-smccc.h> #endif +#include "tpm_crb_ffa.h" #include "tpm.h" #define ACPI_SIG_TPM2 "TPM2" @@ -100,6 +101,8 @@ struct crb_priv { u32 smc_func_id; u32 __iomem *pluton_start_addr; u32 __iomem 
*pluton_reply_addr; + u8 ffa_flags; + u8 ffa_attributes; }; struct tpm2_crb_smc { @@ -110,11 +113,29 @@ struct tpm2_crb_smc { u32 smc_func_id; }; +/* CRB over FFA start method parameters in TCG2 ACPI table */ +struct tpm2_crb_ffa { + u8 flags; + u8 attributes; + u16 partition_id; + u8 reserved[8]; +}; + struct tpm2_crb_pluton { u64 start_addr; u64 reply_addr; }; +/* + * Returns true if the start method supports idle. + */ +static inline bool tpm_crb_has_idle(u32 start_method) +{ + return !(start_method == ACPI_TPM2_START_METHOD || + start_method == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD || + start_method == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC); +} + static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value, unsigned long timeout) { @@ -158,6 +179,7 @@ static int crb_try_pluton_doorbell(struct crb_priv *priv, bool wait_for_complete * * @dev: crb device * @priv: crb private data + * @loc: locality * * Write CRB_CTRL_REQ_GO_IDLE to TPM_CRB_CTRL_REQ * The device should respond within TIMEOUT_C by clearing the bit. @@ -169,17 +191,21 @@ static int crb_try_pluton_doorbell(struct crb_priv *priv, bool wait_for_complete * * Return: 0 always */ -static int __crb_go_idle(struct device *dev, struct crb_priv *priv) +static int __crb_go_idle(struct device *dev, struct crb_priv *priv, int loc) { int rc; - if ((priv->sm == ACPI_TPM2_START_METHOD) || - (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) || - (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC)) + if (!tpm_crb_has_idle(priv->sm)) return 0; iowrite32(CRB_CTRL_REQ_GO_IDLE, &priv->regs_t->ctrl_req); + if (priv->sm == ACPI_TPM2_CRB_WITH_ARM_FFA) { + rc = tpm_crb_ffa_start(CRB_FFA_START_TYPE_COMMAND, loc); + if (rc) + return rc; + } + rc = crb_try_pluton_doorbell(priv, true); if (rc) return rc; @@ -200,7 +226,7 @@ static int crb_go_idle(struct tpm_chip *chip) struct device *dev = &chip->dev; struct crb_priv *priv = dev_get_drvdata(dev); - return __crb_go_idle(dev, priv); + return __crb_go_idle(dev, priv, chip->locality); } /** @@ -208,6 +234,7 @@ static int crb_go_idle(struct tpm_chip *chip) * * @dev: crb device * @priv: crb private data + * @loc: locality * * Write CRB_CTRL_REQ_CMD_READY to TPM_CRB_CTRL_REQ * and poll till the device acknowledge it by clearing the bit. 
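The go-idle and command-ready paths now share the same shape: skip the register write for start methods without an idle state, otherwise write the control request and, for the FF-A start method only, kick the secure world for the affected locality. A condensed sketch of that shape (the helper name is illustrative, not added by the patch):

    static int crb_example_ctrl_req(struct crb_priv *priv, u32 req, int loc)
    {
            /* Start methods without an idle state have nothing to do here. */
            if (!tpm_crb_has_idle(priv->sm))
                    return 0;

            iowrite32(req, &priv->regs_t->ctrl_req);

            /* The FF-A start method needs an explicit kick after the write. */
            if (priv->sm == ACPI_TPM2_CRB_WITH_ARM_FFA)
                    return tpm_crb_ffa_start(CRB_FFA_START_TYPE_COMMAND, loc);

            return 0;
    }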
@@ -218,17 +245,21 @@ static int crb_go_idle(struct tpm_chip *chip) * * Return: 0 on success -ETIME on timeout; */ -static int __crb_cmd_ready(struct device *dev, struct crb_priv *priv) +static int __crb_cmd_ready(struct device *dev, struct crb_priv *priv, int loc) { int rc; - if ((priv->sm == ACPI_TPM2_START_METHOD) || - (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) || - (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC)) + if (!tpm_crb_has_idle(priv->sm)) return 0; iowrite32(CRB_CTRL_REQ_CMD_READY, &priv->regs_t->ctrl_req); + if (priv->sm == ACPI_TPM2_CRB_WITH_ARM_FFA) { + rc = tpm_crb_ffa_start(CRB_FFA_START_TYPE_COMMAND, loc); + if (rc) + return rc; + } + rc = crb_try_pluton_doorbell(priv, true); if (rc) return rc; @@ -249,19 +280,26 @@ static int crb_cmd_ready(struct tpm_chip *chip) struct device *dev = &chip->dev; struct crb_priv *priv = dev_get_drvdata(dev); - return __crb_cmd_ready(dev, priv); + return __crb_cmd_ready(dev, priv, chip->locality); } static int __crb_request_locality(struct device *dev, struct crb_priv *priv, int loc) { - u32 value = CRB_LOC_STATE_LOC_ASSIGNED | - CRB_LOC_STATE_TPM_REG_VALID_STS; + u32 value = CRB_LOC_STATE_LOC_ASSIGNED | CRB_LOC_STATE_TPM_REG_VALID_STS; + int rc; if (!priv->regs_h) return 0; iowrite32(CRB_LOC_CTRL_REQUEST_ACCESS, &priv->regs_h->loc_ctrl); + + if (priv->sm == ACPI_TPM2_CRB_WITH_ARM_FFA) { + rc = tpm_crb_ffa_start(CRB_FFA_START_TYPE_LOCALITY_REQUEST, loc); + if (rc) + return rc; + } + if (!crb_wait_for_reg_32(&priv->regs_h->loc_state, value, value, TPM2_TIMEOUT_C)) { dev_warn(dev, "TPM_LOC_STATE_x.requestAccess timed out\n"); @@ -281,14 +319,21 @@ static int crb_request_locality(struct tpm_chip *chip, int loc) static int __crb_relinquish_locality(struct device *dev, struct crb_priv *priv, int loc) { - u32 mask = CRB_LOC_STATE_LOC_ASSIGNED | - CRB_LOC_STATE_TPM_REG_VALID_STS; + u32 mask = CRB_LOC_STATE_LOC_ASSIGNED | CRB_LOC_STATE_TPM_REG_VALID_STS; u32 value = CRB_LOC_STATE_TPM_REG_VALID_STS; + int rc; if (!priv->regs_h) return 0; iowrite32(CRB_LOC_CTRL_RELINQUISH, &priv->regs_h->loc_ctrl); + + if (priv->sm == ACPI_TPM2_CRB_WITH_ARM_FFA) { + rc = tpm_crb_ffa_start(CRB_FFA_START_TYPE_LOCALITY_REQUEST, loc); + if (rc) + return rc; + } + if (!crb_wait_for_reg_32(&priv->regs_h->loc_state, mask, value, TPM2_TIMEOUT_C)) { dev_warn(dev, "TPM_LOC_STATE_x.Relinquish timed out\n"); @@ -369,7 +414,7 @@ static int crb_do_acpi_start(struct tpm_chip *chip) #ifdef CONFIG_ARM64 /* * This is a TPM Command Response Buffer start method that invokes a - * Secure Monitor Call to requrest the firmware to execute or cancel + * Secure Monitor Call to request the firmware to execute or cancel * a TPM 2.0 command. 
*/ static int tpm_crb_smc_start(struct device *dev, unsigned long func_id) @@ -394,7 +439,7 @@ static int tpm_crb_smc_start(struct device *dev, unsigned long func_id) } #endif -static int crb_send(struct tpm_chip *chip, u8 *buf, size_t len) +static int crb_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz, size_t len) { struct crb_priv *priv = dev_get_drvdata(&chip->dev); int rc = 0; @@ -412,7 +457,7 @@ static int crb_send(struct tpm_chip *chip, u8 *buf, size_t len) /* Seems to be necessary for every command */ if (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON) - __crb_cmd_ready(&chip->dev, priv); + __crb_cmd_ready(&chip->dev, priv, chip->locality); memcpy_toio(priv->cmd, buf, len); @@ -423,13 +468,13 @@ static int crb_send(struct tpm_chip *chip, u8 *buf, size_t len) * report only ACPI start but in practice seems to require both * CRB start, hence invoking CRB start method if hid == MSFT0101. */ - if ((priv->sm == ACPI_TPM2_COMMAND_BUFFER) || - (priv->sm == ACPI_TPM2_MEMORY_MAPPED) || - (!strcmp(priv->hid, "MSFT0101"))) + if (priv->sm == ACPI_TPM2_COMMAND_BUFFER || + priv->sm == ACPI_TPM2_MEMORY_MAPPED || + !strcmp(priv->hid, "MSFT0101")) iowrite32(CRB_START_INVOKE, &priv->regs_t->ctrl_start); - if ((priv->sm == ACPI_TPM2_START_METHOD) || - (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD)) + if (priv->sm == ACPI_TPM2_START_METHOD || + priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) rc = crb_do_acpi_start(chip); if (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC) { @@ -437,6 +482,11 @@ static int crb_send(struct tpm_chip *chip, u8 *buf, size_t len) rc = tpm_crb_smc_start(&chip->dev, priv->smc_func_id); } + if (priv->sm == ACPI_TPM2_CRB_WITH_ARM_FFA) { + iowrite32(CRB_START_INVOKE, &priv->regs_t->ctrl_start); + rc = tpm_crb_ffa_start(CRB_FFA_START_TYPE_COMMAND, chip->locality); + } + if (rc) return rc; @@ -446,13 +496,20 @@ static int crb_send(struct tpm_chip *chip, u8 *buf, size_t len) static void crb_cancel(struct tpm_chip *chip) { struct crb_priv *priv = dev_get_drvdata(&chip->dev); + int rc; iowrite32(CRB_CANCEL_INVOKE, &priv->regs_t->ctrl_cancel); - if (((priv->sm == ACPI_TPM2_START_METHOD) || - (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD)) && + if ((priv->sm == ACPI_TPM2_START_METHOD || + priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) && crb_do_acpi_start(chip)) dev_err(&chip->dev, "ACPI Start failed\n"); + + if (priv->sm == ACPI_TPM2_CRB_WITH_ARM_FFA) { + rc = tpm_crb_ffa_start(CRB_FFA_START_TYPE_COMMAND, chip->locality); + if (rc) + dev_err(&chip->dev, "FF-A Start failed\n"); + } } static bool crb_req_canceled(struct tpm_chip *chip, u8 status) @@ -463,28 +520,6 @@ static bool crb_req_canceled(struct tpm_chip *chip, u8 status) return (cancel & CRB_CANCEL_INVOKE) == CRB_CANCEL_INVOKE; } -static int crb_check_flags(struct tpm_chip *chip) -{ - u32 val; - int ret; - - ret = crb_request_locality(chip, 0); - if (ret) - return ret; - - ret = tpm2_get_tpm_pt(chip, TPM2_PT_MANUFACTURER, &val, NULL); - if (ret) - goto release; - - if (val == 0x414D4400U /* AMD */) - chip->flags |= TPM_CHIP_FLAG_HWRNG_DISABLED; - -release: - crb_relinquish_locality(chip, 0); - - return ret; -} - static const struct tpm_class_ops tpm_crb = { .flags = TPM_OPS_AUTO_STARTUP, .status = crb_status, @@ -631,8 +666,9 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv, * the control area, as one nice sane region except for some older * stuff that puts the control area outside the ACPI IO region. 
*/ - if ((priv->sm == ACPI_TPM2_COMMAND_BUFFER) || - (priv->sm == ACPI_TPM2_MEMORY_MAPPED)) { + if (priv->sm == ACPI_TPM2_COMMAND_BUFFER || + priv->sm == ACPI_TPM2_CRB_WITH_ARM_FFA || + priv->sm == ACPI_TPM2_MEMORY_MAPPED) { if (iores && buf->control_address == iores->start + sizeof(*priv->regs_h)) @@ -649,7 +685,7 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv, * PTT HW bug w/a: wake up the device to access * possibly not retained registers. */ - ret = __crb_cmd_ready(dev, priv); + ret = __crb_cmd_ready(dev, priv, 0); if (ret) goto out_relinquish_locality; @@ -721,7 +757,7 @@ out: if (!ret) priv->cmd_size = cmd_size; - __crb_go_idle(dev, priv); + __crb_go_idle(dev, priv, 0); out_relinquish_locality: @@ -753,6 +789,7 @@ static int crb_acpi_add(struct acpi_device *device) struct tpm_chip *chip; struct device *dev = &device->dev; struct tpm2_crb_smc *crb_smc; + struct tpm2_crb_ffa *crb_ffa; struct tpm2_crb_pluton *crb_pluton; acpi_status status; u32 sm; @@ -791,18 +828,40 @@ static int crb_acpi_add(struct acpi_device *device) priv->smc_func_id = crb_smc->smc_func_id; } + if (sm == ACPI_TPM2_CRB_WITH_ARM_FFA) { + if (buf->header.length < (sizeof(*buf) + sizeof(*crb_ffa))) { + dev_err(dev, + FW_BUG "TPM2 ACPI table has wrong size %u for start method type %d\n", + buf->header.length, + ACPI_TPM2_CRB_WITH_ARM_FFA); + rc = -EINVAL; + goto out; + } + crb_ffa = ACPI_ADD_PTR(struct tpm2_crb_ffa, buf, sizeof(*buf)); + priv->ffa_flags = crb_ffa->flags; + priv->ffa_attributes = crb_ffa->attributes; + rc = tpm_crb_ffa_init(); + if (rc) { + /* If FF-A driver is not available yet, request probe retry */ + if (rc == -ENOENT) + rc = -EPROBE_DEFER; + goto out; + } + } + if (sm == ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON) { if (buf->header.length < (sizeof(*buf) + sizeof(*crb_pluton))) { dev_err(dev, FW_BUG "TPM2 ACPI table has wrong size %u for start method type %d\n", buf->header.length, ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON); - return -EINVAL; + rc = -EINVAL; + goto out; } crb_pluton = ACPI_ADD_PTR(struct tpm2_crb_pluton, buf, sizeof(*buf)); rc = crb_map_pluton(dev, priv, buf, crb_pluton); if (rc) - return rc; + goto out; } priv->sm = sm; @@ -826,9 +885,14 @@ static int crb_acpi_add(struct acpi_device *device) if (rc) goto out; - rc = crb_check_flags(chip); - if (rc) - goto out; +#ifdef CONFIG_X86 + /* A quirk for https://www.amd.com/en/support/kb/faq/pa-410 */ + if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD && + priv->sm != ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON) { + dev_info(dev, "Disabling hwrng\n"); + chip->flags |= TPM_CHIP_FLAG_HWRNG_DISABLED; + } +#endif /* CONFIG_X86 */ rc = tpm_chip_register(chip); diff --git a/drivers/char/tpm/tpm_crb_ffa.c b/drivers/char/tpm/tpm_crb_ffa.c new file mode 100644 index 000000000000..755b77b32ea4 --- /dev/null +++ b/drivers/char/tpm/tpm_crb_ffa.c @@ -0,0 +1,414 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2024 Arm Ltd. + * + * This device driver implements the TPM CRB start method + * as defined in the TPM Service Command Response Buffer + * Interface Over FF-A (DEN0138). 
+ */ + +#define pr_fmt(fmt) "CRB_FFA: " fmt + +#include <linux/arm_ffa.h> +#include <linux/delay.h> +#include <linux/moduleparam.h> +#include "tpm_crb_ffa.h" + +static unsigned int busy_timeout_ms = 2000; + +module_param(busy_timeout_ms, uint, 0644); +MODULE_PARM_DESC(busy_timeout_ms, + "Maximum time in ms to retry before giving up on busy"); + +/* TPM service function status codes */ +#define CRB_FFA_OK 0x05000001 +#define CRB_FFA_OK_RESULTS_RETURNED 0x05000002 +#define CRB_FFA_NOFUNC 0x8e000001 +#define CRB_FFA_NOTSUP 0x8e000002 +#define CRB_FFA_INVARG 0x8e000005 +#define CRB_FFA_INV_CRB_CTRL_DATA 0x8e000006 +#define CRB_FFA_ALREADY 0x8e000009 +#define CRB_FFA_DENIED 0x8e00000a +#define CRB_FFA_NOMEM 0x8e00000b + +#define CRB_FFA_VERSION_MAJOR 1 +#define CRB_FFA_VERSION_MINOR 0 + +/* version encoding */ +#define CRB_FFA_MAJOR_VERSION_MASK GENMASK(30, 16) +#define CRB_FFA_MINOR_VERSION_MASK GENMASK(15, 0) +#define CRB_FFA_MAJOR_VERSION(x) ((u16)(FIELD_GET(CRB_FFA_MAJOR_VERSION_MASK, (x)))) +#define CRB_FFA_MINOR_VERSION(x) ((u16)(FIELD_GET(CRB_FFA_MINOR_VERSION_MASK, (x)))) + +/* + * Normal world sends requests with FFA_MSG_SEND_DIRECT_REQ and + * responses are returned with FFA_MSG_SEND_DIRECT_RESP for normal + * messages. + * + * All requests with FFA_MSG_SEND_DIRECT_REQ and FFA_MSG_SEND_DIRECT_RESP + * are using the AArch32 or AArch64 SMC calling convention with register usage + * as defined in FF-A specification: + * w0: Function ID + * -for 32-bit: 0x8400006F or 0x84000070 + * -for 64-bit: 0xC400006F or 0xC4000070 + * w1: Source/Destination IDs + * w2: Reserved (MBZ) + * w3-w7: Implementation defined, free to be used below + */ + +/* + * Returns the version of the interface that is available + * Call register usage: + * w3: Not used (MBZ) + * w4: TPM service function ID, CRB_FFA_GET_INTERFACE_VERSION + * w5-w7: Reserved (MBZ) + * + * Return register usage: + * w3: Not used (MBZ) + * w4: TPM service function status + * w5: TPM service interface version + * Bits[31:16]: major version + * Bits[15:0]: minor version + * w6-w7: Reserved (MBZ) + * + * Possible function status codes in register w4: + * CRB_FFA_OK_RESULTS_RETURNED: The version of the interface has been + * returned. + */ +#define CRB_FFA_GET_INTERFACE_VERSION 0x0f000001 + +/* + * Notifies the TPM service that a TPM command or TPM locality request is + * ready to be processed, and allows the TPM service to process it. + * Call register usage: + * w3: Not used (MBZ) + * w4: TPM service function ID, CRB_FFA_START + * w5: Start function qualifier + * Bits[31:8] (MBZ) + * Bits[7:0] + * 0: Notifies TPM that a command is ready to be processed + * 1: Notifies TPM that a locality request is ready to be processed + * w6: TPM locality, one of 0..4 + * -If the start function qualifier is 0, identifies the locality + * from where the command originated. 
+ * -If the start function qualifier is 1, identifies the locality + * of the locality request + * w7: Reserved (MBZ) + * + * Return register usage: + * w3: Not used (MBZ) + * w4: TPM service function status + * w5-w7: Reserved (MBZ) + * + * Possible function status codes in register w4: + * CRB_FFA_OK: the TPM service has been notified successfully + * CRB_FFA_INVARG: one or more arguments are not valid + * CRB_FFA_INV_CRB_CTRL_DATA: CRB control data or locality control + * data at the given TPM locality is not valid + * CRB_FFA_DENIED: the TPM has previously disabled locality requests and + * command processing at the given locality + */ +#define CRB_FFA_START 0x0f000201 + +struct tpm_crb_ffa { + struct ffa_device *ffa_dev; + u16 major_version; + u16 minor_version; + /* lock to protect sending of FF-A messages: */ + struct mutex msg_data_lock; + union { + struct ffa_send_direct_data direct_msg_data; + struct ffa_send_direct_data2 direct_msg_data2; + }; +}; + +static struct tpm_crb_ffa *tpm_crb_ffa; +static struct ffa_driver tpm_crb_ffa_driver; + +static int tpm_crb_ffa_to_linux_errno(int errno) +{ + int rc; + + switch (errno) { + case CRB_FFA_OK: + rc = 0; + break; + case CRB_FFA_OK_RESULTS_RETURNED: + rc = 0; + break; + case CRB_FFA_NOFUNC: + rc = -ENOENT; + break; + case CRB_FFA_NOTSUP: + rc = -EPERM; + break; + case CRB_FFA_INVARG: + rc = -EINVAL; + break; + case CRB_FFA_INV_CRB_CTRL_DATA: + rc = -ENOEXEC; + break; + case CRB_FFA_ALREADY: + rc = -EEXIST; + break; + case CRB_FFA_DENIED: + rc = -EACCES; + break; + case CRB_FFA_NOMEM: + rc = -ENOMEM; + break; + default: + rc = -EINVAL; + } + + return rc; +} + +/** + * tpm_crb_ffa_init - called by the CRB driver to do any needed initialization + * + * This function is called by the tpm_crb driver during the tpm_crb + * driver's initialization. If the tpm_crb_ffa has not been probed + * yet, returns -ENOENT in order to force a retry. If the ffa_crb + * driver had been probed but failed with an error, returns -ENODEV + * in order to prevent further retries. + * + * Return: 0 on success, negative error code on failure.
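(Editorial example, not part of the patch: the intended caller is the CRB driver's ACPI probe path shown earlier, which maps the "service not probed yet" case onto a deferred probe, roughly:)

	rc = tpm_crb_ffa_init();
	if (rc) {
		/* FF-A TPM service driver not bound yet: let the core retry */
		if (rc == -ENOENT)
			rc = -EPROBE_DEFER;
		goto out;
	}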
+ */ +int tpm_crb_ffa_init(void) +{ + int ret = 0; + + if (!IS_MODULE(CONFIG_TCG_ARM_CRB_FFA)) { + ret = ffa_register(&tpm_crb_ffa_driver); + if (ret) { + tpm_crb_ffa = ERR_PTR(-ENODEV); + return ret; + } + } + + if (!tpm_crb_ffa) + ret = -ENOENT; + + if (IS_ERR_VALUE(tpm_crb_ffa)) + ret = -ENODEV; + + return ret; +} +EXPORT_SYMBOL_GPL(tpm_crb_ffa_init); + +static int __tpm_crb_ffa_try_send_receive(unsigned long func_id, + unsigned long a0, unsigned long a1, + unsigned long a2) +{ + const struct ffa_msg_ops *msg_ops; + int ret; + + msg_ops = tpm_crb_ffa->ffa_dev->ops->msg_ops; + + if (ffa_partition_supports_direct_req2_recv(tpm_crb_ffa->ffa_dev)) { + tpm_crb_ffa->direct_msg_data2 = (struct ffa_send_direct_data2){ + .data = { func_id, a0, a1, a2 }, + }; + + ret = msg_ops->sync_send_receive2(tpm_crb_ffa->ffa_dev, + &tpm_crb_ffa->direct_msg_data2); + if (!ret) + ret = tpm_crb_ffa_to_linux_errno(tpm_crb_ffa->direct_msg_data2.data[0]); + } else { + tpm_crb_ffa->direct_msg_data = (struct ffa_send_direct_data){ + .data1 = func_id, + .data2 = a0, + .data3 = a1, + .data4 = a2, + }; + + ret = msg_ops->sync_send_receive(tpm_crb_ffa->ffa_dev, + &tpm_crb_ffa->direct_msg_data); + if (!ret) + ret = tpm_crb_ffa_to_linux_errno(tpm_crb_ffa->direct_msg_data.data1); + } + + return ret; +} + +static int __tpm_crb_ffa_send_receive(unsigned long func_id, unsigned long a0, + unsigned long a1, unsigned long a2) +{ + ktime_t start, stop; + int ret; + + if (!tpm_crb_ffa) + return -ENOENT; + + start = ktime_get(); + stop = ktime_add(start, ms_to_ktime(busy_timeout_ms)); + + for (;;) { + ret = __tpm_crb_ffa_try_send_receive(func_id, a0, a1, a2); + if (ret != -EBUSY) + break; + + usleep_range(50, 100); + if (ktime_after(ktime_get(), stop)) { + dev_warn(&tpm_crb_ffa->ffa_dev->dev, + "Busy retry timed out\n"); + break; + } + } + + return ret; +} + +/** + * tpm_crb_ffa_get_interface_version() - gets the ABI version of the TPM service + * @major: Pointer to caller-allocated buffer to hold the major version + * number of the ABI + * @minor: Pointer to caller-allocated buffer to hold the minor version + * number of the ABI + * + * Returns the major and minor version of the ABI of the FF-A based TPM. + * Allows the caller to evaluate its compatibility with the version of + * the ABI. + * + * Return: 0 on success, negative error code on failure. + */ +static int tpm_crb_ffa_get_interface_version(u16 *major, u16 *minor) +{ + int rc; + + if (!tpm_crb_ffa) + return -ENOENT; + + if (IS_ERR_VALUE(tpm_crb_ffa)) + return -ENODEV; + + if (!major || !minor) + return -EINVAL; + + guard(mutex)(&tpm_crb_ffa->msg_data_lock); + + rc = __tpm_crb_ffa_send_receive(CRB_FFA_GET_INTERFACE_VERSION, 0x00, 0x00, 0x00); + if (!rc) { + if (ffa_partition_supports_direct_req2_recv(tpm_crb_ffa->ffa_dev)) { + *major = CRB_FFA_MAJOR_VERSION(tpm_crb_ffa->direct_msg_data2.data[1]); + *minor = CRB_FFA_MINOR_VERSION(tpm_crb_ffa->direct_msg_data2.data[1]); + } else { + *major = CRB_FFA_MAJOR_VERSION(tpm_crb_ffa->direct_msg_data.data2); + *minor = CRB_FFA_MINOR_VERSION(tpm_crb_ffa->direct_msg_data.data2); + } + } + + return rc; +} + +/** + * tpm_crb_ffa_start() - signals the TPM that a field has changed in the CRB + * @request_type: Identifies whether the change to the CRB is in the command + * fields or locality fields. + * @locality: Specifies the locality number. + * + * Used by the CRB driver to notify the TPM service that a TPM command or a + * TPM locality request has been written to the CRB and is ready to be + * processed.
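(Editorial example, not part of the patch: callers pass one of the qualifier constants from tpm_crb_ffa.h; the locality request path in the CRB driver, for instance, issues roughly the following, with loc and rc taken from the surrounding context:)

	rc = tpm_crb_ffa_start(CRB_FFA_START_TYPE_LOCALITY_REQUEST, loc);
	if (rc)
		return rc;	/* the TPM service never processed the locality write */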
+ * + * Return: 0 on success, negative error code on failure. + */ +int tpm_crb_ffa_start(int request_type, int locality) +{ + if (!tpm_crb_ffa) + return -ENOENT; + + if (IS_ERR_VALUE(tpm_crb_ffa)) + return -ENODEV; + + guard(mutex)(&tpm_crb_ffa->msg_data_lock); + + return __tpm_crb_ffa_send_receive(CRB_FFA_START, request_type, locality, 0x00); +} +EXPORT_SYMBOL_GPL(tpm_crb_ffa_start); + +static int tpm_crb_ffa_probe(struct ffa_device *ffa_dev) +{ + struct tpm_crb_ffa *p; + int rc; + + /* only one instance of a TPM partition is supported */ + if (tpm_crb_ffa && !IS_ERR_VALUE(tpm_crb_ffa)) + return -EEXIST; + + tpm_crb_ffa = ERR_PTR(-ENODEV); // set tpm_crb_ffa so we can detect probe failure + + if (!ffa_partition_supports_direct_recv(ffa_dev) && + !ffa_partition_supports_direct_req2_recv(ffa_dev)) { + dev_warn(&ffa_dev->dev, "partition doesn't support direct message receive.\n"); + return -EINVAL; + } + + p = kzalloc(sizeof(*tpm_crb_ffa), GFP_KERNEL); + if (!p) + return -ENOMEM; + tpm_crb_ffa = p; + + mutex_init(&tpm_crb_ffa->msg_data_lock); + tpm_crb_ffa->ffa_dev = ffa_dev; + ffa_dev_set_drvdata(ffa_dev, tpm_crb_ffa); + + /* if TPM is aarch32 use 32-bit SMCs */ + if (!ffa_partition_check_property(ffa_dev, FFA_PARTITION_AARCH64_EXEC)) + ffa_dev->ops->msg_ops->mode_32bit_set(ffa_dev); + + /* verify compatibility of TPM service version number */ + rc = tpm_crb_ffa_get_interface_version(&tpm_crb_ffa->major_version, + &tpm_crb_ffa->minor_version); + if (rc) { + dev_err(&ffa_dev->dev, "failed to get crb interface version. rc:%d\n", rc); + goto out; + } + + dev_info(&ffa_dev->dev, "ABI version %u.%u\n", tpm_crb_ffa->major_version, + tpm_crb_ffa->minor_version); + + if (tpm_crb_ffa->major_version != CRB_FFA_VERSION_MAJOR || + (tpm_crb_ffa->minor_version > 0 && + tpm_crb_ffa->minor_version < CRB_FFA_VERSION_MINOR)) { + dev_warn(&ffa_dev->dev, "Incompatible ABI version\n"); + goto out; + } + + return 0; + +out: + kfree(tpm_crb_ffa); + tpm_crb_ffa = ERR_PTR(-ENODEV); + return -EINVAL; +} + +static void tpm_crb_ffa_remove(struct ffa_device *ffa_dev) +{ + kfree(tpm_crb_ffa); + tpm_crb_ffa = NULL; +} + +static const struct ffa_device_id tpm_crb_ffa_device_id[] = { + /* 17b862a4-1806-4faf-86b3-089a58353861 */ + { UUID_INIT(0x17b862a4, 0x1806, 0x4faf, + 0x86, 0xb3, 0x08, 0x9a, 0x58, 0x35, 0x38, 0x61) }, + {} +}; + +static struct ffa_driver tpm_crb_ffa_driver = { + .name = "ffa-crb", + .probe = tpm_crb_ffa_probe, + .remove = tpm_crb_ffa_remove, + .id_table = tpm_crb_ffa_device_id, +}; + +#ifdef MODULE +module_ffa_driver(tpm_crb_ffa_driver); +#endif + +MODULE_AUTHOR("Arm"); +MODULE_DESCRIPTION("TPM CRB FFA driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/char/tpm/tpm_crb_ffa.h b/drivers/char/tpm/tpm_crb_ffa.h new file mode 100644 index 000000000000..d7e1344ea003 --- /dev/null +++ b/drivers/char/tpm/tpm_crb_ffa.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2024 Arm Ltd. + * + * This device driver implements the TPM CRB start method + * as defined in the TPM Service Command Response Buffer + * Interface Over FF-A (DEN0138). 
+ */ +#ifndef _TPM_CRB_FFA_H +#define _TPM_CRB_FFA_H + +#if IS_REACHABLE(CONFIG_TCG_ARM_CRB_FFA) +int tpm_crb_ffa_init(void); +int tpm_crb_ffa_start(int request_type, int locality); +#else +static inline int tpm_crb_ffa_init(void) { return 0; } +static inline int tpm_crb_ffa_start(int request_type, int locality) { return 0; } +#endif + +#define CRB_FFA_START_TYPE_COMMAND 0 +#define CRB_FFA_START_TYPE_LOCALITY_REQUEST 1 + +#endif diff --git a/drivers/char/tpm/tpm_ftpm_tee.c b/drivers/char/tpm/tpm_ftpm_tee.c index 76adb108076c..4e63c30aeaf1 100644 --- a/drivers/char/tpm/tpm_ftpm_tee.c +++ b/drivers/char/tpm/tpm_ftpm_tee.c @@ -31,45 +31,19 @@ static const uuid_t ftpm_ta_uuid = 0x82, 0xCB, 0x34, 0x3F, 0xB7, 0xF3, 0x78, 0x96); /** - * ftpm_tee_tpm_op_recv() - retrieve fTPM response. - * @chip: the tpm_chip description as specified in driver/char/tpm/tpm.h. - * @buf: the buffer to store data. - * @count: the number of bytes to read. - * - * Return: - * In case of success the number of bytes received. - * On failure, -errno. - */ -static int ftpm_tee_tpm_op_recv(struct tpm_chip *chip, u8 *buf, size_t count) -{ - struct ftpm_tee_private *pvt_data = dev_get_drvdata(chip->dev.parent); - size_t len; - - len = pvt_data->resp_len; - if (count < len) { - dev_err(&chip->dev, - "%s: Invalid size in recv: count=%zd, resp_len=%zd\n", - __func__, count, len); - return -EIO; - } - - memcpy(buf, pvt_data->resp_buf, len); - pvt_data->resp_len = 0; - - return len; -} - -/** - * ftpm_tee_tpm_op_send() - send TPM commands through the TEE shared memory. + * ftpm_tee_tpm_op_send() - send TPM commands through the TEE shared memory + * and retrieve the response. * @chip: the tpm_chip description as specified in driver/char/tpm/tpm.h - * @buf: the buffer to send. - * @len: the number of bytes to send. + * @buf: the buffer to send and to store the response. + * @bufsiz: the size of the buffer. + * @cmd_len: the number of bytes to send. * * Return: - * In case of success, returns 0. + * In case of success, returns the number of bytes received. 
* On failure, -errno */ -static int ftpm_tee_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t len) +static int ftpm_tee_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz, + size_t cmd_len) { struct ftpm_tee_private *pvt_data = dev_get_drvdata(chip->dev.parent); size_t resp_len; @@ -80,16 +54,15 @@ static int ftpm_tee_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t len) struct tee_param command_params[4]; struct tee_shm *shm = pvt_data->shm; - if (len > MAX_COMMAND_SIZE) { + if (cmd_len > MAX_COMMAND_SIZE) { dev_err(&chip->dev, "%s: len=%zd exceeds MAX_COMMAND_SIZE supported by fTPM TA\n", - __func__, len); + __func__, cmd_len); return -EIO; } memset(&transceive_args, 0, sizeof(transceive_args)); memset(command_params, 0, sizeof(command_params)); - pvt_data->resp_len = 0; /* Invoke FTPM_OPTEE_TA_SUBMIT_COMMAND function of fTPM TA */ transceive_args = (struct tee_ioctl_invoke_arg) { @@ -103,7 +76,7 @@ static int ftpm_tee_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t len) .attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT, .u.memref = { .shm = shm, - .size = len, + .size = cmd_len, .shm_offs = 0, }, }; @@ -115,7 +88,7 @@ static int ftpm_tee_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t len) return PTR_ERR(temp_buf); } memset(temp_buf, 0, (MAX_COMMAND_SIZE + MAX_RESPONSE_SIZE)); - memcpy(temp_buf, buf, len); + memcpy(temp_buf, buf, cmd_len); command_params[1] = (struct tee_param) { .attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT, @@ -156,38 +129,21 @@ static int ftpm_tee_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t len) __func__, resp_len); return -EIO; } + if (resp_len > bufsiz) { + dev_err(&chip->dev, + "%s: resp_len=%zd exceeds bufsiz=%zd\n", + __func__, resp_len, bufsiz); + return -EIO; + } - /* sanity checks look good, cache the response */ - memcpy(pvt_data->resp_buf, temp_buf, resp_len); - pvt_data->resp_len = resp_len; - - return 0; -} - -static void ftpm_tee_tpm_op_cancel(struct tpm_chip *chip) -{ - /* not supported */ -} - -static u8 ftpm_tee_tpm_op_status(struct tpm_chip *chip) -{ - return 0; -} + memcpy(buf, temp_buf, resp_len); -static bool ftpm_tee_tpm_req_canceled(struct tpm_chip *chip, u8 status) -{ - return false; + return resp_len; } static const struct tpm_class_ops ftpm_tee_tpm_ops = { .flags = TPM_OPS_AUTO_STARTUP, - .recv = ftpm_tee_tpm_op_recv, .send = ftpm_tee_tpm_op_send, - .cancel = ftpm_tee_tpm_op_cancel, - .status = ftpm_tee_tpm_op_status, - .req_complete_mask = 0, - .req_complete_val = 0, - .req_canceled = ftpm_tee_tpm_req_canceled, }; /* @@ -208,7 +164,7 @@ static int ftpm_tee_match(struct tee_ioctl_version_data *ver, const void *data) /** * ftpm_tee_probe() - initialize the fTPM - * @pdev: the platform_device description. + * @dev: the device description. * * Return: * On success, 0. On failure, -errno. @@ -271,7 +227,7 @@ static int ftpm_tee_probe(struct device *dev) } pvt_data->chip = chip; - pvt_data->chip->flags |= TPM_CHIP_FLAG_TPM2; + pvt_data->chip->flags |= TPM_CHIP_FLAG_TPM2 | TPM_CHIP_FLAG_SYNC; /* Create a character device for the fTPM */ rc = tpm_chip_register(pvt_data->chip); @@ -304,7 +260,7 @@ static int ftpm_plat_tee_probe(struct platform_device *pdev) /** * ftpm_tee_remove() - remove the TPM device - * @pdev: the platform_device description. + * @dev: the device description. * * Return: * 0 always. 
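(Editorial note, not part of the patch: with the separate recv/cancel/status callbacks removed and TPM_CHIP_FLAG_SYNC set, a command is expected to complete in a single call to the send hook, the response being written back into the same buffer — roughly, with buf, bufsiz and cmd_len as in the signature above:)

	len = chip->ops->send(chip, buf, bufsiz, cmd_len);
	if (len < 0)
		return len;
	/* on success, buf[0..len-1] holds the TPM response */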
@@ -341,7 +297,7 @@ static void ftpm_plat_tee_remove(struct platform_device *pdev) } /** - * ftpm_tee_shutdown() - shutdown the TPM device + * ftpm_plat_tee_shutdown() - shutdown the TPM device * @pdev: the platform_device description. */ static void ftpm_plat_tee_shutdown(struct platform_device *pdev) @@ -362,11 +318,11 @@ MODULE_DEVICE_TABLE(of, of_ftpm_tee_ids); static struct platform_driver ftpm_tee_plat_driver = { .driver = { .name = "ftpm-tee", - .of_match_table = of_match_ptr(of_ftpm_tee_ids), + .of_match_table = of_ftpm_tee_ids, }, .shutdown = ftpm_plat_tee_shutdown, .probe = ftpm_plat_tee_probe, - .remove_new = ftpm_plat_tee_remove, + .remove = ftpm_plat_tee_remove, }; /* UUID of the fTPM TA */ diff --git a/drivers/char/tpm/tpm_ftpm_tee.h b/drivers/char/tpm/tpm_ftpm_tee.h index f98daa7bf68c..8d5c3f0d2879 100644 --- a/drivers/char/tpm/tpm_ftpm_tee.h +++ b/drivers/char/tpm/tpm_ftpm_tee.h @@ -21,18 +21,13 @@ /** * struct ftpm_tee_private - fTPM's private data * @chip: struct tpm_chip instance registered with tpm framework. - * @state: internal state * @session: fTPM TA session identifier. - * @resp_len: cached response buffer length. - * @resp_buf: cached response buffer. * @ctx: TEE context handler. * @shm: Memory pool shared with fTPM TA in TEE. */ struct ftpm_tee_private { struct tpm_chip *chip; u32 session; - size_t resp_len; - u8 resp_buf[MAX_RESPONSE_SIZE]; struct tee_context *ctx; struct tee_shm *shm; }; diff --git a/drivers/char/tpm/tpm_i2c_atmel.c b/drivers/char/tpm/tpm_i2c_atmel.c index 301a95b3734f..4f229656a8e2 100644 --- a/drivers/char/tpm/tpm_i2c_atmel.c +++ b/drivers/char/tpm/tpm_i2c_atmel.c @@ -37,7 +37,8 @@ struct priv_data { u8 buffer[sizeof(struct tpm_header) + 25]; }; -static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len) +static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz, + size_t len) { struct priv_data *priv = dev_get_drvdata(&chip->dev); struct i2c_client *client = to_i2c_client(chip->dev.parent); @@ -186,7 +187,7 @@ static void i2c_atmel_remove(struct i2c_client *client) } static const struct i2c_device_id i2c_atmel_id[] = { - {I2C_DRIVER_NAME, 0}, + { I2C_DRIVER_NAME }, {} }; MODULE_DEVICE_TABLE(i2c, i2c_atmel_id); diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c index 81d8a78dc655..bdf1f329a679 100644 --- a/drivers/char/tpm/tpm_i2c_infineon.c +++ b/drivers/char/tpm/tpm_i2c_infineon.c @@ -514,7 +514,8 @@ out: return size; } -static int tpm_tis_i2c_send(struct tpm_chip *chip, u8 *buf, size_t len) +static int tpm_tis_i2c_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz, + size_t len) { int rc, status; ssize_t burstcnt; diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c index d7be03c41098..d44903b29929 100644 --- a/drivers/char/tpm/tpm_i2c_nuvoton.c +++ b/drivers/char/tpm/tpm_i2c_nuvoton.c @@ -19,7 +19,8 @@ #include <linux/interrupt.h> #include <linux/wait.h> #include <linux/i2c.h> -#include <linux/of_device.h> +#include <linux/of.h> +#include <linux/property.h> #include "tpm.h" /* I2C interface offsets */ @@ -349,7 +350,8 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count) * tpm.c can skip polling for the data to be available as the interrupt is * waited for here */ -static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len) +static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz, + size_t len) { struct priv_data *priv = dev_get_drvdata(&chip->dev); struct device *dev = chip->dev.parent; @@ 
-524,7 +526,6 @@ static int get_vid(struct i2c_client *client, u32 *res) static int i2c_nuvoton_probe(struct i2c_client *client) { - const struct i2c_device_id *id = i2c_client_get_device_id(client); int rc; struct tpm_chip *chip; struct device *dev = &client->dev; @@ -546,15 +547,8 @@ static int i2c_nuvoton_probe(struct i2c_client *client) if (!priv) return -ENOMEM; - if (dev->of_node) { - const struct of_device_id *of_id; - - of_id = of_match_device(dev->driver->of_match_table, dev); - if (of_id && of_id->data == OF_IS_TPM2) - chip->flags |= TPM_CHIP_FLAG_TPM2; - } else - if (id->driver_data == I2C_IS_TPM2) - chip->flags |= TPM_CHIP_FLAG_TPM2; + if (i2c_get_match_data(client)) + chip->flags |= TPM_CHIP_FLAG_TPM2; init_waitqueue_head(&priv->read_queue); @@ -661,6 +655,6 @@ static struct i2c_driver i2c_nuvoton_driver = { module_i2c_driver(i2c_nuvoton_driver); -MODULE_AUTHOR("Dan Morav (dan.morav@nuvoton.com)"); +MODULE_AUTHOR("Dan Morav <dan.morav@nuvoton.com>"); MODULE_DESCRIPTION("Nuvoton TPM I2C Driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c index d3989b257f42..4734a69406ce 100644 --- a/drivers/char/tpm/tpm_ibmvtpm.c +++ b/drivers/char/tpm/tpm_ibmvtpm.c @@ -191,13 +191,15 @@ static int tpm_ibmvtpm_resume(struct device *dev) * tpm_ibmvtpm_send() - Send a TPM command * @chip: tpm chip struct * @buf: buffer contains data to send - * @count: size of buffer + * @bufsiz: size of the buffer + * @count: length of the command * * Return: * 0 on success, * -errno on error */ -static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) +static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz, + size_t count) { struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev); bool retry = true; @@ -450,6 +452,7 @@ static bool tpm_ibmvtpm_req_canceled(struct tpm_chip *chip, u8 status) } static const struct tpm_class_ops tpm_ibmvtpm = { + .flags = TPM_OPS_AUTO_STARTUP, .recv = tpm_ibmvtpm_recv, .send = tpm_ibmvtpm_send, .cancel = tpm_ibmvtpm_cancel, @@ -690,16 +693,6 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev, if (!strcmp(id->compat, "IBM,vtpm20")) chip->flags |= TPM_CHIP_FLAG_TPM2; - rc = tpm_get_timeouts(chip); - if (rc) - goto init_irq_cleanup; - - if (chip->flags & TPM_CHIP_FLAG_TPM2) { - rc = tpm2_get_cc_attrs_tbl(chip); - if (rc) - goto init_irq_cleanup; - } - return tpm_chip_register(chip); init_irq_cleanup: do { diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c index 9c924a1440a9..7638b65b851b 100644 --- a/drivers/char/tpm/tpm_infineon.c +++ b/drivers/char/tpm/tpm_infineon.c @@ -51,34 +51,40 @@ static struct tpm_inf_dev tpm_dev; static inline void tpm_data_out(unsigned char data, unsigned char offset) { +#ifdef CONFIG_HAS_IOPORT if (tpm_dev.iotype == TPM_INF_IO_PORT) outb(data, tpm_dev.data_regs + offset); else +#endif writeb(data, tpm_dev.mem_base + tpm_dev.data_regs + offset); } static inline unsigned char tpm_data_in(unsigned char offset) { +#ifdef CONFIG_HAS_IOPORT if (tpm_dev.iotype == TPM_INF_IO_PORT) return inb(tpm_dev.data_regs + offset); - else - return readb(tpm_dev.mem_base + tpm_dev.data_regs + offset); +#endif + return readb(tpm_dev.mem_base + tpm_dev.data_regs + offset); } static inline void tpm_config_out(unsigned char data, unsigned char offset) { +#ifdef CONFIG_HAS_IOPORT if (tpm_dev.iotype == TPM_INF_IO_PORT) outb(data, tpm_dev.config_port + offset); else +#endif writeb(data, tpm_dev.mem_base + tpm_dev.index_off + offset); } static inline unsigned 
char tpm_config_in(unsigned char offset) { +#ifdef CONFIG_HAS_IOPORT if (tpm_dev.iotype == TPM_INF_IO_PORT) return inb(tpm_dev.config_port + offset); - else - return readb(tpm_dev.mem_base + tpm_dev.index_off + offset); +#endif + return readb(tpm_dev.mem_base + tpm_dev.index_off + offset); } /* TPM header definitions */ @@ -306,7 +312,8 @@ recv_begin: return -EIO; } -static int tpm_inf_send(struct tpm_chip *chip, u8 * buf, size_t count) +static int tpm_inf_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz, + size_t count) { int i; int ret; diff --git a/drivers/char/tpm/tpm_loongson.c b/drivers/char/tpm/tpm_loongson.c new file mode 100644 index 000000000000..9e50250763d1 --- /dev/null +++ b/drivers/char/tpm/tpm_loongson.c @@ -0,0 +1,84 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025 Loongson Technology Corporation Limited. */ + +#include <linux/device.h> +#include <linux/mfd/loongson-se.h> +#include <linux/platform_device.h> +#include <linux/wait.h> + +#include "tpm.h" + +struct tpm_loongson_cmd { + u32 cmd_id; + u32 data_off; + u32 data_len; + u32 pad[5]; +}; + +static int tpm_loongson_recv(struct tpm_chip *chip, u8 *buf, size_t count) +{ + struct loongson_se_engine *tpm_engine = dev_get_drvdata(&chip->dev); + struct tpm_loongson_cmd *cmd_ret = tpm_engine->command_ret; + + if (cmd_ret->data_len > count) + return -EIO; + + memcpy(buf, tpm_engine->data_buffer, cmd_ret->data_len); + + return cmd_ret->data_len; +} + +static int tpm_loongson_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz, size_t count) +{ + struct loongson_se_engine *tpm_engine = dev_get_drvdata(&chip->dev); + struct tpm_loongson_cmd *cmd = tpm_engine->command; + + if (count > tpm_engine->buffer_size) + return -E2BIG; + + cmd->data_len = count; + memcpy(tpm_engine->data_buffer, buf, count); + + return loongson_se_send_engine_cmd(tpm_engine); +} + +static const struct tpm_class_ops tpm_loongson_ops = { + .flags = TPM_OPS_AUTO_STARTUP, + .recv = tpm_loongson_recv, + .send = tpm_loongson_send, +}; + +static int tpm_loongson_probe(struct platform_device *pdev) +{ + struct loongson_se_engine *tpm_engine; + struct device *dev = &pdev->dev; + struct tpm_loongson_cmd *cmd; + struct tpm_chip *chip; + + tpm_engine = loongson_se_init_engine(dev->parent, SE_ENGINE_TPM); + if (!tpm_engine) + return -ENODEV; + cmd = tpm_engine->command; + cmd->cmd_id = SE_CMD_TPM; + cmd->data_off = tpm_engine->buffer_off; + + chip = tpmm_chip_alloc(dev, &tpm_loongson_ops); + if (IS_ERR(chip)) + return PTR_ERR(chip); + chip->flags = TPM_CHIP_FLAG_TPM2 | TPM_CHIP_FLAG_IRQ; + dev_set_drvdata(&chip->dev, tpm_engine); + + return tpm_chip_register(chip); +} + +static struct platform_driver tpm_loongson = { + .probe = tpm_loongson_probe, + .driver = { + .name = "tpm_loongson", + }, +}; +module_platform_driver(tpm_loongson); + +MODULE_ALIAS("platform:tpm_loongson"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Loongson TPM driver"); diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c index 038701d48351..879ac88f5783 100644 --- a/drivers/char/tpm/tpm_nsc.c +++ b/drivers/char/tpm/tpm_nsc.c @@ -178,7 +178,8 @@ static int tpm_nsc_recv(struct tpm_chip *chip, u8 * buf, size_t count) return size; } -static int tpm_nsc_send(struct tpm_chip *chip, u8 * buf, size_t count) +static int tpm_nsc_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz, + size_t count) { struct tpm_nsc_priv *priv = dev_get_drvdata(&chip->dev); u8 data; @@ -410,7 +411,7 @@ static void __exit cleanup_nsc(void) module_init(init_nsc); module_exit(cleanup_nsc); 
-MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)"); +MODULE_AUTHOR("Leendert van Doorn <leendert@watson.ibm.com>"); MODULE_DESCRIPTION("TPM Driver"); MODULE_VERSION("2.0"); MODULE_LICENSE("GPL"); diff --git a/drivers/char/tpm/tpm_ppi.c b/drivers/char/tpm/tpm_ppi.c index bc7b1b4501b3..c9793a3d986d 100644 --- a/drivers/char/tpm/tpm_ppi.c +++ b/drivers/char/tpm/tpm_ppi.c @@ -33,6 +33,20 @@ static const guid_t tpm_ppi_guid = GUID_INIT(0x3DDDFAA6, 0x361B, 0x4EB4, 0xA4, 0x24, 0x8D, 0x10, 0x08, 0x9D, 0x16, 0x53); +static const char * const tpm_ppi_info[] = { + "Not implemented", + "BIOS only", + "Blocked for OS by system firmware", + "User required", + "User not required", +}; + +/* A spinlock to protect access to the cache from concurrent reads */ +static DEFINE_MUTEX(tpm_ppi_lock); + +static u32 ppi_operations_cache[PPI_VS_REQ_END + 1]; +static bool ppi_cache_populated; + static bool tpm_ppi_req_has_parameter(u64 req) { return req == 23; @@ -52,7 +66,7 @@ static ssize_t tpm_show_ppi_version(struct device *dev, { struct tpm_chip *chip = to_tpm_chip(dev); - return scnprintf(buf, PAGE_SIZE, "%s\n", chip->ppi_version); + return sysfs_emit(buf, "%s\n", chip->ppi_version); } static ssize_t tpm_show_ppi_request(struct device *dev, @@ -87,12 +101,10 @@ static ssize_t tpm_show_ppi_request(struct device *dev, else { req = obj->package.elements[1].integer.value; if (tpm_ppi_req_has_parameter(req)) - size = scnprintf(buf, PAGE_SIZE, - "%llu %llu\n", req, - obj->package.elements[2].integer.value); + size = sysfs_emit(buf, "%llu %llu\n", req, + obj->package.elements[2].integer.value); else - size = scnprintf(buf, PAGE_SIZE, - "%llu\n", req); + size = sysfs_emit(buf, "%llu\n", req); } } else if (obj->package.count == 2 && obj->package.elements[0].type == ACPI_TYPE_INTEGER && @@ -100,8 +112,8 @@ static ssize_t tpm_show_ppi_request(struct device *dev, if (obj->package.elements[0].integer.value) size = -EFAULT; else - size = scnprintf(buf, PAGE_SIZE, "%llu\n", - obj->package.elements[1].integer.value); + size = sysfs_emit(buf, "%llu\n", + obj->package.elements[1].integer.value); } ACPI_FREE(obj); @@ -211,10 +223,10 @@ static ssize_t tpm_show_ppi_transition_action(struct device *dev, } if (ret < ARRAY_SIZE(info) - 1) - status = scnprintf(buf, PAGE_SIZE, "%d: %s\n", ret, info[ret]); + status = sysfs_emit(buf, "%d: %s\n", ret, info[ret]); else - status = scnprintf(buf, PAGE_SIZE, "%d: %s\n", ret, - info[ARRAY_SIZE(info)-1]); + status = sysfs_emit(buf, "%d: %s\n", ret, + info[ARRAY_SIZE(info) - 1]); return status; } @@ -255,23 +267,23 @@ static ssize_t tpm_show_ppi_response(struct device *dev, res = ret_obj[2].integer.value; if (req) { if (res == 0) - status = scnprintf(buf, PAGE_SIZE, "%llu %s\n", req, - "0: Success"); + status = sysfs_emit(buf, "%llu %s\n", req, + "0: Success"); else if (res == 0xFFFFFFF0) - status = scnprintf(buf, PAGE_SIZE, "%llu %s\n", req, - "0xFFFFFFF0: User Abort"); + status = sysfs_emit(buf, "%llu %s\n", req, + "0xFFFFFFF0: User Abort"); else if (res == 0xFFFFFFF1) - status = scnprintf(buf, PAGE_SIZE, "%llu %s\n", req, - "0xFFFFFFF1: BIOS Failure"); + status = sysfs_emit(buf, "%llu %s\n", req, + "0xFFFFFFF1: BIOS Failure"); else if (res >= 1 && res <= 0x00000FFF) - status = scnprintf(buf, PAGE_SIZE, "%llu %llu: %s\n", - req, res, "Corresponding TPM error"); + status = sysfs_emit(buf, "%llu %llu: %s\n", + req, res, "Corresponding TPM error"); else - status = scnprintf(buf, PAGE_SIZE, "%llu %llu: %s\n", - req, res, "Error"); + status = sysfs_emit(buf, "%llu %llu: %s\n", + req, 
res, "Error"); } else { - status = scnprintf(buf, PAGE_SIZE, "%llu: %s\n", - req, "No Recent Request"); + status = sysfs_emit(buf, "%llu: %s\n", + req, "No Recent Request"); } cleanup: @@ -279,46 +291,33 @@ cleanup: return status; } -static ssize_t show_ppi_operations(acpi_handle dev_handle, char *buf, u32 start, - u32 end) +static ssize_t cache_ppi_operations(acpi_handle dev_handle, char *buf) { int i; u32 ret; - char *str = buf; + int len = 0; union acpi_object *obj, tmp; union acpi_object argv = ACPI_INIT_DSM_ARGV4(1, &tmp); - static char *info[] = { - "Not implemented", - "BIOS only", - "Blocked for OS by BIOS", - "User required", - "User not required", - }; - if (!acpi_check_dsm(dev_handle, &tpm_ppi_guid, TPM_PPI_REVISION_ID_1, 1 << TPM_PPI_FN_GETOPR)) return -EPERM; tmp.integer.type = ACPI_TYPE_INTEGER; - for (i = start; i <= end; i++) { + for (i = 0; i <= PPI_VS_REQ_END; i++) { tmp.integer.value = i; obj = tpm_eval_dsm(dev_handle, TPM_PPI_FN_GETOPR, ACPI_TYPE_INTEGER, &argv, TPM_PPI_REVISION_ID_1); - if (!obj) { + if (!obj) return -ENOMEM; - } else { - ret = obj->integer.value; - ACPI_FREE(obj); - } - if (ret > 0 && ret < ARRAY_SIZE(info)) - str += scnprintf(str, PAGE_SIZE, "%d %d: %s\n", - i, ret, info[ret]); + ret = obj->integer.value; + ppi_operations_cache[i] = ret; + ACPI_FREE(obj); } - return str - buf; + return len; } static ssize_t tpm_show_ppi_tcg_operations(struct device *dev, @@ -326,9 +325,30 @@ static ssize_t tpm_show_ppi_tcg_operations(struct device *dev, char *buf) { struct tpm_chip *chip = to_tpm_chip(dev); + ssize_t len = 0; + u32 ret; + int i; + + mutex_lock(&tpm_ppi_lock); + if (!ppi_cache_populated) { + len = cache_ppi_operations(chip->acpi_dev_handle, buf); + if (len < 0) { + mutex_unlock(&tpm_ppi_lock); + return len; + } + + ppi_cache_populated = true; + } - return show_ppi_operations(chip->acpi_dev_handle, buf, 0, - PPI_TPM_REQ_MAX); + for (i = 0; i <= PPI_TPM_REQ_MAX; i++) { + ret = ppi_operations_cache[i]; + if (ret >= 0 && ret < ARRAY_SIZE(tpm_ppi_info)) + len += sysfs_emit_at(buf, len, "%d %d: %s\n", + i, ret, tpm_ppi_info[ret]); + } + mutex_unlock(&tpm_ppi_lock); + + return len; } static ssize_t tpm_show_ppi_vs_operations(struct device *dev, @@ -336,9 +356,30 @@ static ssize_t tpm_show_ppi_vs_operations(struct device *dev, char *buf) { struct tpm_chip *chip = to_tpm_chip(dev); + ssize_t len = 0; + u32 ret; + int i; + + mutex_lock(&tpm_ppi_lock); + if (!ppi_cache_populated) { + len = cache_ppi_operations(chip->acpi_dev_handle, buf); + if (len < 0) { + mutex_unlock(&tpm_ppi_lock); + return len; + } + + ppi_cache_populated = true; + } + + for (i = PPI_VS_REQ_START; i <= PPI_VS_REQ_END; i++) { + ret = ppi_operations_cache[i]; + if (ret >= 0 && ret < ARRAY_SIZE(tpm_ppi_info)) + len += sysfs_emit_at(buf, len, "%d %d: %s\n", + i, ret, tpm_ppi_info[ret]); + } + mutex_unlock(&tpm_ppi_lock); - return show_ppi_operations(chip->acpi_dev_handle, buf, PPI_VS_REQ_START, - PPI_VS_REQ_END); + return len; } static DEVICE_ATTR(version, S_IRUGO, tpm_show_ppi_version, NULL); diff --git a/drivers/char/tpm/tpm_svsm.c b/drivers/char/tpm/tpm_svsm.c new file mode 100644 index 000000000000..f5ba0f64850b --- /dev/null +++ b/drivers/char/tpm/tpm_svsm.c @@ -0,0 +1,121 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved. + * + * Driver for the vTPM defined by the AMD SVSM spec [1]. 
+ * + * The specification defines a protocol that a SEV-SNP guest OS can use to + * discover and talk to a vTPM emulated by the Secure VM Service Module (SVSM) + * in the guest context, but at a more privileged level (usually VMPL0). + * + * [1] "Secure VM Service Module for SEV-SNP Guests" + * Publication # 58019 Revision: 1.00 + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/platform_device.h> +#include <linux/tpm_svsm.h> + +#include <asm/sev.h> + +#include "tpm.h" + +struct tpm_svsm_priv { + void *buffer; +}; + +static int tpm_svsm_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz, + size_t cmd_len) +{ + struct tpm_svsm_priv *priv = dev_get_drvdata(&chip->dev); + int ret; + + ret = svsm_vtpm_cmd_request_fill(priv->buffer, 0, buf, cmd_len); + if (ret) + return ret; + + /* + * The SVSM call uses the same buffer for the command and for the + * response, so after this call, the buffer will contain the response. + * + * Note: we have to use an internal buffer because the device in SVSM + * expects the svsm_vtpm header + data to be physically contiguous. + */ + ret = snp_svsm_vtpm_send_command(priv->buffer); + if (ret) + return ret; + + return svsm_vtpm_cmd_response_parse(priv->buffer, buf, bufsiz); +} + +static struct tpm_class_ops tpm_chip_ops = { + .flags = TPM_OPS_AUTO_STARTUP, + .send = tpm_svsm_send, +}; + +static int __init tpm_svsm_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct tpm_svsm_priv *priv; + struct tpm_chip *chip; + int err; + + priv = devm_kmalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + /* + * The maximum buffer supported is one page (see SVSM_VTPM_MAX_BUFFER + * in tpm_svsm.h). + */ + priv->buffer = (void *)devm_get_free_pages(dev, GFP_KERNEL, 0); + if (!priv->buffer) + return -ENOMEM; + + chip = tpmm_chip_alloc(dev, &tpm_chip_ops); + if (IS_ERR(chip)) + return PTR_ERR(chip); + + dev_set_drvdata(&chip->dev, priv); + + chip->flags |= TPM_CHIP_FLAG_SYNC; + err = tpm2_probe(chip); + if (err) + return err; + + err = tpm_chip_register(chip); + if (err) + return err; + + dev_info(dev, "SNP SVSM vTPM %s device\n", + (chip->flags & TPM_CHIP_FLAG_TPM2) ? "2.0" : "1.2"); + + return 0; +} + +static void __exit tpm_svsm_remove(struct platform_device *pdev) +{ + struct tpm_chip *chip = platform_get_drvdata(pdev); + + tpm_chip_unregister(chip); +} + +/* + * tpm_svsm_remove() lives in .exit.text. For drivers registered via + * module_platform_driver_probe() this is ok because they cannot get unbound + * at runtime. So mark the driver struct with __refdata to prevent modpost + * triggering a section mismatch warning. 
+ */ +static struct platform_driver tpm_svsm_driver __refdata = { + .remove = __exit_p(tpm_svsm_remove), + .driver = { + .name = "tpm-svsm", + }, +}; + +module_platform_driver_probe(tpm_svsm_driver, tpm_svsm_probe); + +MODULE_DESCRIPTION("SNP SVSM vTPM Driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:tpm-svsm"); diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c index 2c52b7905b07..9aa230a63616 100644 --- a/drivers/char/tpm/tpm_tis.c +++ b/drivers/char/tpm/tpm_tis.c @@ -347,6 +347,7 @@ static void tpm_tis_plat_remove(struct platform_device *pdev) #ifdef CONFIG_OF static const struct of_device_id tis_of_platform_match[] = { + {.compatible = "atmel,at97sc3204"}, {.compatible = "tcg,tpm-tis-mmio"}, {}, }; @@ -355,7 +356,7 @@ MODULE_DEVICE_TABLE(of, tis_of_platform_match); static struct platform_driver tis_drv = { .probe = tpm_tis_plat_probe, - .remove_new = tpm_tis_plat_remove, + .remove = tpm_tis_plat_remove, .driver = { .name = "tpm_tis", .pm = &tpm_tis_pm, @@ -428,7 +429,7 @@ static void __exit cleanup_tis(void) module_init(init_tis); module_exit(cleanup_tis); -MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)"); +MODULE_AUTHOR("Leendert van Doorn <leendert@watson.ibm.com>"); MODULE_DESCRIPTION("TPM Driver"); MODULE_VERSION("2.0"); MODULE_LICENSE("GPL"); diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c index 1b350412d8a6..e2a1769081b1 100644 --- a/drivers/char/tpm/tpm_tis_core.c +++ b/drivers/char/tpm/tpm_tis_core.c @@ -114,11 +114,10 @@ again: return 0; /* process status changes without irq support */ do { + usleep_range(priv->timeout_min, priv->timeout_max); status = chip->ops->status(chip); if ((status & mask) == mask) return 0; - usleep_range(priv->timeout_min, - priv->timeout_max); } while (time_before(jiffies, stop)); return -ETIME; } @@ -266,8 +265,7 @@ static u8 tpm_tis_status(struct tpm_chip *chip) /* * Dump stack for forensics, as invalid TPM_STS.x could be - * potentially triggered by impaired tpm_try_get_ops() or - * tpm_find_get_ops(). + * potentially triggered by impaired tpm_try_get_ops(). 
*/ dump_stack(); } @@ -464,7 +462,10 @@ static int tpm_tis_send_data(struct tpm_chip *chip, const u8 *buf, size_t len) if (wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c, &priv->int_queue, false) < 0) { - rc = -ETIME; + if (test_bit(TPM_TIS_STATUS_VALID_RETRY, &priv->flags)) + rc = -EAGAIN; + else + rc = -ETIME; goto out_err; } status = tpm_tis_status(chip); @@ -481,7 +482,10 @@ static int tpm_tis_send_data(struct tpm_chip *chip, const u8 *buf, size_t len) if (wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c, &priv->int_queue, false) < 0) { - rc = -ETIME; + if (test_bit(TPM_TIS_STATUS_VALID_RETRY, &priv->flags)) + rc = -EAGAIN; + else + rc = -ETIME; goto out_err; } status = tpm_tis_status(chip); @@ -546,9 +550,11 @@ static int tpm_tis_send_main(struct tpm_chip *chip, const u8 *buf, size_t len) if (rc >= 0) /* Data transfer done successfully */ break; - else if (rc != -EIO) + else if (rc != -EAGAIN && rc != -EIO) /* Data transfer failed, not recoverable */ return rc; + + usleep_range(priv->timeout_min, priv->timeout_max); } /* go and do it */ @@ -573,7 +579,8 @@ out_err: return rc; } -static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len) +static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz, + size_t len) { int rc, irq; struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); @@ -919,8 +926,6 @@ static int tpm_tis_probe_irq_single(struct tpm_chip *chip, u32 intmask, int rc; u32 int_status; - INIT_WORK(&priv->free_irq_work, tpm_tis_free_irq_func); - rc = devm_request_threaded_irq(chip->dev.parent, irq, NULL, tis_int_handler, IRQF_ONESHOT | flags, dev_name(&chip->dev), chip); @@ -972,8 +977,8 @@ restore_irqs: * will call disable_irq which undoes all of the above. */ if (!(chip->flags & TPM_CHIP_FLAG_IRQ)) { - tpm_tis_write8(priv, original_int_vec, - TPM_INT_VECTOR(priv->locality)); + tpm_tis_write8(priv, TPM_INT_VECTOR(priv->locality), + original_int_vec); rc = -1; } @@ -1022,7 +1027,8 @@ void tpm_tis_remove(struct tpm_chip *chip) interrupt = 0; tpm_tis_write32(priv, reg, ~TPM_GLOBAL_INT_ENABLE & interrupt); - flush_work(&priv->free_irq_work); + if (priv->free_irq_work.func) + flush_work(&priv->free_irq_work); tpm_tis_clkrun_enable(chip, false); @@ -1059,11 +1065,6 @@ static void tpm_tis_clkrun_enable(struct tpm_chip *chip, bool value) clkrun_val &= ~LPC_CLKRUN_EN; iowrite32(clkrun_val, data->ilb_base_addr + LPC_CNTRL_OFFSET); - /* - * Write any random value on port 0x80 which is on LPC, to make - * sure LPC clock is running before sending any TPM command. - */ - outb(0xCC, 0x80); } else { data->clkrun_enabled--; if (data->clkrun_enabled) @@ -1074,13 +1075,15 @@ static void tpm_tis_clkrun_enable(struct tpm_chip *chip, bool value) /* Enable LPC CLKRUN# */ clkrun_val |= LPC_CLKRUN_EN; iowrite32(clkrun_val, data->ilb_base_addr + LPC_CNTRL_OFFSET); - - /* - * Write any random value on port 0x80 which is on LPC, to make - * sure LPC clock is running before sending any TPM command. - */ - outb(0xCC, 0x80); } + +#ifdef CONFIG_HAS_IOPORT + /* + * Write any random value on port 0x80 which is on LPC, to make + * sure LPC clock is running before sending any TPM command. 
+ */ + outb(0xCC, 0x80); +#endif } static const struct tpm_class_ops tpm_tis = { @@ -1132,6 +1135,7 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq, priv->phy_ops = phy_ops; priv->locality_count = 0; mutex_init(&priv->locality_count_mutex); + INIT_WORK(&priv->free_irq_work, tpm_tis_free_irq_func); dev_set_drvdata(&chip->dev, priv); @@ -1147,6 +1151,9 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq, priv->timeout_max = TIS_TIMEOUT_MAX_ATML; } + if (priv->manufacturer_id == TPM_VID_IFX) + set_bit(TPM_TIS_STATUS_VALID_RETRY, &priv->flags); + if (is_bsw()) { priv->ilb_base_addr = ioremap(INTEL_LEGACY_BLK_BASE_ADDR, ILB_REMAP_SIZE); @@ -1361,7 +1368,7 @@ int tpm_tis_resume(struct device *dev) EXPORT_SYMBOL_GPL(tpm_tis_resume); #endif -MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)"); +MODULE_AUTHOR("Leendert van Doorn <leendert@watson.ibm.com>"); MODULE_DESCRIPTION("TPM Driver"); MODULE_VERSION("2.0"); MODULE_LICENSE("GPL"); diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h index 13e99cf65efe..6c3aa480396b 100644 --- a/drivers/char/tpm/tpm_tis_core.h +++ b/drivers/char/tpm/tpm_tis_core.h @@ -54,7 +54,7 @@ enum tis_int_flags { enum tis_defaults { TIS_MEM_LEN = 0x5000, TIS_SHORT_TIMEOUT = 750, /* ms */ - TIS_LONG_TIMEOUT = 2000, /* 2 sec */ + TIS_LONG_TIMEOUT = 4000, /* 4 secs */ TIS_TIMEOUT_MIN_ATML = 14700, /* usecs */ TIS_TIMEOUT_MAX_ATML = 15000, /* usecs */ }; @@ -89,6 +89,7 @@ enum tpm_tis_flags { TPM_TIS_INVALID_STATUS = 1, TPM_TIS_DEFAULT_CANCELLATION = 2, TPM_TIS_IRQ_TESTED = 3, + TPM_TIS_STATUS_VALID_RETRY = 4, }; struct tpm_tis_data { @@ -210,7 +211,7 @@ static inline int tpm_tis_verify_crc(struct tpm_tis_data *data, size_t len, static inline bool is_bsw(void) { #ifdef CONFIG_X86 - return ((boot_cpu_data.x86_model == INTEL_FAM6_ATOM_AIRMONT) ? 1 : 0); + return (boot_cpu_data.x86_vfm == INTEL_ATOM_AIRMONT) ? 
1 : 0; #else return false; #endif diff --git a/drivers/char/tpm/tpm_tis_i2c.c b/drivers/char/tpm/tpm_tis_i2c.c index a897402cc36a..6cd07dd34507 100644 --- a/drivers/char/tpm/tpm_tis_i2c.c +++ b/drivers/char/tpm/tpm_tis_i2c.c @@ -375,7 +375,7 @@ static void tpm_tis_i2c_remove(struct i2c_client *client) } static const struct i2c_device_id tpm_tis_i2c_id[] = { - { "tpm_tis_i2c", 0 }, + { "tpm_tis_i2c" }, {} }; MODULE_DEVICE_TABLE(i2c, tpm_tis_i2c_id); @@ -383,6 +383,8 @@ MODULE_DEVICE_TABLE(i2c, tpm_tis_i2c_id); #ifdef CONFIG_OF static const struct of_device_id of_tis_i2c_match[] = { { .compatible = "infineon,slb9673", }, + { .compatible = "nuvoton,npct75x", }, + { .compatible = "tcg,tpm-tis-i2c", }, {} }; MODULE_DEVICE_TABLE(of, of_tis_i2c_match); diff --git a/drivers/char/tpm/tpm_tis_i2c_cr50.c b/drivers/char/tpm/tpm_tis_i2c_cr50.c index e70abd69e1ae..fc6891a0b693 100644 --- a/drivers/char/tpm/tpm_tis_i2c_cr50.c +++ b/drivers/char/tpm/tpm_tis_i2c_cr50.c @@ -17,6 +17,7 @@ */ #include <linux/acpi.h> +#include <linux/bug.h> #include <linux/completion.h> #include <linux/i2c.h> #include <linux/interrupt.h> @@ -30,11 +31,13 @@ #define TPM_CR50_MAX_BUFSIZE 64 #define TPM_CR50_TIMEOUT_SHORT_MS 2 /* Short timeout during transactions */ #define TPM_CR50_TIMEOUT_NOIRQ_MS 20 /* Timeout for TPM ready without IRQ */ -#define TPM_CR50_I2C_DID_VID 0x00281ae0L /* Device and vendor ID reg value */ -#define TPM_TI50_I2C_DID_VID 0x504a6666L /* Device and vendor ID reg value */ +#define TPM_CR50_I2C_DID_VID 0x00281ae0L /* Device and vendor ID for Cr50 H1 */ +#define TPM_TI50_DT_I2C_DID_VID 0x504a6666L /* Device and vendor ID for Ti50 DT */ +#define TPM_TI50_OT_I2C_DID_VID 0x50666666L /* Device and vendor ID for TI50 OT */ #define TPM_CR50_I2C_MAX_RETRIES 3 /* Max retries due to I2C errors */ #define TPM_CR50_I2C_RETRY_DELAY_LO 55 /* Min usecs between retries on I2C */ #define TPM_CR50_I2C_RETRY_DELAY_HI 65 /* Max usecs between retries on I2C */ +#define TPM_CR50_I2C_DEFAULT_LOC 0 #define TPM_I2C_ACCESS(l) (0x0000 | ((l) << 4)) #define TPM_I2C_STS(l) (0x0001 | ((l) << 4)) @@ -199,8 +202,6 @@ static int tpm_cr50_i2c_read(struct tpm_chip *chip, u8 addr, u8 *buffer, size_t }; int rc; - i2c_lock_bus(client->adapter, I2C_LOCK_SEGMENT); - /* Prepare for completion interrupt */ tpm_cr50_i2c_enable_tpm_irq(chip); @@ -219,7 +220,6 @@ static int tpm_cr50_i2c_read(struct tpm_chip *chip, u8 addr, u8 *buffer, size_t out: tpm_cr50_i2c_disable_tpm_irq(chip); - i2c_unlock_bus(client->adapter, I2C_LOCK_SEGMENT); if (rc < 0) return rc; @@ -235,7 +235,7 @@ out: * @len: Number of bytes to write. * * The provided address is prepended to the data in 'buffer', the - * cobined address+data is sent to the TPM, then wait for TPM to + * combined address+data is sent to the TPM, then wait for TPM to * indicate it is done writing. * * Return: @@ -261,8 +261,6 @@ static int tpm_cr50_i2c_write(struct tpm_chip *chip, u8 addr, u8 *buffer, priv->buf[0] = addr; memcpy(priv->buf + 1, buffer, len); - i2c_lock_bus(client->adapter, I2C_LOCK_SEGMENT); - /* Prepare for completion interrupt */ tpm_cr50_i2c_enable_tpm_irq(chip); @@ -276,7 +274,6 @@ static int tpm_cr50_i2c_write(struct tpm_chip *chip, u8 addr, u8 *buffer, out: tpm_cr50_i2c_disable_tpm_irq(chip); - i2c_unlock_bus(client->adapter, I2C_LOCK_SEGMENT); if (rc < 0) return rc; @@ -285,25 +282,26 @@ out: } /** - * tpm_cr50_check_locality() - Verify TPM locality 0 is active. + * tpm_cr50_check_locality() - Verify if required TPM locality is active. * @chip: A TPM chip. 
+ * @loc: Locality to be verified * * Return: - * - 0: Success. + * - loc: Success. * - -errno: A POSIX error code. */ -static int tpm_cr50_check_locality(struct tpm_chip *chip) +static int tpm_cr50_check_locality(struct tpm_chip *chip, int loc) { u8 mask = TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY; u8 buf; int rc; - rc = tpm_cr50_i2c_read(chip, TPM_I2C_ACCESS(0), &buf, sizeof(buf)); + rc = tpm_cr50_i2c_read(chip, TPM_I2C_ACCESS(loc), &buf, sizeof(buf)); if (rc < 0) return rc; if ((buf & mask) == mask) - return 0; + return loc; return -EIO; } @@ -311,53 +309,72 @@ static int tpm_cr50_check_locality(struct tpm_chip *chip) /** * tpm_cr50_release_locality() - Release TPM locality. * @chip: A TPM chip. - * @force: Flag to force release if set. + * @loc: Locality to be released + * + * Return: + * - 0: Success. + * - -errno: A POSIX error code. */ -static void tpm_cr50_release_locality(struct tpm_chip *chip, bool force) +static int tpm_cr50_release_locality(struct tpm_chip *chip, int loc) { + struct i2c_client *client = to_i2c_client(chip->dev.parent); u8 mask = TPM_ACCESS_VALID | TPM_ACCESS_REQUEST_PENDING; - u8 addr = TPM_I2C_ACCESS(0); + u8 addr = TPM_I2C_ACCESS(loc); u8 buf; + int rc; - if (tpm_cr50_i2c_read(chip, addr, &buf, sizeof(buf)) < 0) - return; + rc = tpm_cr50_i2c_read(chip, addr, &buf, sizeof(buf)); + if (rc < 0) + goto unlock_out; - if (force || (buf & mask) == mask) { + if ((buf & mask) == mask) { buf = TPM_ACCESS_ACTIVE_LOCALITY; - tpm_cr50_i2c_write(chip, addr, &buf, sizeof(buf)); + rc = tpm_cr50_i2c_write(chip, addr, &buf, sizeof(buf)); } + +unlock_out: + i2c_unlock_bus(client->adapter, I2C_LOCK_SEGMENT); + return rc; } /** - * tpm_cr50_request_locality() - Request TPM locality 0. + * tpm_cr50_request_locality() - Request TPM locality. * @chip: A TPM chip. + * @loc: Locality to be requested. * * Return: - * - 0: Success. + * - loc: Success. * - -errno: A POSIX error code. 
*/ -static int tpm_cr50_request_locality(struct tpm_chip *chip) +static int tpm_cr50_request_locality(struct tpm_chip *chip, int loc) { + struct i2c_client *client = to_i2c_client(chip->dev.parent); u8 buf = TPM_ACCESS_REQUEST_USE; unsigned long stop; int rc; - if (!tpm_cr50_check_locality(chip)) - return 0; + i2c_lock_bus(client->adapter, I2C_LOCK_SEGMENT); - rc = tpm_cr50_i2c_write(chip, TPM_I2C_ACCESS(0), &buf, sizeof(buf)); + if (tpm_cr50_check_locality(chip, loc) == loc) + return loc; + + rc = tpm_cr50_i2c_write(chip, TPM_I2C_ACCESS(loc), &buf, sizeof(buf)); if (rc < 0) - return rc; + goto unlock_out; stop = jiffies + chip->timeout_a; do { - if (!tpm_cr50_check_locality(chip)) - return 0; + if (tpm_cr50_check_locality(chip, loc) == loc) + return loc; msleep(TPM_CR50_TIMEOUT_SHORT_MS); } while (time_before(jiffies, stop)); - return -ETIMEDOUT; + rc = -ETIMEDOUT; + +unlock_out: + i2c_unlock_bus(client->adapter, I2C_LOCK_SEGMENT); + return rc; } /** @@ -373,7 +390,7 @@ static u8 tpm_cr50_i2c_tis_status(struct tpm_chip *chip) { u8 buf[4]; - if (tpm_cr50_i2c_read(chip, TPM_I2C_STS(0), buf, sizeof(buf)) < 0) + if (tpm_cr50_i2c_read(chip, TPM_I2C_STS(chip->locality), buf, sizeof(buf)) < 0) return 0; return buf[0]; @@ -389,7 +406,7 @@ static void tpm_cr50_i2c_tis_set_ready(struct tpm_chip *chip) { u8 buf[4] = { TPM_STS_COMMAND_READY }; - tpm_cr50_i2c_write(chip, TPM_I2C_STS(0), buf, sizeof(buf)); + tpm_cr50_i2c_write(chip, TPM_I2C_STS(chip->locality), buf, sizeof(buf)); msleep(TPM_CR50_TIMEOUT_SHORT_MS); } @@ -419,7 +436,7 @@ static int tpm_cr50_i2c_get_burst_and_status(struct tpm_chip *chip, u8 mask, stop = jiffies + chip->timeout_b; do { - if (tpm_cr50_i2c_read(chip, TPM_I2C_STS(0), buf, sizeof(buf)) < 0) { + if (tpm_cr50_i2c_read(chip, TPM_I2C_STS(chip->locality), buf, sizeof(buf)) < 0) { msleep(TPM_CR50_TIMEOUT_SHORT_MS); continue; } @@ -453,7 +470,7 @@ static int tpm_cr50_i2c_tis_recv(struct tpm_chip *chip, u8 *buf, size_t buf_len) u8 mask = TPM_STS_VALID | TPM_STS_DATA_AVAIL; size_t burstcnt, cur, len, expected; - u8 addr = TPM_I2C_DATA_FIFO(0); + u8 addr = TPM_I2C_DATA_FIFO(chip->locality); u32 status; int rc; @@ -515,7 +532,6 @@ static int tpm_cr50_i2c_tis_recv(struct tpm_chip *chip, u8 *buf, size_t buf_len) goto out_err; } - tpm_cr50_release_locality(chip, false); return cur; out_err: @@ -523,7 +539,6 @@ out_err: if (tpm_cr50_i2c_tis_status(chip) & TPM_STS_COMMAND_READY) tpm_cr50_i2c_tis_set_ready(chip); - tpm_cr50_release_locality(chip, false); return rc; } @@ -531,13 +546,15 @@ out_err: * tpm_cr50_i2c_tis_send() - TPM transmission callback. * @chip: A TPM chip. * @buf: Buffer to send. - * @len: Buffer length. + * @bufsiz: Buffer size. + * @len: Command length. * * Return: * - 0: Success. * - -errno: A POSIX error code. 
*/ -static int tpm_cr50_i2c_tis_send(struct tpm_chip *chip, u8 *buf, size_t len) +static int tpm_cr50_i2c_tis_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz, + size_t len) { size_t burstcnt, limit, sent = 0; u8 tpm_go[4] = { TPM_STS_GO }; @@ -545,10 +562,6 @@ static int tpm_cr50_i2c_tis_send(struct tpm_chip *chip, u8 *buf, size_t len) u32 status; int rc; - rc = tpm_cr50_request_locality(chip); - if (rc < 0) - return rc; - /* Wait until TPM is ready for a command */ stop = jiffies + chip->timeout_b; while (!(tpm_cr50_i2c_tis_status(chip) & TPM_STS_COMMAND_READY)) { @@ -577,7 +590,8 @@ static int tpm_cr50_i2c_tis_send(struct tpm_chip *chip, u8 *buf, size_t len) * that is inserted by tpm_cr50_i2c_write() */ limit = min_t(size_t, burstcnt - 1, len); - rc = tpm_cr50_i2c_write(chip, TPM_I2C_DATA_FIFO(0), &buf[sent], limit); + rc = tpm_cr50_i2c_write(chip, TPM_I2C_DATA_FIFO(chip->locality), + &buf[sent], limit); if (rc < 0) { dev_err(&chip->dev, "Write failed\n"); goto out_err; @@ -598,7 +612,7 @@ static int tpm_cr50_i2c_tis_send(struct tpm_chip *chip, u8 *buf, size_t len) } /* Start the TPM command */ - rc = tpm_cr50_i2c_write(chip, TPM_I2C_STS(0), tpm_go, + rc = tpm_cr50_i2c_write(chip, TPM_I2C_STS(chip->locality), tpm_go, sizeof(tpm_go)); if (rc < 0) { dev_err(&chip->dev, "Start command failed\n"); @@ -611,7 +625,6 @@ out_err: if (tpm_cr50_i2c_tis_status(chip) & TPM_STS_COMMAND_READY) tpm_cr50_i2c_tis_set_ready(chip); - tpm_cr50_release_locality(chip, false); return rc; } @@ -650,6 +663,8 @@ static const struct tpm_class_ops cr50_i2c = { .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID, .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID, .req_canceled = &tpm_cr50_i2c_req_canceled, + .request_locality = &tpm_cr50_request_locality, + .relinquish_locality = &tpm_cr50_release_locality, }; #ifdef CONFIG_ACPI @@ -669,9 +684,29 @@ MODULE_DEVICE_TABLE(of, of_cr50_i2c_match); #endif /** + * tpm_cr50_vid_to_name() - Maps VID to name. + * @vendor: Vendor identifier to map to name + * + * Return: + * A valid string for the vendor or empty string + */ +static const char *tpm_cr50_vid_to_name(u32 vendor) +{ + switch (vendor) { + case TPM_CR50_I2C_DID_VID: + return "cr50"; + case TPM_TI50_DT_I2C_DID_VID: + return "ti50 DT"; + case TPM_TI50_OT_I2C_DID_VID: + return "ti50 OT"; + default: + return "unknown"; + } +} + +/** * tpm_cr50_i2c_probe() - Driver probe function. * @client: I2C client information. - * @id: I2C device id. * * Return: * - 0: Success. 
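The hunks above and below rework the cr50/ti50 I2C driver so locality is no longer hard-wired to 0: every register macro now takes the locality, the TPM core drives it through the new .request_locality/.relinquish_locality class ops, and the I2C segment lock is held from request to release so a full command runs as one bus transaction. A minimal sketch of that handshake, assuming the usual TIS access-register bit values; my_read_access()/my_write_access() are hypothetical stand-ins for tpm_cr50_i2c_read()/tpm_cr50_i2c_write():

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/jiffies.h>
#include <linux/tpm.h>

#define MY_ACCESS_VALID         0x80
#define MY_ACCESS_ACTIVE        0x20
#define MY_ACCESS_REQUEST_USE   0x02

/* Hypothetical register accessors standing in for tpm_cr50_i2c_read/write(). */
int my_read_access(struct tpm_chip *chip, int loc, u8 *val);
int my_write_access(struct tpm_chip *chip, int loc, u8 val);

static int my_request_locality(struct tpm_chip *chip, int loc)
{
        struct i2c_client *client = to_i2c_client(chip->dev.parent);
        unsigned long stop = jiffies + chip->timeout_a;
        u8 access;
        int rc;

        /* Held until my_relinquish_locality(): one command, one bus transaction. */
        i2c_lock_bus(client->adapter, I2C_LOCK_SEGMENT);

        rc = my_write_access(chip, loc, MY_ACCESS_REQUEST_USE);
        if (rc < 0)
                goto unlock;

        do {
                if (!my_read_access(chip, loc, &access) &&
                    (access & (MY_ACCESS_VALID | MY_ACCESS_ACTIVE)) ==
                    (MY_ACCESS_VALID | MY_ACCESS_ACTIVE))
                        return loc;     /* granted: the core records this locality */
                msleep(10);
        } while (time_before(jiffies, stop));

        rc = -ETIMEDOUT;
unlock:
        i2c_unlock_bus(client->adapter, I2C_LOCK_SEGMENT);
        return rc;
}

static int my_relinquish_locality(struct tpm_chip *chip, int loc)
{
        struct i2c_client *client = to_i2c_client(chip->dev.parent);
        int rc = my_write_access(chip, loc, MY_ACCESS_ACTIVE);

        i2c_unlock_bus(client->adapter, I2C_LOCK_SEGMENT);
        return rc;
}

static const struct tpm_class_ops my_tpm_ops = {
        .request_locality       = my_request_locality,
        .relinquish_locality    = my_relinquish_locality,
        /* .status, .send, .recv, ... as in cr50_i2c above */
};

Returning the granted locality number rather than 0 is what lets the core cache it in chip->locality, which the status and data-FIFO accessors in the surrounding hunks use when forming register addresses.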
@@ -685,6 +720,7 @@ static int tpm_cr50_i2c_probe(struct i2c_client *client) u32 vendor; u8 buf[4]; int rc; + int loc; if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) return -ENODEV; @@ -727,29 +763,37 @@ static int tpm_cr50_i2c_probe(struct i2c_client *client) TPM_CR50_TIMEOUT_NOIRQ_MS); } - rc = tpm_cr50_request_locality(chip); - if (rc < 0) { + loc = tpm_cr50_request_locality(chip, TPM_CR50_I2C_DEFAULT_LOC); + if (loc < 0) { dev_err(dev, "Could not request locality\n"); - return rc; + return loc; } /* Read four bytes from DID_VID register */ - rc = tpm_cr50_i2c_read(chip, TPM_I2C_DID_VID(0), buf, sizeof(buf)); + rc = tpm_cr50_i2c_read(chip, TPM_I2C_DID_VID(loc), buf, sizeof(buf)); if (rc < 0) { dev_err(dev, "Could not read vendor id\n"); - tpm_cr50_release_locality(chip, true); + if (tpm_cr50_release_locality(chip, loc)) + dev_err(dev, "Could not release locality\n"); + return rc; + } + + rc = tpm_cr50_release_locality(chip, loc); + if (rc) { + dev_err(dev, "Could not release locality\n"); return rc; } vendor = le32_to_cpup((__le32 *)buf); - if (vendor != TPM_CR50_I2C_DID_VID && vendor != TPM_TI50_I2C_DID_VID) { + if (vendor != TPM_CR50_I2C_DID_VID && + vendor != TPM_TI50_DT_I2C_DID_VID && + vendor != TPM_TI50_OT_I2C_DID_VID) { dev_err(dev, "Vendor ID did not match! ID was %08x\n", vendor); - tpm_cr50_release_locality(chip, true); return -ENODEV; } dev_info(dev, "%s TPM 2.0 (i2c 0x%02x irq %d id 0x%x)\n", - vendor == TPM_TI50_I2C_DID_VID ? "ti50" : "cr50", + tpm_cr50_vid_to_name(vendor), client->addr, client->irq, vendor >> 16); return tpm_chip_register(chip); } @@ -773,7 +817,6 @@ static void tpm_cr50_i2c_remove(struct i2c_client *client) } tpm_chip_unregister(chip); - tpm_cr50_release_locality(chip, true); } static SIMPLE_DEV_PM_OPS(cr50_i2c_pm, tpm_pm_suspend, tpm_pm_resume); diff --git a/drivers/char/tpm/tpm_tis_spi_main.c b/drivers/char/tpm/tpm_tis_spi_main.c index c5c3197ee29f..61b42c83ced8 100644 --- a/drivers/char/tpm/tpm_tis_spi_main.c +++ b/drivers/char/tpm/tpm_tis_spi_main.c @@ -37,6 +37,7 @@ #include "tpm_tis_spi.h" #define MAX_SPI_FRAMESIZE 64 +#define SPI_HDRSIZE 4 /* * TCG SPI flow control is documented in section 6.4 of the spec[1]. 
In short, @@ -146,7 +147,7 @@ static int tpm_tis_spi_transfer_full(struct tpm_tis_data *data, u32 addr, struct spi_transfer spi_xfer; u8 transfer_len; - spi_bus_lock(phy->spi_device->master); + spi_bus_lock(phy->spi_device->controller); while (len) { transfer_len = min_t(u16, len, MAX_SPI_FRAMESIZE); @@ -210,7 +211,7 @@ exit: spi_sync_locked(phy->spi_device, &m); } - spi_bus_unlock(phy->spi_device->master); + spi_bus_unlock(phy->spi_device->controller); return ret; } @@ -247,7 +248,7 @@ static int tpm_tis_spi_write_bytes(struct tpm_tis_data *data, u32 addr, int tpm_tis_spi_init(struct spi_device *spi, struct tpm_tis_spi_phy *phy, int irq, const struct tpm_tis_phy_ops *phy_ops) { - phy->iobuf = devm_kmalloc(&spi->dev, MAX_SPI_FRAMESIZE, GFP_KERNEL); + phy->iobuf = devm_kmalloc(&spi->dev, SPI_HDRSIZE + MAX_SPI_FRAMESIZE, GFP_KERNEL); if (!phy->iobuf) return -ENOMEM; @@ -317,6 +318,7 @@ static void tpm_tis_spi_remove(struct spi_device *dev) } static const struct spi_device_id tpm_tis_spi_id[] = { + { "attpm20p", (unsigned long)tpm_tis_spi_probe }, { "st33htpm-spi", (unsigned long)tpm_tis_spi_probe }, { "slb9670", (unsigned long)tpm_tis_spi_probe }, { "tpm_tis_spi", (unsigned long)tpm_tis_spi_probe }, @@ -327,6 +329,7 @@ static const struct spi_device_id tpm_tis_spi_id[] = { MODULE_DEVICE_TABLE(spi, tpm_tis_spi_id); static const struct of_device_id of_tis_spi_match[] __maybe_unused = { + { .compatible = "atmel,attpm20p", .data = tpm_tis_spi_probe }, { .compatible = "st,st33htpm-spi", .data = tpm_tis_spi_probe }, { .compatible = "infineon,slb9670", .data = tpm_tis_spi_probe }, { .compatible = "tcg,tpm_tis-spi", .data = tpm_tis_spi_probe }, diff --git a/drivers/char/tpm/tpm_tis_synquacer.c b/drivers/char/tpm/tpm_tis_synquacer.c index 0621ebec530b..4927714d277a 100644 --- a/drivers/char/tpm/tpm_tis_synquacer.c +++ b/drivers/char/tpm/tpm_tis_synquacer.c @@ -152,7 +152,7 @@ MODULE_DEVICE_TABLE(acpi, tpm_synquacer_acpi_tbl); static struct platform_driver tis_synquacer_drv = { .probe = tpm_tis_synquacer_probe, - .remove_new = tpm_tis_synquacer_remove, + .remove = tpm_tis_synquacer_remove, .driver = { .name = "tpm_tis_synquacer", .pm = &tpm_tis_synquacer_pm, diff --git a/drivers/char/tpm/tpm_vtpm_proxy.c b/drivers/char/tpm/tpm_vtpm_proxy.c index 30e953988cab..0818bb517805 100644 --- a/drivers/char/tpm/tpm_vtpm_proxy.c +++ b/drivers/char/tpm/tpm_vtpm_proxy.c @@ -243,7 +243,6 @@ static int vtpm_proxy_fops_release(struct inode *inode, struct file *filp) static const struct file_operations vtpm_proxy_fops = { .owner = THIS_MODULE, - .llseek = no_llseek, .read = vtpm_proxy_fops_read, .write = vtpm_proxy_fops_write, .poll = vtpm_proxy_fops_poll, @@ -322,12 +321,14 @@ static int vtpm_proxy_is_driver_command(struct tpm_chip *chip, * * @chip: tpm chip to use * @buf: send buffer + * @bufsiz: size of the buffer * @count: bytes to send * * Return: * 0 in case of success, negative error value otherwise. 
*/ -static int vtpm_proxy_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t count) +static int vtpm_proxy_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz, + size_t count) { struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev); @@ -711,7 +712,7 @@ static void __exit vtpm_module_exit(void) module_init(vtpm_module_init); module_exit(vtpm_module_exit); -MODULE_AUTHOR("Stefan Berger (stefanb@us.ibm.com)"); +MODULE_AUTHOR("Stefan Berger <stefanb@us.ibm.com>"); MODULE_DESCRIPTION("vTPM Driver"); MODULE_VERSION("0.1"); MODULE_LICENSE("GPL"); diff --git a/drivers/char/tpm/tpmrm-dev.c b/drivers/char/tpm/tpmrm-dev.c index eef0fb06ea83..c25df7ea064e 100644 --- a/drivers/char/tpm/tpmrm-dev.c +++ b/drivers/char/tpm/tpmrm-dev.c @@ -46,7 +46,6 @@ static int tpmrm_release(struct inode *inode, struct file *file) const struct file_operations tpmrm_fops = { .owner = THIS_MODULE, - .llseek = no_llseek, .open = tpmrm_open, .read = tpm_common_read, .write = tpm_common_write, diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c index 80cca3b83b22..556bf2256716 100644 --- a/drivers/char/tpm/xen-tpmfront.c +++ b/drivers/char/tpm/xen-tpmfront.c @@ -131,7 +131,8 @@ static size_t shr_data_offset(struct vtpm_shared_page *shr) return struct_size(shr, extra_pages, shr->nr_extra_pages); } -static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) +static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz, + size_t count) { struct tpm_private *priv = dev_get_drvdata(&chip->dev); struct vtpm_shared_page *shr = priv->shr; diff --git a/drivers/char/ttyprintk.c b/drivers/char/ttyprintk.c index ed45d04905c2..d7f841ab4323 100644 --- a/drivers/char/ttyprintk.c +++ b/drivers/char/ttyprintk.c @@ -40,7 +40,7 @@ static struct ttyprintk_port tpk_port; static int tpk_curr; -static char tpk_buffer[TPK_STR_SIZE + 4]; +static u8 tpk_buffer[TPK_STR_SIZE + 4]; static void tpk_flush(void) { @@ -51,9 +51,9 @@ static void tpk_flush(void) } } -static int tpk_printk(const unsigned char *buf, int count) +static int tpk_printk(const u8 *buf, size_t count) { - int i; + size_t i; for (i = 0; i < count; i++) { if (tpk_curr >= TPK_STR_SIZE) { @@ -103,8 +103,7 @@ static void tpk_close(struct tty_struct *tty, struct file *filp) /* * TTY operations write function. */ -static int tpk_write(struct tty_struct *tty, - const unsigned char *buf, int count) +static ssize_t tpk_write(struct tty_struct *tty, const u8 *buf, size_t count) { struct ttyprintk_port *tpkp = tty->driver_data; unsigned long flags; @@ -229,4 +228,5 @@ static void __exit ttyprintk_exit(void) device_initcall(ttyprintk_init); module_exit(ttyprintk_exit); +MODULE_DESCRIPTION("TTY driver to output user messages via printk"); MODULE_LICENSE("GPL"); diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 680d1ef2a217..088182e54deb 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -26,6 +26,7 @@ #include <linux/workqueue.h> #include <linux/module.h> #include <linux/dma-mapping.h> +#include <linux/string_choices.h> #include "../tty/hvc/hvc_console.h" #define is_rproc_enabled IS_ENABLED(CONFIG_REMOTEPROC) @@ -106,7 +107,7 @@ struct port_buffer { unsigned int sgpages; /* sg is used if spages > 0. sg must be the last in is struct */ - struct scatterlist sg[]; + struct scatterlist sg[] __counted_by(sgpages); }; /* @@ -230,9 +231,6 @@ struct port { bool guest_connected; }; -/* This is the very early arch-specified put chars function. 
*/ -static int (*early_put_chars)(u32, const char *, int); - static struct port *find_port_by_vtermno(u32 vtermno) { struct port *port; @@ -653,7 +651,7 @@ done: * Give out the data that's requested from the buffer that we have * queued up. */ -static ssize_t fill_readbuf(struct port *port, char __user *out_buf, +static ssize_t fill_readbuf(struct port *port, u8 __user *out_buf, size_t out_count, bool to_user) { struct port_buffer *buf; @@ -672,7 +670,7 @@ static ssize_t fill_readbuf(struct port *port, char __user *out_buf, if (ret) return -EFAULT; } else { - memcpy((__force char *)out_buf, buf->buf + buf->offset, + memcpy((__force u8 *)out_buf, buf->buf + buf->offset, out_count); } @@ -886,9 +884,9 @@ static int pipe_to_sg(struct pipe_inode_info *pipe, struct pipe_buffer *buf, if (len + offset > PAGE_SIZE) len = PAGE_SIZE - offset; - src = kmap_atomic(buf->page); + src = kmap_local_page(buf->page); memcpy(page_address(page) + offset, src + buf->offset, len); - kunmap_atomic(src); + kunmap_local(src); sg_set_page(&(sgl->sg[sgl->n]), page, len, offset); } @@ -926,14 +924,14 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe, pipe_lock(pipe); ret = 0; - if (pipe_empty(pipe->head, pipe->tail)) + if (pipe_is_empty(pipe)) goto error_out; ret = wait_port_writable(port, filp->f_flags & O_NONBLOCK); if (ret < 0) goto error_out; - occupancy = pipe_occupancy(pipe->head, pipe->tail); + occupancy = pipe_buf_usage(pipe); buf = alloc_buf(port->portdev->vdev, 0, occupancy); if (!buf) { @@ -1096,7 +1094,6 @@ static const struct file_operations port_fops = { .poll = port_fops_poll, .release = port_fops_release, .fasync = port_fops_fasync, - .llseek = no_llseek, }; /* @@ -1107,16 +1104,13 @@ static const struct file_operations port_fops = { * it to finish: inefficient in theory, but in practice * implementations will do it immediately. */ -static int put_chars(u32 vtermno, const char *buf, int count) +static ssize_t put_chars(u32 vtermno, const u8 *buf, size_t count) { struct port *port; struct scatterlist sg[1]; void *data; int ret; - if (unlikely(early_put_chars)) - return early_put_chars(vtermno, buf, count); - port = find_port_by_vtermno(vtermno); if (!port) return -EPIPE; @@ -1138,14 +1132,10 @@ static int put_chars(u32 vtermno, const char *buf, int count) * We call out to fill_readbuf that gets us the required data from the * buffers that are queued up. */ -static int get_chars(u32 vtermno, char *buf, int count) +static ssize_t get_chars(u32 vtermno, u8 *buf, size_t count) { struct port *port; - /* If we've not set up the port yet, we have no input to give. */ - if (unlikely(early_put_chars)) - return 0; - port = find_port_by_vtermno(vtermno); if (!port) return -EPIPE; @@ -1153,7 +1143,7 @@ static int get_chars(u32 vtermno, char *buf, int count) /* If we don't have an input queue yet, we can't get input. */ BUG_ON(!port->in_vq); - return fill_readbuf(port, (__force char __user *)buf, count, false); + return fill_readbuf(port, (__force u8 __user *)buf, count, false); } static void resize_console(struct port *port) @@ -1201,21 +1191,6 @@ static const struct hv_ops hv_ops = { .notifier_hangup = notifier_del_vio, }; -/* - * Console drivers are initialized very early so boot messages can go - * out, so we do things slightly differently from the generic virtio - * initialization of the net and block drivers. - * - * At this stage, the console is output-only. It's too early to set - * up a virtqueue, so we let the drivers do some boutique early-output - * thing. 
- */ -int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int)) -{ - early_put_chars = put_chars; - return hvc_instantiate(0, 0, &hv_ops); -} - static int init_port_console(struct port *port) { int ret; @@ -1256,13 +1231,6 @@ static int init_port_console(struct port *port) spin_unlock_irq(&pdrvdata_lock); port->guest_connected = true; - /* - * Start using the new console output if this is the first - * console to come up. - */ - if (early_put_chars) - early_put_chars = NULL; - /* Notify host of port being opened */ send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1); @@ -1302,8 +1270,7 @@ static int port_debugfs_show(struct seq_file *s, void *data) seq_printf(s, "bytes_sent: %lu\n", port->stats.bytes_sent); seq_printf(s, "bytes_received: %lu\n", port->stats.bytes_received); seq_printf(s, "bytes_discarded: %lu\n", port->stats.bytes_discarded); - seq_printf(s, "is_console: %s\n", - is_console_port(port) ? "yes" : "no"); + seq_printf(s, "is_console: %s\n", str_yes_no(is_console_port(port))); seq_printf(s, "console_vtermno: %u\n", port->cons.vtermno); return 0; @@ -1354,7 +1321,6 @@ static void send_sigio_to_port(struct port *port) static int add_port(struct ports_device *portdev, u32 id) { - char debugfs_name[16]; struct port *port; dev_t devt; int err; @@ -1457,9 +1423,7 @@ static int add_port(struct ports_device *portdev, u32 id) * Finally, create the debugfs file that we can use to * inspect a port's state at any time */ - snprintf(debugfs_name, sizeof(debugfs_name), "vport%up%u", - port->portdev->vdev->index, id); - port->debugfs_file = debugfs_create_file(debugfs_name, 0444, + port->debugfs_file = debugfs_create_file(dev_name(port->dev), 0444, pdrvdata.debugfs_dir, port, &port_debugfs_fops); return 0; @@ -1612,8 +1576,8 @@ static void handle_control_message(struct virtio_device *vdev, break; case VIRTIO_CONSOLE_RESIZE: { struct { - __u16 rows; - __u16 cols; + __virtio16 cols; + __virtio16 rows; } size; if (!is_console_port(port)) @@ -1621,7 +1585,8 @@ static void handle_control_message(struct virtio_device *vdev, memcpy(&size, buf->buf + buf->offset + sizeof(*cpkt), sizeof(size)); - set_console_size(port, size.rows, size.cols); + set_console_size(port, virtio16_to_cpu(vdev, size.rows), + virtio16_to_cpu(vdev, size.cols)); port->cons.hvc->irq_requested = 1; resize_console(port); @@ -1836,8 +1801,7 @@ static void config_work_handler(struct work_struct *work) static int init_vqs(struct ports_device *portdev) { - vq_callback_t **io_callbacks; - char **io_names; + struct virtqueue_info *vqs_info; struct virtqueue **vqs; u32 i, j, nr_ports, nr_queues; int err; @@ -1846,15 +1810,12 @@ static int init_vqs(struct ports_device *portdev) nr_queues = use_multiport(portdev) ? (nr_ports + 1) * 2 : 2; vqs = kmalloc_array(nr_queues, sizeof(struct virtqueue *), GFP_KERNEL); - io_callbacks = kmalloc_array(nr_queues, sizeof(vq_callback_t *), - GFP_KERNEL); - io_names = kmalloc_array(nr_queues, sizeof(char *), GFP_KERNEL); + vqs_info = kcalloc(nr_queues, sizeof(*vqs_info), GFP_KERNEL); portdev->in_vqs = kmalloc_array(nr_ports, sizeof(struct virtqueue *), GFP_KERNEL); portdev->out_vqs = kmalloc_array(nr_ports, sizeof(struct virtqueue *), GFP_KERNEL); - if (!vqs || !io_callbacks || !io_names || !portdev->in_vqs || - !portdev->out_vqs) { + if (!vqs || !vqs_info || !portdev->in_vqs || !portdev->out_vqs) { err = -ENOMEM; goto free; } @@ -1865,30 +1826,27 @@ static int init_vqs(struct ports_device *portdev) * 0 before others. 
*/ j = 0; - io_callbacks[j] = in_intr; - io_callbacks[j + 1] = out_intr; - io_names[j] = "input"; - io_names[j + 1] = "output"; + vqs_info[j].callback = in_intr; + vqs_info[j + 1].callback = out_intr; + vqs_info[j].name = "input"; + vqs_info[j + 1].name = "output"; j += 2; if (use_multiport(portdev)) { - io_callbacks[j] = control_intr; - io_callbacks[j + 1] = NULL; - io_names[j] = "control-i"; - io_names[j + 1] = "control-o"; + vqs_info[j].callback = control_intr; + vqs_info[j].name = "control-i"; + vqs_info[j + 1].name = "control-o"; for (i = 1; i < nr_ports; i++) { j += 2; - io_callbacks[j] = in_intr; - io_callbacks[j + 1] = out_intr; - io_names[j] = "input"; - io_names[j + 1] = "output"; + vqs_info[j].callback = in_intr; + vqs_info[j + 1].callback = out_intr; + vqs_info[j].name = "input"; + vqs_info[j + 1].name = "output"; } } /* Find the queues. */ - err = virtio_find_vqs(portdev->vdev, nr_queues, vqs, - io_callbacks, - (const char **)io_names, NULL); + err = virtio_find_vqs(portdev->vdev, nr_queues, vqs, vqs_info, NULL); if (err) goto free; @@ -1906,8 +1864,7 @@ static int init_vqs(struct ports_device *portdev) portdev->out_vqs[i] = vqs[j + 1]; } } - kfree(io_names); - kfree(io_callbacks); + kfree(vqs_info); kfree(vqs); return 0; @@ -1915,8 +1872,7 @@ static int init_vqs(struct ports_device *portdev) free: kfree(portdev->out_vqs); kfree(portdev->in_vqs); - kfree(io_names); - kfree(io_callbacks); + kfree(vqs_info); kfree(vqs); return err; @@ -1999,7 +1955,6 @@ static int virtcons_probe(struct virtio_device *vdev) struct ports_device *portdev; int err; bool multiport; - bool early = early_put_chars != NULL; /* We only need a config space if features are offered */ if (!vdev->config->get && @@ -2010,9 +1965,6 @@ static int virtcons_probe(struct virtio_device *vdev) return -EINVAL; } - /* Ensure to read early_put_chars now */ - barrier(); - portdev = kmalloc(sizeof(*portdev), GFP_KERNEL); if (!portdev) { err = -ENOMEM; @@ -2052,25 +2004,27 @@ static int virtcons_probe(struct virtio_device *vdev) multiport = true; } - err = init_vqs(portdev); - if (err < 0) { - dev_err(&vdev->dev, "Error %d initializing vqs\n", err); - goto free_chrdev; - } - spin_lock_init(&portdev->ports_lock); INIT_LIST_HEAD(&portdev->ports); INIT_LIST_HEAD(&portdev->list); - virtio_device_ready(portdev->vdev); - INIT_WORK(&portdev->config_work, &config_work_handler); INIT_WORK(&portdev->control_work, &control_work_handler); if (multiport) { spin_lock_init(&portdev->c_ivq_lock); spin_lock_init(&portdev->c_ovq_lock); + } + err = init_vqs(portdev); + if (err < 0) { + dev_err(&vdev->dev, "Error %d initializing vqs\n", err); + goto free_chrdev; + } + + virtio_device_ready(portdev->vdev); + + if (multiport) { err = fill_queue(portdev->c_ivq, &portdev->c_ivq_lock); if (err < 0) { dev_err(&vdev->dev, @@ -2100,18 +2054,6 @@ static int virtcons_probe(struct virtio_device *vdev) __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID, VIRTIO_CONSOLE_DEVICE_READY, 1); - /* - * If there was an early virtio console, assume that there are no - * other consoles. We need to wait until the hvc_alloc matches the - * hvc_instantiate, otherwise tty_open will complain, resulting in - * a "Warning: unable to open an initial console" boot failure. - * Without multiport this is done in add_port above. With multiport - * this might take some host<->guest communication - thus we have to - * wait. 
- */ - if (multiport && early) - wait_for_completion(&early_console_added); - return 0; free_chrdev: @@ -2221,7 +2163,6 @@ static struct virtio_driver virtio_console = { .feature_table = features, .feature_table_size = ARRAY_SIZE(features), .driver.name = KBUILD_MODNAME, - .driver.owner = THIS_MODULE, .id_table = id_table, .probe = virtcons_probe, .remove = virtcons_remove, @@ -2236,7 +2177,6 @@ static struct virtio_driver virtio_rproc_serial = { .feature_table = rproc_serial_features, .feature_table_size = ARRAY_SIZE(rproc_serial_features), .driver.name = "virtio_rproc_serial", - .driver.owner = THIS_MODULE, .id_table = rproc_serial_id_table, .probe = virtcons_probe, .remove = virtcons_remove, diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c index f60bb6151402..34a345dc5e72 100644 --- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c +++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c @@ -84,18 +84,13 @@ #include <linux/sysctl.h> #include <linux/fs.h> #include <linux/cdev.h> +#include <linux/of.h> #include <linux/platform_device.h> +#include <linux/property.h> #include <linux/slab.h> #include <linux/io.h> #include <linux/uaccess.h> -#ifdef CONFIG_OF -/* For open firmware. */ -#include <linux/of_address.h> -#include <linux/of_device.h> -#include <linux/of_platform.h> -#endif - #include "xilinx_hwicap.h" #include "buffer_icap.h" #include "fifo_icap.h" @@ -601,14 +596,14 @@ static const struct file_operations hwicap_fops = { .llseek = noop_llseek, }; -static int hwicap_setup(struct device *dev, int id, - const struct resource *regs_res, +static int hwicap_setup(struct platform_device *pdev, int id, const struct hwicap_driver_config *config, const struct config_registers *config_regs) { dev_t devt; struct hwicap_drvdata *drvdata = NULL; - int retval = 0; + struct device *dev = &pdev->dev; + int retval; dev_info(dev, "Xilinx icap port driver\n"); @@ -636,72 +631,39 @@ static int hwicap_setup(struct device *dev, int id, devt = MKDEV(XHWICAP_MAJOR, XHWICAP_MINOR + id); - drvdata = kzalloc(sizeof(struct hwicap_drvdata), GFP_KERNEL); + drvdata = devm_kzalloc(dev, sizeof(struct hwicap_drvdata), GFP_KERNEL); if (!drvdata) { retval = -ENOMEM; - goto failed0; - } - dev_set_drvdata(dev, (void *)drvdata); - - if (!regs_res) { - dev_err(dev, "Couldn't get registers resource\n"); - retval = -EFAULT; - goto failed1; + goto failed; } + dev_set_drvdata(dev, drvdata); - drvdata->mem_start = regs_res->start; - drvdata->mem_end = regs_res->end; - drvdata->mem_size = resource_size(regs_res); - - if (!request_mem_region(drvdata->mem_start, - drvdata->mem_size, DRIVER_NAME)) { - dev_err(dev, "Couldn't lock memory region at %Lx\n", - (unsigned long long) regs_res->start); - retval = -EBUSY; - goto failed1; + drvdata->base_address = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(drvdata->base_address)) { + retval = PTR_ERR(drvdata->base_address); + goto failed; } drvdata->devt = devt; drvdata->dev = dev; - drvdata->base_address = ioremap(drvdata->mem_start, drvdata->mem_size); - if (!drvdata->base_address) { - dev_err(dev, "ioremap() failed\n"); - retval = -ENOMEM; - goto failed2; - } - drvdata->config = config; drvdata->config_regs = config_regs; mutex_init(&drvdata->sem); drvdata->is_open = 0; - dev_info(dev, "ioremap %llx to %p with size %llx\n", - (unsigned long long) drvdata->mem_start, - drvdata->base_address, - (unsigned long long) drvdata->mem_size); - cdev_init(&drvdata->cdev, &hwicap_fops); drvdata->cdev.owner = THIS_MODULE; retval = 
cdev_add(&drvdata->cdev, devt, 1); if (retval) { dev_err(dev, "cdev_add() failed\n"); - goto failed3; + goto failed; } device_create(&icap_class, dev, devt, NULL, "%s%d", DRIVER_NAME, id); return 0; /* success */ - failed3: - iounmap(drvdata->base_address); - - failed2: - release_mem_region(regs_res->start, drvdata->mem_size); - - failed1: - kfree(drvdata); - - failed0: + failed: mutex_lock(&icap_sem); probed_devices[id] = 0; mutex_unlock(&icap_sem); @@ -723,75 +685,22 @@ static struct hwicap_driver_config fifo_icap_config = { .reset = fifo_icap_reset, }; -#ifdef CONFIG_OF -static int hwicap_of_probe(struct platform_device *op, - const struct hwicap_driver_config *config) -{ - struct resource res; - const unsigned int *id; - const char *family; - int rc; - const struct config_registers *regs; - - - rc = of_address_to_resource(op->dev.of_node, 0, &res); - if (rc) { - dev_err(&op->dev, "invalid address\n"); - return rc; - } - - id = of_get_property(op->dev.of_node, "port-number", NULL); - - /* It's most likely that we're using V4, if the family is not - * specified - */ - regs = &v4_config_registers; - family = of_get_property(op->dev.of_node, "xlnx,family", NULL); - - if (family) { - if (!strcmp(family, "virtex2p")) - regs = &v2_config_registers; - else if (!strcmp(family, "virtex4")) - regs = &v4_config_registers; - else if (!strcmp(family, "virtex5")) - regs = &v5_config_registers; - else if (!strcmp(family, "virtex6")) - regs = &v6_config_registers; - } - return hwicap_setup(&op->dev, id ? *id : -1, &res, config, - regs); -} -#else -static inline int hwicap_of_probe(struct platform_device *op, - const struct hwicap_driver_config *config) -{ - return -EINVAL; -} -#endif /* CONFIG_OF */ - -static const struct of_device_id hwicap_of_match[]; static int hwicap_drv_probe(struct platform_device *pdev) { - const struct of_device_id *match; - struct resource *res; const struct config_registers *regs; + const struct hwicap_driver_config *config; const char *family; + int id = -1; - match = of_match_device(hwicap_of_match, &pdev->dev); - if (match) - return hwicap_of_probe(pdev, match->data); + config = device_get_match_data(&pdev->dev); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) - return -ENODEV; + of_property_read_u32(pdev->dev.of_node, "port-number", &id); /* It's most likely that we're using V4, if the family is not * specified */ regs = &v4_config_registers; - family = pdev->dev.platform_data; - - if (family) { + if (!of_property_read_string(pdev->dev.of_node, "xlnx,family", &family)) { if (!strcmp(family, "virtex2p")) regs = &v2_config_registers; else if (!strcmp(family, "virtex4")) @@ -801,9 +710,7 @@ static int hwicap_drv_probe(struct platform_device *pdev) else if (!strcmp(family, "virtex6")) regs = &v6_config_registers; } - - return hwicap_setup(&pdev->dev, pdev->id, res, - &buffer_icap_config, regs); + return hwicap_setup(pdev, id, config, regs); } static void hwicap_drv_remove(struct platform_device *pdev) @@ -815,16 +722,12 @@ static void hwicap_drv_remove(struct platform_device *pdev) device_destroy(&icap_class, drvdata->devt); cdev_del(&drvdata->cdev); - iounmap(drvdata->base_address); - release_mem_region(drvdata->mem_start, drvdata->mem_size); - kfree(drvdata); mutex_lock(&icap_sem); probed_devices[MINOR(dev->devt)-XHWICAP_MINOR] = 0; mutex_unlock(&icap_sem); } -#ifdef CONFIG_OF /* Match table for device tree binding */ static const struct of_device_id hwicap_of_match[] = { { .compatible = "xlnx,opb-hwicap-1.00.b", .data = &buffer_icap_config}, @@ -832,13 
+735,10 @@ static const struct of_device_id hwicap_of_match[] = { {}, }; MODULE_DEVICE_TABLE(of, hwicap_of_match); -#else -#define hwicap_of_match NULL -#endif static struct platform_driver hwicap_platform_driver = { .probe = hwicap_drv_probe, - .remove_new = hwicap_drv_remove, + .remove = hwicap_drv_remove, .driver = { .name = DRIVER_NAME, .of_match_table = hwicap_of_match, diff --git a/drivers/char/xillybus/Kconfig b/drivers/char/xillybus/Kconfig index a8036dad437e..f51d533390a9 100644 --- a/drivers/char/xillybus/Kconfig +++ b/drivers/char/xillybus/Kconfig @@ -29,7 +29,7 @@ config XILLYBUS_PCIE config XILLYBUS_OF tristate "Xillybus over Device Tree" - depends on OF && HAS_DMA + depends on OF && HAS_DMA && HAS_IOMEM help Set to M if you want Xillybus to find its resources from the Open Firmware Flattened Device Tree. If the target is an embedded diff --git a/drivers/char/xillybus/xillybus_core.c b/drivers/char/xillybus/xillybus_core.c index 11b7c4749274..fc4e69b5cb6a 100644 --- a/drivers/char/xillybus/xillybus_core.c +++ b/drivers/char/xillybus/xillybus_core.c @@ -1184,8 +1184,7 @@ static int xillybus_flush(struct file *filp, fl_owner_t id) static void xillybus_autoflush(struct work_struct *work) { - struct delayed_work *workitem = container_of( - work, struct delayed_work, work); + struct delayed_work *workitem = to_delayed_work(work); struct xilly_channel *channel = container_of( workitem, struct xilly_channel, rd_workitem); int rc; @@ -1974,7 +1973,7 @@ EXPORT_SYMBOL(xillybus_endpoint_remove); static int __init xillybus_init(void) { - xillybus_wq = alloc_workqueue(xillyname, 0, 0); + xillybus_wq = alloc_workqueue(xillyname, WQ_UNBOUND, 0); if (!xillybus_wq) return -ENOMEM; diff --git a/drivers/char/xillybus/xillybus_of.c b/drivers/char/xillybus/xillybus_of.c index e5372e45d211..1a1e64133315 100644 --- a/drivers/char/xillybus/xillybus_of.c +++ b/drivers/char/xillybus/xillybus_of.c @@ -64,14 +64,12 @@ static int xilly_drv_probe(struct platform_device *op) return xillybus_endpoint_discovery(endpoint); } -static int xilly_drv_remove(struct platform_device *op) +static void xilly_drv_remove(struct platform_device *op) { struct device *dev = &op->dev; struct xilly_endpoint *endpoint = dev_get_drvdata(dev); xillybus_endpoint_remove(endpoint); - - return 0; } static struct platform_driver xillybus_platform_driver = { diff --git a/drivers/char/xillybus/xillyusb.c b/drivers/char/xillybus/xillyusb.c index 5a5afa14ca8c..386531474213 100644 --- a/drivers/char/xillybus/xillyusb.c +++ b/drivers/char/xillybus/xillyusb.c @@ -50,6 +50,7 @@ MODULE_LICENSE("GPL v2"); static const char xillyname[] = "xillyusb"; static unsigned int fifo_buf_order; +static struct workqueue_struct *wakeup_wq; #define USB_VENDOR_ID_XILINX 0x03fd #define USB_VENDOR_ID_ALTERA 0x09fb @@ -569,10 +570,6 @@ static void cleanup_dev(struct kref *kref) * errors if executed. The mechanism relies on that xdev->error is assigned * a non-zero value by report_io_error() prior to queueing wakeup_all(), * which prevents bulk_in_work() from calling process_bulk_in(). - * - * The fact that wakeup_all() and bulk_in_work() are queued on the same - * workqueue makes their concurrent execution very unlikely, however the - * kernel's API doesn't seem to ensure this strictly. 
*/ static void wakeup_all(struct work_struct *work) @@ -627,7 +624,7 @@ static void report_io_error(struct xillyusb_dev *xdev, if (do_once) { kref_get(&xdev->kref); /* xdev is used by work item */ - queue_work(xdev->workq, &xdev->wakeup_workitem); + queue_work(wakeup_wq, &xdev->wakeup_workitem); } } @@ -1906,6 +1903,13 @@ static const struct file_operations xillyusb_fops = { static int xillyusb_setup_base_eps(struct xillyusb_dev *xdev) { + struct usb_device *udev = xdev->udev; + + /* Verify that device has the two fundamental bulk in/out endpoints */ + if (usb_pipe_type_check(udev, usb_sndbulkpipe(udev, MSG_EP_NUM)) || + usb_pipe_type_check(udev, usb_rcvbulkpipe(udev, IN_EP_NUM))) + return -ENODEV; + xdev->msg_ep = endpoint_alloc(xdev, MSG_EP_NUM | USB_DIR_OUT, bulk_out_work, 1, 2); if (!xdev->msg_ep) @@ -1935,14 +1939,15 @@ static int setup_channels(struct xillyusb_dev *xdev, __le16 *chandesc, int num_channels) { - struct xillyusb_channel *chan; + struct usb_device *udev = xdev->udev; + struct xillyusb_channel *chan, *new_channels; int i; chan = kcalloc(num_channels, sizeof(*chan), GFP_KERNEL); if (!chan) return -ENOMEM; - xdev->channels = chan; + new_channels = chan; for (i = 0; i < num_channels; i++, chan++) { unsigned int in_desc = le16_to_cpu(*chandesc++); @@ -1971,6 +1976,15 @@ static int setup_channels(struct xillyusb_dev *xdev, */ if ((out_desc & 0x80) && i < 14) { /* Entry is valid */ + if (usb_pipe_type_check(udev, + usb_sndbulkpipe(udev, i + 2))) { + dev_err(xdev->dev, + "Missing BULK OUT endpoint %d\n", + i + 2); + kfree(new_channels); + return -ENODEV; + } + chan->writable = 1; chan->out_synchronous = !!(out_desc & 0x40); chan->out_seekable = !!(out_desc & 0x20); @@ -1980,6 +1994,7 @@ static int setup_channels(struct xillyusb_dev *xdev, } } + xdev->channels = new_channels; return 0; } @@ -2096,9 +2111,11 @@ static int xillyusb_discovery(struct usb_interface *interface) * just after responding with the IDT, there is no reason for any * work item to be running now. To be sure that xdev->channels * is updated on anything that might run in parallel, flush the - * workqueue, which rarely does anything. + * device's workqueue and the wakeup work item. This rarely + * does anything. */ flush_workqueue(xdev->workq); + flush_work(&xdev->wakeup_workitem); xdev->num_channels = num_channels; @@ -2146,7 +2163,7 @@ static int xillyusb_probe(struct usb_interface *interface, spin_lock_init(&xdev->error_lock); xdev->in_counter = 0; xdev->in_bytes_left = 0; - xdev->workq = alloc_workqueue(xillyname, WQ_HIGHPRI, 0); + xdev->workq = alloc_workqueue(xillyname, WQ_HIGHPRI | WQ_UNBOUND, 0); if (!xdev->workq) { dev_err(&interface->dev, "Failed to allocate work queue\n"); @@ -2258,6 +2275,10 @@ static int __init xillyusb_init(void) { int rc = 0; + wakeup_wq = alloc_workqueue(xillyname, WQ_UNBOUND, 0); + if (!wakeup_wq) + return -ENOMEM; + if (LOG2_INITIAL_FIFO_BUF_SIZE > PAGE_SHIFT) fifo_buf_order = LOG2_INITIAL_FIFO_BUF_SIZE - PAGE_SHIFT; else @@ -2265,12 +2286,17 @@ static int __init xillyusb_init(void) rc = usb_register(&xillyusb_driver); + if (rc) + destroy_workqueue(wakeup_wq); + return rc; } static void __exit xillyusb_exit(void) { usb_deregister(&xillyusb_driver); + + destroy_workqueue(wakeup_wq); } module_init(xillyusb_init); |
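The xillyusb hunks above stop queueing wakeup_all() on the per-device workqueue and give it a module-scoped wakeup_wq, created in xillyusb_init(), destroyed on registration failure and again in xillyusb_exit(). A minimal sketch of that create/undo/destroy lifecycle under those assumptions; my_register()/my_unregister() are hypothetical stand-ins for usb_register()/usb_deregister():

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wakeup_wq;

static void my_wakeup_fn(struct work_struct *work)
{
        /* Wake blocked readers/writers and mark the device as failed. */
}

static DECLARE_WORK(my_wakeup_work, my_wakeup_fn);

/* Hypothetical registration pair, e.g. usb_register()/usb_deregister(). */
int my_register(void);
void my_unregister(void);

static int __init my_init(void)
{
        int rc;

        my_wakeup_wq = alloc_workqueue("my_wakeup", WQ_UNBOUND, 0);
        if (!my_wakeup_wq)
                return -ENOMEM;

        rc = my_register();
        if (rc)
                destroy_workqueue(my_wakeup_wq);        /* undo on failure */
        return rc;
}

static void __exit my_exit(void)
{
        my_unregister();
        destroy_workqueue(my_wakeup_wq);
}

module_init(my_init);
module_exit(my_exit);
MODULE_DESCRIPTION("Sketch of a module-scoped wakeup workqueue");
MODULE_LICENSE("GPL");

Error-path work is then queued with queue_work(my_wakeup_wq, &my_wakeup_work), mirroring how report_io_error() now targets wakeup_wq instead of xdev->workq.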

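Looking back at the virtio_console init_vqs() conversion earlier in this diff: the parallel io_callbacks[]/io_names[] arrays are folded into a single struct virtqueue_info array handed straight to virtio_find_vqs(). A minimal sketch of that calling pattern for a hypothetical two-queue device (virtio_console itself kcalloc()s the array because its queue count is only known at probe time):

#include <linux/kernel.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>

static void my_rx_done(struct virtqueue *vq) { /* consume received buffers */ }
static void my_tx_done(struct virtqueue *vq) { /* reclaim sent buffers */ }

static int my_find_vqs(struct virtio_device *vdev, struct virtqueue *vqs[2])
{
        struct virtqueue_info vqs_info[] = {
                { .name = "input",  .callback = my_rx_done },
                { .name = "output", .callback = my_tx_done },
        };

        return virtio_find_vqs(vdev, ARRAY_SIZE(vqs_info), vqs, vqs_info, NULL);
}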