Diffstat (limited to 'drivers/char')
124 files changed, 4932 insertions, 1366 deletions
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index 7c8dd0abcfdf..8fb33c90482f 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -238,6 +238,7 @@ config APPLICOM config SONYPI tristate "Sony Vaio Programmable I/O Control Device support" depends on X86_32 && PCI && INPUT + depends on ACPI_EC || !ACPI help This driver enables access to the Sony Programmable I/O Control Device which can be found in many (all ?) Sony Vaio laptops. diff --git a/drivers/char/adi.c b/drivers/char/adi.c index 751d7cc0da1b..f9bec10a6064 100644 --- a/drivers/char/adi.c +++ b/drivers/char/adi.c @@ -14,12 +14,6 @@ #define MAX_BUF_SZ PAGE_SIZE -static int adi_open(struct inode *inode, struct file *file) -{ - file->f_mode |= FMODE_UNSIGNED_OFFSET; - return 0; -} - static int read_mcd_tag(unsigned long addr) { long err; @@ -196,7 +190,6 @@ static loff_t adi_llseek(struct file *file, loff_t offset, int whence) if (offset != file->f_pos) { file->f_pos = offset; - file->f_version = 0; ret = offset; } @@ -206,9 +199,9 @@ static loff_t adi_llseek(struct file *file, loff_t offset, int whence) static const struct file_operations adi_fops = { .owner = THIS_MODULE, .llseek = adi_llseek, - .open = adi_open, .read = adi_read, .write = adi_write, + .fop_flags = FOP_UNSIGNED_OFFSET, }; static struct miscdevice adi_miscdev = { diff --git a/drivers/char/agp/ali-agp.c b/drivers/char/agp/ali-agp.c index 760d9a931289..2eaab502ec29 100644 --- a/drivers/char/agp/ali-agp.c +++ b/drivers/char/agp/ali-agp.c @@ -418,5 +418,6 @@ module_init(agp_ali_init); module_exit(agp_ali_cleanup); MODULE_AUTHOR("Dave Jones"); +MODULE_DESCRIPTION("ALi AGPGART routines"); MODULE_LICENSE("GPL and additional rights"); diff --git a/drivers/char/agp/alpha-agp.c b/drivers/char/agp/alpha-agp.c index c9bf2c219841..e1763ecb8111 100644 --- a/drivers/char/agp/alpha-agp.c +++ b/drivers/char/agp/alpha-agp.c @@ -149,7 +149,7 @@ struct agp_bridge_driver alpha_core_agp_driver = { struct agp_bridge_data *alpha_bridge; -int __init +static int __init alpha_core_agp_setup(void) { alpha_agp_info *agp = alpha_mv.agp_info(); @@ -217,4 +217,5 @@ module_init(agp_alpha_core_init); module_exit(agp_alpha_core_cleanup); MODULE_AUTHOR("Jeff Wiedemeier <Jeff.Wiedemeier@hp.com>"); +MODULE_DESCRIPTION("Alpha AGP support"); MODULE_LICENSE("GPL and additional rights"); diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c index 55397ba765d2..795c8c9ff680 100644 --- a/drivers/char/agp/amd-k7-agp.c +++ b/drivers/char/agp/amd-k7-agp.c @@ -549,4 +549,5 @@ static void __exit agp_amdk7_cleanup(void) module_init(agp_amdk7_init); module_exit(agp_amdk7_cleanup); +MODULE_DESCRIPTION("AMD K7 AGPGART routines"); MODULE_LICENSE("GPL and additional rights"); diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c index ce8651436609..bf490967241a 100644 --- a/drivers/char/agp/amd64-agp.c +++ b/drivers/char/agp/amd64-agp.c @@ -16,7 +16,7 @@ #include <linux/mmzone.h> #include <asm/page.h> /* PAGE_SIZE */ #include <asm/e820/api.h> -#include <asm/amd_nb.h> +#include <asm/amd/nb.h> #include <asm/gart.h> #include "agp.h" @@ -802,4 +802,5 @@ module_exit(agp_amd64_cleanup); MODULE_AUTHOR("Dave Jones, Andi Kleen"); module_param(agp_try_unsupported, bool, 0); +MODULE_DESCRIPTION("GART driver for the AMD Opteron/Athlon64 on-CPU northbridge"); MODULE_LICENSE("GPL"); diff --git a/drivers/char/agp/ati-agp.c b/drivers/char/agp/ati-agp.c index 3c1fce48aabe..f7871afe08cf 100644 --- a/drivers/char/agp/ati-agp.c +++ b/drivers/char/agp/ati-agp.c @@ -572,5 +572,6 @@ 
module_init(agp_ati_init); module_exit(agp_ati_cleanup); MODULE_AUTHOR("Dave Jones"); +MODULE_DESCRIPTION("ATi AGPGART routines"); MODULE_LICENSE("GPL and additional rights"); diff --git a/drivers/char/agp/efficeon-agp.c b/drivers/char/agp/efficeon-agp.c index f28d42319269..0d25bbdc7e6a 100644 --- a/drivers/char/agp/efficeon-agp.c +++ b/drivers/char/agp/efficeon-agp.c @@ -465,4 +465,5 @@ module_init(agp_efficeon_init); module_exit(agp_efficeon_cleanup); MODULE_AUTHOR("Carlos Puchol <cpglinux@puchol.com>"); +MODULE_DESCRIPTION("Transmeta's Efficeon AGPGART driver"); MODULE_LICENSE("GPL and additional rights"); diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c index c518b3a9db04..3111e320b2c5 100644 --- a/drivers/char/agp/intel-agp.c +++ b/drivers/char/agp/intel-agp.c @@ -12,7 +12,7 @@ #include <asm/smp.h> #include "agp.h" #include "intel-agp.h" -#include <drm/intel-gtt.h> +#include <drm/intel/intel-gtt.h> static int intel_fetch_size(void) { @@ -920,4 +920,5 @@ module_init(agp_intel_init); module_exit(agp_intel_cleanup); MODULE_AUTHOR("Dave Jones, Various @Intel"); +MODULE_DESCRIPTION("Intel AGPGART routines"); MODULE_LICENSE("GPL and additional rights"); diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index bf6716ff863b..bcc26785175d 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -25,7 +25,7 @@ #include <asm/smp.h> #include "agp.h" #include "intel-agp.h" -#include <drm/intel-gtt.h> +#include <drm/intel/intel-gtt.h> #include <asm/set_memory.h> /* @@ -53,6 +53,7 @@ struct intel_gtt_driver { * of the mmio register file, that's done in the generic code. */ void (*cleanup)(void); void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags); + dma_addr_t (*read_entry)(unsigned int entry, bool *is_present, bool *is_local); /* Flags is a more or less chipset specific opaque value. * For chipsets that need to support old ums (non-gem) code, this * needs to be identical to the various supported agp memory types! 
*/ @@ -336,6 +337,19 @@ static void i810_write_entry(dma_addr_t addr, unsigned int entry, writel_relaxed(addr | pte_flags, intel_private.gtt + entry); } +static dma_addr_t i810_read_entry(unsigned int entry, + bool *is_present, bool *is_local) +{ + u32 val; + + val = readl(intel_private.gtt + entry); + + *is_present = val & I810_PTE_VALID; + *is_local = val & I810_PTE_LOCAL; + + return val & ~0xfff; +} + static resource_size_t intel_gtt_stolen_size(void) { u16 gmch_ctrl; @@ -741,6 +755,19 @@ static void i830_write_entry(dma_addr_t addr, unsigned int entry, writel_relaxed(addr | pte_flags, intel_private.gtt + entry); } +static dma_addr_t i830_read_entry(unsigned int entry, + bool *is_present, bool *is_local) +{ + u32 val; + + val = readl(intel_private.gtt + entry); + + *is_present = val & I810_PTE_VALID; + *is_local = false; + + return val & ~0xfff; +} + bool intel_gmch_enable_gtt(void) { u8 __iomem *reg; @@ -878,6 +905,13 @@ void intel_gmch_gtt_insert_sg_entries(struct sg_table *st, } EXPORT_SYMBOL(intel_gmch_gtt_insert_sg_entries); +dma_addr_t intel_gmch_gtt_read_entry(unsigned int pg, + bool *is_present, bool *is_local) +{ + return intel_private.driver->read_entry(pg, is_present, is_local); +} +EXPORT_SYMBOL(intel_gmch_gtt_read_entry); + #if IS_ENABLED(CONFIG_AGP_INTEL) static void intel_gmch_gtt_insert_pages(unsigned int first_entry, unsigned int num_entries, @@ -1126,6 +1160,19 @@ static void i965_write_entry(dma_addr_t addr, writel_relaxed(addr | pte_flags, intel_private.gtt + entry); } +static dma_addr_t i965_read_entry(unsigned int entry, + bool *is_present, bool *is_local) +{ + u64 val; + + val = readl(intel_private.gtt + entry); + + *is_present = val & I810_PTE_VALID; + *is_local = false; + + return ((val & 0xf0) << 28) | (val & ~0xfff); +} + static int i9xx_setup(void) { phys_addr_t reg_addr; @@ -1187,6 +1234,7 @@ static const struct intel_gtt_driver i81x_gtt_driver = { .cleanup = i810_cleanup, .check_flags = i830_check_flags, .write_entry = i810_write_entry, + .read_entry = i810_read_entry, }; static const struct intel_gtt_driver i8xx_gtt_driver = { .gen = 2, @@ -1194,6 +1242,7 @@ static const struct intel_gtt_driver i8xx_gtt_driver = { .setup = i830_setup, .cleanup = i830_cleanup, .write_entry = i830_write_entry, + .read_entry = i830_read_entry, .dma_mask_size = 32, .check_flags = i830_check_flags, .chipset_flush = i830_chipset_flush, @@ -1205,6 +1254,7 @@ static const struct intel_gtt_driver i915_gtt_driver = { .cleanup = i9xx_cleanup, /* i945 is the last gpu to need phys mem (for overlay and cursors). 
*/ .write_entry = i830_write_entry, + .read_entry = i830_read_entry, .dma_mask_size = 32, .check_flags = i830_check_flags, .chipset_flush = i9xx_chipset_flush, @@ -1215,6 +1265,7 @@ static const struct intel_gtt_driver g33_gtt_driver = { .setup = i9xx_setup, .cleanup = i9xx_cleanup, .write_entry = i965_write_entry, + .read_entry = i965_read_entry, .dma_mask_size = 36, .check_flags = i830_check_flags, .chipset_flush = i9xx_chipset_flush, @@ -1225,6 +1276,7 @@ static const struct intel_gtt_driver pineview_gtt_driver = { .setup = i9xx_setup, .cleanup = i9xx_cleanup, .write_entry = i965_write_entry, + .read_entry = i965_read_entry, .dma_mask_size = 36, .check_flags = i830_check_flags, .chipset_flush = i9xx_chipset_flush, @@ -1235,6 +1287,7 @@ static const struct intel_gtt_driver i965_gtt_driver = { .setup = i9xx_setup, .cleanup = i9xx_cleanup, .write_entry = i965_write_entry, + .read_entry = i965_read_entry, .dma_mask_size = 36, .check_flags = i830_check_flags, .chipset_flush = i9xx_chipset_flush, @@ -1244,6 +1297,7 @@ static const struct intel_gtt_driver g4x_gtt_driver = { .setup = i9xx_setup, .cleanup = i9xx_cleanup, .write_entry = i965_write_entry, + .read_entry = i965_read_entry, .dma_mask_size = 36, .check_flags = i830_check_flags, .chipset_flush = i9xx_chipset_flush, @@ -1254,6 +1308,7 @@ static const struct intel_gtt_driver ironlake_gtt_driver = { .setup = i9xx_setup, .cleanup = i9xx_cleanup, .write_entry = i965_write_entry, + .read_entry = i965_read_entry, .dma_mask_size = 36, .check_flags = i830_check_flags, .chipset_flush = i9xx_chipset_flush, @@ -1461,4 +1516,5 @@ void intel_gmch_remove(void) EXPORT_SYMBOL(intel_gmch_remove); MODULE_AUTHOR("Dave Jones, Various @Intel"); +MODULE_DESCRIPTION("Intel GTT (Graphics Translation Table) routines"); MODULE_LICENSE("GPL and additional rights"); diff --git a/drivers/char/agp/nvidia-agp.c b/drivers/char/agp/nvidia-agp.c index dbcbc06cc202..4787391bb6b4 100644 --- a/drivers/char/agp/nvidia-agp.c +++ b/drivers/char/agp/nvidia-agp.c @@ -11,6 +11,7 @@ #include <linux/page-flags.h> #include <linux/mm.h> #include <linux/jiffies.h> +#include <asm/msr.h> #include "agp.h" /* NVIDIA registers */ @@ -462,6 +463,7 @@ static void __exit agp_nvidia_cleanup(void) module_init(agp_nvidia_init); module_exit(agp_nvidia_cleanup); +MODULE_DESCRIPTION("Nvidia AGPGART routines"); MODULE_LICENSE("GPL and additional rights"); MODULE_AUTHOR("NVIDIA Corporation"); diff --git a/drivers/char/agp/parisc-agp.c b/drivers/char/agp/parisc-agp.c index edbc4d338117..93a48070b2a1 100644 --- a/drivers/char/agp/parisc-agp.c +++ b/drivers/char/agp/parisc-agp.c @@ -432,4 +432,5 @@ out: module_init(parisc_agp_init); MODULE_AUTHOR("Kyle McMartin <kyle@parisc-linux.org>"); +MODULE_DESCRIPTION("HP Quicksilver AGP GART routines"); MODULE_LICENSE("GPL"); diff --git a/drivers/char/agp/sis-agp.c b/drivers/char/agp/sis-agp.c index 484bb101c53b..a0deb97cedb0 100644 --- a/drivers/char/agp/sis-agp.c +++ b/drivers/char/agp/sis-agp.c @@ -433,4 +433,5 @@ module_param(agp_sis_force_delay, bool, 0); MODULE_PARM_DESC(agp_sis_force_delay,"forces sis delay hack"); module_param(agp_sis_agp_spec, int, 0); MODULE_PARM_DESC(agp_sis_agp_spec,"0=force sis init, 1=force generic agp3 init, default: autodetect"); +MODULE_DESCRIPTION("SiS AGPGART routines"); MODULE_LICENSE("GPL and additional rights"); diff --git a/drivers/char/agp/sworks-agp.c b/drivers/char/agp/sworks-agp.c index b91da5998dd7..0ab7562d17c9 100644 --- a/drivers/char/agp/sworks-agp.c +++ b/drivers/char/agp/sworks-agp.c @@ -564,5 +564,6 @@ static 
void __exit agp_serverworks_cleanup(void) module_init(agp_serverworks_init); module_exit(agp_serverworks_cleanup); +MODULE_DESCRIPTION("Serverworks AGPGART routines"); MODULE_LICENSE("GPL and additional rights"); diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c index 84411b13c49f..b8d7115b8c9e 100644 --- a/drivers/char/agp/uninorth-agp.c +++ b/drivers/char/agp/uninorth-agp.c @@ -726,4 +726,5 @@ MODULE_PARM_DESC(aperture, "\t\tDefault: " DEFAULT_APERTURE_STRING "M"); MODULE_AUTHOR("Ben Herrenschmidt & Paul Mackerras"); +MODULE_DESCRIPTION("Apple UniNorth & U3 AGP support"); MODULE_LICENSE("GPL"); diff --git a/drivers/char/agp/via-agp.c b/drivers/char/agp/via-agp.c index bc5140af2dcb..8b19a5d1a09b 100644 --- a/drivers/char/agp/via-agp.c +++ b/drivers/char/agp/via-agp.c @@ -575,5 +575,6 @@ static void __exit agp_via_cleanup(void) module_init(agp_via_init); module_exit(agp_via_cleanup); +MODULE_DESCRIPTION("VIA AGPGART routines"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Dave Jones"); diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c index 69314532f38c..9fed9706d9cd 100644 --- a/drivers/char/applicom.c +++ b/drivers/char/applicom.c @@ -111,7 +111,6 @@ static irqreturn_t ac_interrupt(int, void *); static const struct file_operations ac_fops = { .owner = THIS_MODULE, - .llseek = no_llseek, .read = ac_read, .write = ac_write, .unlocked_ioctl = ac_ioctl, diff --git a/drivers/char/bsr.c b/drivers/char/bsr.c index 70d31aed9011..837109ef6766 100644 --- a/drivers/char/bsr.c +++ b/drivers/char/bsr.c @@ -342,5 +342,6 @@ static void __exit bsr_exit(void) module_init(bsr_init); module_exit(bsr_exit); +MODULE_DESCRIPTION("IBM POWER Barrier Synchronization Register Driver"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Sonny Rao <sonnyrao@us.ibm.com>"); diff --git a/drivers/char/ds1620.c b/drivers/char/ds1620.c index cf89a9631107..44a1cdbd4bfb 100644 --- a/drivers/char/ds1620.c +++ b/drivers/char/ds1620.c @@ -353,7 +353,6 @@ static const struct file_operations ds1620_fops = { .open = ds1620_open, .read = ds1620_read, .unlocked_ioctl = ds1620_unlocked_ioctl, - .llseek = no_llseek, }; static struct miscdevice ds1620_miscdev = { @@ -421,4 +420,5 @@ static void __exit ds1620_exit(void) module_init(ds1620_init); module_exit(ds1620_exit); +MODULE_DESCRIPTION("Dallas Semiconductor DS1620 thermometer driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/char/dsp56k.c b/drivers/char/dsp56k.c index bda27e595da1..1c2c8439797c 100644 --- a/drivers/char/dsp56k.c +++ b/drivers/char/dsp56k.c @@ -530,5 +530,6 @@ static void __exit dsp56k_cleanup_driver(void) } module_exit(dsp56k_cleanup_driver); +MODULE_DESCRIPTION("Atari DSP56001 Device Driver"); MODULE_LICENSE("GPL"); MODULE_FIRMWARE("dsp56k/bootstrap.bin"); diff --git a/drivers/char/dtlk.c b/drivers/char/dtlk.c index 6946c1cad9f6..16618079298a 100644 --- a/drivers/char/dtlk.c +++ b/drivers/char/dtlk.c @@ -107,7 +107,6 @@ static const struct file_operations dtlk_fops = .unlocked_ioctl = dtlk_ioctl, .open = dtlk_open, .release = dtlk_release, - .llseek = no_llseek, }; /* local prototypes */ @@ -244,11 +243,11 @@ static __poll_t dtlk_poll(struct file *file, poll_table * wait) poll_wait(file, &dtlk_process_list, wait); if (dtlk_has_indexing && dtlk_readable()) { - del_timer(&dtlk_timer); + timer_delete(&dtlk_timer); mask = EPOLLIN | EPOLLRDNORM; } if (dtlk_writeable()) { - del_timer(&dtlk_timer); + timer_delete(&dtlk_timer); mask |= EPOLLOUT | EPOLLWRNORM; } /* there are no exception conditions */ @@ -323,7 +322,7 @@ static int 
dtlk_release(struct inode *inode, struct file *file) } TRACE_RET; - del_timer_sync(&dtlk_timer); + timer_delete_sync(&dtlk_timer); return 0; } @@ -660,4 +659,5 @@ static char dtlk_write_tts(char ch) return 0; } +MODULE_DESCRIPTION("RC Systems DoubleTalk PC speech card driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/char/hangcheck-timer.c b/drivers/char/hangcheck-timer.c index 4181bcc1c796..497fc167cb8c 100644 --- a/drivers/char/hangcheck-timer.c +++ b/drivers/char/hangcheck-timer.c @@ -167,7 +167,7 @@ static int __init hangcheck_init(void) static void __exit hangcheck_exit(void) { - del_timer_sync(&hangcheck_ticktock); + timer_delete_sync(&hangcheck_ticktock); printk("Hangcheck: Stopped hangcheck timer.\n"); } diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c index d51fc8321d41..e110857824fc 100644 --- a/drivers/char/hpet.c +++ b/drivers/char/hpet.c @@ -162,6 +162,7 @@ static irqreturn_t hpet_interrupt(int irq, void *data) static void hpet_timer_set_irq(struct hpet_dev *devp) { + const unsigned int nr_irqs = irq_get_nr_irqs(); unsigned long v; int irq, gsi; struct hpet_timer __iomem *timer; @@ -269,8 +270,13 @@ hpet_read(struct file *file, char __user *buf, size_t count, loff_t * ppos) if (!devp->hd_ireqfreq) return -EIO; - if (count < sizeof(unsigned long)) - return -EINVAL; + if (in_compat_syscall()) { + if (count < sizeof(compat_ulong_t)) + return -EINVAL; + } else { + if (count < sizeof(unsigned long)) + return -EINVAL; + } add_wait_queue(&devp->hd_waitqueue, &wait); @@ -294,9 +300,16 @@ hpet_read(struct file *file, char __user *buf, size_t count, loff_t * ppos) schedule(); } - retval = put_user(data, (unsigned long __user *)buf); - if (!retval) - retval = sizeof(unsigned long); + if (in_compat_syscall()) { + retval = put_user(data, (compat_ulong_t __user *)buf); + if (!retval) + retval = sizeof(compat_ulong_t); + } else { + retval = put_user(data, (unsigned long __user *)buf); + if (!retval) + retval = sizeof(unsigned long); + } + out: __set_current_state(TASK_RUNNING); remove_wait_queue(&devp->hd_waitqueue, &wait); @@ -651,12 +664,24 @@ struct compat_hpet_info { unsigned short hi_timer; }; +/* 32-bit types would lead to different command codes which should be + * translated into 64-bit ones before passed to hpet_ioctl_common + */ +#define COMPAT_HPET_INFO _IOR('h', 0x03, struct compat_hpet_info) +#define COMPAT_HPET_IRQFREQ _IOW('h', 0x6, compat_ulong_t) + static long hpet_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct hpet_info info; int err; + if (cmd == COMPAT_HPET_INFO) + cmd = HPET_INFO; + + if (cmd == COMPAT_HPET_IRQFREQ) + cmd = HPET_IRQFREQ; + mutex_lock(&hpet_mutex); err = hpet_ioctl_common(file->private_data, cmd, arg, &info); mutex_unlock(&hpet_mutex); @@ -676,7 +701,6 @@ hpet_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) static const struct file_operations hpet_fops = { .owner = THIS_MODULE, - .llseek = no_llseek, .read = hpet_read, .poll = hpet_poll, .unlocked_ioctl = hpet_ioctl, @@ -700,7 +724,7 @@ static int hpet_is_known(struct hpet_data *hdp) return 0; } -static struct ctl_table hpet_table[] = { +static const struct ctl_table hpet_table[] = { { .procname = "max-user-freq", .data = &hpet_max_freq, @@ -784,7 +808,7 @@ int hpet_alloc(struct hpet_data *hdp) struct hpets *hpetp; struct hpet __iomem *hpet; static struct hpets *last; - unsigned long period; + u32 period; unsigned long long temp; u32 remainder; @@ -841,11 +865,11 @@ int hpet_alloc(struct hpet_data *hdp) do_div(temp, period); hpetp->hp_tick_freq 
= temp; /* ticks per second */ - printk(KERN_INFO "hpet%d: at MMIO 0x%lx, IRQ%s", + printk(KERN_INFO "hpet%u: at MMIO 0x%lx, IRQ%s", hpetp->hp_which, hdp->hd_phys_address, hpetp->hp_ntimer > 1 ? "s" : ""); for (i = 0; i < hpetp->hp_ntimer; i++) - printk(KERN_CONT "%s %d", i > 0 ? "," : "", hdp->hd_irq[i]); + printk(KERN_CONT "%s %u", i > 0 ? "," : "", hdp->hd_irq[i]); printk(KERN_CONT "\n"); temp = hpetp->hp_tick_freq; diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index 442c40efb200..c85827843447 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -50,7 +50,7 @@ config HW_RANDOM_INTEL config HW_RANDOM_AMD tristate "AMD HW Random Number Generator support" - depends on (X86 || PPC_MAPLE || COMPILE_TEST) + depends on (X86 || COMPILE_TEST) depends on PCI && HAS_IOPORT_MAP default HW_RANDOM help @@ -62,6 +62,19 @@ config HW_RANDOM_AMD If unsure, say Y. +config HW_RANDOM_AIROHA + tristate "Airoha True HW Random Number Generator support" + depends on ARCH_AIROHA || COMPILE_TEST + default HW_RANDOM + help + This driver provides kernel-side support for the True Random Number + Generator hardware found on Airoha SoC. + + To compile this driver as a module, choose M here: the + module will be called airoha-rng. + + If unsure, say Y. + config HW_RANDOM_ATMEL tristate "Atmel Random Number Generator support" depends on (ARCH_AT91 || COMPILE_TEST) @@ -99,9 +112,22 @@ config HW_RANDOM_BCM2835 If unsure, say Y. +config HW_RANDOM_BCM74110 + tristate "Broadcom BCM74110 Random Number Generator support" + depends on ARCH_BRCMSTB || COMPILE_TEST + default HW_RANDOM + help + This driver provides kernel-side support for the Random Number + Generator hardware found on the Broadcom BCM74110 SoCs. + + To compile this driver as a module, choose M here: the + module will be called bcm74110-rng + + If unsure, say Y. + config HW_RANDOM_IPROC_RNG200 tristate "Broadcom iProc/STB RNG200 support" - depends on ARCH_BCM_IPROC || ARCH_BCM2835 || ARCH_BRCMSTB || COMPILE_TEST + depends on ARCH_BCM_IPROC || ARCH_BCM2835 || ARCH_BCMBCA || ARCH_BRCMSTB || COMPILE_TEST default HW_RANDOM help This driver provides kernel-side support for the RNG200 @@ -508,10 +534,10 @@ config HW_RANDOM_NPCM If unsure, say Y. config HW_RANDOM_KEYSTONE + tristate "TI Keystone NETCP SA Hardware random number generator" depends on ARCH_KEYSTONE || COMPILE_TEST depends on HAS_IOMEM && OF default HW_RANDOM - tristate "TI Keystone NETCP SA Hardware random number generator" help This option enables Keystone's hardware random generator. @@ -553,15 +579,15 @@ config HW_RANDOM_ARM_SMCCC_TRNG module will be called arm_smccc_trng. config HW_RANDOM_CN10K - tristate "Marvell CN10K Random Number Generator support" - depends on HW_RANDOM && PCI && (ARM64 || (64BIT && COMPILE_TEST)) - default HW_RANDOM - help - This driver provides support for the True Random Number - generator available in Marvell CN10K SoCs. + tristate "Marvell CN10K Random Number Generator support" + depends on HW_RANDOM && PCI && (ARM64 || (64BIT && COMPILE_TEST)) + default HW_RANDOM if ARCH_THUNDER + help + This driver provides support for the True Random Number + generator available in Marvell CN10K SoCs. - To compile this driver as a module, choose M here. - The module will be called cn10k_rng. If unsure, say Y. + To compile this driver as a module, choose M here. + The module will be called cn10k_rng. If unsure, say Y. 
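As a side note on the hpet.c hunk above: hpet_compat_ioctl() now remaps the 32-bit command codes (COMPAT_HPET_INFO, COMPAT_HPET_IRQFREQ) onto their native equivalents before calling the shared handler, because _IOW()/_IOR() fold the argument size into the command number. The following is a minimal sketch of that remapping pattern only; EXAMPLE_IRQFREQ, COMPAT_EXAMPLE_IRQFREQ and example_ioctl_common() are illustrative names and not part of the hpet driver.

#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/ioctl.h>

/* Illustrative command codes mirroring the shape of HPET_IRQFREQ. */
#define EXAMPLE_IRQFREQ		_IOW('h', 0x6, unsigned long)
#define COMPAT_EXAMPLE_IRQFREQ	_IOW('h', 0x6, compat_ulong_t)

static long example_ioctl_common(struct file *file, unsigned int cmd,
				 unsigned long arg)
{
	/*
	 * Stand-in for the shared native-command handler
	 * (hpet_ioctl_common() in the real driver); details omitted.
	 */
	return -ENOTTY;
}

static long example_compat_ioctl(struct file *file, unsigned int cmd,
				 unsigned long arg)
{
	/*
	 * _IOW() encodes sizeof() of the argument type into the command
	 * number, so 32-bit userspace building the request with
	 * compat_ulong_t (4 bytes) produces a different code than 64-bit
	 * userspace using unsigned long (8 bytes).  Translate the compat
	 * code to the native one before dispatching, as the
	 * hpet_compat_ioctl() hunk above does.
	 */
	if (cmd == COMPAT_EXAMPLE_IRQFREQ)
		cmd = EXAMPLE_IRQFREQ;

	return example_ioctl_common(file, cmd, arg);
}

The same size split is why hpet_read() above accepts a compat_ulong_t-sized buffer when in_compat_syscall() is true.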
config HW_RANDOM_JH7110 tristate "StarFive JH7110 Random Number Generator support" @@ -573,6 +599,21 @@ config HW_RANDOM_JH7110 To compile this driver as a module, choose M here. The module will be called jh7110-trng. +config HW_RANDOM_ROCKCHIP + tristate "Rockchip True Random Number Generator" + depends on HW_RANDOM && (ARCH_ROCKCHIP || COMPILE_TEST) + depends on HAS_IOMEM + default HW_RANDOM + help + This driver provides kernel-side support for the True Random Number + Generator hardware found on some Rockchip SoCs like RK3566, RK3568 + or RK3588. + + To compile this driver as a module, choose M here: the + module will be called rockchip-rng. + + If unsure, say Y. + endif # HW_RANDOM config UML_RANDOM diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile index 32549a1186dc..b9132b3f5d21 100644 --- a/drivers/char/hw_random/Makefile +++ b/drivers/char/hw_random/Makefile @@ -8,6 +8,7 @@ rng-core-y := core.o obj-$(CONFIG_HW_RANDOM_TIMERIOMEM) += timeriomem-rng.o obj-$(CONFIG_HW_RANDOM_INTEL) += intel-rng.o obj-$(CONFIG_HW_RANDOM_AMD) += amd-rng.o +obj-$(CONFIG_HW_RANDOM_AIROHA) += airoha-trng.o obj-$(CONFIG_HW_RANDOM_ATMEL) += atmel-rng.o obj-$(CONFIG_HW_RANDOM_BA431) += ba431-rng.o obj-$(CONFIG_HW_RANDOM_GEODE) += geode-rng.o @@ -31,6 +32,7 @@ obj-$(CONFIG_HW_RANDOM_POWERNV) += powernv-rng.o obj-$(CONFIG_HW_RANDOM_HISI) += hisi-rng.o obj-$(CONFIG_HW_RANDOM_HISTB) += histb-rng.o obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o +obj-$(CONFIG_HW_RANDOM_BCM74110) += bcm74110-rng.o obj-$(CONFIG_HW_RANDOM_IPROC_RNG200) += iproc-rng200.o obj-$(CONFIG_HW_RANDOM_ST) += st-rng.o obj-$(CONFIG_HW_RANDOM_XGENE) += xgene-rng.o @@ -48,4 +50,5 @@ obj-$(CONFIG_HW_RANDOM_XIPHERA) += xiphera-trng.o obj-$(CONFIG_HW_RANDOM_ARM_SMCCC_TRNG) += arm_smccc_trng.o obj-$(CONFIG_HW_RANDOM_CN10K) += cn10k-rng.o obj-$(CONFIG_HW_RANDOM_POLARFIRE_SOC) += mpfs-rng.o +obj-$(CONFIG_HW_RANDOM_ROCKCHIP) += rockchip-rng.o obj-$(CONFIG_HW_RANDOM_JH7110) += jh7110-trng.o diff --git a/drivers/char/hw_random/airoha-trng.c b/drivers/char/hw_random/airoha-trng.c new file mode 100644 index 000000000000..1dbfa9505c21 --- /dev/null +++ b/drivers/char/hw_random/airoha-trng.c @@ -0,0 +1,243 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2024 Christian Marangi */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mod_devicetable.h> +#include <linux/bitfield.h> +#include <linux/delay.h> +#include <linux/hw_random.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/platform_device.h> + +#define TRNG_IP_RDY 0x800 +#define CNT_TRANS GENMASK(15, 8) +#define SAMPLE_RDY BIT(0) +#define TRNG_NS_SEK_AND_DAT_EN 0x804 +#define RNG_EN BIT(31) /* referenced as ring_en */ +#define RAW_DATA_EN BIT(16) +#define TRNG_HEALTH_TEST_SW_RST 0x808 +#define SW_RST BIT(0) /* Active High */ +#define TRNG_INTR_EN 0x818 +#define INTR_MASK BIT(16) +#define CONTINUOUS_HEALTH_INITR_EN BIT(2) +#define SW_STARTUP_INITR_EN BIT(1) +#define RST_STARTUP_INITR_EN BIT(0) +/* Notice that Health Test are done only out of Reset and with RNG_EN */ +#define TRNG_HEALTH_TEST_STATUS 0x824 +#define CONTINUOUS_HEALTH_AP_TEST_FAIL BIT(23) +#define CONTINUOUS_HEALTH_RC_TEST_FAIL BIT(22) +#define SW_STARTUP_TEST_DONE BIT(21) +#define SW_STARTUP_AP_TEST_FAIL BIT(20) +#define SW_STARTUP_RC_TEST_FAIL BIT(19) +#define RST_STARTUP_TEST_DONE BIT(18) +#define RST_STARTUP_AP_TEST_FAIL BIT(17) +#define RST_STARTUP_RC_TEST_FAIL BIT(16) +#define RAW_DATA_VALID BIT(7) + +#define TRNG_RAW_DATA_OUT 
0x828 + +#define TRNG_CNT_TRANS_VALID 0x80 +#define BUSY_LOOP_SLEEP 10 +#define BUSY_LOOP_TIMEOUT (BUSY_LOOP_SLEEP * 10000) + +struct airoha_trng { + void __iomem *base; + struct hwrng rng; + struct device *dev; + + struct completion rng_op_done; +}; + +static int airoha_trng_irq_mask(struct airoha_trng *trng) +{ + u32 val; + + val = readl(trng->base + TRNG_INTR_EN); + val |= INTR_MASK; + writel(val, trng->base + TRNG_INTR_EN); + + return 0; +} + +static int airoha_trng_irq_unmask(struct airoha_trng *trng) +{ + u32 val; + + val = readl(trng->base + TRNG_INTR_EN); + val &= ~INTR_MASK; + writel(val, trng->base + TRNG_INTR_EN); + + return 0; +} + +static int airoha_trng_init(struct hwrng *rng) +{ + struct airoha_trng *trng = container_of(rng, struct airoha_trng, rng); + int ret; + u32 val; + + val = readl(trng->base + TRNG_NS_SEK_AND_DAT_EN); + val |= RNG_EN; + writel(val, trng->base + TRNG_NS_SEK_AND_DAT_EN); + + /* Set out of SW Reset */ + airoha_trng_irq_unmask(trng); + writel(0, trng->base + TRNG_HEALTH_TEST_SW_RST); + + ret = wait_for_completion_timeout(&trng->rng_op_done, BUSY_LOOP_TIMEOUT); + if (ret <= 0) { + dev_err(trng->dev, "Timeout waiting for Health Check\n"); + airoha_trng_irq_mask(trng); + return -ENODEV; + } + + /* Check if Health Test Failed */ + val = readl(trng->base + TRNG_HEALTH_TEST_STATUS); + if (val & (RST_STARTUP_AP_TEST_FAIL | RST_STARTUP_RC_TEST_FAIL)) { + dev_err(trng->dev, "Health Check fail: %s test fail\n", + val & RST_STARTUP_AP_TEST_FAIL ? "AP" : "RC"); + return -ENODEV; + } + + /* Check if IP is ready */ + ret = readl_poll_timeout(trng->base + TRNG_IP_RDY, val, + val & SAMPLE_RDY, 10, 1000); + if (ret < 0) { + dev_err(trng->dev, "Timeout waiting for IP ready"); + return -ENODEV; + } + + /* CNT_TRANS must be 0x80 for IP to be considered ready */ + ret = readl_poll_timeout(trng->base + TRNG_IP_RDY, val, + FIELD_GET(CNT_TRANS, val) == TRNG_CNT_TRANS_VALID, + 10, 1000); + if (ret < 0) { + dev_err(trng->dev, "Timeout waiting for IP ready"); + return -ENODEV; + } + + return 0; +} + +static void airoha_trng_cleanup(struct hwrng *rng) +{ + struct airoha_trng *trng = container_of(rng, struct airoha_trng, rng); + u32 val; + + val = readl(trng->base + TRNG_NS_SEK_AND_DAT_EN); + val &= ~RNG_EN; + writel(val, trng->base + TRNG_NS_SEK_AND_DAT_EN); + + /* Put it in SW Reset */ + writel(SW_RST, trng->base + TRNG_HEALTH_TEST_SW_RST); +} + +static int airoha_trng_read(struct hwrng *rng, void *buf, size_t max, bool wait) +{ + struct airoha_trng *trng = container_of(rng, struct airoha_trng, rng); + u32 *data = buf; + u32 status; + int ret; + + ret = readl_poll_timeout(trng->base + TRNG_HEALTH_TEST_STATUS, status, + status & RAW_DATA_VALID, 10, 1000); + if (ret < 0) { + dev_err(trng->dev, "Timeout waiting for TRNG RAW Data valid\n"); + return ret; + } + + *data = readl(trng->base + TRNG_RAW_DATA_OUT); + + return 4; +} + +static irqreturn_t airoha_trng_irq(int irq, void *priv) +{ + struct airoha_trng *trng = (struct airoha_trng *)priv; + + airoha_trng_irq_mask(trng); + /* Just complete the task, we will read the value later */ + complete(&trng->rng_op_done); + + return IRQ_HANDLED; +} + +static int airoha_trng_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct airoha_trng *trng; + int irq, ret; + u32 val; + + trng = devm_kzalloc(dev, sizeof(*trng), GFP_KERNEL); + if (!trng) + return -ENOMEM; + + trng->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(trng->base)) + return PTR_ERR(trng->base); + + irq = platform_get_irq(pdev, 0); + if 
(irq < 0) + return irq; + + airoha_trng_irq_mask(trng); + ret = devm_request_irq(&pdev->dev, irq, airoha_trng_irq, 0, + pdev->name, (void *)trng); + if (ret) { + dev_err(dev, "Can't get interrupt working.\n"); + return ret; + } + + init_completion(&trng->rng_op_done); + + /* Enable interrupt for SW reset Health Check */ + val = readl(trng->base + TRNG_INTR_EN); + val |= RST_STARTUP_INITR_EN; + writel(val, trng->base + TRNG_INTR_EN); + + /* Set output to raw data */ + val = readl(trng->base + TRNG_NS_SEK_AND_DAT_EN); + val |= RAW_DATA_EN; + writel(val, trng->base + TRNG_NS_SEK_AND_DAT_EN); + + /* Put it in SW Reset */ + writel(SW_RST, trng->base + TRNG_HEALTH_TEST_SW_RST); + + trng->dev = dev; + trng->rng.name = pdev->name; + trng->rng.init = airoha_trng_init; + trng->rng.cleanup = airoha_trng_cleanup; + trng->rng.read = airoha_trng_read; + + ret = devm_hwrng_register(dev, &trng->rng); + if (ret) { + dev_err(dev, "failed to register rng device: %d\n", ret); + return ret; + } + + return 0; +} + +static const struct of_device_id airoha_trng_of_match[] = { + { .compatible = "airoha,en7581-trng", }, + {}, +}; +MODULE_DEVICE_TABLE(of, airoha_trng_of_match); + +static struct platform_driver airoha_trng_driver = { + .driver = { + .name = "airoha-trng", + .of_match_table = airoha_trng_of_match, + }, + .probe = airoha_trng_probe, +}; + +module_platform_driver(airoha_trng_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Christian Marangi <ansuelsmth@gmail.com>"); +MODULE_DESCRIPTION("Airoha True Random Number Generator driver"); diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c index 86162a13681e..9a24d19236dc 100644 --- a/drivers/char/hw_random/amd-rng.c +++ b/drivers/char/hw_random/amd-rng.c @@ -143,8 +143,10 @@ static int __init amd_rng_mod_init(void) found: err = pci_read_config_dword(pdev, 0x58, &pmbase); - if (err) + if (err) { + err = pcibios_err_to_errno(err); goto put_dev; + } pmbase &= 0x0000FF00; if (pmbase == 0) { diff --git a/drivers/char/hw_random/arm_smccc_trng.c b/drivers/char/hw_random/arm_smccc_trng.c index 7e954341b09f..dcb8e7f37f25 100644 --- a/drivers/char/hw_random/arm_smccc_trng.c +++ b/drivers/char/hw_random/arm_smccc_trng.c @@ -118,4 +118,5 @@ module_platform_driver(smccc_trng_driver); MODULE_ALIAS("platform:smccc_trng"); MODULE_AUTHOR("Andre Przywara"); +MODULE_DESCRIPTION("Arm SMCCC TRNG firmware interface support"); MODULE_LICENSE("GPL"); diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c index e9157255f851..d2b00458761e 100644 --- a/drivers/char/hw_random/atmel-rng.c +++ b/drivers/char/hw_random/atmel-rng.c @@ -37,6 +37,7 @@ struct atmel_trng { struct clk *clk; void __iomem *base; struct hwrng rng; + struct device *dev; bool has_half_rate; }; @@ -59,9 +60,9 @@ static int atmel_trng_read(struct hwrng *rng, void *buf, size_t max, u32 *data = buf; int ret; - ret = pm_runtime_get_sync((struct device *)trng->rng.priv); + ret = pm_runtime_get_sync(trng->dev); if (ret < 0) { - pm_runtime_put_sync((struct device *)trng->rng.priv); + pm_runtime_put_sync(trng->dev); return ret; } @@ -79,8 +80,8 @@ static int atmel_trng_read(struct hwrng *rng, void *buf, size_t max, ret = 4; out: - pm_runtime_mark_last_busy((struct device *)trng->rng.priv); - pm_runtime_put_sync_autosuspend((struct device *)trng->rng.priv); + pm_runtime_mark_last_busy(trng->dev); + pm_runtime_put_sync_autosuspend(trng->dev); return ret; } @@ -134,9 +135,9 @@ static int atmel_trng_probe(struct platform_device *pdev) return -ENODEV; trng->has_half_rate 
= data->has_half_rate; + trng->dev = &pdev->dev; trng->rng.name = pdev->name; trng->rng.read = atmel_trng_read; - trng->rng.priv = (unsigned long)&pdev->dev; platform_set_drvdata(pdev, trng); #ifndef CONFIG_PM @@ -216,7 +217,7 @@ MODULE_DEVICE_TABLE(of, atmel_trng_dt_ids); static struct platform_driver atmel_trng_driver = { .probe = atmel_trng_probe, - .remove_new = atmel_trng_remove, + .remove = atmel_trng_remove, .driver = { .name = "atmel-trng", .pm = pm_ptr(&atmel_trng_pm_ops), diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c index b03e80300627..aa2b135e3ee2 100644 --- a/drivers/char/hw_random/bcm2835-rng.c +++ b/drivers/char/hw_random/bcm2835-rng.c @@ -94,8 +94,10 @@ static int bcm2835_rng_init(struct hwrng *rng) return ret; ret = reset_control_reset(priv->reset); - if (ret) + if (ret) { + clk_disable_unprepare(priv->clk); return ret; + } if (priv->mask_interrupts) { /* mask the interrupt */ diff --git a/drivers/char/hw_random/bcm74110-rng.c b/drivers/char/hw_random/bcm74110-rng.c new file mode 100644 index 000000000000..5c64148e91f1 --- /dev/null +++ b/drivers/char/hw_random/bcm74110-rng.c @@ -0,0 +1,125 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2024 Broadcom + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/mod_devicetable.h> +#include <linux/kernel.h> +#include <linux/io.h> +#include <linux/delay.h> +#include <linux/platform_device.h> +#include <linux/random.h> +#include <linux/hw_random.h> + +#define HOST_REV_ID 0x00 +#define HOST_FIFO_DEPTH 0x04 +#define HOST_FIFO_COUNT 0x08 +#define HOST_FIFO_THRESHOLD 0x0c +#define HOST_FIFO_DATA 0x10 + +#define HOST_FIFO_COUNT_MASK 0xffff + +/* Delay range in microseconds */ +#define FIFO_DELAY_MIN_US 3 +#define FIFO_DELAY_MAX_US 7 +#define FIFO_DELAY_MAX_COUNT 10 + +struct bcm74110_priv { + void __iomem *base; +}; + +static inline int bcm74110_rng_fifo_count(void __iomem *mem) +{ + return readl_relaxed(mem) & HOST_FIFO_COUNT_MASK; +} + +static int bcm74110_rng_read(struct hwrng *rng, void *buf, size_t max, + bool wait) +{ + struct bcm74110_priv *priv = (struct bcm74110_priv *)rng->priv; + void __iomem *fc_addr = priv->base + HOST_FIFO_COUNT; + void __iomem *fd_addr = priv->base + HOST_FIFO_DATA; + unsigned underrun_count = 0; + u32 max_words = max / sizeof(u32); + u32 num_words; + unsigned i; + + /* + * We need to check how many words are available in the RNG FIFO. If + * there aren't any, we need to wait for some to become available. + */ + while ((num_words = bcm74110_rng_fifo_count(fc_addr)) == 0) { + if (!wait) + return 0; + /* + * As a precaution, limit how long we wait. If the FIFO doesn't + * refill within the allotted time, return 0 (=no data) to the + * caller. 
+ */ + if (likely(underrun_count < FIFO_DELAY_MAX_COUNT)) + usleep_range(FIFO_DELAY_MIN_US, FIFO_DELAY_MAX_US); + else + return 0; + underrun_count++; + } + if (num_words > max_words) + num_words = max_words; + + /* Bail early if we run out of random numbers unexpectedly */ + for (i = 0; i < num_words && bcm74110_rng_fifo_count(fc_addr) > 0; i++) + ((u32 *)buf)[i] = readl_relaxed(fd_addr); + + return i * sizeof(u32); +} + +static struct hwrng bcm74110_hwrng = { + .read = bcm74110_rng_read, +}; + +static int bcm74110_rng_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct bcm74110_priv *priv; + int rc; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + bcm74110_hwrng.name = pdev->name; + bcm74110_hwrng.priv = (unsigned long)priv; + + priv->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(priv->base)) + return PTR_ERR(priv->base); + + rc = devm_hwrng_register(dev, &bcm74110_hwrng); + if (rc) + dev_err(dev, "hwrng registration failed (%d)\n", rc); + else + dev_info(dev, "hwrng registered\n"); + + return rc; +} + +static const struct of_device_id bcm74110_rng_match[] = { + { .compatible = "brcm,bcm74110-rng", }, + {}, +}; +MODULE_DEVICE_TABLE(of, bcm74110_rng_match); + +static struct platform_driver bcm74110_rng_driver = { + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = bcm74110_rng_match, + }, + .probe = bcm74110_rng_probe, +}; +module_platform_driver(bcm74110_rng_driver); + +MODULE_AUTHOR("Markus Mayer <mmayer@broadcom.com>"); +MODULE_DESCRIPTION("BCM 74110 Random Number Generator (RNG) driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/char/hw_random/cavium-rng-vf.c b/drivers/char/hw_random/cavium-rng-vf.c index c99c54cd99c6..c1b8918b2292 100644 --- a/drivers/char/hw_random/cavium-rng-vf.c +++ b/drivers/char/hw_random/cavium-rng-vf.c @@ -266,4 +266,5 @@ static struct pci_driver cavium_rng_vf_driver = { module_pci_driver(cavium_rng_vf_driver); MODULE_AUTHOR("Omer Khaliq <okhaliq@caviumnetworks.com>"); +MODULE_DESCRIPTION("Cavium ThunderX Random Number Generator VF support"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/char/hw_random/cavium-rng.c b/drivers/char/hw_random/cavium-rng.c index b96579222408..d9d7b6038c06 100644 --- a/drivers/char/hw_random/cavium-rng.c +++ b/drivers/char/hw_random/cavium-rng.c @@ -88,4 +88,5 @@ static struct pci_driver cavium_rng_pf_driver = { module_pci_driver(cavium_rng_pf_driver); MODULE_AUTHOR("Omer Khaliq <okhaliq@caviumnetworks.com>"); +MODULE_DESCRIPTION("Cavium ThunderX Random Number Generator support"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/char/hw_random/cctrng.c b/drivers/char/hw_random/cctrng.c index c0d2f824769f..4db198849695 100644 --- a/drivers/char/hw_random/cctrng.c +++ b/drivers/char/hw_random/cctrng.c @@ -622,6 +622,7 @@ static int __maybe_unused cctrng_resume(struct device *dev) /* wait for Cryptocell reset completion */ if (!cctrng_wait_for_reset_completion(drvdata)) { dev_err(dev, "Cryptocell reset not completed"); + clk_disable_unprepare(drvdata->clk); return -EBUSY; } @@ -652,7 +653,7 @@ static struct platform_driver cctrng_driver = { .pm = &cctrng_pm, }, .probe = cctrng_probe, - .remove_new = cctrng_remove, + .remove = cctrng_remove, }; module_platform_driver(cctrng_driver); diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index a3bbdd6e60fc..018316f54621 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c @@ -64,19 +64,6 @@ static size_t rng_buffer_size(void) return 
RNG_BUFFER_SIZE; } -static void add_early_randomness(struct hwrng *rng) -{ - int bytes_read; - - mutex_lock(&reading_mutex); - bytes_read = rng_get_data(rng, rng_fillbuf, 32, 0); - mutex_unlock(&reading_mutex); - if (bytes_read > 0) { - size_t entropy = bytes_read * 8 * rng->quality / 1024; - add_hwgenerator_randomness(rng_fillbuf, bytes_read, entropy, false); - } -} - static inline void cleanup_rng(struct kref *kref) { struct hwrng *rng = container_of(kref, struct hwrng, ref); @@ -174,7 +161,6 @@ static int hwrng_init(struct hwrng *rng) reinit_completion(&rng->cleanup_done); skip_init: - rng->quality = min_t(u16, min_t(u16, default_quality, 1024), rng->quality ?: 1024); current_quality = rng->quality; /* obsolete */ return 0; @@ -195,8 +181,15 @@ static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size, int present; BUG_ON(!mutex_is_locked(&reading_mutex)); - if (rng->read) - return rng->read(rng, (void *)buffer, size, wait); + if (rng->read) { + int err; + + err = rng->read(rng, buffer, size, wait); + if (WARN_ON_ONCE(err > 0 && err > size)) + err = size; + + return err; + } if (rng->data_present) present = rng->data_present(rng, wait); @@ -340,13 +333,12 @@ static ssize_t rng_current_store(struct device *dev, const char *buf, size_t len) { int err; - struct hwrng *rng, *old_rng, *new_rng; + struct hwrng *rng, *new_rng; err = mutex_lock_interruptible(&rng_mutex); if (err) return -ERESTARTSYS; - old_rng = current_rng; if (sysfs_streq(buf, "")) { err = enable_best_rng(); } else { @@ -362,11 +354,8 @@ static ssize_t rng_current_store(struct device *dev, new_rng = get_current_rng_nolock(); mutex_unlock(&rng_mutex); - if (new_rng) { - if (new_rng != old_rng) - add_early_randomness(new_rng); + if (new_rng) put_rng(new_rng); - } return err ? : len; } @@ -382,7 +371,7 @@ static ssize_t rng_current_show(struct device *dev, if (IS_ERR(rng)) return PTR_ERR(rng); - ret = snprintf(buf, PAGE_SIZE, "%s\n", rng ? rng->name : "none"); + ret = sysfs_emit(buf, "%s\n", rng ? rng->name : "none"); put_rng(rng); return ret; @@ -487,16 +476,6 @@ static struct attribute *rng_dev_attrs[] = { ATTRIBUTE_GROUPS(rng_dev); -static void __exit unregister_miscdev(void) -{ - misc_deregister(&rng_miscdev); -} - -static int __init register_miscdev(void) -{ - return misc_register(&rng_miscdev); -} - static int hwrng_fillfn(void *unused) { size_t entropy, entropy_credit = 0; /* in 1/1024 of a bit */ @@ -544,7 +523,6 @@ int hwrng_register(struct hwrng *rng) { int err = -EINVAL; struct hwrng *tmp; - bool is_new_current = false; if (!rng->name || (!rng->data_read && !rng->read)) goto out; @@ -563,6 +541,9 @@ int hwrng_register(struct hwrng *rng) complete(&rng->cleanup_done); init_completion(&rng->dying); + /* Adjust quality field to always have a proper value */ + rng->quality = min_t(u16, min_t(u16, default_quality, 1024), rng->quality ?: 1024); + if (!current_rng || (!cur_rng_set_by_user && rng->quality > current_rng->quality)) { /* @@ -573,25 +554,8 @@ int hwrng_register(struct hwrng *rng) err = set_current_rng(rng); if (err) goto out_unlock; - /* to use current_rng in add_early_randomness() we need - * to take a ref - */ - is_new_current = true; - kref_get(&rng->ref); } mutex_unlock(&rng_mutex); - if (is_new_current || !rng->init) { - /* - * Use a new device's input to add some randomness to - * the system. 
If this rng device isn't going to be - * used right away, its init function hasn't been - * called yet by set_current_rng(); so only use the - * randomness from devices that don't need an init callback - */ - add_early_randomness(rng); - } - if (is_new_current) - put_rng(rng); return 0; out_unlock: mutex_unlock(&rng_mutex); @@ -602,12 +566,11 @@ EXPORT_SYMBOL_GPL(hwrng_register); void hwrng_unregister(struct hwrng *rng) { - struct hwrng *old_rng, *new_rng; + struct hwrng *new_rng; int err; mutex_lock(&rng_mutex); - old_rng = current_rng; list_del(&rng->list); complete_all(&rng->dying); if (current_rng == rng) { @@ -626,11 +589,8 @@ void hwrng_unregister(struct hwrng *rng) } else mutex_unlock(&rng_mutex); - if (new_rng) { - if (old_rng != new_rng) - add_early_randomness(new_rng); + if (new_rng) put_rng(new_rng); - } wait_for_completion(&rng->cleanup_done); } @@ -707,7 +667,7 @@ static int __init hwrng_modinit(void) return -ENOMEM; } - ret = register_miscdev(); + ret = misc_register(&rng_miscdev); if (ret) { kfree(rng_fillbuf); kfree(rng_buffer); @@ -724,7 +684,7 @@ static void __exit hwrng_modexit(void) kfree(rng_fillbuf); mutex_unlock(&rng_mutex); - unregister_miscdev(); + misc_deregister(&rng_miscdev); } fs_initcall(hwrng_modinit); /* depends on misc_register() */ diff --git a/drivers/char/hw_random/exynos-trng.c b/drivers/char/hw_random/exynos-trng.c index 0ed5d22fe667..02e207c09e81 100644 --- a/drivers/char/hw_random/exynos-trng.c +++ b/drivers/char/hw_random/exynos-trng.c @@ -10,6 +10,7 @@ * Krzysztof Kozłowski <krzk@kernel.org> */ +#include <linux/arm-smccc.h> #include <linux/clk.h> #include <linux/crypto.h> #include <linux/delay.h> @@ -22,46 +23,69 @@ #include <linux/mod_devicetable.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> - -#define EXYNOS_TRNG_CLKDIV (0x0) - -#define EXYNOS_TRNG_CTRL (0x20) -#define EXYNOS_TRNG_CTRL_RNGEN BIT(31) - -#define EXYNOS_TRNG_POST_CTRL (0x30) -#define EXYNOS_TRNG_ONLINE_CTRL (0x40) -#define EXYNOS_TRNG_ONLINE_STAT (0x44) -#define EXYNOS_TRNG_ONLINE_MAXCHI2 (0x48) -#define EXYNOS_TRNG_FIFO_CTRL (0x50) -#define EXYNOS_TRNG_FIFO_0 (0x80) -#define EXYNOS_TRNG_FIFO_1 (0x84) -#define EXYNOS_TRNG_FIFO_2 (0x88) -#define EXYNOS_TRNG_FIFO_3 (0x8c) -#define EXYNOS_TRNG_FIFO_4 (0x90) -#define EXYNOS_TRNG_FIFO_5 (0x94) -#define EXYNOS_TRNG_FIFO_6 (0x98) -#define EXYNOS_TRNG_FIFO_7 (0x9c) -#define EXYNOS_TRNG_FIFO_LEN (8) -#define EXYNOS_TRNG_CLOCK_RATE (500000) - +#include <linux/property.h> + +#define EXYNOS_TRNG_CLKDIV 0x0 + +#define EXYNOS_TRNG_CTRL 0x20 +#define EXYNOS_TRNG_CTRL_RNGEN BIT(31) + +#define EXYNOS_TRNG_POST_CTRL 0x30 +#define EXYNOS_TRNG_ONLINE_CTRL 0x40 +#define EXYNOS_TRNG_ONLINE_STAT 0x44 +#define EXYNOS_TRNG_ONLINE_MAXCHI2 0x48 +#define EXYNOS_TRNG_FIFO_CTRL 0x50 +#define EXYNOS_TRNG_FIFO_0 0x80 +#define EXYNOS_TRNG_FIFO_1 0x84 +#define EXYNOS_TRNG_FIFO_2 0x88 +#define EXYNOS_TRNG_FIFO_3 0x8c +#define EXYNOS_TRNG_FIFO_4 0x90 +#define EXYNOS_TRNG_FIFO_5 0x94 +#define EXYNOS_TRNG_FIFO_6 0x98 +#define EXYNOS_TRNG_FIFO_7 0x9c +#define EXYNOS_TRNG_FIFO_LEN 8 +#define EXYNOS_TRNG_CLOCK_RATE 500000 + +/* Driver feature flags */ +#define EXYNOS_SMC BIT(0) + +#define EXYNOS_SMC_CALL_VAL(func_num) \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_32, \ + ARM_SMCCC_OWNER_SIP, \ + func_num) + +/* SMC command for DTRNG access */ +#define SMC_CMD_RANDOM EXYNOS_SMC_CALL_VAL(0x1012) + +/* SMC_CMD_RANDOM: arguments */ +#define HWRNG_INIT 0x0 +#define HWRNG_EXIT 0x1 +#define HWRNG_GET_DATA 0x2 +#define HWRNG_RESUME 0x3 + +/* 
SMC_CMD_RANDOM: return values */ +#define HWRNG_RET_OK 0x0 +#define HWRNG_RET_RETRY_ERROR 0x2 + +#define HWRNG_MAX_TRIES 100 struct exynos_trng_dev { - struct device *dev; - void __iomem *mem; - struct clk *clk; - struct hwrng rng; + struct device *dev; + void __iomem *mem; + struct clk *clk; /* operating clock */ + struct clk *pclk; /* bus clock */ + struct hwrng rng; + unsigned long flags; }; -static int exynos_trng_do_read(struct hwrng *rng, void *data, size_t max, - bool wait) +static int exynos_trng_do_read_reg(struct hwrng *rng, void *data, size_t max, + bool wait) { - struct exynos_trng_dev *trng; + struct exynos_trng_dev *trng = (struct exynos_trng_dev *)rng->priv; int val; max = min_t(size_t, max, (EXYNOS_TRNG_FIFO_LEN * 4)); - - trng = (struct exynos_trng_dev *)rng->priv; - writel_relaxed(max * 8, trng->mem + EXYNOS_TRNG_FIFO_CTRL); val = readl_poll_timeout(trng->mem + EXYNOS_TRNG_FIFO_CTRL, val, val == 0, 200, 1000000); @@ -73,7 +97,40 @@ static int exynos_trng_do_read(struct hwrng *rng, void *data, size_t max, return max; } -static int exynos_trng_init(struct hwrng *rng) +static int exynos_trng_do_read_smc(struct hwrng *rng, void *data, size_t max, + bool wait) +{ + struct arm_smccc_res res; + unsigned int copied = 0; + u32 *buf = data; + int tries = 0; + + while (copied < max) { + arm_smccc_smc(SMC_CMD_RANDOM, HWRNG_GET_DATA, 0, 0, 0, 0, 0, 0, + &res); + switch (res.a0) { + case HWRNG_RET_OK: + *buf++ = res.a2; + *buf++ = res.a3; + copied += 8; + tries = 0; + break; + case HWRNG_RET_RETRY_ERROR: + if (!wait) + return copied; + if (++tries >= HWRNG_MAX_TRIES) + return copied; + cond_resched(); + break; + default: + return -EIO; + } + } + + return copied; +} + +static int exynos_trng_init_reg(struct hwrng *rng) { struct exynos_trng_dev *trng = (struct exynos_trng_dev *)rng->priv; unsigned long sss_rate; @@ -87,7 +144,7 @@ static int exynos_trng_init(struct hwrng *rng) */ val = sss_rate / (EXYNOS_TRNG_CLOCK_RATE * 2); if (val > 0x7fff) { - dev_err(trng->dev, "clock divider too large: %d", val); + dev_err(trng->dev, "clock divider too large: %d\n", val); return -ERANGE; } val = val << 1; @@ -106,6 +163,24 @@ static int exynos_trng_init(struct hwrng *rng) return 0; } +static int exynos_trng_init_smc(struct hwrng *rng) +{ + struct exynos_trng_dev *trng = (struct exynos_trng_dev *)rng->priv; + struct arm_smccc_res res; + int ret = 0; + + arm_smccc_smc(SMC_CMD_RANDOM, HWRNG_INIT, 0, 0, 0, 0, 0, 0, &res); + if (res.a0 != HWRNG_RET_OK) { + dev_err(trng->dev, "SMC command for TRNG init failed (%d)\n", + (int)res.a0); + ret = -EIO; + } + if ((int)res.a0 == -1) + dev_info(trng->dev, "Make sure LDFW is loaded by your BL\n"); + + return ret; +} + static int exynos_trng_probe(struct platform_device *pdev) { struct exynos_trng_dev *trng; @@ -115,21 +190,29 @@ static int exynos_trng_probe(struct platform_device *pdev) if (!trng) return ret; + platform_set_drvdata(pdev, trng); + trng->dev = &pdev->dev; + + trng->flags = (unsigned long)device_get_match_data(&pdev->dev); + trng->rng.name = devm_kstrdup(&pdev->dev, dev_name(&pdev->dev), GFP_KERNEL); if (!trng->rng.name) return ret; - trng->rng.init = exynos_trng_init; - trng->rng.read = exynos_trng_do_read; - trng->rng.priv = (unsigned long) trng; + trng->rng.priv = (unsigned long)trng; - platform_set_drvdata(pdev, trng); - trng->dev = &pdev->dev; + if (trng->flags & EXYNOS_SMC) { + trng->rng.init = exynos_trng_init_smc; + trng->rng.read = exynos_trng_do_read_smc; + } else { + trng->rng.init = exynos_trng_init_reg; + trng->rng.read = 
exynos_trng_do_read_reg; - trng->mem = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(trng->mem)) - return PTR_ERR(trng->mem); + trng->mem = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(trng->mem)) + return PTR_ERR(trng->mem); + } pm_runtime_enable(&pdev->dev); ret = pm_runtime_resume_and_get(&pdev->dev); @@ -138,32 +221,30 @@ static int exynos_trng_probe(struct platform_device *pdev) goto err_pm_get; } - trng->clk = devm_clk_get(&pdev->dev, "secss"); + trng->clk = devm_clk_get_enabled(&pdev->dev, "secss"); if (IS_ERR(trng->clk)) { - ret = PTR_ERR(trng->clk); - dev_err(&pdev->dev, "Could not get clock.\n"); + ret = dev_err_probe(&pdev->dev, PTR_ERR(trng->clk), + "Could not get clock\n"); goto err_clock; } - ret = clk_prepare_enable(trng->clk); - if (ret) { - dev_err(&pdev->dev, "Could not enable the clk.\n"); + trng->pclk = devm_clk_get_optional_enabled(&pdev->dev, "pclk"); + if (IS_ERR(trng->pclk)) { + ret = dev_err_probe(&pdev->dev, PTR_ERR(trng->pclk), + "Could not get pclk\n"); goto err_clock; } ret = devm_hwrng_register(&pdev->dev, &trng->rng); if (ret) { dev_err(&pdev->dev, "Could not register hwrng device.\n"); - goto err_register; + goto err_clock; } dev_info(&pdev->dev, "Exynos True Random Number Generator.\n"); return 0; -err_register: - clk_disable_unprepare(trng->clk); - err_clock: pm_runtime_put_noidle(&pdev->dev); @@ -175,9 +256,14 @@ err_pm_get: static void exynos_trng_remove(struct platform_device *pdev) { - struct exynos_trng_dev *trng = platform_get_drvdata(pdev); + struct exynos_trng_dev *trng = platform_get_drvdata(pdev); - clk_disable_unprepare(trng->clk); + if (trng->flags & EXYNOS_SMC) { + struct arm_smccc_res res; + + arm_smccc_smc(SMC_CMD_RANDOM, HWRNG_EXIT, 0, 0, 0, 0, 0, 0, + &res); + } pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); @@ -185,6 +271,16 @@ static void exynos_trng_remove(struct platform_device *pdev) static int exynos_trng_suspend(struct device *dev) { + struct exynos_trng_dev *trng = dev_get_drvdata(dev); + struct arm_smccc_res res; + + if (trng->flags & EXYNOS_SMC) { + arm_smccc_smc(SMC_CMD_RANDOM, HWRNG_EXIT, 0, 0, 0, 0, 0, 0, + &res); + if (res.a0 != HWRNG_RET_OK) + return -EIO; + } + pm_runtime_put_sync(dev); return 0; @@ -192,6 +288,7 @@ static int exynos_trng_suspend(struct device *dev) static int exynos_trng_resume(struct device *dev) { + struct exynos_trng_dev *trng = dev_get_drvdata(dev); int ret; ret = pm_runtime_resume_and_get(dev); @@ -200,15 +297,32 @@ static int exynos_trng_resume(struct device *dev) return ret; } + if (trng->flags & EXYNOS_SMC) { + struct arm_smccc_res res; + + arm_smccc_smc(SMC_CMD_RANDOM, HWRNG_RESUME, 0, 0, 0, 0, 0, 0, + &res); + if (res.a0 != HWRNG_RET_OK) + return -EIO; + + arm_smccc_smc(SMC_CMD_RANDOM, HWRNG_INIT, 0, 0, 0, 0, 0, 0, + &res); + if (res.a0 != HWRNG_RET_OK) + return -EIO; + } + return 0; } static DEFINE_SIMPLE_DEV_PM_OPS(exynos_trng_pm_ops, exynos_trng_suspend, - exynos_trng_resume); + exynos_trng_resume); static const struct of_device_id exynos_trng_dt_match[] = { { .compatible = "samsung,exynos5250-trng", + }, { + .compatible = "samsung,exynos850-trng", + .data = (void *)EXYNOS_SMC, }, { }, }; @@ -221,10 +335,11 @@ static struct platform_driver exynos_trng_driver = { .of_match_table = exynos_trng_dt_match, }, .probe = exynos_trng_probe, - .remove_new = exynos_trng_remove, + .remove = exynos_trng_remove, }; module_platform_driver(exynos_trng_driver); + MODULE_AUTHOR("Łukasz Stelmach"); MODULE_DESCRIPTION("H/W TRNG driver for Exynos chips"); MODULE_LICENSE("GPL 
v2"); diff --git a/drivers/char/hw_random/histb-rng.c b/drivers/char/hw_random/histb-rng.c index f652e1135e4b..1b91e88cc4c0 100644 --- a/drivers/char/hw_random/histb-rng.c +++ b/drivers/char/hw_random/histb-rng.c @@ -89,7 +89,7 @@ depth_show(struct device *dev, struct device_attribute *attr, char *buf) struct histb_rng_priv *priv = dev_get_drvdata(dev); void __iomem *base = priv->base; - return sprintf(buf, "%d\n", histb_rng_get_depth(base)); + return sprintf(buf, "%u\n", histb_rng_get_depth(base)); } static ssize_t diff --git a/drivers/char/hw_random/imx-rngc.c b/drivers/char/hw_random/imx-rngc.c index 118a72acb99b..241664a9b5d9 100644 --- a/drivers/char/hw_random/imx-rngc.c +++ b/drivers/char/hw_random/imx-rngc.c @@ -13,6 +13,8 @@ #include <linux/clk.h> #include <linux/err.h> #include <linux/platform_device.h> +#include <linux/pm.h> +#include <linux/pm_runtime.h> #include <linux/interrupt.h> #include <linux/hw_random.h> #include <linux/completion.h> @@ -53,6 +55,7 @@ #define RNGC_SELFTEST_TIMEOUT 2500 /* us */ #define RNGC_SEED_TIMEOUT 200 /* ms */ +#define RNGC_PM_TIMEOUT 500 /* ms */ static bool self_test = true; module_param(self_test, bool, 0); @@ -123,7 +126,11 @@ static int imx_rngc_read(struct hwrng *rng, void *data, size_t max, bool wait) { struct imx_rngc *rngc = container_of(rng, struct imx_rngc, rng); unsigned int status; - int retval = 0; + int err, retval = 0; + + err = pm_runtime_resume_and_get(rngc->dev); + if (err) + return err; while (max >= sizeof(u32)) { status = readl(rngc->base + RNGC_STATUS); @@ -141,6 +148,8 @@ static int imx_rngc_read(struct hwrng *rng, void *data, size_t max, bool wait) max -= sizeof(u32); } } + pm_runtime_mark_last_busy(rngc->dev); + pm_runtime_put(rngc->dev); return retval ? retval : -EIO; } @@ -169,7 +178,11 @@ static int imx_rngc_init(struct hwrng *rng) { struct imx_rngc *rngc = container_of(rng, struct imx_rngc, rng); u32 cmd, ctrl; - int ret; + int ret, err; + + err = pm_runtime_resume_and_get(rngc->dev); + if (err) + return err; /* clear error */ cmd = readl(rngc->base + RNGC_COMMAND); @@ -186,15 +199,15 @@ static int imx_rngc_init(struct hwrng *rng) ret = wait_for_completion_timeout(&rngc->rng_op_done, msecs_to_jiffies(RNGC_SEED_TIMEOUT)); if (!ret) { - ret = -ETIMEDOUT; - goto err; + err = -ETIMEDOUT; + goto out; } } while (rngc->err_reg == RNGC_ERROR_STATUS_STAT_ERR); if (rngc->err_reg) { - ret = -EIO; - goto err; + err = -EIO; + goto out; } /* @@ -205,23 +218,29 @@ static int imx_rngc_init(struct hwrng *rng) ctrl |= RNGC_CTRL_AUTO_SEED; writel(ctrl, rngc->base + RNGC_CONTROL); +out: /* * if initialisation was successful, we keep the interrupt * unmasked until imx_rngc_cleanup is called * we mask the interrupt ourselves if we return an error */ - return 0; + if (err) + imx_rngc_irq_mask_clear(rngc); -err: - imx_rngc_irq_mask_clear(rngc); - return ret; + pm_runtime_put(rngc->dev); + return err; } static void imx_rngc_cleanup(struct hwrng *rng) { struct imx_rngc *rngc = container_of(rng, struct imx_rngc, rng); + int err; - imx_rngc_irq_mask_clear(rngc); + err = pm_runtime_resume_and_get(rngc->dev); + if (!err) { + imx_rngc_irq_mask_clear(rngc); + pm_runtime_put(rngc->dev); + } } static int __init imx_rngc_probe(struct platform_device *pdev) @@ -240,7 +259,7 @@ static int __init imx_rngc_probe(struct platform_device *pdev) if (IS_ERR(rngc->base)) return PTR_ERR(rngc->base); - rngc->clk = devm_clk_get_enabled(&pdev->dev, NULL); + rngc->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(rngc->clk)) return dev_err_probe(&pdev->dev, 
PTR_ERR(rngc->clk), "Cannot get rng_clk\n"); @@ -248,14 +267,18 @@ static int __init imx_rngc_probe(struct platform_device *pdev) if (irq < 0) return irq; + clk_prepare_enable(rngc->clk); + ver_id = readl(rngc->base + RNGC_VER_ID); rng_type = FIELD_GET(RNG_TYPE, ver_id); /* * This driver supports only RNGC and RNGB. (There's a different * driver for RNGA.) */ - if (rng_type != RNGC_TYPE_RNGC && rng_type != RNGC_TYPE_RNGB) + if (rng_type != RNGC_TYPE_RNGC && rng_type != RNGC_TYPE_RNGB) { + clk_disable_unprepare(rngc->clk); return -ENODEV; + } init_completion(&rngc->rng_op_done); @@ -272,15 +295,24 @@ static int __init imx_rngc_probe(struct platform_device *pdev) ret = devm_request_irq(&pdev->dev, irq, imx_rngc_irq, 0, pdev->name, (void *)rngc); - if (ret) + if (ret) { + clk_disable_unprepare(rngc->clk); return dev_err_probe(&pdev->dev, ret, "Can't get interrupt working.\n"); + } if (self_test) { ret = imx_rngc_self_test(rngc); - if (ret) + if (ret) { + clk_disable_unprepare(rngc->clk); return dev_err_probe(&pdev->dev, ret, "self test failed\n"); + } } + pm_runtime_set_autosuspend_delay(&pdev->dev, RNGC_PM_TIMEOUT); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + devm_pm_runtime_enable(&pdev->dev); + ret = devm_hwrng_register(&pdev->dev, &rngc->rng); if (ret) return dev_err_probe(&pdev->dev, ret, "hwrng registration failed\n"); @@ -310,7 +342,10 @@ static int imx_rngc_resume(struct device *dev) return 0; } -static DEFINE_SIMPLE_DEV_PM_OPS(imx_rngc_pm_ops, imx_rngc_suspend, imx_rngc_resume); +static const struct dev_pm_ops imx_rngc_pm_ops = { + SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) + RUNTIME_PM_OPS(imx_rngc_suspend, imx_rngc_resume, NULL) +}; static const struct of_device_id imx_rngc_dt_ids[] = { { .compatible = "fsl,imx25-rngb" }, @@ -321,7 +356,7 @@ MODULE_DEVICE_TABLE(of, imx_rngc_dt_ids); static struct platform_driver imx_rngc_driver = { .driver = { .name = KBUILD_MODNAME, - .pm = pm_sleep_ptr(&imx_rngc_pm_ops), + .pm = pm_ptr(&imx_rngc_pm_ops), .of_match_table = imx_rngc_dt_ids, }, }; diff --git a/drivers/char/hw_random/ingenic-rng.c b/drivers/char/hw_random/ingenic-rng.c index 2f9b6483c4a1..bbfd662d25a6 100644 --- a/drivers/char/hw_random/ingenic-rng.c +++ b/drivers/char/hw_random/ingenic-rng.c @@ -132,7 +132,7 @@ MODULE_DEVICE_TABLE(of, ingenic_rng_of_match); static struct platform_driver ingenic_rng_driver = { .probe = ingenic_rng_probe, - .remove_new = ingenic_rng_remove, + .remove = ingenic_rng_remove, .driver = { .name = "ingenic-rng", .of_match_table = ingenic_rng_of_match, diff --git a/drivers/char/hw_random/ks-sa-rng.c b/drivers/char/hw_random/ks-sa-rng.c index 36c34252b4f6..d8fd8a354482 100644 --- a/drivers/char/hw_random/ks-sa-rng.c +++ b/drivers/char/hw_random/ks-sa-rng.c @@ -261,7 +261,7 @@ static struct platform_driver ks_sa_rng_driver = { .of_match_table = ks_sa_rng_dt_match, }, .probe = ks_sa_rng_probe, - .remove_new = ks_sa_rng_remove, + .remove = ks_sa_rng_remove, }; module_platform_driver(ks_sa_rng_driver); diff --git a/drivers/char/hw_random/mtk-rng.c b/drivers/char/hw_random/mtk-rng.c index aa993753ab12..b7fa1bc1122b 100644 --- a/drivers/char/hw_random/mtk-rng.c +++ b/drivers/char/hw_random/mtk-rng.c @@ -36,6 +36,7 @@ struct mtk_rng { void __iomem *base; struct clk *clk; struct hwrng rng; + struct device *dev; }; static int mtk_rng_init(struct hwrng *rng) @@ -85,7 +86,7 @@ static int mtk_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) struct mtk_rng *priv = to_mtk_rng(rng); int retval = 
0; - pm_runtime_get_sync((struct device *)priv->rng.priv); + pm_runtime_get_sync(priv->dev); while (max >= sizeof(u32)) { if (!mtk_rng_wait_ready(rng, wait)) @@ -97,8 +98,8 @@ static int mtk_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) max -= sizeof(u32); } - pm_runtime_mark_last_busy((struct device *)priv->rng.priv); - pm_runtime_put_sync_autosuspend((struct device *)priv->rng.priv); + pm_runtime_mark_last_busy(priv->dev); + pm_runtime_put_sync_autosuspend(priv->dev); return retval || !wait ? retval : -EIO; } @@ -112,13 +113,13 @@ static int mtk_rng_probe(struct platform_device *pdev) if (!priv) return -ENOMEM; + priv->dev = &pdev->dev; priv->rng.name = pdev->name; #ifndef CONFIG_PM priv->rng.init = mtk_rng_init; priv->rng.cleanup = mtk_rng_cleanup; #endif priv->rng.read = mtk_rng_read; - priv->rng.priv = (unsigned long)&pdev->dev; priv->rng.quality = 900; priv->clk = devm_clk_get(&pdev->dev, "rng"); @@ -142,7 +143,7 @@ static int mtk_rng_probe(struct platform_device *pdev) dev_set_drvdata(&pdev->dev, priv); pm_runtime_set_autosuspend_delay(&pdev->dev, RNG_AUTOSUSPEND_TIMEOUT); pm_runtime_use_autosuspend(&pdev->dev); - pm_runtime_enable(&pdev->dev); + devm_pm_runtime_enable(&pdev->dev); dev_info(&pdev->dev, "registered RNG driver\n"); diff --git a/drivers/char/hw_random/mxc-rnga.c b/drivers/char/hw_random/mxc-rnga.c index 07ec000e4cd7..e3fcb8bcc29b 100644 --- a/drivers/char/hw_random/mxc-rnga.c +++ b/drivers/char/hw_random/mxc-rnga.c @@ -131,7 +131,7 @@ static void mxc_rnga_cleanup(struct hwrng *rng) __raw_writel(ctrl & ~RNGA_CONTROL_GO, mxc_rng->mem + RNGA_CONTROL); } -static int __init mxc_rnga_probe(struct platform_device *pdev) +static int mxc_rnga_probe(struct platform_device *pdev) { int err; struct mxc_rng *mxc_rng; @@ -147,42 +147,32 @@ static int __init mxc_rnga_probe(struct platform_device *pdev) mxc_rng->rng.data_present = mxc_rnga_data_present; mxc_rng->rng.data_read = mxc_rnga_data_read; - mxc_rng->clk = devm_clk_get(&pdev->dev, NULL); + mxc_rng->clk = devm_clk_get_enabled(&pdev->dev, NULL); if (IS_ERR(mxc_rng->clk)) { dev_err(&pdev->dev, "Could not get rng_clk!\n"); return PTR_ERR(mxc_rng->clk); } - err = clk_prepare_enable(mxc_rng->clk); - if (err) - return err; - mxc_rng->mem = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(mxc_rng->mem)) { err = PTR_ERR(mxc_rng->mem); - goto err_ioremap; + return err; } err = hwrng_register(&mxc_rng->rng); if (err) { dev_err(&pdev->dev, "MXC RNGA registering failed (%d)\n", err); - goto err_ioremap; + return err; } return 0; - -err_ioremap: - clk_disable_unprepare(mxc_rng->clk); - return err; } -static void __exit mxc_rnga_remove(struct platform_device *pdev) +static void mxc_rnga_remove(struct platform_device *pdev) { struct mxc_rng *mxc_rng = platform_get_drvdata(pdev); hwrng_unregister(&mxc_rng->rng); - - clk_disable_unprepare(mxc_rng->clk); } static const struct of_device_id mxc_rnga_of_match[] = { @@ -197,10 +187,11 @@ static struct platform_driver mxc_rnga_driver = { .name = "mxc_rnga", .of_match_table = mxc_rnga_of_match, }, - .remove_new = __exit_p(mxc_rnga_remove), + .probe = mxc_rnga_probe, + .remove = mxc_rnga_remove, }; -module_platform_driver_probe(mxc_rnga_driver, mxc_rnga_probe); +module_platform_driver(mxc_rnga_driver); MODULE_AUTHOR("Freescale Semiconductor, Inc."); MODULE_DESCRIPTION("H/W RNGA driver for i.MX"); diff --git a/drivers/char/hw_random/n2-drv.c b/drivers/char/hw_random/n2-drv.c index 1b49e3a86d57..ea6d5599242f 100644 --- a/drivers/char/hw_random/n2-drv.c +++ 
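
Several drivers touched above (mtk-rng, npcm-rng, later stm32-rng) drop the `(struct device *)priv->rng.priv` casts in favour of a device pointer kept in the driver's private structure. A short sketch of that pattern with hypothetical names:

#include <linux/hw_random.h>
#include <linux/platform_device.h>

struct my_rng {
	struct device *dev;	/* kept here instead of abusing rng.priv */
	struct hwrng rng;
};

static int my_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
	struct my_rng *priv = container_of(rng, struct my_rng, rng);

	dev_dbg(priv->dev, "reading up to %zu bytes\n", max);
	return 0;
}

static int my_rng_probe(struct platform_device *pdev)
{
	struct my_rng *priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);

	if (!priv)
		return -ENOMEM;

	priv->dev = &pdev->dev;
	priv->rng.name = pdev->name;
	priv->rng.read = my_rng_read;
	return devm_hwrng_register(&pdev->dev, &priv->rng);
}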
b/drivers/char/hw_random/n2-drv.c @@ -858,7 +858,7 @@ static struct platform_driver n2rng_driver = { .of_match_table = n2rng_match, }, .probe = n2rng_probe, - .remove_new = n2rng_remove, + .remove = n2rng_remove, }; module_platform_driver(n2rng_driver); diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c index a2009fc4ad3c..f2a2aa7a531c 100644 --- a/drivers/char/hw_random/nomadik-rng.c +++ b/drivers/char/hw_random/nomadik-rng.c @@ -78,7 +78,6 @@ MODULE_DEVICE_TABLE(amba, nmk_rng_ids); static struct amba_driver nmk_rng_driver = { .drv = { - .owner = THIS_MODULE, .name = "rng", }, .probe = nmk_rng_probe, diff --git a/drivers/char/hw_random/npcm-rng.c b/drivers/char/hw_random/npcm-rng.c index bce8c4829a1f..3e308c890bd2 100644 --- a/drivers/char/hw_random/npcm-rng.c +++ b/drivers/char/hw_random/npcm-rng.c @@ -32,6 +32,7 @@ struct npcm_rng { void __iomem *base; struct hwrng rng; + struct device *dev; u32 clkp; }; @@ -57,7 +58,7 @@ static int npcm_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) int retval = 0; int ready; - pm_runtime_get_sync((struct device *)priv->rng.priv); + pm_runtime_get_sync(priv->dev); while (max) { if (wait) { @@ -79,8 +80,8 @@ static int npcm_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) max--; } - pm_runtime_mark_last_busy((struct device *)priv->rng.priv); - pm_runtime_put_sync_autosuspend((struct device *)priv->rng.priv); + pm_runtime_mark_last_busy(priv->dev); + pm_runtime_put_sync_autosuspend(priv->dev); return retval || !wait ? retval : -EIO; } @@ -109,7 +110,7 @@ static int npcm_rng_probe(struct platform_device *pdev) #endif priv->rng.name = pdev->name; priv->rng.read = npcm_rng_read; - priv->rng.priv = (unsigned long)&pdev->dev; + priv->dev = &pdev->dev; priv->clkp = (u32)(uintptr_t)of_device_get_match_data(&pdev->dev); writel(NPCM_RNG_M1ROSEL, priv->base + NPCM_RNGMODE_REG); @@ -176,7 +177,7 @@ static struct platform_driver npcm_rng_driver = { .of_match_table = of_match_ptr(rng_dt_id), }, .probe = npcm_rng_probe, - .remove_new = npcm_rng_remove, + .remove = npcm_rng_remove, }; module_platform_driver(npcm_rng_driver); diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c index d4c02e900466..5e8b50f15db7 100644 --- a/drivers/char/hw_random/omap-rng.c +++ b/drivers/char/hw_random/omap-rng.c @@ -558,10 +558,11 @@ static struct platform_driver omap_rng_driver = { .of_match_table = of_match_ptr(omap_rng_of_match), }, .probe = omap_rng_probe, - .remove_new = omap_rng_remove, + .remove = omap_rng_remove, }; module_platform_driver(omap_rng_driver); MODULE_ALIAS("platform:omap_rng"); MODULE_AUTHOR("Deepak Saxena (and others)"); +MODULE_DESCRIPTION("RNG driver for TI OMAP CPU family"); MODULE_LICENSE("GPL"); diff --git a/drivers/char/hw_random/omap3-rom-rng.c b/drivers/char/hw_random/omap3-rom-rng.c index 18dc46b1b58e..8064c792caf0 100644 --- a/drivers/char/hw_random/omap3-rom-rng.c +++ b/drivers/char/hw_random/omap3-rom-rng.c @@ -178,4 +178,5 @@ module_platform_driver(omap3_rom_rng_driver); MODULE_ALIAS("platform:omap3-rom-rng"); MODULE_AUTHOR("Juha Yrjola"); MODULE_AUTHOR("Pali Rohár <pali@kernel.org>"); +MODULE_DESCRIPTION("RNG driver for TI OMAP3 CPU family"); MODULE_LICENSE("GPL"); diff --git a/drivers/char/hw_random/rockchip-rng.c b/drivers/char/hw_random/rockchip-rng.c new file mode 100644 index 000000000000..fb4a30b95507 --- /dev/null +++ b/drivers/char/hw_random/rockchip-rng.c @@ -0,0 +1,495 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * rockchip-rng.c True Random 
Number Generator driver for Rockchip SoCs + * + * Copyright (c) 2018, Fuzhou Rockchip Electronics Co., Ltd. + * Copyright (c) 2022, Aurelien Jarno + * Copyright (c) 2025, Collabora Ltd. + * Authors: + * Lin Jinhan <troy.lin@rock-chips.com> + * Aurelien Jarno <aurelien@aurel32.net> + * Nicolas Frattaroli <nicolas.frattaroli@collabora.com> + */ +#include <linux/clk.h> +#include <linux/hw_random.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/reset.h> +#include <linux/slab.h> + +#define RK_RNG_AUTOSUSPEND_DELAY 100 +#define RK_RNG_MAX_BYTE 32 +#define RK_RNG_POLL_PERIOD_US 100 +#define RK_RNG_POLL_TIMEOUT_US 10000 + +/* + * TRNG collects osc ring output bit every RK_RNG_SAMPLE_CNT time. The value is + * a tradeoff between speed and quality and has been adjusted to get a quality + * of ~900 (~87.5% of FIPS 140-2 successes). + */ +#define RK_RNG_SAMPLE_CNT 1000 + +/* after how many bytes of output TRNGv1 implementations should be reseeded */ +#define RK_TRNG_V1_AUTO_RESEED_CNT 16000 + +/* TRNG registers from RK3568 TRM-Part2, section 5.4.1 */ +#define TRNG_RST_CTL 0x0004 +#define TRNG_RNG_CTL 0x0400 +#define TRNG_RNG_CTL_LEN_64_BIT (0x00 << 4) +#define TRNG_RNG_CTL_LEN_128_BIT (0x01 << 4) +#define TRNG_RNG_CTL_LEN_192_BIT (0x02 << 4) +#define TRNG_RNG_CTL_LEN_256_BIT (0x03 << 4) +#define TRNG_RNG_CTL_OSC_RING_SPEED_0 (0x00 << 2) +#define TRNG_RNG_CTL_OSC_RING_SPEED_1 (0x01 << 2) +#define TRNG_RNG_CTL_OSC_RING_SPEED_2 (0x02 << 2) +#define TRNG_RNG_CTL_OSC_RING_SPEED_3 (0x03 << 2) +#define TRNG_RNG_CTL_MASK GENMASK(15, 0) +#define TRNG_RNG_CTL_ENABLE BIT(1) +#define TRNG_RNG_CTL_START BIT(0) +#define TRNG_RNG_SAMPLE_CNT 0x0404 +#define TRNG_RNG_DOUT 0x0410 + +/* + * TRNG V1 register definitions + * The TRNG V1 IP is a stand-alone TRNG implementation (not part of a crypto IP) + * and can be found in the Rockchip RK3588 SoC + */ +#define TRNG_V1_CTRL 0x0000 +#define TRNG_V1_CTRL_NOP 0x00 +#define TRNG_V1_CTRL_RAND 0x01 +#define TRNG_V1_CTRL_SEED 0x02 + +#define TRNG_V1_STAT 0x0004 +#define TRNG_V1_STAT_SEEDED BIT(9) +#define TRNG_V1_STAT_GENERATING BIT(30) +#define TRNG_V1_STAT_RESEEDING BIT(31) + +#define TRNG_V1_MODE 0x0008 +#define TRNG_V1_MODE_128_BIT (0x00 << 3) +#define TRNG_V1_MODE_256_BIT (0x01 << 3) + +/* Interrupt Enable register; unused because polling is faster */ +#define TRNG_V1_IE 0x0010 +#define TRNG_V1_IE_GLBL_EN BIT(31) +#define TRNG_V1_IE_SEED_DONE_EN BIT(1) +#define TRNG_V1_IE_RAND_RDY_EN BIT(0) + +#define TRNG_V1_ISTAT 0x0014 +#define TRNG_V1_ISTAT_RAND_RDY BIT(0) + +/* RAND0 ~ RAND7 */ +#define TRNG_V1_RAND0 0x0020 +#define TRNG_V1_RAND7 0x003C + +/* Auto Reseed Register */ +#define TRNG_V1_AUTO_RQSTS 0x0060 + +#define TRNG_V1_VERSION 0x00F0 +#define TRNG_v1_VERSION_CODE 0x46bc +/* end of TRNG_V1 register definitions */ + +/* + * RKRNG register definitions + * The RKRNG IP is a stand-alone TRNG implementation (not part of a crypto IP) + * and can be found in the Rockchip RK3576, Rockchip RK3562 and Rockchip RK3528 + * SoCs. It can either output true randomness (TRNG) or "deterministic" + * randomness derived from hashing the true entropy (DRNG). This driver + * implementation uses just the true entropy, and leaves stretching the entropy + * up to Linux. 
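
The Rockchip control registers defined here use the common "write-enable mask in the upper 16 bits" convention: a bit in the low half only changes if the corresponding bit in the high half is set in the same write. A minimal sketch of a helper for such a register (MY_CTRL and my_rng_update_ctrl are hypothetical):

#include <linux/io.h>
#include <linux/types.h>

/*
 * Update only the bits selected by @mask in a 16-bit control register whose
 * top half is a write-enable mask, as with TRNG_RNG_CTL above.
 */
static void my_rng_update_ctrl(void __iomem *reg, u16 val, u16 mask)
{
	writel(((u32)mask << 16) | val, reg);
}

/* e.g. set a START bit without disturbing the other control bits: */
/*	my_rng_update_ctrl(base + MY_CTRL, MY_CTRL_START, MY_CTRL_START); */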
+ */ +#define RKRNG_CFG 0x0000 +#define RKRNG_CTRL 0x0010 +#define RKRNG_CTRL_REQ_TRNG BIT(4) +#define RKRNG_STATE 0x0014 +#define RKRNG_STATE_TRNG_RDY BIT(4) +#define RKRNG_TRNG_DATA0 0x0050 +#define RKRNG_TRNG_DATA1 0x0054 +#define RKRNG_TRNG_DATA2 0x0058 +#define RKRNG_TRNG_DATA3 0x005C +#define RKRNG_TRNG_DATA4 0x0060 +#define RKRNG_TRNG_DATA5 0x0064 +#define RKRNG_TRNG_DATA6 0x0068 +#define RKRNG_TRNG_DATA7 0x006C +#define RKRNG_READ_LEN 32 + +/* Before removing this assert, give rk3588_rng_read an upper bound of 32 */ +static_assert(RK_RNG_MAX_BYTE <= (TRNG_V1_RAND7 + 4 - TRNG_V1_RAND0), + "You raised RK_RNG_MAX_BYTE and broke rk3588-rng, congrats."); + +struct rk_rng { + struct hwrng rng; + void __iomem *base; + int clk_num; + struct clk_bulk_data *clk_bulks; + const struct rk_rng_soc_data *soc_data; + struct device *dev; +}; + +struct rk_rng_soc_data { + int (*rk_rng_init)(struct hwrng *rng); + int (*rk_rng_read)(struct hwrng *rng, void *buf, size_t max, bool wait); + void (*rk_rng_cleanup)(struct hwrng *rng); + unsigned short quality; + bool reset_optional; +}; + +/* The mask in the upper 16 bits determines the bits that are updated */ +static void rk_rng_write_ctl(struct rk_rng *rng, u32 val, u32 mask) +{ + writel((mask << 16) | val, rng->base + TRNG_RNG_CTL); +} + +static inline void rk_rng_writel(struct rk_rng *rng, u32 val, u32 offset) +{ + writel(val, rng->base + offset); +} + +static inline u32 rk_rng_readl(struct rk_rng *rng, u32 offset) +{ + return readl(rng->base + offset); +} + +static int rk_rng_enable_clks(struct rk_rng *rk_rng) +{ + int ret; + /* start clocks */ + ret = clk_bulk_prepare_enable(rk_rng->clk_num, rk_rng->clk_bulks); + if (ret < 0) { + dev_err(rk_rng->dev, "Failed to enable clocks: %d\n", ret); + return ret; + } + + return 0; +} + +static int rk3568_rng_init(struct hwrng *rng) +{ + struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng); + int ret; + + ret = rk_rng_enable_clks(rk_rng); + if (ret < 0) + return ret; + + /* set the sample period */ + writel(RK_RNG_SAMPLE_CNT, rk_rng->base + TRNG_RNG_SAMPLE_CNT); + + /* set osc ring speed and enable it */ + rk_rng_write_ctl(rk_rng, TRNG_RNG_CTL_LEN_256_BIT | + TRNG_RNG_CTL_OSC_RING_SPEED_0 | + TRNG_RNG_CTL_ENABLE, + TRNG_RNG_CTL_MASK); + + return 0; +} + +static void rk3568_rng_cleanup(struct hwrng *rng) +{ + struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng); + + /* stop TRNG */ + rk_rng_write_ctl(rk_rng, 0, TRNG_RNG_CTL_MASK); + + /* stop clocks */ + clk_bulk_disable_unprepare(rk_rng->clk_num, rk_rng->clk_bulks); +} + +static int rk3568_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) +{ + struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng); + size_t to_read = min_t(size_t, max, RK_RNG_MAX_BYTE); + u32 reg; + int ret = 0; + + ret = pm_runtime_resume_and_get(rk_rng->dev); + if (ret < 0) + return ret; + + /* Start collecting random data */ + rk_rng_write_ctl(rk_rng, TRNG_RNG_CTL_START, TRNG_RNG_CTL_START); + + ret = readl_poll_timeout(rk_rng->base + TRNG_RNG_CTL, reg, + !(reg & TRNG_RNG_CTL_START), + RK_RNG_POLL_PERIOD_US, + RK_RNG_POLL_TIMEOUT_US); + if (ret < 0) + goto out; + + /* Read random data stored in the registers */ + memcpy_fromio(buf, rk_rng->base + TRNG_RNG_DOUT, to_read); +out: + pm_runtime_mark_last_busy(rk_rng->dev); + pm_runtime_put_sync_autosuspend(rk_rng->dev); + + return (ret < 0) ? 
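
The rk3568 read path above kicks a START bit, polls until the hardware clears it, then copies the freshly generated words out of a register bank. A condensed sketch of that poll-and-copy sequence, with made-up register names and offsets:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#define MY_CTRL		0x400
#define MY_CTRL_START	BIT(0)
#define MY_DOUT		0x410
#define MY_POLL_US	100
#define MY_TIMEOUT_US	10000

static int my_rng_read_block(void __iomem *base, void *buf, size_t len)
{
	u32 reg;
	int ret;

	/* Kick off a collection cycle; START self-clears when data is ready. */
	writel((MY_CTRL_START << 16) | MY_CTRL_START, base + MY_CTRL);

	ret = readl_poll_timeout(base + MY_CTRL, reg, !(reg & MY_CTRL_START),
				 MY_POLL_US, MY_TIMEOUT_US);
	if (ret)
		return ret;

	/* The generated words sit in a contiguous bank of output registers. */
	memcpy_fromio(buf, base + MY_DOUT, len);
	return len;
}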
ret : to_read; +} + +static int rk3576_rng_init(struct hwrng *rng) +{ + struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng); + + return rk_rng_enable_clks(rk_rng); +} + +static int rk3576_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) +{ + struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng); + size_t to_read = min_t(size_t, max, RKRNG_READ_LEN); + int ret = 0; + u32 val; + + ret = pm_runtime_resume_and_get(rk_rng->dev); + if (ret < 0) + return ret; + + rk_rng_writel(rk_rng, RKRNG_CTRL_REQ_TRNG | (RKRNG_CTRL_REQ_TRNG << 16), + RKRNG_CTRL); + + if (readl_poll_timeout(rk_rng->base + RKRNG_STATE, val, + (val & RKRNG_STATE_TRNG_RDY), RK_RNG_POLL_PERIOD_US, + RK_RNG_POLL_TIMEOUT_US)) { + dev_err(rk_rng->dev, "timed out waiting for data\n"); + ret = -ETIMEDOUT; + goto out; + } + + rk_rng_writel(rk_rng, RKRNG_STATE_TRNG_RDY, RKRNG_STATE); + + memcpy_fromio(buf, rk_rng->base + RKRNG_TRNG_DATA0, to_read); + +out: + pm_runtime_mark_last_busy(rk_rng->dev); + pm_runtime_put_sync_autosuspend(rk_rng->dev); + + return (ret < 0) ? ret : to_read; +} + +static int rk3588_rng_init(struct hwrng *rng) +{ + struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng); + u32 version, status, mask, istat; + int ret; + + ret = rk_rng_enable_clks(rk_rng); + if (ret < 0) + return ret; + + version = rk_rng_readl(rk_rng, TRNG_V1_VERSION); + if (version != TRNG_v1_VERSION_CODE) { + dev_err(rk_rng->dev, + "wrong trng version, expected = %08x, actual = %08x\n", + TRNG_V1_VERSION, version); + ret = -EFAULT; + goto err_disable_clk; + } + + mask = TRNG_V1_STAT_SEEDED | TRNG_V1_STAT_GENERATING | + TRNG_V1_STAT_RESEEDING; + if (readl_poll_timeout(rk_rng->base + TRNG_V1_STAT, status, + (status & mask) == TRNG_V1_STAT_SEEDED, + RK_RNG_POLL_PERIOD_US, RK_RNG_POLL_TIMEOUT_US) < 0) { + dev_err(rk_rng->dev, "timed out waiting for hwrng to reseed\n"); + ret = -ETIMEDOUT; + goto err_disable_clk; + } + + /* + * clear ISTAT flag, downstream advises to do this to avoid + * auto-reseeding "on power on" + */ + istat = rk_rng_readl(rk_rng, TRNG_V1_ISTAT); + rk_rng_writel(rk_rng, istat, TRNG_V1_ISTAT); + + /* auto reseed after RK_TRNG_V1_AUTO_RESEED_CNT bytes */ + rk_rng_writel(rk_rng, RK_TRNG_V1_AUTO_RESEED_CNT / 16, TRNG_V1_AUTO_RQSTS); + + return 0; +err_disable_clk: + clk_bulk_disable_unprepare(rk_rng->clk_num, rk_rng->clk_bulks); + return ret; +} + +static void rk3588_rng_cleanup(struct hwrng *rng) +{ + struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng); + + clk_bulk_disable_unprepare(rk_rng->clk_num, rk_rng->clk_bulks); +} + +static int rk3588_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) +{ + struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng); + size_t to_read = min_t(size_t, max, RK_RNG_MAX_BYTE); + int ret = 0; + u32 reg; + + ret = pm_runtime_resume_and_get(rk_rng->dev); + if (ret < 0) + return ret; + + /* Clear ISTAT, even without interrupts enabled, this will be updated */ + reg = rk_rng_readl(rk_rng, TRNG_V1_ISTAT); + rk_rng_writel(rk_rng, reg, TRNG_V1_ISTAT); + + /* generate 256 bits of random data */ + rk_rng_writel(rk_rng, TRNG_V1_MODE_256_BIT, TRNG_V1_MODE); + rk_rng_writel(rk_rng, TRNG_V1_CTRL_RAND, TRNG_V1_CTRL); + + ret = readl_poll_timeout_atomic(rk_rng->base + TRNG_V1_ISTAT, reg, + (reg & TRNG_V1_ISTAT_RAND_RDY), 0, + RK_RNG_POLL_TIMEOUT_US); + if (ret < 0) + goto out; + + /* Read random data that's in registers TRNG_V1_RAND0 through RAND7 */ + memcpy_fromio(buf, rk_rng->base + TRNG_V1_RAND0, to_read); + +out: + /* Clear ISTAT */ + 
rk_rng_writel(rk_rng, reg, TRNG_V1_ISTAT); + /* close the TRNG */ + rk_rng_writel(rk_rng, TRNG_V1_CTRL_NOP, TRNG_V1_CTRL); + + pm_runtime_mark_last_busy(rk_rng->dev); + pm_runtime_put_sync_autosuspend(rk_rng->dev); + + return (ret < 0) ? ret : to_read; +} + +static const struct rk_rng_soc_data rk3568_soc_data = { + .rk_rng_init = rk3568_rng_init, + .rk_rng_read = rk3568_rng_read, + .rk_rng_cleanup = rk3568_rng_cleanup, + .quality = 900, + .reset_optional = false, +}; + +static const struct rk_rng_soc_data rk3576_soc_data = { + .rk_rng_init = rk3576_rng_init, + .rk_rng_read = rk3576_rng_read, + .rk_rng_cleanup = rk3588_rng_cleanup, + .quality = 999, /* as determined by actual testing */ + .reset_optional = true, +}; + +static const struct rk_rng_soc_data rk3588_soc_data = { + .rk_rng_init = rk3588_rng_init, + .rk_rng_read = rk3588_rng_read, + .rk_rng_cleanup = rk3588_rng_cleanup, + .quality = 999, /* as determined by actual testing */ + .reset_optional = true, +}; + +static int rk_rng_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct reset_control *rst; + struct rk_rng *rk_rng; + int ret; + + rk_rng = devm_kzalloc(dev, sizeof(*rk_rng), GFP_KERNEL); + if (!rk_rng) + return -ENOMEM; + + rk_rng->soc_data = of_device_get_match_data(dev); + rk_rng->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(rk_rng->base)) + return PTR_ERR(rk_rng->base); + + rk_rng->clk_num = devm_clk_bulk_get_all(dev, &rk_rng->clk_bulks); + if (rk_rng->clk_num < 0) + return dev_err_probe(dev, rk_rng->clk_num, + "Failed to get clks property\n"); + + if (rk_rng->soc_data->reset_optional) + rst = devm_reset_control_array_get_optional_exclusive(dev); + else + rst = devm_reset_control_array_get_exclusive(dev); + + if (rst) { + if (IS_ERR(rst)) + return dev_err_probe(dev, PTR_ERR(rst), "Failed to get reset property\n"); + + reset_control_assert(rst); + udelay(2); + reset_control_deassert(rst); + } + + platform_set_drvdata(pdev, rk_rng); + + rk_rng->rng.name = dev_driver_string(dev); + if (!IS_ENABLED(CONFIG_PM)) { + rk_rng->rng.init = rk_rng->soc_data->rk_rng_init; + rk_rng->rng.cleanup = rk_rng->soc_data->rk_rng_cleanup; + } + rk_rng->rng.read = rk_rng->soc_data->rk_rng_read; + rk_rng->dev = dev; + rk_rng->rng.quality = rk_rng->soc_data->quality; + + pm_runtime_set_autosuspend_delay(dev, RK_RNG_AUTOSUSPEND_DELAY); + pm_runtime_use_autosuspend(dev); + ret = devm_pm_runtime_enable(dev); + if (ret) + return dev_err_probe(dev, ret, "Runtime pm activation failed.\n"); + + ret = devm_hwrng_register(dev, &rk_rng->rng); + if (ret) + return dev_err_probe(dev, ret, "Failed to register Rockchip hwrng\n"); + + return 0; +} + +static int __maybe_unused rk_rng_runtime_suspend(struct device *dev) +{ + struct rk_rng *rk_rng = dev_get_drvdata(dev); + + rk_rng->soc_data->rk_rng_cleanup(&rk_rng->rng); + + return 0; +} + +static int __maybe_unused rk_rng_runtime_resume(struct device *dev) +{ + struct rk_rng *rk_rng = dev_get_drvdata(dev); + + return rk_rng->soc_data->rk_rng_init(&rk_rng->rng); +} + +static const struct dev_pm_ops rk_rng_pm_ops = { + SET_RUNTIME_PM_OPS(rk_rng_runtime_suspend, + rk_rng_runtime_resume, NULL) + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) +}; + +static const struct of_device_id rk_rng_dt_match[] = { + { .compatible = "rockchip,rk3568-rng", .data = (void *)&rk3568_soc_data }, + { .compatible = "rockchip,rk3576-rng", .data = (void *)&rk3576_soc_data }, + { .compatible = "rockchip,rk3588-rng", .data = (void *)&rk3588_soc_data }, + { /* sentinel 
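
When CONFIG_PM is disabled there is nobody to call the runtime suspend/resume hooks, so the probe above falls back to wiring the same power-up/power-down work into the hwrng init/cleanup callbacks. A sketch of that decision, assuming hypothetical my_rng_hw_on/my_rng_hw_off helpers:

#include <linux/hw_random.h>

static int my_rng_hw_on(struct hwrng *rng)
{
	/* enable clocks, wait for the generator to be seeded */
	return 0;
}

static void my_rng_hw_off(struct hwrng *rng)
{
	/* stop the generator and gate its clocks */
}

static void my_rng_setup_ops(struct hwrng *rng)
{
	/*
	 * With CONFIG_PM the runtime-PM callbacks power the block on demand;
	 * without it, let the hwrng core call init/cleanup once instead.
	 */
	if (!IS_ENABLED(CONFIG_PM)) {
		rng->init = my_rng_hw_on;
		rng->cleanup = my_rng_hw_off;
	}
}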
*/ }, +}; + +MODULE_DEVICE_TABLE(of, rk_rng_dt_match); + +static struct platform_driver rk_rng_driver = { + .driver = { + .name = "rockchip-rng", + .pm = &rk_rng_pm_ops, + .of_match_table = rk_rng_dt_match, + }, + .probe = rk_rng_probe, +}; + +module_platform_driver(rk_rng_driver); + +MODULE_DESCRIPTION("Rockchip True Random Number Generator driver"); +MODULE_AUTHOR("Lin Jinhan <troy.lin@rock-chips.com>"); +MODULE_AUTHOR("Aurelien Jarno <aurelien@aurel32.net>"); +MODULE_AUTHOR("Daniel Golle <daniel@makrotopia.org>"); +MODULE_AUTHOR("Nicolas Frattaroli <nicolas.frattaroli@collabora.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/char/hw_random/stm32-rng.c b/drivers/char/hw_random/stm32-rng.c index 379bc245c520..98edbe796bc5 100644 --- a/drivers/char/hw_random/stm32-rng.c +++ b/drivers/char/hw_random/stm32-rng.c @@ -4,6 +4,7 @@ */ #include <linux/clk.h> +#include <linux/clk-provider.h> #include <linux/delay.h> #include <linux/hw_random.h> #include <linux/io.h> @@ -49,6 +50,7 @@ struct stm32_rng_data { uint max_clock_rate; + uint nb_clock; u32 cr; u32 nscr; u32 htcr; @@ -70,8 +72,9 @@ struct stm32_rng_config { struct stm32_rng_private { struct hwrng rng; + struct device *dev; void __iomem *base; - struct clk *clk; + struct clk_bulk_data *clk_bulk; struct reset_control *rst; struct stm32_rng_config pm_conf; const struct stm32_rng_data *data; @@ -99,7 +102,7 @@ struct stm32_rng_private { */ static int stm32_rng_conceal_seed_error_cond_reset(struct stm32_rng_private *priv) { - struct device *dev = (struct device *)priv->rng.priv; + struct device *dev = priv->dev; u32 sr = readl_relaxed(priv->base + RNG_SR); u32 cr = readl_relaxed(priv->base + RNG_CR); int err; @@ -171,7 +174,7 @@ static int stm32_rng_conceal_seed_error(struct hwrng *rng) { struct stm32_rng_private *priv = container_of(rng, struct stm32_rng_private, rng); - dev_dbg((struct device *)priv->rng.priv, "Concealing seed error\n"); + dev_dbg(priv->dev, "Concealing seed error\n"); if (priv->data->has_cond_reset) return stm32_rng_conceal_seed_error_cond_reset(priv); @@ -187,7 +190,9 @@ static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait) int retval = 0, err = 0; u32 sr; - pm_runtime_get_sync((struct device *) priv->rng.priv); + retval = pm_runtime_resume_and_get(priv->dev); + if (retval) + return retval; if (readl_relaxed(priv->base + RNG_SR) & RNG_SR_SEIS) stm32_rng_conceal_seed_error(rng); @@ -204,8 +209,7 @@ static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait) sr, sr, 10, 50000); if (err) { - dev_err((struct device *)priv->rng.priv, - "%s: timeout %x!\n", __func__, sr); + dev_err(priv->dev, "%s: timeout %x!\n", __func__, sr); break; } } else if (!sr) { @@ -218,9 +222,9 @@ static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait) err = stm32_rng_conceal_seed_error(rng); i++; if (err && i > RNG_NB_RECOVER_TRIES) { - dev_err((struct device *)priv->rng.priv, - "Couldn't recover from seed error\n"); - return -ENOTRECOVERABLE; + dev_err(priv->dev, "Couldn't recover from seed error\n"); + retval = -ENOTRECOVERABLE; + goto exit_rpm; } continue; @@ -236,9 +240,9 @@ static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait) err = stm32_rng_conceal_seed_error(rng); i++; if (err && i > RNG_NB_RECOVER_TRIES) { - dev_err((struct device *)priv->rng.priv, - "Couldn't recover from seed error"); - return -ENOTRECOVERABLE; + dev_err(priv->dev, "Couldn't recover from seed error"); + retval = -ENOTRECOVERABLE; + goto exit_rpm; } continue; @@ -250,8 +254,9 
@@ static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait) max -= sizeof(u32); } - pm_runtime_mark_last_busy((struct device *) priv->rng.priv); - pm_runtime_put_sync_autosuspend((struct device *) priv->rng.priv); +exit_rpm: + pm_runtime_mark_last_busy(priv->dev); + pm_runtime_put_sync_autosuspend(priv->dev); return retval || !wait ? retval : -EIO; } @@ -263,7 +268,7 @@ static uint stm32_rng_clock_freq_restrain(struct hwrng *rng) unsigned long clock_rate = 0; uint clock_div = 0; - clock_rate = clk_get_rate(priv->clk); + clock_rate = clk_get_rate(priv->clk_bulk[0].clk); /* * Get the exponent to apply on the CLKDIV field in RNG_CR register @@ -273,7 +278,7 @@ static uint stm32_rng_clock_freq_restrain(struct hwrng *rng) while ((clock_rate >> clock_div) > priv->data->max_clock_rate) clock_div++; - pr_debug("RNG clk rate : %lu\n", clk_get_rate(priv->clk) >> clock_div); + pr_debug("RNG clk rate : %lu\n", clk_get_rate(priv->clk_bulk[0].clk) >> clock_div); return clock_div; } @@ -285,7 +290,7 @@ static int stm32_rng_init(struct hwrng *rng) int err; u32 reg; - err = clk_prepare_enable(priv->clk); + err = clk_bulk_prepare_enable(priv->data->nb_clock, priv->clk_bulk); if (err) return err; @@ -325,9 +330,8 @@ static int stm32_rng_init(struct hwrng *rng) (!(reg & RNG_CR_CONDRST)), 10, 50000); if (err) { - clk_disable_unprepare(priv->clk); - dev_err((struct device *)priv->rng.priv, - "%s: timeout %x!\n", __func__, reg); + clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk); + dev_err(priv->dev, "%s: timeout %x!\n", __func__, reg); return -EINVAL; } } else { @@ -353,13 +357,15 @@ static int stm32_rng_init(struct hwrng *rng) err = readl_relaxed_poll_timeout_atomic(priv->base + RNG_SR, reg, reg & RNG_SR_DRDY, 10, 100000); - if (err | (reg & ~RNG_SR_DRDY)) { - clk_disable_unprepare(priv->clk); - dev_err((struct device *)priv->rng.priv, - "%s: timeout:%x SR: %x!\n", __func__, err, reg); + if (err || (reg & ~RNG_SR_DRDY)) { + clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk); + dev_err(priv->dev, "%s: timeout:%x SR: %x!\n", __func__, err, reg); + return -EINVAL; } + clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk); + return 0; } @@ -376,7 +382,8 @@ static int __maybe_unused stm32_rng_runtime_suspend(struct device *dev) reg = readl_relaxed(priv->base + RNG_CR); reg &= ~RNG_CR_RNGEN; writel_relaxed(reg, priv->base + RNG_CR); - clk_disable_unprepare(priv->clk); + + clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk); return 0; } @@ -384,6 +391,11 @@ static int __maybe_unused stm32_rng_runtime_suspend(struct device *dev) static int __maybe_unused stm32_rng_suspend(struct device *dev) { struct stm32_rng_private *priv = dev_get_drvdata(dev); + int err; + + err = clk_bulk_prepare_enable(priv->data->nb_clock, priv->clk_bulk); + if (err) + return err; if (priv->data->has_cond_reset) { priv->pm_conf.nscr = readl_relaxed(priv->base + RNG_NSCR); @@ -395,7 +407,7 @@ static int __maybe_unused stm32_rng_suspend(struct device *dev) writel_relaxed(priv->pm_conf.cr, priv->base + RNG_CR); - clk_disable_unprepare(priv->clk); + clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk); return 0; } @@ -406,7 +418,7 @@ static int __maybe_unused stm32_rng_runtime_resume(struct device *dev) int err; u32 reg; - err = clk_prepare_enable(priv->clk); + err = clk_bulk_prepare_enable(priv->data->nb_clock, priv->clk_bulk); if (err) return err; @@ -426,7 +438,7 @@ static int __maybe_unused stm32_rng_resume(struct device *dev) int err; u32 reg; - err = 
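
stm32-rng now manages one or two clocks through the clk_bulk API rather than a single struct clk. The bracket looks roughly like the sketch below, assuming nb_clock/clk_bulk fields similar to the ones added above:

#include <linux/clk.h>
#include <linux/device.h>

struct my_rng {
	struct device *dev;
	int nb_clock;
	struct clk_bulk_data *clk_bulk;
};

static int my_rng_get_clocks(struct my_rng *priv)
{
	int ret = devm_clk_bulk_get_all(priv->dev, &priv->clk_bulk);

	if (ret <= 0)
		return ret ? ret : -ENOENT;	/* at least one clock is required */
	priv->nb_clock = ret;
	return 0;
}

static int my_rng_clocks_on(struct my_rng *priv)
{
	/* Prepares and enables every clock, unwinding on failure. */
	return clk_bulk_prepare_enable(priv->nb_clock, priv->clk_bulk);
}

static void my_rng_clocks_off(struct my_rng *priv)
{
	clk_bulk_disable_unprepare(priv->nb_clock, priv->clk_bulk);
}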
clk_prepare_enable(priv->clk); + err = clk_bulk_prepare_enable(priv->data->nb_clock, priv->clk_bulk); if (err) return err; @@ -454,9 +466,8 @@ static int __maybe_unused stm32_rng_resume(struct device *dev) reg & ~RNG_CR_CONDRST, 10, 100000); if (err) { - clk_disable_unprepare(priv->clk); - dev_err((struct device *)priv->rng.priv, - "%s: timeout:%x CR: %x!\n", __func__, err, reg); + clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk); + dev_err(priv->dev, "%s: timeout:%x CR: %x!\n", __func__, err, reg); return -EINVAL; } } else { @@ -465,6 +476,8 @@ static int __maybe_unused stm32_rng_resume(struct device *dev) writel_relaxed(reg, priv->base + RNG_CR); } + clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk); + return 0; } @@ -475,9 +488,19 @@ static const struct dev_pm_ops __maybe_unused stm32_rng_pm_ops = { stm32_rng_resume) }; +static const struct stm32_rng_data stm32mp25_rng_data = { + .has_cond_reset = true, + .max_clock_rate = 48000000, + .nb_clock = 2, + .cr = 0x00F00D00, + .nscr = 0x2B5BB, + .htcr = 0x969D, +}; + static const struct stm32_rng_data stm32mp13_rng_data = { .has_cond_reset = true, .max_clock_rate = 48000000, + .nb_clock = 1, .cr = 0x00F00D00, .nscr = 0x2B5BB, .htcr = 0x969D, @@ -485,11 +508,16 @@ static const struct stm32_rng_data stm32mp13_rng_data = { static const struct stm32_rng_data stm32_rng_data = { .has_cond_reset = false, - .max_clock_rate = 3000000, + .max_clock_rate = 48000000, + .nb_clock = 1, }; static const struct of_device_id stm32_rng_match[] = { { + .compatible = "st,stm32mp25-rng", + .data = &stm32mp25_rng_data, + }, + { .compatible = "st,stm32mp13-rng", .data = &stm32mp13_rng_data, }, @@ -507,8 +535,9 @@ static int stm32_rng_probe(struct platform_device *ofdev) struct device_node *np = ofdev->dev.of_node; struct stm32_rng_private *priv; struct resource *res; + int ret; - priv = devm_kzalloc(dev, sizeof(struct stm32_rng_private), GFP_KERNEL); + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; @@ -516,10 +545,6 @@ static int stm32_rng_probe(struct platform_device *ofdev) if (IS_ERR(priv->base)) return PTR_ERR(priv->base); - priv->clk = devm_clk_get(&ofdev->dev, NULL); - if (IS_ERR(priv->clk)) - return PTR_ERR(priv->clk); - priv->rst = devm_reset_control_get(&ofdev->dev, NULL); if (!IS_ERR(priv->rst)) { reset_control_assert(priv->rst); @@ -529,6 +554,7 @@ static int stm32_rng_probe(struct platform_device *ofdev) priv->ced = of_property_read_bool(np, "clock-error-detect"); priv->lock_conf = of_property_read_bool(np, "st,rng-lock-conf"); + priv->dev = dev; priv->data = of_device_get_match_data(dev); if (!priv->data) @@ -539,9 +565,30 @@ static int stm32_rng_probe(struct platform_device *ofdev) priv->rng.name = dev_driver_string(dev); priv->rng.init = stm32_rng_init; priv->rng.read = stm32_rng_read; - priv->rng.priv = (unsigned long) dev; priv->rng.quality = 900; + if (!priv->data->nb_clock || priv->data->nb_clock > 2) + return -EINVAL; + + ret = devm_clk_bulk_get_all(dev, &priv->clk_bulk); + if (ret != priv->data->nb_clock) + return dev_err_probe(dev, -EINVAL, "Failed to get clocks: %d\n", ret); + + if (priv->data->nb_clock == 2) { + const char *id = priv->clk_bulk[1].id; + struct clk *clk = priv->clk_bulk[1].clk; + + if (!priv->clk_bulk[0].id || !priv->clk_bulk[1].id) + return dev_err_probe(dev, -EINVAL, "Missing clock name\n"); + + if (strcmp(priv->clk_bulk[0].id, "core")) { + priv->clk_bulk[1].id = priv->clk_bulk[0].id; + priv->clk_bulk[1].clk = priv->clk_bulk[0].clk; + priv->clk_bulk[0].id = id; + 
priv->clk_bulk[0].clk = clk; + } + } + pm_runtime_set_autosuspend_delay(dev, 100); pm_runtime_use_autosuspend(dev); pm_runtime_enable(dev); @@ -556,7 +603,7 @@ static struct platform_driver stm32_rng_driver = { .of_match_table = stm32_rng_match, }, .probe = stm32_rng_probe, - .remove_new = stm32_rng_remove, + .remove = stm32_rng_remove, }; module_platform_driver(stm32_rng_driver); diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c index 65b8260339f5..b95f6d0f17ed 100644 --- a/drivers/char/hw_random/timeriomem-rng.c +++ b/drivers/char/hw_random/timeriomem-rng.c @@ -152,8 +152,7 @@ static int timeriomem_rng_probe(struct platform_device *pdev) priv->period = ns_to_ktime(period * NSEC_PER_USEC); init_completion(&priv->completion); - hrtimer_init(&priv->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); - priv->timer.function = timeriomem_rng_trigger; + hrtimer_setup(&priv->timer, timeriomem_rng_trigger, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); priv->rng_ops.name = dev_name(&pdev->dev); priv->rng_ops.read = timeriomem_rng_read; @@ -193,7 +192,7 @@ static struct platform_driver timeriomem_rng_driver = { .of_match_table = timeriomem_rng_match, }, .probe = timeriomem_rng_probe, - .remove_new = timeriomem_rng_remove, + .remove = timeriomem_rng_remove, }; module_platform_driver(timeriomem_rng_driver); diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c index 7a4b45393acb..dd998f4fe4f2 100644 --- a/drivers/char/hw_random/virtio-rng.c +++ b/drivers/char/hw_random/virtio-rng.c @@ -245,7 +245,6 @@ static const struct virtio_device_id id_table[] = { static struct virtio_driver virtio_rng_driver = { .driver.name = KBUILD_MODNAME, - .driver.owner = THIS_MODULE, .id_table = id_table, .probe = virtrng_probe, .remove = virtrng_remove, diff --git a/drivers/char/hw_random/xgene-rng.c b/drivers/char/hw_random/xgene-rng.c index 642d13519464..a1a751074f7e 100644 --- a/drivers/char/hw_random/xgene-rng.c +++ b/drivers/char/hw_random/xgene-rng.c @@ -93,7 +93,7 @@ static void xgene_rng_expired_timer(struct timer_list *t) /* Clear failure counter as timer expired */ disable_irq(ctx->irq); ctx->failure_cnt = 0; - del_timer(&ctx->failure_timer); + timer_delete(&ctx->failure_timer); enable_irq(ctx->irq); } @@ -375,7 +375,7 @@ MODULE_DEVICE_TABLE(of, xgene_rng_of_match); static struct platform_driver xgene_rng_driver = { .probe = xgene_rng_probe, - .remove_new = xgene_rng_remove, + .remove = xgene_rng_remove, .driver = { .name = "xgene-rng", .of_match_table = xgene_rng_of_match, diff --git a/drivers/char/ipmi/Makefile b/drivers/char/ipmi/Makefile index cb6138b8ded9..e0944547c9d0 100644 --- a/drivers/char/ipmi/Makefile +++ b/drivers/char/ipmi/Makefile @@ -5,13 +5,10 @@ ipmi_si-y := ipmi_si_intf.o ipmi_kcs_sm.o ipmi_smic_sm.o ipmi_bt_sm.o \ ipmi_si_hotmod.o ipmi_si_hardcode.o ipmi_si_platform.o \ - ipmi_si_port_io.o ipmi_si_mem_io.o -ifdef CONFIG_PCI -ipmi_si-y += ipmi_si_pci.o -endif -ifdef CONFIG_PARISC -ipmi_si-y += ipmi_si_parisc.o -endif + ipmi_si_mem_io.o +ipmi_si-$(CONFIG_HAS_IOPORT) += ipmi_si_port_io.o +ipmi_si-$(CONFIG_PCI) += ipmi_si_pci.o +ipmi_si-$(CONFIG_PARISC) += ipmi_si_parisc.o obj-$(CONFIG_IPMI_HANDLER) += ipmi_msghandler.o obj-$(CONFIG_IPMI_DEVICE_INTERFACE) += ipmi_devintf.o diff --git a/drivers/char/ipmi/bt-bmc.c b/drivers/char/ipmi/bt-bmc.c index 7450904e330a..77146b5c762b 100644 --- a/drivers/char/ipmi/bt-bmc.c +++ b/drivers/char/ipmi/bt-bmc.c @@ -459,14 +459,13 @@ static int bt_bmc_probe(struct platform_device *pdev) return 0; } 
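
bt_bmc_remove below, like the hw_random drivers earlier in this diff, follows the tree-wide move from .remove_new to a .remove callback that returns void. A sketch of the resulting shape, with hypothetical names:

#include <linux/platform_device.h>

static int my_bmc_probe(struct platform_device *pdev)
{
	return 0;
}

static void my_bmc_remove(struct platform_device *pdev)
{
	/*
	 * Nothing useful can be done with an error at remove time, so the
	 * callback returns void and simply tears the device down.
	 */
}

static struct platform_driver my_bmc_driver = {
	.driver = {
		.name = "my-bmc",
	},
	.probe  = my_bmc_probe,
	.remove = my_bmc_remove,
};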
-static int bt_bmc_remove(struct platform_device *pdev) +static void bt_bmc_remove(struct platform_device *pdev) { struct bt_bmc *bt_bmc = dev_get_drvdata(&pdev->dev); misc_deregister(&bt_bmc->miscdev); if (bt_bmc->irq < 0) - del_timer_sync(&bt_bmc->poll_timer); - return 0; + timer_delete_sync(&bt_bmc->poll_timer); } static const struct of_device_id bt_bmc_match[] = { diff --git a/drivers/char/ipmi/ipmb_dev_int.c b/drivers/char/ipmi/ipmb_dev_int.c index 49100845fcb7..ee2bdc7ed0da 100644 --- a/drivers/char/ipmi/ipmb_dev_int.c +++ b/drivers/char/ipmi/ipmb_dev_int.c @@ -321,6 +321,9 @@ static int ipmb_probe(struct i2c_client *client) ipmb_dev->miscdev.name = devm_kasprintf(&client->dev, GFP_KERNEL, "%s%d", "ipmb-", client->adapter->nr); + if (!ipmb_dev->miscdev.name) + return -ENOMEM; + ipmb_dev->miscdev.fops = &ipmb_fops; ipmb_dev->miscdev.parent = &client->dev; ret = misc_register(&ipmb_dev->miscdev); @@ -350,16 +353,18 @@ static void ipmb_remove(struct i2c_client *client) } static const struct i2c_device_id ipmb_id[] = { - { "ipmb-dev", 0 }, - {}, + { "ipmb-dev" }, + {} }; MODULE_DEVICE_TABLE(i2c, ipmb_id); +#ifdef CONFIG_ACPI static const struct acpi_device_id acpi_ipmb_id[] = { { "IPMB0001", 0 }, {}, }; MODULE_DEVICE_TABLE(acpi, acpi_ipmb_id); +#endif static struct i2c_driver ipmb_driver = { .driver = { diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c index 332082e02ea5..e6ba35b71f10 100644 --- a/drivers/char/ipmi/ipmi_devintf.c +++ b/drivers/char/ipmi/ipmi_devintf.c @@ -122,12 +122,9 @@ out: static int ipmi_release(struct inode *inode, struct file *file) { struct ipmi_file_private *priv = file->private_data; - int rv; struct ipmi_recv_msg *msg, *next; - rv = ipmi_destroy_user(priv->user); - if (rv) - return rv; + ipmi_destroy_user(priv->user); list_for_each_entry_safe(msg, next, &priv->recv_msgs, link) ipmi_free_recv_msg(msg); diff --git a/drivers/char/ipmi/ipmi_ipmb.c b/drivers/char/ipmi/ipmi_ipmb.c index 4e335832fc26..6a4f279c7c1f 100644 --- a/drivers/char/ipmi/ipmi_ipmb.c +++ b/drivers/char/ipmi/ipmi_ipmb.c @@ -561,8 +561,8 @@ MODULE_DEVICE_TABLE(of, of_ipmi_ipmb_match); #endif static const struct i2c_device_id ipmi_ipmb_id[] = { - { DEVICE_NAME, 0 }, - {}, + { DEVICE_NAME }, + {} }; MODULE_DEVICE_TABLE(i2c, ipmi_ipmb_id); diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index b0eedc4595b3..064944ae9fdc 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -27,7 +27,6 @@ #include <linux/ipmi_smi.h> #include <linux/notifier.h> #include <linux/init.h> -#include <linux/proc_fs.h> #include <linux/rcupdate.h> #include <linux/interrupt.h> #include <linux/moduleparam.h> @@ -41,11 +40,12 @@ static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void); static int ipmi_init_msghandler(void); -static void smi_recv_tasklet(struct tasklet_struct *t); +static void smi_work(struct work_struct *t); static void handle_new_recv_msgs(struct ipmi_smi *intf); static void need_waiter(struct ipmi_smi *intf); static int handle_one_recv_msg(struct ipmi_smi *intf, struct ipmi_smi_msg *msg); +static void intf_free(struct kref *ref); static bool initialized; static bool drvregistered; @@ -180,14 +180,8 @@ MODULE_PARM_DESC(max_msgs_per_user, struct ipmi_user { struct list_head link; - /* - * Set to NULL when the user is destroyed, a pointer to myself - * so srcu_dereference can be used on it. 
- */ - struct ipmi_user *self; - struct srcu_struct release_barrier; - struct kref refcount; + refcount_t destroyed; /* The upper layer that handles receive messages. */ const struct ipmi_user_hndl *handler; @@ -200,30 +194,8 @@ struct ipmi_user { bool gets_events; atomic_t nr_msgs; - - /* Free must run in process context for RCU cleanup. */ - struct work_struct remove_work; }; -static struct workqueue_struct *remove_work_wq; - -static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index) - __acquires(user->release_barrier) -{ - struct ipmi_user *ruser; - - *index = srcu_read_lock(&user->release_barrier); - ruser = srcu_dereference(user->self, &user->release_barrier); - if (!ruser) - srcu_read_unlock(&user->release_barrier, *index); - return ruser; -} - -static void release_ipmi_user(struct ipmi_user *user, int index) -{ - srcu_read_unlock(&user->release_barrier, index); -} - struct cmd_rcvr { struct list_head link; @@ -327,6 +299,8 @@ struct bmc_device { }; #define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev) +static struct workqueue_struct *bmc_remove_work_wq; + static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc, struct ipmi_device_id *id, bool *guid_set, guid_t *guid); @@ -451,11 +425,10 @@ struct ipmi_smi { struct list_head link; /* - * The list of upper layers that are using me. seq_lock write - * protects this. Read protection is with srcu. + * The list of upper layers that are using me. */ struct list_head users; - struct srcu_struct users_srcu; + struct mutex users_mutex; atomic_t nr_users; struct device_attribute nr_users_devattr; struct device_attribute nr_msgs_devattr; @@ -496,15 +469,22 @@ struct ipmi_smi { int curr_seq; /* - * Messages queued for delivery. If delivery fails (out of memory - * for instance), They will stay in here to be processed later in a - * periodic timer interrupt. The tasklet is for handling received - * messages directly from the handler. + * Messages queued for deliver to the user. + */ + struct mutex user_msgs_mutex; + struct list_head user_msgs; + + /* + * Messages queued for processing. If processing fails (out + * of memory for instance), They will stay in here to be + * processed later in a periodic timer interrupt. The + * workqueue is for handling received messages directly from + * the handler. */ spinlock_t waiting_rcv_msgs_lock; struct list_head waiting_rcv_msgs; atomic_t watchdog_pretimeouts_to_deliver; - struct tasklet_struct recv_tasklet; + struct work_struct smi_work; spinlock_t xmit_msgs_lock; struct list_head xmit_msgs; @@ -522,10 +502,9 @@ struct ipmi_smi { * Events that were queues because no one was there to receive * them. */ - spinlock_t events_lock; /* For dealing with event stuff. */ + struct mutex events_mutex; /* For dealing with event stuff. */ struct list_head waiting_events; unsigned int waiting_events_count; /* How many events in queue? */ - char delivering_events; char event_msg_printed; /* How many users are waiting for events? 
*/ @@ -613,6 +592,28 @@ static int __ipmi_bmc_register(struct ipmi_smi *intf, bool guid_set, guid_t *guid, int intf_num); static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id); +static void free_ipmi_user(struct kref *ref) +{ + struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount); + struct module *owner; + + owner = user->intf->owner; + kref_put(&user->intf->refcount, intf_free); + module_put(owner); + vfree(user); +} + +static void release_ipmi_user(struct ipmi_user *user) +{ + kref_put(&user->refcount, free_ipmi_user); +} + +static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user) +{ + if (!kref_get_unless_zero(&user->refcount)) + return NULL; + return user; +} /* * The driver model view of the IPMI messaging driver. @@ -630,9 +631,6 @@ static DEFINE_MUTEX(ipmidriver_mutex); static LIST_HEAD(ipmi_interfaces); static DEFINE_MUTEX(ipmi_interfaces_mutex); -#define ipmi_interfaces_mutex_held() \ - lockdep_is_held(&ipmi_interfaces_mutex) -static struct srcu_struct ipmi_interfaces_srcu; /* * List of watchers that want to know when smi's are added and deleted. @@ -698,27 +696,20 @@ static void free_smi_msg_list(struct list_head *q) } } -static void clean_up_interface_data(struct ipmi_smi *intf) +static void intf_free(struct kref *ref) { + struct ipmi_smi *intf = container_of(ref, struct ipmi_smi, refcount); int i; struct cmd_rcvr *rcvr, *rcvr2; - struct list_head list; - - tasklet_kill(&intf->recv_tasklet); free_smi_msg_list(&intf->waiting_rcv_msgs); free_recv_msg_list(&intf->waiting_events); /* * Wholesale remove all the entries from the list in the - * interface and wait for RCU to know that none are in use. + * interface. No need for locks, this is single-threaded. */ - mutex_lock(&intf->cmd_rcvrs_mutex); - INIT_LIST_HEAD(&list); - list_splice_init_rcu(&intf->cmd_rcvrs, &list, synchronize_rcu); - mutex_unlock(&intf->cmd_rcvrs_mutex); - - list_for_each_entry_safe(rcvr, rcvr2, &list, link) + list_for_each_entry_safe(rcvr, rcvr2, &intf->cmd_rcvrs, link) kfree(rcvr); for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) { @@ -726,20 +717,17 @@ static void clean_up_interface_data(struct ipmi_smi *intf) && (intf->seq_table[i].recv_msg)) ipmi_free_recv_msg(intf->seq_table[i].recv_msg); } -} - -static void intf_free(struct kref *ref) -{ - struct ipmi_smi *intf = container_of(ref, struct ipmi_smi, refcount); - clean_up_interface_data(intf); kfree(intf); } int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher) { struct ipmi_smi *intf; - int index, rv; + unsigned int count = 0, i; + int *interfaces = NULL; + struct device **devices = NULL; + int rv = 0; /* * Make sure the driver is actually initialized, this handles @@ -753,20 +741,53 @@ int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher) list_add(&watcher->link, &smi_watchers); - index = srcu_read_lock(&ipmi_interfaces_srcu); - list_for_each_entry_rcu(intf, &ipmi_interfaces, link, - lockdep_is_held(&smi_watchers_mutex)) { - int intf_num = READ_ONCE(intf->intf_num); + /* + * Build an array of ipmi interfaces and fill it in, and + * another array of the devices. We can't call the callback + * with ipmi_interfaces_mutex held. smi_watchers_mutex will + * keep things in order for the user. 
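
The SRCU-protected user lookup is replaced above by plain reference counting: a user is only handed out while its refcount can still be raised. A simplified sketch of the acquire/release pair, using a hypothetical my_user type rather than struct ipmi_user:

#include <linux/kref.h>
#include <linux/slab.h>

struct my_user {
	struct kref refcount;
	/* ... handler pointers, private data ... */
};

static void my_user_free(struct kref *ref)
{
	kfree(container_of(ref, struct my_user, refcount));
}

static struct my_user *my_user_acquire(struct my_user *user)
{
	/* Fails (returns NULL) once the last reference is on its way out. */
	if (!kref_get_unless_zero(&user->refcount))
		return NULL;
	return user;
}

static void my_user_release(struct my_user *user)
{
	kref_put(&user->refcount, my_user_free);
}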
+ */ + mutex_lock(&ipmi_interfaces_mutex); + list_for_each_entry(intf, &ipmi_interfaces, link) + count++; + if (count > 0) { + interfaces = kmalloc_array(count, sizeof(*interfaces), + GFP_KERNEL); + if (!interfaces) { + rv = -ENOMEM; + } else { + devices = kmalloc_array(count, sizeof(*devices), + GFP_KERNEL); + if (!devices) { + kfree(interfaces); + interfaces = NULL; + rv = -ENOMEM; + } + } + count = 0; + } + if (interfaces) { + list_for_each_entry(intf, &ipmi_interfaces, link) { + int intf_num = READ_ONCE(intf->intf_num); - if (intf_num == -1) - continue; - watcher->new_smi(intf_num, intf->si_dev); + if (intf_num == -1) + continue; + devices[count] = intf->si_dev; + interfaces[count++] = intf_num; + } + } + mutex_unlock(&ipmi_interfaces_mutex); + + if (interfaces) { + for (i = 0; i < count; i++) + watcher->new_smi(interfaces[i], devices[i]); + kfree(interfaces); + kfree(devices); } - srcu_read_unlock(&ipmi_interfaces_srcu, index); mutex_unlock(&smi_watchers_mutex); - return 0; + return rv; } EXPORT_SYMBOL(ipmi_smi_watcher_register); @@ -779,22 +800,17 @@ int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher) } EXPORT_SYMBOL(ipmi_smi_watcher_unregister); -/* - * Must be called with smi_watchers_mutex held. - */ static void call_smi_watchers(int i, struct device *dev) { struct ipmi_smi_watcher *w; - mutex_lock(&smi_watchers_mutex); list_for_each_entry(w, &smi_watchers, link) { if (try_module_get(w->owner)) { w->new_smi(i, dev); module_put(w->owner); } } - mutex_unlock(&smi_watchers_mutex); } static int @@ -941,18 +957,14 @@ static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) ipmi_free_recv_msg(msg); atomic_dec(&msg->user->nr_msgs); } else { - int index; - struct ipmi_user *user = acquire_ipmi_user(msg->user, &index); - - if (user) { - atomic_dec(&user->nr_msgs); - user->handler->ipmi_recv_hndl(msg, user->handler_data); - release_ipmi_user(user, index); - } else { - /* User went away, give up. */ - ipmi_free_recv_msg(msg); - rv = -EINVAL; - } + /* + * Deliver it in smi_work. The message will hold a + * refcount to the user. 
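
deliver_response above no longer calls the user handler directly; the message is queued on a per-interface list and handed to smi_work. A minimal sketch of that hand-off, assuming a hypothetical my_intf with its own message list:

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

struct my_msg {
	struct list_head link;
	/* ... payload ... */
};

struct my_intf {
	struct mutex msgs_mutex;
	struct list_head msgs;
	struct work_struct work;
};

static void my_intf_queue(struct my_intf *intf, struct my_msg *msg)
{
	mutex_lock(&intf->msgs_mutex);
	list_add_tail(&msg->link, &intf->msgs);
	mutex_unlock(&intf->msgs_mutex);
	/* The work item drains intf->msgs in process context. */
	queue_work(system_wq, &intf->work);
}

static void my_intf_work(struct work_struct *work)
{
	struct my_intf *intf = container_of(work, struct my_intf, work);
	struct my_msg *msg, *next;
	LIST_HEAD(todo);

	mutex_lock(&intf->msgs_mutex);
	list_splice_init(&intf->msgs, &todo);
	mutex_unlock(&intf->msgs_mutex);

	list_for_each_entry_safe(msg, next, &todo, link) {
		list_del(&msg->link);
		/* ... deliver msg to its user here ... */
	}
}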
+ */ + mutex_lock(&intf->user_msgs_mutex); + list_add_tail(&msg->link, &intf->user_msgs); + mutex_unlock(&intf->user_msgs_mutex); + queue_work(system_wq, &intf->smi_work); } return rv; @@ -1192,23 +1204,14 @@ static int intf_err_seq(struct ipmi_smi *intf, return rv; } -static void free_user_work(struct work_struct *work) -{ - struct ipmi_user *user = container_of(work, struct ipmi_user, - remove_work); - - cleanup_srcu_struct(&user->release_barrier); - vfree(user); -} - int ipmi_create_user(unsigned int if_num, const struct ipmi_user_hndl *handler, void *handler_data, struct ipmi_user **user) { unsigned long flags; - struct ipmi_user *new_user; - int rv, index; + struct ipmi_user *new_user = NULL; + int rv = 0; struct ipmi_smi *intf; /* @@ -1230,30 +1233,31 @@ int ipmi_create_user(unsigned int if_num, if (rv) return rv; - new_user = vzalloc(sizeof(*new_user)); - if (!new_user) - return -ENOMEM; - - index = srcu_read_lock(&ipmi_interfaces_srcu); - list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { + mutex_lock(&ipmi_interfaces_mutex); + list_for_each_entry(intf, &ipmi_interfaces, link) { if (intf->intf_num == if_num) goto found; } /* Not found, return an error */ rv = -EINVAL; - goto out_kfree; + goto out_unlock; found: + if (intf->in_shutdown) { + rv = -ENODEV; + goto out_unlock; + } + if (atomic_add_return(1, &intf->nr_users) > max_users) { rv = -EBUSY; goto out_kfree; } - INIT_WORK(&new_user->remove_work, free_user_work); - - rv = init_srcu_struct(&new_user->release_barrier); - if (rv) + new_user = vzalloc(sizeof(*new_user)); + if (!new_user) { + rv = -ENOMEM; goto out_kfree; + } if (!try_module_get(intf->owner)) { rv = -ENODEV; @@ -1265,64 +1269,58 @@ int ipmi_create_user(unsigned int if_num, atomic_set(&new_user->nr_msgs, 0); kref_init(&new_user->refcount); + refcount_set(&new_user->destroyed, 1); + kref_get(&new_user->refcount); /* Destroy owns a refcount. */ new_user->handler = handler; new_user->handler_data = handler_data; new_user->intf = intf; new_user->gets_events = false; - rcu_assign_pointer(new_user->self, new_user); + mutex_lock(&intf->users_mutex); spin_lock_irqsave(&intf->seq_lock, flags); - list_add_rcu(&new_user->link, &intf->users); + list_add(&new_user->link, &intf->users); spin_unlock_irqrestore(&intf->seq_lock, flags); + mutex_unlock(&intf->users_mutex); + if (handler->ipmi_watchdog_pretimeout) /* User wants pretimeouts, so make sure to watch for them. 
*/ smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG); - srcu_read_unlock(&ipmi_interfaces_srcu, index); - *user = new_user; - return 0; out_kfree: - atomic_dec(&intf->nr_users); - srcu_read_unlock(&ipmi_interfaces_srcu, index); - vfree(new_user); + if (rv) { + atomic_dec(&intf->nr_users); + vfree(new_user); + } else { + *user = new_user; + } +out_unlock: + mutex_unlock(&ipmi_interfaces_mutex); return rv; } EXPORT_SYMBOL(ipmi_create_user); int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data) { - int rv, index; + int rv = -EINVAL; struct ipmi_smi *intf; - index = srcu_read_lock(&ipmi_interfaces_srcu); - list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { - if (intf->intf_num == if_num) - goto found; + mutex_lock(&ipmi_interfaces_mutex); + list_for_each_entry(intf, &ipmi_interfaces, link) { + if (intf->intf_num == if_num) { + if (!intf->handlers->get_smi_info) + rv = -ENOTTY; + else + rv = intf->handlers->get_smi_info(intf->send_info, data); + break; + } } - srcu_read_unlock(&ipmi_interfaces_srcu, index); - - /* Not found, return an error */ - return -EINVAL; - -found: - if (!intf->handlers->get_smi_info) - rv = -ENOTTY; - else - rv = intf->handlers->get_smi_info(intf->send_info, data); - srcu_read_unlock(&ipmi_interfaces_srcu, index); + mutex_unlock(&ipmi_interfaces_mutex); return rv; } EXPORT_SYMBOL(ipmi_get_smi_info); -static void free_user(struct kref *ref) -{ - struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount); - - /* SRCU cleanup must happen in task context. */ - queue_work(remove_work_wq, &user->remove_work); -} - +/* Must be called with intf->users_mutex held. */ static void _ipmi_destroy_user(struct ipmi_user *user) { struct ipmi_smi *intf = user->intf; @@ -1330,21 +1328,10 @@ static void _ipmi_destroy_user(struct ipmi_user *user) unsigned long flags; struct cmd_rcvr *rcvr; struct cmd_rcvr *rcvrs = NULL; - struct module *owner; + struct ipmi_recv_msg *msg, *msg2; - if (!acquire_ipmi_user(user, &i)) { - /* - * The user has already been cleaned up, just make sure - * nothing is using it and return. - */ - synchronize_srcu(&user->release_barrier); + if (!refcount_dec_if_one(&user->destroyed)) return; - } - - rcu_assign_pointer(user->self, NULL); - release_ipmi_user(user, i); - - synchronize_srcu(&user->release_barrier); if (user->handler->shutdown) user->handler->shutdown(user->handler_data); @@ -1355,11 +1342,11 @@ static void _ipmi_destroy_user(struct ipmi_user *user) if (user->gets_events) atomic_dec(&intf->event_waiters); - /* Remove the user from the interface's sequence table. */ - spin_lock_irqsave(&intf->seq_lock, flags); - list_del_rcu(&user->link); + /* Remove the user from the interface's list and sequence table. */ + list_del(&user->link); atomic_dec(&intf->nr_users); + spin_lock_irqsave(&intf->seq_lock, flags); for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) { if (intf->seq_table[i].inuse && (intf->seq_table[i].recv_msg->user == user)) { @@ -1374,7 +1361,7 @@ static void _ipmi_destroy_user(struct ipmi_user *user) * Remove the user from the command receiver's table. First * we build a list of everything (not using the standard link, * since other things may be using it till we do - * synchronize_srcu()) then free everything in that list. + * synchronize_rcu()) then free everything in that list. 
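
_ipmi_destroy_user above relies on a dedicated `destroyed` refcount so that only one caller ever runs the teardown. A sketch of that idiom with a hypothetical my_user:

#include <linux/refcount.h>

struct my_user {
	refcount_t destroyed;	/* initialised to 1 at creation time */
	/* ... */
};

static void my_user_destroy(struct my_user *user)
{
	/*
	 * Only the caller that drops the count from 1 to 0 proceeds; every
	 * later (or concurrent) call becomes a no-op.
	 */
	if (!refcount_dec_if_one(&user->destroyed))
		return;

	/* ... shut down handlers, unlink from lists, drop references ... */
}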
*/ mutex_lock(&intf->cmd_rcvrs_mutex); list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link, @@ -1386,25 +1373,33 @@ static void _ipmi_destroy_user(struct ipmi_user *user) } } mutex_unlock(&intf->cmd_rcvrs_mutex); - synchronize_rcu(); while (rcvrs) { rcvr = rcvrs; rcvrs = rcvr->next; kfree(rcvr); } - owner = intf->owner; - kref_put(&intf->refcount, intf_free); - module_put(owner); + mutex_lock(&intf->user_msgs_mutex); + list_for_each_entry_safe(msg, msg2, &intf->user_msgs, link) { + if (msg->user != user) + continue; + list_del(&msg->link); + ipmi_free_recv_msg(msg); + } + mutex_unlock(&intf->user_msgs_mutex); + + release_ipmi_user(user); } -int ipmi_destroy_user(struct ipmi_user *user) +void ipmi_destroy_user(struct ipmi_user *user) { - _ipmi_destroy_user(user); + struct ipmi_smi *intf = user->intf; - kref_put(&user->refcount, free_user); + mutex_lock(&intf->users_mutex); + _ipmi_destroy_user(user); + mutex_unlock(&intf->users_mutex); - return 0; + kref_put(&user->refcount, free_ipmi_user); } EXPORT_SYMBOL(ipmi_destroy_user); @@ -1413,9 +1408,9 @@ int ipmi_get_version(struct ipmi_user *user, unsigned char *minor) { struct ipmi_device_id id; - int rv, index; + int rv; - user = acquire_ipmi_user(user, &index); + user = acquire_ipmi_user(user); if (!user) return -ENODEV; @@ -1424,7 +1419,7 @@ int ipmi_get_version(struct ipmi_user *user, *major = ipmi_version_major(&id); *minor = ipmi_version_minor(&id); } - release_ipmi_user(user, index); + release_ipmi_user(user); return rv; } @@ -1434,9 +1429,9 @@ int ipmi_set_my_address(struct ipmi_user *user, unsigned int channel, unsigned char address) { - int index, rv = 0; + int rv = 0; - user = acquire_ipmi_user(user, &index); + user = acquire_ipmi_user(user); if (!user) return -ENODEV; @@ -1446,7 +1441,7 @@ int ipmi_set_my_address(struct ipmi_user *user, channel = array_index_nospec(channel, IPMI_MAX_CHANNELS); user->intf->addrinfo[channel].address = address; } - release_ipmi_user(user, index); + release_ipmi_user(user); return rv; } @@ -1456,9 +1451,9 @@ int ipmi_get_my_address(struct ipmi_user *user, unsigned int channel, unsigned char *address) { - int index, rv = 0; + int rv = 0; - user = acquire_ipmi_user(user, &index); + user = acquire_ipmi_user(user); if (!user) return -ENODEV; @@ -1468,7 +1463,7 @@ int ipmi_get_my_address(struct ipmi_user *user, channel = array_index_nospec(channel, IPMI_MAX_CHANNELS); *address = user->intf->addrinfo[channel].address; } - release_ipmi_user(user, index); + release_ipmi_user(user); return rv; } @@ -1478,9 +1473,9 @@ int ipmi_set_my_LUN(struct ipmi_user *user, unsigned int channel, unsigned char LUN) { - int index, rv = 0; + int rv = 0; - user = acquire_ipmi_user(user, &index); + user = acquire_ipmi_user(user); if (!user) return -ENODEV; @@ -1490,7 +1485,7 @@ int ipmi_set_my_LUN(struct ipmi_user *user, channel = array_index_nospec(channel, IPMI_MAX_CHANNELS); user->intf->addrinfo[channel].lun = LUN & 0x3; } - release_ipmi_user(user, index); + release_ipmi_user(user); return rv; } @@ -1500,9 +1495,9 @@ int ipmi_get_my_LUN(struct ipmi_user *user, unsigned int channel, unsigned char *address) { - int index, rv = 0; + int rv = 0; - user = acquire_ipmi_user(user, &index); + user = acquire_ipmi_user(user); if (!user) return -ENODEV; @@ -1512,7 +1507,7 @@ int ipmi_get_my_LUN(struct ipmi_user *user, channel = array_index_nospec(channel, IPMI_MAX_CHANNELS); *address = user->intf->addrinfo[channel].lun; } - release_ipmi_user(user, index); + release_ipmi_user(user); return rv; } @@ -1520,17 +1515,17 @@ 
EXPORT_SYMBOL(ipmi_get_my_LUN); int ipmi_get_maintenance_mode(struct ipmi_user *user) { - int mode, index; + int mode; unsigned long flags; - user = acquire_ipmi_user(user, &index); + user = acquire_ipmi_user(user); if (!user) return -ENODEV; spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags); mode = user->intf->maintenance_mode; spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags); - release_ipmi_user(user, index); + release_ipmi_user(user); return mode; } @@ -1545,11 +1540,11 @@ static void maintenance_mode_update(struct ipmi_smi *intf) int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode) { - int rv = 0, index; + int rv = 0; unsigned long flags; struct ipmi_smi *intf = user->intf; - user = acquire_ipmi_user(user, &index); + user = acquire_ipmi_user(user); if (!user) return -ENODEV; @@ -1579,7 +1574,7 @@ int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode) } out_unlock: spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags); - release_ipmi_user(user, index); + release_ipmi_user(user); return rv; } @@ -1587,19 +1582,17 @@ EXPORT_SYMBOL(ipmi_set_maintenance_mode); int ipmi_set_gets_events(struct ipmi_user *user, bool val) { - unsigned long flags; struct ipmi_smi *intf = user->intf; struct ipmi_recv_msg *msg, *msg2; struct list_head msgs; - int index; - user = acquire_ipmi_user(user, &index); + user = acquire_ipmi_user(user); if (!user) return -ENODEV; INIT_LIST_HEAD(&msgs); - spin_lock_irqsave(&intf->events_lock, flags); + mutex_lock(&intf->events_mutex); if (user->gets_events == val) goto out; @@ -1612,13 +1605,6 @@ int ipmi_set_gets_events(struct ipmi_user *user, bool val) atomic_dec(&intf->event_waiters); } - if (intf->delivering_events) - /* - * Another thread is delivering events for this, so - * let it handle any new events. - */ - goto out; - /* Deliver any queued events. 
*/ while (user->gets_events && !list_empty(&intf->waiting_events)) { list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link) @@ -1629,22 +1615,16 @@ int ipmi_set_gets_events(struct ipmi_user *user, bool val) intf->event_msg_printed = 0; } - intf->delivering_events = 1; - spin_unlock_irqrestore(&intf->events_lock, flags); - list_for_each_entry_safe(msg, msg2, &msgs, link) { msg->user = user; kref_get(&user->refcount); deliver_local_response(intf, msg); } - - spin_lock_irqsave(&intf->events_lock, flags); - intf->delivering_events = 0; } out: - spin_unlock_irqrestore(&intf->events_lock, flags); - release_ipmi_user(user, index); + mutex_unlock(&intf->events_mutex); + release_ipmi_user(user); return 0; } @@ -1689,9 +1669,9 @@ int ipmi_register_for_cmd(struct ipmi_user *user, { struct ipmi_smi *intf = user->intf; struct cmd_rcvr *rcvr; - int rv = 0, index; + int rv = 0; - user = acquire_ipmi_user(user, &index); + user = acquire_ipmi_user(user); if (!user) return -ENODEV; @@ -1721,7 +1701,7 @@ out_unlock: if (rv) kfree(rcvr); out_release: - release_ipmi_user(user, index); + release_ipmi_user(user); return rv; } @@ -1735,9 +1715,9 @@ int ipmi_unregister_for_cmd(struct ipmi_user *user, struct ipmi_smi *intf = user->intf; struct cmd_rcvr *rcvr; struct cmd_rcvr *rcvrs = NULL; - int i, rv = -ENOENT, index; + int i, rv = -ENOENT; - user = acquire_ipmi_user(user, &index); + user = acquire_ipmi_user(user); if (!user) return -ENODEV; @@ -1760,7 +1740,7 @@ int ipmi_unregister_for_cmd(struct ipmi_user *user, } mutex_unlock(&intf->cmd_rcvrs_mutex); synchronize_rcu(); - release_ipmi_user(user, index); + release_ipmi_user(user); while (rcvrs) { smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS); rcvr = rcvrs; @@ -1884,13 +1864,12 @@ static void smi_send(struct ipmi_smi *intf, const struct ipmi_smi_handlers *handlers, struct ipmi_smi_msg *smi_msg, int priority) { - int run_to_completion = intf->run_to_completion; + int run_to_completion = READ_ONCE(intf->run_to_completion); unsigned long flags = 0; if (!run_to_completion) spin_lock_irqsave(&intf->xmit_msgs_lock, flags); smi_msg = smi_add_send_msg(intf, smi_msg, priority); - if (!run_to_completion) spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags); @@ -2306,6 +2285,7 @@ static int i_ipmi_request(struct ipmi_user *user, { struct ipmi_smi_msg *smi_msg; struct ipmi_recv_msg *recv_msg; + int run_to_completion = READ_ONCE(intf->run_to_completion); int rv = 0; if (user) { @@ -2339,7 +2319,8 @@ static int i_ipmi_request(struct ipmi_user *user, } } - rcu_read_lock(); + if (!run_to_completion) + mutex_lock(&intf->users_mutex); if (intf->in_shutdown) { rv = -ENODEV; goto out_err; @@ -2385,7 +2366,8 @@ out_err: smi_send(intf, intf->handlers, smi_msg, priority); } - rcu_read_unlock(); + if (!run_to_completion) + mutex_unlock(&intf->users_mutex); out: if (rv && user) @@ -2416,12 +2398,12 @@ int ipmi_request_settime(struct ipmi_user *user, unsigned int retry_time_ms) { unsigned char saddr = 0, lun = 0; - int rv, index; + int rv; if (!user) return -EINVAL; - user = acquire_ipmi_user(user, &index); + user = acquire_ipmi_user(user); if (!user) return -ENODEV; @@ -2440,7 +2422,7 @@ int ipmi_request_settime(struct ipmi_user *user, retries, retry_time_ms); - release_ipmi_user(user, index); + release_ipmi_user(user); return rv; } EXPORT_SYMBOL(ipmi_request_settime); @@ -2455,12 +2437,12 @@ int ipmi_request_supply_msgs(struct ipmi_user *user, int priority) { unsigned char saddr = 0, lun = 0; - int rv, index; + int rv; if (!user) return -EINVAL; - user = 
acquire_ipmi_user(user, &index); + user = acquire_ipmi_user(user); if (!user) return -ENODEV; @@ -2479,7 +2461,7 @@ int ipmi_request_supply_msgs(struct ipmi_user *user, lun, -1, 0); - release_ipmi_user(user, index); + release_ipmi_user(user); return rv; } EXPORT_SYMBOL(ipmi_request_supply_msgs); @@ -3066,7 +3048,7 @@ cleanup_bmc_device(struct kref *ref) * with removing the device attributes while reading a device * attribute. */ - queue_work(remove_work_wq, &bmc->remove_work); + queue_work(bmc_remove_work_wq, &bmc->remove_work); } /* @@ -3522,15 +3504,14 @@ static ssize_t nr_msgs_show(struct device *dev, char *buf) { struct ipmi_smi *intf = container_of(attr, - struct ipmi_smi, nr_msgs_devattr); + struct ipmi_smi, nr_msgs_devattr); struct ipmi_user *user; - int index; unsigned int count = 0; - index = srcu_read_lock(&intf->users_srcu); - list_for_each_entry_rcu(user, &intf->users, link) + mutex_lock(&intf->users_mutex); + list_for_each_entry(user, &intf->users, link) count += atomic_read(&user->nr_msgs); - srcu_read_unlock(&intf->users_srcu, index); + mutex_unlock(&intf->users_mutex); return sysfs_emit(buf, "%u\n", count); } @@ -3571,12 +3552,6 @@ int ipmi_add_smi(struct module *owner, if (!intf) return -ENOMEM; - rv = init_srcu_struct(&intf->users_srcu); - if (rv) { - kfree(intf); - return rv; - } - intf->owner = owner; intf->bmc = &intf->tmp_bmc; INIT_LIST_HEAD(&intf->bmc->intfs); @@ -3593,7 +3568,10 @@ int ipmi_add_smi(struct module *owner, } if (slave_addr != 0) intf->addrinfo[0].address = slave_addr; + INIT_LIST_HEAD(&intf->user_msgs); + mutex_init(&intf->user_msgs_mutex); INIT_LIST_HEAD(&intf->users); + mutex_init(&intf->users_mutex); atomic_set(&intf->nr_users, 0); intf->handlers = handlers; intf->send_info = send_info; @@ -3605,13 +3583,12 @@ int ipmi_add_smi(struct module *owner, intf->curr_seq = 0; spin_lock_init(&intf->waiting_rcv_msgs_lock); INIT_LIST_HEAD(&intf->waiting_rcv_msgs); - tasklet_setup(&intf->recv_tasklet, - smi_recv_tasklet); + INIT_WORK(&intf->smi_work, smi_work); atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0); spin_lock_init(&intf->xmit_msgs_lock); INIT_LIST_HEAD(&intf->xmit_msgs); INIT_LIST_HEAD(&intf->hp_xmit_msgs); - spin_lock_init(&intf->events_lock); + mutex_init(&intf->events_mutex); spin_lock_init(&intf->watch_lock); atomic_set(&intf->event_waiters, 0); intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME; @@ -3624,12 +3601,16 @@ int ipmi_add_smi(struct module *owner, for (i = 0; i < IPMI_NUM_STATS; i++) atomic_set(&intf->stats[i], 0); + /* + * Grab the watchers mutex so we can deliver the new interface + * without races. + */ + mutex_lock(&smi_watchers_mutex); mutex_lock(&ipmi_interfaces_mutex); /* Look for a hole in the numbers. */ i = 0; link = &ipmi_interfaces; - list_for_each_entry_rcu(tintf, &ipmi_interfaces, link, - ipmi_interfaces_mutex_held()) { + list_for_each_entry(tintf, &ipmi_interfaces, link) { if (tintf->intf_num != i) { link = &tintf->link; break; @@ -3638,9 +3619,9 @@ int ipmi_add_smi(struct module *owner, } /* Add the new interface in numeric order. */ if (i == 0) - list_add_rcu(&intf->link, &ipmi_interfaces); + list_add(&intf->link, &ipmi_interfaces); else - list_add_tail_rcu(&intf->link, link); + list_add_tail(&intf->link, link); rv = handlers->start_processing(send_info, intf); if (rv) @@ -3672,18 +3653,14 @@ int ipmi_add_smi(struct module *owner, goto out_err_bmc_reg; } - /* - * Keep memory order straight for RCU readers. Make - * sure everything else is committed to memory before - * setting intf_num to mark the interface valid. 
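In ipmi_add_smi() and the nr_msgs sysfs handler above, the RCU list primitives (list_add_rcu(), list_for_each_entry_rcu()) give way to plain list helpers under a mutex. A generic sketch of the resulting pattern, with illustrative names rather than the driver's own:

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct example_item {
	struct list_head link;
	unsigned int value;
};

static LIST_HEAD(example_list);
static DEFINE_MUTEX(example_list_mutex);

/* Writers and readers both take the mutex; no RCU grace period is needed. */
static int example_add(unsigned int value)
{
	struct example_item *item = kzalloc(sizeof(*item), GFP_KERNEL);

	if (!item)
		return -ENOMEM;
	item->value = value;

	mutex_lock(&example_list_mutex);
	list_add_tail(&item->link, &example_list);
	mutex_unlock(&example_list_mutex);
	return 0;
}

static unsigned int example_sum(void)
{
	struct example_item *item;
	unsigned int sum = 0;

	mutex_lock(&example_list_mutex);
	list_for_each_entry(item, &example_list, link)
		sum += item->value;
	mutex_unlock(&example_list_mutex);
	return sum;
}

Readers may now sleep while traversing, at the price of serializing with each other, which is presumably acceptable for lists this short and this rarely walked.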
- */ - smp_wmb(); intf->intf_num = i; mutex_unlock(&ipmi_interfaces_mutex); /* After this point the interface is legal to use. */ call_smi_watchers(i, intf->si_dev); + mutex_unlock(&smi_watchers_mutex); + return 0; out_err_bmc_reg: @@ -3692,10 +3669,9 @@ int ipmi_add_smi(struct module *owner, if (intf->handlers->shutdown) intf->handlers->shutdown(intf->send_info); out_err: - list_del_rcu(&intf->link); + list_del(&intf->link); mutex_unlock(&ipmi_interfaces_mutex); - synchronize_srcu(&ipmi_interfaces_srcu); - cleanup_srcu_struct(&intf->users_srcu); + mutex_unlock(&smi_watchers_mutex); kref_put(&intf->refcount, intf_free); return rv; @@ -3761,19 +3737,28 @@ static void cleanup_smi_msgs(struct ipmi_smi *intf) void ipmi_unregister_smi(struct ipmi_smi *intf) { struct ipmi_smi_watcher *w; - int intf_num, index; + int intf_num; if (!intf) return; + intf_num = intf->intf_num; mutex_lock(&ipmi_interfaces_mutex); + cancel_work_sync(&intf->smi_work); + /* smi_work() can no longer be in progress after this. */ + intf->intf_num = -1; intf->in_shutdown = true; - list_del_rcu(&intf->link); + list_del(&intf->link); mutex_unlock(&ipmi_interfaces_mutex); - synchronize_srcu(&ipmi_interfaces_srcu); - /* At this point no users can be added to the interface. */ + /* + * At this point no users can be added to the interface and no + * new messages can be sent. + */ + + if (intf->handlers->shutdown) + intf->handlers->shutdown(intf->send_info); device_remove_file(intf->si_dev, &intf->nr_msgs_devattr); device_remove_file(intf->si_dev, &intf->nr_users_devattr); @@ -3787,24 +3772,19 @@ void ipmi_unregister_smi(struct ipmi_smi *intf) w->smi_gone(intf_num); mutex_unlock(&smi_watchers_mutex); - index = srcu_read_lock(&intf->users_srcu); + mutex_lock(&intf->users_mutex); while (!list_empty(&intf->users)) { - struct ipmi_user *user = - container_of(list_next_rcu(&intf->users), - struct ipmi_user, link); + struct ipmi_user *user = list_first_entry(&intf->users, + struct ipmi_user, link); _ipmi_destroy_user(user); } - srcu_read_unlock(&intf->users_srcu, index); - - if (intf->handlers->shutdown) - intf->handlers->shutdown(intf->send_info); + mutex_unlock(&intf->users_mutex); cleanup_smi_msgs(intf); ipmi_bmc_unregister(intf); - cleanup_srcu_struct(&intf->users_srcu); kref_put(&intf->refcount, intf_free); } EXPORT_SYMBOL(ipmi_unregister_smi); @@ -3929,17 +3909,12 @@ static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf, dev_dbg(intf->si_dev, "Invalid command: %*ph\n", msg->data_size, msg->data); - rcu_read_lock(); - if (!intf->in_shutdown) { - smi_send(intf, intf->handlers, msg, 0); - /* - * We used the message, so return the value - * that causes it to not be freed or - * queued. - */ - rv = -1; - } - rcu_read_unlock(); + smi_send(intf, intf->handlers, msg, 0); + /* + * We used the message, so return the value that + * causes it to not be freed or queued. + */ + rv = -1; } else { recv_msg = ipmi_alloc_recv_msg(); if (!recv_msg) { @@ -3949,7 +3924,7 @@ static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf, * later. */ rv = 1; - kref_put(&user->refcount, free_user); + kref_put(&user->refcount, free_ipmi_user); } else { /* Extract the source address from the data. 
*/ ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr; @@ -4020,17 +3995,12 @@ static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf, msg->data[4] = IPMI_INVALID_CMD_COMPLETION_CODE; msg->data_size = 5; - rcu_read_lock(); - if (!intf->in_shutdown) { - smi_send(intf, intf->handlers, msg, 0); - /* - * We used the message, so return the value - * that causes it to not be freed or - * queued. - */ - rv = -1; - } - rcu_read_unlock(); + smi_send(intf, intf->handlers, msg, 0); + /* + * We used the message, so return the value that + * causes it to not be freed or queued. + */ + rv = -1; } else { recv_msg = ipmi_alloc_recv_msg(); if (!recv_msg) { @@ -4040,7 +4010,7 @@ static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf, * later. */ rv = 1; - kref_put(&user->refcount, free_user); + kref_put(&user->refcount, free_ipmi_user); } else { /* Extract the source address from the data. */ daddr = (struct ipmi_ipmb_direct_addr *)&recv_msg->addr; @@ -4209,14 +4179,33 @@ static int handle_lan_get_msg_cmd(struct ipmi_smi *intf, rcu_read_unlock(); if (user == NULL) { - /* We didn't find a user, just give up. */ + /* We didn't find a user, just give up and return an error. */ ipmi_inc_stat(intf, unhandled_commands); + msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); + msg->data[1] = IPMI_SEND_MSG_CMD; + msg->data[2] = chan; + msg->data[3] = msg->rsp[4]; /* handle */ + msg->data[4] = msg->rsp[8]; /* rsSWID */ + msg->data[5] = ((netfn + 1) << 2) | (msg->rsp[9] & 0x3); + msg->data[6] = ipmb_checksum(&msg->data[3], 3); + msg->data[7] = msg->rsp[5]; /* rqSWID */ + /* rqseq/lun */ + msg->data[8] = (msg->rsp[9] & 0xfc) | (msg->rsp[6] & 0x3); + msg->data[9] = cmd; + msg->data[10] = IPMI_INVALID_CMD_COMPLETION_CODE; + msg->data[11] = ipmb_checksum(&msg->data[7], 4); + msg->data_size = 12; + + dev_dbg(intf->si_dev, "Invalid command: %*ph\n", + msg->data_size, msg->data); + + smi_send(intf, intf->handlers, msg, 0); /* - * Don't do anything with these messages, just allow - * them to be freed. + * We used the message, so return the value that + * causes it to not be freed or queued. */ - rv = 0; + rv = -1; } else { recv_msg = ipmi_alloc_recv_msg(); if (!recv_msg) { @@ -4225,7 +4214,7 @@ static int handle_lan_get_msg_cmd(struct ipmi_smi *intf, * message, so requeue it for handling later. */ rv = 1; - kref_put(&user->refcount, free_user); + kref_put(&user->refcount, free_ipmi_user); } else { /* Extract the source address from the data. */ lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr; @@ -4334,7 +4323,7 @@ static int handle_oem_get_msg_cmd(struct ipmi_smi *intf, * later. */ rv = 1; - kref_put(&user->refcount, free_user); + kref_put(&user->refcount, free_ipmi_user); } else { /* * OEM Messages are expected to be delivered via @@ -4396,8 +4385,7 @@ static int handle_read_event_rsp(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg, *recv_msg2; struct list_head msgs; struct ipmi_user *user; - int rv = 0, deliver_count = 0, index; - unsigned long flags; + int rv = 0, deliver_count = 0; if (msg->rsp_size < 19) { /* Message is too small to be an IPMB event. */ @@ -4412,7 +4400,7 @@ static int handle_read_event_rsp(struct ipmi_smi *intf, INIT_LIST_HEAD(&msgs); - spin_lock_irqsave(&intf->events_lock, flags); + mutex_lock(&intf->events_mutex); ipmi_inc_stat(intf, events); @@ -4420,18 +4408,20 @@ static int handle_read_event_rsp(struct ipmi_smi *intf, * Allocate and fill in one message for every user that is * getting events. 
*/ - index = srcu_read_lock(&intf->users_srcu); - list_for_each_entry_rcu(user, &intf->users, link) { + mutex_lock(&intf->users_mutex); + list_for_each_entry(user, &intf->users, link) { if (!user->gets_events) continue; recv_msg = ipmi_alloc_recv_msg(); if (!recv_msg) { - rcu_read_unlock(); + mutex_unlock(&intf->users_mutex); list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) { + user = recv_msg->user; list_del(&recv_msg->link); ipmi_free_recv_msg(recv_msg); + kref_put(&user->refcount, free_ipmi_user); } /* * We couldn't allocate memory for the @@ -4449,7 +4439,7 @@ static int handle_read_event_rsp(struct ipmi_smi *intf, kref_get(&user->refcount); list_add_tail(&recv_msg->link, &msgs); } - srcu_read_unlock(&intf->users_srcu, index); + mutex_unlock(&intf->users_mutex); if (deliver_count) { /* Now deliver all the messages. */ @@ -4487,7 +4477,7 @@ static int handle_read_event_rsp(struct ipmi_smi *intf, } out: - spin_unlock_irqrestore(&intf->events_lock, flags); + mutex_unlock(&intf->events_mutex); return rv; } @@ -4573,7 +4563,7 @@ return_unspecified: && (msg->data[1] == IPMI_SEND_MSG_CMD) && (msg->user_data == NULL)) { - if (intf->in_shutdown) + if (intf->in_shutdown || intf->run_to_completion) goto out; /* @@ -4645,6 +4635,9 @@ return_unspecified: */ struct ipmi_recv_msg *recv_msg; + if (intf->run_to_completion) + goto out; + chan = msg->data[2] & 0x0f; if (chan >= IPMI_MAX_CHANNELS) /* Invalid channel number */ @@ -4667,6 +4660,9 @@ process_response_response: && (msg->rsp[1] == IPMI_GET_MSG_CMD)) { struct ipmi_channel *chans; + if (intf->run_to_completion) + goto out; + /* It's from the receive queue. */ chan = msg->rsp[3] & 0xf; if (chan >= IPMI_MAX_CHANNELS) { @@ -4741,6 +4737,9 @@ process_response_response: } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2)) && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) { /* It's an asynchronous event. */ + if (intf->run_to_completion) + goto out; + requeue = handle_read_event_rsp(intf, msg); } else { /* It's a response from the local BMC. */ @@ -4756,10 +4755,10 @@ process_response_response: */ static void handle_new_recv_msgs(struct ipmi_smi *intf) { - struct ipmi_smi_msg *smi_msg; - unsigned long flags = 0; - int rv; - int run_to_completion = intf->run_to_completion; + struct ipmi_smi_msg *smi_msg; + unsigned long flags = 0; + int rv; + int run_to_completion = READ_ONCE(intf->run_to_completion); /* See if any waiting messages need to be processed. */ if (!run_to_completion) @@ -4779,7 +4778,7 @@ static void handle_new_recv_msgs(struct ipmi_smi *intf) * To preserve message order, quit if we * can't handle a message. Add the message * back at the head, this is safe because this - * tasklet is the only thing that pulls the + * workqueue is the only thing that pulls the * messages. */ list_add(&smi_msg->link, &intf->waiting_rcv_msgs); @@ -4793,31 +4792,15 @@ static void handle_new_recv_msgs(struct ipmi_smi *intf) } if (!run_to_completion) spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags); - - /* - * If the pretimout count is non-zero, decrement one from it and - * deliver pretimeouts to all the users. 
- */ - if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) { - struct ipmi_user *user; - int index; - - index = srcu_read_lock(&intf->users_srcu); - list_for_each_entry_rcu(user, &intf->users, link) { - if (user->handler->ipmi_watchdog_pretimeout) - user->handler->ipmi_watchdog_pretimeout( - user->handler_data); - } - srcu_read_unlock(&intf->users_srcu, index); - } } -static void smi_recv_tasklet(struct tasklet_struct *t) +static void smi_work(struct work_struct *t) { unsigned long flags = 0; /* keep us warning-free. */ - struct ipmi_smi *intf = from_tasklet(intf, t, recv_tasklet); - int run_to_completion = intf->run_to_completion; + struct ipmi_smi *intf = from_work(intf, t, smi_work); + int run_to_completion = READ_ONCE(intf->run_to_completion); struct ipmi_smi_msg *newmsg = NULL; + struct ipmi_recv_msg *msg, *msg2; /* * Start the next message if available. @@ -4827,8 +4810,6 @@ static void smi_recv_tasklet(struct tasklet_struct *t) * message delivery. */ - rcu_read_lock(); - if (!run_to_completion) spin_lock_irqsave(&intf->xmit_msgs_lock, flags); if (intf->curr_msg == NULL && !intf->in_shutdown) { @@ -4846,15 +4827,57 @@ static void smi_recv_tasklet(struct tasklet_struct *t) intf->curr_msg = newmsg; } } - if (!run_to_completion) spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags); + if (newmsg) intf->handlers->sender(intf->send_info, newmsg); - rcu_read_unlock(); - handle_new_recv_msgs(intf); + + /* Nothing below applies during panic time. */ + if (run_to_completion) + return; + + /* + * If the pretimout count is non-zero, decrement one from it and + * deliver pretimeouts to all the users. + */ + if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) { + struct ipmi_user *user; + + mutex_lock(&intf->users_mutex); + list_for_each_entry(user, &intf->users, link) { + if (user->handler->ipmi_watchdog_pretimeout) + user->handler->ipmi_watchdog_pretimeout( + user->handler_data); + } + mutex_unlock(&intf->users_mutex); + } + + /* + * Freeing the message can cause a user to be released, which + * can then cause the interface to be freed. Make sure that + * doesn't happen until we are ready. + */ + kref_get(&intf->refcount); + + mutex_lock(&intf->user_msgs_mutex); + list_for_each_entry_safe(msg, msg2, &intf->user_msgs, link) { + struct ipmi_user *user = msg->user; + + list_del(&msg->link); + + if (refcount_read(&user->destroyed) == 0) { + ipmi_free_recv_msg(msg); + } else { + atomic_dec(&user->nr_msgs); + user->handler->ipmi_recv_hndl(msg, user->handler_data); + } + } + mutex_unlock(&intf->user_msgs_mutex); + + kref_put(&intf->refcount, intf_free); } /* Handle a new message from the lower layer. */ @@ -4862,11 +4885,11 @@ void ipmi_smi_msg_received(struct ipmi_smi *intf, struct ipmi_smi_msg *msg) { unsigned long flags = 0; /* keep us warning-free. */ - int run_to_completion = intf->run_to_completion; + int run_to_completion = READ_ONCE(intf->run_to_completion); /* * To preserve message order, we keep a queue and deliver from - * a tasklet. + * a workqueue. 
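The smi_work() conversion above replaces the receive tasklet with a work item: tasklet_setup()/from_tasklet() become INIT_WORK()/from_work() (a container_of() wrapper), and the scheduling sites switch to queue_work(system_wq, ...) in the hunks that follow. A self-contained sketch of that shape, with illustrative names:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct example_intf {
	struct work_struct recv_work;
	/* ... interface state ... */
};

static void example_recv_work(struct work_struct *work)
{
	struct example_intf *intf = container_of(work, struct example_intf,
						 recv_work);

	/* Process context: sleeping locks such as mutexes are allowed. */
	(void)intf;
}

static struct example_intf *example_intf_alloc(void)
{
	struct example_intf *intf = kzalloc(sizeof(*intf), GFP_KERNEL);

	if (intf)
		INIT_WORK(&intf->recv_work, example_recv_work);
	return intf;
}

/* Where tasklet_schedule() used to be called. */
static void example_kick(struct example_intf *intf)
{
	queue_work(system_wq, &intf->recv_work);
}

/* Where tasklet_kill() used to be called on teardown. */
static void example_stop(struct example_intf *intf)
{
	cancel_work_sync(&intf->recv_work);
}

Running the handler in process context is what lets it take the new users_mutex and user_msgs_mutex instead of RCU read sections.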
*/ if (!run_to_completion) spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags); @@ -4887,9 +4910,9 @@ void ipmi_smi_msg_received(struct ipmi_smi *intf, spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags); if (run_to_completion) - smi_recv_tasklet(&intf->recv_tasklet); + smi_work(&intf->smi_work); else - tasklet_schedule(&intf->recv_tasklet); + queue_work(system_wq, &intf->smi_work); } EXPORT_SYMBOL(ipmi_smi_msg_received); @@ -4899,7 +4922,7 @@ void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf) return; atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1); - tasklet_schedule(&intf->recv_tasklet); + queue_work(system_wq, &intf->smi_work); } EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout); @@ -5068,7 +5091,7 @@ static bool ipmi_timeout_handler(struct ipmi_smi *intf, flags); } - tasklet_schedule(&intf->recv_tasklet); + queue_work(system_wq, &intf->smi_work); return need_timer; } @@ -5087,17 +5110,19 @@ static struct timer_list ipmi_timer; static atomic_t stop_operation; -static void ipmi_timeout(struct timer_list *unused) +static void ipmi_timeout_work(struct work_struct *work) { + if (atomic_read(&stop_operation)) + return; + struct ipmi_smi *intf; bool need_timer = false; - int index; if (atomic_read(&stop_operation)) return; - index = srcu_read_lock(&ipmi_interfaces_srcu); - list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { + mutex_lock(&ipmi_interfaces_mutex); + list_for_each_entry(intf, &ipmi_interfaces, link) { if (atomic_read(&intf->event_waiters)) { intf->ticks_to_req_ev--; if (intf->ticks_to_req_ev == 0) { @@ -5109,12 +5134,22 @@ static void ipmi_timeout(struct timer_list *unused) need_timer |= ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME); } - srcu_read_unlock(&ipmi_interfaces_srcu, index); + mutex_unlock(&ipmi_interfaces_mutex); if (need_timer) mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES); } +static DECLARE_WORK(ipmi_timer_work, ipmi_timeout_work); + +static void ipmi_timeout(struct timer_list *unused) +{ + if (atomic_read(&stop_operation)) + return; + + queue_work(system_wq, &ipmi_timer_work); +} + static void need_waiter(struct ipmi_smi *intf) { /* Racy, but worst case we start the timer twice. */ @@ -5171,7 +5206,7 @@ static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void) void ipmi_free_recv_msg(struct ipmi_recv_msg *msg) { if (msg->user && !oops_in_progress) - kref_put(&msg->user->refcount, free_user); + kref_put(&msg->user->refcount, free_ipmi_user); msg->done(msg); } EXPORT_SYMBOL(ipmi_free_recv_msg); @@ -5191,9 +5226,9 @@ static void dummy_recv_done_handler(struct ipmi_recv_msg *msg) /* * Inside a panic, send a message and wait for a response. */ -static void ipmi_panic_request_and_wait(struct ipmi_smi *intf, - struct ipmi_addr *addr, - struct kernel_ipmi_msg *msg) +static void _ipmi_panic_request_and_wait(struct ipmi_smi *intf, + struct ipmi_addr *addr, + struct kernel_ipmi_msg *msg) { struct ipmi_smi_msg smi_msg; struct ipmi_recv_msg recv_msg; @@ -5223,6 +5258,15 @@ static void ipmi_panic_request_and_wait(struct ipmi_smi *intf, ipmi_poll(intf); } +void ipmi_panic_request_and_wait(struct ipmi_user *user, + struct ipmi_addr *addr, + struct kernel_ipmi_msg *msg) +{ + user->intf->run_to_completion = 1; + _ipmi_panic_request_and_wait(user->intf, addr, msg); +} +EXPORT_SYMBOL(ipmi_panic_request_and_wait); + static void event_receiver_fetcher(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) { @@ -5291,7 +5335,7 @@ static void send_panic_events(struct ipmi_smi *intf, char *str) } /* Send the event announcing the panic. 
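The new ipmi_timeout()/ipmi_timeout_work() split above keeps the timer callback down to a single queue_work() and moves the per-interface scan into process context, where ipmi_interfaces_mutex can be taken. A generic sketch of a timer deferring to a work item; names are illustrative, and the teardown mirrors the timer_delete_sync() plus cancel_work_sync() pairing that appears later in the patch:

#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

static DEFINE_MUTEX(example_mutex);

static void example_timeout_work(struct work_struct *work)
{
	/* Process context: sleeping locks are fine here. */
	mutex_lock(&example_mutex);
	/* ... periodic processing ... */
	mutex_unlock(&example_mutex);
}

static DECLARE_WORK(example_work, example_timeout_work);
static struct timer_list example_timer;

/* Timer callbacks run in atomic context, so only hand off to the work. */
static void example_timeout(struct timer_list *unused)
{
	queue_work(system_wq, &example_work);
	mod_timer(&example_timer, jiffies + HZ);
}

static void example_start(void)
{
	timer_setup(&example_timer, example_timeout, 0);
	mod_timer(&example_timer, jiffies + HZ);
}

static void example_stop(void)
{
	timer_delete_sync(&example_timer);
	cancel_work_sync(&example_work);
}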
*/ - ipmi_panic_request_and_wait(intf, &addr, &msg); + _ipmi_panic_request_and_wait(intf, &addr, &msg); /* * On every interface, dump a bunch of OEM event holding the @@ -5327,7 +5371,7 @@ static void send_panic_events(struct ipmi_smi *intf, char *str) msg.data = NULL; msg.data_len = 0; intf->null_user_handler = device_id_fetcher; - ipmi_panic_request_and_wait(intf, &addr, &msg); + _ipmi_panic_request_and_wait(intf, &addr, &msg); if (intf->local_event_generator) { /* Request the event receiver from the local MC. */ @@ -5336,7 +5380,7 @@ static void send_panic_events(struct ipmi_smi *intf, char *str) msg.data = NULL; msg.data_len = 0; intf->null_user_handler = event_receiver_fetcher; - ipmi_panic_request_and_wait(intf, &addr, &msg); + _ipmi_panic_request_and_wait(intf, &addr, &msg); } intf->null_user_handler = NULL; @@ -5388,7 +5432,7 @@ static void send_panic_events(struct ipmi_smi *intf, char *str) memcpy_and_pad(data+5, 11, p, size, '\0'); p += size; - ipmi_panic_request_and_wait(intf, &addr, &msg); + _ipmi_panic_request_and_wait(intf, &addr, &msg); } } @@ -5406,7 +5450,7 @@ static int panic_event(struct notifier_block *this, has_panicked = 1; /* For every registered interface, set it to run to completion. */ - list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { + list_for_each_entry(intf, &ipmi_interfaces, link) { if (!intf->handlers || intf->intf_num == -1) /* Interface is not ready. */ continue; @@ -5436,7 +5480,7 @@ static int panic_event(struct notifier_block *this, intf->handlers->set_run_to_completion(intf->send_info, 1); - list_for_each_entry_rcu(user, &intf->users, link) { + list_for_each_entry(user, &intf->users, link) { if (user->handler->ipmi_panic_handler) user->handler->ipmi_panic_handler( user->handler_data); @@ -5481,15 +5525,11 @@ static int ipmi_init_msghandler(void) if (initialized) goto out; - rv = init_srcu_struct(&ipmi_interfaces_srcu); - if (rv) - goto out; - - remove_work_wq = create_singlethread_workqueue("ipmi-msghandler-remove-wq"); - if (!remove_work_wq) { + bmc_remove_work_wq = create_singlethread_workqueue("ipmi-msghandler-remove-wq"); + if (!bmc_remove_work_wq) { pr_err("unable to create ipmi-msghandler-remove-wq workqueue"); rv = -ENOMEM; - goto out_wq; + goto out; } timer_setup(&ipmi_timer, ipmi_timeout, 0); @@ -5499,9 +5539,6 @@ static int ipmi_init_msghandler(void) initialized = true; -out_wq: - if (rv) - cleanup_srcu_struct(&ipmi_interfaces_srcu); out: mutex_unlock(&ipmi_interfaces_mutex); return rv; @@ -5525,7 +5562,7 @@ static void __exit cleanup_ipmi(void) int count; if (initialized) { - destroy_workqueue(remove_work_wq); + destroy_workqueue(bmc_remove_work_wq); atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block); @@ -5541,7 +5578,8 @@ static void __exit cleanup_ipmi(void) * here. 
*/ atomic_set(&stop_operation, 1); - del_timer_sync(&ipmi_timer); + timer_delete_sync(&ipmi_timer); + cancel_work_sync(&ipmi_timer_work); initialized = false; @@ -5552,8 +5590,6 @@ static void __exit cleanup_ipmi(void) count = atomic_read(&recv_msg_inuse_count); if (count != 0) pr_warn("recv message count %d at exit\n", count); - - cleanup_srcu_struct(&ipmi_interfaces_srcu); } if (drvregistered) driver_unregister(&ipmidriver.driver); diff --git a/drivers/char/ipmi/ipmi_powernv.c b/drivers/char/ipmi/ipmi_powernv.c index da22a8cbe68e..4a2efafcd1f8 100644 --- a/drivers/char/ipmi/ipmi_powernv.c +++ b/drivers/char/ipmi/ipmi_powernv.c @@ -281,15 +281,13 @@ err_free: return rc; } -static int ipmi_powernv_remove(struct platform_device *pdev) +static void ipmi_powernv_remove(struct platform_device *pdev) { struct ipmi_smi_powernv *smi = dev_get_drvdata(&pdev->dev); ipmi_unregister_smi(smi->intf); free_irq(smi->irq, smi); irq_dispose_mapping(smi->irq); - - return 0; } static const struct of_device_id ipmi_powernv_match[] = { @@ -304,7 +302,7 @@ static struct platform_driver powernv_ipmi_driver = { .of_match_table = ipmi_powernv_match, }, .probe = ipmi_powernv_probe, - .remove = ipmi_powernv_remove, + .remove = ipmi_powernv_remove, }; diff --git a/drivers/char/ipmi/ipmi_poweroff.c b/drivers/char/ipmi/ipmi_poweroff.c index 941d2dcc8c9d..e63c316d8aaa 100644 --- a/drivers/char/ipmi/ipmi_poweroff.c +++ b/drivers/char/ipmi/ipmi_poweroff.c @@ -650,7 +650,7 @@ static struct ipmi_smi_watcher smi_watcher = { #ifdef CONFIG_PROC_FS #include <linux/sysctl.h> -static struct ctl_table ipmi_table[] = { +static const struct ctl_table ipmi_table[] = { { .procname = "poweroff_powercycle", .data = &poweroff_powercycle, .maxlen = sizeof(poweroff_powercycle), @@ -699,8 +699,6 @@ static int __init ipmi_poweroff_init(void) #ifdef MODULE static void __exit ipmi_poweroff_cleanup(void) { - int rv; - #ifdef CONFIG_PROC_FS unregister_sysctl_table(ipmi_table_header); #endif @@ -708,9 +706,7 @@ static void __exit ipmi_poweroff_cleanup(void) ipmi_smi_watcher_unregister(&smi_watcher); if (ready) { - rv = ipmi_destroy_user(ipmi_user); - if (rv) - pr_err("could not cleanup the IPMI user: 0x%x\n", rv); + ipmi_destroy_user(ipmi_user); pm_power_off = old_poweroff_func; } } diff --git a/drivers/char/ipmi/ipmi_si.h b/drivers/char/ipmi/ipmi_si.h index a7ead2a4c753..508c3fd45877 100644 --- a/drivers/char/ipmi/ipmi_si.h +++ b/drivers/char/ipmi/ipmi_si.h @@ -26,6 +26,14 @@ enum si_type { /* Array is defined in the ipmi_si_intf.c */ extern const char *const si_to_str[]; +struct ipmi_match_info { + enum si_type type; +}; + +extern const struct ipmi_match_info ipmi_kcs_si_info; +extern const struct ipmi_match_info ipmi_smic_si_info; +extern const struct ipmi_match_info ipmi_bt_si_info; + enum ipmi_addr_space { IPMI_IO_ADDR_SPACE, IPMI_MEM_ADDR_SPACE }; @@ -64,7 +72,7 @@ struct si_sm_io { void (*irq_cleanup)(struct si_sm_io *io); u8 slave_addr; - enum si_type si_type; + const struct ipmi_match_info *si_info; struct device *dev; }; diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index 5cd031f3fc97..7fe891783a37 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c @@ -73,6 +73,10 @@ enum si_intf_state { /* 'invalid' to allow a firmware-specified interface to be disabled */ const char *const si_to_str[] = { "invalid", "kcs", "smic", "bt", NULL }; +const struct ipmi_match_info ipmi_kcs_si_info = { .type = SI_KCS }; +const struct ipmi_match_info ipmi_smic_si_info = { .type = SI_SMIC }; 
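The const ipmi_match_info objects above replace the bare enum si_type that used to live in si_sm_io, and later hunks point the OF/platform match tables' .data at the same objects so device_get_match_data() can hand back the pointer directly; the platform .remove callbacks in this series also return void. A combined sketch of both idioms with hypothetical device names, assuming a kernel where platform_driver::remove already has the void signature:

#include <linux/errno.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/property.h>

struct example_match_info {
	unsigned int type;
};

static const struct example_match_info example_kcs_info = { .type = 1 };
static const struct example_match_info example_bt_info  = { .type = 3 };

static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,example-kcs", .data = &example_kcs_info },
	{ .compatible = "vendor,example-bt",  .data = &example_bt_info },
	{ }
};
MODULE_DEVICE_TABLE(of, example_of_match);

static int example_probe(struct platform_device *pdev)
{
	const struct example_match_info *info;

	/* No cast or enum juggling: .data already points at const info. */
	info = device_get_match_data(&pdev->dev);
	if (!info)
		return -ENODEV;

	dev_info(&pdev->dev, "interface type %u\n", info->type);
	return 0;
}

static void example_remove(struct platform_device *pdev)
{
	/* Nothing to undo in this sketch. */
}

static struct platform_driver example_driver = {
	.driver = {
		.name = "example-ipmi",
		.of_match_table = example_of_match,
	},
	.probe = example_probe,
	.remove = example_remove,
};
module_platform_driver(example_driver);

MODULE_DESCRIPTION("Illustrative match-data example");
MODULE_LICENSE("GPL");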
+const struct ipmi_match_info ipmi_bt_si_info = { .type = SI_BT }; + static bool initialized; /* @@ -692,7 +696,7 @@ static void handle_transaction_done(struct smi_info *smi_info) break; } enables = current_global_enables(smi_info, 0, &irq_on); - if (smi_info->io.si_type == SI_BT) + if (smi_info->io.si_info->type == SI_BT) /* BT has its own interrupt enable bit. */ check_bt_irq(smi_info, irq_on); if (enables != (msg[3] & GLOBAL_ENABLES_MASK)) { @@ -859,7 +863,7 @@ restart: if (si_sm_result == SI_SM_IDLE && smi_info->timer_running) { /* Ok it if fails, the timer will just go off. */ - if (del_timer(&smi_info->si_timer)) + if (timer_delete(&smi_info->si_timer)) smi_info->timer_running = false; } @@ -1119,7 +1123,7 @@ irqreturn_t ipmi_si_irq_handler(int irq, void *data) struct smi_info *smi_info = data; unsigned long flags; - if (smi_info->io.si_type == SI_BT) + if (smi_info->io.si_info->type == SI_BT) /* We need to clear the IRQ flag for the BT interface. */ smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG, IPMI_BT_INTMASK_CLEAR_IRQ_BIT @@ -1164,7 +1168,7 @@ static int smi_start_processing(void *send_info, * The BT interface is efficient enough to not need a thread, * and there is no need for a thread if we have interrupts. */ - else if ((new_smi->io.si_type != SI_BT) && (!new_smi->io.irq)) + else if (new_smi->io.si_info->type != SI_BT && !new_smi->io.irq) enable = 1; if (enable) { @@ -1235,7 +1239,7 @@ MODULE_PARM_DESC(kipmid_max_busy_us, void ipmi_irq_finish_setup(struct si_sm_io *io) { - if (io->si_type == SI_BT) + if (io->si_info->type == SI_BT) /* Enable the interrupt in the BT interface. */ io->outputb(io, IPMI_BT_INTMASK_REG, IPMI_BT_INTMASK_ENABLE_IRQ_BIT); @@ -1243,7 +1247,7 @@ void ipmi_irq_finish_setup(struct si_sm_io *io) void ipmi_irq_start_cleanup(struct si_sm_io *io) { - if (io->si_type == SI_BT) + if (io->si_info->type == SI_BT) /* Disable the interrupt in the BT interface. 
*/ io->outputb(io, IPMI_BT_INTMASK_REG, 0); } @@ -1614,7 +1618,7 @@ static ssize_t type_show(struct device *dev, { struct smi_info *smi_info = dev_get_drvdata(dev); - return sysfs_emit(buf, "%s\n", si_to_str[smi_info->io.si_type]); + return sysfs_emit(buf, "%s\n", si_to_str[smi_info->io.si_info->type]); } static DEVICE_ATTR_RO(type); @@ -1649,7 +1653,7 @@ static ssize_t params_show(struct device *dev, return sysfs_emit(buf, "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n", - si_to_str[smi_info->io.si_type], + si_to_str[smi_info->io.si_info->type], addr_space_to_str[smi_info->io.addr_space], smi_info->io.addr_data, smi_info->io.regspacing, @@ -1803,7 +1807,7 @@ setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info) { struct ipmi_device_id *id = &smi_info->device_id; if (id->manufacturer_id == DELL_IANA_MFR_ID && - smi_info->io.si_type == SI_BT) + smi_info->io.si_info->type == SI_BT) register_xaction_notifier(&dell_poweredge_bt_xaction_notifier); } @@ -1839,7 +1843,7 @@ static inline void stop_timer_and_thread(struct smi_info *smi_info) } smi_info->timer_can_start = false; - del_timer_sync(&smi_info->si_timer); + timer_delete_sync(&smi_info->si_timer); } static struct smi_info *find_dup_si(struct smi_info *info) @@ -1882,7 +1886,8 @@ int ipmi_si_add_smi(struct si_sm_io *io) } if (!io->io_setup) { - if (io->addr_space == IPMI_IO_ADDR_SPACE) { + if (IS_ENABLED(CONFIG_HAS_IOPORT) && + io->addr_space == IPMI_IO_ADDR_SPACE) { io->io_setup = ipmi_si_port_setup; } else if (io->addr_space == IPMI_MEM_ADDR_SPACE) { io->io_setup = ipmi_si_mem_setup; @@ -1906,13 +1911,13 @@ int ipmi_si_add_smi(struct si_sm_io *io) /* We prefer ACPI over SMBIOS. */ dev_info(dup->io.dev, "Removing SMBIOS-specified %s state machine in favor of ACPI\n", - si_to_str[new_smi->io.si_type]); + si_to_str[new_smi->io.si_info->type]); cleanup_one_si(dup); } else { dev_info(new_smi->io.dev, "%s-specified %s state machine: duplicate\n", ipmi_addr_src_to_str(new_smi->io.addr_source), - si_to_str[new_smi->io.si_type]); + si_to_str[new_smi->io.si_info->type]); rv = -EBUSY; kfree(new_smi); goto out_err; @@ -1921,7 +1926,7 @@ int ipmi_si_add_smi(struct si_sm_io *io) pr_info("Adding %s-specified %s state machine\n", ipmi_addr_src_to_str(new_smi->io.addr_source), - si_to_str[new_smi->io.si_type]); + si_to_str[new_smi->io.si_info->type]); list_add_tail(&new_smi->link, &smi_infos); @@ -1944,12 +1949,12 @@ static int try_smi_init(struct smi_info *new_smi) pr_info("Trying %s-specified %s state machine at %s address 0x%lx, slave address 0x%x, irq %d\n", ipmi_addr_src_to_str(new_smi->io.addr_source), - si_to_str[new_smi->io.si_type], + si_to_str[new_smi->io.si_info->type], addr_space_to_str[new_smi->io.addr_space], new_smi->io.addr_data, new_smi->io.slave_addr, new_smi->io.irq); - switch (new_smi->io.si_type) { + switch (new_smi->io.si_info->type) { case SI_KCS: new_smi->handlers = &kcs_smi_handlers; break; @@ -2072,7 +2077,7 @@ static int try_smi_init(struct smi_info *new_smi) smi_num++; dev_info(new_smi->io.dev, "IPMI %s interface initialized\n", - si_to_str[new_smi->io.si_type]); + si_to_str[new_smi->io.si_info->type]); WARN_ON(new_smi->io.dev->init_name != NULL); @@ -2090,9 +2095,18 @@ static int try_smi_init(struct smi_info *new_smi) return rv; } +/* + * Devices in the same address space at the same address are the same. 
+ */ +static bool __init ipmi_smi_info_same(struct smi_info *e1, struct smi_info *e2) +{ + return (e1->io.addr_space == e2->io.addr_space && + e1->io.addr_data == e2->io.addr_data); +} + static int __init init_ipmi_si(void) { - struct smi_info *e; + struct smi_info *e, *e2; enum ipmi_addr_src type = SI_INVALID; if (initialized) @@ -2108,37 +2122,70 @@ static int __init init_ipmi_si(void) ipmi_si_parisc_init(); - /* We prefer devices with interrupts, but in the case of a machine - with multiple BMCs we assume that there will be several instances - of a given type so if we succeed in registering a type then also - try to register everything else of the same type */ mutex_lock(&smi_infos_lock); + + /* + * Scan through all the devices. We prefer devices with + * interrupts, so go through those first in case there are any + * duplicates that don't have the interrupt set. + */ list_for_each_entry(e, &smi_infos, link) { - /* Try to register a device if it has an IRQ and we either - haven't successfully registered a device yet or this - device has the same type as one we successfully registered */ - if (e->io.irq && (!type || e->io.addr_source == type)) { - if (!try_smi_init(e)) { - type = e->io.addr_source; + bool dup = false; + + /* Register ones with interrupts first. */ + if (!e->io.irq) + continue; + + /* + * Go through the ones we have already seen to see if this + * is a dup. + */ + list_for_each_entry(e2, &smi_infos, link) { + if (e2 == e) + break; + if (e2->io.irq && ipmi_smi_info_same(e, e2)) { + dup = true; + break; } } + if (!dup) + try_smi_init(e); } - /* type will only have been set if we successfully registered an si */ - if (type) - goto skip_fallback_noirq; + /* + * Now try devices without interrupts. + */ + list_for_each_entry(e, &smi_infos, link) { + bool dup = false; - /* Fall back to the preferred device */ + if (e->io.irq) + continue; - list_for_each_entry(e, &smi_infos, link) { - if (!e->io.irq && (!type || e->io.addr_source == type)) { - if (!try_smi_init(e)) { - type = e->io.addr_source; + /* + * Go through the ones we have already seen to see if + * this is a dup. We have already looked at the ones + * with interrupts. 
+ */ + list_for_each_entry(e2, &smi_infos, link) { + if (!e2->io.irq) + continue; + if (ipmi_smi_info_same(e, e2)) { + dup = true; + break; + } + } + list_for_each_entry(e2, &smi_infos, link) { + if (e2 == e) + break; + if (ipmi_smi_info_same(e, e2)) { + dup = true; + break; } } + if (!dup) + try_smi_init(e); } -skip_fallback_noirq: initialized = true; mutex_unlock(&smi_infos_lock); @@ -2266,7 +2313,7 @@ struct device *ipmi_si_remove_by_data(int addr_space, enum si_type si_type, list_for_each_entry_safe(e, tmp_e, &smi_infos, link) { if (e->io.addr_space != addr_space) continue; - if (e->io.si_type != si_type) + if (e->io.si_info->type != si_type) continue; if (e->io.addr_data == addr) { dev = get_device(e->io.dev); diff --git a/drivers/char/ipmi/ipmi_si_parisc.c b/drivers/char/ipmi/ipmi_si_parisc.c index 2be2967f6b5f..3b0a70d9adbb 100644 --- a/drivers/char/ipmi/ipmi_si_parisc.c +++ b/drivers/char/ipmi/ipmi_si_parisc.c @@ -13,7 +13,7 @@ static int __init ipmi_parisc_probe(struct parisc_device *dev) memset(&io, 0, sizeof(io)); - io.si_type = SI_KCS; + io.si_info = &ipmi_kcs_si_info; io.addr_source = SI_DEVICETREE; io.addr_space = IPMI_MEM_ADDR_SPACE; io.addr_data = dev->hpa.start; diff --git a/drivers/char/ipmi/ipmi_si_pci.c b/drivers/char/ipmi/ipmi_si_pci.c index 74fa2055868b..17f72763322d 100644 --- a/drivers/char/ipmi/ipmi_si_pci.c +++ b/drivers/char/ipmi/ipmi_si_pci.c @@ -23,30 +23,32 @@ MODULE_PARM_DESC(trypci, static int ipmi_pci_probe_regspacing(struct si_sm_io *io) { - if (io->si_type == SI_KCS) { - unsigned char status; - int regspacing; - - io->regsize = DEFAULT_REGSIZE; - io->regshift = 0; - - /* detect 1, 4, 16byte spacing */ - for (regspacing = DEFAULT_REGSPACING; regspacing <= 16;) { - io->regspacing = regspacing; - if (io->io_setup(io)) { - dev_err(io->dev, "Could not setup I/O space\n"); - return DEFAULT_REGSPACING; - } - /* write invalid cmd */ - io->outputb(io, 1, 0x10); - /* read status back */ - status = io->inputb(io, 1); - io->io_cleanup(io); - if (status) - return regspacing; - regspacing *= 4; + unsigned char status; + int regspacing; + + if (io->si_info->type != SI_KCS) + return DEFAULT_REGSPACING; + + io->regsize = DEFAULT_REGSIZE; + io->regshift = 0; + + /* detect 1, 4, 16byte spacing */ + for (regspacing = DEFAULT_REGSPACING; regspacing <= 16;) { + io->regspacing = regspacing; + if (io->io_setup(io)) { + dev_err(io->dev, "Could not setup I/O space\n"); + return DEFAULT_REGSPACING; } + /* write invalid cmd */ + io->outputb(io, 1, 0x10); + /* read status back */ + status = io->inputb(io, 1); + io->io_cleanup(io); + if (status) + return regspacing; + regspacing *= 4; } + return DEFAULT_REGSPACING; } @@ -74,15 +76,15 @@ static int ipmi_pci_probe(struct pci_dev *pdev, switch (pdev->class) { case PCI_CLASS_SERIAL_IPMI_SMIC: - io.si_type = SI_SMIC; + io.si_info = &ipmi_smic_si_info; break; case PCI_CLASS_SERIAL_IPMI_KCS: - io.si_type = SI_KCS; + io.si_info = &ipmi_kcs_si_info; break; case PCI_CLASS_SERIAL_IPMI_BT: - io.si_type = SI_BT; + io.si_info = &ipmi_bt_si_info; break; default: @@ -97,6 +99,9 @@ static int ipmi_pci_probe(struct pci_dev *pdev, } if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) { + if (!IS_ENABLED(CONFIG_HAS_IOPORT)) + return -ENXIO; + io.addr_space = IPMI_IO_ADDR_SPACE; io.io_setup = ipmi_si_port_setup; } else { @@ -115,7 +120,7 @@ static int ipmi_pci_probe(struct pci_dev *pdev, if (io.irq) io.irq_setup = ipmi_std_irq_setup; - dev_info(&pdev->dev, "%pR regsize %d spacing %d irq %d\n", + dev_info(&pdev->dev, "%pR regsize %u spacing %u irq %d\n", 
&pdev->resource[0], io.regsize, io.regspacing, io.irq); return ipmi_si_add_smi(&io); diff --git a/drivers/char/ipmi/ipmi_si_platform.c b/drivers/char/ipmi/ipmi_si_platform.c index cd2edd8f8a03..fb6e359ae494 100644 --- a/drivers/char/ipmi/ipmi_si_platform.c +++ b/drivers/char/ipmi/ipmi_si_platform.c @@ -163,9 +163,13 @@ static int platform_ipmi_probe(struct platform_device *pdev) switch (type) { case SI_KCS: + io.si_info = &ipmi_kcs_si_info; + break; case SI_SMIC: + io.si_info = &ipmi_smic_si_info; + break; case SI_BT: - io.si_type = type; + io.si_info = &ipmi_bt_si_info; break; case SI_TYPE_INVALID: /* User disabled this in hardcode. */ return -ENODEV; @@ -213,13 +217,10 @@ static int platform_ipmi_probe(struct platform_device *pdev) #ifdef CONFIG_OF static const struct of_device_id of_ipmi_match[] = { - { .type = "ipmi", .compatible = "ipmi-kcs", - .data = (void *)(unsigned long) SI_KCS }, - { .type = "ipmi", .compatible = "ipmi-smic", - .data = (void *)(unsigned long) SI_SMIC }, - { .type = "ipmi", .compatible = "ipmi-bt", - .data = (void *)(unsigned long) SI_BT }, - {}, + { .type = "ipmi", .compatible = "ipmi-kcs", .data = &ipmi_kcs_si_info }, + { .type = "ipmi", .compatible = "ipmi-smic", .data = &ipmi_smic_si_info }, + { .type = "ipmi", .compatible = "ipmi-bt", .data = &ipmi_bt_si_info }, + {} }; MODULE_DEVICE_TABLE(of, of_ipmi_match); @@ -265,7 +266,7 @@ static int of_ipmi_probe(struct platform_device *pdev) } memset(&io, 0, sizeof(io)); - io.si_type = (enum si_type)device_get_match_data(&pdev->dev); + io.si_info = device_get_match_data(&pdev->dev); io.addr_source = SI_DEVICETREE; io.irq_setup = ipmi_std_irq_setup; @@ -296,7 +297,7 @@ static int find_slave_address(struct si_sm_io *io, int slave_addr) { #ifdef CONFIG_IPMI_DMI_DECODE if (!slave_addr) - slave_addr = ipmi_dmi_get_slave_addr(io->si_type, + slave_addr = ipmi_dmi_get_slave_addr(io->si_info->type, io->addr_space, io->addr_data); #endif @@ -335,13 +336,13 @@ static int acpi_ipmi_probe(struct platform_device *pdev) switch (tmp) { case 1: - io.si_type = SI_KCS; + io.si_info = &ipmi_kcs_si_info; break; case 2: - io.si_type = SI_SMIC; + io.si_info = &ipmi_smic_si_info; break; case 3: - io.si_type = SI_BT; + io.si_info = &ipmi_bt_si_info; break; case 4: /* SSIF, just ignore */ return -ENODEV; @@ -405,11 +406,9 @@ static int ipmi_probe(struct platform_device *pdev) return platform_ipmi_probe(pdev); } -static int ipmi_remove(struct platform_device *pdev) +static void ipmi_remove(struct platform_device *pdev) { ipmi_si_remove_by_dev(&pdev->dev); - - return 0; } static int pdev_match_name(struct device *dev, const void *data) diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c index 1f7600c361e6..5bf038e620c7 100644 --- a/drivers/char/ipmi/ipmi_ssif.c +++ b/drivers/char/ipmi/ipmi_ssif.c @@ -481,8 +481,6 @@ static int ipmi_ssif_thread(void *data) /* Wait for something to do */ result = wait_for_completion_interruptible( &ssif_info->wake_thread); - if (ssif_info->stopping) - break; if (result == -ERESTARTSYS) continue; init_completion(&ssif_info->wake_thread); @@ -599,7 +597,7 @@ static void ssif_alert(struct i2c_client *client, enum i2c_alert_protocol type, flags = ipmi_ssif_lock_cond(ssif_info, &oflags); if (ssif_info->waiting_alert) { ssif_info->waiting_alert = false; - del_timer(&ssif_info->retry_timer); + timer_delete(&ssif_info->retry_timer); do_get = true; } else if (ssif_info->curr_msg) { ssif_info->got_alert = true; @@ -980,7 +978,7 @@ static void msg_written_handler(struct ssif_info *ssif_info, int 
result, ipmi_ssif_unlock_cond(ssif_info, flags); start_get(ssif_info); } else { - /* Wait a jiffie then request the next message */ + /* Wait a jiffy then request the next message */ ssif_info->waiting_alert = true; ssif_info->retries_left = SSIF_RECV_RETRIES; if (!ssif_info->stopping) @@ -1268,12 +1266,10 @@ static void shutdown_ssif(void *send_info) schedule_timeout(1); ssif_info->stopping = true; - del_timer_sync(&ssif_info->watch_timer); - del_timer_sync(&ssif_info->retry_timer); - if (ssif_info->thread) { - complete(&ssif_info->wake_thread); + timer_delete_sync(&ssif_info->watch_timer); + timer_delete_sync(&ssif_info->retry_timer); + if (ssif_info->thread) kthread_stop(ssif_info->thread); - } } static void ssif_remove(struct i2c_client *client) @@ -1368,8 +1364,20 @@ static int ssif_detect(struct i2c_client *client, struct i2c_board_info *info) rv = do_cmd(client, 2, msg, &len, resp); if (rv) rv = -ENODEV; - else + else { + if (len < 3) { + rv = -ENODEV; + } else { + struct ipmi_device_id id; + + rv = ipmi_demangle_device_id(resp[0] >> 2, resp[1], + resp + 2, len - 2, &id); + if (rv) + rv = -ENODEV; /* Error means a BMC probably isn't there. */ + } + if (!rv && info) strscpy(info->type, DEVICE_NAME, I2C_NAME_SIZE); + } kfree(resp); return rv; } @@ -1704,6 +1712,16 @@ static int ssif_probe(struct i2c_client *client) ipmi_addr_src_to_str(ssif_info->addr_source), client->addr, client->adapter->name, slave_addr); + /* + * Send a get device id command and validate its response to + * make sure a valid BMC is there. + */ + rv = ssif_detect(client, NULL); + if (rv) { + dev_err(&client->dev, "Not present\n"); + goto out; + } + /* Now check for system interface capabilities */ msg[0] = IPMI_NETFN_APP_REQUEST << 2; msg[1] = IPMI_GET_SYSTEM_INTERFACE_CAPABILITIES_CMD; @@ -2049,7 +2067,7 @@ static int dmi_ipmi_probe(struct platform_device *pdev) #endif static const struct i2c_device_id ssif_id[] = { - { DEVICE_NAME, 0 }, + { DEVICE_NAME }, { } }; MODULE_DEVICE_TABLE(i2c, ssif_id); @@ -2071,7 +2089,7 @@ static int ssif_platform_probe(struct platform_device *dev) return dmi_ipmi_probe(dev); } -static int ssif_platform_remove(struct platform_device *dev) +static void ssif_platform_remove(struct platform_device *dev) { struct ssif_addr_info *addr_info = dev_get_drvdata(&dev->dev); @@ -2079,13 +2097,13 @@ static int ssif_platform_remove(struct platform_device *dev) list_del(&addr_info->link); kfree(addr_info); mutex_unlock(&ssif_infos_mutex); - return 0; } static const struct platform_device_id ssif_plat_ids[] = { { "dmi-ipmi-ssif", 0 }, { } }; +MODULE_DEVICE_TABLE(platform, ssif_plat_ids); static struct platform_driver ipmi_driver = { .driver = { diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c index 9a459257489f..ab759b492fdd 100644 --- a/drivers/char/ipmi/ipmi_watchdog.c +++ b/drivers/char/ipmi/ipmi_watchdog.c @@ -150,7 +150,7 @@ static char preaction[16] = "pre_none"; static unsigned char preop_val = WDOG_PREOP_NONE; static char preop[16] = "preop_none"; -static DEFINE_SPINLOCK(ipmi_read_lock); +static DEFINE_MUTEX(ipmi_read_mutex); static char data_to_read; static DECLARE_WAIT_QUEUE_HEAD(read_q); static struct fasync_struct *fasync_q; @@ -363,7 +363,7 @@ static int __ipmi_set_timeout(struct ipmi_smi_msg *smi_msg, { struct kernel_ipmi_msg msg; unsigned char data[6]; - int rv; + int rv = 0; struct ipmi_system_interface_addr addr; int hbnow = 0; @@ -405,14 +405,18 @@ static int __ipmi_set_timeout(struct ipmi_smi_msg *smi_msg, msg.cmd = IPMI_WDOG_SET_TIMER; msg.data 
= data; msg.data_len = sizeof(data); - rv = ipmi_request_supply_msgs(watchdog_user, - (struct ipmi_addr *) &addr, - 0, - &msg, - NULL, - smi_msg, - recv_msg, - 1); + if (smi_msg) + rv = ipmi_request_supply_msgs(watchdog_user, + (struct ipmi_addr *) &addr, + 0, + &msg, + NULL, + smi_msg, + recv_msg, + 1); + else + ipmi_panic_request_and_wait(watchdog_user, + (struct ipmi_addr *) &addr, &msg); if (rv) pr_warn("set timeout error: %d\n", rv); else if (send_heartbeat_now) @@ -431,9 +435,7 @@ static int _ipmi_set_timeout(int do_heartbeat) atomic_set(&msg_tofree, 2); - rv = __ipmi_set_timeout(&smi_msg, - &recv_msg, - &send_heartbeat_now); + rv = __ipmi_set_timeout(&smi_msg, &recv_msg, &send_heartbeat_now); if (rv) { atomic_set(&msg_tofree, 0); return rv; @@ -460,27 +462,10 @@ static int ipmi_set_timeout(int do_heartbeat) return rv; } -static atomic_t panic_done_count = ATOMIC_INIT(0); - -static void panic_smi_free(struct ipmi_smi_msg *msg) -{ - atomic_dec(&panic_done_count); -} -static void panic_recv_free(struct ipmi_recv_msg *msg) -{ - atomic_dec(&panic_done_count); -} - -static struct ipmi_smi_msg panic_halt_heartbeat_smi_msg = - INIT_IPMI_SMI_MSG(panic_smi_free); -static struct ipmi_recv_msg panic_halt_heartbeat_recv_msg = - INIT_IPMI_RECV_MSG(panic_recv_free); - static void panic_halt_ipmi_heartbeat(void) { struct kernel_ipmi_msg msg; struct ipmi_system_interface_addr addr; - int rv; /* * Don't reset the timer if we have the timer turned off, that @@ -497,24 +482,10 @@ static void panic_halt_ipmi_heartbeat(void) msg.cmd = IPMI_WDOG_RESET_TIMER; msg.data = NULL; msg.data_len = 0; - atomic_add(2, &panic_done_count); - rv = ipmi_request_supply_msgs(watchdog_user, - (struct ipmi_addr *) &addr, - 0, - &msg, - NULL, - &panic_halt_heartbeat_smi_msg, - &panic_halt_heartbeat_recv_msg, - 1); - if (rv) - atomic_sub(2, &panic_done_count); + ipmi_panic_request_and_wait(watchdog_user, (struct ipmi_addr *) &addr, + &msg); } -static struct ipmi_smi_msg panic_halt_smi_msg = - INIT_IPMI_SMI_MSG(panic_smi_free); -static struct ipmi_recv_msg panic_halt_recv_msg = - INIT_IPMI_RECV_MSG(panic_recv_free); - /* * Special call, doesn't claim any locks. This is only to be called * at panic or halt time, in run-to-completion mode, when the caller @@ -526,22 +497,13 @@ static void panic_halt_ipmi_set_timeout(void) int send_heartbeat_now; int rv; - /* Wait for the messages to be free. */ - while (atomic_read(&panic_done_count) != 0) - ipmi_poll_interface(watchdog_user); - atomic_add(2, &panic_done_count); - rv = __ipmi_set_timeout(&panic_halt_smi_msg, - &panic_halt_recv_msg, - &send_heartbeat_now); + rv = __ipmi_set_timeout(NULL, NULL, &send_heartbeat_now); if (rv) { - atomic_sub(2, &panic_done_count); pr_warn("Unable to extend the watchdog timeout\n"); } else { if (send_heartbeat_now) panic_halt_ipmi_heartbeat(); } - while (atomic_read(&panic_done_count) != 0) - ipmi_poll_interface(watchdog_user); } static int __ipmi_heartbeat(void) @@ -793,7 +755,7 @@ static ssize_t ipmi_read(struct file *file, * Reading returns if the pretimeout has gone off, and it only does * it once per pretimeout. 
*/ - spin_lock_irq(&ipmi_read_lock); + mutex_lock(&ipmi_read_mutex); if (!data_to_read) { if (file->f_flags & O_NONBLOCK) { rv = -EAGAIN; @@ -804,9 +766,9 @@ static ssize_t ipmi_read(struct file *file, add_wait_queue(&read_q, &wait); while (!data_to_read && !signal_pending(current)) { set_current_state(TASK_INTERRUPTIBLE); - spin_unlock_irq(&ipmi_read_lock); + mutex_unlock(&ipmi_read_mutex); schedule(); - spin_lock_irq(&ipmi_read_lock); + mutex_lock(&ipmi_read_mutex); } remove_wait_queue(&read_q, &wait); @@ -818,7 +780,7 @@ static ssize_t ipmi_read(struct file *file, data_to_read = 0; out: - spin_unlock_irq(&ipmi_read_lock); + mutex_unlock(&ipmi_read_mutex); if (rv == 0) { if (copy_to_user(buf, &data_to_read, 1)) @@ -856,10 +818,10 @@ static __poll_t ipmi_poll(struct file *file, poll_table *wait) poll_wait(file, &read_q, wait); - spin_lock_irq(&ipmi_read_lock); + mutex_lock(&ipmi_read_mutex); if (data_to_read) mask |= (EPOLLIN | EPOLLRDNORM); - spin_unlock_irq(&ipmi_read_lock); + mutex_unlock(&ipmi_read_mutex); return mask; } @@ -903,7 +865,6 @@ static const struct file_operations ipmi_wdog_fops = { .open = ipmi_open, .release = ipmi_close, .fasync = ipmi_fasync, - .llseek = no_llseek, }; static struct miscdevice ipmi_wdog_miscdev = { @@ -933,13 +894,11 @@ static void ipmi_wdog_pretimeout_handler(void *handler_data) if (atomic_inc_and_test(&preop_panic_excl)) panic("Watchdog pre-timeout"); } else if (preop_val == WDOG_PREOP_GIVE_DATA) { - unsigned long flags; - - spin_lock_irqsave(&ipmi_read_lock, flags); + mutex_lock(&ipmi_read_mutex); data_to_read = 1; wake_up_interruptible(&read_q); kill_fasync(&fasync_q, SIGIO, POLL_IN); - spin_unlock_irqrestore(&ipmi_read_lock, flags); + mutex_unlock(&ipmi_read_mutex); } } @@ -1065,7 +1024,6 @@ static void ipmi_register_watchdog(int ipmi_intf) static void ipmi_unregister_watchdog(int ipmi_intf) { - int rv; struct ipmi_user *loc_user = watchdog_user; if (!loc_user) @@ -1090,9 +1048,7 @@ static void ipmi_unregister_watchdog(int ipmi_intf) mutex_lock(&ipmi_watchdog_mutex); /* Disconnect from IPMI. */ - rv = ipmi_destroy_user(loc_user); - if (rv) - pr_warn("error unlinking from IPMI: %d\n", rv); + ipmi_destroy_user(loc_user); /* If it comes back, restart it properly. 
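With the pretimeout now signalled from workqueue (process) context, the watchdog read path above can guard data_to_read with ipmi_read_mutex instead of an IRQ-disabling spinlock. A compact sketch of that producer/consumer shape, using generic names and the shorter wait_event_interruptible() form rather than the driver's open-coded wait loop:

#include <linux/mutex.h>
#include <linux/sched/signal.h>
#include <linux/wait.h>

static DEFINE_MUTEX(example_mutex);
static DECLARE_WAIT_QUEUE_HEAD(example_waitq);
static bool example_data_ready;

/* Producer: runs in process context, so a mutex is sufficient. */
static void example_signal(void)
{
	mutex_lock(&example_mutex);
	example_data_ready = true;
	mutex_unlock(&example_mutex);
	wake_up_interruptible(&example_waitq);
}

/* Consumer: sleep until the flag is set, then consume it. */
static int example_wait(void)
{
	int ret;

	ret = wait_event_interruptible(example_waitq, example_data_ready);
	if (ret)
		return ret;	/* -ERESTARTSYS on a signal */

	mutex_lock(&example_mutex);
	example_data_ready = false;
	mutex_unlock(&example_mutex);
	return 0;
}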
*/ ipmi_start_timer_on_heartbeat = 1; diff --git a/drivers/char/ipmi/kcs_bmc_aspeed.c b/drivers/char/ipmi/kcs_bmc_aspeed.c index 72640da55380..a13a3470c17a 100644 --- a/drivers/char/ipmi/kcs_bmc_aspeed.c +++ b/drivers/char/ipmi/kcs_bmc_aspeed.c @@ -428,7 +428,7 @@ static void aspeed_kcs_irq_mask_update(struct kcs_bmc_device *kcs_bmc, u8 mask, if (rc == -ETIMEDOUT) mod_timer(&priv->obe.timer, jiffies + OBE_POLL_PERIOD); } else { - del_timer(&priv->obe.timer); + timer_delete(&priv->obe.timer); } } @@ -641,7 +641,7 @@ static int aspeed_kcs_probe(struct platform_device *pdev) return 0; } -static int aspeed_kcs_remove(struct platform_device *pdev) +static void aspeed_kcs_remove(struct platform_device *pdev) { struct aspeed_kcs_bmc *priv = platform_get_drvdata(pdev); struct kcs_bmc_device *kcs_bmc = &priv->kcs_bmc; @@ -655,9 +655,7 @@ static int aspeed_kcs_remove(struct platform_device *pdev) spin_lock_irq(&priv->obe.lock); priv->obe.remove = true; spin_unlock_irq(&priv->obe.lock); - del_timer_sync(&priv->obe.timer); - - return 0; + timer_delete_sync(&priv->obe.timer); } static const struct of_device_id ast_kcs_bmc_match[] = { diff --git a/drivers/char/ipmi/kcs_bmc_npcm7xx.c b/drivers/char/ipmi/kcs_bmc_npcm7xx.c index 7961fec56476..4808a61bf273 100644 --- a/drivers/char/ipmi/kcs_bmc_npcm7xx.c +++ b/drivers/char/ipmi/kcs_bmc_npcm7xx.c @@ -218,7 +218,7 @@ static int npcm7xx_kcs_probe(struct platform_device *pdev) return 0; } -static int npcm7xx_kcs_remove(struct platform_device *pdev) +static void npcm7xx_kcs_remove(struct platform_device *pdev) { struct npcm7xx_kcs_bmc *priv = platform_get_drvdata(pdev); struct kcs_bmc_device *kcs_bmc = &priv->kcs_bmc; @@ -227,8 +227,6 @@ static int npcm7xx_kcs_remove(struct platform_device *pdev) npcm7xx_kcs_enable_channel(kcs_bmc, false); npcm7xx_kcs_irq_mask_update(kcs_bmc, (KCS_BMC_EVENT_TYPE_IBF | KCS_BMC_EVENT_TYPE_OBE), 0); - - return 0; } static const struct of_device_id npcm_kcs_bmc_match[] = { @@ -243,7 +241,7 @@ static struct platform_driver npcm_kcs_bmc_driver = { .of_match_table = npcm_kcs_bmc_match, }, .probe = npcm7xx_kcs_probe, - .remove = npcm7xx_kcs_remove, + .remove = npcm7xx_kcs_remove, }; module_platform_driver(npcm_kcs_bmc_driver); diff --git a/drivers/char/ipmi/ssif_bmc.c b/drivers/char/ipmi/ssif_bmc.c index 56346fb32872..e4bd74585d4d 100644 --- a/drivers/char/ipmi/ssif_bmc.c +++ b/drivers/char/ipmi/ssif_bmc.c @@ -177,13 +177,15 @@ static ssize_t ssif_bmc_write(struct file *file, const char __user *buf, size_t unsigned long flags; ssize_t ret; - if (count > sizeof(struct ipmi_ssif_msg)) + if (count < sizeof(msg.len) || + count > sizeof(struct ipmi_ssif_msg)) return -EINVAL; if (copy_from_user(&msg, buf, count)) return -EFAULT; - if (!msg.len || count < sizeof_field(struct ipmi_ssif_msg, len) + msg.len) + if (!msg.len || msg.len > IPMI_SSIF_PAYLOAD_MAX || + count < sizeof_field(struct ipmi_ssif_msg, len) + msg.len) return -EINVAL; spin_lock_irqsave(&ssif_bmc->lock, flags); @@ -207,7 +209,7 @@ static ssize_t ssif_bmc_write(struct file *file, const char __user *buf, size_t if (ret) goto exit; - del_timer(&ssif_bmc->response_timer); + timer_delete(&ssif_bmc->response_timer); ssif_bmc->response_timer_inited = false; memcpy(&ssif_bmc->response, &msg, count); @@ -290,7 +292,6 @@ static void complete_response(struct ssif_bmc_ctx *ssif_bmc) ssif_bmc->nbytes_processed = 0; ssif_bmc->remain_len = 0; ssif_bmc->busy = false; - memset(&ssif_bmc->part_buf, 0, sizeof(struct ssif_part_buffer)); wake_up_all(&ssif_bmc->wait_queue); } @@ -742,9 +743,11 @@ 
static void on_stop_event(struct ssif_bmc_ctx *ssif_bmc, u8 *val) ssif_bmc->aborting = true; } } else if (ssif_bmc->state == SSIF_RES_SENDING) { - if (ssif_bmc->is_singlepart_read || ssif_bmc->block_num == 0xFF) + if (ssif_bmc->is_singlepart_read || ssif_bmc->block_num == 0xFF) { + memset(&ssif_bmc->part_buf, 0, sizeof(struct ssif_part_buffer)); /* Invalidate response buffer to denote it is sent */ complete_response(ssif_bmc); + } ssif_bmc->state = SSIF_READY; } @@ -850,8 +853,8 @@ static const struct of_device_id ssif_bmc_match[] = { MODULE_DEVICE_TABLE(of, ssif_bmc_match); static const struct i2c_device_id ssif_bmc_id[] = { - { DEVICE_NAME, 0 }, - { }, + { DEVICE_NAME }, + { } }; MODULE_DEVICE_TABLE(i2c, ssif_bmc_id); diff --git a/drivers/char/lp.c b/drivers/char/lp.c index 2f171d14b9b5..24417a00dfe9 100644 --- a/drivers/char/lp.c +++ b/drivers/char/lp.c @@ -1016,7 +1016,6 @@ static struct parport_driver lp_driver = { .name = "lp", .match_port = lp_attach, .detach = lp_detach, - .devmodel = true, }; static int __init lp_init(void) @@ -1123,4 +1122,5 @@ module_init(lp_init_module); module_exit(lp_cleanup_module); MODULE_ALIAS_CHARDEV_MAJOR(LP_MAJOR); +MODULE_DESCRIPTION("Generic parallel printer driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/char/mem.c b/drivers/char/mem.c index 3c6670cf905f..48839958b0b1 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c @@ -61,29 +61,11 @@ static inline int page_is_allowed(unsigned long pfn) { return devmem_is_allowed(pfn); } -static inline int range_is_allowed(unsigned long pfn, unsigned long size) -{ - u64 from = ((u64)pfn) << PAGE_SHIFT; - u64 to = from + size; - u64 cursor = from; - - while (cursor < to) { - if (!devmem_is_allowed(pfn)) - return 0; - cursor += PAGE_SIZE; - pfn++; - } - return 1; -} #else static inline int page_is_allowed(unsigned long pfn) { return 1; } -static inline int range_is_allowed(unsigned long pfn, unsigned long size) -{ - return 1; -} #endif static inline bool should_stop_iteration(void) @@ -383,6 +365,7 @@ static int mmap_mem(struct file *file, struct vm_area_struct *vma) return 0; } +#ifdef CONFIG_DEVPORT static ssize_t read_port(struct file *file, char __user *buf, size_t count, loff_t *ppos) { @@ -424,6 +407,7 @@ static ssize_t write_port(struct file *file, const char __user *buf, *ppos = i; return tmp-buf; } +#endif static ssize_t read_null(struct file *file, char __user *buf, size_t count, loff_t *ppos) @@ -544,7 +528,7 @@ static unsigned long get_unmapped_area_zero(struct file *file, } /* Otherwise flags & MAP_PRIVATE: with no shmem object beneath it */ - return current->mm->get_unmapped_area(file, addr, len, pgoff, flags); + return mm_get_unmapped_area(current->mm, file, addr, len, pgoff, flags); #else return -ENOSYS; #endif @@ -641,6 +625,7 @@ static const struct file_operations __maybe_unused mem_fops = { .get_unmapped_area = get_unmapped_area_mem, .mmap_capabilities = memory_mmap_capabilities, #endif + .fop_flags = FOP_UNSIGNED_OFFSET, }; static const struct file_operations null_fops = { @@ -653,12 +638,14 @@ static const struct file_operations null_fops = { .uring_cmd = uring_cmd_null, }; -static const struct file_operations __maybe_unused port_fops = { +#ifdef CONFIG_DEVPORT +static const struct file_operations port_fops = { .llseek = memory_lseek, .read = read_port, .write = write_port, .open = open_port, }; +#endif static const struct file_operations zero_fops = { .llseek = zero_lseek, @@ -689,7 +676,7 @@ static const struct memdev { umode_t mode; } devlist[] = { #ifdef CONFIG_DEVMEM - 
[DEVMEM_MINOR] = { "mem", &mem_fops, FMODE_UNSIGNED_OFFSET, 0 }, + [DEVMEM_MINOR] = { "mem", &mem_fops, 0, 0 }, #endif [3] = { "null", &null_fops, FMODE_NOWAIT, 0666 }, #ifdef CONFIG_DEVPORT diff --git a/drivers/char/misc.c b/drivers/char/misc.c index 541edc26ec89..dda466f9181a 100644 --- a/drivers/char/misc.c +++ b/drivers/char/misc.c @@ -63,16 +63,30 @@ static DEFINE_MUTEX(misc_mtx); #define DYNAMIC_MINORS 128 /* like dynamic majors */ static DEFINE_IDA(misc_minors_ida); -static int misc_minor_alloc(void) +static int misc_minor_alloc(int minor) { - int ret; - - ret = ida_alloc_max(&misc_minors_ida, DYNAMIC_MINORS - 1, GFP_KERNEL); - if (ret >= 0) { - ret = DYNAMIC_MINORS - ret - 1; + int ret = 0; + + if (minor == MISC_DYNAMIC_MINOR) { + /* allocate free id */ + ret = ida_alloc_max(&misc_minors_ida, DYNAMIC_MINORS - 1, GFP_KERNEL); + if (ret >= 0) { + ret = DYNAMIC_MINORS - ret - 1; + } else { + ret = ida_alloc_range(&misc_minors_ida, MISC_DYNAMIC_MINOR + 1, + MINORMASK, GFP_KERNEL); + } } else { - ret = ida_alloc_range(&misc_minors_ida, MISC_DYNAMIC_MINOR + 1, - MINORMASK, GFP_KERNEL); + /* specific minor, check if it is in dynamic or misc dynamic range */ + if (minor < DYNAMIC_MINORS) { + minor = DYNAMIC_MINORS - minor - 1; + ret = ida_alloc_range(&misc_minors_ida, minor, minor, GFP_KERNEL); + } else if (minor > MISC_DYNAMIC_MINOR) { + ret = ida_alloc_range(&misc_minors_ida, minor, minor, GFP_KERNEL); + } else { + /* case of non-dynamic minors, no need to allocate id */ + ret = 0; + } } return ret; } @@ -219,7 +233,7 @@ int misc_register(struct miscdevice *misc) mutex_lock(&misc_mtx); if (is_dynamic) { - int i = misc_minor_alloc(); + int i = misc_minor_alloc(misc->minor); if (i < 0) { err = -EBUSY; @@ -228,6 +242,7 @@ int misc_register(struct miscdevice *misc) misc->minor = i; } else { struct miscdevice *c; + int i; list_for_each_entry(c, &misc_list, list) { if (c->minor == misc->minor) { @@ -235,6 +250,12 @@ int misc_register(struct miscdevice *misc) goto out; } } + + i = misc_minor_alloc(misc->minor); + if (i < 0) { + err = -EBUSY; + goto out; + } } dev = MKDEV(MISC_MAJOR, misc->minor); @@ -243,8 +264,8 @@ int misc_register(struct miscdevice *misc) device_create_with_groups(&misc_class, misc->parent, dev, misc, misc->groups, "%s", misc->name); if (IS_ERR(misc->this_device)) { + misc_minor_free(misc->minor); if (is_dynamic) { - misc_minor_free(misc->minor); misc->minor = MISC_DYNAMIC_MINOR; } err = PTR_ERR(misc->this_device); @@ -294,7 +315,7 @@ static int __init misc_init(void) goto fail_remove; err = -EIO; - if (register_chrdev(MISC_MAJOR, "misc", &misc_fops)) + if (__register_chrdev(MISC_MAJOR, 0, MINORMASK + 1, "misc", &misc_fops)) goto fail_printk; return 0; diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c index e9f694b36871..9eff426a9286 100644 --- a/drivers/char/nvram.c +++ b/drivers/char/nvram.c @@ -540,6 +540,7 @@ static void __exit nvram_module_exit(void) module_init(nvram_module_init); module_exit(nvram_module_exit); +MODULE_DESCRIPTION("CMOS/NV-RAM driver for Linux"); MODULE_LICENSE("GPL"); MODULE_ALIAS_MISCDEV(NVRAM_MINOR); MODULE_ALIAS("devname:nvram"); diff --git a/drivers/char/nwbutton.c b/drivers/char/nwbutton.c index ea378c0ed549..92cee5717237 100644 --- a/drivers/char/nwbutton.c +++ b/drivers/char/nwbutton.c @@ -241,6 +241,7 @@ static void __exit nwbutton_exit (void) MODULE_AUTHOR("Alex Holden"); +MODULE_DESCRIPTION("NetWinder button driver"); MODULE_LICENSE("GPL"); module_init(nwbutton_init); diff --git a/drivers/char/nwflash.c b/drivers/char/nwflash.c index 
0973c2c2b01a..9f52f0306ef7 100644 --- a/drivers/char/nwflash.c +++ b/drivers/char/nwflash.c @@ -618,6 +618,7 @@ static void __exit nwflash_exit(void) iounmap((void *)FLASH_BASE); } +MODULE_DESCRIPTION("NetWinder flash memory driver"); MODULE_LICENSE("GPL"); module_param(flashdebug, bool, 0644); diff --git a/drivers/char/pc8736x_gpio.c b/drivers/char/pc8736x_gpio.c index c39a836ebd15..5f4696813cea 100644 --- a/drivers/char/pc8736x_gpio.c +++ b/drivers/char/pc8736x_gpio.c @@ -235,7 +235,6 @@ static const struct file_operations pc8736x_gpio_fileops = { .open = pc8736x_gpio_open, .write = nsc_gpio_write, .read = nsc_gpio_read, - .llseek = no_llseek, }; static void __init pc8736x_init_shadow(void) diff --git a/drivers/char/powernv-op-panel.c b/drivers/char/powernv-op-panel.c index 3c99696b145e..53467b0a6187 100644 --- a/drivers/char/powernv-op-panel.c +++ b/drivers/char/powernv-op-panel.c @@ -195,12 +195,11 @@ free_oppanel_data: return rc; } -static int oppanel_remove(struct platform_device *pdev) +static void oppanel_remove(struct platform_device *pdev) { misc_deregister(&oppanel_dev); kfree(oppanel_lines); kfree(oppanel_data); - return 0; } static const struct of_device_id oppanel_match[] = { diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c index ee951b265213..d1dfbd8d4d42 100644 --- a/drivers/char/ppdev.c +++ b/drivers/char/ppdev.c @@ -296,28 +296,35 @@ static int register_device(int minor, struct pp_struct *pp) if (!port) { pr_warn("%s: no associated port!\n", name); rc = -ENXIO; - goto err; + goto err_free_name; } index = ida_alloc(&ida_index, GFP_KERNEL); + if (index < 0) { + pr_warn("%s: failed to get index!\n", name); + rc = index; + goto err_put_port; + } + memset(&ppdev_cb, 0, sizeof(ppdev_cb)); ppdev_cb.irq_func = pp_irq; ppdev_cb.flags = (pp->flags & PP_EXCL) ? PARPORT_FLAG_EXCL : 0; ppdev_cb.private = pp; pdev = parport_register_dev_model(port, name, &ppdev_cb, index); - parport_put_port(port); if (!pdev) { pr_warn("%s: failed to register device!\n", name); rc = -ENXIO; ida_free(&ida_index, index); - goto err; + goto err_put_port; } pp->pdev = pdev; pp->index = index; dev_dbg(&pdev->dev, "registered pardevice\n"); -err: +err_put_port: + parport_put_port(port); +err_free_name: kfree(name); return rc; } @@ -779,7 +786,6 @@ static const struct class ppdev_class = { static const struct file_operations pp_fops = { .owner = THIS_MODULE, - .llseek = no_llseek, .read = pp_read, .write = pp_write, .poll = pp_poll, @@ -832,7 +838,6 @@ static struct parport_driver pp_driver = { .probe = pp_probe, .match_port = pp_attach, .detach = pp_detach, - .devmodel = true, }; static int __init ppdev_init(void) @@ -875,5 +880,6 @@ static void __exit ppdev_cleanup(void) module_init(ppdev_init); module_exit(ppdev_cleanup); +MODULE_DESCRIPTION("Support for user-space parallel port device drivers"); MODULE_LICENSE("GPL"); MODULE_ALIAS_CHARDEV_MAJOR(PP_MAJOR); diff --git a/drivers/char/random.c b/drivers/char/random.c index 2597cb43f438..b8b24b6ed3fe 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) /* - * Copyright (C) 2017-2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + * Copyright (C) 2017-2024 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005 * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All rights reserved. 
* @@ -56,6 +56,11 @@ #include <linux/sched/isolation.h> #include <crypto/chacha.h> #include <crypto/blake2s.h> +#ifdef CONFIG_VDSO_GETRANDOM +#include <vdso/getrandom.h> +#include <vdso/datapage.h> +#include <vdso/vsyscall.h> +#endif #include <asm/archrandom.h> #include <asm/processor.h> #include <asm/irq.h> @@ -271,6 +276,22 @@ static void crng_reseed(struct work_struct *work) if (next_gen == ULONG_MAX) ++next_gen; WRITE_ONCE(base_crng.generation, next_gen); +#ifdef CONFIG_VDSO_GETRANDOM + /* base_crng.generation's invalid value is ULONG_MAX, while + * vdso_k_rng_data->generation's invalid value is 0, so add one to the + * former to arrive at the latter. Use smp_store_release so that this + * is ordered with the write above to base_crng.generation. Pairs with + * the smp_rmb() before the syscall in the vDSO code. + * + * Cast to unsigned long for 32-bit architectures, since atomic 64-bit + * operations are not supported on those architectures. This is safe + * because base_crng.generation is a 32-bit value. On big-endian + * architectures it will be stored in the upper 32 bits, but that's okay + * because the vDSO side only checks whether the value changed, without + * actually using or interpreting the value. + */ + smp_store_release((unsigned long *)&vdso_k_rng_data->generation, next_gen + 1); +#endif if (!static_branch_likely(&crng_is_ready)) crng_init = CRNG_READY; spin_unlock_irqrestore(&base_crng.lock, flags); @@ -288,11 +309,11 @@ static void crng_reseed(struct work_struct *work) * key value, at index 4, so the state should always be zeroed out * immediately after using in order to maintain forward secrecy. * If the state cannot be erased in a timely manner, then it is - * safer to set the random_data parameter to &chacha_state[4] so - * that this function overwrites it before returning. + * safer to set the random_data parameter to &chacha_state->x[4] + * so that this function overwrites it before returning. */ static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE], - u32 chacha_state[CHACHA_STATE_WORDS], + struct chacha_state *chacha_state, u8 *random_data, size_t random_data_len) { u8 first_block[CHACHA_BLOCK_SIZE]; @@ -300,8 +321,8 @@ static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE], BUG_ON(random_data_len > 32); chacha_init_consts(chacha_state); - memcpy(&chacha_state[4], key, CHACHA_KEY_SIZE); - memset(&chacha_state[12], 0, sizeof(u32) * 4); + memcpy(&chacha_state->x[4], key, CHACHA_KEY_SIZE); + memset(&chacha_state->x[12], 0, sizeof(u32) * 4); chacha20_block(chacha_state, first_block); memcpy(key, first_block, CHACHA_KEY_SIZE); @@ -314,7 +335,7 @@ static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE], * random data. It also returns up to 32 bytes on its own of random data * that may be used; random_data_len may not be greater than 32. 
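crng_fast_key_erasure(), reworked above to take a struct chacha_state, keeps the driver's forward-secrecy scheme: every expansion first overwrites the key with the leading 32 bytes of freshly generated ChaCha output, so the state that produced past output does not survive the call. A self-contained model of that idea follows as a plain userspace sketch; prf_block() is a placeholder invented for the illustration, not the kernel's chacha20_block().

#include <stdint.h>
#include <string.h>

#define KEY_SIZE   32
#define BLOCK_SIZE 64

/* Placeholder PRF for the sketch only -- NOT a real stream cipher. */
static void prf_block(const uint8_t key[KEY_SIZE], uint64_t counter,
		      uint8_t out[BLOCK_SIZE])
{
	for (int i = 0; i < BLOCK_SIZE; i++)
		out[i] = key[i % KEY_SIZE] ^ (uint8_t)(counter >> (8 * (i % 8))) ^ (uint8_t)i;
}

/*
 * Fast key erasure: generate one block, ratchet the key forward with its
 * first 32 bytes, hand out only the rest, and wipe the temporary copy, so
 * the key that produced this output is gone before the function returns.
 * (The kernel wipes with memzero_explicit()/chacha_zeroize_state() so the
 * compiler cannot optimize the clearing away.)
 */
static void fast_key_erasure_read(uint8_t key[KEY_SIZE], uint64_t *counter,
				  uint8_t out[BLOCK_SIZE - KEY_SIZE])
{
	uint8_t block[BLOCK_SIZE];

	prf_block(key, (*counter)++, block);
	memcpy(key, block, KEY_SIZE);
	memcpy(out, block + KEY_SIZE, BLOCK_SIZE - KEY_SIZE);
	memset(block, 0, sizeof(block));
}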
*/ -static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS], +static void crng_make_state(struct chacha_state *chacha_state, u8 *random_data, size_t random_data_len) { unsigned long flags; @@ -374,7 +395,7 @@ static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS], static void _get_random_bytes(void *buf, size_t len) { - u32 chacha_state[CHACHA_STATE_WORDS]; + struct chacha_state chacha_state; u8 tmp[CHACHA_BLOCK_SIZE]; size_t first_block_len; @@ -382,26 +403,26 @@ static void _get_random_bytes(void *buf, size_t len) return; first_block_len = min_t(size_t, 32, len); - crng_make_state(chacha_state, buf, first_block_len); + crng_make_state(&chacha_state, buf, first_block_len); len -= first_block_len; buf += first_block_len; while (len) { if (len < CHACHA_BLOCK_SIZE) { - chacha20_block(chacha_state, tmp); + chacha20_block(&chacha_state, tmp); memcpy(buf, tmp, len); memzero_explicit(tmp, sizeof(tmp)); break; } - chacha20_block(chacha_state, buf); - if (unlikely(chacha_state[12] == 0)) - ++chacha_state[13]; + chacha20_block(&chacha_state, buf); + if (unlikely(chacha_state.x[12] == 0)) + ++chacha_state.x[13]; len -= CHACHA_BLOCK_SIZE; buf += CHACHA_BLOCK_SIZE; } - memzero_explicit(chacha_state, sizeof(chacha_state)); + chacha_zeroize_state(&chacha_state); } /* @@ -420,7 +441,7 @@ EXPORT_SYMBOL(get_random_bytes); static ssize_t get_random_bytes_user(struct iov_iter *iter) { - u32 chacha_state[CHACHA_STATE_WORDS]; + struct chacha_state chacha_state; u8 block[CHACHA_BLOCK_SIZE]; size_t ret = 0, copied; @@ -432,21 +453,22 @@ static ssize_t get_random_bytes_user(struct iov_iter *iter) * bytes, in case userspace causes copy_to_iter() below to sleep * forever, so that we still retain forward secrecy in that case. */ - crng_make_state(chacha_state, (u8 *)&chacha_state[4], CHACHA_KEY_SIZE); + crng_make_state(&chacha_state, (u8 *)&chacha_state.x[4], + CHACHA_KEY_SIZE); /* * However, if we're doing a read of len <= 32, we don't need to * use chacha_state after, so we can simply return those bytes to * the user directly. */ if (iov_iter_count(iter) <= CHACHA_KEY_SIZE) { - ret = copy_to_iter(&chacha_state[4], CHACHA_KEY_SIZE, iter); + ret = copy_to_iter(&chacha_state.x[4], CHACHA_KEY_SIZE, iter); goto out_zero_chacha; } for (;;) { - chacha20_block(chacha_state, block); - if (unlikely(chacha_state[12] == 0)) - ++chacha_state[13]; + chacha20_block(&chacha_state, block); + if (unlikely(chacha_state.x[12] == 0)) + ++chacha_state.x[13]; copied = copy_to_iter(block, sizeof(block), iter); ret += copied; @@ -463,7 +485,7 @@ static ssize_t get_random_bytes_user(struct iov_iter *iter) memzero_explicit(block, sizeof(block)); out_zero_chacha: - memzero_explicit(chacha_state, sizeof(chacha_state)); + chacha_zeroize_state(&chacha_state); return ret ? 
ret : -EFAULT; } @@ -705,6 +727,7 @@ static void __cold _credit_init_bits(size_t bits) static DECLARE_WORK(set_ready, crng_set_ready); unsigned int new, orig, add; unsigned long flags; + int m; if (!bits) return; @@ -721,12 +744,15 @@ static void __cold _credit_init_bits(size_t bits) if (static_key_initialized && system_unbound_wq) queue_work(system_unbound_wq, &set_ready); atomic_notifier_call_chain(&random_ready_notifier, 0, NULL); +#ifdef CONFIG_VDSO_GETRANDOM + WRITE_ONCE(vdso_k_rng_data->is_ready, true); +#endif wake_up_interruptible(&crng_init_wait); kill_fasync(&fasync, SIGIO, POLL_IN); pr_notice("crng init done\n"); - if (urandom_warning.missed) - pr_notice("%d urandom warning(s) missed due to ratelimiting\n", - urandom_warning.missed); + m = ratelimit_state_get_miss(&urandom_warning); + if (m) + pr_notice("%d urandom warning(s) missed due to ratelimiting\n", m); } else if (orig < POOL_EARLY_BITS && new >= POOL_EARLY_BITS) { spin_lock_irqsave(&base_crng.lock, flags); /* Check if crng_init is CRNG_EMPTY, to avoid race with crng_reseed(). */ @@ -1287,9 +1313,9 @@ static void __cold try_to_generate_entropy(void) while (!crng_ready() && !signal_pending(current)) { /* * Check !timer_pending() and then ensure that any previous callback has finished - * executing by checking try_to_del_timer_sync(), before queueing the next one. + * executing by checking timer_delete_sync_try(), before queueing the next one. */ - if (!timer_pending(&stack->timer) && try_to_del_timer_sync(&stack->timer) >= 0) { + if (!timer_pending(&stack->timer) && timer_delete_sync_try(&stack->timer) >= 0) { struct cpumask timer_cpus; unsigned int num_cpus; @@ -1328,8 +1354,8 @@ static void __cold try_to_generate_entropy(void) } mix_pool_bytes(&stack->entropy, sizeof(stack->entropy)); - del_timer_sync(&stack->timer); - destroy_timer_on_stack(&stack->timer); + timer_delete_sync(&stack->timer); + timer_destroy_on_stack(&stack->timer); } @@ -1442,7 +1468,7 @@ static ssize_t urandom_read_iter(struct kiocb *kiocb, struct iov_iter *iter) if (!crng_ready()) { if (!ratelimit_disable && maxwarn <= 0) - ++urandom_warning.missed; + ratelimit_state_inc_miss(&urandom_warning); else if (ratelimit_disable || __ratelimit(&urandom_warning)) { --maxwarn; pr_notice("%s: uninitialized urandom read (%zu bytes read)\n", @@ -1604,7 +1630,7 @@ static u8 sysctl_bootid[UUID_SIZE]; * UUID. The difference is in whether table->data is NULL; if it is, * then a new UUID is generated and returned to the user. */ -static int proc_do_uuid(struct ctl_table *table, int write, void *buf, +static int proc_do_uuid(const struct ctl_table *table, int write, void *buf, size_t *lenp, loff_t *ppos) { u8 tmp_uuid[UUID_SIZE], *uuid; @@ -1635,13 +1661,13 @@ static int proc_do_uuid(struct ctl_table *table, int write, void *buf, } /* The same as proc_dointvec, but writes don't change anything. */ -static int proc_do_rointvec(struct ctl_table *table, int write, void *buf, +static int proc_do_rointvec(const struct ctl_table *table, int write, void *buf, size_t *lenp, loff_t *ppos) { return write ? 
0 : proc_dointvec(table, 0, buf, lenp, ppos); } -static struct ctl_table random_table[] = { +static const struct ctl_table random_table[] = { { .procname = "poolsize", .data = &sysctl_poolsize, diff --git a/drivers/char/scx200_gpio.c b/drivers/char/scx200_gpio.c index 9f701dcba95c..700e6affea6f 100644 --- a/drivers/char/scx200_gpio.c +++ b/drivers/char/scx200_gpio.c @@ -68,7 +68,6 @@ static const struct file_operations scx200_gpio_fileops = { .read = nsc_gpio_read, .open = scx200_gpio_open, .release = scx200_gpio_release, - .llseek = no_llseek, }; static struct cdev scx200_gpio_cdev; /* use 1 cdev for all pins */ diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c index 22d249333f53..677bb5ac950a 100644 --- a/drivers/char/sonypi.c +++ b/drivers/char/sonypi.c @@ -37,6 +37,7 @@ #include <linux/kfifo.h> #include <linux/platform_device.h> #include <linux/gfp.h> +#include <linux/string_choices.h> #include <linux/uaccess.h> #include <asm/io.h> @@ -1054,7 +1055,6 @@ static const struct file_operations sonypi_misc_fops = { .release = sonypi_misc_release, .fasync = sonypi_misc_fasync, .unlocked_ioctl = sonypi_misc_ioctl, - .llseek = no_llseek, }; static struct miscdevice sonypi_misc_device = { @@ -1269,12 +1269,12 @@ static void sonypi_display_info(void) "compat = %s, mask = 0x%08lx, useinput = %s, acpi = %s\n", sonypi_device.model, verbose, - fnkeyinit ? "on" : "off", - camera ? "on" : "off", - compat ? "on" : "off", + str_on_off(fnkeyinit), + str_on_off(camera), + str_on_off(compat), mask, - useinput ? "on" : "off", - SONYPI_ACPI_ACTIVE ? "on" : "off"); + str_on_off(useinput), + str_on_off(SONYPI_ACPI_ACTIVE)); printk(KERN_INFO "sonypi: enabled at irq=%d, port1=0x%x, port2=0x%x\n", sonypi_device.irq, sonypi_device.ioport1, sonypi_device.ioport2); @@ -1408,7 +1408,7 @@ static int sonypi_probe(struct platform_device *dev) return error; } -static int sonypi_remove(struct platform_device *dev) +static void sonypi_remove(struct platform_device *dev) { sonypi_disable(); @@ -1432,8 +1432,6 @@ static int sonypi_remove(struct platform_device *dev) } kfifo_free(&sonypi_device.fifo); - - return 0; } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/char/tlclk.c b/drivers/char/tlclk.c index 896a3550fba9..b381ea7e85d2 100644 --- a/drivers/char/tlclk.c +++ b/drivers/char/tlclk.c @@ -42,11 +42,12 @@ #include <linux/sysfs.h> #include <linux/device.h> #include <linux/miscdevice.h> -#include <linux/platform_device.h> +#include <linux/device/faux.h> #include <asm/io.h> /* inb/outb */ #include <linux/uaccess.h> MODULE_AUTHOR("Sebastien Bouchard <sebastien.bouchard@ca.kontron.com>"); +MODULE_DESCRIPTION("Telecom Clock driver for Intel NetStructure(tm) MPCBL0010"); MODULE_LICENSE("GPL"); /*Hardware Reset of the PLL */ @@ -741,7 +742,7 @@ static ssize_t store_reset (struct device *d, static DEVICE_ATTR(reset, (S_IWUSR|S_IWGRP), NULL, store_reset); -static struct attribute *tlclk_sysfs_entries[] = { +static struct attribute *tlclk_attrs[] = { &dev_attr_current_ref.attr, &dev_attr_telclock_version.attr, &dev_attr_alarms.attr, @@ -765,13 +766,9 @@ static struct attribute *tlclk_sysfs_entries[] = { &dev_attr_reset.attr, NULL }; +ATTRIBUTE_GROUPS(tlclk); -static const struct attribute_group tlclk_attribute_group = { - .name = NULL, /* put in device directory */ - .attrs = tlclk_sysfs_entries, -}; - -static struct platform_device *tlclk_device; +static struct faux_device *tlclk_device; static int __init tlclk_init(void) { @@ -816,24 +813,13 @@ static int __init tlclk_init(void) goto out3; } - tlclk_device = 
platform_device_register_simple("telco_clock", - -1, NULL, 0); - if (IS_ERR(tlclk_device)) { - printk(KERN_ERR "tlclk: platform_device_register failed.\n"); - ret = PTR_ERR(tlclk_device); + tlclk_device = faux_device_create_with_groups("telco_clock", NULL, NULL, tlclk_groups); + if (!tlclk_device) { + ret = -ENODEV; goto out4; } - ret = sysfs_create_group(&tlclk_device->dev.kobj, - &tlclk_attribute_group); - if (ret) { - printk(KERN_ERR "tlclk: failed to create sysfs device attributes.\n"); - goto out5; - } - return 0; -out5: - platform_device_unregister(tlclk_device); out4: misc_deregister(&tlclk_miscdev); out3: @@ -847,13 +833,12 @@ out1: static void __exit tlclk_cleanup(void) { - sysfs_remove_group(&tlclk_device->dev.kobj, &tlclk_attribute_group); - platform_device_unregister(tlclk_device); + faux_device_destroy(tlclk_device); misc_deregister(&tlclk_miscdev); unregister_chrdev(tlclk_major, "telco_clock"); release_region(TLCLK_BASE, 8); - del_timer_sync(&switchover_timer); + timer_delete_sync(&switchover_timer); kfree(alarm_events); } @@ -871,7 +856,7 @@ static void switchover_timeout(struct timer_list *unused) } /* Alarm processing is done, wake up read task */ - del_timer(&switchover_timer); + timer_delete(&switchover_timer); got_event = 1; wake_up(&wq); } diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig index 927088b2c3d3..dddd702b2454 100644 --- a/drivers/char/tpm/Kconfig +++ b/drivers/char/tpm/Kconfig @@ -27,6 +27,20 @@ menuconfig TCG_TPM if TCG_TPM +config TCG_TPM2_HMAC + bool "Use HMAC and encrypted transactions on the TPM bus" + default X86_64 + select CRYPTO_ECDH + select CRYPTO_LIB_AESCFB + select CRYPTO_LIB_SHA256 + help + Setting this causes us to deploy a scheme which uses request + and response HMACs in addition to encryption for + communicating with the TPM to prevent or detect bus snooping + and interposer attacks (see tpm-security.rst). Saying Y + here adds some encryption overhead to all kernel to TPM + transactions. + config HW_RANDOM_TPM bool "TPM HW Random Number Generator support" depends on TCG_TPM && HW_RANDOM && !(TCG_TPM=y && HW_RANDOM=m) @@ -148,7 +162,8 @@ config TCG_NSC config TCG_ATMEL tristate "Atmel TPM Interface" - depends on PPC64 || HAS_IOPORT_MAP + depends on HAS_IOPORT_MAP + depends on HAS_IOPORT help If you have a TPM security chip from Atmel say Yes and it will be accessible from within Linux. To compile this driver @@ -156,7 +171,7 @@ config TCG_ATMEL config TCG_INFINEON tristate "Infineon Technologies TPM Interface" - depends on PNP + depends on PNP || COMPILE_TEST help If you have a TPM security chip from Infineon Technologies (either SLD 9630 TT 1.1 or SLB 9635 TT 1.2) say Yes and it @@ -195,6 +210,15 @@ config TCG_CRB from within Linux. To compile this driver as a module, choose M here; the module will be called tpm_crb. +config TCG_ARM_CRB_FFA + tristate "TPM CRB over Arm FF-A Transport" + depends on ARM_FFA_TRANSPORT && TCG_CRB + default TCG_CRB + help + If the Arm FF-A transport is used to access the TPM say Yes. + To compile this driver as a module, choose M here; the module + will be called tpm_crb_ffa. + config TCG_VTPM_PROXY tristate "VTPM Proxy Interface" depends on TCG_TPM @@ -210,5 +234,15 @@ config TCG_FTPM_TEE help This driver proxies for firmware TPM running in TEE. 
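TCG_TPM2_HMAC above is a bool rather than a tristate, so the session code it controls is either built into the TPM core or compiled out entirely; the tpm.h hunk further down pairs the real tpm2_sessions_init() with a static inline stub for the disabled case. A generic sketch of that pattern, with CONFIG_FOO_FEATURE, struct foo_chip and feature_init() invented purely for the illustration:

struct foo_chip;

#ifdef CONFIG_FOO_FEATURE
/* Real implementation is compiled in from its own object file. */
int feature_init(struct foo_chip *chip);
#else
/* Option disabled: callers link against a successful no-op instead. */
static inline int feature_init(struct foo_chip *chip)
{
	return 0;
}
#endif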
+config TCG_SVSM + tristate "SNP SVSM vTPM interface" + depends on AMD_MEM_ENCRYPT + help + This is a driver for the AMD SVSM vTPM protocol that a SEV-SNP guest + OS can use to discover and talk to a vTPM emulated by the Secure VM + Service Module (SVSM) in the guest context, but at a more privileged + level (usually VMPL0). To compile this driver as a module, choose M + here; the module will be called tpm_svsm. + source "drivers/char/tpm/st33zp24/Kconfig" endif # TCG_TPM diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile index 0222b1ddb310..9de1b3ea34a9 100644 --- a/drivers/char/tpm/Makefile +++ b/drivers/char/tpm/Makefile @@ -15,6 +15,8 @@ tpm-y += tpm-sysfs.o tpm-y += eventlog/common.o tpm-y += eventlog/tpm1.o tpm-y += eventlog/tpm2.o +tpm-y += tpm-buf.o +tpm-y += tpm2-sessions.o tpm-$(CONFIG_ACPI) += tpm_ppi.o eventlog/acpi.o tpm-$(CONFIG_EFI) += eventlog/efi.o @@ -40,5 +42,7 @@ obj-$(CONFIG_TCG_IBMVTPM) += tpm_ibmvtpm.o obj-$(CONFIG_TCG_TIS_ST33ZP24) += st33zp24/ obj-$(CONFIG_TCG_XEN) += xen-tpmfront.o obj-$(CONFIG_TCG_CRB) += tpm_crb.o +obj-$(CONFIG_TCG_ARM_CRB_FFA) += tpm_crb_ffa.o obj-$(CONFIG_TCG_VTPM_PROXY) += tpm_vtpm_proxy.o obj-$(CONFIG_TCG_FTPM_TEE) += tpm_ftpm_tee.o +obj-$(CONFIG_TCG_SVSM) += tpm_svsm.o diff --git a/drivers/char/tpm/eventlog/acpi.c b/drivers/char/tpm/eventlog/acpi.c index bd757d836c5c..cf02ec646f46 100644 --- a/drivers/char/tpm/eventlog/acpi.c +++ b/drivers/char/tpm/eventlog/acpi.c @@ -63,6 +63,11 @@ static bool tpm_is_tpm2_log(void *bios_event_log, u64 len) return n == 0; } +static void tpm_bios_log_free(void *data) +{ + kvfree(data); +} + /* read binary bios log */ int tpm_read_log_acpi(struct tpm_chip *chip) { @@ -136,13 +141,12 @@ int tpm_read_log_acpi(struct tpm_chip *chip) } /* malloc EventLog space */ - log->bios_event_log = devm_kmalloc(&chip->dev, len, GFP_KERNEL); + log->bios_event_log = kvmalloc(len, GFP_KERNEL); if (!log->bios_event_log) return -ENOMEM; log->bios_event_log_end = log->bios_event_log + len; - ret = -EIO; virt = acpi_os_map_iomem(start, len); if (!virt) { dev_warn(&chip->dev, "%s: Failed to map ACPI memory\n", __func__); @@ -162,10 +166,16 @@ int tpm_read_log_acpi(struct tpm_chip *chip) goto err; } + ret = devm_add_action(&chip->dev, tpm_bios_log_free, log->bios_event_log); + if (ret) { + log->bios_event_log = NULL; + goto err; + } + return format; err: - devm_kfree(&chip->dev, log->bios_event_log); + tpm_bios_log_free(log->bios_event_log); log->bios_event_log = NULL; return ret; } diff --git a/drivers/char/tpm/eventlog/common.c b/drivers/char/tpm/eventlog/common.c index 639c3f395a5a..4c0bbba64ee5 100644 --- a/drivers/char/tpm/eventlog/common.c +++ b/drivers/char/tpm/eventlog/common.c @@ -47,6 +47,8 @@ static int tpm_bios_measurements_open(struct inode *inode, if (!err) { seq = file->private_data; seq->private = chip; + } else { + put_device(&chip->dev); } return err; diff --git a/drivers/char/tpm/eventlog/tpm1.c b/drivers/char/tpm/eventlog/tpm1.c index 12ee42a31c71..e7913b2853d5 100644 --- a/drivers/char/tpm/eventlog/tpm1.c +++ b/drivers/char/tpm/eventlog/tpm1.c @@ -257,11 +257,8 @@ static int tpm1_ascii_bios_measurements_show(struct seq_file *m, void *v) (unsigned char *)(v + sizeof(struct tcpa_event)); eventname = kmalloc(MAX_TEXT_EVENT, GFP_KERNEL); - if (!eventname) { - printk(KERN_ERR "%s: ERROR - No Memory for event name\n ", - __func__); - return -EFAULT; - } + if (!eventname) + return -ENOMEM; /* 1st: PCR */ seq_printf(m, "%2d ", do_endian_conversion(event->pcr_index)); diff --git 
a/drivers/char/tpm/st33zp24/i2c.c b/drivers/char/tpm/st33zp24/i2c.c index 45ca33b3dcb2..81348487c125 100644 --- a/drivers/char/tpm/st33zp24/i2c.c +++ b/drivers/char/tpm/st33zp24/i2c.c @@ -133,7 +133,7 @@ static void st33zp24_i2c_remove(struct i2c_client *client) } static const struct i2c_device_id st33zp24_i2c_id[] = { - {TPM_ST33_I2C, 0}, + { TPM_ST33_I2C }, {} }; MODULE_DEVICE_TABLE(i2c, st33zp24_i2c_id); diff --git a/drivers/char/tpm/tpm-buf.c b/drivers/char/tpm/tpm-buf.c new file mode 100644 index 000000000000..dc882fc9fa9e --- /dev/null +++ b/drivers/char/tpm/tpm-buf.c @@ -0,0 +1,246 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Handling of TPM command and other buffers. + */ + +#include <linux/tpm_command.h> +#include <linux/module.h> +#include <linux/tpm.h> + +/** + * tpm_buf_init() - Allocate and initialize a TPM command + * @buf: A &tpm_buf + * @tag: TPM_TAG_RQU_COMMAND, TPM2_ST_NO_SESSIONS or TPM2_ST_SESSIONS + * @ordinal: A command ordinal + * + * Return: 0 or -ENOMEM + */ +int tpm_buf_init(struct tpm_buf *buf, u16 tag, u32 ordinal) +{ + buf->data = (u8 *)__get_free_page(GFP_KERNEL); + if (!buf->data) + return -ENOMEM; + + tpm_buf_reset(buf, tag, ordinal); + return 0; +} +EXPORT_SYMBOL_GPL(tpm_buf_init); + +/** + * tpm_buf_reset() - Initialize a TPM command + * @buf: A &tpm_buf + * @tag: TPM_TAG_RQU_COMMAND, TPM2_ST_NO_SESSIONS or TPM2_ST_SESSIONS + * @ordinal: A command ordinal + */ +void tpm_buf_reset(struct tpm_buf *buf, u16 tag, u32 ordinal) +{ + struct tpm_header *head = (struct tpm_header *)buf->data; + + WARN_ON(tag != TPM_TAG_RQU_COMMAND && tag != TPM2_ST_NO_SESSIONS && + tag != TPM2_ST_SESSIONS && tag != 0); + + buf->flags = 0; + buf->length = sizeof(*head); + head->tag = cpu_to_be16(tag); + head->length = cpu_to_be32(sizeof(*head)); + head->ordinal = cpu_to_be32(ordinal); + buf->handles = 0; +} +EXPORT_SYMBOL_GPL(tpm_buf_reset); + +/** + * tpm_buf_init_sized() - Allocate and initialize a sized (TPM2B) buffer + * @buf: A @tpm_buf + * + * Return: 0 or -ENOMEM + */ +int tpm_buf_init_sized(struct tpm_buf *buf) +{ + buf->data = (u8 *)__get_free_page(GFP_KERNEL); + if (!buf->data) + return -ENOMEM; + + tpm_buf_reset_sized(buf); + return 0; +} +EXPORT_SYMBOL_GPL(tpm_buf_init_sized); + +/** + * tpm_buf_reset_sized() - Initialize a sized buffer + * @buf: A &tpm_buf + */ +void tpm_buf_reset_sized(struct tpm_buf *buf) +{ + buf->flags = TPM_BUF_TPM2B; + buf->length = 2; + buf->data[0] = 0; + buf->data[1] = 0; +} +EXPORT_SYMBOL_GPL(tpm_buf_reset_sized); + +void tpm_buf_destroy(struct tpm_buf *buf) +{ + free_page((unsigned long)buf->data); +} +EXPORT_SYMBOL_GPL(tpm_buf_destroy); + +/** + * tpm_buf_length() - Return the number of bytes consumed by the data + * @buf: A &tpm_buf + * + * Return: The number of bytes consumed by the buffer + */ +u32 tpm_buf_length(struct tpm_buf *buf) +{ + return buf->length; +} +EXPORT_SYMBOL_GPL(tpm_buf_length); + +/** + * tpm_buf_append() - Append data to an initialized buffer + * @buf: A &tpm_buf + * @new_data: A data blob + * @new_length: Size of the appended data + */ +void tpm_buf_append(struct tpm_buf *buf, const u8 *new_data, u16 new_length) +{ + /* Return silently if overflow has already happened. 
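The new tpm-buf.c above centralizes command marshalling: tpm_buf_init() grabs a page and writes the 10-byte header (tag, length, ordinal), the append helpers keep the header's length field current, and tpm_buf_destroy() releases the page. A minimal sketch of a caller, assuming the driver-internal tpm.h definitions are in scope; the ordinal choice and error handling are illustrative, not copied from a real call site:

/* Sketch only: build a TPM2_GetRandom request asking for num_bytes bytes. */
static int example_build_getrandom(struct tpm_buf *buf, u16 num_bytes)
{
	int rc;

	rc = tpm_buf_init(buf, TPM2_ST_NO_SESSIONS, TPM2_CC_GET_RANDOM);
	if (rc)
		return rc;	/* -ENOMEM: no page for the command */

	/* Parameters follow the header and are marshalled big-endian. */
	tpm_buf_append_u16(buf, num_bytes);

	/* ... hand to tpm_transmit_cmd(), then tpm_buf_destroy() ... */
	return 0;
}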
*/ + if (buf->flags & TPM_BUF_OVERFLOW) + return; + + if ((buf->length + new_length) > PAGE_SIZE) { + WARN(1, "tpm_buf: write overflow\n"); + buf->flags |= TPM_BUF_OVERFLOW; + return; + } + + memcpy(&buf->data[buf->length], new_data, new_length); + buf->length += new_length; + + if (buf->flags & TPM_BUF_TPM2B) + ((__be16 *)buf->data)[0] = cpu_to_be16(buf->length - 2); + else + ((struct tpm_header *)buf->data)->length = cpu_to_be32(buf->length); +} +EXPORT_SYMBOL_GPL(tpm_buf_append); + +void tpm_buf_append_u8(struct tpm_buf *buf, const u8 value) +{ + tpm_buf_append(buf, &value, 1); +} +EXPORT_SYMBOL_GPL(tpm_buf_append_u8); + +void tpm_buf_append_u16(struct tpm_buf *buf, const u16 value) +{ + __be16 value2 = cpu_to_be16(value); + + tpm_buf_append(buf, (u8 *)&value2, 2); +} +EXPORT_SYMBOL_GPL(tpm_buf_append_u16); + +void tpm_buf_append_u32(struct tpm_buf *buf, const u32 value) +{ + __be32 value2 = cpu_to_be32(value); + + tpm_buf_append(buf, (u8 *)&value2, 4); +} +EXPORT_SYMBOL_GPL(tpm_buf_append_u32); + +/** + * tpm_buf_append_handle() - Add a handle + * @chip: &tpm_chip instance + * @buf: &tpm_buf instance + * @handle: a TPM object handle + * + * Add a handle to the buffer, and increase the count tracking the number of + * handles in the command buffer. Works only for command buffers. + */ +void tpm_buf_append_handle(struct tpm_chip *chip, struct tpm_buf *buf, u32 handle) +{ + if (buf->flags & TPM_BUF_TPM2B) { + dev_err(&chip->dev, "Invalid buffer type (TPM2B)\n"); + return; + } + + tpm_buf_append_u32(buf, handle); + buf->handles++; +} + +/** + * tpm_buf_read() - Read from a TPM buffer + * @buf: &tpm_buf instance + * @offset: offset within the buffer + * @count: the number of bytes to read + * @output: the output buffer + */ +static void tpm_buf_read(struct tpm_buf *buf, off_t *offset, size_t count, void *output) +{ + off_t next_offset; + + /* Return silently if overflow has already happened. 
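tpm_buf_read() and the typed read helpers that follow give response parsing the same guard rails as the append side: the caller advances an offset cursor, and any read past buf->length sets TPM_BUF_BOUNDARY_ERROR instead of running off the page. A short illustrative parser, again assuming the driver-internal definitions; the response layout here is made up for the example:

/* Sketch only: pull a 16-bit size and a 32-bit value out of a response. */
static int example_parse_response(struct tpm_buf *buf, u32 *value)
{
	off_t offset = TPM_HEADER_SIZE;	/* skip tag, length and return code */
	u16 size;

	size = tpm_buf_read_u16(buf, &offset);	/* converted from big-endian */
	if ((buf->flags & TPM_BUF_BOUNDARY_ERROR) || size < sizeof(*value))
		return -EIO;

	*value = tpm_buf_read_u32(buf, &offset);
	return (buf->flags & TPM_BUF_BOUNDARY_ERROR) ? -EIO : 0;
}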
*/ + if (buf->flags & TPM_BUF_BOUNDARY_ERROR) + return; + + next_offset = *offset + count; + if (next_offset > buf->length) { + WARN(1, "tpm_buf: read out of boundary\n"); + buf->flags |= TPM_BUF_BOUNDARY_ERROR; + return; + } + + memcpy(output, &buf->data[*offset], count); + *offset = next_offset; +} + +/** + * tpm_buf_read_u8() - Read 8-bit word from a TPM buffer + * @buf: &tpm_buf instance + * @offset: offset within the buffer + * + * Return: next 8-bit word + */ +u8 tpm_buf_read_u8(struct tpm_buf *buf, off_t *offset) +{ + u8 value = 0; + + tpm_buf_read(buf, offset, sizeof(value), &value); + + return value; +} +EXPORT_SYMBOL_GPL(tpm_buf_read_u8); + +/** + * tpm_buf_read_u16() - Read 16-bit word from a TPM buffer + * @buf: &tpm_buf instance + * @offset: offset within the buffer + * + * Return: next 16-bit word + */ +u16 tpm_buf_read_u16(struct tpm_buf *buf, off_t *offset) +{ + u16 value = 0; + + tpm_buf_read(buf, offset, sizeof(value), &value); + + return be16_to_cpu(value); +} +EXPORT_SYMBOL_GPL(tpm_buf_read_u16); + +/** + * tpm_buf_read_u32() - Read 32-bit word from a TPM buffer + * @buf: &tpm_buf instance + * @offset: offset within the buffer + * + * Return: next 32-bit word + */ +u32 tpm_buf_read_u32(struct tpm_buf *buf, off_t *offset) +{ + u32 value = 0; + + tpm_buf_read(buf, offset, sizeof(value), &value); + + return be32_to_cpu(value); +} +EXPORT_SYMBOL_GPL(tpm_buf_read_u32); + + diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c index 42b1062e33cd..e25daf2396d3 100644 --- a/drivers/char/tpm/tpm-chip.c +++ b/drivers/char/tpm/tpm-chip.c @@ -158,6 +158,9 @@ int tpm_try_get_ops(struct tpm_chip *chip) { int rc = -EIO; + if (chip->flags & TPM_CHIP_FLAG_DISABLE) + return rc; + get_device(&chip->dev); down_read(&chip->ops_sem); @@ -165,6 +168,11 @@ int tpm_try_get_ops(struct tpm_chip *chip) goto out_ops; mutex_lock(&chip->tpm_mutex); + + /* tmp_chip_start may issue IO that is denied while suspended */ + if (chip->flags & TPM_CHIP_FLAG_SUSPENDED) + goto out_lock; + rc = tpm_chip_start(chip); if (rc) goto out_lock; @@ -275,6 +283,9 @@ static void tpm_dev_release(struct device *dev) kfree(chip->work_space.context_buf); kfree(chip->work_space.session_buf); kfree(chip->allocated_banks); +#ifdef CONFIG_TCG_TPM2_HMAC + kfree(chip->auth); +#endif kfree(chip); } @@ -294,6 +305,7 @@ int tpm_class_shutdown(struct device *dev) down_write(&chip->ops_sem); if (chip->flags & TPM_CHIP_FLAG_TPM2) { if (!tpm_chip_start(chip)) { + tpm2_end_auth_session(chip); tpm2_shutdown(chip, TPM2_SU_CLEAR); tpm_chip_stop(chip); } @@ -519,10 +531,6 @@ static int tpm_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait) { struct tpm_chip *chip = container_of(rng, struct tpm_chip, hwrng); - /* Give back zero bytes, as TPM chip has not yet fully resumed: */ - if (chip->flags & TPM_CHIP_FLAG_SUSPENDED) - return 0; - return tpm_get_random(chip, data, max); } @@ -668,6 +676,16 @@ EXPORT_SYMBOL_GPL(tpm_chip_register); */ void tpm_chip_unregister(struct tpm_chip *chip) { +#ifdef CONFIG_TCG_TPM2_HMAC + int rc; + + rc = tpm_try_get_ops(chip); + if (!rc) { + tpm2_end_auth_session(chip); + tpm_put_ops(chip); + } +#endif + tpm_del_legacy_sysfs(chip); if (tpm_is_hwrng_enabled(chip)) hwrng_unregister(&chip->hwrng); diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c index 30b4c288c1bb..11deaf538e87 100644 --- a/drivers/char/tpm/tpm-dev-common.c +++ b/drivers/char/tpm/tpm-dev-common.c @@ -27,6 +27,9 @@ static ssize_t tpm_dev_transmit(struct tpm_chip *chip, struct tpm_space 
*space, struct tpm_header *header = (void *)buf; ssize_t ret, len; + if (chip->flags & TPM_CHIP_FLAG_TPM2) + tpm2_end_auth_session(chip); + ret = tpm2_prepare_space(chip, space, buf, bufsiz); /* If the command is not implemented by the TPM, synthesize a * response with a TPM2_RC_COMMAND_CODE return for user-space. @@ -47,6 +50,8 @@ static ssize_t tpm_dev_transmit(struct tpm_chip *chip, struct tpm_space *space, if (!ret) ret = tpm2_commit_space(chip, space, buf, &len); + else + tpm2_flush_space(chip); out_rc: return ret ? ret : len; @@ -155,7 +160,7 @@ ssize_t tpm_common_read(struct file *file, char __user *buf, out: if (!priv->response_length) { *off = 0; - del_timer_sync(&priv->user_read_timer); + timer_delete_sync(&priv->user_read_timer); flush_work(&priv->timeout_work); } mutex_unlock(&priv->buffer_mutex); @@ -262,7 +267,7 @@ __poll_t tpm_common_poll(struct file *file, poll_table *wait) void tpm_common_release(struct file *file, struct file_priv *priv) { flush_work(&priv->async_work); - del_timer_sync(&priv->user_read_timer); + timer_delete_sync(&priv->user_read_timer); flush_work(&priv->timeout_work); file->private_data = NULL; priv->response_length = 0; diff --git a/drivers/char/tpm/tpm-dev.c b/drivers/char/tpm/tpm-dev.c index e2c0baa69fef..97c94b5e9340 100644 --- a/drivers/char/tpm/tpm-dev.c +++ b/drivers/char/tpm/tpm-dev.c @@ -59,7 +59,6 @@ static int tpm_release(struct inode *inode, struct file *file) const struct file_operations tpm_fops = { .owner = THIS_MODULE, - .llseek = no_llseek, .open = tpm_open, .read = tpm_common_read, .write = tpm_common_write, diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c index 757336324c90..8d7e4da6ed53 100644 --- a/drivers/char/tpm/tpm-interface.c +++ b/drivers/char/tpm/tpm-interface.c @@ -58,6 +58,30 @@ unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal) } EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration); +static void tpm_chip_cancel(struct tpm_chip *chip) +{ + if (!chip->ops->cancel) + return; + + chip->ops->cancel(chip); +} + +static u8 tpm_chip_status(struct tpm_chip *chip) +{ + if (!chip->ops->status) + return 0; + + return chip->ops->status(chip); +} + +static bool tpm_chip_req_canceled(struct tpm_chip *chip, u8 status) +{ + if (!chip->ops->req_canceled) + return false; + + return chip->ops->req_canceled(chip, status); +} + static ssize_t tpm_try_transmit(struct tpm_chip *chip, void *buf, size_t bufsiz) { struct tpm_header *header = buf; @@ -104,12 +128,12 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip, void *buf, size_t bufsiz) stop = jiffies + tpm_calc_ordinal_duration(chip, ordinal); do { - u8 status = chip->ops->status(chip); + u8 status = tpm_chip_status(chip); if ((status & chip->ops->req_complete_mask) == chip->ops->req_complete_val) goto out_recv; - if (chip->ops->req_canceled(chip, status)) { + if (tpm_chip_req_canceled(chip, status)) { dev_err(&chip->dev, "Operation Canceled\n"); return -ECANCELED; } @@ -118,7 +142,7 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip, void *buf, size_t bufsiz) rmb(); } while (time_before(jiffies, stop)); - chip->ops->cancel(chip); + tpm_chip_cancel(chip); dev_err(&chip->dev, "Operation Timed out\n"); return -ETIME; @@ -232,6 +256,7 @@ ssize_t tpm_transmit_cmd(struct tpm_chip *chip, struct tpm_buf *buf, if (len < min_rsp_body_length + TPM_HEADER_SIZE) return -EFAULT; + buf->length = len; return 0; } EXPORT_SYMBOL_GPL(tpm_transmit_cmd); @@ -342,31 +367,6 @@ out: } EXPORT_SYMBOL_GPL(tpm_pcr_extend); -/** - * tpm_send - send a TPM 
command - * @chip: a &struct tpm_chip instance, %NULL for the default chip - * @cmd: a TPM command buffer - * @buflen: the length of the TPM command buffer - * - * Return: same as with tpm_transmit_cmd() - */ -int tpm_send(struct tpm_chip *chip, void *cmd, size_t buflen) -{ - struct tpm_buf buf; - int rc; - - chip = tpm_find_get_ops(chip); - if (!chip) - return -ENODEV; - - buf.data = cmd; - rc = tpm_transmit_cmd(chip, &buf, 0, "attempting to a send a command"); - - tpm_put_ops(chip); - return rc; -} -EXPORT_SYMBOL_GPL(tpm_send); - int tpm_auto_startup(struct tpm_chip *chip) { int rc; @@ -394,6 +394,13 @@ int tpm_pm_suspend(struct device *dev) if (!chip) return -ENODEV; + rc = tpm_try_get_ops(chip); + if (rc) { + /* Can be safely set out of locks, as no action cannot race: */ + chip->flags |= TPM_CHIP_FLAG_SUSPENDED; + goto out; + } + if (chip->flags & TPM_CHIP_FLAG_ALWAYS_POWERED) goto suspended; @@ -401,19 +408,19 @@ int tpm_pm_suspend(struct device *dev) !pm_suspend_via_firmware()) goto suspended; - rc = tpm_try_get_ops(chip); - if (!rc) { - if (chip->flags & TPM_CHIP_FLAG_TPM2) - tpm2_shutdown(chip, TPM2_SU_STATE); - else - rc = tpm1_pm_suspend(chip, tpm_suspend_pcr); - - tpm_put_ops(chip); + if (chip->flags & TPM_CHIP_FLAG_TPM2) { + tpm2_end_auth_session(chip); + tpm2_shutdown(chip, TPM2_SU_STATE); + goto suspended; } + rc = tpm1_pm_suspend(chip, tpm_suspend_pcr); + suspended: chip->flags |= TPM_CHIP_FLAG_SUSPENDED; + tpm_put_ops(chip); +out: if (rc) dev_err(dev, "Ignoring error %d while suspending\n", rc); return 0; diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c index 54c71473aa29..94231f052ea7 100644 --- a/drivers/char/tpm/tpm-sysfs.c +++ b/drivers/char/tpm/tpm-sysfs.c @@ -309,6 +309,21 @@ static ssize_t tpm_version_major_show(struct device *dev, } static DEVICE_ATTR_RO(tpm_version_major); +#ifdef CONFIG_TCG_TPM2_HMAC +static ssize_t null_name_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct tpm_chip *chip = to_tpm_chip(dev); + int size = TPM2_NAME_SIZE; + + bin2hex(buf, chip->null_key_name, size); + size *= 2; + buf[size++] = '\n'; + return size; +} +static DEVICE_ATTR_RO(null_name); +#endif + static struct attribute *tpm1_dev_attrs[] = { &dev_attr_pubek.attr, &dev_attr_pcrs.attr, @@ -326,6 +341,9 @@ static struct attribute *tpm1_dev_attrs[] = { static struct attribute *tpm2_dev_attrs[] = { &dev_attr_tpm_version_major.attr, +#ifdef CONFIG_TCG_TPM2_HMAC + &dev_attr_null_name.attr, +#endif NULL }; diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h index 61445f1dc46d..7bb87fa5f7a1 100644 --- a/drivers/char/tpm/tpm.h +++ b/drivers/char/tpm/tpm.h @@ -28,7 +28,7 @@ #include <linux/tpm_eventlog.h> #ifdef CONFIG_X86 -#include <asm/intel-family.h> +#include <asm/cpu_device_id.h> #endif #define TPM_MINOR 224 /* officially assigned */ @@ -312,9 +312,23 @@ int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space, void *buf, size_t *bufsiz); int tpm_devs_add(struct tpm_chip *chip); void tpm_devs_remove(struct tpm_chip *chip); +int tpm2_save_context(struct tpm_chip *chip, u32 handle, u8 *buf, + unsigned int buf_size, unsigned int *offset); +int tpm2_load_context(struct tpm_chip *chip, u8 *buf, + unsigned int *offset, u32 *handle); void tpm_bios_log_setup(struct tpm_chip *chip); void tpm_bios_log_teardown(struct tpm_chip *chip); int tpm_dev_common_init(void); void tpm_dev_common_exit(void); + +#ifdef CONFIG_TCG_TPM2_HMAC +int tpm2_sessions_init(struct tpm_chip *chip); +#else +static inline int 
tpm2_sessions_init(struct tpm_chip *chip) +{ + return 0; +} +#endif + #endif diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c index 93545be190a5..524d802ede26 100644 --- a/drivers/char/tpm/tpm2-cmd.c +++ b/drivers/char/tpm/tpm2-cmd.c @@ -14,6 +14,10 @@ #include "tpm.h" #include <crypto/hash_info.h> +static bool disable_pcr_integrity; +module_param(disable_pcr_integrity, bool, 0444); +MODULE_PARM_DESC(disable_pcr_integrity, "Disable integrity protection of TPM2_PCR_Extend"); + static struct tpm2_hash tpm2_hash_map[] = { {HASH_ALGO_SHA1, TPM_ALG_SHA1}, {HASH_ALGO_SHA256, TPM_ALG_SHA256}, @@ -216,13 +220,6 @@ out: return rc; } -struct tpm2_null_auth_area { - __be32 handle; - __be16 nonce_size; - u8 attributes; - __be16 auth_size; -} __packed; - /** * tpm2_pcr_extend() - extend a PCR value * @@ -236,24 +233,30 @@ int tpm2_pcr_extend(struct tpm_chip *chip, u32 pcr_idx, struct tpm_digest *digests) { struct tpm_buf buf; - struct tpm2_null_auth_area auth_area; int rc; int i; + if (!disable_pcr_integrity) { + rc = tpm2_start_auth_session(chip); + if (rc) + return rc; + } + rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_PCR_EXTEND); - if (rc) + if (rc) { + if (!disable_pcr_integrity) + tpm2_end_auth_session(chip); return rc; + } - tpm_buf_append_u32(&buf, pcr_idx); - - auth_area.handle = cpu_to_be32(TPM2_RS_PW); - auth_area.nonce_size = 0; - auth_area.attributes = 0; - auth_area.auth_size = 0; + if (!disable_pcr_integrity) { + tpm_buf_append_name(chip, &buf, pcr_idx, NULL); + tpm_buf_append_hmac_session(chip, &buf, 0, NULL, 0); + } else { + tpm_buf_append_handle(chip, &buf, pcr_idx); + tpm_buf_append_auth(chip, &buf, 0, NULL, 0); + } - tpm_buf_append_u32(&buf, sizeof(struct tpm2_null_auth_area)); - tpm_buf_append(&buf, (const unsigned char *)&auth_area, - sizeof(auth_area)); tpm_buf_append_u32(&buf, chip->nr_allocated_banks); for (i = 0; i < chip->nr_allocated_banks; i++) { @@ -262,7 +265,11 @@ int tpm2_pcr_extend(struct tpm_chip *chip, u32 pcr_idx, chip->allocated_banks[i].digest_size); } + if (!disable_pcr_integrity) + tpm_buf_fill_hmac_session(chip, &buf); rc = tpm_transmit_cmd(chip, &buf, 0, "attempting extend a PCR value"); + if (!disable_pcr_integrity) + rc = tpm_buf_check_hmac_response(chip, &buf, rc); tpm_buf_destroy(&buf); @@ -288,6 +295,7 @@ struct tpm2_get_random_out { int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max) { struct tpm2_get_random_out *out; + struct tpm_header *head; struct tpm_buf buf; u32 recd; u32 num_bytes = max; @@ -295,29 +303,46 @@ int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max) int total = 0; int retries = 5; u8 *dest_ptr = dest; + off_t offset; if (!num_bytes || max > TPM_MAX_RNG_DATA) return -EINVAL; - err = tpm_buf_init(&buf, 0, 0); + err = tpm2_start_auth_session(chip); if (err) return err; + err = tpm_buf_init(&buf, 0, 0); + if (err) { + tpm2_end_auth_session(chip); + return err; + } + do { - tpm_buf_reset(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_GET_RANDOM); + tpm_buf_reset(&buf, TPM2_ST_SESSIONS, TPM2_CC_GET_RANDOM); + tpm_buf_append_hmac_session_opt(chip, &buf, TPM2_SA_ENCRYPT + | TPM2_SA_CONTINUE_SESSION, + NULL, 0); tpm_buf_append_u16(&buf, num_bytes); + tpm_buf_fill_hmac_session(chip, &buf); err = tpm_transmit_cmd(chip, &buf, offsetof(struct tpm2_get_random_out, buffer), "attempting get random"); + err = tpm_buf_check_hmac_response(chip, &buf, err); if (err) { if (err > 0) err = -EIO; goto out; } - out = (struct tpm2_get_random_out *) - &buf.data[TPM_HEADER_SIZE]; + head = (struct tpm_header *)buf.data; + 
offset = TPM_HEADER_SIZE; + /* Skip the parameter size field: */ + if (be16_to_cpu(head->tag) == TPM2_ST_SESSIONS) + offset += 4; + + out = (struct tpm2_get_random_out *)&buf.data[offset]; recd = min_t(u32, be16_to_cpu(out->size), num_bytes); if (tpm_buf_length(&buf) < TPM_HEADER_SIZE + @@ -334,9 +359,11 @@ int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max) } while (retries-- && total < max); tpm_buf_destroy(&buf); + return total ? total : -EIO; out: tpm_buf_destroy(&buf); + tpm2_end_auth_session(chip); return err; } @@ -759,6 +786,11 @@ int tpm2_auto_startup(struct tpm_chip *chip) rc = 0; } + if (rc) + goto out; + + rc = tpm2_sessions_init(chip); + out: /* * Infineon TPM in field upgrade mode will return no data for the number diff --git a/drivers/char/tpm/tpm2-sessions.c b/drivers/char/tpm/tpm2-sessions.c new file mode 100644 index 000000000000..7b5049b3d476 --- /dev/null +++ b/drivers/char/tpm/tpm2-sessions.c @@ -0,0 +1,1385 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright (C) 2018 James.Bottomley@HansenPartnership.com + * + * Cryptographic helper routines for handling TPM2 sessions for + * authorization HMAC and request response encryption. + * + * The idea is to ensure that every TPM command is HMAC protected by a + * session, meaning in-flight tampering would be detected and in + * addition all sensitive inputs and responses should be encrypted. + * + * The basic way this works is to use a TPM feature called salted + * sessions where a random secret used in session construction is + * encrypted to the public part of a known TPM key. The problem is we + * have no known keys, so initially a primary Elliptic Curve key is + * derived from the NULL seed (we use EC because most TPMs generate + * these keys much faster than RSA ones). The curve used is NIST_P256 + * because that's now mandated to be present in 'TCG TPM v2.0 + * Provisioning Guidance' + * + * Threat problems: the initial TPM2_CreatePrimary is not (and cannot + * be) session protected, so a clever Man in the Middle could return a + * public key they control to this command and from there intercept + * and decode all subsequent session based transactions. The kernel + * cannot mitigate this threat but, after boot, userspace can get + * proof this has not happened by asking the TPM to certify the NULL + * key. This certification would chain back to the TPM Endorsement + * Certificate and prove the NULL seed primary had not been tampered + * with and thus all sessions must have been cryptographically secure. + * To assist with this, the initial NULL seed public key name is made + * available in a sysfs file. + * + * Use of these functions: + * + * The design is all the crypto, hash and hmac gunk is confined in this + * file and never needs to be seen even by the kernel internal user. To + * the user there's an init function tpm2_sessions_init() that needs to + * be called once per TPM which generates the NULL seed primary key. + * + * These are the usage functions: + * + * tpm2_end_auth_session() kills the session and frees the resources. + * Under normal operation this function is done by + * tpm_buf_check_hmac_response(), so this is only to be used on + * error legs where the latter is not executed. + * tpm_buf_append_name() to add a handle to the buffer. This must be + * used in place of the usual tpm_buf_append_u32() for adding + * handles because handles have to be processed specially when + * calculating the HMAC. 
In particular, for NV, volatile and + * permanent objects you now need to provide the name. + * tpm_buf_append_hmac_session() which appends the hmac session to the + * buf in the same way tpm_buf_append_auth() does. + * tpm_buf_fill_hmac_session() This calculates the correct hash and + * places it in the buffer. It must be called after the complete + * command buffer is finalized so it can fill in the correct HMAC + * based on the parameters. + * tpm_buf_check_hmac_response() which checks the session response in + * the buffer and calculates what it should be. If there's a + * mismatch it will log a warning and return an error. If + * tpm_buf_append_hmac_session() did not specify + * TPM2_SA_CONTINUE_SESSION then the session will be closed (if it + * hasn't been consumed) and the auth structure freed. + */ + +#include "tpm.h" +#include <linux/random.h> +#include <linux/scatterlist.h> +#include <linux/unaligned.h> +#include <crypto/kpp.h> +#include <crypto/ecdh.h> +#include <crypto/hash.h> +#include <crypto/hmac.h> + +/* maximum number of names the TPM must remember for authorization */ +#define AUTH_MAX_NAMES 3 + +#define AES_KEY_BYTES AES_KEYSIZE_128 +#define AES_KEY_BITS (AES_KEY_BYTES*8) + +/* + * This is the structure that carries all the auth information (like + * session handle, nonces, session key and auth) from use to use. It is + * designed to be opaque to anything outside. + */ +struct tpm2_auth { + u32 handle; + /* + * This has two meanings: before tpm_buf_fill_hmac_session() + * it marks the offset in the buffer of the start of the + * sessions (i.e. after all the handles). Once the buffer has + * been filled it marks the session number of our auth + * session so we can find it again in the response buffer. + * + * The two cases are distinguished because the first offset + * must always be greater than TPM_HEADER_SIZE and the second + * must be less than or equal to 5. + */ + u32 session; + /* + * the size here is variable and set by the size of our_nonce + * which must be between 16 and the name hash length. we set + * the maximum sha256 size for the greatest protection + */ + u8 our_nonce[SHA256_DIGEST_SIZE]; + u8 tpm_nonce[SHA256_DIGEST_SIZE]; + /* + * the salt is only used across the session command/response + * after that it can be used as a scratch area + */ + union { + u8 salt[EC_PT_SZ]; + /* scratch for key + IV */ + u8 scratch[AES_KEY_BYTES + AES_BLOCK_SIZE]; + }; + /* + * the session key and passphrase are the same size as the + * name digest (sha256 again). The session key is constant + * for the use of the session and the passphrase can change + * with every invocation. + * + * Note: these fields must be adjacent and in this order + * because several HMAC/KDF schemes use the combination of the + * session_key and passphrase. + */ + u8 session_key[SHA256_DIGEST_SIZE]; + u8 passphrase[SHA256_DIGEST_SIZE]; + int passphrase_len; + struct crypto_aes_ctx aes_ctx; + /* saved session attributes: */ + u8 attrs; + __be32 ordinal; + + /* + * memory for three authorization handles.
We know them by + * handle, but they are part of the session by name, which + * we must compute and remember + */ + u32 name_h[AUTH_MAX_NAMES]; + u8 name[AUTH_MAX_NAMES][2 + SHA512_DIGEST_SIZE]; +}; + +#ifdef CONFIG_TCG_TPM2_HMAC +/* + * Name Size based on TPM algorithm (assumes no hash bigger than 255) + */ +static u8 name_size(const u8 *name) +{ + static u8 size_map[] = { + [TPM_ALG_SHA1] = SHA1_DIGEST_SIZE, + [TPM_ALG_SHA256] = SHA256_DIGEST_SIZE, + [TPM_ALG_SHA384] = SHA384_DIGEST_SIZE, + [TPM_ALG_SHA512] = SHA512_DIGEST_SIZE, + }; + u16 alg = get_unaligned_be16(name); + return size_map[alg] + 2; +} + +static int tpm2_parse_read_public(char *name, struct tpm_buf *buf) +{ + struct tpm_header *head = (struct tpm_header *)buf->data; + off_t offset = TPM_HEADER_SIZE; + u32 tot_len = be32_to_cpu(head->length); + u32 val; + + /* we're starting after the header so adjust the length */ + tot_len -= TPM_HEADER_SIZE; + + /* skip public */ + val = tpm_buf_read_u16(buf, &offset); + if (val > tot_len) + return -EINVAL; + offset += val; + /* name */ + val = tpm_buf_read_u16(buf, &offset); + if (val != name_size(&buf->data[offset])) + return -EINVAL; + memcpy(name, &buf->data[offset], val); + /* forget the rest */ + return 0; +} + +static int tpm2_read_public(struct tpm_chip *chip, u32 handle, char *name) +{ + struct tpm_buf buf; + int rc; + + rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_READ_PUBLIC); + if (rc) + return rc; + + tpm_buf_append_u32(&buf, handle); + rc = tpm_transmit_cmd(chip, &buf, 0, "read public"); + if (rc == TPM2_RC_SUCCESS) + rc = tpm2_parse_read_public(name, &buf); + + tpm_buf_destroy(&buf); + + return rc; +} +#endif /* CONFIG_TCG_TPM2_HMAC */ + +/** + * tpm_buf_append_name() - add a handle area to the buffer + * @chip: the TPM chip structure + * @buf: The buffer to be appended + * @handle: The handle to be appended + * @name: The name of the handle (may be NULL) + * + * In order to compute session HMACs, we need to know the names of the + * objects pointed to by the handles. For most objects, this is simply + * the actual 4 byte handle or an empty buf (in these cases @name + * should be NULL) but for volatile objects, permanent objects and NV + * areas, the name is defined as the hash (according to the name + * algorithm which should be set to sha256) of the public area to + * which the two byte algorithm id has been appended. For these + * objects, the @name pointer should point to this. If a name is + * required but @name is NULL, then TPM2_ReadPublic() will be called + * on the handle to obtain the name. + * + * As with most tpm_buf operations, success is assumed because failure + * will be caused by an incorrect programming model and indicated by a + * kernel message. 
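The kernel-doc above is the crux of the naming rules: for permanent, volatile and NV handles the name that enters the HMAC is not the 4-byte handle but the name-algorithm id followed by that algorithm's digest of the object's public area, which is why tpm_buf_append_name() may have to issue TPM2_ReadPublic() itself. A hedged sketch of that computation for a SHA-256 name algorithm per the TPM 2.0 definition of an object name, using the kernel's sha256() helper; the function is illustrative and not part of the patch:

#include <crypto/sha2.h>

/* Sketch only: a TPM2 object name is "alg id (BE16) || digest", 34 bytes for SHA-256. */
static void example_compute_name(const u8 *public_area, u32 public_len,
				 u8 name[2 + SHA256_DIGEST_SIZE])
{
	name[0] = TPM_ALG_SHA256 >> 8;		/* 0x00 */
	name[1] = TPM_ALG_SHA256 & 0xff;	/* 0x0b */
	sha256(public_area, public_len, name + 2);
}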
+ */ +void tpm_buf_append_name(struct tpm_chip *chip, struct tpm_buf *buf, + u32 handle, u8 *name) +{ +#ifdef CONFIG_TCG_TPM2_HMAC + enum tpm2_mso_type mso = tpm2_handle_mso(handle); + struct tpm2_auth *auth; + int slot; +#endif + + if (!tpm2_chip_auth(chip)) { + tpm_buf_append_handle(chip, buf, handle); + return; + } + +#ifdef CONFIG_TCG_TPM2_HMAC + slot = (tpm_buf_length(buf) - TPM_HEADER_SIZE) / 4; + if (slot >= AUTH_MAX_NAMES) { + dev_err(&chip->dev, "TPM: too many handles\n"); + return; + } + auth = chip->auth; + WARN(auth->session != tpm_buf_length(buf), + "name added in wrong place\n"); + tpm_buf_append_u32(buf, handle); + auth->session += 4; + + if (mso == TPM2_MSO_PERSISTENT || + mso == TPM2_MSO_VOLATILE || + mso == TPM2_MSO_NVRAM) { + if (!name) + tpm2_read_public(chip, handle, auth->name[slot]); + } else { + if (name) + dev_err(&chip->dev, "TPM: Handle does not require name but one is specified\n"); + } + + auth->name_h[slot] = handle; + if (name) + memcpy(auth->name[slot], name, name_size(name)); +#endif +} +EXPORT_SYMBOL_GPL(tpm_buf_append_name); + +void tpm_buf_append_auth(struct tpm_chip *chip, struct tpm_buf *buf, + u8 attributes, u8 *passphrase, int passphrase_len) +{ + /* offset tells us where the sessions area begins */ + int offset = buf->handles * 4 + TPM_HEADER_SIZE; + u32 len = 9 + passphrase_len; + + if (tpm_buf_length(buf) != offset) { + /* not the first session so update the existing length */ + len += get_unaligned_be32(&buf->data[offset]); + put_unaligned_be32(len, &buf->data[offset]); + } else { + tpm_buf_append_u32(buf, len); + } + /* auth handle */ + tpm_buf_append_u32(buf, TPM2_RS_PW); + /* nonce */ + tpm_buf_append_u16(buf, 0); + /* attributes */ + tpm_buf_append_u8(buf, 0); + /* passphrase */ + tpm_buf_append_u16(buf, passphrase_len); + tpm_buf_append(buf, passphrase, passphrase_len); +} + +/** + * tpm_buf_append_hmac_session() - Append a TPM session element + * @chip: the TPM chip structure + * @buf: The buffer to be appended + * @attributes: The session attributes + * @passphrase: The session authority (NULL if none) + * @passphrase_len: The length of the session authority (0 if none) + * + * This fills in a session structure in the TPM command buffer, except + * for the HMAC which cannot be computed until the command buffer is + * complete. The type of session is controlled by the @attributes, + * the main ones of which are TPM2_SA_CONTINUE_SESSION which means the + * session won't terminate after tpm_buf_check_hmac_response(), + * TPM2_SA_DECRYPT which means this buffers first parameter should be + * encrypted with a session key and TPM2_SA_ENCRYPT, which means the + * response buffer's first parameter needs to be decrypted (confusing, + * but the defines are written from the point of view of the TPM). + * + * Any session appended by this command must be finalized by calling + * tpm_buf_fill_hmac_session() otherwise the HMAC will be incorrect + * and the TPM will reject the command. + * + * As with most tpm_buf operations, success is assumed because failure + * will be caused by an incorrect programming model and indicated by a + * kernel message. 
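With the session machinery in place, the helpers documented above are always used in the same order; the reworked tpm2_pcr_extend() earlier in this diff is the canonical example. A condensed sketch of that flow for a single-handle command, with the ordinal and handle chosen only for illustration:

/* Sketch only: the generic call order for an HMAC-protected TPM2 command. */
static int example_hmac_command(struct tpm_chip *chip, u32 handle)
{
	struct tpm_buf buf;
	int rc;

	rc = tpm2_start_auth_session(chip);		/* sets up chip->auth */
	if (rc)
		return rc;

	rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_PCR_EXTEND);
	if (rc) {
		tpm2_end_auth_session(chip);		/* error leg: drop the session */
		return rc;
	}

	tpm_buf_append_name(chip, &buf, handle, NULL);		/* handle area */
	tpm_buf_append_hmac_session(chip, &buf, 0, NULL, 0);	/* session area */
	/* ... command parameters go here ... */
	tpm_buf_fill_hmac_session(chip, &buf);			/* HMAC is computed last */

	rc = tpm_transmit_cmd(chip, &buf, 0, "example command");
	rc = tpm_buf_check_hmac_response(chip, &buf, rc);	/* verify the response HMAC */

	tpm_buf_destroy(&buf);
	return rc;
}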
+ */ +void tpm_buf_append_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf, + u8 attributes, u8 *passphrase, + int passphrase_len) +{ +#ifdef CONFIG_TCG_TPM2_HMAC + u8 nonce[SHA256_DIGEST_SIZE]; + struct tpm2_auth *auth; + u32 len; +#endif + + if (!tpm2_chip_auth(chip)) { + tpm_buf_append_auth(chip, buf, attributes, passphrase, + passphrase_len); + return; + } + +#ifdef CONFIG_TCG_TPM2_HMAC + /* The first write to /dev/tpm{rm0} will flush the session. */ + attributes |= TPM2_SA_CONTINUE_SESSION; + + /* + * The Architecture Guide requires us to strip trailing zeros + * before computing the HMAC + */ + while (passphrase && passphrase_len > 0 && passphrase[passphrase_len - 1] == '\0') + passphrase_len--; + + auth = chip->auth; + auth->attrs = attributes; + auth->passphrase_len = passphrase_len; + if (passphrase_len) + memcpy(auth->passphrase, passphrase, passphrase_len); + + if (auth->session != tpm_buf_length(buf)) { + /* we're not the first session */ + len = get_unaligned_be32(&buf->data[auth->session]); + if (4 + len + auth->session != tpm_buf_length(buf)) { + WARN(1, "session length mismatch, cannot append"); + return; + } + + /* add our new session */ + len += 9 + 2 * SHA256_DIGEST_SIZE; + put_unaligned_be32(len, &buf->data[auth->session]); + } else { + tpm_buf_append_u32(buf, 9 + 2 * SHA256_DIGEST_SIZE); + } + + /* random number for our nonce */ + get_random_bytes(nonce, sizeof(nonce)); + memcpy(auth->our_nonce, nonce, sizeof(nonce)); + tpm_buf_append_u32(buf, auth->handle); + /* our new nonce */ + tpm_buf_append_u16(buf, SHA256_DIGEST_SIZE); + tpm_buf_append(buf, nonce, SHA256_DIGEST_SIZE); + tpm_buf_append_u8(buf, auth->attrs); + /* and put a placeholder for the hmac */ + tpm_buf_append_u16(buf, SHA256_DIGEST_SIZE); + tpm_buf_append(buf, nonce, SHA256_DIGEST_SIZE); +#endif +} +EXPORT_SYMBOL_GPL(tpm_buf_append_hmac_session); + +#ifdef CONFIG_TCG_TPM2_HMAC + +static int tpm2_create_primary(struct tpm_chip *chip, u32 hierarchy, + u32 *handle, u8 *name); + +/* + * It turns out the crypto hmac(sha256) is hard for us to consume + * because it assumes a fixed key and the TPM seems to change the key + * on every operation, so we weld the hmac init and final functions in + * here to give it the same usage characteristics as a regular hash + */ +static void tpm2_hmac_init(struct sha256_state *sctx, u8 *key, u32 key_len) +{ + u8 pad[SHA256_BLOCK_SIZE]; + int i; + + sha256_init(sctx); + for (i = 0; i < sizeof(pad); i++) { + if (i < key_len) + pad[i] = key[i]; + else + pad[i] = 0; + pad[i] ^= HMAC_IPAD_VALUE; + } + sha256_update(sctx, pad, sizeof(pad)); +} + +static void tpm2_hmac_final(struct sha256_state *sctx, u8 *key, u32 key_len, + u8 *out) +{ + u8 pad[SHA256_BLOCK_SIZE]; + int i; + + for (i = 0; i < sizeof(pad); i++) { + if (i < key_len) + pad[i] = key[i]; + else + pad[i] = 0; + pad[i] ^= HMAC_OPAD_VALUE; + } + + /* collect the final hash; use out as temporary storage */ + sha256_final(sctx, out); + + sha256_init(sctx); + sha256_update(sctx, pad, sizeof(pad)); + sha256_update(sctx, out, SHA256_DIGEST_SIZE); + sha256_final(sctx, out); +} + +/* + * assume hash sha256 and nonces u, v of size SHA256_DIGEST_SIZE but + * otherwise standard tpm2_KDFa. Note output is in bytes not bits. 
+ */ +static void tpm2_KDFa(u8 *key, u32 key_len, const char *label, u8 *u, + u8 *v, u32 bytes, u8 *out) +{ + u32 counter = 1; + const __be32 bits = cpu_to_be32(bytes * 8); + + while (bytes > 0) { + struct sha256_state sctx; + __be32 c = cpu_to_be32(counter); + + tpm2_hmac_init(&sctx, key, key_len); + sha256_update(&sctx, (u8 *)&c, sizeof(c)); + sha256_update(&sctx, label, strlen(label)+1); + sha256_update(&sctx, u, SHA256_DIGEST_SIZE); + sha256_update(&sctx, v, SHA256_DIGEST_SIZE); + sha256_update(&sctx, (u8 *)&bits, sizeof(bits)); + tpm2_hmac_final(&sctx, key, key_len, out); + + bytes -= SHA256_DIGEST_SIZE; + counter++; + out += SHA256_DIGEST_SIZE; + } +} + +/* + * Somewhat of a bastardization of the real KDFe. We're assuming + * we're working with known point sizes for the input parameters and + * the hash algorithm is fixed at sha256. Because we know that the + * point size is 32 bytes like the hash size, there's no need to loop + * in this KDF. + */ +static void tpm2_KDFe(u8 z[EC_PT_SZ], const char *str, u8 *pt_u, u8 *pt_v, + u8 *out) +{ + struct sha256_state sctx; + /* + * this should be an iterative counter, but because we know + * we're only taking 32 bytes for the point using a sha256 + * hash which is also 32 bytes, there's only one loop + */ + __be32 c = cpu_to_be32(1); + + sha256_init(&sctx); + /* counter (BE) */ + sha256_update(&sctx, (u8 *)&c, sizeof(c)); + /* secret value */ + sha256_update(&sctx, z, EC_PT_SZ); + /* string including trailing zero */ + sha256_update(&sctx, str, strlen(str)+1); + sha256_update(&sctx, pt_u, EC_PT_SZ); + sha256_update(&sctx, pt_v, EC_PT_SZ); + sha256_final(&sctx, out); +} + +static void tpm_buf_append_salt(struct tpm_buf *buf, struct tpm_chip *chip, + struct tpm2_auth *auth) +{ + struct crypto_kpp *kpp; + struct kpp_request *req; + struct scatterlist s[2], d[1]; + struct ecdh p = {0}; + u8 encoded_key[EC_PT_SZ], *x, *y; + unsigned int buf_len; + + /* secret is two sized points */ + tpm_buf_append_u16(buf, (EC_PT_SZ + 2)*2); + /* + * we cheat here and append uninitialized data to form + * the points. 
All we care about is getting the two + * co-ordinate pointers, which will be used to overwrite + * the uninitialized data + */ + tpm_buf_append_u16(buf, EC_PT_SZ); + x = &buf->data[tpm_buf_length(buf)]; + tpm_buf_append(buf, encoded_key, EC_PT_SZ); + tpm_buf_append_u16(buf, EC_PT_SZ); + y = &buf->data[tpm_buf_length(buf)]; + tpm_buf_append(buf, encoded_key, EC_PT_SZ); + sg_init_table(s, 2); + sg_set_buf(&s[0], x, EC_PT_SZ); + sg_set_buf(&s[1], y, EC_PT_SZ); + + kpp = crypto_alloc_kpp("ecdh-nist-p256", CRYPTO_ALG_INTERNAL, 0); + if (IS_ERR(kpp)) { + dev_err(&chip->dev, "crypto ecdh allocation failed\n"); + return; + } + + buf_len = crypto_ecdh_key_len(&p); + if (sizeof(encoded_key) < buf_len) { + dev_err(&chip->dev, "salt buffer too small needs %d\n", + buf_len); + goto out; + } + crypto_ecdh_encode_key(encoded_key, buf_len, &p); + /* this generates a random private key */ + crypto_kpp_set_secret(kpp, encoded_key, buf_len); + + /* salt is now the public point of this private key */ + req = kpp_request_alloc(kpp, GFP_KERNEL); + if (!req) + goto out; + kpp_request_set_input(req, NULL, 0); + kpp_request_set_output(req, s, EC_PT_SZ*2); + crypto_kpp_generate_public_key(req); + /* + * we're not done: now we have to compute the shared secret + * which is our private key multiplied by the tpm_key public + * point, we actually only take the x point and discard the y + * point and feed it through KDFe to get the final secret salt + */ + sg_set_buf(&s[0], chip->null_ec_key_x, EC_PT_SZ); + sg_set_buf(&s[1], chip->null_ec_key_y, EC_PT_SZ); + kpp_request_set_input(req, s, EC_PT_SZ*2); + sg_init_one(d, auth->salt, EC_PT_SZ); + kpp_request_set_output(req, d, EC_PT_SZ); + crypto_kpp_compute_shared_secret(req); + kpp_request_free(req); + + /* + * pass the shared secret through KDFe for salt. Note salt + * area is used both for input shared secret and output salt. + * This works because KDFe fully consumes the secret before it + * writes the salt + */ + tpm2_KDFe(auth->salt, "SECRET", x, chip->null_ec_key_x, auth->salt); + + out: + crypto_free_kpp(kpp); +} + +/** + * tpm_buf_fill_hmac_session() - finalize the session HMAC + * @chip: the TPM chip structure + * @buf: The buffer to be appended + * + * This command must not be called until all of the parameters have + * been appended to @buf otherwise the computed HMAC will be + * incorrect. + * + * This function computes and fills in the session HMAC using the + * session key and, if TPM2_SA_DECRYPT was specified, computes the + * encryption key and encrypts the first parameter of the command + * buffer with it. + * + * As with most tpm_buf operations, success is assumed because failure + * will be caused by an incorrect programming model and indicated by a + * kernel message. + */ +void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf) +{ + u32 cc, handles, val; + struct tpm2_auth *auth = chip->auth; + int i; + struct tpm_header *head = (struct tpm_header *)buf->data; + off_t offset_s = TPM_HEADER_SIZE, offset_p; + u8 *hmac = NULL; + u32 attrs; + u8 cphash[SHA256_DIGEST_SIZE]; + struct sha256_state sctx; + + if (!auth) + return; + + /* save the command code in BE format */ + auth->ordinal = head->ordinal; + + cc = be32_to_cpu(head->ordinal); + + i = tpm2_find_cc(chip, cc); + if (i < 0) { + dev_err(&chip->dev, "Command 0x%x not found in TPM\n", cc); + return; + } + attrs = chip->cc_attrs_tbl[i]; + + handles = (attrs >> TPM2_CC_ATTR_CHANDLES) & GENMASK(2, 0); + + /* + * just check the names, it's easy to make mistakes. 
This + * would happen if someone added a handle via + * tpm_buf_append_u32() instead of tpm_buf_append_name() + */ + for (i = 0; i < handles; i++) { + u32 handle = tpm_buf_read_u32(buf, &offset_s); + + if (auth->name_h[i] != handle) { + dev_err(&chip->dev, "TPM: handle %d wrong for name\n", + i); + return; + } + } + /* point offset_s to the start of the sessions */ + val = tpm_buf_read_u32(buf, &offset_s); + /* point offset_p to the start of the parameters */ + offset_p = offset_s + val; + for (i = 1; offset_s < offset_p; i++) { + u32 handle = tpm_buf_read_u32(buf, &offset_s); + u16 len; + u8 a; + + /* nonce (already in auth) */ + len = tpm_buf_read_u16(buf, &offset_s); + offset_s += len; + + a = tpm_buf_read_u8(buf, &offset_s); + + len = tpm_buf_read_u16(buf, &offset_s); + if (handle == auth->handle && auth->attrs == a) { + hmac = &buf->data[offset_s]; + /* + * save our session number so we know which + * session in the response belongs to us + */ + auth->session = i; + } + + offset_s += len; + } + if (offset_s != offset_p) { + dev_err(&chip->dev, "TPM session length is incorrect\n"); + return; + } + if (!hmac) { + dev_err(&chip->dev, "TPM could not find HMAC session\n"); + return; + } + + /* encrypt before HMAC */ + if (auth->attrs & TPM2_SA_DECRYPT) { + u16 len; + + /* need key and IV */ + tpm2_KDFa(auth->session_key, SHA256_DIGEST_SIZE + + auth->passphrase_len, "CFB", auth->our_nonce, + auth->tpm_nonce, AES_KEY_BYTES + AES_BLOCK_SIZE, + auth->scratch); + + len = tpm_buf_read_u16(buf, &offset_p); + aes_expandkey(&auth->aes_ctx, auth->scratch, AES_KEY_BYTES); + aescfb_encrypt(&auth->aes_ctx, &buf->data[offset_p], + &buf->data[offset_p], len, + auth->scratch + AES_KEY_BYTES); + /* reset p to beginning of parameters for HMAC */ + offset_p -= 2; + } + + sha256_init(&sctx); + /* ordinal is already BE */ + sha256_update(&sctx, (u8 *)&head->ordinal, sizeof(head->ordinal)); + /* add the handle names */ + for (i = 0; i < handles; i++) { + enum tpm2_mso_type mso = tpm2_handle_mso(auth->name_h[i]); + + if (mso == TPM2_MSO_PERSISTENT || + mso == TPM2_MSO_VOLATILE || + mso == TPM2_MSO_NVRAM) { + sha256_update(&sctx, auth->name[i], + name_size(auth->name[i])); + } else { + __be32 h = cpu_to_be32(auth->name_h[i]); + + sha256_update(&sctx, (u8 *)&h, 4); + } + } + if (offset_s != tpm_buf_length(buf)) + sha256_update(&sctx, &buf->data[offset_s], + tpm_buf_length(buf) - offset_s); + sha256_final(&sctx, cphash); + + /* now calculate the hmac */ + tpm2_hmac_init(&sctx, auth->session_key, sizeof(auth->session_key) + + auth->passphrase_len); + sha256_update(&sctx, cphash, sizeof(cphash)); + sha256_update(&sctx, auth->our_nonce, sizeof(auth->our_nonce)); + sha256_update(&sctx, auth->tpm_nonce, sizeof(auth->tpm_nonce)); + sha256_update(&sctx, &auth->attrs, 1); + tpm2_hmac_final(&sctx, auth->session_key, sizeof(auth->session_key) + + auth->passphrase_len, hmac); +} +EXPORT_SYMBOL(tpm_buf_fill_hmac_session); + +/** + * tpm_buf_check_hmac_response() - check the TPM return HMAC for correctness + * @chip: the TPM chip structure + * @buf: the original command buffer (which now contains the response) + * @rc: the return code from tpm_transmit_cmd + * + * If @rc is non zero, @buf may not contain an actual return, so @rc + * is passed through as the return and the session cleaned up and + * de-allocated if required (this is required if + * TPM2_SA_CONTINUE_SESSION was not specified as a session flag). 
+ *
+ * If @rc is zero, the response HMAC is computed against the returned
+ * @buf and matched to the TPM one in the session area. If there is a
+ * mismatch, an error is logged and -EINVAL returned.
+ *
+ * The reason for this is that the command issue and HMAC check
+ * sequence should look like:
+ *
+ *	rc = tpm_transmit_cmd(...);
+ *	rc = tpm_buf_check_hmac_response(chip, &buf, rc);
+ *	if (rc)
+ *		...
+ *
+ * Which is easily layered into the current control flow.
+ *
+ * Returns: 0 on success or an error.
+ */
+int tpm_buf_check_hmac_response(struct tpm_chip *chip, struct tpm_buf *buf,
+				int rc)
+{
+	struct tpm_header *head = (struct tpm_header *)buf->data;
+	struct tpm2_auth *auth = chip->auth;
+	off_t offset_s, offset_p;
+	u8 rphash[SHA256_DIGEST_SIZE];
+	u32 attrs, cc;
+	struct sha256_state sctx;
+	u16 tag = be16_to_cpu(head->tag);
+	int parm_len, len, i, handles;
+
+	if (!auth)
+		return rc;
+
+	cc = be32_to_cpu(auth->ordinal);
+
+	if (auth->session >= TPM_HEADER_SIZE) {
+		WARN(1, "tpm session not filled correctly\n");
+		goto out;
+	}
+
+	if (rc != 0)
+		/* pass non success rc through and close the session */
+		goto out;
+
+	rc = -EINVAL;
+	if (tag != TPM2_ST_SESSIONS) {
+		dev_err(&chip->dev, "TPM: HMAC response check has no sessions tag\n");
+		goto out;
+	}
+
+	i = tpm2_find_cc(chip, cc);
+	if (i < 0)
+		goto out;
+	attrs = chip->cc_attrs_tbl[i];
+	handles = (attrs >> TPM2_CC_ATTR_RHANDLE) & 1;
+
+	/* point to area beyond handles */
+	offset_s = TPM_HEADER_SIZE + handles * 4;
+	parm_len = tpm_buf_read_u32(buf, &offset_s);
+	offset_p = offset_s;
+	offset_s += parm_len;
+	/* skip over any sessions before ours */
+	for (i = 0; i < auth->session - 1; i++) {
+		len = tpm_buf_read_u16(buf, &offset_s);
+		offset_s += len + 1;
+		len = tpm_buf_read_u16(buf, &offset_s);
+		offset_s += len;
+	}
+	/* TPM nonce */
+	len = tpm_buf_read_u16(buf, &offset_s);
+	if (offset_s + len > tpm_buf_length(buf))
+		goto out;
+	if (len != SHA256_DIGEST_SIZE)
+		goto out;
+	memcpy(auth->tpm_nonce, &buf->data[offset_s], len);
+	offset_s += len;
+	attrs = tpm_buf_read_u8(buf, &offset_s);
+	len = tpm_buf_read_u16(buf, &offset_s);
+	if (offset_s + len != tpm_buf_length(buf))
+		goto out;
+	if (len != SHA256_DIGEST_SIZE)
+		goto out;
+	/*
+	 * offset_s points to the HMAC.
now calculate comparison, beginning + * with rphash + */ + sha256_init(&sctx); + /* yes, I know this is now zero, but it's what the standard says */ + sha256_update(&sctx, (u8 *)&head->return_code, + sizeof(head->return_code)); + /* ordinal is already BE */ + sha256_update(&sctx, (u8 *)&auth->ordinal, sizeof(auth->ordinal)); + sha256_update(&sctx, &buf->data[offset_p], parm_len); + sha256_final(&sctx, rphash); + + /* now calculate the hmac */ + tpm2_hmac_init(&sctx, auth->session_key, sizeof(auth->session_key) + + auth->passphrase_len); + sha256_update(&sctx, rphash, sizeof(rphash)); + sha256_update(&sctx, auth->tpm_nonce, sizeof(auth->tpm_nonce)); + sha256_update(&sctx, auth->our_nonce, sizeof(auth->our_nonce)); + sha256_update(&sctx, &auth->attrs, 1); + /* we're done with the rphash, so put our idea of the hmac there */ + tpm2_hmac_final(&sctx, auth->session_key, sizeof(auth->session_key) + + auth->passphrase_len, rphash); + if (memcmp(rphash, &buf->data[offset_s], SHA256_DIGEST_SIZE) == 0) { + rc = 0; + } else { + dev_err(&chip->dev, "TPM: HMAC check failed\n"); + goto out; + } + + /* now do response decryption */ + if (auth->attrs & TPM2_SA_ENCRYPT) { + /* need key and IV */ + tpm2_KDFa(auth->session_key, SHA256_DIGEST_SIZE + + auth->passphrase_len, "CFB", auth->tpm_nonce, + auth->our_nonce, AES_KEY_BYTES + AES_BLOCK_SIZE, + auth->scratch); + + len = tpm_buf_read_u16(buf, &offset_p); + aes_expandkey(&auth->aes_ctx, auth->scratch, AES_KEY_BYTES); + aescfb_decrypt(&auth->aes_ctx, &buf->data[offset_p], + &buf->data[offset_p], len, + auth->scratch + AES_KEY_BYTES); + } + + out: + if ((auth->attrs & TPM2_SA_CONTINUE_SESSION) == 0) { + if (rc) + /* manually close the session if it wasn't consumed */ + tpm2_flush_context(chip, auth->handle); + + kfree_sensitive(auth); + chip->auth = NULL; + } else { + /* reset for next use */ + auth->session = TPM_HEADER_SIZE; + } + + return rc; +} +EXPORT_SYMBOL(tpm_buf_check_hmac_response); + +/** + * tpm2_end_auth_session() - kill the allocated auth session + * @chip: the TPM chip structure + * + * ends the session started by tpm2_start_auth_session and frees all + * the resources. Under normal conditions, + * tpm_buf_check_hmac_response() will correctly end the session if + * required, so this function is only for use in error legs that will + * bypass the normal invocation of tpm_buf_check_hmac_response(). 
+ */
+void tpm2_end_auth_session(struct tpm_chip *chip)
+{
+	struct tpm2_auth *auth = chip->auth;
+
+	if (!auth)
+		return;
+
+	tpm2_flush_context(chip, auth->handle);
+	kfree_sensitive(auth);
+	chip->auth = NULL;
+}
+EXPORT_SYMBOL(tpm2_end_auth_session);
+
+static int tpm2_parse_start_auth_session(struct tpm2_auth *auth,
+					 struct tpm_buf *buf)
+{
+	struct tpm_header *head = (struct tpm_header *)buf->data;
+	u32 tot_len = be32_to_cpu(head->length);
+	off_t offset = TPM_HEADER_SIZE;
+	u32 val;
+
+	/* we're starting after the header so adjust the length */
+	tot_len -= TPM_HEADER_SIZE;
+
+	/* should have handle plus nonce */
+	if (tot_len != 4 + 2 + sizeof(auth->tpm_nonce))
+		return -EINVAL;
+
+	auth->handle = tpm_buf_read_u32(buf, &offset);
+	val = tpm_buf_read_u16(buf, &offset);
+	if (val != sizeof(auth->tpm_nonce))
+		return -EINVAL;
+	memcpy(auth->tpm_nonce, &buf->data[offset], sizeof(auth->tpm_nonce));
+	/* now compute the session key from the nonces */
+	tpm2_KDFa(auth->salt, sizeof(auth->salt), "ATH", auth->tpm_nonce,
+		  auth->our_nonce, sizeof(auth->session_key),
+		  auth->session_key);
+
+	return 0;
+}
+
+static int tpm2_load_null(struct tpm_chip *chip, u32 *null_key)
+{
+	unsigned int offset = 0; /* dummy offset for null seed context */
+	u8 name[SHA256_DIGEST_SIZE + 2];
+	u32 tmp_null_key;
+	int rc;
+
+	rc = tpm2_load_context(chip, chip->null_key_context, &offset,
+			       &tmp_null_key);
+	if (rc != -EINVAL) {
+		if (!rc)
+			*null_key = tmp_null_key;
+		goto err;
+	}
+
+	/* Try to re-create null key, given the integrity failure: */
+	rc = tpm2_create_primary(chip, TPM2_RH_NULL, &tmp_null_key, name);
+	if (rc)
+		goto err;
+
+	/* Return null key if the name has not been changed: */
+	if (!memcmp(name, chip->null_key_name, sizeof(name))) {
+		*null_key = tmp_null_key;
+		return 0;
+	}
+
+	/* A changed name indicates TPM interference: */
+	dev_err(&chip->dev, "null key integrity check failed\n");
+	tpm2_flush_context(chip, tmp_null_key);
+
+err:
+	if (rc) {
+		chip->flags |= TPM_CHIP_FLAG_DISABLE;
+		rc = -ENODEV;
+	}
+	return rc;
+}
+
+/**
+ * tpm2_start_auth_session() - Create an HMAC authentication session
+ * @chip: A TPM chip
+ *
+ * Loads the ephemeral key (null seed), and starts an HMAC authenticated
+ * session. The null seed is flushed before the return.
+ *
+ * Returns zero on success, or a POSIX error code.
+ */ +int tpm2_start_auth_session(struct tpm_chip *chip) +{ + struct tpm2_auth *auth; + struct tpm_buf buf; + u32 null_key; + int rc; + + if (chip->auth) { + dev_dbg_once(&chip->dev, "auth session is active\n"); + return 0; + } + + auth = kzalloc(sizeof(*auth), GFP_KERNEL); + if (!auth) + return -ENOMEM; + + rc = tpm2_load_null(chip, &null_key); + if (rc) + goto out; + + auth->session = TPM_HEADER_SIZE; + + rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_START_AUTH_SESS); + if (rc) + goto out; + + /* salt key handle */ + tpm_buf_append_u32(&buf, null_key); + /* bind key handle */ + tpm_buf_append_u32(&buf, TPM2_RH_NULL); + /* nonce caller */ + get_random_bytes(auth->our_nonce, sizeof(auth->our_nonce)); + tpm_buf_append_u16(&buf, sizeof(auth->our_nonce)); + tpm_buf_append(&buf, auth->our_nonce, sizeof(auth->our_nonce)); + + /* append encrypted salt and squirrel away unencrypted in auth */ + tpm_buf_append_salt(&buf, chip, auth); + /* session type (HMAC, audit or policy) */ + tpm_buf_append_u8(&buf, TPM2_SE_HMAC); + + /* symmetric encryption parameters */ + /* symmetric algorithm */ + tpm_buf_append_u16(&buf, TPM_ALG_AES); + /* bits for symmetric algorithm */ + tpm_buf_append_u16(&buf, AES_KEY_BITS); + /* symmetric algorithm mode (must be CFB) */ + tpm_buf_append_u16(&buf, TPM_ALG_CFB); + /* hash algorithm for session */ + tpm_buf_append_u16(&buf, TPM_ALG_SHA256); + + rc = tpm_ret_to_err(tpm_transmit_cmd(chip, &buf, 0, "StartAuthSession")); + tpm2_flush_context(chip, null_key); + + if (rc == TPM2_RC_SUCCESS) + rc = tpm2_parse_start_auth_session(auth, &buf); + + tpm_buf_destroy(&buf); + + if (rc == TPM2_RC_SUCCESS) { + chip->auth = auth; + return 0; + } + +out: + kfree_sensitive(auth); + return rc; +} +EXPORT_SYMBOL(tpm2_start_auth_session); + +/* + * A mask containing the object attributes for the kernel held null primary key + * used in HMAC encryption. For more information on specific attributes look up + * to "8.3 TPMA_OBJECT (Object Attributes)". 
+ */ +#define TPM2_OA_NULL_KEY ( \ + TPM2_OA_NO_DA | \ + TPM2_OA_FIXED_TPM | \ + TPM2_OA_FIXED_PARENT | \ + TPM2_OA_SENSITIVE_DATA_ORIGIN | \ + TPM2_OA_USER_WITH_AUTH | \ + TPM2_OA_DECRYPT | \ + TPM2_OA_RESTRICTED) + +/** + * tpm2_parse_create_primary() - parse the data returned from TPM_CC_CREATE_PRIMARY + * + * @chip: The TPM the primary was created under + * @buf: The response buffer from the chip + * @handle: pointer to be filled in with the return handle of the primary + * @hierarchy: The hierarchy the primary was created for + * @name: pointer to be filled in with the primary key name + * + * Return: + * * 0 - OK + * * -errno - A system error + * * TPM_RC - A TPM error + */ +static int tpm2_parse_create_primary(struct tpm_chip *chip, struct tpm_buf *buf, + u32 *handle, u32 hierarchy, u8 *name) +{ + struct tpm_header *head = (struct tpm_header *)buf->data; + off_t offset_r = TPM_HEADER_SIZE, offset_t; + u16 len = TPM_HEADER_SIZE; + u32 total_len = be32_to_cpu(head->length); + u32 val, param_len, keyhandle; + + keyhandle = tpm_buf_read_u32(buf, &offset_r); + if (handle) + *handle = keyhandle; + else + tpm2_flush_context(chip, keyhandle); + + param_len = tpm_buf_read_u32(buf, &offset_r); + /* + * param_len doesn't include the header, but all the other + * lengths and offsets do, so add it to parm len to make + * the comparisons easier + */ + param_len += TPM_HEADER_SIZE; + + if (param_len + 8 > total_len) + return -EINVAL; + len = tpm_buf_read_u16(buf, &offset_r); + offset_t = offset_r; + if (name) { + /* + * now we have the public area, compute the name of + * the object + */ + put_unaligned_be16(TPM_ALG_SHA256, name); + sha256(&buf->data[offset_r], len, name + 2); + } + + /* validate the public key */ + val = tpm_buf_read_u16(buf, &offset_t); + + /* key type (must be what we asked for) */ + if (val != TPM_ALG_ECC) + return -EINVAL; + val = tpm_buf_read_u16(buf, &offset_t); + + /* name algorithm */ + if (val != TPM_ALG_SHA256) + return -EINVAL; + val = tpm_buf_read_u32(buf, &offset_t); + + /* object properties */ + if (val != TPM2_OA_NULL_KEY) + return -EINVAL; + + /* auth policy (empty) */ + val = tpm_buf_read_u16(buf, &offset_t); + if (val != 0) + return -EINVAL; + + /* symmetric key parameters */ + val = tpm_buf_read_u16(buf, &offset_t); + if (val != TPM_ALG_AES) + return -EINVAL; + + /* symmetric key length */ + val = tpm_buf_read_u16(buf, &offset_t); + if (val != AES_KEY_BITS) + return -EINVAL; + + /* symmetric encryption scheme */ + val = tpm_buf_read_u16(buf, &offset_t); + if (val != TPM_ALG_CFB) + return -EINVAL; + + /* signing scheme */ + val = tpm_buf_read_u16(buf, &offset_t); + if (val != TPM_ALG_NULL) + return -EINVAL; + + /* ECC Curve */ + val = tpm_buf_read_u16(buf, &offset_t); + if (val != TPM2_ECC_NIST_P256) + return -EINVAL; + + /* KDF Scheme */ + val = tpm_buf_read_u16(buf, &offset_t); + if (val != TPM_ALG_NULL) + return -EINVAL; + + /* extract public key (x and y points) */ + val = tpm_buf_read_u16(buf, &offset_t); + if (val != EC_PT_SZ) + return -EINVAL; + memcpy(chip->null_ec_key_x, &buf->data[offset_t], val); + offset_t += val; + val = tpm_buf_read_u16(buf, &offset_t); + if (val != EC_PT_SZ) + return -EINVAL; + memcpy(chip->null_ec_key_y, &buf->data[offset_t], val); + offset_t += val; + + /* original length of the whole TPM2B */ + offset_r += len; + + /* should have exactly consumed the TPM2B public structure */ + if (offset_t != offset_r) + return -EINVAL; + if (offset_r > param_len) + return -EINVAL; + + /* creation data (skip) */ + len = tpm_buf_read_u16(buf, 
&offset_r); + offset_r += len; + if (offset_r > param_len) + return -EINVAL; + + /* creation digest (must be sha256) */ + len = tpm_buf_read_u16(buf, &offset_r); + offset_r += len; + if (len != SHA256_DIGEST_SIZE || offset_r > param_len) + return -EINVAL; + + /* TPMT_TK_CREATION follows */ + /* tag, must be TPM_ST_CREATION (0x8021) */ + val = tpm_buf_read_u16(buf, &offset_r); + if (val != TPM2_ST_CREATION || offset_r > param_len) + return -EINVAL; + + /* hierarchy */ + val = tpm_buf_read_u32(buf, &offset_r); + if (val != hierarchy || offset_r > param_len) + return -EINVAL; + + /* the ticket digest HMAC (might not be sha256) */ + len = tpm_buf_read_u16(buf, &offset_r); + offset_r += len; + if (offset_r > param_len) + return -EINVAL; + + /* + * finally we have the name, which is a sha256 digest plus a 2 + * byte algorithm type + */ + len = tpm_buf_read_u16(buf, &offset_r); + if (offset_r + len != param_len + 8) + return -EINVAL; + if (len != SHA256_DIGEST_SIZE + 2) + return -EINVAL; + + if (memcmp(chip->null_key_name, &buf->data[offset_r], + SHA256_DIGEST_SIZE + 2) != 0) { + dev_err(&chip->dev, "NULL Seed name comparison failed\n"); + return -EINVAL; + } + + return 0; +} + +/** + * tpm2_create_primary() - create a primary key using a fixed P-256 template + * + * @chip: the TPM chip to create under + * @hierarchy: The hierarchy handle to create under + * @handle: The returned volatile handle on success + * @name: The name of the returned key + * + * For platforms that might not have a persistent primary, this can be + * used to create one quickly on the fly (it uses Elliptic Curve not + * RSA, so even slow TPMs can create one fast). The template uses the + * TCG mandated H one for non-endorsement ECC primaries, i.e. P-256 + * elliptic curve (the only current one all TPM2s are required to + * have) a sha256 name hash and no policy. + * + * Return: + * * 0 - OK + * * -errno - A system error + * * TPM_RC - A TPM error + */ +static int tpm2_create_primary(struct tpm_chip *chip, u32 hierarchy, + u32 *handle, u8 *name) +{ + int rc; + struct tpm_buf buf; + struct tpm_buf template; + + rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_CREATE_PRIMARY); + if (rc) + return rc; + + rc = tpm_buf_init_sized(&template); + if (rc) { + tpm_buf_destroy(&buf); + return rc; + } + + /* + * create the template. 
Note: in order for userspace to + * verify the security of the system, it will have to create + * and certify this NULL primary, meaning all the template + * parameters will have to be identical, so conform exactly to + * the TCG TPM v2.0 Provisioning Guidance for the SRK ECC + * key H template (H has zero size unique points) + */ + + /* key type */ + tpm_buf_append_u16(&template, TPM_ALG_ECC); + + /* name algorithm */ + tpm_buf_append_u16(&template, TPM_ALG_SHA256); + + /* object properties */ + tpm_buf_append_u32(&template, TPM2_OA_NULL_KEY); + + /* sauth policy (empty) */ + tpm_buf_append_u16(&template, 0); + + /* BEGIN parameters: key specific; for ECC*/ + + /* symmetric algorithm */ + tpm_buf_append_u16(&template, TPM_ALG_AES); + + /* bits for symmetric algorithm */ + tpm_buf_append_u16(&template, AES_KEY_BITS); + + /* algorithm mode (must be CFB) */ + tpm_buf_append_u16(&template, TPM_ALG_CFB); + + /* scheme (NULL means any scheme) */ + tpm_buf_append_u16(&template, TPM_ALG_NULL); + + /* ECC Curve ID */ + tpm_buf_append_u16(&template, TPM2_ECC_NIST_P256); + + /* KDF Scheme */ + tpm_buf_append_u16(&template, TPM_ALG_NULL); + + /* unique: key specific; for ECC it is two zero size points */ + tpm_buf_append_u16(&template, 0); + tpm_buf_append_u16(&template, 0); + + /* END parameters */ + + /* primary handle */ + tpm_buf_append_u32(&buf, hierarchy); + tpm_buf_append_empty_auth(&buf, TPM2_RS_PW); + + /* sensitive create size is 4 for two empty buffers */ + tpm_buf_append_u16(&buf, 4); + + /* sensitive create auth data (empty) */ + tpm_buf_append_u16(&buf, 0); + + /* sensitive create sensitive data (empty) */ + tpm_buf_append_u16(&buf, 0); + + /* the public template */ + tpm_buf_append(&buf, template.data, template.length); + tpm_buf_destroy(&template); + + /* outside info (empty) */ + tpm_buf_append_u16(&buf, 0); + + /* creation PCR (none) */ + tpm_buf_append_u32(&buf, 0); + + rc = tpm_transmit_cmd(chip, &buf, 0, + "attempting to create NULL primary"); + + if (rc == TPM2_RC_SUCCESS) + rc = tpm2_parse_create_primary(chip, &buf, handle, hierarchy, + name); + + tpm_buf_destroy(&buf); + + return rc; +} + +static int tpm2_create_null_primary(struct tpm_chip *chip) +{ + u32 null_key; + int rc; + + rc = tpm2_create_primary(chip, TPM2_RH_NULL, &null_key, + chip->null_key_name); + + if (rc == TPM2_RC_SUCCESS) { + unsigned int offset = 0; /* dummy offset for null key context */ + + rc = tpm2_save_context(chip, null_key, chip->null_key_context, + sizeof(chip->null_key_context), &offset); + tpm2_flush_context(chip, null_key); + } + + return rc; +} + +/** + * tpm2_sessions_init() - start of day initialization for the sessions code + * @chip: TPM chip + * + * Derive and context save the null primary and allocate memory in the + * struct tpm_chip for the authorizations. 
+ * + * Return: + * * 0 - OK + * * -errno - A system error + * * TPM_RC - A TPM error + */ +int tpm2_sessions_init(struct tpm_chip *chip) +{ + int rc; + + rc = tpm2_create_null_primary(chip); + if (rc) { + dev_err(&chip->dev, "null key creation failed with %d\n", rc); + return rc; + } + + return rc; +} +#endif /* CONFIG_TCG_TPM2_HMAC */ diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c index 363afdd4d1d3..60354cd53b5c 100644 --- a/drivers/char/tpm/tpm2-space.c +++ b/drivers/char/tpm/tpm2-space.c @@ -12,7 +12,7 @@ */ #include <linux/gfp.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include "tpm.h" enum tpm2_handle_types { @@ -68,8 +68,8 @@ void tpm2_del_space(struct tpm_chip *chip, struct tpm_space *space) kfree(space->session_buf); } -static int tpm2_load_context(struct tpm_chip *chip, u8 *buf, - unsigned int *offset, u32 *handle) +int tpm2_load_context(struct tpm_chip *chip, u8 *buf, + unsigned int *offset, u32 *handle) { struct tpm_buf tbuf; struct tpm2_context *ctx; @@ -105,6 +105,9 @@ static int tpm2_load_context(struct tpm_chip *chip, u8 *buf, *handle = 0; tpm_buf_destroy(&tbuf); return -ENOENT; + } else if (tpm2_rc_value(rc) == TPM2_RC_INTEGRITY) { + tpm_buf_destroy(&tbuf); + return -EINVAL; } else if (rc > 0) { dev_warn(&chip->dev, "%s: failed with a TPM error 0x%04X\n", __func__, rc); @@ -119,8 +122,8 @@ static int tpm2_load_context(struct tpm_chip *chip, u8 *buf, return 0; } -static int tpm2_save_context(struct tpm_chip *chip, u32 handle, u8 *buf, - unsigned int buf_size, unsigned int *offset) +int tpm2_save_context(struct tpm_chip *chip, u32 handle, u8 *buf, + unsigned int buf_size, unsigned int *offset) { struct tpm_buf tbuf; unsigned int body_size; @@ -166,6 +169,9 @@ void tpm2_flush_space(struct tpm_chip *chip) struct tpm_space *space = &chip->work_space; int i; + if (!space) + return; + for (i = 0; i < ARRAY_SIZE(space->context_tbl); i++) if (space->context_tbl[i] && ~space->context_tbl[i]) tpm2_flush_context(chip, space->context_tbl[i]); diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c index 9fb2defa9dc4..54a0360a3c95 100644 --- a/drivers/char/tpm/tpm_atmel.c +++ b/drivers/char/tpm/tpm_atmel.c @@ -15,7 +15,66 @@ */ #include "tpm.h" -#include "tpm_atmel.h" + +struct tpm_atmel_priv { + int region_size; + int have_region; + unsigned long base; + void __iomem *iobase; +}; + +#define atmel_getb(chip, offset) inb(atmel_get_priv(chip)->base + (offset)) +#define atmel_putb(val, chip, offset) \ + outb(val, atmel_get_priv(chip)->base + (offset)) +#define atmel_request_region request_region +#define atmel_release_region release_region +/* Atmel definitions */ +enum tpm_atmel_addr { + TPM_ATMEL_BASE_ADDR_LO = 0x08, + TPM_ATMEL_BASE_ADDR_HI = 0x09 +}; + +static inline int tpm_read_index(int base, int index) +{ + outb(index, base); + return inb(base + 1) & 0xFF; +} + +/* Verify this is a 1.1 Atmel TPM */ +static int atmel_verify_tpm11(void) +{ + /* verify that it is an Atmel part */ + if (tpm_read_index(TPM_ADDR, 4) != 'A' || + tpm_read_index(TPM_ADDR, 5) != 'T' || + tpm_read_index(TPM_ADDR, 6) != 'M' || + tpm_read_index(TPM_ADDR, 7) != 'L') + return 1; + + /* query chip for its version number */ + if (tpm_read_index(TPM_ADDR, 0x00) != 1 || + tpm_read_index(TPM_ADDR, 0x01) != 1) + return 1; + + /* This is an atmel supported part */ + return 0; +} + +/* Determine where to talk to device */ +static void __iomem *atmel_get_base_addr(unsigned long *base, int *region_size) +{ + int lo, hi; + + if (atmel_verify_tpm11() != 0) + 
return NULL; + + lo = tpm_read_index(TPM_ADDR, TPM_ATMEL_BASE_ADDR_LO); + hi = tpm_read_index(TPM_ADDR, TPM_ATMEL_BASE_ADDR_HI); + + *base = (hi << 8) | lo; + *region_size = 2; + + return ioport_map(*base, *region_size); +} /* write status bits */ enum tpm_atmel_write_status { @@ -142,7 +201,6 @@ static void atml_plat_remove(void) tpm_chip_unregister(chip); if (priv->have_region) atmel_release_region(priv->base, priv->region_size); - atmel_put_base_addr(priv->iobase); platform_device_unregister(pdev); } @@ -211,7 +269,6 @@ static int __init init_atmel(void) err_unreg_dev: platform_device_unregister(pdev); err_rel_reg: - atmel_put_base_addr(iobase); if (have_region) atmel_release_region(base, region_size); diff --git a/drivers/char/tpm/tpm_atmel.h b/drivers/char/tpm/tpm_atmel.h deleted file mode 100644 index 7ac3f69dcf0f..000000000000 --- a/drivers/char/tpm/tpm_atmel.h +++ /dev/null @@ -1,140 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) 2005 IBM Corporation - * - * Authors: - * Kylene Hall <kjhall@us.ibm.com> - * - * Maintained by: <tpmdd-devel@lists.sourceforge.net> - * - * Device driver for TCG/TCPA TPM (trusted platform module). - * Specifications at www.trustedcomputinggroup.org - * - * These difference are required on power because the device must be - * discovered through the device tree and iomap must be used to get - * around the need for holes in the io_page_mask. This does not happen - * automatically because the tpm is not a normal pci device and lives - * under the root node. - */ - -struct tpm_atmel_priv { - int region_size; - int have_region; - unsigned long base; - void __iomem *iobase; -}; - -#ifdef CONFIG_PPC64 - -#include <linux/of.h> - -#define atmel_getb(priv, offset) readb(priv->iobase + offset) -#define atmel_putb(val, priv, offset) writeb(val, priv->iobase + offset) -#define atmel_request_region request_mem_region -#define atmel_release_region release_mem_region - -static inline void atmel_put_base_addr(void __iomem *iobase) -{ - iounmap(iobase); -} - -static void __iomem * atmel_get_base_addr(unsigned long *base, int *region_size) -{ - struct device_node *dn; - unsigned long address, size; - const unsigned int *reg; - int reglen; - int naddrc; - int nsizec; - - dn = of_find_node_by_name(NULL, "tpm"); - - if (!dn) - return NULL; - - if (!of_device_is_compatible(dn, "AT97SC3201")) { - of_node_put(dn); - return NULL; - } - - reg = of_get_property(dn, "reg", ®len); - naddrc = of_n_addr_cells(dn); - nsizec = of_n_size_cells(dn); - - of_node_put(dn); - - - if (naddrc == 2) - address = ((unsigned long) reg[0] << 32) | reg[1]; - else - address = reg[0]; - - if (nsizec == 2) - size = - ((unsigned long) reg[naddrc] << 32) | reg[naddrc + 1]; - else - size = reg[naddrc]; - - *base = address; - *region_size = size; - return ioremap(*base, *region_size); -} -#else -#define atmel_getb(chip, offset) inb(atmel_get_priv(chip)->base + offset) -#define atmel_putb(val, chip, offset) \ - outb(val, atmel_get_priv(chip)->base + offset) -#define atmel_request_region request_region -#define atmel_release_region release_region -/* Atmel definitions */ -enum tpm_atmel_addr { - TPM_ATMEL_BASE_ADDR_LO = 0x08, - TPM_ATMEL_BASE_ADDR_HI = 0x09 -}; - -static inline int tpm_read_index(int base, int index) -{ - outb(index, base); - return inb(base+1) & 0xFF; -} - -/* Verify this is a 1.1 Atmel TPM */ -static int atmel_verify_tpm11(void) -{ - - /* verify that it is an Atmel part */ - if (tpm_read_index(TPM_ADDR, 4) != 'A' || - tpm_read_index(TPM_ADDR, 5) != 'T' || - 
tpm_read_index(TPM_ADDR, 6) != 'M' || - tpm_read_index(TPM_ADDR, 7) != 'L') - return 1; - - /* query chip for its version number */ - if (tpm_read_index(TPM_ADDR, 0x00) != 1 || - tpm_read_index(TPM_ADDR, 0x01) != 1) - return 1; - - /* This is an atmel supported part */ - return 0; -} - -static inline void atmel_put_base_addr(void __iomem *iobase) -{ -} - -/* Determine where to talk to device */ -static void __iomem * atmel_get_base_addr(unsigned long *base, int *region_size) -{ - int lo, hi; - - if (atmel_verify_tpm11() != 0) - return NULL; - - lo = tpm_read_index(TPM_ADDR, TPM_ATMEL_BASE_ADDR_LO); - hi = tpm_read_index(TPM_ADDR, TPM_ATMEL_BASE_ADDR_HI); - - *base = (hi << 8) | lo; - *region_size = 2; - - return ioport_map(*base, *region_size); -} -#endif diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c index ea085b14ab7c..876edf2705ab 100644 --- a/drivers/char/tpm/tpm_crb.c +++ b/drivers/char/tpm/tpm_crb.c @@ -19,6 +19,7 @@ #ifdef CONFIG_ARM64 #include <linux/arm-smccc.h> #endif +#include "tpm_crb_ffa.h" #include "tpm.h" #define ACPI_SIG_TPM2 "TPM2" @@ -100,6 +101,8 @@ struct crb_priv { u32 smc_func_id; u32 __iomem *pluton_start_addr; u32 __iomem *pluton_reply_addr; + u8 ffa_flags; + u8 ffa_attributes; }; struct tpm2_crb_smc { @@ -110,11 +113,30 @@ struct tpm2_crb_smc { u32 smc_func_id; }; +/* CRB over FFA start method parameters in TCG2 ACPI table */ +struct tpm2_crb_ffa { + u8 flags; + u8 attributes; + u16 partition_id; + u8 reserved[8]; +}; + struct tpm2_crb_pluton { u64 start_addr; u64 reply_addr; }; +/* + * Returns true if the start method supports idle. + */ +static inline bool tpm_crb_has_idle(u32 start_method) +{ + return !(start_method == ACPI_TPM2_START_METHOD || + start_method == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD || + start_method == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC || + start_method == ACPI_TPM2_CRB_WITH_ARM_FFA); +} + static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value, unsigned long timeout) { @@ -173,9 +195,7 @@ static int __crb_go_idle(struct device *dev, struct crb_priv *priv) { int rc; - if ((priv->sm == ACPI_TPM2_START_METHOD) || - (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) || - (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC)) + if (!tpm_crb_has_idle(priv->sm)) return 0; iowrite32(CRB_CTRL_REQ_GO_IDLE, &priv->regs_t->ctrl_req); @@ -222,9 +242,7 @@ static int __crb_cmd_ready(struct device *dev, struct crb_priv *priv) { int rc; - if ((priv->sm == ACPI_TPM2_START_METHOD) || - (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) || - (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC)) + if (!tpm_crb_has_idle(priv->sm)) return 0; iowrite32(CRB_CTRL_REQ_CMD_READY, &priv->regs_t->ctrl_req); @@ -255,13 +273,20 @@ static int crb_cmd_ready(struct tpm_chip *chip) static int __crb_request_locality(struct device *dev, struct crb_priv *priv, int loc) { - u32 value = CRB_LOC_STATE_LOC_ASSIGNED | - CRB_LOC_STATE_TPM_REG_VALID_STS; + u32 value = CRB_LOC_STATE_LOC_ASSIGNED | CRB_LOC_STATE_TPM_REG_VALID_STS; + int rc; if (!priv->regs_h) return 0; iowrite32(CRB_LOC_CTRL_REQUEST_ACCESS, &priv->regs_h->loc_ctrl); + + if (priv->sm == ACPI_TPM2_CRB_WITH_ARM_FFA) { + rc = tpm_crb_ffa_start(CRB_FFA_START_TYPE_LOCALITY_REQUEST, loc); + if (rc) + return rc; + } + if (!crb_wait_for_reg_32(&priv->regs_h->loc_state, value, value, TPM2_TIMEOUT_C)) { dev_warn(dev, "TPM_LOC_STATE_x.requestAccess timed out\n"); @@ -281,14 +306,21 @@ static int crb_request_locality(struct tpm_chip *chip, int loc) static int 
__crb_relinquish_locality(struct device *dev, struct crb_priv *priv, int loc) { - u32 mask = CRB_LOC_STATE_LOC_ASSIGNED | - CRB_LOC_STATE_TPM_REG_VALID_STS; + u32 mask = CRB_LOC_STATE_LOC_ASSIGNED | CRB_LOC_STATE_TPM_REG_VALID_STS; u32 value = CRB_LOC_STATE_TPM_REG_VALID_STS; + int rc; if (!priv->regs_h) return 0; iowrite32(CRB_LOC_CTRL_RELINQUISH, &priv->regs_h->loc_ctrl); + + if (priv->sm == ACPI_TPM2_CRB_WITH_ARM_FFA) { + rc = tpm_crb_ffa_start(CRB_FFA_START_TYPE_LOCALITY_REQUEST, loc); + if (rc) + return rc; + } + if (!crb_wait_for_reg_32(&priv->regs_h->loc_state, mask, value, TPM2_TIMEOUT_C)) { dev_warn(dev, "TPM_LOC_STATE_x.Relinquish timed out\n"); @@ -423,13 +455,13 @@ static int crb_send(struct tpm_chip *chip, u8 *buf, size_t len) * report only ACPI start but in practice seems to require both * CRB start, hence invoking CRB start method if hid == MSFT0101. */ - if ((priv->sm == ACPI_TPM2_COMMAND_BUFFER) || - (priv->sm == ACPI_TPM2_MEMORY_MAPPED) || - (!strcmp(priv->hid, "MSFT0101"))) + if (priv->sm == ACPI_TPM2_COMMAND_BUFFER || + priv->sm == ACPI_TPM2_MEMORY_MAPPED || + !strcmp(priv->hid, "MSFT0101")) iowrite32(CRB_START_INVOKE, &priv->regs_t->ctrl_start); - if ((priv->sm == ACPI_TPM2_START_METHOD) || - (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD)) + if (priv->sm == ACPI_TPM2_START_METHOD || + priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) rc = crb_do_acpi_start(chip); if (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC) { @@ -437,6 +469,11 @@ static int crb_send(struct tpm_chip *chip, u8 *buf, size_t len) rc = tpm_crb_smc_start(&chip->dev, priv->smc_func_id); } + if (priv->sm == ACPI_TPM2_CRB_WITH_ARM_FFA) { + iowrite32(CRB_START_INVOKE, &priv->regs_t->ctrl_start); + rc = tpm_crb_ffa_start(CRB_FFA_START_TYPE_COMMAND, chip->locality); + } + if (rc) return rc; @@ -446,13 +483,20 @@ static int crb_send(struct tpm_chip *chip, u8 *buf, size_t len) static void crb_cancel(struct tpm_chip *chip) { struct crb_priv *priv = dev_get_drvdata(&chip->dev); + int rc; iowrite32(CRB_CANCEL_INVOKE, &priv->regs_t->ctrl_cancel); - if (((priv->sm == ACPI_TPM2_START_METHOD) || - (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD)) && + if ((priv->sm == ACPI_TPM2_START_METHOD || + priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) && crb_do_acpi_start(chip)) dev_err(&chip->dev, "ACPI Start failed\n"); + + if (priv->sm == ACPI_TPM2_CRB_WITH_ARM_FFA) { + rc = tpm_crb_ffa_start(CRB_FFA_START_TYPE_COMMAND, chip->locality); + if (rc) + dev_err(&chip->dev, "FF-A Start failed\n"); + } } static bool crb_req_canceled(struct tpm_chip *chip, u8 status) @@ -609,8 +653,9 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv, * the control area, as one nice sane region except for some older * stuff that puts the control area outside the ACPI IO region. 
*/ - if ((priv->sm == ACPI_TPM2_COMMAND_BUFFER) || - (priv->sm == ACPI_TPM2_MEMORY_MAPPED)) { + if (priv->sm == ACPI_TPM2_COMMAND_BUFFER || + priv->sm == ACPI_TPM2_CRB_WITH_ARM_FFA || + priv->sm == ACPI_TPM2_MEMORY_MAPPED) { if (iores && buf->control_address == iores->start + sizeof(*priv->regs_h)) @@ -731,6 +776,7 @@ static int crb_acpi_add(struct acpi_device *device) struct tpm_chip *chip; struct device *dev = &device->dev; struct tpm2_crb_smc *crb_smc; + struct tpm2_crb_ffa *crb_ffa; struct tpm2_crb_pluton *crb_pluton; acpi_status status; u32 sm; @@ -769,6 +815,27 @@ static int crb_acpi_add(struct acpi_device *device) priv->smc_func_id = crb_smc->smc_func_id; } + if (sm == ACPI_TPM2_CRB_WITH_ARM_FFA) { + if (buf->header.length < (sizeof(*buf) + sizeof(*crb_ffa))) { + dev_err(dev, + FW_BUG "TPM2 ACPI table has wrong size %u for start method type %d\n", + buf->header.length, + ACPI_TPM2_CRB_WITH_ARM_FFA); + rc = -EINVAL; + goto out; + } + crb_ffa = ACPI_ADD_PTR(struct tpm2_crb_ffa, buf, sizeof(*buf)); + priv->ffa_flags = crb_ffa->flags; + priv->ffa_attributes = crb_ffa->attributes; + rc = tpm_crb_ffa_init(); + if (rc) { + /* If FF-A driver is not available yet, request probe retry */ + if (rc == -ENOENT) + rc = -EPROBE_DEFER; + goto out; + } + } + if (sm == ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON) { if (buf->header.length < (sizeof(*buf) + sizeof(*crb_pluton))) { dev_err(dev, diff --git a/drivers/char/tpm/tpm_crb_ffa.c b/drivers/char/tpm/tpm_crb_ffa.c new file mode 100644 index 000000000000..4ead61f01299 --- /dev/null +++ b/drivers/char/tpm/tpm_crb_ffa.c @@ -0,0 +1,376 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2024 Arm Ltd. + * + * This device driver implements the TPM CRB start method + * as defined in the TPM Service Command Response Buffer + * Interface Over FF-A (DEN0138). + */ + +#define pr_fmt(fmt) "CRB_FFA: " fmt + +#include <linux/arm_ffa.h> +#include "tpm_crb_ffa.h" + +/* TPM service function status codes */ +#define CRB_FFA_OK 0x05000001 +#define CRB_FFA_OK_RESULTS_RETURNED 0x05000002 +#define CRB_FFA_NOFUNC 0x8e000001 +#define CRB_FFA_NOTSUP 0x8e000002 +#define CRB_FFA_INVARG 0x8e000005 +#define CRB_FFA_INV_CRB_CTRL_DATA 0x8e000006 +#define CRB_FFA_ALREADY 0x8e000009 +#define CRB_FFA_DENIED 0x8e00000a +#define CRB_FFA_NOMEM 0x8e00000b + +#define CRB_FFA_VERSION_MAJOR 1 +#define CRB_FFA_VERSION_MINOR 0 + +/* version encoding */ +#define CRB_FFA_MAJOR_VERSION_MASK GENMASK(30, 16) +#define CRB_FFA_MINOR_VERSION_MASK GENMASK(15, 0) +#define CRB_FFA_MAJOR_VERSION(x) ((u16)(FIELD_GET(CRB_FFA_MAJOR_VERSION_MASK, (x)))) +#define CRB_FFA_MINOR_VERSION(x) ((u16)(FIELD_GET(CRB_FFA_MINOR_VERSION_MASK, (x)))) + +/* + * Normal world sends requests with FFA_MSG_SEND_DIRECT_REQ and + * responses are returned with FFA_MSG_SEND_DIRECT_RESP for normal + * messages. 
+ * + * All requests with FFA_MSG_SEND_DIRECT_REQ and FFA_MSG_SEND_DIRECT_RESP + * are using the AArch32 or AArch64 SMC calling convention with register usage + * as defined in FF-A specification: + * w0: Function ID + * -for 32-bit: 0x8400006F or 0x84000070 + * -for 64-bit: 0xC400006F or 0xC4000070 + * w1: Source/Destination IDs + * w2: Reserved (MBZ) + * w3-w7: Implementation defined, free to be used below + */ + +/* + * Returns the version of the interface that is available + * Call register usage: + * w3: Not used (MBZ) + * w4: TPM service function ID, CRB_FFA_GET_INTERFACE_VERSION + * w5-w7: Reserved (MBZ) + * + * Return register usage: + * w3: Not used (MBZ) + * w4: TPM service function status + * w5: TPM service interface version + * Bits[31:16]: major version + * Bits[15:0]: minor version + * w6-w7: Reserved (MBZ) + * + * Possible function status codes in register w4: + * CRB_FFA_OK_RESULTS_RETURNED: The version of the interface has been + * returned. + */ +#define CRB_FFA_GET_INTERFACE_VERSION 0x0f000001 + +/* + * Notifies the TPM service that a TPM command or TPM locality request is + * ready to be processed, and allows the TPM service to process it. + * Call register usage: + * w3: Not used (MBZ) + * w4: TPM service function ID, CRB_FFA_START + * w5: Start function qualifier + * Bits[31:8] (MBZ) + * Bits[7:0] + * 0: Notifies TPM that a command is ready to be processed + * 1: Notifies TPM that a locality request is ready to be processed + * w6: TPM locality, one of 0..4 + * -If the start function qualifier is 0, identifies the locality + * from where the command originated. + * -If the start function qualifier is 1, identifies the locality + * of the locality request + * w6-w7: Reserved (MBZ) + * + * Return register usage: + * w3: Not used (MBZ) + * w4: TPM service function status + * w5-w7: Reserved (MBZ) + * + * Possible function status codes in register w4: + * CRB_FFA_OK: the TPM service has been notified successfully + * CRB_FFA_INVARG: one or more arguments are not valid + * CRB_FFA_INV_CRB_CTRL_DATA: CRB control data or locality control + * data at the given TPM locality is not valid + * CRB_FFA_DENIED: the TPM has previously disabled locality requests and + * command processing at the given locality + */ +#define CRB_FFA_START 0x0f000201 + +struct tpm_crb_ffa { + struct ffa_device *ffa_dev; + u16 major_version; + u16 minor_version; + /* lock to protect sending of FF-A messages: */ + struct mutex msg_data_lock; + union { + struct ffa_send_direct_data direct_msg_data; + struct ffa_send_direct_data2 direct_msg_data2; + }; +}; + +static struct tpm_crb_ffa *tpm_crb_ffa; + +static int tpm_crb_ffa_to_linux_errno(int errno) +{ + int rc; + + switch (errno) { + case CRB_FFA_OK: + rc = 0; + break; + case CRB_FFA_OK_RESULTS_RETURNED: + rc = 0; + break; + case CRB_FFA_NOFUNC: + rc = -ENOENT; + break; + case CRB_FFA_NOTSUP: + rc = -EPERM; + break; + case CRB_FFA_INVARG: + rc = -EINVAL; + break; + case CRB_FFA_INV_CRB_CTRL_DATA: + rc = -ENOEXEC; + break; + case CRB_FFA_ALREADY: + rc = -EEXIST; + break; + case CRB_FFA_DENIED: + rc = -EACCES; + break; + case CRB_FFA_NOMEM: + rc = -ENOMEM; + break; + default: + rc = -EINVAL; + } + + return rc; +} + +/** + * tpm_crb_ffa_init - called by the CRB driver to do any needed initialization + * + * This function is called by the tpm_crb driver during the tpm_crb + * driver's initialization. If the tpm_crb_ffa has not been probed + * yet, returns -ENOENT in order to force a retry. 
If the tpm_crb_ffa
+ * driver has been probed but failed with an error, returns -ENODEV
+ * in order to prevent further retries.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int tpm_crb_ffa_init(void)
+{
+	if (!tpm_crb_ffa)
+		return -ENOENT;
+
+	if (IS_ERR_VALUE(tpm_crb_ffa))
+		return -ENODEV;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(tpm_crb_ffa_init);
+
+static int __tpm_crb_ffa_send_recieve(unsigned long func_id,
+				      unsigned long a0,
+				      unsigned long a1,
+				      unsigned long a2)
+{
+	const struct ffa_msg_ops *msg_ops;
+	int ret;
+
+	if (!tpm_crb_ffa)
+		return -ENOENT;
+
+	msg_ops = tpm_crb_ffa->ffa_dev->ops->msg_ops;
+
+	if (ffa_partition_supports_direct_req2_recv(tpm_crb_ffa->ffa_dev)) {
+		memset(&tpm_crb_ffa->direct_msg_data2, 0x00,
+		       sizeof(struct ffa_send_direct_data2));
+
+		tpm_crb_ffa->direct_msg_data2.data[0] = func_id;
+		tpm_crb_ffa->direct_msg_data2.data[1] = a0;
+		tpm_crb_ffa->direct_msg_data2.data[2] = a1;
+		tpm_crb_ffa->direct_msg_data2.data[3] = a2;
+
+		ret = msg_ops->sync_send_receive2(tpm_crb_ffa->ffa_dev,
+						  &tpm_crb_ffa->direct_msg_data2);
+		if (!ret)
+			ret = tpm_crb_ffa_to_linux_errno(tpm_crb_ffa->direct_msg_data2.data[0]);
+	} else {
+		memset(&tpm_crb_ffa->direct_msg_data, 0x00,
+		       sizeof(struct ffa_send_direct_data));
+
+		tpm_crb_ffa->direct_msg_data.data1 = func_id;
+		tpm_crb_ffa->direct_msg_data.data2 = a0;
+		tpm_crb_ffa->direct_msg_data.data3 = a1;
+		tpm_crb_ffa->direct_msg_data.data4 = a2;
+
+		ret = msg_ops->sync_send_receive(tpm_crb_ffa->ffa_dev,
+						 &tpm_crb_ffa->direct_msg_data);
+		if (!ret)
+			ret = tpm_crb_ffa_to_linux_errno(tpm_crb_ffa->direct_msg_data.data1);
+	}
+
+	return ret;
+}
+
+/**
+ * tpm_crb_ffa_get_interface_version() - gets the ABI version of the TPM service
+ * @major: Pointer to caller-allocated buffer to hold the major version
+ *         number of the ABI
+ * @minor: Pointer to caller-allocated buffer to hold the minor version
+ *         number of the ABI
+ *
+ * Returns the major and minor version of the ABI of the FF-A based TPM.
+ * Allows the caller to evaluate its compatibility with the version of
+ * the ABI.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int tpm_crb_ffa_get_interface_version(u16 *major, u16 *minor)
+{
+	int rc;
+
+	if (!tpm_crb_ffa)
+		return -ENOENT;
+
+	if (IS_ERR_VALUE(tpm_crb_ffa))
+		return -ENODEV;
+
+	if (!major || !minor)
+		return -EINVAL;
+
+	guard(mutex)(&tpm_crb_ffa->msg_data_lock);
+
+	rc = __tpm_crb_ffa_send_recieve(CRB_FFA_GET_INTERFACE_VERSION, 0x00, 0x00, 0x00);
+	if (!rc) {
+		if (ffa_partition_supports_direct_req2_recv(tpm_crb_ffa->ffa_dev)) {
+			*major = CRB_FFA_MAJOR_VERSION(tpm_crb_ffa->direct_msg_data2.data[1]);
+			*minor = CRB_FFA_MINOR_VERSION(tpm_crb_ffa->direct_msg_data2.data[1]);
+		} else {
+			*major = CRB_FFA_MAJOR_VERSION(tpm_crb_ffa->direct_msg_data.data2);
+			*minor = CRB_FFA_MINOR_VERSION(tpm_crb_ffa->direct_msg_data.data2);
+		}
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(tpm_crb_ffa_get_interface_version);
+
+/**
+ * tpm_crb_ffa_start() - signals the TPM that a field has changed in the CRB
+ * @request_type: Identifies whether the change to the CRB is in the command
+ *                fields or locality fields.
+ * @locality: Specifies the locality number.
+ *
+ * Used by the CRB driver to notify the TPM service that a TPM command
+ * or a TPM locality request is ready to be processed at the given
+ * locality.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */ +int tpm_crb_ffa_start(int request_type, int locality) +{ + if (!tpm_crb_ffa) + return -ENOENT; + + if (IS_ERR_VALUE(tpm_crb_ffa)) + return -ENODEV; + + guard(mutex)(&tpm_crb_ffa->msg_data_lock); + + return __tpm_crb_ffa_send_recieve(CRB_FFA_START, request_type, locality, 0x00); +} +EXPORT_SYMBOL_GPL(tpm_crb_ffa_start); + +static int tpm_crb_ffa_probe(struct ffa_device *ffa_dev) +{ + struct tpm_crb_ffa *p; + int rc; + + /* only one instance of a TPM partition is supported */ + if (tpm_crb_ffa && !IS_ERR_VALUE(tpm_crb_ffa)) + return -EEXIST; + + tpm_crb_ffa = ERR_PTR(-ENODEV); // set tpm_crb_ffa so we can detect probe failure + + if (!ffa_partition_supports_direct_recv(ffa_dev) && + !ffa_partition_supports_direct_req2_recv(ffa_dev)) { + dev_warn(&ffa_dev->dev, "partition doesn't support direct message receive.\n"); + return -EINVAL; + } + + p = kzalloc(sizeof(*tpm_crb_ffa), GFP_KERNEL); + if (!p) + return -ENOMEM; + tpm_crb_ffa = p; + + mutex_init(&tpm_crb_ffa->msg_data_lock); + tpm_crb_ffa->ffa_dev = ffa_dev; + ffa_dev_set_drvdata(ffa_dev, tpm_crb_ffa); + + /* if TPM is aarch32 use 32-bit SMCs */ + if (!ffa_partition_check_property(ffa_dev, FFA_PARTITION_AARCH64_EXEC)) + ffa_dev->ops->msg_ops->mode_32bit_set(ffa_dev); + + /* verify compatibility of TPM service version number */ + rc = tpm_crb_ffa_get_interface_version(&tpm_crb_ffa->major_version, + &tpm_crb_ffa->minor_version); + if (rc) { + dev_err(&ffa_dev->dev, "failed to get crb interface version. rc:%d\n", rc); + goto out; + } + + dev_info(&ffa_dev->dev, "ABI version %u.%u\n", tpm_crb_ffa->major_version, + tpm_crb_ffa->minor_version); + + if (tpm_crb_ffa->major_version != CRB_FFA_VERSION_MAJOR || + (tpm_crb_ffa->minor_version > 0 && + tpm_crb_ffa->minor_version < CRB_FFA_VERSION_MINOR)) { + dev_warn(&ffa_dev->dev, "Incompatible ABI version\n"); + goto out; + } + + return 0; + +out: + kfree(tpm_crb_ffa); + tpm_crb_ffa = ERR_PTR(-ENODEV); + return -EINVAL; +} + +static void tpm_crb_ffa_remove(struct ffa_device *ffa_dev) +{ + kfree(tpm_crb_ffa); + tpm_crb_ffa = NULL; +} + +static const struct ffa_device_id tpm_crb_ffa_device_id[] = { + /* 17b862a4-1806-4faf-86b3-089a58353861 */ + { UUID_INIT(0x17b862a4, 0x1806, 0x4faf, + 0x86, 0xb3, 0x08, 0x9a, 0x58, 0x35, 0x38, 0x61) }, + {} +}; + +static struct ffa_driver tpm_crb_ffa_driver = { + .name = "ffa-crb", + .probe = tpm_crb_ffa_probe, + .remove = tpm_crb_ffa_remove, + .id_table = tpm_crb_ffa_device_id, +}; + +module_ffa_driver(tpm_crb_ffa_driver); + +MODULE_AUTHOR("Arm"); +MODULE_DESCRIPTION("TPM CRB FFA driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/char/tpm/tpm_crb_ffa.h b/drivers/char/tpm/tpm_crb_ffa.h new file mode 100644 index 000000000000..645c41ede10e --- /dev/null +++ b/drivers/char/tpm/tpm_crb_ffa.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2024 Arm Ltd. + * + * This device driver implements the TPM CRB start method + * as defined in the TPM Service Command Response Buffer + * Interface Over FF-A (DEN0138). 
+ */ +#ifndef _TPM_CRB_FFA_H +#define _TPM_CRB_FFA_H + +#if IS_REACHABLE(CONFIG_TCG_ARM_CRB_FFA) +int tpm_crb_ffa_init(void); +int tpm_crb_ffa_get_interface_version(u16 *major, u16 *minor); +int tpm_crb_ffa_start(int request_type, int locality); +#else +static inline int tpm_crb_ffa_init(void) { return 0; } +static inline int tpm_crb_ffa_get_interface_version(u16 *major, u16 *minor) { return 0; } +static inline int tpm_crb_ffa_start(int request_type, int locality) { return 0; } +#endif + +#define CRB_FFA_START_TYPE_COMMAND 0 +#define CRB_FFA_START_TYPE_LOCALITY_REQUEST 1 + +#endif diff --git a/drivers/char/tpm/tpm_ftpm_tee.c b/drivers/char/tpm/tpm_ftpm_tee.c index 2ea4882251cf..53ba28ccd5d3 100644 --- a/drivers/char/tpm/tpm_ftpm_tee.c +++ b/drivers/char/tpm/tpm_ftpm_tee.c @@ -164,30 +164,10 @@ static int ftpm_tee_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t len) return 0; } -static void ftpm_tee_tpm_op_cancel(struct tpm_chip *chip) -{ - /* not supported */ -} - -static u8 ftpm_tee_tpm_op_status(struct tpm_chip *chip) -{ - return 0; -} - -static bool ftpm_tee_tpm_req_canceled(struct tpm_chip *chip, u8 status) -{ - return false; -} - static const struct tpm_class_ops ftpm_tee_tpm_ops = { .flags = TPM_OPS_AUTO_STARTUP, .recv = ftpm_tee_tpm_op_recv, .send = ftpm_tee_tpm_op_send, - .cancel = ftpm_tee_tpm_op_cancel, - .status = ftpm_tee_tpm_op_status, - .req_complete_mask = 0, - .req_complete_val = 0, - .req_canceled = ftpm_tee_tpm_req_canceled, }; /* @@ -362,11 +342,11 @@ MODULE_DEVICE_TABLE(of, of_ftpm_tee_ids); static struct platform_driver ftpm_tee_plat_driver = { .driver = { .name = "ftpm-tee", - .of_match_table = of_match_ptr(of_ftpm_tee_ids), + .of_match_table = of_ftpm_tee_ids, }, .shutdown = ftpm_plat_tee_shutdown, .probe = ftpm_plat_tee_probe, - .remove_new = ftpm_plat_tee_remove, + .remove = ftpm_plat_tee_remove, }; /* UUID of the fTPM TA */ diff --git a/drivers/char/tpm/tpm_ftpm_tee.h b/drivers/char/tpm/tpm_ftpm_tee.h index f98daa7bf68c..e39903b7ea07 100644 --- a/drivers/char/tpm/tpm_ftpm_tee.h +++ b/drivers/char/tpm/tpm_ftpm_tee.h @@ -21,7 +21,6 @@ /** * struct ftpm_tee_private - fTPM's private data * @chip: struct tpm_chip instance registered with tpm framework. - * @state: internal state * @session: fTPM TA session identifier. * @resp_len: cached response buffer length. * @resp_buf: cached response buffer. 
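[Editorial aside, not part of the patch] To make the FF-A start method easier to follow, here is a minimal sketch of how a CRB-style consumer is expected to drive the interface declared in tpm_crb_ffa.h above. The calls tpm_crb_ffa_init(), tpm_crb_ffa_get_interface_version() and tpm_crb_ffa_start(), and the constant CRB_FFA_START_TYPE_COMMAND, come from this series; the my_ffa_crb_probe()/my_ffa_crb_go() wrappers are hypothetical names used only for illustration and roughly mirror what the crb_acpi_add() and crb_send() changes in tpm_crb.c do.

#include <linux/device.h>
#include <linux/errno.h>
#include "tpm.h"
#include "tpm_crb_ffa.h"

/* Illustrative only: probe-time setup for an FF-A backed CRB device. */
static int my_ffa_crb_probe(struct device *dev)
{
	u16 major, minor;
	int rc;

	rc = tpm_crb_ffa_init();
	if (rc == -ENOENT)		/* FF-A TPM partition not probed yet */
		return -EPROBE_DEFER;
	if (rc)
		return rc;

	rc = tpm_crb_ffa_get_interface_version(&major, &minor);
	if (rc)
		return rc;

	dev_info(dev, "FF-A CRB ABI %u.%u\n", major, minor);
	return 0;
}

/* Illustrative only: kick the TPM service once a command sits in the CRB. */
static int my_ffa_crb_go(struct tpm_chip *chip)
{
	return tpm_crb_ffa_start(CRB_FFA_START_TYPE_COMMAND, chip->locality);
}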
diff --git a/drivers/char/tpm/tpm_i2c_atmel.c b/drivers/char/tpm/tpm_i2c_atmel.c index 301a95b3734f..d1d27fdfe523 100644 --- a/drivers/char/tpm/tpm_i2c_atmel.c +++ b/drivers/char/tpm/tpm_i2c_atmel.c @@ -186,7 +186,7 @@ static void i2c_atmel_remove(struct i2c_client *client) } static const struct i2c_device_id i2c_atmel_id[] = { - {I2C_DRIVER_NAME, 0}, + { I2C_DRIVER_NAME }, {} }; MODULE_DEVICE_TABLE(i2c, i2c_atmel_id); diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c index d3989b257f42..76d048f63d55 100644 --- a/drivers/char/tpm/tpm_ibmvtpm.c +++ b/drivers/char/tpm/tpm_ibmvtpm.c @@ -450,6 +450,7 @@ static bool tpm_ibmvtpm_req_canceled(struct tpm_chip *chip, u8 status) } static const struct tpm_class_ops tpm_ibmvtpm = { + .flags = TPM_OPS_AUTO_STARTUP, .recv = tpm_ibmvtpm_recv, .send = tpm_ibmvtpm_send, .cancel = tpm_ibmvtpm_cancel, @@ -690,16 +691,6 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev, if (!strcmp(id->compat, "IBM,vtpm20")) chip->flags |= TPM_CHIP_FLAG_TPM2; - rc = tpm_get_timeouts(chip); - if (rc) - goto init_irq_cleanup; - - if (chip->flags & TPM_CHIP_FLAG_TPM2) { - rc = tpm2_get_cc_attrs_tbl(chip); - if (rc) - goto init_irq_cleanup; - } - return tpm_chip_register(chip); init_irq_cleanup: do { diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c index 9c924a1440a9..2d2ae37153ba 100644 --- a/drivers/char/tpm/tpm_infineon.c +++ b/drivers/char/tpm/tpm_infineon.c @@ -51,34 +51,40 @@ static struct tpm_inf_dev tpm_dev; static inline void tpm_data_out(unsigned char data, unsigned char offset) { +#ifdef CONFIG_HAS_IOPORT if (tpm_dev.iotype == TPM_INF_IO_PORT) outb(data, tpm_dev.data_regs + offset); else +#endif writeb(data, tpm_dev.mem_base + tpm_dev.data_regs + offset); } static inline unsigned char tpm_data_in(unsigned char offset) { +#ifdef CONFIG_HAS_IOPORT if (tpm_dev.iotype == TPM_INF_IO_PORT) return inb(tpm_dev.data_regs + offset); - else - return readb(tpm_dev.mem_base + tpm_dev.data_regs + offset); +#endif + return readb(tpm_dev.mem_base + tpm_dev.data_regs + offset); } static inline void tpm_config_out(unsigned char data, unsigned char offset) { +#ifdef CONFIG_HAS_IOPORT if (tpm_dev.iotype == TPM_INF_IO_PORT) outb(data, tpm_dev.config_port + offset); else +#endif writeb(data, tpm_dev.mem_base + tpm_dev.index_off + offset); } static inline unsigned char tpm_config_in(unsigned char offset) { +#ifdef CONFIG_HAS_IOPORT if (tpm_dev.iotype == TPM_INF_IO_PORT) return inb(tpm_dev.config_port + offset); - else - return readb(tpm_dev.mem_base + tpm_dev.index_off + offset); +#endif + return readb(tpm_dev.mem_base + tpm_dev.index_off + offset); } /* TPM header definitions */ diff --git a/drivers/char/tpm/tpm_svsm.c b/drivers/char/tpm/tpm_svsm.c new file mode 100644 index 000000000000..4280edf427d6 --- /dev/null +++ b/drivers/char/tpm/tpm_svsm.c @@ -0,0 +1,125 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved. + * + * Driver for the vTPM defined by the AMD SVSM spec [1]. + * + * The specification defines a protocol that a SEV-SNP guest OS can use to + * discover and talk to a vTPM emulated by the Secure VM Service Module (SVSM) + * in the guest context, but at a more privileged level (usually VMPL0). 
+ * + * [1] "Secure VM Service Module for SEV-SNP Guests" + * Publication # 58019 Revision: 1.00 + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/platform_device.h> +#include <linux/tpm_svsm.h> + +#include <asm/sev.h> + +#include "tpm.h" + +struct tpm_svsm_priv { + void *buffer; +}; + +static int tpm_svsm_send(struct tpm_chip *chip, u8 *buf, size_t len) +{ + struct tpm_svsm_priv *priv = dev_get_drvdata(&chip->dev); + int ret; + + ret = svsm_vtpm_cmd_request_fill(priv->buffer, 0, buf, len); + if (ret) + return ret; + + /* + * The SVSM call uses the same buffer for the command and for the + * response, so after this call, the buffer will contain the response + * that can be used by .recv() op. + */ + return snp_svsm_vtpm_send_command(priv->buffer); +} + +static int tpm_svsm_recv(struct tpm_chip *chip, u8 *buf, size_t len) +{ + struct tpm_svsm_priv *priv = dev_get_drvdata(&chip->dev); + + /* + * The internal buffer contains the response after we send the command + * to SVSM. + */ + return svsm_vtpm_cmd_response_parse(priv->buffer, buf, len); +} + +static struct tpm_class_ops tpm_chip_ops = { + .flags = TPM_OPS_AUTO_STARTUP, + .recv = tpm_svsm_recv, + .send = tpm_svsm_send, +}; + +static int __init tpm_svsm_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct tpm_svsm_priv *priv; + struct tpm_chip *chip; + int err; + + priv = devm_kmalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + /* + * The maximum buffer supported is one page (see SVSM_VTPM_MAX_BUFFER + * in tpm_svsm.h). + */ + priv->buffer = (void *)devm_get_free_pages(dev, GFP_KERNEL, 0); + if (!priv->buffer) + return -ENOMEM; + + chip = tpmm_chip_alloc(dev, &tpm_chip_ops); + if (IS_ERR(chip)) + return PTR_ERR(chip); + + dev_set_drvdata(&chip->dev, priv); + + err = tpm2_probe(chip); + if (err) + return err; + + err = tpm_chip_register(chip); + if (err) + return err; + + dev_info(dev, "SNP SVSM vTPM %s device\n", + (chip->flags & TPM_CHIP_FLAG_TPM2) ? "2.0" : "1.2"); + + return 0; +} + +static void __exit tpm_svsm_remove(struct platform_device *pdev) +{ + struct tpm_chip *chip = platform_get_drvdata(pdev); + + tpm_chip_unregister(chip); +} + +/* + * tpm_svsm_remove() lives in .exit.text. For drivers registered via + * module_platform_driver_probe() this is ok because they cannot get unbound + * at runtime. So mark the driver struct with __refdata to prevent modpost + * triggering a section mismatch warning. 
+ */ +static struct platform_driver tpm_svsm_driver __refdata = { + .remove = __exit_p(tpm_svsm_remove), + .driver = { + .name = "tpm-svsm", + }, +}; + +module_platform_driver_probe(tpm_svsm_driver, tpm_svsm_probe); + +MODULE_DESCRIPTION("SNP SVSM vTPM Driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:tpm-svsm"); diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c index 2f7326d297ad..9aa230a63616 100644 --- a/drivers/char/tpm/tpm_tis.c +++ b/drivers/char/tpm/tpm_tis.c @@ -356,7 +356,7 @@ MODULE_DEVICE_TABLE(of, tis_of_platform_match); static struct platform_driver tis_drv = { .probe = tpm_tis_plat_probe, - .remove_new = tpm_tis_plat_remove, + .remove = tpm_tis_plat_remove, .driver = { .name = "tpm_tis", .pm = &tpm_tis_pm, diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c index 714070ebb6e7..ed0d3d8449b3 100644 --- a/drivers/char/tpm/tpm_tis_core.c +++ b/drivers/char/tpm/tpm_tis_core.c @@ -114,11 +114,10 @@ again: return 0; /* process status changes without irq support */ do { + usleep_range(priv->timeout_min, priv->timeout_max); status = chip->ops->status(chip); if ((status & mask) == mask) return 0; - usleep_range(priv->timeout_min, - priv->timeout_max); } while (time_before(jiffies, stop)); return -ETIME; } @@ -464,7 +463,10 @@ static int tpm_tis_send_data(struct tpm_chip *chip, const u8 *buf, size_t len) if (wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c, &priv->int_queue, false) < 0) { - rc = -ETIME; + if (test_bit(TPM_TIS_STATUS_VALID_RETRY, &priv->flags)) + rc = -EAGAIN; + else + rc = -ETIME; goto out_err; } status = tpm_tis_status(chip); @@ -481,7 +483,10 @@ static int tpm_tis_send_data(struct tpm_chip *chip, const u8 *buf, size_t len) if (wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c, &priv->int_queue, false) < 0) { - rc = -ETIME; + if (test_bit(TPM_TIS_STATUS_VALID_RETRY, &priv->flags)) + rc = -EAGAIN; + else + rc = -ETIME; goto out_err; } status = tpm_tis_status(chip); @@ -546,9 +551,11 @@ static int tpm_tis_send_main(struct tpm_chip *chip, const u8 *buf, size_t len) if (rc >= 0) /* Data transfer done successfully */ break; - else if (rc != -EIO) + else if (rc != -EAGAIN && rc != -EIO) /* Data transfer failed, not recoverable */ return rc; + + usleep_range(priv->timeout_min, priv->timeout_max); } /* go and do it */ @@ -1020,7 +1027,8 @@ void tpm_tis_remove(struct tpm_chip *chip) interrupt = 0; tpm_tis_write32(priv, reg, ~TPM_GLOBAL_INT_ENABLE & interrupt); - flush_work(&priv->free_irq_work); + if (priv->free_irq_work.func) + flush_work(&priv->free_irq_work); tpm_tis_clkrun_enable(chip, false); @@ -1057,11 +1065,6 @@ static void tpm_tis_clkrun_enable(struct tpm_chip *chip, bool value) clkrun_val &= ~LPC_CLKRUN_EN; iowrite32(clkrun_val, data->ilb_base_addr + LPC_CNTRL_OFFSET); - /* - * Write any random value on port 0x80 which is on LPC, to make - * sure LPC clock is running before sending any TPM command. - */ - outb(0xCC, 0x80); } else { data->clkrun_enabled--; if (data->clkrun_enabled) @@ -1072,13 +1075,15 @@ static void tpm_tis_clkrun_enable(struct tpm_chip *chip, bool value) /* Enable LPC CLKRUN# */ clkrun_val |= LPC_CLKRUN_EN; iowrite32(clkrun_val, data->ilb_base_addr + LPC_CNTRL_OFFSET); - - /* - * Write any random value on port 0x80 which is on LPC, to make - * sure LPC clock is running before sending any TPM command. - */ - outb(0xCC, 0x80); } + +#ifdef CONFIG_HAS_IOPORT + /* + * Write any random value on port 0x80 which is on LPC, to make + * sure LPC clock is running before sending any TPM command. 
+ */ + outb(0xCC, 0x80); +#endif } static const struct tpm_class_ops tpm_tis = { @@ -1146,6 +1151,9 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq, priv->timeout_max = TIS_TIMEOUT_MAX_ATML; } + if (priv->manufacturer_id == TPM_VID_IFX) + set_bit(TPM_TIS_STATUS_VALID_RETRY, &priv->flags); + if (is_bsw()) { priv->ilb_base_addr = ioremap(INTEL_LEGACY_BLK_BASE_ADDR, ILB_REMAP_SIZE); diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h index 13e99cf65efe..6c3aa480396b 100644 --- a/drivers/char/tpm/tpm_tis_core.h +++ b/drivers/char/tpm/tpm_tis_core.h @@ -54,7 +54,7 @@ enum tis_int_flags { enum tis_defaults { TIS_MEM_LEN = 0x5000, TIS_SHORT_TIMEOUT = 750, /* ms */ - TIS_LONG_TIMEOUT = 2000, /* 2 sec */ + TIS_LONG_TIMEOUT = 4000, /* 4 secs */ TIS_TIMEOUT_MIN_ATML = 14700, /* usecs */ TIS_TIMEOUT_MAX_ATML = 15000, /* usecs */ }; @@ -89,6 +89,7 @@ enum tpm_tis_flags { TPM_TIS_INVALID_STATUS = 1, TPM_TIS_DEFAULT_CANCELLATION = 2, TPM_TIS_IRQ_TESTED = 3, + TPM_TIS_STATUS_VALID_RETRY = 4, }; struct tpm_tis_data { @@ -210,7 +211,7 @@ static inline int tpm_tis_verify_crc(struct tpm_tis_data *data, size_t len, static inline bool is_bsw(void) { #ifdef CONFIG_X86 - return ((boot_cpu_data.x86_model == INTEL_FAM6_ATOM_AIRMONT) ? 1 : 0); + return (boot_cpu_data.x86_vfm == INTEL_ATOM_AIRMONT) ? 1 : 0; #else return false; #endif diff --git a/drivers/char/tpm/tpm_tis_i2c.c b/drivers/char/tpm/tpm_tis_i2c.c index 9511c0d50185..6cd07dd34507 100644 --- a/drivers/char/tpm/tpm_tis_i2c.c +++ b/drivers/char/tpm/tpm_tis_i2c.c @@ -375,7 +375,7 @@ static void tpm_tis_i2c_remove(struct i2c_client *client) } static const struct i2c_device_id tpm_tis_i2c_id[] = { - { "tpm_tis_i2c", 0 }, + { "tpm_tis_i2c" }, {} }; MODULE_DEVICE_TABLE(i2c, tpm_tis_i2c_id); diff --git a/drivers/char/tpm/tpm_tis_i2c_cr50.c b/drivers/char/tpm/tpm_tis_i2c_cr50.c index adf22992138e..3b55a7b05c46 100644 --- a/drivers/char/tpm/tpm_tis_i2c_cr50.c +++ b/drivers/char/tpm/tpm_tis_i2c_cr50.c @@ -17,6 +17,7 @@ */ #include <linux/acpi.h> +#include <linux/bug.h> #include <linux/completion.h> #include <linux/i2c.h> #include <linux/interrupt.h> @@ -30,11 +31,13 @@ #define TPM_CR50_MAX_BUFSIZE 64 #define TPM_CR50_TIMEOUT_SHORT_MS 2 /* Short timeout during transactions */ #define TPM_CR50_TIMEOUT_NOIRQ_MS 20 /* Timeout for TPM ready without IRQ */ -#define TPM_CR50_I2C_DID_VID 0x00281ae0L /* Device and vendor ID reg value */ -#define TPM_TI50_I2C_DID_VID 0x504a6666L /* Device and vendor ID reg value */ +#define TPM_CR50_I2C_DID_VID 0x00281ae0L /* Device and vendor ID for Cr50 H1 */ +#define TPM_TI50_DT_I2C_DID_VID 0x504a6666L /* Device and vendor ID for Ti50 DT */ +#define TPM_TI50_OT_I2C_DID_VID 0x50666666L /* Device and vendor ID for TI50 OT */ #define TPM_CR50_I2C_MAX_RETRIES 3 /* Max retries due to I2C errors */ #define TPM_CR50_I2C_RETRY_DELAY_LO 55 /* Min usecs between retries on I2C */ #define TPM_CR50_I2C_RETRY_DELAY_HI 65 /* Max usecs between retries on I2C */ +#define TPM_CR50_I2C_DEFAULT_LOC 0 #define TPM_I2C_ACCESS(l) (0x0000 | ((l) << 4)) #define TPM_I2C_STS(l) (0x0001 | ((l) << 4)) @@ -199,8 +202,6 @@ static int tpm_cr50_i2c_read(struct tpm_chip *chip, u8 addr, u8 *buffer, size_t }; int rc; - i2c_lock_bus(client->adapter, I2C_LOCK_SEGMENT); - /* Prepare for completion interrupt */ tpm_cr50_i2c_enable_tpm_irq(chip); @@ -219,7 +220,6 @@ static int tpm_cr50_i2c_read(struct tpm_chip *chip, u8 addr, u8 *buffer, size_t out: tpm_cr50_i2c_disable_tpm_irq(chip); - i2c_unlock_bus(client->adapter, 
I2C_LOCK_SEGMENT); if (rc < 0) return rc; @@ -261,8 +261,6 @@ static int tpm_cr50_i2c_write(struct tpm_chip *chip, u8 addr, u8 *buffer, priv->buf[0] = addr; memcpy(priv->buf + 1, buffer, len); - i2c_lock_bus(client->adapter, I2C_LOCK_SEGMENT); - /* Prepare for completion interrupt */ tpm_cr50_i2c_enable_tpm_irq(chip); @@ -276,7 +274,6 @@ static int tpm_cr50_i2c_write(struct tpm_chip *chip, u8 addr, u8 *buffer, out: tpm_cr50_i2c_disable_tpm_irq(chip); - i2c_unlock_bus(client->adapter, I2C_LOCK_SEGMENT); if (rc < 0) return rc; @@ -285,25 +282,26 @@ out: } /** - * tpm_cr50_check_locality() - Verify TPM locality 0 is active. + * tpm_cr50_check_locality() - Verify if required TPM locality is active. * @chip: A TPM chip. + * @loc: Locality to be verified * * Return: - * - 0: Success. + * - loc: Success. * - -errno: A POSIX error code. */ -static int tpm_cr50_check_locality(struct tpm_chip *chip) +static int tpm_cr50_check_locality(struct tpm_chip *chip, int loc) { u8 mask = TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY; u8 buf; int rc; - rc = tpm_cr50_i2c_read(chip, TPM_I2C_ACCESS(0), &buf, sizeof(buf)); + rc = tpm_cr50_i2c_read(chip, TPM_I2C_ACCESS(loc), &buf, sizeof(buf)); if (rc < 0) return rc; if ((buf & mask) == mask) - return 0; + return loc; return -EIO; } @@ -311,53 +309,72 @@ static int tpm_cr50_check_locality(struct tpm_chip *chip) /** * tpm_cr50_release_locality() - Release TPM locality. * @chip: A TPM chip. - * @force: Flag to force release if set. + * @loc: Locality to be released + * + * Return: + * - 0: Success. + * - -errno: A POSIX error code. */ -static void tpm_cr50_release_locality(struct tpm_chip *chip, bool force) +static int tpm_cr50_release_locality(struct tpm_chip *chip, int loc) { + struct i2c_client *client = to_i2c_client(chip->dev.parent); u8 mask = TPM_ACCESS_VALID | TPM_ACCESS_REQUEST_PENDING; - u8 addr = TPM_I2C_ACCESS(0); + u8 addr = TPM_I2C_ACCESS(loc); u8 buf; + int rc; - if (tpm_cr50_i2c_read(chip, addr, &buf, sizeof(buf)) < 0) - return; + rc = tpm_cr50_i2c_read(chip, addr, &buf, sizeof(buf)); + if (rc < 0) + goto unlock_out; - if (force || (buf & mask) == mask) { + if ((buf & mask) == mask) { buf = TPM_ACCESS_ACTIVE_LOCALITY; - tpm_cr50_i2c_write(chip, addr, &buf, sizeof(buf)); + rc = tpm_cr50_i2c_write(chip, addr, &buf, sizeof(buf)); } + +unlock_out: + i2c_unlock_bus(client->adapter, I2C_LOCK_SEGMENT); + return rc; } /** - * tpm_cr50_request_locality() - Request TPM locality 0. + * tpm_cr50_request_locality() - Request TPM locality. * @chip: A TPM chip. + * @loc: Locality to be requested. * * Return: - * - 0: Success. + * - loc: Success. * - -errno: A POSIX error code. 
*/ -static int tpm_cr50_request_locality(struct tpm_chip *chip) +static int tpm_cr50_request_locality(struct tpm_chip *chip, int loc) { + struct i2c_client *client = to_i2c_client(chip->dev.parent); u8 buf = TPM_ACCESS_REQUEST_USE; unsigned long stop; int rc; - if (!tpm_cr50_check_locality(chip)) - return 0; + i2c_lock_bus(client->adapter, I2C_LOCK_SEGMENT); - rc = tpm_cr50_i2c_write(chip, TPM_I2C_ACCESS(0), &buf, sizeof(buf)); + if (tpm_cr50_check_locality(chip, loc) == loc) + return loc; + + rc = tpm_cr50_i2c_write(chip, TPM_I2C_ACCESS(loc), &buf, sizeof(buf)); if (rc < 0) - return rc; + goto unlock_out; stop = jiffies + chip->timeout_a; do { - if (!tpm_cr50_check_locality(chip)) - return 0; + if (tpm_cr50_check_locality(chip, loc) == loc) + return loc; msleep(TPM_CR50_TIMEOUT_SHORT_MS); } while (time_before(jiffies, stop)); - return -ETIMEDOUT; + rc = -ETIMEDOUT; + +unlock_out: + i2c_unlock_bus(client->adapter, I2C_LOCK_SEGMENT); + return rc; } /** @@ -373,7 +390,7 @@ static u8 tpm_cr50_i2c_tis_status(struct tpm_chip *chip) { u8 buf[4]; - if (tpm_cr50_i2c_read(chip, TPM_I2C_STS(0), buf, sizeof(buf)) < 0) + if (tpm_cr50_i2c_read(chip, TPM_I2C_STS(chip->locality), buf, sizeof(buf)) < 0) return 0; return buf[0]; @@ -389,7 +406,7 @@ static void tpm_cr50_i2c_tis_set_ready(struct tpm_chip *chip) { u8 buf[4] = { TPM_STS_COMMAND_READY }; - tpm_cr50_i2c_write(chip, TPM_I2C_STS(0), buf, sizeof(buf)); + tpm_cr50_i2c_write(chip, TPM_I2C_STS(chip->locality), buf, sizeof(buf)); msleep(TPM_CR50_TIMEOUT_SHORT_MS); } @@ -419,7 +436,7 @@ static int tpm_cr50_i2c_get_burst_and_status(struct tpm_chip *chip, u8 mask, stop = jiffies + chip->timeout_b; do { - if (tpm_cr50_i2c_read(chip, TPM_I2C_STS(0), buf, sizeof(buf)) < 0) { + if (tpm_cr50_i2c_read(chip, TPM_I2C_STS(chip->locality), buf, sizeof(buf)) < 0) { msleep(TPM_CR50_TIMEOUT_SHORT_MS); continue; } @@ -453,7 +470,7 @@ static int tpm_cr50_i2c_tis_recv(struct tpm_chip *chip, u8 *buf, size_t buf_len) u8 mask = TPM_STS_VALID | TPM_STS_DATA_AVAIL; size_t burstcnt, cur, len, expected; - u8 addr = TPM_I2C_DATA_FIFO(0); + u8 addr = TPM_I2C_DATA_FIFO(chip->locality); u32 status; int rc; @@ -515,7 +532,6 @@ static int tpm_cr50_i2c_tis_recv(struct tpm_chip *chip, u8 *buf, size_t buf_len) goto out_err; } - tpm_cr50_release_locality(chip, false); return cur; out_err: @@ -523,7 +539,6 @@ out_err: if (tpm_cr50_i2c_tis_status(chip) & TPM_STS_COMMAND_READY) tpm_cr50_i2c_tis_set_ready(chip); - tpm_cr50_release_locality(chip, false); return rc; } @@ -545,10 +560,6 @@ static int tpm_cr50_i2c_tis_send(struct tpm_chip *chip, u8 *buf, size_t len) u32 status; int rc; - rc = tpm_cr50_request_locality(chip); - if (rc < 0) - return rc; - /* Wait until TPM is ready for a command */ stop = jiffies + chip->timeout_b; while (!(tpm_cr50_i2c_tis_status(chip) & TPM_STS_COMMAND_READY)) { @@ -577,7 +588,8 @@ static int tpm_cr50_i2c_tis_send(struct tpm_chip *chip, u8 *buf, size_t len) * that is inserted by tpm_cr50_i2c_write() */ limit = min_t(size_t, burstcnt - 1, len); - rc = tpm_cr50_i2c_write(chip, TPM_I2C_DATA_FIFO(0), &buf[sent], limit); + rc = tpm_cr50_i2c_write(chip, TPM_I2C_DATA_FIFO(chip->locality), + &buf[sent], limit); if (rc < 0) { dev_err(&chip->dev, "Write failed\n"); goto out_err; @@ -598,7 +610,7 @@ static int tpm_cr50_i2c_tis_send(struct tpm_chip *chip, u8 *buf, size_t len) } /* Start the TPM command */ - rc = tpm_cr50_i2c_write(chip, TPM_I2C_STS(0), tpm_go, + rc = tpm_cr50_i2c_write(chip, TPM_I2C_STS(chip->locality), tpm_go, sizeof(tpm_go)); if (rc < 0) { 
dev_err(&chip->dev, "Start command failed\n"); @@ -611,7 +623,6 @@ out_err: if (tpm_cr50_i2c_tis_status(chip) & TPM_STS_COMMAND_READY) tpm_cr50_i2c_tis_set_ready(chip); - tpm_cr50_release_locality(chip, false); return rc; } @@ -650,6 +661,8 @@ static const struct tpm_class_ops cr50_i2c = { .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID, .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID, .req_canceled = &tpm_cr50_i2c_req_canceled, + .request_locality = &tpm_cr50_request_locality, + .relinquish_locality = &tpm_cr50_release_locality, }; #ifdef CONFIG_ACPI @@ -669,6 +682,27 @@ MODULE_DEVICE_TABLE(of, of_cr50_i2c_match); #endif /** + * tpm_cr50_vid_to_name() - Maps VID to name. + * @vendor: Vendor identifier to map to name + * + * Return: + * A valid string for the vendor or empty string + */ +static const char *tpm_cr50_vid_to_name(u32 vendor) +{ + switch (vendor) { + case TPM_CR50_I2C_DID_VID: + return "cr50"; + case TPM_TI50_DT_I2C_DID_VID: + return "ti50 DT"; + case TPM_TI50_OT_I2C_DID_VID: + return "ti50 OT"; + default: + return "unknown"; + } +} + +/** * tpm_cr50_i2c_probe() - Driver probe function. * @client: I2C client information. * @@ -684,6 +718,7 @@ static int tpm_cr50_i2c_probe(struct i2c_client *client) u32 vendor; u8 buf[4]; int rc; + int loc; if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) return -ENODEV; @@ -726,29 +761,37 @@ static int tpm_cr50_i2c_probe(struct i2c_client *client) TPM_CR50_TIMEOUT_NOIRQ_MS); } - rc = tpm_cr50_request_locality(chip); - if (rc < 0) { + loc = tpm_cr50_request_locality(chip, TPM_CR50_I2C_DEFAULT_LOC); + if (loc < 0) { dev_err(dev, "Could not request locality\n"); - return rc; + return loc; } /* Read four bytes from DID_VID register */ - rc = tpm_cr50_i2c_read(chip, TPM_I2C_DID_VID(0), buf, sizeof(buf)); + rc = tpm_cr50_i2c_read(chip, TPM_I2C_DID_VID(loc), buf, sizeof(buf)); if (rc < 0) { dev_err(dev, "Could not read vendor id\n"); - tpm_cr50_release_locality(chip, true); + if (tpm_cr50_release_locality(chip, loc)) + dev_err(dev, "Could not release locality\n"); + return rc; + } + + rc = tpm_cr50_release_locality(chip, loc); + if (rc) { + dev_err(dev, "Could not release locality\n"); return rc; } vendor = le32_to_cpup((__le32 *)buf); - if (vendor != TPM_CR50_I2C_DID_VID && vendor != TPM_TI50_I2C_DID_VID) { + if (vendor != TPM_CR50_I2C_DID_VID && + vendor != TPM_TI50_DT_I2C_DID_VID && + vendor != TPM_TI50_OT_I2C_DID_VID) { dev_err(dev, "Vendor ID did not match! ID was %08x\n", vendor); - tpm_cr50_release_locality(chip, true); return -ENODEV; } dev_info(dev, "%s TPM 2.0 (i2c 0x%02x irq %d id 0x%x)\n", - vendor == TPM_TI50_I2C_DID_VID ? "ti50" : "cr50", + tpm_cr50_vid_to_name(vendor), client->addr, client->irq, vendor >> 16); return tpm_chip_register(chip); } @@ -772,7 +815,6 @@ static void tpm_cr50_i2c_remove(struct i2c_client *client) } tpm_chip_unregister(chip); - tpm_cr50_release_locality(chip, true); } static SIMPLE_DEV_PM_OPS(cr50_i2c_pm, tpm_pm_suspend, tpm_pm_resume); diff --git a/drivers/char/tpm/tpm_tis_spi_main.c b/drivers/char/tpm/tpm_tis_spi_main.c index 3f9eaf27b41b..61b42c83ced8 100644 --- a/drivers/char/tpm/tpm_tis_spi_main.c +++ b/drivers/char/tpm/tpm_tis_spi_main.c @@ -37,6 +37,7 @@ #include "tpm_tis_spi.h" #define MAX_SPI_FRAMESIZE 64 +#define SPI_HDRSIZE 4 /* * TCG SPI flow control is documented in section 6.4 of the spec[1]. 
In short, @@ -247,7 +248,7 @@ static int tpm_tis_spi_write_bytes(struct tpm_tis_data *data, u32 addr, int tpm_tis_spi_init(struct spi_device *spi, struct tpm_tis_spi_phy *phy, int irq, const struct tpm_tis_phy_ops *phy_ops) { - phy->iobuf = devm_kmalloc(&spi->dev, MAX_SPI_FRAMESIZE, GFP_KERNEL); + phy->iobuf = devm_kmalloc(&spi->dev, SPI_HDRSIZE + MAX_SPI_FRAMESIZE, GFP_KERNEL); if (!phy->iobuf) return -ENOMEM; @@ -317,6 +318,7 @@ static void tpm_tis_spi_remove(struct spi_device *dev) } static const struct spi_device_id tpm_tis_spi_id[] = { + { "attpm20p", (unsigned long)tpm_tis_spi_probe }, { "st33htpm-spi", (unsigned long)tpm_tis_spi_probe }, { "slb9670", (unsigned long)tpm_tis_spi_probe }, { "tpm_tis_spi", (unsigned long)tpm_tis_spi_probe }, diff --git a/drivers/char/tpm/tpm_tis_synquacer.c b/drivers/char/tpm/tpm_tis_synquacer.c index 0621ebec530b..4927714d277a 100644 --- a/drivers/char/tpm/tpm_tis_synquacer.c +++ b/drivers/char/tpm/tpm_tis_synquacer.c @@ -152,7 +152,7 @@ MODULE_DEVICE_TABLE(acpi, tpm_synquacer_acpi_tbl); static struct platform_driver tis_synquacer_drv = { .probe = tpm_tis_synquacer_probe, - .remove_new = tpm_tis_synquacer_remove, + .remove = tpm_tis_synquacer_remove, .driver = { .name = "tpm_tis_synquacer", .pm = &tpm_tis_synquacer_pm, diff --git a/drivers/char/tpm/tpm_vtpm_proxy.c b/drivers/char/tpm/tpm_vtpm_proxy.c index 11c502039faf..8fe4a01eea12 100644 --- a/drivers/char/tpm/tpm_vtpm_proxy.c +++ b/drivers/char/tpm/tpm_vtpm_proxy.c @@ -243,7 +243,6 @@ static int vtpm_proxy_fops_release(struct inode *inode, struct file *filp) static const struct file_operations vtpm_proxy_fops = { .owner = THIS_MODULE, - .llseek = no_llseek, .read = vtpm_proxy_fops_read, .write = vtpm_proxy_fops_write, .poll = vtpm_proxy_fops_poll, diff --git a/drivers/char/tpm/tpmrm-dev.c b/drivers/char/tpm/tpmrm-dev.c index eef0fb06ea83..c25df7ea064e 100644 --- a/drivers/char/tpm/tpmrm-dev.c +++ b/drivers/char/tpm/tpmrm-dev.c @@ -46,7 +46,6 @@ static int tpmrm_release(struct inode *inode, struct file *file) const struct file_operations tpmrm_fops = { .owner = THIS_MODULE, - .llseek = no_llseek, .open = tpmrm_open, .read = tpm_common_read, .write = tpm_common_write, diff --git a/drivers/char/ttyprintk.c b/drivers/char/ttyprintk.c index 4c806a189ee5..d7f841ab4323 100644 --- a/drivers/char/ttyprintk.c +++ b/drivers/char/ttyprintk.c @@ -228,4 +228,5 @@ static void __exit ttyprintk_exit(void) device_initcall(ttyprintk_init); module_exit(ttyprintk_exit); +MODULE_DESCRIPTION("TTY driver to output user messages via printk"); MODULE_LICENSE("GPL"); diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 035f89f1a251..088182e54deb 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -26,6 +26,7 @@ #include <linux/workqueue.h> #include <linux/module.h> #include <linux/dma-mapping.h> +#include <linux/string_choices.h> #include "../tty/hvc/hvc_console.h" #define is_rproc_enabled IS_ENABLED(CONFIG_REMOTEPROC) @@ -883,9 +884,9 @@ static int pipe_to_sg(struct pipe_inode_info *pipe, struct pipe_buffer *buf, if (len + offset > PAGE_SIZE) len = PAGE_SIZE - offset; - src = kmap_atomic(buf->page); + src = kmap_local_page(buf->page); memcpy(page_address(page) + offset, src + buf->offset, len); - kunmap_atomic(src); + kunmap_local(src); sg_set_page(&(sgl->sg[sgl->n]), page, len, offset); } @@ -923,14 +924,14 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe, pipe_lock(pipe); ret = 0; - if (pipe_empty(pipe->head, pipe->tail)) + if 
(pipe_is_empty(pipe)) goto error_out; ret = wait_port_writable(port, filp->f_flags & O_NONBLOCK); if (ret < 0) goto error_out; - occupancy = pipe_occupancy(pipe->head, pipe->tail); + occupancy = pipe_buf_usage(pipe); buf = alloc_buf(port->portdev->vdev, 0, occupancy); if (!buf) { @@ -1093,7 +1094,6 @@ static const struct file_operations port_fops = { .poll = port_fops_poll, .release = port_fops_release, .fasync = port_fops_fasync, - .llseek = no_llseek, }; /* @@ -1270,8 +1270,7 @@ static int port_debugfs_show(struct seq_file *s, void *data) seq_printf(s, "bytes_sent: %lu\n", port->stats.bytes_sent); seq_printf(s, "bytes_received: %lu\n", port->stats.bytes_received); seq_printf(s, "bytes_discarded: %lu\n", port->stats.bytes_discarded); - seq_printf(s, "is_console: %s\n", - is_console_port(port) ? "yes" : "no"); + seq_printf(s, "is_console: %s\n", str_yes_no(is_console_port(port))); seq_printf(s, "console_vtermno: %u\n", port->cons.vtermno); return 0; @@ -1322,7 +1321,6 @@ static void send_sigio_to_port(struct port *port) static int add_port(struct ports_device *portdev, u32 id) { - char debugfs_name[16]; struct port *port; dev_t devt; int err; @@ -1425,9 +1423,7 @@ static int add_port(struct ports_device *portdev, u32 id) * Finally, create the debugfs file that we can use to * inspect a port's state at any time */ - snprintf(debugfs_name, sizeof(debugfs_name), "vport%up%u", - port->portdev->vdev->index, id); - port->debugfs_file = debugfs_create_file(debugfs_name, 0444, + port->debugfs_file = debugfs_create_file(dev_name(port->dev), 0444, pdrvdata.debugfs_dir, port, &port_debugfs_fops); return 0; @@ -1580,8 +1576,8 @@ static void handle_control_message(struct virtio_device *vdev, break; case VIRTIO_CONSOLE_RESIZE: { struct { - __u16 rows; - __u16 cols; + __virtio16 cols; + __virtio16 rows; } size; if (!is_console_port(port)) @@ -1589,7 +1585,8 @@ static void handle_control_message(struct virtio_device *vdev, memcpy(&size, buf->buf + buf->offset + sizeof(*cpkt), sizeof(size)); - set_console_size(port, size.rows, size.cols); + set_console_size(port, virtio16_to_cpu(vdev, size.rows), + virtio16_to_cpu(vdev, size.cols)); port->cons.hvc->irq_requested = 1; resize_console(port); @@ -1804,8 +1801,7 @@ static void config_work_handler(struct work_struct *work) static int init_vqs(struct ports_device *portdev) { - vq_callback_t **io_callbacks; - char **io_names; + struct virtqueue_info *vqs_info; struct virtqueue **vqs; u32 i, j, nr_ports, nr_queues; int err; @@ -1814,15 +1810,12 @@ static int init_vqs(struct ports_device *portdev) nr_queues = use_multiport(portdev) ? (nr_ports + 1) * 2 : 2; vqs = kmalloc_array(nr_queues, sizeof(struct virtqueue *), GFP_KERNEL); - io_callbacks = kmalloc_array(nr_queues, sizeof(vq_callback_t *), - GFP_KERNEL); - io_names = kmalloc_array(nr_queues, sizeof(char *), GFP_KERNEL); + vqs_info = kcalloc(nr_queues, sizeof(*vqs_info), GFP_KERNEL); portdev->in_vqs = kmalloc_array(nr_ports, sizeof(struct virtqueue *), GFP_KERNEL); portdev->out_vqs = kmalloc_array(nr_ports, sizeof(struct virtqueue *), GFP_KERNEL); - if (!vqs || !io_callbacks || !io_names || !portdev->in_vqs || - !portdev->out_vqs) { + if (!vqs || !vqs_info || !portdev->in_vqs || !portdev->out_vqs) { err = -ENOMEM; goto free; } @@ -1833,30 +1826,27 @@ static int init_vqs(struct ports_device *portdev) * 0 before others. 
*/ j = 0; - io_callbacks[j] = in_intr; - io_callbacks[j + 1] = out_intr; - io_names[j] = "input"; - io_names[j + 1] = "output"; + vqs_info[j].callback = in_intr; + vqs_info[j + 1].callback = out_intr; + vqs_info[j].name = "input"; + vqs_info[j + 1].name = "output"; j += 2; if (use_multiport(portdev)) { - io_callbacks[j] = control_intr; - io_callbacks[j + 1] = NULL; - io_names[j] = "control-i"; - io_names[j + 1] = "control-o"; + vqs_info[j].callback = control_intr; + vqs_info[j].name = "control-i"; + vqs_info[j + 1].name = "control-o"; for (i = 1; i < nr_ports; i++) { j += 2; - io_callbacks[j] = in_intr; - io_callbacks[j + 1] = out_intr; - io_names[j] = "input"; - io_names[j + 1] = "output"; + vqs_info[j].callback = in_intr; + vqs_info[j + 1].callback = out_intr; + vqs_info[j].name = "input"; + vqs_info[j + 1].name = "output"; } } /* Find the queues. */ - err = virtio_find_vqs(portdev->vdev, nr_queues, vqs, - io_callbacks, - (const char **)io_names, NULL); + err = virtio_find_vqs(portdev->vdev, nr_queues, vqs, vqs_info, NULL); if (err) goto free; @@ -1874,8 +1864,7 @@ static int init_vqs(struct ports_device *portdev) portdev->out_vqs[i] = vqs[j + 1]; } } - kfree(io_names); - kfree(io_callbacks); + kfree(vqs_info); kfree(vqs); return 0; @@ -1883,8 +1872,7 @@ static int init_vqs(struct ports_device *portdev) free: kfree(portdev->out_vqs); kfree(portdev->in_vqs); - kfree(io_names); - kfree(io_callbacks); + kfree(vqs_info); kfree(vqs); return err; @@ -2016,25 +2004,27 @@ static int virtcons_probe(struct virtio_device *vdev) multiport = true; } - err = init_vqs(portdev); - if (err < 0) { - dev_err(&vdev->dev, "Error %d initializing vqs\n", err); - goto free_chrdev; - } - spin_lock_init(&portdev->ports_lock); INIT_LIST_HEAD(&portdev->ports); INIT_LIST_HEAD(&portdev->list); - virtio_device_ready(portdev->vdev); - INIT_WORK(&portdev->config_work, &config_work_handler); INIT_WORK(&portdev->control_work, &control_work_handler); if (multiport) { spin_lock_init(&portdev->c_ivq_lock); spin_lock_init(&portdev->c_ovq_lock); + } + err = init_vqs(portdev); + if (err < 0) { + dev_err(&vdev->dev, "Error %d initializing vqs\n", err); + goto free_chrdev; + } + + virtio_device_ready(portdev->vdev); + + if (multiport) { err = fill_queue(portdev->c_ivq, &portdev->c_ivq_lock); if (err < 0) { dev_err(&vdev->dev, @@ -2173,7 +2163,6 @@ static struct virtio_driver virtio_console = { .feature_table = features, .feature_table_size = ARRAY_SIZE(features), .driver.name = KBUILD_MODNAME, - .driver.owner = THIS_MODULE, .id_table = id_table, .probe = virtcons_probe, .remove = virtcons_remove, @@ -2188,7 +2177,6 @@ static struct virtio_driver virtio_rproc_serial = { .feature_table = rproc_serial_features, .feature_table_size = ARRAY_SIZE(rproc_serial_features), .driver.name = "virtio_rproc_serial", - .driver.owner = THIS_MODULE, .id_table = rproc_serial_id_table, .probe = virtcons_probe, .remove = virtcons_remove, diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c index 4f6c3cb8aa41..34a345dc5e72 100644 --- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c +++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c @@ -738,7 +738,7 @@ MODULE_DEVICE_TABLE(of, hwicap_of_match); static struct platform_driver hwicap_platform_driver = { .probe = hwicap_drv_probe, - .remove_new = hwicap_drv_remove, + .remove = hwicap_drv_remove, .driver = { .name = DRIVER_NAME, .of_match_table = hwicap_of_match, diff --git a/drivers/char/xillybus/xillybus_of.c b/drivers/char/xillybus/xillybus_of.c index 
8802e2a6fd20..1a1e64133315 100644 --- a/drivers/char/xillybus/xillybus_of.c +++ b/drivers/char/xillybus/xillybus_of.c @@ -74,7 +74,7 @@ static void xilly_drv_remove(struct platform_device *op) static struct platform_driver xillybus_platform_driver = { .probe = xilly_drv_probe, - .remove_new = xilly_drv_remove, + .remove = xilly_drv_remove, .driver = { .name = xillyname, .of_match_table = xillybus_of_match, diff --git a/drivers/char/xillybus/xillyusb.c b/drivers/char/xillybus/xillyusb.c index 5a5afa14ca8c..45771b1a3716 100644 --- a/drivers/char/xillybus/xillyusb.c +++ b/drivers/char/xillybus/xillyusb.c @@ -50,6 +50,7 @@ MODULE_LICENSE("GPL v2"); static const char xillyname[] = "xillyusb"; static unsigned int fifo_buf_order; +static struct workqueue_struct *wakeup_wq; #define USB_VENDOR_ID_XILINX 0x03fd #define USB_VENDOR_ID_ALTERA 0x09fb @@ -569,10 +570,6 @@ static void cleanup_dev(struct kref *kref) * errors if executed. The mechanism relies on that xdev->error is assigned * a non-zero value by report_io_error() prior to queueing wakeup_all(), * which prevents bulk_in_work() from calling process_bulk_in(). - * - * The fact that wakeup_all() and bulk_in_work() are queued on the same - * workqueue makes their concurrent execution very unlikely, however the - * kernel's API doesn't seem to ensure this strictly. */ static void wakeup_all(struct work_struct *work) @@ -627,7 +624,7 @@ static void report_io_error(struct xillyusb_dev *xdev, if (do_once) { kref_get(&xdev->kref); /* xdev is used by work item */ - queue_work(xdev->workq, &xdev->wakeup_workitem); + queue_work(wakeup_wq, &xdev->wakeup_workitem); } } @@ -1906,6 +1903,13 @@ static const struct file_operations xillyusb_fops = { static int xillyusb_setup_base_eps(struct xillyusb_dev *xdev) { + struct usb_device *udev = xdev->udev; + + /* Verify that device has the two fundamental bulk in/out endpoints */ + if (usb_pipe_type_check(udev, usb_sndbulkpipe(udev, MSG_EP_NUM)) || + usb_pipe_type_check(udev, usb_rcvbulkpipe(udev, IN_EP_NUM))) + return -ENODEV; + xdev->msg_ep = endpoint_alloc(xdev, MSG_EP_NUM | USB_DIR_OUT, bulk_out_work, 1, 2); if (!xdev->msg_ep) @@ -1935,14 +1939,15 @@ static int setup_channels(struct xillyusb_dev *xdev, __le16 *chandesc, int num_channels) { - struct xillyusb_channel *chan; + struct usb_device *udev = xdev->udev; + struct xillyusb_channel *chan, *new_channels; int i; chan = kcalloc(num_channels, sizeof(*chan), GFP_KERNEL); if (!chan) return -ENOMEM; - xdev->channels = chan; + new_channels = chan; for (i = 0; i < num_channels; i++, chan++) { unsigned int in_desc = le16_to_cpu(*chandesc++); @@ -1971,6 +1976,15 @@ static int setup_channels(struct xillyusb_dev *xdev, */ if ((out_desc & 0x80) && i < 14) { /* Entry is valid */ + if (usb_pipe_type_check(udev, + usb_sndbulkpipe(udev, i + 2))) { + dev_err(xdev->dev, + "Missing BULK OUT endpoint %d\n", + i + 2); + kfree(new_channels); + return -ENODEV; + } + chan->writable = 1; chan->out_synchronous = !!(out_desc & 0x40); chan->out_seekable = !!(out_desc & 0x20); @@ -1980,6 +1994,7 @@ static int setup_channels(struct xillyusb_dev *xdev, } } + xdev->channels = new_channels; return 0; } @@ -2096,9 +2111,11 @@ static int xillyusb_discovery(struct usb_interface *interface) * just after responding with the IDT, there is no reason for any * work item to be running now. To be sure that xdev->channels * is updated on anything that might run in parallel, flush the - * workqueue, which rarely does anything. + * device's workqueue and the wakeup work item. 
This rarely + * does anything. */ flush_workqueue(xdev->workq); + flush_work(&xdev->wakeup_workitem); xdev->num_channels = num_channels; @@ -2258,6 +2275,10 @@ static int __init xillyusb_init(void) { int rc = 0; + wakeup_wq = alloc_workqueue(xillyname, 0, 0); + if (!wakeup_wq) + return -ENOMEM; + if (LOG2_INITIAL_FIFO_BUF_SIZE > PAGE_SHIFT) fifo_buf_order = LOG2_INITIAL_FIFO_BUF_SIZE - PAGE_SHIFT; else @@ -2265,12 +2286,17 @@ static int __init xillyusb_init(void) rc = usb_register(&xillyusb_driver); + if (rc) + destroy_workqueue(wakeup_wq); + return rc; } static void __exit xillyusb_exit(void) { usb_deregister(&xillyusb_driver); + + destroy_workqueue(wakeup_wq); } module_init(xillyusb_init);
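The xillyusb changes above move wakeup_all() onto a dedicated, module-global workqueue created in xillyusb_init(), destroyed again if usb_register() fails and in the exit path, and flushed explicitly via flush_work() during discovery. The following is a condensed, self-contained sketch of that lifecycle using only generic workqueue APIs; the demo_* names are placeholders and the sketch omits the USB registration and error-reporting logic of the real driver.

/*
 * Sketch: a dedicated workqueue for a wakeup work item, created at module
 * init, flushed and destroyed at module exit. Keeping the wakeup work off
 * the per-device workqueue means flushing one does not depend on the other.
 */
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wakeup_wq;

static void demo_wakeup(struct work_struct *work)
{
	pr_info("demo: wakeup ran on its own workqueue\n");
}
static DECLARE_WORK(demo_wakeup_work, demo_wakeup);

static int __init demo_init(void)
{
	demo_wakeup_wq = alloc_workqueue("demo-wakeup", 0, 0);
	if (!demo_wakeup_wq)
		return -ENOMEM;

	/* Error-path rule from the patch: anything registered after this
	 * point must destroy the workqueue again on failure. */
	queue_work(demo_wakeup_wq, &demo_wakeup_work);
	return 0;
}

static void __exit demo_exit(void)
{
	flush_work(&demo_wakeup_work);	/* as xillyusb_discovery() now does */
	destroy_workqueue(demo_wakeup_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_DESCRIPTION("Sketch: dedicated wakeup workqueue lifecycle");
MODULE_LICENSE("GPL");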