From 94ee12b507db8b5876e31c9d6c9d84f556a4b49f Mon Sep 17 00:00:00 2001 From: Michael Clark Date: Mon, 11 Feb 2019 17:38:29 +1300 Subject: MIPS: fix truncation in __cmpxchg_small for short values __cmpxchg_small erroneously uses u8 for load comparison which can be either char or short. This patch changes the local variable to u32 which is sufficiently sized, as the loaded value is already masked and shifted appropriately. Using an integer size avoids any unnecessary canonicalization from use of non native widths. This patch is part of a series that adapts the MIPS small word atomics code for xchg and cmpxchg on short and char to RISC-V. Cc: RISC-V Patches Cc: Linux RISC-V Cc: Linux MIPS Signed-off-by: Michael Clark [paul.burton@mips.com: - Fix varialble typo per Jonas Gorski. - Consolidate load variable with other declarations.] Signed-off-by: Paul Burton Fixes: 3ba7f44d2b19 ("MIPS: cmpxchg: Implement 1 byte & 2 byte cmpxchg()") Cc: stable@vger.kernel.org # v4.13+ --- arch/mips/kernel/cmpxchg.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/mips/kernel/cmpxchg.c b/arch/mips/kernel/cmpxchg.c index 0b9535bc2c53..6b2a4a902a98 100644 --- a/arch/mips/kernel/cmpxchg.c +++ b/arch/mips/kernel/cmpxchg.c @@ -54,10 +54,9 @@ unsigned long __xchg_small(volatile void *ptr, unsigned long val, unsigned int s unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old, unsigned long new, unsigned int size) { - u32 mask, old32, new32, load32; + u32 mask, old32, new32, load32, load; volatile u32 *ptr32; unsigned int shift; - u8 load; /* Check that ptr is naturally aligned */ WARN_ON((unsigned long)ptr & (size - 1)); -- cgit From 74f03104ed465ff71b11076ef620e4eaa53dbf74 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 1 Feb 2019 09:47:44 +0100 Subject: MIPS: lantiq: pass struct device to DMA API functions The DMA API generally relies on a struct device to work properly, and only barely works without one for legacy reasons. Pass the easily available struct device from the platform_device to remedy this. Also use GFP_KERNEL instead of GFP_ATOMIC as the gfp_t for the memory allocation, as we aren't in interrupt context or under a lock. Note that this whole function looks somewhat bogus given that we never even look at the returned dma address, and the CPHYSADDR magic on a returned noncached mapping looks "interesting". But I'll leave that to people more familiar with the code to sort out. 
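For readers less familiar with the DMA API, the shape of the call this patch moves to is roughly the following minimal sketch. CP1_SIZE and cp1_base come from the driver; the rest is just the generic dma_alloc_coherent() calling convention and is shown for illustration, not as the exact lantiq code:

	#include <linux/dma-mapping.h>
	#include <linux/platform_device.h>

	static int vmmc_probe(struct platform_device *pdev)
	{
		dma_addr_t dma;
		void *cp1_base;

		/*
		 * Allocate against the real device so the DMA layer can apply
		 * its mask, offset and coherency rules.  Sleeping is allowed
		 * in probe context, hence GFP_KERNEL rather than GFP_ATOMIC.
		 */
		cp1_base = dma_alloc_coherent(&pdev->dev, CP1_SIZE, &dma,
					      GFP_KERNEL);
		if (!cp1_base)
			return -ENOMEM;

		/* ... use cp1_base / dma as the driver did before ... */
		return 0;
	}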
Signed-off-by: Christoph Hellwig Signed-off-by: Paul Burton Cc: John Crispin Cc: Vinod Koul Cc: Dmitry Tarnyagin Cc: Nicolas Ferre Cc: Sudip Mukherjee Cc: Felipe Balbi Cc: linux-mips@vger.kernel.org Cc: linux-kernel@vger.kernel.org Cc: dmaengine@vger.kernel.org Cc: netdev@vger.kernel.org Cc: linux-usb@vger.kernel.org Cc: linux-fbdev@vger.kernel.org Cc: alsa-devel@alsa-project.org Cc: iommu@lists.linux-foundation.org --- arch/mips/lantiq/xway/vmmc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/mips/lantiq/xway/vmmc.c b/arch/mips/lantiq/xway/vmmc.c index 577ec81b557d..3deab9a77718 100644 --- a/arch/mips/lantiq/xway/vmmc.c +++ b/arch/mips/lantiq/xway/vmmc.c @@ -31,8 +31,8 @@ static int vmmc_probe(struct platform_device *pdev) dma_addr_t dma; cp1_base = - (void *) CPHYSADDR(dma_alloc_coherent(NULL, CP1_SIZE, - &dma, GFP_ATOMIC)); + (void *) CPHYSADDR(dma_alloc_coherent(&pdev->dev, CP1_SIZE, + &dma, GFP_KERNEL)); gpio_count = of_gpio_count(pdev->dev.of_node); while (gpio_count > 0) { -- cgit From 6e356d45950e2d26b63531a2fd112c987da7a933 Mon Sep 17 00:00:00 2001 From: Mike Marshall Date: Tue, 5 Feb 2019 14:13:34 -0500 Subject: orangefs: remove two un-needed BUG_ONs... Signed-off-by: Mike Marshall --- fs/orangefs/file.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/fs/orangefs/file.c b/fs/orangefs/file.c index a5a2fe76568f..b094d3d79354 100644 --- a/fs/orangefs/file.c +++ b/fs/orangefs/file.c @@ -398,8 +398,6 @@ static ssize_t orangefs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter loff_t pos = iocb->ki_pos; ssize_t rc = 0; - BUG_ON(iocb->private); - gossip_debug(GOSSIP_FILE_DEBUG, "orangefs_file_read_iter\n"); orangefs_stats.reads++; @@ -416,8 +414,6 @@ static ssize_t orangefs_file_write_iter(struct kiocb *iocb, struct iov_iter *ite loff_t pos; ssize_t rc; - BUG_ON(iocb->private); - gossip_debug(GOSSIP_FILE_DEBUG, "orangefs_file_write_iter\n"); inode_lock(file->f_mapping->host); -- cgit From f1071c3e2473ae19a7f5d892a187c4cab1a61f2e Mon Sep 17 00:00:00 2001 From: Gilad Ben-Yossef Date: Mon, 11 Feb 2019 16:27:58 +0200 Subject: crypto: ccree - add missing inline qualifier Commit 1358c13a48c4 ("crypto: ccree - fix resume race condition on init") was missing an "inline" qualifier for the stub function used when CONFIG_PM is not set, causing a build warning.
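The stub pattern in question, in simplified form (this mirrors the cc_pm.h stubs visible in the diff below, but the #ifdef structure here is illustrative rather than a verbatim copy of the header):

	/* Simplified CONFIG_PM stub pattern for a header file. */
	#ifdef CONFIG_PM
	int cc_pm_init(struct cc_drvdata *drvdata);
	void cc_pm_go(struct cc_drvdata *drvdata);
	void cc_pm_fini(struct cc_drvdata *drvdata);
	#else
	static inline int cc_pm_init(struct cc_drvdata *drvdata) { return 0; }
	/*
	 * Without "inline", a file that includes this header but never calls
	 * the stub can trip -Wunused-function ("defined but not used");
	 * marking the stub inline keeps the build quiet.
	 */
	static inline void cc_pm_go(struct cc_drvdata *drvdata) {}
	static inline void cc_pm_fini(struct cc_drvdata *drvdata) {}
	#endif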
Fixes: 1358c13a48c4 ("crypto: ccree - fix resume race condition on init") Cc: stable@kernel.org # v4.20 Signed-off-by: Gilad Ben-Yossef Acked-by: Geert Uytterhoeven Signed-off-by: Herbert Xu --- drivers/crypto/ccree/cc_pm.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/ccree/cc_pm.h b/drivers/crypto/ccree/cc_pm.h index f62624357020..907a6db4d6c0 100644 --- a/drivers/crypto/ccree/cc_pm.h +++ b/drivers/crypto/ccree/cc_pm.h @@ -30,7 +30,7 @@ static inline int cc_pm_init(struct cc_drvdata *drvdata) return 0; } -static void cc_pm_go(struct cc_drvdata *drvdata) {} +static inline void cc_pm_go(struct cc_drvdata *drvdata) {} static inline void cc_pm_fini(struct cc_drvdata *drvdata) {} -- cgit From 69216a545cf81b2b32d01948f7039315abaf75a0 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Sat, 16 Feb 2019 14:51:25 +0100 Subject: crypto: sha256/arm - fix crash bug in Thumb2 build The SHA256 code we adopted from the OpenSSL project uses a rather peculiar way to take the address of the round constant table: it takes the address of the sha256_block_data_order() routine, and substracts a constant known quantity to arrive at the base of the table, which is emitted by the same assembler code right before the routine's entry point. However, recent versions of binutils have helpfully changed the behavior of references emitted via an ADR instruction when running in Thumb2 mode: it now takes the Thumb execution mode bit into account, which is bit 0 af the address. This means the produced table address also has bit 0 set, and so we end up with an address value pointing 1 byte past the start of the table, which results in crashes such as Unable to handle kernel paging request at virtual address bf825000 pgd = 42f44b11 [bf825000] *pgd=80000040206003, *pmd=5f1bd003, *pte=00000000 Internal error: Oops: 207 [#1] PREEMPT SMP THUMB2 Modules linked in: sha256_arm(+) sha1_arm_ce sha1_arm ... CPU: 7 PID: 396 Comm: cryptomgr_test Not tainted 5.0.0-rc6+ #144 Hardware name: QEMU KVM Virtual Machine, BIOS 0.0.0 02/06/2015 PC is at sha256_block_data_order+0xaaa/0xb30 [sha256_arm] LR is at __this_module+0x17fd/0xffffe800 [sha256_arm] pc : [] lr : [] psr: 800b0033 sp : ebc8bbe8 ip : faaabe1c fp : 2fdd3433 r10: 4c5f1692 r9 : e43037df r8 : b04b0a5a r7 : c369d722 r6 : 39c3693e r5 : 7a013189 r4 : 1580d26b r3 : 8762a9b0 r2 : eea9c2cd r1 : 3e9ab536 r0 : 1dea4ae7 Flags: Nzcv IRQs on FIQs on Mode SVC_32 ISA Thumb Segment user Control: 70c5383d Table: 6b8467c0 DAC: dbadc0de Process cryptomgr_test (pid: 396, stack limit = 0x69e1fe23) Stack: (0xebc8bbe8 to 0xebc8c000) ... unwind: Unknown symbol address bf820bca unwind: Index not found bf820bca Code: 441a ea80 40f9 440a (f85e) 3b04 ---[ end trace e560cce92700ef8a ]--- Given that this affects older kernels as well, in case they are built with a recent toolchain, apply a minimal backportable fix, which is to emit another non-code label at the start of the routine, and reference that instead. 
(This is similar to the current upstream state of this file in OpenSSL) Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- arch/arm/crypto/sha256-armv4.pl | 3 ++- arch/arm/crypto/sha256-core.S_shipped | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/arch/arm/crypto/sha256-armv4.pl b/arch/arm/crypto/sha256-armv4.pl index b9ec44060ed3..a03cf4dfb781 100644 --- a/arch/arm/crypto/sha256-armv4.pl +++ b/arch/arm/crypto/sha256-armv4.pl @@ -212,10 +212,11 @@ K256: .global sha256_block_data_order .type sha256_block_data_order,%function sha256_block_data_order: +.Lsha256_block_data_order: #if __ARM_ARCH__<7 sub r3,pc,#8 @ sha256_block_data_order #else - adr r3,sha256_block_data_order + adr r3,.Lsha256_block_data_order #endif #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) ldr r12,.LOPENSSL_armcap diff --git a/arch/arm/crypto/sha256-core.S_shipped b/arch/arm/crypto/sha256-core.S_shipped index 3b58300d611c..054aae0edfce 100644 --- a/arch/arm/crypto/sha256-core.S_shipped +++ b/arch/arm/crypto/sha256-core.S_shipped @@ -93,10 +93,11 @@ K256: .global sha256_block_data_order .type sha256_block_data_order,%function sha256_block_data_order: +.Lsha256_block_data_order: #if __ARM_ARCH__<7 sub r3,pc,#8 @ sha256_block_data_order #else - adr r3,sha256_block_data_order + adr r3,.Lsha256_block_data_order #endif #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) ldr r12,.LOPENSSL_armcap -- cgit From c64316502008064c158fa40cc250665e461b0f2a Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Sat, 16 Feb 2019 14:51:26 +0100 Subject: crypto: sha512/arm - fix crash bug in Thumb2 build The SHA512 code we adopted from the OpenSSL project uses a rather peculiar way to take the address of the round constant table: it takes the address of the sha256_block_data_order() routine, and substracts a constant known quantity to arrive at the base of the table, which is emitted by the same assembler code right before the routine's entry point. However, recent versions of binutils have helpfully changed the behavior of references emitted via an ADR instruction when running in Thumb2 mode: it now takes the Thumb execution mode bit into account, which is bit 0 af the address. This means the produced table address also has bit 0 set, and so we end up with an address value pointing 1 byte past the start of the table, which results in crashes such as Unable to handle kernel paging request at virtual address bf825000 pgd = 42f44b11 [bf825000] *pgd=80000040206003, *pmd=5f1bd003, *pte=00000000 Internal error: Oops: 207 [#1] PREEMPT SMP THUMB2 Modules linked in: sha256_arm(+) sha1_arm_ce sha1_arm ... CPU: 7 PID: 396 Comm: cryptomgr_test Not tainted 5.0.0-rc6+ #144 Hardware name: QEMU KVM Virtual Machine, BIOS 0.0.0 02/06/2015 PC is at sha256_block_data_order+0xaaa/0xb30 [sha256_arm] LR is at __this_module+0x17fd/0xffffe800 [sha256_arm] pc : [] lr : [] psr: 800b0033 sp : ebc8bbe8 ip : faaabe1c fp : 2fdd3433 r10: 4c5f1692 r9 : e43037df r8 : b04b0a5a r7 : c369d722 r6 : 39c3693e r5 : 7a013189 r4 : 1580d26b r3 : 8762a9b0 r2 : eea9c2cd r1 : 3e9ab536 r0 : 1dea4ae7 Flags: Nzcv IRQs on FIQs on Mode SVC_32 ISA Thumb Segment user Control: 70c5383d Table: 6b8467c0 DAC: dbadc0de Process cryptomgr_test (pid: 396, stack limit = 0x69e1fe23) Stack: (0xebc8bbe8 to 0xebc8c000) ... 
unwind: Unknown symbol address bf820bca unwind: Index not found bf820bca Code: 441a ea80 40f9 440a (f85e) 3b04 ---[ end trace e560cce92700ef8a ]--- Given that this affects older kernels as well, in case they are built with a recent toolchain, apply a minimal backportable fix, which is to emit another non-code label at the start of the routine, and reference that instead. (This is similar to the current upstream state of this file in OpenSSL) Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- arch/arm/crypto/sha512-armv4.pl | 3 ++- arch/arm/crypto/sha512-core.S_shipped | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/arch/arm/crypto/sha512-armv4.pl b/arch/arm/crypto/sha512-armv4.pl index fb5d15048c0b..788c17b56ecc 100644 --- a/arch/arm/crypto/sha512-armv4.pl +++ b/arch/arm/crypto/sha512-armv4.pl @@ -274,10 +274,11 @@ WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817) .global sha512_block_data_order .type sha512_block_data_order,%function sha512_block_data_order: +.Lsha512_block_data_order: #if __ARM_ARCH__<7 sub r3,pc,#8 @ sha512_block_data_order #else - adr r3,sha512_block_data_order + adr r3,.Lsha512_block_data_order #endif #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) ldr r12,.LOPENSSL_armcap diff --git a/arch/arm/crypto/sha512-core.S_shipped b/arch/arm/crypto/sha512-core.S_shipped index b1c334a49cda..710ea309769e 100644 --- a/arch/arm/crypto/sha512-core.S_shipped +++ b/arch/arm/crypto/sha512-core.S_shipped @@ -141,10 +141,11 @@ WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817) .global sha512_block_data_order .type sha512_block_data_order,%function sha512_block_data_order: +.Lsha512_block_data_order: #if __ARM_ARCH__<7 sub r3,pc,#8 @ sha512_block_data_order #else - adr r3,sha512_block_data_order + adr r3,.Lsha512_block_data_order #endif #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) ldr r12,.LOPENSSL_armcap -- cgit From 5908e6b738e3357af42c10e1183753c70a0117a9 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Sun, 24 Feb 2019 16:46:45 -0800 Subject: Linux 5.0-rc8 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 96c5335e7ee4..ac5ac28a24e9 100644 --- a/Makefile +++ b/Makefile @@ -2,7 +2,7 @@ VERSION = 5 PATCHLEVEL = 0 SUBLEVEL = 0 -EXTRAVERSION = -rc7 +EXTRAVERSION = -rc8 NAME = Shy Crocodile # *DOCUMENTATION* -- cgit From c9bd505dbd9d3dc80c496f88eafe70affdcf1ba6 Mon Sep 17 00:00:00 2001 From: Jonathan Neuschäfer Date: Sun, 10 Feb 2019 18:31:07 +0100 Subject: mmc: spi: Fix card detection during probe MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When using the mmc_spi driver with a card-detect pin, I noticed that the card was not detected immediately after probe, but only after it was unplugged and plugged back in (and the CD IRQ fired). The call tree looks something like this: mmc_spi_probe mmc_add_host mmc_start_host _mmc_detect_change mmc_schedule_delayed_work(&host->detect, 0) mmc_rescan host->bus_ops->detect(host) mmc_detect _mmc_detect_card_removed host->ops->get_cd(host) mmc_gpio_get_cd -> -ENOSYS (ctx->cd_gpio not set) mmc_gpiod_request_cd ctx->cd_gpio = desc To fix this issue, call mmc_detect_change after the card-detect GPIO/IRQ is registered. 
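Put differently, the probe path ends up ordered as in the condensed sketch below (the guard condition is a stand-in named has_cd_gpio, not the driver's real test; the two mmc_* calls are the ones from the patch):

	/* inside mmc_spi_probe(), after the host has been set up */
	if (has_cd_gpio) {
		mmc->caps &= ~MMC_CAP_NEEDS_POLL; /* a CD IRQ makes polling unnecessary */
		mmc_gpiod_request_cd_irq(mmc);    /* arm the card-detect interrupt */
	}
	mmc_detect_change(mmc, 0);                /* rescan once right away, so a card
						   * already inserted at probe time is
						   * found without waiting for a CD edge */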
Signed-off-by: Jonathan Neuschäfer Reviewed-by: Linus Walleij Cc: stable@vger.kernel.org Signed-off-by: Ulf Hansson --- drivers/mmc/host/mmc_spi.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c index 10ba46b728e8..8ade14fb2148 100644 --- a/drivers/mmc/host/mmc_spi.c +++ b/drivers/mmc/host/mmc_spi.c @@ -1450,6 +1450,7 @@ static int mmc_spi_probe(struct spi_device *spi) mmc->caps &= ~MMC_CAP_NEEDS_POLL; mmc_gpiod_request_cd_irq(mmc); } + mmc_detect_change(mmc, 0); /* Index 1 is write protect/read only */ status = mmc_gpiod_request_ro(mmc, NULL, 1, false, 0, NULL); -- cgit From 5c27ff5db1491a947264d6d4e4cbe43ae6535bae Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Mon, 18 Feb 2019 20:45:40 +0300 Subject: mmc: tmio_mmc_core: don't claim spurious interrupts I have encountered an interrupt storm during the eMMC chip probing (and the chip finally didn't get detected). It turned out that U-Boot left the DMAC interrupts enabled while the Linux driver didn't use those. The SDHI driver's interrupt handler somehow assumes that, even if an SDIO interrupt didn't happen, it should return IRQ_HANDLED. I think that if none of the enabled interrupts happened and got handled, we should return IRQ_NONE -- that way the kernel IRQ code recoginizes a spurious interrupt and masks it off pretty quickly... Fixes: 7729c7a232a9 ("mmc: tmio: Provide separate interrupt handlers") Signed-off-by: Sergei Shtylyov Reviewed-by: Wolfram Sang Tested-by: Wolfram Sang Reviewed-by: Simon Horman Cc: stable@vger.kernel.org Signed-off-by: Ulf Hansson --- drivers/mmc/host/tmio_mmc_core.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c index 085a0fab769c..41d79720e745 100644 --- a/drivers/mmc/host/tmio_mmc_core.c +++ b/drivers/mmc/host/tmio_mmc_core.c @@ -629,7 +629,7 @@ static bool __tmio_mmc_sdcard_irq(struct tmio_mmc_host *host, int ireg, return false; } -static void __tmio_mmc_sdio_irq(struct tmio_mmc_host *host) +static bool __tmio_mmc_sdio_irq(struct tmio_mmc_host *host) { struct mmc_host *mmc = host->mmc; struct tmio_mmc_data *pdata = host->pdata; @@ -637,7 +637,7 @@ static void __tmio_mmc_sdio_irq(struct tmio_mmc_host *host) unsigned int sdio_status; if (!(pdata->flags & TMIO_MMC_SDIO_IRQ)) - return; + return false; status = sd_ctrl_read16(host, CTL_SDIO_STATUS); ireg = status & TMIO_SDIO_MASK_ALL & ~host->sdio_irq_mask; @@ -650,6 +650,8 @@ static void __tmio_mmc_sdio_irq(struct tmio_mmc_host *host) if (mmc->caps & MMC_CAP_SDIO_IRQ && ireg & TMIO_SDIO_STAT_IOIRQ) mmc_signal_sdio_irq(mmc); + + return ireg; } irqreturn_t tmio_mmc_irq(int irq, void *devid) @@ -668,9 +670,10 @@ irqreturn_t tmio_mmc_irq(int irq, void *devid) if (__tmio_mmc_sdcard_irq(host, ireg, status)) return IRQ_HANDLED; - __tmio_mmc_sdio_irq(host); + if (__tmio_mmc_sdio_irq(host)) + return IRQ_HANDLED; - return IRQ_HANDLED; + return IRQ_NONE; } EXPORT_SYMBOL_GPL(tmio_mmc_irq); -- cgit From 53a41cb7ed381edee91029cdcabe9b3250f43f4d Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Mon, 25 Feb 2019 09:10:51 -0800 Subject: Revert "x86/fault: BUG() when uaccess helpers fault on kernel addresses" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit 9da3f2b74054406f87dff7101a569217ffceb29b. It was well-intentioned, but wrong. Overriding the exception tables for instructions for random reasons is just wrong, and that is what the new code did. 
It caused problems for tracing, and it caused problems for strncpy_from_user(), because the new checks made perfectly valid use cases break, rather than catch things that did bad things. Unchecked user space accesses are a problem, but that's not a reason to add invalid checks that then people have to work around with silly flags (in this case, that 'kernel_uaccess_faults_ok' flag, which is just an odd way to say "this commit was wrong" and was sprinked into random places to hide the wrongness). The real fix to unchecked user space accesses is to get rid of the special "let's not check __get_user() and __put_user() at all" logic. Make __{get|put}_user() be just aliases to the regular {get|put}_user() functions, and make it impossible to access user space without having the proper checks in places. The raison d'être of the special double-underscore versions used to be that the range check was expensive, and if you did multiple user accesses, you'd do the range check up front (like the signal frame handling code, for example). But SMAP (on x86) and PAN (on ARM) have made that optimization pointless, because the _real_ expense is the "set CPU flag to allow user space access". Do let's not break the valid cases to catch invalid cases that shouldn't even exist. Cc: Thomas Gleixner Cc: Kees Cook Cc: Tobin C. Harding Cc: Borislav Petkov Cc: Peter Zijlstra Cc: Andy Lutomirski Cc: Jann Horn Signed-off-by: Linus Torvalds --- arch/x86/mm/extable.c | 58 --------------------------------------------------- fs/namespace.c | 2 -- include/linux/sched.h | 6 ------ mm/maccess.c | 6 ------ 4 files changed, 72 deletions(-) diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c index 6521134057e8..856fa409c536 100644 --- a/arch/x86/mm/extable.c +++ b/arch/x86/mm/extable.c @@ -117,67 +117,11 @@ __visible bool ex_handler_fprestore(const struct exception_table_entry *fixup, } EXPORT_SYMBOL_GPL(ex_handler_fprestore); -/* Helper to check whether a uaccess fault indicates a kernel bug. */ -static bool bogus_uaccess(struct pt_regs *regs, int trapnr, - unsigned long fault_addr) -{ - /* This is the normal case: #PF with a fault address in userspace. */ - if (trapnr == X86_TRAP_PF && fault_addr < TASK_SIZE_MAX) - return false; - - /* - * This code can be reached for machine checks, but only if the #MC - * handler has already decided that it looks like a candidate for fixup. - * This e.g. happens when attempting to access userspace memory which - * the CPU can't access because of uncorrectable bad memory. - */ - if (trapnr == X86_TRAP_MC) - return false; - - /* - * There are two remaining exception types we might encounter here: - * - #PF for faulting accesses to kernel addresses - * - #GP for faulting accesses to noncanonical addresses - * Complain about anything else. - */ - if (trapnr != X86_TRAP_PF && trapnr != X86_TRAP_GP) { - WARN(1, "unexpected trap %d in uaccess\n", trapnr); - return false; - } - - /* - * This is a faulting memory access in kernel space, on a kernel - * address, in a usercopy function. This can e.g. be caused by improper - * use of helpers like __put_user and by improper attempts to access - * userspace addresses in KERNEL_DS regions. - * The one (semi-)legitimate exception are probe_kernel_{read,write}(), - * which can be invoked from places like kgdb, /dev/mem (for reading) - * and privileged BPF code (for reading). 
- * The probe_kernel_*() functions set the kernel_uaccess_faults_ok flag - * to tell us that faulting on kernel addresses, and even noncanonical - * addresses, in a userspace accessor does not necessarily imply a - * kernel bug, root might just be doing weird stuff. - */ - if (current->kernel_uaccess_faults_ok) - return false; - - /* This is bad. Refuse the fixup so that we go into die(). */ - if (trapnr == X86_TRAP_PF) { - pr_emerg("BUG: pagefault on kernel address 0x%lx in non-whitelisted uaccess\n", - fault_addr); - } else { - pr_emerg("BUG: GPF in non-whitelisted uaccess (non-canonical address?)\n"); - } - return true; -} - __visible bool ex_handler_uaccess(const struct exception_table_entry *fixup, struct pt_regs *regs, int trapnr, unsigned long error_code, unsigned long fault_addr) { - if (bogus_uaccess(regs, trapnr, fault_addr)) - return false; regs->ip = ex_fixup_addr(fixup); return true; } @@ -188,8 +132,6 @@ __visible bool ex_handler_ext(const struct exception_table_entry *fixup, unsigned long error_code, unsigned long fault_addr) { - if (bogus_uaccess(regs, trapnr, fault_addr)) - return false; /* Special hack for uaccess_err */ current->thread.uaccess_err = 1; regs->ip = ex_fixup_addr(fixup); diff --git a/fs/namespace.c b/fs/namespace.c index a677b59efd74..678ef175d63a 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -2698,7 +2698,6 @@ static long exact_copy_from_user(void *to, const void __user * from, if (!access_ok(from, n)) return n; - current->kernel_uaccess_faults_ok++; while (n) { if (__get_user(c, f)) { memset(t, 0, n); @@ -2708,7 +2707,6 @@ static long exact_copy_from_user(void *to, const void __user * from, f++; n--; } - current->kernel_uaccess_faults_ok--; return n; } diff --git a/include/linux/sched.h b/include/linux/sched.h index bba3afb4e9bf..f9b43c989577 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -739,12 +739,6 @@ struct task_struct { unsigned use_memdelay:1; #endif - /* - * May usercopy functions fault on kernel addresses? - * This is not just a single bit because this can potentially nest. - */ - unsigned int kernel_uaccess_faults_ok; - unsigned long atomic_flags; /* Flags requiring atomic access. 
*/ struct restart_block restart_block; diff --git a/mm/maccess.c b/mm/maccess.c index f3416632e5a4..ec00be51a24f 100644 --- a/mm/maccess.c +++ b/mm/maccess.c @@ -30,10 +30,8 @@ long __probe_kernel_read(void *dst, const void *src, size_t size) set_fs(KERNEL_DS); pagefault_disable(); - current->kernel_uaccess_faults_ok++; ret = __copy_from_user_inatomic(dst, (__force const void __user *)src, size); - current->kernel_uaccess_faults_ok--; pagefault_enable(); set_fs(old_fs); @@ -60,9 +58,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size) set_fs(KERNEL_DS); pagefault_disable(); - current->kernel_uaccess_faults_ok++; ret = __copy_to_user_inatomic((__force void __user *)dst, src, size); - current->kernel_uaccess_faults_ok--; pagefault_enable(); set_fs(old_fs); @@ -98,13 +94,11 @@ long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count) set_fs(KERNEL_DS); pagefault_disable(); - current->kernel_uaccess_faults_ok++; do { ret = __get_user(*dst++, (const char __user __force *)src++); } while (dst[-1] && ret == 0 && src - unsafe_addr < count); - current->kernel_uaccess_faults_ok--; dst[-1] = '\0'; pagefault_enable(); set_fs(old_fs); -- cgit From 2a418cf3f5f1caf911af288e978d61c9844b0695 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Fri, 22 Feb 2019 17:17:04 -0800 Subject: x86/uaccess: Don't leak the AC flag into __put_user() value evaluation When calling __put_user(foo(), ptr), the __put_user() macro would call foo() in between __uaccess_begin() and __uaccess_end(). If that code were buggy, then those bugs would be run without SMAP protection. Fortunately, there seem to be few instances of the problem in the kernel. Nevertheless, __put_user() should be fixed to avoid doing this. Therefore, evaluate __put_user()'s argument before setting AC. This issue was noticed when an objtool hack by Peter Zijlstra complained about genregs_get() and I compared the assembly output to the C source. [ bp: Massage commit message and fixed up whitespace. ] Fixes: 11f1a4b9755f ("x86: reorganize SMAP handling in user space accesses") Signed-off-by: Andy Lutomirski Signed-off-by: Borislav Petkov Acked-by: Linus Torvalds Cc: Peter Zijlstra Cc: Brian Gerst Cc: Josh Poimboeuf Cc: Denys Vlasenko Cc: stable@vger.kernel.org Link: http://lkml.kernel.org/r/20190225125231.845656645@infradead.org --- arch/x86/include/asm/uaccess.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index a77445d1b034..28376aa2d053 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -284,7 +284,7 @@ do { \ __put_user_goto(x, ptr, "l", "k", "ir", label); \ break; \ case 8: \ - __put_user_goto_u64((__typeof__(*ptr))(x), ptr, label); \ + __put_user_goto_u64(x, ptr, label); \ break; \ default: \ __put_user_bad(); \ @@ -431,8 +431,10 @@ do { \ ({ \ __label__ __pu_label; \ int __pu_err = -EFAULT; \ + __typeof__(*(ptr)) __pu_val; \ + __pu_val = x; \ __uaccess_begin(); \ - __put_user_size((x), (ptr), (size), __pu_label); \ + __put_user_size(__pu_val, (ptr), (size), __pu_label); \ __pu_err = 0; \ __pu_label: \ __uaccess_end(); \ -- cgit From 29b00e609960ae0fcff382f4c7079dd0874a5311 Mon Sep 17 00:00:00 2001 From: "Darrick J. Wong" Date: Fri, 22 Feb 2019 22:35:32 -0800 Subject: tmpfs: fix uninitialized return value in shmem_link When we made the shmem_reserve_inode call in shmem_link conditional, we forgot to update the declaration for ret so that it always has a known value. 
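The hazard being fixed is the classic "only assigned on one branch" pattern; a condensed, illustrative version (not the literal shmem code) looks like this:

	int ret;                 /* indeterminate if the branch below is skipped */

	if (inode->i_nlink) {
		ret = shmem_reserve_inode(inode->i_sb);
		if (ret)
			goto out;
	}
	/* ... */
	return ret;              /* may return garbage when i_nlink was zero */

Giving ret a defined starting value, as the one-line diff below does, closes the hole.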
Dan Carpenter pointed out this deficiency in the original patch. Fixes: 1062af920c07 ("tmpfs: fix link accounting when a tmpfile is linked in") Reported-by: Dan Carpenter Signed-off-by: Darrick J. Wong Signed-off-by: Hugh Dickins Cc: Matej Kupljen Cc: Al Viro Cc: Andrew Morton Signed-off-by: Linus Torvalds --- mm/shmem.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/shmem.c b/mm/shmem.c index 0905215fb016..2c012eee133d 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -2848,7 +2848,7 @@ static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode, static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) { struct inode *inode = d_inode(old_dentry); - int ret; + int ret = 0; /* * No ordinary (disk based) filesystem counts links as inodes; -- cgit From 7d762d69145a54d169f58e56d6dac57a5508debc Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 21 Feb 2019 22:04:32 +0000 Subject: afs: Fix manually set volume location server list When a cell with a volume location server list is added manually by echoing the details into /proc/net/afs/cells, a record is added but the flag saying it has been looked up isn't set. This causes the VL server rotation code to wait forever, with the top of /proc/pid/stack looking like: afs_select_vlserver+0x3a6/0x6f3 afs_vl_lookup_vldb+0x4b/0x92 afs_create_volume+0x25/0x1b9 ... with the thread stuck in afs_start_vl_iteration() waiting for AFS_CELL_FL_NO_LOOKUP_YET to be cleared. Fix this by clearing AFS_CELL_FL_NO_LOOKUP_YET when setting up a record if that record's details were supplied manually. Fixes: 0a5143f2f89c ("afs: Implement VL server rotation") Reported-by: Dave Botsch Signed-off-by: David Howells Signed-off-by: Linus Torvalds --- fs/afs/cell.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/afs/cell.c b/fs/afs/cell.c index cf445dbd5f2e..9de46116c749 100644 --- a/fs/afs/cell.c +++ b/fs/afs/cell.c @@ -173,6 +173,7 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net, rcu_assign_pointer(cell->vl_servers, vllist); cell->dns_expiry = TIME64_MAX; + __clear_bit(AFS_CELL_FL_NO_LOOKUP_YET, &cell->flags); } else { cell->dns_expiry = ktime_get_real_seconds(); } -- cgit From 18836b48ebae20850631ee2916d0cdbb86df813d Mon Sep 17 00:00:00 2001 From: Jonas Gorski Date: Thu, 21 Feb 2019 10:56:42 +0100 Subject: MIPS: BCM63XX: provide DMA masks for ethernet devices The switch to the generic dma ops made dma masks mandatory, breaking devices having them not set. In case of bcm63xx, it broke ethernet with the following warning when trying to up the device: [ 2.633123] ------------[ cut here ]------------ [ 2.637949] WARNING: CPU: 0 PID: 325 at ./include/linux/dma-mapping.h:516 bcm_enetsw_open+0x160/0xbbc [ 2.647423] Modules linked in: gpio_button_hotplug [ 2.652361] CPU: 0 PID: 325 Comm: ip Not tainted 4.19.16 #0 [ 2.658080] Stack : 80520000 804cd3ec 00000000 00000000 804ccc00 87085bdc 87d3f9d4 804f9a17 [ 2.666707] 8049cf18 00000145 80a942a0 00000204 80ac0000 10008400 87085b90 eb3d5ab7 [ 2.675325] 00000000 00000000 80ac0000 000022b0 00000000 00000000 00000007 00000000 [ 2.683954] 0000007a 80500000 0013b381 00000000 80000000 00000000 804a1664 80289878 [ 2.692572] 00000009 00000204 80ac0000 00000200 00000002 00000000 00000000 80a90000 [ 2.701191] ... 
[ 2.703701] Call Trace: [ 2.706244] [<8001f3c8>] show_stack+0x58/0x100 [ 2.710840] [<800336e4>] __warn+0xe4/0x118 [ 2.715049] [<800337d4>] warn_slowpath_null+0x48/0x64 [ 2.720237] [<80289878>] bcm_enetsw_open+0x160/0xbbc [ 2.725347] [<802d1d4c>] __dev_open+0xf8/0x16c [ 2.729913] [<802d20cc>] __dev_change_flags+0x100/0x1c4 [ 2.735290] [<802d21b8>] dev_change_flags+0x28/0x70 [ 2.740326] [<803539e0>] devinet_ioctl+0x310/0x7b0 [ 2.745250] [<80355fd8>] inet_ioctl+0x1f8/0x224 [ 2.749939] [<802af290>] sock_ioctl+0x30c/0x488 [ 2.754632] [<80112b34>] do_vfs_ioctl+0x740/0x7dc [ 2.759459] [<80112c20>] ksys_ioctl+0x50/0x94 [ 2.763955] [<800240b8>] syscall_common+0x34/0x58 [ 2.768782] ---[ end trace fb1a6b14d74e28b6 ]--- [ 2.773544] bcm63xx_enetsw bcm63xx_enetsw.0: cannot allocate rx ring 512 Fix this by adding appropriate DMA masks for the platform devices. Fixes: f8c55dc6e828 ("MIPS: use generic dma noncoherent ops for simple noncoherent platforms") Signed-off-by: Jonas Gorski Reviewed-by: Christoph Hellwig Reviewed-by: Florian Fainelli Signed-off-by: Paul Burton Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Cc: Ralf Baechle Cc: James Hogan Cc: stable@vger.kernel.org # v4.19+ --- arch/mips/bcm63xx/dev-enet.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/arch/mips/bcm63xx/dev-enet.c b/arch/mips/bcm63xx/dev-enet.c index 07b4c65a88a4..8e73d65f3480 100644 --- a/arch/mips/bcm63xx/dev-enet.c +++ b/arch/mips/bcm63xx/dev-enet.c @@ -70,6 +70,8 @@ static struct platform_device bcm63xx_enet_shared_device = { static int shared_device_registered; +static u64 enet_dmamask = DMA_BIT_MASK(32); + static struct resource enet0_res[] = { { .start = -1, /* filled at runtime */ @@ -99,6 +101,8 @@ static struct platform_device bcm63xx_enet0_device = { .resource = enet0_res, .dev = { .platform_data = &enet0_pd, + .dma_mask = &enet_dmamask, + .coherent_dma_mask = DMA_BIT_MASK(32), }, }; @@ -131,6 +135,8 @@ static struct platform_device bcm63xx_enet1_device = { .resource = enet1_res, .dev = { .platform_data = &enet1_pd, + .dma_mask = &enet_dmamask, + .coherent_dma_mask = DMA_BIT_MASK(32), }, }; @@ -157,6 +163,8 @@ static struct platform_device bcm63xx_enetsw_device = { .resource = enetsw_res, .dev = { .platform_data = &enetsw_pd, + .dma_mask = &enet_dmamask, + .coherent_dma_mask = DMA_BIT_MASK(32), }, }; -- cgit From 56de8357049c707e5c881cc9d0e5ffed76388423 Mon Sep 17 00:00:00 2001 From: Hannes Reinecke Date: Mon, 18 Feb 2019 08:34:19 +0100 Subject: scsi: lpfc: fix calls to dma_set_mask_and_coherent() The change to use dma_set_mask_and_coherent() incorrectly made a second call with the 32 bit DMA mask value when the call with the 64 bit DMA mask value succeeded. This resulted in NVMe/FC connections failing due to corrupted data buffers, and various other SCSI/FCP I/O errors. Fixes: f30e1bfd6154 ("scsi: lpfc: use dma_set_mask_and_coherent") Cc: Suggested-by: Don Dutile Signed-off-by: Ewan D. Milne Signed-off-by: Hannes Reinecke Reviewed-by: Christoph Hellwig Reviewed-by: Ewan D. Milne Signed-off-by: Martin K. 
Petersen --- drivers/scsi/lpfc/lpfc_init.c | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index c1c36812c3d2..a588dfad4b11 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -7361,15 +7361,18 @@ lpfc_sli_pci_mem_setup(struct lpfc_hba *phba) unsigned long bar0map_len, bar2map_len; int i, hbq_count; void *ptr; - int error = -ENODEV; + int error; if (!pdev) - return error; + return -ENODEV; /* Set the device DMA mask size */ - if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) || - dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) + error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (error) + error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (error) return error; + error = -ENODEV; /* Get the bus address of Bar0 and Bar2 and the number of bytes * required by each mapping. @@ -9742,11 +9745,13 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) uint32_t if_type; if (!pdev) - return error; + return -ENODEV; /* Set the device DMA mask size */ - if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) || - dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) + error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (error) + error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (error) return error; /* -- cgit From 33d6667416c73eb0b37f0f10f56d825b15389dab Mon Sep 17 00:00:00 2001 From: Hannes Reinecke Date: Mon, 18 Feb 2019 08:34:20 +0100 Subject: scsi: 3w-9xxx: fix calls to dma_set_mask_and_coherent() The change to use dma_set_mask_and_coherent() incorrectly made a second call with the 32 bit DMA mask value when the call with the 64 bit DMA mask value succeeded. Fixes: b000bced5739 ("scsi: 3w-9xxx: fully convert to the generic DMA API") Cc: Signed-off-by: Hannes Reinecke Reviewed-by: Christoph Hellwig Reviewed-by: Ewan D. Milne Signed-off-by: Martin K. 
Petersen --- drivers/scsi/3w-9xxx.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c index a3c20e3a8b7c..3337b1e80412 100644 --- a/drivers/scsi/3w-9xxx.c +++ b/drivers/scsi/3w-9xxx.c @@ -2009,7 +2009,7 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) struct Scsi_Host *host = NULL; TW_Device_Extension *tw_dev; unsigned long mem_addr, mem_len; - int retval = -ENODEV; + int retval; retval = pci_enable_device(pdev); if (retval) { @@ -2020,8 +2020,10 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) pci_set_master(pdev); pci_try_set_mwi(pdev); - if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) || - dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) { + retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (retval) + retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (retval) { TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask"); retval = -ENODEV; goto out_disable_device; @@ -2240,8 +2242,10 @@ static int twa_resume(struct pci_dev *pdev) pci_set_master(pdev); pci_try_set_mwi(pdev); - if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) || - dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) { + retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (retval) + retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (retval) { TW_PRINTK(host, TW_DRIVER, 0x40, "Failed to set dma mask during resume"); retval = -ENODEV; goto out_disable_device; -- cgit From 1feb3b02294994ee3a067c9b6cda6c244fd0ee18 Mon Sep 17 00:00:00 2001 From: Hannes Reinecke Date: Mon, 18 Feb 2019 08:34:21 +0100 Subject: scsi: 3w-sas: fix calls to dma_set_mask_and_coherent() The change to use dma_set_mask_and_coherent() incorrectly made a second call with the 32 bit DMA mask value when the call with the 64 bit DMA mask value succeeded. Fixes: b1fa122930c4 ("scsi: 3w-sas: fully convert to the generic DMA API") Cc: Signed-off-by: Hannes Reinecke Reviewed-by: Christoph Hellwig Reviewed-by: Ewan D. Milne Signed-off-by: Martin K. 
Petersen --- drivers/scsi/3w-sas.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c index e8f5f7c63190..2d15b878bd94 100644 --- a/drivers/scsi/3w-sas.c +++ b/drivers/scsi/3w-sas.c @@ -1572,8 +1572,10 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) pci_set_master(pdev); pci_try_set_mwi(pdev); - if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) || - dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) { + retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (retval) + retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (retval) { TW_PRINTK(host, TW_DRIVER, 0x18, "Failed to set dma mask"); retval = -ENODEV; goto out_disable_device; @@ -1804,8 +1806,10 @@ static int twl_resume(struct pci_dev *pdev) pci_set_master(pdev); pci_try_set_mwi(pdev); - if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) || - dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) { + retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (retval) + retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (retval) { TW_PRINTK(host, TW_DRIVER, 0x25, "Failed to set dma mask during resume"); retval = -ENODEV; goto out_disable_device; -- cgit From c326de562f1fc149da4855a1b9d0433300c2a85d Mon Sep 17 00:00:00 2001 From: Hannes Reinecke Date: Mon, 18 Feb 2019 08:34:22 +0100 Subject: scsi: aic94xx: fix calls to dma_set_mask_and_coherent() The change to use dma_set_mask_and_coherent() incorrectly made a second call with the 32 bit DMA mask value when the call with the 64 bit DMA mask value succeeded. [mkp: fixed subject] Fixes: 3a21986f1a59 ("scsi: aic94xx: fully convert to the generic DMA API") Cc: Signed-off-by: Hannes Reinecke Reviewed-by: Christoph Hellwig Reviewed-by: Ewan D. Milne Signed-off-by: Martin K. Petersen --- drivers/scsi/aic94xx/aic94xx_init.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c index 07efcb9b5b94..bbdae67774f0 100644 --- a/drivers/scsi/aic94xx/aic94xx_init.c +++ b/drivers/scsi/aic94xx/aic94xx_init.c @@ -769,9 +769,11 @@ static int asd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) if (err) goto Err_remove; - err = -ENODEV; - if (dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(64)) || - dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32))) { + err = dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(64)); + if (err) + err = dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32)); + if (err) { + err = -ENODEV; asd_printk("no suitable DMA mask for %s\n", pci_name(dev)); goto Err_remove; } -- cgit From 11ea3824140ca994f4560c4bec6a32d257ef3e83 Mon Sep 17 00:00:00 2001 From: Hannes Reinecke Date: Mon, 18 Feb 2019 08:34:23 +0100 Subject: scsi: bfa: fix calls to dma_set_mask_and_coherent() The change to use dma_set_mask_and_coherent() incorrectly made a second call with the 32 bit DMA mask value when the call with the 64 bit DMA mask value succeeded. [mkp: fixed commit message] Fixes: a69b080025ea ("scsi: bfa: use dma_set_mask_and_coherent") Cc: Suggested-by: Ewan D. Milne Signed-off-by: Hannes Reinecke Reviewed-by: Christoph Hellwig Reviewed-by: Ewan D. Milne Signed-off-by: Martin K. 
Petersen --- drivers/scsi/bfa/bfad.c | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c index 42a0caf6740d..88880a66a189 100644 --- a/drivers/scsi/bfa/bfad.c +++ b/drivers/scsi/bfa/bfad.c @@ -727,7 +727,7 @@ bfad_init_timer(struct bfad_s *bfad) int bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad) { - int rc = -ENODEV; + int rc = -ENODEV; if (pci_enable_device(pdev)) { printk(KERN_ERR "pci_enable_device fail %p\n", pdev); @@ -739,8 +739,12 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad) pci_set_master(pdev); - if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) || - dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) { + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (rc) + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + + if (rc) { + rc = -ENODEV; printk(KERN_ERR "dma_set_mask_and_coherent fail %p\n", pdev); goto out_release_region; } @@ -1534,6 +1538,7 @@ bfad_pci_slot_reset(struct pci_dev *pdev) { struct bfad_s *bfad = pci_get_drvdata(pdev); u8 byte; + int rc; dev_printk(KERN_ERR, &pdev->dev, "bfad_pci_slot_reset flags: 0x%x\n", bfad->bfad_flags); @@ -1561,8 +1566,11 @@ bfad_pci_slot_reset(struct pci_dev *pdev) pci_save_state(pdev); pci_set_master(pdev); - if (dma_set_mask_and_coherent(&bfad->pcidev->dev, DMA_BIT_MASK(64)) || - dma_set_mask_and_coherent(&bfad->pcidev->dev, DMA_BIT_MASK(32))) + rc = dma_set_mask_and_coherent(&bfad->pcidev->dev, DMA_BIT_MASK(64)); + if (rc) + rc = dma_set_mask_and_coherent(&bfad->pcidev->dev, + DMA_BIT_MASK(32)); + if (rc) goto out_disable_device; if (restart_bfa(bfad) == -1) -- cgit From 732f3238dcf27acb92959a99b7923dc49395980e Mon Sep 17 00:00:00 2001 From: Hannes Reinecke Date: Mon, 18 Feb 2019 08:34:24 +0100 Subject: scsi: csiostor: fix calls to dma_set_mask_and_coherent() The change to use dma_set_mask_and_coherent() incorrectly made a second call with the 32 bit DMA mask value when the call with the 64 bit DMA mask value succeeded. Fixes: c22b332d811b ("scsi: csiostor: switch to generic DMA API") Cc: Signed-off-by: Hannes Reinecke Reviewed-by: Christoph Hellwig Reviewed-by: Ewan D. Milne Signed-off-by: Martin K. Petersen --- drivers/scsi/csiostor/csio_init.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c index cf629380a981..616b25bf7941 100644 --- a/drivers/scsi/csiostor/csio_init.c +++ b/drivers/scsi/csiostor/csio_init.c @@ -210,8 +210,11 @@ csio_pci_init(struct pci_dev *pdev, int *bars) pci_set_master(pdev); pci_try_set_mwi(pdev); - if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) || - dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) { + rv = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (rv) + rv = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (rv) { + rv = -ENODEV; dev_err(&pdev->dev, "No suitable DMA available.\n"); goto err_release_regions; } -- cgit From d9a00459effc30f6de2cdd887b64f15c6c54ae71 Mon Sep 17 00:00:00 2001 From: Hannes Reinecke Date: Mon, 18 Feb 2019 08:34:25 +0100 Subject: scsi: hisi_sas: fix calls to dma_set_mask_and_coherent() The change to use dma_set_mask_and_coherent() incorrectly made a second call with the 32 bit DMA mask value when the call with the 64 bit DMA mask value succeeded. [mkp: fixed commit message] Fixes: e4db40e7a1a2 ("scsi: hisi_sas: use dma_set_mask_and_coherent") Cc: Suggested-by: Ewan D. 
Milne Signed-off-by: Hannes Reinecke Reviewed-by: Christoph Hellwig Signed-off-by: Hannes Reinecke Signed-off-by: Martin K. Petersen --- drivers/scsi/hisi_sas/hisi_sas_main.c | 8 ++++++-- drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 8 +++++--- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index eed7fc5b3389..bc17fa0d8375 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -2323,6 +2323,7 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev, struct Scsi_Host *shost; struct hisi_hba *hisi_hba; struct device *dev = &pdev->dev; + int error; shost = scsi_host_alloc(hw->sht, sizeof(*hisi_hba)); if (!shost) { @@ -2343,8 +2344,11 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev, if (hisi_sas_get_fw_info(hisi_hba) < 0) goto err_out; - if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) && - dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) { + error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); + if (error) + error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); + + if (error) { dev_err(dev, "No usable DMA addressing method\n"); goto err_out; } diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index c92b3822c408..e0570fd8466e 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c @@ -2447,10 +2447,12 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (rc) goto err_out_disable_device; - if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) || - dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) { + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (rc) + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (rc) { dev_err(dev, "No usable DMA addressing method\n"); - rc = -EIO; + rc = -ENODEV; goto err_out_regions; } -- cgit From 3e344b6cec8e675e692ffddf9977c52638337006 Mon Sep 17 00:00:00 2001 From: Hannes Reinecke Date: Mon, 18 Feb 2019 08:34:26 +0100 Subject: scsi: hptiop: fix calls to dma_set_mask() The change to use dma_set_mask() incorrectly made a second call with the 32 bit DMA mask value when the call with the 64 bit DMA mask value succeeded. Fixes: 453cd3700ca3 ("scsi: hptiop: use dma_set_mask") Cc: Suggested-by: Ewan D. Milne Signed-off-by: Hannes Reinecke Reviewed-by: Christoph Hellwig Reviewed-by: Ewan D. Milne Signed-off-by: Martin K. 
Petersen --- drivers/scsi/hptiop.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c index 3eedfd4f8f57..251c084a6ff0 100644 --- a/drivers/scsi/hptiop.c +++ b/drivers/scsi/hptiop.c @@ -1292,6 +1292,7 @@ static int hptiop_probe(struct pci_dev *pcidev, const struct pci_device_id *id) dma_addr_t start_phy; void *start_virt; u32 offset, i, req_size; + int rc; dprintk("hptiop_probe(%p)\n", pcidev); @@ -1308,9 +1309,12 @@ static int hptiop_probe(struct pci_dev *pcidev, const struct pci_device_id *id) /* Enable 64bit DMA if possible */ iop_ops = (struct hptiop_adapter_ops *)id->driver_data; - if (dma_set_mask(&pcidev->dev, - DMA_BIT_MASK(iop_ops->hw_dma_bit_mask)) || - dma_set_mask(&pcidev->dev, DMA_BIT_MASK(32))) { + rc = dma_set_mask(&pcidev->dev, + DMA_BIT_MASK(iop_ops->hw_dma_bit_mask)); + if (rc) + rc = dma_set_mask(&pcidev->dev, DMA_BIT_MASK(32)); + + if (rc) { printk(KERN_ERR "hptiop: fail to set dma_mask\n"); goto disable_pci_device; } -- cgit From 5603731a15ef9ca317c122cc8c959f1dee1798b4 Mon Sep 17 00:00:00 2001 From: Takeshi Saito Date: Thu, 21 Feb 2019 20:38:05 +0100 Subject: mmc: tmio: fix access width of Block Count Register In R-Car Gen2 or later, the maximum number of transfer blocks are changed from 0xFFFF to 0xFFFFFFFF. Therefore, Block Count Register should use iowrite32(). If another system (U-boot, Hypervisor OS, etc) uses bit[31:16], this value will not be cleared. So, SD/MMC card initialization fails. So, check for the bigger register and use apropriate write. Also, mark the register as extended on Gen2. Signed-off-by: Takeshi Saito [wsa: use max_blk_count in if(), add Gen2, update commit message] Signed-off-by: Wolfram Sang Cc: stable@kernel.org Reviewed-by: Simon Horman [Ulf: Fixed build error] Signed-off-by: Ulf Hansson --- drivers/mmc/host/renesas_sdhi_sys_dmac.c | 1 + drivers/mmc/host/tmio_mmc.h | 5 +++++ drivers/mmc/host/tmio_mmc_core.c | 6 +++++- 3 files changed, 11 insertions(+), 1 deletion(-) diff --git a/drivers/mmc/host/renesas_sdhi_sys_dmac.c b/drivers/mmc/host/renesas_sdhi_sys_dmac.c index 8471160316e0..02cd878e209f 100644 --- a/drivers/mmc/host/renesas_sdhi_sys_dmac.c +++ b/drivers/mmc/host/renesas_sdhi_sys_dmac.c @@ -65,6 +65,7 @@ static const struct renesas_sdhi_of_data of_rcar_gen2_compatible = { .scc_offset = 0x0300, .taps = rcar_gen2_scc_taps, .taps_num = ARRAY_SIZE(rcar_gen2_scc_taps), + .max_blk_count = 0xffffffff, }; /* Definitions for sampling clocks */ diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h index c03529e3f01a..2adb0d24360f 100644 --- a/drivers/mmc/host/tmio_mmc.h +++ b/drivers/mmc/host/tmio_mmc.h @@ -277,6 +277,11 @@ static inline void sd_ctrl_write32_as_16_and_16(struct tmio_mmc_host *host, iowrite16(val >> 16, host->ctl + ((addr + 2) << host->bus_shift)); } +static inline void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val) +{ + iowrite32(val, host->ctl + (addr << host->bus_shift)); +} + static inline void sd_ctrl_write32_rep(struct tmio_mmc_host *host, int addr, const u32 *buf, int count) { diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c index 41d79720e745..f7a6f005899a 100644 --- a/drivers/mmc/host/tmio_mmc_core.c +++ b/drivers/mmc/host/tmio_mmc_core.c @@ -43,6 +43,7 @@ #include #include #include +#include #include #include #include @@ -703,7 +704,10 @@ static int tmio_mmc_start_data(struct tmio_mmc_host *host, /* Set transfer length / blocksize */ sd_ctrl_write16(host, CTL_SD_XFER_LEN, 
data->blksz); - sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks); + if (host->mmc->max_blk_count >= SZ_64K) + sd_ctrl_write32(host, CTL_XFER_BLK_COUNT, data->blocks); + else + sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks); tmio_mmc_start_dma(host, data); -- cgit From cffaaf0c816238c45cd2d06913476c83eb50f682 Mon Sep 17 00:00:00 2001 From: Julia Cartwright Date: Wed, 20 Feb 2019 16:46:31 +0000 Subject: iommu/dmar: Fix buffer overflow during PCI bus notification Commit 57384592c433 ("iommu/vt-d: Store bus information in RMRR PCI device path") changed the type of the path data, however, the change in path type was not reflected in size calculations. Update to use the correct type and prevent a buffer overflow. This bug manifests in systems with deep PCI hierarchies, and can lead to an overflow of the static allocated buffer (dmar_pci_notify_info_buf), or can lead to overflow of slab-allocated data. BUG: KASAN: global-out-of-bounds in dmar_alloc_pci_notify_info+0x1d5/0x2e0 Write of size 1 at addr ffffffff90445d80 by task swapper/0/1 CPU: 0 PID: 1 Comm: swapper/0 Tainted: G W 4.14.87-rt49-02406-gd0a0e96 #1 Call Trace: ? dump_stack+0x46/0x59 ? print_address_description+0x1df/0x290 ? dmar_alloc_pci_notify_info+0x1d5/0x2e0 ? kasan_report+0x256/0x340 ? dmar_alloc_pci_notify_info+0x1d5/0x2e0 ? e820__memblock_setup+0xb0/0xb0 ? dmar_dev_scope_init+0x424/0x48f ? __down_write_common+0x1ec/0x230 ? dmar_dev_scope_init+0x48f/0x48f ? dmar_free_unused_resources+0x109/0x109 ? cpumask_next+0x16/0x20 ? __kmem_cache_create+0x392/0x430 ? kmem_cache_create+0x135/0x2f0 ? e820__memblock_setup+0xb0/0xb0 ? intel_iommu_init+0x170/0x1848 ? _raw_spin_unlock_irqrestore+0x32/0x60 ? migrate_enable+0x27a/0x5b0 ? sched_setattr+0x20/0x20 ? migrate_disable+0x1fc/0x380 ? task_rq_lock+0x170/0x170 ? try_to_run_init_process+0x40/0x40 ? locks_remove_file+0x85/0x2f0 ? dev_prepare_static_identity_mapping+0x78/0x78 ? rt_spin_unlock+0x39/0x50 ? lockref_put_or_lock+0x2a/0x40 ? dput+0x128/0x2f0 ? __rcu_read_unlock+0x66/0x80 ? __fput+0x250/0x300 ? __rcu_read_lock+0x1b/0x30 ? mntput_no_expire+0x38/0x290 ? e820__memblock_setup+0xb0/0xb0 ? pci_iommu_init+0x25/0x63 ? pci_iommu_init+0x25/0x63 ? do_one_initcall+0x7e/0x1c0 ? initcall_blacklisted+0x120/0x120 ? kernel_init_freeable+0x27b/0x307 ? rest_init+0xd0/0xd0 ? kernel_init+0xf/0x120 ? rest_init+0xd0/0xd0 ? ret_from_fork+0x1f/0x40 The buggy address belongs to the variable: dmar_pci_notify_info_buf+0x40/0x60 Fixes: 57384592c433 ("iommu/vt-d: Store bus information in RMRR PCI device path") Signed-off-by: Julia Cartwright Signed-off-by: Joerg Roedel --- drivers/iommu/dmar.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c index dc9f14811e0f..58dc70bffd5b 100644 --- a/drivers/iommu/dmar.c +++ b/drivers/iommu/dmar.c @@ -144,7 +144,7 @@ dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event) for (tmp = dev; tmp; tmp = tmp->bus->self) level++; - size = sizeof(*info) + level * sizeof(struct acpi_dmar_pci_path); + size = sizeof(*info) + level * sizeof(info->path[0]); if (size <= sizeof(dmar_pci_notify_info_buf)) { info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf; } else { -- cgit From e5723f95d6b493dd437f1199cacb41459713b32f Mon Sep 17 00:00:00 2001 From: Ritesh Harjani Date: Fri, 22 Feb 2019 19:21:34 +0530 Subject: mmc: core: Fix NULL ptr crash from mmc_should_fail_request In case of CQHCI, mrq->cmd may be NULL for data requests (non DCMD). 
In such case mmc_should_fail_request is directly dereferencing mrq->cmd while cmd is NULL. Fix this by checking for mrq->cmd pointer. Fixes: 72a5af554df8 ("mmc: core: Add support for handling CQE requests") Signed-off-by: Ritesh Harjani Cc: stable@vger.kernel.org Signed-off-by: Ulf Hansson --- drivers/mmc/core/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 5bd58b95d318..b27a1e620233 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -95,7 +95,7 @@ static void mmc_should_fail_request(struct mmc_host *host, if (!data) return; - if (cmd->error || data->error || + if ((cmd && cmd->error) || data->error || !should_fail(&host->fail_mmc_request, data->blksz * data->blocks)) return; -- cgit From 388b4e6a00bb3097278ed1648ac5a1cb48c894e6 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Tue, 26 Feb 2019 16:35:26 -0800 Subject: scsi: core: Avoid that system resume triggers a kernel warning scsi_device_quiesce() and scsi_device_resume() are called during system-wide suspend and resume. scsi_device_quiesce() only succeeds for SCSI devices that are in one of the RUNNING, OFFLINE or TRANSPORT_OFFLINE states (see also scsi_set_device_state()). This patch avoids that the following warning is triggered when resuming a system for which quiescing a SCSI device failed: WARNING: CPU: 2 PID: 11303 at drivers/scsi/scsi_lib.c:2600 scsi_device_resume+0x4f/0x58 CPU: 2 PID: 11303 Comm: kworker/u8:70 Not tainted 5.0.0-rc1+ #50 Hardware name: LENOVO 80E3/Lancer 5B2, BIOS A2CN45WW(V2.13) 08/04/2016 Workqueue: events_unbound async_run_entry_fn Call Trace: scsi_dev_type_resume+0x2e/0x60 async_run_entry_fn+0x32/0xd8 process_one_work+0x1f4/0x420 worker_thread+0x28/0x3c0 kthread+0x118/0x130 ret_from_fork+0x22/0x40 Cc: Przemek Socha Reported-by: Przemek Socha Fixes: 3a0a529971ec ("block, scsi: Make SCSI quiesce and resume work reliably") # v4.15 Signed-off-by: Bart Van Assche Signed-off-by: Martin K. Petersen --- drivers/scsi/scsi_lib.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index f8d51c3d5582..a6828391d6b3 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -2598,7 +2598,6 @@ void scsi_device_resume(struct scsi_device *sdev) * device deleted during suspend) */ mutex_lock(&sdev->state_mutex); - WARN_ON_ONCE(!sdev->quiesced_by); sdev->quiesced_by = NULL; blk_clear_pm_only(sdev->request_queue); if (sdev->sdev_state == SDEV_QUIESCE) -- cgit From 27ec9dc17c48ea2e642ccb90b4ebf7fd47468911 Mon Sep 17 00:00:00 2001 From: Alamy Liu Date: Mon, 25 Feb 2019 11:22:13 -0800 Subject: mmc: cqhci: fix space allocated for transfer descriptor There is not enough space being allocated when DCMD is disabled. CQE_DCMD is not necessary to be enabled when CQE is enabled. (Software could halt CQE to send command) In the case that CQE_DCMD is not enabled, it still needs to allocate space for data transfer. 
For instance: CQE_DCMD is enabled: 31 slots space (one slot used by DCMD) CQE_DCMD is disabled: 32 slots space Fixes: a4080225f51d ("mmc: cqhci: support for command queue enabled host") Signed-off-by: Alamy Liu Acked-by: Adrian Hunter Cc: stable@vger.kernel.org Signed-off-by: Ulf Hansson --- drivers/mmc/host/cqhci.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mmc/host/cqhci.c b/drivers/mmc/host/cqhci.c index 159270e947cf..bd9a59bc0e31 100644 --- a/drivers/mmc/host/cqhci.c +++ b/drivers/mmc/host/cqhci.c @@ -201,7 +201,7 @@ static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host) cq_host->desc_size = cq_host->slot_sz * cq_host->num_slots; cq_host->data_size = cq_host->trans_desc_len * cq_host->mmc->max_segs * - (cq_host->num_slots - 1); + cq_host->mmc->cqe_qdepth; pr_debug("%s: cqhci: desc_size: %zu data_sz: %zu slot-sz: %d\n", mmc_hostname(cq_host->mmc), cq_host->desc_size, cq_host->data_size, -- cgit From d07e9fadf3a6b466ca3ae90fa4859089ff20530f Mon Sep 17 00:00:00 2001 From: Alamy Liu Date: Mon, 25 Feb 2019 11:22:14 -0800 Subject: mmc: cqhci: Fix a tiny potential memory leak on error condition Free up the allocated memory in the case of error return The value of mmc_host->cqe_enabled stays 'false'. Thus, cqhci_disable (mmc_cqe_ops->cqe_disable) won't be called to free the memory. Also, cqhci_disable() seems to be designed to disable and free all resources, not suitable to handle this corner case. Fixes: a4080225f51d ("mmc: cqhci: support for command queue enabled host") Signed-off-by: Alamy Liu Acked-by: Adrian Hunter Cc: stable@vger.kernel.org Signed-off-by: Ulf Hansson --- drivers/mmc/host/cqhci.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/drivers/mmc/host/cqhci.c b/drivers/mmc/host/cqhci.c index bd9a59bc0e31..a8af682a9182 100644 --- a/drivers/mmc/host/cqhci.c +++ b/drivers/mmc/host/cqhci.c @@ -217,12 +217,21 @@ static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host) cq_host->desc_size, &cq_host->desc_dma_base, GFP_KERNEL); + if (!cq_host->desc_base) + return -ENOMEM; + cq_host->trans_desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc), cq_host->data_size, &cq_host->trans_desc_dma_base, GFP_KERNEL); - if (!cq_host->desc_base || !cq_host->trans_desc_base) + if (!cq_host->trans_desc_base) { + dmam_free_coherent(mmc_dev(cq_host->mmc), cq_host->desc_size, + cq_host->desc_base, + cq_host->desc_dma_base); + cq_host->desc_base = NULL; + cq_host->desc_dma_base = 0; return -ENOMEM; + } pr_debug("%s: cqhci: desc-base: 0x%p trans-base: 0x%p\n desc_dma 0x%llx trans_dma: 0x%llx\n", mmc_hostname(cq_host->mmc), cq_host->desc_base, cq_host->trans_desc_base, -- cgit From c53336c8f5f29043fded57912cc06c24e12613d7 Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Thu, 28 Feb 2019 00:02:11 +0800 Subject: mmc: core: align max segment size with logical block size Logical block size is the lowest possible block size that the storage device can address. Max segment size is often related with controller's DMA capability. And it is reasonable to align max segment size with logical block size. SDHCI sets un-aligned max segment size, and causes ADMA error, so fix it by aligning max segment size with logical block size. 
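The alignment itself is plain integer arithmetic. As a rough user-space sketch (the 65535-byte segment limit and 4096-byte logical block size below are made-up example values, not read from any real controller), rounding the segment limit down to a multiple of the block size removes the unaligned tail that the ADMA engine cannot address:

#include <stdio.h>

/* Round v down to a multiple of align; plain modulo arithmetic, equivalent in
 * effect to the kernel's round_down() for the power-of-two sizes used here. */
static unsigned int round_down_to(unsigned int v, unsigned int align)
{
        return v - (v % align);
}

int main(void)
{
        unsigned int max_seg_size = 65535; /* hypothetical controller limit */
        unsigned int block_size = 4096;    /* hypothetical logical block size */

        printf("unaligned max segment size: %u\n", max_seg_size);
        printf("aligned max segment size:   %u\n",
               round_down_to(max_seg_size, block_size));
        return 0;
}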
Reported-by: Naresh Kamboju Cc: Christoph Hellwig Cc: Naresh Kamboju Cc: Faiz Abbas Cc: linux-block@vger.kernel.org Signed-off-by: Ming Lei Signed-off-by: Ulf Hansson --- drivers/mmc/core/block.c | 6 ------ drivers/mmc/core/queue.c | 9 ++++++++- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 14f3fdb8c6bb..9ce8eb51a60f 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -2380,12 +2380,6 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, snprintf(md->disk->disk_name, sizeof(md->disk->disk_name), "mmcblk%u%s", card->host->index, subname ? subname : ""); - if (mmc_card_mmc(card)) - blk_queue_logical_block_size(md->queue.queue, - card->ext_csd.data_sector_size); - else - blk_queue_logical_block_size(md->queue.queue, 512); - set_capacity(md->disk, size); if (mmc_host_cmd23(card->host)) { diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c index 35cc138b096d..15a45ec6518d 100644 --- a/drivers/mmc/core/queue.c +++ b/drivers/mmc/core/queue.c @@ -355,6 +355,7 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card) { struct mmc_host *host = card->host; u64 limit = BLK_BOUNCE_HIGH; + unsigned block_size = 512; if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask) limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT; @@ -368,7 +369,13 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card) blk_queue_max_hw_sectors(mq->queue, min(host->max_blk_count, host->max_req_size / 512)); blk_queue_max_segments(mq->queue, host->max_segs); - blk_queue_max_segment_size(mq->queue, host->max_seg_size); + + if (mmc_card_mmc(card)) + block_size = card->ext_csd.data_sector_size; + + blk_queue_logical_block_size(mq->queue, block_size); + blk_queue_max_segment_size(mq->queue, + round_down(host->max_seg_size, block_size)); INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler); INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work); -- cgit From d63716658ac16c515d1223a9fbf5edbf76b1b333 Mon Sep 17 00:00:00 2001 From: Mario Kleiner Date: Wed, 30 Jan 2019 06:04:46 +0100 Subject: drm/amd/display: Use vrr friendly pageflip throttling in DC. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In VRR mode, keep track of the vblank count of the last completed pageflip in amdgpu_crtc->last_flip_vblank, as recorded in the pageflip completion handler after each completed flip. Use that count to prevent mmio programming a new pageflip within the same vblank in which the last pageflip completed, iow. to throttle pageflips to at most one flip per video frame, while at the same time allowing to request a flip not only before start of vblank, but also anywhere within vblank. The old logic did the same, and made sense for regular fixed refresh rate flipping, but in vrr mode it prevents requesting a flip anywhere inside the possibly huge vblank, thereby reducing framerate in vrr mode instead of improving it, by delaying a slightly delayed flip requests up to a maximum vblank duration + 1 scanout duration. This would limit VRR usefulness to only help applications with a very high GPU demand, which can submit the flip request before start of vblank, but then have to wait long for fences to complete. With this method a flip can be both requested and - after fences have completed - executed, ie. it doesn't matter if the request (amdgpu_dm_do_flip()) gets delayed until deep into the extended vblank due to cpu execution delays. 
This also allows clients which want to regulate framerate within the vrr range a much more fine-grained control of flip timing, a feature that might be useful for video playback, and is very useful for neuroscience/vision research applications. In regular non-VRR mode, retain the old flip submission behavior. This to keep flip scheduling for fullscreen X11/GLX OpenGL clients intact, if they use the GLX_OML_sync_control extensions glXSwapBufferMscOML(, ..., target_msc,...) function with a specific target_msc target vblank count. glXSwapBuffersMscOML() or DRI3/Present PresentPixmap() will not flip at the proper target_msc for a non-zero target_msc if VRR mode is active with this patch. They'd often flip one frame too early. However, this limitation should not matter much in VRR mode, as scheduling based on vblank counts is pretty futile/unusable under variable refresh duration anyway, so no real extra harm is done. Signed-off-by: Mario Kleiner Cc: Nicholas Kazlauskas Cc: Harry Wentland Cc: Alex Deucher Cc: Michel Dänzer Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h | 1 + drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 27 +++++++++++++++++++---- 2 files changed, 24 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h index aadd0fa42e43..3aa42c64484a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h @@ -405,6 +405,7 @@ struct amdgpu_crtc { struct amdgpu_flip_work *pflip_works; enum amdgpu_flip_status pflip_status; int deferred_flip_completion; + u64 last_flip_vblank; /* pll sharing */ struct amdgpu_atom_ss ss; bool ss_enabled; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 5296b8f3e0ab..636d14a60952 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -303,12 +303,11 @@ static void dm_pflip_high_irq(void *interrupt_params) return; } + /* Update to correct count(s) if racing with vblank irq */ + amdgpu_crtc->last_flip_vblank = drm_crtc_accurate_vblank_count(&amdgpu_crtc->base); /* wake up userspace */ if (amdgpu_crtc->event) { - /* Update to correct count(s) if racing with vblank irq */ - drm_crtc_accurate_vblank_count(&amdgpu_crtc->base); - drm_crtc_send_vblank_event(&amdgpu_crtc->base, amdgpu_crtc->event); /* page flip completed. clean up */ @@ -4828,6 +4827,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc)); int planes_count = 0; unsigned long flags; + u64 last_flip_vblank; + bool vrr_active = acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE; /* update planes when needed */ for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { @@ -4859,6 +4860,16 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, /* In commit tail framework this cannot happen */ WARN_ON(1); } + + /* For variable refresh rate mode only: + * Get vblank of last completed flip to avoid > 1 vrr flips per + * video frame by use of throttling, but allow flip programming + * anywhere in the possibly large variable vrr vblank interval + * for fine-grained flip timing control and more opportunity to + * avoid stutter on late submission of amdgpu_dm_do_flip() calls. 
+ */ + last_flip_vblank = acrtc_attach->last_flip_vblank; + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); if (!pflip_needed || plane->type == DRM_PLANE_TYPE_OVERLAY) { @@ -4882,10 +4893,18 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, if (plane->type == DRM_PLANE_TYPE_PRIMARY) drm_crtc_vblank_get(crtc); + /* Use old throttling in non-vrr fixed refresh rate mode + * to keep flip scheduling based on target vblank counts + * working in a backwards compatible way, e.g., clients + * using GLX_OML_sync_control extension. + */ + if (!vrr_active) + last_flip_vblank = drm_crtc_vblank_count(crtc); + amdgpu_dm_do_flip( crtc, fb, - (uint32_t)drm_crtc_vblank_count(crtc) + *wait_for_vblank, + (uint32_t) last_flip_vblank + *wait_for_vblank, dc_state); } -- cgit From 0a1d52994d440e21def1c2174932410b4f2a98a1 Mon Sep 17 00:00:00 2001 From: Jann Horn Date: Wed, 27 Feb 2019 21:29:52 +0100 Subject: mm: enforce min addr even if capable() in expand_downwards() security_mmap_addr() does a capability check with current_cred(), but we can reach this code from contexts like a VFS write handler where current_cred() must not be used. This can be abused on systems without SMAP to make NULL pointer dereferences exploitable again. Fixes: 8869477a49c3 ("security: protect from stack expansion into low vm addresses") Cc: stable@kernel.org Signed-off-by: Jann Horn Signed-off-by: Linus Torvalds --- mm/mmap.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/mm/mmap.c b/mm/mmap.c index f901065c4c64..fc1809b1bed6 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2426,12 +2426,11 @@ int expand_downwards(struct vm_area_struct *vma, { struct mm_struct *mm = vma->vm_mm; struct vm_area_struct *prev; - int error; + int error = 0; address &= PAGE_MASK; - error = security_mmap_addr(address); - if (error) - return error; + if (address < mmap_min_addr) + return -EPERM; /* Enforce stack_guard_gap */ prev = vma->vm_prev; -- cgit From e0bf304e4a00d66d90904a6c5b93141f177cf6d2 Mon Sep 17 00:00:00 2001 From: Thomas Bogendoerfer Date: Wed, 27 Feb 2019 10:42:56 +0100 Subject: MIPS: fix memory setup for platforms with PHYS_OFFSET != 0 For platforms, which use a PHYS_OFFSET != 0, symbol _end also contains that offset. So when calling memblock_reserve() for reserving kernel the size argument needs to be adjusted. Fixes: bcec54bf3118 ("mips: switch to NO_BOOTMEM") Acked-by: Mike Rapoport Signed-off-by: Thomas Bogendoerfer Signed-off-by: Paul Burton Cc: Ralf Baechle Cc: James Hogan Cc: linux-mips@vger.kernel.org Cc: linux-kernel@vger.kernel.org Cc: Mike Rapoport Cc: stable@vger.kernel.org # v4.20+ --- arch/mips/kernel/setup.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index 8c6c48ed786a..d2e5a5ad0e6f 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -384,7 +384,8 @@ static void __init bootmem_init(void) init_initrd(); reserved_end = (unsigned long) PFN_UP(__pa_symbol(&_end)); - memblock_reserve(PHYS_OFFSET, reserved_end << PAGE_SHIFT); + memblock_reserve(PHYS_OFFSET, + (reserved_end << PAGE_SHIFT) - PHYS_OFFSET); /* * max_low_pfn is not a number of pages. The number of pages -- cgit From 2216322919c8608a448d7ebc560a845238a5d6b6 Mon Sep 17 00:00:00 2001 From: Nicholas Kazlauskas Date: Mon, 7 Jan 2019 12:41:46 -0500 Subject: drm: Block fb changes for async plane updates The prepare_fb call always happens on new_plane_state. 
The drm_atomic_helper_cleanup_planes checks to see if plane state pointer has changed when deciding to call cleanup_fb on either the new_plane_state or the old_plane_state. For a non-async atomic commit the state pointer is swapped, so this helper calls prepare_fb on the new_plane_state and cleanup_fb on the old_plane_state. This makes sense, since we want to prepare the framebuffer we are going to use and cleanup the the framebuffer we are no longer using. For the async atomic update helpers this differs. The async atomic update helpers perform in-place updates on the existing state. They call drm_atomic_helper_cleanup_planes but the state pointer is not swapped. This means that prepare_fb is called on the new_plane_state and cleanup_fb is called on the new_plane_state (not the old). In the case where old_plane_state->fb == new_plane_state->fb then there should be no behavioral difference between an async update and a non-async commit. But there are issues that arise when old_plane_state->fb != new_plane_state->fb. The first is that the new_plane_state->fb is immediately cleaned up after it has been prepared, so we're using a fb that we shouldn't be. The second occurs during a sequence of async atomic updates and non-async regular atomic commits. Suppose there are two framebuffers being interleaved in a double-buffering scenario, fb1 and fb2: - Async update, oldfb = NULL, newfb = fb1, prepare fb1, cleanup fb1 - Async update, oldfb = fb1, newfb = fb2, prepare fb2, cleanup fb2 - Non-async commit, oldfb = fb2, newfb = fb1, prepare fb1, cleanup fb2 We call cleanup_fb on fb2 twice in this example scenario, and any further use will result in use-after-free. The simple fix to this problem is to block framebuffer changes in the drm_atomic_helper_async_check function for now. v2: Move check by itself, add a FIXME (Daniel) Cc: Daniel Vetter Cc: Harry Wentland Cc: Andrey Grodzovsky Cc: # v4.14+ Fixes: fef9df8b5945 ("drm/atomic: initial support for asynchronous plane update") Signed-off-by: Nicholas Kazlauskas Acked-by: Andrey Grodzovsky Acked-by: Harry Wentland Reviewed-by: Daniel Vetter Signed-off-by: Harry Wentland Link: https://patchwork.freedesktop.org/patch/275364/ Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_atomic_helper.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 54e2ae614dcc..f4290f6b0c38 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -1602,6 +1602,15 @@ int drm_atomic_helper_async_check(struct drm_device *dev, old_plane_state->crtc != new_plane_state->crtc) return -EINVAL; + /* + * FIXME: Since prepare_fb and cleanup_fb are always called on + * the new_plane_state for async updates we need to block framebuffer + * changes. This prevents use of a fb that's been cleaned up and + * double cleanups from occuring. 
+ */ + if (old_plane_state->fb != new_plane_state->fb) + return -EINVAL; + funcs = plane->helper_private; if (!funcs->atomic_async_update) return -EINVAL; -- cgit From 17fb465f16027ea22225b282295f5b8af19992e0 Mon Sep 17 00:00:00 2001 From: Alistair Francis Date: Thu, 21 Feb 2019 00:33:03 +0000 Subject: drm/bochs: Fix the ID mismatch error When running RISC-V QEMU with the Bochs device attached via PCIe the probe of the Bochs device fails with: [drm:bochs_hw_init] *ERROR* ID mismatch This was introduced by this commit: 7780eb9ce8 bochs: convert to drm_dev_register To fix the error we ensure that pci_enable_device() is called before bochs_load(). Fixes: 7780eb9ce80f ("bochs: convert to drm_dev_register") Signed-off-by: Alistair Francis Reported-by: David Abdurachmanov Link: http://patchwork.freedesktop.org/patch/msgid/20190221003231.31625-1-alistair.francis@wdc.com Signed-off-by: Gerd Hoffmann Signed-off-by: Dave Airlie --- drivers/gpu/drm/bochs/bochs_drv.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c index f3dd66ae990a..aa35007262cd 100644 --- a/drivers/gpu/drm/bochs/bochs_drv.c +++ b/drivers/gpu/drm/bochs/bochs_drv.c @@ -154,6 +154,10 @@ static int bochs_pci_probe(struct pci_dev *pdev, if (IS_ERR(dev)) return PTR_ERR(dev); + ret = pci_enable_device(pdev); + if (ret) + goto err_free_dev; + dev->pdev = pdev; pci_set_drvdata(pdev, dev); -- cgit From 4b6d196c9cec548a6b1cf5bb07b4a8b8d375829d Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 22 Feb 2019 22:54:07 -0800 Subject: crypto: arm64/chacha - fix chacha_4block_xor_neon() for big endian The change to encrypt a fifth ChaCha block using scalar instructions caused the chacha20-neon, xchacha20-neon, and xchacha12-neon self-tests to start failing on big endian arm64 kernels. The bug is that the keystream block produced in 32-bit scalar registers is directly XOR'd with the data words, which are loaded and stored in native endianness. Thus in big endian mode the data bytes end up XOR'd with the wrong bytes. Fix it by byte-swapping the keystream words in big endian mode. 
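Because the data words are loaded and stored in native endianness, the register copy of each keystream word has to be byte-swapped on big endian before the XOR. A small user-space sketch of the mismatch (the word value is arbitrary and only illustrates the byte order, it is not real ChaCha output):

#include <stdio.h>
#include <stdint.h>

/* C equivalent of the AArch64 "rev" instruction added by the fix. */
static uint32_t swap32(uint32_t x)
{
        return (x >> 24) | ((x >> 8) & 0x0000ff00u) |
               ((x << 8) & 0x00ff0000u) | (x << 24);
}

int main(void)
{
        uint32_t keystream_word = 0x01020304;   /* arbitrary example value */
        /* ChaCha serializes each keystream word little-endian ... */
        unsigned char stream[4] = { 0x04, 0x03, 0x02, 0x01 };
        /* ... so a big-endian 32-bit load of those bytes sees this value: */
        uint32_t be_load = (uint32_t)stream[0] << 24 | (uint32_t)stream[1] << 16 |
                           (uint32_t)stream[2] << 8  | (uint32_t)stream[3];

        printf("word in a register:       0x%08x\n", (unsigned)keystream_word);
        printf("same bytes via a BE load: 0x%08x\n", (unsigned)be_load);
        printf("register word after rev:  0x%08x\n", (unsigned)swap32(keystream_word));
        return 0;
}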
Fixes: 2fe55987b262 ("crypto: arm64/chacha - use combined SIMD/ALU routine for more speed") Signed-off-by: Eric Biggers Reviewed-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- arch/arm64/crypto/chacha-neon-core.S | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/arch/arm64/crypto/chacha-neon-core.S b/arch/arm64/crypto/chacha-neon-core.S index 021bb9e9784b..bfb80e10ff7b 100644 --- a/arch/arm64/crypto/chacha-neon-core.S +++ b/arch/arm64/crypto/chacha-neon-core.S @@ -532,6 +532,10 @@ ENTRY(chacha_4block_xor_neon) add v3.4s, v3.4s, v19.4s add a2, a2, w8 add a3, a3, w9 +CPU_BE( rev a0, a0 ) +CPU_BE( rev a1, a1 ) +CPU_BE( rev a2, a2 ) +CPU_BE( rev a3, a3 ) ld4r {v24.4s-v27.4s}, [x0], #16 ld4r {v28.4s-v31.4s}, [x0] @@ -552,6 +556,10 @@ ENTRY(chacha_4block_xor_neon) add v7.4s, v7.4s, v23.4s add a6, a6, w8 add a7, a7, w9 +CPU_BE( rev a4, a4 ) +CPU_BE( rev a5, a5 ) +CPU_BE( rev a6, a6 ) +CPU_BE( rev a7, a7 ) // x8[0-3] += s2[0] // x9[0-3] += s2[1] @@ -569,6 +577,10 @@ ENTRY(chacha_4block_xor_neon) add v11.4s, v11.4s, v27.4s add a10, a10, w8 add a11, a11, w9 +CPU_BE( rev a8, a8 ) +CPU_BE( rev a9, a9 ) +CPU_BE( rev a10, a10 ) +CPU_BE( rev a11, a11 ) // x12[0-3] += s3[0] // x13[0-3] += s3[1] @@ -586,6 +598,10 @@ ENTRY(chacha_4block_xor_neon) add v15.4s, v15.4s, v31.4s add a14, a14, w8 add a15, a15, w9 +CPU_BE( rev a12, a12 ) +CPU_BE( rev a13, a13 ) +CPU_BE( rev a14, a14 ) +CPU_BE( rev a15, a15 ) // interleave 32-bit words in state n, n+1 ldp w6, w7, [x2], #64 -- cgit From f86d17e9efe010b894db231329ee36b24bcc1b24 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 22 Feb 2019 22:54:08 -0800 Subject: crypto: arm64/chacha - fix hchacha_block_neon() for big endian On big endian arm64 kernels, the xchacha20-neon and xchacha12-neon self-tests fail because hchacha_block_neon() outputs little endian words but the C code expects native endianness. Fix it to output the words in native endianness (which also makes it match the arm32 version). Fixes: cc7cf991e9eb ("crypto: arm64/chacha20 - add XChaCha20 support") Signed-off-by: Eric Biggers Reviewed-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- arch/arm64/crypto/chacha-neon-core.S | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm64/crypto/chacha-neon-core.S b/arch/arm64/crypto/chacha-neon-core.S index bfb80e10ff7b..706c4e10e9e2 100644 --- a/arch/arm64/crypto/chacha-neon-core.S +++ b/arch/arm64/crypto/chacha-neon-core.S @@ -158,8 +158,8 @@ ENTRY(hchacha_block_neon) mov w3, w2 bl chacha_permute - st1 {v0.16b}, [x1], #16 - st1 {v3.16b}, [x1] + st1 {v0.4s}, [x1], #16 + st1 {v3.4s}, [x1] ldp x29, x30, [sp], #16 ret -- cgit From 9cd05ad2910b55238e3c720c99ad896dc538301b Mon Sep 17 00:00:00 2001 From: Lan Tianyu Date: Mon, 25 Feb 2019 22:31:14 +0800 Subject: x86/hyper-v: Fix definition of HV_MAX_FLUSH_REP_COUNT The max flush rep count of HvFlushGuestPhysicalAddressList hypercall is equal with how many entries of union hv_gpa_page_range can be populated into the input parameter page. The code lacks parenthesis around PAGE_SIZE - 2 * sizeof(u64) which results in bogus computations. Add them. 
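The missing parentheses make the division apply only to the 2 * sizeof(u64) header instead of to the rest of the page. A stand-alone sketch of the arithmetic (using a 4 KiB page and an 8-byte element as stand-ins, which is an assumption made purely for the illustration):

#include <stdio.h>

#define PAGE_SIZE  4096UL
/* Assumed stand-in for sizeof(union hv_gpa_page_range). */
#define ENTRY_SIZE sizeof(unsigned long long)

/* Broken form: '/' binds tighter than '-', so only the 16-byte header gets divided. */
#define BAD_COUNT  (PAGE_SIZE - 2 * sizeof(unsigned long long) / ENTRY_SIZE)
/* Fixed form: subtract the header from the page first, then divide. */
#define GOOD_COUNT ((PAGE_SIZE - 2 * sizeof(unsigned long long)) / ENTRY_SIZE)

int main(void)
{
        printf("without parentheses: %lu entries\n", (unsigned long)BAD_COUNT);
        printf("with parentheses:    %lu entries\n", (unsigned long)GOOD_COUNT);
        return 0;
}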
Fixes: cc4edae4b924 ("x86/hyper-v: Add HvFlushGuestAddressList hypercall support") Signed-off-by: Lan Tianyu Signed-off-by: Thomas Gleixner Cc: kys@microsoft.com Cc: haiyangz@microsoft.com Cc: sthemmin@microsoft.com Cc: sashal@kernel.org Cc: bp@alien8.de Cc: hpa@zytor.com Cc: gregkh@linuxfoundation.org Cc: devel@linuxdriverproject.org Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/20190225143114.5149-1-Tianyu.Lan@microsoft.com --- arch/x86/include/asm/hyperv-tlfs.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/include/asm/hyperv-tlfs.h b/arch/x86/include/asm/hyperv-tlfs.h index 705dafc2d11a..2bdbbbcfa393 100644 --- a/arch/x86/include/asm/hyperv-tlfs.h +++ b/arch/x86/include/asm/hyperv-tlfs.h @@ -841,7 +841,7 @@ union hv_gpa_page_range { * count is equal with how many entries of union hv_gpa_page_range can * be populated into the input parameter page. */ -#define HV_MAX_FLUSH_REP_COUNT (PAGE_SIZE - 2 * sizeof(u64) / \ +#define HV_MAX_FLUSH_REP_COUNT ((PAGE_SIZE - 2 * sizeof(u64)) / \ sizeof(union hv_gpa_page_range)) struct hv_guest_mapping_flush_list { -- cgit From e30be063d6dbcc0f18b1eb25fa709fdef89201fb Mon Sep 17 00:00:00 2001 From: BOUGH CHEN Date: Thu, 28 Feb 2019 10:15:42 +0000 Subject: mmc: sdhci-esdhc-imx: correct the fix of ERR004536 Commit 18094430d6b5 ("mmc: sdhci-esdhc-imx: add ADMA Length Mismatch errata fix") involve the fix of ERR004536, but the fix is incorrect. Double confirm with IC, need to clear the bit 7 of register 0x6c rather than set this bit 7. Here is the definition of bit 7 of 0x6c: 0: enable the new IC fix for ERR004536 1: do not use the IC fix, keep the same as before Find this issue on i.MX845s-evk board when enable CMDQ, and let system in heavy loading. root@imx8mmevk:~# dd if=/dev/mmcblk2 of=/dev/null bs=1M & root@imx8mmevk:~# memtester 1000M > /dev/zero & root@imx8mmevk:~# [ 139.897220] mmc2: cqhci: timeout for tag 16 [ 139.901417] mmc2: cqhci: ============ CQHCI REGISTER DUMP =========== [ 139.907862] mmc2: cqhci: Caps: 0x0000310a | Version: 0x00000510 [ 139.914311] mmc2: cqhci: Config: 0x00001001 | Control: 0x00000000 [ 139.920753] mmc2: cqhci: Int stat: 0x00000000 | Int enab: 0x00000006 [ 139.927193] mmc2: cqhci: Int sig: 0x00000006 | Int Coal: 0x00000000 [ 139.933634] mmc2: cqhci: TDL base: 0x7809c000 | TDL up32: 0x00000000 [ 139.940073] mmc2: cqhci: Doorbell: 0x00030000 | TCN: 0x00000000 [ 139.946518] mmc2: cqhci: Dev queue: 0x00010000 | Dev Pend: 0x00010000 [ 139.952967] mmc2: cqhci: Task clr: 0x00000000 | SSC1: 0x00011000 [ 139.959411] mmc2: cqhci: SSC2: 0x00000001 | DCMD rsp: 0x00000000 [ 139.965857] mmc2: cqhci: RED mask: 0xfdf9a080 | TERRI: 0x00000000 [ 139.972308] mmc2: cqhci: Resp idx: 0x0000002e | Resp arg: 0x00000900 [ 139.978761] mmc2: sdhci: ============ SDHCI REGISTER DUMP =========== [ 139.985214] mmc2: sdhci: Sys addr: 0xb2c19000 | Version: 0x00000002 [ 139.991669] mmc2: sdhci: Blk size: 0x00000200 | Blk cnt: 0x00000400 [ 139.998127] mmc2: sdhci: Argument: 0x40110400 | Trn mode: 0x00000033 [ 140.004618] mmc2: sdhci: Present: 0x01088a8f | Host ctl: 0x00000030 [ 140.011113] mmc2: sdhci: Power: 0x00000002 | Blk gap: 0x00000080 [ 140.017583] mmc2: sdhci: Wake-up: 0x00000008 | Clock: 0x0000000f [ 140.024039] mmc2: sdhci: Timeout: 0x0000008f | Int stat: 0x00000000 [ 140.030497] mmc2: sdhci: Int enab: 0x107f4000 | Sig enab: 0x107f4000 [ 140.036972] mmc2: sdhci: AC12 err: 0x00000000 | Slot int: 0x00000502 [ 140.043426] mmc2: sdhci: Caps: 0x07eb0000 | Caps_1: 0x8000b407 [ 140.049867] mmc2: sdhci: Cmd: 
0x00002c1a | Max curr: 0x00ffffff [ 140.056314] mmc2: sdhci: Resp[0]: 0x00000900 | Resp[1]: 0xffffffff [ 140.062755] mmc2: sdhci: Resp[2]: 0x328f5903 | Resp[3]: 0x00d00f00 [ 140.069195] mmc2: sdhci: Host ctl2: 0x00000008 [ 140.073640] mmc2: sdhci: ADMA Err: 0x00000007 | ADMA Ptr: 0x7809c108 [ 140.080079] mmc2: sdhci: ============================================ [ 140.086662] mmc2: running CQE recovery Fixes: 18094430d6b5 ("mmc: sdhci-esdhc-imx: add ADMA Length Mismatch errata fix") Signed-off-by: Haibo Chen Cc: stable@vger.kernel.org Signed-off-by: Ulf Hansson --- drivers/mmc/host/sdhci-esdhc-imx.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index d0d319398a54..00d41b312c79 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -1095,11 +1095,12 @@ static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host) writel(readl(host->ioaddr + SDHCI_HOST_CONTROL) | ESDHC_BURST_LEN_EN_INCR, host->ioaddr + SDHCI_HOST_CONTROL); + /* - * erratum ESDHC_FLAG_ERR004536 fix for MX6Q TO1.2 and MX6DL - * TO1.1, it's harmless for MX6SL - */ - writel(readl(host->ioaddr + 0x6c) | BIT(7), + * erratum ESDHC_FLAG_ERR004536 fix for MX6Q TO1.2 and MX6DL + * TO1.1, it's harmless for MX6SL + */ + writel(readl(host->ioaddr + 0x6c) & ~BIT(7), host->ioaddr + 0x6c); /* disable DLL_CTRL delay line settings */ -- cgit From 8ed0579c12b2fe56a1fac2f712f58fc26c1dc49b Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Thu, 28 Feb 2019 16:34:37 +0100 Subject: kvm: properly check debugfs dentry before using it MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit debugfs can now report an error code if something went wrong instead of just NULL. So if the return value is to be used as a "real" dentry, it needs to be checked if it is an error before dereferencing it. This is now happening because of ff9fb72bc077 ("debugfs: return error values, not NULL"). syzbot has found a way to trigger multiple debugfs files attempting to be created, which fails, and then the error code gets passed to dentry_path_raw() which obviously does not like it. 
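The safe pattern is the same wherever an API can return either NULL or an ERR_PTR()-encoded error: test with IS_ERR_OR_NULL() before dereferencing. A self-contained user-space sketch of the idiom (re-implementing just enough of the helpers for the demonstration; these are not the real kernel definitions):

#include <stdio.h>

#define MAX_ERRNO 4095UL

/* Minimal user-space copies of the kernel's ERR_PTR()/IS_ERR_OR_NULL() idiom. */
static void *ERR_PTR(long error)
{
        return (void *)error;
}

static int IS_ERR_OR_NULL(const void *ptr)
{
        return !ptr || (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void try_use(const void *dentry, const char *what)
{
        if (IS_ERR_OR_NULL(dentry))
                printf("%-18s -> skip, not a usable dentry\n", what);
        else
                printf("%-18s -> safe to dereference\n", what);
}

int main(void)
{
        int obj = 0;

        try_use(NULL, "NULL");                     /* what the old check caught */
        try_use(ERR_PTR(-2), "ERR_PTR(-ENOENT)");  /* what the old check missed */
        try_use(&obj, "valid pointer");
        return 0;
}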
Reported-by: Eric Biggers Reported-and-tested-by: syzbot+7857962b4d45e602b8ad@syzkaller.appspotmail.com Cc: "Radim Krčmář" Cc: kvm@vger.kernel.org Acked-by: Paolo Bonzini Signed-off-by: Greg Kroah-Hartman Signed-off-by: Linus Torvalds --- virt/kvm/kvm_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 585845203db8..076bc38963bf 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -4044,7 +4044,7 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm) } add_uevent_var(env, "PID=%d", kvm->userspace_pid); - if (kvm->debugfs_dentry) { + if (!IS_ERR_OR_NULL(kvm->debugfs_dentry)) { char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL); if (p) { -- cgit From 6baec880d7a53cbc2841123e56ee31e830df9b49 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Thu, 28 Feb 2019 16:21:58 -0800 Subject: kasan: turn off asan-stack for clang-8 and earlier Building an arm64 allmodconfig kernel with clang results in over 140 warnings about overly large stack frames, the worst ones being: drivers/gpu/drm/panel/panel-sitronix-st7789v.c:196:12: error: stack frame size of 20224 bytes in function 'st7789v_prepare' drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c:196:12: error: stack frame size of 13120 bytes in function 'td028ttec1_panel_enable' drivers/usb/host/max3421-hcd.c:1395:1: error: stack frame size of 10048 bytes in function 'max3421_spi_thread' drivers/net/wan/slic_ds26522.c:209:12: error: stack frame size of 9664 bytes in function 'slic_ds26522_probe' drivers/crypto/ccp/ccp-ops.c:2434:5: error: stack frame size of 8832 bytes in function 'ccp_run_cmd' drivers/media/dvb-frontends/stv0367.c:1005:12: error: stack frame size of 7840 bytes in function 'stv0367ter_algo' None of these happen with gcc today, and almost all of these are the result of a single known issue in llvm. Hopefully it will eventually get fixed with the clang-9 release. In the meantime, the best idea I have is to turn off asan-stack for clang-8 and earlier, so we can produce a kernel that is safe to run. I have posted three patches that address the frame overflow warnings that are not addressed by turning off asan-stack, so in combination with this change, we get much closer to a clean allmodconfig build, which in turn is necessary to do meaningful build regression testing. It is still possible to turn on the CONFIG_ASAN_STACK option on all versions of clang, and it's always enabled for gcc, but when CONFIG_COMPILE_TEST is set, the option remains invisible, so allmodconfig and randconfig builds (which are normally done with a forced CONFIG_COMPILE_TEST) will still result in a mostly clean build. 
Link: http://lkml.kernel.org/r/20190222222950.3997333-1-arnd@arndb.de Link: https://bugs.llvm.org/show_bug.cgi?id=38809 Signed-off-by: Arnd Bergmann Reviewed-by: Qian Cai Reviewed-by: Mark Brown Acked-by: Andrey Ryabinin Cc: Dmitry Vyukov Cc: Nick Desaulniers Cc: Kostya Serebryany Cc: Andrey Konovalov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/Kconfig.kasan | 22 ++++++++++++++++++++++ scripts/Makefile.kasan | 2 +- 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan index d8c474b6691e..9737059ec58b 100644 --- a/lib/Kconfig.kasan +++ b/lib/Kconfig.kasan @@ -113,6 +113,28 @@ config KASAN_INLINE endchoice +config KASAN_STACK_ENABLE + bool "Enable stack instrumentation (unsafe)" if CC_IS_CLANG && !COMPILE_TEST + default !(CLANG_VERSION < 90000) + depends on KASAN + help + The LLVM stack address sanitizer has a know problem that + causes excessive stack usage in a lot of functions, see + https://bugs.llvm.org/show_bug.cgi?id=38809 + Disabling asan-stack makes it safe to run kernels build + with clang-8 with KASAN enabled, though it loses some of + the functionality. + This feature is always disabled when compile-testing with clang-8 + or earlier to avoid cluttering the output in stack overflow + warnings, but clang-8 users can still enable it for builds without + CONFIG_COMPILE_TEST. On gcc and later clang versions it is + assumed to always be safe to use and enabled by default. + +config KASAN_STACK + int + default 1 if KASAN_STACK_ENABLE || CC_IS_GCC + default 0 + config KASAN_S390_4_LEVEL_PAGING bool "KASan: use 4-level paging" depends on KASAN && S390 diff --git a/scripts/Makefile.kasan b/scripts/Makefile.kasan index 25c259df8ffa..6deabedc67fc 100644 --- a/scripts/Makefile.kasan +++ b/scripts/Makefile.kasan @@ -26,7 +26,7 @@ else CFLAGS_KASAN := $(CFLAGS_KASAN_SHADOW) \ $(call cc-param,asan-globals=1) \ $(call cc-param,asan-instrumentation-with-call-threshold=$(call_threshold)) \ - $(call cc-param,asan-stack=1) \ + $(call cc-param,asan-stack=$(CONFIG_KASAN_STACK)) \ $(call cc-param,asan-use-after-scope=1) \ $(call cc-param,asan-instrument-allocas=1) endif -- cgit From cb6acd01e2e43fd8bad11155752b7699c3d0fb76 Mon Sep 17 00:00:00 2001 From: Mike Kravetz Date: Thu, 28 Feb 2019 16:22:02 -0800 Subject: hugetlbfs: fix races and page leaks during migration hugetlb pages should only be migrated if they are 'active'. The routines set/clear_page_huge_active() modify the active state of hugetlb pages. When a new hugetlb page is allocated at fault time, set_page_huge_active is called before the page is locked. Therefore, another thread could race and migrate the page while it is being added to page table by the fault code. This race is somewhat hard to trigger, but can be seen by strategically adding udelay to simulate worst case scheduling behavior. Depending on 'how' the code races, various BUG()s could be triggered. To address this issue, simply delay the set_page_huge_active call until after the page is successfully added to the page table. Hugetlb pages can also be leaked at migration time if the pages are associated with a file in an explicitly mounted hugetlbfs filesystem. For example, consider a two node system with 4GB worth of huge pages available. A program mmaps a 2G file in a hugetlbfs filesystem. It then migrates the pages associated with the file from one node to another. 
When the program exits, huge page counts are as follows: node0 1024 free_hugepages 1024 nr_hugepages node1 0 free_hugepages 1024 nr_hugepages Filesystem Size Used Avail Use% Mounted on nodev 4.0G 2.0G 2.0G 50% /var/opt/hugepool That is as expected. 2G of huge pages are taken from the free_hugepages counts, and 2G is the size of the file in the explicitly mounted filesystem. If the file is then removed, the counts become: node0 1024 free_hugepages 1024 nr_hugepages node1 1024 free_hugepages 1024 nr_hugepages Filesystem Size Used Avail Use% Mounted on nodev 4.0G 2.0G 2.0G 50% /var/opt/hugepool Note that the filesystem still shows 2G of pages used, while there actually are no huge pages in use. The only way to 'fix' the filesystem accounting is to unmount the filesystem If a hugetlb page is associated with an explicitly mounted filesystem, this information in contained in the page_private field. At migration time, this information is not preserved. To fix, simply transfer page_private from old to new page at migration time if necessary. There is a related race with removing a huge page from a file and migration. When a huge page is removed from the pagecache, the page_mapping() field is cleared, yet page_private remains set until the page is actually freed by free_huge_page(). A page could be migrated while in this state. However, since page_mapping() is not set the hugetlbfs specific routine to transfer page_private is not called and we leak the page count in the filesystem. To fix that, check for this condition before migrating a huge page. If the condition is detected, return EBUSY for the page. Link: http://lkml.kernel.org/r/74510272-7319-7372-9ea6-ec914734c179@oracle.com Link: http://lkml.kernel.org/r/20190212221400.3512-1-mike.kravetz@oracle.com Fixes: bcc54222309c ("mm: hugetlb: introduce page_huge_active") Signed-off-by: Mike Kravetz Reviewed-by: Naoya Horiguchi Cc: Michal Hocko Cc: Andrea Arcangeli Cc: "Kirill A . Shutemov" Cc: Mel Gorman Cc: Davidlohr Bueso Cc: [mike.kravetz@oracle.com: v2] Link: http://lkml.kernel.org/r/7534d322-d782-8ac6-1c8d-a8dc380eb3ab@oracle.com [mike.kravetz@oracle.com: update comment and changelog] Link: http://lkml.kernel.org/r/420bcfd6-158b-38e4-98da-26d0cd85bd01@oracle.com Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/hugetlbfs/inode.c | 12 ++++++++++++ mm/hugetlb.c | 16 +++++++++++++--- mm/migrate.c | 11 +++++++++++ 3 files changed, 36 insertions(+), 3 deletions(-) diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 32920a10100e..a7fa037b876b 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -859,6 +859,18 @@ static int hugetlbfs_migrate_page(struct address_space *mapping, rc = migrate_huge_page_move_mapping(mapping, newpage, page); if (rc != MIGRATEPAGE_SUCCESS) return rc; + + /* + * page_private is subpool pointer in hugetlb pages. Transfer to + * new page. PagePrivate is not associated with page_private for + * hugetlb pages and can not be set here as only page_huge_active + * pages can be migrated. 
+ */ + if (page_private(page)) { + set_page_private(newpage, page_private(page)); + set_page_private(page, 0); + } + if (mode != MIGRATE_SYNC_NO_COPY) migrate_page_copy(newpage, page); else diff --git a/mm/hugetlb.c b/mm/hugetlb.c index afef61656c1e..8dfdffc34a99 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -3624,7 +3624,6 @@ retry_avoidcopy: copy_user_huge_page(new_page, old_page, address, vma, pages_per_huge_page(h)); __SetPageUptodate(new_page); - set_page_huge_active(new_page); mmu_notifier_range_init(&range, mm, haddr, haddr + huge_page_size(h)); mmu_notifier_invalidate_range_start(&range); @@ -3645,6 +3644,7 @@ retry_avoidcopy: make_huge_pte(vma, new_page, 1)); page_remove_rmap(old_page, true); hugepage_add_new_anon_rmap(new_page, vma, haddr); + set_page_huge_active(new_page); /* Make the old page be freed below */ new_page = old_page; } @@ -3729,6 +3729,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm, pte_t new_pte; spinlock_t *ptl; unsigned long haddr = address & huge_page_mask(h); + bool new_page = false; /* * Currently, we are forced to kill the process in the event the @@ -3790,7 +3791,7 @@ retry: } clear_huge_page(page, address, pages_per_huge_page(h)); __SetPageUptodate(page); - set_page_huge_active(page); + new_page = true; if (vma->vm_flags & VM_MAYSHARE) { int err = huge_add_to_page_cache(page, mapping, idx); @@ -3861,6 +3862,15 @@ retry: } spin_unlock(ptl); + + /* + * Only make newly allocated pages active. Existing pages found + * in the pagecache could be !page_huge_active() if they have been + * isolated for migration. + */ + if (new_page) + set_page_huge_active(page); + unlock_page(page); out: return ret; @@ -4095,7 +4105,6 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, * the set_pte_at() write. */ __SetPageUptodate(page); - set_page_huge_active(page); mapping = dst_vma->vm_file->f_mapping; idx = vma_hugecache_offset(h, dst_vma, dst_addr); @@ -4163,6 +4172,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, update_mmu_cache(dst_vma, dst_addr, dst_pte); spin_unlock(ptl); + set_page_huge_active(page); if (vm_shared) unlock_page(page); ret = 0; diff --git a/mm/migrate.c b/mm/migrate.c index d4fd680be3b0..181f5d2718a9 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -1315,6 +1315,16 @@ static int unmap_and_move_huge_page(new_page_t get_new_page, lock_page(hpage); } + /* + * Check for pages which are in the process of being freed. Without + * page_mapping() set, hugetlbfs specific move page routine will not + * be called and we could leak usage counts for subpools. + */ + if (page_private(hpage) && !page_mapping(hpage)) { + rc = -EBUSY; + goto out_unlock; + } + if (PageAnon(hpage)) anon_vma = page_get_anon_vma(hpage); @@ -1345,6 +1355,7 @@ put_anon: put_new_page = NULL; } +out_unlock: unlock_page(hpage); out: if (rc != -EAGAIN) -- cgit From 46b1c18f9deb326a7e18348e668e4c7ab7c7458b Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 28 Feb 2019 12:55:43 -0800 Subject: net: sched: put back q.qlen into a single location In the series fc8b81a5981f ("Merge branch 'lockless-qdisc-series'") John made the assumption that the data path had no need to read the qdisc qlen (number of packets in the qdisc). It is true when pfifo_fast is used as the root qdisc, or as direct MQ/MQPRIO children. But pfifo_fast can be used as leaf in class full qdiscs, and existing logic needs to access the child qlen in an efficient way. 
HTB breaks badly, since it uses cl->leaf.q->q.qlen in : htb_activate() -> WARN_ON() htb_dequeue_tree() to decide if a class can be htb_deactivated when it has no more packets. HFSC, DRR, CBQ, QFQ have similar issues, and some calls to qdisc_tree_reduce_backlog() also read q.qlen directly. Using qdisc_qlen_sum() (which iterates over all possible cpus) in the data path is a non starter. It seems we have to put back qlen in a central location, at least for stable kernels. For all qdisc but pfifo_fast, qlen is guarded by the qdisc lock, so the existing q.qlen{++|--} are correct. For 'lockless' qdisc (pfifo_fast so far), we need to use atomic_{inc|dec}() because the spinlock might be not held (for example from pfifo_fast_enqueue() and pfifo_fast_dequeue()) This patch adds atomic_qlen (in the same location than qlen) and renames the following helpers, since we want to express they can be used without qdisc lock, and that qlen is no longer percpu. - qdisc_qstats_cpu_qlen_dec -> qdisc_qstats_atomic_qlen_dec() - qdisc_qstats_cpu_qlen_inc -> qdisc_qstats_atomic_qlen_inc() Later (net-next) we might revert this patch by tracking all these qlen uses and replace them by a more efficient method (not having to access a precise qlen, but an empty/non_empty status that might be less expensive to maintain/track). Another possibility is to have a legacy pfifo_fast version that would be used when used a a child qdisc, since the parent qdisc needs a spinlock anyway. But then, future lockless qdiscs would also have the same problem. Fixes: 7e66016f2c65 ("net: sched: helpers to sum qlen and qlen for per cpu logic") Signed-off-by: Eric Dumazet Cc: John Fastabend Cc: Jamal Hadi Salim Cc: Cong Wang Cc: Jiri Pirko Signed-off-by: David S. Miller --- include/net/sch_generic.h | 31 +++++++++++++------------------ net/core/gen_stats.c | 2 -- net/sched/sch_generic.c | 13 ++++++------- 3 files changed, 19 insertions(+), 27 deletions(-) diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 9481f2c142e2..e7eb4aa6ccc9 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -51,7 +51,10 @@ struct qdisc_size_table { struct qdisc_skb_head { struct sk_buff *head; struct sk_buff *tail; - __u32 qlen; + union { + u32 qlen; + atomic_t atomic_qlen; + }; spinlock_t lock; }; @@ -408,27 +411,19 @@ static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz) BUILD_BUG_ON(sizeof(qcb->data) < sz); } -static inline int qdisc_qlen_cpu(const struct Qdisc *q) -{ - return this_cpu_ptr(q->cpu_qstats)->qlen; -} - static inline int qdisc_qlen(const struct Qdisc *q) { return q->q.qlen; } -static inline int qdisc_qlen_sum(const struct Qdisc *q) +static inline u32 qdisc_qlen_sum(const struct Qdisc *q) { - __u32 qlen = q->qstats.qlen; - int i; + u32 qlen = q->qstats.qlen; - if (q->flags & TCQ_F_NOLOCK) { - for_each_possible_cpu(i) - qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen; - } else { + if (q->flags & TCQ_F_NOLOCK) + qlen += atomic_read(&q->q.atomic_qlen); + else qlen += q->q.qlen; - } return qlen; } @@ -825,14 +820,14 @@ static inline void qdisc_qstats_cpu_backlog_inc(struct Qdisc *sch, this_cpu_add(sch->cpu_qstats->backlog, qdisc_pkt_len(skb)); } -static inline void qdisc_qstats_cpu_qlen_inc(struct Qdisc *sch) +static inline void qdisc_qstats_atomic_qlen_inc(struct Qdisc *sch) { - this_cpu_inc(sch->cpu_qstats->qlen); + atomic_inc(&sch->q.atomic_qlen); } -static inline void qdisc_qstats_cpu_qlen_dec(struct Qdisc *sch) +static inline void qdisc_qstats_atomic_qlen_dec(struct Qdisc *sch) { - 
this_cpu_dec(sch->cpu_qstats->qlen); + atomic_dec(&sch->q.atomic_qlen); } static inline void qdisc_qstats_cpu_requeues_inc(struct Qdisc *sch) diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c index 9bf1b9ad1780..ac679f74ba47 100644 --- a/net/core/gen_stats.c +++ b/net/core/gen_stats.c @@ -291,7 +291,6 @@ __gnet_stats_copy_queue_cpu(struct gnet_stats_queue *qstats, for_each_possible_cpu(i) { const struct gnet_stats_queue *qcpu = per_cpu_ptr(q, i); - qstats->qlen = 0; qstats->backlog += qcpu->backlog; qstats->drops += qcpu->drops; qstats->requeues += qcpu->requeues; @@ -307,7 +306,6 @@ void __gnet_stats_copy_queue(struct gnet_stats_queue *qstats, if (cpu) { __gnet_stats_copy_queue_cpu(qstats, cpu); } else { - qstats->qlen = q->qlen; qstats->backlog = q->backlog; qstats->drops = q->drops; qstats->requeues = q->requeues; diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 968a85fe4d4a..de31f2f3b973 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -68,7 +68,7 @@ static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q) skb = __skb_dequeue(&q->skb_bad_txq); if (qdisc_is_percpu_stats(q)) { qdisc_qstats_cpu_backlog_dec(q, skb); - qdisc_qstats_cpu_qlen_dec(q); + qdisc_qstats_atomic_qlen_dec(q); } else { qdisc_qstats_backlog_dec(q, skb); q->q.qlen--; @@ -108,7 +108,7 @@ static inline void qdisc_enqueue_skb_bad_txq(struct Qdisc *q, if (qdisc_is_percpu_stats(q)) { qdisc_qstats_cpu_backlog_inc(q, skb); - qdisc_qstats_cpu_qlen_inc(q); + qdisc_qstats_atomic_qlen_inc(q); } else { qdisc_qstats_backlog_inc(q, skb); q->q.qlen++; @@ -147,7 +147,7 @@ static inline int dev_requeue_skb_locked(struct sk_buff *skb, struct Qdisc *q) qdisc_qstats_cpu_requeues_inc(q); qdisc_qstats_cpu_backlog_inc(q, skb); - qdisc_qstats_cpu_qlen_inc(q); + qdisc_qstats_atomic_qlen_inc(q); skb = next; } @@ -252,7 +252,7 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate, skb = __skb_dequeue(&q->gso_skb); if (qdisc_is_percpu_stats(q)) { qdisc_qstats_cpu_backlog_dec(q, skb); - qdisc_qstats_cpu_qlen_dec(q); + qdisc_qstats_atomic_qlen_dec(q); } else { qdisc_qstats_backlog_dec(q, skb); q->q.qlen--; @@ -645,7 +645,7 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc, if (unlikely(err)) return qdisc_drop_cpu(skb, qdisc, to_free); - qdisc_qstats_cpu_qlen_inc(qdisc); + qdisc_qstats_atomic_qlen_inc(qdisc); /* Note: skb can not be used after skb_array_produce(), * so we better not use qdisc_qstats_cpu_backlog_inc() */ @@ -670,7 +670,7 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc) if (likely(skb)) { qdisc_qstats_cpu_backlog_dec(qdisc, skb); qdisc_bstats_cpu_update(qdisc, skb); - qdisc_qstats_cpu_qlen_dec(qdisc); + qdisc_qstats_atomic_qlen_dec(qdisc); } return skb; @@ -714,7 +714,6 @@ static void pfifo_fast_reset(struct Qdisc *qdisc) struct gnet_stats_queue *q = per_cpu_ptr(qdisc->cpu_qstats, i); q->backlog = 0; - q->qlen = 0; } } -- cgit From 084e5bb16bd7dc2b551bbd9fb358bf73e03ee8d8 Mon Sep 17 00:00:00 2001 From: Kavya Sree Kotagiri Date: Thu, 28 Feb 2019 07:32:22 +0000 Subject: net: mscc: Enable all ports in QSGMII When Ocelot phy-mode is QSGMII, all 4 ports involved in QSGMII shall be kept out of reset and Tx lanes shall be enabled to pass the data. Fixes: a556c76adc05 ("net: mscc: Add initial Ocelot switch support") Signed-off-by: Kavya Sree Kotagiri Signed-off-by: Steen Hegelund Co-developed-by: Steen Hegelund Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mscc/ocelot_board.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c index ca3ea2fbfcd0..80d87798c62b 100644 --- a/drivers/net/ethernet/mscc/ocelot_board.c +++ b/drivers/net/ethernet/mscc/ocelot_board.c @@ -267,6 +267,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev) struct phy *serdes; void __iomem *regs; char res_name[8]; + int phy_mode; u32 port; if (of_property_read_u32(portnp, "reg", &port)) @@ -292,11 +293,11 @@ static int mscc_ocelot_probe(struct platform_device *pdev) if (err) return err; - err = of_get_phy_mode(portnp); - if (err < 0) + phy_mode = of_get_phy_mode(portnp); + if (phy_mode < 0) ocelot->ports[port]->phy_mode = PHY_INTERFACE_MODE_NA; else - ocelot->ports[port]->phy_mode = err; + ocelot->ports[port]->phy_mode = phy_mode; switch (ocelot->ports[port]->phy_mode) { case PHY_INTERFACE_MODE_NA: @@ -304,6 +305,13 @@ static int mscc_ocelot_probe(struct platform_device *pdev) case PHY_INTERFACE_MODE_SGMII: break; case PHY_INTERFACE_MODE_QSGMII: + /* Ensure clock signals and speed is set on all + * QSGMII links + */ + ocelot_port_writel(ocelot->ports[port], + DEV_CLOCK_CFG_LINK_SPEED + (OCELOT_SPEED_1000), + DEV_CLOCK_CFG); break; default: dev_err(ocelot->dev, -- cgit From e2bcd8b0ce6ee3410665765db0d44dd8b7e3b348 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Fri, 1 Mar 2019 10:57:56 +0800 Subject: appletalk: use remove_proc_subtree to simplify procfs code Use remove_proc_subtree to remove the whole subtree on cleanup.Also do some cleanup. Signed-off-by: YueHaibing Signed-off-by: David S. Miller --- net/appletalk/atalk_proc.c | 56 ++++++++++++++-------------------------------- 1 file changed, 17 insertions(+), 39 deletions(-) diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c index 8006295f8bd7..bd8734ef80b8 100644 --- a/net/appletalk/atalk_proc.c +++ b/net/appletalk/atalk_proc.c @@ -210,56 +210,34 @@ static const struct seq_operations atalk_seq_socket_ops = { .show = atalk_seq_socket_show, }; -static struct proc_dir_entry *atalk_proc_dir; - int __init atalk_proc_init(void) { - struct proc_dir_entry *p; - int rc = -ENOMEM; + if (!proc_mkdir("atalk", init_net.proc_net)) + return -ENOMEM; - atalk_proc_dir = proc_mkdir("atalk", init_net.proc_net); - if (!atalk_proc_dir) + if (!proc_create_seq("atalk/interface", 0444, init_net.proc_net, + &atalk_seq_interface_ops)) goto out; - p = proc_create_seq("interface", 0444, atalk_proc_dir, - &atalk_seq_interface_ops); - if (!p) - goto out_interface; - - p = proc_create_seq("route", 0444, atalk_proc_dir, - &atalk_seq_route_ops); - if (!p) - goto out_route; + if (!proc_create_seq("atalk/route", 0444, init_net.proc_net, + &atalk_seq_route_ops)) + goto out; - p = proc_create_seq("socket", 0444, atalk_proc_dir, - &atalk_seq_socket_ops); - if (!p) - goto out_socket; + if (!proc_create_seq("atalk/socket", 0444, init_net.proc_net, + &atalk_seq_socket_ops)) + goto out; - p = proc_create_seq_private("arp", 0444, atalk_proc_dir, &aarp_seq_ops, - sizeof(struct aarp_iter_state), NULL); - if (!p) - goto out_arp; + if (!proc_create_seq_private("atalk/arp", 0444, init_net.proc_net, + &aarp_seq_ops, + sizeof(struct aarp_iter_state), NULL)) + goto out; - rc = 0; out: - return rc; -out_arp: - remove_proc_entry("socket", atalk_proc_dir); -out_socket: - remove_proc_entry("route", atalk_proc_dir); -out_route: - remove_proc_entry("interface", atalk_proc_dir); -out_interface: - 
remove_proc_entry("atalk", init_net.proc_net); - goto out; + remove_proc_subtree("atalk", init_net.proc_net); + return -ENOMEM; } void __exit atalk_proc_exit(void) { - remove_proc_entry("interface", atalk_proc_dir); - remove_proc_entry("route", atalk_proc_dir); - remove_proc_entry("socket", atalk_proc_dir); - remove_proc_entry("arp", atalk_proc_dir); - remove_proc_entry("atalk", init_net.proc_net); + remove_proc_subtree("atalk", init_net.proc_net); } -- cgit From 6377f787aeb945cae7abbb6474798de129e1f3ac Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Fri, 1 Mar 2019 10:57:57 +0800 Subject: appletalk: Fix use-after-free in atalk_proc_exit KASAN report this: BUG: KASAN: use-after-free in pde_subdir_find+0x12d/0x150 fs/proc/generic.c:71 Read of size 8 at addr ffff8881f41fe5b0 by task syz-executor.0/2806 CPU: 0 PID: 2806 Comm: syz-executor.0 Not tainted 5.0.0-rc7+ #45 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1ubuntu1 04/01/2014 Call Trace: __dump_stack lib/dump_stack.c:77 [inline] dump_stack+0xfa/0x1ce lib/dump_stack.c:113 print_address_description+0x65/0x270 mm/kasan/report.c:187 kasan_report+0x149/0x18d mm/kasan/report.c:317 pde_subdir_find+0x12d/0x150 fs/proc/generic.c:71 remove_proc_entry+0xe8/0x420 fs/proc/generic.c:667 atalk_proc_exit+0x18/0x820 [appletalk] atalk_exit+0xf/0x5a [appletalk] __do_sys_delete_module kernel/module.c:1018 [inline] __se_sys_delete_module kernel/module.c:961 [inline] __x64_sys_delete_module+0x3dc/0x5e0 kernel/module.c:961 do_syscall_64+0x147/0x600 arch/x86/entry/common.c:290 entry_SYSCALL_64_after_hwframe+0x49/0xbe RIP: 0033:0x462e99 Code: f7 d8 64 89 02 b8 ff ff ff ff c3 66 0f 1f 44 00 00 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 bc ff ff ff f7 d8 64 89 01 48 RSP: 002b:00007fb2de6b9c58 EFLAGS: 00000246 ORIG_RAX: 00000000000000b0 RAX: ffffffffffffffda RBX: 000000000073bf00 RCX: 0000000000462e99 RDX: 0000000000000000 RSI: 0000000000000000 RDI: 00000000200001c0 RBP: 0000000000000002 R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000246 R12: 00007fb2de6ba6bc R13: 00000000004bccaa R14: 00000000006f6bc8 R15: 00000000ffffffff Allocated by task 2806: set_track mm/kasan/common.c:85 [inline] __kasan_kmalloc.constprop.3+0xa0/0xd0 mm/kasan/common.c:496 slab_post_alloc_hook mm/slab.h:444 [inline] slab_alloc_node mm/slub.c:2739 [inline] slab_alloc mm/slub.c:2747 [inline] kmem_cache_alloc+0xcf/0x250 mm/slub.c:2752 kmem_cache_zalloc include/linux/slab.h:730 [inline] __proc_create+0x30f/0xa20 fs/proc/generic.c:408 proc_mkdir_data+0x47/0x190 fs/proc/generic.c:469 0xffffffffc10c01bb 0xffffffffc10c0166 do_one_initcall+0xfa/0x5ca init/main.c:887 do_init_module+0x204/0x5f6 kernel/module.c:3460 load_module+0x66b2/0x8570 kernel/module.c:3808 __do_sys_finit_module+0x238/0x2a0 kernel/module.c:3902 do_syscall_64+0x147/0x600 arch/x86/entry/common.c:290 entry_SYSCALL_64_after_hwframe+0x49/0xbe Freed by task 2806: set_track mm/kasan/common.c:85 [inline] __kasan_slab_free+0x130/0x180 mm/kasan/common.c:458 slab_free_hook mm/slub.c:1409 [inline] slab_free_freelist_hook mm/slub.c:1436 [inline] slab_free mm/slub.c:2986 [inline] kmem_cache_free+0xa6/0x2a0 mm/slub.c:3002 pde_put+0x6e/0x80 fs/proc/generic.c:647 remove_proc_entry+0x1d3/0x420 fs/proc/generic.c:684 0xffffffffc10c031c 0xffffffffc10c0166 do_one_initcall+0xfa/0x5ca init/main.c:887 do_init_module+0x204/0x5f6 kernel/module.c:3460 load_module+0x66b2/0x8570 kernel/module.c:3808 __do_sys_finit_module+0x238/0x2a0 
kernel/module.c:3902 do_syscall_64+0x147/0x600 arch/x86/entry/common.c:290 entry_SYSCALL_64_after_hwframe+0x49/0xbe The buggy address belongs to the object at ffff8881f41fe500 which belongs to the cache proc_dir_entry of size 256 The buggy address is located 176 bytes inside of 256-byte region [ffff8881f41fe500, ffff8881f41fe600) The buggy address belongs to the page: page:ffffea0007d07f80 count:1 mapcount:0 mapping:ffff8881f6e69a00 index:0x0 flags: 0x2fffc0000000200(slab) raw: 02fffc0000000200 dead000000000100 dead000000000200 ffff8881f6e69a00 raw: 0000000000000000 00000000800c000c 00000001ffffffff 0000000000000000 page dumped because: kasan: bad access detected Memory state around the buggy address: ffff8881f41fe480: fb fb fb fb fb fb fb fb fc fc fc fc fc fc fc fc ffff8881f41fe500: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb >ffff8881f41fe580: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb ^ ffff8881f41fe600: fc fc fc fc fc fc fc fc fb fb fb fb fb fb fb fb ffff8881f41fe680: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb It should check the return value of atalk_proc_init fails, otherwise atalk_exit will trgger use-after-free in pde_subdir_find while unload the module.This patch fix error cleanup path of atalk_init Reported-by: Hulk Robot Signed-off-by: YueHaibing Signed-off-by: David S. Miller --- include/linux/atalk.h | 2 +- net/appletalk/atalk_proc.c | 2 +- net/appletalk/ddp.c | 37 +++++++++++++++++++++++++++++++------ net/appletalk/sysctl_net_atalk.c | 5 ++++- 4 files changed, 37 insertions(+), 9 deletions(-) diff --git a/include/linux/atalk.h b/include/linux/atalk.h index 23f805562f4e..5a90f28d5ff2 100644 --- a/include/linux/atalk.h +++ b/include/linux/atalk.h @@ -158,7 +158,7 @@ extern int sysctl_aarp_retransmit_limit; extern int sysctl_aarp_resolve_time; #ifdef CONFIG_SYSCTL -extern void atalk_register_sysctl(void); +extern int atalk_register_sysctl(void); extern void atalk_unregister_sysctl(void); #else #define atalk_register_sysctl() do { } while(0) diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c index bd8734ef80b8..77f203f1febc 100644 --- a/net/appletalk/atalk_proc.c +++ b/net/appletalk/atalk_proc.c @@ -237,7 +237,7 @@ out: return -ENOMEM; } -void __exit atalk_proc_exit(void) +void atalk_proc_exit(void) { remove_proc_subtree("atalk", init_net.proc_net); } diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c index 9b6bc5abe946..795fbc6c06aa 100644 --- a/net/appletalk/ddp.c +++ b/net/appletalk/ddp.c @@ -1910,12 +1910,16 @@ static const char atalk_err_snap[] __initconst = /* Called by proto.c on kernel start up */ static int __init atalk_init(void) { - int rc = proto_register(&ddp_proto, 0); + int rc; - if (rc != 0) + rc = proto_register(&ddp_proto, 0); + if (rc) goto out; - (void)sock_register(&atalk_family_ops); + rc = sock_register(&atalk_family_ops); + if (rc) + goto out_proto; + ddp_dl = register_snap_client(ddp_snap_id, atalk_rcv); if (!ddp_dl) printk(atalk_err_snap); @@ -1923,12 +1927,33 @@ static int __init atalk_init(void) dev_add_pack(<alk_packet_type); dev_add_pack(&ppptalk_packet_type); - register_netdevice_notifier(&ddp_notifier); + rc = register_netdevice_notifier(&ddp_notifier); + if (rc) + goto out_sock; + aarp_proto_init(); - atalk_proc_init(); - atalk_register_sysctl(); + rc = atalk_proc_init(); + if (rc) + goto out_aarp; + + rc = atalk_register_sysctl(); + if (rc) + goto out_proc; out: return rc; +out_proc: + atalk_proc_exit(); +out_aarp: + aarp_cleanup_module(); + unregister_netdevice_notifier(&ddp_notifier); +out_sock: + 
dev_remove_pack(&ppptalk_packet_type); + dev_remove_pack(<alk_packet_type); + unregister_snap_client(ddp_dl); + sock_unregister(PF_APPLETALK); +out_proto: + proto_unregister(&ddp_proto); + goto out; } module_init(atalk_init); diff --git a/net/appletalk/sysctl_net_atalk.c b/net/appletalk/sysctl_net_atalk.c index c744a853fa5f..d945b7c0176d 100644 --- a/net/appletalk/sysctl_net_atalk.c +++ b/net/appletalk/sysctl_net_atalk.c @@ -45,9 +45,12 @@ static struct ctl_table atalk_table[] = { static struct ctl_table_header *atalk_table_header; -void atalk_register_sysctl(void) +int __init atalk_register_sysctl(void) { atalk_table_header = register_net_sysctl(&init_net, "net/appletalk", atalk_table); + if (!atalk_table_header) + return -ENOMEM; + return 0; } void atalk_unregister_sysctl(void) -- cgit From c6195a8bdfc62a7cecf7df685e64847a4b700275 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Fri, 1 Mar 2019 19:53:57 +0100 Subject: net: dsa: mv88e6xxx: handle unknown duplex modes gracefully in mv88e6xxx_port_set_duplex When testing another issue I faced the problem that mv88e6xxx_port_setup_mac() failed due to DUPLEX_UNKNOWN being passed as argument to mv88e6xxx_port_set_duplex(). We should handle this case gracefully and return -EOPNOTSUPP, like e.g. mv88e6xxx_port_set_speed() is doing it. Fixes: 7f1ae07b51e8 ("net: dsa: mv88e6xxx: add port duplex setter") Signed-off-by: Heiner Kallweit Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx/port.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c index 79ab51e69aee..184c2b1b3115 100644 --- a/drivers/net/dsa/mv88e6xxx/port.c +++ b/drivers/net/dsa/mv88e6xxx/port.c @@ -190,7 +190,7 @@ int mv88e6xxx_port_set_duplex(struct mv88e6xxx_chip *chip, int port, int dup) /* normal duplex detection */ break; default: - return -EINVAL; + return -EOPNOTSUPP; } err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_MAC_CTL, reg); -- cgit From 895a5e96dbd6386c8e78e5b78e067dcc67b7f0ab Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Sat, 2 Mar 2019 10:34:55 +0800 Subject: net-sysfs: Fix mem leak in netdev_register_kobject syzkaller report this: BUG: memory leak unreferenced object 0xffff88837a71a500 (size 256): comm "syz-executor.2", pid 9770, jiffies 4297825125 (age 17.843s) hex dump (first 32 bytes): 00 00 00 00 ad 4e ad de ff ff ff ff 00 00 00 00 .....N.......... ff ff ff ff ff ff ff ff 20 c0 ef 86 ff ff ff ff ........ ....... backtrace: [<00000000db12624b>] netdev_register_kobject+0x124/0x2e0 net/core/net-sysfs.c:1751 [<00000000dc49a994>] register_netdevice+0xcc1/0x1270 net/core/dev.c:8516 [<00000000e5f3fea0>] tun_set_iff drivers/net/tun.c:2649 [inline] [<00000000e5f3fea0>] __tun_chr_ioctl+0x2218/0x3d20 drivers/net/tun.c:2883 [<000000001b8ac127>] vfs_ioctl fs/ioctl.c:46 [inline] [<000000001b8ac127>] do_vfs_ioctl+0x1a5/0x10e0 fs/ioctl.c:690 [<0000000079b269f8>] ksys_ioctl+0x89/0xa0 fs/ioctl.c:705 [<00000000de649beb>] __do_sys_ioctl fs/ioctl.c:712 [inline] [<00000000de649beb>] __se_sys_ioctl fs/ioctl.c:710 [inline] [<00000000de649beb>] __x64_sys_ioctl+0x74/0xb0 fs/ioctl.c:710 [<000000007ebded1e>] do_syscall_64+0xc8/0x580 arch/x86/entry/common.c:290 [<00000000db315d36>] entry_SYSCALL_64_after_hwframe+0x49/0xbe [<00000000115be9bb>] 0xffffffffffffffff It should call kset_unregister to free 'dev->queues_kset' in error path of register_queue_kobjects, otherwise will cause a mem leak. 
Reported-by: Hulk Robot
Fixes: 1d24eb4815d1 ("xps: Transmit Packet Steering")
Signed-off-by: YueHaibing
Signed-off-by: David S. Miller
---
 net/core/net-sysfs.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index ff9fd2bb4ce4..73ad7607dcd1 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -1547,6 +1547,9 @@ static int register_queue_kobjects(struct net_device *dev)
 error:
 	netdev_queue_update_kobjects(dev, txq, 0);
 	net_rx_queue_update_kobjects(dev, rxq, 0);
+#ifdef CONFIG_SYSFS
+	kset_unregister(dev->queues_kset);
+#endif
 	return error;
 }
 
-- cgit

From 95150f29ae480276e76368cdf8a9524b5a96c0ca Mon Sep 17 00:00:00 2001
From: Heiner Kallweit
Date: Sat, 2 Mar 2019 10:06:05 +0100
Subject: net: dsa: mv88e6xxx: fix number of internal PHYs for 88E6x90 family

Ports 9 and 10 don't have internal PHYs but are (depending on the
version) SERDES/SGMII/XAUI/RXAUI ports.

v2:
- fix it for all 88E6x90 family members

Fixes: bc3931557d1d ("net: dsa: mv88e6xxx: Add number of internal PHYs")
Signed-off-by: Heiner Kallweit
Reviewed-by: Andrew Lunn
Signed-off-by: David S. Miller
---
 drivers/net/dsa/mv88e6xxx/chip.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 7e3c00bd9532..e7763e25ed8f 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -4222,7 +4222,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
 		.name = "Marvell 88E6190",
 		.num_databases = 4096,
 		.num_ports = 11,	/* 10 + Z80 */
-		.num_internal_phys = 11,
+		.num_internal_phys = 9,
 		.num_gpio = 16,
 		.max_vid = 8191,
 		.port_base_addr = 0x0,
@@ -4245,7 +4245,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
 		.name = "Marvell 88E6190X",
 		.num_databases = 4096,
 		.num_ports = 11,	/* 10 + Z80 */
-		.num_internal_phys = 11,
+		.num_internal_phys = 9,
 		.num_gpio = 16,
 		.max_vid = 8191,
 		.port_base_addr = 0x0,
@@ -4268,7 +4268,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
 		.name = "Marvell 88E6191",
 		.num_databases = 4096,
 		.num_ports = 11,	/* 10 + Z80 */
-		.num_internal_phys = 11,
+		.num_internal_phys = 9,
 		.max_vid = 8191,
 		.port_base_addr = 0x0,
 		.phy_base_addr = 0x0,
@@ -4315,7 +4315,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
 		.name = "Marvell 88E6290",
 		.num_databases = 4096,
 		.num_ports = 11,	/* 10 + Z80 */
-		.num_internal_phys = 11,
+		.num_internal_phys = 9,
 		.num_gpio = 16,
 		.max_vid = 8191,
 		.port_base_addr = 0x0,
@@ -4477,7 +4477,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
 		.name = "Marvell 88E6390",
 		.num_databases = 4096,
 		.num_ports = 11,	/* 10 + Z80 */
-		.num_internal_phys = 11,
+		.num_internal_phys = 9,
 		.num_gpio = 16,
 		.max_vid = 8191,
 		.port_base_addr = 0x0,
@@ -4500,7 +4500,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
 		.name = "Marvell 88E6390X",
 		.num_databases = 4096,
 		.num_ports = 11,	/* 10 + Z80 */
-		.num_internal_phys = 11,
+		.num_internal_phys = 9,
 		.num_gpio = 16,
 		.max_vid = 8191,
 		.port_base_addr = 0x0,
-- cgit

From 822e44b45eb991c63487c5e2ce7d636411870a8d Mon Sep 17 00:00:00 2001
From: Kristian Evensen
Date: Sat, 2 Mar 2019 13:32:26 +0100
Subject: qmi_wwan: Add support for Quectel EG12/EM12
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Quectel EG12 (module)/EM12 (M.2 card) is a Cat. 12 LTE modem. The modem
behaves in the same way as the EP06, so the "set DTR"-quirk must be
applied and the diagnostic-interface check performed.
Since the diagnostic-check now applies to more modems, I have renamed
the function from quectel_ep06_diag_detected() to quectel_diag_detected().

Signed-off-by: Kristian Evensen
Acked-by: Bjørn Mork
Signed-off-by: David S. Miller
---
 drivers/net/usb/qmi_wwan.c | 26 ++++++++++++++++++--------
 1 file changed, 18 insertions(+), 8 deletions(-)

diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 18af2f8eee96..74bebbdb4b15 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -976,6 +976,13 @@ static const struct usb_device_id products[] = {
 					      0xff),
 	  .driver_info = (unsigned long)&qmi_wwan_info_quirk_dtr,
 	},
+	{	/* Quectel EG12/EM12 */
+		USB_DEVICE_AND_INTERFACE_INFO(0x2c7c, 0x0512,
+					      USB_CLASS_VENDOR_SPEC,
+					      USB_SUBCLASS_VENDOR_SPEC,
+					      0xff),
+	  .driver_info = (unsigned long)&qmi_wwan_info_quirk_dtr,
+	},
 
 	/* 3. Combined interface devices matching on interface number */
 	{QMI_FIXED_INTF(0x0408, 0xea42, 4)},	/* Yota / Megafon M100-1 */
@@ -1343,17 +1350,20 @@ static bool quectel_ec20_detected(struct usb_interface *intf)
 	return false;
 }
 
-static bool quectel_ep06_diag_detected(struct usb_interface *intf)
+static bool quectel_diag_detected(struct usb_interface *intf)
 {
 	struct usb_device *dev = interface_to_usbdev(intf);
 	struct usb_interface_descriptor intf_desc = intf->cur_altsetting->desc;
+	u16 id_vendor = le16_to_cpu(dev->descriptor.idVendor);
+	u16 id_product = le16_to_cpu(dev->descriptor.idProduct);
 
-	if (le16_to_cpu(dev->descriptor.idVendor) == 0x2c7c &&
-	    le16_to_cpu(dev->descriptor.idProduct) == 0x0306 &&
-	    intf_desc.bNumEndpoints == 2)
-		return true;
+	if (id_vendor != 0x2c7c || intf_desc.bNumEndpoints != 2)
+		return false;
 
-	return false;
+	if (id_product == 0x0306 || id_product == 0x0512)
+		return true;
+	else
+		return false;
 }
 
 static int qmi_wwan_probe(struct usb_interface *intf,
@@ -1390,13 +1400,13 @@ static int qmi_wwan_probe(struct usb_interface *intf,
 		return -ENODEV;
 	}
 
-	/* Quectel EP06/EM06/EG06 supports dynamic interface configuration, so
+	/* Several Quectel modems supports dynamic interface configuration, so
 	 * we need to match on class/subclass/protocol. These values are
 	 * identical for the diagnostic- and QMI-interface, but bNumEndpoints is
 	 * different. Ignore the current interface if the number of endpoints
 	 * the number for the diag interface (two).
 	 */
-	if (quectel_ep06_diag_detected(intf))
+	if (quectel_diag_detected(intf))
 		return -ENODEV;
 
 	return usbnet_probe(intf, id);
-- cgit
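A note on the pattern behind the atalk_init() and register_queue_kobjects() fixes above: each setup step checks its return value and, on failure, jumps to a label that undoes only the steps that already succeeded, in reverse order. The sketch below is a minimal, self-contained userspace illustration of that unwind-on-error shape; the register_*/unregister_* names and the simulated procfs failure are invented for the example and do not come from the kernel tree.

#include <stdio.h>

/* Fake subsystems: each "registration" just prints what it did. */
static int register_proto(void)     { puts("proto registered");  return 0; }
static void unregister_proto(void)  { puts("proto unregistered"); }
static int register_socket(void)    { puts("socket registered"); return 0; }
static void unregister_socket(void) { puts("socket unregistered"); }
static int register_procfs(void)    { puts("procfs registration failed"); return -1; }

static int fake_module_init(void)
{
	int rc;

	rc = register_proto();
	if (rc)
		goto out;

	rc = register_socket();
	if (rc)
		goto out_proto;

	rc = register_procfs();
	if (rc)
		goto out_socket;
out:
	return rc;

	/* Cleanup labels run in reverse registration order and fall through. */
out_socket:
	unregister_socket();
out_proto:
	unregister_proto();
	goto out;
}

int main(void)
{
	return fake_module_init() ? 1 : 0;
}

Compiled as a normal C program, this prints the two successful registrations, fails at the procfs step, and then unwinds the socket and proto registrations before returning the error, which is the same ordering guarantee the ddp.c patch restores for appletalk.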
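Similarly, the quectel_diag_detected() change above keys the diagnostic-interface check on the USB vendor ID, a small set of product IDs, and the endpoint count. The standalone sketch below models only that decision logic; struct iface_info is a made-up stand-in for the few descriptor fields the driver reads, and only the 0x2c7c vendor ID, the 0x0306/0x0512 product IDs, and the two-endpoint rule are taken from the patch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the USB descriptor fields used by the check. */
struct iface_info {
	uint16_t id_vendor;
	uint16_t id_product;
	uint8_t num_endpoints;
};

static bool is_quectel_diag(const struct iface_info *intf)
{
	/* The diag and QMI interfaces share class/subclass/protocol, so they
	 * are told apart by endpoint count: the diag interface has exactly
	 * two endpoints.
	 */
	if (intf->id_vendor != 0x2c7c || intf->num_endpoints != 2)
		return false;

	return intf->id_product == 0x0306 || intf->id_product == 0x0512;
}

int main(void)
{
	struct iface_info diag = { 0x2c7c, 0x0512, 2 };	/* two endpoints */
	struct iface_info qmi = { 0x2c7c, 0x0512, 3 };	/* same IDs, different endpoint count */

	printf("diag detected: %d\n", is_quectel_diag(&diag));	/* prints 1 */
	printf("diag detected: %d\n", is_quectel_diag(&qmi));	/* prints 0 */
	return 0;
}

A probe routine built on such a helper would bail out for the diagnostic interface and continue for the QMI interface, mirroring how qmi_wwan_probe() returns -ENODEV when quectel_diag_detected() matches.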