203 files changed, 4394 insertions, 1979 deletions
diff --git a/Documentation/ABI/testing/sysfs-firmware-acpi b/Documentation/ABI/testing/sysfs-firmware-acpi index b16d30a71709..819939d858c9 100644 --- a/Documentation/ABI/testing/sysfs-firmware-acpi +++ b/Documentation/ABI/testing/sysfs-firmware-acpi @@ -1,3 +1,46 @@ +What: /sys/firmware/acpi/fpdt/ +Date: Jan 2021 +Contact: Zhang Rui <rui.zhang@intel.com> +Description: + ACPI Firmware Performance Data Table (FPDT) provides + information for firmware performance data for system boot, + S3 suspend and S3 resume. This sysfs entry contains the + performance data retrieved from the FPDT. + + boot: + firmware_start_ns: Timer value logged at the beginning + of firmware image execution. In nanoseconds. + bootloader_load_ns: Timer value logged just prior to + loading the OS boot loader into memory. + In nanoseconds. + bootloader_launch_ns: Timer value logged just prior to + launching the currently loaded OS boot loader + image. In nanoseconds. + exitbootservice_start_ns: Timer value logged at the + point when the OS loader calls the + ExitBootServices function for UEFI compatible + firmware. In nanoseconds. + exitbootservice_end_ns: Timer value logged at the point + just prior to the OS loader gaining control + back from the ExitBootServices function for + UEFI compatible firmware. In nanoseconds. + suspend: + suspend_start_ns: Timer value recorded at the previous + OS write to SLP_TYP upon entry to S3. In + nanoseconds. + suspend_end_ns: Timer value recorded at the previous + firmware write to SLP_TYP used to trigger + hardware entry to S3. In nanoseconds. + resume: + resume_count: A count of the number of S3 resume cycles + since the last full boot sequence. + resume_avg_ns: Average timer value of all resume cycles + logged since the last full boot sequence, + including the most recent resume. In nanoseconds. + resume_prev_ns: Timer recorded at the end of the previous + platform runtime firmware S3 resume, just prior to + handoff to the OS waking vector. In nanoseconds. + What: /sys/firmware/acpi/bgrt/ Date: January 2012 Contact: Matthew Garrett <mjg@redhat.com> diff --git a/Documentation/ABI/testing/sysfs-platform_profile b/Documentation/ABI/testing/sysfs-platform_profile index 9d6b89b66cca..dae9c8941905 100644 --- a/Documentation/ABI/testing/sysfs-platform_profile +++ b/Documentation/ABI/testing/sysfs-platform_profile @@ -5,13 +5,17 @@ Description: This file contains a space-separated list of profiles supported for Drivers must use the following standard profile-names: - ============ ============================================ - low-power Low power consumption - cool Cooler operation - quiet Quieter operation - balanced Balance between low power consumption and performance - performance High performance operation - ============ ============================================ + ==================== ======================================== + low-power Low power consumption + cool Cooler operation + quiet Quieter operation + balanced Balance between low power consumption + and performance + balanced-performance Balance between performance and low + power consumption with a slight bias + towards performance + performance High performance operation + ==================== ======================================== Userspace may expect drivers to offer more than one of these standard profile names. 
diff --git a/Documentation/devicetree/bindings/display/connector/dp-connector.yaml b/Documentation/devicetree/bindings/display/connector/dp-connector.yaml index 1c17d60e7760..22792a79e7ce 100644 --- a/Documentation/devicetree/bindings/display/connector/dp-connector.yaml +++ b/Documentation/devicetree/bindings/display/connector/dp-connector.yaml @@ -26,7 +26,6 @@ properties: dp-pwr-supply: description: Power supply for the DP_PWR pin - maxItems: 1 port: $ref: /schemas/graph.yaml#/properties/port diff --git a/Documentation/devicetree/bindings/pwm/pwm-zx.txt b/Documentation/devicetree/bindings/pwm/pwm-zx.txt deleted file mode 100644 index 3c8fe7aa8269..000000000000 --- a/Documentation/devicetree/bindings/pwm/pwm-zx.txt +++ /dev/null @@ -1,22 +0,0 @@ -ZTE ZX PWM controller - -Required properties: - - compatible: Should be "zte,zx296718-pwm". - - reg: Physical base address and length of the controller's registers. - - clocks : The phandle and specifier referencing the controller's clocks. - - clock-names: "pclk" for PCLK, "wclk" for WCLK to the PWM controller. The - PCLK is for register access, while WCLK is the reference clock for - calculating period and duty cycles. - - #pwm-cells: Should be 3. See pwm.yaml in this directory for a description of - the cells format. - -Example: - - pwm: pwm@1439000 { - compatible = "zte,zx296718-pwm"; - reg = <0x1439000 0x1000>; - clocks = <&lsp1crm LSP1_PWM_PCLK>, - <&lsp1crm LSP1_PWM_WCLK>; - clock-names = "pclk", "wclk"; - #pwm-cells = <3>; - }; diff --git a/arch/mips/bmips/setup.c b/arch/mips/bmips/setup.c index 95f8f10d8697..31bcfa4e08b9 100644 --- a/arch/mips/bmips/setup.c +++ b/arch/mips/bmips/setup.c @@ -196,4 +196,4 @@ static int __init plat_dev_init(void) return 0; } -device_initcall(plat_dev_init); +arch_initcall(plat_dev_init); diff --git a/arch/mips/kernel/r4k-bugs64.c b/arch/mips/kernel/r4k-bugs64.c index 1ff19f1ea5ca..35729c9e6cfa 100644 --- a/arch/mips/kernel/r4k-bugs64.c +++ b/arch/mips/kernel/r4k-bugs64.c @@ -18,7 +18,7 @@ static char bug64hit[] __initdata = "reliable operation impossible!\n%s"; static char nowar[] __initdata = - "Please report to <linux-mips@linux-mips.org>."; + "Please report to <linux-mips@vger.kernel.org>."; static char r4kwar[] __initdata = "Enable CPU_R4000_WORKAROUNDS to rectify."; static char daddiwar[] __initdata = diff --git a/arch/mips/lib/iomap-pci.c b/arch/mips/lib/iomap-pci.c index 210f5a95ecb1..a9cb28813f0b 100644 --- a/arch/mips/lib/iomap-pci.c +++ b/arch/mips/lib/iomap-pci.c @@ -32,7 +32,7 @@ void __iomem *__pci_ioport_map(struct pci_dev *dev, sprintf(name, "%04x:%02x", pci_domain_nr(bus), bus->number); printk(KERN_WARNING "io_map_base of root PCI bus %s unset. 
" "Trying to continue but you better\nfix this issue or " - "report it to linux-mips@linux-mips.org or your " + "report it to linux-mips@vger.kernel.org or your " "vendor.\n", name); #ifdef CONFIG_PCI_DOMAINS panic("To avoid data corruption io_map_base MUST be set with " diff --git a/arch/mips/sgi-ip32/ip32-irq.c b/arch/mips/sgi-ip32/ip32-irq.c index 1bbd5bfb5458..e21ea1de05e3 100644 --- a/arch/mips/sgi-ip32/ip32-irq.c +++ b/arch/mips/sgi-ip32/ip32-irq.c @@ -343,7 +343,7 @@ static void ip32_unknown_interrupt(void) printk("Register dump:\n"); show_regs(get_irq_regs()); - printk("Please mail this report to linux-mips@linux-mips.org\n"); + printk("Please mail this report to linux-mips@vger.kernel.org\n"); printk("Spinning..."); while(1) ; } diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index fc06945d3f99..eedec61e3476 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -87,6 +87,14 @@ config ACPI_SPCR_TABLE This table provides information about the configuration of the earlycon console. +config ACPI_FPDT + bool "ACPI Firmware Performance Data Table (FPDT) support" + depends on X86_64 + help + Enable support for the Firmware Performance Data Table (FPDT). + This table provides information on the timing of the system + boot, S3 suspend and S3 resume firmware code paths. + config ACPI_LPIT bool depends on X86_64 @@ -327,21 +335,7 @@ config ACPI_THERMAL the module will be called thermal. config ACPI_PLATFORM_PROFILE - tristate "ACPI Platform Profile Driver" - default m - help - This driver adds support for platform-profiles on platforms that - support it. - - Platform-profiles can be used to control the platform behaviour. For - example whether to operate in a lower power mode, in a higher - power performance mode or between the two. - - This driver provides the sysfs interface and is used as the registration - point for platform specific drivers. - - Which profiles are supported is determined on a per-platform basis and - should be obtained from the platform specific driver. + tristate config ACPI_CUSTOM_DSDT_FILE string "Custom DSDT Table file to include" diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile index 52b627c7f977..700b41adf2db 100644 --- a/drivers/acpi/Makefile +++ b/drivers/acpi/Makefile @@ -57,6 +57,7 @@ acpi-$(CONFIG_X86) += x86/utils.o acpi-$(CONFIG_X86) += x86/s2idle.o acpi-$(CONFIG_DEBUG_FS) += debugfs.o acpi-y += acpi_lpat.o +acpi-$(CONFIG_ACPI_FPDT) += acpi_fpdt.o acpi-$(CONFIG_ACPI_LPIT) += acpi_lpit.o acpi-$(CONFIG_ACPI_GENERIC_GSI) += irq.o acpi-$(CONFIG_ACPI_WATCHDOG) += acpi_watchdog.o diff --git a/drivers/acpi/acpi_fpdt.c b/drivers/acpi/acpi_fpdt.c new file mode 100644 index 000000000000..a89a806a7a2a --- /dev/null +++ b/drivers/acpi/acpi_fpdt.c @@ -0,0 +1,264 @@ +// SPDX-License-Identifier: GPL-2.0-only + +/* + * FPDT support for exporting boot and suspend/resume performance data + * + * Copyright (C) 2021 Intel Corporation. All rights reserved. + */ + +#define pr_fmt(fmt) "ACPI FPDT: " fmt + +#include <linux/acpi.h> + +/* + * FPDT contains ACPI table header and a number of fpdt_subtable_entries. + * Each fpdt_subtable_entry points to a subtable: FBPT or S3PT. + * Each FPDT subtable (FBPT/S3PT) is composed of a fpdt_subtable_header + * and a number of fpdt performance records. + * Each FPDT performance record is composed of a fpdt_record_header and + * performance data fields, for boot or suspend or resume phase. 
+ */ +enum fpdt_subtable_type { + SUBTABLE_FBPT, + SUBTABLE_S3PT, +}; + +struct fpdt_subtable_entry { + u16 type; /* refer to enum fpdt_subtable_type */ + u8 length; + u8 revision; + u32 reserved; + u64 address; /* physical address of the S3PT/FBPT table */ +}; + +struct fpdt_subtable_header { + u32 signature; + u32 length; +}; + +enum fpdt_record_type { + RECORD_S3_RESUME, + RECORD_S3_SUSPEND, + RECORD_BOOT, +}; + +struct fpdt_record_header { + u16 type; /* refer to enum fpdt_record_type */ + u8 length; + u8 revision; +}; + +struct resume_performance_record { + struct fpdt_record_header header; + u32 resume_count; + u64 resume_prev; + u64 resume_avg; +} __attribute__((packed)); + +struct boot_performance_record { + struct fpdt_record_header header; + u32 reserved; + u64 firmware_start; + u64 bootloader_load; + u64 bootloader_launch; + u64 exitbootservice_start; + u64 exitbootservice_end; +} __attribute__((packed)); + +struct suspend_performance_record { + struct fpdt_record_header header; + u64 suspend_start; + u64 suspend_end; +} __attribute__((packed)); + + +static struct resume_performance_record *record_resume; +static struct suspend_performance_record *record_suspend; +static struct boot_performance_record *record_boot; + +#define FPDT_ATTR(phase, name) \ +static ssize_t name##_show(struct kobject *kobj, \ + struct kobj_attribute *attr, char *buf) \ +{ \ + return sprintf(buf, "%llu\n", record_##phase->name); \ +} \ +static struct kobj_attribute name##_attr = \ +__ATTR(name##_ns, 0444, name##_show, NULL) + +FPDT_ATTR(resume, resume_prev); +FPDT_ATTR(resume, resume_avg); +FPDT_ATTR(suspend, suspend_start); +FPDT_ATTR(suspend, suspend_end); +FPDT_ATTR(boot, firmware_start); +FPDT_ATTR(boot, bootloader_load); +FPDT_ATTR(boot, bootloader_launch); +FPDT_ATTR(boot, exitbootservice_start); +FPDT_ATTR(boot, exitbootservice_end); + +static ssize_t resume_count_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", record_resume->resume_count); +} + +static struct kobj_attribute resume_count_attr = +__ATTR_RO(resume_count); + +static struct attribute *resume_attrs[] = { + &resume_count_attr.attr, + &resume_prev_attr.attr, + &resume_avg_attr.attr, + NULL +}; + +static const struct attribute_group resume_attr_group = { + .attrs = resume_attrs, + .name = "resume", +}; + +static struct attribute *suspend_attrs[] = { + &suspend_start_attr.attr, + &suspend_end_attr.attr, + NULL +}; + +static const struct attribute_group suspend_attr_group = { + .attrs = suspend_attrs, + .name = "suspend", +}; + +static struct attribute *boot_attrs[] = { + &firmware_start_attr.attr, + &bootloader_load_attr.attr, + &bootloader_launch_attr.attr, + &exitbootservice_start_attr.attr, + &exitbootservice_end_attr.attr, + NULL +}; + +static const struct attribute_group boot_attr_group = { + .attrs = boot_attrs, + .name = "boot", +}; + +static struct kobject *fpdt_kobj; + +static int fpdt_process_subtable(u64 address, u32 subtable_type) +{ + struct fpdt_subtable_header *subtable_header; + struct fpdt_record_header *record_header; + char *signature = (subtable_type == SUBTABLE_FBPT ? 
"FBPT" : "S3PT"); + u32 length, offset; + int result; + + subtable_header = acpi_os_map_memory(address, sizeof(*subtable_header)); + if (!subtable_header) + return -ENOMEM; + + if (strncmp((char *)&subtable_header->signature, signature, 4)) { + pr_info(FW_BUG "subtable signature and type mismatch!\n"); + return -EINVAL; + } + + length = subtable_header->length; + acpi_os_unmap_memory(subtable_header, sizeof(*subtable_header)); + + subtable_header = acpi_os_map_memory(address, length); + if (!subtable_header) + return -ENOMEM; + + offset = sizeof(*subtable_header); + while (offset < length) { + record_header = (void *)subtable_header + offset; + offset += record_header->length; + + switch (record_header->type) { + case RECORD_S3_RESUME: + if (subtable_type != SUBTABLE_S3PT) { + pr_err(FW_BUG "Invalid record %d for subtable %s\n", + record_header->type, signature); + return -EINVAL; + } + if (record_resume) { + pr_err("Duplicate resume performance record found.\n"); + continue; + } + record_resume = (struct resume_performance_record *)record_header; + result = sysfs_create_group(fpdt_kobj, &resume_attr_group); + if (result) + return result; + break; + case RECORD_S3_SUSPEND: + if (subtable_type != SUBTABLE_S3PT) { + pr_err(FW_BUG "Invalid %d for subtable %s\n", + record_header->type, signature); + continue; + } + if (record_suspend) { + pr_err("Duplicate suspend performance record found.\n"); + continue; + } + record_suspend = (struct suspend_performance_record *)record_header; + result = sysfs_create_group(fpdt_kobj, &suspend_attr_group); + if (result) + return result; + break; + case RECORD_BOOT: + if (subtable_type != SUBTABLE_FBPT) { + pr_err(FW_BUG "Invalid %d for subtable %s\n", + record_header->type, signature); + return -EINVAL; + } + if (record_boot) { + pr_err("Duplicate boot performance record found.\n"); + continue; + } + record_boot = (struct boot_performance_record *)record_header; + result = sysfs_create_group(fpdt_kobj, &boot_attr_group); + if (result) + return result; + break; + + default: + pr_err(FW_BUG "Invalid record %d found.\n", record_header->type); + return -EINVAL; + } + } + return 0; +} + +static int __init acpi_init_fpdt(void) +{ + acpi_status status; + struct acpi_table_header *header; + struct fpdt_subtable_entry *subtable; + u32 offset = sizeof(*header); + + status = acpi_get_table(ACPI_SIG_FPDT, 0, &header); + + if (ACPI_FAILURE(status)) + return 0; + + fpdt_kobj = kobject_create_and_add("fpdt", acpi_kobj); + if (!fpdt_kobj) + return -ENOMEM; + + while (offset < header->length) { + subtable = (void *)header + offset; + switch (subtable->type) { + case SUBTABLE_FBPT: + case SUBTABLE_S3PT: + fpdt_process_subtable(subtable->address, + subtable->type); + break; + default: + pr_info(FW_BUG "Invalid subtable type %d found.\n", + subtable->type); + break; + } + offset += sizeof(*subtable); + } + return 0; +} + +fs_initcall(acpi_init_fpdt); diff --git a/drivers/acpi/platform_profile.c b/drivers/acpi/platform_profile.c index 4a59c5993bde..dd2fbf38e414 100644 --- a/drivers/acpi/platform_profile.c +++ b/drivers/acpi/platform_profile.c @@ -17,6 +17,7 @@ static const char * const profile_names[] = { [PLATFORM_PROFILE_COOL] = "cool", [PLATFORM_PROFILE_QUIET] = "quiet", [PLATFORM_PROFILE_BALANCED] = "balanced", + [PLATFORM_PROFILE_BALANCED_PERFORMANCE] = "balanced-performance", [PLATFORM_PROFILE_PERFORMANCE] = "performance", }; static_assert(ARRAY_SIZE(profile_names) == PLATFORM_PROFILE_LAST); diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig index 
2779e85795a7..fd236158f32d 100644 --- a/drivers/block/Kconfig +++ b/drivers/block/Kconfig @@ -66,6 +66,12 @@ config AMIGA_Z2RAM To compile this driver as a module, choose M here: the module will be called z2ram. +config N64CART + bool "N64 cart support" + depends on MACH_NINTENDO64 + help + Support for the N64 cart. + config CDROM tristate select BLK_SCSI_REQUEST diff --git a/drivers/block/Makefile b/drivers/block/Makefile index b501b8728fb9..e3e3f1c79a82 100644 --- a/drivers/block/Makefile +++ b/drivers/block/Makefile @@ -17,6 +17,7 @@ obj-$(CONFIG_PS3_DISK) += ps3disk.o obj-$(CONFIG_PS3_VRAM) += ps3vram.o obj-$(CONFIG_ATARI_FLOPPY) += ataflop.o obj-$(CONFIG_AMIGA_Z2RAM) += z2ram.o +obj-$(CONFIG_N64CART) += n64cart.o obj-$(CONFIG_BLK_DEV_RAM) += brd.o obj-$(CONFIG_BLK_DEV_LOOP) += loop.o obj-$(CONFIG_XILINX_SYSACE) += xsysace.o diff --git a/drivers/block/n64cart.c b/drivers/block/n64cart.c new file mode 100644 index 000000000000..47bdf324e962 --- /dev/null +++ b/drivers/block/n64cart.c @@ -0,0 +1,178 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Support for the N64 cart. + * + * Copyright (c) 2021 Lauri Kasanen + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <linux/bitops.h> +#include <linux/blkdev.h> +#include <linux/dma-mapping.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/platform_device.h> + +enum { + PI_DRAM_REG = 0, + PI_CART_REG, + PI_READ_REG, + PI_WRITE_REG, + PI_STATUS_REG, +}; + +#define PI_STATUS_DMA_BUSY (1 << 0) +#define PI_STATUS_IO_BUSY (1 << 1) + +#define CART_DOMAIN 0x10000000 +#define CART_MAX 0x1FFFFFFF + +#define MIN_ALIGNMENT 8 + +static u32 __iomem *reg_base; + +static unsigned int start; +module_param(start, uint, 0); +MODULE_PARM_DESC(start, "Start address of the cart block data"); + +static unsigned int size; +module_param(size, uint, 0); +MODULE_PARM_DESC(size, "Size of the cart block data, in bytes"); + +static void n64cart_write_reg(const u8 reg, const u32 value) +{ + writel(value, reg_base + reg); +} + +static u32 n64cart_read_reg(const u8 reg) +{ + return readl(reg_base + reg); +} + +static void n64cart_wait_dma(void) +{ + while (n64cart_read_reg(PI_STATUS_REG) & + (PI_STATUS_DMA_BUSY | PI_STATUS_IO_BUSY)) + cpu_relax(); +} + +/* + * Process a single bvec of a bio. 
+ */ +static bool n64cart_do_bvec(struct device *dev, struct bio_vec *bv, u32 pos) +{ + dma_addr_t dma_addr; + const u32 bstart = pos + start; + + /* Alignment check */ + WARN_ON_ONCE((bv->bv_offset & (MIN_ALIGNMENT - 1)) || + (bv->bv_len & (MIN_ALIGNMENT - 1))); + + dma_addr = dma_map_bvec(dev, bv, DMA_FROM_DEVICE, 0); + if (dma_mapping_error(dev, dma_addr)) + return false; + + n64cart_wait_dma(); + + n64cart_write_reg(PI_DRAM_REG, dma_addr + bv->bv_offset); + n64cart_write_reg(PI_CART_REG, (bstart | CART_DOMAIN) & CART_MAX); + n64cart_write_reg(PI_WRITE_REG, bv->bv_len - 1); + + n64cart_wait_dma(); + + dma_unmap_page(dev, dma_addr, bv->bv_len, DMA_FROM_DEVICE); + return true; +} + +static blk_qc_t n64cart_submit_bio(struct bio *bio) +{ + struct bio_vec bvec; + struct bvec_iter iter; + struct device *dev = bio->bi_disk->private_data; + u32 pos = bio->bi_iter.bi_sector << SECTOR_SHIFT; + + bio_for_each_segment(bvec, bio, iter) { + if (!n64cart_do_bvec(dev, &bvec, pos)) + goto io_error; + pos += bvec.bv_len; + } + + bio_endio(bio); + return BLK_QC_T_NONE; +io_error: + bio_io_error(bio); + return BLK_QC_T_NONE; +} + +static const struct block_device_operations n64cart_fops = { + .owner = THIS_MODULE, + .submit_bio = n64cart_submit_bio, +}; + +/* + * The target device is embedded and RAM-constrained. We save RAM + * by initializing in __init code that gets dropped late in boot. + * For the same reason there is no module or unloading support. + */ +static int __init n64cart_probe(struct platform_device *pdev) +{ + struct gendisk *disk; + + if (!start || !size) { + pr_err("start or size not specified\n"); + return -ENODEV; + } + + if (size & 4095) { + pr_err("size must be a multiple of 4K\n"); + return -ENODEV; + } + + reg_base = devm_platform_ioremap_resource(pdev, 0); + if (!reg_base) + return -EINVAL; + + disk = alloc_disk(0); + if (!disk) + return -ENOMEM; + + disk->queue = blk_alloc_queue(NUMA_NO_NODE); + if (!disk->queue) + return -ENOMEM; + + disk->first_minor = 0; + disk->flags = GENHD_FL_NO_PART_SCAN | GENHD_FL_EXT_DEVT; + disk->fops = &n64cart_fops; + disk->private_data = &pdev->dev; + strcpy(disk->disk_name, "n64cart"); + + set_capacity(disk, size >> SECTOR_SHIFT); + set_disk_ro(disk, 1); + + blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue); + blk_queue_physical_block_size(disk->queue, 4096); + blk_queue_logical_block_size(disk->queue, 4096); + + add_disk(disk); + + pr_info("n64cart: %u kb disk\n", size / 1024); + + return 0; +} + +static struct platform_driver n64cart_driver = { + .driver = { + .name = "n64cart", + }, +}; + +static int __init n64cart_init(void) +{ + return platform_driver_probe(&n64cart_driver, n64cart_probe); +} + +module_init(n64cart_init); + +MODULE_AUTHOR("Lauri Kasanen <cand@gmx.com>"); +MODULE_DESCRIPTION("Driver for the N64 cart"); +MODULE_LICENSE("GPL"); diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index b0285db7cf4f..b9fa3ef5b57c 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -705,6 +705,7 @@ static int virtblk_probe(struct virtio_device *vdev) u32 v, blk_size, max_size, sg_elems, opt_io_size; u16 min_io_size; u8 physical_block_exp, alignment_offset; + unsigned int queue_depth; if (!vdev->config->get) { dev_err(&vdev->dev, "%s failure: config access disabled\n", @@ -756,16 +757,18 @@ static int virtblk_probe(struct virtio_device *vdev) } /* Default queue sizing is to fill the ring. 
*/ - if (!virtblk_queue_depth) { - virtblk_queue_depth = vblk->vqs[0].vq->num_free; + if (likely(!virtblk_queue_depth)) { + queue_depth = vblk->vqs[0].vq->num_free; /* ... but without indirect descs, we use 2 descs per req */ if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC)) - virtblk_queue_depth /= 2; + queue_depth /= 2; + } else { + queue_depth = virtblk_queue_depth; } memset(&vblk->tag_set, 0, sizeof(vblk->tag_set)); vblk->tag_set.ops = &virtio_mq_ops; - vblk->tag_set.queue_depth = virtblk_queue_depth; + vblk->tag_set.queue_depth = queue_depth; vblk->tag_set.numa_node = NUMA_NO_NODE; vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; vblk->tag_set.cmd_size = diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c index 7475e09b0680..d64fc03929be 100644 --- a/drivers/dma-buf/dma-fence.c +++ b/drivers/dma-buf/dma-fence.c @@ -312,22 +312,25 @@ void __dma_fence_might_wait(void) /** - * dma_fence_signal_locked - signal completion of a fence + * dma_fence_signal_timestamp_locked - signal completion of a fence * @fence: the fence to signal + * @timestamp: fence signal timestamp in kernel's CLOCK_MONOTONIC time domain * * Signal completion for software callbacks on a fence, this will unblock * dma_fence_wait() calls and run all the callbacks added with * dma_fence_add_callback(). Can be called multiple times, but since a fence * can only go from the unsignaled to the signaled state and not back, it will - * only be effective the first time. + * only be effective the first time. Set the timestamp provided as the fence + * signal timestamp. * - * Unlike dma_fence_signal(), this function must be called with &dma_fence.lock - * held. + * Unlike dma_fence_signal_timestamp(), this function must be called with + * &dma_fence.lock held. * * Returns 0 on success and a negative error value when @fence has been * signalled already. */ -int dma_fence_signal_locked(struct dma_fence *fence) +int dma_fence_signal_timestamp_locked(struct dma_fence *fence, + ktime_t timestamp) { struct dma_fence_cb *cur, *tmp; struct list_head cb_list; @@ -341,7 +344,7 @@ int dma_fence_signal_locked(struct dma_fence *fence) /* Stash the cb_list before replacing it with the timestamp */ list_replace(&fence->cb_list, &cb_list); - fence->timestamp = ktime_get(); + fence->timestamp = timestamp; set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags); trace_dma_fence_signaled(fence); @@ -352,6 +355,59 @@ int dma_fence_signal_locked(struct dma_fence *fence) return 0; } +EXPORT_SYMBOL(dma_fence_signal_timestamp_locked); + +/** + * dma_fence_signal_timestamp - signal completion of a fence + * @fence: the fence to signal + * @timestamp: fence signal timestamp in kernel's CLOCK_MONOTONIC time domain + * + * Signal completion for software callbacks on a fence, this will unblock + * dma_fence_wait() calls and run all the callbacks added with + * dma_fence_add_callback(). Can be called multiple times, but since a fence + * can only go from the unsignaled to the signaled state and not back, it will + * only be effective the first time. Set the timestamp provided as the fence + * signal timestamp. + * + * Returns 0 on success and a negative error value when @fence has been + * signalled already. 
+ */ +int dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp) +{ + unsigned long flags; + int ret; + + if (!fence) + return -EINVAL; + + spin_lock_irqsave(fence->lock, flags); + ret = dma_fence_signal_timestamp_locked(fence, timestamp); + spin_unlock_irqrestore(fence->lock, flags); + + return ret; +} +EXPORT_SYMBOL(dma_fence_signal_timestamp); + +/** + * dma_fence_signal_locked - signal completion of a fence + * @fence: the fence to signal + * + * Signal completion for software callbacks on a fence, this will unblock + * dma_fence_wait() calls and run all the callbacks added with + * dma_fence_add_callback(). Can be called multiple times, but since a fence + * can only go from the unsignaled to the signaled state and not back, it will + * only be effective the first time. + * + * Unlike dma_fence_signal(), this function must be called with &dma_fence.lock + * held. + * + * Returns 0 on success and a negative error value when @fence has been + * signalled already. + */ +int dma_fence_signal_locked(struct dma_fence *fence) +{ + return dma_fence_signal_timestamp_locked(fence, ktime_get()); +} EXPORT_SYMBOL(dma_fence_signal_locked); /** @@ -379,7 +435,7 @@ int dma_fence_signal(struct dma_fence *fence) tmp = dma_fence_begin_signalling(); spin_lock_irqsave(fence->lock, flags); - ret = dma_fence_signal_locked(fence); + ret = dma_fence_signal_timestamp_locked(fence, ktime_get()); spin_unlock_irqrestore(fence->lock, flags); dma_fence_end_signalling(tmp); diff --git a/drivers/dma-buf/dma-heap.c b/drivers/dma-buf/dma-heap.c index afd22c9dbdcf..6b5db954569f 100644 --- a/drivers/dma-buf/dma-heap.c +++ b/drivers/dma-buf/dma-heap.c @@ -52,6 +52,9 @@ static int dma_heap_buffer_alloc(struct dma_heap *heap, size_t len, unsigned int fd_flags, unsigned int heap_flags) { + struct dma_buf *dmabuf; + int fd; + /* * Allocations from all heaps have to begin * and end on page boundaries. 
@@ -60,7 +63,16 @@ static int dma_heap_buffer_alloc(struct dma_heap *heap, size_t len, if (!len) return -EINVAL; - return heap->ops->allocate(heap, len, fd_flags, heap_flags); + dmabuf = heap->ops->allocate(heap, len, fd_flags, heap_flags); + if (IS_ERR(dmabuf)) + return PTR_ERR(dmabuf); + + fd = dma_buf_fd(dmabuf, fd_flags); + if (fd < 0) { + dma_buf_put(dmabuf); + /* just return, as put will call release and that will free */ + } + return fd; } static int dma_heap_open(struct inode *inode, struct file *file) diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c index 364fc2f3e499..5d64eccd21d6 100644 --- a/drivers/dma-buf/heaps/cma_heap.c +++ b/drivers/dma-buf/heaps/cma_heap.c @@ -271,10 +271,10 @@ static const struct dma_buf_ops cma_heap_buf_ops = { .release = cma_heap_dma_buf_release, }; -static int cma_heap_allocate(struct dma_heap *heap, - unsigned long len, - unsigned long fd_flags, - unsigned long heap_flags) +static struct dma_buf *cma_heap_allocate(struct dma_heap *heap, + unsigned long len, + unsigned long fd_flags, + unsigned long heap_flags) { struct cma_heap *cma_heap = dma_heap_get_drvdata(heap); struct cma_heap_buffer *buffer; @@ -289,7 +289,7 @@ static int cma_heap_allocate(struct dma_heap *heap, buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); if (!buffer) - return -ENOMEM; + return ERR_PTR(-ENOMEM); INIT_LIST_HEAD(&buffer->attachments); mutex_init(&buffer->lock); @@ -348,15 +348,7 @@ static int cma_heap_allocate(struct dma_heap *heap, ret = PTR_ERR(dmabuf); goto free_pages; } - - ret = dma_buf_fd(dmabuf, fd_flags); - if (ret < 0) { - dma_buf_put(dmabuf); - /* just return, as put will call release and that will free */ - return ret; - } - - return ret; + return dmabuf; free_pages: kfree(buffer->pages); @@ -365,7 +357,7 @@ free_cma: free_buffer: kfree(buffer); - return ret; + return ERR_PTR(ret); } static const struct dma_heap_ops cma_heap_ops = { diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c index 17e0e9a68baf..29e49ac17251 100644 --- a/drivers/dma-buf/heaps/system_heap.c +++ b/drivers/dma-buf/heaps/system_heap.c @@ -331,10 +331,10 @@ static struct page *alloc_largest_available(unsigned long size, return NULL; } -static int system_heap_allocate(struct dma_heap *heap, - unsigned long len, - unsigned long fd_flags, - unsigned long heap_flags) +static struct dma_buf *system_heap_allocate(struct dma_heap *heap, + unsigned long len, + unsigned long fd_flags, + unsigned long heap_flags) { struct system_heap_buffer *buffer; DEFINE_DMA_BUF_EXPORT_INFO(exp_info); @@ -349,7 +349,7 @@ static int system_heap_allocate(struct dma_heap *heap, buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); if (!buffer) - return -ENOMEM; + return ERR_PTR(-ENOMEM); INIT_LIST_HEAD(&buffer->attachments); mutex_init(&buffer->lock); @@ -363,8 +363,10 @@ static int system_heap_allocate(struct dma_heap *heap, * Avoid trying to allocate memory if the process * has been killed by SIGKILL */ - if (fatal_signal_pending(current)) + if (fatal_signal_pending(current)) { + ret = -EINTR; goto free_buffer; + } page = alloc_largest_available(size_remaining, max_order); if (!page) @@ -397,14 +399,7 @@ static int system_heap_allocate(struct dma_heap *heap, ret = PTR_ERR(dmabuf); goto free_pages; } - - ret = dma_buf_fd(dmabuf, fd_flags); - if (ret < 0) { - dma_buf_put(dmabuf); - /* just return, as put will call release and that will free */ - return ret; - } - return ret; + return dmabuf; free_pages: for_each_sgtable_sg(table, sg, i) { @@ -418,7 +413,7 @@ 
free_buffer: __free_pages(page, compound_order(page)); kfree(buffer); - return ret; + return ERR_PTR(ret); } static const struct dma_heap_ops system_heap_ops = { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 86452f4f3c82..b6879d97c9c9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1008,6 +1008,12 @@ struct amdgpu_device { bool in_suspend; bool in_hibernate; + /* + * The combination flag in_poweroff_reboot_com used to identify the poweroff + * and reboot opt in the s0i3 system-wide suspend. + */ + bool in_poweroff_reboot_com; + atomic_t in_gpu_reset; enum pp_mp1_state mp1_state; struct rw_semaphore reset_sem; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 7052dc35d278..6447cd6ca5a8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2678,7 +2678,8 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev) { int i, r; - if (!amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev)) { + if (adev->in_poweroff_reboot_com || + !amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev)) { amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE); amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE); } @@ -3741,7 +3742,8 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon) amdgpu_fence_driver_suspend(adev); - if (!amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev)) + if (adev->in_poweroff_reboot_com || + !amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev)) r = amdgpu_device_ip_suspend_phase2(adev); else amdgpu_gfx_state_change_set(adev, sGpuChangeState_D3Entry); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 03c529630c21..4575192d9b08 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -1270,7 +1270,9 @@ amdgpu_pci_shutdown(struct pci_dev *pdev) */ if (!amdgpu_passthrough(adev)) adev->mp1_state = PP_MP1_STATE_UNLOAD; + adev->in_poweroff_reboot_com = true; amdgpu_device_ip_suspend(adev); + adev->in_poweroff_reboot_com = false; adev->mp1_state = PP_MP1_STATE_NONE; } @@ -1312,8 +1314,13 @@ static int amdgpu_pmops_thaw(struct device *dev) static int amdgpu_pmops_poweroff(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(drm_dev); + int r; - return amdgpu_device_suspend(drm_dev, true); + adev->in_poweroff_reboot_com = true; + r = amdgpu_device_suspend(drm_dev, true); + adev->in_poweroff_reboot_com = false; + return r; } static int amdgpu_pmops_restore(struct device *dev) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 94cd5ddd67ef..3e1fd1e7d09f 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -937,7 +937,49 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_ } #endif +#if defined(CONFIG_DRM_AMD_DC_DCN) +static void event_mall_stutter(struct work_struct *work) +{ + + struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work); + struct amdgpu_display_manager *dm = vblank_work->dm; + + mutex_lock(&dm->dc_lock); + + if (vblank_work->enable) + dm->active_vblank_irq_count++; + else + dm->active_vblank_irq_count--; + + + dc_allow_idle_optimizations( + dm->dc, 
dm->active_vblank_irq_count == 0 ? true : false); + + DRM_DEBUG_DRIVER("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0); + + mutex_unlock(&dm->dc_lock); +} + +static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc) +{ + + int max_caps = dc->caps.max_links; + struct vblank_workqueue *vblank_work; + int i = 0; + + vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL); + if (ZERO_OR_NULL_PTR(vblank_work)) { + kfree(vblank_work); + return NULL; + } + + for (i = 0; i < max_caps; i++) + INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter); + + return vblank_work; +} +#endif static int amdgpu_dm_init(struct amdgpu_device *adev) { struct dc_init_data init_data; @@ -957,6 +999,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) mutex_init(&adev->dm.dc_lock); mutex_init(&adev->dm.audio_lock); +#if defined(CONFIG_DRM_AMD_DC_DCN) + spin_lock_init(&adev->dm.vblank_lock); +#endif if(amdgpu_dm_irq_init(adev)) { DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n"); @@ -1071,6 +1116,17 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) amdgpu_dm_init_color_mod(); +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (adev->dm.dc->caps.max_links > 0) { + adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc); + + if (!adev->dm.vblank_workqueue) + DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n"); + else + DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue); + } +#endif + #ifdef CONFIG_DRM_AMD_DC_HDCP if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) { adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc); @@ -1936,7 +1992,7 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state, dc_commit_updates_for_stream( dm->dc, bundle->surface_updates, dc_state->stream_status->plane_count, - dc_state->streams[k], &bundle->stream_update); + dc_state->streams[k], &bundle->stream_update, dc_state); } cleanup: @@ -1967,7 +2023,8 @@ static void dm_set_dpms_off(struct dc_link *link) stream_update.stream = stream_state; dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0, - stream_state, &stream_update); + stream_state, &stream_update, + stream_state->ctx->dc->current_state); mutex_unlock(&adev->dm.dc_lock); } @@ -5374,7 +5431,10 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable) struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); struct amdgpu_device *adev = drm_to_adev(crtc->dev); struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state); +#if defined(CONFIG_DRM_AMD_DC_DCN) struct amdgpu_display_manager *dm = &adev->dm; + unsigned long flags; +#endif int rc = 0; if (enable) { @@ -5397,22 +5457,15 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable) if (amdgpu_in_reset(adev)) return 0; - mutex_lock(&dm->dc_lock); - - if (enable) - dm->active_vblank_irq_count++; - else - dm->active_vblank_irq_count--; - #if defined(CONFIG_DRM_AMD_DC_DCN) - dc_allow_idle_optimizations( - adev->dm.dc, dm->active_vblank_irq_count == 0 ? 
true : false); - - DRM_DEBUG_DRIVER("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0); + spin_lock_irqsave(&dm->vblank_lock, flags); + dm->vblank_workqueue->dm = dm; + dm->vblank_workqueue->otg_inst = acrtc->otg_inst; + dm->vblank_workqueue->enable = enable; + spin_unlock_irqrestore(&dm->vblank_lock, flags); + schedule_work(&dm->vblank_workqueue->mall_work); #endif - mutex_unlock(&dm->dc_lock); - return 0; } @@ -7663,7 +7716,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, struct drm_crtc *pcrtc, bool wait_for_vblank) { - int i; + uint32_t i; uint64_t timestamp_ns; struct drm_plane *plane; struct drm_plane_state *old_plane_state, *new_plane_state; @@ -7704,7 +7757,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, amdgpu_dm_commit_cursors(state); /* update planes when needed */ - for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) { + for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { struct drm_crtc *crtc = new_plane_state->crtc; struct drm_crtc_state *new_crtc_state; struct drm_framebuffer *fb = new_plane_state->fb; @@ -7927,7 +7980,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, bundle->surface_updates, planes_count, acrtc_state->stream, - &bundle->stream_update); + &bundle->stream_update, + dc_state); /** * Enable or disable the interrupts on the backend. @@ -8263,13 +8317,13 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state); struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); - struct dc_surface_update surface_updates[MAX_SURFACES]; + struct dc_surface_update dummy_updates[MAX_SURFACES]; struct dc_stream_update stream_update; struct dc_info_packet hdr_packet; struct dc_stream_status *status = NULL; bool abm_changed, hdr_changed, scaling_changed; - memset(&surface_updates, 0, sizeof(surface_updates)); + memset(&dummy_updates, 0, sizeof(dummy_updates)); memset(&stream_update, 0, sizeof(stream_update)); if (acrtc) { @@ -8326,15 +8380,16 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) * To fix this, DC should permit updating only stream properties. 
*/ for (j = 0; j < status->plane_count; j++) - surface_updates[j].surface = status->plane_states[j]; + dummy_updates[j].surface = status->plane_states[0]; mutex_lock(&dm->dc_lock); dc_commit_updates_for_stream(dm->dc, - surface_updates, + dummy_updates, status->plane_count, dm_new_crtc_state->stream, - &stream_update); + &stream_update, + dc_state); mutex_unlock(&dm->dc_lock); } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index f72930c36c22..8bfe901cf237 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -93,6 +93,20 @@ struct dm_compressor_info { }; /** + * struct vblank_workqueue - Works to be executed in a separate thread during vblank + * @mall_work: work for mall stutter + * @dm: amdgpu display manager device + * @otg_inst: otg instance of which vblank is being set + * @enable: true if enable vblank + */ +struct vblank_workqueue { + struct work_struct mall_work; + struct amdgpu_display_manager *dm; + int otg_inst; + bool enable; +}; + +/** * struct amdgpu_dm_backlight_caps - Information about backlight * * Describe the backlight support for ACPI or eDP AUX. @@ -244,6 +258,15 @@ struct amdgpu_display_manager { struct mutex audio_lock; /** + * @vblank_work_lock: + * + * Guards access to deferred vblank work state. + */ +#if defined(CONFIG_DRM_AMD_DC_DCN) + spinlock_t vblank_lock; +#endif + + /** * @audio_component: * * Used to notify ELD changes to sound driver. @@ -321,6 +344,10 @@ struct amdgpu_display_manager { struct hdcp_workqueue *hdcp_workqueue; #endif +#if defined(CONFIG_DRM_AMD_DC_DCN) + struct vblank_workqueue *vblank_workqueue; +#endif + struct drm_atomic_state *cached_state; struct dc_state *cached_dc_state; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index c9aede2f783d..8f8a13c7cf73 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -2697,7 +2697,8 @@ void dc_commit_updates_for_stream(struct dc *dc, struct dc_surface_update *srf_updates, int surface_count, struct dc_stream_state *stream, - struct dc_stream_update *stream_update) + struct dc_stream_update *stream_update, + struct dc_state *state) { const struct dc_stream_status *stream_status; enum surface_update_type update_type; @@ -2716,12 +2717,6 @@ void dc_commit_updates_for_stream(struct dc *dc, if (update_type >= UPDATE_TYPE_FULL) { - struct dc_plane_state *new_planes[MAX_SURFACES]; - - memset(new_planes, 0, sizeof(new_planes)); - - for (i = 0; i < surface_count; i++) - new_planes[i] = srf_updates[i].surface; /* initialize scratch memory for building context */ context = dc_create_state(dc); @@ -2730,21 +2725,15 @@ void dc_commit_updates_for_stream(struct dc *dc, return; } - dc_resource_state_copy_construct( - dc->current_state, context); + dc_resource_state_copy_construct(state, context); - /*remove old surfaces from context */ - if (!dc_rem_all_planes_for_stream(dc, stream, context)) { - DC_ERROR("Failed to remove streams for new validate context!\n"); - return; - } + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i]; + struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; - /* add surface to context */ - if (!dc_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) { - DC_ERROR("Failed to add streams for new validate context!\n"); - return; + if (new_pipe->plane_state && 
new_pipe->plane_state != old_pipe->plane_state) + new_pipe->plane_state->force_full_update = true; } - } diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index a4f7ec888c67..80b67b860091 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -294,7 +294,8 @@ void dc_commit_updates_for_stream(struct dc *dc, struct dc_surface_update *srf_updates, int surface_count, struct dc_stream_state *stream, - struct dc_stream_update *stream_update); + struct dc_stream_update *stream_update, + struct dc_state *state); /* * Log the current stream state. */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c index 59024653430c..e4701825b5a0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c @@ -480,7 +480,6 @@ unsigned int dcn10_get_dig_frontend(struct link_encoder *enc) break; default: // invalid source select DIG - ASSERT(false); result = ENGINE_ID_UNKNOWN; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c index 9620fb8a27dc..06dc1e2e8383 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c @@ -539,6 +539,8 @@ void dcn30_init_hw(struct dc *dc) fe = dc->links[i]->link_enc->funcs->get_dig_frontend( dc->links[i]->link_enc); + if (fe == ENGINE_ID_UNKNOWN) + continue; for (j = 0; j < dc->res_pool->stream_enc_count; j++) { if (fe == dc->res_pool->stream_enc[j]->id) { diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c index 1b971265418b..0e0f494fbb5e 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c @@ -168,6 +168,11 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = { .ack = NULL }; +static const struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = { + .set = NULL, + .ack = NULL +}; + #undef BASE_INNER #define BASE_INNER(seg) DMU_BASE__INST0_SEG ## seg @@ -230,6 +235,17 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = { .funcs = &vblank_irq_info_funcs\ } +/* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match semantic + * of DCE's DC_IRQ_SOURCE_VUPDATEx. 
+ */ +#define vupdate_no_lock_int_entry(reg_num)\ + [DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\ + IRQ_REG_ENTRY(OTG, reg_num,\ + OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_INT_EN,\ + OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_EVENT_CLEAR),\ + .funcs = &vupdate_no_lock_irq_info_funcs\ + } + #define vblank_int_entry(reg_num)\ [DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\ IRQ_REG_ENTRY(OTG, reg_num,\ @@ -338,6 +354,12 @@ irq_source_info_dcn21[DAL_IRQ_SOURCES_NUMBER] = { vupdate_int_entry(3), vupdate_int_entry(4), vupdate_int_entry(5), + vupdate_no_lock_int_entry(0), + vupdate_no_lock_int_entry(1), + vupdate_no_lock_int_entry(2), + vupdate_no_lock_int_entry(3), + vupdate_no_lock_int_entry(4), + vupdate_no_lock_int_entry(5), vblank_int_entry(0), vblank_int_entry(1), vblank_int_entry(2), diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c index bb620fdd4cd2..bcedd4d92e35 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c @@ -762,7 +762,7 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev) structure_size = sizeof(struct gpu_metrics_v2_0); break; default: - break; + return; } #undef METRICS_VERSION diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c index 6b116bfd747c..7efbccffc2ea 100644 --- a/drivers/gpu/drm/drm_file.c +++ b/drivers/gpu/drm/drm_file.c @@ -775,20 +775,19 @@ void drm_event_cancel_free(struct drm_device *dev, EXPORT_SYMBOL(drm_event_cancel_free); /** - * drm_send_event_locked - send DRM event to file descriptor + * drm_send_event_helper - send DRM event to file descriptor * @dev: DRM device * @e: DRM event to deliver + * @timestamp: timestamp to set for the fence event in kernel's CLOCK_MONOTONIC + * time domain * - * This function sends the event @e, initialized with drm_event_reserve_init(), - * to its associated userspace DRM file. Callers must already hold - * &drm_device.event_lock, see drm_send_event() for the unlocked version. - * - * Note that the core will take care of unlinking and disarming events when the - * corresponding DRM file is closed. Drivers need not worry about whether the - * DRM file for this event still exists and can call this function upon - * completion of the asynchronous work unconditionally. + * This helper function sends the event @e, initialized with + * drm_event_reserve_init(), to its associated userspace DRM file. + * The timestamp variant of dma_fence_signal is used when the caller + * sends a valid timestamp. */ -void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e) +void drm_send_event_helper(struct drm_device *dev, + struct drm_pending_event *e, ktime_t timestamp) { assert_spin_locked(&dev->event_lock); @@ -799,7 +798,10 @@ void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e) } if (e->fence) { - dma_fence_signal(e->fence); + if (timestamp) + dma_fence_signal_timestamp(e->fence, timestamp); + else + dma_fence_signal(e->fence); dma_fence_put(e->fence); } @@ -814,6 +816,48 @@ void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e) wake_up_interruptible_poll(&e->file_priv->event_wait, EPOLLIN | EPOLLRDNORM); } + +/** + * drm_send_event_timestamp_locked - send DRM event to file descriptor + * @dev: DRM device + * @e: DRM event to deliver + * @timestamp: timestamp to set for the fence event in kernel's CLOCK_MONOTONIC + * time domain + * + * This function sends the event @e, initialized with drm_event_reserve_init(), + * to its associated userspace DRM file. 
Callers must already hold + * &drm_device.event_lock. + * + * Note that the core will take care of unlinking and disarming events when the + * corresponding DRM file is closed. Drivers need not worry about whether the + * DRM file for this event still exists and can call this function upon + * completion of the asynchronous work unconditionally. + */ +void drm_send_event_timestamp_locked(struct drm_device *dev, + struct drm_pending_event *e, ktime_t timestamp) +{ + drm_send_event_helper(dev, e, timestamp); +} +EXPORT_SYMBOL(drm_send_event_timestamp_locked); + +/** + * drm_send_event_locked - send DRM event to file descriptor + * @dev: DRM device + * @e: DRM event to deliver + * + * This function sends the event @e, initialized with drm_event_reserve_init(), + * to its associated userspace DRM file. Callers must already hold + * &drm_device.event_lock, see drm_send_event() for the unlocked version. + * + * Note that the core will take care of unlinking and disarming events when the + * corresponding DRM file is closed. Drivers need not worry about whether the + * DRM file for this event still exists and can call this function upon + * completion of the asynchronous work unconditionally. + */ +void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e) +{ + drm_send_event_helper(dev, e, 0); +} EXPORT_SYMBOL(drm_send_event_locked); /** @@ -836,7 +880,7 @@ void drm_send_event(struct drm_device *dev, struct drm_pending_event *e) unsigned long irqflags; spin_lock_irqsave(&dev->event_lock, irqflags); - drm_send_event_locked(dev, e); + drm_send_event_helper(dev, e, 0); spin_unlock_irqrestore(&dev->event_lock, irqflags); } EXPORT_SYMBOL(drm_send_event); diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c index 30912d8f82a5..893165eeddf3 100644 --- a/drivers/gpu/drm/drm_vblank.c +++ b/drivers/gpu/drm/drm_vblank.c @@ -1006,7 +1006,14 @@ static void send_vblank_event(struct drm_device *dev, break; } trace_drm_vblank_event_delivered(e->base.file_priv, e->pipe, seq); - drm_send_event_locked(dev, &e->base); + /* + * Use the same timestamp for any associated fence signal to avoid + * mismatch in timestamps for vsync & fence events triggered by the + * same HW event. Frameworks like SurfaceFlinger in Android expects the + * retire-fence timestamp to match exactly with HW vsync as it uses it + * for its software vsync modeling. 
+ */ + drm_send_event_timestamp_locked(dev, &e->base, now); } /** diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 2ae471518d9a..2385a7505f5d 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -21,7 +21,6 @@ subdir-ccflags-y += $(call cc-disable-warning, unused-but-set-variable) subdir-ccflags-y += $(call cc-disable-warning, sign-compare) subdir-ccflags-y += $(call cc-disable-warning, sometimes-uninitialized) subdir-ccflags-y += $(call cc-disable-warning, initializer-overrides) -subdir-ccflags-y += $(call cc-disable-warning, uninitialized) subdir-ccflags-y += $(call cc-disable-warning, frame-address) subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c index 57b0a3ebe908..8e77ca7ddf11 100644 --- a/drivers/gpu/drm/i915/display/intel_crtc.c +++ b/drivers/gpu/drm/i915/display/intel_crtc.c @@ -109,7 +109,6 @@ void intel_crtc_state_reset(struct intel_crtc_state *crtc_state, crtc_state->cpu_transcoder = INVALID_TRANSCODER; crtc_state->master_transcoder = INVALID_TRANSCODER; crtc_state->hsw_workaround_pipe = INVALID_PIPE; - crtc_state->output_format = INTEL_OUTPUT_FORMAT_INVALID; crtc_state->scaler_state.scaler_id = -1; crtc_state->mst_master_transcoder = INVALID_TRANSCODER; } diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 7ea1e5b487b6..8d7aaa68c6f6 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -10211,7 +10211,6 @@ static void snprintf_output_types(char *buf, size_t len, } static const char * const output_format_str[] = { - [INTEL_OUTPUT_FORMAT_INVALID] = "Invalid", [INTEL_OUTPUT_FORMAT_RGB] = "RGB", [INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0", [INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4", @@ -10220,7 +10219,7 @@ static const char * const output_format_str[] = { static const char *output_formats(enum intel_output_format format) { if (format >= ARRAY_SIZE(output_format_str)) - format = INTEL_OUTPUT_FORMAT_INVALID; + return "invalid"; return output_format_str[format]; } diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index 39397748b4b0..184ecbbcec99 100644 --- a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -830,7 +830,6 @@ struct intel_crtc_wm_state { }; enum intel_output_format { - INTEL_OUTPUT_FORMAT_INVALID, INTEL_OUTPUT_FORMAT_RGB, INTEL_OUTPUT_FORMAT_YCBCR420, INTEL_OUTPUT_FORMAT_YCBCR444, diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index d54ea0e4681d..fef1e857cefc 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -41,6 +41,7 @@ #include "gt/intel_lrc.h" #include "gt/intel_ring.h" #include "gt/intel_gt_requests.h" +#include "gt/shmem_utils.h" #include "gvt.h" #include "i915_pvinfo.h" #include "trace.h" @@ -3094,71 +3095,28 @@ int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) */ void intel_gvt_update_reg_whitelist(struct intel_vgpu *vgpu) { + const unsigned long start = LRC_STATE_PN * PAGE_SIZE; struct intel_gvt *gvt = vgpu->gvt; - struct drm_i915_private *dev_priv = gvt->gt->i915; struct intel_engine_cs *engine; enum intel_engine_id id; - const unsigned long start = LRC_STATE_PN * PAGE_SIZE; - struct i915_request *rq; - struct intel_vgpu_submission *s = 
&vgpu->submission; - struct i915_request *requests[I915_NUM_ENGINES] = {}; - bool is_ctx_pinned[I915_NUM_ENGINES] = {}; - int ret = 0; if (gvt->is_reg_whitelist_updated) return; - for_each_engine(engine, &dev_priv->gt, id) { - ret = intel_context_pin(s->shadow[id]); - if (ret) { - gvt_vgpu_err("fail to pin shadow ctx\n"); - goto out; - } - is_ctx_pinned[id] = true; - - rq = i915_request_create(s->shadow[id]); - if (IS_ERR(rq)) { - gvt_vgpu_err("fail to alloc default request\n"); - ret = -EIO; - goto out; - } - requests[id] = i915_request_get(rq); - i915_request_add(rq); - } - - if (intel_gt_wait_for_idle(&dev_priv->gt, - I915_GEM_IDLE_TIMEOUT) == -ETIME) { - ret = -EIO; - goto out; - } - /* scan init ctx to update cmd accessible list */ - for_each_engine(engine, &dev_priv->gt, id) { - int size = engine->context_size - PAGE_SIZE; - void *vaddr; + for_each_engine(engine, gvt->gt, id) { struct parser_exec_state s; - struct drm_i915_gem_object *obj; - struct i915_request *rq; - - rq = requests[id]; - GEM_BUG_ON(!i915_request_completed(rq)); - GEM_BUG_ON(!intel_context_is_pinned(rq->context)); - obj = rq->context->state->obj; - - if (!obj) { - ret = -EIO; - goto out; - } + void *vaddr; + int ret; - i915_gem_object_set_cache_coherency(obj, - I915_CACHE_LLC); + if (!engine->default_state) + continue; - vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB); + vaddr = shmem_pin_map(engine->default_state); if (IS_ERR(vaddr)) { - gvt_err("failed to pin init ctx obj, ring=%d, err=%lx\n", - id, PTR_ERR(vaddr)); - ret = PTR_ERR(vaddr); - goto out; + gvt_err("failed to map %s->default state, err:%zd\n", + engine->name, PTR_ERR(vaddr)); + return; } s.buf_type = RING_BUFFER_CTX; @@ -3166,9 +3124,9 @@ void intel_gvt_update_reg_whitelist(struct intel_vgpu *vgpu) s.vgpu = vgpu; s.engine = engine; s.ring_start = 0; - s.ring_size = size; + s.ring_size = engine->context_size - start; s.ring_head = 0; - s.ring_tail = size; + s.ring_tail = s.ring_size; s.rb_va = vaddr + start; s.workload = NULL; s.is_ctx_wa = false; @@ -3176,29 +3134,18 @@ void intel_gvt_update_reg_whitelist(struct intel_vgpu *vgpu) /* skipping the first RING_CTX_SIZE(0x50) dwords */ ret = ip_gma_set(&s, RING_CTX_SIZE); - if (ret) { - i915_gem_object_unpin_map(obj); - goto out; + if (ret == 0) { + ret = command_scan(&s, 0, s.ring_size, 0, s.ring_size); + if (ret) + gvt_err("Scan init ctx error\n"); } - ret = command_scan(&s, 0, size, 0, size); + shmem_unpin_map(engine->default_state, vaddr); if (ret) - gvt_err("Scan init ctx error\n"); - - i915_gem_object_unpin_map(obj); + return; } -out: - if (!ret) - gvt->is_reg_whitelist_updated = true; - - for (id = 0; id < I915_NUM_ENGINES ; id++) { - if (requests[id]) - i915_request_put(requests[id]); - - if (is_ctx_pinned[id]) - intel_context_unpin(s->shadow[id]); - } + gvt->is_reg_whitelist_updated = true; } int intel_gvt_scan_engine_context(struct intel_vgpu_workload *workload) diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c index 158873f269b1..c8dcda6d4f0d 100644 --- a/drivers/gpu/drm/i915/gvt/execlist.c +++ b/drivers/gpu/drm/i915/gvt/execlist.c @@ -522,12 +522,11 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, static void clean_execlist(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask) { - struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; - struct intel_engine_cs *engine; struct intel_vgpu_submission *s = &vgpu->submission; + struct intel_engine_cs *engine; intel_engine_mask_t tmp; - for_each_engine_masked(engine, &dev_priv->gt, engine_mask, 
tmp) { + for_each_engine_masked(engine, vgpu->gvt->gt, engine_mask, tmp) { kfree(s->ring_scan_buffer[engine->id]); s->ring_scan_buffer[engine->id] = NULL; s->ring_scan_buffer_size[engine->id] = 0; @@ -537,11 +536,10 @@ static void clean_execlist(struct intel_vgpu *vgpu, static void reset_execlist(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask) { - struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; struct intel_engine_cs *engine; intel_engine_mask_t tmp; - for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp) + for_each_engine_masked(engine, vgpu->gvt->gt, engine_mask, tmp) init_vgpu_execlist(vgpu, engine); } diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 43f31c2eab14..fc735692f21f 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -412,7 +412,9 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) if (!wa_ctx->indirect_ctx.obj) return; + i915_gem_object_lock(wa_ctx->indirect_ctx.obj, NULL); i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj); + i915_gem_object_unlock(wa_ctx->indirect_ctx.obj); i915_gem_object_put(wa_ctx->indirect_ctx.obj); wa_ctx->indirect_ctx.obj = NULL; @@ -520,6 +522,7 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload) struct intel_gvt *gvt = workload->vgpu->gvt; const int gmadr_bytes = gvt->device_info.gmadr_bytes_in_cmd; struct intel_vgpu_shadow_bb *bb; + struct i915_gem_ww_ctx ww; int ret; list_for_each_entry(bb, &workload->shadow_bb, list) { @@ -544,10 +547,19 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload) * directly */ if (!bb->ppgtt) { - bb->vma = i915_gem_object_ggtt_pin(bb->obj, - NULL, 0, 0, 0); + i915_gem_ww_ctx_init(&ww, false); +retry: + i915_gem_object_lock(bb->obj, &ww); + + bb->vma = i915_gem_object_ggtt_pin_ww(bb->obj, &ww, + NULL, 0, 0, 0); if (IS_ERR(bb->vma)) { ret = PTR_ERR(bb->vma); + if (ret == -EDEADLK) { + ret = i915_gem_ww_ctx_backoff(&ww); + if (!ret) + goto retry; + } goto err; } @@ -561,13 +573,15 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload) 0); if (ret) goto err; - } - /* No one is going to touch shadow bb from now on. */ - i915_gem_object_flush_map(bb->obj); + /* No one is going to touch shadow bb from now on. 
*/ + i915_gem_object_flush_map(bb->obj); + i915_gem_object_unlock(bb->obj); + } } return 0; err: + i915_gem_ww_ctx_fini(&ww); release_shadow_batch_buffer(workload); return ret; } @@ -594,14 +608,29 @@ static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) unsigned char *per_ctx_va = (unsigned char *)wa_ctx->indirect_ctx.shadow_va + wa_ctx->indirect_ctx.size; + struct i915_gem_ww_ctx ww; + int ret; if (wa_ctx->indirect_ctx.size == 0) return 0; - vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL, - 0, CACHELINE_BYTES, 0); - if (IS_ERR(vma)) - return PTR_ERR(vma); + i915_gem_ww_ctx_init(&ww, false); +retry: + i915_gem_object_lock(wa_ctx->indirect_ctx.obj, &ww); + + vma = i915_gem_object_ggtt_pin_ww(wa_ctx->indirect_ctx.obj, &ww, NULL, + 0, CACHELINE_BYTES, 0); + if (IS_ERR(vma)) { + ret = PTR_ERR(vma); + if (ret == -EDEADLK) { + ret = i915_gem_ww_ctx_backoff(&ww); + if (!ret) + goto retry; + } + return ret; + } + + i915_gem_object_unlock(wa_ctx->indirect_ctx.obj); /* FIXME: we are not tracking our pinned VMA leaving it * up to the core to fix up the stray pin_count upon @@ -635,12 +664,14 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload) list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) { if (bb->obj) { + i915_gem_object_lock(bb->obj, NULL); if (bb->va && !IS_ERR(bb->va)) i915_gem_object_unpin_map(bb->obj); if (bb->vma && !IS_ERR(bb->vma)) i915_vma_unpin(bb->vma); + i915_gem_object_unlock(bb->obj); i915_gem_object_put(bb->obj); } list_del(&bb->list); @@ -1015,13 +1046,12 @@ void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask) { struct intel_vgpu_submission *s = &vgpu->submission; - struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; struct intel_engine_cs *engine; struct intel_vgpu_workload *pos, *n; intel_engine_mask_t tmp; /* free the unsubmited workloads in the queues. 
*/ - for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp) { + for_each_engine_masked(engine, vgpu->gvt->gt, engine_mask, tmp) { list_for_each_entry_safe(pos, n, &s->workload_q_head[engine->id], list) { list_del_init(&pos->list); diff --git a/drivers/gpu/drm/msm/adreno/a5xx.xml.h b/drivers/gpu/drm/msm/adreno/a5xx.xml.h index 346cc6ff3a36..7b9fcfe95c04 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx.xml.h +++ b/drivers/gpu/drm/msm/adreno/a5xx.xml.h @@ -2367,6 +2367,8 @@ static inline uint32_t A5XX_VSC_RESOLVE_CNTL_Y(uint32_t val) #define REG_A5XX_UCHE_ADDR_MODE_CNTL 0x00000e80 +#define REG_A5XX_UCHE_MODE_CNTL 0x00000e81 + #define REG_A5XX_UCHE_SVM_CNTL 0x00000e82 #define REG_A5XX_UCHE_WRITE_THRU_BASE_LO 0x00000e87 diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index a5af223eaf50..7e553d3efeb2 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -222,7 +222,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) a5xx_preempt_trigger(gpu); } -static const struct { +static const struct adreno_five_hwcg_regs { u32 offset; u32 value; } a5xx_hwcg[] = { @@ -318,16 +318,124 @@ static const struct { {REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000}, {REG_A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200}, {REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222} +}, a50x_hwcg[] = { + {REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220}, + {REG_A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF}, + {REG_A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080}, + {REG_A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222}, + {REG_A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777}, + {REG_A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777}, + {REG_A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777}, + {REG_A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111}, + {REG_A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111}, + {REG_A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111}, + {REG_A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222}, + {REG_A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_HYST_UCHE, 0x00FFFFF4}, + {REG_A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002}, + {REG_A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222}, + {REG_A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220}, + {REG_A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555}, + {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404}, + {REG_A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044}, + {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002}, + {REG_A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011}, + {REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222}, + {REG_A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222}, + {REG_A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222}, + {REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000}, + {REG_A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004}, + {REG_A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000}, + {REG_A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000}, + {REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000}, + {REG_A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200}, + {REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222}, +}, a512_hwcg[] = { + {REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222}, + {REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220}, + {REG_A5XX_RBBM_CLOCK_CNTL2_SP1, 0x02222220}, + {REG_A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF}, + {REG_A5XX_RBBM_CLOCK_HYST_SP1, 0x0000F3CF}, + {REG_A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080}, 
+ {REG_A5XX_RBBM_CLOCK_DELAY_SP1, 0x00000080}, + {REG_A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL_TP1, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222}, + {REG_A5XX_RBBM_CLOCK_CNTL3_TP1, 0x00002222}, + {REG_A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777}, + {REG_A5XX_RBBM_CLOCK_HYST_TP1, 0x77777777}, + {REG_A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777}, + {REG_A5XX_RBBM_CLOCK_HYST2_TP1, 0x77777777}, + {REG_A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777}, + {REG_A5XX_RBBM_CLOCK_HYST3_TP1, 0x00007777}, + {REG_A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111}, + {REG_A5XX_RBBM_CLOCK_DELAY_TP1, 0x11111111}, + {REG_A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111}, + {REG_A5XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111}, + {REG_A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111}, + {REG_A5XX_RBBM_CLOCK_DELAY3_TP1, 0x00001111}, + {REG_A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222}, + {REG_A5XX_RBBM_CLOCK_HYST_UCHE, 0x00444444}, + {REG_A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002}, + {REG_A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL_RB1, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_RB1, 0x00222222}, + {REG_A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220}, + {REG_A5XX_RBBM_CLOCK_CNTL_CCU1, 0x00022220}, + {REG_A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555}, + {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404}, + {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU1, 0x04040404}, + {REG_A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044}, + {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002}, + {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1, 0x00000002}, + {REG_A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011}, + {REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222}, + {REG_A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222}, + {REG_A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222}, + {REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000}, + {REG_A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004}, + {REG_A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000}, + {REG_A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000}, + {REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000}, + {REG_A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200}, + {REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222}, }; void a5xx_set_hwcg(struct msm_gpu *gpu, bool state) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); - unsigned int i; + const struct adreno_five_hwcg_regs *regs; + unsigned int i, sz; + + if (adreno_is_a508(adreno_gpu)) { + regs = a50x_hwcg; + sz = ARRAY_SIZE(a50x_hwcg); + } else if (adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu)) { + regs = a512_hwcg; + sz = ARRAY_SIZE(a512_hwcg); + } else { + regs = a5xx_hwcg; + sz = ARRAY_SIZE(a5xx_hwcg); + } - for (i = 0; i < ARRAY_SIZE(a5xx_hwcg); i++) - gpu_write(gpu, a5xx_hwcg[i].offset, - state ? a5xx_hwcg[i].value : 0); + for (i = 0; i < sz; i++) + gpu_write(gpu, regs[i].offset, + state ? regs[i].value : 0); if (adreno_is_a540(adreno_gpu)) { gpu_write(gpu, REG_A5XX_RBBM_CLOCK_DELAY_GPMU, state ? 
0x00000770 : 0); @@ -538,11 +646,13 @@ static int a5xx_hw_init(struct msm_gpu *gpu) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); + u32 regbit; int ret; gpu_write(gpu, REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003); - if (adreno_is_a540(adreno_gpu)) + if (adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu) || + adreno_is_a540(adreno_gpu)) gpu_write(gpu, REG_A5XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009); /* Make all blocks contribute to the GPU BUSY perf counter */ @@ -604,29 +714,48 @@ static int a5xx_hw_init(struct msm_gpu *gpu) 0x00100000 + adreno_gpu->gmem - 1); gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_HI, 0x00000000); - if (adreno_is_a510(adreno_gpu)) { + if (adreno_is_a508(adreno_gpu) || adreno_is_a510(adreno_gpu)) { gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x20); - gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x20); + if (adreno_is_a508(adreno_gpu)) + gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x400); + else + gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x20); gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x40000030); gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x20100D0A); - gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, - (0x200 << 11 | 0x200 << 22)); } else { gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x40); if (adreno_is_a530(adreno_gpu)) gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x40); - if (adreno_is_a540(adreno_gpu)) + else gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x400); gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x80000060); gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x40201B16); + } + + if (adreno_is_a508(adreno_gpu)) + gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, + (0x100 << 11 | 0x100 << 22)); + else if (adreno_is_a509(adreno_gpu) || adreno_is_a510(adreno_gpu) || + adreno_is_a512(adreno_gpu)) + gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, + (0x200 << 11 | 0x200 << 22)); + else gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, (0x400 << 11 | 0x300 << 22)); - } if (adreno_gpu->info->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI) gpu_rmw(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0, (1 << 8)); - gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0xc0200100); + /* + * Disable the RB sampler datapath DP2 clock gating optimization + * for 1-SP GPUs, as it is enabled by default. 
+ */ + if (adreno_is_a508(adreno_gpu) || adreno_is_a509(adreno_gpu) || + adreno_is_a512(adreno_gpu)) + gpu_rmw(gpu, REG_A5XX_RB_DBG_ECO_CNTL, 0, (1 << 9)); + + /* Disable UCHE global filter as SP can invalidate/flush independently */ + gpu_write(gpu, REG_A5XX_UCHE_MODE_CNTL, BIT(29)); /* Enable USE_RETENTION_FLOPS */ gpu_write(gpu, REG_A5XX_CP_CHICKEN_DBG, 0x02000000); @@ -653,10 +782,20 @@ static int a5xx_hw_init(struct msm_gpu *gpu) gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F); /* Set the highest bank bit */ - gpu_write(gpu, REG_A5XX_TPL1_MODE_CNTL, 2 << 7); - gpu_write(gpu, REG_A5XX_RB_MODE_CNTL, 2 << 1); if (adreno_is_a540(adreno_gpu)) - gpu_write(gpu, REG_A5XX_UCHE_DBG_ECO_CNTL_2, 2); + regbit = 2; + else + regbit = 1; + + gpu_write(gpu, REG_A5XX_TPL1_MODE_CNTL, regbit << 7); + gpu_write(gpu, REG_A5XX_RB_MODE_CNTL, regbit << 1); + + if (adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu) || + adreno_is_a540(adreno_gpu)) + gpu_write(gpu, REG_A5XX_UCHE_DBG_ECO_CNTL_2, regbit); + + /* Disable All flat shading optimization (ALLFLATOPTDIS) */ + gpu_rmw(gpu, REG_A5XX_VPC_DBG_ECO_CNTL, 0, (1 << 10)); /* Protect registers from the CP */ gpu_write(gpu, REG_A5XX_CP_PROTECT_CNTL, 0x00000007); @@ -688,12 +827,14 @@ static int a5xx_hw_init(struct msm_gpu *gpu) /* VPC */ gpu_write(gpu, REG_A5XX_CP_PROTECT(14), ADRENO_PROTECT_RW(0xE68, 8)); - gpu_write(gpu, REG_A5XX_CP_PROTECT(15), ADRENO_PROTECT_RW(0xE70, 4)); + gpu_write(gpu, REG_A5XX_CP_PROTECT(15), ADRENO_PROTECT_RW(0xE70, 16)); /* UCHE */ gpu_write(gpu, REG_A5XX_CP_PROTECT(16), ADRENO_PROTECT_RW(0xE80, 16)); - if (adreno_is_a530(adreno_gpu) || adreno_is_a510(adreno_gpu)) + if (adreno_is_a508(adreno_gpu) || adreno_is_a509(adreno_gpu) || + adreno_is_a510(adreno_gpu) || adreno_is_a512(adreno_gpu) || + adreno_is_a530(adreno_gpu)) gpu_write(gpu, REG_A5XX_CP_PROTECT(17), ADRENO_PROTECT_RW(0x10000, 0x8000)); @@ -735,7 +876,8 @@ static int a5xx_hw_init(struct msm_gpu *gpu) if (ret) return ret; - if (!adreno_is_a510(adreno_gpu)) + if (!(adreno_is_a508(adreno_gpu) || adreno_is_a509(adreno_gpu) || + adreno_is_a510(adreno_gpu) || adreno_is_a512(adreno_gpu))) a5xx_gpmu_ucode_init(gpu); ret = a5xx_ucode_init(gpu); @@ -1168,7 +1310,8 @@ static int a5xx_pm_resume(struct msm_gpu *gpu) if (ret) return ret; - if (adreno_is_a510(adreno_gpu)) { + /* Adreno 508, 509, 510, 512 needs manual RBBM sus/res control */ + if (!(adreno_is_a530(adreno_gpu) || adreno_is_a540(adreno_gpu))) { /* Halt the sp_input_clk at HM level */ gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0x00000055); a5xx_set_hwcg(gpu, true); @@ -1210,8 +1353,8 @@ static int a5xx_pm_suspend(struct msm_gpu *gpu) u32 mask = 0xf; int i, ret; - /* A510 has 3 XIN ports in VBIF */ - if (adreno_is_a510(adreno_gpu)) + /* A508, A510 have 3 XIN ports in VBIF */ + if (adreno_is_a508(adreno_gpu) || adreno_is_a510(adreno_gpu)) mask = 0x7; /* Clear the VBIF pipe before shutting down */ @@ -1223,10 +1366,12 @@ static int a5xx_pm_suspend(struct msm_gpu *gpu) /* * Reset the VBIF before power collapse to avoid issue with FIFO - * entries + * entries on Adreno A510 and A530 (the others will tend to lock up) */ - gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x003C0000); - gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x00000000); + if (adreno_is_a510(adreno_gpu) || adreno_is_a530(adreno_gpu)) { + gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x003C0000); + gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x00000000); + } ret = msm_gpu_pm_suspend(gpu); if (ret) diff --git 
a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c index f176a6f3eff6..5ccc9da455a1 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_power.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c @@ -298,7 +298,7 @@ int a5xx_power_init(struct msm_gpu *gpu) int ret; /* Not all A5xx chips have a GPMU */ - if (adreno_is_a510(adreno_gpu)) + if (!(adreno_is_a530(adreno_gpu) || adreno_is_a540(adreno_gpu))) return 0; /* Set up the limits management */ @@ -330,7 +330,7 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu) unsigned int *data, *ptr, *cmds; unsigned int cmds_size; - if (adreno_is_a510(adreno_gpu)) + if (!(adreno_is_a530(adreno_gpu) || adreno_is_a540(adreno_gpu))) return; if (a5xx_gpu->gpmu_bo) diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c index 05e0ef58fe32..71c917f909af 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c @@ -245,37 +245,66 @@ static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu) return ret; } +struct a6xx_gmu_oob_bits { + int set, ack, set_new, ack_new; + const char *name; +}; + +/* These are the interrupt / ack bits for each OOB request that are set + * in a6xx_gmu_set_oob and a6xx_clear_oob + */ +static const struct a6xx_gmu_oob_bits a6xx_gmu_oob_bits[] = { + [GMU_OOB_GPU_SET] = { + .name = "GPU_SET", + .set = 16, + .ack = 24, + .set_new = 30, + .ack_new = 31, + }, + + [GMU_OOB_PERFCOUNTER_SET] = { + .name = "PERFCOUNTER", + .set = 17, + .ack = 25, + .set_new = 28, + .ack_new = 30, + }, + + [GMU_OOB_BOOT_SLUMBER] = { + .name = "BOOT_SLUMBER", + .set = 22, + .ack = 30, + }, + + [GMU_OOB_DCVS_SET] = { + .name = "GPU_DCVS", + .set = 23, + .ack = 31, + }, +}; + /* Trigger a OOB (out of band) request to the GMU */ int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state) { int ret; u32 val; int request, ack; - const char *name; - switch (state) { - case GMU_OOB_GPU_SET: - if (gmu->legacy) { - request = GMU_OOB_GPU_SET_REQUEST; - ack = GMU_OOB_GPU_SET_ACK; - } else { - request = GMU_OOB_GPU_SET_REQUEST_NEW; - ack = GMU_OOB_GPU_SET_ACK_NEW; - } - name = "GPU_SET"; - break; - case GMU_OOB_BOOT_SLUMBER: - request = GMU_OOB_BOOT_SLUMBER_REQUEST; - ack = GMU_OOB_BOOT_SLUMBER_ACK; - name = "BOOT_SLUMBER"; - break; - case GMU_OOB_DCVS_SET: - request = GMU_OOB_DCVS_REQUEST; - ack = GMU_OOB_DCVS_ACK; - name = "GPU_DCVS"; - break; - default: + if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits)) return -EINVAL; + + if (gmu->legacy) { + request = a6xx_gmu_oob_bits[state].set; + ack = a6xx_gmu_oob_bits[state].ack; + } else { + request = a6xx_gmu_oob_bits[state].set_new; + ack = a6xx_gmu_oob_bits[state].ack_new; + if (!request || !ack) { + DRM_DEV_ERROR(gmu->dev, + "Invalid non-legacy GMU request %s\n", + a6xx_gmu_oob_bits[state].name); + return -EINVAL; + } } /* Trigger the equested OOB operation */ @@ -288,7 +317,7 @@ int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state) if (ret) DRM_DEV_ERROR(gmu->dev, "Timeout waiting for GMU OOB set %s: 0x%x\n", - name, + a6xx_gmu_oob_bits[state].name, gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO)); /* Clear the acknowledge interrupt */ @@ -300,27 +329,17 @@ int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state) /* Clear a pending OOB state in the GMU */ void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state) { - if (!gmu->legacy) { - WARN_ON(state != GMU_OOB_GPU_SET); - gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, - 1 << GMU_OOB_GPU_SET_CLEAR_NEW); + int bit; + + if (state 
>= ARRAY_SIZE(a6xx_gmu_oob_bits)) return; - } - switch (state) { - case GMU_OOB_GPU_SET: - gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, - 1 << GMU_OOB_GPU_SET_CLEAR); - break; - case GMU_OOB_BOOT_SLUMBER: - gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, - 1 << GMU_OOB_BOOT_SLUMBER_CLEAR); - break; - case GMU_OOB_DCVS_SET: - gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, - 1 << GMU_OOB_DCVS_CLEAR); - break; - } + if (gmu->legacy) + bit = a6xx_gmu_oob_bits[state].ack; + else + bit = a6xx_gmu_oob_bits[state].ack_new; + + gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, bit); } /* Enable CPU control of SPTP power power collapse */ diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h index c6d2bced8e5d..71dfa60070cc 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h @@ -153,44 +153,27 @@ static inline void gmu_write_rscc(struct a6xx_gmu *gmu, u32 offset, u32 value) */ enum a6xx_gmu_oob_state { + /* + * Let the GMU know that a boot or slumber operation has started. The value in + * REG_A6XX_GMU_BOOT_SLUMBER_OPTION lets the GMU know which operation we are + * doing + */ GMU_OOB_BOOT_SLUMBER = 0, + /* + * Let the GMU know to not turn off any GPU registers while the CPU is in a + * critical section + */ GMU_OOB_GPU_SET, + /* + * Set a new power level for the GPU when the CPU is doing frequency scaling + */ GMU_OOB_DCVS_SET, + /* + * Used to keep the GPU on for CPU-side reads of performance counters. + */ + GMU_OOB_PERFCOUNTER_SET, }; -/* These are the interrupt / ack bits for each OOB request that are set - * in a6xx_gmu_set_oob and a6xx_clear_oob - */ - -/* - * Let the GMU know that a boot or slumber operation has started. The value in - * REG_A6XX_GMU_BOOT_SLUMBER_OPTION lets the GMU know which operation we are - * doing - */ -#define GMU_OOB_BOOT_SLUMBER_REQUEST 22 -#define GMU_OOB_BOOT_SLUMBER_ACK 30 -#define GMU_OOB_BOOT_SLUMBER_CLEAR 30 - -/* - * Set a new power level for the GPU when the CPU is doing frequency scaling - */ -#define GMU_OOB_DCVS_REQUEST 23 -#define GMU_OOB_DCVS_ACK 31 -#define GMU_OOB_DCVS_CLEAR 31 - -/* - * Let the GMU know to not turn off any GPU registers while the CPU is in a - * critical section - */ -#define GMU_OOB_GPU_SET_REQUEST 16 -#define GMU_OOB_GPU_SET_ACK 24 -#define GMU_OOB_GPU_SET_CLEAR 24 - -#define GMU_OOB_GPU_SET_REQUEST_NEW 30 -#define GMU_OOB_GPU_SET_ACK_NEW 31 -#define GMU_OOB_GPU_SET_CLEAR_NEW 31 - - void a6xx_hfi_init(struct a6xx_gmu *gmu); int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state); void a6xx_hfi_stop(struct a6xx_gmu *gmu); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index 130661898546..ba8e9d3cf0fe 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -10,6 +10,7 @@ #include <linux/bitfield.h> #include <linux/devfreq.h> +#include <linux/nvmem-consumer.h> #include <linux/soc/qcom/llcc-qcom.h> #define GPU_PAS_ID 13 @@ -1117,7 +1118,7 @@ static void a6xx_llc_slices_init(struct platform_device *pdev, a6xx_gpu->llc_slice = llcc_slice_getd(LLCC_GPU); a6xx_gpu->htw_llc_slice = llcc_slice_getd(LLCC_GPUHTW); - if (IS_ERR(a6xx_gpu->llc_slice) && IS_ERR(a6xx_gpu->htw_llc_slice)) + if (IS_ERR_OR_NULL(a6xx_gpu->llc_slice) && IS_ERR_OR_NULL(a6xx_gpu->htw_llc_slice)) a6xx_gpu->llc_mmio = ERR_PTR(-EINVAL); } @@ -1169,14 +1170,18 @@ static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); struct a6xx_gpu *a6xx_gpu = 
to_a6xx_gpu(adreno_gpu); + static DEFINE_MUTEX(perfcounter_oob); + + mutex_lock(&perfcounter_oob); /* Force the GPU power on so we can read this register */ - a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET); + a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET); *value = gpu_read64(gpu, REG_A6XX_RBBM_PERFCTR_CP_0_LO, REG_A6XX_RBBM_PERFCTR_CP_0_HI); - a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET); + a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET); + mutex_unlock(&perfcounter_oob); return 0; } @@ -1208,6 +1213,10 @@ static void a6xx_destroy(struct msm_gpu *gpu) a6xx_gmu_remove(a6xx_gpu); adreno_gpu_cleanup(adreno_gpu); + + if (a6xx_gpu->opp_table) + dev_pm_opp_put_supported_hw(a6xx_gpu->opp_table); + kfree(a6xx_gpu); } @@ -1240,6 +1249,50 @@ static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu) } static struct msm_gem_address_space * +a6xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + struct iommu_domain *iommu; + struct msm_mmu *mmu; + struct msm_gem_address_space *aspace; + u64 start, size; + + iommu = iommu_domain_alloc(&platform_bus_type); + if (!iommu) + return NULL; + + /* + * This allows GPU to set the bus attributes required to use system + * cache on behalf of the iommu page table walker. + */ + if (!IS_ERR_OR_NULL(a6xx_gpu->htw_llc_slice)) + adreno_set_llc_attributes(iommu); + + mmu = msm_iommu_new(&pdev->dev, iommu); + if (IS_ERR(mmu)) { + iommu_domain_free(iommu); + return ERR_CAST(mmu); + } + + /* + * Use the aperture start or SZ_16M, whichever is greater. This will + * ensure that we align with the allocated pagetable range while still + * allowing room in the lower 32 bits for GMEM and whatnot + */ + start = max_t(u64, SZ_16M, iommu->geometry.aperture_start); + size = iommu->geometry.aperture_end - start + 1; + + aspace = msm_gem_address_space_create(mmu, "gpu", + start & GENMASK_ULL(48, 0), size); + + if (IS_ERR(aspace) && !IS_ERR(mmu)) + mmu->funcs->destroy(mmu); + + return aspace; +} + +static struct msm_gem_address_space * a6xx_create_private_address_space(struct msm_gpu *gpu) { struct msm_mmu *mmu; @@ -1264,6 +1317,78 @@ static uint32_t a6xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) return ring->memptrs->rptr = gpu_read(gpu, REG_A6XX_CP_RB_RPTR); } +static u32 a618_get_speed_bin(u32 fuse) +{ + if (fuse == 0) + return 0; + else if (fuse == 169) + return 1; + else if (fuse == 174) + return 2; + + return UINT_MAX; +} + +static u32 fuse_to_supp_hw(struct device *dev, u32 revn, u32 fuse) +{ + u32 val = UINT_MAX; + + if (revn == 618) + val = a618_get_speed_bin(fuse); + + if (val == UINT_MAX) { + DRM_DEV_ERROR(dev, + "missing support for speed-bin: %u. Some OPPs may not be supported by hardware", + fuse); + return UINT_MAX; + } + + return (1 << val); +} + +static int a6xx_set_supported_hw(struct device *dev, struct a6xx_gpu *a6xx_gpu, + u32 revn) +{ + struct opp_table *opp_table; + struct nvmem_cell *cell; + u32 supp_hw = UINT_MAX; + void *buf; + + cell = nvmem_cell_get(dev, "speed_bin"); + /* + * -ENOENT means that the platform doesn't support speedbin which is + * fine + */ + if (PTR_ERR(cell) == -ENOENT) + return 0; + else if (IS_ERR(cell)) { + DRM_DEV_ERROR(dev, + "failed to read speed-bin. Some OPPs may not be supported by hardware"); + goto done; + } + + buf = nvmem_cell_read(cell, NULL); + if (IS_ERR(buf)) { + nvmem_cell_put(cell); + DRM_DEV_ERROR(dev, + "failed to read speed-bin. 
Some OPPs may not be supported by hardware"); + goto done; + } + + supp_hw = fuse_to_supp_hw(dev, revn, *((u32 *) buf)); + + kfree(buf); + nvmem_cell_put(cell); + +done: + opp_table = dev_pm_opp_set_supported_hw(dev, &supp_hw, 1); + if (IS_ERR(opp_table)) + return PTR_ERR(opp_table); + + a6xx_gpu->opp_table = opp_table; + return 0; +} + static const struct adreno_gpu_funcs funcs = { .base = { .get_param = adreno_get_param, @@ -1285,7 +1410,7 @@ static const struct adreno_gpu_funcs funcs = { .gpu_state_get = a6xx_gpu_state_get, .gpu_state_put = a6xx_gpu_state_put, #endif - .create_address_space = adreno_iommu_create_address_space, + .create_address_space = a6xx_create_address_space, .create_private_address_space = a6xx_create_private_address_space, .get_rptr = a6xx_get_rptr, }, @@ -1325,6 +1450,12 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev) a6xx_llc_slices_init(pdev, a6xx_gpu); + ret = a6xx_set_supported_hw(&pdev->dev, a6xx_gpu, info->revn); + if (ret) { + a6xx_destroy(&(a6xx_gpu->base.base)); + return ERR_PTR(ret); + } + ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1); if (ret) { a6xx_destroy(&(a6xx_gpu->base.base)); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h index e793d329e77b..ce0610c5256f 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h @@ -33,6 +33,8 @@ struct a6xx_gpu { void *llc_slice; void *htw_llc_slice; bool have_mmu500; + + struct opp_table *opp_table; }; #define to_a6xx_gpu(x) container_of(x, struct a6xx_gpu, base) diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c index 12e75ba360f9..600d445fabe8 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_device.c +++ b/drivers/gpu/drm/msm/adreno/adreno_device.c @@ -134,6 +134,41 @@ static const struct adreno_info gpulist[] = { .inactive_period = DRM_MSM_INACTIVE_PERIOD, .init = a4xx_gpu_init, }, { + .rev = ADRENO_REV(5, 0, 8, ANY_ID), + .revn = 508, + .name = "A508", + .fw = { + [ADRENO_FW_PM4] = "a530_pm4.fw", + [ADRENO_FW_PFP] = "a530_pfp.fw", + }, + .gmem = (SZ_128K + SZ_8K), + /* + * Increase inactive period to 250 to avoid bouncing + * the GDSC which appears to make it grumpy + */ + .inactive_period = 250, + .quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE, + .init = a5xx_gpu_init, + .zapfw = "a508_zap.mdt", + }, { + .rev = ADRENO_REV(5, 0, 9, ANY_ID), + .revn = 509, + .name = "A509", + .fw = { + [ADRENO_FW_PM4] = "a530_pm4.fw", + [ADRENO_FW_PFP] = "a530_pfp.fw", + }, + .gmem = (SZ_256K + SZ_16K), + /* + * Increase inactive period to 250 to avoid bouncing + * the GDSC which appears to make it grumpy + */ + .inactive_period = 250, + .quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE, + .init = a5xx_gpu_init, + /* Adreno 509 uses the same ZAP as 512 */ + .zapfw = "a512_zap.mdt", + }, { .rev = ADRENO_REV(5, 1, 0, ANY_ID), .revn = 510, .name = "A510", @@ -149,6 +184,23 @@ static const struct adreno_info gpulist[] = { .inactive_period = 250, .init = a5xx_gpu_init, }, { + .rev = ADRENO_REV(5, 1, 2, ANY_ID), + .revn = 512, + .name = "A512", + .fw = { + [ADRENO_FW_PM4] = "a530_pm4.fw", + [ADRENO_FW_PFP] = "a530_pfp.fw", + }, + .gmem = (SZ_256K + SZ_16K), + /* + * Increase inactive period to 250 to avoid bouncing + * the GDSC which appears to make it grumpy + */ + .inactive_period = 250, + .quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE, + .init = a5xx_gpu_init, + .zapfw = "a512_zap.mdt", + }, { .rev = ADRENO_REV(5, 3, 0, 2), .revn = 530, .name = "A530", @@ -168,7 +220,7 @@ static const struct 
adreno_info gpulist[] = { .init = a5xx_gpu_init, .zapfw = "a530_zap.mdt", }, { - .rev = ADRENO_REV(5, 4, 0, 2), + .rev = ADRENO_REV(5, 4, 0, ANY_ID), .revn = 540, .name = "A540", .fw = { diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index f09175698827..0f184c3dd9d9 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -186,11 +186,18 @@ int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid) return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid); } +void adreno_set_llc_attributes(struct iommu_domain *iommu) +{ + struct io_pgtable_domain_attr pgtbl_cfg; + + pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_ARM_OUTER_WBWA; + iommu_domain_set_attr(iommu, DOMAIN_ATTR_IO_PGTABLE_CFG, &pgtbl_cfg); +} + struct msm_gem_address_space * adreno_iommu_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev) { - struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); struct iommu_domain *iommu; struct msm_mmu *mmu; struct msm_gem_address_space *aspace; @@ -200,20 +207,6 @@ adreno_iommu_create_address_space(struct msm_gpu *gpu, if (!iommu) return NULL; - - if (adreno_is_a6xx(adreno_gpu)) { - struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); - struct io_pgtable_domain_attr pgtbl_cfg; - /* - * This allows GPU to set the bus attributes required to use system - * cache on behalf of the iommu page table walker. - */ - if (!IS_ERR(a6xx_gpu->htw_llc_slice)) { - pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_ARM_OUTER_WBWA; - iommu_domain_set_attr(iommu, DOMAIN_ATTR_IO_PGTABLE_CFG, &pgtbl_cfg); - } - } - mmu = msm_iommu_new(&pdev->dev, iommu); if (IS_ERR(mmu)) { iommu_domain_free(iommu); diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h index b3d9a333591b..ccac275aa7a2 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h @@ -197,11 +197,26 @@ static inline int adreno_is_a430(struct adreno_gpu *gpu) return gpu->revn == 430; } +static inline int adreno_is_a508(struct adreno_gpu *gpu) +{ + return gpu->revn == 508; +} + +static inline int adreno_is_a509(struct adreno_gpu *gpu) +{ + return gpu->revn == 509; +} + static inline int adreno_is_a510(struct adreno_gpu *gpu) { return gpu->revn == 510; } +static inline int adreno_is_a512(struct adreno_gpu *gpu) +{ + return gpu->revn == 512; +} + static inline int adreno_is_a530(struct adreno_gpu *gpu) { return gpu->revn == 530; @@ -212,11 +227,6 @@ static inline int adreno_is_a540(struct adreno_gpu *gpu) return gpu->revn == 540; } -static inline bool adreno_is_a6xx(struct adreno_gpu *gpu) -{ - return ((gpu->revn < 700 && gpu->revn > 599)); -} - static inline int adreno_is_a618(struct adreno_gpu *gpu) { return gpu->revn == 618; @@ -278,6 +288,8 @@ struct msm_gem_address_space * adreno_iommu_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev); +void adreno_set_llc_attributes(struct iommu_domain *iommu); + /* * For a5xx and a6xx targets load the zap shader that is used to pull the GPU * out of secure mode diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c index 5a056c1191df..b2be39b9144e 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c @@ -4,8 +4,10 @@ */ #define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__ +#include <linux/delay.h> #include "dpu_encoder_phys.h" #include "dpu_hw_interrupts.h" +#include "dpu_hw_pingpong.h" 
#include "dpu_core_irq.h" #include "dpu_formats.h" #include "dpu_trace.h" @@ -35,6 +37,8 @@ #define DPU_ENC_WR_PTR_START_TIMEOUT_US 20000 +#define DPU_ENC_MAX_POLL_TIMEOUT_US 2000 + static bool dpu_encoder_phys_cmd_is_master(struct dpu_encoder_phys *phys_enc) { return (phys_enc->split_role != ENC_ROLE_SLAVE) ? true : false; @@ -368,15 +372,12 @@ static void dpu_encoder_phys_cmd_tearcheck_config( tc_cfg.vsync_count = vsync_hz / (mode->vtotal * drm_mode_vrefresh(mode)); - /* enable external TE after kickoff to avoid premature autorefresh */ - tc_cfg.hw_vsync_mode = 0; - /* - * By setting sync_cfg_height to near max register value, we essentially - * disable dpu hw generated TE signal, since hw TE will arrive first. - * Only caveat is if due to error, we hit wrap-around. + * Set the sync_cfg_height to twice vtotal so that if we lose a + * TE event coming from the display TE pin we won't stall immediately */ - tc_cfg.sync_cfg_height = 0xFFF0; + tc_cfg.hw_vsync_mode = 1; + tc_cfg.sync_cfg_height = mode->vtotal * 2; tc_cfg.vsync_init_val = mode->vdisplay; tc_cfg.sync_threshold_start = DEFAULT_TEARCHECK_SYNC_THRESH_START; tc_cfg.sync_threshold_continue = DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE; @@ -580,6 +581,69 @@ static void dpu_encoder_phys_cmd_prepare_for_kickoff( atomic_read(&phys_enc->pending_kickoff_cnt)); } +static bool dpu_encoder_phys_cmd_is_ongoing_pptx( + struct dpu_encoder_phys *phys_enc) +{ + struct dpu_hw_pp_vsync_info info; + + if (!phys_enc) + return false; + + phys_enc->hw_pp->ops.get_vsync_info(phys_enc->hw_pp, &info); + if (info.wr_ptr_line_count > 0 && + info.wr_ptr_line_count < phys_enc->cached_mode.vdisplay) + return true; + + return false; +} + +static void dpu_encoder_phys_cmd_prepare_commit( + struct dpu_encoder_phys *phys_enc) +{ + struct dpu_encoder_phys_cmd *cmd_enc = + to_dpu_encoder_phys_cmd(phys_enc); + int trial = 0; + + if (!phys_enc) + return; + if (!phys_enc->hw_pp) + return; + if (!dpu_encoder_phys_cmd_is_master(phys_enc)) + return; + + /* If autorefresh is already disabled, we have nothing to do */ + if (!phys_enc->hw_pp->ops.get_autorefresh(phys_enc->hw_pp, NULL)) + return; + + /* + * If autorefresh is enabled, disable it and make sure it is safe to + * proceed with current frame commit/push. Sequence fallowed is, + * 1. Disable TE + * 2. Disable autorefresh config + * 4. Poll for frame transfer ongoing to be false + * 5. 
Enable TE back + */ + _dpu_encoder_phys_cmd_connect_te(phys_enc, false); + phys_enc->hw_pp->ops.setup_autorefresh(phys_enc->hw_pp, 0, false); + + do { + udelay(DPU_ENC_MAX_POLL_TIMEOUT_US); + if ((trial * DPU_ENC_MAX_POLL_TIMEOUT_US) + > (KICKOFF_TIMEOUT_MS * USEC_PER_MSEC)) { + DPU_ERROR_CMDENC(cmd_enc, + "disable autorefresh failed\n"); + break; + } + + trial++; + } while (dpu_encoder_phys_cmd_is_ongoing_pptx(phys_enc)); + + _dpu_encoder_phys_cmd_connect_te(phys_enc, true); + + DPU_DEBUG_CMDENC(to_dpu_encoder_phys_cmd(phys_enc), + "disabled autorefresh\n"); +} + static int _dpu_encoder_phys_cmd_wait_for_ctl_start( struct dpu_encoder_phys *phys_enc) { @@ -621,20 +685,15 @@ static int dpu_encoder_phys_cmd_wait_for_tx_complete( static int dpu_encoder_phys_cmd_wait_for_commit_done( struct dpu_encoder_phys *phys_enc) { - int rc = 0; struct dpu_encoder_phys_cmd *cmd_enc; cmd_enc = to_dpu_encoder_phys_cmd(phys_enc); /* only required for master controller */ - if (dpu_encoder_phys_cmd_is_master(phys_enc)) - rc = _dpu_encoder_phys_cmd_wait_for_ctl_start(phys_enc); - - /* required for both controllers */ - if (!rc && cmd_enc->serialize_wait4pp) - dpu_encoder_phys_cmd_prepare_for_kickoff(phys_enc); + if (!dpu_encoder_phys_cmd_is_master(phys_enc)) + return 0; - return rc; + return _dpu_encoder_phys_cmd_wait_for_ctl_start(phys_enc); } static int dpu_encoder_phys_cmd_wait_for_vblank( @@ -681,6 +740,7 @@ static void dpu_encoder_phys_cmd_trigger_start( static void dpu_encoder_phys_cmd_init_ops( struct dpu_encoder_phys_ops *ops) { + ops->prepare_commit = dpu_encoder_phys_cmd_prepare_commit; ops->is_master = dpu_encoder_phys_cmd_is_master; ops->mode_set = dpu_encoder_phys_cmd_mode_set; ops->mode_fixup = dpu_encoder_phys_cmd_mode_fixup; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c index 90393fe9e59c..189f3533525c 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c @@ -12,14 +12,17 @@ #define VIG_MASK \ (BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) |\ - BIT(DPU_SSPP_CSC_10BIT) | BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_QOS_8LVL) |\ + BIT(DPU_SSPP_CSC_10BIT) | BIT(DPU_SSPP_CDP) |\ BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_EXCL_RECT)) #define VIG_SDM845_MASK \ - (VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3)) + (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED3)) #define VIG_SC7180_MASK \ - (VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED4)) + (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED4)) + +#define VIG_SM8250_MASK \ + (VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3LITE)) #define DMA_SDM845_MASK \ (BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) | BIT(DPU_SSPP_QOS_8LVL) |\ @@ -185,7 +188,7 @@ static const struct dpu_caps sm8150_dpu_caps = { static const struct dpu_caps sm8250_dpu_caps = { .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH, .max_mixer_blendstages = 0xb, - .qseed_type = DPU_SSPP_SCALER_QSEED3, /* TODO: qseed3 lite */ + .qseed_type = DPU_SSPP_SCALER_QSEED3LITE, .smart_dma_rev = DPU_SSPP_SMART_DMA_V2, /* TODO: v2.5 */ .ubwc_version = DPU_HW_UBWC_VER_40, .has_src_split = true, @@ -444,6 +447,34 @@ static const struct dpu_sspp_cfg sc7180_sspp[] = { sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1), }; +static const struct dpu_sspp_sub_blks sm8250_vig_sblk_0 = + _VIG_SBLK("0", 5, DPU_SSPP_SCALER_QSEED3LITE); +static const struct dpu_sspp_sub_blks sm8250_vig_sblk_1 = + _VIG_SBLK("1", 6, DPU_SSPP_SCALER_QSEED3LITE); +static const struct dpu_sspp_sub_blks sm8250_vig_sblk_2 = + _VIG_SBLK("2", 
7, DPU_SSPP_SCALER_QSEED3LITE); +static const struct dpu_sspp_sub_blks sm8250_vig_sblk_3 = + _VIG_SBLK("3", 8, DPU_SSPP_SCALER_QSEED3LITE); + +static const struct dpu_sspp_cfg sm8250_sspp[] = { + SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SM8250_MASK, + sm8250_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0), + SSPP_BLK("sspp_1", SSPP_VIG1, 0x6000, VIG_SM8250_MASK, + sm8250_vig_sblk_1, 4, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG1), + SSPP_BLK("sspp_2", SSPP_VIG2, 0x8000, VIG_SM8250_MASK, + sm8250_vig_sblk_2, 8, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG2), + SSPP_BLK("sspp_3", SSPP_VIG3, 0xa000, VIG_SM8250_MASK, + sm8250_vig_sblk_3, 12, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG3), + SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, DMA_SDM845_MASK, + sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0), + SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, DMA_SDM845_MASK, + sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1), + SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000, DMA_CURSOR_SDM845_MASK, + sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR0), + SSPP_BLK("sspp_11", SSPP_DMA3, 0x2a000, DMA_CURSOR_SDM845_MASK, + sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1), +}; + /************************************************************* * MIXER sub blocks config *************************************************************/ @@ -532,23 +563,28 @@ static const struct dpu_dspp_sub_blks sm8150_dspp_sblk = { .len = 0x90, .version = 0x40000}, }; -#define DSPP_BLK(_name, _id, _base, _sblk) \ +#define DSPP_BLK(_name, _id, _base, _mask, _sblk) \ {\ .name = _name, .id = _id, \ .base = _base, .len = 0x1800, \ - .features = DSPP_SC7180_MASK, \ + .features = _mask, \ .sblk = _sblk \ } static const struct dpu_dspp_cfg sc7180_dspp[] = { - DSPP_BLK("dspp_0", DSPP_0, 0x54000, &sc7180_dspp_sblk), + DSPP_BLK("dspp_0", DSPP_0, 0x54000, DSPP_SC7180_MASK, + &sc7180_dspp_sblk), }; static const struct dpu_dspp_cfg sm8150_dspp[] = { - DSPP_BLK("dspp_0", DSPP_0, 0x54000, &sm8150_dspp_sblk), - DSPP_BLK("dspp_1", DSPP_1, 0x56000, &sm8150_dspp_sblk), - DSPP_BLK("dspp_2", DSPP_2, 0x58000, &sm8150_dspp_sblk), - DSPP_BLK("dspp_3", DSPP_3, 0x5a000, &sm8150_dspp_sblk), + DSPP_BLK("dspp_0", DSPP_0, 0x54000, DSPP_SC7180_MASK, + &sm8150_dspp_sblk), + DSPP_BLK("dspp_1", DSPP_1, 0x56000, DSPP_SC7180_MASK, + &sm8150_dspp_sblk), + DSPP_BLK("dspp_2", DSPP_2, 0x58000, DSPP_SC7180_MASK, + &sm8150_dspp_sblk), + DSPP_BLK("dspp_3", DSPP_3, 0x5a000, DSPP_SC7180_MASK, + &sm8150_dspp_sblk), }; /************************************************************* @@ -624,33 +660,33 @@ static const struct dpu_merge_3d_cfg sm8150_merge_3d[] = { /************************************************************* * INTF sub blocks config *************************************************************/ -#define INTF_BLK(_name, _id, _base, _type, _ctrl_id, _features) \ +#define INTF_BLK(_name, _id, _base, _type, _ctrl_id, _progfetch, _features) \ {\ .name = _name, .id = _id, \ .base = _base, .len = 0x280, \ .features = _features, \ .type = _type, \ .controller_id = _ctrl_id, \ - .prog_fetch_lines_worst_case = 24 \ + .prog_fetch_lines_worst_case = _progfetch \ } static const struct dpu_intf_cfg sdm845_intf[] = { - INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, INTF_SDM845_MASK), - INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, INTF_SDM845_MASK), - INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1, INTF_SDM845_MASK), - INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1, INTF_SDM845_MASK), + INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, 24, INTF_SDM845_MASK), + INTF_BLK("intf_1", INTF_1, 0x6A800, 
INTF_DSI, 0, 24, INTF_SDM845_MASK), + INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1, 24, INTF_SDM845_MASK), + INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1, 24, INTF_SDM845_MASK), }; static const struct dpu_intf_cfg sc7180_intf[] = { - INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, INTF_SC7180_MASK), - INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, INTF_SC7180_MASK), + INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, 24, INTF_SC7180_MASK), + INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, 24, INTF_SC7180_MASK), }; static const struct dpu_intf_cfg sm8150_intf[] = { - INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, INTF_SC7180_MASK), - INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, INTF_SC7180_MASK), - INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1, INTF_SC7180_MASK), - INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1, INTF_SC7180_MASK), + INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, 24, INTF_SC7180_MASK), + INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, 24, INTF_SC7180_MASK), + INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1, 24, INTF_SC7180_MASK), + INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1, 24, INTF_SC7180_MASK), }; /************************************************************* @@ -969,9 +1005,8 @@ static void sm8250_cfg_init(struct dpu_mdss_cfg *dpu_cfg) .mdp = sm8250_mdp, .ctl_count = ARRAY_SIZE(sm8150_ctl), .ctl = sm8150_ctl, - /* TODO: sspp qseed version differs from 845 */ - .sspp_count = ARRAY_SIZE(sdm845_sspp), - .sspp = sdm845_sspp, + .sspp_count = ARRAY_SIZE(sm8250_sspp), + .sspp = sm8250_sspp, .mixer_count = ARRAY_SIZE(sm8150_lm), .mixer = sm8150_lm, .dspp_count = ARRAY_SIZE(sm8150_dspp), diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h index eaef99db2d2f..ea4647d21a20 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h @@ -95,6 +95,7 @@ enum { * @DPU_SSPP_SRC Src and fetch part of the pipes, * @DPU_SSPP_SCALER_QSEED2, QSEED2 algorithm support * @DPU_SSPP_SCALER_QSEED3, QSEED3 alogorithm support + * @DPU_SSPP_SCALER_QSEED3LITE, QSEED3 Lite alogorithm support * @DPU_SSPP_SCALER_QSEED4, QSEED4 algorithm support * @DPU_SSPP_SCALER_RGB, RGB Scaler, supported by RGB pipes * @DPU_SSPP_CSC, Support of Color space converion @@ -114,6 +115,7 @@ enum { DPU_SSPP_SRC = 0x1, DPU_SSPP_SCALER_QSEED2, DPU_SSPP_SCALER_QSEED3, + DPU_SSPP_SCALER_QSEED3LITE, DPU_SSPP_SCALER_QSEED4, DPU_SSPP_SCALER_RGB, DPU_SSPP_CSC, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c index bea4ab5c58c5..245a7a62b5c6 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c @@ -23,6 +23,7 @@ #define PP_WR_PTR_IRQ 0x024 #define PP_OUT_LINE_COUNT 0x028 #define PP_LINE_COUNT 0x02C +#define PP_AUTOREFRESH_CONFIG 0x030 #define PP_FBC_MODE 0x034 #define PP_FBC_BUDGET_CTL 0x038 @@ -120,6 +121,29 @@ static int dpu_hw_pp_setup_te_config(struct dpu_hw_pingpong *pp, return 0; } +static void dpu_hw_pp_setup_autorefresh_config(struct dpu_hw_pingpong *pp, + u32 frame_count, bool enable) +{ + DPU_REG_WRITE(&pp->hw, PP_AUTOREFRESH_CONFIG, + enable ? (BIT(31) | frame_count) : 0); +} + +/* + * dpu_hw_pp_get_autorefresh_config - Get autorefresh config from HW + * @pp: DPU pingpong structure + * @frame_count: Used to return the current frame count from hw + * + * Returns: True if autorefresh enabled, false if disabled. 
+ */ +static bool dpu_hw_pp_get_autorefresh_config(struct dpu_hw_pingpong *pp, + u32 *frame_count) +{ + u32 val = DPU_REG_READ(&pp->hw, PP_AUTOREFRESH_CONFIG); + if (frame_count != NULL) + *frame_count = val & 0xffff; + return !!((val & BIT(31)) >> 31); +} + static int dpu_hw_pp_poll_timeout_wr_ptr(struct dpu_hw_pingpong *pp, u32 timeout_us) { @@ -228,6 +252,8 @@ static void _setup_pingpong_ops(struct dpu_hw_pingpong *c, c->ops.enable_tearcheck = dpu_hw_pp_enable_te; c->ops.connect_external_te = dpu_hw_pp_connect_external_te; c->ops.get_vsync_info = dpu_hw_pp_get_vsync_info; + c->ops.setup_autorefresh = dpu_hw_pp_setup_autorefresh_config; + c->ops.get_autorefresh = dpu_hw_pp_get_autorefresh_config; c->ops.poll_timeout_wr_ptr = dpu_hw_pp_poll_timeout_wr_ptr; c->ops.get_line_count = dpu_hw_pp_get_line_count; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h index 6902b9b95c8e..845b9ce80e31 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h @@ -63,6 +63,8 @@ struct dpu_hw_dither_cfg { * @setup_tearcheck : program tear check values * @enable_tearcheck : enables tear check * @get_vsync_info : retries timing info of the panel + * @setup_autorefresh : configure and enable the autorefresh config + * @get_autorefresh : retrieve autorefresh config from hardware * @setup_dither : function to program the dither hw block * @get_line_count: obtain current vertical line counter */ @@ -95,6 +97,18 @@ struct dpu_hw_pingpong_ops { struct dpu_hw_pp_vsync_info *info); /** + * configure and enable the autorefresh config + */ + void (*setup_autorefresh)(struct dpu_hw_pingpong *pp, + u32 frame_count, bool enable); + + /** + * retrieve autorefresh config from hardware + */ + bool (*get_autorefresh)(struct dpu_hw_pingpong *pp, + u32 *frame_count); + + /** * poll until write pointer transmission starts * @Return: 0 on success, -ETIMEDOUT on timeout */ diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c index 2c2ca5335aa8..34d81aa16041 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c @@ -673,6 +673,7 @@ static void _setup_layer_ops(struct dpu_hw_pipe *c, c->ops.setup_multirect = dpu_hw_sspp_setup_multirect; if (test_bit(DPU_SSPP_SCALER_QSEED3, &features) || + test_bit(DPU_SSPP_SCALER_QSEED3LITE, &features) || test_bit(DPU_SSPP_SCALER_QSEED4, &features)) { c->ops.setup_scaler = _dpu_hw_sspp_setup_scaler3; c->ops.get_scaler_ver = _dpu_hw_sspp_get_scaler3_ver; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h index 85b018a9b03c..fdfd4b46e2c6 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h @@ -28,6 +28,7 @@ struct dpu_hw_pipe; #define DPU_SSPP_SCALER ((1UL << DPU_SSPP_SCALER_RGB) | \ (1UL << DPU_SSPP_SCALER_QSEED2) | \ (1UL << DPU_SSPP_SCALER_QSEED3) | \ + (1UL << DPU_SSPP_SCALER_QSEED3LITE) | \ (1UL << DPU_SSPP_SCALER_QSEED4)) /** diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c index 84e9875994a8..f94584c982cd 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c @@ -59,6 +59,19 @@ static u32 dpu_hw_util_log_mask = DPU_DBG_MASK_NONE; #define QSEED3_SEP_LUT_SIZE \ (QSEED3_LUT_SIZE * QSEED3_SEPARABLE_LUTS * sizeof(u32)) +/* DPU_SCALER_QSEED3LITE */ +#define 
QSEED3LITE_COEF_LUT_Y_SEP_BIT 4 +#define QSEED3LITE_COEF_LUT_UV_SEP_BIT 5 +#define QSEED3LITE_COEF_LUT_CTRL 0x4C +#define QSEED3LITE_COEF_LUT_SWAP_BIT 0 +#define QSEED3LITE_DIR_FILTER_WEIGHT 0x60 +#define QSEED3LITE_FILTERS 2 +#define QSEED3LITE_SEPARABLE_LUTS 10 +#define QSEED3LITE_LUT_SIZE 33 +#define QSEED3LITE_SEP_LUT_SIZE \ + (QSEED3LITE_LUT_SIZE * QSEED3LITE_SEPARABLE_LUTS * sizeof(u32)) + + void dpu_reg_write(struct dpu_hw_blk_reg_map *c, u32 reg_off, u32 val, @@ -156,6 +169,57 @@ static void _dpu_hw_setup_scaler3_lut(struct dpu_hw_blk_reg_map *c, } +static void _dpu_hw_setup_scaler3lite_lut(struct dpu_hw_blk_reg_map *c, + struct dpu_hw_scaler3_cfg *scaler3_cfg, u32 offset) +{ + int j, filter; + int config_lut = 0x0; + unsigned long lut_flags; + u32 lut_addr, lut_offset; + u32 *lut[QSEED3LITE_FILTERS] = {NULL, NULL}; + static const uint32_t off_tbl[QSEED3_FILTERS] = { 0x000, 0x200 }; + + DPU_REG_WRITE(c, QSEED3LITE_DIR_FILTER_WEIGHT + offset, scaler3_cfg->dir_weight); + + if (!scaler3_cfg->sep_lut) + return; + + lut_flags = (unsigned long) scaler3_cfg->lut_flag; + if (test_bit(QSEED3_COEF_LUT_Y_SEP_BIT, &lut_flags) && + (scaler3_cfg->y_rgb_sep_lut_idx < QSEED3LITE_SEPARABLE_LUTS) && + (scaler3_cfg->sep_len == QSEED3LITE_SEP_LUT_SIZE)) { + lut[0] = scaler3_cfg->sep_lut + + scaler3_cfg->y_rgb_sep_lut_idx * QSEED3LITE_LUT_SIZE; + config_lut = 1; + } + if (test_bit(QSEED3_COEF_LUT_UV_SEP_BIT, &lut_flags) && + (scaler3_cfg->uv_sep_lut_idx < QSEED3LITE_SEPARABLE_LUTS) && + (scaler3_cfg->sep_len == QSEED3LITE_SEP_LUT_SIZE)) { + lut[1] = scaler3_cfg->sep_lut + + scaler3_cfg->uv_sep_lut_idx * QSEED3LITE_LUT_SIZE; + config_lut = 1; + } + + if (config_lut) { + for (filter = 0; filter < QSEED3LITE_FILTERS; filter++) { + if (!lut[filter]) + continue; + lut_offset = 0; + lut_addr = QSEED3_COEF_LUT + offset + off_tbl[filter]; + for (j = 0; j < QSEED3LITE_LUT_SIZE; j++) { + DPU_REG_WRITE(c, + lut_addr, + (lut[filter])[lut_offset++]); + lut_addr += 4; + } + } + } + + if (test_bit(QSEED3_COEF_LUT_SWAP_BIT, &lut_flags)) + DPU_REG_WRITE(c, QSEED3_COEF_LUT_CTRL + offset, BIT(0)); + +} + static void _dpu_hw_setup_scaler3_de(struct dpu_hw_blk_reg_map *c, struct dpu_hw_scaler3_de_cfg *de_cfg, u32 offset) { @@ -242,9 +306,12 @@ void dpu_hw_setup_scaler3(struct dpu_hw_blk_reg_map *c, op_mode |= BIT(8); } - if (scaler3_cfg->lut_flag) - _dpu_hw_setup_scaler3_lut(c, scaler3_cfg, - scaler_offset); + if (scaler3_cfg->lut_flag) { + if (scaler_version < 0x2004) + _dpu_hw_setup_scaler3_lut(c, scaler3_cfg, scaler_offset); + else + _dpu_hw_setup_scaler3lite_lut(c, scaler3_cfg, scaler_offset); + } if (scaler_version == 0x1002) { phase_init = diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h index 234eb7d65753..ff3cffde84cd 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h @@ -97,6 +97,7 @@ struct dpu_hw_scaler3_de_cfg { * @ cir_lut: pointer to circular filter LUT * @ sep_lut: pointer to separable filter LUT * @ de: detail enhancer configuration + * @ dir_weight: Directional weight */ struct dpu_hw_scaler3_cfg { u32 enable; @@ -137,6 +138,8 @@ struct dpu_hw_scaler3_cfg { * Detail enhancer settings */ struct dpu_hw_scaler3_de_cfg de; + + u32 dir_weight; }; /** diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c index cf867f3f7c36..b757054e1c23 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c @@ -30,7 +30,7 
@@ #define VBIF_XIN_HALT_CTRL0 0x0200 #define VBIF_XIN_HALT_CTRL1 0x0204 #define VBIF_XINL_QOS_RP_REMAP_000 0x0550 -#define VBIF_XINL_QOS_LVL_REMAP_000 0x0590 +#define VBIF_XINL_QOS_LVL_REMAP_000(v) (v < DPU_HW_VER_400 ? 0x570 : 0x0590) static void dpu_hw_clear_errors(struct dpu_hw_vbif *vbif, u32 *pnd_errors, u32 *src_errors) @@ -156,18 +156,19 @@ static void dpu_hw_set_qos_remap(struct dpu_hw_vbif *vbif, u32 xin_id, u32 level, u32 remap_level) { struct dpu_hw_blk_reg_map *c; - u32 reg_val, reg_val_lvl, mask, reg_high, reg_shift; + u32 reg_lvl, reg_val, reg_val_lvl, mask, reg_high, reg_shift; if (!vbif) return; c = &vbif->hw; + reg_lvl = VBIF_XINL_QOS_LVL_REMAP_000(c->hwversion); reg_high = ((xin_id & 0x8) >> 3) * 4 + (level * 8); reg_shift = (xin_id & 0x7) * 4; reg_val = DPU_REG_READ(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high); - reg_val_lvl = DPU_REG_READ(c, VBIF_XINL_QOS_LVL_REMAP_000 + reg_high); + reg_val_lvl = DPU_REG_READ(c, reg_lvl + reg_high); mask = 0x7 << reg_shift; @@ -178,7 +179,7 @@ static void dpu_hw_set_qos_remap(struct dpu_hw_vbif *vbif, reg_val_lvl |= (remap_level << reg_shift) & mask; DPU_REG_WRITE(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high, reg_val); - DPU_REG_WRITE(c, VBIF_XINL_QOS_LVL_REMAP_000 + reg_high, reg_val_lvl); + DPU_REG_WRITE(c, reg_lvl + reg_high, reg_val_lvl); } static void dpu_hw_set_write_gather_en(struct dpu_hw_vbif *vbif, u32 xin_id) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c index 374b0e8471e6..5a8e3e1fc48c 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c @@ -749,7 +749,7 @@ static void _dpu_kms_set_encoder_mode(struct msm_kms *kms, case DRM_MODE_ENCODER_TMDS: info.num_of_h_tiles = 1; break; - }; + } rc = dpu_encoder_setup(encoder->dev, encoder, &info); if (rc) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c index bc0231a50132..f898a8f67b7f 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c @@ -1465,6 +1465,7 @@ static int _dpu_plane_init_debugfs(struct drm_plane *plane) pdpu->debugfs_root, &pdpu->debugfs_src); if (cfg->features & BIT(DPU_SSPP_SCALER_QSEED3) || + cfg->features & BIT(DPU_SSPP_SCALER_QSEED3LITE) || cfg->features & BIT(DPU_SSPP_SCALER_QSEED2) || cfg->features & BIT(DPU_SSPP_SCALER_QSEED4)) { dpu_debugfs_setup_regset32(&pdpu->debugfs_scaler, diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c index df10c1ac7591..94ce62a26daf 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c @@ -177,7 +177,7 @@ static const struct mdp5_cfg_hw msm8x74v2_config = { [3] = INTF_HDMI, }, }, - .max_clk = 200000000, + .max_clk = 320000000, }; static const struct mdp5_cfg_hw apq8084_config = { diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c index 0c8f9f88301f..f5d71b274079 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c @@ -1180,7 +1180,7 @@ static void mdp5_crtc_pp_done_irq(struct mdp_irq *irq, uint32_t irqstatus) struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, pp_done); - complete(&mdp5_crtc->pp_completion); + complete_all(&mdp5_crtc->pp_completion); } static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c index 19b35ae3e927..1c6e1d2b947c 100644 --- 
a/drivers/gpu/drm/msm/dp/dp_aux.c +++ b/drivers/gpu/drm/msm/dp/dp_aux.c @@ -336,7 +336,6 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux, ssize_t ret; int const aux_cmd_native_max = 16; int const aux_cmd_i2c_max = 128; - int const retry_count = 5; struct dp_aux_private *aux = container_of(dp_aux, struct dp_aux_private, dp_aux); @@ -378,12 +377,6 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux, ret = dp_aux_cmd_fifo_tx(aux, msg); if (ret < 0) { - if (aux->native) { - aux->retry_cnt++; - if (!(aux->retry_cnt % retry_count)) - dp_catalog_aux_update_cfg(aux->catalog); - dp_catalog_aux_reset(aux->catalog); - } usleep_range(400, 500); /* at least 400us to next try */ goto unlock_exit; } diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c index 44f0c57798d0..b1a9b1b98f5f 100644 --- a/drivers/gpu/drm/msm/dp/dp_catalog.c +++ b/drivers/gpu/drm/msm/dp/dp_catalog.c @@ -190,6 +190,18 @@ int dp_catalog_aux_clear_hw_interrupts(struct dp_catalog *dp_catalog) return 0; } +/** + * dp_catalog_aux_reset() - reset AUX controller + * + * @dp_catalog: DP catalog structure + * + * return: void + * + * This function resets the AUX controller + * + * NOTE: resetting the AUX controller will also clear any pending HPD related interrupts + * + */ void dp_catalog_aux_reset(struct dp_catalog *dp_catalog) { u32 aux_ctrl; @@ -483,6 +495,18 @@ int dp_catalog_ctrl_set_pattern(struct dp_catalog *dp_catalog, return 0; } +/** + * dp_catalog_ctrl_reset() - reset DP controller + * + * @dp_catalog: DP catalog structure + * + * return: void + * + * This function resets the DP controller + * + * NOTE: resetting the DP controller will also clear any pending HPD related interrupts + * + */ void dp_catalog_ctrl_reset(struct dp_catalog *dp_catalog) { u32 sw_reset; diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c index 36b39c381b3f..1390f3547fde 100644 --- a/drivers/gpu/drm/msm/dp/dp_ctrl.c +++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c @@ -631,7 +631,7 @@ static void _dp_ctrl_calc_tu(struct dp_tu_calc_input *in, tu = kzalloc(sizeof(*tu), GFP_KERNEL); if (!tu) - return + return; dp_panel_update_tu_timings(in, tu); @@ -1158,7 +1158,7 @@ static int dp_ctrl_link_rate_down_shift(struct dp_ctrl_private *ctrl) default: ret = -EINVAL; break; - }; + } if (!ret) DRM_DEBUG_DP("new rate=0x%x\n", ctrl->link->link_params.rate); @@ -1296,7 +1296,6 @@ static int dp_ctrl_setup_main_link(struct dp_ctrl_private *ctrl, * transitioned to PUSH_IDLE. In order to start transmitting * a link training pattern, we have to first do soft reset.
*/ - dp_catalog_ctrl_reset(ctrl->catalog); ret = dp_ctrl_link_train(ctrl, cr, training_step); @@ -1365,7 +1364,7 @@ static int dp_ctrl_enable_stream_clocks(struct dp_ctrl_private *ctrl) return ret; } -int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip) +int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip, bool reset) { struct dp_ctrl_private *ctrl; struct dp_io *dp_io; @@ -1382,6 +1381,9 @@ int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip) ctrl->dp_ctrl.orientation = flip; + if (reset) + dp_catalog_ctrl_reset(ctrl->catalog); + dp_catalog_ctrl_phy_reset(ctrl->catalog); phy_init(phy); dp_catalog_ctrl_enable_irq(ctrl->catalog, true); @@ -1496,7 +1498,6 @@ static int dp_ctrl_link_maintenance(struct dp_ctrl_private *ctrl) int training_step = DP_TRAINING_NONE; dp_ctrl_push_idle(&ctrl->dp_ctrl); - dp_catalog_ctrl_reset(ctrl->catalog); ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock; @@ -1785,14 +1786,14 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl) * Set up transfer unit values and set controller state to send * video. */ + reinit_completion(&ctrl->video_comp); + dp_ctrl_configure_source_params(ctrl); dp_catalog_ctrl_config_msa(ctrl->catalog, ctrl->link->link_params.rate, ctrl->dp_ctrl.pixel_rate, dp_ctrl_use_fixed_nvid(ctrl)); - reinit_completion(&ctrl->video_comp); - dp_ctrl_setup_tr_unit(ctrl); dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO); diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.h b/drivers/gpu/drm/msm/dp/dp_ctrl.h index f60ba93c8678..a836bd358447 100644 --- a/drivers/gpu/drm/msm/dp/dp_ctrl.h +++ b/drivers/gpu/drm/msm/dp/dp_ctrl.h @@ -19,7 +19,7 @@ struct dp_ctrl { u32 pixel_rate; }; -int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip); +int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip, bool reset); void dp_ctrl_host_deinit(struct dp_ctrl *dp_ctrl); int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl); int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl); diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c index 3bc7ed21de28..5a39da6e1eaf 100644 --- a/drivers/gpu/drm/msm/dp/dp_display.c +++ b/drivers/gpu/drm/msm/dp/dp_display.c @@ -350,7 +350,7 @@ end: return rc; } -static void dp_display_host_init(struct dp_display_private *dp) +static void dp_display_host_init(struct dp_display_private *dp, int reset) { bool flip = false; @@ -365,7 +365,7 @@ static void dp_display_host_init(struct dp_display_private *dp) dp_display_set_encoder_mode(dp); dp_power_init(dp->power, flip); - dp_ctrl_host_init(dp->ctrl, flip); + dp_ctrl_host_init(dp->ctrl, flip, reset); dp_aux_init(dp->aux); dp->core_initialized = true; } @@ -403,7 +403,7 @@ static int dp_display_usbpd_configure_cb(struct device *dev) goto end; } - dp_display_host_init(dp); + dp_display_host_init(dp, false); /* * set sink to normal operation mode -- D0 @@ -651,8 +651,8 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data) dp_add_event(dp, EV_DISCONNECT_PENDING_TIMEOUT, 0, DP_TIMEOUT_5_SECOND); /* signal the disconnect event early to ensure proper teardown */ - dp_display_handle_plugged_change(g_dp_display, false); reinit_completion(&dp->audio_comp); + dp_display_handle_plugged_change(g_dp_display, false); dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK | DP_DP_IRQ_HPD_INT_MASK, true); @@ -700,6 +700,13 @@ static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data) return 0; } + if (state == ST_CONNECT_PENDING || state == ST_DISCONNECT_PENDING) { + /* wait until ST_CONNECTED */ + dp_add_event(dp, 
EV_IRQ_HPD_INT, 0, 1); /* delay = 1 */ + mutex_unlock(&dp->event_mutex); + return 0; + } + ret = dp_display_usbpd_attention_cb(&dp->pdev->dev); if (ret == -ECONNRESET) { /* cable unplugged */ dp->core_initialized = false; @@ -890,6 +897,9 @@ static int dp_display_disable(struct dp_display_private *dp, u32 data) /* wait only if audio was enabled */ if (dp_display->audio_enabled) { + /* signal the disconnect event */ + reinit_completion(&dp->audio_comp); + dp_display_handle_plugged_change(dp_display, false); if (!wait_for_completion_timeout(&dp->audio_comp, HZ * 5)) DRM_ERROR("audio comp timeout\n"); @@ -1002,7 +1012,7 @@ int dp_display_get_test_bpp(struct msm_dp *dp) static void dp_display_config_hpd(struct dp_display_private *dp) { - dp_display_host_init(dp); + dp_display_host_init(dp, true); dp_catalog_ctrl_hpd_config(dp->catalog); /* Enable interrupt first time @@ -1256,7 +1266,7 @@ static int dp_pm_resume(struct device *dev) dp->hpd_state = ST_DISCONNECTED; /* turn on dp ctrl/phy */ - dp_display_host_init(dp); + dp_display_host_init(dp, true); dp_catalog_ctrl_hpd_config(dp->catalog); @@ -1439,7 +1449,7 @@ int msm_dp_display_enable(struct msm_dp *dp, struct drm_encoder *encoder) state = dp_display->hpd_state; if (state == ST_DISPLAY_OFF) - dp_display_host_init(dp_display); + dp_display_host_init(dp_display, true); dp_display_enable(dp_display, 0); diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c index d1780bcac8cc..9cc816663668 100644 --- a/drivers/gpu/drm/msm/dp/dp_panel.c +++ b/drivers/gpu/drm/msm/dp/dp_panel.c @@ -409,7 +409,6 @@ int dp_panel_timing_cfg(struct dp_panel *dp_panel) int dp_panel_init_panel_info(struct dp_panel *dp_panel) { - int rc = 0; struct drm_display_mode *drm_mode; drm_mode = &dp_panel->dp_mode.drm_mode; @@ -436,7 +435,7 @@ int dp_panel_init_panel_info(struct dp_panel *dp_panel) min_t(u32, dp_panel->dp_mode.bpp, 30)); DRM_DEBUG_DP("updated bpp = %d\n", dp_panel->dp_mode.bpp); - return rc; + return 0; } struct dp_panel *dp_panel_get(struct dp_panel_in *in) diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c index 1afb7c579dbb..eca86bf448f7 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c @@ -139,7 +139,7 @@ const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs = { .disable = dsi_20nm_phy_disable, .init = msm_dsi_phy_init_common, }, - .io_start = { 0xfd998300, 0xfd9a0300 }, + .io_start = { 0xfd998500, 0xfd9a0500 }, .num_dsi_phy = 2, }; diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c index e4e9bf04b736..de3b802ccd3d 100644 --- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c +++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c @@ -172,9 +172,7 @@ static void dsi_pll_calc_dec_frac(struct dsi_pll_10nm *pll) multiplier = 1 << config->frac_bits; dec_multiple = div_u64(pll_freq * multiplier, divider); - div_u64_rem(dec_multiple, multiplier, &frac); - - dec = div_u64(dec_multiple, multiplier); + dec = div_u64_rem(dec_multiple, multiplier, &frac); if (pll_freq <= 1900000000UL) regs->pll_prop_gain_rate = 8; @@ -306,7 +304,8 @@ static void dsi_pll_commit(struct dsi_pll_10nm *pll) reg->frac_div_start_mid); pll_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1, reg->frac_div_start_high); - pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCKDET_RATE_1, 0x40); + pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCKDET_RATE_1, + reg->pll_lockdet_rate); pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_DELAY, 0x06); 
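Editorial sketch, not part of the patch: the dsi_pll_calc_dec_frac() hunk just above folds a div_u64()/div_u64_rem() pair into a single div_u64_rem() call, since that helper returns the quotient and stores the remainder in one step. A minimal illustration of the same arithmetic, assuming only the math64 helpers; the sample_* names are invented:

#include <linux/math64.h>
#include <linux/types.h>

/* Split a scaled PLL frequency into integer and fractional divider parts. */
static void sample_calc_dec_frac(u64 pll_freq, u32 divider, u32 frac_bits,
				 u32 *dec, u32 *frac)
{
	u32 multiplier = 1U << frac_bits;	/* e.g. 2^18 */
	u64 dec_multiple = div_u64(pll_freq * multiplier, divider);

	/* quotient -> integer part, remainder -> fractional part */
	*dec = div_u64_rem(dec_multiple, multiplier, frac);
}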
pll_write(base + REG_DSI_10nm_PHY_PLL_CMODE, 0x10); pll_write(base + REG_DSI_10nm_PHY_PLL_CLOCK_INVERTERS, @@ -345,6 +344,7 @@ static int dsi_pll_10nm_vco_set_rate(struct clk_hw *hw, unsigned long rate, static int dsi_pll_10nm_lock_status(struct dsi_pll_10nm *pll) { + struct device *dev = &pll->pdev->dev; int rc; u32 status = 0; u32 const delay_us = 100; @@ -357,8 +357,8 @@ static int dsi_pll_10nm_lock_status(struct dsi_pll_10nm *pll) delay_us, timeout_us); if (rc) - pr_err("DSI PLL(%d) lock failed, status=0x%08x\n", - pll->id, status); + DRM_DEV_ERROR(dev, "DSI PLL(%d) lock failed, status=0x%08x\n", + pll->id, status); return rc; } @@ -405,6 +405,7 @@ static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw) { struct msm_dsi_pll *pll = hw_clk_to_pll(hw); struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll); + struct device *dev = &pll_10nm->pdev->dev; int rc; dsi_pll_enable_pll_bias(pll_10nm); @@ -413,7 +414,7 @@ static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw) rc = dsi_pll_10nm_vco_set_rate(hw,pll_10nm->vco_current_rate, 0); if (rc) { - pr_err("vco_set_rate failed, rc=%d\n", rc); + DRM_DEV_ERROR(dev, "vco_set_rate failed, rc=%d\n", rc); return rc; } @@ -430,7 +431,7 @@ static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw) /* Check for PLL lock */ rc = dsi_pll_10nm_lock_status(pll_10nm); if (rc) { - pr_err("PLL(%d) lock failed\n", pll_10nm->id); + DRM_DEV_ERROR(dev, "PLL(%d) lock failed\n", pll_10nm->id); goto error; } @@ -483,6 +484,7 @@ static unsigned long dsi_pll_10nm_vco_recalc_rate(struct clk_hw *hw, { struct msm_dsi_pll *pll = hw_clk_to_pll(hw); struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll); + struct dsi_pll_config *config = &pll_10nm->pll_configuration; void __iomem *base = pll_10nm->mmio; u64 ref_clk = pll_10nm->vco_ref_clk_rate; u64 vco_rate = 0x0; @@ -503,9 +505,8 @@ static unsigned long dsi_pll_10nm_vco_recalc_rate(struct clk_hw *hw, /* * TODO: * 1. Assumes prescaler is disabled - * 2. Multiplier is 2^18. 
it should be 2^(num_of_frac_bits) */ - multiplier = 1 << 18; + multiplier = 1 << config->frac_bits; pll_freq = dec * (ref_clk * 2); tmp64 = (ref_clk * 2 * frac); pll_freq += div_u64(tmp64, multiplier); diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 108c405e03dd..94525ac76d4e 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -788,9 +788,10 @@ static int msm_ioctl_gem_info_iova(struct drm_device *dev, struct drm_file *file, struct drm_gem_object *obj, uint64_t *iova) { + struct msm_drm_private *priv = dev->dev_private; struct msm_file_private *ctx = file->driver_priv; - if (!ctx->aspace) + if (!priv->gpu) return -EINVAL; /* diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index a588b0388d27..f091c1e164fa 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -987,8 +987,7 @@ void msm_gem_free_object(struct drm_gem_object *obj) /* Don't drop the pages for imported dmabuf, as they are not * ours, just free the array we allocated: */ - if (msm_obj->pages) - kvfree(msm_obj->pages); + kvfree(msm_obj->pages); put_iova_vmas(obj); diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index d04c349d8112..5480852bdeda 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -198,6 +198,8 @@ static int submit_lookup_cmds(struct msm_gem_submit *submit, submit->cmd[i].idx = submit_cmd.submit_idx; submit->cmd[i].nr_relocs = submit_cmd.nr_relocs; + userptr = u64_to_user_ptr(submit_cmd.relocs); + sz = array_size(submit_cmd.nr_relocs, sizeof(struct drm_msm_gem_submit_reloc)); /* check for overflow: */ diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h index d8151a89e163..4735251a394d 100644 --- a/drivers/gpu/drm/msm/msm_kms.h +++ b/drivers/gpu/drm/msm/msm_kms.h @@ -157,6 +157,7 @@ struct msm_kms { * from the crtc's pending_timer close to end of the frame: */ struct mutex commit_lock[MAX_CRTCS]; + struct lock_class_key commit_lock_keys[MAX_CRTCS]; unsigned pending_crtc_mask; struct msm_pending_timer pending_timers[MAX_CRTCS]; }; @@ -166,8 +167,11 @@ static inline int msm_kms_init(struct msm_kms *kms, { unsigned i, ret; - for (i = 0; i < ARRAY_SIZE(kms->commit_lock); i++) - mutex_init(&kms->commit_lock[i]); + for (i = 0; i < ARRAY_SIZE(kms->commit_lock); i++) { + lockdep_register_key(&kms->commit_lock_keys[i]); + __mutex_init(&kms->commit_lock[i], "&kms->commit_lock[i]", + &kms->commit_lock_keys[i]); + } kms->funcs = funcs; diff --git a/drivers/gpu/drm/panel/panel-elida-kd35t133.c b/drivers/gpu/drm/panel/panel-elida-kd35t133.c index bc36aa3c1123..fe5ac3ef9018 100644 --- a/drivers/gpu/drm/panel/panel-elida-kd35t133.c +++ b/drivers/gpu/drm/panel/panel-elida-kd35t133.c @@ -265,7 +265,8 @@ static int kd35t133_probe(struct mipi_dsi_device *dsi) dsi->lanes = 1; dsi->format = MIPI_DSI_FMT_RGB888; dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | - MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_EOT_PACKET; + MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_EOT_PACKET | + MIPI_DSI_CLOCK_NON_CONTINUOUS; drm_panel_init(&ctx->panel, &dsi->dev, &kd35t133_funcs, DRM_MODE_CONNECTOR_DSI); diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h index 4a2099cb582e..857d97cdc67c 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h @@ -17,9 +17,20 @@ #define NUM_YUV2YUV_COEFFICIENTS 12 +/* AFBC supports a number of configurable 
modes. Relevant to us is block size + * (16x16 or 32x8), storage modifiers (SPARSE, SPLIT), and the YUV-like + * colourspace transform (YTR). 16x16 SPARSE mode is always used. SPLIT mode + * could be enabled via the hreg_block_split register, but is not currently + * handled. The colourspace transform is implicitly always assumed by the + * decoder, so consumers must use this transform as well. + * + * Failure to match modifiers will cause errors displaying AFBC buffers + * produced by conformant AFBC producers, including Mesa. + */ #define ROCKCHIP_AFBC_MOD \ DRM_FORMAT_MOD_ARM_AFBC( \ AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 | AFBC_FORMAT_MOD_SPARSE \ + | AFBC_FORMAT_MOD_YTR \ ) enum vop_data_format { diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index b65f4b12f986..20a25660b35b 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -959,8 +959,10 @@ static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo, return ret; /* move to the bounce domain */ ret = ttm_bo_handle_move_mem(bo, &hop_mem, false, ctx, NULL); - if (ret) + if (ret) { + ttm_resource_free(bo, &hop_mem); return ret; + } return 0; } @@ -991,18 +993,19 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo, * stop and the driver will be called to make * the second hop. */ -bounce: ret = ttm_bo_mem_space(bo, placement, &mem, ctx); if (ret) return ret; +bounce: ret = ttm_bo_handle_move_mem(bo, &mem, false, ctx, &hop); if (ret == -EMULTIHOP) { ret = ttm_bo_bounce_temp_buffer(bo, &mem, ctx, &hop); if (ret) - return ret; + goto out; /* try and move to final place now. */ goto bounce; } +out: if (ret) ttm_resource_free(bo, &mem); return ret; diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 45d12b0e9a2f..b09bed554f26 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -88,7 +88,7 @@ config WIREGUARD select CRYPTO_CURVE25519_X86 if X86 && 64BIT select ARM_CRYPTO if ARM select ARM64_CRYPTO if ARM64 - select CRYPTO_CHACHA20_NEON if (ARM || ARM64) && KERNEL_MODE_NEON + select CRYPTO_CHACHA20_NEON if ARM || (ARM64 && KERNEL_MODE_NEON) select CRYPTO_POLY1305_NEON if ARM64 && KERNEL_MODE_NEON select CRYPTO_POLY1305_ARM if ARM select CRYPTO_BLAKE2S_ARM if ARM diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c index d9281ae853f8..311d8564d611 100644 --- a/drivers/net/can/dev/dev.c +++ b/drivers/net/can/dev/dev.c @@ -239,6 +239,7 @@ void can_setup(struct net_device *dev) struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max, unsigned int txqs, unsigned int rxqs) { + struct can_ml_priv *can_ml; struct net_device *dev; struct can_priv *priv; int size; @@ -270,7 +271,8 @@ struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max, priv = netdev_priv(dev); priv->dev = dev; - dev->ml_priv = (void *)priv + ALIGN(sizeof_priv, NETDEV_ALIGN); + can_ml = (void *)priv + ALIGN(sizeof_priv, NETDEV_ALIGN); + can_set_ml_priv(dev, can_ml); if (echo_skb_max) { priv->echo_skb_max = echo_skb_max; diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c index a1bd1be09548..30c8d53c9745 100644 --- a/drivers/net/can/slcan.c +++ b/drivers/net/can/slcan.c @@ -516,6 +516,7 @@ static struct slcan *slc_alloc(void) int i; char name[IFNAMSIZ]; struct net_device *dev = NULL; + struct can_ml_priv *can_ml; struct slcan *sl; int size; @@ -538,7 +539,8 @@ static struct slcan *slc_alloc(void) dev->base_addr = i; sl = netdev_priv(dev); - dev->ml_priv = (void *)sl + ALIGN(sizeof(*sl), NETDEV_ALIGN); + can_ml = (void *)sl + 
ALIGN(sizeof(*sl), NETDEV_ALIGN); + can_set_ml_priv(dev, can_ml); /* Initialize channel control data */ sl->magic = SLCAN_MAGIC; diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c index 39ca14b0585d..067705e2850b 100644 --- a/drivers/net/can/vcan.c +++ b/drivers/net/can/vcan.c @@ -153,7 +153,7 @@ static void vcan_setup(struct net_device *dev) dev->addr_len = 0; dev->tx_queue_len = 0; dev->flags = IFF_NOARP; - dev->ml_priv = netdev_priv(dev); + can_set_ml_priv(dev, netdev_priv(dev)); /* set flags according to driver capabilities */ if (echo) diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c index f9a524c5f6d6..8861a7d875e7 100644 --- a/drivers/net/can/vxcan.c +++ b/drivers/net/can/vxcan.c @@ -141,6 +141,8 @@ static const struct net_device_ops vxcan_netdev_ops = { static void vxcan_setup(struct net_device *dev) { + struct can_ml_priv *can_ml; + dev->type = ARPHRD_CAN; dev->mtu = CANFD_MTU; dev->hard_header_len = 0; @@ -149,7 +151,9 @@ static void vxcan_setup(struct net_device *dev) dev->flags = (IFF_NOARP|IFF_ECHO); dev->netdev_ops = &vxcan_netdev_ops; dev->needs_free_netdev = true; - dev->ml_priv = netdev_priv(dev) + ALIGN(sizeof(struct vxcan_priv), NETDEV_ALIGN); + + can_ml = netdev_priv(dev) + ALIGN(sizeof(struct vxcan_priv), NETDEV_ALIGN); + can_set_ml_priv(dev, can_ml); } /* forward declaration for rtnl_create_link() */ diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index ae86ded1e2a1..a162499bcafc 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -543,6 +543,19 @@ static void b53_port_set_mcast_flood(struct b53_device *dev, int port, b53_write16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, mc); } +static void b53_port_set_learning(struct b53_device *dev, int port, + bool learning) +{ + u16 reg; + + b53_read16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, ®); + if (learning) + reg &= ~BIT(port); + else + reg |= BIT(port); + b53_write16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, reg); +} + int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy) { struct b53_device *dev = ds->priv; @@ -557,6 +570,7 @@ int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy) b53_port_set_ucast_flood(dev, port, true); b53_port_set_mcast_flood(dev, port, true); + b53_port_set_learning(dev, port, false); if (dev->ops->irq_enable) ret = dev->ops->irq_enable(dev, port); @@ -691,6 +705,7 @@ static void b53_enable_cpu_port(struct b53_device *dev, int port) b53_port_set_ucast_flood(dev, port, true); b53_port_set_mcast_flood(dev, port, true); + b53_port_set_learning(dev, port, false); } static void b53_enable_mib(struct b53_device *dev) @@ -1953,19 +1968,20 @@ void b53_br_fast_age(struct dsa_switch *ds, int port) } EXPORT_SYMBOL(b53_br_fast_age); -static int b53_br_flags_pre(struct dsa_switch *ds, int port, - struct switchdev_brport_flags flags, - struct netlink_ext_ack *extack) +int b53_br_flags_pre(struct dsa_switch *ds, int port, + struct switchdev_brport_flags flags, + struct netlink_ext_ack *extack) { - if (flags.mask & ~(BR_FLOOD | BR_MCAST_FLOOD)) + if (flags.mask & ~(BR_FLOOD | BR_MCAST_FLOOD | BR_LEARNING)) return -EINVAL; return 0; } +EXPORT_SYMBOL(b53_br_flags_pre); -static int b53_br_flags(struct dsa_switch *ds, int port, - struct switchdev_brport_flags flags, - struct netlink_ext_ack *extack) +int b53_br_flags(struct dsa_switch *ds, int port, + struct switchdev_brport_flags flags, + struct netlink_ext_ack *extack) { if (flags.mask & BR_FLOOD) b53_port_set_ucast_flood(ds->priv, port, @@ 
-1973,17 +1989,22 @@ static int b53_br_flags(struct dsa_switch *ds, int port, if (flags.mask & BR_MCAST_FLOOD) b53_port_set_mcast_flood(ds->priv, port, !!(flags.val & BR_MCAST_FLOOD)); + if (flags.mask & BR_LEARNING) + b53_port_set_learning(ds->priv, port, + !!(flags.val & BR_LEARNING)); return 0; } +EXPORT_SYMBOL(b53_br_flags); -static int b53_set_mrouter(struct dsa_switch *ds, int port, bool mrouter, - struct netlink_ext_ack *extack) +int b53_set_mrouter(struct dsa_switch *ds, int port, bool mrouter, + struct netlink_ext_ack *extack) { b53_port_set_mcast_flood(ds->priv, port, mrouter); return 0; } +EXPORT_SYMBOL(b53_set_mrouter); static bool b53_possible_cpu_port(struct dsa_switch *ds, int port) { diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h index faf983fbca82..8419bb7f4505 100644 --- a/drivers/net/dsa/b53/b53_priv.h +++ b/drivers/net/dsa/b53/b53_priv.h @@ -326,6 +326,14 @@ int b53_br_join(struct dsa_switch *ds, int port, struct net_device *bridge); void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *bridge); void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state); void b53_br_fast_age(struct dsa_switch *ds, int port); +int b53_br_flags_pre(struct dsa_switch *ds, int port, + struct switchdev_brport_flags flags, + struct netlink_ext_ack *extack); +int b53_br_flags(struct dsa_switch *ds, int port, + struct switchdev_brport_flags flags, + struct netlink_ext_ack *extack); +int b53_set_mrouter(struct dsa_switch *ds, int port, bool mrouter, + struct netlink_ext_ack *extack); int b53_setup_devlink_resources(struct dsa_switch *ds); void b53_port_event(struct dsa_switch *ds, int port); void b53_phylink_validate(struct dsa_switch *ds, int port, diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h index c90985c294a2..b2c539a42154 100644 --- a/drivers/net/dsa/b53/b53_regs.h +++ b/drivers/net/dsa/b53/b53_regs.h @@ -115,6 +115,7 @@ #define B53_UC_FLOOD_MASK 0x32 #define B53_MC_FLOOD_MASK 0x34 #define B53_IPMC_FLOOD_MASK 0x36 +#define B53_DIS_LEARNING 0x3c /* * Override Ports 0-7 State on devices with xMII interfaces (8 bit) diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 1857aa9aa84a..5ee8103b8e9c 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -223,23 +223,10 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port, reg &= ~P_TXQ_PSM_VDD(port); core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL); - /* Enable learning */ - reg = core_readl(priv, CORE_DIS_LEARN); - reg &= ~BIT(port); - core_writel(priv, reg, CORE_DIS_LEARN); - /* Enable Broadcom tags for that port if requested */ - if (priv->brcm_tag_mask & BIT(port)) { + if (priv->brcm_tag_mask & BIT(port)) b53_brcm_hdr_setup(ds, port); - /* Disable learning on ASP port */ - if (port == 7) { - reg = core_readl(priv, CORE_DIS_LEARN); - reg |= BIT(port); - core_writel(priv, reg, CORE_DIS_LEARN); - } - } - /* Configure Traffic Class to QoS mapping, allow each priority to map * to a different queue number */ @@ -1117,7 +1104,10 @@ static const struct dsa_switch_ops bcm_sf2_ops = { .set_mac_eee = b53_set_mac_eee, .port_bridge_join = b53_br_join, .port_bridge_leave = b53_br_leave, + .port_pre_bridge_flags = b53_br_flags_pre, + .port_bridge_flags = b53_br_flags, .port_stp_state_set = b53_br_set_stp_state, + .port_set_mrouter = b53_set_mrouter, .port_fast_age = b53_br_fast_age, .port_vlan_filtering = b53_vlan_filtering, .port_vlan_add = b53_vlan_add, diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.c 
b/drivers/net/dsa/sja1105/sja1105_static_config.c index 139b7b4fbd0d..a8efb7fac395 100644 --- a/drivers/net/dsa/sja1105/sja1105_static_config.c +++ b/drivers/net/dsa/sja1105/sja1105_static_config.c @@ -85,7 +85,7 @@ u32 sja1105_crc32(const void *buf, size_t len) /* seed */ crc = ~0; for (i = 0; i < len; i += 4) { - sja1105_unpack((void *)buf + i, &word, 31, 0, 4); + sja1105_unpack(buf + i, &word, 31, 0, 4); crc = crc32_le(crc, (u8 *)&word, 4); } return ~crc; diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c index dd5c8a9038bb..a60ce9030581 100644 --- a/drivers/net/ethernet/atheros/ag71xx.c +++ b/drivers/net/ethernet/atheros/ag71xx.c @@ -223,8 +223,6 @@ #define AG71XX_REG_RX_SM 0x01b0 #define AG71XX_REG_TX_SM 0x01b4 -#define ETH_SWITCH_HEADER_LEN 2 - #define AG71XX_DEFAULT_MSG_ENABLE \ (NETIF_MSG_DRV \ | NETIF_MSG_PROBE \ @@ -933,7 +931,7 @@ static void ag71xx_hw_setup(struct ag71xx *ag) static unsigned int ag71xx_max_frame_len(unsigned int mtu) { - return ETH_SWITCH_HEADER_LEN + ETH_HLEN + VLAN_HLEN + mtu + ETH_FCS_LEN; + return ETH_HLEN + VLAN_HLEN + mtu + ETH_FCS_LEN; } static void ag71xx_hw_set_macaddr(struct ag71xx *ag, unsigned char *mac) diff --git a/drivers/net/ethernet/broadcom/bcm4908_enet.c b/drivers/net/ethernet/broadcom/bcm4908_enet.c index 9be33dc98072..0b70e9e0ddad 100644 --- a/drivers/net/ethernet/broadcom/bcm4908_enet.c +++ b/drivers/net/ethernet/broadcom/bcm4908_enet.c @@ -570,6 +570,7 @@ static int bcm4908_enet_poll(struct napi_struct *napi, int weight) if (len < ETH_ZLEN || (ctl & (DMA_CTL_STATUS_SOP | DMA_CTL_STATUS_EOP)) != (DMA_CTL_STATUS_SOP | DMA_CTL_STATUS_EOP)) { + kfree_skb(slot.skb); enet->netdev->stats.rx_dropped++; break; } @@ -582,6 +583,8 @@ static int bcm4908_enet_poll(struct napi_struct *napi, int weight) enet->netdev->stats.rx_packets++; enet->netdev->stats.rx_bytes += len; + + handled++; } if (handled < weight) { diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index fd8767213165..977f097fc7bf 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -1192,7 +1192,6 @@ static int bcm_enet_stop(struct net_device *dev) kdev = &priv->pdev->dev; netif_stop_queue(dev); - netdev_reset_queue(dev); napi_disable(&priv->napi); if (priv->has_phy) phy_stop(dev->phydev); @@ -1231,6 +1230,9 @@ static int bcm_enet_stop(struct net_device *dev) if (priv->has_phy) phy_disconnect(dev->phydev); + /* reset BQL after forced tx reclaim to prevent kernel panic */ + netdev_reset_queue(dev); + return 0; } @@ -2343,7 +2345,6 @@ static int bcm_enetsw_stop(struct net_device *dev) del_timer_sync(&priv->swphy_poll); netif_stop_queue(dev); - netdev_reset_queue(dev); napi_disable(&priv->napi); del_timer_sync(&priv->rx_timeout); @@ -2371,6 +2372,9 @@ static int bcm_enetsw_stop(struct net_device *dev) free_irq(priv->irq_tx, dev); free_irq(priv->irq_rx, dev); + /* reset BQL after forced tx reclaim to prevent kernel panic */ + netdev_reset_queue(dev); + return 0; } diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index ccfe52a50a66..720dc99bd1fc 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -2670,7 +2670,6 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal, u32 hash; u64 ns; - np = container_of(&portal, struct dpaa_napi_portal, p); dpaa_fq = container_of(fq, struct dpaa_fq, fq_base); 
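Editorial sketch, not part of the patch: the dpaa_eth.c hunk here drops a container_of(&portal, ...) call because container_of() must be handed a pointer to the member embedded in the containing object, not the address of a local pointer variable; the corrected assignment follows just below. A self-contained illustration with invented sample_* types:

#include <linux/kernel.h>
#include <linux/list.h>

struct sample_outer {
	int id;
	struct list_head node;		/* embedded member */
};

static struct sample_outer *sample_outer_from_node(struct list_head *node)
{
	/* Correct: 'node' points at the member embedded in a sample_outer. */
	return container_of(node, struct sample_outer, node);
}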
fd_status = be32_to_cpu(fd->status); fd_format = qm_fd_get_format(fd); @@ -2685,6 +2684,7 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal, percpu_priv = this_cpu_ptr(priv->percpu_priv); percpu_stats = &percpu_priv->stats; + np = &percpu_priv->np; if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal, sched_napi))) return qman_cb_dqrr_stop; diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 1c0e4beb56e7..118a4bd3f877 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -1172,12 +1172,25 @@ static int ibmvnic_open(struct net_device *netdev) struct ibmvnic_adapter *adapter = netdev_priv(netdev); int rc; - /* If device failover is pending, just set device state and return. - * Device operation will be handled by reset routine. + ASSERT_RTNL(); + + /* If device failover is pending or we are about to reset, just set + * device state and return. Device operation will be handled by reset + * routine. + * + * It should be safe to overwrite the adapter->state here. Since + * we hold the rtnl, either the reset has not actually started or + * the rtnl got dropped during the set_link_state() in do_reset(). + * In the former case, no one else is changing the state (again we + * have the rtnl) and in the latter case, do_reset() will detect and + * honor our setting below. */ - if (adapter->failover_pending) { + if (adapter->failover_pending || (test_bit(0, &adapter->resetting))) { + netdev_dbg(netdev, "[S:%d FOP:%d] Resetting, deferring open\n", + adapter->state, adapter->failover_pending); adapter->state = VNIC_OPEN; - return 0; + rc = 0; + goto out; } if (adapter->state != VNIC_CLOSED) { @@ -1196,10 +1209,12 @@ static int ibmvnic_open(struct net_device *netdev) rc = __ibmvnic_open(netdev); out: - /* If open fails due to a pending failover, set device state and - * return. Device operation will be handled by reset routine. + /* If open failed and there is a pending failover or in-progress reset, + * set device state and return. Device operation will be handled by + * reset routine. See also comments above regarding rtnl. */ - if (rc && adapter->failover_pending) { + if (rc && + (adapter->failover_pending || (test_bit(0, &adapter->resetting)))) { adapter->state = VNIC_OPEN; rc = 0; } @@ -1928,6 +1943,14 @@ static int do_reset(struct ibmvnic_adapter *adapter, if (rwi->reset_reason == VNIC_RESET_FAILOVER) adapter->failover_pending = false; + /* read the state and check (again) after getting rtnl */ + reset_state = adapter->state; + + if (reset_state == VNIC_REMOVING || reset_state == VNIC_REMOVED) { + rc = -EBUSY; + goto out; + } + netif_carrier_off(netdev); old_num_rx_queues = adapter->req_rx_queues; @@ -1958,11 +1981,27 @@ static int do_reset(struct ibmvnic_adapter *adapter, if (rc) goto out; + if (adapter->state == VNIC_OPEN) { + /* When we dropped rtnl, ibmvnic_open() got + * it and noticed that we are resetting and + * set the adapter state to OPEN. Update our + * new "target" state, and resume the reset + * from VNIC_CLOSING state. 
+ */ + netdev_dbg(netdev, + "Open changed state from %d, updating.\n", + reset_state); + reset_state = VNIC_OPEN; + adapter->state = VNIC_CLOSING; + } + if (adapter->state != VNIC_CLOSING) { + /* If someone else changed the adapter state + * when we dropped the rtnl, fail the reset + */ rc = -1; goto out; } - adapter->state = VNIC_CLOSED; } } @@ -2106,6 +2145,14 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter, netdev_dbg(adapter->netdev, "Hard resetting driver (%d)\n", rwi->reset_reason); + /* read the state and check (again) after getting rtnl */ + reset_state = adapter->state; + + if (reset_state == VNIC_REMOVING || reset_state == VNIC_REMOVED) { + rc = -EBUSY; + goto out; + } + netif_carrier_off(netdev); adapter->reset_reason = rwi->reset_reason; diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index a8a2b5f683a2..c70dec65a572 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -5083,7 +5083,7 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags) enum i40e_admin_queue_err adq_err; struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; - bool is_reset_needed; + u32 reset_needed = 0; i40e_status status; u32 i, j; @@ -5128,9 +5128,11 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags) flags_complete: changed_flags = orig_flags ^ new_flags; - is_reset_needed = !!(changed_flags & (I40E_FLAG_VEB_STATS_ENABLED | - I40E_FLAG_LEGACY_RX | I40E_FLAG_SOURCE_PRUNING_DISABLED | - I40E_FLAG_DISABLE_FW_LLDP)); + if (changed_flags & I40E_FLAG_DISABLE_FW_LLDP) + reset_needed = I40E_PF_RESET_AND_REBUILD_FLAG; + if (changed_flags & (I40E_FLAG_VEB_STATS_ENABLED | + I40E_FLAG_LEGACY_RX | I40E_FLAG_SOURCE_PRUNING_DISABLED)) + reset_needed = BIT(__I40E_PF_RESET_REQUESTED); /* Before we finalize any flag changes, we need to perform some * checks to ensure that the changes are supported and safe. 
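Editorial sketch, not part of the patch: the i40e_set_priv_flags() hunk above replaces the single is_reset_needed bool with a u32 so that different private-flag changes can request different kinds of reset. A minimal, self-contained version of that pattern; the SAMPLE_* names and the mask parameters are invented for illustration, not the driver's flags:

#include <linux/bits.h>
#include <linux/types.h>

#define SAMPLE_RESET_PF		BIT(0)
#define SAMPLE_RESET_REBUILD	BIT(1)

/* Return a bitmask describing which reset the changed flags require. */
static u32 sample_reset_needed(u32 orig_flags, u32 new_flags,
			       u32 rebuild_mask, u32 pf_reset_mask)
{
	u32 changed = orig_flags ^ new_flags;
	u32 reset = 0;

	if (changed & rebuild_mask)
		reset |= SAMPLE_RESET_REBUILD;
	if (changed & pf_reset_mask)
		reset |= SAMPLE_RESET_PF;

	return reset;	/* 0 means no reset is required */
}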
@@ -5252,7 +5254,7 @@ flags_complete: case I40E_AQ_RC_EEXIST: dev_warn(&pf->pdev->dev, "FW LLDP agent is already running\n"); - is_reset_needed = false; + reset_needed = 0; break; case I40E_AQ_RC_EPERM: dev_warn(&pf->pdev->dev, @@ -5281,8 +5283,8 @@ flags_complete: /* Issue reset to cause things to take effect, as additional bits * are added we will need to create a mask of bits requiring reset */ - if (is_reset_needed) - i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED), true); + if (reset_needed) + i40e_do_reset(pf, reset_needed, true); return 0; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 8bb8eb65add9..353deae139f9 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -2616,7 +2616,7 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf) return; if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state)) return; - if (test_and_set_bit(__I40E_VF_DISABLE, pf->state)) { + if (test_bit(__I40E_VF_DISABLE, pf->state)) { set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state); return; } @@ -2634,7 +2634,6 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf) } } } - clear_bit(__I40E_VF_DISABLE, pf->state); } /** @@ -5937,7 +5936,7 @@ static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid, ch->enabled_tc = !i40e_is_channel_macvlan(ch) && enabled_tc; ch->seid = ctxt.seid; ch->vsi_number = ctxt.vsi_number; - ch->stat_counter_idx = cpu_to_le16(ctxt.info.stat_counter_idx); + ch->stat_counter_idx = le16_to_cpu(ctxt.info.stat_counter_idx); /* copy just the sections touched not the entire info * since not all sections are valid as returned by @@ -7977,8 +7976,8 @@ static inline void i40e_set_cld_element(struct i40e_cloud_filter *filter, struct i40e_aqc_cloud_filters_element_data *cld) { - int i, j; u32 ipa; + int i; memset(cld, 0, sizeof(*cld)); ether_addr_copy(cld->outer_mac, filter->dst_mac); @@ -7989,14 +7988,14 @@ i40e_set_cld_element(struct i40e_cloud_filter *filter, if (filter->n_proto == ETH_P_IPV6) { #define IPV6_MAX_INDEX (ARRAY_SIZE(filter->dst_ipv6) - 1) - for (i = 0, j = 0; i < ARRAY_SIZE(filter->dst_ipv6); - i++, j += 2) { + for (i = 0; i < ARRAY_SIZE(filter->dst_ipv6); i++) { ipa = be32_to_cpu(filter->dst_ipv6[IPV6_MAX_INDEX - i]); - ipa = cpu_to_le32(ipa); - memcpy(&cld->ipaddr.raw_v6.data[j], &ipa, sizeof(ipa)); + + *(__le32 *)&cld->ipaddr.raw_v6.data[i * 2] = cpu_to_le32(ipa); } } else { ipa = be32_to_cpu(filter->dst_ipv4); + memcpy(&cld->ipaddr.v4.data, &ipa, sizeof(ipa)); } @@ -8044,6 +8043,8 @@ int i40e_add_del_cloud_filter(struct i40e_vsi *vsi, if (filter->flags >= ARRAY_SIZE(flag_table)) return I40E_ERR_CONFIG; + memset(&cld_filter, 0, sizeof(cld_filter)); + /* copy element needed to add cloud filter from filter */ i40e_set_cld_element(filter, &cld_filter); @@ -8107,10 +8108,13 @@ int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi, return -EOPNOTSUPP; /* adding filter using src_port/src_ip is not supported at this stage */ - if (filter->src_port || filter->src_ipv4 || + if (filter->src_port || + (filter->src_ipv4 && filter->n_proto != ETH_P_IPV6) || !ipv6_addr_any(&filter->ip.v6.src_ip6)) return -EOPNOTSUPP; + memset(&cld_filter, 0, sizeof(cld_filter)); + /* copy element needed to add cloud filter from filter */ i40e_set_cld_element(filter, &cld_filter.element); @@ -8134,7 +8138,7 @@ int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi, cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT); } - } else if (filter->dst_ipv4 || + } 
else if ((filter->dst_ipv4 && filter->n_proto != ETH_P_IPV6) || !ipv6_addr_any(&filter->ip.v6.dst_ip6)) { cld_filter.element.flags = cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_IP_PORT); @@ -8928,11 +8932,6 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired) dev_dbg(&pf->pdev->dev, "PFR requested\n"); i40e_handle_reset_warning(pf, lock_acquired); - dev_info(&pf->pdev->dev, - pf->flags & I40E_FLAG_DISABLE_FW_LLDP ? - "FW LLDP is disabled\n" : - "FW LLDP is enabled\n"); - } else if (reset_flags & I40E_PF_RESET_AND_REBUILD_FLAG) { /* Request a PF Reset * @@ -8940,6 +8939,10 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired) */ i40e_prep_for_reset(pf); i40e_reset_and_rebuild(pf, true, lock_acquired); + dev_info(&pf->pdev->dev, + pf->flags & I40E_FLAG_DISABLE_FW_LLDP ? + "FW LLDP is disabled\n" : + "FW LLDP is enabled\n"); } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) { int v; @@ -10462,7 +10465,6 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) int old_recovery_mode_bit = test_bit(__I40E_RECOVERY_MODE, pf->state); struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; struct i40e_hw *hw = &pf->hw; - u8 set_fc_aq_fail = 0; i40e_status ret; u32 val; int v; @@ -10605,13 +10607,6 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); - /* make sure our flow control settings are restored */ - ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true); - if (ret) - dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n", - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); - /* Rebuild the VSIs and VEBs that existed before reset. * They are still in our local switch element arrays, so only * need to rebuild the switch model in the HW. 
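Editorial sketch, not part of the patch: the cloud-filter hunks above and the i40e_set_partition_bw_setting() hunk just below each add a memset() so an on-stack command structure is fully zeroed before being filled, keeping stale stack bytes out of padding and untouched fields sent to the firmware. A hedged, self-contained illustration; struct sample_cmd and its fields are invented:

#include <asm/byteorder.h>
#include <linux/bits.h>
#include <linux/string.h>
#include <linux/types.h>

struct sample_cmd {
	__le16 valid_bits;
	u8     max_bw[16];
	u8     reserved[14];
};

/* pf_id is assumed to be below 16 in this sketch. */
static void sample_fill_cmd(struct sample_cmd *cmd, unsigned int pf_id, u8 bw)
{
	memset(cmd, 0, sizeof(*cmd));	/* no stale stack bytes in reserved/padding */
	cmd->valid_bits = cpu_to_le16(BIT(pf_id));
	cmd->max_bw[pf_id] = bw;
}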
@@ -12191,6 +12186,8 @@ i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf) struct i40e_aqc_configure_partition_bw_data bw_data; i40e_status status; + memset(&bw_data, 0, sizeof(bw_data)); + /* Set the valid bit for this PF */ bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id)); bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK; @@ -15198,7 +15195,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) int err; u32 val; u32 i; - u8 set_fc_aq_fail; err = pci_enable_device_mem(pdev); if (err) @@ -15537,24 +15533,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list); - /* Make sure flow control is set according to current settings */ - err = i40e_set_fc(hw, &set_fc_aq_fail, true); - if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_GET) - dev_dbg(&pf->pdev->dev, - "Set fc with err %s aq_err %s on get_phy_cap\n", - i40e_stat_str(hw, err), - i40e_aq_str(hw, hw->aq.asq_last_status)); - if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_SET) - dev_dbg(&pf->pdev->dev, - "Set fc with err %s aq_err %s on set_phy_config\n", - i40e_stat_str(hw, err), - i40e_aq_str(hw, hw->aq.asq_last_status)); - if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_UPDATE) - dev_dbg(&pf->pdev->dev, - "Set fc with err %s aq_err %s on get_link_info\n", - i40e_stat_str(hw, err), - i40e_aq_str(hw, hw->aq.asq_last_status)); - /* if FDIR VSI was set up, start it now */ for (i = 0; i < pf->num_alloc_vsi; i++) { if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { @@ -15611,6 +15589,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) { dev_info(&pdev->dev, "setup of misc vector failed: %d\n", err); + i40e_cloud_filter_exit(pf); + i40e_fdir_teardown(pf); goto err_vsis; } } diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index f6f1af94cca0..627794b31e33 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -1948,7 +1948,7 @@ void i40e_process_skb_fields(struct i40e_ring *rx_ring, skb_record_rx_queue(skb, rx_ring->queue_index); if (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) { - u16 vlan_tag = rx_desc->wb.qword0.lo_dword.l2tag1; + __le16 vlan_tag = rx_desc->wb.qword0.lo_dword.l2tag1; __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), le16_to_cpu(vlan_tag)); @@ -3223,13 +3223,16 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, l4_proto = ip.v4->protocol; } else if (*tx_flags & I40E_TX_FLAGS_IPV6) { + int ret; + tunnel |= I40E_TX_CTX_EXT_IP_IPV6; exthdr = ip.hdr + sizeof(*ip.v6); l4_proto = ip.v6->nexthdr; - if (l4.hdr != exthdr) - ipv6_skip_exthdr(skb, exthdr - skb->data, - &l4_proto, &frag_off); + ret = ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_proto, &frag_off); + if (ret < 0) + return -1; } /* define outer transport */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c index 4f11f7bf75d1..fc32c5019b0f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c @@ -453,7 +453,7 @@ static void i40e_set_rs_bit(struct i40e_ring *xdp_ring) struct i40e_tx_desc *tx_desc; tx_desc = I40E_TX_DESC(xdp_ring, ntu); - tx_desc->cmd_type_offset_bsz |= (I40E_TX_DESC_CMD_RS << I40E_TXD_QW1_CMD_SHIFT); + tx_desc->cmd_type_offset_bsz |= cpu_to_le64(I40E_TX_DESC_CMD_RS << I40E_TXD_QW1_CMD_SHIFT); } /** diff --git a/drivers/net/ethernet/intel/ice/ice.h 
b/drivers/net/ethernet/intel/ice/ice.h index dae8280ce17c..357706444dd5 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -454,9 +454,7 @@ struct ice_pf { struct ice_hw_port_stats stats_prev; struct ice_hw hw; u8 stat_prev_loaded:1; /* has previous stats been loaded */ -#ifdef CONFIG_DCB u16 dcbx_cap; -#endif /* CONFIG_DCB */ u32 tx_timeout_count; unsigned long tx_timeout_last_recovery; u32 tx_timeout_recovery_level; diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c index fcfefad00d1c..468a63f7eff9 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c @@ -134,7 +134,7 @@ ice_dcbnl_getnumtcs(struct net_device *dev, int __always_unused tcid, u8 *num) if (!test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags)) return -EINVAL; - *num = IEEE_8021QAZ_MAX_TCS; + *num = pf->hw.func_caps.common_cap.maxtc; return 0; } @@ -159,6 +159,10 @@ static u8 ice_dcbnl_setdcbx(struct net_device *netdev, u8 mode) struct ice_pf *pf = ice_netdev_to_pf(netdev); struct ice_qos_cfg *qos_cfg; + /* if FW LLDP agent is running, DCBNL not allowed to change mode */ + if (test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) + return ICE_DCB_NO_HW_CHG; + /* No support for LLD_MANAGED modes or CEE+IEEE */ if ((mode & DCB_CAP_DCBX_LLD_MANAGED) || ((mode & DCB_CAP_DCBX_VER_IEEE) && (mode & DCB_CAP_DCBX_VER_CEE)) || diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index 5636c9b23896..2dcfa685b763 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -8,6 +8,7 @@ #include "ice_fltr.h" #include "ice_lib.h" #include "ice_dcb_lib.h" +#include <net/dcbnl.h> struct ice_stats { char stat_string[ETH_GSTRING_LEN]; @@ -1238,6 +1239,9 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags) status = ice_init_pf_dcb(pf, true); if (status) dev_warn(dev, "Fail to init DCB\n"); + + pf->dcbx_cap &= ~DCB_CAP_DCBX_LLD_MANAGED; + pf->dcbx_cap |= DCB_CAP_DCBX_HOST; } else { enum ice_status status; bool dcbx_agent_status; @@ -1280,6 +1284,9 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags) if (status) dev_dbg(dev, "Fail to enable MIB change events\n"); + pf->dcbx_cap &= ~DCB_CAP_DCBX_HOST; + pf->dcbx_cap |= DCB_CAP_DCBX_LLD_MANAGED; + ice_nway_reset(netdev); } } @@ -3322,6 +3329,18 @@ ice_get_channels(struct net_device *dev, struct ethtool_channels *ch) } /** + * ice_get_valid_rss_size - return valid number of RSS queues + * @hw: pointer to the HW structure + * @new_size: requested RSS queues + */ +static int ice_get_valid_rss_size(struct ice_hw *hw, int new_size) +{ + struct ice_hw_common_caps *caps = &hw->func_caps.common_cap; + + return min_t(int, new_size, BIT(caps->rss_table_entry_width)); +} + +/** * ice_vsi_set_dflt_rss_lut - set default RSS LUT with requested RSS size * @vsi: VSI to reconfigure RSS LUT on * @req_rss_size: requested range of queue numbers for hashing @@ -3348,14 +3367,10 @@ static int ice_vsi_set_dflt_rss_lut(struct ice_vsi *vsi, int req_rss_size) return -ENOMEM; /* set RSS LUT parameters */ - if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { + if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) vsi->rss_size = 1; - } else { - struct ice_hw_common_caps *caps = &hw->func_caps.common_cap; - - vsi->rss_size = min_t(int, req_rss_size, - BIT(caps->rss_table_entry_width)); - } + else + vsi->rss_size = ice_get_valid_rss_size(hw, req_rss_size); /* create/set RSS LUT 
*/ ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size); @@ -3434,9 +3449,12 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch) ice_vsi_recfg_qs(vsi, new_rx, new_tx); - if (new_rx && !netif_is_rxfh_configured(dev)) + if (!netif_is_rxfh_configured(dev)) return ice_vsi_set_dflt_rss_lut(vsi, new_rx); + /* Update rss_size due to change in Rx queues */ + vsi->rss_size = ice_get_valid_rss_size(&pf->hw, new_rx); + return 0; } diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index bf5fd812ea0e..1f38a8d0c525 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -1919,6 +1919,29 @@ static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg) } /** + * ice_vc_get_max_frame_size - get max frame size allowed for VF + * @vf: VF used to determine max frame size + * + * Max frame size is determined based on the current port's max frame size and + * whether a port VLAN is configured on this VF. The VF is not aware whether + * it's in a port VLAN so the PF needs to account for this in max frame size + * checks and sending the max frame size to the VF. + */ +static u16 ice_vc_get_max_frame_size(struct ice_vf *vf) +{ + struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx]; + struct ice_port_info *pi = vsi->port_info; + u16 max_frame_size; + + max_frame_size = pi->phy.link_info.max_frame_size; + + if (vf->port_vlan_info) + max_frame_size -= VLAN_HLEN; + + return max_frame_size; +} + +/** * ice_vc_get_vf_res_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer @@ -2000,6 +2023,7 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg) vfres->max_vectors = pf->num_msix_per_vf; vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE; vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE; + vfres->max_mtu = ice_vc_get_max_frame_size(vf); vfres->vsi_res[0].vsi_id = vf->lan_vsi_num; vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV; @@ -2420,7 +2444,7 @@ static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg) } if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) { - bool set_dflt_vsi = !!(info->flags & FLAG_VF_UNICAST_PROMISC); + bool set_dflt_vsi = alluni || allmulti; if (set_dflt_vsi && !ice_is_dflt_vsi_in_use(pf->first_sw)) /* only attempt to set the default forwarding VSI if @@ -2998,6 +3022,8 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) /* copy Rx queue info from VF into VSI */ if (qpi->rxq.ring_len > 0) { + u16 max_frame_size = ice_vc_get_max_frame_size(vf); + num_rxq++; vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr; vsi->rx_rings[i]->count = qpi->rxq.ring_len; @@ -3010,7 +3036,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) } vsi->rx_buf_len = qpi->rxq.databuffer_size; vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len; - if (qpi->rxq.max_pkt_size >= (16 * 1024) || + if (qpi->rxq.max_pkt_size > max_frame_size || qpi->rxq.max_pkt_size < 64) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; @@ -3018,6 +3044,11 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) } vsi->max_frame = qpi->rxq.max_pkt_size; + /* add space for the port VLAN since the VF driver is not + * expected to account for it in the MTU calculation + */ + if (vf->port_vlan_info) + vsi->max_frame += VLAN_HLEN; } /* VF can request to configure less than allocated queues or default diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 0507369bb54d..1767c60056c5 
100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -4699,9 +4699,10 @@ static void mvpp2_irqs_deinit(struct mvpp2_port *port) } } -static bool mvpp22_rss_is_supported(void) +static bool mvpp22_rss_is_supported(struct mvpp2_port *port) { - return queue_mode == MVPP2_QDIST_MULTI_MODE; + return (queue_mode == MVPP2_QDIST_MULTI_MODE) && + !(port->flags & MVPP2_F_LOOPBACK); } static int mvpp2_open(struct net_device *dev) @@ -5513,7 +5514,7 @@ static int mvpp2_ethtool_get_rxnfc(struct net_device *dev, struct mvpp2_port *port = netdev_priv(dev); int ret = 0, i, loc = 0; - if (!mvpp22_rss_is_supported()) + if (!mvpp22_rss_is_supported(port)) return -EOPNOTSUPP; switch (info->cmd) { @@ -5548,7 +5549,7 @@ static int mvpp2_ethtool_set_rxnfc(struct net_device *dev, struct mvpp2_port *port = netdev_priv(dev); int ret = 0; - if (!mvpp22_rss_is_supported()) + if (!mvpp22_rss_is_supported(port)) return -EOPNOTSUPP; switch (info->cmd) { @@ -5569,7 +5570,9 @@ static int mvpp2_ethtool_set_rxnfc(struct net_device *dev, static u32 mvpp2_ethtool_get_rxfh_indir_size(struct net_device *dev) { - return mvpp22_rss_is_supported() ? MVPP22_RSS_TABLE_ENTRIES : 0; + struct mvpp2_port *port = netdev_priv(dev); + + return mvpp22_rss_is_supported(port) ? MVPP22_RSS_TABLE_ENTRIES : 0; } static int mvpp2_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, @@ -5578,7 +5581,7 @@ static int mvpp2_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, struct mvpp2_port *port = netdev_priv(dev); int ret = 0; - if (!mvpp22_rss_is_supported()) + if (!mvpp22_rss_is_supported(port)) return -EOPNOTSUPP; if (indir) @@ -5596,7 +5599,7 @@ static int mvpp2_ethtool_set_rxfh(struct net_device *dev, const u32 *indir, struct mvpp2_port *port = netdev_priv(dev); int ret = 0; - if (!mvpp22_rss_is_supported()) + if (!mvpp22_rss_is_supported(port)) return -EOPNOTSUPP; if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32) @@ -5617,7 +5620,7 @@ static int mvpp2_ethtool_get_rxfh_context(struct net_device *dev, u32 *indir, struct mvpp2_port *port = netdev_priv(dev); int ret = 0; - if (!mvpp22_rss_is_supported()) + if (!mvpp22_rss_is_supported(port)) return -EOPNOTSUPP; if (rss_context >= MVPP22_N_RSS_TABLES) return -EINVAL; @@ -5639,7 +5642,7 @@ static int mvpp2_ethtool_set_rxfh_context(struct net_device *dev, struct mvpp2_port *port = netdev_priv(dev); int ret; - if (!mvpp22_rss_is_supported()) + if (!mvpp22_rss_is_supported(port)) return -EOPNOTSUPP; if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32) @@ -5956,7 +5959,7 @@ static int mvpp2_port_init(struct mvpp2_port *port) mvpp2_cls_oversize_rxq_set(port); mvpp2_cls_port_config(port); - if (mvpp22_rss_is_supported()) + if (mvpp22_rss_is_supported(port)) mvpp22_port_rss_init(port); /* Provide an initial Rx packet size */ @@ -6861,7 +6864,7 @@ static int mvpp2_port_probe(struct platform_device *pdev, dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO | NETIF_F_HW_VLAN_CTAG_FILTER; - if (mvpp22_rss_is_supported()) { + if (mvpp22_rss_is_supported(port)) { dev->hw_features |= NETIF_F_RXHASH; dev->features |= NETIF_F_NTUPLE; } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c index 094124b695dc..aa2ca8780b9c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c @@ -473,7 +473,7 @@ static ssize_t rvu_dbg_qsize_write(struct file 
*filp, u16 pcifunc; int ret, lf; - cmd_buf = memdup_user(buffer, count); + cmd_buf = memdup_user(buffer, count + 1); if (IS_ERR(cmd_buf)) return -ENOMEM; diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index ebe1406c6e64..dbec8e187a68 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -4806,12 +4806,11 @@ static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port, if (!is_valid_ether_addr(dev->dev_addr)) { struct sockaddr sa = { AF_UNSPEC }; - netdev_warn(dev, - "Invalid MAC address, defaulting to random\n"); + dev_warn(&hw->pdev->dev, "Invalid MAC address, defaulting to random\n"); eth_hw_addr_random(dev); memcpy(sa.sa_data, dev->dev_addr, ETH_ALEN); if (sky2_set_mac_address(dev, &sa)) - netdev_warn(dev, "Failed to set MAC address.\n"); + dev_warn(&hw->pdev->dev, "Failed to set MAC address.\n"); } return dev; diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 394f43add85c..a99e71bc7b3c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -4986,6 +4986,7 @@ static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule if (!fs_rule->mirr_mbox) { mlx4_err(dev, "rule mirroring mailbox is null\n"); + mlx4_free_cmd_mailbox(dev, mailbox); return -EINVAL; } memcpy(mailbox->buf, fs_rule->mirr_mbox, fs_rule->mirr_mbox_size); diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 0a20dae32184..f704da3f214c 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -2285,14 +2285,14 @@ static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp) static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp) { - RTL_W8(tp, MaxTxPacketSize, 0x3f); + RTL_W8(tp, MaxTxPacketSize, 0x24); RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); RTL_W8(tp, Config4, RTL_R8(tp, Config4) | 0x01); } static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp) { - RTL_W8(tp, MaxTxPacketSize, 0x0c); + RTL_W8(tp, MaxTxPacketSize, 0x3f); RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~0x01); } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c index b7a0c57dfbfb..d23be45a64e5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c @@ -218,6 +218,7 @@ static int visconti_eth_dwmac_probe(struct platform_device *pdev) goto remove_config; } + spin_lock_init(&dwmac->lock); dwmac->reg = stmmac_res.addr; plat_dat->bsp_priv = dwmac; plat_dat->fix_mac_speed = visconti_eth_fix_mac_speed; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c index 56985542e202..44bb133c3000 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c @@ -316,6 +316,32 @@ static int tc_setup_cbs(struct stmmac_priv *priv, if (!priv->dma_cap.av) return -EOPNOTSUPP; + /* Port Transmit Rate and Speed Divider */ + switch (priv->speed) { + case SPEED_10000: + ptr = 32; + speed_div = 10000000; + break; + case SPEED_5000: + ptr = 32; + speed_div = 5000000; + break; + case SPEED_2500: + ptr = 8; + speed_div = 2500000; + break; + case SPEED_1000: + ptr = 8; + speed_div = 1000000; + break; + 
case SPEED_100: + ptr = 4; + speed_div = 100000; + break; + default: + return -EOPNOTSUPP; + } + mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use; if (mode_to_use == MTL_QUEUE_DCB && qopt->enable) { ret = stmmac_dma_qmode(priv, priv->ioaddr, queue, MTL_QUEUE_AVB); @@ -332,10 +358,6 @@ static int tc_setup_cbs(struct stmmac_priv *priv, priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB; } - /* Port Transmit Rate and Speed Divider */ - ptr = (priv->speed == SPEED_100) ? 4 : 8; - speed_div = (priv->speed == SPEED_100) ? 100000 : 1000000; - /* Final adjustments for HW */ value = div_s64(qopt->idleslope * 1024ll * ptr, speed_div); priv->plat->tx_queues_cfg[queue].idle_slope = value & GENMASK(31, 0); diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index 9a70f05baf6e..39c00f050fbd 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -543,7 +543,6 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev, if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) && mtu < ntohs(iph->tot_len)) { netdev_dbg(dev, "packet too big, fragmentation needed\n"); - memset(IPCB(skb), 0, sizeof(*IPCB(skb))); icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); goto err_rt; diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c index 3e431737c1ba..a00a667454a9 100644 --- a/drivers/net/phy/icplus.c +++ b/drivers/net/phy/icplus.c @@ -239,7 +239,7 @@ static int ip101a_g_config_intr_pin(struct phy_device *phydev) oldpage = phy_select_page(phydev, IP101G_DEFAULT_PAGE); if (oldpage < 0) - return oldpage; + goto out; /* configure the RXER/INTR_32 pin of the 32-pin IP101GR if needed: */ switch (priv->sel_intr32) { @@ -314,7 +314,7 @@ static int ip101a_g_read_status(struct phy_device *phydev) oldpage = phy_select_page(phydev, IP101G_DEFAULT_PAGE); if (oldpage < 0) - return oldpage; + goto out; ret = __phy_read(phydev, IP10XX_SPEC_CTRL_STATUS); if (ret < 0) @@ -349,7 +349,8 @@ out: static int ip101a_g_config_mdix(struct phy_device *phydev) { u16 ctrl = 0, ctrl2 = 0; - int oldpage, ret; + int oldpage; + int ret = 0; switch (phydev->mdix_ctrl) { case ETH_TP_MDI: @@ -367,7 +368,7 @@ static int ip101a_g_config_mdix(struct phy_device *phydev) oldpage = phy_select_page(phydev, IP101G_DEFAULT_PAGE); if (oldpage < 0) - return oldpage; + goto out; ret = __phy_modify(phydev, IP10XX_SPEC_CTRL_STATUS, IP101A_G_AUTO_MDIX_DIS, ctrl); diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 7ec6f70d6a82..a14a00328fa3 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -1303,6 +1303,7 @@ static struct phy_driver ksphy_driver[] = { .driver_data = &ksz8081_type, .probe = kszphy_probe, .config_init = ksz8081_config_init, + .soft_reset = genphy_soft_reset, .config_intr = kszphy_config_intr, .handle_interrupt = kszphy_handle_interrupt, .get_sset_count = kszphy_get_sset_count, diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 6c3d8c2abd38..17a050521b86 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1318,6 +1318,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x19d2, 0x1255, 4)}, {QMI_FIXED_INTF(0x19d2, 0x1256, 4)}, {QMI_FIXED_INTF(0x19d2, 0x1270, 5)}, /* ZTE MF667 */ + {QMI_FIXED_INTF(0x19d2, 0x1275, 3)}, /* ZTE P685M */ {QMI_FIXED_INTF(0x19d2, 0x1401, 2)}, {QMI_FIXED_INTF(0x19d2, 0x1402, 2)}, /* ZTE MF60 */ {QMI_FIXED_INTF(0x19d2, 0x1424, 2)}, diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 2d7cc63bef89..b246817f3405 100644 --- 
a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -2632,21 +2632,24 @@ static inline u8 rtl8152_get_speed(struct r8152 *tp) return ocp_read_byte(tp, MCU_TYPE_PLA, PLA_PHYSTATUS); } -static void rtl_set_eee_plus(struct r8152 *tp) +static void rtl_eee_plus_en(struct r8152 *tp, bool enable) { u32 ocp_data; - u8 speed; - speed = rtl8152_get_speed(tp); - if (speed & _10bps) { - ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR); + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR); + if (enable) ocp_data |= EEEP_CR_EEEP_TX; - ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR, ocp_data); - } else { - ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR); + else ocp_data &= ~EEEP_CR_EEEP_TX; - ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR, ocp_data); - } + ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR, ocp_data); +} + +static void rtl_set_eee_plus(struct r8152 *tp) +{ + if (rtl8152_get_speed(tp) & _10bps) + rtl_eee_plus_en(tp, true); + else + rtl_eee_plus_en(tp, false); } static void rxdy_gated_en(struct r8152 *tp, bool enable) @@ -3150,10 +3153,22 @@ static void r8153b_ups_flags(struct r8152 *tp) ocp_write_dword(tp, MCU_TYPE_USB, USB_UPS_FLAGS, ups_flags); } -static void r8153b_green_en(struct r8152 *tp, bool enable) +static void rtl_green_en(struct r8152 *tp, bool enable) { u16 data; + data = sram_read(tp, SRAM_GREEN_CFG); + if (enable) + data |= GREEN_ETH_EN; + else + data &= ~GREEN_ETH_EN; + sram_write(tp, SRAM_GREEN_CFG, data); + + tp->ups_info.green = enable; +} + +static void r8153b_green_en(struct r8152 *tp, bool enable) +{ if (enable) { sram_write(tp, 0x8045, 0); /* 10M abiq&ldvbias */ sram_write(tp, 0x804d, 0x1222); /* 100M short abiq&ldvbias */ @@ -3164,11 +3179,7 @@ static void r8153b_green_en(struct r8152 *tp, bool enable) sram_write(tp, 0x805d, 0x2444); /* 1000M short abiq&ldvbias */ } - data = sram_read(tp, SRAM_GREEN_CFG); - data |= GREEN_ETH_EN; - sram_write(tp, SRAM_GREEN_CFG, data); - - tp->ups_info.green = enable; + rtl_green_en(tp, true); } static u16 r8153_phy_status(struct r8152 *tp, u16 desired) @@ -3360,7 +3371,7 @@ static void rtl8153b_runtime_enable(struct r8152 *tp, bool enable) r8153b_ups_en(tp, false); r8153_queue_wake(tp, false); rtl_runtime_suspend_enable(tp, false); - if (tp->udev->speed != USB_SPEED_HIGH) + if (tp->udev->speed >= USB_SPEED_SUPER) r8153b_u1u2en(tp, true); } } @@ -5056,7 +5067,7 @@ static void rtl8153b_up(struct r8152 *tp) r8153_aldps_en(tp, true); - if (tp->udev->speed != USB_SPEED_HIGH) + if (tp->udev->speed >= USB_SPEED_SUPER) r8153b_u1u2en(tp, true); } @@ -5572,8 +5583,9 @@ static void r8153b_init(struct r8152 *tp) ocp_data |= POLL_LINK_CHG; ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, ocp_data); - if (tp->udev->speed != USB_SPEED_HIGH) + if (tp->udev->speed >= USB_SPEED_SUPER) r8153b_u1u2en(tp, true); + usb_enable_lpm(tp->udev); /* MAC clock speed down */ @@ -5756,6 +5768,9 @@ static int rtl8152_runtime_suspend(struct r8152 *tp) struct net_device *netdev = tp->netdev; int ret = 0; + if (!tp->rtl_ops.autosuspend_en) + return -EBUSY; + set_bit(SELECTIVE_SUSPEND, &tp->flags); smp_mb__after_atomic(); @@ -6155,6 +6170,11 @@ rtl_ethtool_get_eee(struct net_device *net, struct ethtool_eee *edata) struct r8152 *tp = netdev_priv(net); int ret; + if (!tp->rtl_ops.eee_get) { + ret = -EOPNOTSUPP; + goto out; + } + ret = usb_autopm_get_interface(tp->intf); if (ret < 0) goto out; @@ -6177,6 +6197,11 @@ rtl_ethtool_set_eee(struct net_device *net, struct ethtool_eee *edata) struct r8152 *tp = netdev_priv(net); int ret; + if 
(!tp->rtl_ops.eee_set) { + ret = -EOPNOTSUPP; + goto out; + } + ret = usb_autopm_get_interface(tp->intf); if (ret < 0) goto out; @@ -6576,7 +6601,7 @@ static int rtl_ops_init(struct r8152 *tp) default: ret = -ENODEV; - netif_err(tp, probe, tp->netdev, "Unknown Device\n"); + dev_err(&tp->intf->dev, "Unknown Device\n"); break; } @@ -6833,7 +6858,7 @@ static int rtl8152_probe(struct usb_interface *intf, ret = register_netdev(netdev); if (ret != 0) { - netif_err(tp, probe, netdev, "couldn't register the device\n"); + dev_err(&intf->dev, "couldn't register the device\n"); goto out1; } diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index ba8e63792549..82e520d2cb12 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -729,6 +729,7 @@ static struct sk_buff *receive_small(struct net_device *dev, fallthrough; case XDP_ABORTED: trace_xdp_exception(vi->dev, xdp_prog, act); + goto err_xdp; case XDP_DROP: goto err_xdp; } diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 3929e437382b..666dd201c3d5 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -4721,7 +4721,6 @@ static void vxlan_destroy_tunnels(struct net *net, struct list_head *head) struct vxlan_net *vn = net_generic(net, vxlan_net_id); struct vxlan_dev *vxlan, *next; struct net_device *dev, *aux; - unsigned int h; for_each_netdev_safe(net, dev, aux) if (dev->rtnl_link_ops == &vxlan_link_ops) @@ -4735,14 +4734,13 @@ static void vxlan_destroy_tunnels(struct net *net, struct list_head *head) unregister_netdevice_queue(vxlan->dev, head); } - for (h = 0; h < PORT_HASH_SIZE; ++h) - WARN_ON_ONCE(!hlist_empty(&vn->sock_list[h])); } static void __net_exit vxlan_exit_batch_net(struct list_head *net_list) { struct net *net; LIST_HEAD(list); + unsigned int h; rtnl_lock(); list_for_each_entry(net, net_list, exit_list) { @@ -4755,6 +4753,13 @@ static void __net_exit vxlan_exit_batch_net(struct list_head *net_list) unregister_netdevice_many(&list); rtnl_unlock(); + + list_for_each_entry(net, net_list, exit_list) { + struct vxlan_net *vn = net_generic(net, vxlan_net_id); + + for (h = 0; h < PORT_HASH_SIZE; ++h) + WARN_ON_ONCE(!hlist_empty(&vn->sock_list[h])); + } } static struct pernet_operations vxlan_net_ops = { diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c index a3ed49cd95c3..551ddaaaf540 100644 --- a/drivers/net/wireguard/device.c +++ b/drivers/net/wireguard/device.c @@ -138,7 +138,7 @@ static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev) else if (skb->protocol == htons(ETH_P_IPV6)) net_dbg_ratelimited("%s: No peer has allowed IPs matching %pI6\n", dev->name, &ipv6_hdr(skb)->daddr); - goto err; + goto err_icmp; } family = READ_ONCE(peer->endpoint.addr.sa_family); @@ -157,7 +157,7 @@ static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev) } else { struct sk_buff *segs = skb_gso_segment(skb, 0); - if (unlikely(IS_ERR(segs))) { + if (IS_ERR(segs)) { ret = PTR_ERR(segs); goto err_peer; } @@ -201,12 +201,13 @@ static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev) err_peer: wg_peer_put(peer); -err: - ++dev->stats.tx_errors; +err_icmp: if (skb->protocol == htons(ETH_P_IP)) icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0); else if (skb->protocol == htons(ETH_P_IPV6)) icmpv6_ndo_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0); +err: + ++dev->stats.tx_errors; kfree_skb(skb); return ret; } @@ -234,8 +235,8 @@ static void wg_destruct(struct net_device *dev) destroy_workqueue(wg->handshake_receive_wq); 
destroy_workqueue(wg->handshake_send_wq); destroy_workqueue(wg->packet_crypt_wq); - wg_packet_queue_free(&wg->decrypt_queue, true); - wg_packet_queue_free(&wg->encrypt_queue, true); + wg_packet_queue_free(&wg->decrypt_queue); + wg_packet_queue_free(&wg->encrypt_queue); rcu_barrier(); /* Wait for all the peers to be actually freed. */ wg_ratelimiter_uninit(); memzero_explicit(&wg->static_identity, sizeof(wg->static_identity)); @@ -337,12 +338,12 @@ static int wg_newlink(struct net *src_net, struct net_device *dev, goto err_destroy_handshake_send; ret = wg_packet_queue_init(&wg->encrypt_queue, wg_packet_encrypt_worker, - true, MAX_QUEUED_PACKETS); + MAX_QUEUED_PACKETS); if (ret < 0) goto err_destroy_packet_crypt; ret = wg_packet_queue_init(&wg->decrypt_queue, wg_packet_decrypt_worker, - true, MAX_QUEUED_PACKETS); + MAX_QUEUED_PACKETS); if (ret < 0) goto err_free_encrypt_queue; @@ -367,9 +368,9 @@ static int wg_newlink(struct net *src_net, struct net_device *dev, err_uninit_ratelimiter: wg_ratelimiter_uninit(); err_free_decrypt_queue: - wg_packet_queue_free(&wg->decrypt_queue, true); + wg_packet_queue_free(&wg->decrypt_queue); err_free_encrypt_queue: - wg_packet_queue_free(&wg->encrypt_queue, true); + wg_packet_queue_free(&wg->encrypt_queue); err_destroy_packet_crypt: destroy_workqueue(wg->packet_crypt_wq); err_destroy_handshake_send: diff --git a/drivers/net/wireguard/device.h b/drivers/net/wireguard/device.h index 4d0144e16947..854bc3d97150 100644 --- a/drivers/net/wireguard/device.h +++ b/drivers/net/wireguard/device.h @@ -27,13 +27,14 @@ struct multicore_worker { struct crypt_queue { struct ptr_ring ring; - union { - struct { - struct multicore_worker __percpu *worker; - int last_cpu; - }; - struct work_struct work; - }; + struct multicore_worker __percpu *worker; + int last_cpu; +}; + +struct prev_queue { + struct sk_buff *head, *tail, *peeked; + struct { struct sk_buff *next, *prev; } empty; // Match first 2 members of struct sk_buff. 
+ atomic_t count; }; struct wg_device { diff --git a/drivers/net/wireguard/peer.c b/drivers/net/wireguard/peer.c index b3b6370e6b95..cd5cb0292cb6 100644 --- a/drivers/net/wireguard/peer.c +++ b/drivers/net/wireguard/peer.c @@ -32,27 +32,22 @@ struct wg_peer *wg_peer_create(struct wg_device *wg, peer = kzalloc(sizeof(*peer), GFP_KERNEL); if (unlikely(!peer)) return ERR_PTR(ret); - peer->device = wg; + if (dst_cache_init(&peer->endpoint_cache, GFP_KERNEL)) + goto err; + peer->device = wg; wg_noise_handshake_init(&peer->handshake, &wg->static_identity, public_key, preshared_key, peer); - if (dst_cache_init(&peer->endpoint_cache, GFP_KERNEL)) - goto err_1; - if (wg_packet_queue_init(&peer->tx_queue, wg_packet_tx_worker, false, - MAX_QUEUED_PACKETS)) - goto err_2; - if (wg_packet_queue_init(&peer->rx_queue, NULL, false, - MAX_QUEUED_PACKETS)) - goto err_3; - peer->internal_id = atomic64_inc_return(&peer_counter); peer->serial_work_cpu = nr_cpumask_bits; wg_cookie_init(&peer->latest_cookie); wg_timers_init(peer); wg_cookie_checker_precompute_peer_keys(peer); spin_lock_init(&peer->keypairs.keypair_update_lock); - INIT_WORK(&peer->transmit_handshake_work, - wg_packet_handshake_send_worker); + INIT_WORK(&peer->transmit_handshake_work, wg_packet_handshake_send_worker); + INIT_WORK(&peer->transmit_packet_work, wg_packet_tx_worker); + wg_prev_queue_init(&peer->tx_queue); + wg_prev_queue_init(&peer->rx_queue); rwlock_init(&peer->endpoint_lock); kref_init(&peer->refcount); skb_queue_head_init(&peer->staged_packet_queue); @@ -68,11 +63,7 @@ struct wg_peer *wg_peer_create(struct wg_device *wg, pr_debug("%s: Peer %llu created\n", wg->dev->name, peer->internal_id); return peer; -err_3: - wg_packet_queue_free(&peer->tx_queue, false); -err_2: - dst_cache_destroy(&peer->endpoint_cache); -err_1: +err: kfree(peer); return ERR_PTR(ret); } @@ -197,8 +188,7 @@ static void rcu_release(struct rcu_head *rcu) struct wg_peer *peer = container_of(rcu, struct wg_peer, rcu); dst_cache_destroy(&peer->endpoint_cache); - wg_packet_queue_free(&peer->rx_queue, false); - wg_packet_queue_free(&peer->tx_queue, false); + WARN_ON(wg_prev_queue_peek(&peer->tx_queue) || wg_prev_queue_peek(&peer->rx_queue)); /* The final zeroing takes care of clearing any remaining handshake key * material and other potentially sensitive information. 
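The WireGuard hunks in this series replace the per-peer ptr_ring crypt queues with struct prev_queue, an intrusive multi-producer/single-consumer list threaded through sk_buff->prev; that is why wg_peer_create() above no longer allocates per-peer rings and its error unwinding collapses to a single label, and why queueing.c further down gains wg_prev_queue_enqueue()/wg_prev_queue_dequeue(). The underlying shape is a Vyukov-style MPSC queue with an embedded stub node. The stand-alone sketch below is not part of the patch: struct node, the mpsc_* names and the demo in main() are invented for illustration using user-space C11 atomics, and the kernel version additionally bounds the queue with an atomic count and caches one dequeued skb for its peek operation.

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct node {
	_Atomic(struct node *) next;
	int value;			/* payload; the kernel threads the list through the sk_buff itself */
};

struct mpsc_queue {
	_Atomic(struct node *) head;	/* producers swing this with an atomic exchange */
	struct node *tail;		/* touched only by the single consumer */
	struct node stub;		/* empty sentinel, like prev_queue::empty */
};

static void mpsc_init(struct mpsc_queue *q)
{
	atomic_store(&q->stub.next, NULL);
	atomic_store(&q->head, &q->stub);
	q->tail = &q->stub;
}

/* Multi-producer side: one exchange and two stores, no locks. */
static void mpsc_enqueue(struct mpsc_queue *q, struct node *n)
{
	struct node *prev;

	atomic_store_explicit(&n->next, NULL, memory_order_relaxed);
	prev = atomic_exchange_explicit(&q->head, n, memory_order_acq_rel);
	atomic_store_explicit(&prev->next, n, memory_order_release);
}

/* Single-consumer side: may return NULL while a producer is between its two stores. */
static struct node *mpsc_dequeue(struct mpsc_queue *q)
{
	struct node *tail = q->tail;
	struct node *next = atomic_load_explicit(&tail->next, memory_order_acquire);

	if (tail == &q->stub) {			/* skip the sentinel */
		if (!next)
			return NULL;
		q->tail = next;
		tail = next;
		next = atomic_load_explicit(&next->next, memory_order_acquire);
	}
	if (next) {
		q->tail = next;
		return tail;
	}
	if (tail != atomic_load_explicit(&q->head, memory_order_acquire))
		return NULL;			/* enqueue in progress, try again later */
	mpsc_enqueue(q, &q->stub);		/* park the sentinel behind the last node */
	next = atomic_load_explicit(&tail->next, memory_order_acquire);
	if (next) {
		q->tail = next;
		return tail;
	}
	return NULL;
}

int main(void)
{
	struct mpsc_queue q;
	struct node nodes[3] = { { .value = 1 }, { .value = 2 }, { .value = 3 } };
	struct node *n;
	int i;

	mpsc_init(&q);
	for (i = 0; i < 3; i++)
		mpsc_enqueue(&q, &nodes[i]);
	while ((n = mpsc_dequeue(&q)))
		printf("%d\n", n->value);	/* prints 1 2 3 */
	return 0;
}

A queue built this way needs no lock on the producer side and no allocation at all, which is what lets each per-peer queue shrink to three pointers, a sentinel and a counter.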
diff --git a/drivers/net/wireguard/peer.h b/drivers/net/wireguard/peer.h index 23af40922997..8d53b687a1d1 100644 --- a/drivers/net/wireguard/peer.h +++ b/drivers/net/wireguard/peer.h @@ -36,16 +36,17 @@ struct endpoint { struct wg_peer { struct wg_device *device; - struct crypt_queue tx_queue, rx_queue; + struct prev_queue tx_queue, rx_queue; struct sk_buff_head staged_packet_queue; int serial_work_cpu; + bool is_dead; struct noise_keypairs keypairs; struct endpoint endpoint; struct dst_cache endpoint_cache; rwlock_t endpoint_lock; struct noise_handshake handshake; atomic64_t last_sent_handshake; - struct work_struct transmit_handshake_work, clear_peer_work; + struct work_struct transmit_handshake_work, clear_peer_work, transmit_packet_work; struct cookie latest_cookie; struct hlist_node pubkey_hash; u64 rx_bytes, tx_bytes; @@ -61,9 +62,8 @@ struct wg_peer { struct rcu_head rcu; struct list_head peer_list; struct list_head allowedips_list; - u64 internal_id; struct napi_struct napi; - bool is_dead; + u64 internal_id; }; struct wg_peer *wg_peer_create(struct wg_device *wg, diff --git a/drivers/net/wireguard/queueing.c b/drivers/net/wireguard/queueing.c index 71b8e80b58e1..48e7b982a307 100644 --- a/drivers/net/wireguard/queueing.c +++ b/drivers/net/wireguard/queueing.c @@ -9,8 +9,7 @@ struct multicore_worker __percpu * wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr) { int cpu; - struct multicore_worker __percpu *worker = - alloc_percpu(struct multicore_worker); + struct multicore_worker __percpu *worker = alloc_percpu(struct multicore_worker); if (!worker) return NULL; @@ -23,7 +22,7 @@ wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr) } int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function, - bool multicore, unsigned int len) + unsigned int len) { int ret; @@ -31,25 +30,78 @@ int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function, ret = ptr_ring_init(&queue->ring, len, GFP_KERNEL); if (ret) return ret; - if (function) { - if (multicore) { - queue->worker = wg_packet_percpu_multicore_worker_alloc( - function, queue); - if (!queue->worker) { - ptr_ring_cleanup(&queue->ring, NULL); - return -ENOMEM; - } - } else { - INIT_WORK(&queue->work, function); - } + queue->worker = wg_packet_percpu_multicore_worker_alloc(function, queue); + if (!queue->worker) { + ptr_ring_cleanup(&queue->ring, NULL); + return -ENOMEM; } return 0; } -void wg_packet_queue_free(struct crypt_queue *queue, bool multicore) +void wg_packet_queue_free(struct crypt_queue *queue) { - if (multicore) - free_percpu(queue->worker); + free_percpu(queue->worker); WARN_ON(!__ptr_ring_empty(&queue->ring)); ptr_ring_cleanup(&queue->ring, NULL); } + +#define NEXT(skb) ((skb)->prev) +#define STUB(queue) ((struct sk_buff *)&queue->empty) + +void wg_prev_queue_init(struct prev_queue *queue) +{ + NEXT(STUB(queue)) = NULL; + queue->head = queue->tail = STUB(queue); + queue->peeked = NULL; + atomic_set(&queue->count, 0); + BUILD_BUG_ON( + offsetof(struct sk_buff, next) != offsetof(struct prev_queue, empty.next) - + offsetof(struct prev_queue, empty) || + offsetof(struct sk_buff, prev) != offsetof(struct prev_queue, empty.prev) - + offsetof(struct prev_queue, empty)); +} + +static void __wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb) +{ + WRITE_ONCE(NEXT(skb), NULL); + WRITE_ONCE(NEXT(xchg_release(&queue->head, skb)), skb); +} + +bool wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb) +{ + if 
(!atomic_add_unless(&queue->count, 1, MAX_QUEUED_PACKETS)) + return false; + __wg_prev_queue_enqueue(queue, skb); + return true; +} + +struct sk_buff *wg_prev_queue_dequeue(struct prev_queue *queue) +{ + struct sk_buff *tail = queue->tail, *next = smp_load_acquire(&NEXT(tail)); + + if (tail == STUB(queue)) { + if (!next) + return NULL; + queue->tail = next; + tail = next; + next = smp_load_acquire(&NEXT(next)); + } + if (next) { + queue->tail = next; + atomic_dec(&queue->count); + return tail; + } + if (tail != READ_ONCE(queue->head)) + return NULL; + __wg_prev_queue_enqueue(queue, STUB(queue)); + next = smp_load_acquire(&NEXT(tail)); + if (next) { + queue->tail = next; + atomic_dec(&queue->count); + return tail; + } + return NULL; +} + +#undef NEXT +#undef STUB diff --git a/drivers/net/wireguard/queueing.h b/drivers/net/wireguard/queueing.h index dfb674e03076..4ef2944a68bc 100644 --- a/drivers/net/wireguard/queueing.h +++ b/drivers/net/wireguard/queueing.h @@ -17,12 +17,13 @@ struct wg_device; struct wg_peer; struct multicore_worker; struct crypt_queue; +struct prev_queue; struct sk_buff; /* queueing.c APIs: */ int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function, - bool multicore, unsigned int len); -void wg_packet_queue_free(struct crypt_queue *queue, bool multicore); + unsigned int len); +void wg_packet_queue_free(struct crypt_queue *queue); struct multicore_worker __percpu * wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr); @@ -135,8 +136,31 @@ static inline int wg_cpumask_next_online(int *next) return cpu; } +void wg_prev_queue_init(struct prev_queue *queue); + +/* Multi producer */ +bool wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb); + +/* Single consumer */ +struct sk_buff *wg_prev_queue_dequeue(struct prev_queue *queue); + +/* Single consumer */ +static inline struct sk_buff *wg_prev_queue_peek(struct prev_queue *queue) +{ + if (queue->peeked) + return queue->peeked; + queue->peeked = wg_prev_queue_dequeue(queue); + return queue->peeked; +} + +/* Single consumer */ +static inline void wg_prev_queue_drop_peeked(struct prev_queue *queue) +{ + queue->peeked = NULL; +} + static inline int wg_queue_enqueue_per_device_and_peer( - struct crypt_queue *device_queue, struct crypt_queue *peer_queue, + struct crypt_queue *device_queue, struct prev_queue *peer_queue, struct sk_buff *skb, struct workqueue_struct *wq, int *next_cpu) { int cpu; @@ -145,8 +169,9 @@ static inline int wg_queue_enqueue_per_device_and_peer( /* We first queue this up for the peer ingestion, but the consumer * will wait for the state to change to CRYPTED or DEAD before. */ - if (unlikely(ptr_ring_produce_bh(&peer_queue->ring, skb))) + if (unlikely(!wg_prev_queue_enqueue(peer_queue, skb))) return -ENOSPC; + /* Then we queue it up in the device queue, which consumes the * packet as soon as it can. */ @@ -157,9 +182,7 @@ static inline int wg_queue_enqueue_per_device_and_peer( return 0; } -static inline void wg_queue_enqueue_per_peer(struct crypt_queue *queue, - struct sk_buff *skb, - enum packet_state state) +static inline void wg_queue_enqueue_per_peer_tx(struct sk_buff *skb, enum packet_state state) { /* We take a reference, because as soon as we call atomic_set, the * peer can be freed from below us. 
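The queueing.h hunks around this point switch the per-peer enqueue to wg_prev_queue_enqueue() and rename the per-peer completion helpers to wg_queue_enqueue_per_peer_tx()/_rx(); the receive.c and send.c consumers later in this diff drain those queues through wg_prev_queue_peek()/wg_prev_queue_drop_peeked(), removing a packet only once its PACKET_CB state has left PACKET_STATE_UNCRYPTED. The toy program below is not from the patch (the pkt type, the fixed-size ring and all names are made up); it is only one way to picture that peek-until-the-state-flips rule, which keeps per-peer ordering intact even though the parallel crypto workers may finish out of order.

#include <stdatomic.h>
#include <stdio.h>

enum pkt_state { PKT_UNCRYPTED, PKT_CRYPTED, PKT_DEAD };

struct pkt {
	_Atomic int state;	/* flipped by a (here pretend) crypto worker */
	int seq;
};

#define RING_SIZE 8

static struct pkt *ring[RING_SIZE];
static unsigned int head, tail;		/* single consumer, plain indices suffice */

static struct pkt *peek(void)
{
	return head == tail ? NULL : ring[tail % RING_SIZE];
}

static void drop_peeked(void)
{
	tail++;
}

int main(void)
{
	struct pkt pkts[3] = { { .seq = 0 }, { .seq = 1 }, { .seq = 2 } };
	struct pkt *p;
	int i;

	for (i = 0; i < 3; i++)
		ring[head++ % RING_SIZE] = &pkts[i];

	/* Workers finish out of order: 1 and 2 are ready, 0 is not. */
	atomic_store(&pkts[1].state, PKT_CRYPTED);
	atomic_store(&pkts[2].state, PKT_CRYPTED);

	while ((p = peek()) && atomic_load(&p->state) != PKT_UNCRYPTED) {
		printf("tx %d\n", p->seq);
		drop_peeked();
	}
	printf("head of queue still uncrypted, nothing sent yet\n");

	/* Once the head packet is ready, everything drains in order: 0, 1, 2. */
	atomic_store(&pkts[0].state, PKT_CRYPTED);
	while ((p = peek()) && atomic_load(&p->state) != PKT_UNCRYPTED) {
		printf("tx %d\n", p->seq);
		drop_peeked();
	}
	return 0;
}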
@@ -167,14 +190,12 @@ static inline void wg_queue_enqueue_per_peer(struct crypt_queue *queue, struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb)); atomic_set_release(&PACKET_CB(skb)->state, state); - queue_work_on(wg_cpumask_choose_online(&peer->serial_work_cpu, - peer->internal_id), - peer->device->packet_crypt_wq, &queue->work); + queue_work_on(wg_cpumask_choose_online(&peer->serial_work_cpu, peer->internal_id), + peer->device->packet_crypt_wq, &peer->transmit_packet_work); wg_peer_put(peer); } -static inline void wg_queue_enqueue_per_peer_napi(struct sk_buff *skb, - enum packet_state state) +static inline void wg_queue_enqueue_per_peer_rx(struct sk_buff *skb, enum packet_state state) { /* We take a reference, because as soon as we call atomic_set, the * peer can be freed from below us. diff --git a/drivers/net/wireguard/receive.c b/drivers/net/wireguard/receive.c index 2c9551ea6dc7..7dc84bcca261 100644 --- a/drivers/net/wireguard/receive.c +++ b/drivers/net/wireguard/receive.c @@ -444,7 +444,6 @@ packet_processed: int wg_packet_rx_poll(struct napi_struct *napi, int budget) { struct wg_peer *peer = container_of(napi, struct wg_peer, napi); - struct crypt_queue *queue = &peer->rx_queue; struct noise_keypair *keypair; struct endpoint endpoint; enum packet_state state; @@ -455,11 +454,10 @@ int wg_packet_rx_poll(struct napi_struct *napi, int budget) if (unlikely(budget <= 0)) return 0; - while ((skb = __ptr_ring_peek(&queue->ring)) != NULL && + while ((skb = wg_prev_queue_peek(&peer->rx_queue)) != NULL && (state = atomic_read_acquire(&PACKET_CB(skb)->state)) != PACKET_STATE_UNCRYPTED) { - __ptr_ring_discard_one(&queue->ring); - peer = PACKET_PEER(skb); + wg_prev_queue_drop_peeked(&peer->rx_queue); keypair = PACKET_CB(skb)->keypair; free = true; @@ -508,7 +506,7 @@ void wg_packet_decrypt_worker(struct work_struct *work) enum packet_state state = likely(decrypt_packet(skb, PACKET_CB(skb)->keypair)) ? 
PACKET_STATE_CRYPTED : PACKET_STATE_DEAD; - wg_queue_enqueue_per_peer_napi(skb, state); + wg_queue_enqueue_per_peer_rx(skb, state); if (need_resched()) cond_resched(); } @@ -531,12 +529,10 @@ static void wg_packet_consume_data(struct wg_device *wg, struct sk_buff *skb) if (unlikely(READ_ONCE(peer->is_dead))) goto err; - ret = wg_queue_enqueue_per_device_and_peer(&wg->decrypt_queue, - &peer->rx_queue, skb, - wg->packet_crypt_wq, - &wg->decrypt_queue.last_cpu); + ret = wg_queue_enqueue_per_device_and_peer(&wg->decrypt_queue, &peer->rx_queue, skb, + wg->packet_crypt_wq, &wg->decrypt_queue.last_cpu); if (unlikely(ret == -EPIPE)) - wg_queue_enqueue_per_peer_napi(skb, PACKET_STATE_DEAD); + wg_queue_enqueue_per_peer_rx(skb, PACKET_STATE_DEAD); if (likely(!ret || ret == -EPIPE)) { rcu_read_unlock_bh(); return; diff --git a/drivers/net/wireguard/send.c b/drivers/net/wireguard/send.c index f74b9341ab0f..5368f7c35b4b 100644 --- a/drivers/net/wireguard/send.c +++ b/drivers/net/wireguard/send.c @@ -239,8 +239,7 @@ void wg_packet_send_keepalive(struct wg_peer *peer) wg_packet_send_staged_packets(peer); } -static void wg_packet_create_data_done(struct sk_buff *first, - struct wg_peer *peer) +static void wg_packet_create_data_done(struct wg_peer *peer, struct sk_buff *first) { struct sk_buff *skb, *next; bool is_keepalive, data_sent = false; @@ -262,22 +261,19 @@ static void wg_packet_create_data_done(struct sk_buff *first, void wg_packet_tx_worker(struct work_struct *work) { - struct crypt_queue *queue = container_of(work, struct crypt_queue, - work); + struct wg_peer *peer = container_of(work, struct wg_peer, transmit_packet_work); struct noise_keypair *keypair; enum packet_state state; struct sk_buff *first; - struct wg_peer *peer; - while ((first = __ptr_ring_peek(&queue->ring)) != NULL && + while ((first = wg_prev_queue_peek(&peer->tx_queue)) != NULL && (state = atomic_read_acquire(&PACKET_CB(first)->state)) != PACKET_STATE_UNCRYPTED) { - __ptr_ring_discard_one(&queue->ring); - peer = PACKET_PEER(first); + wg_prev_queue_drop_peeked(&peer->tx_queue); keypair = PACKET_CB(first)->keypair; if (likely(state == PACKET_STATE_CRYPTED)) - wg_packet_create_data_done(first, peer); + wg_packet_create_data_done(peer, first); else kfree_skb_list(first); @@ -306,16 +302,14 @@ void wg_packet_encrypt_worker(struct work_struct *work) break; } } - wg_queue_enqueue_per_peer(&PACKET_PEER(first)->tx_queue, first, - state); + wg_queue_enqueue_per_peer_tx(first, state); if (need_resched()) cond_resched(); } } -static void wg_packet_create_data(struct sk_buff *first) +static void wg_packet_create_data(struct wg_peer *peer, struct sk_buff *first) { - struct wg_peer *peer = PACKET_PEER(first); struct wg_device *wg = peer->device; int ret = -EINVAL; @@ -323,13 +317,10 @@ static void wg_packet_create_data(struct sk_buff *first) if (unlikely(READ_ONCE(peer->is_dead))) goto err; - ret = wg_queue_enqueue_per_device_and_peer(&wg->encrypt_queue, - &peer->tx_queue, first, - wg->packet_crypt_wq, - &wg->encrypt_queue.last_cpu); + ret = wg_queue_enqueue_per_device_and_peer(&wg->encrypt_queue, &peer->tx_queue, first, + wg->packet_crypt_wq, &wg->encrypt_queue.last_cpu); if (unlikely(ret == -EPIPE)) - wg_queue_enqueue_per_peer(&peer->tx_queue, first, - PACKET_STATE_DEAD); + wg_queue_enqueue_per_peer_tx(first, PACKET_STATE_DEAD); err: rcu_read_unlock_bh(); if (likely(!ret || ret == -EPIPE)) @@ -393,7 +384,7 @@ void wg_packet_send_staged_packets(struct wg_peer *peer) packets.prev->next = NULL; wg_peer_get(keypair->entry.peer); 
PACKET_CB(packets.next)->keypair = keypair; - wg_packet_create_data(packets.next); + wg_packet_create_data(peer, packets.next); return; out_invalid: diff --git a/drivers/net/wireguard/socket.c b/drivers/net/wireguard/socket.c index 410b318e57fb..d9ad850daa79 100644 --- a/drivers/net/wireguard/socket.c +++ b/drivers/net/wireguard/socket.c @@ -53,7 +53,7 @@ static int send4(struct wg_device *wg, struct sk_buff *skb, if (unlikely(!inet_confirm_addr(sock_net(sock), NULL, 0, fl.saddr, RT_SCOPE_HOST))) { endpoint->src4.s_addr = 0; - *(__force __be32 *)&endpoint->src_if4 = 0; + endpoint->src_if4 = 0; fl.saddr = 0; if (cache) dst_cache_reset(cache); @@ -63,7 +63,7 @@ static int send4(struct wg_device *wg, struct sk_buff *skb, PTR_ERR(rt) == -EINVAL) || (!IS_ERR(rt) && rt->dst.dev->ifindex != endpoint->src_if4)))) { endpoint->src4.s_addr = 0; - *(__force __be32 *)&endpoint->src_if4 = 0; + endpoint->src_if4 = 0; fl.saddr = 0; if (cache) dst_cache_reset(cache); @@ -71,7 +71,7 @@ static int send4(struct wg_device *wg, struct sk_buff *skb, ip_rt_put(rt); rt = ip_route_output_flow(sock_net(sock), &fl, sock); } - if (unlikely(IS_ERR(rt))) { + if (IS_ERR(rt)) { ret = PTR_ERR(rt); net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n", wg->dev->name, &endpoint->addr, ret); @@ -138,7 +138,7 @@ static int send6(struct wg_device *wg, struct sk_buff *skb, } dst = ipv6_stub->ipv6_dst_lookup_flow(sock_net(sock), sock, &fl, NULL); - if (unlikely(IS_ERR(dst))) { + if (IS_ERR(dst)) { ret = PTR_ERR(dst); net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n", wg->dev->name, &endpoint->addr, ret); diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index 56353e8c792a..ad4e630e73e2 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -450,7 +450,7 @@ config IDEAPAD_LAPTOP depends on BACKLIGHT_CLASS_DEVICE depends on ACPI_VIDEO || ACPI_VIDEO = n depends on ACPI_WMI || ACPI_WMI = n - depends on ACPI_PLATFORM_PROFILE + select ACPI_PLATFORM_PROFILE select INPUT_SPARSEKMAP select NEW_LEDS select LEDS_CLASS @@ -484,7 +484,7 @@ config THINKPAD_ACPI depends on RFKILL || RFKILL = n depends on ACPI_VIDEO || ACPI_VIDEO = n depends on BACKLIGHT_CLASS_DEVICE - depends on ACPI_PLATFORM_PROFILE + select ACPI_PLATFORM_PROFILE select HWMON select NVRAM select NEW_LEDS diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig index 0937e1c047ac..9a4f66ae8070 100644 --- a/drivers/pwm/Kconfig +++ b/drivers/pwm/Kconfig @@ -611,14 +611,4 @@ config PWM_VT8500 To compile this driver as a module, choose M here: the module will be called pwm-vt8500. -config PWM_ZX - tristate "ZTE ZX PWM support" - depends on ARCH_ZX || COMPILE_TEST - depends on HAS_IOMEM - help - Generic PWM framework driver for ZTE ZX family SoCs. - - To compile this driver as a module, choose M here: the module - will be called pwm-zx. 
- endif diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile index 18b89d7fd092..6374d3b1d6f3 100644 --- a/drivers/pwm/Makefile +++ b/drivers/pwm/Makefile @@ -57,4 +57,3 @@ obj-$(CONFIG_PWM_TIEHRPWM) += pwm-tiehrpwm.o obj-$(CONFIG_PWM_TWL) += pwm-twl.o obj-$(CONFIG_PWM_TWL_LED) += pwm-twl-led.o obj-$(CONFIG_PWM_VT8500) += pwm-vt8500.o -obj-$(CONFIG_PWM_ZX) += pwm-zx.o diff --git a/drivers/pwm/pwm-iqs620a.c b/drivers/pwm/pwm-iqs620a.c index 5ede8255926e..957b972c458b 100644 --- a/drivers/pwm/pwm-iqs620a.c +++ b/drivers/pwm/pwm-iqs620a.c @@ -37,16 +37,34 @@ struct iqs620_pwm_private { struct pwm_chip chip; struct notifier_block notifier; struct mutex lock; - bool out_en; - u8 duty_val; + unsigned int duty_scale; }; +static int iqs620_pwm_init(struct iqs620_pwm_private *iqs620_pwm, + unsigned int duty_scale) +{ + struct iqs62x_core *iqs62x = iqs620_pwm->iqs62x; + int ret; + + if (!duty_scale) + return regmap_update_bits(iqs62x->regmap, IQS620_PWR_SETTINGS, + IQS620_PWR_SETTINGS_PWM_OUT, 0); + + ret = regmap_write(iqs62x->regmap, IQS620_PWM_DUTY_CYCLE, + duty_scale - 1); + if (ret) + return ret; + + return regmap_update_bits(iqs62x->regmap, IQS620_PWR_SETTINGS, + IQS620_PWR_SETTINGS_PWM_OUT, 0xff); +} + static int iqs620_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, const struct pwm_state *state) { struct iqs620_pwm_private *iqs620_pwm; - struct iqs62x_core *iqs62x; - u64 duty_scale; + unsigned int duty_cycle; + unsigned int duty_scale; int ret; if (state->polarity != PWM_POLARITY_NORMAL) @@ -56,7 +74,6 @@ static int iqs620_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, return -EINVAL; iqs620_pwm = container_of(chip, struct iqs620_pwm_private, chip); - iqs62x = iqs620_pwm->iqs62x; /* * The duty cycle generated by the device is calculated as follows: @@ -70,38 +87,18 @@ static int iqs620_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, * For lower duty cycles (e.g. 0), the PWM output is simply disabled to * allow an external pull-down resistor to hold the GPIO3/LTX pin low. */ - duty_scale = div_u64(state->duty_cycle * 256, IQS620_PWM_PERIOD_NS); - - mutex_lock(&iqs620_pwm->lock); - - if (!state->enabled || !duty_scale) { - ret = regmap_update_bits(iqs62x->regmap, IQS620_PWR_SETTINGS, - IQS620_PWR_SETTINGS_PWM_OUT, 0); - if (ret) - goto err_mutex; - } + duty_cycle = min_t(u64, state->duty_cycle, IQS620_PWM_PERIOD_NS); + duty_scale = duty_cycle * 256 / IQS620_PWM_PERIOD_NS; - if (duty_scale) { - u8 duty_val = min_t(u64, duty_scale - 1, 0xff); + if (!state->enabled) + duty_scale = 0; - ret = regmap_write(iqs62x->regmap, IQS620_PWM_DUTY_CYCLE, - duty_val); - if (ret) - goto err_mutex; - - iqs620_pwm->duty_val = duty_val; - } - - if (state->enabled && duty_scale) { - ret = regmap_update_bits(iqs62x->regmap, IQS620_PWR_SETTINGS, - IQS620_PWR_SETTINGS_PWM_OUT, 0xff); - if (ret) - goto err_mutex; - } + mutex_lock(&iqs620_pwm->lock); - iqs620_pwm->out_en = state->enabled; + ret = iqs620_pwm_init(iqs620_pwm, duty_scale); + if (!ret) + iqs620_pwm->duty_scale = duty_scale; -err_mutex: mutex_unlock(&iqs620_pwm->lock); return ret; @@ -119,12 +116,11 @@ static void iqs620_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm, /* * Since the device cannot generate a 0% duty cycle, requests to do so * cause subsequent calls to iqs620_pwm_get_state to report the output - * as disabled with duty cycle equal to that which was in use prior to - * the request. This is not ideal, but is the best compromise based on + * as disabled. 
This is not ideal, but is the best compromise based on * the capabilities of the device. */ - state->enabled = iqs620_pwm->out_en; - state->duty_cycle = DIV_ROUND_UP((iqs620_pwm->duty_val + 1) * + state->enabled = iqs620_pwm->duty_scale > 0; + state->duty_cycle = DIV_ROUND_UP(iqs620_pwm->duty_scale * IQS620_PWM_PERIOD_NS, 256); mutex_unlock(&iqs620_pwm->lock); @@ -136,7 +132,6 @@ static int iqs620_pwm_notifier(struct notifier_block *notifier, unsigned long event_flags, void *context) { struct iqs620_pwm_private *iqs620_pwm; - struct iqs62x_core *iqs62x; int ret; if (!(event_flags & BIT(IQS62X_EVENT_SYS_RESET))) @@ -144,7 +139,6 @@ static int iqs620_pwm_notifier(struct notifier_block *notifier, iqs620_pwm = container_of(notifier, struct iqs620_pwm_private, notifier); - iqs62x = iqs620_pwm->iqs62x; mutex_lock(&iqs620_pwm->lock); @@ -153,16 +147,8 @@ static int iqs620_pwm_notifier(struct notifier_block *notifier, * of a device reset, so nothing else is printed here unless there is * an additional failure. */ - ret = regmap_write(iqs62x->regmap, IQS620_PWM_DUTY_CYCLE, - iqs620_pwm->duty_val); - if (ret) - goto err_mutex; + ret = iqs620_pwm_init(iqs620_pwm, iqs620_pwm->duty_scale); - ret = regmap_update_bits(iqs62x->regmap, IQS620_PWR_SETTINGS, - IQS620_PWR_SETTINGS_PWM_OUT, - iqs620_pwm->out_en ? 0xff : 0); - -err_mutex: mutex_unlock(&iqs620_pwm->lock); if (ret) { @@ -209,12 +195,14 @@ static int iqs620_pwm_probe(struct platform_device *pdev) ret = regmap_read(iqs62x->regmap, IQS620_PWR_SETTINGS, &val); if (ret) return ret; - iqs620_pwm->out_en = val & IQS620_PWR_SETTINGS_PWM_OUT; - ret = regmap_read(iqs62x->regmap, IQS620_PWM_DUTY_CYCLE, &val); - if (ret) - return ret; - iqs620_pwm->duty_val = val; + if (val & IQS620_PWR_SETTINGS_PWM_OUT) { + ret = regmap_read(iqs62x->regmap, IQS620_PWM_DUTY_CYCLE, &val); + if (ret) + return ret; + + iqs620_pwm->duty_scale = val + 1; + } iqs620_pwm->chip.dev = &pdev->dev; iqs620_pwm->chip.ops = &iqs620_pwm_ops; diff --git a/drivers/pwm/pwm-lpc18xx-sct.c b/drivers/pwm/pwm-lpc18xx-sct.c index dc5133bec3e7..7ef40243eb6c 100644 --- a/drivers/pwm/pwm-lpc18xx-sct.c +++ b/drivers/pwm/pwm-lpc18xx-sct.c @@ -289,7 +289,7 @@ static int lpc18xx_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm) dev_err(lpc18xx_pwm->dev, "maximum number of simultaneous channels reached\n"); return -EBUSY; - }; + } set_bit(event, &lpc18xx_pwm->event_map); lpc18xx_data->duty_event = event; diff --git a/drivers/pwm/pwm-rockchip.c b/drivers/pwm/pwm-rockchip.c index 389a5e140412..6ad7d0a50aed 100644 --- a/drivers/pwm/pwm-rockchip.c +++ b/drivers/pwm/pwm-rockchip.c @@ -72,6 +72,10 @@ static void rockchip_pwm_get_state(struct pwm_chip *chip, if (ret) return; + ret = clk_enable(pc->clk); + if (ret) + return; + clk_rate = clk_get_rate(pc->clk); tmp = readl_relaxed(pc->base + pc->data->regs.period); @@ -90,6 +94,7 @@ static void rockchip_pwm_get_state(struct pwm_chip *chip, else state->polarity = PWM_POLARITY_NORMAL; + clk_disable(pc->clk); clk_disable(pc->pclk); } @@ -189,6 +194,10 @@ static int rockchip_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, if (ret) return ret; + ret = clk_enable(pc->clk); + if (ret) + return ret; + pwm_get_state(pwm, &curstate); enabled = curstate.enabled; @@ -208,6 +217,7 @@ static int rockchip_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, } out: + clk_disable(pc->clk); clk_disable(pc->pclk); return ret; @@ -288,6 +298,7 @@ static int rockchip_pwm_probe(struct platform_device *pdev) const struct of_device_id *id; struct rockchip_pwm_chip 
*pc; u32 enable_conf, ctrl; + bool enabled; int ret, count; id = of_match_device(rockchip_pwm_dt_ids, &pdev->dev); @@ -307,7 +318,7 @@ static int rockchip_pwm_probe(struct platform_device *pdev) pc->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(pc->clk)) return dev_err_probe(&pdev->dev, PTR_ERR(pc->clk), - "Can't get bus clk\n"); + "Can't get PWM clk\n"); } count = of_count_phandle_with_args(pdev->dev.of_node, @@ -326,13 +337,13 @@ static int rockchip_pwm_probe(struct platform_device *pdev) ret = clk_prepare_enable(pc->clk); if (ret) { - dev_err(&pdev->dev, "Can't prepare enable bus clk: %d\n", ret); + dev_err(&pdev->dev, "Can't prepare enable PWM clk: %d\n", ret); return ret; } - ret = clk_prepare(pc->pclk); + ret = clk_prepare_enable(pc->pclk); if (ret) { - dev_err(&pdev->dev, "Can't prepare APB clk: %d\n", ret); + dev_err(&pdev->dev, "Can't prepare enable APB clk: %d\n", ret); goto err_clk; } @@ -349,23 +360,26 @@ static int rockchip_pwm_probe(struct platform_device *pdev) pc->chip.of_pwm_n_cells = 3; } + enable_conf = pc->data->enable_conf; + ctrl = readl_relaxed(pc->base + pc->data->regs.ctrl); + enabled = (ctrl & enable_conf) == enable_conf; + ret = pwmchip_add(&pc->chip); if (ret < 0) { - clk_unprepare(pc->clk); dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret); goto err_pclk; } /* Keep the PWM clk enabled if the PWM appears to be up and running. */ - enable_conf = pc->data->enable_conf; - ctrl = readl_relaxed(pc->base + pc->data->regs.ctrl); - if ((ctrl & enable_conf) != enable_conf) + if (!enabled) clk_disable(pc->clk); + clk_disable(pc->pclk); + return 0; err_pclk: - clk_unprepare(pc->pclk); + clk_disable_unprepare(pc->pclk); err_clk: clk_disable_unprepare(pc->clk); diff --git a/drivers/pwm/pwm-zx.c b/drivers/pwm/pwm-zx.c deleted file mode 100644 index 34e91195ce98..000000000000 --- a/drivers/pwm/pwm-zx.c +++ /dev/null @@ -1,278 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2017 Sanechips Technology Co., Ltd. - * Copyright 2017 Linaro Ltd. 
- */ - -#include <linux/clk.h> -#include <linux/err.h> -#include <linux/io.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/platform_device.h> -#include <linux/pwm.h> -#include <linux/slab.h> - -#define ZX_PWM_MODE 0x0 -#define ZX_PWM_CLKDIV_SHIFT 2 -#define ZX_PWM_CLKDIV_MASK GENMASK(11, 2) -#define ZX_PWM_CLKDIV(x) (((x) << ZX_PWM_CLKDIV_SHIFT) & \ - ZX_PWM_CLKDIV_MASK) -#define ZX_PWM_POLAR BIT(1) -#define ZX_PWM_EN BIT(0) -#define ZX_PWM_PERIOD 0x4 -#define ZX_PWM_DUTY 0x8 - -#define ZX_PWM_CLKDIV_MAX 1023 -#define ZX_PWM_PERIOD_MAX 65535 - -struct zx_pwm_chip { - struct pwm_chip chip; - struct clk *pclk; - struct clk *wclk; - void __iomem *base; -}; - -static inline struct zx_pwm_chip *to_zx_pwm_chip(struct pwm_chip *chip) -{ - return container_of(chip, struct zx_pwm_chip, chip); -} - -static inline u32 zx_pwm_readl(struct zx_pwm_chip *zpc, unsigned int hwpwm, - unsigned int offset) -{ - return readl(zpc->base + (hwpwm + 1) * 0x10 + offset); -} - -static inline void zx_pwm_writel(struct zx_pwm_chip *zpc, unsigned int hwpwm, - unsigned int offset, u32 value) -{ - writel(value, zpc->base + (hwpwm + 1) * 0x10 + offset); -} - -static void zx_pwm_set_mask(struct zx_pwm_chip *zpc, unsigned int hwpwm, - unsigned int offset, u32 mask, u32 value) -{ - u32 data; - - data = zx_pwm_readl(zpc, hwpwm, offset); - data &= ~mask; - data |= value & mask; - zx_pwm_writel(zpc, hwpwm, offset, data); -} - -static void zx_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm, - struct pwm_state *state) -{ - struct zx_pwm_chip *zpc = to_zx_pwm_chip(chip); - unsigned long rate; - unsigned int div; - u32 value; - u64 tmp; - - value = zx_pwm_readl(zpc, pwm->hwpwm, ZX_PWM_MODE); - - if (value & ZX_PWM_POLAR) - state->polarity = PWM_POLARITY_NORMAL; - else - state->polarity = PWM_POLARITY_INVERSED; - - if (value & ZX_PWM_EN) - state->enabled = true; - else - state->enabled = false; - - div = (value & ZX_PWM_CLKDIV_MASK) >> ZX_PWM_CLKDIV_SHIFT; - rate = clk_get_rate(zpc->wclk); - - tmp = zx_pwm_readl(zpc, pwm->hwpwm, ZX_PWM_PERIOD); - tmp *= div * NSEC_PER_SEC; - state->period = DIV_ROUND_CLOSEST_ULL(tmp, rate); - - tmp = zx_pwm_readl(zpc, pwm->hwpwm, ZX_PWM_DUTY); - tmp *= div * NSEC_PER_SEC; - state->duty_cycle = DIV_ROUND_CLOSEST_ULL(tmp, rate); -} - -static int zx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, - unsigned int duty_ns, unsigned int period_ns) -{ - struct zx_pwm_chip *zpc = to_zx_pwm_chip(chip); - unsigned int period_cycles, duty_cycles; - unsigned long long c; - unsigned int div = 1; - unsigned long rate; - - /* Find out the best divider */ - rate = clk_get_rate(zpc->wclk); - - while (1) { - c = rate / div; - c = c * period_ns; - do_div(c, NSEC_PER_SEC); - - if (c < ZX_PWM_PERIOD_MAX) - break; - - div++; - - if (div > ZX_PWM_CLKDIV_MAX) - return -ERANGE; - } - - /* Calculate duty cycles */ - period_cycles = c; - c *= duty_ns; - do_div(c, period_ns); - duty_cycles = c; - - /* - * If the PWM is being enabled, we have to temporarily disable it - * before configuring the registers. 
- */ - if (pwm_is_enabled(pwm)) - zx_pwm_set_mask(zpc, pwm->hwpwm, ZX_PWM_MODE, ZX_PWM_EN, 0); - - /* Set up registers */ - zx_pwm_set_mask(zpc, pwm->hwpwm, ZX_PWM_MODE, ZX_PWM_CLKDIV_MASK, - ZX_PWM_CLKDIV(div)); - zx_pwm_writel(zpc, pwm->hwpwm, ZX_PWM_PERIOD, period_cycles); - zx_pwm_writel(zpc, pwm->hwpwm, ZX_PWM_DUTY, duty_cycles); - - /* Re-enable the PWM if needed */ - if (pwm_is_enabled(pwm)) - zx_pwm_set_mask(zpc, pwm->hwpwm, ZX_PWM_MODE, - ZX_PWM_EN, ZX_PWM_EN); - - return 0; -} - -static int zx_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, - const struct pwm_state *state) -{ - struct zx_pwm_chip *zpc = to_zx_pwm_chip(chip); - struct pwm_state cstate; - int ret; - - pwm_get_state(pwm, &cstate); - - if (state->polarity != cstate.polarity) - zx_pwm_set_mask(zpc, pwm->hwpwm, ZX_PWM_MODE, ZX_PWM_POLAR, - (state->polarity == PWM_POLARITY_INVERSED) ? - 0 : ZX_PWM_POLAR); - - if (state->period != cstate.period || - state->duty_cycle != cstate.duty_cycle) { - ret = zx_pwm_config(chip, pwm, state->duty_cycle, - state->period); - if (ret) - return ret; - } - - if (state->enabled != cstate.enabled) { - if (state->enabled) { - ret = clk_prepare_enable(zpc->wclk); - if (ret) - return ret; - - zx_pwm_set_mask(zpc, pwm->hwpwm, ZX_PWM_MODE, - ZX_PWM_EN, ZX_PWM_EN); - } else { - zx_pwm_set_mask(zpc, pwm->hwpwm, ZX_PWM_MODE, - ZX_PWM_EN, 0); - clk_disable_unprepare(zpc->wclk); - } - } - - return 0; -} - -static const struct pwm_ops zx_pwm_ops = { - .apply = zx_pwm_apply, - .get_state = zx_pwm_get_state, - .owner = THIS_MODULE, -}; - -static int zx_pwm_probe(struct platform_device *pdev) -{ - struct zx_pwm_chip *zpc; - unsigned int i; - int ret; - - zpc = devm_kzalloc(&pdev->dev, sizeof(*zpc), GFP_KERNEL); - if (!zpc) - return -ENOMEM; - - zpc->base = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(zpc->base)) - return PTR_ERR(zpc->base); - - zpc->pclk = devm_clk_get(&pdev->dev, "pclk"); - if (IS_ERR(zpc->pclk)) - return PTR_ERR(zpc->pclk); - - zpc->wclk = devm_clk_get(&pdev->dev, "wclk"); - if (IS_ERR(zpc->wclk)) - return PTR_ERR(zpc->wclk); - - ret = clk_prepare_enable(zpc->pclk); - if (ret) - return ret; - - zpc->chip.dev = &pdev->dev; - zpc->chip.ops = &zx_pwm_ops; - zpc->chip.base = -1; - zpc->chip.npwm = 4; - zpc->chip.of_xlate = of_pwm_xlate_with_flags; - zpc->chip.of_pwm_n_cells = 3; - - /* - * PWM devices may be enabled by firmware, and let's disable all of - * them initially to save power. 
- */ - for (i = 0; i < zpc->chip.npwm; i++) - zx_pwm_set_mask(zpc, i, ZX_PWM_MODE, ZX_PWM_EN, 0); - - ret = pwmchip_add(&zpc->chip); - if (ret < 0) { - dev_err(&pdev->dev, "failed to add PWM chip: %d\n", ret); - clk_disable_unprepare(zpc->pclk); - return ret; - } - - platform_set_drvdata(pdev, zpc); - - return 0; -} - -static int zx_pwm_remove(struct platform_device *pdev) -{ - struct zx_pwm_chip *zpc = platform_get_drvdata(pdev); - int ret; - - ret = pwmchip_remove(&zpc->chip); - clk_disable_unprepare(zpc->pclk); - - return ret; -} - -static const struct of_device_id zx_pwm_dt_ids[] = { - { .compatible = "zte,zx296718-pwm", }, - { /* sentinel */ } -}; -MODULE_DEVICE_TABLE(of, zx_pwm_dt_ids); - -static struct platform_driver zx_pwm_driver = { - .driver = { - .name = "zx-pwm", - .of_match_table = zx_pwm_dt_ids, - }, - .probe = zx_pwm_probe, - .remove = zx_pwm_remove, -}; -module_platform_driver(zx_pwm_driver); - -MODULE_ALIAS("platform:zx-pwm"); -MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>"); -MODULE_DESCRIPTION("ZTE ZX PWM Driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/vdpa/Kconfig b/drivers/vdpa/Kconfig index 92a6396f8a73..ffd1e098bfd2 100644 --- a/drivers/vdpa/Kconfig +++ b/drivers/vdpa/Kconfig @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only menuconfig VDPA tristate "vDPA drivers" + depends on NET help Enable this module to support vDPA device that uses a datapath which complies with virtio specifications with diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c index fa1af301cf55..7c8bbfcf6c3e 100644 --- a/drivers/vdpa/ifcvf/ifcvf_main.c +++ b/drivers/vdpa/ifcvf/ifcvf_main.c @@ -432,7 +432,7 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id) adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa, dev, &ifc_vdpa_ops, - IFCVF_MAX_QUEUE_PAIRS * 2); + IFCVF_MAX_QUEUE_PAIRS * 2, NULL); if (adapter == NULL) { IFCVF_ERR(pdev, "Failed to allocate vDPA structure"); return -ENOMEM; diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c index b5fe6d2ad22f..10e9b09932eb 100644 --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c @@ -1820,7 +1820,7 @@ static void mlx5_vdpa_get_config(struct vdpa_device *vdev, unsigned int offset, struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); - if (offset + len < sizeof(struct virtio_net_config)) + if (offset + len <= sizeof(struct virtio_net_config)) memcpy(buf, (u8 *)&ndev->config + offset, len); } @@ -1982,7 +1982,7 @@ static int mlx5v_probe(struct auxiliary_device *adev, max_vqs = min_t(u32, max_vqs, MLX5_MAX_SUPPORTED_VQS); ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops, - 2 * mlx5_vdpa_max_qps(max_vqs)); + 2 * mlx5_vdpa_max_qps(max_vqs), NULL); if (IS_ERR(ndev)) return PTR_ERR(ndev); diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c index c0825650c055..da67f07e24fd 100644 --- a/drivers/vdpa/vdpa.c +++ b/drivers/vdpa/vdpa.c @@ -11,9 +11,17 @@ #include <linux/idr.h> #include <linux/slab.h> #include <linux/vdpa.h> +#include <uapi/linux/vdpa.h> +#include <net/genetlink.h> +#include <linux/mod_devicetable.h> +static LIST_HEAD(mdev_head); +/* A global mutex that protects vdpa management device and device level operations. 
*/ +static DEFINE_MUTEX(vdpa_dev_mutex); static DEFINE_IDA(vdpa_index_ida); +static struct genl_family vdpa_nl_family; + static int vdpa_dev_probe(struct device *d) { struct vdpa_device *vdev = dev_to_vdpa(d); @@ -63,6 +71,7 @@ static void vdpa_release_dev(struct device *d) * @config: the bus operations that is supported by this device * @nvqs: number of virtqueues supported by this device * @size: size of the parent structure that contains private data + * @name: name of the vdpa device; optional. * * Driver should use vdpa_alloc_device() wrapper macro instead of * using this directly. @@ -72,8 +81,7 @@ static void vdpa_release_dev(struct device *d) */ struct vdpa_device *__vdpa_alloc_device(struct device *parent, const struct vdpa_config_ops *config, - int nvqs, - size_t size) + int nvqs, size_t size, const char *name) { struct vdpa_device *vdev; int err = -EINVAL; @@ -101,7 +109,10 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent, vdev->features_valid = false; vdev->nvqs = nvqs; - err = dev_set_name(&vdev->dev, "vdpa%u", vdev->index); + if (name) + err = dev_set_name(&vdev->dev, "%s", name); + else + err = dev_set_name(&vdev->dev, "vdpa%u", vdev->index); if (err) goto err_name; @@ -118,6 +129,44 @@ err: } EXPORT_SYMBOL_GPL(__vdpa_alloc_device); +static int vdpa_name_match(struct device *dev, const void *data) +{ + struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev); + + return (strcmp(dev_name(&vdev->dev), data) == 0); +} + +static int __vdpa_register_device(struct vdpa_device *vdev) +{ + struct device *dev; + + lockdep_assert_held(&vdpa_dev_mutex); + dev = bus_find_device(&vdpa_bus, NULL, dev_name(&vdev->dev), vdpa_name_match); + if (dev) { + put_device(dev); + return -EEXIST; + } + return device_add(&vdev->dev); +} + +/** + * _vdpa_register_device - register a vDPA device with vdpa lock held + * Caller must have a succeed call of vdpa_alloc_device() before. + * Caller must invoke this routine in the management device dev_add() + * callback after setting up valid mgmtdev for this vdpa device. + * @vdev: the vdpa device to be registered to vDPA bus + * + * Returns an error when fail to add device to vDPA bus + */ +int _vdpa_register_device(struct vdpa_device *vdev) +{ + if (!vdev->mdev) + return -EINVAL; + + return __vdpa_register_device(vdev); +} +EXPORT_SYMBOL_GPL(_vdpa_register_device); + /** * vdpa_register_device - register a vDPA device * Callers must have a succeed call of vdpa_alloc_device() before. @@ -127,17 +176,38 @@ EXPORT_SYMBOL_GPL(__vdpa_alloc_device); */ int vdpa_register_device(struct vdpa_device *vdev) { - return device_add(&vdev->dev); + int err; + + mutex_lock(&vdpa_dev_mutex); + err = __vdpa_register_device(vdev); + mutex_unlock(&vdpa_dev_mutex); + return err; } EXPORT_SYMBOL_GPL(vdpa_register_device); /** + * _vdpa_unregister_device - unregister a vDPA device + * Caller must invoke this routine as part of management device dev_del() + * callback. 
+ * @vdev: the vdpa device to be unregisted from vDPA bus + */ +void _vdpa_unregister_device(struct vdpa_device *vdev) +{ + lockdep_assert_held(&vdpa_dev_mutex); + WARN_ON(!vdev->mdev); + device_unregister(&vdev->dev); +} +EXPORT_SYMBOL_GPL(_vdpa_unregister_device); + +/** * vdpa_unregister_device - unregister a vDPA device * @vdev: the vdpa device to be unregisted from vDPA bus */ void vdpa_unregister_device(struct vdpa_device *vdev) { + mutex_lock(&vdpa_dev_mutex); device_unregister(&vdev->dev); + mutex_unlock(&vdpa_dev_mutex); } EXPORT_SYMBOL_GPL(vdpa_unregister_device); @@ -167,13 +237,436 @@ void vdpa_unregister_driver(struct vdpa_driver *drv) } EXPORT_SYMBOL_GPL(vdpa_unregister_driver); +/** + * vdpa_mgmtdev_register - register a vdpa management device + * + * @mdev: Pointer to vdpa management device + * vdpa_mgmtdev_register() register a vdpa management device which supports + * vdpa device management. + */ +int vdpa_mgmtdev_register(struct vdpa_mgmt_dev *mdev) +{ + if (!mdev->device || !mdev->ops || !mdev->ops->dev_add || !mdev->ops->dev_del) + return -EINVAL; + + INIT_LIST_HEAD(&mdev->list); + mutex_lock(&vdpa_dev_mutex); + list_add_tail(&mdev->list, &mdev_head); + mutex_unlock(&vdpa_dev_mutex); + return 0; +} +EXPORT_SYMBOL_GPL(vdpa_mgmtdev_register); + +static int vdpa_match_remove(struct device *dev, void *data) +{ + struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev); + struct vdpa_mgmt_dev *mdev = vdev->mdev; + + if (mdev == data) + mdev->ops->dev_del(mdev, vdev); + return 0; +} + +void vdpa_mgmtdev_unregister(struct vdpa_mgmt_dev *mdev) +{ + mutex_lock(&vdpa_dev_mutex); + + list_del(&mdev->list); + + /* Filter out all the entries belong to this management device and delete it. */ + bus_for_each_dev(&vdpa_bus, NULL, mdev, vdpa_match_remove); + + mutex_unlock(&vdpa_dev_mutex); +} +EXPORT_SYMBOL_GPL(vdpa_mgmtdev_unregister); + +static bool mgmtdev_handle_match(const struct vdpa_mgmt_dev *mdev, + const char *busname, const char *devname) +{ + /* Bus name is optional for simulated management device, so ignore the + * device with bus if bus attribute is provided. 
+ */ + if ((busname && !mdev->device->bus) || (!busname && mdev->device->bus)) + return false; + + if (!busname && strcmp(dev_name(mdev->device), devname) == 0) + return true; + + if (busname && (strcmp(mdev->device->bus->name, busname) == 0) && + (strcmp(dev_name(mdev->device), devname) == 0)) + return true; + + return false; +} + +static struct vdpa_mgmt_dev *vdpa_mgmtdev_get_from_attr(struct nlattr **attrs) +{ + struct vdpa_mgmt_dev *mdev; + const char *busname = NULL; + const char *devname; + + if (!attrs[VDPA_ATTR_MGMTDEV_DEV_NAME]) + return ERR_PTR(-EINVAL); + devname = nla_data(attrs[VDPA_ATTR_MGMTDEV_DEV_NAME]); + if (attrs[VDPA_ATTR_MGMTDEV_BUS_NAME]) + busname = nla_data(attrs[VDPA_ATTR_MGMTDEV_BUS_NAME]); + + list_for_each_entry(mdev, &mdev_head, list) { + if (mgmtdev_handle_match(mdev, busname, devname)) + return mdev; + } + return ERR_PTR(-ENODEV); +} + +static int vdpa_nl_mgmtdev_handle_fill(struct sk_buff *msg, const struct vdpa_mgmt_dev *mdev) +{ + if (mdev->device->bus && + nla_put_string(msg, VDPA_ATTR_MGMTDEV_BUS_NAME, mdev->device->bus->name)) + return -EMSGSIZE; + if (nla_put_string(msg, VDPA_ATTR_MGMTDEV_DEV_NAME, dev_name(mdev->device))) + return -EMSGSIZE; + return 0; +} + +static int vdpa_mgmtdev_fill(const struct vdpa_mgmt_dev *mdev, struct sk_buff *msg, + u32 portid, u32 seq, int flags) +{ + u64 supported_classes = 0; + void *hdr; + int i = 0; + int err; + + hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags, VDPA_CMD_MGMTDEV_NEW); + if (!hdr) + return -EMSGSIZE; + err = vdpa_nl_mgmtdev_handle_fill(msg, mdev); + if (err) + goto msg_err; + + while (mdev->id_table[i].device) { + supported_classes |= BIT(mdev->id_table[i].device); + i++; + } + + if (nla_put_u64_64bit(msg, VDPA_ATTR_MGMTDEV_SUPPORTED_CLASSES, + supported_classes, VDPA_ATTR_UNSPEC)) { + err = -EMSGSIZE; + goto msg_err; + } + + genlmsg_end(msg, hdr); + return 0; + +msg_err: + genlmsg_cancel(msg, hdr); + return err; +} + +static int vdpa_nl_cmd_mgmtdev_get_doit(struct sk_buff *skb, struct genl_info *info) +{ + struct vdpa_mgmt_dev *mdev; + struct sk_buff *msg; + int err; + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + mutex_lock(&vdpa_dev_mutex); + mdev = vdpa_mgmtdev_get_from_attr(info->attrs); + if (IS_ERR(mdev)) { + mutex_unlock(&vdpa_dev_mutex); + NL_SET_ERR_MSG_MOD(info->extack, "Fail to find the specified mgmt device"); + err = PTR_ERR(mdev); + goto out; + } + + err = vdpa_mgmtdev_fill(mdev, msg, info->snd_portid, info->snd_seq, 0); + mutex_unlock(&vdpa_dev_mutex); + if (err) + goto out; + err = genlmsg_reply(msg, info); + return err; + +out: + nlmsg_free(msg); + return err; +} + +static int +vdpa_nl_cmd_mgmtdev_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb) +{ + struct vdpa_mgmt_dev *mdev; + int start = cb->args[0]; + int idx = 0; + int err; + + mutex_lock(&vdpa_dev_mutex); + list_for_each_entry(mdev, &mdev_head, list) { + if (idx < start) { + idx++; + continue; + } + err = vdpa_mgmtdev_fill(mdev, msg, NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, NLM_F_MULTI); + if (err) + goto out; + idx++; + } +out: + mutex_unlock(&vdpa_dev_mutex); + cb->args[0] = idx; + return msg->len; +} + +static int vdpa_nl_cmd_dev_add_set_doit(struct sk_buff *skb, struct genl_info *info) +{ + struct vdpa_mgmt_dev *mdev; + const char *name; + int err = 0; + + if (!info->attrs[VDPA_ATTR_DEV_NAME]) + return -EINVAL; + + name = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]); + + mutex_lock(&vdpa_dev_mutex); + mdev = vdpa_mgmtdev_get_from_attr(info->attrs); + if 
(IS_ERR(mdev)) { + NL_SET_ERR_MSG_MOD(info->extack, "Fail to find the specified management device"); + err = PTR_ERR(mdev); + goto err; + } + + err = mdev->ops->dev_add(mdev, name); +err: + mutex_unlock(&vdpa_dev_mutex); + return err; +} + +static int vdpa_nl_cmd_dev_del_set_doit(struct sk_buff *skb, struct genl_info *info) +{ + struct vdpa_mgmt_dev *mdev; + struct vdpa_device *vdev; + struct device *dev; + const char *name; + int err = 0; + + if (!info->attrs[VDPA_ATTR_DEV_NAME]) + return -EINVAL; + name = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]); + + mutex_lock(&vdpa_dev_mutex); + dev = bus_find_device(&vdpa_bus, NULL, name, vdpa_name_match); + if (!dev) { + NL_SET_ERR_MSG_MOD(info->extack, "device not found"); + err = -ENODEV; + goto dev_err; + } + vdev = container_of(dev, struct vdpa_device, dev); + if (!vdev->mdev) { + NL_SET_ERR_MSG_MOD(info->extack, "Only user created device can be deleted by user"); + err = -EINVAL; + goto mdev_err; + } + mdev = vdev->mdev; + mdev->ops->dev_del(mdev, vdev); +mdev_err: + put_device(dev); +dev_err: + mutex_unlock(&vdpa_dev_mutex); + return err; +} + +static int +vdpa_dev_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid, u32 seq, + int flags, struct netlink_ext_ack *extack) +{ + u16 max_vq_size; + u32 device_id; + u32 vendor_id; + void *hdr; + int err; + + hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags, VDPA_CMD_DEV_NEW); + if (!hdr) + return -EMSGSIZE; + + err = vdpa_nl_mgmtdev_handle_fill(msg, vdev->mdev); + if (err) + goto msg_err; + + device_id = vdev->config->get_device_id(vdev); + vendor_id = vdev->config->get_vendor_id(vdev); + max_vq_size = vdev->config->get_vq_num_max(vdev); + + err = -EMSGSIZE; + if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev))) + goto msg_err; + if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id)) + goto msg_err; + if (nla_put_u32(msg, VDPA_ATTR_DEV_VENDOR_ID, vendor_id)) + goto msg_err; + if (nla_put_u32(msg, VDPA_ATTR_DEV_MAX_VQS, vdev->nvqs)) + goto msg_err; + if (nla_put_u16(msg, VDPA_ATTR_DEV_MAX_VQ_SIZE, max_vq_size)) + goto msg_err; + + genlmsg_end(msg, hdr); + return 0; + +msg_err: + genlmsg_cancel(msg, hdr); + return err; +} + +static int vdpa_nl_cmd_dev_get_doit(struct sk_buff *skb, struct genl_info *info) +{ + struct vdpa_device *vdev; + struct sk_buff *msg; + const char *devname; + struct device *dev; + int err; + + if (!info->attrs[VDPA_ATTR_DEV_NAME]) + return -EINVAL; + devname = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]); + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + mutex_lock(&vdpa_dev_mutex); + dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match); + if (!dev) { + NL_SET_ERR_MSG_MOD(info->extack, "device not found"); + err = -ENODEV; + goto err; + } + vdev = container_of(dev, struct vdpa_device, dev); + if (!vdev->mdev) { + err = -EINVAL; + goto mdev_err; + } + err = vdpa_dev_fill(vdev, msg, info->snd_portid, info->snd_seq, 0, info->extack); + if (!err) + err = genlmsg_reply(msg, info); +mdev_err: + put_device(dev); +err: + mutex_unlock(&vdpa_dev_mutex); + if (err) + nlmsg_free(msg); + return err; +} + +struct vdpa_dev_dump_info { + struct sk_buff *msg; + struct netlink_callback *cb; + int start_idx; + int idx; +}; + +static int vdpa_dev_dump(struct device *dev, void *data) +{ + struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev); + struct vdpa_dev_dump_info *info = data; + int err; + + if (!vdev->mdev) + return 0; + if (info->idx < info->start_idx) { + info->idx++; + return 0; + } + err = 
vdpa_dev_fill(vdev, info->msg, NETLINK_CB(info->cb->skb).portid, + info->cb->nlh->nlmsg_seq, NLM_F_MULTI, info->cb->extack); + if (err) + return err; + + info->idx++; + return 0; +} + +static int vdpa_nl_cmd_dev_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb) +{ + struct vdpa_dev_dump_info info; + + info.msg = msg; + info.cb = cb; + info.start_idx = cb->args[0]; + info.idx = 0; + + mutex_lock(&vdpa_dev_mutex); + bus_for_each_dev(&vdpa_bus, NULL, &info, vdpa_dev_dump); + mutex_unlock(&vdpa_dev_mutex); + cb->args[0] = info.idx; + return msg->len; +} + +static const struct nla_policy vdpa_nl_policy[VDPA_ATTR_MAX + 1] = { + [VDPA_ATTR_MGMTDEV_BUS_NAME] = { .type = NLA_NUL_STRING }, + [VDPA_ATTR_MGMTDEV_DEV_NAME] = { .type = NLA_STRING }, + [VDPA_ATTR_DEV_NAME] = { .type = NLA_STRING }, +}; + +static const struct genl_ops vdpa_nl_ops[] = { + { + .cmd = VDPA_CMD_MGMTDEV_GET, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, + .doit = vdpa_nl_cmd_mgmtdev_get_doit, + .dumpit = vdpa_nl_cmd_mgmtdev_get_dumpit, + }, + { + .cmd = VDPA_CMD_DEV_NEW, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, + .doit = vdpa_nl_cmd_dev_add_set_doit, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = VDPA_CMD_DEV_DEL, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, + .doit = vdpa_nl_cmd_dev_del_set_doit, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = VDPA_CMD_DEV_GET, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, + .doit = vdpa_nl_cmd_dev_get_doit, + .dumpit = vdpa_nl_cmd_dev_get_dumpit, + }, +}; + +static struct genl_family vdpa_nl_family __ro_after_init = { + .name = VDPA_GENL_NAME, + .version = VDPA_GENL_VERSION, + .maxattr = VDPA_ATTR_MAX, + .policy = vdpa_nl_policy, + .netnsok = false, + .module = THIS_MODULE, + .ops = vdpa_nl_ops, + .n_ops = ARRAY_SIZE(vdpa_nl_ops), +}; + static int vdpa_init(void) { - return bus_register(&vdpa_bus); + int err; + + err = bus_register(&vdpa_bus); + if (err) + return err; + err = genl_register_family(&vdpa_nl_family); + if (err) + goto err; + return 0; + +err: + bus_unregister(&vdpa_bus); + return err; } static void __exit vdpa_exit(void) { + genl_unregister_family(&vdpa_nl_family); bus_unregister(&vdpa_bus); ida_destroy(&vdpa_index_ida); } diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c index b3fcc67bfdf0..d5942842432d 100644 --- a/drivers/vdpa/vdpa_sim/vdpa_sim.c +++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c @@ -235,7 +235,7 @@ struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr) ops = &vdpasim_config_ops; vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops, - dev_attr->nvqs); + dev_attr->nvqs, dev_attr->name); if (!vdpasim) goto err_alloc; @@ -249,6 +249,7 @@ struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr) if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) goto err_iommu; set_dma_ops(dev, &vdpasim_dma_ops); + vdpasim->vdpa.mdev = dev_attr->mgmt_dev; vdpasim->config = kzalloc(dev_attr->config_size, GFP_KERNEL); if (!vdpasim->config) diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.h b/drivers/vdpa/vdpa_sim/vdpa_sim.h index b02142293d5b..6d75444f9948 100644 --- a/drivers/vdpa/vdpa_sim/vdpa_sim.h +++ b/drivers/vdpa/vdpa_sim/vdpa_sim.h @@ -33,6 +33,8 @@ struct vdpasim_virtqueue { }; struct vdpasim_dev_attr { + struct vdpa_mgmt_dev *mgmt_dev; + const char *name; u64 supported_features; size_t config_size; size_t buffer_size; diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c index 
c10b6981fdab..d344c5b7c914 100644 --- a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c +++ b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c @@ -33,9 +33,7 @@ static char *macaddr; module_param(macaddr, charp, 0); MODULE_PARM_DESC(macaddr, "Ethernet MAC address"); -u8 macaddr_buf[ETH_ALEN]; - -static struct vdpasim *vdpasim_net_dev; +static u8 macaddr_buf[ETH_ALEN]; static void vdpasim_net_work(struct work_struct *work) { @@ -120,21 +118,23 @@ static void vdpasim_net_get_config(struct vdpasim *vdpasim, void *config) memcpy(net_config->mac, macaddr_buf, ETH_ALEN); } -static int __init vdpasim_net_init(void) +static void vdpasim_net_mgmtdev_release(struct device *dev) +{ +} + +static struct device vdpasim_net_mgmtdev = { + .init_name = "vdpasim_net", + .release = vdpasim_net_mgmtdev_release, +}; + +static int vdpasim_net_dev_add(struct vdpa_mgmt_dev *mdev, const char *name) { struct vdpasim_dev_attr dev_attr = {}; + struct vdpasim *simdev; int ret; - if (macaddr) { - mac_pton(macaddr, macaddr_buf); - if (!is_valid_ether_addr(macaddr_buf)) { - ret = -EADDRNOTAVAIL; - goto out; - } - } else { - eth_random_addr(macaddr_buf); - } - + dev_attr.mgmt_dev = mdev; + dev_attr.name = name; dev_attr.id = VIRTIO_ID_NET; dev_attr.supported_features = VDPASIM_NET_FEATURES; dev_attr.nvqs = VDPASIM_NET_VQ_NUM; @@ -143,29 +143,75 @@ static int __init vdpasim_net_init(void) dev_attr.work_fn = vdpasim_net_work; dev_attr.buffer_size = PAGE_SIZE; - vdpasim_net_dev = vdpasim_create(&dev_attr); - if (IS_ERR(vdpasim_net_dev)) { - ret = PTR_ERR(vdpasim_net_dev); - goto out; + simdev = vdpasim_create(&dev_attr); + if (IS_ERR(simdev)) + return PTR_ERR(simdev); + + ret = _vdpa_register_device(&simdev->vdpa); + if (ret) + goto reg_err; + + return 0; + +reg_err: + put_device(&simdev->vdpa.dev); + return ret; +} + +static void vdpasim_net_dev_del(struct vdpa_mgmt_dev *mdev, + struct vdpa_device *dev) +{ + struct vdpasim *simdev = container_of(dev, struct vdpasim, vdpa); + + _vdpa_unregister_device(&simdev->vdpa); +} + +static const struct vdpa_mgmtdev_ops vdpasim_net_mgmtdev_ops = { + .dev_add = vdpasim_net_dev_add, + .dev_del = vdpasim_net_dev_del +}; + +static struct virtio_device_id id_table[] = { + { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID }, + { 0 }, +}; + +static struct vdpa_mgmt_dev mgmt_dev = { + .device = &vdpasim_net_mgmtdev, + .id_table = id_table, + .ops = &vdpasim_net_mgmtdev_ops, +}; + +static int __init vdpasim_net_init(void) +{ + int ret; + + if (macaddr) { + mac_pton(macaddr, macaddr_buf); + if (!is_valid_ether_addr(macaddr_buf)) + return -EADDRNOTAVAIL; + } else { + eth_random_addr(macaddr_buf); } - ret = vdpa_register_device(&vdpasim_net_dev->vdpa); + ret = device_register(&vdpasim_net_mgmtdev); if (ret) - goto put_dev; + return ret; + ret = vdpa_mgmtdev_register(&mgmt_dev); + if (ret) + goto parent_err; return 0; -put_dev: - put_device(&vdpasim_net_dev->vdpa.dev); -out: +parent_err: + device_unregister(&vdpasim_net_mgmtdev); return ret; } static void __exit vdpasim_net_exit(void) { - struct vdpa_device *vdpa = &vdpasim_net_dev->vdpa; - - vdpa_unregister_device(vdpa); + vdpa_mgmtdev_unregister(&mgmt_dev); + device_unregister(&vdpasim_net_mgmtdev); } module_init(vdpasim_net_init); diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index 4ce9f00ae10e..5de21ad4bd05 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -1814,12 +1814,9 @@ static int vhost_scsi_open(struct inode *inode, struct file *f) struct vhost_virtqueue **vqs; int r = -ENOMEM, i; - vs = kzalloc(sizeof(*vs), GFP_KERNEL | __GFP_NOWARN | 
__GFP_RETRY_MAYFAIL); - if (!vs) { - vs = vzalloc(sizeof(*vs)); - if (!vs) - goto err_vs; - } + vs = kvzalloc(sizeof(*vs), GFP_KERNEL); + if (!vs) + goto err_vs; vqs = kmalloc_array(VHOST_SCSI_MAX_VQ, sizeof(*vqs), GFP_KERNEL); if (!vqs) diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c index 83c8e809955a..e946903a86c2 100644 --- a/drivers/video/fbdev/aty/atyfb_base.c +++ b/drivers/video/fbdev/aty/atyfb_base.c @@ -175,6 +175,15 @@ u32 aty_ld_lcd(int index, const struct atyfb_par *par) return aty_ld_le32(LCD_DATA, par); } } +#else /* defined(CONFIG_PMAC_BACKLIGHT) || defined(CONFIG_FB_ATY_BACKLIGHT) \ + defined(CONFIG_FB_ATY_GENERIC_LCD) */ +void aty_st_lcd(int index, u32 val, const struct atyfb_par *par) +{ } + +u32 aty_ld_lcd(int index, const struct atyfb_par *par) +{ + return 0; +} #endif /* defined(CONFIG_PMAC_BACKLIGHT) || defined (CONFIG_FB_ATY_GENERIC_LCD) */ #ifdef CONFIG_FB_ATY_GENERIC_LCD diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig index 7b41130d3f35..ce1b3f6ec325 100644 --- a/drivers/virtio/Kconfig +++ b/drivers/virtio/Kconfig @@ -12,6 +12,14 @@ config ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS This option is selected if the architecture may need to enforce VIRTIO_F_ACCESS_PLATFORM +config VIRTIO_PCI_LIB + tristate + help + Modern PCI device implementation. This module implements the + basic probe and control for devices which are based on modern + PCI device with possible vendor specific extensions. Any + module that selects this module must depend on PCI. + menuconfig VIRTIO_MENU bool "Virtio drivers" default y @@ -21,6 +29,7 @@ if VIRTIO_MENU config VIRTIO_PCI tristate "PCI driver for virtio devices" depends on PCI + select VIRTIO_PCI_LIB select VIRTIO help This driver provides support for virtio based paravirtual device diff --git a/drivers/virtio/Makefile b/drivers/virtio/Makefile index 591e6f72aa54..699bbea0465f 100644 --- a/drivers/virtio/Makefile +++ b/drivers/virtio/Makefile @@ -1,5 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_VIRTIO) += virtio.o virtio_ring.o +obj-$(CONFIG_VIRTIO_PCI_LIB) += virtio_pci_modern_dev.o obj-$(CONFIG_VIRTIO_MMIO) += virtio_mmio.o obj-$(CONFIG_VIRTIO_PCI) += virtio_pci.o virtio_pci-y := virtio_pci_modern.o virtio_pci_common.o diff --git a/drivers/virtio/virtio_input.c b/drivers/virtio/virtio_input.c index f1f6208edcf5..ce51ae165943 100644 --- a/drivers/virtio/virtio_input.c +++ b/drivers/virtio/virtio_input.c @@ -7,6 +7,7 @@ #include <uapi/linux/virtio_ids.h> #include <uapi/linux/virtio_input.h> +#include <linux/input/mt.h> struct virtio_input { struct virtio_device *vdev; @@ -64,6 +65,21 @@ static int virtinput_send_status(struct virtio_input *vi, unsigned long flags; int rc; + /* + * Since 29cc309d8bf1 (HID: hid-multitouch: forward MSC_TIMESTAMP), + * EV_MSC/MSC_TIMESTAMP is added to each before EV_SYN event. + * EV_MSC is configured as INPUT_PASS_TO_ALL. + * In case of touch device: + * BE pass EV_MSC/MSC_TIMESTAMP to FE on receiving event from evdev. + * FE pass EV_MSC/MSC_TIMESTAMP back to BE. + * BE writes EV_MSC/MSC_TIMESTAMP to evdev due to INPUT_PASS_TO_ALL. + * BE receives extra EV_MSC/MSC_TIMESTAMP and pass to FE. + * >>> Each new frame becomes larger and larger. + * Disable EV_MSC/MSC_TIMESTAMP forwarding for MT. 
+ */ + if (vi->idev->mt && type == EV_MSC && code == MSC_TIMESTAMP) + return 0; + stsbuf = kzalloc(sizeof(*stsbuf), GFP_ATOMIC); if (!stsbuf) return -ENOMEM; @@ -204,7 +220,7 @@ static int virtinput_probe(struct virtio_device *vdev) struct virtio_input *vi; unsigned long flags; size_t size; - int abs, err; + int abs, err, nslots; if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) return -ENODEV; @@ -289,6 +305,13 @@ static int virtinput_probe(struct virtio_device *vdev) continue; virtinput_cfg_abs(vi, abs); } + + if (test_bit(ABS_MT_SLOT, vi->idev->absbit)) { + nslots = input_abs_get_max(vi->idev, ABS_MT_SLOT) + 1; + err = input_mt_init_slots(vi->idev, nslots, 0); + if (err) + goto err_mt_init_slots; + } } virtio_device_ready(vdev); @@ -304,6 +327,7 @@ err_input_register: spin_lock_irqsave(&vi->lock, flags); vi->ready = false; spin_unlock_irqrestore(&vi->lock, flags); +err_mt_init_slots: input_free_device(vi->idev); err_input_alloc: vdev->config->del_vqs(vdev); diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c index 1119e0c6f6c1..10ec60d81e84 100644 --- a/drivers/virtio/virtio_mem.c +++ b/drivers/virtio/virtio_mem.c @@ -2590,7 +2590,7 @@ static int virtio_mem_probe(struct virtio_device *vdev) * actually in use (e.g., trying to reload the driver). */ if (vm->plugged_size) { - vm->unplug_all_required = 1; + vm->unplug_all_required = true; dev_info(&vm->vdev->dev, "unplugging all memory is required\n"); } diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c index 238383ff1064..a286d22b6551 100644 --- a/drivers/virtio/virtio_mmio.c +++ b/drivers/virtio/virtio_mmio.c @@ -126,7 +126,7 @@ static int vm_finalize_features(struct virtio_device *vdev) /* Give virtio_ring a chance to accept features. */ vring_transport_features(vdev); - /* Make sure there is are no mixed devices */ + /* Make sure there are no mixed devices */ if (vm_dev->version == 2 && !__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) { dev_err(&vdev->dev, "New virtio-mmio devices (version 2) must provide VIRTIO_F_VERSION_1 feature!\n"); diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h index b2f0eb4067cb..beec047a8f8d 100644 --- a/drivers/virtio/virtio_pci_common.h +++ b/drivers/virtio/virtio_pci_common.h @@ -25,6 +25,7 @@ #include <linux/virtio_config.h> #include <linux/virtio_ring.h> #include <linux/virtio_pci.h> +#include <linux/virtio_pci_modern.h> #include <linux/highmem.h> #include <linux/spinlock.h> @@ -43,31 +44,12 @@ struct virtio_pci_vq_info { struct virtio_pci_device { struct virtio_device vdev; struct pci_dev *pci_dev; + struct virtio_pci_modern_device mdev; /* In legacy mode, these two point to within ->legacy. */ /* Where to read and clear interrupt */ u8 __iomem *isr; - /* Modern only fields */ - /* The IO mapping for the PCI config space (non-legacy mode) */ - struct virtio_pci_common_cfg __iomem *common; - /* Device-specific data (non-legacy mode) */ - void __iomem *device; - /* Base of vq notifications (non-legacy mode). */ - void __iomem *notify_base; - - /* So we can sanity-check accesses. */ - size_t notify_len; - size_t device_len; - - /* Capability for when we need to map notifications per-vq. */ - int notify_map_cap; - - /* Multiply queue_notify_off by this value. (non-legacy mode). 
*/ - u32 notify_offset_multiplier; - - int modern_bars; - /* Legacy only field */ /* the IO mapping for the PCI config space */ void __iomem *ioaddr; diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c index 3d6ae5a5e252..fbd4ebc00eb6 100644 --- a/drivers/virtio/virtio_pci_modern.c +++ b/drivers/virtio/virtio_pci_modern.c @@ -19,136 +19,11 @@ #define VIRTIO_RING_NO_LEGACY #include "virtio_pci_common.h" -/* - * Type-safe wrappers for io accesses. - * Use these to enforce at compile time the following spec requirement: - * - * The driver MUST access each field using the “natural” access - * method, i.e. 32-bit accesses for 32-bit fields, 16-bit accesses - * for 16-bit fields and 8-bit accesses for 8-bit fields. - */ -static inline u8 vp_ioread8(const u8 __iomem *addr) -{ - return ioread8(addr); -} -static inline u16 vp_ioread16 (const __le16 __iomem *addr) -{ - return ioread16(addr); -} - -static inline u32 vp_ioread32(const __le32 __iomem *addr) -{ - return ioread32(addr); -} - -static inline void vp_iowrite8(u8 value, u8 __iomem *addr) -{ - iowrite8(value, addr); -} - -static inline void vp_iowrite16(u16 value, __le16 __iomem *addr) -{ - iowrite16(value, addr); -} - -static inline void vp_iowrite32(u32 value, __le32 __iomem *addr) -{ - iowrite32(value, addr); -} - -static void vp_iowrite64_twopart(u64 val, - __le32 __iomem *lo, __le32 __iomem *hi) -{ - vp_iowrite32((u32)val, lo); - vp_iowrite32(val >> 32, hi); -} - -static void __iomem *map_capability(struct pci_dev *dev, int off, - size_t minlen, - u32 align, - u32 start, u32 size, - size_t *len) -{ - u8 bar; - u32 offset, length; - void __iomem *p; - - pci_read_config_byte(dev, off + offsetof(struct virtio_pci_cap, - bar), - &bar); - pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, offset), - &offset); - pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, length), - &length); - - if (length <= start) { - dev_err(&dev->dev, - "virtio_pci: bad capability len %u (>%u expected)\n", - length, start); - return NULL; - } - - if (length - start < minlen) { - dev_err(&dev->dev, - "virtio_pci: bad capability len %u (>=%zu expected)\n", - length, minlen); - return NULL; - } - - length -= start; - - if (start + offset < offset) { - dev_err(&dev->dev, - "virtio_pci: map wrap-around %u+%u\n", - start, offset); - return NULL; - } - - offset += start; - - if (offset & (align - 1)) { - dev_err(&dev->dev, - "virtio_pci: offset %u not aligned to %u\n", - offset, align); - return NULL; - } - - if (length > size) - length = size; - - if (len) - *len = length; - - if (minlen + offset < minlen || - minlen + offset > pci_resource_len(dev, bar)) { - dev_err(&dev->dev, - "virtio_pci: map virtio %zu@%u " - "out of range on bar %i length %lu\n", - minlen, offset, - bar, (unsigned long)pci_resource_len(dev, bar)); - return NULL; - } - - p = pci_iomap_range(dev, bar, offset, length); - if (!p) - dev_err(&dev->dev, - "virtio_pci: unable to map virtio %u@%u on bar %i\n", - length, offset, bar); - return p; -} - -/* virtio config->get_features() implementation */ static u64 vp_get_features(struct virtio_device *vdev) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); - u64 features; - - vp_iowrite32(0, &vp_dev->common->device_feature_select); - features = vp_ioread32(&vp_dev->common->device_feature); - vp_iowrite32(1, &vp_dev->common->device_feature_select); - features |= ((u64)vp_ioread32(&vp_dev->common->device_feature) << 32); - return features; + return vp_modern_get_features(&vp_dev->mdev); } 
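For orientation, here is a minimal sketch (not part of this patch) of how an out-of-tree consumer of the new VIRTIO_PCI_LIB helpers might probe a device and read its feature bits. The foo_* names and the devres-based allocation are invented for illustration; only the vp_modern_*() calls, and the requirement that the caller enable the PCI device before vp_modern_probe(), come from this series.

#include <linux/bits.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/virtio_config.h>
#include <linux/virtio_pci_modern.h>

struct foo_dev {
	struct virtio_pci_modern_device mdev;
};

static int foo_probe(struct pci_dev *pci_dev)
{
	struct foo_dev *foo;
	u64 features;
	int err;

	foo = devm_kzalloc(&pci_dev->dev, sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return -ENOMEM;

	/* vp_modern_probe() expects an already enabled PCI device. */
	err = pcim_enable_device(pci_dev);
	if (err)
		return err;

	/* Maps the common, isr, notify and device capabilities. */
	foo->mdev.pci_dev = pci_dev;
	err = vp_modern_probe(&foo->mdev);
	if (err)
		return err;

	features = vp_modern_get_features(&foo->mdev);
	if (!(features & BIT_ULL(VIRTIO_F_VERSION_1))) {
		vp_modern_remove(&foo->mdev);
		return -ENODEV;
	}

	pci_set_drvdata(pci_dev, foo);
	return 0;
}

Per the new Kconfig entry, any module selecting VIRTIO_PCI_LIB must itself depend on PCI.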
static void vp_transport_features(struct virtio_device *vdev, u64 features) @@ -179,10 +54,7 @@ static int vp_finalize_features(struct virtio_device *vdev) return -EINVAL; } - vp_iowrite32(0, &vp_dev->common->guest_feature_select); - vp_iowrite32((u32)vdev->features, &vp_dev->common->guest_feature); - vp_iowrite32(1, &vp_dev->common->guest_feature_select); - vp_iowrite32(vdev->features >> 32, &vp_dev->common->guest_feature); + vp_modern_set_features(&vp_dev->mdev, vdev->features); return 0; } @@ -192,29 +64,31 @@ static void vp_get(struct virtio_device *vdev, unsigned offset, void *buf, unsigned len) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); + struct virtio_pci_modern_device *mdev = &vp_dev->mdev; + void __iomem *device = mdev->device; u8 b; __le16 w; __le32 l; - BUG_ON(offset + len > vp_dev->device_len); + BUG_ON(offset + len > mdev->device_len); switch (len) { case 1: - b = ioread8(vp_dev->device + offset); + b = ioread8(device + offset); memcpy(buf, &b, sizeof b); break; case 2: - w = cpu_to_le16(ioread16(vp_dev->device + offset)); + w = cpu_to_le16(ioread16(device + offset)); memcpy(buf, &w, sizeof w); break; case 4: - l = cpu_to_le32(ioread32(vp_dev->device + offset)); + l = cpu_to_le32(ioread32(device + offset)); memcpy(buf, &l, sizeof l); break; case 8: - l = cpu_to_le32(ioread32(vp_dev->device + offset)); + l = cpu_to_le32(ioread32(device + offset)); memcpy(buf, &l, sizeof l); - l = cpu_to_le32(ioread32(vp_dev->device + offset + sizeof l)); + l = cpu_to_le32(ioread32(device + offset + sizeof l)); memcpy(buf + sizeof l, &l, sizeof l); break; default: @@ -228,30 +102,32 @@ static void vp_set(struct virtio_device *vdev, unsigned offset, const void *buf, unsigned len) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); + struct virtio_pci_modern_device *mdev = &vp_dev->mdev; + void __iomem *device = mdev->device; u8 b; __le16 w; __le32 l; - BUG_ON(offset + len > vp_dev->device_len); + BUG_ON(offset + len > mdev->device_len); switch (len) { case 1: memcpy(&b, buf, sizeof b); - iowrite8(b, vp_dev->device + offset); + iowrite8(b, device + offset); break; case 2: memcpy(&w, buf, sizeof w); - iowrite16(le16_to_cpu(w), vp_dev->device + offset); + iowrite16(le16_to_cpu(w), device + offset); break; case 4: memcpy(&l, buf, sizeof l); - iowrite32(le32_to_cpu(l), vp_dev->device + offset); + iowrite32(le32_to_cpu(l), device + offset); break; case 8: memcpy(&l, buf, sizeof l); - iowrite32(le32_to_cpu(l), vp_dev->device + offset); + iowrite32(le32_to_cpu(l), device + offset); memcpy(&l, buf + sizeof l, sizeof l); - iowrite32(le32_to_cpu(l), vp_dev->device + offset + sizeof l); + iowrite32(le32_to_cpu(l), device + offset + sizeof l); break; default: BUG(); @@ -261,35 +137,40 @@ static void vp_set(struct virtio_device *vdev, unsigned offset, static u32 vp_generation(struct virtio_device *vdev) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); - return vp_ioread8(&vp_dev->common->config_generation); + + return vp_modern_generation(&vp_dev->mdev); } /* config->{get,set}_status() implementations */ static u8 vp_get_status(struct virtio_device *vdev) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); - return vp_ioread8(&vp_dev->common->device_status); + + return vp_modern_get_status(&vp_dev->mdev); } static void vp_set_status(struct virtio_device *vdev, u8 status) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); + /* We should never be setting status to 0. 
*/ BUG_ON(status == 0); - vp_iowrite8(status, &vp_dev->common->device_status); + vp_modern_set_status(&vp_dev->mdev, status); } static void vp_reset(struct virtio_device *vdev) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); + struct virtio_pci_modern_device *mdev = &vp_dev->mdev; + /* 0 status means a reset. */ - vp_iowrite8(0, &vp_dev->common->device_status); + vp_modern_set_status(mdev, 0); /* After writing 0 to device_status, the driver MUST wait for a read of * device_status to return 0 before reinitializing the device. * This will flush out the status write, and flush in device writes, * including MSI-X interrupts, if any. */ - while (vp_ioread8(&vp_dev->common->device_status)) + while (vp_modern_get_status(mdev)) msleep(1); /* Flush pending VQ/configuration callbacks. */ vp_synchronize_vectors(vdev); @@ -297,11 +178,7 @@ static void vp_reset(struct virtio_device *vdev) static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector) { - /* Setup the vector used for configuration events */ - vp_iowrite16(vector, &vp_dev->common->msix_config); - /* Verify we had enough resources to assign the vector */ - /* Will also flush the write out to device */ - return vp_ioread16(&vp_dev->common->msix_config); + return vp_modern_config_vector(&vp_dev->mdev, vector); } static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev, @@ -312,20 +189,18 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev, bool ctx, u16 msix_vec) { - struct virtio_pci_common_cfg __iomem *cfg = vp_dev->common; + + struct virtio_pci_modern_device *mdev = &vp_dev->mdev; struct virtqueue *vq; u16 num, off; int err; - if (index >= vp_ioread16(&cfg->num_queues)) + if (index >= vp_modern_get_num_queues(mdev)) return ERR_PTR(-ENOENT); - /* Select the queue we're interested in */ - vp_iowrite16(index, &cfg->queue_select); - /* Check if queue is either not available or already active. 
*/ - num = vp_ioread16(&cfg->queue_size); - if (!num || vp_ioread16(&cfg->queue_enable)) + num = vp_modern_get_queue_size(mdev, index); + if (!num || vp_modern_get_queue_enable(mdev, index)) return ERR_PTR(-ENOENT); if (num & (num - 1)) { @@ -334,7 +209,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev, } /* get offset of notification word for this vq */ - off = vp_ioread16(&cfg->queue_notify_off); + off = vp_modern_get_queue_notify_off(mdev, index); info->msix_vector = msix_vec; @@ -347,33 +222,30 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev, return ERR_PTR(-ENOMEM); /* activate the queue */ - vp_iowrite16(virtqueue_get_vring_size(vq), &cfg->queue_size); - vp_iowrite64_twopart(virtqueue_get_desc_addr(vq), - &cfg->queue_desc_lo, &cfg->queue_desc_hi); - vp_iowrite64_twopart(virtqueue_get_avail_addr(vq), - &cfg->queue_avail_lo, &cfg->queue_avail_hi); - vp_iowrite64_twopart(virtqueue_get_used_addr(vq), - &cfg->queue_used_lo, &cfg->queue_used_hi); - - if (vp_dev->notify_base) { + vp_modern_set_queue_size(mdev, index, virtqueue_get_vring_size(vq)); + vp_modern_queue_address(mdev, index, virtqueue_get_desc_addr(vq), + virtqueue_get_avail_addr(vq), + virtqueue_get_used_addr(vq)); + + if (mdev->notify_base) { /* offset should not wrap */ - if ((u64)off * vp_dev->notify_offset_multiplier + 2 - > vp_dev->notify_len) { - dev_warn(&vp_dev->pci_dev->dev, + if ((u64)off * mdev->notify_offset_multiplier + 2 + > mdev->notify_len) { + dev_warn(&mdev->pci_dev->dev, "bad notification offset %u (x %u) " "for queue %u > %zd", - off, vp_dev->notify_offset_multiplier, - index, vp_dev->notify_len); + off, mdev->notify_offset_multiplier, + index, mdev->notify_len); err = -EINVAL; goto err_map_notify; } - vq->priv = (void __force *)vp_dev->notify_base + - off * vp_dev->notify_offset_multiplier; + vq->priv = (void __force *)mdev->notify_base + + off * mdev->notify_offset_multiplier; } else { - vq->priv = (void __force *)map_capability(vp_dev->pci_dev, - vp_dev->notify_map_cap, 2, 2, - off * vp_dev->notify_offset_multiplier, 2, - NULL); + vq->priv = (void __force *)vp_modern_map_capability(mdev, + mdev->notify_map_cap, 2, 2, + off * mdev->notify_offset_multiplier, 2, + NULL); } if (!vq->priv) { @@ -382,8 +254,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev, } if (msix_vec != VIRTIO_MSI_NO_VECTOR) { - vp_iowrite16(msix_vec, &cfg->queue_msix_vector); - msix_vec = vp_ioread16(&cfg->queue_msix_vector); + msix_vec = vp_modern_queue_vector(mdev, index, msix_vec); if (msix_vec == VIRTIO_MSI_NO_VECTOR) { err = -EBUSY; goto err_assign_vector; @@ -393,8 +264,8 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev, return vq; err_assign_vector: - if (!vp_dev->notify_base) - pci_iounmap(vp_dev->pci_dev, (void __iomem __force *)vq->priv); + if (!mdev->notify_base) + pci_iounmap(mdev->pci_dev, (void __iomem __force *)vq->priv); err_map_notify: vring_del_virtqueue(vq); return ERR_PTR(err); @@ -416,10 +287,8 @@ static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned nvqs, /* Select and activate all queues. Has to be done last: once we do * this, there's no way to go back except reset. 
*/ - list_for_each_entry(vq, &vdev->vqs, list) { - vp_iowrite16(vq->index, &vp_dev->common->queue_select); - vp_iowrite16(1, &vp_dev->common->queue_enable); - } + list_for_each_entry(vq, &vdev->vqs, list) + vp_modern_set_queue_enable(&vp_dev->mdev, vq->index, true); return 0; } @@ -428,18 +297,14 @@ static void del_vq(struct virtio_pci_vq_info *info) { struct virtqueue *vq = info->vq; struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); + struct virtio_pci_modern_device *mdev = &vp_dev->mdev; - vp_iowrite16(vq->index, &vp_dev->common->queue_select); - - if (vp_dev->msix_enabled) { - vp_iowrite16(VIRTIO_MSI_NO_VECTOR, - &vp_dev->common->queue_msix_vector); - /* Flush the write out to device */ - vp_ioread16(&vp_dev->common->queue_msix_vector); - } + if (vp_dev->msix_enabled) + vp_modern_queue_vector(mdev, vq->index, + VIRTIO_MSI_NO_VECTOR); - if (!vp_dev->notify_base) - pci_iounmap(vp_dev->pci_dev, (void __force __iomem *)vq->priv); + if (!mdev->notify_base) + pci_iounmap(mdev->pci_dev, (void __force __iomem *)vq->priv); vring_del_virtqueue(vq); } @@ -571,261 +436,36 @@ static const struct virtio_config_ops virtio_pci_config_ops = { .get_shm_region = vp_get_shm_region, }; -/** - * virtio_pci_find_capability - walk capabilities to find device info. - * @dev: the pci device - * @cfg_type: the VIRTIO_PCI_CAP_* value we seek - * @ioresource_types: IORESOURCE_MEM and/or IORESOURCE_IO. - * @bars: the bitmask of BARs - * - * Returns offset of the capability, or 0. - */ -static inline int virtio_pci_find_capability(struct pci_dev *dev, u8 cfg_type, - u32 ioresource_types, int *bars) -{ - int pos; - - for (pos = pci_find_capability(dev, PCI_CAP_ID_VNDR); - pos > 0; - pos = pci_find_next_capability(dev, pos, PCI_CAP_ID_VNDR)) { - u8 type, bar; - pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap, - cfg_type), - &type); - pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap, - bar), - &bar); - - /* Ignore structures with reserved BAR values */ - if (bar > 0x5) - continue; - - if (type == cfg_type) { - if (pci_resource_len(dev, bar) && - pci_resource_flags(dev, bar) & ioresource_types) { - *bars |= (1 << bar); - return pos; - } - } - } - return 0; -} - -/* This is part of the ABI. Don't screw with it. */ -static inline void check_offsets(void) -{ - /* Note: disk space was harmed in compilation of this function. 
*/ - BUILD_BUG_ON(VIRTIO_PCI_CAP_VNDR != - offsetof(struct virtio_pci_cap, cap_vndr)); - BUILD_BUG_ON(VIRTIO_PCI_CAP_NEXT != - offsetof(struct virtio_pci_cap, cap_next)); - BUILD_BUG_ON(VIRTIO_PCI_CAP_LEN != - offsetof(struct virtio_pci_cap, cap_len)); - BUILD_BUG_ON(VIRTIO_PCI_CAP_CFG_TYPE != - offsetof(struct virtio_pci_cap, cfg_type)); - BUILD_BUG_ON(VIRTIO_PCI_CAP_BAR != - offsetof(struct virtio_pci_cap, bar)); - BUILD_BUG_ON(VIRTIO_PCI_CAP_OFFSET != - offsetof(struct virtio_pci_cap, offset)); - BUILD_BUG_ON(VIRTIO_PCI_CAP_LENGTH != - offsetof(struct virtio_pci_cap, length)); - BUILD_BUG_ON(VIRTIO_PCI_NOTIFY_CAP_MULT != - offsetof(struct virtio_pci_notify_cap, - notify_off_multiplier)); - BUILD_BUG_ON(VIRTIO_PCI_COMMON_DFSELECT != - offsetof(struct virtio_pci_common_cfg, - device_feature_select)); - BUILD_BUG_ON(VIRTIO_PCI_COMMON_DF != - offsetof(struct virtio_pci_common_cfg, device_feature)); - BUILD_BUG_ON(VIRTIO_PCI_COMMON_GFSELECT != - offsetof(struct virtio_pci_common_cfg, - guest_feature_select)); - BUILD_BUG_ON(VIRTIO_PCI_COMMON_GF != - offsetof(struct virtio_pci_common_cfg, guest_feature)); - BUILD_BUG_ON(VIRTIO_PCI_COMMON_MSIX != - offsetof(struct virtio_pci_common_cfg, msix_config)); - BUILD_BUG_ON(VIRTIO_PCI_COMMON_NUMQ != - offsetof(struct virtio_pci_common_cfg, num_queues)); - BUILD_BUG_ON(VIRTIO_PCI_COMMON_STATUS != - offsetof(struct virtio_pci_common_cfg, device_status)); - BUILD_BUG_ON(VIRTIO_PCI_COMMON_CFGGENERATION != - offsetof(struct virtio_pci_common_cfg, config_generation)); - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SELECT != - offsetof(struct virtio_pci_common_cfg, queue_select)); - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SIZE != - offsetof(struct virtio_pci_common_cfg, queue_size)); - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_MSIX != - offsetof(struct virtio_pci_common_cfg, queue_msix_vector)); - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_ENABLE != - offsetof(struct virtio_pci_common_cfg, queue_enable)); - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_NOFF != - offsetof(struct virtio_pci_common_cfg, queue_notify_off)); - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCLO != - offsetof(struct virtio_pci_common_cfg, queue_desc_lo)); - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCHI != - offsetof(struct virtio_pci_common_cfg, queue_desc_hi)); - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILLO != - offsetof(struct virtio_pci_common_cfg, queue_avail_lo)); - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILHI != - offsetof(struct virtio_pci_common_cfg, queue_avail_hi)); - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDLO != - offsetof(struct virtio_pci_common_cfg, queue_used_lo)); - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDHI != - offsetof(struct virtio_pci_common_cfg, queue_used_hi)); -} - /* the PCI probing function */ int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev) { + struct virtio_pci_modern_device *mdev = &vp_dev->mdev; struct pci_dev *pci_dev = vp_dev->pci_dev; - int err, common, isr, notify, device; - u32 notify_length; - u32 notify_offset; - - check_offsets(); - - /* We only own devices >= 0x1000 and <= 0x107f: leave the rest. */ - if (pci_dev->device < 0x1000 || pci_dev->device > 0x107f) - return -ENODEV; - - if (pci_dev->device < 0x1040) { - /* Transitional devices: use the PCI subsystem device id as - * virtio device id, same as legacy driver always did. - */ - vp_dev->vdev.id.device = pci_dev->subsystem_device; - } else { - /* Modern devices: simply use PCI device id, but start from 0x1040. 
*/ - vp_dev->vdev.id.device = pci_dev->device - 0x1040; - } - vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor; - - /* check for a common config: if not, use legacy mode (bar 0). */ - common = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_COMMON_CFG, - IORESOURCE_IO | IORESOURCE_MEM, - &vp_dev->modern_bars); - if (!common) { - dev_info(&pci_dev->dev, - "virtio_pci: leaving for legacy driver\n"); - return -ENODEV; - } - - /* If common is there, these should be too... */ - isr = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_ISR_CFG, - IORESOURCE_IO | IORESOURCE_MEM, - &vp_dev->modern_bars); - notify = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_NOTIFY_CFG, - IORESOURCE_IO | IORESOURCE_MEM, - &vp_dev->modern_bars); - if (!isr || !notify) { - dev_err(&pci_dev->dev, - "virtio_pci: missing capabilities %i/%i/%i\n", - common, isr, notify); - return -EINVAL; - } - - err = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64)); - if (err) - err = dma_set_mask_and_coherent(&pci_dev->dev, - DMA_BIT_MASK(32)); - if (err) - dev_warn(&pci_dev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n"); + int err; - /* Device capability is only mandatory for devices that have - * device-specific configuration. - */ - device = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_DEVICE_CFG, - IORESOURCE_IO | IORESOURCE_MEM, - &vp_dev->modern_bars); + mdev->pci_dev = pci_dev; - err = pci_request_selected_regions(pci_dev, vp_dev->modern_bars, - "virtio-pci-modern"); + err = vp_modern_probe(mdev); if (err) return err; - err = -EINVAL; - vp_dev->common = map_capability(pci_dev, common, - sizeof(struct virtio_pci_common_cfg), 4, - 0, sizeof(struct virtio_pci_common_cfg), - NULL); - if (!vp_dev->common) - goto err_map_common; - vp_dev->isr = map_capability(pci_dev, isr, sizeof(u8), 1, - 0, 1, - NULL); - if (!vp_dev->isr) - goto err_map_isr; - - /* Read notify_off_multiplier from config space. */ - pci_read_config_dword(pci_dev, - notify + offsetof(struct virtio_pci_notify_cap, - notify_off_multiplier), - &vp_dev->notify_offset_multiplier); - /* Read notify length and offset from config space. */ - pci_read_config_dword(pci_dev, - notify + offsetof(struct virtio_pci_notify_cap, - cap.length), - ¬ify_length); - - pci_read_config_dword(pci_dev, - notify + offsetof(struct virtio_pci_notify_cap, - cap.offset), - ¬ify_offset); - - /* We don't know how many VQs we'll map, ahead of the time. - * If notify length is small, map it all now. - * Otherwise, map each VQ individually later. - */ - if ((u64)notify_length + (notify_offset % PAGE_SIZE) <= PAGE_SIZE) { - vp_dev->notify_base = map_capability(pci_dev, notify, 2, 2, - 0, notify_length, - &vp_dev->notify_len); - if (!vp_dev->notify_base) - goto err_map_notify; - } else { - vp_dev->notify_map_cap = notify; - } - - /* Again, we don't know how much we should map, but PAGE_SIZE - * is more than enough for all existing devices. 
- */ - if (device) { - vp_dev->device = map_capability(pci_dev, device, 0, 4, - 0, PAGE_SIZE, - &vp_dev->device_len); - if (!vp_dev->device) - goto err_map_device; - + if (mdev->device) vp_dev->vdev.config = &virtio_pci_config_ops; - } else { + else vp_dev->vdev.config = &virtio_pci_config_nodev_ops; - } vp_dev->config_vector = vp_config_vector; vp_dev->setup_vq = setup_vq; vp_dev->del_vq = del_vq; + vp_dev->isr = mdev->isr; + vp_dev->vdev.id = mdev->id; return 0; - -err_map_device: - if (vp_dev->notify_base) - pci_iounmap(pci_dev, vp_dev->notify_base); -err_map_notify: - pci_iounmap(pci_dev, vp_dev->isr); -err_map_isr: - pci_iounmap(pci_dev, vp_dev->common); -err_map_common: - return err; } void virtio_pci_modern_remove(struct virtio_pci_device *vp_dev) { - struct pci_dev *pci_dev = vp_dev->pci_dev; + struct virtio_pci_modern_device *mdev = &vp_dev->mdev; - if (vp_dev->device) - pci_iounmap(pci_dev, vp_dev->device); - if (vp_dev->notify_base) - pci_iounmap(pci_dev, vp_dev->notify_base); - pci_iounmap(pci_dev, vp_dev->isr); - pci_iounmap(pci_dev, vp_dev->common); - pci_release_selected_regions(pci_dev, vp_dev->modern_bars); + vp_modern_remove(mdev); } diff --git a/drivers/virtio/virtio_pci_modern_dev.c b/drivers/virtio/virtio_pci_modern_dev.c new file mode 100644 index 000000000000..cbd667496bb1 --- /dev/null +++ b/drivers/virtio/virtio_pci_modern_dev.c @@ -0,0 +1,599 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +#include <linux/virtio_pci_modern.h> +#include <linux/module.h> +#include <linux/pci.h> + +/* + * vp_modern_map_capability - map a part of virtio pci capability + * @mdev: the modern virtio-pci device + * @off: offset of the capability + * @minlen: minimal length of the capability + * @align: align requirement + * @start: start from the capability + * @size: map size + * @len: the length that is actually mapped + * + * Returns the io address for the part of the capability + */ +void __iomem *vp_modern_map_capability(struct virtio_pci_modern_device *mdev, int off, + size_t minlen, + u32 align, + u32 start, u32 size, + size_t *len) +{ + struct pci_dev *dev = mdev->pci_dev; + u8 bar; + u32 offset, length; + void __iomem *p; + + pci_read_config_byte(dev, off + offsetof(struct virtio_pci_cap, + bar), + &bar); + pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, offset), + &offset); + pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, length), + &length); + + if (length <= start) { + dev_err(&dev->dev, + "virtio_pci: bad capability len %u (>%u expected)\n", + length, start); + return NULL; + } + + if (length - start < minlen) { + dev_err(&dev->dev, + "virtio_pci: bad capability len %u (>=%zu expected)\n", + length, minlen); + return NULL; + } + + length -= start; + + if (start + offset < offset) { + dev_err(&dev->dev, + "virtio_pci: map wrap-around %u+%u\n", + start, offset); + return NULL; + } + + offset += start; + + if (offset & (align - 1)) { + dev_err(&dev->dev, + "virtio_pci: offset %u not aligned to %u\n", + offset, align); + return NULL; + } + + if (length > size) + length = size; + + if (len) + *len = length; + + if (minlen + offset < minlen || + minlen + offset > pci_resource_len(dev, bar)) { + dev_err(&dev->dev, + "virtio_pci: map virtio %zu@%u " + "out of range on bar %i length %lu\n", + minlen, offset, + bar, (unsigned long)pci_resource_len(dev, bar)); + return NULL; + } + + p = pci_iomap_range(dev, bar, offset, length); + if (!p) + dev_err(&dev->dev, + "virtio_pci: unable to map virtio %u@%u on bar %i\n", + length, offset, bar); + 
return p; +} +EXPORT_SYMBOL_GPL(vp_modern_map_capability); + +/** + * virtio_pci_find_capability - walk capabilities to find device info. + * @dev: the pci device + * @cfg_type: the VIRTIO_PCI_CAP_* value we seek + * @ioresource_types: IORESOURCE_MEM and/or IORESOURCE_IO. + * @bars: the bitmask of BARs + * + * Returns offset of the capability, or 0. + */ +static inline int virtio_pci_find_capability(struct pci_dev *dev, u8 cfg_type, + u32 ioresource_types, int *bars) +{ + int pos; + + for (pos = pci_find_capability(dev, PCI_CAP_ID_VNDR); + pos > 0; + pos = pci_find_next_capability(dev, pos, PCI_CAP_ID_VNDR)) { + u8 type, bar; + pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap, + cfg_type), + &type); + pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap, + bar), + &bar); + + /* Ignore structures with reserved BAR values */ + if (bar > 0x5) + continue; + + if (type == cfg_type) { + if (pci_resource_len(dev, bar) && + pci_resource_flags(dev, bar) & ioresource_types) { + *bars |= (1 << bar); + return pos; + } + } + } + return 0; +} + +/* This is part of the ABI. Don't screw with it. */ +static inline void check_offsets(void) +{ + /* Note: disk space was harmed in compilation of this function. */ + BUILD_BUG_ON(VIRTIO_PCI_CAP_VNDR != + offsetof(struct virtio_pci_cap, cap_vndr)); + BUILD_BUG_ON(VIRTIO_PCI_CAP_NEXT != + offsetof(struct virtio_pci_cap, cap_next)); + BUILD_BUG_ON(VIRTIO_PCI_CAP_LEN != + offsetof(struct virtio_pci_cap, cap_len)); + BUILD_BUG_ON(VIRTIO_PCI_CAP_CFG_TYPE != + offsetof(struct virtio_pci_cap, cfg_type)); + BUILD_BUG_ON(VIRTIO_PCI_CAP_BAR != + offsetof(struct virtio_pci_cap, bar)); + BUILD_BUG_ON(VIRTIO_PCI_CAP_OFFSET != + offsetof(struct virtio_pci_cap, offset)); + BUILD_BUG_ON(VIRTIO_PCI_CAP_LENGTH != + offsetof(struct virtio_pci_cap, length)); + BUILD_BUG_ON(VIRTIO_PCI_NOTIFY_CAP_MULT != + offsetof(struct virtio_pci_notify_cap, + notify_off_multiplier)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_DFSELECT != + offsetof(struct virtio_pci_common_cfg, + device_feature_select)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_DF != + offsetof(struct virtio_pci_common_cfg, device_feature)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_GFSELECT != + offsetof(struct virtio_pci_common_cfg, + guest_feature_select)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_GF != + offsetof(struct virtio_pci_common_cfg, guest_feature)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_MSIX != + offsetof(struct virtio_pci_common_cfg, msix_config)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_NUMQ != + offsetof(struct virtio_pci_common_cfg, num_queues)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_STATUS != + offsetof(struct virtio_pci_common_cfg, device_status)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_CFGGENERATION != + offsetof(struct virtio_pci_common_cfg, config_generation)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SELECT != + offsetof(struct virtio_pci_common_cfg, queue_select)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SIZE != + offsetof(struct virtio_pci_common_cfg, queue_size)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_MSIX != + offsetof(struct virtio_pci_common_cfg, queue_msix_vector)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_ENABLE != + offsetof(struct virtio_pci_common_cfg, queue_enable)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_NOFF != + offsetof(struct virtio_pci_common_cfg, queue_notify_off)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCLO != + offsetof(struct virtio_pci_common_cfg, queue_desc_lo)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCHI != + offsetof(struct virtio_pci_common_cfg, queue_desc_hi)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILLO != + offsetof(struct 
virtio_pci_common_cfg, queue_avail_lo)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILHI != + offsetof(struct virtio_pci_common_cfg, queue_avail_hi)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDLO != + offsetof(struct virtio_pci_common_cfg, queue_used_lo)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDHI != + offsetof(struct virtio_pci_common_cfg, queue_used_hi)); +} + +/* + * vp_modern_probe - probe the modern virtio pci device, note that the + * caller is required to enable the PCI device before calling this function. + * @mdev: the modern virtio-pci device + * + * Returns 0 on success, otherwise a negative error code + */ +int vp_modern_probe(struct virtio_pci_modern_device *mdev) +{ + struct pci_dev *pci_dev = mdev->pci_dev; + int err, common, isr, notify, device; + u32 notify_length; + u32 notify_offset; + + check_offsets(); + + mdev->pci_dev = pci_dev; + + /* We only own devices >= 0x1000 and <= 0x107f: leave the rest. */ + if (pci_dev->device < 0x1000 || pci_dev->device > 0x107f) + return -ENODEV; + + if (pci_dev->device < 0x1040) { + /* Transitional devices: use the PCI subsystem device id as + * virtio device id, same as legacy driver always did. + */ + mdev->id.device = pci_dev->subsystem_device; + } else { + /* Modern devices: simply use PCI device id, but start from 0x1040. */ + mdev->id.device = pci_dev->device - 0x1040; + } + mdev->id.vendor = pci_dev->subsystem_vendor; + + /* check for a common config: if not, use legacy mode (bar 0). */ + common = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_COMMON_CFG, + IORESOURCE_IO | IORESOURCE_MEM, + &mdev->modern_bars); + if (!common) { + dev_info(&pci_dev->dev, + "virtio_pci: leaving for legacy driver\n"); + return -ENODEV; + } + + /* If common is there, these should be too... */ + isr = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_ISR_CFG, + IORESOURCE_IO | IORESOURCE_MEM, + &mdev->modern_bars); + notify = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_NOTIFY_CFG, + IORESOURCE_IO | IORESOURCE_MEM, + &mdev->modern_bars); + if (!isr || !notify) { + dev_err(&pci_dev->dev, + "virtio_pci: missing capabilities %i/%i/%i\n", + common, isr, notify); + return -EINVAL; + } + + err = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64)); + if (err) + err = dma_set_mask_and_coherent(&pci_dev->dev, + DMA_BIT_MASK(32)); + if (err) + dev_warn(&pci_dev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n"); + + /* Device capability is only mandatory for devices that have + * device-specific configuration. + */ + device = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_DEVICE_CFG, + IORESOURCE_IO | IORESOURCE_MEM, + &mdev->modern_bars); + + err = pci_request_selected_regions(pci_dev, mdev->modern_bars, + "virtio-pci-modern"); + if (err) + return err; + + err = -EINVAL; + mdev->common = vp_modern_map_capability(mdev, common, + sizeof(struct virtio_pci_common_cfg), 4, + 0, sizeof(struct virtio_pci_common_cfg), + NULL); + if (!mdev->common) + goto err_map_common; + mdev->isr = vp_modern_map_capability(mdev, isr, sizeof(u8), 1, + 0, 1, + NULL); + if (!mdev->isr) + goto err_map_isr; + + /* Read notify_off_multiplier from config space. */ + pci_read_config_dword(pci_dev, + notify + offsetof(struct virtio_pci_notify_cap, + notify_off_multiplier), + &mdev->notify_offset_multiplier); + /* Read notify length and offset from config space.
*/ + pci_read_config_dword(pci_dev, + notify + offsetof(struct virtio_pci_notify_cap, + cap.length), + &notify_length); + + pci_read_config_dword(pci_dev, + notify + offsetof(struct virtio_pci_notify_cap, + cap.offset), + &notify_offset); + + /* We don't know how many VQs we'll map, ahead of the time. + * If notify length is small, map it all now. + * Otherwise, map each VQ individually later. + */ + if ((u64)notify_length + (notify_offset % PAGE_SIZE) <= PAGE_SIZE) { + mdev->notify_base = vp_modern_map_capability(mdev, notify, + 2, 2, + 0, notify_length, + &mdev->notify_len); + if (!mdev->notify_base) + goto err_map_notify; + } else { + mdev->notify_map_cap = notify; + } + + /* Again, we don't know how much we should map, but PAGE_SIZE + * is more than enough for all existing devices. + */ + if (device) { + mdev->device = vp_modern_map_capability(mdev, device, 0, 4, + 0, PAGE_SIZE, + &mdev->device_len); + if (!mdev->device) + goto err_map_device; + } + + return 0; + +err_map_device: + if (mdev->notify_base) + pci_iounmap(pci_dev, mdev->notify_base); +err_map_notify: + pci_iounmap(pci_dev, mdev->isr); +err_map_isr: + pci_iounmap(pci_dev, mdev->common); +err_map_common: + return err; +} +EXPORT_SYMBOL_GPL(vp_modern_probe); + +/* + * vp_modern_remove - remove and cleanup the modern virtio pci device + * @mdev: the modern virtio-pci device + */ +void vp_modern_remove(struct virtio_pci_modern_device *mdev) +{ + struct pci_dev *pci_dev = mdev->pci_dev; + + if (mdev->device) + pci_iounmap(pci_dev, mdev->device); + if (mdev->notify_base) + pci_iounmap(pci_dev, mdev->notify_base); + pci_iounmap(pci_dev, mdev->isr); + pci_iounmap(pci_dev, mdev->common); + pci_release_selected_regions(pci_dev, mdev->modern_bars); +} +EXPORT_SYMBOL_GPL(vp_modern_remove); + +/* + * vp_modern_get_features - get features from device + * @mdev: the modern virtio-pci device + * + * Returns the features read from the device + */ +u64 vp_modern_get_features(struct virtio_pci_modern_device *mdev) +{ + struct virtio_pci_common_cfg __iomem *cfg = mdev->common; + + u64 features; + + vp_iowrite32(0, &cfg->device_feature_select); + features = vp_ioread32(&cfg->device_feature); + vp_iowrite32(1, &cfg->device_feature_select); + features |= ((u64)vp_ioread32(&cfg->device_feature) << 32); + + return features; +} +EXPORT_SYMBOL_GPL(vp_modern_get_features); + +/* + * vp_modern_set_features - set features to device + * @mdev: the modern virtio-pci device + * @features: the features set to device + */ +void vp_modern_set_features(struct virtio_pci_modern_device *mdev, + u64 features) +{ + struct virtio_pci_common_cfg __iomem *cfg = mdev->common; + + vp_iowrite32(0, &cfg->guest_feature_select); + vp_iowrite32((u32)features, &cfg->guest_feature); + vp_iowrite32(1, &cfg->guest_feature_select); + vp_iowrite32(features >> 32, &cfg->guest_feature); +} +EXPORT_SYMBOL_GPL(vp_modern_set_features); + +/* + * vp_modern_generation - get the device generation + * @mdev: the modern virtio-pci device + * + * Returns the generation read from device + */ +u32 vp_modern_generation(struct virtio_pci_modern_device *mdev) +{ + struct virtio_pci_common_cfg __iomem *cfg = mdev->common; + + return vp_ioread8(&cfg->config_generation); +} +EXPORT_SYMBOL_GPL(vp_modern_generation); + +/* + * vp_modern_get_status - get the device status + * @mdev: the modern virtio-pci device + * + * Returns the status read from device + */ +u8 vp_modern_get_status(struct virtio_pci_modern_device *mdev) +{ + struct virtio_pci_common_cfg __iomem *cfg = mdev->common; + + return
vp_ioread8(&cfg->device_status); +} +EXPORT_SYMBOL_GPL(vp_modern_get_status); + +/* + * vp_modern_set_status - set status to device + * @mdev: the modern virtio-pci device + * @status: the status set to device + */ +void vp_modern_set_status(struct virtio_pci_modern_device *mdev, + u8 status) +{ + struct virtio_pci_common_cfg __iomem *cfg = mdev->common; + + vp_iowrite8(status, &cfg->device_status); +} +EXPORT_SYMBOL_GPL(vp_modern_set_status); + +/* + * vp_modern_queue_vector - set the MSIX vector for a specific virtqueue + * @mdev: the modern virtio-pci device + * @index: queue index + * @vector: the MSIX vector + * + * Returns the MSIX vector read from the device + */ +u16 vp_modern_queue_vector(struct virtio_pci_modern_device *mdev, + u16 index, u16 vector) +{ + struct virtio_pci_common_cfg __iomem *cfg = mdev->common; + + vp_iowrite16(index, &cfg->queue_select); + vp_iowrite16(vector, &cfg->queue_msix_vector); + /* Flush the write out to device */ + return vp_ioread16(&cfg->queue_msix_vector); +} +EXPORT_SYMBOL_GPL(vp_modern_queue_vector); + +/* + * vp_modern_config_vector - set the vector for config interrupt + * @mdev: the modern virtio-pci device + * @vector: the config vector + * + * Returns the config vector read from the device + */ +u16 vp_modern_config_vector(struct virtio_pci_modern_device *mdev, + u16 vector) +{ + struct virtio_pci_common_cfg __iomem *cfg = mdev->common; + + /* Setup the vector used for configuration events */ + vp_iowrite16(vector, &cfg->msix_config); + /* Verify we had enough resources to assign the vector */ + /* Will also flush the write out to device */ + return vp_ioread16(&cfg->msix_config); +} +EXPORT_SYMBOL_GPL(vp_modern_config_vector); + +/* + * vp_modern_queue_address - set the virtqueue address + * @mdev: the modern virtio-pci device + * @index: the queue index + * @desc_addr: address of the descriptor area + * @driver_addr: address of the driver area + * @device_addr: address of the device area + */ +void vp_modern_queue_address(struct virtio_pci_modern_device *mdev, + u16 index, u64 desc_addr, u64 driver_addr, + u64 device_addr) +{ + struct virtio_pci_common_cfg __iomem *cfg = mdev->common; + + vp_iowrite16(index, &cfg->queue_select); + + vp_iowrite64_twopart(desc_addr, &cfg->queue_desc_lo, + &cfg->queue_desc_hi); + vp_iowrite64_twopart(driver_addr, &cfg->queue_avail_lo, + &cfg->queue_avail_hi); + vp_iowrite64_twopart(device_addr, &cfg->queue_used_lo, + &cfg->queue_used_hi); +} +EXPORT_SYMBOL_GPL(vp_modern_queue_address); + +/* + * vp_modern_set_queue_enable - enable a virtqueue + * @mdev: the modern virtio-pci device + * @index: the queue index + * @enable: whether the virtqueue is enabled or not + */ +void vp_modern_set_queue_enable(struct virtio_pci_modern_device *mdev, + u16 index, bool enable) +{ + vp_iowrite16(index, &mdev->common->queue_select); + vp_iowrite16(enable, &mdev->common->queue_enable); +} +EXPORT_SYMBOL_GPL(vp_modern_set_queue_enable); + +/* + * vp_modern_get_queue_enable - get the enable status of a virtqueue + * @mdev: the modern virtio-pci device + * @index: the queue index + * + * Returns whether a virtqueue is enabled or not + */ +bool vp_modern_get_queue_enable(struct virtio_pci_modern_device *mdev, + u16 index) +{ + vp_iowrite16(index, &mdev->common->queue_select); + + return vp_ioread16(&mdev->common->queue_enable); +} +EXPORT_SYMBOL_GPL(vp_modern_get_queue_enable); + +/* + * vp_modern_set_queue_size - set size for a virtqueue + * @mdev: the modern virtio-pci device + * @index: the queue index + * @size: the size of the virtqueue + 
*/ +void vp_modern_set_queue_size(struct virtio_pci_modern_device *mdev, + u16 index, u16 size) +{ + vp_iowrite16(index, &mdev->common->queue_select); + vp_iowrite16(size, &mdev->common->queue_size); + +} +EXPORT_SYMBOL_GPL(vp_modern_set_queue_size); + +/* + * vp_modern_get_queue_size - get size for a virtqueue + * @mdev: the modern virtio-pci device + * @index: the queue index + * + * Returns the size of the virtqueue + */ +u16 vp_modern_get_queue_size(struct virtio_pci_modern_device *mdev, + u16 index) +{ + vp_iowrite16(index, &mdev->common->queue_select); + + return vp_ioread16(&mdev->common->queue_size); + +} +EXPORT_SYMBOL_GPL(vp_modern_get_queue_size); + +/* + * vp_modern_get_num_queues - get the number of virtqueues + * @mdev: the modern virtio-pci device + * + * Returns the number of virtqueues + */ +u16 vp_modern_get_num_queues(struct virtio_pci_modern_device *mdev) +{ + return vp_ioread16(&mdev->common->num_queues); +} +EXPORT_SYMBOL_GPL(vp_modern_get_num_queues); + +/* + * vp_modern_get_queue_notify_off - get notification offset for a virtqueue + * @mdev: the modern virtio-pci device + * @index: the queue index + * + * Returns the notification offset for a virtqueue + */ +u16 vp_modern_get_queue_notify_off(struct virtio_pci_modern_device *mdev, + u16 index) +{ + vp_iowrite16(index, &mdev->common->queue_select); + + return vp_ioread16(&mdev->common->queue_notify_off); +} +EXPORT_SYMBOL_GPL(vp_modern_get_queue_notify_off); + +MODULE_VERSION("0.1"); +MODULE_DESCRIPTION("Modern Virtio PCI Device"); +MODULE_AUTHOR("Jason Wang <jasowang@redhat.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c index 4a9ddb44b2a7..e28acf482e0c 100644 --- a/drivers/virtio/virtio_vdpa.c +++ b/drivers/virtio/virtio_vdpa.c @@ -225,9 +225,8 @@ static void virtio_vdpa_del_vq(struct virtqueue *vq) list_del(&info->node); spin_unlock_irqrestore(&vd_dev->lock, flags); - /* Select and deactivate the queue */ + /* Select and deactivate the queue (best effort) */ ops->set_vq_ready(vdpa, index, 0); - WARN_ON(ops->get_vq_ready(vdpa, index)); vring_del_virtqueue(vq); diff --git a/fs/nfs/file.c b/fs/nfs/file.c index 63940a7a70be..16ad5050e046 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -89,7 +89,7 @@ nfs_file_release(struct inode *inode, struct file *filp) EXPORT_SYMBOL_GPL(nfs_file_release); /** - * nfs_revalidate_size - Revalidate the file size + * nfs_revalidate_file_size - Revalidate the file size * @inode: pointer to inode struct * @filp: pointer to struct file * @@ -606,8 +606,8 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from) { struct file *file = iocb->ki_filp; struct inode *inode = file_inode(file); - unsigned long written = 0; - ssize_t result; + unsigned int mntflags = NFS_SERVER(inode)->flags; + ssize_t result, written; errseq_t since; int error; @@ -626,13 +626,13 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from) /* * O_APPEND implies that we must revalidate the file length. 
*/ - if (iocb->ki_flags & IOCB_APPEND) { + if (iocb->ki_flags & IOCB_APPEND || iocb->ki_pos > i_size_read(inode)) { result = nfs_revalidate_file_size(inode, file); if (result) goto out; } - if (iocb->ki_pos > i_size_read(inode)) - nfs_revalidate_mapping(inode, file->f_mapping); + + nfs_clear_invalid_mapping(file->f_mapping); since = filemap_sample_wb_err(file->f_mapping); nfs_start_io_write(inode); @@ -648,6 +648,21 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from) written = result; iocb->ki_pos += written; + + if (mntflags & NFS_MOUNT_WRITE_EAGER) { + result = filemap_fdatawrite_range(file->f_mapping, + iocb->ki_pos - written, + iocb->ki_pos - 1); + if (result < 0) + goto out; + } + if (mntflags & NFS_MOUNT_WRITE_WAIT) { + result = filemap_fdatawait_range(file->f_mapping, + iocb->ki_pos - written, + iocb->ki_pos - 1); + if (result < 0) + goto out; + } result = generic_write_sync(iocb, written); if (result < 0) goto out; diff --git a/fs/nfs/fs_context.c b/fs/nfs/fs_context.c index 06894bcdea2d..971a9251c1d9 100644 --- a/fs/nfs/fs_context.c +++ b/fs/nfs/fs_context.c @@ -82,6 +82,7 @@ enum nfs_param { Opt_v, Opt_vers, Opt_wsize, + Opt_write, }; enum { @@ -113,6 +114,19 @@ static const struct constant_table nfs_param_enums_lookupcache[] = { {} }; +enum { + Opt_write_lazy, + Opt_write_eager, + Opt_write_wait, +}; + +static const struct constant_table nfs_param_enums_write[] = { + { "lazy", Opt_write_lazy }, + { "eager", Opt_write_eager }, + { "wait", Opt_write_wait }, + {} +}; + static const struct fs_parameter_spec nfs_fs_parameters[] = { fsparam_flag_no("ac", Opt_ac), fsparam_u32 ("acdirmax", Opt_acdirmax), @@ -171,6 +185,7 @@ static const struct fs_parameter_spec nfs_fs_parameters[] = { fsparam_flag ("v4.1", Opt_v), fsparam_flag ("v4.2", Opt_v), fsparam_string("vers", Opt_vers), + fsparam_enum ("write", Opt_write, nfs_param_enums_write), fsparam_u32 ("wsize", Opt_wsize), {} }; @@ -770,6 +785,24 @@ static int nfs_fs_context_parse_param(struct fs_context *fc, goto out_invalid_value; } break; + case Opt_write: + switch (result.uint_32) { + case Opt_write_lazy: + ctx->flags &= + ~(NFS_MOUNT_WRITE_EAGER | NFS_MOUNT_WRITE_WAIT); + break; + case Opt_write_eager: + ctx->flags |= NFS_MOUNT_WRITE_EAGER; + ctx->flags &= ~NFS_MOUNT_WRITE_WAIT; + break; + case Opt_write_wait: + ctx->flags |= + NFS_MOUNT_WRITE_EAGER | NFS_MOUNT_WRITE_WAIT; + break; + default: + goto out_invalid_value; + } + break; /* * Special options @@ -1479,6 +1512,8 @@ static int nfs_init_fs_context(struct fs_context *fc) ctx->selected_flavor = RPC_AUTH_MAXFLAVOR; ctx->minorversion = 0; ctx->need_mount = true; + + fc->s_iflags |= SB_I_STABLE_WRITES; } fc->fs_private = ctx; fc->ops = &nfs_fs_context_ops; diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c index a60df88efc40..c4c021c6ebbd 100644 --- a/fs/nfs/fscache.c +++ b/fs/nfs/fscache.c @@ -390,10 +390,6 @@ static void nfs_readpage_from_fscache_complete(struct page *page, if (!error) { SetPageUptodate(page); unlock_page(page); - } else { - error = nfs_readpage_async(context, page->mapping->host, page); - if (error) - unlock_page(page); } } diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 447e95974386..749bbea14d99 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -195,6 +195,18 @@ bool nfs_check_cache_invalid(struct inode *inode, unsigned long flags) } EXPORT_SYMBOL_GPL(nfs_check_cache_invalid); +#ifdef CONFIG_NFS_V4_2 +static bool nfs_has_xattr_cache(const struct nfs_inode *nfsi) +{ + return nfsi->xattr_cache != NULL; +} +#else +static bool 
nfs_has_xattr_cache(const struct nfs_inode *nfsi) +{ + return false; +} +#endif + static void nfs_set_cache_invalid(struct inode *inode, unsigned long flags) { struct nfs_inode *nfsi = NFS_I(inode); @@ -209,6 +221,8 @@ static void nfs_set_cache_invalid(struct inode *inode, unsigned long flags) | NFS_INO_INVALID_XATTR); } + if (!nfs_has_xattr_cache(nfsi)) + flags &= ~NFS_INO_INVALID_XATTR; if (inode->i_mapping->nrpages == 0) flags &= ~(NFS_INO_INVALID_DATA|NFS_INO_DATA_INVAL_DEFER); nfsi->cache_validity |= flags; @@ -1258,55 +1272,19 @@ static int nfs_invalidate_mapping(struct inode *inode, struct address_space *map return 0; } -bool nfs_mapping_need_revalidate_inode(struct inode *inode) -{ - return nfs_check_cache_invalid(inode, NFS_INO_REVAL_PAGECACHE) || - NFS_STALE(inode); -} - -int nfs_revalidate_mapping_rcu(struct inode *inode) -{ - struct nfs_inode *nfsi = NFS_I(inode); - unsigned long *bitlock = &nfsi->flags; - int ret = 0; - - if (IS_SWAPFILE(inode)) - goto out; - if (nfs_mapping_need_revalidate_inode(inode)) { - ret = -ECHILD; - goto out; - } - spin_lock(&inode->i_lock); - if (test_bit(NFS_INO_INVALIDATING, bitlock) || - (nfsi->cache_validity & NFS_INO_INVALID_DATA)) - ret = -ECHILD; - spin_unlock(&inode->i_lock); -out: - return ret; -} - /** - * nfs_revalidate_mapping - Revalidate the pagecache - * @inode: pointer to host inode + * nfs_clear_invalid_mapping - Conditionally clear a mapping * @mapping: pointer to mapping + * + * If the NFS_INO_INVALID_DATA inode flag is set, clear the mapping. */ -int nfs_revalidate_mapping(struct inode *inode, - struct address_space *mapping) +int nfs_clear_invalid_mapping(struct address_space *mapping) { + struct inode *inode = mapping->host; struct nfs_inode *nfsi = NFS_I(inode); unsigned long *bitlock = &nfsi->flags; int ret = 0; - /* swapfiles are not supposed to be shared. */ - if (IS_SWAPFILE(inode)) - goto out; - - if (nfs_mapping_need_revalidate_inode(inode)) { - ret = __nfs_revalidate_inode(NFS_SERVER(inode), inode); - if (ret < 0) - goto out; - } - /* * We must clear NFS_INO_INVALID_DATA first to ensure that * invalidations that come in while we're shooting down the mappings @@ -1337,8 +1315,8 @@ int nfs_revalidate_mapping(struct inode *inode, set_bit(NFS_INO_INVALIDATING, bitlock); smp_wmb(); - nfsi->cache_validity &= ~(NFS_INO_INVALID_DATA| - NFS_INO_DATA_INVAL_DEFER); + nfsi->cache_validity &= + ~(NFS_INO_INVALID_DATA | NFS_INO_DATA_INVAL_DEFER); spin_unlock(&inode->i_lock); trace_nfs_invalidate_mapping_enter(inode); ret = nfs_invalidate_mapping(inode, mapping); @@ -1351,6 +1329,53 @@ out: return ret; } +bool nfs_mapping_need_revalidate_inode(struct inode *inode) +{ + return nfs_check_cache_invalid(inode, NFS_INO_REVAL_PAGECACHE) || + NFS_STALE(inode); +} + +int nfs_revalidate_mapping_rcu(struct inode *inode) +{ + struct nfs_inode *nfsi = NFS_I(inode); + unsigned long *bitlock = &nfsi->flags; + int ret = 0; + + if (IS_SWAPFILE(inode)) + goto out; + if (nfs_mapping_need_revalidate_inode(inode)) { + ret = -ECHILD; + goto out; + } + spin_lock(&inode->i_lock); + if (test_bit(NFS_INO_INVALIDATING, bitlock) || + (nfsi->cache_validity & NFS_INO_INVALID_DATA)) + ret = -ECHILD; + spin_unlock(&inode->i_lock); +out: + return ret; +} + +/** + * nfs_revalidate_mapping - Revalidate the pagecache + * @inode: pointer to host inode + * @mapping: pointer to mapping + */ +int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping) +{ + /* swapfiles are not supposed to be shared. 
*/ + if (IS_SWAPFILE(inode)) + return 0; + + if (nfs_mapping_need_revalidate_inode(inode)) { + int ret = __nfs_revalidate_inode(NFS_SERVER(inode), inode); + if (ret < 0) + return ret; + } + + return nfs_clear_invalid_mapping(mapping); +} + static bool nfs_file_has_writers(struct nfs_inode *nfsi) { struct inode *inode = &nfsi->vfs_inode; diff --git a/fs/nfs/nfs3acl.c b/fs/nfs/nfs3acl.c index 5604e807fc01..bb386a691e69 100644 --- a/fs/nfs/nfs3acl.c +++ b/fs/nfs/nfs3acl.c @@ -111,6 +111,7 @@ struct posix_acl *nfs3_get_acl(struct inode *inode, int type) fallthrough; case -ENOTSUPP: status = -EOPNOTSUPP; + goto getout; default: goto getout; } diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c index 86acffe7335c..889a9f4c0310 100644 --- a/fs/nfs/nfs4client.c +++ b/fs/nfs/nfs4client.c @@ -609,6 +609,7 @@ found: * changed. Schedule recovery! */ nfs4_schedule_path_down_recovery(pos); + goto out; default: goto out; } diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index a07530cf673d..74bc5120013d 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -71,10 +71,6 @@ #include "nfs4trace.h" -#ifdef CONFIG_NFS_V4_2 -#include "nfs42.h" -#endif /* CONFIG_NFS_V4_2 */ - #define NFSDBG_FACILITY NFSDBG_PROC #define NFS4_BITMASK_SZ 3 @@ -2231,6 +2227,7 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct default: printk(KERN_ERR "NFS: %s: unhandled error " "%d.\n", __func__, err); + fallthrough; case 0: case -ENOENT: case -EAGAIN: @@ -5438,15 +5435,16 @@ static void nfs4_bitmask_adjust(__u32 *bitmask, struct inode *inode, if (cache_validity & NFS_INO_INVALID_ATIME) bitmask[1] |= FATTR4_WORD1_TIME_ACCESS; - if (cache_validity & NFS_INO_INVALID_ACCESS) - bitmask[0] |= FATTR4_WORD1_MODE | FATTR4_WORD1_OWNER | - FATTR4_WORD1_OWNER_GROUP; - if (cache_validity & NFS_INO_INVALID_ACL) - bitmask[0] |= FATTR4_WORD0_ACL; - if (cache_validity & NFS_INO_INVALID_LABEL) + if (cache_validity & NFS_INO_INVALID_OTHER) + bitmask[1] |= FATTR4_WORD1_MODE | FATTR4_WORD1_OWNER | + FATTR4_WORD1_OWNER_GROUP | + FATTR4_WORD1_NUMLINKS; + if (label && label->len && cache_validity & NFS_INO_INVALID_LABEL) bitmask[2] |= FATTR4_WORD2_SECURITY_LABEL; - if (cache_validity & NFS_INO_INVALID_CTIME) + if (cache_validity & NFS_INO_INVALID_CHANGE) bitmask[0] |= FATTR4_WORD0_CHANGE; + if (cache_validity & NFS_INO_INVALID_CTIME) + bitmask[1] |= FATTR4_WORD1_TIME_METADATA; if (cache_validity & NFS_INO_INVALID_MTIME) bitmask[1] |= FATTR4_WORD1_TIME_MODIFY; if (cache_validity & NFS_INO_INVALID_SIZE) @@ -9708,6 +9706,7 @@ nfs4_layoutcommit_done(struct rpc_task *task, void *calldata) case -NFS4ERR_BADLAYOUT: /* no layout */ case -NFS4ERR_GRACE: /* loca_recalim always false */ task->tk_status = 0; + break; case 0: break; default: diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 4bf10792cb5b..3a51351bdc6a 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -1125,6 +1125,7 @@ static void nfs_increment_seqid(int status, struct nfs_seqid *seqid) " sequence-id error on an" " unconfirmed sequence %p!\n", seqid->sequence); + return; case -NFS4ERR_STALE_CLIENTID: case -NFS4ERR_STALE_STATEID: case -NFS4ERR_BAD_STATEID: diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index af64b4e6fd1f..102b66e0bdef 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -2875,6 +2875,7 @@ pnfs_do_write(struct nfs_pageio_descriptor *desc, switch (trypnfs) { case PNFS_NOT_ATTEMPTED: pnfs_write_through_mds(desc, hdr); + break; case PNFS_ATTEMPTED: break; case PNFS_TRY_AGAIN: @@ -3019,6 +3020,7 @@ pnfs_do_read(struct 
nfs_pageio_descriptor *desc, struct nfs_pgio_header *hdr) switch (trypnfs) { case PNFS_NOT_ATTEMPTED: pnfs_read_through_mds(desc, hdr); + break; case PNFS_ATTEMPTED: break; case PNFS_TRY_AGAIN: diff --git a/fs/nfs/read.c b/fs/nfs/read.c index eb854f1f86e2..d2b6dce1f99f 100644 --- a/fs/nfs/read.c +++ b/fs/nfs/read.c @@ -74,6 +74,24 @@ void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, } EXPORT_SYMBOL_GPL(nfs_pageio_init_read); +static void nfs_pageio_complete_read(struct nfs_pageio_descriptor *pgio, + struct inode *inode) +{ + struct nfs_pgio_mirror *pgm; + unsigned long npages; + + nfs_pageio_complete(pgio); + + /* It doesn't make sense to do mirrored reads! */ + WARN_ON_ONCE(pgio->pg_mirror_count != 1); + + pgm = &pgio->pg_mirrors[0]; + NFS_I(inode)->read_io += pgm->pg_bytes_written; + npages = (pgm->pg_bytes_written + PAGE_SIZE - 1) >> PAGE_SHIFT; + nfs_add_stats(inode, NFSIOS_READPAGES, npages); +} + + void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio) { struct nfs_pgio_mirror *mirror; @@ -114,41 +132,10 @@ static void nfs_readpage_release(struct nfs_page *req, int error) nfs_release_request(req); } -int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode, - struct page *page) -{ - struct nfs_page *new; - unsigned int len; +struct nfs_readdesc { struct nfs_pageio_descriptor pgio; - struct nfs_pgio_mirror *pgm; - - len = nfs_page_length(page); - if (len == 0) - return nfs_return_empty_page(page); - new = nfs_create_request(ctx, page, 0, len); - if (IS_ERR(new)) { - unlock_page(page); - return PTR_ERR(new); - } - if (len < PAGE_SIZE) - zero_user_segment(page, len, PAGE_SIZE); - - nfs_pageio_init_read(&pgio, inode, false, - &nfs_async_read_completion_ops); - if (!nfs_pageio_add_request(&pgio, new)) { - nfs_list_remove_request(new); - nfs_readpage_release(new, pgio.pg_error); - } - nfs_pageio_complete(&pgio); - - /* It doesn't make sense to do mirrored reads! */ - WARN_ON_ONCE(pgio.pg_mirror_count != 1); - - pgm = &pgio.pg_mirrors[0]; - NFS_I(inode)->read_io += pgm->pg_bytes_written; - - return pgio.pg_error < 0 ? pgio.pg_error : 0; -} + struct nfs_open_context *ctx; +}; static void nfs_page_group_set_uptodate(struct nfs_page *req) { @@ -171,8 +158,7 @@ static void nfs_read_completion(struct nfs_pgio_header *hdr) if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) { /* note: regions of the page not covered by a - * request are zeroed in nfs_readpage_async / - * readpage_async_filler */ + * request are zeroed in readpage_async_filler */ if (bytes > hdr->good_bytes) { /* nothing in this request was good, so zero * the full extent of the request */ @@ -304,6 +290,38 @@ static void nfs_readpage_result(struct rpc_task *task, nfs_readpage_retry(task, hdr); } +static int +readpage_async_filler(void *data, struct page *page) +{ + struct nfs_readdesc *desc = data; + struct nfs_page *new; + unsigned int len; + int error; + + len = nfs_page_length(page); + if (len == 0) + return nfs_return_empty_page(page); + + new = nfs_create_request(desc->ctx, page, 0, len); + if (IS_ERR(new)) + goto out_error; + + if (len < PAGE_SIZE) + zero_user_segment(page, len, PAGE_SIZE); + if (!nfs_pageio_add_request(&desc->pgio, new)) { + nfs_list_remove_request(new); + error = desc->pgio.pg_error; + nfs_readpage_release(new, error); + goto out; + } + return 0; +out_error: + error = PTR_ERR(new); + unlock_page(page); +out: + return error; +} + /* * Read a page over NFS. 
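The read.c rework folds the old nfs_readpage_async() into a shared struct nfs_readdesc plus readpage_async_filler(), with nfs_pageio_complete_read() handling completion and statistics. A minimal sketch of how the readahead side could drive that trio, mirroring the nfs_readpages() flow in this patch; the wrapper name is invented, and the filler and completion ops are file-local to read.c.

/* Sketch only: mirrors the in-file nfs_readpages() flow; my_nfs_readahead is hypothetical. */
static int my_nfs_readahead(struct address_space *mapping,
			    struct list_head *pages,
			    struct nfs_open_context *ctx)
{
	struct nfs_readdesc desc = { .ctx = ctx };
	int ret;

	nfs_pageio_init_read(&desc.pgio, mapping->host, false,
			     &nfs_async_read_completion_ops);
	ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);
	nfs_pageio_complete_read(&desc.pgio, mapping->host);

	return ret;
}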
* We read the page synchronously in the following case: @@ -312,14 +330,13 @@ static void nfs_readpage_result(struct rpc_task *task, */ int nfs_readpage(struct file *file, struct page *page) { - struct nfs_open_context *ctx; + struct nfs_readdesc desc; struct inode *inode = page_file_mapping(page)->host; - int error; + int ret; dprintk("NFS: nfs_readpage (%p %ld@%lu)\n", page, PAGE_SIZE, page_index(page)); nfs_inc_stats(inode, NFSIOS_VFSREADPAGE); - nfs_add_stats(inode, NFSIOS_READPAGES, 1); /* * Try to flush any pending writes to the file.. @@ -328,93 +345,59 @@ int nfs_readpage(struct file *file, struct page *page) * be any new pending writes generated at this point * for this page (other pages can be written to). */ - error = nfs_wb_page(inode, page); - if (error) + ret = nfs_wb_page(inode, page); + if (ret) goto out_unlock; if (PageUptodate(page)) goto out_unlock; - error = -ESTALE; + ret = -ESTALE; if (NFS_STALE(inode)) goto out_unlock; if (file == NULL) { - error = -EBADF; - ctx = nfs_find_open_context(inode, NULL, FMODE_READ); - if (ctx == NULL) + ret = -EBADF; + desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ); + if (desc.ctx == NULL) goto out_unlock; } else - ctx = get_nfs_open_context(nfs_file_open_context(file)); + desc.ctx = get_nfs_open_context(nfs_file_open_context(file)); if (!IS_SYNC(inode)) { - error = nfs_readpage_from_fscache(ctx, inode, page); - if (error == 0) + ret = nfs_readpage_from_fscache(desc.ctx, inode, page); + if (ret == 0) goto out; } - xchg(&ctx->error, 0); - error = nfs_readpage_async(ctx, inode, page); - if (!error) { - error = wait_on_page_locked_killable(page); - if (!PageUptodate(page) && !error) - error = xchg(&ctx->error, 0); - } -out: - put_nfs_open_context(ctx); - return error; -out_unlock: - unlock_page(page); - return error; -} - -struct nfs_readdesc { - struct nfs_pageio_descriptor *pgio; - struct nfs_open_context *ctx; -}; - -static int -readpage_async_filler(void *data, struct page *page) -{ - struct nfs_readdesc *desc = (struct nfs_readdesc *)data; - struct nfs_page *new; - unsigned int len; - int error; + xchg(&desc.ctx->error, 0); + nfs_pageio_init_read(&desc.pgio, inode, false, + &nfs_async_read_completion_ops); - len = nfs_page_length(page); - if (len == 0) - return nfs_return_empty_page(page); + ret = readpage_async_filler(&desc, page); - new = nfs_create_request(desc->ctx, page, 0, len); - if (IS_ERR(new)) - goto out_error; + if (!ret) + nfs_pageio_complete_read(&desc.pgio, inode); - if (len < PAGE_SIZE) - zero_user_segment(page, len, PAGE_SIZE); - if (!nfs_pageio_add_request(desc->pgio, new)) { - nfs_list_remove_request(new); - error = desc->pgio->pg_error; - nfs_readpage_release(new, error); - goto out; + ret = desc.pgio.pg_error < 0 ? 
desc.pgio.pg_error : 0; + if (!ret) { + ret = wait_on_page_locked_killable(page); + if (!PageUptodate(page) && !ret) + ret = xchg(&desc.ctx->error, 0); } - return 0; -out_error: - error = PTR_ERR(new); - unlock_page(page); out: - return error; + put_nfs_open_context(desc.ctx); + return ret; +out_unlock: + unlock_page(page); + return ret; } -int nfs_readpages(struct file *filp, struct address_space *mapping, +int nfs_readpages(struct file *file, struct address_space *mapping, struct list_head *pages, unsigned nr_pages) { - struct nfs_pageio_descriptor pgio; - struct nfs_pgio_mirror *pgm; - struct nfs_readdesc desc = { - .pgio = &pgio, - }; + struct nfs_readdesc desc; struct inode *inode = mapping->host; - unsigned long npages; - int ret = -ESTALE; + int ret; dprintk("NFS: nfs_readpages (%s/%Lu %d)\n", inode->i_sb->s_id, @@ -422,15 +405,17 @@ int nfs_readpages(struct file *filp, struct address_space *mapping, nr_pages); nfs_inc_stats(inode, NFSIOS_VFSREADPAGES); + ret = -ESTALE; if (NFS_STALE(inode)) goto out; - if (filp == NULL) { + if (file == NULL) { + ret = -EBADF; desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ); if (desc.ctx == NULL) - return -EBADF; + goto out; } else - desc.ctx = get_nfs_open_context(nfs_file_open_context(filp)); + desc.ctx = get_nfs_open_context(nfs_file_open_context(file)); /* attempt to read as many of the pages as possible from the cache * - this returns -ENOBUFS immediately if the cookie is negative @@ -440,20 +425,13 @@ int nfs_readpages(struct file *filp, struct address_space *mapping, if (ret == 0) goto read_complete; /* all pages were read */ - nfs_pageio_init_read(&pgio, inode, false, + nfs_pageio_init_read(&desc.pgio, inode, false, &nfs_async_read_completion_ops); ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc); - nfs_pageio_complete(&pgio); - /* It doesn't make sense to do mirrored reads! 
*/ - WARN_ON_ONCE(pgio.pg_mirror_count != 1); + nfs_pageio_complete_read(&desc.pgio, inode); - pgm = &pgio.pg_mirrors[0]; - NFS_I(inode)->read_io += pgm->pg_bytes_written; - npages = (pgm->pg_bytes_written + PAGE_SIZE - 1) >> - PAGE_SHIFT; - nfs_add_stats(inode, NFSIOS_READPAGES, npages); read_complete: put_nfs_open_context(desc.ctx); out: diff --git a/fs/nfs/super.c b/fs/nfs/super.c index c7a924580eec..94885c6f8f54 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -523,6 +523,13 @@ static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss, seq_puts(m, ",local_lock=flock"); else seq_puts(m, ",local_lock=posix"); + + if (nfss->flags & NFS_MOUNT_WRITE_EAGER) { + if (nfss->flags & NFS_MOUNT_WRITE_WAIT) + seq_puts(m, ",write=wait"); + else + seq_puts(m, ",write=eager"); + } } /* diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 639c34fec04a..82bdcb982186 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -712,16 +712,23 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc) { struct inode *inode = mapping->host; struct nfs_pageio_descriptor pgio; - struct nfs_io_completion *ioc; + struct nfs_io_completion *ioc = NULL; + unsigned int mntflags = NFS_SERVER(inode)->flags; + int priority = 0; int err; nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES); - ioc = nfs_io_completion_alloc(GFP_KERNEL); - if (ioc) - nfs_io_completion_init(ioc, nfs_io_completion_commit, inode); + if (!(mntflags & NFS_MOUNT_WRITE_EAGER) || wbc->for_kupdate || + wbc->for_background || wbc->for_sync || wbc->for_reclaim) { + ioc = nfs_io_completion_alloc(GFP_KERNEL); + if (ioc) + nfs_io_completion_init(ioc, nfs_io_completion_commit, + inode); + priority = wb_priority(wbc); + } - nfs_pageio_init_write(&pgio, inode, wb_priority(wbc), false, + nfs_pageio_init_write(&pgio, inode, priority, false, &nfs_async_write_completion_ops); pgio.pg_io_completion = ioc; err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio); @@ -1278,19 +1285,21 @@ bool nfs_ctx_key_to_expire(struct nfs_open_context *ctx, struct inode *inode) * the PageUptodate() flag. In this case, we will need to turn off * write optimisations that depend on the page contents being correct. */ -static bool nfs_write_pageuptodate(struct page *page, struct inode *inode) +static bool nfs_write_pageuptodate(struct page *page, struct inode *inode, + unsigned int pagelen) { struct nfs_inode *nfsi = NFS_I(inode); if (nfs_have_delegated_attributes(inode)) goto out; - if (nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE) + if (nfsi->cache_validity & + (NFS_INO_REVAL_PAGECACHE | NFS_INO_INVALID_SIZE)) return false; smp_rmb(); - if (test_bit(NFS_INO_INVALIDATING, &nfsi->flags)) + if (test_bit(NFS_INO_INVALIDATING, &nfsi->flags) && pagelen != 0) return false; out: - if (nfsi->cache_validity & NFS_INO_INVALID_DATA) + if (nfsi->cache_validity & NFS_INO_INVALID_DATA && pagelen != 0) return false; return PageUptodate(page) != 0; } @@ -1310,7 +1319,8 @@ is_whole_file_wrlock(struct file_lock *fl) * If the file is opened for synchronous writes then we can just skip the rest * of the checks. 
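The new "write=" mount option shown in fs_context.c and super.c boils down to two server flags. A hedged restatement of how the three modes map onto NFS_MOUNT_WRITE_EAGER / NFS_MOUNT_WRITE_WAIT; the helper name is invented, but the mapping follows the Opt_write parsing and nfs_show_mount_options() in this series.

/* Sketch of the flag encoding; nfs_write_mode() is not a real kernel symbol. */
static const char *nfs_write_mode(unsigned int flags)
{
	if (!(flags & NFS_MOUNT_WRITE_EAGER))
		return "lazy";	/* default: leave writeback to the VM */
	if (flags & NFS_MOUNT_WRITE_WAIT)
		return "wait";	/* start writeback per write and wait for it */
	return "eager";		/* start writeback per write, don't wait */
}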
*/ -static int nfs_can_extend_write(struct file *file, struct page *page, struct inode *inode) +static int nfs_can_extend_write(struct file *file, struct page *page, + struct inode *inode, unsigned int pagelen) { int ret; struct file_lock_context *flctx = inode->i_flctx; @@ -1318,7 +1328,7 @@ static int nfs_can_extend_write(struct file *file, struct page *page, struct ino if (file->f_flags & O_DSYNC) return 0; - if (!nfs_write_pageuptodate(page, inode)) + if (!nfs_write_pageuptodate(page, inode, pagelen)) return 0; if (NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE)) return 1; @@ -1356,6 +1366,7 @@ int nfs_updatepage(struct file *file, struct page *page, struct nfs_open_context *ctx = nfs_file_open_context(file); struct address_space *mapping = page_file_mapping(page); struct inode *inode = mapping->host; + unsigned int pagelen = nfs_page_length(page); int status = 0; nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE); @@ -1366,8 +1377,8 @@ int nfs_updatepage(struct file *file, struct page *page, if (!count) goto out; - if (nfs_can_extend_write(file, page, inode)) { - count = max(count + offset, nfs_page_length(page)); + if (nfs_can_extend_write(file, page, inode, pagelen)) { + count = max(count + offset, pagelen); offset = 0; } diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index fbc0ca8d903b..6786f8c0182f 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -98,7 +98,7 @@ */ #if defined(CONFIG_LD_DEAD_CODE_DATA_ELIMINATION) || defined(CONFIG_LTO_CLANG) #define TEXT_MAIN .text .text.[0-9a-zA-Z_]* -#define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..L* .data..compoundliteral* .data.$__unnamed_* .data.$Lubsan_* +#define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..L* .data..compoundliteral* .data.$__unnamed_* .data.$L* #define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]* #define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]* .rodata..L* #define BSS_MAIN .bss .bss.[0-9a-zA-Z_]* .bss..compoundliteral* diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h index 716990bace10..b81b3bfb08c8 100644 --- a/include/drm/drm_file.h +++ b/include/drm/drm_file.h @@ -399,6 +399,9 @@ void drm_event_cancel_free(struct drm_device *dev, struct drm_pending_event *p); void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e); void drm_send_event(struct drm_device *dev, struct drm_pending_event *e); +void drm_send_event_timestamp_locked(struct drm_device *dev, + struct drm_pending_event *e, + ktime_t timestamp); struct file *mock_drm_getfile(struct drm_minor *minor, unsigned int flags); diff --git a/include/linux/can/can-ml.h b/include/linux/can/can-ml.h index 2f5d731ae251..8afa92d15a66 100644 --- a/include/linux/can/can-ml.h +++ b/include/linux/can/can-ml.h @@ -44,6 +44,7 @@ #include <linux/can.h> #include <linux/list.h> +#include <linux/netdevice.h> #define CAN_SFF_RCV_ARRAY_SZ (1 << CAN_SFF_ID_BITS) #define CAN_EFF_RCV_HASH_BITS 10 @@ -65,4 +66,15 @@ struct can_ml_priv { #endif }; +static inline struct can_ml_priv *can_get_ml_priv(struct net_device *dev) +{ + return netdev_get_ml_priv(dev, ML_PRIV_CAN); +} + +static inline void can_set_ml_priv(struct net_device *dev, + struct can_ml_priv *ml_priv) +{ + netdev_set_ml_priv(dev, ml_priv, ML_PRIV_CAN); +} + #endif /* CAN_ML_H */ diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h index 09e23adb351d..9f12efaaa93a 100644 --- a/include/linux/dma-fence.h +++ b/include/linux/dma-fence.h @@ -372,6 +372,9 @@ static inline void __dma_fence_might_wait(void) {} int 
dma_fence_signal(struct dma_fence *fence); int dma_fence_signal_locked(struct dma_fence *fence); +int dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp); +int dma_fence_signal_timestamp_locked(struct dma_fence *fence, + ktime_t timestamp); signed long dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout); int dma_fence_add_callback(struct dma_fence *fence, diff --git a/include/linux/dma-heap.h b/include/linux/dma-heap.h index 454e354d1ffb..5bc5c946af58 100644 --- a/include/linux/dma-heap.h +++ b/include/linux/dma-heap.h @@ -16,15 +16,15 @@ struct dma_heap; /** * struct dma_heap_ops - ops to operate on a given heap - * @allocate: allocate dmabuf and return fd + * @allocate: allocate dmabuf and return struct dma_buf ptr * - * allocate returns dmabuf fd on success, -errno on error. + * allocate returns dmabuf on success, ERR_PTR(-errno) on error. */ struct dma_heap_ops { - int (*allocate)(struct dma_heap *heap, - unsigned long len, - unsigned long fd_flags, - unsigned long heap_flags); + struct dma_buf *(*allocate)(struct dma_heap *heap, + unsigned long len, + unsigned long fd_flags, + unsigned long heap_flags); }; /** diff --git a/include/linux/icmpv6.h b/include/linux/icmpv6.h index 452d8978ffc7..9055cb380ee2 100644 --- a/include/linux/icmpv6.h +++ b/include/linux/icmpv6.h @@ -3,6 +3,7 @@ #define _LINUX_ICMPV6_H #include <linux/skbuff.h> +#include <linux/ipv6.h> #include <uapi/linux/icmpv6.h> static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb) @@ -15,13 +16,16 @@ static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb) #if IS_ENABLED(CONFIG_IPV6) typedef void ip6_icmp_send_t(struct sk_buff *skb, u8 type, u8 code, __u32 info, - const struct in6_addr *force_saddr); + const struct in6_addr *force_saddr, + const struct inet6_skb_parm *parm); void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, - const struct in6_addr *force_saddr); + const struct in6_addr *force_saddr, + const struct inet6_skb_parm *parm); #if IS_BUILTIN(CONFIG_IPV6) -static inline void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) +static inline void __icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, + const struct inet6_skb_parm *parm) { - icmp6_send(skb, type, code, info, NULL); + icmp6_send(skb, type, code, info, NULL, parm); } static inline int inet6_register_icmp_sender(ip6_icmp_send_t *fn) { @@ -34,18 +38,28 @@ static inline int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn) return 0; } #else -extern void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info); +extern void __icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, + const struct inet6_skb_parm *parm); extern int inet6_register_icmp_sender(ip6_icmp_send_t *fn); extern int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn); #endif +static inline void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) +{ + __icmpv6_send(skb, type, code, info, IP6CB(skb)); +} + int ip6_err_gen_icmpv6_unreach(struct sk_buff *skb, int nhs, int type, unsigned int data_len); #if IS_ENABLED(CONFIG_NF_NAT) void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info); #else -#define icmpv6_ndo_send icmpv6_send +static inline void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info) +{ + struct inet6_skb_parm parm = { 0 }; + __icmpv6_send(skb_in, type, code, info, &parm); +} #endif #else diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 9d1f29f0c512..70b2ad3b9884 100644 --- a/include/linux/ipv6.h 
+++ b/include/linux/ipv6.h @@ -85,7 +85,6 @@ struct ipv6_params { __s32 autoconf; }; extern struct ipv6_params ipv6_defaults; -#include <linux/icmpv6.h> #include <linux/tcp.h> #include <linux/udp.h> diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index ddf4cfc12615..f06fbee8638e 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1584,6 +1584,12 @@ enum netdev_priv_flags { #define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER #define IFF_LIVE_RENAME_OK IFF_LIVE_RENAME_OK +/* Specifies the type of the struct net_device::ml_priv pointer */ +enum netdev_ml_priv_type { + ML_PRIV_NONE, + ML_PRIV_CAN, +}; + /** * struct net_device - The DEVICE structure. * @@ -1779,6 +1785,7 @@ enum netdev_priv_flags { * @nd_net: Network namespace this network device is inside * * @ml_priv: Mid-layer private + * @ml_priv_type: Mid-layer private type * @lstats: Loopback statistics * @tstats: Tunnel statistics * @dstats: Dummy statistics @@ -2094,8 +2101,10 @@ struct net_device { possible_net_t nd_net; /* mid-layer private */ + void *ml_priv; + enum netdev_ml_priv_type ml_priv_type; + union { - void *ml_priv; struct pcpu_lstats __percpu *lstats; struct pcpu_sw_netstats __percpu *tstats; struct pcpu_dstats __percpu *dstats; @@ -2286,6 +2295,29 @@ static inline void netdev_reset_rx_headroom(struct net_device *dev) netdev_set_rx_headroom(dev, -1); } +static inline void *netdev_get_ml_priv(struct net_device *dev, + enum netdev_ml_priv_type type) +{ + if (dev->ml_priv_type != type) + return NULL; + + return dev->ml_priv; +} + +static inline void netdev_set_ml_priv(struct net_device *dev, + void *ml_priv, + enum netdev_ml_priv_type type) +{ + WARN(dev->ml_priv_type && dev->ml_priv_type != type, + "Overwriting already set ml_priv_type (%u) with different ml_priv_type (%u)!\n", + dev->ml_priv_type, type); + WARN(!dev->ml_priv_type && dev->ml_priv, + "Overwriting already set ml_priv and ml_priv_type is ML_PRIV_NONE!\n"); + + dev->ml_priv = ml_priv; + dev->ml_priv_type = type; +} + /* * Net namespace inlines */ diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 8c6c4e32fc2f..eadaabd18dc7 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -388,6 +388,7 @@ extern int nfs_open(struct inode *, struct file *); extern int nfs_attribute_cache_expired(struct inode *inode); extern int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode); extern int __nfs_revalidate_inode(struct nfs_server *, struct inode *); +extern int nfs_clear_invalid_mapping(struct address_space *mapping); extern bool nfs_mapping_need_revalidate_inode(struct inode *inode); extern int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping); extern int nfs_revalidate_mapping_rcu(struct inode *inode); @@ -571,8 +572,6 @@ nfs_have_writebacks(struct inode *inode) extern int nfs_readpage(struct file *, struct page *); extern int nfs_readpages(struct file *, struct address_space *, struct list_head *, unsigned); -extern int nfs_readpage_async(struct nfs_open_context *, struct inode *, - struct page *); /* * inline functions diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index 38e60ec742df..6f76b32a0238 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -142,7 +142,7 @@ struct nfs_server { struct nlm_host *nlm_host; /* NLM client handle */ struct nfs_iostats __percpu *io_stats; /* I/O statistics */ atomic_long_t writeback; /* number of writeback pages */ - int flags; /* various flags */ + unsigned int flags; /* various 
flags */ /* The following are for internal use only. Also see uapi/linux/nfs_mount.h */ #define NFS_MOUNT_LOOKUP_CACHE_NONEG 0x10000 @@ -153,6 +153,8 @@ struct nfs_server { #define NFS_MOUNT_LOCAL_FCNTL 0x200000 #define NFS_MOUNT_SOFTERR 0x400000 #define NFS_MOUNT_SOFTREVAL 0x800000 +#define NFS_MOUNT_WRITE_EAGER 0x01000000 +#define NFS_MOUNT_WRITE_WAIT 0x02000000 unsigned int caps; /* server capabilities */ unsigned int rsize; /* read size */ diff --git a/include/linux/platform_profile.h b/include/linux/platform_profile.h index a26542d53058..a6329003aee7 100644 --- a/include/linux/platform_profile.h +++ b/include/linux/platform_profile.h @@ -12,9 +12,8 @@ #include <linux/bitops.h> /* - * If more options are added please update profile_names - * array in platform-profile.c and sysfs-platform-profile.rst - * documentation. + * If more options are added please update profile_names array in + * platform_profile.c and sysfs-platform_profile documentation. */ enum platform_profile_option { @@ -22,6 +21,7 @@ enum platform_profile_option { PLATFORM_PROFILE_COOL, PLATFORM_PROFILE_QUIET, PLATFORM_PROFILE_BALANCED, + PLATFORM_PROFILE_BALANCED_PERFORMANCE, PLATFORM_PROFILE_PERFORMANCE, PLATFORM_PROFILE_LAST, /*must always be last */ }; diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h index 0fefeb976877..4ab5494503a8 100644 --- a/include/linux/vdpa.h +++ b/include/linux/vdpa.h @@ -35,6 +35,8 @@ struct vdpa_vq_state { u16 avail_index; }; +struct vdpa_mgmt_dev; + /** * vDPA device - representation of a vDPA device * @dev: underlying device @@ -43,6 +45,8 @@ struct vdpa_vq_state { * @index: device index * @features_valid: were features initialized? for legacy guests * @nvqs: maximum number of supported virtqueues + * @mdev: management device pointer; caller must setup when registering device as part + * of dev_add() mgmtdev ops callback before invoking _vdpa_register_device(). */ struct vdpa_device { struct device dev; @@ -51,6 +55,7 @@ struct vdpa_device { unsigned int index; bool features_valid; int nvqs; + struct vdpa_mgmt_dev *mdev; }; /** @@ -245,20 +250,22 @@ struct vdpa_config_ops { struct vdpa_device *__vdpa_alloc_device(struct device *parent, const struct vdpa_config_ops *config, - int nvqs, - size_t size); + int nvqs, size_t size, const char *name); -#define vdpa_alloc_device(dev_struct, member, parent, config, nvqs) \ +#define vdpa_alloc_device(dev_struct, member, parent, config, nvqs, name) \ container_of(__vdpa_alloc_device( \ parent, config, nvqs, \ sizeof(dev_struct) + \ BUILD_BUG_ON_ZERO(offsetof( \ - dev_struct, member))), \ + dev_struct, member)), name), \ dev_struct, member) int vdpa_register_device(struct vdpa_device *vdev); void vdpa_unregister_device(struct vdpa_device *vdev); +int _vdpa_register_device(struct vdpa_device *vdev); +void _vdpa_unregister_device(struct vdpa_device *vdev); + /** * vdpa_driver - operations for a vDPA driver * @driver: underlying device driver @@ -336,4 +343,33 @@ static inline void vdpa_get_config(struct vdpa_device *vdev, unsigned offset, ops->get_config(vdev, offset, buf, len); } +/** + * vdpa_mgmtdev_ops - vdpa device ops + * @dev_add: Add a vdpa device using alloc and register + * @mdev: parent device to use for device addition + * @name: name of the new vdpa device + * Driver need to add a new device using _vdpa_register_device() + * after fully initializing the vdpa device. Driver must return 0 + * on success or appropriate error code. 
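To make the dev_add/dev_del contract above concrete, here is a minimal sketch of a parent driver wiring up the new management interface. struct my_vdpa, my_config_ops, my_parent_dev and the queue count are all hypothetical; vdpa_alloc_device(), _vdpa_register_device(), _vdpa_unregister_device() and the vdpa_mgmtdev_* calls are what this series adds.

/* Sketch only; my_* symbols are invented for illustration. */
struct my_vdpa {
	struct vdpa_device vdev;	/* must be first: vdpa_alloc_device() asserts offset 0 */
	/* driver private state ... */
};

static int my_dev_add(struct vdpa_mgmt_dev *mdev, const char *name)
{
	struct my_vdpa *vd;

	vd = vdpa_alloc_device(struct my_vdpa, vdev, my_parent_dev,
			       &my_config_ops, 2, name);
	if (IS_ERR(vd))
		return PTR_ERR(vd);

	vd->vdev.mdev = mdev;	/* must be set before registration */
	return _vdpa_register_device(&vd->vdev);
}

static void my_dev_del(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev)
{
	_vdpa_unregister_device(dev);
}

static const struct vdpa_mgmtdev_ops my_mgmt_ops = {
	.dev_add = my_dev_add,
	.dev_del = my_dev_del,
};

The parent's probe would then fill in a struct vdpa_mgmt_dev (.device, .ops, .id_table) and call vdpa_mgmtdev_register(), with vdpa_mgmtdev_unregister() on teardown.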
+ * @dev_del: Remove a vdpa device using unregister + * @mdev: parent device to use for device removal + * @dev: vdpa device to remove + * Driver need to remove the specified device by calling + * _vdpa_unregister_device(). + */ +struct vdpa_mgmtdev_ops { + int (*dev_add)(struct vdpa_mgmt_dev *mdev, const char *name); + void (*dev_del)(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev); +}; + +struct vdpa_mgmt_dev { + struct device *device; + const struct vdpa_mgmtdev_ops *ops; + const struct virtio_device_id *id_table; /* supported ids */ + struct list_head list; +}; + +int vdpa_mgmtdev_register(struct vdpa_mgmt_dev *mdev); +void vdpa_mgmtdev_unregister(struct vdpa_mgmt_dev *mdev); + #endif /* _LINUX_VDPA_H */ diff --git a/include/linux/virtio_pci_modern.h b/include/linux/virtio_pci_modern.h new file mode 100644 index 000000000000..f26acbeec965 --- /dev/null +++ b/include/linux/virtio_pci_modern.h @@ -0,0 +1,111 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_VIRTIO_PCI_MODERN_H +#define _LINUX_VIRTIO_PCI_MODERN_H + +#include <linux/pci.h> +#include <linux/virtio_pci.h> + +struct virtio_pci_modern_device { + struct pci_dev *pci_dev; + + struct virtio_pci_common_cfg __iomem *common; + /* Device-specific data (non-legacy mode) */ + void __iomem *device; + /* Base of vq notifications (non-legacy mode). */ + void __iomem *notify_base; + /* Where to read and clear interrupt */ + u8 __iomem *isr; + + /* So we can sanity-check accesses. */ + size_t notify_len; + size_t device_len; + + /* Capability for when we need to map notifications per-vq. */ + int notify_map_cap; + + /* Multiply queue_notify_off by this value. (non-legacy mode). */ + u32 notify_offset_multiplier; + + int modern_bars; + + struct virtio_device_id id; +}; + +/* + * Type-safe wrappers for io accesses. + * Use these to enforce at compile time the following spec requirement: + * + * The driver MUST access each field using the “natural” access + * method, i.e. 32-bit accesses for 32-bit fields, 16-bit accesses + * for 16-bit fields and 8-bit accesses for 8-bit fields. 
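The "natural access width" rule quoted above is enforced through the pointer types of these wrappers: each accessor only accepts a pointer of the matching little-endian width, so a mismatched access shows up as a compiler warning rather than a silently wrong-sized MMIO. A small hedged illustration using the queue_size field touched elsewhere in this patch; the wrapper function is invented.

/* Illustration only; reads the ring size of virtqueue idx via the typed helpers. */
static u16 read_vq_size(struct virtio_pci_modern_device *mdev, u16 idx)
{
	vp_iowrite16(idx, &mdev->common->queue_select);
	return vp_ioread16(&mdev->common->queue_size);
	/*
	 * vp_ioread32(&mdev->common->queue_size) would not build cleanly:
	 * the field is __le16 while vp_ioread32() takes __le32 __iomem *,
	 * so the width mismatch is flagged as an incompatible pointer type.
	 */
}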
+ */ +static inline u8 vp_ioread8(const u8 __iomem *addr) +{ + return ioread8(addr); +} +static inline u16 vp_ioread16 (const __le16 __iomem *addr) +{ + return ioread16(addr); +} + +static inline u32 vp_ioread32(const __le32 __iomem *addr) +{ + return ioread32(addr); +} + +static inline void vp_iowrite8(u8 value, u8 __iomem *addr) +{ + iowrite8(value, addr); +} + +static inline void vp_iowrite16(u16 value, __le16 __iomem *addr) +{ + iowrite16(value, addr); +} + +static inline void vp_iowrite32(u32 value, __le32 __iomem *addr) +{ + iowrite32(value, addr); +} + +static inline void vp_iowrite64_twopart(u64 val, + __le32 __iomem *lo, + __le32 __iomem *hi) +{ + vp_iowrite32((u32)val, lo); + vp_iowrite32(val >> 32, hi); +} + +u64 vp_modern_get_features(struct virtio_pci_modern_device *mdev); +void vp_modern_set_features(struct virtio_pci_modern_device *mdev, + u64 features); +u32 vp_modern_generation(struct virtio_pci_modern_device *mdev); +u8 vp_modern_get_status(struct virtio_pci_modern_device *mdev); +void vp_modern_set_status(struct virtio_pci_modern_device *mdev, + u8 status); +u16 vp_modern_queue_vector(struct virtio_pci_modern_device *mdev, + u16 idx, u16 vector); +u16 vp_modern_config_vector(struct virtio_pci_modern_device *mdev, + u16 vector); +void vp_modern_queue_address(struct virtio_pci_modern_device *mdev, + u16 index, u64 desc_addr, u64 driver_addr, + u64 device_addr); +void vp_modern_set_queue_enable(struct virtio_pci_modern_device *mdev, + u16 idx, bool enable); +bool vp_modern_get_queue_enable(struct virtio_pci_modern_device *mdev, + u16 idx); +void vp_modern_set_queue_size(struct virtio_pci_modern_device *mdev, + u16 idx, u16 size); +u16 vp_modern_get_queue_size(struct virtio_pci_modern_device *mdev, + u16 idx); +u16 vp_modern_get_num_queues(struct virtio_pci_modern_device *mdev); +u16 vp_modern_get_queue_notify_off(struct virtio_pci_modern_device *mdev, + u16 idx); +void __iomem *vp_modern_map_capability(struct virtio_pci_modern_device *mdev, int off, + size_t minlen, + u32 align, + u32 start, u32 size, + size_t *len); +int vp_modern_probe(struct virtio_pci_modern_device *mdev); +void vp_modern_remove(struct virtio_pci_modern_device *mdev); +#endif diff --git a/include/net/icmp.h b/include/net/icmp.h index 9ac2d2672a93..fd84adc47963 100644 --- a/include/net/icmp.h +++ b/include/net/icmp.h @@ -46,7 +46,11 @@ static inline void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 #if IS_ENABLED(CONFIG_NF_NAT) void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info); #else -#define icmp_ndo_send icmp_send +static inline void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info) +{ + struct ip_options opts = { 0 }; + __icmp_send(skb_in, type, code, info, &opts); +} #endif int icmp_rcv(struct sk_buff *skb); diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index 76e85e16854b..c838e7ac1c2d 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -60,6 +60,51 @@ DECLARE_EVENT_CLASS(rpcrdma_completion_class, ), \ TP_ARGS(wc, cid)) +DECLARE_EVENT_CLASS(rpcrdma_receive_completion_class, + TP_PROTO( + const struct ib_wc *wc, + const struct rpc_rdma_cid *cid + ), + + TP_ARGS(wc, cid), + + TP_STRUCT__entry( + __field(u32, cq_id) + __field(int, completion_id) + __field(u32, received) + __field(unsigned long, status) + __field(unsigned int, vendor_err) + ), + + TP_fast_assign( + __entry->cq_id = cid->ci_queue_id; + __entry->completion_id = cid->ci_completion_id; + __entry->status = wc->status; 
+ if (wc->status) { + __entry->received = 0; + __entry->vendor_err = wc->vendor_err; + } else { + __entry->received = wc->byte_len; + __entry->vendor_err = 0; + } + ), + + TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x) received=%u", + __entry->cq_id, __entry->completion_id, + rdma_show_wc_status(__entry->status), + __entry->status, __entry->vendor_err, + __entry->received + ) +); + +#define DEFINE_RECEIVE_COMPLETION_EVENT(name) \ + DEFINE_EVENT(rpcrdma_receive_completion_class, name, \ + TP_PROTO( \ + const struct ib_wc *wc, \ + const struct rpc_rdma_cid *cid \ + ), \ + TP_ARGS(wc, cid)) + DECLARE_EVENT_CLASS(xprtrdma_reply_class, TP_PROTO( const struct rpcrdma_rep *rep @@ -838,7 +883,8 @@ TRACE_EVENT(xprtrdma_post_linv_err, ** Completion events **/ -DEFINE_COMPLETION_EVENT(xprtrdma_wc_receive); +DEFINE_RECEIVE_COMPLETION_EVENT(xprtrdma_wc_receive); + DEFINE_COMPLETION_EVENT(xprtrdma_wc_send); DEFINE_COMPLETION_EVENT(xprtrdma_wc_fastreg); DEFINE_COMPLETION_EVENT(xprtrdma_wc_li); @@ -1790,7 +1836,7 @@ TRACE_EVENT(svcrdma_post_recv, ) ); -DEFINE_COMPLETION_EVENT(svcrdma_wc_receive); +DEFINE_RECEIVE_COMPLETION_EVENT(svcrdma_wc_receive); TRACE_EVENT(svcrdma_rq_post_err, TP_PROTO( diff --git a/include/uapi/linux/vdpa.h b/include/uapi/linux/vdpa.h new file mode 100644 index 000000000000..66a41e4ec163 --- /dev/null +++ b/include/uapi/linux/vdpa.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ +/* + * vdpa device management interface + * Copyright (c) 2020 Mellanox Technologies Ltd. All rights reserved. + */ + +#ifndef _UAPI_LINUX_VDPA_H_ +#define _UAPI_LINUX_VDPA_H_ + +#define VDPA_GENL_NAME "vdpa" +#define VDPA_GENL_VERSION 0x1 + +enum vdpa_command { + VDPA_CMD_UNSPEC, + VDPA_CMD_MGMTDEV_NEW, + VDPA_CMD_MGMTDEV_GET, /* can dump */ + VDPA_CMD_DEV_NEW, + VDPA_CMD_DEV_DEL, + VDPA_CMD_DEV_GET, /* can dump */ +}; + +enum vdpa_attr { + VDPA_ATTR_UNSPEC, + + /* bus name (optional) + dev name together make the parent device handle */ + VDPA_ATTR_MGMTDEV_BUS_NAME, /* string */ + VDPA_ATTR_MGMTDEV_DEV_NAME, /* string */ + VDPA_ATTR_MGMTDEV_SUPPORTED_CLASSES, /* u64 */ + + VDPA_ATTR_DEV_NAME, /* string */ + VDPA_ATTR_DEV_ID, /* u32 */ + VDPA_ATTR_DEV_VENDOR_ID, /* u32 */ + VDPA_ATTR_DEV_MAX_VQS, /* u32 */ + VDPA_ATTR_DEV_MAX_VQ_SIZE, /* u16 */ + + /* new attributes must be added above here */ + VDPA_ATTR_MAX, +}; + +#endif diff --git a/net/can/af_can.c b/net/can/af_can.c index 837bb8af0ec3..cce2af10eb3e 100644 --- a/net/can/af_can.c +++ b/net/can/af_can.c @@ -304,8 +304,8 @@ static struct can_dev_rcv_lists *can_dev_rcv_lists_find(struct net *net, struct net_device *dev) { if (dev) { - struct can_ml_priv *ml_priv = dev->ml_priv; - return &ml_priv->dev_rcv_lists; + struct can_ml_priv *can_ml = can_get_ml_priv(dev); + return &can_ml->dev_rcv_lists; } else { return net->can.rx_alldev_list; } @@ -790,25 +790,6 @@ void can_proto_unregister(const struct can_proto *cp) } EXPORT_SYMBOL(can_proto_unregister); -/* af_can notifier to create/remove CAN netdevice specific structs */ -static int can_notifier(struct notifier_block *nb, unsigned long msg, - void *ptr) -{ - struct net_device *dev = netdev_notifier_info_to_dev(ptr); - - if (dev->type != ARPHRD_CAN) - return NOTIFY_DONE; - - switch (msg) { - case NETDEV_REGISTER: - WARN(!dev->ml_priv, - "No CAN mid layer private allocated, please fix your driver and use alloc_candev()!\n"); - break; - } - - return NOTIFY_DONE; -} - static int can_pernet_init(struct net *net) { spin_lock_init(&net->can.rcvlists_lock); @@ -876,11 +857,6 @@ 
static const struct net_proto_family can_family_ops = { .owner = THIS_MODULE, }; -/* notifier block for netdevice event */ -static struct notifier_block can_netdev_notifier __read_mostly = { - .notifier_call = can_notifier, -}; - static struct pernet_operations can_pernet_ops __read_mostly = { .init = can_pernet_init, .exit = can_pernet_exit, @@ -911,17 +887,12 @@ static __init int can_init(void) err = sock_register(&can_family_ops); if (err) goto out_sock; - err = register_netdevice_notifier(&can_netdev_notifier); - if (err) - goto out_notifier; dev_add_pack(&can_packet); dev_add_pack(&canfd_packet); return 0; -out_notifier: - sock_unregister(PF_CAN); out_sock: unregister_pernet_subsys(&can_pernet_ops); out_pernet: @@ -935,7 +906,6 @@ static __exit void can_exit(void) /* protocol unregister */ dev_remove_pack(&canfd_packet); dev_remove_pack(&can_packet); - unregister_netdevice_notifier(&can_netdev_notifier); sock_unregister(PF_CAN); unregister_pernet_subsys(&can_pernet_ops); diff --git a/net/can/j1939/main.c b/net/can/j1939/main.c index bb914d8b4216..da3a7a7bcff2 100644 --- a/net/can/j1939/main.c +++ b/net/can/j1939/main.c @@ -140,9 +140,9 @@ static struct j1939_priv *j1939_priv_create(struct net_device *ndev) static inline void j1939_priv_set(struct net_device *ndev, struct j1939_priv *priv) { - struct can_ml_priv *can_ml_priv = ndev->ml_priv; + struct can_ml_priv *can_ml = can_get_ml_priv(ndev); - can_ml_priv->j1939_priv = priv; + can_ml->j1939_priv = priv; } static void __j1939_priv_release(struct kref *kref) @@ -211,12 +211,9 @@ static void __j1939_rx_release(struct kref *kref) /* get pointer to priv without increasing ref counter */ static inline struct j1939_priv *j1939_ndev_to_priv(struct net_device *ndev) { - struct can_ml_priv *can_ml_priv = ndev->ml_priv; + struct can_ml_priv *can_ml = can_get_ml_priv(ndev); - if (!can_ml_priv) - return NULL; - - return can_ml_priv->j1939_priv; + return can_ml->j1939_priv; } static struct j1939_priv *j1939_priv_get_by_ndev_locked(struct net_device *ndev) @@ -225,9 +222,6 @@ static struct j1939_priv *j1939_priv_get_by_ndev_locked(struct net_device *ndev) lockdep_assert_held(&j1939_netdev_lock); - if (ndev->type != ARPHRD_CAN) - return NULL; - priv = j1939_ndev_to_priv(ndev); if (priv) j1939_priv_get(priv); @@ -348,15 +342,16 @@ static int j1939_netdev_notify(struct notifier_block *nb, unsigned long msg, void *data) { struct net_device *ndev = netdev_notifier_info_to_dev(data); + struct can_ml_priv *can_ml = can_get_ml_priv(ndev); struct j1939_priv *priv; + if (!can_ml) + goto notify_done; + priv = j1939_priv_get_by_ndev(ndev); if (!priv) goto notify_done; - if (ndev->type != ARPHRD_CAN) - goto notify_put; - switch (msg) { case NETDEV_DOWN: j1939_cancel_active_session(priv, NULL); @@ -365,7 +360,6 @@ static int j1939_netdev_notify(struct notifier_block *nb, break; } -notify_put: j1939_priv_put(priv); notify_done: diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c index f23966526a88..56aa66147d5a 100644 --- a/net/can/j1939/socket.c +++ b/net/can/j1939/socket.c @@ -12,6 +12,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <linux/can/can-ml.h> #include <linux/can/core.h> #include <linux/can/skb.h> #include <linux/errqueue.h> @@ -453,6 +454,7 @@ static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len) j1939_jsk_del(priv, jsk); j1939_local_ecu_put(priv, jsk->addr.src_name, jsk->addr.sa); } else { + struct can_ml_priv *can_ml; struct net_device *ndev; ndev = dev_get_by_index(net, addr->can_ifindex); @@ -461,15 
+463,8 @@ static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len) goto out_release_sock; } - if (ndev->type != ARPHRD_CAN) { - dev_put(ndev); - ret = -ENODEV; - goto out_release_sock; - } - - if (!ndev->ml_priv) { - netdev_warn_once(ndev, - "No CAN mid layer private allocated, please fix your driver and use alloc_candev()!\n"); + can_ml = can_get_ml_priv(ndev); + if (!can_ml) { dev_put(ndev); ret = -ENODEV; goto out_release_sock; diff --git a/net/can/proc.c b/net/can/proc.c index 5ea8695f507e..b15760b5c1cc 100644 --- a/net/can/proc.c +++ b/net/can/proc.c @@ -322,8 +322,11 @@ static int can_rcvlist_proc_show(struct seq_file *m, void *v) /* receive list for registered CAN devices */ for_each_netdev_rcu(net, dev) { - if (dev->type == ARPHRD_CAN && dev->ml_priv) - can_rcvlist_proc_show_one(m, idx, dev, dev->ml_priv); + struct can_ml_priv *can_ml = can_get_ml_priv(dev); + + if (can_ml) + can_rcvlist_proc_show_one(m, idx, dev, + &can_ml->dev_rcv_lists); } rcu_read_unlock(); @@ -375,8 +378,10 @@ static int can_rcvlist_sff_proc_show(struct seq_file *m, void *v) /* sff receive list for registered CAN devices */ for_each_netdev_rcu(net, dev) { - if (dev->type == ARPHRD_CAN && dev->ml_priv) { - dev_rcv_lists = dev->ml_priv; + struct can_ml_priv *can_ml = can_get_ml_priv(dev); + + if (can_ml) { + dev_rcv_lists = &can_ml->dev_rcv_lists; can_rcvlist_proc_show_array(m, dev, dev_rcv_lists->rx_sff, ARRAY_SIZE(dev_rcv_lists->rx_sff)); } @@ -406,8 +411,10 @@ static int can_rcvlist_eff_proc_show(struct seq_file *m, void *v) /* eff receive list for registered CAN devices */ for_each_netdev_rcu(net, dev) { - if (dev->type == ARPHRD_CAN && dev->ml_priv) { - dev_rcv_lists = dev->ml_priv; + struct can_ml_priv *can_ml = can_get_ml_priv(dev); + + if (can_ml) { + dev_rcv_lists = &can_ml->dev_rcv_lists; can_rcvlist_proc_show_array(m, dev, dev_rcv_lists->rx_eff, ARRAY_SIZE(dev_rcv_lists->rx_eff)); } diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig index a45572cfb71a..3589224c8da9 100644 --- a/net/dsa/Kconfig +++ b/net/dsa/Kconfig @@ -9,6 +9,7 @@ menuconfig NET_DSA tristate "Distributed Switch Architecture" depends on HAVE_NET_DSA depends on BRIDGE || BRIDGE=n + depends on HSR || HSR=n select GRO_CELLS select NET_SWITCHDEV select PHYLINK diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c index f9a8cc82ae2e..bb1351c38397 100644 --- a/net/hsr/hsr_framereg.c +++ b/net/hsr/hsr_framereg.c @@ -164,8 +164,10 @@ static struct hsr_node *hsr_add_node(struct hsr_priv *hsr, * as initialization. (0 could trigger an spurious ring error warning). 
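The CAN conversions above replace the open-coded dev->type == ARPHRD_CAN && dev->ml_priv checks with the typed accessors from can-ml.h, so a non-CAN or unprepared device simply yields NULL. A minimal sketch of both sides; the two my_* functions are hypothetical, while can_set_ml_priv(), can_get_ml_priv() and dev_rcv_lists come from this series.

/* Sketch only; my_* functions are invented for illustration. */
static void my_candev_attach_ml(struct net_device *dev,
				struct can_ml_priv *can_ml)
{
	/* tags the pointer as ML_PRIV_CAN via netdev_set_ml_priv() */
	can_set_ml_priv(dev, can_ml);
}

static struct can_dev_rcv_lists *my_rcv_lists(struct net_device *dev)
{
	struct can_ml_priv *can_ml = can_get_ml_priv(dev);

	/* NULL for anything that is not a CAN mid-layer device */
	return can_ml ? &can_ml->dev_rcv_lists : NULL;
}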
*/ now = jiffies; - for (i = 0; i < HSR_PT_PORTS; i++) + for (i = 0; i < HSR_PT_PORTS; i++) { new_node->time_in[i] = now; + new_node->time_out[i] = now; + } for (i = 0; i < HSR_PT_PORTS; i++) new_node->seq_out[i] = seq_out; @@ -413,9 +415,12 @@ void hsr_register_frame_in(struct hsr_node *node, struct hsr_port *port, int hsr_register_frame_out(struct hsr_port *port, struct hsr_node *node, u16 sequence_nr) { - if (seq_nr_before_or_eq(sequence_nr, node->seq_out[port->type])) + if (seq_nr_before_or_eq(sequence_nr, node->seq_out[port->type]) && + time_is_after_jiffies(node->time_out[port->type] + + msecs_to_jiffies(HSR_ENTRY_FORGET_TIME))) return 1; + node->time_out[port->type] = jiffies; node->seq_out[port->type] = sequence_nr; return 0; } diff --git a/net/hsr/hsr_framereg.h b/net/hsr/hsr_framereg.h index 86b43f539f2c..d9628e7a5f05 100644 --- a/net/hsr/hsr_framereg.h +++ b/net/hsr/hsr_framereg.h @@ -75,6 +75,7 @@ struct hsr_node { enum hsr_port_type addr_B_port; unsigned long time_in[HSR_PT_PORTS]; bool time_in_stale[HSR_PT_PORTS]; + unsigned long time_out[HSR_PT_PORTS]; /* if the node is a SAN */ bool san_a; bool san_b; diff --git a/net/hsr/hsr_main.h b/net/hsr/hsr_main.h index a169808ee78a..8f264672b70b 100644 --- a/net/hsr/hsr_main.h +++ b/net/hsr/hsr_main.h @@ -22,6 +22,7 @@ #define HSR_LIFE_CHECK_INTERVAL 2000 /* ms */ #define HSR_NODE_FORGET_TIME 60000 /* ms */ #define HSR_ANNOUNCE_INTERVAL 100 /* ms */ +#define HSR_ENTRY_FORGET_TIME 400 /* ms */ /* By how much may slave1 and slave2 timestamps of latest received frame from * each node differ before we notify of communication problem? diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 396b492c804f..616e2dc1c8fa 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -775,13 +775,14 @@ EXPORT_SYMBOL(__icmp_send); void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info) { struct sk_buff *cloned_skb = NULL; + struct ip_options opts = { 0 }; enum ip_conntrack_info ctinfo; struct nf_conn *ct; __be32 orig_ip; ct = nf_ct_get(skb_in, &ctinfo); if (!ct || !(ct->status & IPS_SRC_NAT)) { - icmp_send(skb_in, type, code, info); + __icmp_send(skb_in, type, code, info, &opts); return; } @@ -796,7 +797,7 @@ void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info) orig_ip = ip_hdr(skb_in)->saddr; ip_hdr(skb_in)->saddr = ct->tuplehash[0].tuple.src.u3.ip; - icmp_send(skb_in, type, code, info); + __icmp_send(skb_in, type, code, info, &opts); ip_hdr(skb_in)->saddr = orig_ip; out: consume_skb(cloned_skb); diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index f3d05866692e..fd1f896115c1 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -331,10 +331,9 @@ static int icmpv6_getfrag(void *from, char *to, int offset, int len, int odd, st } #if IS_ENABLED(CONFIG_IPV6_MIP6) -static void mip6_addr_swap(struct sk_buff *skb) +static void mip6_addr_swap(struct sk_buff *skb, const struct inet6_skb_parm *opt) { struct ipv6hdr *iph = ipv6_hdr(skb); - struct inet6_skb_parm *opt = IP6CB(skb); struct ipv6_destopt_hao *hao; struct in6_addr tmp; int off; @@ -351,7 +350,7 @@ static void mip6_addr_swap(struct sk_buff *skb) } } #else -static inline void mip6_addr_swap(struct sk_buff *skb) {} +static inline void mip6_addr_swap(struct sk_buff *skb, const struct inet6_skb_parm *opt) {} #endif static struct dst_entry *icmpv6_route_lookup(struct net *net, @@ -446,7 +445,8 @@ static int icmp6_iif(const struct sk_buff *skb) * Send an ICMP message in response to a packet in error */ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, - 
const struct in6_addr *force_saddr) + const struct in6_addr *force_saddr, + const struct inet6_skb_parm *parm) { struct inet6_dev *idev = NULL; struct ipv6hdr *hdr = ipv6_hdr(skb); @@ -542,7 +542,7 @@ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, if (!(skb->dev->flags & IFF_LOOPBACK) && !icmpv6_global_allow(net, type)) goto out_bh_enable; - mip6_addr_swap(skb); + mip6_addr_swap(skb, parm); sk = icmpv6_xmit_lock(net); if (!sk) @@ -559,7 +559,7 @@ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, /* select a more meaningful saddr from input if */ struct net_device *in_netdev; - in_netdev = dev_get_by_index(net, IP6CB(skb)->iif); + in_netdev = dev_get_by_index(net, parm->iif); if (in_netdev) { ipv6_dev_get_saddr(net, in_netdev, &fl6.daddr, inet6_sk(sk)->srcprefs, @@ -640,7 +640,7 @@ EXPORT_SYMBOL(icmp6_send); */ void icmpv6_param_prob(struct sk_buff *skb, u8 code, int pos) { - icmp6_send(skb, ICMPV6_PARAMPROB, code, pos, NULL); + icmp6_send(skb, ICMPV6_PARAMPROB, code, pos, NULL, IP6CB(skb)); kfree_skb(skb); } @@ -697,10 +697,10 @@ int ip6_err_gen_icmpv6_unreach(struct sk_buff *skb, int nhs, int type, } if (type == ICMP_TIME_EXCEEDED) icmp6_send(skb2, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, - info, &temp_saddr); + info, &temp_saddr, IP6CB(skb2)); else icmp6_send(skb2, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, - info, &temp_saddr); + info, &temp_saddr, IP6CB(skb2)); if (rt) ip6_rt_put(rt); diff --git a/net/ipv6/ip6_icmp.c b/net/ipv6/ip6_icmp.c index 70c8c2f36c98..9e3574880cb0 100644 --- a/net/ipv6/ip6_icmp.c +++ b/net/ipv6/ip6_icmp.c @@ -33,23 +33,25 @@ int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn) } EXPORT_SYMBOL(inet6_unregister_icmp_sender); -void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) +void __icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, + const struct inet6_skb_parm *parm) { ip6_icmp_send_t *send; rcu_read_lock(); send = rcu_dereference(ip6_icmp_send); if (send) - send(skb, type, code, info, NULL); + send(skb, type, code, info, NULL, parm); rcu_read_unlock(); } -EXPORT_SYMBOL(icmpv6_send); +EXPORT_SYMBOL(__icmpv6_send); #endif #if IS_ENABLED(CONFIG_NF_NAT) #include <net/netfilter/nf_conntrack.h> void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info) { + struct inet6_skb_parm parm = { 0 }; struct sk_buff *cloned_skb = NULL; enum ip_conntrack_info ctinfo; struct in6_addr orig_ip; @@ -57,7 +59,7 @@ void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info) ct = nf_ct_get(skb_in, &ctinfo); if (!ct || !(ct->status & IPS_SRC_NAT)) { - icmpv6_send(skb_in, type, code, info); + __icmpv6_send(skb_in, type, code, info, &parm); return; } @@ -72,7 +74,7 @@ void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info) orig_ip = ipv6_hdr(skb_in)->saddr; ipv6_hdr(skb_in)->saddr = ct->tuplehash[0].tuple.src.u3.in6; - icmpv6_send(skb_in, type, code, info); + __icmpv6_send(skb_in, type, code, info, &parm); ipv6_hdr(skb_in)->saddr = orig_ip; out: consume_skb(cloned_skb); diff --git a/net/mptcp/options.c b/net/mptcp/options.c index b63574d6b812..444a38681e93 100644 --- a/net/mptcp/options.c +++ b/net/mptcp/options.c @@ -411,6 +411,7 @@ static void clear_3rdack_retransmission(struct sock *sk) } static bool mptcp_established_options_mp(struct sock *sk, struct sk_buff *skb, + bool snd_data_fin_enable, unsigned int *size, unsigned int remaining, struct mptcp_out_options *opts) @@ -428,9 +429,10 @@ static bool mptcp_established_options_mp(struct sock *sk, struct sk_buff *skb, if 
(!skb) return false; - /* MPC/MPJ needed only on 3rd ack packet */ - if (subflow->fully_established || - subflow->snd_isn != TCP_SKB_CB(skb)->seq) + /* MPC/MPJ needed only on 3rd ack packet, DATA_FIN and TCP shutdown take precedence */ + if (subflow->fully_established || snd_data_fin_enable || + subflow->snd_isn != TCP_SKB_CB(skb)->seq || + sk->sk_state != TCP_ESTABLISHED) return false; if (subflow->mp_capable) { @@ -502,20 +504,20 @@ static void mptcp_write_data_fin(struct mptcp_subflow_context *subflow, } static bool mptcp_established_options_dss(struct sock *sk, struct sk_buff *skb, + bool snd_data_fin_enable, unsigned int *size, unsigned int remaining, struct mptcp_out_options *opts) { struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); struct mptcp_sock *msk = mptcp_sk(subflow->conn); - u64 snd_data_fin_enable, ack_seq; unsigned int dss_size = 0; struct mptcp_ext *mpext; unsigned int ack_size; bool ret = false; + u64 ack_seq; mpext = skb ? mptcp_get_ext(skb) : NULL; - snd_data_fin_enable = mptcp_data_fin_enabled(msk); if (!skb || (mpext && mpext->use_map) || snd_data_fin_enable) { unsigned int map_size; @@ -717,12 +719,15 @@ bool mptcp_established_options(struct sock *sk, struct sk_buff *skb, unsigned int *size, unsigned int remaining, struct mptcp_out_options *opts) { + struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); + struct mptcp_sock *msk = mptcp_sk(subflow->conn); unsigned int opt_size = 0; + bool snd_data_fin; bool ret = false; opts->suboptions = 0; - if (unlikely(mptcp_check_fallback(sk))) + if (unlikely(__mptcp_check_fallback(msk))) return false; /* prevent adding of any MPTCP related options on reset packet @@ -731,10 +736,10 @@ bool mptcp_established_options(struct sock *sk, struct sk_buff *skb, if (unlikely(skb && TCP_SKB_CB(skb)->tcp_flags & TCPHDR_RST)) return false; - if (mptcp_established_options_mp(sk, skb, &opt_size, remaining, opts)) + snd_data_fin = mptcp_data_fin_enabled(msk); + if (mptcp_established_options_mp(sk, skb, snd_data_fin, &opt_size, remaining, opts)) ret = true; - else if (mptcp_established_options_dss(sk, skb, &opt_size, remaining, - opts)) + else if (mptcp_established_options_dss(sk, skb, snd_data_fin, &opt_size, remaining, opts)) ret = true; /* we reserved enough space for the above options, and exceeding the diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index a57f3eab7b6a..c5d5e68940ea 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -11,6 +11,7 @@ #include <linux/netdevice.h> #include <linux/sched/signal.h> #include <linux/atomic.h> +#include <linux/igmp.h> #include <net/sock.h> #include <net/inet_common.h> #include <net/inet_hashtables.h> @@ -19,6 +20,7 @@ #include <net/tcp_states.h> #if IS_ENABLED(CONFIG_MPTCP_IPV6) #include <net/transp_v6.h> +#include <net/addrconf.h> #endif #include <net/mptcp.h> #include <net/xfrm.h> @@ -2264,13 +2266,12 @@ static void mptcp_worker(struct work_struct *work) __mptcp_check_send_data_fin(sk); mptcp_check_data_fin(sk); - /* if the msk data is completely acked, or the socket timedout, - * there is no point in keeping around an orphaned sk + /* There is no point in keeping around an orphaned sk timedout or + * closed, but we need the msk around to reply to incoming DATA_FIN, + * even if it is orphaned and in FIN_WAIT2 state */ if (sock_flag(sk, SOCK_DEAD) && - (mptcp_check_close_timeout(sk) || - (state != sk->sk_state && - ((1 << inet_sk_state_load(sk)) & (TCPF_CLOSE | TCPF_FIN_WAIT2))))) { + (mptcp_check_close_timeout(sk) || sk->sk_state == TCP_CLOSE)) { 
inet_sk_state_store(sk, TCP_CLOSE); __mptcp_destroy_sock(sk); goto unlock; @@ -3375,10 +3376,34 @@ static __poll_t mptcp_poll(struct file *file, struct socket *sock, return mask; } +static int mptcp_release(struct socket *sock) +{ + struct mptcp_subflow_context *subflow; + struct sock *sk = sock->sk; + struct mptcp_sock *msk; + + if (!sk) + return 0; + + lock_sock(sk); + + msk = mptcp_sk(sk); + + mptcp_for_each_subflow(msk, subflow) { + struct sock *ssk = mptcp_subflow_tcp_sock(subflow); + + ip_mc_drop_socket(ssk); + } + + release_sock(sk); + + return inet_release(sock); +} + static const struct proto_ops mptcp_stream_ops = { .family = PF_INET, .owner = THIS_MODULE, - .release = inet_release, + .release = mptcp_release, .bind = mptcp_bind, .connect = mptcp_stream_connect, .socketpair = sock_no_socketpair, @@ -3470,10 +3495,35 @@ void __init mptcp_proto_init(void) } #if IS_ENABLED(CONFIG_MPTCP_IPV6) +static int mptcp6_release(struct socket *sock) +{ + struct mptcp_subflow_context *subflow; + struct mptcp_sock *msk; + struct sock *sk = sock->sk; + + if (!sk) + return 0; + + lock_sock(sk); + + msk = mptcp_sk(sk); + + mptcp_for_each_subflow(msk, subflow) { + struct sock *ssk = mptcp_subflow_tcp_sock(subflow); + + ip_mc_drop_socket(ssk); + ipv6_sock_mc_close(ssk); + ipv6_sock_ac_close(ssk); + } + + release_sock(sk); + return inet6_release(sock); +} + static const struct proto_ops mptcp_v6_stream_ops = { .family = PF_INET6, .owner = THIS_MODULE, - .release = inet6_release, + .release = mptcp6_release, .bind = mptcp_bind, .connect = mptcp_stream_connect, .socketpair = sock_no_socketpair, diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c index 06e233410e0e..e1fbcab257e6 100644 --- a/net/mptcp/subflow.c +++ b/net/mptcp/subflow.c @@ -1096,6 +1096,12 @@ static void subflow_data_ready(struct sock *sk) msk = mptcp_sk(parent); if (state & TCPF_LISTEN) { + /* MPJ subflow are removed from accept queue before reaching here, + * avoid stray wakeups + */ + if (reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue)) + return; + set_bit(MPTCP_DATA_READY, &msk->flags); parent->sk_data_ready(parent); return; diff --git a/net/psample/psample.c b/net/psample/psample.c index 33e238c965bd..482c07f2766b 100644 --- a/net/psample/psample.c +++ b/net/psample/psample.c @@ -309,10 +309,10 @@ static int psample_tunnel_meta_len(struct ip_tunnel_info *tun_info) unsigned short tun_proto = ip_tunnel_info_af(tun_info); const struct ip_tunnel_key *tun_key = &tun_info->key; int tun_opts_len = tun_info->options_len; - int sum = 0; + int sum = nla_total_size(0); /* PSAMPLE_ATTR_TUNNEL */ if (tun_key->tun_flags & TUNNEL_KEY) - sum += nla_total_size(sizeof(u64)); + sum += nla_total_size_64bit(sizeof(u64)); if (tun_info->mode & IP_TUNNEL_INFO_BRIDGE) sum += nla_total_size(0); diff --git a/net/qrtr/tun.c b/net/qrtr/tun.c index b238c40a9984..304b41fea5ab 100644 --- a/net/qrtr/tun.c +++ b/net/qrtr/tun.c @@ -31,6 +31,7 @@ static int qrtr_tun_send(struct qrtr_endpoint *ep, struct sk_buff *skb) static int qrtr_tun_open(struct inode *inode, struct file *filp) { struct qrtr_tun *tun; + int ret; tun = kzalloc(sizeof(*tun), GFP_KERNEL); if (!tun) @@ -43,7 +44,16 @@ static int qrtr_tun_open(struct inode *inode, struct file *filp) filp->private_data = tun; - return qrtr_endpoint_register(&tun->ep, QRTR_EP_NID_AUTO); + ret = qrtr_endpoint_register(&tun->ep, QRTR_EP_NID_AUTO); + if (ret) + goto out; + + return 0; + +out: + filp->private_data = NULL; + kfree(tun); + return ret; } static ssize_t qrtr_tun_read_iter(struct kiocb *iocb, struct 
iov_iter *to) diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index 2409e522c68f..d097b5c15faa 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -1417,6 +1417,21 @@ static int fl_validate_ct_state(u16 state, struct nlattr *tb, return -EINVAL; } + if (state & TCA_FLOWER_KEY_CT_FLAGS_INVALID && + state & ~(TCA_FLOWER_KEY_CT_FLAGS_TRACKED | + TCA_FLOWER_KEY_CT_FLAGS_INVALID)) { + NL_SET_ERR_MSG_ATTR(extack, tb, + "when inv is set, only trk may be set"); + return -EINVAL; + } + + if (state & TCA_FLOWER_KEY_CT_FLAGS_NEW && + state & TCA_FLOWER_KEY_CT_FLAGS_REPLY) { + NL_SET_ERR_MSG_ATTR(extack, tb, + "new and rpl are mutually exclusive"); + return -EINVAL; + } + return 0; } diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 8241f5a4a01c..09c000d490a1 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -478,6 +478,7 @@ rpc_get_inode(struct super_block *sb, umode_t mode) inode->i_fop = &simple_dir_operations; inode->i_op = &simple_dir_inode_operations; inc_nlink(inode); + break; default: break; } diff --git a/net/sunrpc/xprtrdma/backchannel.c b/net/sunrpc/xprtrdma/backchannel.c index 946edf2db646..a249837d6a55 100644 --- a/net/sunrpc/xprtrdma/backchannel.c +++ b/net/sunrpc/xprtrdma/backchannel.c @@ -2,7 +2,7 @@ /* * Copyright (c) 2015-2020, Oracle and/or its affiliates. * - * Support for backward direction RPCs on RPC/RDMA. + * Support for reverse-direction RPCs on RPC/RDMA. */ #include <linux/sunrpc/xprt.h> @@ -208,7 +208,7 @@ create_req: } /** - * rpcrdma_bc_receive_call - Handle a backward direction call + * rpcrdma_bc_receive_call - Handle a reverse-direction Call * @r_xprt: transport receiving the call * @rep: receive buffer containing the call * diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c index baca49fe83af..766a1048a48a 100644 --- a/net/sunrpc/xprtrdma/frwr_ops.c +++ b/net/sunrpc/xprtrdma/frwr_ops.c @@ -306,20 +306,14 @@ struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt, if (nsegs > ep->re_max_fr_depth) nsegs = ep->re_max_fr_depth; for (i = 0; i < nsegs;) { - if (seg->mr_page) - sg_set_page(&mr->mr_sg[i], - seg->mr_page, - seg->mr_len, - offset_in_page(seg->mr_offset)); - else - sg_set_buf(&mr->mr_sg[i], seg->mr_offset, - seg->mr_len); + sg_set_page(&mr->mr_sg[i], seg->mr_page, + seg->mr_len, seg->mr_offset); ++seg; ++i; if (ep->re_mrtype == IB_MR_TYPE_SG_GAPS) continue; - if ((i < nsegs && offset_in_page(seg->mr_offset)) || + if ((i < nsegs && seg->mr_offset) || offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len)) break; } diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c index 8f5d0cb68360..292f066d006e 100644 --- a/net/sunrpc/xprtrdma/rpc_rdma.c +++ b/net/sunrpc/xprtrdma/rpc_rdma.c @@ -204,9 +204,7 @@ rpcrdma_alloc_sparse_pages(struct xdr_buf *buf) return 0; } -/* Split @vec on page boundaries into SGEs. FMR registers pages, not - * a byte range. Other modes coalesce these SGEs into a single MR - * when they can. +/* Convert @vec to a single SGL element. * * Returns pointer to next available SGE, and bumps the total number * of SGEs consumed. 
@@ -215,22 +213,11 @@ static struct rpcrdma_mr_seg * rpcrdma_convert_kvec(struct kvec *vec, struct rpcrdma_mr_seg *seg, unsigned int *n) { - u32 remaining, page_offset; - char *base; - - base = vec->iov_base; - page_offset = offset_in_page(base); - remaining = vec->iov_len; - while (remaining) { - seg->mr_page = NULL; - seg->mr_offset = base; - seg->mr_len = min_t(u32, PAGE_SIZE - page_offset, remaining); - remaining -= seg->mr_len; - base += seg->mr_len; - ++seg; - ++(*n); - page_offset = 0; - } + seg->mr_page = virt_to_page(vec->iov_base); + seg->mr_offset = offset_in_page(vec->iov_base); + seg->mr_len = vec->iov_len; + ++seg; + ++(*n); return seg; } @@ -259,7 +246,7 @@ rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf, page_base = offset_in_page(xdrbuf->page_base); while (len) { seg->mr_page = *ppages; - seg->mr_offset = (char *)page_base; + seg->mr_offset = page_base; seg->mr_len = min_t(u32, PAGE_SIZE - page_base, len); len -= seg->mr_len; ++ppages; @@ -268,10 +255,7 @@ rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf, page_base = 0; } - /* When encoding a Read chunk, the tail iovec contains an - * XDR pad and may be omitted. - */ - if (type == rpcrdma_readch && r_xprt->rx_ep->re_implicit_roundup) + if (type == rpcrdma_readch) goto out; /* When encoding a Write chunk, some servers need to see an @@ -283,7 +267,7 @@ rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf, goto out; if (xdrbuf->tail[0].iov_len) - seg = rpcrdma_convert_kvec(&xdrbuf->tail[0], seg, &n); + rpcrdma_convert_kvec(&xdrbuf->tail[0], seg, &n); out: if (unlikely(n > RPCRDMA_MAX_SEGS)) @@ -644,9 +628,8 @@ out_mapping_err: return false; } -/* The tail iovec may include an XDR pad for the page list, - * as well as additional content, and may not reside in the - * same page as the head iovec. +/* The tail iovec might not reside in the same page as the + * head iovec. */ static bool rpcrdma_prepare_tail_iov(struct rpcrdma_req *req, struct xdr_buf *xdr, @@ -764,27 +747,19 @@ static bool rpcrdma_prepare_readch(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req, struct xdr_buf *xdr) { + struct kvec *tail = &xdr->tail[0]; + if (!rpcrdma_prepare_head_iov(r_xprt, req, xdr->head[0].iov_len)) return false; - /* If there is a Read chunk, the page list is being handled + /* If there is a Read chunk, the page list is handled * via explicit RDMA, and thus is skipped here. */ - /* Do not include the tail if it is only an XDR pad */ - if (xdr->tail[0].iov_len > 3) { - unsigned int page_base, len; - - /* If the content in the page list is an odd length, - * xdr_write_pages() adds a pad at the beginning of - * the tail iovec. Force the tail's non-pad content to - * land at the next XDR position in the Send message. 
- */ - page_base = offset_in_page(xdr->tail[0].iov_base); - len = xdr->tail[0].iov_len; - page_base += len & 3; - len -= len & 3; - if (!rpcrdma_prepare_tail_iov(req, xdr, page_base, len)) + if (tail->iov_len) { + if (!rpcrdma_prepare_tail_iov(req, xdr, + offset_in_page(tail->iov_base), + tail->iov_len)) return false; kref_get(&req->rl_kref); } @@ -1164,14 +1139,10 @@ rpcrdma_is_bcall(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep) */ p = xdr_inline_decode(xdr, 3 * sizeof(*p)); if (unlikely(!p)) - goto out_short; + return true; rpcrdma_bc_receive_call(r_xprt, rep); return true; - -out_short: - pr_warn("RPC/RDMA short backward direction call\n"); - return true; } #else /* CONFIG_SUNRPC_BACKCHANNEL */ { diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c index 63f8be974df2..4a1edbb4028e 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c +++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c @@ -2,7 +2,7 @@ /* * Copyright (c) 2015-2018 Oracle. All rights reserved. * - * Support for backward direction RPCs on RPC/RDMA (server-side). + * Support for reverse-direction RPCs on RPC/RDMA (server-side). */ #include <linux/sunrpc/svc_rdma.h> @@ -59,7 +59,7 @@ out_unlock: spin_unlock(&xprt->queue_lock); } -/* Send a backwards direction RPC call. +/* Send a reverse-direction RPC Call. * * Caller holds the connection's mutex and has already marshaled * the RPC/RDMA request. diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h index 94b28657aeeb..fe3be985e239 100644 --- a/net/sunrpc/xprtrdma/xprt_rdma.h +++ b/net/sunrpc/xprtrdma/xprt_rdma.h @@ -98,9 +98,9 @@ struct rpcrdma_ep { atomic_t re_completion_ids; }; -/* Pre-allocate extra Work Requests for handling backward receives - * and sends. This is a fixed value because the Work Queues are - * allocated when the forward channel is set up, long before the +/* Pre-allocate extra Work Requests for handling reverse-direction + * Receives and Sends. This is a fixed value because the Work Queues + * are allocated when the forward channel is set up, long before the * backchannel is provisioned. This value is two times * NFS4_DEF_CB_SLOT_TABLE_SIZE. */ @@ -283,10 +283,11 @@ enum { RPCRDMA_MAX_IOV_SEGS, }; -struct rpcrdma_mr_seg { /* chunk descriptors */ - u32 mr_len; /* length of chunk or segment */ - struct page *mr_page; /* owning page, if any */ - char *mr_offset; /* kva if no page, else offset */ +/* Arguments for DMA mapping and registration */ +struct rpcrdma_mr_seg { + u32 mr_len; /* length of segment */ + struct page *mr_page; /* underlying struct page */ + u64 mr_offset; /* IN: page offset, OUT: iova */ }; /* The Send SGE array is provisioned to send a maximum size diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index c56a66cdf4ac..e35760f238a4 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -829,7 +829,7 @@ xs_stream_record_marker(struct xdr_buf *xdr) * EAGAIN: The socket was blocked, please call again later to * complete the request * ENOTCONN: Caller needs to invoke connect logic then call again - * other: Some other error occured, the request was not sent + * other: Some other error occurred, the request was not sent */ static int xs_local_send_request(struct rpc_rqst *req) { @@ -1665,7 +1665,7 @@ static int xs_bind(struct sock_xprt *transport, struct socket *sock) * This ensures that we can continue to establish TCP * connections even when all local ephemeral ports are already * a part of some TCP connection. 
This makes no difference - * for UDP sockets, but also doens't harm them. + * for UDP sockets, but also doesn't harm them. * * If we're asking for any reserved port (i.e. port == 0 && * transport->xprt.resvport == 1) xs_get_srcport above will @@ -1875,6 +1875,7 @@ static int xs_local_setup_socket(struct sock_xprt *transport) xprt->stat.connect_time += (long)jiffies - xprt->stat.connect_start; xprt_set_connected(xprt); + break; case -ENOBUFS: break; case -ENOENT: @@ -2276,10 +2277,8 @@ static void xs_tcp_setup_socket(struct work_struct *work) case -EHOSTUNREACH: case -EADDRINUSE: case -ENOBUFS: - /* - * xs_tcp_force_close() wakes tasks with -EIO. - * We need to wake them first to ensure the - * correct error code. + /* xs_tcp_force_close() wakes tasks with a fixed error code. + * We need to wake them first to ensure the correct error code. */ xprt_wake_pending_tasks(xprt, status); xs_tcp_force_close(xprt); @@ -2380,7 +2379,7 @@ static void xs_error_handle(struct work_struct *work) } /** - * xs_local_print_stats - display AF_LOCAL socket-specifc stats + * xs_local_print_stats - display AF_LOCAL socket-specific stats * @xprt: rpc_xprt struct containing statistics * @seq: output file * @@ -2409,7 +2408,7 @@ static void xs_local_print_stats(struct rpc_xprt *xprt, struct seq_file *seq) } /** - * xs_udp_print_stats - display UDP socket-specifc stats + * xs_udp_print_stats - display UDP socket-specific stats * @xprt: rpc_xprt struct containing statistics * @seq: output file * @@ -2433,7 +2432,7 @@ static void xs_udp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq) } /** - * xs_tcp_print_stats - display TCP socket-specifc stats + * xs_tcp_print_stats - display TCP socket-specific stats * @xprt: rpc_xprt struct containing statistics * @seq: output file * diff --git a/tools/testing/selftests/wireguard/netns.sh b/tools/testing/selftests/wireguard/netns.sh index 74c69b75f6f5..7ed7cd95e58f 100755 --- a/tools/testing/selftests/wireguard/netns.sh +++ b/tools/testing/selftests/wireguard/netns.sh @@ -39,7 +39,7 @@ ip0() { pretty 0 "ip $*"; ip -n $netns0 "$@"; } ip1() { pretty 1 "ip $*"; ip -n $netns1 "$@"; } ip2() { pretty 2 "ip $*"; ip -n $netns2 "$@"; } sleep() { read -t "$1" -N 1 || true; } -waitiperf() { pretty "${1//*-}" "wait for iperf:5201 pid $2"; while [[ $(ss -N "$1" -tlpH 'sport = 5201') != *\"iperf3\",pid=$2,fd=* ]]; do sleep 0.1; done; } +waitiperf() { pretty "${1//*-}" "wait for iperf:${3:-5201} pid $2"; while [[ $(ss -N "$1" -tlpH "sport = ${3:-5201}") != *\"iperf3\",pid=$2,fd=* ]]; do sleep 0.1; done; } waitncatudp() { pretty "${1//*-}" "wait for udp:1111 pid $2"; while [[ $(ss -N "$1" -ulpH 'sport = 1111') != *\"ncat\",pid=$2,fd=* ]]; do sleep 0.1; done; } waitiface() { pretty "${1//*-}" "wait for $2 to come up"; ip netns exec "$1" bash -c "while [[ \$(< \"/sys/class/net/$2/operstate\") != up ]]; do read -t .1 -N 0 || true; done;"; } @@ -141,6 +141,19 @@ tests() { n2 iperf3 -s -1 -B fd00::2 & waitiperf $netns2 $! n1 iperf3 -Z -t 3 -b 0 -u -c fd00::2 + + # TCP over IPv4, in parallel + for max in 4 5 50; do + local pids=( ) + for ((i=0; i < max; ++i)) do + n2 iperf3 -p $(( 5200 + i )) -s -1 -B 192.168.241.2 & + pids+=( $! ); waitiperf $netns2 $! $(( 5200 + i )) + done + for ((i=0; i < max; ++i)) do + n1 iperf3 -Z -t 3 -p $(( 5200 + i )) -c 192.168.241.2 & + done + wait "${pids[@]}" + done } [[ $(ip1 link show dev wg0) =~ mtu\ ([0-9]+) ]] && orig_mtu="${BASH_REMATCH[1]}" |
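
The cls_flower hunk above rejects contradictory conntrack state combinations (inv with anything beyond trk, and new together with rpl). The following standalone C sketch illustrates that validation pattern with illustrative flag values; the names and numeric values are stand-ins, not the kernel's TCA_FLOWER_KEY_CT_FLAGS_* UAPI constants.

/* Minimal userspace sketch of the flag-validation pattern used in
 * fl_validate_ct_state() above: reject "inv" combined with anything
 * other than "trk", and reject "new" together with "rpl".
 * Flag values are illustrative only.
 */
#include <stdio.h>

#define CT_TRK  0x01
#define CT_NEW  0x02
#define CT_EST  0x04
#define CT_RPL  0x08
#define CT_INV  0x10

static int validate_ct_state(unsigned int state, const char **err)
{
	if ((state & CT_INV) && (state & ~(CT_TRK | CT_INV))) {
		*err = "when inv is set, only trk may be set";
		return -1;
	}
	if ((state & CT_NEW) && (state & CT_RPL)) {
		*err = "new and rpl are mutually exclusive";
		return -1;
	}
	return 0;
}

int main(void)
{
	const char *err = NULL;
	unsigned int bad = CT_INV | CT_EST;

	if (validate_ct_state(bad, &err))
		printf("rejected: %s\n", err);
	return 0;
}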
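The qrtr_tun_open() change above stops leaking the just-allocated context when endpoint registration fails. A hedged userspace sketch of the same open()-path unwinding follows; the struct layout and endpoint_register() helper are hypothetical stand-ins for the qrtr API, kept only to show the cleanup order.

/* Sketch of the error unwinding added to qrtr_tun_open(): if the final
 * registration step fails, clear private_data and free the context so
 * no dangling pointer survives the failed open. Types are stand-ins.
 */
#include <stdlib.h>
#include <errno.h>

struct tun_ctx { int ep; };
struct file { void *private_data; };

static int endpoint_register(struct tun_ctx *t)
{
	(void)t;
	return -ENODEV;		/* simulate a registration failure */
}

static int tun_open(struct file *filp)
{
	struct tun_ctx *tun;
	int ret;

	tun = calloc(1, sizeof(*tun));
	if (!tun)
		return -ENOMEM;

	filp->private_data = tun;

	ret = endpoint_register(tun);
	if (ret)
		goto out;

	return 0;

out:
	/* Registration failed: undo everything done earlier. */
	filp->private_data = NULL;
	free(tun);
	return ret;
}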
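The psample hunk above changes the tunnel metadata size estimate to count the nest header itself (nla_total_size(0)) and to size the 64-bit tunnel key with nla_total_size_64bit(), which reserves room for an alignment pad attribute. The arithmetic below is a hedged userspace illustration of that accounting; the constants and the pad-attribute behaviour are my reading of include/net/netlink.h, not copied from it.

/* Hedged illustration of netlink attribute size accounting:
 * nla_total_size() aligns header + payload to 4 bytes, and
 * nla_total_size_64bit() additionally reserves a zero-length pad
 * attribute so the 64-bit payload can be 8-byte aligned.
 */
#include <stdio.h>

#define NLA_ALIGNTO	4
#define NLA_ALIGN(len)	(((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))
#define NLA_HDRLEN	4

static int nla_total_size(int payload)
{
	return NLA_ALIGN(NLA_HDRLEN + payload);
}

static int nla_total_size_64bit(int payload)
{
	/* attribute itself plus a possible zero-length pad attribute */
	return NLA_ALIGN(NLA_HDRLEN + payload) + NLA_ALIGN(NLA_HDRLEN);
}

int main(void)
{
	int sum = nla_total_size(0);	/* nest header, e.g. PSAMPLE_ATTR_TUNNEL */

	sum += nla_total_size_64bit(sizeof(long long));	/* 64-bit tunnel key id */
	printf("reserved bytes: %d\n", sum);
	return 0;
}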