Diffstat (limited to 'drivers')
228 files changed, 10050 insertions, 1735 deletions
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c index 30d8fd03fec7..97b711e57bff 100644 --- a/drivers/acpi/acpi_video.c +++ b/drivers/acpi/acpi_video.c @@ -70,11 +70,7 @@ module_param(device_id_scheme, bool, 0444); static int only_lcd = -1; module_param(only_lcd, int, 0444); -/* - * Display probing is known to take up to 5 seconds, so delay the fallback - * backlight registration by 5 seconds + 3 seconds for some extra margin. - */ -static int register_backlight_delay = 8; +static int register_backlight_delay; module_param(register_backlight_delay, int, 0444); MODULE_PARM_DESC(register_backlight_delay, "Delay in seconds before doing fallback (non GPU driver triggered) " @@ -2176,6 +2172,17 @@ static bool should_check_lcd_flag(void) return false; } +/* + * At least one graphics driver has reported that no LCD is connected + * via the native interface. cancel the registration for fallback acpi_video0. + * If another driver still deems this necessary, it can explicitly register it. + */ +void acpi_video_report_nolcd(void) +{ + cancel_delayed_work(&video_bus_register_backlight_work); +} +EXPORT_SYMBOL(acpi_video_report_nolcd); + int acpi_video_register(void) { int ret = 0; diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c index f27914aedbd5..16dcd31d124f 100644 --- a/drivers/acpi/resource.c +++ b/drivers/acpi/resource.c @@ -432,10 +432,24 @@ static const struct dmi_system_id asus_laptop[] = { DMI_MATCH(DMI_BOARD_NAME, "S5602ZA"), }, }, + { + .ident = "Asus ExpertBook B2502", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_BOARD_NAME, "B2502CBA"), + }, + }, { } }; -static const struct dmi_system_id lenovo_82ra[] = { +static const struct dmi_system_id lenovo_laptop[] = { + { + .ident = "LENOVO IdeaPad Flex 5 14ALC7", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "82R9"), + }, + }, { .ident = "LENOVO IdeaPad Flex 5 16ALC7", .matches = { @@ -446,6 +460,17 @@ static const struct dmi_system_id lenovo_82ra[] = { { } }; +static const struct dmi_system_id schenker_gm_rg[] = { + { + .ident = "XMG CORE 15 (M22)", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "SchenkerTechnologiesGmbH"), + DMI_MATCH(DMI_BOARD_NAME, "GMxRGxx"), + }, + }, + { } +}; + struct irq_override_cmp { const struct dmi_system_id *system; unsigned char irq; @@ -458,8 +483,9 @@ struct irq_override_cmp { static const struct irq_override_cmp override_table[] = { { medion_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false }, { asus_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false }, - { lenovo_82ra, 6, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true }, - { lenovo_82ra, 10, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true }, + { lenovo_laptop, 6, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true }, + { lenovo_laptop, 10, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true }, + { schenker_gm_rg, 1, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_LOW, 1, true }, }; static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity, diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index a934bbc9dd37..1b78c7434492 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c @@ -34,6 +34,7 @@ #include <linux/module.h> #include <linux/pci.h> #include <linux/platform_data/x86/nvidia-wmi-ec-backlight.h> +#include <linux/pnp.h> #include <linux/types.h> #include <linux/workqueue.h> #include <acpi/video.h> @@ -105,6 +106,26 @@ static bool nvidia_wmi_ec_supported(void) } #endif +static bool 
apple_gmux_backlight_present(void) +{ + struct acpi_device *adev; + struct device *dev; + + adev = acpi_dev_get_first_match_dev(GMUX_ACPI_HID, NULL, -1); + if (!adev) + return false; + + dev = acpi_get_first_physical_node(adev); + if (!dev) + return false; + + /* + * drivers/platform/x86/apple-gmux.c only supports old style + * Apple GMUX with an IO-resource. + */ + return pnp_get_resource(to_pnp_dev(dev), IORESOURCE_IO, 0) != NULL; +} + /* Force to use vendor driver when the ACPI device is known to be * buggy */ static int video_detect_force_vendor(const struct dmi_system_id *d) @@ -767,7 +788,7 @@ static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native) if (nvidia_wmi_ec_present) return acpi_backlight_nvidia_wmi_ec; - if (apple_gmux_present()) + if (apple_gmux_backlight_present()) return acpi_backlight_apple_gmux; /* Use ACPI video if available, except when native should be preferred. */ diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c index 5350c73564b6..c7afce465a07 100644 --- a/drivers/acpi/x86/s2idle.c +++ b/drivers/acpi/x86/s2idle.c @@ -28,10 +28,6 @@ static bool sleep_no_lps0 __read_mostly; module_param(sleep_no_lps0, bool, 0644); MODULE_PARM_DESC(sleep_no_lps0, "Do not use the special LPS0 device interface"); -static bool prefer_microsoft_dsm_guid __read_mostly; -module_param(prefer_microsoft_dsm_guid, bool, 0644); -MODULE_PARM_DESC(prefer_microsoft_dsm_guid, "Prefer using Microsoft GUID in LPS0 device _DSM evaluation"); - static const struct acpi_device_id lps0_device_ids[] = { {"PNP0D80", }, {"", }, @@ -369,27 +365,15 @@ out: } struct amd_lps0_hid_device_data { - const unsigned int rev_id; const bool check_off_by_one; - const bool prefer_amd_guid; }; static const struct amd_lps0_hid_device_data amd_picasso = { - .rev_id = 0, .check_off_by_one = true, - .prefer_amd_guid = false, }; static const struct amd_lps0_hid_device_data amd_cezanne = { - .rev_id = 0, - .check_off_by_one = false, - .prefer_amd_guid = false, -}; - -static const struct amd_lps0_hid_device_data amd_rembrandt = { - .rev_id = 2, .check_off_by_one = false, - .prefer_amd_guid = true, }; static const struct acpi_device_id amd_hid_ids[] = { @@ -397,69 +381,27 @@ static const struct acpi_device_id amd_hid_ids[] = { {"AMD0005", (kernel_ulong_t)&amd_picasso, }, {"AMDI0005", (kernel_ulong_t)&amd_picasso, }, {"AMDI0006", (kernel_ulong_t)&amd_cezanne, }, - {"AMDI0007", (kernel_ulong_t)&amd_rembrandt, }, {} }; -static int lps0_prefer_microsoft(const struct dmi_system_id *id) +static int lps0_prefer_amd(const struct dmi_system_id *id) { - pr_debug("Preferring Microsoft GUID.\n"); - prefer_microsoft_dsm_guid = true; + pr_debug("Using AMD GUID w/ _REV 2.\n"); + rev_id = 2; return 0; } - static const struct dmi_system_id s2idle_dmi_table[] __initconst = { { /* - * ASUS TUF Gaming A17 FA707RE - * https://bugzilla.kernel.org/show_bug.cgi?id=216101 - */ - .callback = lps0_prefer_microsoft, - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "ASUS TUF Gaming A17"), - }, - }, - { - /* ASUS ROG Zephyrus G14 (2022) */ - .callback = lps0_prefer_microsoft, - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "ROG Zephyrus G14 GA402"), - }, - }, - { - /* - * Lenovo Yoga Slim 7 Pro X 14ARH7 - * https://bugzilla.kernel.org/show_bug.cgi?id=216473 : 82V2 - * https://bugzilla.kernel.org/show_bug.cgi?id=216438 : 82TL - */ - .callback = lps0_prefer_microsoft, - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), - 
DMI_MATCH(DMI_PRODUCT_NAME, "82"), - }, - }, - { - /* - * ASUSTeK COMPUTER INC. ROG Flow X13 GV301RE_GV301RE - * https://gitlab.freedesktop.org/drm/amd/-/issues/2148 + * AMD Rembrandt based HP EliteBook 835/845/865 G9 + * Contains specialized AML in AMD/_REV 2 path to avoid + * triggering a bug in Qualcomm WLAN firmware. This may be + * removed in the future if that firmware is fixed. */ - .callback = lps0_prefer_microsoft, + .callback = lps0_prefer_amd, .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "ROG Flow X13 GV301"), - }, - }, - { - /* - * ASUSTeK COMPUTER INC. ROG Flow X16 GV601RW_GV601RW - * https://gitlab.freedesktop.org/drm/amd/-/issues/2148 - */ - .callback = lps0_prefer_microsoft, - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "ROG Flow X16 GV601"), + DMI_MATCH(DMI_BOARD_VENDOR, "HP"), + DMI_MATCH(DMI_BOARD_NAME, "8990"), }, }, {} @@ -484,16 +426,14 @@ static int lps0_device_attach(struct acpi_device *adev, if (dev_id->id[0]) data = (const struct amd_lps0_hid_device_data *) dev_id->driver_data; else - data = &amd_rembrandt; - rev_id = data->rev_id; + data = &amd_cezanne; lps0_dsm_func_mask = validate_dsm(adev->handle, ACPI_LPS0_DSM_UUID_AMD, rev_id, &lps0_dsm_guid); if (lps0_dsm_func_mask > 0x3 && data->check_off_by_one) { lps0_dsm_func_mask = (lps0_dsm_func_mask << 1) | 0x1; acpi_handle_debug(adev->handle, "_DSM UUID %s: Adjusted function mask: 0x%x\n", ACPI_LPS0_DSM_UUID_AMD, lps0_dsm_func_mask); - } else if (lps0_dsm_func_mask_microsoft > 0 && data->prefer_amd_guid && - !prefer_microsoft_dsm_guid) { + } else if (lps0_dsm_func_mask_microsoft > 0 && rev_id) { lps0_dsm_func_mask_microsoft = -EINVAL; acpi_handle_debug(adev->handle, "_DSM Using AMD method\n"); } @@ -501,8 +441,7 @@ static int lps0_device_attach(struct acpi_device *adev, rev_id = 1; lps0_dsm_func_mask = validate_dsm(adev->handle, ACPI_LPS0_DSM_UUID, rev_id, &lps0_dsm_guid); - if (!prefer_microsoft_dsm_guid) - lps0_dsm_func_mask_microsoft = -EINVAL; + lps0_dsm_func_mask_microsoft = -EINVAL; } if (lps0_dsm_func_mask < 0 && lps0_dsm_func_mask_microsoft < 0) diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 0cfd0ec6229b..14a1c0d14916 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -83,6 +83,7 @@ enum board_ids { static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); static void ahci_remove_one(struct pci_dev *dev); static void ahci_shutdown_one(struct pci_dev *dev); +static void ahci_intel_pcs_quirk(struct pci_dev *pdev, struct ahci_host_priv *hpriv); static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class, unsigned long deadline); static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class, @@ -676,6 +677,25 @@ static void ahci_pci_save_initial_config(struct pci_dev *pdev, ahci_save_initial_config(&pdev->dev, hpriv); } +static int ahci_pci_reset_controller(struct ata_host *host) +{ + struct pci_dev *pdev = to_pci_dev(host->dev); + struct ahci_host_priv *hpriv = host->private_data; + int rc; + + rc = ahci_reset_controller(host); + if (rc) + return rc; + + /* + * If platform firmware failed to enable ports, try to enable + * them here. 
+ */ + ahci_intel_pcs_quirk(pdev, hpriv); + + return 0; +} + static void ahci_pci_init_controller(struct ata_host *host) { struct ahci_host_priv *hpriv = host->private_data; @@ -870,7 +890,7 @@ static int ahci_pci_device_runtime_resume(struct device *dev) struct ata_host *host = pci_get_drvdata(pdev); int rc; - rc = ahci_reset_controller(host); + rc = ahci_pci_reset_controller(host); if (rc) return rc; ahci_pci_init_controller(host); @@ -906,7 +926,7 @@ static int ahci_pci_device_resume(struct device *dev) ahci_mcp89_apple_enable(pdev); if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) { - rc = ahci_reset_controller(host); + rc = ahci_pci_reset_controller(host); if (rc) return rc; @@ -1784,12 +1804,6 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* save initial config */ ahci_pci_save_initial_config(pdev, hpriv); - /* - * If platform firmware failed to enable ports, try to enable - * them here. - */ - ahci_intel_pcs_quirk(pdev, hpriv); - /* prepare host */ if (hpriv->cap & HOST_CAP_NCQ) { pi.flags |= ATA_FLAG_NCQ; @@ -1899,7 +1913,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (rc) return rc; - rc = ahci_reset_controller(host); + rc = ahci_pci_reset_controller(host); if (rc) return rc; diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig index a2184b428493..a41145d52de9 100644 --- a/drivers/block/Kconfig +++ b/drivers/block/Kconfig @@ -285,6 +285,49 @@ config BLK_DEV_RAM_SIZE The default value is 4096 kilobytes. Only change this if you know what you are doing. +config CDROM_PKTCDVD + tristate "Packet writing on CD/DVD media (DEPRECATED)" + depends on !UML + depends on SCSI + select CDROM + help + Note: This driver is deprecated and will be removed from the + kernel in the near future! + + If you have a CDROM/DVD drive that supports packet writing, say + Y to include support. It should work with any MMC/Mt Fuji + compliant ATAPI or SCSI drive, which is just about any newer + DVD/CD writer. + + Currently only writing to CD-RW, DVD-RW, DVD+RW and DVDRAM discs + is possible. + DVD-RW disks must be in restricted overwrite mode. + + See the file <file:Documentation/cdrom/packet-writing.rst> + for further information on the use of this driver. + + To compile this driver as a module, choose M here: the + module will be called pktcdvd. + +config CDROM_PKTCDVD_BUFFERS + int "Free buffers for data gathering" + depends on CDROM_PKTCDVD + default "8" + help + This controls the maximum number of active concurrent packets. More + concurrent packets can increase write performance, but also require + more memory. Each concurrent packet will require approximately 64Kb + of non-swappable kernel memory, memory which will be allocated when + a disc is opened for writing. + +config CDROM_PKTCDVD_WCACHE + bool "Enable write caching" + depends on CDROM_PKTCDVD + help + If enabled, write caching will be set for the CD-R/W device. For now + this option is dangerous unless the CD-RW media is known good, as we + don't do deferred write error handling yet. 
+ config ATA_OVER_ETH tristate "ATA over Ethernet support" depends on NET diff --git a/drivers/block/Makefile b/drivers/block/Makefile index 962ee65d8ca3..101612cba303 100644 --- a/drivers/block/Makefile +++ b/drivers/block/Makefile @@ -20,6 +20,7 @@ obj-$(CONFIG_AMIGA_Z2RAM) += z2ram.o obj-$(CONFIG_N64CART) += n64cart.o obj-$(CONFIG_BLK_DEV_RAM) += brd.o obj-$(CONFIG_BLK_DEV_LOOP) += loop.o +obj-$(CONFIG_CDROM_PKTCDVD) += pktcdvd.o obj-$(CONFIG_SUNVDC) += sunvdc.o obj-$(CONFIG_BLK_DEV_NBD) += nbd.o diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index eb14ec8ec04c..e36216d50753 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -1607,6 +1607,8 @@ void drbd_submit_bio(struct bio *bio) struct drbd_device *device = bio->bi_bdev->bd_disk->private_data; bio = bio_split_to_limits(bio); + if (!bio) + return; /* * what we "blindly" assume: diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c new file mode 100644 index 000000000000..4cea3b08087e --- /dev/null +++ b/drivers/block/pktcdvd.c @@ -0,0 +1,2944 @@ +/* + * Copyright (C) 2000 Jens Axboe <axboe@suse.de> + * Copyright (C) 2001-2004 Peter Osterlund <petero2@telia.com> + * Copyright (C) 2006 Thomas Maier <balagi@justmail.de> + * + * May be copied or modified under the terms of the GNU General Public + * License. See linux/COPYING for more information. + * + * Packet writing layer for ATAPI and SCSI CD-RW, DVD+RW, DVD-RW and + * DVD-RAM devices. + * + * Theory of operation: + * + * At the lowest level, there is the standard driver for the CD/DVD device, + * such as drivers/scsi/sr.c. This driver can handle read and write requests, + * but it doesn't know anything about the special restrictions that apply to + * packet writing. One restriction is that write requests must be aligned to + * packet boundaries on the physical media, and the size of a write request + * must be equal to the packet size. Another restriction is that a + * GPCMD_FLUSH_CACHE command has to be issued to the drive before a read + * command, if the previous command was a write. + * + * The purpose of the packet writing driver is to hide these restrictions from + * higher layers, such as file systems, and present a block device that can be + * randomly read and written using 2kB-sized blocks. + * + * The lowest layer in the packet writing driver is the packet I/O scheduler. + * Its data is defined by the struct packet_iosched and includes two bio + * queues with pending read and write requests. These queues are processed + * by the pkt_iosched_process_queue() function. The write requests in this + * queue are already properly aligned and sized. This layer is responsible for + * issuing the flush cache commands and scheduling the I/O in a good order. + * + * The next layer transforms unaligned write requests to aligned writes. This + * transformation requires reading missing pieces of data from the underlying + * block device, assembling the pieces to full packets and queuing them to the + * packet I/O scheduler. + * + * At the top layer there is a custom ->submit_bio function that forwards + * read requests directly to the iosched queue and puts write requests in the + * unaligned write queue. A kernel thread performs the necessary read + * gathering to convert the unaligned writes to aligned writes and then feeds + * them to the packet I/O scheduler. 
+ * + *************************************************************************/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/pktcdvd.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/compat.h> +#include <linux/kthread.h> +#include <linux/errno.h> +#include <linux/spinlock.h> +#include <linux/file.h> +#include <linux/proc_fs.h> +#include <linux/seq_file.h> +#include <linux/miscdevice.h> +#include <linux/freezer.h> +#include <linux/mutex.h> +#include <linux/slab.h> +#include <linux/backing-dev.h> +#include <scsi/scsi_cmnd.h> +#include <scsi/scsi_ioctl.h> +#include <scsi/scsi.h> +#include <linux/debugfs.h> +#include <linux/device.h> +#include <linux/nospec.h> +#include <linux/uaccess.h> + +#define DRIVER_NAME "pktcdvd" + +#define pkt_err(pd, fmt, ...) \ + pr_err("%s: " fmt, pd->name, ##__VA_ARGS__) +#define pkt_notice(pd, fmt, ...) \ + pr_notice("%s: " fmt, pd->name, ##__VA_ARGS__) +#define pkt_info(pd, fmt, ...) \ + pr_info("%s: " fmt, pd->name, ##__VA_ARGS__) + +#define pkt_dbg(level, pd, fmt, ...) \ +do { \ + if (level == 2 && PACKET_DEBUG >= 2) \ + pr_notice("%s: %s():" fmt, \ + pd->name, __func__, ##__VA_ARGS__); \ + else if (level == 1 && PACKET_DEBUG >= 1) \ + pr_notice("%s: " fmt, pd->name, ##__VA_ARGS__); \ +} while (0) + +#define MAX_SPEED 0xffff + +static DEFINE_MUTEX(pktcdvd_mutex); +static struct pktcdvd_device *pkt_devs[MAX_WRITERS]; +static struct proc_dir_entry *pkt_proc; +static int pktdev_major; +static int write_congestion_on = PKT_WRITE_CONGESTION_ON; +static int write_congestion_off = PKT_WRITE_CONGESTION_OFF; +static struct mutex ctl_mutex; /* Serialize open/close/setup/teardown */ +static mempool_t psd_pool; +static struct bio_set pkt_bio_set; + +static struct class *class_pktcdvd = NULL; /* /sys/class/pktcdvd */ +static struct dentry *pkt_debugfs_root = NULL; /* /sys/kernel/debug/pktcdvd */ + +/* forward declaration */ +static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev); +static int pkt_remove_dev(dev_t pkt_dev); +static int pkt_seq_show(struct seq_file *m, void *p); + +static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd) +{ + return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1); +} + +/********************************************************** + * sysfs interface for pktcdvd + * by (C) 2006 Thomas Maier <balagi@justmail.de> + + /sys/class/pktcdvd/pktcdvd[0-7]/ + stat/reset + stat/packets_started + stat/packets_finished + stat/kb_written + stat/kb_read + stat/kb_read_gather + write_queue/size + write_queue/congestion_off + write_queue/congestion_on + **********************************************************/ + +static ssize_t packets_started_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct pktcdvd_device *pd = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%lu\n", pd->stats.pkt_started); +} +static DEVICE_ATTR_RO(packets_started); + +static ssize_t packets_finished_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct pktcdvd_device *pd = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%lu\n", pd->stats.pkt_ended); +} +static DEVICE_ATTR_RO(packets_finished); + +static ssize_t kb_written_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct pktcdvd_device *pd = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%lu\n", pd->stats.secs_w >> 1); +} +static DEVICE_ATTR_RO(kb_written); + +static ssize_t kb_read_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + 
struct pktcdvd_device *pd = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%lu\n", pd->stats.secs_r >> 1); +} +static DEVICE_ATTR_RO(kb_read); + +static ssize_t kb_read_gather_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct pktcdvd_device *pd = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%lu\n", pd->stats.secs_rg >> 1); +} +static DEVICE_ATTR_RO(kb_read_gather); + +static ssize_t reset_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t len) +{ + struct pktcdvd_device *pd = dev_get_drvdata(dev); + + if (len > 0) { + pd->stats.pkt_started = 0; + pd->stats.pkt_ended = 0; + pd->stats.secs_w = 0; + pd->stats.secs_rg = 0; + pd->stats.secs_r = 0; + } + return len; +} +static DEVICE_ATTR_WO(reset); + +static struct attribute *pkt_stat_attrs[] = { + &dev_attr_packets_finished.attr, + &dev_attr_packets_started.attr, + &dev_attr_kb_read.attr, + &dev_attr_kb_written.attr, + &dev_attr_kb_read_gather.attr, + &dev_attr_reset.attr, + NULL, +}; + +static const struct attribute_group pkt_stat_group = { + .name = "stat", + .attrs = pkt_stat_attrs, +}; + +static ssize_t size_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct pktcdvd_device *pd = dev_get_drvdata(dev); + int n; + + spin_lock(&pd->lock); + n = sysfs_emit(buf, "%d\n", pd->bio_queue_size); + spin_unlock(&pd->lock); + return n; +} +static DEVICE_ATTR_RO(size); + +static void init_write_congestion_marks(int* lo, int* hi) +{ + if (*hi > 0) { + *hi = max(*hi, 500); + *hi = min(*hi, 1000000); + if (*lo <= 0) + *lo = *hi - 100; + else { + *lo = min(*lo, *hi - 100); + *lo = max(*lo, 100); + } + } else { + *hi = -1; + *lo = -1; + } +} + +static ssize_t congestion_off_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct pktcdvd_device *pd = dev_get_drvdata(dev); + int n; + + spin_lock(&pd->lock); + n = sysfs_emit(buf, "%d\n", pd->write_congestion_off); + spin_unlock(&pd->lock); + return n; +} + +static ssize_t congestion_off_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct pktcdvd_device *pd = dev_get_drvdata(dev); + int val; + + if (sscanf(buf, "%d", &val) == 1) { + spin_lock(&pd->lock); + pd->write_congestion_off = val; + init_write_congestion_marks(&pd->write_congestion_off, + &pd->write_congestion_on); + spin_unlock(&pd->lock); + } + return len; +} +static DEVICE_ATTR_RW(congestion_off); + +static ssize_t congestion_on_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct pktcdvd_device *pd = dev_get_drvdata(dev); + int n; + + spin_lock(&pd->lock); + n = sysfs_emit(buf, "%d\n", pd->write_congestion_on); + spin_unlock(&pd->lock); + return n; +} + +static ssize_t congestion_on_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct pktcdvd_device *pd = dev_get_drvdata(dev); + int val; + + if (sscanf(buf, "%d", &val) == 1) { + spin_lock(&pd->lock); + pd->write_congestion_on = val; + init_write_congestion_marks(&pd->write_congestion_off, + &pd->write_congestion_on); + spin_unlock(&pd->lock); + } + return len; +} +static DEVICE_ATTR_RW(congestion_on); + +static struct attribute *pkt_wq_attrs[] = { + &dev_attr_congestion_on.attr, + &dev_attr_congestion_off.attr, + &dev_attr_size.attr, + NULL, +}; + +static const struct attribute_group pkt_wq_group = { + .name = "write_queue", + .attrs = pkt_wq_attrs, +}; + +static const struct attribute_group *pkt_groups[] = { + &pkt_stat_group, + &pkt_wq_group, + NULL, 
+}; + +static void pkt_sysfs_dev_new(struct pktcdvd_device *pd) +{ + if (class_pktcdvd) { + pd->dev = device_create_with_groups(class_pktcdvd, NULL, + MKDEV(0, 0), pd, pkt_groups, + "%s", pd->name); + if (IS_ERR(pd->dev)) + pd->dev = NULL; + } +} + +static void pkt_sysfs_dev_remove(struct pktcdvd_device *pd) +{ + if (class_pktcdvd) + device_unregister(pd->dev); +} + + +/******************************************************************** + /sys/class/pktcdvd/ + add map block device + remove unmap packet dev + device_map show mappings + *******************************************************************/ + +static void class_pktcdvd_release(struct class *cls) +{ + kfree(cls); +} + +static ssize_t device_map_show(struct class *c, struct class_attribute *attr, + char *data) +{ + int n = 0; + int idx; + mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); + for (idx = 0; idx < MAX_WRITERS; idx++) { + struct pktcdvd_device *pd = pkt_devs[idx]; + if (!pd) + continue; + n += sprintf(data+n, "%s %u:%u %u:%u\n", + pd->name, + MAJOR(pd->pkt_dev), MINOR(pd->pkt_dev), + MAJOR(pd->bdev->bd_dev), + MINOR(pd->bdev->bd_dev)); + } + mutex_unlock(&ctl_mutex); + return n; +} +static CLASS_ATTR_RO(device_map); + +static ssize_t add_store(struct class *c, struct class_attribute *attr, + const char *buf, size_t count) +{ + unsigned int major, minor; + + if (sscanf(buf, "%u:%u", &major, &minor) == 2) { + /* pkt_setup_dev() expects caller to hold reference to self */ + if (!try_module_get(THIS_MODULE)) + return -ENODEV; + + pkt_setup_dev(MKDEV(major, minor), NULL); + + module_put(THIS_MODULE); + + return count; + } + + return -EINVAL; +} +static CLASS_ATTR_WO(add); + +static ssize_t remove_store(struct class *c, struct class_attribute *attr, + const char *buf, size_t count) +{ + unsigned int major, minor; + if (sscanf(buf, "%u:%u", &major, &minor) == 2) { + pkt_remove_dev(MKDEV(major, minor)); + return count; + } + return -EINVAL; +} +static CLASS_ATTR_WO(remove); + +static struct attribute *class_pktcdvd_attrs[] = { + &class_attr_add.attr, + &class_attr_remove.attr, + &class_attr_device_map.attr, + NULL, +}; +ATTRIBUTE_GROUPS(class_pktcdvd); + +static int pkt_sysfs_init(void) +{ + int ret = 0; + + /* + * create control files in sysfs + * /sys/class/pktcdvd/... 
+ */ + class_pktcdvd = kzalloc(sizeof(*class_pktcdvd), GFP_KERNEL); + if (!class_pktcdvd) + return -ENOMEM; + class_pktcdvd->name = DRIVER_NAME; + class_pktcdvd->owner = THIS_MODULE; + class_pktcdvd->class_release = class_pktcdvd_release; + class_pktcdvd->class_groups = class_pktcdvd_groups; + ret = class_register(class_pktcdvd); + if (ret) { + kfree(class_pktcdvd); + class_pktcdvd = NULL; + pr_err("failed to create class pktcdvd\n"); + return ret; + } + return 0; +} + +static void pkt_sysfs_cleanup(void) +{ + if (class_pktcdvd) + class_destroy(class_pktcdvd); + class_pktcdvd = NULL; +} + +/******************************************************************** + entries in debugfs + + /sys/kernel/debug/pktcdvd[0-7]/ + info + + *******************************************************************/ + +static int pkt_debugfs_seq_show(struct seq_file *m, void *p) +{ + return pkt_seq_show(m, p); +} + +static int pkt_debugfs_fops_open(struct inode *inode, struct file *file) +{ + return single_open(file, pkt_debugfs_seq_show, inode->i_private); +} + +static const struct file_operations debug_fops = { + .open = pkt_debugfs_fops_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static void pkt_debugfs_dev_new(struct pktcdvd_device *pd) +{ + if (!pkt_debugfs_root) + return; + pd->dfs_d_root = debugfs_create_dir(pd->name, pkt_debugfs_root); + if (!pd->dfs_d_root) + return; + + pd->dfs_f_info = debugfs_create_file("info", 0444, + pd->dfs_d_root, pd, &debug_fops); +} + +static void pkt_debugfs_dev_remove(struct pktcdvd_device *pd) +{ + if (!pkt_debugfs_root) + return; + debugfs_remove(pd->dfs_f_info); + debugfs_remove(pd->dfs_d_root); + pd->dfs_f_info = NULL; + pd->dfs_d_root = NULL; +} + +static void pkt_debugfs_init(void) +{ + pkt_debugfs_root = debugfs_create_dir(DRIVER_NAME, NULL); +} + +static void pkt_debugfs_cleanup(void) +{ + debugfs_remove(pkt_debugfs_root); + pkt_debugfs_root = NULL; +} + +/* ----------------------------------------------------------*/ + + +static void pkt_bio_finished(struct pktcdvd_device *pd) +{ + BUG_ON(atomic_read(&pd->cdrw.pending_bios) <= 0); + if (atomic_dec_and_test(&pd->cdrw.pending_bios)) { + pkt_dbg(2, pd, "queue empty\n"); + atomic_set(&pd->iosched.attention, 1); + wake_up(&pd->wqueue); + } +} + +/* + * Allocate a packet_data struct + */ +static struct packet_data *pkt_alloc_packet_data(int frames) +{ + int i; + struct packet_data *pkt; + + pkt = kzalloc(sizeof(struct packet_data), GFP_KERNEL); + if (!pkt) + goto no_pkt; + + pkt->frames = frames; + pkt->w_bio = bio_kmalloc(frames, GFP_KERNEL); + if (!pkt->w_bio) + goto no_bio; + + for (i = 0; i < frames / FRAMES_PER_PAGE; i++) { + pkt->pages[i] = alloc_page(GFP_KERNEL|__GFP_ZERO); + if (!pkt->pages[i]) + goto no_page; + } + + spin_lock_init(&pkt->lock); + bio_list_init(&pkt->orig_bios); + + for (i = 0; i < frames; i++) { + pkt->r_bios[i] = bio_kmalloc(1, GFP_KERNEL); + if (!pkt->r_bios[i]) + goto no_rd_bio; + } + + return pkt; + +no_rd_bio: + for (i = 0; i < frames; i++) + kfree(pkt->r_bios[i]); +no_page: + for (i = 0; i < frames / FRAMES_PER_PAGE; i++) + if (pkt->pages[i]) + __free_page(pkt->pages[i]); + kfree(pkt->w_bio); +no_bio: + kfree(pkt); +no_pkt: + return NULL; +} + +/* + * Free a packet_data struct + */ +static void pkt_free_packet_data(struct packet_data *pkt) +{ + int i; + + for (i = 0; i < pkt->frames; i++) + kfree(pkt->r_bios[i]); + for (i = 0; i < pkt->frames / FRAMES_PER_PAGE; i++) + __free_page(pkt->pages[i]); + kfree(pkt->w_bio); + 
kfree(pkt); +} + +static void pkt_shrink_pktlist(struct pktcdvd_device *pd) +{ + struct packet_data *pkt, *next; + + BUG_ON(!list_empty(&pd->cdrw.pkt_active_list)); + + list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_free_list, list) { + pkt_free_packet_data(pkt); + } + INIT_LIST_HEAD(&pd->cdrw.pkt_free_list); +} + +static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets) +{ + struct packet_data *pkt; + + BUG_ON(!list_empty(&pd->cdrw.pkt_free_list)); + + while (nr_packets > 0) { + pkt = pkt_alloc_packet_data(pd->settings.size >> 2); + if (!pkt) { + pkt_shrink_pktlist(pd); + return 0; + } + pkt->id = nr_packets; + pkt->pd = pd; + list_add(&pkt->list, &pd->cdrw.pkt_free_list); + nr_packets--; + } + return 1; +} + +static inline struct pkt_rb_node *pkt_rbtree_next(struct pkt_rb_node *node) +{ + struct rb_node *n = rb_next(&node->rb_node); + if (!n) + return NULL; + return rb_entry(n, struct pkt_rb_node, rb_node); +} + +static void pkt_rbtree_erase(struct pktcdvd_device *pd, struct pkt_rb_node *node) +{ + rb_erase(&node->rb_node, &pd->bio_queue); + mempool_free(node, &pd->rb_pool); + pd->bio_queue_size--; + BUG_ON(pd->bio_queue_size < 0); +} + +/* + * Find the first node in the pd->bio_queue rb tree with a starting sector >= s. + */ +static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s) +{ + struct rb_node *n = pd->bio_queue.rb_node; + struct rb_node *next; + struct pkt_rb_node *tmp; + + if (!n) { + BUG_ON(pd->bio_queue_size > 0); + return NULL; + } + + for (;;) { + tmp = rb_entry(n, struct pkt_rb_node, rb_node); + if (s <= tmp->bio->bi_iter.bi_sector) + next = n->rb_left; + else + next = n->rb_right; + if (!next) + break; + n = next; + } + + if (s > tmp->bio->bi_iter.bi_sector) { + tmp = pkt_rbtree_next(tmp); + if (!tmp) + return NULL; + } + BUG_ON(s > tmp->bio->bi_iter.bi_sector); + return tmp; +} + +/* + * Insert a node into the pd->bio_queue rb tree. + */ +static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *node) +{ + struct rb_node **p = &pd->bio_queue.rb_node; + struct rb_node *parent = NULL; + sector_t s = node->bio->bi_iter.bi_sector; + struct pkt_rb_node *tmp; + + while (*p) { + parent = *p; + tmp = rb_entry(parent, struct pkt_rb_node, rb_node); + if (s < tmp->bio->bi_iter.bi_sector) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + } + rb_link_node(&node->rb_node, parent, p); + rb_insert_color(&node->rb_node, &pd->bio_queue); + pd->bio_queue_size++; +} + +/* + * Send a packet_command to the underlying block device and + * wait for completion. + */ +static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc) +{ + struct request_queue *q = bdev_get_queue(pd->bdev); + struct scsi_cmnd *scmd; + struct request *rq; + int ret = 0; + + rq = scsi_alloc_request(q, (cgc->data_direction == CGC_DATA_WRITE) ? 
+ REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0); + if (IS_ERR(rq)) + return PTR_ERR(rq); + scmd = blk_mq_rq_to_pdu(rq); + + if (cgc->buflen) { + ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen, + GFP_NOIO); + if (ret) + goto out; + } + + scmd->cmd_len = COMMAND_SIZE(cgc->cmd[0]); + memcpy(scmd->cmnd, cgc->cmd, CDROM_PACKET_SIZE); + + rq->timeout = 60*HZ; + if (cgc->quiet) + rq->rq_flags |= RQF_QUIET; + + blk_execute_rq(rq, false); + if (scmd->result) + ret = -EIO; +out: + blk_mq_free_request(rq); + return ret; +} + +static const char *sense_key_string(__u8 index) +{ + static const char * const info[] = { + "No sense", "Recovered error", "Not ready", + "Medium error", "Hardware error", "Illegal request", + "Unit attention", "Data protect", "Blank check", + }; + + return index < ARRAY_SIZE(info) ? info[index] : "INVALID"; +} + +/* + * A generic sense dump / resolve mechanism should be implemented across + * all ATAPI + SCSI devices. + */ +static void pkt_dump_sense(struct pktcdvd_device *pd, + struct packet_command *cgc) +{ + struct scsi_sense_hdr *sshdr = cgc->sshdr; + + if (sshdr) + pkt_err(pd, "%*ph - sense %02x.%02x.%02x (%s)\n", + CDROM_PACKET_SIZE, cgc->cmd, + sshdr->sense_key, sshdr->asc, sshdr->ascq, + sense_key_string(sshdr->sense_key)); + else + pkt_err(pd, "%*ph - no sense\n", CDROM_PACKET_SIZE, cgc->cmd); +} + +/* + * flush the drive cache to media + */ +static int pkt_flush_cache(struct pktcdvd_device *pd) +{ + struct packet_command cgc; + + init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE); + cgc.cmd[0] = GPCMD_FLUSH_CACHE; + cgc.quiet = 1; + + /* + * the IMMED bit -- we default to not setting it, although that + * would allow a much faster close, this is safer + */ +#if 0 + cgc.cmd[1] = 1 << 1; +#endif + return pkt_generic_packet(pd, &cgc); +} + +/* + * speed is given as the normal factor, e.g. 4 for 4x + */ +static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd, + unsigned write_speed, unsigned read_speed) +{ + struct packet_command cgc; + struct scsi_sense_hdr sshdr; + int ret; + + init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE); + cgc.sshdr = &sshdr; + cgc.cmd[0] = GPCMD_SET_SPEED; + cgc.cmd[2] = (read_speed >> 8) & 0xff; + cgc.cmd[3] = read_speed & 0xff; + cgc.cmd[4] = (write_speed >> 8) & 0xff; + cgc.cmd[5] = write_speed & 0xff; + + ret = pkt_generic_packet(pd, &cgc); + if (ret) + pkt_dump_sense(pd, &cgc); + + return ret; +} + +/* + * Queue a bio for processing by the low-level CD device. Must be called + * from process context. + */ +static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio) +{ + spin_lock(&pd->iosched.lock); + if (bio_data_dir(bio) == READ) + bio_list_add(&pd->iosched.read_queue, bio); + else + bio_list_add(&pd->iosched.write_queue, bio); + spin_unlock(&pd->iosched.lock); + + atomic_set(&pd->iosched.attention, 1); + wake_up(&pd->wqueue); +} + +/* + * Process the queued read/write requests. This function handles special + * requirements for CDRW drives: + * - A cache flush command must be inserted before a read request if the + * previous request was a write. + * - Switching between reading and writing is slow, so don't do it more often + * than necessary. + * - Optimize for throughput at the expense of latency. This means that streaming + * writes will never be interrupted by a read, but if the drive has to seek + * before the next write, switch to reading instead if there are any pending + * read requests. + * - Set the read speed according to current usage pattern. 
When only reading + * from the device, it's best to use the highest possible read speed, but + * when switching often between reading and writing, it's better to have the + * same read and write speeds. + */ +static void pkt_iosched_process_queue(struct pktcdvd_device *pd) +{ + + if (atomic_read(&pd->iosched.attention) == 0) + return; + atomic_set(&pd->iosched.attention, 0); + + for (;;) { + struct bio *bio; + int reads_queued, writes_queued; + + spin_lock(&pd->iosched.lock); + reads_queued = !bio_list_empty(&pd->iosched.read_queue); + writes_queued = !bio_list_empty(&pd->iosched.write_queue); + spin_unlock(&pd->iosched.lock); + + if (!reads_queued && !writes_queued) + break; + + if (pd->iosched.writing) { + int need_write_seek = 1; + spin_lock(&pd->iosched.lock); + bio = bio_list_peek(&pd->iosched.write_queue); + spin_unlock(&pd->iosched.lock); + if (bio && (bio->bi_iter.bi_sector == + pd->iosched.last_write)) + need_write_seek = 0; + if (need_write_seek && reads_queued) { + if (atomic_read(&pd->cdrw.pending_bios) > 0) { + pkt_dbg(2, pd, "write, waiting\n"); + break; + } + pkt_flush_cache(pd); + pd->iosched.writing = 0; + } + } else { + if (!reads_queued && writes_queued) { + if (atomic_read(&pd->cdrw.pending_bios) > 0) { + pkt_dbg(2, pd, "read, waiting\n"); + break; + } + pd->iosched.writing = 1; + } + } + + spin_lock(&pd->iosched.lock); + if (pd->iosched.writing) + bio = bio_list_pop(&pd->iosched.write_queue); + else + bio = bio_list_pop(&pd->iosched.read_queue); + spin_unlock(&pd->iosched.lock); + + if (!bio) + continue; + + if (bio_data_dir(bio) == READ) + pd->iosched.successive_reads += + bio->bi_iter.bi_size >> 10; + else { + pd->iosched.successive_reads = 0; + pd->iosched.last_write = bio_end_sector(bio); + } + if (pd->iosched.successive_reads >= HI_SPEED_SWITCH) { + if (pd->read_speed == pd->write_speed) { + pd->read_speed = MAX_SPEED; + pkt_set_speed(pd, pd->write_speed, pd->read_speed); + } + } else { + if (pd->read_speed != pd->write_speed) { + pd->read_speed = pd->write_speed; + pkt_set_speed(pd, pd->write_speed, pd->read_speed); + } + } + + atomic_inc(&pd->cdrw.pending_bios); + submit_bio_noacct(bio); + } +} + +/* + * Special care is needed if the underlying block device has a small + * max_phys_segments value. 
+ */ +static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q) +{ + if ((pd->settings.size << 9) / CD_FRAMESIZE + <= queue_max_segments(q)) { + /* + * The cdrom device can handle one segment/frame + */ + clear_bit(PACKET_MERGE_SEGS, &pd->flags); + return 0; + } else if ((pd->settings.size << 9) / PAGE_SIZE + <= queue_max_segments(q)) { + /* + * We can handle this case at the expense of some extra memory + * copies during write operations + */ + set_bit(PACKET_MERGE_SEGS, &pd->flags); + return 0; + } else { + pkt_err(pd, "cdrom max_phys_segments too small\n"); + return -EIO; + } +} + +static void pkt_end_io_read(struct bio *bio) +{ + struct packet_data *pkt = bio->bi_private; + struct pktcdvd_device *pd = pkt->pd; + BUG_ON(!pd); + + pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n", + bio, (unsigned long long)pkt->sector, + (unsigned long long)bio->bi_iter.bi_sector, bio->bi_status); + + if (bio->bi_status) + atomic_inc(&pkt->io_errors); + bio_uninit(bio); + if (atomic_dec_and_test(&pkt->io_wait)) { + atomic_inc(&pkt->run_sm); + wake_up(&pd->wqueue); + } + pkt_bio_finished(pd); +} + +static void pkt_end_io_packet_write(struct bio *bio) +{ + struct packet_data *pkt = bio->bi_private; + struct pktcdvd_device *pd = pkt->pd; + BUG_ON(!pd); + + pkt_dbg(2, pd, "id=%d, err=%d\n", pkt->id, bio->bi_status); + + pd->stats.pkt_ended++; + + bio_uninit(bio); + pkt_bio_finished(pd); + atomic_dec(&pkt->io_wait); + atomic_inc(&pkt->run_sm); + wake_up(&pd->wqueue); +} + +/* + * Schedule reads for the holes in a packet + */ +static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt) +{ + int frames_read = 0; + struct bio *bio; + int f; + char written[PACKET_MAX_SIZE]; + + BUG_ON(bio_list_empty(&pkt->orig_bios)); + + atomic_set(&pkt->io_wait, 0); + atomic_set(&pkt->io_errors, 0); + + /* + * Figure out which frames we need to read before we can write. + */ + memset(written, 0, sizeof(written)); + spin_lock(&pkt->lock); + bio_list_for_each(bio, &pkt->orig_bios) { + int first_frame = (bio->bi_iter.bi_sector - pkt->sector) / + (CD_FRAMESIZE >> 9); + int num_frames = bio->bi_iter.bi_size / CD_FRAMESIZE; + pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9); + BUG_ON(first_frame < 0); + BUG_ON(first_frame + num_frames > pkt->frames); + for (f = first_frame; f < first_frame + num_frames; f++) + written[f] = 1; + } + spin_unlock(&pkt->lock); + + if (pkt->cache_valid) { + pkt_dbg(2, pd, "zone %llx cached\n", + (unsigned long long)pkt->sector); + goto out_account; + } + + /* + * Schedule reads for missing parts of the packet. + */ + for (f = 0; f < pkt->frames; f++) { + int p, offset; + + if (written[f]) + continue; + + bio = pkt->r_bios[f]; + bio_init(bio, pd->bdev, bio->bi_inline_vecs, 1, REQ_OP_READ); + bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9); + bio->bi_end_io = pkt_end_io_read; + bio->bi_private = pkt; + + p = (f * CD_FRAMESIZE) / PAGE_SIZE; + offset = (f * CD_FRAMESIZE) % PAGE_SIZE; + pkt_dbg(2, pd, "Adding frame %d, page:%p offs:%d\n", + f, pkt->pages[p], offset); + if (!bio_add_page(bio, pkt->pages[p], CD_FRAMESIZE, offset)) + BUG(); + + atomic_inc(&pkt->io_wait); + pkt_queue_bio(pd, bio); + frames_read++; + } + +out_account: + pkt_dbg(2, pd, "need %d frames for zone %llx\n", + frames_read, (unsigned long long)pkt->sector); + pd->stats.pkt_started++; + pd->stats.secs_rg += frames_read * (CD_FRAMESIZE >> 9); +} + +/* + * Find a packet matching zone, or the least recently used packet if + * there is no match. 
+ */ +static struct packet_data *pkt_get_packet_data(struct pktcdvd_device *pd, int zone) +{ + struct packet_data *pkt; + + list_for_each_entry(pkt, &pd->cdrw.pkt_free_list, list) { + if (pkt->sector == zone || pkt->list.next == &pd->cdrw.pkt_free_list) { + list_del_init(&pkt->list); + if (pkt->sector != zone) + pkt->cache_valid = 0; + return pkt; + } + } + BUG(); + return NULL; +} + +static void pkt_put_packet_data(struct pktcdvd_device *pd, struct packet_data *pkt) +{ + if (pkt->cache_valid) { + list_add(&pkt->list, &pd->cdrw.pkt_free_list); + } else { + list_add_tail(&pkt->list, &pd->cdrw.pkt_free_list); + } +} + +static inline void pkt_set_state(struct packet_data *pkt, enum packet_data_state state) +{ +#if PACKET_DEBUG > 1 + static const char *state_name[] = { + "IDLE", "WAITING", "READ_WAIT", "WRITE_WAIT", "RECOVERY", "FINISHED" + }; + enum packet_data_state old_state = pkt->state; + pkt_dbg(2, pd, "pkt %2d : s=%6llx %s -> %s\n", + pkt->id, (unsigned long long)pkt->sector, + state_name[old_state], state_name[state]); +#endif + pkt->state = state; +} + +/* + * Scan the work queue to see if we can start a new packet. + * returns non-zero if any work was done. + */ +static int pkt_handle_queue(struct pktcdvd_device *pd) +{ + struct packet_data *pkt, *p; + struct bio *bio = NULL; + sector_t zone = 0; /* Suppress gcc warning */ + struct pkt_rb_node *node, *first_node; + struct rb_node *n; + + atomic_set(&pd->scan_queue, 0); + + if (list_empty(&pd->cdrw.pkt_free_list)) { + pkt_dbg(2, pd, "no pkt\n"); + return 0; + } + + /* + * Try to find a zone we are not already working on. + */ + spin_lock(&pd->lock); + first_node = pkt_rbtree_find(pd, pd->current_sector); + if (!first_node) { + n = rb_first(&pd->bio_queue); + if (n) + first_node = rb_entry(n, struct pkt_rb_node, rb_node); + } + node = first_node; + while (node) { + bio = node->bio; + zone = get_zone(bio->bi_iter.bi_sector, pd); + list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) { + if (p->sector == zone) { + bio = NULL; + goto try_next_bio; + } + } + break; +try_next_bio: + node = pkt_rbtree_next(node); + if (!node) { + n = rb_first(&pd->bio_queue); + if (n) + node = rb_entry(n, struct pkt_rb_node, rb_node); + } + if (node == first_node) + node = NULL; + } + spin_unlock(&pd->lock); + if (!bio) { + pkt_dbg(2, pd, "no bio\n"); + return 0; + } + + pkt = pkt_get_packet_data(pd, zone); + + pd->current_sector = zone + pd->settings.size; + pkt->sector = zone; + BUG_ON(pkt->frames != pd->settings.size >> 2); + pkt->write_size = 0; + + /* + * Scan work queue for bios in the same zone and link them + * to this packet. 
+ */ + spin_lock(&pd->lock); + pkt_dbg(2, pd, "looking for zone %llx\n", (unsigned long long)zone); + while ((node = pkt_rbtree_find(pd, zone)) != NULL) { + bio = node->bio; + pkt_dbg(2, pd, "found zone=%llx\n", (unsigned long long) + get_zone(bio->bi_iter.bi_sector, pd)); + if (get_zone(bio->bi_iter.bi_sector, pd) != zone) + break; + pkt_rbtree_erase(pd, node); + spin_lock(&pkt->lock); + bio_list_add(&pkt->orig_bios, bio); + pkt->write_size += bio->bi_iter.bi_size / CD_FRAMESIZE; + spin_unlock(&pkt->lock); + } + /* check write congestion marks, and if bio_queue_size is + * below, wake up any waiters + */ + if (pd->congested && + pd->bio_queue_size <= pd->write_congestion_off) { + pd->congested = false; + wake_up_var(&pd->congested); + } + spin_unlock(&pd->lock); + + pkt->sleep_time = max(PACKET_WAIT_TIME, 1); + pkt_set_state(pkt, PACKET_WAITING_STATE); + atomic_set(&pkt->run_sm, 1); + + spin_lock(&pd->cdrw.active_list_lock); + list_add(&pkt->list, &pd->cdrw.pkt_active_list); + spin_unlock(&pd->cdrw.active_list_lock); + + return 1; +} + +/** + * bio_list_copy_data - copy contents of data buffers from one chain of bios to + * another + * @src: source bio list + * @dst: destination bio list + * + * Stops when it reaches the end of either the @src list or @dst list - that is, + * copies min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of + * bios). + */ +static void bio_list_copy_data(struct bio *dst, struct bio *src) +{ + struct bvec_iter src_iter = src->bi_iter; + struct bvec_iter dst_iter = dst->bi_iter; + + while (1) { + if (!src_iter.bi_size) { + src = src->bi_next; + if (!src) + break; + + src_iter = src->bi_iter; + } + + if (!dst_iter.bi_size) { + dst = dst->bi_next; + if (!dst) + break; + + dst_iter = dst->bi_iter; + } + + bio_copy_data_iter(dst, &dst_iter, src, &src_iter); + } +} + +/* + * Assemble a bio to write one packet and queue the bio for processing + * by the underlying block device. + */ +static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt) +{ + int f; + + bio_init(pkt->w_bio, pd->bdev, pkt->w_bio->bi_inline_vecs, pkt->frames, + REQ_OP_WRITE); + pkt->w_bio->bi_iter.bi_sector = pkt->sector; + pkt->w_bio->bi_end_io = pkt_end_io_packet_write; + pkt->w_bio->bi_private = pkt; + + /* XXX: locking? */ + for (f = 0; f < pkt->frames; f++) { + struct page *page = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE]; + unsigned offset = (f * CD_FRAMESIZE) % PAGE_SIZE; + + if (!bio_add_page(pkt->w_bio, page, CD_FRAMESIZE, offset)) + BUG(); + } + pkt_dbg(2, pd, "vcnt=%d\n", pkt->w_bio->bi_vcnt); + + /* + * Fill-in bvec with data from orig_bios. 
+ */ + spin_lock(&pkt->lock); + bio_list_copy_data(pkt->w_bio, pkt->orig_bios.head); + + pkt_set_state(pkt, PACKET_WRITE_WAIT_STATE); + spin_unlock(&pkt->lock); + + pkt_dbg(2, pd, "Writing %d frames for zone %llx\n", + pkt->write_size, (unsigned long long)pkt->sector); + + if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames)) + pkt->cache_valid = 1; + else + pkt->cache_valid = 0; + + /* Start the write request */ + atomic_set(&pkt->io_wait, 1); + pkt_queue_bio(pd, pkt->w_bio); +} + +static void pkt_finish_packet(struct packet_data *pkt, blk_status_t status) +{ + struct bio *bio; + + if (status) + pkt->cache_valid = 0; + + /* Finish all bios corresponding to this packet */ + while ((bio = bio_list_pop(&pkt->orig_bios))) { + bio->bi_status = status; + bio_endio(bio); + } +} + +static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data *pkt) +{ + pkt_dbg(2, pd, "pkt %d\n", pkt->id); + + for (;;) { + switch (pkt->state) { + case PACKET_WAITING_STATE: + if ((pkt->write_size < pkt->frames) && (pkt->sleep_time > 0)) + return; + + pkt->sleep_time = 0; + pkt_gather_data(pd, pkt); + pkt_set_state(pkt, PACKET_READ_WAIT_STATE); + break; + + case PACKET_READ_WAIT_STATE: + if (atomic_read(&pkt->io_wait) > 0) + return; + + if (atomic_read(&pkt->io_errors) > 0) { + pkt_set_state(pkt, PACKET_RECOVERY_STATE); + } else { + pkt_start_write(pd, pkt); + } + break; + + case PACKET_WRITE_WAIT_STATE: + if (atomic_read(&pkt->io_wait) > 0) + return; + + if (!pkt->w_bio->bi_status) { + pkt_set_state(pkt, PACKET_FINISHED_STATE); + } else { + pkt_set_state(pkt, PACKET_RECOVERY_STATE); + } + break; + + case PACKET_RECOVERY_STATE: + pkt_dbg(2, pd, "No recovery possible\n"); + pkt_set_state(pkt, PACKET_FINISHED_STATE); + break; + + case PACKET_FINISHED_STATE: + pkt_finish_packet(pkt, pkt->w_bio->bi_status); + return; + + default: + BUG(); + break; + } + } +} + +static void pkt_handle_packets(struct pktcdvd_device *pd) +{ + struct packet_data *pkt, *next; + + /* + * Run state machine for active packets + */ + list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) { + if (atomic_read(&pkt->run_sm) > 0) { + atomic_set(&pkt->run_sm, 0); + pkt_run_state_machine(pd, pkt); + } + } + + /* + * Move no longer active packets to the free list + */ + spin_lock(&pd->cdrw.active_list_lock); + list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_active_list, list) { + if (pkt->state == PACKET_FINISHED_STATE) { + list_del(&pkt->list); + pkt_put_packet_data(pd, pkt); + pkt_set_state(pkt, PACKET_IDLE_STATE); + atomic_set(&pd->scan_queue, 1); + } + } + spin_unlock(&pd->cdrw.active_list_lock); +} + +static void pkt_count_states(struct pktcdvd_device *pd, int *states) +{ + struct packet_data *pkt; + int i; + + for (i = 0; i < PACKET_NUM_STATES; i++) + states[i] = 0; + + spin_lock(&pd->cdrw.active_list_lock); + list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) { + states[pkt->state]++; + } + spin_unlock(&pd->cdrw.active_list_lock); +} + +/* + * kcdrwd is woken up when writes have been queued for one of our + * registered devices + */ +static int kcdrwd(void *foobar) +{ + struct pktcdvd_device *pd = foobar; + struct packet_data *pkt; + long min_sleep_time, residue; + + set_user_nice(current, MIN_NICE); + set_freezable(); + + for (;;) { + DECLARE_WAITQUEUE(wait, current); + + /* + * Wait until there is something to do + */ + add_wait_queue(&pd->wqueue, &wait); + for (;;) { + set_current_state(TASK_INTERRUPTIBLE); + + /* Check if we need to run pkt_handle_queue */ + if 
(atomic_read(&pd->scan_queue) > 0) + goto work_to_do; + + /* Check if we need to run the state machine for some packet */ + list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) { + if (atomic_read(&pkt->run_sm) > 0) + goto work_to_do; + } + + /* Check if we need to process the iosched queues */ + if (atomic_read(&pd->iosched.attention) != 0) + goto work_to_do; + + /* Otherwise, go to sleep */ + if (PACKET_DEBUG > 1) { + int states[PACKET_NUM_STATES]; + pkt_count_states(pd, states); + pkt_dbg(2, pd, "i:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n", + states[0], states[1], states[2], + states[3], states[4], states[5]); + } + + min_sleep_time = MAX_SCHEDULE_TIMEOUT; + list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) { + if (pkt->sleep_time && pkt->sleep_time < min_sleep_time) + min_sleep_time = pkt->sleep_time; + } + + pkt_dbg(2, pd, "sleeping\n"); + residue = schedule_timeout(min_sleep_time); + pkt_dbg(2, pd, "wake up\n"); + + /* make swsusp happy with our thread */ + try_to_freeze(); + + list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) { + if (!pkt->sleep_time) + continue; + pkt->sleep_time -= min_sleep_time - residue; + if (pkt->sleep_time <= 0) { + pkt->sleep_time = 0; + atomic_inc(&pkt->run_sm); + } + } + + if (kthread_should_stop()) + break; + } +work_to_do: + set_current_state(TASK_RUNNING); + remove_wait_queue(&pd->wqueue, &wait); + + if (kthread_should_stop()) + break; + + /* + * if pkt_handle_queue returns true, we can queue + * another request. + */ + while (pkt_handle_queue(pd)) + ; + + /* + * Handle packet state machine + */ + pkt_handle_packets(pd); + + /* + * Handle iosched queues + */ + pkt_iosched_process_queue(pd); + } + + return 0; +} + +static void pkt_print_settings(struct pktcdvd_device *pd) +{ + pkt_info(pd, "%s packets, %u blocks, Mode-%c disc\n", + pd->settings.fp ? "Fixed" : "Variable", + pd->settings.size >> 2, + pd->settings.block_mode == 8 ? 
'1' : '2'); +} + +static int pkt_mode_sense(struct pktcdvd_device *pd, struct packet_command *cgc, int page_code, int page_control) +{ + memset(cgc->cmd, 0, sizeof(cgc->cmd)); + + cgc->cmd[0] = GPCMD_MODE_SENSE_10; + cgc->cmd[2] = page_code | (page_control << 6); + cgc->cmd[7] = cgc->buflen >> 8; + cgc->cmd[8] = cgc->buflen & 0xff; + cgc->data_direction = CGC_DATA_READ; + return pkt_generic_packet(pd, cgc); +} + +static int pkt_mode_select(struct pktcdvd_device *pd, struct packet_command *cgc) +{ + memset(cgc->cmd, 0, sizeof(cgc->cmd)); + memset(cgc->buffer, 0, 2); + cgc->cmd[0] = GPCMD_MODE_SELECT_10; + cgc->cmd[1] = 0x10; /* PF */ + cgc->cmd[7] = cgc->buflen >> 8; + cgc->cmd[8] = cgc->buflen & 0xff; + cgc->data_direction = CGC_DATA_WRITE; + return pkt_generic_packet(pd, cgc); +} + +static int pkt_get_disc_info(struct pktcdvd_device *pd, disc_information *di) +{ + struct packet_command cgc; + int ret; + + /* set up command and get the disc info */ + init_cdrom_command(&cgc, di, sizeof(*di), CGC_DATA_READ); + cgc.cmd[0] = GPCMD_READ_DISC_INFO; + cgc.cmd[8] = cgc.buflen = 2; + cgc.quiet = 1; + + ret = pkt_generic_packet(pd, &cgc); + if (ret) + return ret; + + /* not all drives have the same disc_info length, so requeue + * packet with the length the drive tells us it can supply + */ + cgc.buflen = be16_to_cpu(di->disc_information_length) + + sizeof(di->disc_information_length); + + if (cgc.buflen > sizeof(disc_information)) + cgc.buflen = sizeof(disc_information); + + cgc.cmd[8] = cgc.buflen; + return pkt_generic_packet(pd, &cgc); +} + +static int pkt_get_track_info(struct pktcdvd_device *pd, __u16 track, __u8 type, track_information *ti) +{ + struct packet_command cgc; + int ret; + + init_cdrom_command(&cgc, ti, 8, CGC_DATA_READ); + cgc.cmd[0] = GPCMD_READ_TRACK_RZONE_INFO; + cgc.cmd[1] = type & 3; + cgc.cmd[4] = (track & 0xff00) >> 8; + cgc.cmd[5] = track & 0xff; + cgc.cmd[8] = 8; + cgc.quiet = 1; + + ret = pkt_generic_packet(pd, &cgc); + if (ret) + return ret; + + cgc.buflen = be16_to_cpu(ti->track_information_length) + + sizeof(ti->track_information_length); + + if (cgc.buflen > sizeof(track_information)) + cgc.buflen = sizeof(track_information); + + cgc.cmd[8] = cgc.buflen; + return pkt_generic_packet(pd, &cgc); +} + +static noinline_for_stack int pkt_get_last_written(struct pktcdvd_device *pd, + long *last_written) +{ + disc_information di; + track_information ti; + __u32 last_track; + int ret; + + ret = pkt_get_disc_info(pd, &di); + if (ret) + return ret; + + last_track = (di.last_track_msb << 8) | di.last_track_lsb; + ret = pkt_get_track_info(pd, last_track, 1, &ti); + if (ret) + return ret; + + /* if this track is blank, try the previous. */ + if (ti.blank) { + last_track--; + ret = pkt_get_track_info(pd, last_track, 1, &ti); + if (ret) + return ret; + } + + /* if last recorded field is valid, return it. 
*/ + if (ti.lra_v) { + *last_written = be32_to_cpu(ti.last_rec_address); + } else { + /* make it up instead */ + *last_written = be32_to_cpu(ti.track_start) + + be32_to_cpu(ti.track_size); + if (ti.free_blocks) + *last_written -= (be32_to_cpu(ti.free_blocks) + 7); + } + return 0; +} + +/* + * write mode select package based on pd->settings + */ +static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd) +{ + struct packet_command cgc; + struct scsi_sense_hdr sshdr; + write_param_page *wp; + char buffer[128]; + int ret, size; + + /* doesn't apply to DVD+RW or DVD-RAM */ + if ((pd->mmc3_profile == 0x1a) || (pd->mmc3_profile == 0x12)) + return 0; + + memset(buffer, 0, sizeof(buffer)); + init_cdrom_command(&cgc, buffer, sizeof(*wp), CGC_DATA_READ); + cgc.sshdr = &sshdr; + ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0); + if (ret) { + pkt_dump_sense(pd, &cgc); + return ret; + } + + size = 2 + ((buffer[0] << 8) | (buffer[1] & 0xff)); + pd->mode_offset = (buffer[6] << 8) | (buffer[7] & 0xff); + if (size > sizeof(buffer)) + size = sizeof(buffer); + + /* + * now get it all + */ + init_cdrom_command(&cgc, buffer, size, CGC_DATA_READ); + cgc.sshdr = &sshdr; + ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0); + if (ret) { + pkt_dump_sense(pd, &cgc); + return ret; + } + + /* + * write page is offset header + block descriptor length + */ + wp = (write_param_page *) &buffer[sizeof(struct mode_page_header) + pd->mode_offset]; + + wp->fp = pd->settings.fp; + wp->track_mode = pd->settings.track_mode; + wp->write_type = pd->settings.write_type; + wp->data_block_type = pd->settings.block_mode; + + wp->multi_session = 0; + +#ifdef PACKET_USE_LS + wp->link_size = 7; + wp->ls_v = 1; +#endif + + if (wp->data_block_type == PACKET_BLOCK_MODE1) { + wp->session_format = 0; + wp->subhdr2 = 0x20; + } else if (wp->data_block_type == PACKET_BLOCK_MODE2) { + wp->session_format = 0x20; + wp->subhdr2 = 8; +#if 0 + wp->mcn[0] = 0x80; + memcpy(&wp->mcn[1], PACKET_MCN, sizeof(wp->mcn) - 1); +#endif + } else { + /* + * paranoia + */ + pkt_err(pd, "write mode wrong %d\n", wp->data_block_type); + return 1; + } + wp->packet_size = cpu_to_be32(pd->settings.size >> 2); + + cgc.buflen = cgc.cmd[8] = size; + ret = pkt_mode_select(pd, &cgc); + if (ret) { + pkt_dump_sense(pd, &cgc); + return ret; + } + + pkt_print_settings(pd); + return 0; +} + +/* + * 1 -- we can write to this track, 0 -- we can't + */ +static int pkt_writable_track(struct pktcdvd_device *pd, track_information *ti) +{ + switch (pd->mmc3_profile) { + case 0x1a: /* DVD+RW */ + case 0x12: /* DVD-RAM */ + /* The track is always writable on DVD+RW/DVD-RAM */ + return 1; + default: + break; + } + + if (!ti->packet || !ti->fp) + return 0; + + /* + * "good" settings as per Mt Fuji. + */ + if (ti->rt == 0 && ti->blank == 0) + return 1; + + if (ti->rt == 0 && ti->blank == 1) + return 1; + + if (ti->rt == 1 && ti->blank == 0) + return 1; + + pkt_err(pd, "bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet); + return 0; +} + +/* + * 1 -- we can write to this disc, 0 -- we can't + */ +static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di) +{ + switch (pd->mmc3_profile) { + case 0x0a: /* CD-RW */ + case 0xffff: /* MMC3 not supported */ + break; + case 0x1a: /* DVD+RW */ + case 0x13: /* DVD-RW */ + case 0x12: /* DVD-RAM */ + return 1; + default: + pkt_dbg(2, pd, "Wrong disc profile (%x)\n", + pd->mmc3_profile); + return 0; + } + + /* + * for disc type 0xff we should probably reserve a new track. 
+ * but i'm not sure, should we leave this to user apps? probably. + */ + if (di->disc_type == 0xff) { + pkt_notice(pd, "unknown disc - no track?\n"); + return 0; + } + + if (di->disc_type != 0x20 && di->disc_type != 0) { + pkt_err(pd, "wrong disc type (%x)\n", di->disc_type); + return 0; + } + + if (di->erasable == 0) { + pkt_notice(pd, "disc not erasable\n"); + return 0; + } + + if (di->border_status == PACKET_SESSION_RESERVED) { + pkt_err(pd, "can't write to last track (reserved)\n"); + return 0; + } + + return 1; +} + +static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd) +{ + struct packet_command cgc; + unsigned char buf[12]; + disc_information di; + track_information ti; + int ret, track; + + init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ); + cgc.cmd[0] = GPCMD_GET_CONFIGURATION; + cgc.cmd[8] = 8; + ret = pkt_generic_packet(pd, &cgc); + pd->mmc3_profile = ret ? 0xffff : buf[6] << 8 | buf[7]; + + memset(&di, 0, sizeof(disc_information)); + memset(&ti, 0, sizeof(track_information)); + + ret = pkt_get_disc_info(pd, &di); + if (ret) { + pkt_err(pd, "failed get_disc\n"); + return ret; + } + + if (!pkt_writable_disc(pd, &di)) + return -EROFS; + + pd->type = di.erasable ? PACKET_CDRW : PACKET_CDR; + + track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */ + ret = pkt_get_track_info(pd, track, 1, &ti); + if (ret) { + pkt_err(pd, "failed get_track\n"); + return ret; + } + + if (!pkt_writable_track(pd, &ti)) { + pkt_err(pd, "can't write to this track\n"); + return -EROFS; + } + + /* + * we keep packet size in 512 byte units, makes it easier to + * deal with request calculations. + */ + pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2; + if (pd->settings.size == 0) { + pkt_notice(pd, "detected zero packet size!\n"); + return -ENXIO; + } + if (pd->settings.size > PACKET_MAX_SECTORS) { + pkt_err(pd, "packet size is too big\n"); + return -EROFS; + } + pd->settings.fp = ti.fp; + pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1); + + if (ti.nwa_v) { + pd->nwa = be32_to_cpu(ti.next_writable); + set_bit(PACKET_NWA_VALID, &pd->flags); + } + + /* + * in theory we could use lra on -RW media as well and just zero + * blocks that haven't been written yet, but in practice that + * is just a no-go. we'll use that for -R, naturally. 
+ */ + if (ti.lra_v) { + pd->lra = be32_to_cpu(ti.last_rec_address); + set_bit(PACKET_LRA_VALID, &pd->flags); + } else { + pd->lra = 0xffffffff; + set_bit(PACKET_LRA_VALID, &pd->flags); + } + + /* + * fine for now + */ + pd->settings.link_loss = 7; + pd->settings.write_type = 0; /* packet */ + pd->settings.track_mode = ti.track_mode; + + /* + * mode1 or mode2 disc + */ + switch (ti.data_mode) { + case PACKET_MODE1: + pd->settings.block_mode = PACKET_BLOCK_MODE1; + break; + case PACKET_MODE2: + pd->settings.block_mode = PACKET_BLOCK_MODE2; + break; + default: + pkt_err(pd, "unknown data mode\n"); + return -EROFS; + } + return 0; +} + +/* + * enable/disable write caching on drive + */ +static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd, + int set) +{ + struct packet_command cgc; + struct scsi_sense_hdr sshdr; + unsigned char buf[64]; + int ret; + + init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ); + cgc.sshdr = &sshdr; + cgc.buflen = pd->mode_offset + 12; + + /* + * caching mode page might not be there, so quiet this command + */ + cgc.quiet = 1; + + ret = pkt_mode_sense(pd, &cgc, GPMODE_WCACHING_PAGE, 0); + if (ret) + return ret; + + buf[pd->mode_offset + 10] |= (!!set << 2); + + cgc.buflen = cgc.cmd[8] = 2 + ((buf[0] << 8) | (buf[1] & 0xff)); + ret = pkt_mode_select(pd, &cgc); + if (ret) { + pkt_err(pd, "write caching control failed\n"); + pkt_dump_sense(pd, &cgc); + } else if (!ret && set) + pkt_notice(pd, "enabled write caching\n"); + return ret; +} + +static int pkt_lock_door(struct pktcdvd_device *pd, int lockflag) +{ + struct packet_command cgc; + + init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE); + cgc.cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL; + cgc.cmd[4] = lockflag ? 1 : 0; + return pkt_generic_packet(pd, &cgc); +} + +/* + * Returns drive maximum write speed + */ +static noinline_for_stack int pkt_get_max_speed(struct pktcdvd_device *pd, + unsigned *write_speed) +{ + struct packet_command cgc; + struct scsi_sense_hdr sshdr; + unsigned char buf[256+18]; + unsigned char *cap_buf; + int ret, offset; + + cap_buf = &buf[sizeof(struct mode_page_header) + pd->mode_offset]; + init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_UNKNOWN); + cgc.sshdr = &sshdr; + + ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0); + if (ret) { + cgc.buflen = pd->mode_offset + cap_buf[1] + 2 + + sizeof(struct mode_page_header); + ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0); + if (ret) { + pkt_dump_sense(pd, &cgc); + return ret; + } + } + + offset = 20; /* Obsoleted field, used by older drives */ + if (cap_buf[1] >= 28) + offset = 28; /* Current write speed selected */ + if (cap_buf[1] >= 30) { + /* If the drive reports at least one "Logical Unit Write + * Speed Performance Descriptor Block", use the information + * in the first block. 
(contains the highest speed) + */ + int num_spdb = (cap_buf[30] << 8) + cap_buf[31]; + if (num_spdb > 0) + offset = 34; + } + + *write_speed = (cap_buf[offset] << 8) | cap_buf[offset + 1]; + return 0; +} + +/* These tables from cdrecord - I don't have orange book */ +/* standard speed CD-RW (1-4x) */ +static char clv_to_speed[16] = { + /* 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 */ + 0, 2, 4, 6, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +}; +/* high speed CD-RW (-10x) */ +static char hs_clv_to_speed[16] = { + /* 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 */ + 0, 2, 4, 6, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +}; +/* ultra high speed CD-RW */ +static char us_clv_to_speed[16] = { + /* 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 */ + 0, 2, 4, 8, 0, 0,16, 0,24,32,40,48, 0, 0, 0, 0 +}; + +/* + * reads the maximum media speed from ATIP + */ +static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd, + unsigned *speed) +{ + struct packet_command cgc; + struct scsi_sense_hdr sshdr; + unsigned char buf[64]; + unsigned int size, st, sp; + int ret; + + init_cdrom_command(&cgc, buf, 2, CGC_DATA_READ); + cgc.sshdr = &sshdr; + cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP; + cgc.cmd[1] = 2; + cgc.cmd[2] = 4; /* READ ATIP */ + cgc.cmd[8] = 2; + ret = pkt_generic_packet(pd, &cgc); + if (ret) { + pkt_dump_sense(pd, &cgc); + return ret; + } + size = ((unsigned int) buf[0]<<8) + buf[1] + 2; + if (size > sizeof(buf)) + size = sizeof(buf); + + init_cdrom_command(&cgc, buf, size, CGC_DATA_READ); + cgc.sshdr = &sshdr; + cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP; + cgc.cmd[1] = 2; + cgc.cmd[2] = 4; + cgc.cmd[8] = size; + ret = pkt_generic_packet(pd, &cgc); + if (ret) { + pkt_dump_sense(pd, &cgc); + return ret; + } + + if (!(buf[6] & 0x40)) { + pkt_notice(pd, "disc type is not CD-RW\n"); + return 1; + } + if (!(buf[6] & 0x4)) { + pkt_notice(pd, "A1 values on media are not valid, maybe not CDRW?\n"); + return 1; + } + + st = (buf[6] >> 3) & 0x7; /* disc sub-type */ + + sp = buf[16] & 0xf; /* max speed from ATIP A1 field */ + + /* Info from cdrecord */ + switch (st) { + case 0: /* standard speed */ + *speed = clv_to_speed[sp]; + break; + case 1: /* high speed */ + *speed = hs_clv_to_speed[sp]; + break; + case 2: /* ultra high speed */ + *speed = us_clv_to_speed[sp]; + break; + default: + pkt_notice(pd, "unknown disc sub-type %d\n", st); + return 1; + } + if (*speed) { + pkt_info(pd, "maximum media speed: %d\n", *speed); + return 0; + } else { + pkt_notice(pd, "unknown speed %d for sub-type %d\n", sp, st); + return 1; + } +} + +static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd) +{ + struct packet_command cgc; + struct scsi_sense_hdr sshdr; + int ret; + + pkt_dbg(2, pd, "Performing OPC\n"); + + init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE); + cgc.sshdr = &sshdr; + cgc.timeout = 60*HZ; + cgc.cmd[0] = GPCMD_SEND_OPC; + cgc.cmd[1] = 1; + ret = pkt_generic_packet(pd, &cgc); + if (ret) + pkt_dump_sense(pd, &cgc); + return ret; +} + +static int pkt_open_write(struct pktcdvd_device *pd) +{ + int ret; + unsigned int write_speed, media_write_speed, read_speed; + + ret = pkt_probe_settings(pd); + if (ret) { + pkt_dbg(2, pd, "failed probe\n"); + return ret; + } + + ret = pkt_set_write_settings(pd); + if (ret) { + pkt_dbg(1, pd, "failed saving write settings\n"); + return -EIO; + } + + pkt_write_caching(pd, USE_WCACHING); + + ret = pkt_get_max_speed(pd, &write_speed); + if (ret) + write_speed = 16 * 177; + switch (pd->mmc3_profile) { + case 0x13: /* DVD-RW */ + case 0x1a: /* DVD+RW */ + case 0x12: /* DVD-RAM */ + pkt_dbg(1, pd, 
"write speed %ukB/s\n", write_speed); + break; + default: + ret = pkt_media_speed(pd, &media_write_speed); + if (ret) + media_write_speed = 16; + write_speed = min(write_speed, media_write_speed * 177); + pkt_dbg(1, pd, "write speed %ux\n", write_speed / 176); + break; + } + read_speed = write_speed; + + ret = pkt_set_speed(pd, write_speed, read_speed); + if (ret) { + pkt_dbg(1, pd, "couldn't set write speed\n"); + return -EIO; + } + pd->write_speed = write_speed; + pd->read_speed = read_speed; + + ret = pkt_perform_opc(pd); + if (ret) { + pkt_dbg(1, pd, "Optimum Power Calibration failed\n"); + } + + return 0; +} + +/* + * called at open time. + */ +static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write) +{ + int ret; + long lba; + struct request_queue *q; + struct block_device *bdev; + + /* + * We need to re-open the cdrom device without O_NONBLOCK to be able + * to read/write from/to it. It is already opened in O_NONBLOCK mode + * so open should not fail. + */ + bdev = blkdev_get_by_dev(pd->bdev->bd_dev, FMODE_READ | FMODE_EXCL, pd); + if (IS_ERR(bdev)) { + ret = PTR_ERR(bdev); + goto out; + } + + ret = pkt_get_last_written(pd, &lba); + if (ret) { + pkt_err(pd, "pkt_get_last_written failed\n"); + goto out_putdev; + } + + set_capacity(pd->disk, lba << 2); + set_capacity_and_notify(pd->bdev->bd_disk, lba << 2); + + q = bdev_get_queue(pd->bdev); + if (write) { + ret = pkt_open_write(pd); + if (ret) + goto out_putdev; + /* + * Some CDRW drives can not handle writes larger than one packet, + * even if the size is a multiple of the packet size. + */ + blk_queue_max_hw_sectors(q, pd->settings.size); + set_bit(PACKET_WRITABLE, &pd->flags); + } else { + pkt_set_speed(pd, MAX_SPEED, MAX_SPEED); + clear_bit(PACKET_WRITABLE, &pd->flags); + } + + ret = pkt_set_segment_merging(pd, q); + if (ret) + goto out_putdev; + + if (write) { + if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) { + pkt_err(pd, "not enough memory for buffers\n"); + ret = -ENOMEM; + goto out_putdev; + } + pkt_info(pd, "%lukB available on disc\n", lba << 1); + } + + return 0; + +out_putdev: + blkdev_put(bdev, FMODE_READ | FMODE_EXCL); +out: + return ret; +} + +/* + * called when the device is closed. makes sure that the device flushes + * the internal cache before we close. 
+ */ +static void pkt_release_dev(struct pktcdvd_device *pd, int flush) +{ + if (flush && pkt_flush_cache(pd)) + pkt_dbg(1, pd, "not flushing cache\n"); + + pkt_lock_door(pd, 0); + + pkt_set_speed(pd, MAX_SPEED, MAX_SPEED); + blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL); + + pkt_shrink_pktlist(pd); +} + +static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor) +{ + if (dev_minor >= MAX_WRITERS) + return NULL; + + dev_minor = array_index_nospec(dev_minor, MAX_WRITERS); + return pkt_devs[dev_minor]; +} + +static int pkt_open(struct block_device *bdev, fmode_t mode) +{ + struct pktcdvd_device *pd = NULL; + int ret; + + mutex_lock(&pktcdvd_mutex); + mutex_lock(&ctl_mutex); + pd = pkt_find_dev_from_minor(MINOR(bdev->bd_dev)); + if (!pd) { + ret = -ENODEV; + goto out; + } + BUG_ON(pd->refcnt < 0); + + pd->refcnt++; + if (pd->refcnt > 1) { + if ((mode & FMODE_WRITE) && + !test_bit(PACKET_WRITABLE, &pd->flags)) { + ret = -EBUSY; + goto out_dec; + } + } else { + ret = pkt_open_dev(pd, mode & FMODE_WRITE); + if (ret) + goto out_dec; + /* + * needed here as well, since ext2 (among others) may change + * the blocksize at mount time + */ + set_blocksize(bdev, CD_FRAMESIZE); + } + + mutex_unlock(&ctl_mutex); + mutex_unlock(&pktcdvd_mutex); + return 0; + +out_dec: + pd->refcnt--; +out: + mutex_unlock(&ctl_mutex); + mutex_unlock(&pktcdvd_mutex); + return ret; +} + +static void pkt_close(struct gendisk *disk, fmode_t mode) +{ + struct pktcdvd_device *pd = disk->private_data; + + mutex_lock(&pktcdvd_mutex); + mutex_lock(&ctl_mutex); + pd->refcnt--; + BUG_ON(pd->refcnt < 0); + if (pd->refcnt == 0) { + int flush = test_bit(PACKET_WRITABLE, &pd->flags); + pkt_release_dev(pd, flush); + } + mutex_unlock(&ctl_mutex); + mutex_unlock(&pktcdvd_mutex); +} + + +static void pkt_end_io_read_cloned(struct bio *bio) +{ + struct packet_stacked_data *psd = bio->bi_private; + struct pktcdvd_device *pd = psd->pd; + + psd->bio->bi_status = bio->bi_status; + bio_put(bio); + bio_endio(psd->bio); + mempool_free(psd, &psd_pool); + pkt_bio_finished(pd); +} + +static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio) +{ + struct bio *cloned_bio = + bio_alloc_clone(pd->bdev, bio, GFP_NOIO, &pkt_bio_set); + struct packet_stacked_data *psd = mempool_alloc(&psd_pool, GFP_NOIO); + + psd->pd = pd; + psd->bio = bio; + cloned_bio->bi_private = psd; + cloned_bio->bi_end_io = pkt_end_io_read_cloned; + pd->stats.secs_r += bio_sectors(bio); + pkt_queue_bio(pd, cloned_bio); +} + +static void pkt_make_request_write(struct request_queue *q, struct bio *bio) +{ + struct pktcdvd_device *pd = q->queuedata; + sector_t zone; + struct packet_data *pkt; + int was_empty, blocked_bio; + struct pkt_rb_node *node; + + zone = get_zone(bio->bi_iter.bi_sector, pd); + + /* + * If we find a matching packet in state WAITING or READ_WAIT, we can + * just append this bio to that packet. 
+ */ + spin_lock(&pd->cdrw.active_list_lock); + blocked_bio = 0; + list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) { + if (pkt->sector == zone) { + spin_lock(&pkt->lock); + if ((pkt->state == PACKET_WAITING_STATE) || + (pkt->state == PACKET_READ_WAIT_STATE)) { + bio_list_add(&pkt->orig_bios, bio); + pkt->write_size += + bio->bi_iter.bi_size / CD_FRAMESIZE; + if ((pkt->write_size >= pkt->frames) && + (pkt->state == PACKET_WAITING_STATE)) { + atomic_inc(&pkt->run_sm); + wake_up(&pd->wqueue); + } + spin_unlock(&pkt->lock); + spin_unlock(&pd->cdrw.active_list_lock); + return; + } else { + blocked_bio = 1; + } + spin_unlock(&pkt->lock); + } + } + spin_unlock(&pd->cdrw.active_list_lock); + + /* + * Test if there is enough room left in the bio work queue + * (queue size >= congestion on mark). + * If not, wait till the work queue size is below the congestion off mark. + */ + spin_lock(&pd->lock); + if (pd->write_congestion_on > 0 + && pd->bio_queue_size >= pd->write_congestion_on) { + struct wait_bit_queue_entry wqe; + + init_wait_var_entry(&wqe, &pd->congested, 0); + for (;;) { + prepare_to_wait_event(__var_waitqueue(&pd->congested), + &wqe.wq_entry, + TASK_UNINTERRUPTIBLE); + if (pd->bio_queue_size <= pd->write_congestion_off) + break; + pd->congested = true; + spin_unlock(&pd->lock); + schedule(); + spin_lock(&pd->lock); + } + } + spin_unlock(&pd->lock); + + /* + * No matching packet found. Store the bio in the work queue. + */ + node = mempool_alloc(&pd->rb_pool, GFP_NOIO); + node->bio = bio; + spin_lock(&pd->lock); + BUG_ON(pd->bio_queue_size < 0); + was_empty = (pd->bio_queue_size == 0); + pkt_rbtree_insert(pd, node); + spin_unlock(&pd->lock); + + /* + * Wake up the worker thread. + */ + atomic_set(&pd->scan_queue, 1); + if (was_empty) { + /* This wake_up is required for correct operation */ + wake_up(&pd->wqueue); + } else if (!list_empty(&pd->cdrw.pkt_free_list) && !blocked_bio) { + /* + * This wake up is not required for correct operation, + * but improves performance in some cases. + */ + wake_up(&pd->wqueue); + } +} + +static void pkt_submit_bio(struct bio *bio) +{ + struct pktcdvd_device *pd = bio->bi_bdev->bd_disk->queue->queuedata; + struct bio *split; + + bio = bio_split_to_limits(bio); + + pkt_dbg(2, pd, "start = %6llx stop = %6llx\n", + (unsigned long long)bio->bi_iter.bi_sector, + (unsigned long long)bio_end_sector(bio)); + + /* + * Clone READ bios so we can have our own bi_end_io callback. 
+ */ + if (bio_data_dir(bio) == READ) { + pkt_make_request_read(pd, bio); + return; + } + + if (!test_bit(PACKET_WRITABLE, &pd->flags)) { + pkt_notice(pd, "WRITE for ro device (%llu)\n", + (unsigned long long)bio->bi_iter.bi_sector); + goto end_io; + } + + if (!bio->bi_iter.bi_size || (bio->bi_iter.bi_size % CD_FRAMESIZE)) { + pkt_err(pd, "wrong bio size\n"); + goto end_io; + } + + do { + sector_t zone = get_zone(bio->bi_iter.bi_sector, pd); + sector_t last_zone = get_zone(bio_end_sector(bio) - 1, pd); + + if (last_zone != zone) { + BUG_ON(last_zone != zone + pd->settings.size); + + split = bio_split(bio, last_zone - + bio->bi_iter.bi_sector, + GFP_NOIO, &pkt_bio_set); + bio_chain(split, bio); + } else { + split = bio; + } + + pkt_make_request_write(bio->bi_bdev->bd_disk->queue, split); + } while (split != bio); + + return; +end_io: + bio_io_error(bio); +} + +static void pkt_init_queue(struct pktcdvd_device *pd) +{ + struct request_queue *q = pd->disk->queue; + + blk_queue_logical_block_size(q, CD_FRAMESIZE); + blk_queue_max_hw_sectors(q, PACKET_MAX_SECTORS); + q->queuedata = pd; +} + +static int pkt_seq_show(struct seq_file *m, void *p) +{ + struct pktcdvd_device *pd = m->private; + char *msg; + int states[PACKET_NUM_STATES]; + + seq_printf(m, "Writer %s mapped to %pg:\n", pd->name, pd->bdev); + + seq_printf(m, "\nSettings:\n"); + seq_printf(m, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2); + + if (pd->settings.write_type == 0) + msg = "Packet"; + else + msg = "Unknown"; + seq_printf(m, "\twrite type:\t\t%s\n", msg); + + seq_printf(m, "\tpacket type:\t\t%s\n", pd->settings.fp ? "Fixed" : "Variable"); + seq_printf(m, "\tlink loss:\t\t%d\n", pd->settings.link_loss); + + seq_printf(m, "\ttrack mode:\t\t%d\n", pd->settings.track_mode); + + if (pd->settings.block_mode == PACKET_BLOCK_MODE1) + msg = "Mode 1"; + else if (pd->settings.block_mode == PACKET_BLOCK_MODE2) + msg = "Mode 2"; + else + msg = "Unknown"; + seq_printf(m, "\tblock mode:\t\t%s\n", msg); + + seq_printf(m, "\nStatistics:\n"); + seq_printf(m, "\tpackets started:\t%lu\n", pd->stats.pkt_started); + seq_printf(m, "\tpackets ended:\t\t%lu\n", pd->stats.pkt_ended); + seq_printf(m, "\twritten:\t\t%lukB\n", pd->stats.secs_w >> 1); + seq_printf(m, "\tread gather:\t\t%lukB\n", pd->stats.secs_rg >> 1); + seq_printf(m, "\tread:\t\t\t%lukB\n", pd->stats.secs_r >> 1); + + seq_printf(m, "\nMisc:\n"); + seq_printf(m, "\treference count:\t%d\n", pd->refcnt); + seq_printf(m, "\tflags:\t\t\t0x%lx\n", pd->flags); + seq_printf(m, "\tread speed:\t\t%ukB/s\n", pd->read_speed); + seq_printf(m, "\twrite speed:\t\t%ukB/s\n", pd->write_speed); + seq_printf(m, "\tstart offset:\t\t%lu\n", pd->offset); + seq_printf(m, "\tmode page offset:\t%u\n", pd->mode_offset); + + seq_printf(m, "\nQueue state:\n"); + seq_printf(m, "\tbios queued:\t\t%d\n", pd->bio_queue_size); + seq_printf(m, "\tbios pending:\t\t%d\n", atomic_read(&pd->cdrw.pending_bios)); + seq_printf(m, "\tcurrent sector:\t\t0x%llx\n", (unsigned long long)pd->current_sector); + + pkt_count_states(pd, states); + seq_printf(m, "\tstate:\t\t\ti:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n", + states[0], states[1], states[2], states[3], states[4], states[5]); + + seq_printf(m, "\twrite congestion marks:\toff=%d on=%d\n", + pd->write_congestion_off, + pd->write_congestion_on); + return 0; +} + +static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev) +{ + int i; + struct block_device *bdev; + struct scsi_device *sdev; + + if (pd->pkt_dev == dev) { + pkt_err(pd, "recursive setup not allowed\n"); + return 
-EBUSY; + } + for (i = 0; i < MAX_WRITERS; i++) { + struct pktcdvd_device *pd2 = pkt_devs[i]; + if (!pd2) + continue; + if (pd2->bdev->bd_dev == dev) { + pkt_err(pd, "%pg already setup\n", pd2->bdev); + return -EBUSY; + } + if (pd2->pkt_dev == dev) { + pkt_err(pd, "can't chain pktcdvd devices\n"); + return -EBUSY; + } + } + + bdev = blkdev_get_by_dev(dev, FMODE_READ | FMODE_NDELAY, NULL); + if (IS_ERR(bdev)) + return PTR_ERR(bdev); + sdev = scsi_device_from_queue(bdev->bd_disk->queue); + if (!sdev) { + blkdev_put(bdev, FMODE_READ | FMODE_NDELAY); + return -EINVAL; + } + put_device(&sdev->sdev_gendev); + + /* This is safe, since we have a reference from open(). */ + __module_get(THIS_MODULE); + + pd->bdev = bdev; + set_blocksize(bdev, CD_FRAMESIZE); + + pkt_init_queue(pd); + + atomic_set(&pd->cdrw.pending_bios, 0); + pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->name); + if (IS_ERR(pd->cdrw.thread)) { + pkt_err(pd, "can't start kernel thread\n"); + goto out_mem; + } + + proc_create_single_data(pd->name, 0, pkt_proc, pkt_seq_show, pd); + pkt_dbg(1, pd, "writer mapped to %pg\n", bdev); + return 0; + +out_mem: + blkdev_put(bdev, FMODE_READ | FMODE_NDELAY); + /* This is safe: open() is still holding a reference. */ + module_put(THIS_MODULE); + return -ENOMEM; +} + +static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) +{ + struct pktcdvd_device *pd = bdev->bd_disk->private_data; + int ret; + + pkt_dbg(2, pd, "cmd %x, dev %d:%d\n", + cmd, MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev)); + + mutex_lock(&pktcdvd_mutex); + switch (cmd) { + case CDROMEJECT: + /* + * The door gets locked when the device is opened, so we + * have to unlock it or else the eject command fails. + */ + if (pd->refcnt == 1) + pkt_lock_door(pd, 0); + fallthrough; + /* + * forward selected CDROM ioctls to CD-ROM, for UDF + */ + case CDROMMULTISESSION: + case CDROMREADTOCENTRY: + case CDROM_LAST_WRITTEN: + case CDROM_SEND_PACKET: + case SCSI_IOCTL_SEND_COMMAND: + if (!bdev->bd_disk->fops->ioctl) + ret = -ENOTTY; + else + ret = bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg); + break; + default: + pkt_dbg(2, pd, "Unknown ioctl (%x)\n", cmd); + ret = -ENOTTY; + } + mutex_unlock(&pktcdvd_mutex); + + return ret; +} + +static unsigned int pkt_check_events(struct gendisk *disk, + unsigned int clearing) +{ + struct pktcdvd_device *pd = disk->private_data; + struct gendisk *attached_disk; + + if (!pd) + return 0; + if (!pd->bdev) + return 0; + attached_disk = pd->bdev->bd_disk; + if (!attached_disk || !attached_disk->fops->check_events) + return 0; + return attached_disk->fops->check_events(attached_disk, clearing); +} + +static char *pkt_devnode(struct gendisk *disk, umode_t *mode) +{ + return kasprintf(GFP_KERNEL, "pktcdvd/%s", disk->disk_name); +} + +static const struct block_device_operations pktcdvd_ops = { + .owner = THIS_MODULE, + .submit_bio = pkt_submit_bio, + .open = pkt_open, + .release = pkt_close, + .ioctl = pkt_ioctl, + .compat_ioctl = blkdev_compat_ptr_ioctl, + .check_events = pkt_check_events, + .devnode = pkt_devnode, +}; + +/* + * Set up mapping from pktcdvd device to CD-ROM device. 
+ */ +static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev) +{ + int idx; + int ret = -ENOMEM; + struct pktcdvd_device *pd; + struct gendisk *disk; + + mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); + + for (idx = 0; idx < MAX_WRITERS; idx++) + if (!pkt_devs[idx]) + break; + if (idx == MAX_WRITERS) { + pr_err("max %d writers supported\n", MAX_WRITERS); + ret = -EBUSY; + goto out_mutex; + } + + pd = kzalloc(sizeof(struct pktcdvd_device), GFP_KERNEL); + if (!pd) + goto out_mutex; + + ret = mempool_init_kmalloc_pool(&pd->rb_pool, PKT_RB_POOL_SIZE, + sizeof(struct pkt_rb_node)); + if (ret) + goto out_mem; + + INIT_LIST_HEAD(&pd->cdrw.pkt_free_list); + INIT_LIST_HEAD(&pd->cdrw.pkt_active_list); + spin_lock_init(&pd->cdrw.active_list_lock); + + spin_lock_init(&pd->lock); + spin_lock_init(&pd->iosched.lock); + bio_list_init(&pd->iosched.read_queue); + bio_list_init(&pd->iosched.write_queue); + sprintf(pd->name, DRIVER_NAME"%d", idx); + init_waitqueue_head(&pd->wqueue); + pd->bio_queue = RB_ROOT; + + pd->write_congestion_on = write_congestion_on; + pd->write_congestion_off = write_congestion_off; + + ret = -ENOMEM; + disk = blk_alloc_disk(NUMA_NO_NODE); + if (!disk) + goto out_mem; + pd->disk = disk; + disk->major = pktdev_major; + disk->first_minor = idx; + disk->minors = 1; + disk->fops = &pktcdvd_ops; + disk->flags = GENHD_FL_REMOVABLE | GENHD_FL_NO_PART; + strcpy(disk->disk_name, pd->name); + disk->private_data = pd; + + pd->pkt_dev = MKDEV(pktdev_major, idx); + ret = pkt_new_dev(pd, dev); + if (ret) + goto out_mem2; + + /* inherit events of the host device */ + disk->events = pd->bdev->bd_disk->events; + + ret = add_disk(disk); + if (ret) + goto out_mem2; + + pkt_sysfs_dev_new(pd); + pkt_debugfs_dev_new(pd); + + pkt_devs[idx] = pd; + if (pkt_dev) + *pkt_dev = pd->pkt_dev; + + mutex_unlock(&ctl_mutex); + return 0; + +out_mem2: + put_disk(disk); +out_mem: + mempool_exit(&pd->rb_pool); + kfree(pd); +out_mutex: + mutex_unlock(&ctl_mutex); + pr_err("setup of pktcdvd device failed\n"); + return ret; +} + +/* + * Tear down mapping from pktcdvd device to CD-ROM device. + */ +static int pkt_remove_dev(dev_t pkt_dev) +{ + struct pktcdvd_device *pd; + int idx; + int ret = 0; + + mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); + + for (idx = 0; idx < MAX_WRITERS; idx++) { + pd = pkt_devs[idx]; + if (pd && (pd->pkt_dev == pkt_dev)) + break; + } + if (idx == MAX_WRITERS) { + pr_debug("dev not setup\n"); + ret = -ENXIO; + goto out; + } + + if (pd->refcnt > 0) { + ret = -EBUSY; + goto out; + } + if (!IS_ERR(pd->cdrw.thread)) + kthread_stop(pd->cdrw.thread); + + pkt_devs[idx] = NULL; + + pkt_debugfs_dev_remove(pd); + pkt_sysfs_dev_remove(pd); + + blkdev_put(pd->bdev, FMODE_READ | FMODE_NDELAY); + + remove_proc_entry(pd->name, pkt_proc); + pkt_dbg(1, pd, "writer unmapped\n"); + + del_gendisk(pd->disk); + put_disk(pd->disk); + + mempool_exit(&pd->rb_pool); + kfree(pd); + + /* This is safe: open() is still holding a reference. 
*/ + module_put(THIS_MODULE); + +out: + mutex_unlock(&ctl_mutex); + return ret; +} + +static void pkt_get_status(struct pkt_ctrl_command *ctrl_cmd) +{ + struct pktcdvd_device *pd; + + mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); + + pd = pkt_find_dev_from_minor(ctrl_cmd->dev_index); + if (pd) { + ctrl_cmd->dev = new_encode_dev(pd->bdev->bd_dev); + ctrl_cmd->pkt_dev = new_encode_dev(pd->pkt_dev); + } else { + ctrl_cmd->dev = 0; + ctrl_cmd->pkt_dev = 0; + } + ctrl_cmd->num_devices = MAX_WRITERS; + + mutex_unlock(&ctl_mutex); +} + +static long pkt_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + void __user *argp = (void __user *)arg; + struct pkt_ctrl_command ctrl_cmd; + int ret = 0; + dev_t pkt_dev = 0; + + if (cmd != PACKET_CTRL_CMD) + return -ENOTTY; + + if (copy_from_user(&ctrl_cmd, argp, sizeof(struct pkt_ctrl_command))) + return -EFAULT; + + switch (ctrl_cmd.command) { + case PKT_CTRL_CMD_SETUP: + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + ret = pkt_setup_dev(new_decode_dev(ctrl_cmd.dev), &pkt_dev); + ctrl_cmd.pkt_dev = new_encode_dev(pkt_dev); + break; + case PKT_CTRL_CMD_TEARDOWN: + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + ret = pkt_remove_dev(new_decode_dev(ctrl_cmd.pkt_dev)); + break; + case PKT_CTRL_CMD_STATUS: + pkt_get_status(&ctrl_cmd); + break; + default: + return -ENOTTY; + } + + if (copy_to_user(argp, &ctrl_cmd, sizeof(struct pkt_ctrl_command))) + return -EFAULT; + return ret; +} + +#ifdef CONFIG_COMPAT +static long pkt_ctl_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + return pkt_ctl_ioctl(file, cmd, (unsigned long)compat_ptr(arg)); +} +#endif + +static const struct file_operations pkt_ctl_fops = { + .open = nonseekable_open, + .unlocked_ioctl = pkt_ctl_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = pkt_ctl_compat_ioctl, +#endif + .owner = THIS_MODULE, + .llseek = no_llseek, +}; + +static struct miscdevice pkt_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = DRIVER_NAME, + .nodename = "pktcdvd/control", + .fops = &pkt_ctl_fops +}; + +static int __init pkt_init(void) +{ + int ret; + + mutex_init(&ctl_mutex); + + ret = mempool_init_kmalloc_pool(&psd_pool, PSD_POOL_SIZE, + sizeof(struct packet_stacked_data)); + if (ret) + return ret; + ret = bioset_init(&pkt_bio_set, BIO_POOL_SIZE, 0, 0); + if (ret) { + mempool_exit(&psd_pool); + return ret; + } + + ret = register_blkdev(pktdev_major, DRIVER_NAME); + if (ret < 0) { + pr_err("unable to register block device\n"); + goto out2; + } + if (!pktdev_major) + pktdev_major = ret; + + ret = pkt_sysfs_init(); + if (ret) + goto out; + + pkt_debugfs_init(); + + ret = misc_register(&pkt_misc); + if (ret) { + pr_err("unable to register misc device\n"); + goto out_misc; + } + + pkt_proc = proc_mkdir("driver/"DRIVER_NAME, NULL); + + return 0; + +out_misc: + pkt_debugfs_cleanup(); + pkt_sysfs_cleanup(); +out: + unregister_blkdev(pktdev_major, DRIVER_NAME); +out2: + mempool_exit(&psd_pool); + bioset_exit(&pkt_bio_set); + return ret; +} + +static void __exit pkt_exit(void) +{ + remove_proc_entry("driver/"DRIVER_NAME, NULL); + misc_deregister(&pkt_misc); + + pkt_debugfs_cleanup(); + pkt_sysfs_cleanup(); + + unregister_blkdev(pktdev_major, DRIVER_NAME); + mempool_exit(&psd_pool); + bioset_exit(&pkt_bio_set); +} + +MODULE_DESCRIPTION("Packet writing layer for CD/DVD drives"); +MODULE_AUTHOR("Jens Axboe <axboe@suse.de>"); +MODULE_LICENSE("GPL"); + +module_init(pkt_init); +module_exit(pkt_exit); diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c index 
c76e0148eada..574e470b220b 100644 --- a/drivers/block/ps3vram.c +++ b/drivers/block/ps3vram.c @@ -587,6 +587,8 @@ static void ps3vram_submit_bio(struct bio *bio) dev_dbg(&dev->core, "%s\n", __func__); bio = bio_split_to_limits(bio); + if (!bio) + return; spin_lock_irq(&priv->lock); busy = !bio_list_empty(&priv->list); diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c index e9de9d846b73..17b677b5d3b2 100644 --- a/drivers/block/ublk_drv.c +++ b/drivers/block/ublk_drv.c @@ -1992,6 +1992,9 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd, struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd; int ret = -EINVAL; + if (issue_flags & IO_URING_F_NONBLOCK) + return -EAGAIN; + ublk_ctrl_cmd_dump(cmd); if (!(issue_flags & IO_URING_F_SQE128)) diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 68bd2f7961b3..6a77fa917428 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -315,22 +315,35 @@ static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx) virtqueue_notify(vq->vq); } +static blk_status_t virtblk_fail_to_queue(struct request *req, int rc) +{ + virtblk_cleanup_cmd(req); + switch (rc) { + case -ENOSPC: + return BLK_STS_DEV_RESOURCE; + case -ENOMEM: + return BLK_STS_RESOURCE; + default: + return BLK_STS_IOERR; + } +} + static blk_status_t virtblk_prep_rq(struct blk_mq_hw_ctx *hctx, struct virtio_blk *vblk, struct request *req, struct virtblk_req *vbr) { blk_status_t status; + int num; status = virtblk_setup_cmd(vblk->vdev, req, vbr); if (unlikely(status)) return status; - vbr->sg_table.nents = virtblk_map_data(hctx, req, vbr); - if (unlikely(vbr->sg_table.nents < 0)) { - virtblk_cleanup_cmd(req); - return BLK_STS_RESOURCE; - } + num = virtblk_map_data(hctx, req, vbr); + if (unlikely(num < 0)) + return virtblk_fail_to_queue(req, -ENOMEM); + vbr->sg_table.nents = num; blk_mq_start_request(req); @@ -364,15 +377,7 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx, blk_mq_stop_hw_queue(hctx); spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags); virtblk_unmap_data(req, vbr); - virtblk_cleanup_cmd(req); - switch (err) { - case -ENOSPC: - return BLK_STS_DEV_RESOURCE; - case -ENOMEM: - return BLK_STS_RESOURCE; - default: - return BLK_STS_IOERR; - } + return virtblk_fail_to_queue(req, err); } if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq)) @@ -991,7 +996,7 @@ static int virtblk_probe(struct virtio_device *vdev) blk_queue_max_segments(q, sg_elems); /* No real sector limit. 
*/ - blk_queue_max_hw_sectors(q, -1U); + blk_queue_max_hw_sectors(q, UINT_MAX); max_size = virtio_max_dma_size(vdev); diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c index 828c66bbaa67..2a6b4f676458 100644 --- a/drivers/bus/imx-weim.c +++ b/drivers/bus/imx-weim.c @@ -263,7 +263,6 @@ static int weim_parse_dt(struct platform_device *pdev) static int weim_probe(struct platform_device *pdev) { struct weim_priv *priv; - struct resource *res; struct clk *clk; void __iomem *base; int ret; @@ -273,8 +272,7 @@ static int weim_probe(struct platform_device *pdev) return -ENOMEM; /* get the resource */ - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - base = devm_ioremap_resource(&pdev->dev, res); + base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL); if (IS_ERR(base)) return PTR_ERR(base); diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c index d69905233aff..7e513b771832 100644 --- a/drivers/char/tpm/tpm-interface.c +++ b/drivers/char/tpm/tpm-interface.c @@ -412,7 +412,9 @@ int tpm_pm_suspend(struct device *dev) } suspended: - return rc; + if (rc) + dev_err(dev, "Ignoring error %d while suspending\n", rc); + return 0; } EXPORT_SYMBOL_GPL(tpm_pm_suspend); diff --git a/drivers/crypto/caam/blob_gen.c b/drivers/crypto/caam/blob_gen.c index 1f65df489847..f46b161d2cda 100644 --- a/drivers/crypto/caam/blob_gen.c +++ b/drivers/crypto/caam/blob_gen.c @@ -104,7 +104,7 @@ int caam_process_blob(struct caam_blob_priv *priv, } ctrlpriv = dev_get_drvdata(jrdev->parent); - moo = FIELD_GET(CSTA_MOO, ioread32(&ctrlpriv->ctrl->perfmon.status)); + moo = FIELD_GET(CSTA_MOO, rd_reg32(&ctrlpriv->ctrl->perfmon.status)); if (moo != CSTA_MOO_SECURE && moo != CSTA_MOO_TRUSTED) dev_warn(jrdev, "using insecure test key, enable HAB to use unique device key!\n"); diff --git a/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c b/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c index e553ccadbcbc..e5876286828b 100644 --- a/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c +++ b/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c @@ -239,7 +239,8 @@ static int virtio_crypto_alg_skcipher_close_session( pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n", ctrl_status->status, destroy_session->session_id); - return -EINVAL; + err = -EINVAL; + goto out; } err = 0; diff --git a/drivers/dma-buf/dma-buf-sysfs-stats.c b/drivers/dma-buf/dma-buf-sysfs-stats.c index f69d68122b9b..fbf725fae7c1 100644 --- a/drivers/dma-buf/dma-buf-sysfs-stats.c +++ b/drivers/dma-buf/dma-buf-sysfs-stats.c @@ -168,14 +168,11 @@ void dma_buf_uninit_sysfs_statistics(void) kset_unregister(dma_buf_stats_kset); } -int dma_buf_stats_setup(struct dma_buf *dmabuf) +int dma_buf_stats_setup(struct dma_buf *dmabuf, struct file *file) { struct dma_buf_sysfs_entry *sysfs_entry; int ret; - if (!dmabuf || !dmabuf->file) - return -EINVAL; - if (!dmabuf->exp_name) { pr_err("exporter name must not be empty if stats needed\n"); return -EINVAL; @@ -192,7 +189,7 @@ int dma_buf_stats_setup(struct dma_buf *dmabuf) /* create the directory for buffer stats */ ret = kobject_init_and_add(&sysfs_entry->kobj, &dma_buf_ktype, NULL, - "%lu", file_inode(dmabuf->file)->i_ino); + "%lu", file_inode(file)->i_ino); if (ret) goto err_sysfs_dmabuf; diff --git a/drivers/dma-buf/dma-buf-sysfs-stats.h b/drivers/dma-buf/dma-buf-sysfs-stats.h index a49c6e2650cc..7a8a995b75ba 100644 --- a/drivers/dma-buf/dma-buf-sysfs-stats.h +++ b/drivers/dma-buf/dma-buf-sysfs-stats.h @@ -13,7 +13,7 @@ int 
dma_buf_init_sysfs_statistics(void); void dma_buf_uninit_sysfs_statistics(void); -int dma_buf_stats_setup(struct dma_buf *dmabuf); +int dma_buf_stats_setup(struct dma_buf *dmabuf, struct file *file); void dma_buf_stats_teardown(struct dma_buf *dmabuf); #else @@ -25,7 +25,7 @@ static inline int dma_buf_init_sysfs_statistics(void) static inline void dma_buf_uninit_sysfs_statistics(void) {} -static inline int dma_buf_stats_setup(struct dma_buf *dmabuf) +static inline int dma_buf_stats_setup(struct dma_buf *dmabuf, struct file *file) { return 0; } diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index b6c36914e7c6..e6528767efc7 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -95,10 +95,11 @@ static int dma_buf_file_release(struct inode *inode, struct file *file) return -EINVAL; dmabuf = file->private_data; - - mutex_lock(&db_list.lock); - list_del(&dmabuf->list_node); - mutex_unlock(&db_list.lock); + if (dmabuf) { + mutex_lock(&db_list.lock); + list_del(&dmabuf->list_node); + mutex_unlock(&db_list.lock); + } return 0; } @@ -528,17 +529,17 @@ static inline int is_dma_buf_file(struct file *file) return file->f_op == &dma_buf_fops; } -static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags) +static struct file *dma_buf_getfile(size_t size, int flags) { static atomic64_t dmabuf_inode = ATOMIC64_INIT(0); - struct file *file; struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb); + struct file *file; if (IS_ERR(inode)) return ERR_CAST(inode); - inode->i_size = dmabuf->size; - inode_set_bytes(inode, dmabuf->size); + inode->i_size = size; + inode_set_bytes(inode, size); /* * The ->i_ino acquired from get_next_ino() is not unique thus @@ -552,8 +553,6 @@ static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags) flags, &dma_buf_fops); if (IS_ERR(file)) goto err_alloc_file; - file->private_data = dmabuf; - file->f_path.dentry->d_fsdata = dmabuf; return file; @@ -619,19 +618,11 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info) size_t alloc_size = sizeof(struct dma_buf); int ret; - if (!exp_info->resv) - alloc_size += sizeof(struct dma_resv); - else - /* prevent &dma_buf[1] == dma_buf->resv */ - alloc_size += 1; - - if (WARN_ON(!exp_info->priv - || !exp_info->ops - || !exp_info->ops->map_dma_buf - || !exp_info->ops->unmap_dma_buf - || !exp_info->ops->release)) { + if (WARN_ON(!exp_info->priv || !exp_info->ops + || !exp_info->ops->map_dma_buf + || !exp_info->ops->unmap_dma_buf + || !exp_info->ops->release)) return ERR_PTR(-EINVAL); - } if (WARN_ON(exp_info->ops->cache_sgt_mapping && (exp_info->ops->pin || exp_info->ops->unpin))) @@ -643,10 +634,21 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info) if (!try_module_get(exp_info->owner)) return ERR_PTR(-ENOENT); + file = dma_buf_getfile(exp_info->size, exp_info->flags); + if (IS_ERR(file)) { + ret = PTR_ERR(file); + goto err_module; + } + + if (!exp_info->resv) + alloc_size += sizeof(struct dma_resv); + else + /* prevent &dma_buf[1] == dma_buf->resv */ + alloc_size += 1; dmabuf = kzalloc(alloc_size, GFP_KERNEL); if (!dmabuf) { ret = -ENOMEM; - goto err_module; + goto err_file; } dmabuf->priv = exp_info->priv; @@ -658,43 +660,35 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info) init_waitqueue_head(&dmabuf->poll); dmabuf->cb_in.poll = dmabuf->cb_out.poll = &dmabuf->poll; dmabuf->cb_in.active = dmabuf->cb_out.active = 0; + INIT_LIST_HEAD(&dmabuf->attachments); if (!resv) { - resv = (struct dma_resv 
*)&dmabuf[1]; - dma_resv_init(resv); + dmabuf->resv = (struct dma_resv *)&dmabuf[1]; + dma_resv_init(dmabuf->resv); + } else { + dmabuf->resv = resv; } - dmabuf->resv = resv; - file = dma_buf_getfile(dmabuf, exp_info->flags); - if (IS_ERR(file)) { - ret = PTR_ERR(file); + ret = dma_buf_stats_setup(dmabuf, file); + if (ret) goto err_dmabuf; - } + file->private_data = dmabuf; + file->f_path.dentry->d_fsdata = dmabuf; dmabuf->file = file; - INIT_LIST_HEAD(&dmabuf->attachments); - mutex_lock(&db_list.lock); list_add(&dmabuf->list_node, &db_list.head); mutex_unlock(&db_list.lock); - ret = dma_buf_stats_setup(dmabuf); - if (ret) - goto err_sysfs; - return dmabuf; -err_sysfs: - /* - * Set file->f_path.dentry->d_fsdata to NULL so that when - * dma_buf_release() gets invoked by dentry_ops, it exits - * early before calling the release() dma_buf op. - */ - file->f_path.dentry->d_fsdata = NULL; - fput(file); err_dmabuf: + if (!resv) + dma_resv_fini(dmabuf->resv); kfree(dmabuf); +err_file: + fput(file); err_module: module_put(exp_info->owner); return ERR_PTR(ret); diff --git a/drivers/firmware/arm_scmi/Kconfig b/drivers/firmware/arm_scmi/Kconfig index a14f65444b35..ea0f5083ac47 100644 --- a/drivers/firmware/arm_scmi/Kconfig +++ b/drivers/firmware/arm_scmi/Kconfig @@ -23,6 +23,38 @@ config ARM_SCMI_PROTOCOL if ARM_SCMI_PROTOCOL +config ARM_SCMI_NEED_DEBUGFS + bool + help + This declares whether at least one SCMI facility is configured + which needs debugfs support. When selected causess the creation + of a common SCMI debugfs root directory. + +config ARM_SCMI_RAW_MODE_SUPPORT + bool "Enable support for SCMI Raw transmission mode" + depends on DEBUG_FS + select ARM_SCMI_NEED_DEBUGFS + help + Enable support for SCMI Raw transmission mode. + + If enabled allows the direct injection and snooping of SCMI bare + messages through a dedicated debugfs interface. + It is meant to be used by SCMI compliance/testing suites. + + When enabled regular SCMI drivers interactions are inhibited in + order to avoid unexpected interactions with the SCMI Raw message + flow. If unsure say N. + +config ARM_SCMI_RAW_MODE_SUPPORT_COEX + bool "Allow SCMI Raw mode coexistence with normal SCMI stack" + depends on ARM_SCMI_RAW_MODE_SUPPORT + help + Allow SCMI Raw transmission mode to coexist with normal SCMI stack. + + This will allow regular SCMI drivers to register with the core and + operate normally, thing which could make an SCMI test suite using the + SCMI Raw mode support unreliable. If unsure, say N. 
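[Editor's note, not part of the patch] The two Raw-mode options above work together: ARM_SCMI_RAW_MODE_SUPPORT alone makes the SCMI core reject device requests coming from regular SCMI drivers, while ARM_SCMI_RAW_MODE_SUPPORT_COEX lets those drivers keep registering alongside the raw debugfs interface. A minimal C sketch of that gating follows; it mirrors the IS_ENABLED() check this series adds to scmi_protocol_device_request() in drivers/firmware/arm_scmi/bus.c, but the helper name itself is hypothetical and only illustrates the Kconfig semantics.

#include <linux/kconfig.h>

/* True when SCMI Raw mode should inhibit devices requested by SCMI drivers. */
static inline bool scmi_raw_mode_blocks_drivers(void)
{
	/* Raw mode without the coexistence option: driver requests are rejected. */
	if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT) &&
	    !IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT_COEX))
		return true;

	/* No Raw mode, or Raw mode plus COEX: drivers register and probe as usual. */
	return false;
}

In bus.c the equivalent check issues a pr_warn() and returns -EINVAL from scmi_protocol_device_request(), which is what makes a Raw-mode-only kernel unusable for normal SCMI drivers, exactly as the help text warns.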
+ config ARM_SCMI_HAVE_TRANSPORT bool help diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile index 9ea86f8cc8f7..b31d78fa66cc 100644 --- a/drivers/firmware/arm_scmi/Makefile +++ b/drivers/firmware/arm_scmi/Makefile @@ -1,6 +1,9 @@ # SPDX-License-Identifier: GPL-2.0-only scmi-bus-y = bus.o +scmi-core-objs := $(scmi-bus-y) + scmi-driver-y = driver.o notify.o +scmi-driver-$(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT) += raw_mode.o scmi-transport-$(CONFIG_ARM_SCMI_HAVE_SHMEM) = shmem.o scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_MAILBOX) += mailbox.o scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_SMC) += smc.o @@ -8,9 +11,11 @@ scmi-transport-$(CONFIG_ARM_SCMI_HAVE_MSG) += msg.o scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_VIRTIO) += virtio.o scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_OPTEE) += optee.o scmi-protocols-y = base.o clock.o perf.o power.o reset.o sensors.o system.o voltage.o powercap.o -scmi-module-objs := $(scmi-bus-y) $(scmi-driver-y) $(scmi-protocols-y) \ - $(scmi-transport-y) +scmi-module-objs := $(scmi-driver-y) $(scmi-protocols-y) $(scmi-transport-y) + +obj-$(CONFIG_ARM_SCMI_PROTOCOL) += scmi-core.o obj-$(CONFIG_ARM_SCMI_PROTOCOL) += scmi-module.o + obj-$(CONFIG_ARM_SCMI_POWER_DOMAIN) += scmi_pm_domain.o obj-$(CONFIG_ARM_SCMI_POWER_CONTROL) += scmi_power_control.o diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c index 35bb70724d44..68cc4b4290c1 100644 --- a/drivers/firmware/arm_scmi/bus.c +++ b/drivers/firmware/arm_scmi/bus.c @@ -7,17 +7,184 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <linux/atomic.h> #include <linux/types.h> #include <linux/module.h> +#include <linux/of.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/device.h> #include "common.h" +BLOCKING_NOTIFIER_HEAD(scmi_requested_devices_nh); +EXPORT_SYMBOL_GPL(scmi_requested_devices_nh); + static DEFINE_IDA(scmi_bus_id); -static DEFINE_IDR(scmi_protocols); -static DEFINE_SPINLOCK(protocol_lock); + +static DEFINE_IDR(scmi_requested_devices); +/* Protect access to scmi_requested_devices */ +static DEFINE_MUTEX(scmi_requested_devices_mtx); + +struct scmi_requested_dev { + const struct scmi_device_id *id_table; + struct list_head node; +}; + +/* Track globally the creation of SCMI SystemPower related devices */ +static atomic_t scmi_syspower_registered = ATOMIC_INIT(0); + +/** + * scmi_protocol_device_request - Helper to request a device + * + * @id_table: A protocol/name pair descriptor for the device to be created. + * + * This helper let an SCMI driver request specific devices identified by the + * @id_table to be created for each active SCMI instance. + * + * The requested device name MUST NOT be already existent for any protocol; + * at first the freshly requested @id_table is annotated in the IDR table + * @scmi_requested_devices and then the requested device is advertised to any + * registered party via the @scmi_requested_devices_nh notification chain. + * + * Return: 0 on Success + */ +static int scmi_protocol_device_request(const struct scmi_device_id *id_table) +{ + int ret = 0; + unsigned int id = 0; + struct list_head *head, *phead = NULL; + struct scmi_requested_dev *rdev; + + pr_debug("Requesting SCMI device (%s) for protocol %x\n", + id_table->name, id_table->protocol_id); + + if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT) && + !IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT_COEX)) { + pr_warn("SCMI Raw mode active. 
Rejecting '%s'/0x%02X\n", + id_table->name, id_table->protocol_id); + return -EINVAL; + } + + /* + * Search for the matching protocol rdev list and then search + * of any existent equally named device...fails if any duplicate found. + */ + mutex_lock(&scmi_requested_devices_mtx); + idr_for_each_entry(&scmi_requested_devices, head, id) { + if (!phead) { + /* A list found registered in the IDR is never empty */ + rdev = list_first_entry(head, struct scmi_requested_dev, + node); + if (rdev->id_table->protocol_id == + id_table->protocol_id) + phead = head; + } + list_for_each_entry(rdev, head, node) { + if (!strcmp(rdev->id_table->name, id_table->name)) { + pr_err("Ignoring duplicate request [%d] %s\n", + rdev->id_table->protocol_id, + rdev->id_table->name); + ret = -EINVAL; + goto out; + } + } + } + + /* + * No duplicate found for requested id_table, so let's create a new + * requested device entry for this new valid request. + */ + rdev = kzalloc(sizeof(*rdev), GFP_KERNEL); + if (!rdev) { + ret = -ENOMEM; + goto out; + } + rdev->id_table = id_table; + + /* + * Append the new requested device table descriptor to the head of the + * related protocol list, eventually creating such head if not already + * there. + */ + if (!phead) { + phead = kzalloc(sizeof(*phead), GFP_KERNEL); + if (!phead) { + kfree(rdev); + ret = -ENOMEM; + goto out; + } + INIT_LIST_HEAD(phead); + + ret = idr_alloc(&scmi_requested_devices, (void *)phead, + id_table->protocol_id, + id_table->protocol_id + 1, GFP_KERNEL); + if (ret != id_table->protocol_id) { + pr_err("Failed to save SCMI device - ret:%d\n", ret); + kfree(rdev); + kfree(phead); + ret = -EINVAL; + goto out; + } + ret = 0; + } + list_add(&rdev->node, phead); + +out: + mutex_unlock(&scmi_requested_devices_mtx); + + if (!ret) + blocking_notifier_call_chain(&scmi_requested_devices_nh, + SCMI_BUS_NOTIFY_DEVICE_REQUEST, + (void *)rdev->id_table); + + return ret; +} + +/** + * scmi_protocol_device_unrequest - Helper to unrequest a device + * + * @id_table: A protocol/name pair descriptor for the device to be unrequested. + * + * The unrequested device, described by the provided id_table, is at first + * removed from the IDR @scmi_requested_devices and then the removal is + * advertised to any registered party via the @scmi_requested_devices_nh + * notification chain. 
+ */ +static void scmi_protocol_device_unrequest(const struct scmi_device_id *id_table) +{ + struct list_head *phead; + + pr_debug("Unrequesting SCMI device (%s) for protocol %x\n", + id_table->name, id_table->protocol_id); + + mutex_lock(&scmi_requested_devices_mtx); + phead = idr_find(&scmi_requested_devices, id_table->protocol_id); + if (phead) { + struct scmi_requested_dev *victim, *tmp; + + list_for_each_entry_safe(victim, tmp, phead, node) { + if (!strcmp(victim->id_table->name, id_table->name)) { + list_del(&victim->node); + + mutex_unlock(&scmi_requested_devices_mtx); + blocking_notifier_call_chain(&scmi_requested_devices_nh, + SCMI_BUS_NOTIFY_DEVICE_UNREQUEST, + (void *)victim->id_table); + kfree(victim); + mutex_lock(&scmi_requested_devices_mtx); + break; + } + } + + if (list_empty(phead)) { + idr_remove(&scmi_requested_devices, + id_table->protocol_id); + kfree(phead); + } + } + mutex_unlock(&scmi_requested_devices_mtx); +} static const struct scmi_device_id * scmi_dev_match_id(struct scmi_device *scmi_dev, struct scmi_driver *scmi_drv) @@ -57,11 +224,11 @@ static int scmi_match_by_id_table(struct device *dev, void *data) struct scmi_device_id *id_table = data; return sdev->protocol_id == id_table->protocol_id && - !strcmp(sdev->name, id_table->name); + (id_table->name && !strcmp(sdev->name, id_table->name)); } -struct scmi_device *scmi_child_dev_find(struct device *parent, - int prot_id, const char *name) +static struct scmi_device *scmi_child_dev_find(struct device *parent, + int prot_id, const char *name) { struct scmi_device_id id_table; struct device *dev; @@ -76,30 +243,6 @@ struct scmi_device *scmi_child_dev_find(struct device *parent, return to_scmi_dev(dev); } -const struct scmi_protocol *scmi_protocol_get(int protocol_id) -{ - const struct scmi_protocol *proto; - - proto = idr_find(&scmi_protocols, protocol_id); - if (!proto || !try_module_get(proto->owner)) { - pr_warn("SCMI Protocol 0x%x not found!\n", protocol_id); - return NULL; - } - - pr_debug("Found SCMI Protocol 0x%x\n", protocol_id); - - return proto; -} - -void scmi_protocol_put(int protocol_id) -{ - const struct scmi_protocol *proto; - - proto = idr_find(&scmi_protocols, protocol_id); - if (proto) - module_put(proto->owner); -} - static int scmi_dev_probe(struct device *dev) { struct scmi_driver *scmi_drv = to_scmi_driver(dev->driver); @@ -120,12 +263,13 @@ static void scmi_dev_remove(struct device *dev) scmi_drv->remove(scmi_dev); } -static struct bus_type scmi_bus_type = { +struct bus_type scmi_bus_type = { .name = "scmi_protocol", .match = scmi_dev_match, .probe = scmi_dev_probe, .remove = scmi_dev_remove, }; +EXPORT_SYMBOL_GPL(scmi_bus_type); int scmi_driver_register(struct scmi_driver *driver, struct module *owner, const char *mod_name) @@ -146,7 +290,7 @@ int scmi_driver_register(struct scmi_driver *driver, struct module *owner, retval = driver_register(&driver->driver); if (!retval) - pr_debug("registered new scmi driver %s\n", driver->name); + pr_debug("Registered new scmi driver %s\n", driver->name); return retval; } @@ -164,13 +308,53 @@ static void scmi_device_release(struct device *dev) kfree(to_scmi_dev(dev)); } -struct scmi_device * -scmi_device_create(struct device_node *np, struct device *parent, int protocol, - const char *name) +static void __scmi_device_destroy(struct scmi_device *scmi_dev) +{ + pr_debug("(%s) Destroying SCMI device '%s' for protocol 0x%x (%s)\n", + of_node_full_name(scmi_dev->dev.parent->of_node), + dev_name(&scmi_dev->dev), scmi_dev->protocol_id, + scmi_dev->name); + + 
if (scmi_dev->protocol_id == SCMI_PROTOCOL_SYSTEM) + atomic_set(&scmi_syspower_registered, 0); + + kfree_const(scmi_dev->name); + ida_free(&scmi_bus_id, scmi_dev->id); + device_unregister(&scmi_dev->dev); +} + +static struct scmi_device * +__scmi_device_create(struct device_node *np, struct device *parent, + int protocol, const char *name) { int id, retval; struct scmi_device *scmi_dev; + /* + * If the same protocol/name device already exist under the same parent + * (i.e. SCMI instance) just return the existent device. + * This avoids any race between the SCMI driver, creating devices for + * each DT defined protocol at probe time, and the concurrent + * registration of SCMI drivers. + */ + scmi_dev = scmi_child_dev_find(parent, protocol, name); + if (scmi_dev) + return scmi_dev; + + /* + * Ignore any possible subsequent failures while creating the device + * since we are doomed anyway at that point; not using a mutex which + * spans across this whole function to keep things simple and to avoid + * to serialize all the __scmi_device_create calls across possibly + * different SCMI server instances (parent) + */ + if (protocol == SCMI_PROTOCOL_SYSTEM && + atomic_cmpxchg(&scmi_syspower_registered, 0, 1)) { + dev_warn(parent, + "SCMI SystemPower protocol device must be unique !\n"); + return NULL; + } + scmi_dev = kzalloc(sizeof(*scmi_dev), GFP_KERNEL); if (!scmi_dev) return NULL; @@ -200,6 +384,10 @@ scmi_device_create(struct device_node *np, struct device *parent, int protocol, if (retval) goto put_dev; + pr_debug("(%s) Created SCMI device '%s' for protocol 0x%x (%s)\n", + of_node_full_name(parent->of_node), + dev_name(&scmi_dev->dev), protocol, name); + return scmi_dev; put_dev: kfree_const(scmi_dev->name); @@ -208,77 +396,85 @@ put_dev: return NULL; } -void scmi_device_destroy(struct scmi_device *scmi_dev) -{ - kfree_const(scmi_dev->name); - scmi_handle_put(scmi_dev->handle); - ida_free(&scmi_bus_id, scmi_dev->id); - device_unregister(&scmi_dev->dev); -} - -void scmi_device_link_add(struct device *consumer, struct device *supplier) -{ - struct device_link *link; - - link = device_link_add(consumer, supplier, DL_FLAG_AUTOREMOVE_CONSUMER); - - WARN_ON(!link); -} - -void scmi_set_handle(struct scmi_device *scmi_dev) -{ - scmi_dev->handle = scmi_handle_get(&scmi_dev->dev); - if (scmi_dev->handle) - scmi_device_link_add(&scmi_dev->dev, scmi_dev->handle->dev); -} - -int scmi_protocol_register(const struct scmi_protocol *proto) +/** + * scmi_device_create - A method to create one or more SCMI devices + * + * @np: A reference to the device node to use for the new device(s) + * @parent: The parent device to use identifying a specific SCMI instance + * @protocol: The SCMI protocol to be associated with this device + * @name: The requested-name of the device to be created; this is optional + * and if no @name is provided, all the devices currently known to + * be requested on the SCMI bus for @protocol will be created. + * + * This method can be invoked to create a single well-defined device (like + * a transport device or a device requested by an SCMI driver loaded after + * the core SCMI stack has been probed), or to create all the devices currently + * known to have been requested by the loaded SCMI drivers for a specific + * protocol (typically during SCMI core protocol enumeration at probe time). 
+ * + * Return: The created device (or one of them if @name was NOT provided and + * multiple devices were created) or NULL if no device was created; + * note that NULL indicates an error ONLY in case a specific @name + * was provided: when @name param was not provided, a number of devices + * could have been potentially created for a whole protocol, unless no + * device was found to have been requested for that specific protocol. + */ +struct scmi_device *scmi_device_create(struct device_node *np, + struct device *parent, int protocol, + const char *name) { - int ret; - - if (!proto) { - pr_err("invalid protocol\n"); - return -EINVAL; - } - - if (!proto->instance_init) { - pr_err("missing init for protocol 0x%x\n", proto->id); - return -EINVAL; + struct list_head *phead; + struct scmi_requested_dev *rdev; + struct scmi_device *scmi_dev = NULL; + + if (name) + return __scmi_device_create(np, parent, protocol, name); + + mutex_lock(&scmi_requested_devices_mtx); + phead = idr_find(&scmi_requested_devices, protocol); + /* Nothing to do. */ + if (!phead) { + mutex_unlock(&scmi_requested_devices_mtx); + return scmi_dev; } - spin_lock(&protocol_lock); - ret = idr_alloc(&scmi_protocols, (void *)proto, - proto->id, proto->id + 1, GFP_ATOMIC); - spin_unlock(&protocol_lock); - if (ret != proto->id) { - pr_err("unable to allocate SCMI idr slot for 0x%x - err %d\n", - proto->id, ret); - return ret; + /* Walk the list of requested devices for protocol and create them */ + list_for_each_entry(rdev, phead, node) { + struct scmi_device *sdev; + + sdev = __scmi_device_create(np, parent, + rdev->id_table->protocol_id, + rdev->id_table->name); + /* Report errors and carry on... */ + if (sdev) + scmi_dev = sdev; + else + pr_err("(%s) Failed to create device for protocol 0x%x (%s)\n", + of_node_full_name(parent->of_node), + rdev->id_table->protocol_id, + rdev->id_table->name); } + mutex_unlock(&scmi_requested_devices_mtx); - pr_debug("Registered SCMI Protocol 0x%x\n", proto->id); - - return 0; + return scmi_dev; } -EXPORT_SYMBOL_GPL(scmi_protocol_register); +EXPORT_SYMBOL_GPL(scmi_device_create); -void scmi_protocol_unregister(const struct scmi_protocol *proto) +void scmi_device_destroy(struct device *parent, int protocol, const char *name) { - spin_lock(&protocol_lock); - idr_remove(&scmi_protocols, proto->id); - spin_unlock(&protocol_lock); - - pr_debug("Unregistered SCMI Protocol 0x%x\n", proto->id); + struct scmi_device *scmi_dev; - return; + scmi_dev = scmi_child_dev_find(parent, protocol, name); + if (scmi_dev) + __scmi_device_destroy(scmi_dev); } -EXPORT_SYMBOL_GPL(scmi_protocol_unregister); +EXPORT_SYMBOL_GPL(scmi_device_destroy); static int __scmi_devices_unregister(struct device *dev, void *data) { struct scmi_device *scmi_dev = to_scmi_dev(dev); - scmi_device_destroy(scmi_dev); + __scmi_device_destroy(scmi_dev); return 0; } @@ -287,20 +483,33 @@ static void scmi_devices_unregister(void) bus_for_each_dev(&scmi_bus_type, NULL, NULL, __scmi_devices_unregister); } -int __init scmi_bus_init(void) +static int __init scmi_bus_init(void) { int retval; retval = bus_register(&scmi_bus_type); if (retval) - pr_err("scmi protocol bus register failed (%d)\n", retval); + pr_err("SCMI protocol bus register failed (%d)\n", retval); + + pr_info("SCMI protocol bus registered\n"); return retval; } +subsys_initcall(scmi_bus_init); -void __exit scmi_bus_exit(void) +static void __exit scmi_bus_exit(void) { + /* + * Destroy all remaining devices: just in case the drivers were + * manually unbound and at first and then the 
modules unloaded. + */ scmi_devices_unregister(); bus_unregister(&scmi_bus_type); ida_destroy(&scmi_bus_id); } +module_exit(scmi_bus_exit); + +MODULE_ALIAS("scmi-core"); +MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>"); +MODULE_DESCRIPTION("ARM SCMI protocol bus"); +MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h index a1c0154c31c6..c46dc5215af7 100644 --- a/drivers/firmware/arm_scmi/common.h +++ b/drivers/firmware/arm_scmi/common.h @@ -27,6 +27,48 @@ #include "protocols.h" #include "notify.h" +#define SCMI_MAX_CHANNELS 256 + +#define SCMI_MAX_RESPONSE_TIMEOUT (2 * MSEC_PER_SEC) + +enum scmi_error_codes { + SCMI_SUCCESS = 0, /* Success */ + SCMI_ERR_SUPPORT = -1, /* Not supported */ + SCMI_ERR_PARAMS = -2, /* Invalid Parameters */ + SCMI_ERR_ACCESS = -3, /* Invalid access/permission denied */ + SCMI_ERR_ENTRY = -4, /* Not found */ + SCMI_ERR_RANGE = -5, /* Value out of range */ + SCMI_ERR_BUSY = -6, /* Device busy */ + SCMI_ERR_COMMS = -7, /* Communication Error */ + SCMI_ERR_GENERIC = -8, /* Generic Error */ + SCMI_ERR_HARDWARE = -9, /* Hardware Error */ + SCMI_ERR_PROTOCOL = -10,/* Protocol Error */ +}; + +static const int scmi_linux_errmap[] = { + /* better than switch case as long as return value is continuous */ + 0, /* SCMI_SUCCESS */ + -EOPNOTSUPP, /* SCMI_ERR_SUPPORT */ + -EINVAL, /* SCMI_ERR_PARAM */ + -EACCES, /* SCMI_ERR_ACCESS */ + -ENOENT, /* SCMI_ERR_ENTRY */ + -ERANGE, /* SCMI_ERR_RANGE */ + -EBUSY, /* SCMI_ERR_BUSY */ + -ECOMM, /* SCMI_ERR_COMMS */ + -EIO, /* SCMI_ERR_GENERIC */ + -EREMOTEIO, /* SCMI_ERR_HARDWARE */ + -EPROTO, /* SCMI_ERR_PROTOCOL */ +}; + +static inline int scmi_to_linux_errno(int errno) +{ + int err_idx = -errno; + + if (err_idx >= SCMI_SUCCESS && err_idx < ARRAY_SIZE(scmi_linux_errmap)) + return scmi_linux_errmap[err_idx]; + return -EIO; +} + #define MSG_ID_MASK GENMASK(7, 0) #define MSG_XTRACT_ID(hdr) FIELD_GET(MSG_ID_MASK, (hdr)) #define MSG_TYPE_MASK GENMASK(9, 8) @@ -96,18 +138,19 @@ static inline void unpack_scmi_header(u32 msg_hdr, struct scmi_msg_hdr *hdr) struct scmi_revision_info * scmi_revision_area_get(const struct scmi_protocol_handle *ph); -int scmi_handle_put(const struct scmi_handle *handle); -void scmi_device_link_add(struct device *consumer, struct device *supplier); -struct scmi_handle *scmi_handle_get(struct device *dev); -void scmi_set_handle(struct scmi_device *scmi_dev); void scmi_setup_protocol_implemented(const struct scmi_protocol_handle *ph, u8 *prot_imp); -int __init scmi_bus_init(void); -void __exit scmi_bus_exit(void); +extern struct bus_type scmi_bus_type; + +#define SCMI_BUS_NOTIFY_DEVICE_REQUEST 0 +#define SCMI_BUS_NOTIFY_DEVICE_UNREQUEST 1 +extern struct blocking_notifier_head scmi_requested_devices_nh; -const struct scmi_protocol *scmi_protocol_get(int protocol_id); -void scmi_protocol_put(int protocol_id); +struct scmi_device *scmi_device_create(struct device_node *np, + struct device *parent, int protocol, + const char *name); +void scmi_device_destroy(struct device *parent, int protocol, const char *name); int scmi_protocol_acquire(const struct scmi_handle *handle, u8 protocol_id); void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id); @@ -116,6 +159,8 @@ void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id); /** * struct scmi_chan_info - Structure representing a SCMI channel information * + * @id: An identifier for this channel: this matches the protocol number + * used to initialize this channel * @dev: 
Reference to device in the SCMI hierarchy corresponding to this * channel * @rx_timeout_ms: The configured RX timeout in milliseconds. @@ -127,6 +172,7 @@ void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id); * @transport_info: Transport layer related information */ struct scmi_chan_info { + int id; struct device *dev; unsigned int rx_timeout_ms; struct scmi_handle *handle; @@ -153,7 +199,7 @@ struct scmi_chan_info { */ struct scmi_transport_ops { int (*link_supplier)(struct device *dev); - bool (*chan_available)(struct device *dev, int idx); + bool (*chan_available)(struct device_node *of_node, int idx); int (*chan_setup)(struct scmi_chan_info *cinfo, struct device *dev, bool tx); int (*chan_free)(int id, void *p, void *data); @@ -170,11 +216,6 @@ struct scmi_transport_ops { bool (*poll_done)(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer); }; -int scmi_protocol_device_request(const struct scmi_device_id *id_table); -void scmi_protocol_device_unrequest(const struct scmi_device_id *id_table); -struct scmi_device *scmi_child_dev_find(struct device *parent, - int prot_id, const char *name); - /** * struct scmi_desc - Description of SoC integration * @@ -215,6 +256,36 @@ struct scmi_desc { const bool atomic_enabled; }; +static inline bool is_polling_required(struct scmi_chan_info *cinfo, + const struct scmi_desc *desc) +{ + return cinfo->no_completion_irq || desc->force_polling; +} + +static inline bool is_transport_polling_capable(const struct scmi_desc *desc) +{ + return desc->ops->poll_done || desc->sync_cmds_completed_on_ret; +} + +static inline bool is_polling_enabled(struct scmi_chan_info *cinfo, + const struct scmi_desc *desc) +{ + return is_polling_required(cinfo, desc) && + is_transport_polling_capable(desc); +} + +void scmi_xfer_raw_put(const struct scmi_handle *handle, + struct scmi_xfer *xfer); +struct scmi_xfer *scmi_xfer_raw_get(const struct scmi_handle *handle); +struct scmi_chan_info * +scmi_xfer_raw_channel_get(const struct scmi_handle *handle, u8 protocol_id); + +int scmi_xfer_raw_inflight_register(const struct scmi_handle *handle, + struct scmi_xfer *xfer); + +int scmi_xfer_raw_wait_for_message_response(struct scmi_chan_info *cinfo, + struct scmi_xfer *xfer, + unsigned int timeout_ms); #ifdef CONFIG_ARM_SCMI_TRANSPORT_MAILBOX extern const struct scmi_desc scmi_mailbox_desc; #endif @@ -229,7 +300,6 @@ extern const struct scmi_desc scmi_optee_desc; #endif void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr, void *priv); -void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id); /* shmem related declarations */ struct scmi_shared_mem; diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index f818d00bb2c6..d21c7eafd641 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -14,7 +14,10 @@ * Copyright (C) 2018-2021 ARM Ltd. 
*/ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/bitmap.h> +#include <linux/debugfs.h> #include <linux/device.h> #include <linux/export.h> #include <linux/idr.h> @@ -34,22 +37,15 @@ #include "common.h" #include "notify.h" +#include "raw_mode.h" + #define CREATE_TRACE_POINTS #include <trace/events/scmi.h> -enum scmi_error_codes { - SCMI_SUCCESS = 0, /* Success */ - SCMI_ERR_SUPPORT = -1, /* Not supported */ - SCMI_ERR_PARAMS = -2, /* Invalid Parameters */ - SCMI_ERR_ACCESS = -3, /* Invalid access/permission denied */ - SCMI_ERR_ENTRY = -4, /* Not found */ - SCMI_ERR_RANGE = -5, /* Value out of range */ - SCMI_ERR_BUSY = -6, /* Device busy */ - SCMI_ERR_COMMS = -7, /* Communication Error */ - SCMI_ERR_GENERIC = -8, /* Generic Error */ - SCMI_ERR_HARDWARE = -9, /* Hardware Error */ - SCMI_ERR_PROTOCOL = -10,/* Protocol Error */ -}; +static DEFINE_IDA(scmi_id); + +static DEFINE_IDR(scmi_protocols); +static DEFINE_SPINLOCK(protocol_lock); /* List of all SCMI devices active in system */ static LIST_HEAD(scmi_list); @@ -58,18 +54,7 @@ static DEFINE_MUTEX(scmi_list_mutex); /* Track the unique id for the transfers for debug & profiling purpose */ static atomic_t transfer_last_id; -static DEFINE_IDR(scmi_requested_devices); -static DEFINE_MUTEX(scmi_requested_devices_mtx); - -/* Track globally the creation of SCMI SystemPower related devices */ -static bool scmi_syspower_registered; -/* Protect access to scmi_syspower_registered */ -static DEFINE_MUTEX(scmi_syspower_mtx); - -struct scmi_requested_dev { - const struct scmi_device_id *id_table; - struct list_head node; -}; +static struct dentry *scmi_top_dentry; /** * struct scmi_xfers_info - Structure to manage transfer information @@ -118,8 +103,23 @@ struct scmi_protocol_instance { #define ph_to_pi(h) container_of(h, struct scmi_protocol_instance, ph) /** + * struct scmi_debug_info - Debug common info + * @top_dentry: A reference to the top debugfs dentry + * @name: Name of this SCMI instance + * @type: Type of this SCMI instance + * @is_atomic: Flag to state if the transport of this instance is atomic + */ +struct scmi_debug_info { + struct dentry *top_dentry; + const char *name; + const char *type; + bool is_atomic; +}; + +/** * struct scmi_info - Structure representing a SCMI instance * + * @id: A sequence number starting from zero identifying this instance * @dev: Device pointer * @desc: SoC description for this instance * @version: SCMI revision information containing protocol version, @@ -147,8 +147,15 @@ struct scmi_protocol_instance { * @notify_priv: Pointer to private data structure specific to notifications. * @node: List head * @users: Number of users of this instance + * @bus_nb: A notifier to listen for device bind/unbind on the scmi bus + * @dev_req_nb: A notifier to listen for device request/unrequest on the scmi + * bus + * @devreq_mtx: A mutex to serialize device creation for this SCMI instance + * @dbg: A pointer to debugfs related data (if any) + * @raw: An opaque reference handle used by SCMI Raw mode. 
*/ struct scmi_info { + int id; struct device *dev; const struct scmi_desc *desc; struct scmi_revision_info version; @@ -166,32 +173,114 @@ struct scmi_info { void *notify_priv; struct list_head node; int users; + struct notifier_block bus_nb; + struct notifier_block dev_req_nb; + /* Serialize device creation process for this instance */ + struct mutex devreq_mtx; + struct scmi_debug_info *dbg; + void *raw; }; #define handle_to_scmi_info(h) container_of(h, struct scmi_info, handle) +#define bus_nb_to_scmi_info(nb) container_of(nb, struct scmi_info, bus_nb) +#define req_nb_to_scmi_info(nb) container_of(nb, struct scmi_info, dev_req_nb) -static const int scmi_linux_errmap[] = { - /* better than switch case as long as return value is continuous */ - 0, /* SCMI_SUCCESS */ - -EOPNOTSUPP, /* SCMI_ERR_SUPPORT */ - -EINVAL, /* SCMI_ERR_PARAM */ - -EACCES, /* SCMI_ERR_ACCESS */ - -ENOENT, /* SCMI_ERR_ENTRY */ - -ERANGE, /* SCMI_ERR_RANGE */ - -EBUSY, /* SCMI_ERR_BUSY */ - -ECOMM, /* SCMI_ERR_COMMS */ - -EIO, /* SCMI_ERR_GENERIC */ - -EREMOTEIO, /* SCMI_ERR_HARDWARE */ - -EPROTO, /* SCMI_ERR_PROTOCOL */ -}; +static const struct scmi_protocol *scmi_protocol_get(int protocol_id) +{ + const struct scmi_protocol *proto; + + proto = idr_find(&scmi_protocols, protocol_id); + if (!proto || !try_module_get(proto->owner)) { + pr_warn("SCMI Protocol 0x%x not found!\n", protocol_id); + return NULL; + } + + pr_debug("Found SCMI Protocol 0x%x\n", protocol_id); + + return proto; +} + +static void scmi_protocol_put(int protocol_id) +{ + const struct scmi_protocol *proto; + + proto = idr_find(&scmi_protocols, protocol_id); + if (proto) + module_put(proto->owner); +} + +int scmi_protocol_register(const struct scmi_protocol *proto) +{ + int ret; + + if (!proto) { + pr_err("invalid protocol\n"); + return -EINVAL; + } + + if (!proto->instance_init) { + pr_err("missing init for protocol 0x%x\n", proto->id); + return -EINVAL; + } + + spin_lock(&protocol_lock); + ret = idr_alloc(&scmi_protocols, (void *)proto, + proto->id, proto->id + 1, GFP_ATOMIC); + spin_unlock(&protocol_lock); + if (ret != proto->id) { + pr_err("unable to allocate SCMI idr slot for 0x%x - err %d\n", + proto->id, ret); + return ret; + } + + pr_debug("Registered SCMI Protocol 0x%x\n", proto->id); + + return 0; +} +EXPORT_SYMBOL_GPL(scmi_protocol_register); + +void scmi_protocol_unregister(const struct scmi_protocol *proto) +{ + spin_lock(&protocol_lock); + idr_remove(&scmi_protocols, proto->id); + spin_unlock(&protocol_lock); -static inline int scmi_to_linux_errno(int errno) + pr_debug("Unregistered SCMI Protocol 0x%x\n", proto->id); +} +EXPORT_SYMBOL_GPL(scmi_protocol_unregister); + +/** + * scmi_create_protocol_devices - Create devices for all pending requests for + * this SCMI instance. + * + * @np: The device node describing the protocol + * @info: The SCMI instance descriptor + * @prot_id: The protocol ID + * @name: The optional name of the device to be created: if not provided this + * call will lead to the creation of all the devices currently requested + * for the specified protocol. 
+ */ +static void scmi_create_protocol_devices(struct device_node *np, + struct scmi_info *info, + int prot_id, const char *name) { - int err_idx = -errno; + struct scmi_device *sdev; - if (err_idx >= SCMI_SUCCESS && err_idx < ARRAY_SIZE(scmi_linux_errmap)) - return scmi_linux_errmap[err_idx]; - return -EIO; + mutex_lock(&info->devreq_mtx); + sdev = scmi_device_create(np, info->dev, prot_id, name); + if (name && !sdev) + dev_err(info->dev, + "failed to create device for protocol 0x%X (%s)\n", + prot_id, name); + mutex_unlock(&info->devreq_mtx); +} + +static void scmi_destroy_protocol_devices(struct scmi_info *info, + int prot_id, const char *name) +{ + mutex_lock(&info->devreq_mtx); + scmi_device_destroy(info->dev, prot_id, name); + mutex_unlock(&info->devreq_mtx); } void scmi_notification_instance_data_set(const struct scmi_handle *handle, @@ -311,8 +400,6 @@ static int scmi_xfer_token_set(struct scmi_xfers_info *minfo, if (xfer_id != next_token) atomic_add((int)(xfer_id - next_token), &transfer_last_id); - /* Set in-flight */ - set_bit(xfer_id, minfo->xfer_alloc_table); xfer->hdr.seq = (u16)xfer_id; return 0; @@ -331,32 +418,123 @@ static inline void scmi_xfer_token_clear(struct scmi_xfers_info *minfo, } /** + * scmi_xfer_inflight_register_unlocked - Register the xfer as in-flight + * + * @xfer: The xfer to register + * @minfo: Pointer to Tx/Rx Message management info based on channel type + * + * Note that this helper assumes that the xfer to be registered as in-flight + * had been built using an xfer sequence number which still corresponds to a + * free slot in the xfer_alloc_table. + * + * Context: Assumes to be called with @xfer_lock already acquired. + */ +static inline void +scmi_xfer_inflight_register_unlocked(struct scmi_xfer *xfer, + struct scmi_xfers_info *minfo) +{ + /* Set in-flight */ + set_bit(xfer->hdr.seq, minfo->xfer_alloc_table); + hash_add(minfo->pending_xfers, &xfer->node, xfer->hdr.seq); + xfer->pending = true; +} + +/** + * scmi_xfer_inflight_register - Try to register an xfer as in-flight + * + * @xfer: The xfer to register + * @minfo: Pointer to Tx/Rx Message management info based on channel type + * + * Note that this helper does NOT assume anything about the sequence number + * that was baked into the provided xfer, so it checks at first if it can + * be mapped to a free slot and fails with an error if another xfer with the + * same sequence number is currently still registered as in-flight. + * + * Return: 0 on Success or -EBUSY if the sequence number embedded in the xfer + * could not be mapped to a free slot in the xfer_alloc_table. + */ +static int scmi_xfer_inflight_register(struct scmi_xfer *xfer, + struct scmi_xfers_info *minfo) +{ + int ret = 0; + unsigned long flags; + + spin_lock_irqsave(&minfo->xfer_lock, flags); + if (!test_bit(xfer->hdr.seq, minfo->xfer_alloc_table)) + scmi_xfer_inflight_register_unlocked(xfer, minfo); + else + ret = -EBUSY; + spin_unlock_irqrestore(&minfo->xfer_lock, flags); + + return ret; +} + +/** + * scmi_xfer_raw_inflight_register - A helper to register the given xfer as in + * flight on the TX channel, if possible.
+ * + * @handle: Pointer to SCMI entity handle + * @xfer: The xfer to register + * + * Return: 0 on Success, error otherwise + */ +int scmi_xfer_raw_inflight_register(const struct scmi_handle *handle, + struct scmi_xfer *xfer) +{ + struct scmi_info *info = handle_to_scmi_info(handle); + + return scmi_xfer_inflight_register(xfer, &info->tx_minfo); +} + +/** + * scmi_xfer_pending_set - Pick a proper sequence number and mark the xfer + * as pending in-flight + * + * @xfer: The xfer to act upon + * @minfo: Pointer to Tx/Rx Message management info based on channel type + * + * Return: 0 on Success or error otherwise + */ +static inline int scmi_xfer_pending_set(struct scmi_xfer *xfer, + struct scmi_xfers_info *minfo) +{ + int ret; + unsigned long flags; + + spin_lock_irqsave(&minfo->xfer_lock, flags); + /* Set a new monotonic token as the xfer sequence number */ + ret = scmi_xfer_token_set(minfo, xfer); + if (!ret) + scmi_xfer_inflight_register_unlocked(xfer, minfo); + spin_unlock_irqrestore(&minfo->xfer_lock, flags); + + return ret; +} + +/** * scmi_xfer_get() - Allocate one message * * @handle: Pointer to SCMI entity handle * @minfo: Pointer to Tx/Rx Message management info based on channel type - * @set_pending: If true a monotonic token is picked and the xfer is added to - * the pending hash table. * * Helper function which is used by various message functions that are * exposed to clients of this driver for allocating a message traffic event. * - * Picks an xfer from the free list @free_xfers (if any available) and, if - * required, sets a monotonically increasing token and stores the inflight xfer - * into the @pending_xfers hashtable for later retrieval. + * Picks an xfer from the free list @free_xfers (if any available) and perform + * a basic initialization. + * + * Note that, at this point, still no sequence number is assigned to the + * allocated xfer, nor it is registered as a pending transaction. * * The successfully initialized xfer is refcounted. * - * Context: Holds @xfer_lock while manipulating @xfer_alloc_table and - * @free_xfers. + * Context: Holds @xfer_lock while manipulating @free_xfers. * - * Return: 0 if all went fine, else corresponding error. + * Return: An initialized xfer if all went fine, else pointer error. */ static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle, - struct scmi_xfers_info *minfo, - bool set_pending) + struct scmi_xfers_info *minfo) { - int ret; unsigned long flags; struct scmi_xfer *xfer; @@ -376,31 +554,71 @@ static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle, */ xfer->transfer_id = atomic_inc_return(&transfer_last_id); - if (set_pending) { - /* Pick and set monotonic token */ - ret = scmi_xfer_token_set(minfo, xfer); - if (!ret) { - hash_add(minfo->pending_xfers, &xfer->node, - xfer->hdr.seq); - xfer->pending = true; - } else { - dev_err(handle->dev, - "Failed to get monotonic token %d\n", ret); - hlist_add_head(&xfer->node, &minfo->free_xfers); - xfer = ERR_PTR(ret); - } - } - - if (!IS_ERR(xfer)) { - refcount_set(&xfer->users, 1); - atomic_set(&xfer->busy, SCMI_XFER_FREE); - } + refcount_set(&xfer->users, 1); + atomic_set(&xfer->busy, SCMI_XFER_FREE); spin_unlock_irqrestore(&minfo->xfer_lock, flags); return xfer; } /** + * scmi_xfer_raw_get - Helper to get a bare free xfer from the TX channel + * + * @handle: Pointer to SCMI entity handle + * + * Note that xfer is taken from the TX channel structures. 
+ * + * Return: A valid xfer on Success, or an error-pointer otherwise + */ +struct scmi_xfer *scmi_xfer_raw_get(const struct scmi_handle *handle) +{ + struct scmi_xfer *xfer; + struct scmi_info *info = handle_to_scmi_info(handle); + + xfer = scmi_xfer_get(handle, &info->tx_minfo); + if (!IS_ERR(xfer)) + xfer->flags |= SCMI_XFER_FLAG_IS_RAW; + + return xfer; +} + +/** + * scmi_xfer_raw_channel_get - Helper to get a reference to the proper channel + * to use for a specific protocol_id Raw transaction. + * + * @handle: Pointer to SCMI entity handle + * @protocol_id: Identifier of the protocol + * + * Note that in a regular SCMI stack, usually, a protocol has to be defined in + * the DT to have an associated channel and be usable; but in Raw mode any + * protocol in range is allowed, re-using the Base channel, so as to enable + * fuzzing on any protocol without the need of a fully compiled DT. + * + * Return: A reference to the channel to use, or an ERR_PTR + */ +struct scmi_chan_info * +scmi_xfer_raw_channel_get(const struct scmi_handle *handle, u8 protocol_id) +{ + struct scmi_chan_info *cinfo; + struct scmi_info *info = handle_to_scmi_info(handle); + + cinfo = idr_find(&info->tx_idr, protocol_id); + if (!cinfo) { + if (protocol_id == SCMI_PROTOCOL_BASE) + return ERR_PTR(-EINVAL); + /* Use Base channel for protocols not defined for DT */ + cinfo = idr_find(&info->tx_idr, SCMI_PROTOCOL_BASE); + if (!cinfo) + return ERR_PTR(-EINVAL); + dev_warn_once(handle->dev, + "Using Base channel for protocol 0x%X\n", + protocol_id); + } + + return cinfo; +} + +/** * __scmi_xfer_put() - Release a message * * @minfo: Pointer to Tx/Rx Message management info based on channel type @@ -429,6 +647,24 @@ __scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer) } /** + * scmi_xfer_raw_put - Release an xfer that was taken by @scmi_xfer_raw_get + * + * @handle: Pointer to SCMI entity handle + * @xfer: A reference to the xfer to put + * + * Note that as with other xfer_put() handlers the xfer is really effectively + * released only if there are no more users on the system. 
+ */ +void scmi_xfer_raw_put(const struct scmi_handle *handle, struct scmi_xfer *xfer) +{ + struct scmi_info *info = handle_to_scmi_info(handle); + + xfer->flags &= ~SCMI_XFER_FLAG_IS_RAW; + xfer->flags &= ~SCMI_XFER_FLAG_CHAN_SET; + return __scmi_xfer_put(&info->tx_minfo, xfer); +} + +/** * scmi_xfer_lookup_unlocked - Helper to lookup an xfer_id * * @minfo: Pointer to Tx/Rx Message management info based on channel type @@ -623,25 +859,6 @@ static inline void scmi_clear_channel(struct scmi_info *info, info->desc->ops->clear_channel(cinfo); } -static inline bool is_polling_required(struct scmi_chan_info *cinfo, - struct scmi_info *info) -{ - return cinfo->no_completion_irq || info->desc->force_polling; -} - -static inline bool is_transport_polling_capable(struct scmi_info *info) -{ - return info->desc->ops->poll_done || - info->desc->sync_cmds_completed_on_ret; -} - -static inline bool is_polling_enabled(struct scmi_chan_info *cinfo, - struct scmi_info *info) -{ - return is_polling_required(cinfo, info) && - is_transport_polling_capable(info); -} - static void scmi_handle_notification(struct scmi_chan_info *cinfo, u32 msg_hdr, void *priv) { @@ -652,7 +869,7 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo, ktime_t ts; ts = ktime_get_boottime(); - xfer = scmi_xfer_get(cinfo->handle, minfo, false); + xfer = scmi_xfer_get(cinfo->handle, minfo); if (IS_ERR(xfer)) { dev_err(dev, "failed to get free message slot (%ld)\n", PTR_ERR(xfer)); @@ -667,9 +884,9 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo, info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size, xfer); - trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id, "NOTI", - xfer->hdr.seq, xfer->hdr.status, - xfer->rx.buf, xfer->rx.len); + trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id, + xfer->hdr.id, "NOTI", xfer->hdr.seq, + xfer->hdr.status, xfer->rx.buf, xfer->rx.len); scmi_notify(cinfo->handle, xfer->hdr.protocol_id, xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts); @@ -678,6 +895,12 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo, xfer->hdr.protocol_id, xfer->hdr.seq, MSG_TYPE_NOTIFICATION); + if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) { + xfer->hdr.seq = MSG_XTRACT_TOKEN(msg_hdr); + scmi_raw_message_report(info->raw, xfer, SCMI_RAW_NOTIF_QUEUE, + cinfo->id); + } + __scmi_xfer_put(minfo, xfer); scmi_clear_channel(info, cinfo); @@ -691,6 +914,9 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo, xfer = scmi_xfer_command_acquire(cinfo, msg_hdr); if (IS_ERR(xfer)) { + if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) + scmi_raw_error_report(info->raw, cinfo, msg_hdr, priv); + if (MSG_XTRACT_TYPE(msg_hdr) == MSG_TYPE_DELAYED_RESP) scmi_clear_channel(info, cinfo); return; @@ -705,9 +931,11 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo, smp_store_mb(xfer->priv, priv); info->desc->ops->fetch_response(cinfo, xfer); - trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id, + trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id, + xfer->hdr.id, xfer->hdr.type == MSG_TYPE_DELAYED_RESP ? - "DLYD" : "RESP", + (!SCMI_XFER_IS_RAW(xfer) ? "DLYD" : "dlyd") : + (!SCMI_XFER_IS_RAW(xfer) ? 
"RESP" : "resp"), xfer->hdr.seq, xfer->hdr.status, xfer->rx.buf, xfer->rx.len); @@ -722,6 +950,18 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo, complete(&xfer->done); } + if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) { + /* + * When in polling mode avoid to queue the Raw xfer on the IRQ + * RX path since it will be already queued at the end of the TX + * poll loop. + */ + if (!xfer->hdr.poll_completion) + scmi_raw_message_report(info->raw, xfer, + SCMI_RAW_REPLY_QUEUE, + cinfo->id); + } + scmi_xfer_command_release(info, xfer); } @@ -785,36 +1025,18 @@ static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo, ktime_after(ktime_get(), stop); } -/** - * scmi_wait_for_message_response - An helper to group all the possible ways of - * waiting for a synchronous message response. - * - * @cinfo: SCMI channel info - * @xfer: Reference to the transfer being waited for. - * - * Chooses waiting strategy (sleep-waiting vs busy-waiting) depending on - * configuration flags like xfer->hdr.poll_completion. - * - * Return: 0 on Success, error otherwise. - */ -static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo, - struct scmi_xfer *xfer) +static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc, + struct scmi_chan_info *cinfo, + struct scmi_xfer *xfer, unsigned int timeout_ms) { - struct scmi_info *info = handle_to_scmi_info(cinfo->handle); - struct device *dev = info->dev; - int ret = 0, timeout_ms = info->desc->max_rx_timeout_ms; - - trace_scmi_xfer_response_wait(xfer->transfer_id, xfer->hdr.id, - xfer->hdr.protocol_id, xfer->hdr.seq, - timeout_ms, - xfer->hdr.poll_completion); + int ret = 0; if (xfer->hdr.poll_completion) { /* * Real polling is needed only if transport has NOT declared * itself to support synchronous commands replies. */ - if (!info->desc->sync_cmds_completed_on_ret) { + if (!desc->sync_cmds_completed_on_ret) { /* * Poll on xfer using transport provided .poll_done(); * assumes no completion interrupt was available. @@ -833,6 +1055,8 @@ static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo, if (!ret) { unsigned long flags; + struct scmi_info *info = + handle_to_scmi_info(cinfo->handle); /* * Do not fetch_response if an out-of-order delayed @@ -840,16 +1064,27 @@ static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo, */ spin_lock_irqsave(&xfer->lock, flags); if (xfer->state == SCMI_XFER_SENT_OK) { - info->desc->ops->fetch_response(cinfo, xfer); + desc->ops->fetch_response(cinfo, xfer); xfer->state = SCMI_XFER_RESP_OK; } spin_unlock_irqrestore(&xfer->lock, flags); /* Trace polled replies. */ - trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id, - "RESP", + trace_scmi_msg_dump(info->id, cinfo->id, + xfer->hdr.protocol_id, xfer->hdr.id, + !SCMI_XFER_IS_RAW(xfer) ? + "RESP" : "resp", xfer->hdr.seq, xfer->hdr.status, xfer->rx.buf, xfer->rx.len); + + if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) { + struct scmi_info *info = + handle_to_scmi_info(cinfo->handle); + + scmi_raw_message_report(info->raw, xfer, + SCMI_RAW_REPLY_QUEUE, + cinfo->id); + } } } else { /* And we wait for the response. */ @@ -865,6 +1100,59 @@ static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo, } /** + * scmi_wait_for_message_response - An helper to group all the possible ways of + * waiting for a synchronous message response. + * + * @cinfo: SCMI channel info + * @xfer: Reference to the transfer being waited for. 
+ * + * Chooses waiting strategy (sleep-waiting vs busy-waiting) depending on + * configuration flags like xfer->hdr.poll_completion. + * + * Return: 0 on Success, error otherwise. + */ +static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo, + struct scmi_xfer *xfer) +{ + struct scmi_info *info = handle_to_scmi_info(cinfo->handle); + struct device *dev = info->dev; + + trace_scmi_xfer_response_wait(xfer->transfer_id, xfer->hdr.id, + xfer->hdr.protocol_id, xfer->hdr.seq, + info->desc->max_rx_timeout_ms, + xfer->hdr.poll_completion); + + return scmi_wait_for_reply(dev, info->desc, cinfo, xfer, + info->desc->max_rx_timeout_ms); +} + +/** + * scmi_xfer_raw_wait_for_message_response - An helper to wait for a message + * reply to an xfer raw request on a specific channel for the required timeout. + * + * @cinfo: SCMI channel info + * @xfer: Reference to the transfer being waited for. + * @timeout_ms: The maximum timeout in milliseconds + * + * Return: 0 on Success, error otherwise. + */ +int scmi_xfer_raw_wait_for_message_response(struct scmi_chan_info *cinfo, + struct scmi_xfer *xfer, + unsigned int timeout_ms) +{ + int ret; + struct scmi_info *info = handle_to_scmi_info(cinfo->handle); + struct device *dev = info->dev; + + ret = scmi_wait_for_reply(dev, info->desc, cinfo, xfer, timeout_ms); + if (ret) + dev_dbg(dev, "timed out in RAW response - HDR:%08X\n", + pack_scmi_header(&xfer->hdr)); + + return ret; +} + +/** * do_xfer() - Do one transfer * * @ph: Pointer to SCMI protocol handle @@ -884,7 +1172,8 @@ static int do_xfer(const struct scmi_protocol_handle *ph, struct scmi_chan_info *cinfo; /* Check for polling request on custom command xfers at first */ - if (xfer->hdr.poll_completion && !is_transport_polling_capable(info)) { + if (xfer->hdr.poll_completion && + !is_transport_polling_capable(info->desc)) { dev_warn_once(dev, "Polling mode is not supported by transport.\n"); return -EINVAL; @@ -895,7 +1184,7 @@ static int do_xfer(const struct scmi_protocol_handle *ph, return -EINVAL; /* True ONLY if also supported by transport. 
*/ - if (is_polling_enabled(cinfo, info)) + if (is_polling_enabled(cinfo, info->desc)) xfer->hdr.poll_completion = true; /* @@ -910,6 +1199,8 @@ static int do_xfer(const struct scmi_protocol_handle *ph, xfer->hdr.protocol_id, xfer->hdr.seq, xfer->hdr.poll_completion); + /* Clear any stale status */ + xfer->hdr.status = SCMI_SUCCESS; xfer->state = SCMI_XFER_SENT_OK; /* * Even though spinlocking is not needed here since no race is possible @@ -926,9 +1217,9 @@ static int do_xfer(const struct scmi_protocol_handle *ph, return ret; } - trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id, "CMND", - xfer->hdr.seq, xfer->hdr.status, - xfer->tx.buf, xfer->tx.len); + trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id, + xfer->hdr.id, "CMND", xfer->hdr.seq, + xfer->hdr.status, xfer->tx.buf, xfer->tx.len); ret = scmi_wait_for_message_response(cinfo, xfer); if (!ret && xfer->hdr.status) @@ -952,8 +1243,6 @@ static void reset_rx_to_maxsz(const struct scmi_protocol_handle *ph, xfer->rx.len = info->desc->max_msg_size; } -#define SCMI_MAX_RESPONSE_TIMEOUT (2 * MSEC_PER_SEC) - /** * do_xfer_with_response() - Do one transfer and wait until the delayed * response is received @@ -1041,13 +1330,22 @@ static int xfer_get_init(const struct scmi_protocol_handle *ph, tx_size > info->desc->max_msg_size) return -ERANGE; - xfer = scmi_xfer_get(pi->handle, minfo, true); + xfer = scmi_xfer_get(pi->handle, minfo); if (IS_ERR(xfer)) { ret = PTR_ERR(xfer); dev_err(dev, "failed to get free message slot(%d)\n", ret); return ret; } + /* Pick a sequence number and register this xfer as in-flight */ + ret = scmi_xfer_pending_set(xfer, minfo); + if (ret) { + dev_err(pi->handle->dev, + "Failed to get monotonic token %d\n", ret); + __scmi_xfer_put(minfo, xfer); + return ret; + } + xfer->tx.len = tx_size; xfer->rx.len = rx_size ? 
: info->desc->max_msg_size; xfer->hdr.type = MSG_TYPE_COMMAND; @@ -1820,20 +2118,14 @@ static bool scmi_is_transport_atomic(const struct scmi_handle *handle, bool ret; struct scmi_info *info = handle_to_scmi_info(handle); - ret = info->desc->atomic_enabled && is_transport_polling_capable(info); + ret = info->desc->atomic_enabled && + is_transport_polling_capable(info->desc); if (ret && atomic_threshold) *atomic_threshold = info->atomic_threshold; return ret; } -static inline -struct scmi_handle *scmi_handle_get_from_info_unlocked(struct scmi_info *info) -{ - info->users++; - return &info->handle; -} - /** * scmi_handle_get() - Get the SCMI handle for a device * @@ -1845,7 +2137,7 @@ struct scmi_handle *scmi_handle_get_from_info_unlocked(struct scmi_info *info) * * Return: pointer to handle if successful, NULL on error */ -struct scmi_handle *scmi_handle_get(struct device *dev) +static struct scmi_handle *scmi_handle_get(struct device *dev) { struct list_head *p; struct scmi_info *info; @@ -1855,7 +2147,8 @@ struct scmi_handle *scmi_handle_get(struct device *dev) list_for_each(p, &scmi_list) { info = list_entry(p, struct scmi_info, node); if (dev->parent == info->dev) { - handle = scmi_handle_get_from_info_unlocked(info); + info->users++; + handle = &info->handle; break; } } @@ -1876,7 +2169,7 @@ struct scmi_handle *scmi_handle_get(struct device *dev) * Return: 0 is successfully released * if null was passed, it returns -EINVAL; */ -int scmi_handle_put(const struct scmi_handle *handle) +static int scmi_handle_put(const struct scmi_handle *handle) { struct scmi_info *info; @@ -1892,6 +2185,23 @@ int scmi_handle_put(const struct scmi_handle *handle) return 0; } +static void scmi_device_link_add(struct device *consumer, + struct device *supplier) +{ + struct device_link *link; + + link = device_link_add(consumer, supplier, DL_FLAG_AUTOREMOVE_CONSUMER); + + WARN_ON(!link); +} + +static void scmi_set_handle(struct scmi_device *scmi_dev) +{ + scmi_dev->handle = scmi_handle_get(&scmi_dev->dev); + if (scmi_dev->handle) + scmi_device_link_add(&scmi_dev->dev, scmi_dev->handle->dev); +} + static int __scmi_xfer_info_init(struct scmi_info *sinfo, struct scmi_xfers_info *info) { @@ -1985,23 +2295,20 @@ static int scmi_xfer_info_init(struct scmi_info *sinfo) return ret; } -static int scmi_chan_setup(struct scmi_info *info, struct device *dev, +static int scmi_chan_setup(struct scmi_info *info, struct device_node *of_node, int prot_id, bool tx) { int ret, idx; + char name[32]; struct scmi_chan_info *cinfo; struct idr *idr; + struct scmi_device *tdev = NULL; /* Transmit channel is first entry i.e. index 0 */ idx = tx ? 0 : 1; idr = tx ? &info->tx_idr : &info->rx_idr; - /* check if already allocated, used for multiple device per protocol */ - cinfo = idr_find(idr, prot_id); - if (cinfo) - return 0; - - if (!info->desc->ops->chan_available(dev, idx)) { + if (!info->desc->ops->chan_available(of_node, idx)) { cinfo = idr_find(idr, SCMI_PROTOCOL_BASE); if (unlikely(!cinfo)) /* Possible only if platform has no Rx */ return -EINVAL; @@ -2012,27 +2319,52 @@ static int scmi_chan_setup(struct scmi_info *info, struct device *dev, if (!cinfo) return -ENOMEM; - cinfo->dev = dev; cinfo->rx_timeout_ms = info->desc->max_rx_timeout_ms; + /* Create a unique name for this transport device */ + snprintf(name, 32, "__scmi_transport_device_%s_%02X", + idx ? 
"rx" : "tx", prot_id); + /* Create a uniquely named, dedicated transport device for this chan */ + tdev = scmi_device_create(of_node, info->dev, prot_id, name); + if (!tdev) { + dev_err(info->dev, + "failed to create transport device (%s)\n", name); + devm_kfree(info->dev, cinfo); + return -EINVAL; + } + of_node_get(of_node); + + cinfo->id = prot_id; + cinfo->dev = &tdev->dev; ret = info->desc->ops->chan_setup(cinfo, info->dev, tx); - if (ret) + if (ret) { + of_node_put(of_node); + scmi_device_destroy(info->dev, prot_id, name); + devm_kfree(info->dev, cinfo); return ret; + } - if (tx && is_polling_required(cinfo, info)) { - if (is_transport_polling_capable(info)) - dev_info(dev, + if (tx && is_polling_required(cinfo, info->desc)) { + if (is_transport_polling_capable(info->desc)) + dev_info(&tdev->dev, "Enabled polling mode TX channel - prot_id:%d\n", prot_id); else - dev_warn(dev, + dev_warn(&tdev->dev, "Polling mode NOT supported by transport.\n"); } idr_alloc: ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL); if (ret != prot_id) { - dev_err(dev, "unable to allocate SCMI idr slot err %d\n", ret); + dev_err(info->dev, + "unable to allocate SCMI idr slot err %d\n", ret); + /* Destroy channel and device only if created by this call. */ + if (tdev) { + of_node_put(of_node); + scmi_device_destroy(info->dev, prot_id, name); + devm_kfree(info->dev, cinfo); + } return ret; } @@ -2041,13 +2373,14 @@ idr_alloc: } static inline int -scmi_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id) +scmi_txrx_setup(struct scmi_info *info, struct device_node *of_node, + int prot_id) { - int ret = scmi_chan_setup(info, dev, prot_id, true); + int ret = scmi_chan_setup(info, of_node, prot_id, true); if (!ret) { /* Rx is optional, report only memory errors */ - ret = scmi_chan_setup(info, dev, prot_id, false); + ret = scmi_chan_setup(info, of_node, prot_id, false); if (ret && ret != -ENOMEM) ret = 0; } @@ -2056,306 +2389,264 @@ scmi_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id) } /** - * scmi_get_protocol_device - Helper to get/create an SCMI device. - * - * @np: A device node representing a valid active protocols for the referred - * SCMI instance. - * @info: The referred SCMI instance for which we are getting/creating this - * device. - * @prot_id: The protocol ID. - * @name: The device name. - * - * Referring to the specific SCMI instance identified by @info, this helper - * takes care to return a properly initialized device matching the requested - * @proto_id and @name: if device was still not existent it is created as a - * child of the specified SCMI instance @info and its transport properly - * initialized as usual. - * - * Return: A properly initialized scmi device, NULL otherwise. + * scmi_channels_setup - Helper to initialize all required channels + * + * @info: The SCMI instance descriptor. + * + * Initialize all the channels found described in the DT against the underlying + * configured transport using custom defined dedicated devices instead of + * borrowing devices from the SCMI drivers; this way channels are initialized + * upfront during core SCMI stack probing and are no more coupled with SCMI + * devices used by SCMI drivers. + * + * Note that, even though a pair of TX/RX channels is associated to each + * protocol defined in the DT, a distinct freshly initialized channel is + * created only if the DT node for the protocol at hand describes a dedicated + * channel: in all the other cases the common BASE protocol channel is reused. 
+ * + * Return: 0 on Success */ -static inline struct scmi_device * -scmi_get_protocol_device(struct device_node *np, struct scmi_info *info, - int prot_id, const char *name) +static int scmi_channels_setup(struct scmi_info *info) { - struct scmi_device *sdev; + int ret; + struct device_node *child, *top_np = info->dev->of_node; - /* Already created for this parent SCMI instance ? */ - sdev = scmi_child_dev_find(info->dev, prot_id, name); - if (sdev) - return sdev; + /* Initialize a common generic channel at first */ + ret = scmi_txrx_setup(info, top_np, SCMI_PROTOCOL_BASE); + if (ret) + return ret; - mutex_lock(&scmi_syspower_mtx); - if (prot_id == SCMI_PROTOCOL_SYSTEM && scmi_syspower_registered) { - dev_warn(info->dev, - "SCMI SystemPower protocol device must be unique !\n"); - mutex_unlock(&scmi_syspower_mtx); + for_each_available_child_of_node(top_np, child) { + u32 prot_id; - return NULL; + if (of_property_read_u32(child, "reg", &prot_id)) + continue; + + if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id)) + dev_err(info->dev, + "Out of range protocol %d\n", prot_id); + + ret = scmi_txrx_setup(info, child, prot_id); + if (ret) { + of_node_put(child); + return ret; + } } - pr_debug("Creating SCMI device (%s) for protocol %x\n", name, prot_id); + return 0; +} - sdev = scmi_device_create(np, info->dev, prot_id, name); - if (!sdev) { - dev_err(info->dev, "failed to create %d protocol device\n", - prot_id); - mutex_unlock(&scmi_syspower_mtx); +static int scmi_chan_destroy(int id, void *p, void *idr) +{ + struct scmi_chan_info *cinfo = p; - return NULL; + if (cinfo->dev) { + struct scmi_info *info = handle_to_scmi_info(cinfo->handle); + struct scmi_device *sdev = to_scmi_dev(cinfo->dev); + + of_node_put(cinfo->dev->of_node); + scmi_device_destroy(info->dev, id, sdev->name); + cinfo->dev = NULL; } - if (scmi_txrx_setup(info, &sdev->dev, prot_id)) { - dev_err(&sdev->dev, "failed to setup transport\n"); - scmi_device_destroy(sdev); - mutex_unlock(&scmi_syspower_mtx); + idr_remove(idr, id); - return NULL; - } + return 0; +} - if (prot_id == SCMI_PROTOCOL_SYSTEM) - scmi_syspower_registered = true; +static void scmi_cleanup_channels(struct scmi_info *info, struct idr *idr) +{ + /* At first free all channels at the transport layer ... */ + idr_for_each(idr, info->desc->ops->chan_free, idr); - mutex_unlock(&scmi_syspower_mtx); + /* ...then destroy all underlying devices */ + idr_for_each(idr, scmi_chan_destroy, idr); - return sdev; + idr_destroy(idr); } -static inline void -scmi_create_protocol_device(struct device_node *np, struct scmi_info *info, - int prot_id, const char *name) +static void scmi_cleanup_txrx_channels(struct scmi_info *info) { - struct scmi_device *sdev; - - sdev = scmi_get_protocol_device(np, info, prot_id, name); - if (!sdev) - return; + scmi_cleanup_channels(info, &info->tx_idr); - /* setup handle now as the transport is ready */ - scmi_set_handle(sdev); + scmi_cleanup_channels(info, &info->rx_idr); } -/** - * scmi_create_protocol_devices - Create devices for all pending requests for - * this SCMI instance. - * - * @np: The device node describing the protocol - * @info: The SCMI instance descriptor - * @prot_id: The protocol ID - * - * All devices previously requested for this instance (if any) are found and - * created by scanning the proper @&scmi_requested_devices entry. 
- */ -static void scmi_create_protocol_devices(struct device_node *np, - struct scmi_info *info, int prot_id) +static int scmi_bus_notifier(struct notifier_block *nb, + unsigned long action, void *data) { - struct list_head *phead; + struct scmi_info *info = bus_nb_to_scmi_info(nb); + struct scmi_device *sdev = to_scmi_dev(data); - mutex_lock(&scmi_requested_devices_mtx); - phead = idr_find(&scmi_requested_devices, prot_id); - if (phead) { - struct scmi_requested_dev *rdev; + /* Skip transport devices and devices of different SCMI instances */ + if (!strncmp(sdev->name, "__scmi_transport_device", 23) || + sdev->dev.parent != info->dev) + return NOTIFY_DONE; - list_for_each_entry(rdev, phead, node) - scmi_create_protocol_device(np, info, prot_id, - rdev->id_table->name); + switch (action) { + case BUS_NOTIFY_BIND_DRIVER: + /* setup handle now as the transport is ready */ + scmi_set_handle(sdev); + break; + case BUS_NOTIFY_UNBOUND_DRIVER: + scmi_handle_put(sdev->handle); + sdev->handle = NULL; + break; + default: + return NOTIFY_DONE; } - mutex_unlock(&scmi_requested_devices_mtx); + + dev_dbg(info->dev, "Device %s (%s) is now %s\n", dev_name(&sdev->dev), + sdev->name, action == BUS_NOTIFY_BIND_DRIVER ? + "about to be BOUND." : "UNBOUND."); + + return NOTIFY_OK; } -/** - * scmi_protocol_device_request - Helper to request a device - * - * @id_table: A protocol/name pair descriptor for the device to be created. - * - * This helper let an SCMI driver request specific devices identified by the - * @id_table to be created for each active SCMI instance. - * - * The requested device name MUST NOT be already existent for any protocol; - * at first the freshly requested @id_table is annotated in the IDR table - * @scmi_requested_devices, then a matching device is created for each already - * active SCMI instance. (if any) - * - * This way the requested device is created straight-away for all the already - * initialized(probed) SCMI instances (handles) and it remains also annotated - * as pending creation if the requesting SCMI driver was loaded before some - * SCMI instance and related transports were available: when such late instance - * is probed, its probe will take care to scan the list of pending requested - * devices and create those on its own (see @scmi_create_protocol_devices and - * its enclosing loop) - * - * Return: 0 on Success - */ -int scmi_protocol_device_request(const struct scmi_device_id *id_table) +static int scmi_device_request_notifier(struct notifier_block *nb, + unsigned long action, void *data) { - int ret = 0; - unsigned int id = 0; - struct list_head *head, *phead = NULL; - struct scmi_requested_dev *rdev; - struct scmi_info *info; + struct device_node *np; + struct scmi_device_id *id_table = data; + struct scmi_info *info = req_nb_to_scmi_info(nb); - pr_debug("Requesting SCMI device (%s) for protocol %x\n", - id_table->name, id_table->protocol_id); + np = idr_find(&info->active_protocols, id_table->protocol_id); + if (!np) + return NOTIFY_DONE; - /* - * Search for the matching protocol rdev list and then search - * of any existent equally named device...fails if any duplicate found. 
- */ - mutex_lock(&scmi_requested_devices_mtx); - idr_for_each_entry(&scmi_requested_devices, head, id) { - if (!phead) { - /* A list found registered in the IDR is never empty */ - rdev = list_first_entry(head, struct scmi_requested_dev, - node); - if (rdev->id_table->protocol_id == - id_table->protocol_id) - phead = head; - } - list_for_each_entry(rdev, head, node) { - if (!strcmp(rdev->id_table->name, id_table->name)) { - pr_err("Ignoring duplicate request [%d] %s\n", - rdev->id_table->protocol_id, - rdev->id_table->name); - ret = -EINVAL; - goto out; - } - } - } + dev_dbg(info->dev, "%sRequested device (%s) for protocol 0x%x\n", + action == SCMI_BUS_NOTIFY_DEVICE_REQUEST ? "" : "UN-", + id_table->name, id_table->protocol_id); - /* - * No duplicate found for requested id_table, so let's create a new - * requested device entry for this new valid request. - */ - rdev = kzalloc(sizeof(*rdev), GFP_KERNEL); - if (!rdev) { - ret = -ENOMEM; - goto out; + switch (action) { + case SCMI_BUS_NOTIFY_DEVICE_REQUEST: + scmi_create_protocol_devices(np, info, id_table->protocol_id, + id_table->name); + break; + case SCMI_BUS_NOTIFY_DEVICE_UNREQUEST: + scmi_destroy_protocol_devices(info, id_table->protocol_id, + id_table->name); + break; + default: + return NOTIFY_DONE; } - rdev->id_table = id_table; - /* - * Append the new requested device table descriptor to the head of the - * related protocol list, eventually creating such head if not already - * there. - */ - if (!phead) { - phead = kzalloc(sizeof(*phead), GFP_KERNEL); - if (!phead) { - kfree(rdev); - ret = -ENOMEM; - goto out; - } - INIT_LIST_HEAD(phead); - - ret = idr_alloc(&scmi_requested_devices, (void *)phead, - id_table->protocol_id, - id_table->protocol_id + 1, GFP_KERNEL); - if (ret != id_table->protocol_id) { - pr_err("Failed to save SCMI device - ret:%d\n", ret); - kfree(rdev); - kfree(phead); - ret = -EINVAL; - goto out; - } - ret = 0; - } - list_add(&rdev->node, phead); + return NOTIFY_OK; +} - /* - * Now effectively create and initialize the requested device for every - * already initialized SCMI instance which has registered the requested - * protocol as a valid active one: i.e. defined in DT and supported by - * current platform FW. - */ - mutex_lock(&scmi_list_mutex); - list_for_each_entry(info, &scmi_list, node) { - struct device_node *child; - - child = idr_find(&info->active_protocols, - id_table->protocol_id); - if (child) { - struct scmi_device *sdev; - - sdev = scmi_get_protocol_device(child, info, - id_table->protocol_id, - id_table->name); - if (sdev) { - /* Set handle if not already set: device existed */ - if (!sdev->handle) - sdev->handle = - scmi_handle_get_from_info_unlocked(info); - /* Relink consumer and suppliers */ - if (sdev->handle) - scmi_device_link_add(&sdev->dev, - sdev->handle->dev); - } - } else { - dev_err(info->dev, - "Failed. SCMI protocol %d not active.\n", - id_table->protocol_id); - } - } - mutex_unlock(&scmi_list_mutex); +static void scmi_debugfs_common_cleanup(void *d) +{ + struct scmi_debug_info *dbg = d; -out: - mutex_unlock(&scmi_requested_devices_mtx); + if (!dbg) + return; - return ret; + debugfs_remove_recursive(dbg->top_dentry); + kfree(dbg->name); + kfree(dbg->type); } -/** - * scmi_protocol_device_unrequest - Helper to unrequest a device - * - * @id_table: A protocol/name pair descriptor for the device to be unrequested. 
- * - * An helper to let an SCMI driver release its request about devices; note that - * devices are created and initialized once the first SCMI driver request them - * but they destroyed only on SCMI core unloading/unbinding. - * - * The current SCMI transport layer uses such devices as internal references and - * as such they could be shared as same transport between multiple drivers so - * that cannot be safely destroyed till the whole SCMI stack is removed. - * (unless adding further burden of refcounting.) - */ -void scmi_protocol_device_unrequest(const struct scmi_device_id *id_table) +static struct scmi_debug_info *scmi_debugfs_common_setup(struct scmi_info *info) { - struct list_head *phead; + char top_dir[16]; + struct dentry *trans, *top_dentry; + struct scmi_debug_info *dbg; + const char *c_ptr = NULL; - pr_debug("Unrequesting SCMI device (%s) for protocol %x\n", - id_table->name, id_table->protocol_id); + dbg = devm_kzalloc(info->dev, sizeof(*dbg), GFP_KERNEL); + if (!dbg) + return NULL; - mutex_lock(&scmi_requested_devices_mtx); - phead = idr_find(&scmi_requested_devices, id_table->protocol_id); - if (phead) { - struct scmi_requested_dev *victim, *tmp; + dbg->name = kstrdup(of_node_full_name(info->dev->of_node), GFP_KERNEL); + if (!dbg->name) { + devm_kfree(info->dev, dbg); + return NULL; + } - list_for_each_entry_safe(victim, tmp, phead, node) { - if (!strcmp(victim->id_table->name, id_table->name)) { - list_del(&victim->node); - kfree(victim); - break; - } - } + of_property_read_string(info->dev->of_node, "compatible", &c_ptr); + dbg->type = kstrdup(c_ptr, GFP_KERNEL); + if (!dbg->type) { + kfree(dbg->name); + devm_kfree(info->dev, dbg); + return NULL; + } - if (list_empty(phead)) { - idr_remove(&scmi_requested_devices, - id_table->protocol_id); - kfree(phead); - } + snprintf(top_dir, 16, "%d", info->id); + top_dentry = debugfs_create_dir(top_dir, scmi_top_dentry); + trans = debugfs_create_dir("transport", top_dentry); + + dbg->is_atomic = info->desc->atomic_enabled && + is_transport_polling_capable(info->desc); + + debugfs_create_str("instance_name", 0400, top_dentry, + (char **)&dbg->name); + + debugfs_create_u32("atomic_threshold_us", 0400, top_dentry, + &info->atomic_threshold); + + debugfs_create_str("type", 0400, trans, (char **)&dbg->type); + + debugfs_create_bool("is_atomic", 0400, trans, &dbg->is_atomic); + + debugfs_create_u32("max_rx_timeout_ms", 0400, trans, + (u32 *)&info->desc->max_rx_timeout_ms); + + debugfs_create_u32("max_msg_size", 0400, trans, + (u32 *)&info->desc->max_msg_size); + + debugfs_create_u32("tx_max_msg", 0400, trans, + (u32 *)&info->tx_minfo.max_msg); + + debugfs_create_u32("rx_max_msg", 0400, trans, + (u32 *)&info->rx_minfo.max_msg); + + dbg->top_dentry = top_dentry; + + if (devm_add_action_or_reset(info->dev, + scmi_debugfs_common_cleanup, dbg)) { + scmi_debugfs_common_cleanup(dbg); + return NULL; } - mutex_unlock(&scmi_requested_devices_mtx); + + return dbg; } -static int scmi_cleanup_txrx_channels(struct scmi_info *info) +static int scmi_debugfs_raw_mode_setup(struct scmi_info *info) { - int ret; - struct idr *idr = &info->tx_idr; + int id, num_chans = 0, ret = 0; + struct scmi_chan_info *cinfo; + u8 channels[SCMI_MAX_CHANNELS] = {}; + DECLARE_BITMAP(protos, SCMI_MAX_CHANNELS) = {}; + + if (!info->dbg) + return -EINVAL; + + /* Enumerate all channels to collect their ids */ + idr_for_each_entry(&info->tx_idr, cinfo, id) { + /* + * Cannot happen, but be defensive. + * Zero as num_chans is ok, warn and carry on. 
+ */ + if (num_chans >= SCMI_MAX_CHANNELS || !cinfo) { + dev_warn(info->dev, + "SCMI RAW - Error enumerating channels\n"); + break; + } - ret = idr_for_each(idr, info->desc->ops->chan_free, idr); - idr_destroy(&info->tx_idr); + if (!test_bit(cinfo->id, protos)) { + channels[num_chans++] = cinfo->id; + set_bit(cinfo->id, protos); + } + } - idr = &info->rx_idr; - ret = idr_for_each(idr, info->desc->ops->chan_free, idr); - idr_destroy(&info->rx_idr); + info->raw = scmi_raw_mode_init(&info->handle, info->dbg->top_dentry, + info->id, channels, num_chans, + info->desc, info->tx_minfo.max_msg); + if (IS_ERR(info->raw)) { + dev_err(info->dev, "Failed to initialize SCMI RAW Mode !\n"); + ret = PTR_ERR(info->raw); + info->raw = NULL; + } return ret; } @@ -2377,12 +2668,19 @@ static int scmi_probe(struct platform_device *pdev) if (!info) return -ENOMEM; + info->id = ida_alloc_min(&scmi_id, 0, GFP_KERNEL); + if (info->id < 0) + return info->id; + info->dev = dev; info->desc = desc; + info->bus_nb.notifier_call = scmi_bus_notifier; + info->dev_req_nb.notifier_call = scmi_device_request_notifier; INIT_LIST_HEAD(&info->node); idr_init(&info->protocols); mutex_init(&info->protocols_mtx); idr_init(&info->active_protocols); + mutex_init(&info->devreq_mtx); platform_set_drvdata(pdev, info); idr_init(&info->tx_idr); @@ -2406,21 +2704,55 @@ static int scmi_probe(struct platform_device *pdev) if (desc->ops->link_supplier) { ret = desc->ops->link_supplier(dev); if (ret) - return ret; + goto clear_ida; } - ret = scmi_txrx_setup(info, dev, SCMI_PROTOCOL_BASE); + /* Setup all channels described in the DT at first */ + ret = scmi_channels_setup(info); if (ret) - return ret; + goto clear_ida; - ret = scmi_xfer_info_init(info); + ret = bus_register_notifier(&scmi_bus_type, &info->bus_nb); if (ret) goto clear_txrx_setup; + ret = blocking_notifier_chain_register(&scmi_requested_devices_nh, + &info->dev_req_nb); + if (ret) + goto clear_bus_notifier; + + ret = scmi_xfer_info_init(info); + if (ret) + goto clear_dev_req_notifier; + + if (scmi_top_dentry) { + info->dbg = scmi_debugfs_common_setup(info); + if (!info->dbg) + dev_warn(dev, "Failed to setup SCMI debugfs.\n"); + + if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) { + bool coex = + IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT_COEX); + + ret = scmi_debugfs_raw_mode_setup(info); + if (!coex) { + if (ret) + goto clear_dev_req_notifier; + + /* Bail out anyway when coex enabled */ + return ret; + } + + /* Coex enabled, carry on in any case. */ + dev_info(dev, "SCMI RAW Mode COEX enabled !\n"); + } + } + if (scmi_notification_init(handle)) dev_err(dev, "SCMI Notifications NOT available.\n"); - if (info->desc->atomic_enabled && !is_transport_polling_capable(info)) + if (info->desc->atomic_enabled && + !is_transport_polling_capable(info->desc)) dev_err(dev, "Transport is not polling capable. 
Atomic mode not supported.\n"); @@ -2467,29 +2799,36 @@ static int scmi_probe(struct platform_device *pdev) } of_node_get(child); - scmi_create_protocol_devices(child, info, prot_id); + scmi_create_protocol_devices(child, info, prot_id, NULL); } return 0; notification_exit: + if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) + scmi_raw_mode_cleanup(info->raw); scmi_notification_exit(&info->handle); +clear_dev_req_notifier: + blocking_notifier_chain_unregister(&scmi_requested_devices_nh, + &info->dev_req_nb); +clear_bus_notifier: + bus_unregister_notifier(&scmi_bus_type, &info->bus_nb); clear_txrx_setup: scmi_cleanup_txrx_channels(info); +clear_ida: + ida_free(&scmi_id, info->id); return ret; } -void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id) -{ - idr_remove(idr, id); -} - static int scmi_remove(struct platform_device *pdev) { - int ret, id; + int id; struct scmi_info *info = platform_get_drvdata(pdev); struct device_node *child; + if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) + scmi_raw_mode_cleanup(info->raw); + mutex_lock(&scmi_list_mutex); if (info->users) dev_warn(&pdev->dev, @@ -2507,10 +2846,14 @@ static int scmi_remove(struct platform_device *pdev) of_node_put(child); idr_destroy(&info->active_protocols); + blocking_notifier_chain_unregister(&scmi_requested_devices_nh, + &info->dev_req_nb); + bus_unregister_notifier(&scmi_bus_type, &info->bus_nb); + /* Safe to free channels since no more users */ - ret = scmi_cleanup_txrx_channels(info); - if (ret) - dev_warn(&pdev->dev, "Failed to cleanup SCMI channels.\n"); + scmi_cleanup_txrx_channels(info); + + ida_free(&scmi_id, info->id); return 0; } @@ -2639,6 +2982,19 @@ static void __exit scmi_transports_exit(void) __scmi_transports_setup(false); } +static struct dentry *scmi_debugfs_init(void) +{ + struct dentry *d; + + d = debugfs_create_dir("scmi", NULL); + if (IS_ERR(d)) { + pr_err("Could NOT create SCMI top dentry.\n"); + return NULL; + } + + return d; +} + static int __init scmi_driver_init(void) { int ret; @@ -2647,13 +3003,14 @@ static int __init scmi_driver_init(void) if (WARN_ON(!IS_ENABLED(CONFIG_ARM_SCMI_HAVE_TRANSPORT))) return -EINVAL; - scmi_bus_init(); - /* Initialize any compiled-in transport which provided an init/exit */ ret = scmi_transports_init(); if (ret) return ret; + if (IS_ENABLED(CONFIG_ARM_SCMI_NEED_DEBUGFS)) + scmi_top_dentry = scmi_debugfs_init(); + scmi_base_register(); scmi_clock_register(); @@ -2667,7 +3024,7 @@ static int __init scmi_driver_init(void) return platform_driver_register(&scmi_driver); } -subsys_initcall(scmi_driver_init); +module_init(scmi_driver_init); static void __exit scmi_driver_exit(void) { @@ -2682,11 +3039,11 @@ static void __exit scmi_driver_exit(void) scmi_system_unregister(); scmi_powercap_unregister(); - scmi_bus_exit(); - scmi_transports_exit(); platform_driver_unregister(&scmi_driver); + + debugfs_remove_recursive(scmi_top_dentry); } module_exit(scmi_driver_exit); diff --git a/drivers/firmware/arm_scmi/mailbox.c b/drivers/firmware/arm_scmi/mailbox.c index 1e40cb035044..0d9c9538b7f4 100644 --- a/drivers/firmware/arm_scmi/mailbox.c +++ b/drivers/firmware/arm_scmi/mailbox.c @@ -46,9 +46,9 @@ static void rx_callback(struct mbox_client *cl, void *m) scmi_rx_callback(smbox->cinfo, shmem_read_header(smbox->shmem), NULL); } -static bool mailbox_chan_available(struct device *dev, int idx) +static bool mailbox_chan_available(struct device_node *of_node, int idx) { - return !of_parse_phandle_with_args(dev->of_node, "mboxes", + return 
!of_parse_phandle_with_args(of_node, "mboxes", "#mbox-cells", idx, NULL); } @@ -120,8 +120,6 @@ static int mailbox_chan_free(int id, void *p, void *data) smbox->cinfo = NULL; } - scmi_free_channel(cinfo, data, id); - return 0; } diff --git a/drivers/firmware/arm_scmi/optee.c b/drivers/firmware/arm_scmi/optee.c index 2a7aeab40e54..929720387102 100644 --- a/drivers/firmware/arm_scmi/optee.c +++ b/drivers/firmware/arm_scmi/optee.c @@ -328,11 +328,11 @@ static int scmi_optee_link_supplier(struct device *dev) return 0; } -static bool scmi_optee_chan_available(struct device *dev, int idx) +static bool scmi_optee_chan_available(struct device_node *of_node, int idx) { u32 channel_id; - return !of_property_read_u32_index(dev->of_node, "linaro,optee-channel-id", + return !of_property_read_u32_index(of_node, "linaro,optee-channel-id", idx, &channel_id); } @@ -481,8 +481,6 @@ static int scmi_optee_chan_free(int id, void *p, void *data) cinfo->transport_info = NULL; channel->cinfo = NULL; - scmi_free_channel(cinfo, data, id); - return 0; } diff --git a/drivers/firmware/arm_scmi/protocols.h b/drivers/firmware/arm_scmi/protocols.h index 2f3bf691db7c..78e1a01eb656 100644 --- a/drivers/firmware/arm_scmi/protocols.h +++ b/drivers/firmware/arm_scmi/protocols.h @@ -115,6 +115,7 @@ struct scmi_msg_hdr { * - SCMI_XFER_SENT_OK -> SCMI_XFER_RESP_OK [ -> SCMI_XFER_DRESP_OK ] * - SCMI_XFER_SENT_OK -> SCMI_XFER_DRESP_OK * (Missing synchronous response is assumed OK and ignored) + * @flags: Optional flags associated to this xfer. * @lock: A spinlock to protect state and busy fields. * @priv: A pointer for transport private usage. */ @@ -135,6 +136,12 @@ struct scmi_xfer { #define SCMI_XFER_RESP_OK 1 #define SCMI_XFER_DRESP_OK 2 int state; +#define SCMI_XFER_FLAG_IS_RAW BIT(0) +#define SCMI_XFER_IS_RAW(x) ((x)->flags & SCMI_XFER_FLAG_IS_RAW) +#define SCMI_XFER_FLAG_CHAN_SET BIT(1) +#define SCMI_XFER_IS_CHAN_SET(x) \ + ((x)->flags & SCMI_XFER_FLAG_CHAN_SET) + int flags; /* A lock to protect state and busy fields */ spinlock_t lock; void *priv; diff --git a/drivers/firmware/arm_scmi/raw_mode.c b/drivers/firmware/arm_scmi/raw_mode.c new file mode 100644 index 000000000000..d40df099fd51 --- /dev/null +++ b/drivers/firmware/arm_scmi/raw_mode.c @@ -0,0 +1,1443 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * System Control and Management Interface (SCMI) Raw mode support + * + * Copyright (C) 2022 ARM Ltd. + */ +/** + * DOC: Theory of operation + * + * When enabled the SCMI Raw mode support exposes a userspace API which allows + * to send and receive SCMI commands, replies and notifications from a user + * application through injection and snooping of bare SCMI messages in binary + * little-endian format. + * + * Such injected SCMI transactions will then be routed through the SCMI core + * stack towards the SCMI backend server using whatever SCMI transport is + * currently configured on the system under test. + * + * It is meant to help in running any sort of SCMI backend server testing, no + * matter where the server is placed, as long as it is normally reachable via + * the transport configured on the system. + * + * It is activated by a Kernel configuration option since it is NOT meant to + * be used in production but only during development and in CI deployments. 
+ * + * In order to avoid possible interference between the SCMI Raw transactions + * originated from a test-suite and the normal operations of the SCMI drivers, + * when Raw mode is enabled, by default, all the regular SCMI drivers are + * inhibited, unless CONFIG_ARM_SCMI_RAW_MODE_SUPPORT_COEX is enabled: in this + * latter case the regular SCMI stack drivers will be loaded as usual and it is + * up to the user of this interface to take care of manually inhibiting the + * regular SCMI drivers in order to avoid interference during the test runs. + * + * The exposed API is as follows. + * + * All SCMI Raw entries are rooted under a common /raw debugfs top directory + * which in turn is rooted under the corresponding underlying SCMI instance. + * + * /sys/kernel/debug/scmi/ + * `-- 0 + * |-- atomic_threshold_us + * |-- instance_name + * |-- raw + * | |-- channels + * | | |-- 0x10 + * | | | |-- message + * | | | `-- message_async + * | | `-- 0x13 + * | | |-- message + * | | `-- message_async + * | |-- errors + * | |-- message + * | |-- message_async + * | |-- notification + * | `-- reset + * `-- transport + * |-- is_atomic + * |-- max_msg_size + * |-- max_rx_timeout_ms + * |-- rx_max_msg + * |-- tx_max_msg + * `-- type + * + * where: + * + * - errors: used to read back timed-out and unexpected replies + * - message*: used to send sync/async commands and read back immediate and + * delayed responses (if any) + * - notification: used to read any notification being emitted by the system + * (if previously enabled by the user app) + * - reset: used to flush the queues of messages (of any kind) still pending + * to be read; this is useful at test-suite start/stop to get + * rid of any unread messages from the previous run. + * + * with the per-channel entries rooted at /channels being present only on a + * system where multiple transport channels have been configured. + * + * Such per-channel entries can be used to explicitly choose a specific channel + * for SCMI bare message injection, in contrast with the general entries above + * where, instead, the selection of the proper channel to use is automatically + * performed based on the protocol embedded in the injected message and on how + * the transport is configured on the system. + * + * Note that other common general entries are available under transport/ to let + * the user applications properly make up their expectations in terms of + * timeouts and message characteristics. + * + * Each write to the message* entries causes one command request to be built + * and sent while the replies or delayed responses are read back from those same + * entries one message at a time (receiving an EOF at each message boundary). + * + * The user application running the test is in charge of handling timeouts + * on replies and properly choosing SCMI sequence numbers for the outgoing + * requests (using the same sequence number is supported but discouraged). + * + * Injection of multiple in-flight requests is supported as long as the user + * application uses properly distinct sequence numbers for concurrent requests + * and takes care to properly manage all the related issues about concurrency + * and command/reply pairing. Keep in mind that, anyway, the real level of + * parallelism attainable in such a scenario is dependent on the characteristics + * of the underlying transport being used.
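As an illustration of the userspace flow described above, here is a minimal C sketch (not part of this patch): it assumes an SCMI instance rooted at /sys/kernel/debug/scmi/0, a little-endian host, and the standard 32-bit SCMI header layout used by the core (message_id in bits [7:0], message type in bits [9:8], protocol_id in bits [17:10], token/sequence number in bits [27:18]); paths, IDs and error handling are simplified.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* BASE protocol (0x10), PROTOCOL_VERSION (msg_id 0x0), type 0, token 0x1 */
	uint32_t hdr = 0x0 | (0x0 << 8) | (0x10 << 10) | (0x1 << 18);
	uint8_t reply[128];
	ssize_t n;
	int fd;

	fd = open("/sys/kernel/debug/scmi/0/raw/message", O_RDWR);
	if (fd < 0)
		return 1;

	/* One write builds and sends one command: 4-byte LE header, no payload here */
	if (write(fd, &hdr, sizeof(hdr)) != sizeof(hdr))
		return 1;

	/* A blocking read returns one whole reply, then EOF at the message boundary */
	n = read(fd, reply, sizeof(reply));
	if (n >= 8)
		printf("reply hdr=0x%08x status=%d\n",
		       *(uint32_t *)reply, *(int32_t *)(reply + 4));

	close(fd);
	return 0;
}

On a big-endian host the header would need an explicit conversion to little-endian before the write, and a real test application would also consult the limits exposed under transport/ (max_msg_size, max_rx_timeout_ms) and handle reply timeouts itself, as noted above.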
+ * + * Since the SCMI core regular stack is partially used to deliver and collect + * the messages, late replies arrived after timeouts and any other sort of + * unexpected message can be identified by the SCMI core as usual and they will + * be reported as messages under "errors" for later analysis. + */ + +#include <linux/bitmap.h> +#include <linux/debugfs.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/export.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/fs.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/poll.h> +#include <linux/of.h> +#include <linux/slab.h> +#include <linux/xarray.h> + +#include "common.h" + +#include "raw_mode.h" + +#include <trace/events/scmi.h> + +#define SCMI_XFER_RAW_MAX_RETRIES 10 + +/** + * struct scmi_raw_queue - Generic Raw queue descriptor + * + * @free_bufs: A freelists listhead used to keep unused raw buffers + * @free_bufs_lock: Spinlock used to protect access to @free_bufs + * @msg_q: A listhead to a queue of snooped messages waiting to be read out + * @msg_q_lock: Spinlock used to protect access to @msg_q + * @wq: A waitqueue used to wait and poll on related @msg_q + */ +struct scmi_raw_queue { + struct list_head free_bufs; + /* Protect free_bufs[] lists */ + spinlock_t free_bufs_lock; + struct list_head msg_q; + /* Protect msg_q[] lists */ + spinlock_t msg_q_lock; + wait_queue_head_t wq; +}; + +/** + * struct scmi_raw_mode_info - Structure holding SCMI Raw instance data + * + * @id: Sequential Raw instance ID. + * @handle: Pointer to SCMI entity handle to use + * @desc: Pointer to the transport descriptor to use + * @tx_max_msg: Maximum number of concurrent TX in-flight messages + * @q: An array of Raw queue descriptors + * @chans_q: An XArray mapping optional additional per-channel queues + * @free_waiters: Head of freelist for unused waiters + * @free_mtx: A mutex to protect the waiters freelist + * @active_waiters: Head of list for currently active and used waiters + * @active_mtx: A mutex to protect the active waiters list + * @waiters_work: A work descriptor to be used with the workqueue machinery + * @wait_wq: A workqueue reference to the created workqueue + * @dentry: Top debugfs root dentry for SCMI Raw + * @gid: A group ID used for devres accounting + * + * Note that this descriptor is passed back to the core after SCMI Raw is + * initialized as an opaque handle to use by subsequent SCMI Raw call hooks. + * + */ +struct scmi_raw_mode_info { + unsigned int id; + const struct scmi_handle *handle; + const struct scmi_desc *desc; + int tx_max_msg; + struct scmi_raw_queue *q[SCMI_RAW_MAX_QUEUE]; + struct xarray chans_q; + struct list_head free_waiters; + /* Protect free_waiters list */ + struct mutex free_mtx; + struct list_head active_waiters; + /* Protect active_waiters list */ + struct mutex active_mtx; + struct work_struct waiters_work; + struct workqueue_struct *wait_wq; + struct dentry *dentry; + void *gid; +}; + +/** + * struct scmi_xfer_raw_waiter - Structure to describe an xfer to be waited for + * + * @start_jiffies: The timestamp in jiffies of when this structure was queued. + * @cinfo: A reference to the channel to use for this transaction + * @xfer: A reference to the xfer to be waited for + * @async_response: A completion to be, optionally, used for async waits: it + * will be setup by @scmi_do_xfer_raw_start, if needed, to be + * pointed at by xfer->async_done. + * @node: A list node. 
+ */ +struct scmi_xfer_raw_waiter { + unsigned long start_jiffies; + struct scmi_chan_info *cinfo; + struct scmi_xfer *xfer; + struct completion async_response; + struct list_head node; +}; + +/** + * struct scmi_raw_buffer - Structure to hold a full SCMI message + * + * @max_len: The maximum allowed message size (header included) that can be + * stored into @msg + * @msg: A message buffer used to collect a full message grabbed from an xfer. + * @node: A list node. + */ +struct scmi_raw_buffer { + size_t max_len; + struct scmi_msg msg; + struct list_head node; +}; + +/** + * struct scmi_dbg_raw_data - Structure holding data needed by the debugfs + * layer + * + * @chan_id: The preferred channel to use: if zero the channel is automatically + * selected based on protocol. + * @raw: A reference to the Raw instance. + * @tx: A message buffer used to collect TX message on write. + * @tx_size: The effective size of the TX message. + * @tx_req_size: The final expected size of the complete TX message. + * @rx: A message buffer to collect RX message on read. + * @rx_size: The effective size of the RX message. + */ +struct scmi_dbg_raw_data { + u8 chan_id; + struct scmi_raw_mode_info *raw; + struct scmi_msg tx; + size_t tx_size; + size_t tx_req_size; + struct scmi_msg rx; + size_t rx_size; +}; + +static struct scmi_raw_queue * +scmi_raw_queue_select(struct scmi_raw_mode_info *raw, unsigned int idx, + unsigned int chan_id) +{ + if (!chan_id) + return raw->q[idx]; + + return xa_load(&raw->chans_q, chan_id); +} + +static struct scmi_raw_buffer *scmi_raw_buffer_get(struct scmi_raw_queue *q) +{ + unsigned long flags; + struct scmi_raw_buffer *rb = NULL; + struct list_head *head = &q->free_bufs; + + spin_lock_irqsave(&q->free_bufs_lock, flags); + if (!list_empty(head)) { + rb = list_first_entry(head, struct scmi_raw_buffer, node); + list_del_init(&rb->node); + } + spin_unlock_irqrestore(&q->free_bufs_lock, flags); + + return rb; +} + +static void scmi_raw_buffer_put(struct scmi_raw_queue *q, + struct scmi_raw_buffer *rb) +{ + unsigned long flags; + + /* Reset to full buffer length */ + rb->msg.len = rb->max_len; + + spin_lock_irqsave(&q->free_bufs_lock, flags); + list_add_tail(&rb->node, &q->free_bufs); + spin_unlock_irqrestore(&q->free_bufs_lock, flags); +} + +static void scmi_raw_buffer_enqueue(struct scmi_raw_queue *q, + struct scmi_raw_buffer *rb) +{ + unsigned long flags; + + spin_lock_irqsave(&q->msg_q_lock, flags); + list_add_tail(&rb->node, &q->msg_q); + spin_unlock_irqrestore(&q->msg_q_lock, flags); + + wake_up_interruptible(&q->wq); +} + +static struct scmi_raw_buffer* +scmi_raw_buffer_dequeue_unlocked(struct scmi_raw_queue *q) +{ + struct scmi_raw_buffer *rb = NULL; + + if (!list_empty(&q->msg_q)) { + rb = list_first_entry(&q->msg_q, struct scmi_raw_buffer, node); + list_del_init(&rb->node); + } + + return rb; +} + +static struct scmi_raw_buffer *scmi_raw_buffer_dequeue(struct scmi_raw_queue *q) +{ + unsigned long flags; + struct scmi_raw_buffer *rb; + + spin_lock_irqsave(&q->msg_q_lock, flags); + rb = scmi_raw_buffer_dequeue_unlocked(q); + spin_unlock_irqrestore(&q->msg_q_lock, flags); + + return rb; +} + +static void scmi_raw_buffer_queue_flush(struct scmi_raw_queue *q) +{ + struct scmi_raw_buffer *rb; + + do { + rb = scmi_raw_buffer_dequeue(q); + if (rb) + scmi_raw_buffer_put(q, rb); + } while (rb); +} + +static struct scmi_xfer_raw_waiter * +scmi_xfer_raw_waiter_get(struct scmi_raw_mode_info *raw, struct scmi_xfer *xfer, + struct scmi_chan_info *cinfo, bool async) +{ + struct 
scmi_xfer_raw_waiter *rw = NULL; + + mutex_lock(&raw->free_mtx); + if (!list_empty(&raw->free_waiters)) { + rw = list_first_entry(&raw->free_waiters, + struct scmi_xfer_raw_waiter, node); + list_del_init(&rw->node); + + if (async) { + reinit_completion(&rw->async_response); + xfer->async_done = &rw->async_response; + } + + rw->cinfo = cinfo; + rw->xfer = xfer; + } + mutex_unlock(&raw->free_mtx); + + return rw; +} + +static void scmi_xfer_raw_waiter_put(struct scmi_raw_mode_info *raw, + struct scmi_xfer_raw_waiter *rw) +{ + if (rw->xfer) { + rw->xfer->async_done = NULL; + rw->xfer = NULL; + } + + mutex_lock(&raw->free_mtx); + list_add_tail(&rw->node, &raw->free_waiters); + mutex_unlock(&raw->free_mtx); +} + +static void scmi_xfer_raw_waiter_enqueue(struct scmi_raw_mode_info *raw, + struct scmi_xfer_raw_waiter *rw) +{ + /* A timestamp for the deferred worker to know how much this has aged */ + rw->start_jiffies = jiffies; + + trace_scmi_xfer_response_wait(rw->xfer->transfer_id, rw->xfer->hdr.id, + rw->xfer->hdr.protocol_id, + rw->xfer->hdr.seq, + raw->desc->max_rx_timeout_ms, + rw->xfer->hdr.poll_completion); + + mutex_lock(&raw->active_mtx); + list_add_tail(&rw->node, &raw->active_waiters); + mutex_unlock(&raw->active_mtx); + + /* kick waiter work */ + queue_work(raw->wait_wq, &raw->waiters_work); +} + +static struct scmi_xfer_raw_waiter * +scmi_xfer_raw_waiter_dequeue(struct scmi_raw_mode_info *raw) +{ + struct scmi_xfer_raw_waiter *rw = NULL; + + mutex_lock(&raw->active_mtx); + if (!list_empty(&raw->active_waiters)) { + rw = list_first_entry(&raw->active_waiters, + struct scmi_xfer_raw_waiter, node); + list_del_init(&rw->node); + } + mutex_unlock(&raw->active_mtx); + + return rw; +} + +/** + * scmi_xfer_raw_worker - Work function to wait for Raw xfers completions + * + * @work: A reference to the work. + * + * In SCMI Raw mode, once a user-provided injected SCMI message is sent, we + * cannot wait to receive its response (if any) in the context of the injection + * routines so as not to leave the userspace write syscall, which delivered the + * SCMI message to send, pending till eventually a reply is received. + * Userspace should and will poll/wait instead on the read syscalls which will + * be in charge of reading a received reply (if any). + * + * Even though reply messages are collected and reported into the SCMI Raw layer + * on the RX path, nonetheless we have to properly wait for their completion as + * usual (and async_completion too if needed) in order to properly release the + * xfer structure at the end: to do this out of the context of the write/send + * these waiting jobs are delegated to this deferred worker. + * + * Any sent xfer, to be waited for, is timestamped and queued for later + * consumption by this worker: queue aging is accounted for while choosing a + * timeout for the completion, BUT we do not really care here if we end up + * accidentally waiting for a bit too long. 
+ */ +static void scmi_xfer_raw_worker(struct work_struct *work) +{ + struct scmi_raw_mode_info *raw; + struct device *dev; + unsigned long max_tmo; + + raw = container_of(work, struct scmi_raw_mode_info, waiters_work); + dev = raw->handle->dev; + max_tmo = msecs_to_jiffies(raw->desc->max_rx_timeout_ms); + + do { + int ret = 0; + unsigned int timeout_ms; + unsigned long aging; + struct scmi_xfer *xfer; + struct scmi_xfer_raw_waiter *rw; + struct scmi_chan_info *cinfo; + + rw = scmi_xfer_raw_waiter_dequeue(raw); + if (!rw) + return; + + cinfo = rw->cinfo; + xfer = rw->xfer; + /* + * Waiters are queued by wait-deadline at the end, so some of + * them could have already expired when processed, BUT we + * have to check the completion status anyway just in case a + * virtually expired (aged) transaction was indeed completed + * fine and we'll have to wait for the asynchronous part (if + * any): for this reason a 1 ms timeout is used for already + * expired/aged xfers. + */ + aging = jiffies - rw->start_jiffies; + timeout_ms = max_tmo > aging ? + jiffies_to_msecs(max_tmo - aging) : 1; + + ret = scmi_xfer_raw_wait_for_message_response(cinfo, xfer, + timeout_ms); + if (!ret && xfer->hdr.status) + ret = scmi_to_linux_errno(xfer->hdr.status); + + if (raw->desc->ops->mark_txdone) + raw->desc->ops->mark_txdone(rw->cinfo, ret, xfer); + + trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id, + xfer->hdr.protocol_id, xfer->hdr.seq, ret); + + /* Wait also for an async delayed response if needed */ + if (!ret && xfer->async_done) { + unsigned long tmo = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT); + + if (!wait_for_completion_timeout(xfer->async_done, tmo)) + dev_err(dev, + "timed out in RAW delayed resp - HDR:%08X\n", + pack_scmi_header(&xfer->hdr)); + } + + /* Release waiter and xfer */ + scmi_xfer_raw_put(raw->handle, xfer); + scmi_xfer_raw_waiter_put(raw, rw); + } while (1); +} + +static void scmi_xfer_raw_reset(struct scmi_raw_mode_info *raw) +{ + int i; + + dev_info(raw->handle->dev, "Resetting SCMI Raw stack.\n"); + + for (i = 0; i < SCMI_RAW_MAX_QUEUE; i++) + scmi_raw_buffer_queue_flush(raw->q[i]); +} + +/** + * scmi_xfer_raw_get_init - A helper to build a valid xfer from the provided + * bare SCMI message. + * + * @raw: A reference to the Raw instance. + * @buf: A buffer containing the whole SCMI message to send (including the + * header) in little-endian binary format. + * @len: Length of the message in @buf. + * @p: A pointer to return the initialized Raw xfer. + * + * After an xfer is picked from the TX pool and filled in with the message + * content, the xfer is registered as pending with the core in the usual way + * using the original sequence number provided by the user with the message. + * + * Note that, in case the testing user application is NOT using distinct + * sequence numbers between successive SCMI messages, such registration could + * fail temporarily if the previous message, using the same sequence number, + * had still not been released; in such a case we just wait and retry.
+ * + * Return: 0 on Success + */ +static int scmi_xfer_raw_get_init(struct scmi_raw_mode_info *raw, void *buf, + size_t len, struct scmi_xfer **p) +{ + u32 msg_hdr; + size_t tx_size; + struct scmi_xfer *xfer; + int ret, retry = SCMI_XFER_RAW_MAX_RETRIES; + struct device *dev = raw->handle->dev; + + if (!buf || len < sizeof(u32)) + return -EINVAL; + + tx_size = len - sizeof(u32); + /* Ensure we have sane transfer sizes */ + if (tx_size > raw->desc->max_msg_size) + return -ERANGE; + + xfer = scmi_xfer_raw_get(raw->handle); + if (IS_ERR(xfer)) { + dev_warn(dev, "RAW - Cannot get a free RAW xfer !\n"); + return PTR_ERR(xfer); + } + + /* Build xfer from the provided SCMI bare LE message */ + msg_hdr = le32_to_cpu(*((__le32 *)buf)); + unpack_scmi_header(msg_hdr, &xfer->hdr); + xfer->hdr.seq = (u16)MSG_XTRACT_TOKEN(msg_hdr); + /* Polling not supported */ + xfer->hdr.poll_completion = false; + xfer->hdr.status = SCMI_SUCCESS; + xfer->tx.len = tx_size; + xfer->rx.len = raw->desc->max_msg_size; + /* Clear the whole TX buffer */ + memset(xfer->tx.buf, 0x00, raw->desc->max_msg_size); + if (xfer->tx.len) + memcpy(xfer->tx.buf, (u8 *)buf + sizeof(msg_hdr), xfer->tx.len); + *p = xfer; + + /* + * In-flight registration can temporarily fail in case of Raw messages + * if the user injects messages without using monotonically increasing + * sequence numbers since, in Raw mode, the xfer (and the token) is + * finally released later by a deferred worker. Just retry for a while. + */ + do { + ret = scmi_xfer_raw_inflight_register(raw->handle, xfer); + if (ret) { + dev_dbg(dev, + "...retrying[%d] inflight registration\n", + retry); + msleep(raw->desc->max_rx_timeout_ms / + SCMI_XFER_RAW_MAX_RETRIES); + } + } while (ret && --retry); + + if (ret) { + dev_warn(dev, + "RAW - Could NOT register xfer %d in-flight HDR:0x%08X\n", + xfer->hdr.seq, msg_hdr); + scmi_xfer_raw_put(raw->handle, xfer); + } + + return ret; +} + +/** + * scmi_do_xfer_raw_start - A helper to send a valid raw xfer + * + * @raw: A reference to the Raw instance. + * @xfer: The xfer to send + * @chan_id: The channel ID to use, if zero the channel is automatically + * selected based on the protocol used. + * @async: A flag stating if an asynchronous command is required. + * + * This function sends a previously built raw xfer using an appropriate channel + * and queues the related waiting work. + * + * Note that we need to know explicitly if the required command is meant to be + * asynchronous since we have to properly set up the waiter + * (deducing this from the payload is weak and does not scale given there is + * NOT a common header-flag stating if the command is asynchronous or not). + * + * Return: 0 on Success + */ +static int scmi_do_xfer_raw_start(struct scmi_raw_mode_info *raw, + struct scmi_xfer *xfer, u8 chan_id, + bool async) +{ + int ret; + struct scmi_chan_info *cinfo; + struct scmi_xfer_raw_waiter *rw; + struct device *dev = raw->handle->dev; + + if (!chan_id) + chan_id = xfer->hdr.protocol_id; + else + xfer->flags |= SCMI_XFER_FLAG_CHAN_SET; + + cinfo = scmi_xfer_raw_channel_get(raw->handle, chan_id); + if (IS_ERR(cinfo)) + return PTR_ERR(cinfo); + + rw = scmi_xfer_raw_waiter_get(raw, xfer, cinfo, async); + if (!rw) { + dev_warn(dev, "RAW - Cannot get a free waiter !\n"); + return -ENOMEM; + } + + /* True ONLY if also supported by transport.
*/ + if (is_polling_enabled(cinfo, raw->desc)) + xfer->hdr.poll_completion = true; + + reinit_completion(&xfer->done); + /* Make sure xfer state update is visible before sending */ + smp_store_mb(xfer->state, SCMI_XFER_SENT_OK); + + trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id, + xfer->hdr.protocol_id, xfer->hdr.seq, + xfer->hdr.poll_completion); + + ret = raw->desc->ops->send_message(rw->cinfo, xfer); + if (ret) { + dev_err(dev, "Failed to send RAW message %d\n", ret); + scmi_xfer_raw_waiter_put(raw, rw); + return ret; + } + + trace_scmi_msg_dump(raw->id, cinfo->id, xfer->hdr.protocol_id, + xfer->hdr.id, "cmnd", xfer->hdr.seq, + xfer->hdr.status, + xfer->tx.buf, xfer->tx.len); + + scmi_xfer_raw_waiter_enqueue(raw, rw); + + return ret; +} + +/** + * scmi_raw_message_send - An helper to build and send an SCMI command using + * the provided SCMI bare message buffer + * + * @raw: A reference to the Raw instance. + * @buf: A buffer containing the whole SCMI message to send (including the + * header) in little-endian binary format. + * @len: Length of the message in @buf. + * @chan_id: The channel ID to use. + * @async: A flag stating if an asynchronous command is required. + * + * Return: 0 on Success + */ +static int scmi_raw_message_send(struct scmi_raw_mode_info *raw, + void *buf, size_t len, u8 chan_id, bool async) +{ + int ret; + struct scmi_xfer *xfer; + + ret = scmi_xfer_raw_get_init(raw, buf, len, &xfer); + if (ret) + return ret; + + ret = scmi_do_xfer_raw_start(raw, xfer, chan_id, async); + if (ret) + scmi_xfer_raw_put(raw->handle, xfer); + + return ret; +} + +static struct scmi_raw_buffer * +scmi_raw_message_dequeue(struct scmi_raw_queue *q, bool o_nonblock) +{ + unsigned long flags; + struct scmi_raw_buffer *rb; + + spin_lock_irqsave(&q->msg_q_lock, flags); + while (list_empty(&q->msg_q)) { + spin_unlock_irqrestore(&q->msg_q_lock, flags); + + if (o_nonblock) + return ERR_PTR(-EAGAIN); + + if (wait_event_interruptible(q->wq, !list_empty(&q->msg_q))) + return ERR_PTR(-ERESTARTSYS); + + spin_lock_irqsave(&q->msg_q_lock, flags); + } + + rb = scmi_raw_buffer_dequeue_unlocked(q); + + spin_unlock_irqrestore(&q->msg_q_lock, flags); + + return rb; +} + +/** + * scmi_raw_message_receive - An helper to dequeue and report the next + * available enqueued raw message payload that has been collected. + * + * @raw: A reference to the Raw instance. + * @buf: A buffer to get hold of the whole SCMI message received and represented + * in little-endian binary format. + * @len: Length of @buf. + * @size: The effective size of the message copied into @buf + * @idx: The index of the queue to pick the next queued message from. + * @chan_id: The channel ID to use. + * @o_nonblock: A flag to request a non-blocking message dequeue. 
+ * + * Return: 0 on Success + */ +static int scmi_raw_message_receive(struct scmi_raw_mode_info *raw, + void *buf, size_t len, size_t *size, + unsigned int idx, unsigned int chan_id, + bool o_nonblock) +{ + int ret = 0; + struct scmi_raw_buffer *rb; + struct scmi_raw_queue *q; + + q = scmi_raw_queue_select(raw, idx, chan_id); + if (!q) + return -ENODEV; + + rb = scmi_raw_message_dequeue(q, o_nonblock); + if (IS_ERR(rb)) { + dev_dbg(raw->handle->dev, "RAW - No message available!\n"); + return PTR_ERR(rb); + } + + if (rb->msg.len <= len) { + memcpy(buf, rb->msg.buf, rb->msg.len); + *size = rb->msg.len; + } else { + ret = -ENOSPC; + } + + scmi_raw_buffer_put(q, rb); + + return ret; +} + +/* SCMI Raw debugfs helpers */ + +static ssize_t scmi_dbg_raw_mode_common_read(struct file *filp, + char __user *buf, + size_t count, loff_t *ppos, + unsigned int idx) +{ + ssize_t cnt; + struct scmi_dbg_raw_data *rd = filp->private_data; + + if (!rd->rx_size) { + int ret; + + ret = scmi_raw_message_receive(rd->raw, rd->rx.buf, rd->rx.len, + &rd->rx_size, idx, rd->chan_id, + filp->f_flags & O_NONBLOCK); + if (ret) { + rd->rx_size = 0; + return ret; + } + + /* Reset any previous filepos change, including writes */ + *ppos = 0; + } else if (*ppos == rd->rx_size) { + /* Return EOF once the whole message has been read out */ + rd->rx_size = 0; + return 0; + } + + cnt = simple_read_from_buffer(buf, count, ppos, + rd->rx.buf, rd->rx_size); + + return cnt; +} + +static ssize_t scmi_dbg_raw_mode_common_write(struct file *filp, + const char __user *buf, + size_t count, loff_t *ppos, + bool async) +{ + int ret; + struct scmi_dbg_raw_data *rd = filp->private_data; + + if (count > rd->tx.len - rd->tx_size) + return -ENOSPC; + + /* On first write attempt @count carries the total full message size. */ + if (!rd->tx_size) + rd->tx_req_size = count; + + /* + * Gather a full message, possibly across multiple interrupted writes, + * before sending it with a single RAW xfer. + */ + if (rd->tx_size < rd->tx_req_size) { + size_t cnt; + + cnt = simple_write_to_buffer(rd->tx.buf, rd->tx.len, ppos, + buf, count); + rd->tx_size += cnt; + if (cnt < count) + return cnt; + } + + ret = scmi_raw_message_send(rd->raw, rd->tx.buf, rd->tx_size, + rd->chan_id, async); + + /* Reset ppos for next message ...
*/ + rd->tx_size = 0; + *ppos = 0; + + return ret ?: count; +} + +static __poll_t scmi_test_dbg_raw_common_poll(struct file *filp, + struct poll_table_struct *wait, + unsigned int idx) +{ + unsigned long flags; + struct scmi_dbg_raw_data *rd = filp->private_data; + struct scmi_raw_queue *q; + __poll_t mask = 0; + + q = scmi_raw_queue_select(rd->raw, idx, rd->chan_id); + if (!q) + return mask; + + poll_wait(filp, &q->wq, wait); + + spin_lock_irqsave(&q->msg_q_lock, flags); + if (!list_empty(&q->msg_q)) + mask = EPOLLIN | EPOLLRDNORM; + spin_unlock_irqrestore(&q->msg_q_lock, flags); + + return mask; +} + +static ssize_t scmi_dbg_raw_mode_message_read(struct file *filp, + char __user *buf, + size_t count, loff_t *ppos) +{ + return scmi_dbg_raw_mode_common_read(filp, buf, count, ppos, + SCMI_RAW_REPLY_QUEUE); +} + +static ssize_t scmi_dbg_raw_mode_message_write(struct file *filp, + const char __user *buf, + size_t count, loff_t *ppos) +{ + return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos, false); +} + +static __poll_t scmi_dbg_raw_mode_message_poll(struct file *filp, + struct poll_table_struct *wait) +{ + return scmi_test_dbg_raw_common_poll(filp, wait, SCMI_RAW_REPLY_QUEUE); +} + +static int scmi_dbg_raw_mode_open(struct inode *inode, struct file *filp) +{ + u8 id; + struct scmi_raw_mode_info *raw; + struct scmi_dbg_raw_data *rd; + const char *id_str = filp->f_path.dentry->d_parent->d_name.name; + + if (!inode->i_private) + return -ENODEV; + + raw = inode->i_private; + rd = kzalloc(sizeof(*rd), GFP_KERNEL); + if (!rd) + return -ENOMEM; + + rd->rx.len = raw->desc->max_msg_size + sizeof(u32); + rd->rx.buf = kzalloc(rd->rx.len, GFP_KERNEL); + if (!rd->rx.buf) { + kfree(rd); + return -ENOMEM; + } + + rd->tx.len = raw->desc->max_msg_size + sizeof(u32); + rd->tx.buf = kzalloc(rd->tx.len, GFP_KERNEL); + if (!rd->tx.buf) { + kfree(rd->rx.buf); + kfree(rd); + return -ENOMEM; + } + + /* Grab channel ID from debugfs entry naming if any */ + if (!kstrtou8(id_str, 16, &id)) + rd->chan_id = id; + + rd->raw = raw; + filp->private_data = rd; + + return 0; +} + +static int scmi_dbg_raw_mode_release(struct inode *inode, struct file *filp) +{ + struct scmi_dbg_raw_data *rd = filp->private_data; + + kfree(rd->rx.buf); + kfree(rd->tx.buf); + kfree(rd); + + return 0; +} + +static ssize_t scmi_dbg_raw_mode_reset_write(struct file *filp, + const char __user *buf, + size_t count, loff_t *ppos) +{ + struct scmi_dbg_raw_data *rd = filp->private_data; + + scmi_xfer_raw_reset(rd->raw); + + return count; +} + +static const struct file_operations scmi_dbg_raw_mode_reset_fops = { + .open = scmi_dbg_raw_mode_open, + .release = scmi_dbg_raw_mode_release, + .write = scmi_dbg_raw_mode_reset_write, + .owner = THIS_MODULE, +}; + +static const struct file_operations scmi_dbg_raw_mode_message_fops = { + .open = scmi_dbg_raw_mode_open, + .release = scmi_dbg_raw_mode_release, + .read = scmi_dbg_raw_mode_message_read, + .write = scmi_dbg_raw_mode_message_write, + .poll = scmi_dbg_raw_mode_message_poll, + .owner = THIS_MODULE, +}; + +static ssize_t scmi_dbg_raw_mode_message_async_write(struct file *filp, + const char __user *buf, + size_t count, loff_t *ppos) +{ + return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos, true); +} + +static const struct file_operations scmi_dbg_raw_mode_message_async_fops = { + .open = scmi_dbg_raw_mode_open, + .release = scmi_dbg_raw_mode_release, + .read = scmi_dbg_raw_mode_message_read, + .write = scmi_dbg_raw_mode_message_async_write, + .poll = scmi_dbg_raw_mode_message_poll, + 
.owner = THIS_MODULE, +}; + +static ssize_t scmi_test_dbg_raw_mode_notif_read(struct file *filp, + char __user *buf, + size_t count, loff_t *ppos) +{ + return scmi_dbg_raw_mode_common_read(filp, buf, count, ppos, + SCMI_RAW_NOTIF_QUEUE); +} + +static __poll_t +scmi_test_dbg_raw_mode_notif_poll(struct file *filp, + struct poll_table_struct *wait) +{ + return scmi_test_dbg_raw_common_poll(filp, wait, SCMI_RAW_NOTIF_QUEUE); +} + +static const struct file_operations scmi_dbg_raw_mode_notification_fops = { + .open = scmi_dbg_raw_mode_open, + .release = scmi_dbg_raw_mode_release, + .read = scmi_test_dbg_raw_mode_notif_read, + .poll = scmi_test_dbg_raw_mode_notif_poll, + .owner = THIS_MODULE, +}; + +static ssize_t scmi_test_dbg_raw_mode_errors_read(struct file *filp, + char __user *buf, + size_t count, loff_t *ppos) +{ + return scmi_dbg_raw_mode_common_read(filp, buf, count, ppos, + SCMI_RAW_ERRS_QUEUE); +} + +static __poll_t +scmi_test_dbg_raw_mode_errors_poll(struct file *filp, + struct poll_table_struct *wait) +{ + return scmi_test_dbg_raw_common_poll(filp, wait, SCMI_RAW_ERRS_QUEUE); +} + +static const struct file_operations scmi_dbg_raw_mode_errors_fops = { + .open = scmi_dbg_raw_mode_open, + .release = scmi_dbg_raw_mode_release, + .read = scmi_test_dbg_raw_mode_errors_read, + .poll = scmi_test_dbg_raw_mode_errors_poll, + .owner = THIS_MODULE, +}; + +static struct scmi_raw_queue * +scmi_raw_queue_init(struct scmi_raw_mode_info *raw) +{ + int i; + struct scmi_raw_buffer *rb; + struct device *dev = raw->handle->dev; + struct scmi_raw_queue *q; + + q = devm_kzalloc(dev, sizeof(*q), GFP_KERNEL); + if (!q) + return ERR_PTR(-ENOMEM); + + rb = devm_kcalloc(dev, raw->tx_max_msg, sizeof(*rb), GFP_KERNEL); + if (!rb) + return ERR_PTR(-ENOMEM); + + spin_lock_init(&q->free_bufs_lock); + INIT_LIST_HEAD(&q->free_bufs); + for (i = 0; i < raw->tx_max_msg; i++, rb++) { + rb->max_len = raw->desc->max_msg_size + sizeof(u32); + rb->msg.buf = devm_kzalloc(dev, rb->max_len, GFP_KERNEL); + if (!rb->msg.buf) + return ERR_PTR(-ENOMEM); + scmi_raw_buffer_put(q, rb); + } + + spin_lock_init(&q->msg_q_lock); + INIT_LIST_HEAD(&q->msg_q); + init_waitqueue_head(&q->wq); + + return q; +} + +static int scmi_xfer_raw_worker_init(struct scmi_raw_mode_info *raw) +{ + int i; + struct scmi_xfer_raw_waiter *rw; + struct device *dev = raw->handle->dev; + + rw = devm_kcalloc(dev, raw->tx_max_msg, sizeof(*rw), GFP_KERNEL); + if (!rw) + return -ENOMEM; + + raw->wait_wq = alloc_workqueue("scmi-raw-wait-wq-%d", + WQ_UNBOUND | WQ_FREEZABLE | + WQ_HIGHPRI, WQ_SYSFS, raw->id); + if (!raw->wait_wq) + return -ENOMEM; + + mutex_init(&raw->free_mtx); + INIT_LIST_HEAD(&raw->free_waiters); + mutex_init(&raw->active_mtx); + INIT_LIST_HEAD(&raw->active_waiters); + + for (i = 0; i < raw->tx_max_msg; i++, rw++) { + init_completion(&rw->async_response); + scmi_xfer_raw_waiter_put(raw, rw); + } + INIT_WORK(&raw->waiters_work, scmi_xfer_raw_worker); + + return 0; +} + +static int scmi_raw_mode_setup(struct scmi_raw_mode_info *raw, + u8 *channels, int num_chans) +{ + int ret, idx; + void *gid; + struct device *dev = raw->handle->dev; + + gid = devres_open_group(dev, NULL, GFP_KERNEL); + if (!gid) + return -ENOMEM; + + for (idx = 0; idx < SCMI_RAW_MAX_QUEUE; idx++) { + raw->q[idx] = scmi_raw_queue_init(raw); + if (IS_ERR(raw->q[idx])) { + ret = PTR_ERR(raw->q[idx]); + goto err; + } + } + + xa_init(&raw->chans_q); + if (num_chans > 1) { + int i; + + for (i = 0; i < num_chans; i++) { + void *xret; + struct scmi_raw_queue *q; + + q = 
scmi_raw_queue_init(raw); + if (IS_ERR(q)) { + ret = PTR_ERR(q); + goto err_xa; + } + + xret = xa_store(&raw->chans_q, channels[i], q, + GFP_KERNEL); + if (xa_err(xret)) { + dev_err(dev, + "Fail to allocate Raw queue 0x%02X\n", + channels[i]); + ret = xa_err(xret); + goto err_xa; + } + } + } + + ret = scmi_xfer_raw_worker_init(raw); + if (ret) + goto err_xa; + + devres_close_group(dev, gid); + raw->gid = gid; + + return 0; + +err_xa: + xa_destroy(&raw->chans_q); +err: + devres_release_group(dev, gid); + return ret; +} + +/** + * scmi_raw_mode_init - Function to initialize the SCMI Raw stack + * + * @handle: Pointer to SCMI entity handle + * @top_dentry: A reference to the top Raw debugfs dentry + * @instance_id: The ID of the underlying SCMI platform instance represented by + * this Raw instance + * @channels: The list of the existing channels + * @num_chans: The number of entries in @channels + * @desc: Reference to the transport operations + * @tx_max_msg: Max number of in-flight messages allowed by the transport + * + * This function prepare the SCMI Raw stack and creates the debugfs API. + * + * Return: An opaque handle to the Raw instance on Success, an ERR_PTR otherwise + */ +void *scmi_raw_mode_init(const struct scmi_handle *handle, + struct dentry *top_dentry, int instance_id, + u8 *channels, int num_chans, + const struct scmi_desc *desc, int tx_max_msg) +{ + int ret; + struct scmi_raw_mode_info *raw; + struct device *dev; + + if (!handle || !desc) + return ERR_PTR(-EINVAL); + + dev = handle->dev; + raw = devm_kzalloc(dev, sizeof(*raw), GFP_KERNEL); + if (!raw) + return ERR_PTR(-ENOMEM); + + raw->handle = handle; + raw->desc = desc; + raw->tx_max_msg = tx_max_msg; + raw->id = instance_id; + + ret = scmi_raw_mode_setup(raw, channels, num_chans); + if (ret) { + devm_kfree(dev, raw); + return ERR_PTR(ret); + } + + raw->dentry = debugfs_create_dir("raw", top_dentry); + + debugfs_create_file("reset", 0200, raw->dentry, raw, + &scmi_dbg_raw_mode_reset_fops); + + debugfs_create_file("message", 0600, raw->dentry, raw, + &scmi_dbg_raw_mode_message_fops); + + debugfs_create_file("message_async", 0600, raw->dentry, raw, + &scmi_dbg_raw_mode_message_async_fops); + + debugfs_create_file("notification", 0400, raw->dentry, raw, + &scmi_dbg_raw_mode_notification_fops); + + debugfs_create_file("errors", 0400, raw->dentry, raw, + &scmi_dbg_raw_mode_errors_fops); + + /* + * Expose per-channel entries if multiple channels available. + * Just ignore errors while setting up these interfaces since we + * have anyway already a working core Raw support. 
+ */ + if (num_chans > 1) { + int i; + struct dentry *top_chans; + + top_chans = debugfs_create_dir("channels", raw->dentry); + + for (i = 0; i < num_chans; i++) { + char cdir[8]; + struct dentry *chd; + + snprintf(cdir, 8, "0x%02X", channels[i]); + chd = debugfs_create_dir(cdir, top_chans); + + debugfs_create_file("message", 0600, chd, raw, + &scmi_dbg_raw_mode_message_fops); + + debugfs_create_file("message_async", 0600, chd, raw, + &scmi_dbg_raw_mode_message_async_fops); + } + } + + dev_info(dev, "SCMI RAW Mode initialized for instance %d\n", raw->id); + + return raw; +} + +/** + * scmi_raw_mode_cleanup - Function to clean up the SCMI Raw stack + * + * @r: An opaque handle to an initialized SCMI Raw instance + */ +void scmi_raw_mode_cleanup(void *r) +{ + struct scmi_raw_mode_info *raw = r; + + if (!raw) + return; + + debugfs_remove_recursive(raw->dentry); + + cancel_work_sync(&raw->waiters_work); + destroy_workqueue(raw->wait_wq); + xa_destroy(&raw->chans_q); +} + +static int scmi_xfer_raw_collect(void *msg, size_t *msg_len, + struct scmi_xfer *xfer) +{ + __le32 *m; + size_t msg_size; + + if (!xfer || !msg || !msg_len) + return -EINVAL; + + /* Account for hdr ...*/ + msg_size = xfer->rx.len + sizeof(u32); + /* ... and status if needed */ + if (xfer->hdr.type != MSG_TYPE_NOTIFICATION) + msg_size += sizeof(u32); + + if (msg_size > *msg_len) + return -ENOSPC; + + m = msg; + *m = cpu_to_le32(pack_scmi_header(&xfer->hdr)); + if (xfer->hdr.type != MSG_TYPE_NOTIFICATION) + *++m = cpu_to_le32(xfer->hdr.status); + + memcpy(++m, xfer->rx.buf, xfer->rx.len); + + *msg_len = msg_size; + + return 0; +} + +/** + * scmi_raw_message_report - Helper to report back valid responses/notifications + * to raw message requests. + * + * @r: An opaque reference to the raw instance configuration + * @xfer: The xfer containing the message to be reported + * @idx: The index of the queue. + * @chan_id: The channel ID to use. + * + * If Raw mode is enabled, this is called from the SCMI core on the regular RX + * path to save and enqueue the response/notification payload carried by this + * xfer into a dedicated scmi_raw_buffer for later consumption by the user. + * + * This way the caller can free the related xfer immediately afterwards and the + * user can read back the raw message payload at its own pace (if ever) without + * holding an xfer for too long. + */ +void scmi_raw_message_report(void *r, struct scmi_xfer *xfer, + unsigned int idx, unsigned int chan_id) +{ + int ret; + unsigned long flags; + struct scmi_raw_buffer *rb; + struct device *dev; + struct scmi_raw_queue *q; + struct scmi_raw_mode_info *raw = r; + + if (!raw || (idx == SCMI_RAW_REPLY_QUEUE && !SCMI_XFER_IS_RAW(xfer))) + return; + + dev = raw->handle->dev; + q = scmi_raw_queue_select(raw, idx, + SCMI_XFER_IS_CHAN_SET(xfer) ? chan_id : 0); + + /* + * Grab the msg_q_lock upfront to avoid a possible race between + * realizing the free list was empty and effectively picking the next + * buffer to use from the oldest one enqueued and still unread on this + * msg_q. + * + * Note that nowhere else are these locks taken together, so there is + * no risk of deadlocks due to inversion. + */ + spin_lock_irqsave(&q->msg_q_lock, flags); + rb = scmi_raw_buffer_get(q); + if (!rb) { + /* + * Immediate and delayed replies to previously injected Raw + * commands MUST be read back from userspace to free the buffers: + * if this is not happening something is seriously broken and + * must be fixed at the application level: complain loudly.
+ */ + if (idx == SCMI_RAW_REPLY_QUEUE) { + spin_unlock_irqrestore(&q->msg_q_lock, flags); + dev_warn(dev, + "RAW[%d] - Buffers exhausted. Dropping report.\n", + idx); + return; + } + + /* + * Notifications and errors queues are instead handled in a + * circular manner: unread old buffers are just overwritten by + * newer ones. + * + * The main reason for this is that notifications originated + * by Raw requests cannot be distinguished from normal ones, so + * your Raw buffers queues risk to be flooded and depleted by + * notifications if you left it mistakenly enabled or when in + * coexistence mode. + */ + rb = scmi_raw_buffer_dequeue_unlocked(q); + if (WARN_ON(!rb)) { + spin_unlock_irqrestore(&q->msg_q_lock, flags); + return; + } + + /* Reset to full buffer length */ + rb->msg.len = rb->max_len; + + dev_warn_once(dev, + "RAW[%d] - Buffers exhausted. Re-using oldest.\n", + idx); + } + spin_unlock_irqrestore(&q->msg_q_lock, flags); + + ret = scmi_xfer_raw_collect(rb->msg.buf, &rb->msg.len, xfer); + if (ret) { + dev_warn(dev, "RAW - Cannot collect xfer into buffer !\n"); + scmi_raw_buffer_put(q, rb); + return; + } + + scmi_raw_buffer_enqueue(q, rb); +} + +static void scmi_xfer_raw_fill(struct scmi_raw_mode_info *raw, + struct scmi_chan_info *cinfo, + struct scmi_xfer *xfer, u32 msg_hdr) +{ + /* Unpack received HDR as it is */ + unpack_scmi_header(msg_hdr, &xfer->hdr); + xfer->hdr.seq = MSG_XTRACT_TOKEN(msg_hdr); + + memset(xfer->rx.buf, 0x00, xfer->rx.len); + + raw->desc->ops->fetch_response(cinfo, xfer); +} + +/** + * scmi_raw_error_report - Helper to report back timed-out or generally + * unexpected replies. + * + * @r: An opaque reference to the raw instance configuration + * @cinfo: A reference to the channel to use to retrieve the broken xfer + * @msg_hdr: The SCMI message header of the message to fetch and report + * @priv: Any private data related to the xfer. + * + * If Raw mode is enabled, this is called from the SCMI core on the RX path in + * case of errors to save and enqueue the bad message payload carried by the + * message that has just been received. + * + * Note that we have to manually fetch any available payload into a temporary + * xfer to be able to save and enqueue the message, since the regular RX error + * path which had called this would have not fetched the message payload having + * classified it as an error. + */ +void scmi_raw_error_report(void *r, struct scmi_chan_info *cinfo, + u32 msg_hdr, void *priv) +{ + struct scmi_xfer xfer; + struct scmi_raw_mode_info *raw = r; + + if (!raw) + return; + + xfer.rx.len = raw->desc->max_msg_size; + xfer.rx.buf = kzalloc(xfer.rx.len, GFP_ATOMIC); + if (!xfer.rx.buf) { + dev_info(raw->handle->dev, + "Cannot report Raw error for HDR:0x%X - ENOMEM\n", + msg_hdr); + return; + } + + /* Any transport-provided priv must be passed back down to transport */ + if (priv) + /* Ensure priv is visible */ + smp_store_mb(xfer.priv, priv); + + scmi_xfer_raw_fill(raw, cinfo, &xfer, msg_hdr); + scmi_raw_message_report(raw, &xfer, SCMI_RAW_ERRS_QUEUE, 0); + + kfree(xfer.rx.buf); +} diff --git a/drivers/firmware/arm_scmi/raw_mode.h b/drivers/firmware/arm_scmi/raw_mode.h new file mode 100644 index 000000000000..8af756a83fd1 --- /dev/null +++ b/drivers/firmware/arm_scmi/raw_mode.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * System Control and Management Interface (SCMI) Message Protocol + * Raw mode support header. + * + * Copyright (C) 2022 ARM Ltd. 
+ */ +#ifndef _SCMI_RAW_MODE_H +#define _SCMI_RAW_MODE_H + +#include "common.h" + +enum { + SCMI_RAW_REPLY_QUEUE, + SCMI_RAW_NOTIF_QUEUE, + SCMI_RAW_ERRS_QUEUE, + SCMI_RAW_MAX_QUEUE +}; + +void *scmi_raw_mode_init(const struct scmi_handle *handle, + struct dentry *top_dentry, int instance_id, + u8 *channels, int num_chans, + const struct scmi_desc *desc, int tx_max_msg); +void scmi_raw_mode_cleanup(void *raw); + +void scmi_raw_message_report(void *raw, struct scmi_xfer *xfer, + unsigned int idx, unsigned int chan_id); +void scmi_raw_error_report(void *raw, struct scmi_chan_info *cinfo, + u32 msg_hdr, void *priv); + +#endif /* _SCMI_RAW_MODE_H */ diff --git a/drivers/firmware/arm_scmi/shmem.c b/drivers/firmware/arm_scmi/shmem.c index 1dfe534b8518..87b4f4d35f06 100644 --- a/drivers/firmware/arm_scmi/shmem.c +++ b/drivers/firmware/arm_scmi/shmem.c @@ -81,10 +81,11 @@ u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem) void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem, struct scmi_xfer *xfer) { + size_t len = ioread32(&shmem->length); + xfer->hdr.status = ioread32(shmem->msg_payload); /* Skip the length of header and status in shmem area i.e 8 bytes */ - xfer->rx.len = min_t(size_t, xfer->rx.len, - ioread32(&shmem->length) - 8); + xfer->rx.len = min_t(size_t, xfer->rx.len, len > 8 ? len - 8 : 0); /* Take a copy to the rx buffer.. */ memcpy_fromio(xfer->rx.buf, shmem->msg_payload + 4, xfer->rx.len); @@ -93,8 +94,10 @@ void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem, void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem, size_t max_len, struct scmi_xfer *xfer) { + size_t len = ioread32(&shmem->length); + /* Skip only the length of header in shmem area i.e 4 bytes */ - xfer->rx.len = min_t(size_t, max_len, ioread32(&shmem->length) - 4); + xfer->rx.len = min_t(size_t, max_len, len > 4 ? len - 4 : 0); /* Take a copy to the rx buffer.. 
*/ memcpy_fromio(xfer->rx.buf, shmem->msg_payload, xfer->rx.len); diff --git a/drivers/firmware/arm_scmi/smc.c b/drivers/firmware/arm_scmi/smc.c index 87a7b13cf868..93272e4bbd12 100644 --- a/drivers/firmware/arm_scmi/smc.c +++ b/drivers/firmware/arm_scmi/smc.c @@ -52,9 +52,9 @@ static irqreturn_t smc_msg_done_isr(int irq, void *data) return IRQ_HANDLED; } -static bool smc_chan_available(struct device *dev, int idx) +static bool smc_chan_available(struct device_node *of_node, int idx) { - struct device_node *np = of_parse_phandle(dev->of_node, "shmem", 0); + struct device_node *np = of_parse_phandle(of_node, "shmem", 0); if (!np) return false; @@ -171,8 +171,6 @@ static int smc_chan_free(int id, void *p, void *data) cinfo->transport_info = NULL; scmi_info->cinfo = NULL; - scmi_free_channel(cinfo, data, id); - return 0; } diff --git a/drivers/firmware/arm_scmi/virtio.c b/drivers/firmware/arm_scmi/virtio.c index 33c9b81a55cd..d68c01cb7aa0 100644 --- a/drivers/firmware/arm_scmi/virtio.c +++ b/drivers/firmware/arm_scmi/virtio.c @@ -160,7 +160,6 @@ static void scmi_vio_channel_cleanup_sync(struct scmi_vio_channel *vioch) } vioch->shutdown_done = &vioch_shutdown_done; - virtio_break_device(vioch->vqueue->vdev); if (!vioch->is_rx && vioch->deferred_tx_wq) /* Cannot be kicked anymore after this...*/ vioch->deferred_tx_wq = NULL; @@ -386,7 +385,7 @@ static int virtio_link_supplier(struct device *dev) return 0; } -static bool virtio_chan_available(struct device *dev, int idx) +static bool virtio_chan_available(struct device_node *of_node, int idx) { struct scmi_vio_channel *channels, *vioch = NULL; @@ -482,10 +481,14 @@ static int virtio_chan_free(int id, void *p, void *data) struct scmi_chan_info *cinfo = p; struct scmi_vio_channel *vioch = cinfo->transport_info; + /* + * Break device to inhibit further traffic flowing while shutting down + * the channels: doing it later holding vioch->lock creates unsafe + * locking dependency chains as reported by LOCKDEP. + */ + virtio_break_device(vioch->vqueue->vdev); scmi_vio_channel_cleanup_sync(vioch); - scmi_free_channel(cinfo, data, id); - return 0; } diff --git a/drivers/firmware/meson/meson_sm.c b/drivers/firmware/meson/meson_sm.c index 77aa5c6398aa..3f5ff9ed668e 100644 --- a/drivers/firmware/meson/meson_sm.c +++ b/drivers/firmware/meson/meson_sm.c @@ -82,7 +82,7 @@ static void __iomem *meson_sm_map_shmem(u32 cmd_shmem, unsigned int size) sm_phy_base = __meson_sm_call(cmd_shmem, 0, 0, 0, 0, 0); if (!sm_phy_base) - return 0; + return NULL; return ioremap_cache(sm_phy_base, size); } diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c index 129f68d7a6f5..acd83d29c866 100644 --- a/drivers/firmware/xilinx/zynqmp.c +++ b/drivers/firmware/xilinx/zynqmp.c @@ -738,8 +738,31 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_get_pll_frac_data); */ int zynqmp_pm_set_sd_tapdelay(u32 node_id, u32 type, u32 value) { - return zynqmp_pm_invoke_fn(PM_IOCTL, node_id, IOCTL_SET_SD_TAPDELAY, - type, value, NULL); + u32 reg = (type == PM_TAPDELAY_INPUT) ? SD_ITAPDLY : SD_OTAPDLYSEL; + u32 mask = (node_id == NODE_SD_0) ? GENMASK(15, 0) : GENMASK(31, 16); + + if (value) { + return zynqmp_pm_invoke_fn(PM_IOCTL, node_id, + IOCTL_SET_SD_TAPDELAY, + type, value, NULL); + } + + /* + * Work around completely misdesigned firmware API on Xilinx ZynqMP. + * The IOCTL_SET_SD_TAPDELAY firmware call allows the caller to only + * ever set IOU_SLCR SD_ITAPDLY Register SD0_ITAPDLYENA/SD1_ITAPDLYENA + * bits, but there is no matching call to clear those bits. 
If those + * bits are not cleared, SDMMC tuning may fail. + * + * Luckily, there are PM_MMIO_READ/PM_MMIO_WRITE calls which seem to + * allow complete unrestricted access to all address space, including + * IOU_SLCR SD_ITAPDLY Register and all the other registers, access + * to which was supposed to be protected by the current firmware API. + * + * Use PM_MMIO_READ/PM_MMIO_WRITE to re-implement the missing counter + * part of IOCTL_SET_SD_TAPDELAY which clears SDx_ITAPDLYENA bits. + */ + return zynqmp_pm_invoke_fn(PM_MMIO_WRITE, reg, mask, 0, 0, NULL); } EXPORT_SYMBOL_GPL(zynqmp_pm_set_sd_tapdelay); diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c index 8d722e026e9c..84352a6f4973 100644 --- a/drivers/gpio/gpio-eic-sprd.c +++ b/drivers/gpio/gpio-eic-sprd.c @@ -91,7 +91,6 @@ enum sprd_eic_type { struct sprd_eic { struct gpio_chip chip; - struct irq_chip intc; void __iomem *base[SPRD_EIC_MAX_BANK]; enum sprd_eic_type type; spinlock_t lock; @@ -255,6 +254,8 @@ static void sprd_eic_irq_mask(struct irq_data *data) default: dev_err(chip->parent, "Unsupported EIC type.\n"); } + + gpiochip_disable_irq(chip, offset); } static void sprd_eic_irq_unmask(struct irq_data *data) @@ -263,6 +264,8 @@ static void sprd_eic_irq_unmask(struct irq_data *data) struct sprd_eic *sprd_eic = gpiochip_get_data(chip); u32 offset = irqd_to_hwirq(data); + gpiochip_enable_irq(chip, offset); + switch (sprd_eic->type) { case SPRD_EIC_DEBOUNCE: sprd_eic_update(chip, offset, SPRD_EIC_DBNC_IE, 1); @@ -564,6 +567,15 @@ static void sprd_eic_irq_handler(struct irq_desc *desc) chained_irq_exit(ic, desc); } +static const struct irq_chip sprd_eic_irq = { + .name = "sprd-eic", + .irq_ack = sprd_eic_irq_ack, + .irq_mask = sprd_eic_irq_mask, + .irq_unmask = sprd_eic_irq_unmask, + .irq_set_type = sprd_eic_irq_set_type, + .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_IMMUTABLE, + GPIOCHIP_IRQ_RESOURCE_HELPERS, +}; static int sprd_eic_probe(struct platform_device *pdev) { const struct sprd_eic_variant_data *pdata; @@ -626,15 +638,8 @@ static int sprd_eic_probe(struct platform_device *pdev) break; } - sprd_eic->intc.name = dev_name(&pdev->dev); - sprd_eic->intc.irq_ack = sprd_eic_irq_ack; - sprd_eic->intc.irq_mask = sprd_eic_irq_mask; - sprd_eic->intc.irq_unmask = sprd_eic_irq_unmask; - sprd_eic->intc.irq_set_type = sprd_eic_irq_set_type; - sprd_eic->intc.flags = IRQCHIP_SKIP_SET_WAKE; - irq = &sprd_eic->chip.irq; - irq->chip = &sprd_eic->intc; + gpio_irq_chip_set_chip(irq, &sprd_eic_irq); irq->handler = handle_bad_irq; irq->default_type = IRQ_TYPE_NONE; irq->parent_handler = sprd_eic_irq_handler; diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c index a59d61cd44b2..5299e5bb76d6 100644 --- a/drivers/gpio/gpio-pca953x.c +++ b/drivers/gpio/gpio-pca953x.c @@ -474,6 +474,9 @@ static u8 pcal6534_recalc_addr(struct pca953x_chip *chip, int reg, int off) case PCAL6524_DEBOUNCE: pinctrl = ((reg & PCAL_PINCTRL_MASK) >> 1) + 0x1c; break; + default: + pinctrl = 0; + break; } return pinctrl + addr + (off / BANK_SZ); diff --git a/drivers/gpio/gpio-pmic-eic-sprd.c b/drivers/gpio/gpio-pmic-eic-sprd.c index e518490c4b68..c3e4d90f6b18 100644 --- a/drivers/gpio/gpio-pmic-eic-sprd.c +++ b/drivers/gpio/gpio-pmic-eic-sprd.c @@ -47,7 +47,6 @@ enum { /** * struct sprd_pmic_eic - PMIC EIC controller * @chip: the gpio_chip structure. - * @intc: the irq_chip structure. * @map: the regmap from the parent device. * @offset: the EIC controller's offset address of the PMIC. * @reg: the array to cache the EIC registers. 
@@ -56,7 +55,6 @@ enum { */ struct sprd_pmic_eic { struct gpio_chip chip; - struct irq_chip intc; struct regmap *map; u32 offset; u8 reg[CACHE_NR_REGS]; @@ -151,15 +149,21 @@ static void sprd_pmic_eic_irq_mask(struct irq_data *data) { struct gpio_chip *chip = irq_data_get_irq_chip_data(data); struct sprd_pmic_eic *pmic_eic = gpiochip_get_data(chip); + u32 offset = irqd_to_hwirq(data); pmic_eic->reg[REG_IE] = 0; pmic_eic->reg[REG_TRIG] = 0; + + gpiochip_disable_irq(chip, offset); } static void sprd_pmic_eic_irq_unmask(struct irq_data *data) { struct gpio_chip *chip = irq_data_get_irq_chip_data(data); struct sprd_pmic_eic *pmic_eic = gpiochip_get_data(chip); + u32 offset = irqd_to_hwirq(data); + + gpiochip_enable_irq(chip, offset); pmic_eic->reg[REG_IE] = 1; pmic_eic->reg[REG_TRIG] = 1; @@ -292,6 +296,17 @@ static irqreturn_t sprd_pmic_eic_irq_handler(int irq, void *data) return IRQ_HANDLED; } +static const struct irq_chip pmic_eic_irq_chip = { + .name = "sprd-pmic-eic", + .irq_mask = sprd_pmic_eic_irq_mask, + .irq_unmask = sprd_pmic_eic_irq_unmask, + .irq_set_type = sprd_pmic_eic_irq_set_type, + .irq_bus_lock = sprd_pmic_eic_bus_lock, + .irq_bus_sync_unlock = sprd_pmic_eic_bus_sync_unlock, + .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_IMMUTABLE, + GPIOCHIP_IRQ_RESOURCE_HELPERS, +}; + static int sprd_pmic_eic_probe(struct platform_device *pdev) { struct gpio_irq_chip *irq; @@ -338,16 +353,8 @@ static int sprd_pmic_eic_probe(struct platform_device *pdev) pmic_eic->chip.set = sprd_pmic_eic_set; pmic_eic->chip.get = sprd_pmic_eic_get; - pmic_eic->intc.name = dev_name(&pdev->dev); - pmic_eic->intc.irq_mask = sprd_pmic_eic_irq_mask; - pmic_eic->intc.irq_unmask = sprd_pmic_eic_irq_unmask; - pmic_eic->intc.irq_set_type = sprd_pmic_eic_irq_set_type; - pmic_eic->intc.irq_bus_lock = sprd_pmic_eic_bus_lock; - pmic_eic->intc.irq_bus_sync_unlock = sprd_pmic_eic_bus_sync_unlock; - pmic_eic->intc.flags = IRQCHIP_SKIP_SET_WAKE; - irq = &pmic_eic->chip.irq; - irq->chip = &pmic_eic->intc; + gpio_irq_chip_set_chip(irq, &pmic_eic_irq_chip); irq->threaded = true; ret = devm_gpiochip_add_data(&pdev->dev, &pmic_eic->chip, pmic_eic); diff --git a/drivers/gpio/gpio-sifive.c b/drivers/gpio/gpio-sifive.c index 238f3210970c..bc5660f61c57 100644 --- a/drivers/gpio/gpio-sifive.c +++ b/drivers/gpio/gpio-sifive.c @@ -215,6 +215,7 @@ static int sifive_gpio_probe(struct platform_device *pdev) return -ENODEV; } parent = irq_find_host(irq_parent); + of_node_put(irq_parent); if (!parent) { dev_err(dev, "no IRQ parent domain\n"); return -ENODEV; diff --git a/drivers/gpio/gpio-sprd.c b/drivers/gpio/gpio-sprd.c index 9bff63990eee..072b4e653216 100644 --- a/drivers/gpio/gpio-sprd.c +++ b/drivers/gpio/gpio-sprd.c @@ -120,6 +120,7 @@ static void sprd_gpio_irq_mask(struct irq_data *data) u32 offset = irqd_to_hwirq(data); sprd_gpio_update(chip, offset, SPRD_GPIO_IE, 0); + gpiochip_disable_irq(chip, offset); } static void sprd_gpio_irq_ack(struct irq_data *data) @@ -136,6 +137,7 @@ static void sprd_gpio_irq_unmask(struct irq_data *data) u32 offset = irqd_to_hwirq(data); sprd_gpio_update(chip, offset, SPRD_GPIO_IE, 1); + gpiochip_enable_irq(chip, offset); } static int sprd_gpio_irq_set_type(struct irq_data *data, @@ -205,13 +207,14 @@ static void sprd_gpio_irq_handler(struct irq_desc *desc) chained_irq_exit(ic, desc); } -static struct irq_chip sprd_gpio_irqchip = { +static const struct irq_chip sprd_gpio_irqchip = { .name = "sprd-gpio", .irq_ack = sprd_gpio_irq_ack, .irq_mask = sprd_gpio_irq_mask, .irq_unmask = sprd_gpio_irq_unmask, 
.irq_set_type = sprd_gpio_irq_set_type, - .flags = IRQCHIP_SKIP_SET_WAKE, + .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_IMMUTABLE, + GPIOCHIP_IRQ_RESOURCE_HELPERS, }; static int sprd_gpio_probe(struct platform_device *pdev) @@ -245,7 +248,7 @@ static int sprd_gpio_probe(struct platform_device *pdev) sprd_gpio->chip.direction_output = sprd_gpio_direction_output; irq = &sprd_gpio->chip.irq; - irq->chip = &sprd_gpio_irqchip; + gpio_irq_chip_set_chip(irq, &sprd_gpio_irqchip); irq->handler = handle_bad_irq; irq->default_type = IRQ_TYPE_NONE; irq->parent_handler = sprd_gpio_irq_handler; diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 5a66d9616d7c..939c776b9488 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -3905,8 +3905,8 @@ static struct gpio_desc *gpiod_find_and_request(struct device *consumer, const char *label, bool platform_lookup_allowed) { + unsigned long lookupflags = GPIO_LOOKUP_FLAGS_DEFAULT; struct gpio_desc *desc = ERR_PTR(-ENOENT); - unsigned long lookupflags; int ret; if (!IS_ERR_OR_NULL(fwnode)) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 6b74df446694..e3e2e6e3b485 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -195,6 +195,7 @@ extern int amdgpu_emu_mode; extern uint amdgpu_smu_memory_pool_size; extern int amdgpu_smu_pptable_id; extern uint amdgpu_dc_feature_mask; +extern uint amdgpu_freesync_vid_mode; extern uint amdgpu_dc_debug_mask; extern uint amdgpu_dc_visual_confirm; extern uint amdgpu_dm_abm_level; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index b4f2d61ea0d5..1353ffd08988 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -181,6 +181,7 @@ int amdgpu_mes_kiq; int amdgpu_noretry = -1; int amdgpu_force_asic_type = -1; int amdgpu_tmz = -1; /* auto */ +uint amdgpu_freesync_vid_mode; int amdgpu_reset_method = -1; /* auto */ int amdgpu_num_kcq = -1; int amdgpu_smartshift_bias; @@ -880,6 +881,32 @@ MODULE_PARM_DESC(tmz, "Enable TMZ feature (-1 = auto (default), 0 = off, 1 = on) module_param_named(tmz, amdgpu_tmz, int, 0444); /** + * DOC: freesync_video (uint) + * Enable the optimization that adjusts front porch timing to achieve a + * seamless mode change experience when setting a freesync-supported mode + * for which a full modeset is not needed. + * + * When this feature is enabled, the Display Core adds a set of modes derived + * from the base FreeSync video mode to the corresponding connector's mode + * list, based on commonly used refresh rates and the VRR range of the + * connected display. From the userspace perspective, switching between + * different refresh rates at the same resolution then appears seamless. + * Additionally, userspace applications such as video players can read this + * mode list and change the refresh rate based on the video frame rate. + * Finally, userspace can also derive an appropriate mode for a particular + * refresh rate from the base FreeSync mode and add it to the connector's + * mode list. + * + * Note: This is an experimental feature. + * + * The default value: 0 (off).
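+ * + * As a minimal usage sketch (assuming amdgpu is built as a module), the + * optimization can be enabled at load time with "modprobe amdgpu + * freesync_video=1"; the equivalent kernel command line option is + * amdgpu.freesync_video=1.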
+ */ +MODULE_PARM_DESC( + freesync_video, + "Enable freesync modesetting optimization feature (0 = off (default), 1 = on)"); +module_param_named(freesync_video, amdgpu_freesync_vid_mode, uint, 0444); + +/** * DOC: reset_method (int) * GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco) */ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index bceb1a5b2518..3fdaba56be6f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -801,7 +801,7 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev, p2plink->attr.name = "properties"; p2plink->attr.mode = KFD_SYSFS_FILE_MODE; - sysfs_attr_init(&iolink->attr); + sysfs_attr_init(&p2plink->attr); ret = sysfs_create_file(p2plink->kobj, &p2plink->attr); if (ret < 0) return ret; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 50c783e19f5a..1b7f20a9d4ae 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -4361,6 +4361,10 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) amdgpu_set_panel_orientation(&aconnector->base); } + /* If we didn't find a panel, notify the acpi video detection */ + if (dm->adev->flags & AMD_IS_APU && dm->num_of_edps == 0) + acpi_video_report_nolcd(); + /* Software is initialized. Now we can register interrupt handlers. */ switch (adev->asic_type) { #if defined(CONFIG_DRM_AMD_DC_SI) @@ -5831,7 +5835,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, */ DRM_DEBUG_DRIVER("No preferred mode found\n"); } else { - recalculate_timing = is_freesync_video_mode(&mode, aconnector); + recalculate_timing = amdgpu_freesync_vid_mode && + is_freesync_video_mode(&mode, aconnector); if (recalculate_timing) { freesync_mode = get_highest_refresh_rate_mode(aconnector, false); drm_mode_copy(&saved_mode, &mode); @@ -6982,7 +6987,7 @@ static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connect struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); - if (!edid) + if (!(amdgpu_freesync_vid_mode && edid)) return; if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) @@ -8846,7 +8851,8 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, * TODO: Refactor this function to allow this check to work * in all conditions. 
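* Note that the freesync fast path below, which skips the full modeset, is * additionally gated on the amdgpu_freesync_vid_mode module parameter, so * the optimization stays opt-in.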
*/ - if (dm_new_crtc_state->stream && + if (amdgpu_freesync_vid_mode && + dm_new_crtc_state->stream && is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state)) goto skip_modeset; @@ -8881,7 +8887,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, if (!dm_old_crtc_state->stream) goto skip_modeset; - if (dm_new_crtc_state->stream && + if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream && is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state)) { new_crtc_state->mode_changed = false; @@ -8893,7 +8899,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, set_freesync_fixed_config(dm_new_crtc_state); goto skip_modeset; - } else if (aconnector && + } else if (amdgpu_freesync_vid_mode && aconnector && is_freesync_video_mode(&new_crtc_state->mode, aconnector)) { struct drm_display_mode *high_mode; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c index 5af601cff1a0..b53feeaf5cf1 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c @@ -6257,12 +6257,12 @@ bool dml32_CalculateDETSwathFillLatencyHiding(unsigned int NumberOfActiveSurface double SwathSizePerSurfaceC[DC__NUM_DPP__MAX]; bool NotEnoughDETSwathFillLatencyHiding = false; - /* calculate sum of single swath size for all pipes in bytes*/ + /* calculate sum of single swath size for all pipes in bytes */ for (k = 0; k < NumberOfActiveSurfaces; k++) { - SwathSizePerSurfaceY[k] += SwathHeightY[k] * SwathWidthY[k] * BytePerPixelInDETY[k] * NumOfDPP[k]; + SwathSizePerSurfaceY[k] = SwathHeightY[k] * SwathWidthY[k] * BytePerPixelInDETY[k] * NumOfDPP[k]; if (SwathHeightC[k] != 0) - SwathSizePerSurfaceC[k] += SwathHeightC[k] * SwathWidthC[k] * BytePerPixelInDETC[k] * NumOfDPP[k]; + SwathSizePerSurfaceC[k] = SwathHeightC[k] * SwathWidthC[k] * BytePerPixelInDETC[k] * NumOfDPP[k]; else SwathSizePerSurfaceC[k] = 0; diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c index fce69fa446d5..2cbc1292ab38 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c +++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c @@ -41,9 +41,11 @@ #include "i915_drv.h" #include "i915_reg.h" +#include "intel_de.h" #include "intel_display_types.h" #include "intel_dsi.h" #include "intel_dsi_vbt.h" +#include "intel_gmbus_regs.h" #include "vlv_dsi.h" #include "vlv_dsi_regs.h" #include "vlv_sideband.h" @@ -377,6 +379,85 @@ static void icl_exec_gpio(struct intel_connector *connector, drm_dbg_kms(&dev_priv->drm, "Skipping ICL GPIO element execution\n"); } +enum { + MIPI_RESET_1 = 0, + MIPI_AVDD_EN_1, + MIPI_BKLT_EN_1, + MIPI_AVEE_EN_1, + MIPI_VIO_EN_1, + MIPI_RESET_2, + MIPI_AVDD_EN_2, + MIPI_BKLT_EN_2, + MIPI_AVEE_EN_2, + MIPI_VIO_EN_2, +}; + +static void icl_native_gpio_set_value(struct drm_i915_private *dev_priv, + int gpio, bool value) +{ + int index; + + if (drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) == 11 && gpio >= MIPI_RESET_2)) + return; + + switch (gpio) { + case MIPI_RESET_1: + case MIPI_RESET_2: + index = gpio == MIPI_RESET_1 ? HPD_PORT_A : HPD_PORT_B; + + /* + * Disable HPD to set the pin to output, and set output + * value. The HPD pin should not be enabled for DSI anyway, + * assuming the board design and VBT are sane, and the pin isn't + * used by a non-DSI encoder. 
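+ * (MIPI_RESET_1 drives the HPD pin of port A and MIPI_RESET_2 that of + * port B, matching the index selected above.)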
+ * + * The locking protects against concurrent SHOTPLUG_CTL_DDI + * modifications in irq setup and handling. + */ + spin_lock_irq(&dev_priv->irq_lock); + intel_de_rmw(dev_priv, SHOTPLUG_CTL_DDI, + SHOTPLUG_CTL_DDI_HPD_ENABLE(index) | + SHOTPLUG_CTL_DDI_HPD_OUTPUT_DATA(index), + value ? SHOTPLUG_CTL_DDI_HPD_OUTPUT_DATA(index) : 0); + spin_unlock_irq(&dev_priv->irq_lock); + break; + case MIPI_AVDD_EN_1: + case MIPI_AVDD_EN_2: + index = gpio == MIPI_AVDD_EN_1 ? 0 : 1; + + intel_de_rmw(dev_priv, PP_CONTROL(index), PANEL_POWER_ON, + value ? PANEL_POWER_ON : 0); + break; + case MIPI_BKLT_EN_1: + case MIPI_BKLT_EN_2: + index = gpio == MIPI_BKLT_EN_1 ? 0 : 1; + + intel_de_rmw(dev_priv, PP_CONTROL(index), EDP_BLC_ENABLE, + value ? EDP_BLC_ENABLE : 0); + break; + case MIPI_AVEE_EN_1: + case MIPI_AVEE_EN_2: + index = gpio == MIPI_AVEE_EN_1 ? 1 : 2; + + intel_de_rmw(dev_priv, GPIO(dev_priv, index), + GPIO_CLOCK_VAL_OUT, + GPIO_CLOCK_DIR_MASK | GPIO_CLOCK_DIR_OUT | + GPIO_CLOCK_VAL_MASK | (value ? GPIO_CLOCK_VAL_OUT : 0)); + break; + case MIPI_VIO_EN_1: + case MIPI_VIO_EN_2: + index = gpio == MIPI_VIO_EN_1 ? 1 : 2; + + intel_de_rmw(dev_priv, GPIO(dev_priv, index), + GPIO_DATA_VAL_OUT, + GPIO_DATA_DIR_MASK | GPIO_DATA_DIR_OUT | + GPIO_DATA_VAL_MASK | (value ? GPIO_DATA_VAL_OUT : 0)); + break; + default: + MISSING_CASE(gpio); + } +} + static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data) { struct drm_device *dev = intel_dsi->base.base.dev; @@ -384,8 +465,7 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data) struct intel_connector *connector = intel_dsi->attached_connector; u8 gpio_source, gpio_index = 0, gpio_number; bool value; - - drm_dbg_kms(&dev_priv->drm, "\n"); + bool native = DISPLAY_VER(dev_priv) >= 11; if (connector->panel.vbt.dsi.seq_version >= 3) gpio_index = *data++; @@ -398,10 +478,18 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data) else gpio_source = 0; + if (connector->panel.vbt.dsi.seq_version >= 4 && *data & BIT(1)) + native = false; + /* pull up/down */ value = *data++ & 1; - if (DISPLAY_VER(dev_priv) >= 11) + drm_dbg_kms(&dev_priv->drm, "GPIO index %u, number %u, source %u, native %s, set to %s\n", + gpio_index, gpio_number, gpio_source, str_yes_no(native), str_on_off(value)); + + if (native) + icl_native_gpio_set_value(dev_priv, gpio_number, value); + else if (DISPLAY_VER(dev_priv) >= 11) icl_exec_gpio(connector, gpio_source, gpio_index, value); else if (IS_VALLEYVIEW(dev_priv)) vlv_exec_gpio(connector, gpio_source, gpio_number, value); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index da09767fda07..f266b68cf012 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -730,32 +730,69 @@ static int eb_reserve(struct i915_execbuffer *eb) bool unpinned; /* - * Attempt to pin all of the buffers into the GTT. - * This is done in 2 phases: + * We have one more buffers that we couldn't bind, which could be due to + * various reasons. To resolve this we have 4 passes, with every next + * level turning the screws tighter: * - * 1. Unbind all objects that do not match the GTT constraints for - * the execbuffer (fenceable, mappable, alignment etc). - * 2. Bind new objects. + * 0. Unbind all objects that do not match the GTT constraints for the + * execbuffer (fenceable, mappable, alignment etc). Bind all new + * objects. 
This avoids unnecessary unbinding of later objects in order + * to make room for the earlier objects *unless* we need to defragment. * - * This avoid unnecessary unbinding of later objects in order to make - * room for the earlier objects *unless* we need to defragment. + * 1. Reorder the buffers, where objects with the most restrictive + * placement requirements go first (ignoring fixed location buffers for + * now). For example, objects needing the mappable aperture (the first + * 256M of GTT), should go first vs objects that can be placed just + * about anywhere. Repeat the previous pass. * - * Defragmenting is skipped if all objects are pinned at a fixed location. + * 2. Consider buffers that are pinned at a fixed location. Also try to + * evict the entire VM this time, leaving only objects that we were + * unable to lock. Try again to bind the buffers. (still using the new + * buffer order). + * + * 3. We likely have object lock contention for one or more stubborn + * objects in the VM, for which we need to evict to make forward + * progress (perhaps we are fighting the shrinker?). When evicting the + * VM this time around, anything that we can't lock we now track using + * the busy_bo, using the full lock (after dropping the vm->mutex to + * prevent deadlocks), instead of trylock. We then continue to evict the + * VM, this time with the stubborn object locked, which we can now + * hopefully unbind (if still bound in the VM). Repeat until the VM is + * evicted. Finally we should be able bind everything. */ - for (pass = 0; pass <= 2; pass++) { + for (pass = 0; pass <= 3; pass++) { int pin_flags = PIN_USER | PIN_VALIDATE; if (pass == 0) pin_flags |= PIN_NONBLOCK; if (pass >= 1) - unpinned = eb_unbind(eb, pass == 2); + unpinned = eb_unbind(eb, pass >= 2); if (pass == 2) { err = mutex_lock_interruptible(&eb->context->vm->mutex); if (!err) { - err = i915_gem_evict_vm(eb->context->vm, &eb->ww); + err = i915_gem_evict_vm(eb->context->vm, &eb->ww, NULL); + mutex_unlock(&eb->context->vm->mutex); + } + if (err) + return err; + } + + if (pass == 3) { +retry: + err = mutex_lock_interruptible(&eb->context->vm->mutex); + if (!err) { + struct drm_i915_gem_object *busy_bo = NULL; + + err = i915_gem_evict_vm(eb->context->vm, &eb->ww, &busy_bo); mutex_unlock(&eb->context->vm->mutex); + if (err && busy_bo) { + err = i915_gem_object_lock(busy_bo, &eb->ww); + i915_gem_object_put(busy_bo); + if (!err) + goto retry; + } } if (err) return err; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c index c29efdef8313..0ad44f3868de 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c @@ -369,7 +369,7 @@ retry: if (vma == ERR_PTR(-ENOSPC)) { ret = mutex_lock_interruptible(&ggtt->vm.mutex); if (!ret) { - ret = i915_gem_evict_vm(&ggtt->vm, &ww); + ret = i915_gem_evict_vm(&ggtt->vm, &ww, NULL); mutex_unlock(&ggtt->vm.mutex); } if (ret) diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index 767e329e1cc5..9c18b5f2e789 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -1109,9 +1109,15 @@ static void mmio_invalidate_full(struct intel_gt *gt) continue; if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) { + u32 val = BIT(engine->instance); + + if (engine->class == VIDEO_DECODE_CLASS || + engine->class == VIDEO_ENHANCEMENT_CLASS || + engine->class == COMPUTE_CLASS) + val = _MASKED_BIT_ENABLE(val); intel_gt_mcr_multicast_write_fw(gt, xehp_regs[engine->class], 
- BIT(engine->instance)); + val); } else { rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num); if (!i915_mmio_reg_offset(rb.reg)) diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c index 0c80ba51a4bd..2bcdd192f814 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c @@ -545,6 +545,32 @@ static int check_ccs_header(struct intel_gt *gt, return 0; } +static int try_firmware_load(struct intel_uc_fw *uc_fw, const struct firmware **fw) +{ + struct intel_gt *gt = __uc_fw_to_gt(uc_fw); + struct device *dev = gt->i915->drm.dev; + int err; + + err = firmware_request_nowarn(fw, uc_fw->file_selected.path, dev); + + if (err) + return err; + + if ((*fw)->size > INTEL_UC_RSVD_GGTT_PER_FW) { + drm_err(>->i915->drm, + "%s firmware %s: size (%zuKB) exceeds max supported size (%uKB)\n", + intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path, + (*fw)->size / SZ_1K, INTEL_UC_RSVD_GGTT_PER_FW / SZ_1K); + + /* try to find another blob to load */ + release_firmware(*fw); + *fw = NULL; + return -ENOENT; + } + + return 0; +} + /** * intel_uc_fw_fetch - fetch uC firmware * @uc_fw: uC firmware @@ -558,7 +584,6 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw) struct intel_gt *gt = __uc_fw_to_gt(uc_fw); struct drm_i915_private *i915 = gt->i915; struct intel_uc_fw_file file_ideal; - struct device *dev = i915->drm.dev; struct drm_i915_gem_object *obj; const struct firmware *fw = NULL; bool old_ver = false; @@ -574,20 +599,9 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw) __force_fw_fetch_failures(uc_fw, -EINVAL); __force_fw_fetch_failures(uc_fw, -ESTALE); - err = firmware_request_nowarn(&fw, uc_fw->file_selected.path, dev); + err = try_firmware_load(uc_fw, &fw); memcpy(&file_ideal, &uc_fw->file_wanted, sizeof(file_ideal)); - if (!err && fw->size > INTEL_UC_RSVD_GGTT_PER_FW) { - drm_err(&i915->drm, - "%s firmware %s: size (%zuKB) exceeds max supported size (%uKB)\n", - intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path, - fw->size / SZ_1K, INTEL_UC_RSVD_GGTT_PER_FW / SZ_1K); - - /* try to find another blob to load */ - release_firmware(fw); - err = -ENOENT; - } - /* Any error is terminal if overriding. 
Don't bother searching for older versions */ if (err && intel_uc_fw_is_overridden(uc_fw)) goto fail; @@ -608,7 +622,7 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw) break; } - err = firmware_request_nowarn(&fw, uc_fw->file_selected.path, dev); + err = try_firmware_load(uc_fw, &fw); } if (err) diff --git a/drivers/gpu/drm/i915/gvt/debugfs.c b/drivers/gpu/drm/i915/gvt/debugfs.c index 9f1c209d9251..0616b73175f3 100644 --- a/drivers/gpu/drm/i915/gvt/debugfs.c +++ b/drivers/gpu/drm/i915/gvt/debugfs.c @@ -151,6 +151,22 @@ DEFINE_SIMPLE_ATTRIBUTE(vgpu_scan_nonprivbb_fops, vgpu_scan_nonprivbb_get, vgpu_scan_nonprivbb_set, "0x%llx\n"); +static int vgpu_status_get(void *data, u64 *val) +{ + struct intel_vgpu *vgpu = (struct intel_vgpu *)data; + + *val = 0; + + if (test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status)) + *val |= (1 << INTEL_VGPU_STATUS_ATTACHED); + if (test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status)) + *val |= (1 << INTEL_VGPU_STATUS_ACTIVE); + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(vgpu_status_fops, vgpu_status_get, NULL, "0x%llx\n"); + /** * intel_gvt_debugfs_add_vgpu - register debugfs entries for a vGPU * @vgpu: a vGPU @@ -162,11 +178,12 @@ void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu) snprintf(name, 16, "vgpu%d", vgpu->id); vgpu->debugfs = debugfs_create_dir(name, vgpu->gvt->debugfs_root); - debugfs_create_bool("active", 0444, vgpu->debugfs, &vgpu->active); debugfs_create_file("mmio_diff", 0444, vgpu->debugfs, vgpu, &vgpu_mmio_diff_fops); debugfs_create_file("scan_nonprivbb", 0644, vgpu->debugfs, vgpu, &vgpu_scan_nonprivbb_fops); + debugfs_create_file("status", 0644, vgpu->debugfs, vgpu, + &vgpu_status_fops); } /** @@ -175,8 +192,13 @@ void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu) */ void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu) { - debugfs_remove_recursive(vgpu->debugfs); - vgpu->debugfs = NULL; + struct intel_gvt *gvt = vgpu->gvt; + struct drm_minor *minor = gvt->gt->i915->drm.primary; + + if (minor->debugfs_root && gvt->debugfs_root) { + debugfs_remove_recursive(vgpu->debugfs); + vgpu->debugfs = NULL; + } } /** @@ -199,6 +221,10 @@ void intel_gvt_debugfs_init(struct intel_gvt *gvt) */ void intel_gvt_debugfs_clean(struct intel_gvt *gvt) { - debugfs_remove_recursive(gvt->debugfs_root); - gvt->debugfs_root = NULL; + struct drm_minor *minor = gvt->gt->i915->drm.primary; + + if (minor->debugfs_root) { + debugfs_remove_recursive(gvt->debugfs_root); + gvt->debugfs_root = NULL; + } } diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c index 355f1c0e8664..ffe41e9be04f 100644 --- a/drivers/gpu/drm/i915/gvt/dmabuf.c +++ b/drivers/gpu/drm/i915/gvt/dmabuf.c @@ -134,7 +134,8 @@ static void dmabuf_gem_object_free(struct kref *kref) struct list_head *pos; struct intel_vgpu_dmabuf_obj *dmabuf_obj; - if (vgpu && vgpu->active && !list_empty(&vgpu->dmabuf_obj_list_head)) { + if (vgpu && test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status) && + !list_empty(&vgpu->dmabuf_obj_list_head)) { list_for_each(pos, &vgpu->dmabuf_obj_list_head) { dmabuf_obj = list_entry(pos, struct intel_vgpu_dmabuf_obj, list); if (dmabuf_obj == obj) { diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 51e5e8fb505b..4ec85308379a 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -55,7 +55,7 @@ static bool intel_gvt_is_valid_gfn(struct intel_vgpu *vgpu, unsigned long gfn) int idx; bool ret; - if (!vgpu->attached) + if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status)) return false; idx = 
srcu_read_lock(&kvm->srcu); @@ -1178,7 +1178,7 @@ static int is_2MB_gtt_possible(struct intel_vgpu *vgpu, if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M)) return 0; - if (!vgpu->attached) + if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status)) return -EINVAL; pfn = gfn_to_pfn(vgpu->vfio_device.kvm, ops->get_pfn(entry)); if (is_error_noslot_pfn(pfn)) @@ -1209,10 +1209,8 @@ static int split_2MB_gtt_entry(struct intel_vgpu *vgpu, for_each_shadow_entry(sub_spt, &sub_se, sub_index) { ret = intel_gvt_dma_map_guest_page(vgpu, start_gfn + sub_index, PAGE_SIZE, &dma_addr); - if (ret) { - ppgtt_invalidate_spt(spt); - return ret; - } + if (ret) + goto err; sub_se.val64 = se->val64; /* Copy the PAT field from PDE. */ @@ -1231,6 +1229,17 @@ static int split_2MB_gtt_entry(struct intel_vgpu *vgpu, ops->set_pfn(se, sub_spt->shadow_page.mfn); ppgtt_set_shadow_entry(spt, se, index); return 0; +err: + /* Cancel the existing addess mappings of DMA addr. */ + for_each_present_shadow_entry(sub_spt, &sub_se, sub_index) { + gvt_vdbg_mm("invalidate 4K entry\n"); + ppgtt_invalidate_pte(sub_spt, &sub_se); + } + /* Release the new allocated spt. */ + trace_spt_change(sub_spt->vgpu->id, "release", sub_spt, + sub_spt->guest_page.gfn, sub_spt->shadow_page.type); + ppgtt_free_spt(sub_spt); + return ret; } static int split_64KB_gtt_entry(struct intel_vgpu *vgpu, diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 62823c0e13ab..2d65800d8e93 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -172,13 +172,18 @@ struct intel_vgpu_submission { #define KVMGT_DEBUGFS_FILENAME "kvmgt_nr_cache_entries" +enum { + INTEL_VGPU_STATUS_ATTACHED = 0, + INTEL_VGPU_STATUS_ACTIVE, + INTEL_VGPU_STATUS_NR_BITS, +}; + struct intel_vgpu { struct vfio_device vfio_device; struct intel_gvt *gvt; struct mutex vgpu_lock; int id; - bool active; - bool attached; + DECLARE_BITMAP(status, INTEL_VGPU_STATUS_NR_BITS); bool pv_notified; bool failsafe; unsigned int resetting_eng; @@ -467,7 +472,7 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu, #define for_each_active_vgpu(gvt, vgpu, id) \ idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \ - for_each_if(vgpu->active) + for_each_if(test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status)) static inline void intel_vgpu_write_pci_bar(struct intel_vgpu *vgpu, u32 offset, u32 val, bool low) @@ -725,7 +730,7 @@ static inline bool intel_gvt_mmio_is_cmd_write_patch( static inline int intel_gvt_read_gpa(struct intel_vgpu *vgpu, unsigned long gpa, void *buf, unsigned long len) { - if (!vgpu->attached) + if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status)) return -ESRCH; return vfio_dma_rw(&vgpu->vfio_device, gpa, buf, len, false); } @@ -743,7 +748,7 @@ static inline int intel_gvt_read_gpa(struct intel_vgpu *vgpu, unsigned long gpa, static inline int intel_gvt_write_gpa(struct intel_vgpu *vgpu, unsigned long gpa, void *buf, unsigned long len) { - if (!vgpu->attached) + if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status)) return -ESRCH; return vfio_dma_rw(&vgpu->vfio_device, gpa, buf, len, true); } diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c index a6b2021b665f..68eca023bbc6 100644 --- a/drivers/gpu/drm/i915/gvt/interrupt.c +++ b/drivers/gpu/drm/i915/gvt/interrupt.c @@ -433,7 +433,7 @@ static int inject_virtual_interrupt(struct intel_vgpu *vgpu) * enabled by guest. so if msi_trigger is null, success is still * returned and don't inject interrupt into guest. 
*/ - if (!vgpu->attached) + if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status)) return -ESRCH; if (vgpu->msi_trigger && eventfd_signal(vgpu->msi_trigger, 1) != 1) return -EFAULT; diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index f5451adcd489..8ae7039b3683 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -638,7 +638,7 @@ static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu) mutex_lock(&vgpu->gvt->lock); for_each_active_vgpu(vgpu->gvt, itr, id) { - if (!itr->attached) + if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, itr->status)) continue; if (vgpu->vfio_device.kvm == itr->vfio_device.kvm) { @@ -655,9 +655,6 @@ static int intel_vgpu_open_device(struct vfio_device *vfio_dev) { struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev); - if (vgpu->attached) - return -EEXIST; - if (!vgpu->vfio_device.kvm || vgpu->vfio_device.kvm->mm != current->mm) { gvt_vgpu_err("KVM is required to use Intel vGPU\n"); @@ -667,14 +664,14 @@ static int intel_vgpu_open_device(struct vfio_device *vfio_dev) if (__kvmgt_vgpu_exist(vgpu)) return -EEXIST; - vgpu->attached = true; - vgpu->track_node.track_write = kvmgt_page_track_write; vgpu->track_node.track_flush_slot = kvmgt_page_track_flush_slot; kvm_get_kvm(vgpu->vfio_device.kvm); kvm_page_track_register_notifier(vgpu->vfio_device.kvm, &vgpu->track_node); + set_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status); + debugfs_create_ulong(KVMGT_DEBUGFS_FILENAME, 0444, vgpu->debugfs, &vgpu->nr_cache_entries); @@ -698,11 +695,10 @@ static void intel_vgpu_close_device(struct vfio_device *vfio_dev) { struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev); - if (!vgpu->attached) - return; - intel_gvt_release_vgpu(vgpu); + clear_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status); + debugfs_remove(debugfs_lookup(KVMGT_DEBUGFS_FILENAME, vgpu->debugfs)); kvm_page_track_unregister_notifier(vgpu->vfio_device.kvm, @@ -718,8 +714,6 @@ static void intel_vgpu_close_device(struct vfio_device *vfio_dev) vgpu->dma_addr_cache = RB_ROOT; intel_vgpu_release_msi_eventfd_ctx(vgpu); - - vgpu->attached = false; } static u64 intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar) @@ -1512,9 +1506,6 @@ static void intel_vgpu_remove(struct mdev_device *mdev) { struct intel_vgpu *vgpu = dev_get_drvdata(&mdev->dev); - if (WARN_ON_ONCE(vgpu->attached)) - return; - vfio_unregister_group_dev(&vgpu->vfio_device); vfio_put_device(&vgpu->vfio_device); } @@ -1559,7 +1550,7 @@ int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn) struct kvm_memory_slot *slot; int idx; - if (!info->attached) + if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, info->status)) return -ESRCH; idx = srcu_read_lock(&kvm->srcu); @@ -1589,8 +1580,8 @@ int intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn) struct kvm_memory_slot *slot; int idx; - if (!info->attached) - return 0; + if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, info->status)) + return -ESRCH; idx = srcu_read_lock(&kvm->srcu); slot = gfn_to_memslot(kvm, gfn); @@ -1668,7 +1659,7 @@ int intel_gvt_dma_map_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, struct gvt_dma *entry; int ret; - if (!vgpu->attached) + if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status)) return -EINVAL; mutex_lock(&vgpu->cache_lock); @@ -1714,8 +1705,8 @@ int intel_gvt_dma_pin_guest_page(struct intel_vgpu *vgpu, dma_addr_t dma_addr) struct gvt_dma *entry; int ret = 0; - if (!vgpu->attached) - return -ENODEV; + if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status)) + return -EINVAL; mutex_lock(&vgpu->cache_lock); 
entry = __gvt_cache_find_dma_addr(vgpu, dma_addr); @@ -1742,7 +1733,7 @@ void intel_gvt_dma_unmap_guest_page(struct intel_vgpu *vgpu, { struct gvt_dma *entry; - if (!vgpu->attached) + if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status)) return; mutex_lock(&vgpu->cache_lock); @@ -1778,7 +1769,7 @@ static void intel_gvt_test_and_emulate_vblank(struct intel_gvt *gvt) idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) { if (test_and_clear_bit(INTEL_GVT_REQUEST_EMULATE_VBLANK + id, (void *)&gvt->service_request)) { - if (vgpu->active) + if (test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status)) intel_vgpu_emulate_vblank(vgpu); } } diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 9cd8fcbf7cad..f4055804aad1 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -695,6 +695,7 @@ intel_vgpu_shadow_mm_pin(struct intel_vgpu_workload *workload) if (workload->shadow_mm->type != INTEL_GVT_MM_PPGTT || !workload->shadow_mm->ppgtt_mm.shadowed) { + intel_vgpu_unpin_mm(workload->shadow_mm); gvt_vgpu_err("workload shadow ppgtt isn't ready\n"); return -EINVAL; } @@ -865,7 +866,8 @@ pick_next_workload(struct intel_gvt *gvt, struct intel_engine_cs *engine) goto out; } - if (!scheduler->current_vgpu->active || + if (!test_bit(INTEL_VGPU_STATUS_ACTIVE, + scheduler->current_vgpu->status) || list_empty(workload_q_head(scheduler->current_vgpu, engine))) goto out; diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index 3c529c2705dd..a5497440484f 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -166,9 +166,7 @@ void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt) */ void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu) { - mutex_lock(&vgpu->vgpu_lock); - vgpu->active = true; - mutex_unlock(&vgpu->vgpu_lock); + set_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status); } /** @@ -183,7 +181,7 @@ void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu) { mutex_lock(&vgpu->vgpu_lock); - vgpu->active = false; + clear_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status); if (atomic_read(&vgpu->submission.running_workload_num)) { mutex_unlock(&vgpu->vgpu_lock); @@ -228,7 +226,8 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu) struct intel_gvt *gvt = vgpu->gvt; struct drm_i915_private *i915 = gvt->gt->i915; - drm_WARN(&i915->drm, vgpu->active, "vGPU is still active!\n"); + drm_WARN(&i915->drm, test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status), + "vGPU is still active!\n"); /* * remove idr first so later clean can judge if need to stop @@ -285,8 +284,7 @@ struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt) if (ret) goto out_free_vgpu; - vgpu->active = false; - + clear_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status); return vgpu; out_free_vgpu: diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index f025ee4fa526..a4b4d9b7d26c 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -416,6 +416,11 @@ int i915_gem_evict_for_node(struct i915_address_space *vm, * @vm: Address space to cleanse * @ww: An optional struct i915_gem_ww_ctx. If not NULL, i915_gem_evict_vm * will be able to evict vma's locked by the ww as well. + * @busy_bo: Optional pointer to struct drm_i915_gem_object. If not NULL, then + * in the event i915_gem_evict_vm() is unable to trylock an object for eviction, + * then @busy_bo will point to it. -EBUSY is also returned. 
The caller must drop + * the vm->mutex, before trying again to acquire the contended lock. The caller + * also owns a reference to the object. * * This function evicts all vmas from a vm. * @@ -425,7 +430,8 @@ int i915_gem_evict_for_node(struct i915_address_space *vm, * To clarify: This is for freeing up virtual address space, not for freeing * memory in e.g. the shrinker. */ -int i915_gem_evict_vm(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww) +int i915_gem_evict_vm(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww, + struct drm_i915_gem_object **busy_bo) { int ret = 0; @@ -457,15 +463,22 @@ int i915_gem_evict_vm(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww) * the resv is shared among multiple objects, we still * need the object ref. */ - if (dying_vma(vma) || + if (!i915_gem_object_get_rcu(vma->obj) || (ww && (dma_resv_locking_ctx(vma->obj->base.resv) == &ww->ctx))) { __i915_vma_pin(vma); list_add(&vma->evict_link, &locked_eviction_list); continue; } - if (!i915_gem_object_trylock(vma->obj, ww)) + if (!i915_gem_object_trylock(vma->obj, ww)) { + if (busy_bo) { + *busy_bo = vma->obj; /* holds ref */ + ret = -EBUSY; + break; + } + i915_gem_object_put(vma->obj); continue; + } __i915_vma_pin(vma); list_add(&vma->evict_link, &eviction_list); @@ -473,25 +486,29 @@ int i915_gem_evict_vm(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww) if (list_empty(&eviction_list) && list_empty(&locked_eviction_list)) break; - ret = 0; /* Unbind locked objects first, before unlocking the eviction_list */ list_for_each_entry_safe(vma, vn, &locked_eviction_list, evict_link) { __i915_vma_unpin(vma); - if (ret == 0) + if (ret == 0) { ret = __i915_vma_unbind(vma); - if (ret != -EINTR) /* "Get me out of here!" */ - ret = 0; + if (ret != -EINTR) /* "Get me out of here!" */ + ret = 0; + } + if (!dying_vma(vma)) + i915_gem_object_put(vma->obj); } list_for_each_entry_safe(vma, vn, &eviction_list, evict_link) { __i915_vma_unpin(vma); - if (ret == 0) + if (ret == 0) { ret = __i915_vma_unbind(vma); - if (ret != -EINTR) /* "Get me out of here!" */ - ret = 0; + if (ret != -EINTR) /* "Get me out of here!" 
*/ + ret = 0; + } i915_gem_object_unlock(vma->obj); + i915_gem_object_put(vma->obj); } } while (ret == 0); diff --git a/drivers/gpu/drm/i915/i915_gem_evict.h b/drivers/gpu/drm/i915/i915_gem_evict.h index e593c530f9bd..bf0ee0e4fe60 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.h +++ b/drivers/gpu/drm/i915/i915_gem_evict.h @@ -11,6 +11,7 @@ struct drm_mm_node; struct i915_address_space; struct i915_gem_ww_ctx; +struct drm_i915_gem_object; int __must_check i915_gem_evict_something(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww, @@ -23,6 +24,7 @@ int __must_check i915_gem_evict_for_node(struct i915_address_space *vm, struct drm_mm_node *node, unsigned int flags); int i915_gem_evict_vm(struct i915_address_space *vm, - struct i915_gem_ww_ctx *ww); + struct i915_gem_ww_ctx *ww, + struct drm_i915_gem_object **busy_bo); #endif /* __I915_GEM_EVICT_H__ */ diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index edfe363af838..91c533986041 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1974,7 +1974,10 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) if (ddi_hotplug_trigger) { u32 dig_hotplug_reg; + /* Locking due to DSI native GPIO sequences */ + spin_lock(&dev_priv->irq_lock); dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_DDI, 0, 0); + spin_unlock(&dev_priv->irq_lock); intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, ddi_hotplug_trigger, dig_hotplug_reg, diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 6da9784fe4a2..ccd1f864aa19 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -1129,7 +1129,6 @@ static const struct intel_gt_definition xelpmp_extra_gt[] = { {} }; -__maybe_unused static const struct intel_device_info mtl_info = { XE_HP_FEATURES, XE_LPDP_FEATURES, diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 8e1892d14774..916176872544 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -5988,6 +5988,7 @@ #define SHOTPLUG_CTL_DDI _MMIO(0xc4030) #define SHOTPLUG_CTL_DDI_HPD_ENABLE(hpd_pin) (0x8 << (_HPD_PIN_DDI(hpd_pin) * 4)) +#define SHOTPLUG_CTL_DDI_HPD_OUTPUT_DATA(hpd_pin) (0x4 << (_HPD_PIN_DDI(hpd_pin) * 4)) #define SHOTPLUG_CTL_DDI_HPD_STATUS_MASK(hpd_pin) (0x3 << (_HPD_PIN_DDI(hpd_pin) * 4)) #define SHOTPLUG_CTL_DDI_HPD_NO_DETECT(hpd_pin) (0x0 << (_HPD_PIN_DDI(hpd_pin) * 4)) #define SHOTPLUG_CTL_DDI_HPD_SHORT_DETECT(hpd_pin) (0x1 << (_HPD_PIN_DDI(hpd_pin) * 4)) diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 703fee6b5f75..3a33be5401ed 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -1566,7 +1566,7 @@ static int __i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww, * locked objects when called from execbuf when pinning * is removed. This would probably regress badly. 
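* Passing NULL for the new @busy_bo argument below keeps the previous * behaviour here: objects whose lock cannot be taken are simply skipped * instead of being reported back for a locked retry.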
*/ - i915_gem_evict_vm(vm, NULL); + i915_gem_evict_vm(vm, NULL, NULL); mutex_unlock(&vm->mutex); } } while (1); diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c index 8c6517d29b8e..37068542aafe 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c @@ -344,7 +344,7 @@ static int igt_evict_vm(void *arg) /* Everything is pinned, nothing should happen */ mutex_lock(&ggtt->vm.mutex); - err = i915_gem_evict_vm(&ggtt->vm, NULL); + err = i915_gem_evict_vm(&ggtt->vm, NULL, NULL); mutex_unlock(&ggtt->vm.mutex); if (err) { pr_err("i915_gem_evict_vm on a full GGTT returned err=%d]\n", @@ -356,7 +356,7 @@ static int igt_evict_vm(void *arg) for_i915_gem_ww(&ww, err, false) { mutex_lock(&ggtt->vm.mutex); - err = i915_gem_evict_vm(&ggtt->vm, &ww); + err = i915_gem_evict_vm(&ggtt->vm, &ww, NULL); mutex_unlock(&ggtt->vm.mutex); } diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c index dba4f7d81d69..80142d9a4a55 100644 --- a/drivers/gpu/drm/imx/ipuv3-plane.c +++ b/drivers/gpu/drm/imx/ipuv3-plane.c @@ -614,6 +614,11 @@ static void ipu_plane_atomic_update(struct drm_plane *plane, break; } + if (ipu_plane->dp_flow == IPU_DP_FLOW_SYNC_BG) + width = ipu_src_rect_width(new_state); + else + width = drm_rect_width(&new_state->src) >> 16; + eba = drm_plane_state_to_eba(new_state, 0); /* @@ -622,8 +627,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane, */ if (ipu_state->use_pre) { axi_id = ipu_chan_assign_axi_id(ipu_plane->dma); - ipu_prg_channel_configure(ipu_plane->ipu_ch, axi_id, - ipu_src_rect_width(new_state), + ipu_prg_channel_configure(ipu_plane->ipu_ch, axi_id, width, drm_rect_height(&new_state->src) >> 16, fb->pitches[0], fb->format->format, fb->modifier, &eba); @@ -678,9 +682,8 @@ static void ipu_plane_atomic_update(struct drm_plane *plane, break; } - ipu_dmfc_config_wait4eot(ipu_plane->dmfc, ALIGN(drm_rect_width(dst), 8)); + ipu_dmfc_config_wait4eot(ipu_plane->dmfc, width); - width = ipu_src_rect_width(new_state); height = drm_rect_height(&new_state->src) >> 16; info = drm_format_info(fb->format->format); ipu_calculate_bursts(width, info->cpp[0], fb->pitches[0], @@ -744,8 +747,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane, ipu_cpmem_set_burstsize(ipu_plane->ipu_ch, 16); ipu_cpmem_zero(ipu_plane->alpha_ch); - ipu_cpmem_set_resolution(ipu_plane->alpha_ch, - ipu_src_rect_width(new_state), + ipu_cpmem_set_resolution(ipu_plane->alpha_ch, width, drm_rect_height(&new_state->src) >> 16); ipu_cpmem_set_format_passthrough(ipu_plane->alpha_ch, 8); ipu_cpmem_set_high_priority(ipu_plane->alpha_ch); diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c index d4b907889a21..cd399b0b7181 100644 --- a/drivers/gpu/drm/meson/meson_viu.c +++ b/drivers/gpu/drm/meson/meson_viu.c @@ -436,15 +436,14 @@ void meson_viu_init(struct meson_drm *priv) /* Initialize OSD1 fifo control register */ reg = VIU_OSD_DDR_PRIORITY_URGENT | - VIU_OSD_HOLD_FIFO_LINES(31) | VIU_OSD_FIFO_DEPTH_VAL(32) | /* fifo_depth_val: 32*8=256 */ VIU_OSD_WORDS_PER_BURST(4) | /* 4 words in 1 burst */ VIU_OSD_FIFO_LIMITS(2); /* fifo_lim: 2*16=32 */ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) - reg |= VIU_OSD_BURST_LENGTH_32; + reg |= (VIU_OSD_BURST_LENGTH_32 | VIU_OSD_HOLD_FIFO_LINES(31)); else - reg |= VIU_OSD_BURST_LENGTH_64; + reg |= (VIU_OSD_BURST_LENGTH_64 | VIU_OSD_HOLD_FIFO_LINES(4)); writel_relaxed(reg, priv->io_base + 
_REG(VIU_OSD1_FIFO_CTRL_STAT)); writel_relaxed(reg, priv->io_base + _REG(VIU_OSD2_FIFO_CTRL_STAT)); diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c index 2fa5afe21288..919e6cc04982 100644 --- a/drivers/gpu/drm/panfrost/panfrost_drv.c +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c @@ -82,6 +82,7 @@ static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data, struct panfrost_gem_object *bo; struct drm_panfrost_create_bo *args = data; struct panfrost_gem_mapping *mapping; + int ret; if (!args->size || args->pad || (args->flags & ~(PANFROST_BO_NOEXEC | PANFROST_BO_HEAP))) @@ -92,21 +93,29 @@ static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data, !(args->flags & PANFROST_BO_NOEXEC)) return -EINVAL; - bo = panfrost_gem_create_with_handle(file, dev, args->size, args->flags, - &args->handle); + bo = panfrost_gem_create(dev, args->size, args->flags); if (IS_ERR(bo)) return PTR_ERR(bo); + ret = drm_gem_handle_create(file, &bo->base.base, &args->handle); + if (ret) + goto out; + mapping = panfrost_gem_mapping_get(bo, priv); - if (!mapping) { - drm_gem_object_put(&bo->base.base); - return -EINVAL; + if (mapping) { + args->offset = mapping->mmnode.start << PAGE_SHIFT; + panfrost_gem_mapping_put(mapping); + } else { + /* This can only happen if the handle from + * drm_gem_handle_create() has already been guessed and freed + * by user space + */ + ret = -EINVAL; } - args->offset = mapping->mmnode.start << PAGE_SHIFT; - panfrost_gem_mapping_put(mapping); - - return 0; +out: + drm_gem_object_put(&bo->base.base); + return ret; } /** diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c index 293e799e2fe8..3c812fbd126f 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gem.c +++ b/drivers/gpu/drm/panfrost/panfrost_gem.c @@ -235,12 +235,8 @@ struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t } struct panfrost_gem_object * -panfrost_gem_create_with_handle(struct drm_file *file_priv, - struct drm_device *dev, size_t size, - u32 flags, - uint32_t *handle) +panfrost_gem_create(struct drm_device *dev, size_t size, u32 flags) { - int ret; struct drm_gem_shmem_object *shmem; struct panfrost_gem_object *bo; @@ -256,16 +252,6 @@ panfrost_gem_create_with_handle(struct drm_file *file_priv, bo->noexec = !!(flags & PANFROST_BO_NOEXEC); bo->is_heap = !!(flags & PANFROST_BO_HEAP); - /* - * Allocate an id of idr table where the obj is registered - * and handle has the id what user can see. - */ - ret = drm_gem_handle_create(file_priv, &shmem->base, handle); - /* drop reference from allocate - handle holds it now. 
*/ - drm_gem_object_put(&shmem->base); - if (ret) - return ERR_PTR(ret); - return bo; } diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h index 8088d5fd8480..ad2877eeeccd 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gem.h +++ b/drivers/gpu/drm/panfrost/panfrost_gem.h @@ -69,10 +69,7 @@ panfrost_gem_prime_import_sg_table(struct drm_device *dev, struct sg_table *sgt); struct panfrost_gem_object * -panfrost_gem_create_with_handle(struct drm_file *file_priv, - struct drm_device *dev, size_t size, - u32 flags, - uint32_t *handle); +panfrost_gem_create(struct drm_device *dev, size_t size, u32 flags); int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv); void panfrost_gem_close(struct drm_gem_object *obj, diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c index fe09e5be79bd..15d04a0ec623 100644 --- a/drivers/gpu/drm/scheduler/sched_entity.c +++ b/drivers/gpu/drm/scheduler/sched_entity.c @@ -81,7 +81,7 @@ int drm_sched_entity_init(struct drm_sched_entity *entity, init_completion(&entity->entity_idle); /* We start in an idle state. */ - complete(&entity->entity_idle); + complete_all(&entity->entity_idle); spin_lock_init(&entity->rq_lock); spsc_queue_init(&entity->job_queue); diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index 31f3a1267be4..fd22d753b4ed 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -987,7 +987,7 @@ static int drm_sched_main(void *param) sched_job = drm_sched_entity_pop_job(entity); if (!sched_job) { - complete(&entity->entity_idle); + complete_all(&entity->entity_idle); continue; } @@ -998,7 +998,7 @@ static int drm_sched_main(void *param) trace_drm_run_job(sched_job, entity); fence = sched->ops->run_job(sched_job); - complete(&entity->entity_idle); + complete_all(&entity->entity_idle); drm_sched_fence_scheduled(s_fence); if (!IS_ERR_OR_NULL(fence)) { diff --git a/drivers/gpu/drm/tests/Makefile b/drivers/gpu/drm/tests/Makefile index b29ef1085cad..f896ef85c2f2 100644 --- a/drivers/gpu/drm/tests/Makefile +++ b/drivers/gpu/drm/tests/Makefile @@ -12,3 +12,5 @@ obj-$(CONFIG_DRM_KUNIT_TEST) += \ drm_mm_test.o \ drm_plane_helper_test.o \ drm_rect_test.o + +CFLAGS_drm_mm_test.o := $(DISABLE_STRUCTLEAK_PLUGIN) diff --git a/drivers/gpu/drm/tests/drm_mm_test.c b/drivers/gpu/drm/tests/drm_mm_test.c index 89f12d3b4a21..186b28dc7038 100644 --- a/drivers/gpu/drm/tests/drm_mm_test.c +++ b/drivers/gpu/drm/tests/drm_mm_test.c @@ -298,9 +298,9 @@ static bool expect_reserve_fail(struct kunit *test, struct drm_mm *mm, struct dr return false; } -static bool check_reserve_boundaries(struct kunit *test, struct drm_mm *mm, - unsigned int count, - u64 size) +static bool noinline_for_stack check_reserve_boundaries(struct kunit *test, struct drm_mm *mm, + unsigned int count, + u64 size) { const struct boundary { u64 start, size; diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c index 8d7728181de0..c7e74cf13022 100644 --- a/drivers/gpu/drm/virtio/virtgpu_object.c +++ b/drivers/gpu/drm/virtio/virtgpu_object.c @@ -184,7 +184,7 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev, struct virtio_gpu_object_array *objs = NULL; struct drm_gem_shmem_object *shmem_obj; struct virtio_gpu_object *bo; - struct virtio_gpu_mem_entry *ents; + struct virtio_gpu_mem_entry *ents = NULL; unsigned int nents; int ret; @@ -210,7 +210,7 @@ int 
virtio_gpu_object_create(struct virtio_gpu_device *vgdev, ret = -ENOMEM; objs = virtio_gpu_array_alloc(1); if (!objs) - goto err_put_id; + goto err_free_entry; virtio_gpu_array_add_obj(objs, &bo->base.base); ret = virtio_gpu_array_lock_resv(objs); @@ -239,6 +239,8 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev, err_put_objs: virtio_gpu_array_put_free(objs); +err_free_entry: + kvfree(ents); err_put_id: virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle); err_free_gem: diff --git a/drivers/infiniband/hw/mlx5/counters.c b/drivers/infiniband/hw/mlx5/counters.c index 945758f39523..3e1272695d99 100644 --- a/drivers/infiniband/hw/mlx5/counters.c +++ b/drivers/infiniband/hw/mlx5/counters.c @@ -278,7 +278,6 @@ static int do_get_hw_stats(struct ib_device *ibdev, const struct mlx5_ib_counters *cnts = get_counters(dev, port_num - 1); struct mlx5_core_dev *mdev; int ret, num_counters; - u32 mdev_port_num; if (!stats) return -EINVAL; @@ -299,8 +298,9 @@ static int do_get_hw_stats(struct ib_device *ibdev, } if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) { - mdev = mlx5_ib_get_native_port_mdev(dev, port_num, - &mdev_port_num); + if (!port_num) + port_num = 1; + mdev = mlx5_ib_get_native_port_mdev(dev, port_num, NULL); if (!mdev) { /* If port is not affiliated yet, its in down state * which doesn't have any counters yet, so it would be diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 40d9410ec303..cf953d23d18d 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -4502,6 +4502,40 @@ static bool mlx5_ib_modify_qp_allowed(struct mlx5_ib_dev *dev, return false; } +static int validate_rd_atomic(struct mlx5_ib_dev *dev, struct ib_qp_attr *attr, + int attr_mask, enum ib_qp_type qp_type) +{ + int log_max_ra_res; + int log_max_ra_req; + + if (qp_type == MLX5_IB_QPT_DCI) { + log_max_ra_res = 1 << MLX5_CAP_GEN(dev->mdev, + log_max_ra_res_dc); + log_max_ra_req = 1 << MLX5_CAP_GEN(dev->mdev, + log_max_ra_req_dc); + } else { + log_max_ra_res = 1 << MLX5_CAP_GEN(dev->mdev, + log_max_ra_res_qp); + log_max_ra_req = 1 << MLX5_CAP_GEN(dev->mdev, + log_max_ra_req_qp); + } + + if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && + attr->max_rd_atomic > log_max_ra_res) { + mlx5_ib_dbg(dev, "invalid max_rd_atomic value %d\n", + attr->max_rd_atomic); + return false; + } + + if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && + attr->max_dest_rd_atomic > log_max_ra_req) { + mlx5_ib_dbg(dev, "invalid max_dest_rd_atomic value %d\n", + attr->max_dest_rd_atomic); + return false; + } + return true; +} + int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, struct ib_udata *udata) { @@ -4589,21 +4623,8 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, goto out; } - if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && - attr->max_rd_atomic > - (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_res_qp))) { - mlx5_ib_dbg(dev, "invalid max_rd_atomic value %d\n", - attr->max_rd_atomic); - goto out; - } - - if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && - attr->max_dest_rd_atomic > - (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_req_qp))) { - mlx5_ib_dbg(dev, "invalid max_dest_rd_atomic value %d\n", - attr->max_dest_rd_atomic); + if (!validate_rd_atomic(dev, attr, attr_mask, qp_type)) goto out; - } if (cur_state == new_state && cur_state == IB_QPS_RESET) { err = 0; diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h index 00b0068fda20..5d94db453df3 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.h +++ 
b/drivers/infiniband/ulp/srp/ib_srp.h @@ -62,9 +62,6 @@ enum { SRP_DEFAULT_CMD_SQ_SIZE = SRP_DEFAULT_QUEUE_SIZE - SRP_RSP_SQ_SIZE - SRP_TSK_MGMT_SQ_SIZE, - SRP_TAG_NO_REQ = ~0U, - SRP_TAG_TSK_MGMT = 1U << 31, - SRP_MAX_PAGES_PER_MR = 512, SRP_MAX_ADD_CDB_LEN = 16, @@ -79,6 +76,11 @@ enum { sizeof(struct srp_imm_buf), }; +enum { + SRP_TAG_NO_REQ = ~0U, + SRP_TAG_TSK_MGMT = BIT(31), +}; + enum srp_target_state { SRP_TARGET_SCANNING, SRP_TARGET_LIVE, diff --git a/drivers/input/keyboard/mtk-pmic-keys.c b/drivers/input/keyboard/mtk-pmic-keys.c index 3d4ffa25e3df..4364c3401ff1 100644 --- a/drivers/input/keyboard/mtk-pmic-keys.c +++ b/drivers/input/keyboard/mtk-pmic-keys.c @@ -10,6 +10,7 @@ #include <linux/kernel.h> #include <linux/mfd/mt6323/registers.h> #include <linux/mfd/mt6331/registers.h> +#include <linux/mfd/mt6357/registers.h> #include <linux/mfd/mt6358/registers.h> #include <linux/mfd/mt6397/core.h> #include <linux/mfd/mt6397/registers.h> @@ -90,6 +91,19 @@ static const struct mtk_pmic_regs mt6331_regs = { .rst_lprst_mask = MTK_PMIC_MT6331_RST_DU_MASK, }; +static const struct mtk_pmic_regs mt6357_regs = { + .keys_regs[MTK_PMIC_PWRKEY_INDEX] = + MTK_PMIC_KEYS_REGS(MT6357_TOPSTATUS, + 0x2, MT6357_PSC_TOP_INT_CON0, 0x5, + MTK_PMIC_PWRKEY_RST), + .keys_regs[MTK_PMIC_HOMEKEY_INDEX] = + MTK_PMIC_KEYS_REGS(MT6357_TOPSTATUS, + 0x8, MT6357_PSC_TOP_INT_CON0, 0xa, + MTK_PMIC_HOMEKEY_INDEX), + .pmic_rst_reg = MT6357_TOP_RST_MISC, + .rst_lprst_mask = MTK_PMIC_RST_DU_MASK, +}; + static const struct mtk_pmic_regs mt6358_regs = { .keys_regs[MTK_PMIC_PWRKEY_INDEX] = MTK_PMIC_KEYS_REGS(MT6358_TOPSTATUS, @@ -277,6 +291,9 @@ static const struct of_device_id of_mtk_pmic_keys_match_tbl[] = { .compatible = "mediatek,mt6331-keys", .data = &mt6331_regs, }, { + .compatible = "mediatek,mt6357-keys", + .data = &mt6357_regs, + }, { .compatible = "mediatek,mt6358-keys", .data = &mt6358_regs, }, { diff --git a/drivers/md/dm.c b/drivers/md/dm.c index e1ea3a7bd9d9..b424a6ee27ba 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1742,6 +1742,8 @@ static void dm_split_and_process_bio(struct mapped_device *md, * otherwise associated queue_limits won't be imposed. 
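* bio_split_to_limits() may also consume the bio and return NULL (for * example when the bio has already been ended on error), in which case * there is nothing left for this function to do.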
*/ bio = bio_split_to_limits(bio); + if (!bio) + return; } init_clone_info(&ci, md, map, bio, is_abnormal); diff --git a/drivers/md/md.c b/drivers/md/md.c index 775f1dde190a..8af639296b3c 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -455,6 +455,8 @@ static void md_submit_bio(struct bio *bio) } bio = bio_split_to_limits(bio); + if (!bio) + return; if (mddev->ro == MD_RDONLY && unlikely(rw == WRITE)) { if (bio_sectors(bio) != 0) diff --git a/drivers/memory/renesas-rpc-if.c b/drivers/memory/renesas-rpc-if.c index 09cd4318a83d..62b06041d758 100644 --- a/drivers/memory/renesas-rpc-if.c +++ b/drivers/memory/renesas-rpc-if.c @@ -163,14 +163,36 @@ static const struct regmap_access_table rpcif_volatile_table = { .n_yes_ranges = ARRAY_SIZE(rpcif_volatile_ranges), }; +struct rpcif_priv { + struct device *dev; + void __iomem *base; + void __iomem *dirmap; + struct regmap *regmap; + struct reset_control *rstc; + struct platform_device *vdev; + size_t size; + enum rpcif_type type; + enum rpcif_data_dir dir; + u8 bus_size; + u8 xfer_size; + void *buffer; + u32 xferlen; + u32 smcr; + u32 smadr; + u32 command; /* DRCMR or SMCMR */ + u32 option; /* DROPR or SMOPR */ + u32 enable; /* DRENR or SMENR */ + u32 dummy; /* DRDMCR or SMDMCR */ + u32 ddr; /* DRDRENR or SMDRENR */ +}; /* * Custom accessor functions to ensure SM[RW]DR[01] are always accessed with - * proper width. Requires rpcif.xfer_size to be correctly set before! + * proper width. Requires rpcif_priv.xfer_size to be correctly set before! */ static int rpcif_reg_read(void *context, unsigned int reg, unsigned int *val) { - struct rpcif *rpc = context; + struct rpcif_priv *rpc = context; switch (reg) { case RPCIF_SMRDR0: @@ -206,7 +228,7 @@ static int rpcif_reg_read(void *context, unsigned int reg, unsigned int *val) static int rpcif_reg_write(void *context, unsigned int reg, unsigned int val) { - struct rpcif *rpc = context; + struct rpcif_priv *rpc = context; switch (reg) { case RPCIF_SMWDR0: @@ -253,39 +275,18 @@ static const struct regmap_config rpcif_regmap_config = { .volatile_table = &rpcif_volatile_table, }; -int rpcif_sw_init(struct rpcif *rpc, struct device *dev) +int rpcif_sw_init(struct rpcif *rpcif, struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct resource *res; + struct rpcif_priv *rpc = dev_get_drvdata(dev); - rpc->dev = dev; - - rpc->base = devm_platform_ioremap_resource_byname(pdev, "regs"); - if (IS_ERR(rpc->base)) - return PTR_ERR(rpc->base); - - rpc->regmap = devm_regmap_init(&pdev->dev, NULL, rpc, &rpcif_regmap_config); - if (IS_ERR(rpc->regmap)) { - dev_err(&pdev->dev, - "failed to init regmap for rpcif, error %ld\n", - PTR_ERR(rpc->regmap)); - return PTR_ERR(rpc->regmap); - } - - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dirmap"); - rpc->dirmap = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(rpc->dirmap)) - return PTR_ERR(rpc->dirmap); - rpc->size = resource_size(res); - - rpc->type = (uintptr_t)of_device_get_match_data(dev); - rpc->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL); - - return PTR_ERR_OR_ZERO(rpc->rstc); + rpcif->dev = dev; + rpcif->dirmap = rpc->dirmap; + rpcif->size = rpc->size; + return 0; } EXPORT_SYMBOL(rpcif_sw_init); -static void rpcif_rzg2l_timing_adjust_sdr(struct rpcif *rpc) +static void rpcif_rzg2l_timing_adjust_sdr(struct rpcif_priv *rpc) { regmap_write(rpc->regmap, RPCIF_PHYWR, 0xa5390000); regmap_write(rpc->regmap, RPCIF_PHYADD, 0x80000000); @@ -299,15 +300,17 @@ static void rpcif_rzg2l_timing_adjust_sdr(struct rpcif 
*rpc) regmap_write(rpc->regmap, RPCIF_PHYADD, 0x80000032); } -int rpcif_hw_init(struct rpcif *rpc, bool hyperflash) +int rpcif_hw_init(struct device *dev, bool hyperflash) { + struct rpcif_priv *rpc = dev_get_drvdata(dev); u32 dummy; + int ret; - pm_runtime_get_sync(rpc->dev); + ret = pm_runtime_resume_and_get(dev); + if (ret) + return ret; if (rpc->type == RPCIF_RZ_G2L) { - int ret; - ret = reset_control_reset(rpc->rstc); if (ret) return ret; @@ -356,7 +359,7 @@ int rpcif_hw_init(struct rpcif *rpc, bool hyperflash) regmap_write(rpc->regmap, RPCIF_SSLDR, RPCIF_SSLDR_SPNDL(7) | RPCIF_SSLDR_SLNDL(7) | RPCIF_SSLDR_SCKDL(7)); - pm_runtime_put(rpc->dev); + pm_runtime_put(dev); rpc->bus_size = hyperflash ? 2 : 1; @@ -364,7 +367,7 @@ int rpcif_hw_init(struct rpcif *rpc, bool hyperflash) } EXPORT_SYMBOL(rpcif_hw_init); -static int wait_msg_xfer_end(struct rpcif *rpc) +static int wait_msg_xfer_end(struct rpcif_priv *rpc) { u32 sts; @@ -373,7 +376,7 @@ static int wait_msg_xfer_end(struct rpcif *rpc) USEC_PER_SEC); } -static u8 rpcif_bits_set(struct rpcif *rpc, u32 nbytes) +static u8 rpcif_bits_set(struct rpcif_priv *rpc, u32 nbytes) { if (rpc->bus_size == 2) nbytes /= 2; @@ -386,9 +389,11 @@ static u8 rpcif_bit_size(u8 buswidth) return buswidth > 4 ? 2 : ilog2(buswidth); } -void rpcif_prepare(struct rpcif *rpc, const struct rpcif_op *op, u64 *offs, +void rpcif_prepare(struct device *dev, const struct rpcif_op *op, u64 *offs, size_t *len) { + struct rpcif_priv *rpc = dev_get_drvdata(dev); + rpc->smcr = 0; rpc->smadr = 0; rpc->enable = 0; @@ -472,12 +477,15 @@ void rpcif_prepare(struct rpcif *rpc, const struct rpcif_op *op, u64 *offs, } EXPORT_SYMBOL(rpcif_prepare); -int rpcif_manual_xfer(struct rpcif *rpc) +int rpcif_manual_xfer(struct device *dev) { + struct rpcif_priv *rpc = dev_get_drvdata(dev); u32 smenr, smcr, pos = 0, max = rpc->bus_size == 2 ? 
8 : 4; int ret = 0; - pm_runtime_get_sync(rpc->dev); + ret = pm_runtime_resume_and_get(dev); + if (ret < 0) + return ret; regmap_update_bits(rpc->regmap, RPCIF_PHYCNT, RPCIF_PHYCNT_CAL, RPCIF_PHYCNT_CAL); @@ -587,13 +595,13 @@ int rpcif_manual_xfer(struct rpcif *rpc) } exit: - pm_runtime_put(rpc->dev); + pm_runtime_put(dev); return ret; err_out: if (reset_control_reset(rpc->rstc)) - dev_err(rpc->dev, "Failed to reset HW\n"); - rpcif_hw_init(rpc, rpc->bus_size == 2); + dev_err(dev, "Failed to reset HW\n"); + rpcif_hw_init(dev, rpc->bus_size == 2); goto exit; } EXPORT_SYMBOL(rpcif_manual_xfer); @@ -640,15 +648,19 @@ static void memcpy_fromio_readw(void *to, } } -ssize_t rpcif_dirmap_read(struct rpcif *rpc, u64 offs, size_t len, void *buf) +ssize_t rpcif_dirmap_read(struct device *dev, u64 offs, size_t len, void *buf) { + struct rpcif_priv *rpc = dev_get_drvdata(dev); loff_t from = offs & (rpc->size - 1); size_t size = rpc->size - from; + int ret; if (len > size) len = size; - pm_runtime_get_sync(rpc->dev); + ret = pm_runtime_resume_and_get(dev); + if (ret < 0) + return ret; regmap_update_bits(rpc->regmap, RPCIF_CMNCR, RPCIF_CMNCR_MD, 0); regmap_write(rpc->regmap, RPCIF_DRCR, 0); @@ -666,7 +678,7 @@ ssize_t rpcif_dirmap_read(struct rpcif *rpc, u64 offs, size_t len, void *buf) else memcpy_fromio(buf, rpc->dirmap + from, len); - pm_runtime_put(rpc->dev); + pm_runtime_put(dev); return len; } @@ -674,14 +686,17 @@ EXPORT_SYMBOL(rpcif_dirmap_read); static int rpcif_probe(struct platform_device *pdev) { + struct device *dev = &pdev->dev; struct platform_device *vdev; struct device_node *flash; + struct rpcif_priv *rpc; + struct resource *res; const char *name; int ret; - flash = of_get_next_child(pdev->dev.of_node, NULL); + flash = of_get_next_child(dev->of_node, NULL); if (!flash) { - dev_warn(&pdev->dev, "no flash node found\n"); + dev_warn(dev, "no flash node found\n"); return -ENODEV; } @@ -691,16 +706,45 @@ static int rpcif_probe(struct platform_device *pdev) name = "rpc-if-hyperflash"; } else { of_node_put(flash); - dev_warn(&pdev->dev, "unknown flash type\n"); + dev_warn(dev, "unknown flash type\n"); return -ENODEV; } of_node_put(flash); + rpc = devm_kzalloc(dev, sizeof(*rpc), GFP_KERNEL); + if (!rpc) + return -ENOMEM; + + rpc->base = devm_platform_ioremap_resource_byname(pdev, "regs"); + if (IS_ERR(rpc->base)) + return PTR_ERR(rpc->base); + + rpc->regmap = devm_regmap_init(dev, NULL, rpc, &rpcif_regmap_config); + if (IS_ERR(rpc->regmap)) { + dev_err(dev, "failed to init regmap for rpcif, error %ld\n", + PTR_ERR(rpc->regmap)); + return PTR_ERR(rpc->regmap); + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dirmap"); + rpc->dirmap = devm_ioremap_resource(dev, res); + if (IS_ERR(rpc->dirmap)) + return PTR_ERR(rpc->dirmap); + rpc->size = resource_size(res); + + rpc->type = (uintptr_t)of_device_get_match_data(dev); + rpc->rstc = devm_reset_control_get_exclusive(dev, NULL); + if (IS_ERR(rpc->rstc)) + return PTR_ERR(rpc->rstc); + vdev = platform_device_alloc(name, pdev->id); if (!vdev) return -ENOMEM; - vdev->dev.parent = &pdev->dev; - platform_set_drvdata(pdev, vdev); + vdev->dev.parent = dev; + + rpc->dev = dev; + rpc->vdev = vdev; + platform_set_drvdata(pdev, rpc); ret = platform_device_add(vdev); if (ret) { @@ -713,9 +757,9 @@ static int rpcif_probe(struct platform_device *pdev) static int rpcif_remove(struct platform_device *pdev) { - struct platform_device *vdev = platform_get_drvdata(pdev); + struct rpcif_priv *rpc = platform_get_drvdata(pdev); - 
platform_device_unregister(vdev); + platform_device_unregister(rpc->vdev); return 0; } diff --git a/drivers/memory/ti-emif-pm.c b/drivers/memory/ti-emif-pm.c index 31d6266f008c..cef0d3beb63b 100644 --- a/drivers/memory/ti-emif-pm.c +++ b/drivers/memory/ti-emif-pm.c @@ -277,18 +277,13 @@ static int ti_emif_probe(struct platform_device *pdev) int ret; struct resource *res; struct device *dev = &pdev->dev; - const struct of_device_id *match; struct ti_emif_data *emif_data; emif_data = devm_kzalloc(dev, sizeof(*emif_data), GFP_KERNEL); if (!emif_data) return -ENOMEM; - match = of_match_device(ti_emif_of_match, &pdev->dev); - if (!match) - return -ENODEV; - - emif_data->pm_data.ti_emif_sram_config = (unsigned long)match->data; + emif_data->pm_data.ti_emif_sram_config = (unsigned long) device_get_match_data(&pdev->dev); emif_data->pm_data.ti_emif_base_addr_virt = devm_platform_get_and_ioremap_resource(pdev, 0, diff --git a/drivers/mtd/hyperbus/rpc-if.c b/drivers/mtd/hyperbus/rpc-if.c index d00d30243403..ef32fca5f785 100644 --- a/drivers/mtd/hyperbus/rpc-if.c +++ b/drivers/mtd/hyperbus/rpc-if.c @@ -56,7 +56,7 @@ static void rpcif_hb_prepare_read(struct rpcif *rpc, void *to, op.data.nbytes = len; op.data.buf.in = to; - rpcif_prepare(rpc, &op, NULL, NULL); + rpcif_prepare(rpc->dev, &op, NULL, NULL); } static void rpcif_hb_prepare_write(struct rpcif *rpc, unsigned long to, @@ -70,7 +70,7 @@ static void rpcif_hb_prepare_write(struct rpcif *rpc, unsigned long to, op.data.nbytes = len; op.data.buf.out = from; - rpcif_prepare(rpc, &op, NULL, NULL); + rpcif_prepare(rpc->dev, &op, NULL, NULL); } static u16 rpcif_hb_read16(struct hyperbus_device *hbdev, unsigned long addr) @@ -81,7 +81,7 @@ static u16 rpcif_hb_read16(struct hyperbus_device *hbdev, unsigned long addr) rpcif_hb_prepare_read(&hyperbus->rpc, &data, addr, 2); - rpcif_manual_xfer(&hyperbus->rpc); + rpcif_manual_xfer(hyperbus->rpc.dev); return data.x[0]; } @@ -94,7 +94,7 @@ static void rpcif_hb_write16(struct hyperbus_device *hbdev, unsigned long addr, rpcif_hb_prepare_write(&hyperbus->rpc, addr, &data, 2); - rpcif_manual_xfer(&hyperbus->rpc); + rpcif_manual_xfer(hyperbus->rpc.dev); } static void rpcif_hb_copy_from(struct hyperbus_device *hbdev, void *to, @@ -105,7 +105,7 @@ static void rpcif_hb_copy_from(struct hyperbus_device *hbdev, void *to, rpcif_hb_prepare_read(&hyperbus->rpc, to, from, len); - rpcif_dirmap_read(&hyperbus->rpc, from, len, to); + rpcif_dirmap_read(hyperbus->rpc.dev, from, len, to); } static const struct hyperbus_ops rpcif_hb_ops = { @@ -130,9 +130,9 @@ static int rpcif_hb_probe(struct platform_device *pdev) platform_set_drvdata(pdev, hyperbus); - rpcif_enable_rpm(&hyperbus->rpc); + pm_runtime_enable(hyperbus->rpc.dev); - error = rpcif_hw_init(&hyperbus->rpc, true); + error = rpcif_hw_init(hyperbus->rpc.dev, true); if (error) goto out_disable_rpm; @@ -150,7 +150,7 @@ static int rpcif_hb_probe(struct platform_device *pdev) return 0; out_disable_rpm: - rpcif_disable_rpm(&hyperbus->rpc); + pm_runtime_disable(hyperbus->rpc.dev); return error; } @@ -160,7 +160,7 @@ static int rpcif_hb_remove(struct platform_device *pdev) hyperbus_unregister_device(&hyperbus->hbdev); - rpcif_disable_rpm(&hyperbus->rpc); + pm_runtime_disable(hyperbus->rpc.dev); return 0; } diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index 455b555275f1..c99ffe6c683a 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -1549,6 +1549,7 @@ static void ad_port_selection_logic(struct port *port, bool 
*update_slave_arr) slave_err(bond->dev, port->slave->dev, "Port %d did not find a suitable aggregator\n", port->actor_port_number); + return; } } /* if all aggregator's ports are READY_N == TRUE, set ready=TRUE diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index b4c65783960a..0363ce597661 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -2654,10 +2654,12 @@ static void bond_miimon_link_change(struct bonding *bond, static void bond_miimon_commit(struct bonding *bond) { - struct slave *slave, *primary; + struct slave *slave, *primary, *active; bool do_failover = false; struct list_head *iter; + ASSERT_RTNL(); + bond_for_each_slave(bond, slave, iter) { switch (slave->link_new_state) { case BOND_LINK_NOCHANGE: @@ -2700,8 +2702,8 @@ static void bond_miimon_commit(struct bonding *bond) bond_miimon_link_change(bond, slave, BOND_LINK_UP); - if (!rcu_access_pointer(bond->curr_active_slave) || slave == primary || - slave->prio > rcu_dereference(bond->curr_active_slave)->prio) + active = rtnl_dereference(bond->curr_active_slave); + if (!active || slave == primary || slave->prio > active->prio) do_failover = true; continue; diff --git a/drivers/net/dsa/mv88e6xxx/Kconfig b/drivers/net/dsa/mv88e6xxx/Kconfig index 7a2445a34eb7..e3181d5471df 100644 --- a/drivers/net/dsa/mv88e6xxx/Kconfig +++ b/drivers/net/dsa/mv88e6xxx/Kconfig @@ -2,7 +2,6 @@ config NET_DSA_MV88E6XXX tristate "Marvell 88E6xxx Ethernet switch fabric support" depends on NET_DSA - depends on PTP_1588_CLOCK_OPTIONAL select IRQ_DOMAIN select NET_DSA_TAG_EDSA select NET_DSA_TAG_DSA @@ -13,7 +12,8 @@ config NET_DSA_MV88E6XXX config NET_DSA_MV88E6XXX_PTP bool "PTP support for Marvell 88E6xxx" default n - depends on NET_DSA_MV88E6XXX && PTP_1588_CLOCK + depends on (NET_DSA_MV88E6XXX = y && PTP_1588_CLOCK = y) || \ + (NET_DSA_MV88E6XXX = m && PTP_1588_CLOCK) help Say Y to enable PTP hardware timestamping on Marvell 88E6xxx switch chips that support it. 
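The bond_miimon_commit() hunk above reads bond->curr_active_slave once under RTNL instead of mixing rcu_access_pointer() and rcu_dereference() outside an RCU read side. A minimal sketch of the resulting pattern (bond_slave_should_failover() is a made-up helper, not part of the patch; it assumes the caller holds RTNL, as bond_miimon_commit() does after the added ASSERT_RTNL()):

static bool bond_slave_should_failover(struct bonding *bond,
				       struct slave *slave,
				       struct slave *primary)
{
	struct slave *active;

	ASSERT_RTNL();	/* the miimon commit path runs with RTNL held */
	active = rtnl_dereference(bond->curr_active_slave);

	/* fail over when there is no active slave yet, the recovered link
	 * is the configured primary, or it has a higher prio than the
	 * currently active slave
	 */
	return !active || slave == primary || slave->prio > active->prio;
}

Taking the pointer once under RTNL also removes the double dereference the old code performed inside the comparison.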
diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c index c5c3b4e92f28..2f224b166bbb 100644 --- a/drivers/net/dsa/qca/qca8k-8xxx.c +++ b/drivers/net/dsa/qca/qca8k-8xxx.c @@ -37,77 +37,104 @@ qca8k_split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page) } static int -qca8k_set_lo(struct qca8k_priv *priv, int phy_id, u32 regnum, u16 lo) +qca8k_mii_write_lo(struct mii_bus *bus, int phy_id, u32 regnum, u32 val) { - u16 *cached_lo = &priv->mdio_cache.lo; - struct mii_bus *bus = priv->bus; int ret; + u16 lo; - if (lo == *cached_lo) - return 0; - + lo = val & 0xffff; ret = bus->write(bus, phy_id, regnum, lo); if (ret < 0) dev_err_ratelimited(&bus->dev, "failed to write qca8k 32bit lo register\n"); - *cached_lo = lo; - return 0; + return ret; } static int -qca8k_set_hi(struct qca8k_priv *priv, int phy_id, u32 regnum, u16 hi) +qca8k_mii_write_hi(struct mii_bus *bus, int phy_id, u32 regnum, u32 val) { - u16 *cached_hi = &priv->mdio_cache.hi; - struct mii_bus *bus = priv->bus; int ret; + u16 hi; - if (hi == *cached_hi) - return 0; - + hi = (u16)(val >> 16); ret = bus->write(bus, phy_id, regnum, hi); if (ret < 0) dev_err_ratelimited(&bus->dev, "failed to write qca8k 32bit hi register\n"); - *cached_hi = hi; - return 0; + return ret; } static int -qca8k_mii_read32(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val) +qca8k_mii_read_lo(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val) { int ret; ret = bus->read(bus, phy_id, regnum); - if (ret >= 0) { - *val = ret; - ret = bus->read(bus, phy_id, regnum + 1); - *val |= ret << 16; - } + if (ret < 0) + goto err; - if (ret < 0) { - dev_err_ratelimited(&bus->dev, - "failed to read qca8k 32bit register\n"); - *val = 0; - return ret; - } + *val = ret & 0xffff; + return 0; + +err: + dev_err_ratelimited(&bus->dev, + "failed to read qca8k 32bit lo register\n"); + *val = 0; + + return ret; +} +static int +qca8k_mii_read_hi(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val) +{ + int ret; + + ret = bus->read(bus, phy_id, regnum); + if (ret < 0) + goto err; + + *val = ret << 16; return 0; + +err: + dev_err_ratelimited(&bus->dev, + "failed to read qca8k 32bit hi register\n"); + *val = 0; + + return ret; } -static void -qca8k_mii_write32(struct qca8k_priv *priv, int phy_id, u32 regnum, u32 val) +static int +qca8k_mii_read32(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val) { - u16 lo, hi; + u32 hi, lo; int ret; - lo = val & 0xffff; - hi = (u16)(val >> 16); + *val = 0; - ret = qca8k_set_lo(priv, phy_id, regnum, lo); - if (ret >= 0) - ret = qca8k_set_hi(priv, phy_id, regnum + 1, hi); + ret = qca8k_mii_read_lo(bus, phy_id, regnum, &lo); + if (ret < 0) + goto err; + + ret = qca8k_mii_read_hi(bus, phy_id, regnum + 1, &hi); + if (ret < 0) + goto err; + + *val = lo | hi; + +err: + return ret; +} + +static void +qca8k_mii_write32(struct mii_bus *bus, int phy_id, u32 regnum, u32 val) +{ + if (qca8k_mii_write_lo(bus, phy_id, regnum, val) < 0) + return; + + qca8k_mii_write_hi(bus, phy_id, regnum + 1, val); } static int @@ -146,7 +173,16 @@ static void qca8k_rw_reg_ack_handler(struct dsa_switch *ds, struct sk_buff *skb) command = get_unaligned_le32(&mgmt_ethhdr->command); cmd = FIELD_GET(QCA_HDR_MGMT_CMD, command); + len = FIELD_GET(QCA_HDR_MGMT_LENGTH, command); + /* Special case for len of 15 as this is the max value for len and needs to + * be increased before converting it from word to dword. + */ + if (len == 15) + len++; + + /* We can ignore odd value, we always round up them in the alloc function. 
*/ + len *= sizeof(u16); /* Make sure the seq match the requested packet */ if (get_unaligned_le32(&mgmt_ethhdr->seq) == mgmt_eth_data->seq) @@ -193,17 +229,33 @@ static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 * if (!skb) return NULL; - /* Max value for len reg is 15 (0xf) but the switch actually return 16 byte - * Actually for some reason the steps are: - * 0: nothing - * 1-4: first 4 byte - * 5-6: first 12 byte - * 7-15: all 16 byte + /* Hdr mgmt length value is in step of word size. + * As an example to process 4 byte of data the correct length to set is 2. + * To process 8 byte 4, 12 byte 6, 16 byte 8... + * + * Odd values will always return the next size on the ack packet. + * (length of 3 (6 byte) will always return 8 bytes of data) + * + * This means that a value of 15 (0xf) actually means reading/writing 32 bytes + * of data. + * + * To correctly calculate the length we devide the requested len by word and + * round up. + * On the ack function we can skip the odd check as we already handle the + * case here. */ - if (len == 16) - real_len = 15; - else - real_len = len; + real_len = DIV_ROUND_UP(len, sizeof(u16)); + + /* We check if the result len is odd and we round up another time to + * the next size. (length of 3 will be increased to 4 as switch will always + * return 8 bytes) + */ + if (real_len % sizeof(u16) != 0) + real_len++; + + /* Max reg value is 0xf(15) but switch will always return the next size (32 byte) */ + if (real_len == 16) + real_len--; skb_reset_mac_header(skb); skb_set_network_header(skb, skb->len); @@ -417,7 +469,7 @@ qca8k_regmap_write(void *ctx, uint32_t reg, uint32_t val) if (ret < 0) goto exit; - qca8k_mii_write32(priv, 0x10 | r2, r1, val); + qca8k_mii_write32(bus, 0x10 | r2, r1, val); exit: mutex_unlock(&bus->mdio_lock); @@ -450,7 +502,7 @@ qca8k_regmap_update_bits(void *ctx, uint32_t reg, uint32_t mask, uint32_t write_ val &= ~mask; val |= write_val; - qca8k_mii_write32(priv, 0x10 | r2, r1, val); + qca8k_mii_write32(bus, 0x10 | r2, r1, val); exit: mutex_unlock(&bus->mdio_lock); @@ -688,9 +740,9 @@ qca8k_mdio_busy_wait(struct mii_bus *bus, u32 reg, u32 mask) qca8k_split_addr(reg, &r1, &r2, &page); - ret = read_poll_timeout(qca8k_mii_read32, ret1, !(val & mask), 0, + ret = read_poll_timeout(qca8k_mii_read_hi, ret1, !(val & mask), 0, QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false, - bus, 0x10 | r2, r1, &val); + bus, 0x10 | r2, r1 + 1, &val); /* Check if qca8k_read has failed for a different reason * before returnting -ETIMEDOUT @@ -725,14 +777,14 @@ qca8k_mdio_write(struct qca8k_priv *priv, int phy, int regnum, u16 data) if (ret) goto exit; - qca8k_mii_write32(priv, 0x10 | r2, r1, val); + qca8k_mii_write32(bus, 0x10 | r2, r1, val); ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL, QCA8K_MDIO_MASTER_BUSY); exit: /* even if the busy_wait timeouts try to clear the MASTER_EN */ - qca8k_mii_write32(priv, 0x10 | r2, r1, 0); + qca8k_mii_write_hi(bus, 0x10 | r2, r1 + 1, 0); mutex_unlock(&bus->mdio_lock); @@ -762,18 +814,18 @@ qca8k_mdio_read(struct qca8k_priv *priv, int phy, int regnum) if (ret) goto exit; - qca8k_mii_write32(priv, 0x10 | r2, r1, val); + qca8k_mii_write_hi(bus, 0x10 | r2, r1 + 1, val); ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL, QCA8K_MDIO_MASTER_BUSY); if (ret) goto exit; - ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val); + ret = qca8k_mii_read_lo(bus, 0x10 | r2, r1, &val); exit: /* even if the busy_wait timeouts try to clear the MASTER_EN */ - qca8k_mii_write32(priv, 0x10 | r2, r1, 0); + 
qca8k_mii_write_hi(bus, 0x10 | r2, r1 + 1, 0); mutex_unlock(&bus->mdio_lock); @@ -1943,8 +1995,6 @@ qca8k_sw_probe(struct mdio_device *mdiodev) } priv->mdio_cache.page = 0xffff; - priv->mdio_cache.lo = 0xffff; - priv->mdio_cache.hi = 0xffff; /* Check the detected switch id */ ret = qca8k_read_switch_id(priv); diff --git a/drivers/net/dsa/qca/qca8k.h b/drivers/net/dsa/qca/qca8k.h index 0b7a5cb12321..03514f7a20be 100644 --- a/drivers/net/dsa/qca/qca8k.h +++ b/drivers/net/dsa/qca/qca8k.h @@ -375,11 +375,6 @@ struct qca8k_mdio_cache { * mdio writes */ u16 page; -/* lo and hi can also be cached and from Documentation we can skip one - * extra mdio write if lo or hi is didn't change. - */ - u16 lo; - u16 hi; }; struct qca8k_pcs { diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c index 8c8b4c88c7de..451c3a1b6255 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_com.c @@ -2400,29 +2400,18 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev, return -EOPNOTSUPP; } - switch (func) { - case ENA_ADMIN_TOEPLITZ: - if (key) { - if (key_len != sizeof(hash_key->key)) { - netdev_err(ena_dev->net_device, - "key len (%u) doesn't equal the supported size (%zu)\n", - key_len, sizeof(hash_key->key)); - return -EINVAL; - } - memcpy(hash_key->key, key, key_len); - rss->hash_init_val = init_val; - hash_key->key_parts = key_len / sizeof(hash_key->key[0]); + if ((func == ENA_ADMIN_TOEPLITZ) && key) { + if (key_len != sizeof(hash_key->key)) { + netdev_err(ena_dev->net_device, + "key len (%u) doesn't equal the supported size (%zu)\n", + key_len, sizeof(hash_key->key)); + return -EINVAL; } - break; - case ENA_ADMIN_CRC32: - rss->hash_init_val = init_val; - break; - default: - netdev_err(ena_dev->net_device, "Invalid hash function (%d)\n", - func); - return -EINVAL; + memcpy(hash_key->key, key, key_len); + hash_key->key_parts = key_len / sizeof(hash_key->key[0]); } + rss->hash_init_val = init_val; old_func = rss->hash_func; rss->hash_func = func; rc = ena_com_set_hash_function(ena_dev); diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c index 48ae6d810f8f..8da79eedc057 100644 --- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c +++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c @@ -887,11 +887,7 @@ static int ena_set_tunable(struct net_device *netdev, switch (tuna->id) { case ETHTOOL_RX_COPYBREAK: len = *(u32 *)data; - if (len > adapter->netdev->mtu) { - ret = -EINVAL; - break; - } - adapter->rx_copybreak = len; + ret = ena_set_rx_copybreak(adapter, len); break; default: ret = -EINVAL; diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index a95529a69cbb..e8ad5ea31aff 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -374,9 +374,9 @@ static int ena_xdp_xmit(struct net_device *dev, int n, static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp) { + u32 verdict = ENA_XDP_PASS; struct bpf_prog *xdp_prog; struct ena_ring *xdp_ring; - u32 verdict = XDP_PASS; struct xdp_frame *xdpf; u64 *xdp_stat; @@ -393,7 +393,7 @@ static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp) if (unlikely(!xdpf)) { trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict); xdp_stat = &rx_ring->rx_stats.xdp_aborted; - verdict = XDP_ABORTED; + verdict = ENA_XDP_DROP; break; } @@ -409,29 +409,35 @@ static int ena_xdp_execute(struct ena_ring 
*rx_ring, struct xdp_buff *xdp) spin_unlock(&xdp_ring->xdp_tx_lock); xdp_stat = &rx_ring->rx_stats.xdp_tx; + verdict = ENA_XDP_TX; break; case XDP_REDIRECT: if (likely(!xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))) { xdp_stat = &rx_ring->rx_stats.xdp_redirect; + verdict = ENA_XDP_REDIRECT; break; } trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict); xdp_stat = &rx_ring->rx_stats.xdp_aborted; - verdict = XDP_ABORTED; + verdict = ENA_XDP_DROP; break; case XDP_ABORTED: trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict); xdp_stat = &rx_ring->rx_stats.xdp_aborted; + verdict = ENA_XDP_DROP; break; case XDP_DROP: xdp_stat = &rx_ring->rx_stats.xdp_drop; + verdict = ENA_XDP_DROP; break; case XDP_PASS: xdp_stat = &rx_ring->rx_stats.xdp_pass; + verdict = ENA_XDP_PASS; break; default: bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, verdict); xdp_stat = &rx_ring->rx_stats.xdp_invalid; + verdict = ENA_XDP_DROP; } ena_increase_stat(xdp_stat, 1, &rx_ring->syncp); @@ -512,16 +518,18 @@ static void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter, struct bpf_prog *prog, int first, int count) { + struct bpf_prog *old_bpf_prog; struct ena_ring *rx_ring; int i = 0; for (i = first; i < count; i++) { rx_ring = &adapter->rx_ring[i]; - xchg(&rx_ring->xdp_bpf_prog, prog); - if (prog) { + old_bpf_prog = xchg(&rx_ring->xdp_bpf_prog, prog); + + if (!old_bpf_prog && prog) { ena_xdp_register_rxq_info(rx_ring); rx_ring->rx_headroom = XDP_PACKET_HEADROOM; - } else { + } else if (old_bpf_prog && !prog) { ena_xdp_unregister_rxq_info(rx_ring); rx_ring->rx_headroom = NET_SKB_PAD; } @@ -672,6 +680,7 @@ static void ena_init_io_rings_common(struct ena_adapter *adapter, ring->ena_dev = adapter->ena_dev; ring->per_napi_packets = 0; ring->cpu = 0; + ring->numa_node = 0; ring->no_interrupt_event_cnt = 0; u64_stats_init(&ring->syncp); } @@ -775,6 +784,7 @@ static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid) tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; tx_ring->cpu = ena_irq->cpu; + tx_ring->numa_node = node; return 0; err_push_buf_intermediate_buf: @@ -907,6 +917,7 @@ static int ena_setup_rx_resources(struct ena_adapter *adapter, rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; rx_ring->cpu = ena_irq->cpu; + rx_ring->numa_node = node; return 0; } @@ -1619,12 +1630,12 @@ static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp) * we expect, then we simply drop it */ if (unlikely(rx_ring->ena_bufs[0].len > ENA_XDP_MAX_MTU)) - return XDP_DROP; + return ENA_XDP_DROP; ret = ena_xdp_execute(rx_ring, xdp); /* The xdp program might expand the headers */ - if (ret == XDP_PASS) { + if (ret == ENA_XDP_PASS) { rx_info->page_offset = xdp->data - xdp->data_hard_start; rx_ring->ena_bufs[0].len = xdp->data_end - xdp->data; } @@ -1663,7 +1674,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi, xdp_init_buff(&xdp, ENA_PAGE_SIZE, &rx_ring->xdp_rxq); do { - xdp_verdict = XDP_PASS; + xdp_verdict = ENA_XDP_PASS; skb = NULL; ena_rx_ctx.ena_bufs = rx_ring->ena_bufs; ena_rx_ctx.max_bufs = rx_ring->sgl_size; @@ -1691,7 +1702,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi, xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp); /* allocate skb and fill it */ - if (xdp_verdict == XDP_PASS) + if (xdp_verdict == ENA_XDP_PASS) skb = ena_rx_skb(rx_ring, rx_ring->ena_bufs, ena_rx_ctx.descs, @@ -1709,14 +1720,15 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi, /* Packets was 
passed for transmission, unmap it * from RX side. */ - if (xdp_verdict == XDP_TX || xdp_verdict == XDP_REDIRECT) { + if (xdp_verdict & ENA_XDP_FORWARDED) { ena_unmap_rx_buff(rx_ring, &rx_ring->rx_buffer_info[req_id]); rx_ring->rx_buffer_info[req_id].page = NULL; } } - if (xdp_verdict != XDP_PASS) { + if (xdp_verdict != ENA_XDP_PASS) { xdp_flags |= xdp_verdict; + total_len += ena_rx_ctx.ena_bufs[0].len; res_budget--; continue; } @@ -1760,7 +1772,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi, ena_refill_rx_bufs(rx_ring, refill_required); } - if (xdp_flags & XDP_REDIRECT) + if (xdp_flags & ENA_XDP_REDIRECT) xdp_do_flush_map(); return work_done; @@ -1814,8 +1826,9 @@ static void ena_adjust_adaptive_rx_intr_moderation(struct ena_napi *ena_napi) static void ena_unmask_interrupt(struct ena_ring *tx_ring, struct ena_ring *rx_ring) { + u32 rx_interval = tx_ring->smoothed_interval; struct ena_eth_io_intr_reg intr_reg; - u32 rx_interval = 0; + /* Rx ring can be NULL when for XDP tx queues which don't have an * accompanying rx_ring pair. */ @@ -1853,20 +1866,27 @@ static void ena_update_ring_numa_node(struct ena_ring *tx_ring, if (likely(tx_ring->cpu == cpu)) goto out; + tx_ring->cpu = cpu; + if (rx_ring) + rx_ring->cpu = cpu; + numa_node = cpu_to_node(cpu); + + if (likely(tx_ring->numa_node == numa_node)) + goto out; + put_cpu(); if (numa_node != NUMA_NO_NODE) { ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node); - if (rx_ring) + tx_ring->numa_node = numa_node; + if (rx_ring) { + rx_ring->numa_node = numa_node; ena_com_update_numa_node(rx_ring->ena_com_io_cq, numa_node); + } } - tx_ring->cpu = cpu; - if (rx_ring) - rx_ring->cpu = cpu; - return; out: put_cpu(); @@ -1987,11 +2007,10 @@ static int ena_io_poll(struct napi_struct *napi, int budget) if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev)) ena_adjust_adaptive_rx_intr_moderation(ena_napi); + ena_update_ring_numa_node(tx_ring, rx_ring); ena_unmask_interrupt(tx_ring, rx_ring); } - ena_update_ring_numa_node(tx_ring, rx_ring); - ret = rx_work_done; } else { ret = budget; @@ -2376,7 +2395,7 @@ static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid) ctx.mem_queue_type = ena_dev->tx_mem_queue_type; ctx.msix_vector = msix_vector; ctx.queue_size = tx_ring->ring_size; - ctx.numa_node = cpu_to_node(tx_ring->cpu); + ctx.numa_node = tx_ring->numa_node; rc = ena_com_create_io_queue(ena_dev, &ctx); if (rc) { @@ -2444,7 +2463,7 @@ static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid) ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; ctx.msix_vector = msix_vector; ctx.queue_size = rx_ring->ring_size; - ctx.numa_node = cpu_to_node(rx_ring->cpu); + ctx.numa_node = rx_ring->numa_node; rc = ena_com_create_io_queue(ena_dev, &ctx); if (rc) { @@ -2805,6 +2824,24 @@ int ena_update_queue_sizes(struct ena_adapter *adapter, return dev_was_up ? 
ena_up(adapter) : 0; } +int ena_set_rx_copybreak(struct ena_adapter *adapter, u32 rx_copybreak) +{ + struct ena_ring *rx_ring; + int i; + + if (rx_copybreak > min_t(u16, adapter->netdev->mtu, ENA_PAGE_SIZE)) + return -EINVAL; + + adapter->rx_copybreak = rx_copybreak; + + for (i = 0; i < adapter->num_io_queues; i++) { + rx_ring = &adapter->rx_ring[i]; + rx_ring->rx_copybreak = rx_copybreak; + } + + return 0; +} + int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count) { struct ena_com_dev *ena_dev = adapter->ena_dev; diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h index 1bdce99bf688..2cb141079474 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.h +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h @@ -262,9 +262,11 @@ struct ena_ring { bool disable_meta_caching; u16 no_interrupt_event_cnt; - /* cpu for TPH */ + /* cpu and NUMA for TPH */ int cpu; - /* number of tx/rx_buffer_info's entries */ + int numa_node; + + /* number of tx/rx_buffer_info's entries */ int ring_size; enum ena_admin_placement_policy_type tx_mem_queue_type; @@ -392,6 +394,8 @@ int ena_update_queue_sizes(struct ena_adapter *adapter, int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count); +int ena_set_rx_copybreak(struct ena_adapter *adapter, u32 rx_copybreak); + int ena_get_sset_count(struct net_device *netdev, int sset); static inline void ena_reset_device(struct ena_adapter *adapter, @@ -409,6 +413,15 @@ enum ena_xdp_errors_t { ENA_XDP_NO_ENOUGH_QUEUES, }; +enum ENA_XDP_ACTIONS { + ENA_XDP_PASS = 0, + ENA_XDP_TX = BIT(0), + ENA_XDP_REDIRECT = BIT(1), + ENA_XDP_DROP = BIT(2) +}; + +#define ENA_XDP_FORWARDED (ENA_XDP_TX | ENA_XDP_REDIRECT) + static inline bool ena_xdp_present(struct ena_adapter *adapter) { return !!adapter->xdp_bpf_prog; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 7b666106feee..614c0278419b 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -1064,6 +1064,9 @@ static void xgbe_free_irqs(struct xgbe_prv_data *pdata) devm_free_irq(pdata->dev, pdata->dev_irq, pdata); + tasklet_kill(&pdata->tasklet_dev); + tasklet_kill(&pdata->tasklet_ecc); + if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq)) devm_free_irq(pdata->dev, pdata->ecc_irq, pdata); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c index 22d4fc547a0a..a9ccc4258ee5 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c @@ -447,8 +447,10 @@ static void xgbe_i2c_stop(struct xgbe_prv_data *pdata) xgbe_i2c_disable(pdata); xgbe_i2c_clear_all_interrupts(pdata); - if (pdata->dev_irq != pdata->i2c_irq) + if (pdata->dev_irq != pdata->i2c_irq) { devm_free_irq(pdata->dev, pdata->i2c_irq, pdata); + tasklet_kill(&pdata->tasklet_i2c); + } } static int xgbe_i2c_start(struct xgbe_prv_data *pdata) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c index 4e97b4869522..0c5c1b155683 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c @@ -1390,8 +1390,10 @@ static void xgbe_phy_stop(struct xgbe_prv_data *pdata) /* Disable auto-negotiation */ xgbe_an_disable_all(pdata); - if (pdata->dev_irq != pdata->an_irq) + if (pdata->dev_irq != pdata->an_irq) { devm_free_irq(pdata->dev, pdata->an_irq, pdata); + tasklet_kill(&pdata->tasklet_an); + } 
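The three xgbe hunks above apply the same teardown rule: once the interrupt that schedules a tasklet has been freed, the tasklet itself must be killed so a late-scheduled run cannot touch freed state. A condensed sketch of that ordering (the helper name is invented for illustration and does not exist in the driver):

static void xgbe_example_irq_teardown(struct xgbe_prv_data *pdata,
				      unsigned int irq,
				      struct tasklet_struct *tasklet)
{
	/* after this, the handler can no longer schedule the tasklet */
	devm_free_irq(pdata->dev, irq, pdata);
	/* wait out and cancel anything the handler already queued */
	tasklet_kill(tasklet);
}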
pdata->phy_if.phy_impl.stop(pdata); diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index d91fdb0c2649..2cf96892e565 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -2784,17 +2784,11 @@ static int bcm_enet_shared_probe(struct platform_device *pdev) return 0; } -static int bcm_enet_shared_remove(struct platform_device *pdev) -{ - return 0; -} - /* this "shared" driver is needed because both macs share a single * address space */ struct platform_driver bcm63xx_enet_shared_driver = { .probe = bcm_enet_shared_probe, - .remove = bcm_enet_shared_remove, .driver = { .name = "bcm63xx_enet_shared", .owner = THIS_MODULE, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 4c7d07c684c4..16ce7a90610c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -991,8 +991,7 @@ static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp, dma_addr -= bp->rx_dma_offset; dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir, DMA_ATTR_WEAK_ORDERING); - skb = build_skb(page_address(page), BNXT_PAGE_MODE_BUF_SIZE + - bp->rx_dma_offset); + skb = build_skb(page_address(page), PAGE_SIZE); if (!skb) { __free_page(page); return NULL; @@ -1925,7 +1924,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, dma_addr = rx_buf->mapping; if (bnxt_xdp_attached(bp, rxr)) { - bnxt_xdp_buff_init(bp, rxr, cons, &data_ptr, &len, &xdp); + bnxt_xdp_buff_init(bp, rxr, cons, data_ptr, len, &xdp); if (agg_bufs) { u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp, cp_cons, agg_bufs, @@ -1940,7 +1939,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, } if (xdp_active) { - if (bnxt_rx_xdp(bp, rxr, cons, xdp, data, &len, event)) { + if (bnxt_rx_xdp(bp, rxr, cons, xdp, data, &data_ptr, &len, event)) { rc = 1; goto next_rx; } @@ -3969,8 +3968,10 @@ void bnxt_set_ring_params(struct bnxt *bp) bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1; if (BNXT_RX_PAGE_MODE(bp)) { - rx_space = BNXT_PAGE_MODE_BUF_SIZE; - rx_size = BNXT_MAX_PAGE_MODE_MTU; + rx_space = PAGE_SIZE; + rx_size = PAGE_SIZE - + ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) - + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); } else { rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN); rx_space = rx_size + NET_SKB_PAD + @@ -5398,15 +5399,16 @@ static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id) req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT); req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID); - if (BNXT_RX_PAGE_MODE(bp) && !BNXT_RX_JUMBO_MODE(bp)) { + if (BNXT_RX_PAGE_MODE(bp)) { + req->jumbo_thresh = cpu_to_le16(bp->rx_buf_use_size); + } else { req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 | VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6); req->enables |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID); + req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh); + req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh); } - /* thresholds not implemented in firmware yet */ - req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh); - req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh); req->vnic_id = cpu_to_le32(vnic->fw_vnic_id); return hwrm_req_send(bp, req); } @@ -13591,7 +13593,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) return -ENOMEM; bp = netdev_priv(dev); 
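The bnxt_set_ring_params() hunk above now derives the page-mode RX buffer size directly from PAGE_SIZE, carving out headroom at the front and the skb_shared_info at the tail. A rough stand-alone sketch of that arithmetic (illustrative helper only, not in the driver):

static unsigned int bnxt_example_page_mode_data_size(void)
{
	/* headroom reserved in front of the packet (XDP or stack padding) */
	unsigned int headroom = ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8);
	/* room kept at the tail for build_skb()'s skb_shared_info */
	unsigned int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	return PAGE_SIZE - headroom - tailroom;
}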
- SET_NETDEV_DEVLINK_PORT(dev, &bp->dl_port); bp->board_idx = ent->driver_data; bp->msg_enable = BNXT_DEF_MSG_ENABLE; bnxt_set_max_func_irqs(bp, max_irqs); @@ -13599,6 +13600,10 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (bnxt_vf_pciid(bp->board_idx)) bp->flags |= BNXT_FLAG_VF; + /* No devlink port registration in case of a VF */ + if (BNXT_PF(bp)) + SET_NETDEV_DEVLINK_PORT(dev, &bp->dl_port); + if (pdev->msix_cap) bp->flags |= BNXT_FLAG_MSIX_CAP; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 41c6dd0ae447..5163ef4a49ea 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -591,12 +591,20 @@ struct nqe_cn { #define BNXT_RX_PAGE_SIZE (1 << BNXT_RX_PAGE_SHIFT) #define BNXT_MAX_MTU 9500 -#define BNXT_PAGE_MODE_BUF_SIZE \ + +/* First RX buffer page in XDP multi-buf mode + * + * +-------------------------------------------------------------------------+ + * | XDP_PACKET_HEADROOM | bp->rx_buf_use_size | skb_shared_info| + * | (bp->rx_dma_offset) | | | + * +-------------------------------------------------------------------------+ + */ +#define BNXT_MAX_PAGE_MODE_MTU_SBUF \ ((unsigned int)PAGE_SIZE - VLAN_ETH_HLEN - NET_IP_ALIGN - \ XDP_PACKET_HEADROOM) #define BNXT_MAX_PAGE_MODE_MTU \ - BNXT_PAGE_MODE_BUF_SIZE - \ - SKB_DATA_ALIGN((unsigned int)sizeof(struct skb_shared_info)) + (BNXT_MAX_PAGE_MODE_MTU_SBUF - \ + SKB_DATA_ALIGN((unsigned int)sizeof(struct skb_shared_info))) #define BNXT_MIN_PKT_SIZE 52 @@ -2134,7 +2142,6 @@ struct bnxt { #define BNXT_DUMP_CRASH 1 struct bpf_prog *xdp_prog; - u8 xdp_has_frags; struct bnxt_ptp_cfg *ptp_cfg; u8 ptp_all_rx_tstamp; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c index c3065ec0a479..36d5202c0aee 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c @@ -177,7 +177,7 @@ bool bnxt_xdp_attached(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) } void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, - u16 cons, u8 **data_ptr, unsigned int *len, + u16 cons, u8 *data_ptr, unsigned int len, struct xdp_buff *xdp) { struct bnxt_sw_rx_bd *rx_buf; @@ -191,13 +191,10 @@ void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, offset = bp->rx_offset; mapping = rx_buf->mapping - bp->rx_dma_offset; - dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir); - - if (bp->xdp_has_frags) - buflen = BNXT_PAGE_MODE_BUF_SIZE + offset; + dma_sync_single_for_cpu(&pdev->dev, mapping + offset, len, bp->rx_dir); xdp_init_buff(xdp, buflen, &rxr->xdp_rxq); - xdp_prepare_buff(xdp, *data_ptr - offset, offset, *len, false); + xdp_prepare_buff(xdp, data_ptr - offset, offset, len, false); } void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr, @@ -222,7 +219,8 @@ void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr, * false - packet should be passed to the stack. 
*/ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, - struct xdp_buff xdp, struct page *page, unsigned int *len, u8 *event) + struct xdp_buff xdp, struct page *page, u8 **data_ptr, + unsigned int *len, u8 *event) { struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog); struct bnxt_tx_ring_info *txr; @@ -255,8 +253,10 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, *event &= ~BNXT_RX_EVENT; *len = xdp.data_end - xdp.data; - if (orig_data != xdp.data) + if (orig_data != xdp.data) { offset = xdp.data - xdp.data_hard_start; + *data_ptr = xdp.data_hard_start + offset; + } switch (act) { case XDP_PASS: @@ -401,10 +401,8 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog) netdev_warn(dev, "ethtool rx/tx channels must be combined to support XDP.\n"); return -EOPNOTSUPP; } - if (prog) { + if (prog) tx_xdp = bp->rx_nr_rings; - bp->xdp_has_frags = prog->aux->xdp_has_frags; - } tc = netdev_get_num_tc(dev); if (!tc) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h index 505911ae095d..ea430d6961df 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h @@ -18,8 +18,8 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp, struct xdp_buff *xdp); void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts); bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, - struct xdp_buff xdp, struct page *page, unsigned int *len, - u8 *event); + struct xdp_buff xdp, struct page *page, u8 **data_ptr, + unsigned int *len, u8 *event); int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp); int bnxt_xdp_xmit(struct net_device *dev, int num_frames, struct xdp_frame **frames, u32 flags); @@ -27,7 +27,7 @@ int bnxt_xdp_xmit(struct net_device *dev, int num_frames, bool bnxt_xdp_attached(struct bnxt *bp, struct bnxt_rx_ring_info *rxr); void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, - u16 cons, u8 **data_ptr, unsigned int *len, + u16 cons, u8 *data_ptr, unsigned int len, struct xdp_buff *xdp); void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr, struct xdp_buff *xdp); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ierb.c b/drivers/net/ethernet/freescale/enetc/enetc_ierb.c index 91f02c505028..b307bef4dc29 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_ierb.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_ierb.c @@ -127,11 +127,6 @@ static int enetc_ierb_probe(struct platform_device *pdev) return 0; } -static int enetc_ierb_remove(struct platform_device *pdev) -{ - return 0; -} - static const struct of_device_id enetc_ierb_match[] = { { .compatible = "fsl,ls1028a-enetc-ierb", }, {}, @@ -144,7 +139,6 @@ static struct platform_driver enetc_ierb_driver = { .of_match_table = enetc_ierb_match, }, .probe = enetc_ierb_probe, - .remove = enetc_ierb_remove, }; module_platform_driver(enetc_ierb_driver); diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c index d00bae15a901..d528ca681b6f 100644 --- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c +++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c @@ -1430,7 +1430,7 @@ int dtsec_initialization(struct mac_device *mac_dev, dtsec->dtsec_drv_param->tx_pad_crc = true; phy_node = of_parse_phandle(mac_node, "tbi-handle", 0); - if (!phy_node || of_device_is_available(phy_node)) { + if (!phy_node || !of_device_is_available(phy_node)) { of_node_put(phy_node); 
err = -EINVAL; dev_err_probe(mac_dev->dev, err, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 0ec5730b1788..b4c4fb873568 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -3855,18 +3855,16 @@ static int hns3_gro_complete(struct sk_buff *skb, u32 l234info) return 0; } -static bool hns3_checksum_complete(struct hns3_enet_ring *ring, +static void hns3_checksum_complete(struct hns3_enet_ring *ring, struct sk_buff *skb, u32 ptype, u16 csum) { if (ptype == HNS3_INVALID_PTYPE || hns3_rx_ptype_tbl[ptype].ip_summed != CHECKSUM_COMPLETE) - return false; + return; hns3_ring_stats_update(ring, csum_complete); skb->ip_summed = CHECKSUM_COMPLETE; skb->csum = csum_unfold((__force __sum16)csum); - - return true; } static void hns3_rx_handle_csum(struct sk_buff *skb, u32 l234info, @@ -3926,8 +3924,7 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, ptype = hnae3_get_field(ol_info, HNS3_RXD_PTYPE_M, HNS3_RXD_PTYPE_S); - if (hns3_checksum_complete(ring, skb, ptype, csum)) - return; + hns3_checksum_complete(ring, skb, ptype, csum); /* check if hardware has done checksum */ if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B))) @@ -3936,6 +3933,7 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, if (unlikely(l234info & (BIT(HNS3_RXD_L3E_B) | BIT(HNS3_RXD_L4E_B) | BIT(HNS3_RXD_OL3E_B) | BIT(HNS3_RXD_OL4E_B)))) { + skb->ip_summed = CHECKSUM_NONE; hns3_ring_stats_update(ring, l3l4_csum_err); return; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 4e54f91f7a6c..07ad5f35219e 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -3910,9 +3910,17 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset) return ret; } - if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) + if (!reset || + !test_bit(HCLGE_VPORT_STATE_INITED, &vport->state)) continue; + if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) && + hdev->reset_type == HNAE3_FUNC_RESET) { + set_bit(HCLGE_VPORT_NEED_NOTIFY_RESET, + &vport->need_notify); + continue; + } + /* Inform VF to process the reset. * hclge_inform_reset_assert_to_vf may fail if VF * driver is not loaded. 
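The hclge_set_all_vf_rst() hunk above stops notifying VFs that are initialised but not currently alive; instead it records the pending reset in vport->need_notify so it can be replayed later from the keep-alive path. A compressed sketch of that decision (hypothetical helper; the real logic is inline in the driver and uses the HCLGE_VPORT_NEED_NOTIFY_RESET bit this patch introduces):

static bool hclge_example_defer_vf_notify(struct hclge_vport *vport,
					  enum hnae3_reset_type reset_type)
{
	if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) &&
	    reset_type == HNAE3_FUNC_RESET) {
		/* replayed once the VF sends its next keep-alive message */
		set_bit(HCLGE_VPORT_NEED_NOTIFY_RESET, &vport->need_notify);
		return true;
	}
	return false;	/* VF is alive, inform it about the reset directly */
}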
@@ -4609,18 +4617,25 @@ static void hclge_reset_service_task(struct hclge_dev *hdev) static void hclge_update_vport_alive(struct hclge_dev *hdev) { +#define HCLGE_ALIVE_SECONDS_NORMAL 8 + + unsigned long alive_time = HCLGE_ALIVE_SECONDS_NORMAL * HZ; int i; /* start from vport 1 for PF is always alive */ for (i = 1; i < hdev->num_alloc_vport; i++) { struct hclge_vport *vport = &hdev->vport[i]; - if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ)) + if (!test_bit(HCLGE_VPORT_STATE_INITED, &vport->state) || + !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) + continue; + if (time_after(jiffies, vport->last_active_jiffies + + alive_time)) { clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); - - /* If vf is not alive, set to default value */ - if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) - vport->mps = HCLGE_MAC_DEFAULT_FRAME; + dev_warn(&hdev->pdev->dev, + "VF %u heartbeat timeout\n", + i - HCLGE_VF_VPORT_START_NUM); + } } } @@ -8064,9 +8079,11 @@ int hclge_vport_start(struct hclge_vport *vport) { struct hclge_dev *hdev = vport->back; + set_bit(HCLGE_VPORT_STATE_INITED, &vport->state); set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state); vport->last_active_jiffies = jiffies; + vport->need_notify = 0; if (test_bit(vport->vport_id, hdev->vport_config_block)) { if (vport->vport_id) { @@ -8084,7 +8101,9 @@ int hclge_vport_start(struct hclge_vport *vport) void hclge_vport_stop(struct hclge_vport *vport) { + clear_bit(HCLGE_VPORT_STATE_INITED, &vport->state); clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); + vport->need_notify = 0; } static int hclge_client_start(struct hnae3_handle *handle) @@ -9208,7 +9227,8 @@ static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf, return 0; } - dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %s\n", + dev_info(&hdev->pdev->dev, + "MAC of VF %d has been set to %s, will be active after VF reset\n", vf, format_mac_addr); return 0; } @@ -10465,12 +10485,16 @@ static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid, * for DEVICE_VERSION_V3, vf doesn't need to know about the port based * VLAN state. 
*/ - if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 && - test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) - (void)hclge_push_vf_port_base_vlan_info(&hdev->vport[0], - vport->vport_id, - state, &vlan_info); - + if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) { + if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) + (void)hclge_push_vf_port_base_vlan_info(&hdev->vport[0], + vport->vport_id, + state, + &vlan_info); + else + set_bit(HCLGE_VPORT_NEED_NOTIFY_VF_VLAN, + &vport->need_notify); + } return 0; } @@ -11941,7 +11965,7 @@ static void hclge_reset_vport_state(struct hclge_dev *hdev) int i; for (i = 0; i < hdev->num_alloc_vport; i++) { - hclge_vport_stop(vport); + clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); vport++; } } @@ -12754,60 +12778,71 @@ static int hclge_gro_en(struct hnae3_handle *handle, bool enable) return ret; } -static void hclge_sync_promisc_mode(struct hclge_dev *hdev) +static int hclge_sync_vport_promisc_mode(struct hclge_vport *vport) { - struct hclge_vport *vport = &hdev->vport[0]; struct hnae3_handle *handle = &vport->nic; + struct hclge_dev *hdev = vport->back; + bool uc_en = false; + bool mc_en = false; u8 tmp_flags; + bool bc_en; int ret; - u16 i; if (vport->last_promisc_flags != vport->overflow_promisc_flags) { set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state); vport->last_promisc_flags = vport->overflow_promisc_flags; } - if (test_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state)) { + if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, + &vport->state)) + return 0; + + /* for PF */ + if (!vport->vport_id) { tmp_flags = handle->netdev_flags | vport->last_promisc_flags; ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE, tmp_flags & HNAE3_MPE); - if (!ret) { - clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, - &vport->state); + if (!ret) set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, &vport->state); - } + else + set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, + &vport->state); + return ret; } - for (i = 1; i < hdev->num_alloc_vport; i++) { - bool uc_en = false; - bool mc_en = false; - bool bc_en; + /* for VF */ + if (vport->vf_info.trusted) { + uc_en = vport->vf_info.request_uc_en > 0 || + vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE; + mc_en = vport->vf_info.request_mc_en > 0 || + vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE; + } + bc_en = vport->vf_info.request_bc_en > 0; - vport = &hdev->vport[i]; + ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en, + mc_en, bc_en); + if (ret) { + set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state); + return ret; + } + hclge_set_vport_vlan_fltr_change(vport); - if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, - &vport->state)) - continue; + return 0; +} - if (vport->vf_info.trusted) { - uc_en = vport->vf_info.request_uc_en > 0 || - vport->overflow_promisc_flags & - HNAE3_OVERFLOW_UPE; - mc_en = vport->vf_info.request_mc_en > 0 || - vport->overflow_promisc_flags & - HNAE3_OVERFLOW_MPE; - } - bc_en = vport->vf_info.request_bc_en > 0; +static void hclge_sync_promisc_mode(struct hclge_dev *hdev) +{ + struct hclge_vport *vport; + int ret; + u16 i; - ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en, - mc_en, bc_en); - if (ret) { - set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, - &vport->state); + for (i = 0; i < hdev->num_alloc_vport; i++) { + vport = &hdev->vport[i]; + + ret = hclge_sync_vport_promisc_mode(vport); + if (ret) return; - } - hclge_set_vport_vlan_fltr_change(vport); } } @@ -12944,6 +12979,11 @@ static void hclge_clear_vport_vf_info(struct hclge_vport 
*vport, int vfid) struct hclge_vlan_info vlan_info; int ret; + clear_bit(HCLGE_VPORT_STATE_INITED, &vport->state); + clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); + vport->need_notify = 0; + vport->mps = 0; + /* after disable sriov, clean VF rate configured by PF */ ret = hclge_tm_qs_shaper_cfg(vport, 0); if (ret) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 495b639b0dc2..13f23d606e77 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -995,9 +995,15 @@ enum HCLGE_VPORT_STATE { HCLGE_VPORT_STATE_MAC_TBL_CHANGE, HCLGE_VPORT_STATE_PROMISC_CHANGE, HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, + HCLGE_VPORT_STATE_INITED, HCLGE_VPORT_STATE_MAX }; +enum HCLGE_VPORT_NEED_NOTIFY { + HCLGE_VPORT_NEED_NOTIFY_RESET, + HCLGE_VPORT_NEED_NOTIFY_VF_VLAN, +}; + struct hclge_vlan_info { u16 vlan_proto; /* so far support 802.1Q only */ u16 qos; @@ -1044,6 +1050,7 @@ struct hclge_vport { struct hnae3_handle roce; unsigned long state; + unsigned long need_notify; unsigned long last_active_jiffies; u32 mps; /* Max packet size */ struct hclge_vf_info vf_info; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index a7b06c63143c..04ff9bf12185 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -124,17 +124,26 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len, return status; } +static int hclge_inform_vf_reset(struct hclge_vport *vport, u16 reset_type) +{ + __le16 msg_data; + u8 dest_vfid; + + dest_vfid = (u8)vport->vport_id; + msg_data = cpu_to_le16(reset_type); + + /* send this requested info to VF */ + return hclge_send_mbx_msg(vport, (u8 *)&msg_data, sizeof(msg_data), + HCLGE_MBX_ASSERTING_RESET, dest_vfid); +} + int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport) { struct hclge_dev *hdev = vport->back; - __le16 msg_data; u16 reset_type; - u8 dest_vfid; BUILD_BUG_ON(HNAE3_MAX_RESET > U16_MAX); - dest_vfid = (u8)vport->vport_id; - if (hdev->reset_type == HNAE3_FUNC_RESET) reset_type = HNAE3_VF_PF_FUNC_RESET; else if (hdev->reset_type == HNAE3_FLR_RESET) @@ -142,11 +151,7 @@ int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport) else reset_type = HNAE3_VF_FUNC_RESET; - msg_data = cpu_to_le16(reset_type); - - /* send this requested info to VF */ - return hclge_send_mbx_msg(vport, (u8 *)&msg_data, sizeof(msg_data), - HCLGE_MBX_ASSERTING_RESET, dest_vfid); + return hclge_inform_vf_reset(vport, reset_type); } static void hclge_free_vector_ring_chain(struct hnae3_ring_chain_node *head) @@ -652,9 +657,56 @@ static int hclge_reset_vf(struct hclge_vport *vport) return hclge_func_reset_cmd(hdev, vport->vport_id); } +static void hclge_notify_vf_config(struct hclge_vport *vport) +{ + struct hclge_dev *hdev = vport->back; + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); + struct hclge_port_base_vlan_config *vlan_cfg; + int ret; + + hclge_push_vf_link_status(vport); + if (test_bit(HCLGE_VPORT_NEED_NOTIFY_RESET, &vport->need_notify)) { + ret = hclge_inform_vf_reset(vport, HNAE3_VF_PF_FUNC_RESET); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to inform VF %u reset!", + vport->vport_id - HCLGE_VF_VPORT_START_NUM); + return; + } + vport->need_notify = 0; + return; + } + + if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 && + 
test_bit(HCLGE_VPORT_NEED_NOTIFY_VF_VLAN, &vport->need_notify)) { + vlan_cfg = &vport->port_base_vlan_cfg; + ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0], + vport->vport_id, + vlan_cfg->state, + &vlan_cfg->vlan_info); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to inform VF %u port base vlan!", + vport->vport_id - HCLGE_VF_VPORT_START_NUM); + return; + } + clear_bit(HCLGE_VPORT_NEED_NOTIFY_VF_VLAN, &vport->need_notify); + } +} + static void hclge_vf_keep_alive(struct hclge_vport *vport) { + struct hclge_dev *hdev = vport->back; + vport->last_active_jiffies = jiffies; + + if (test_bit(HCLGE_VPORT_STATE_INITED, &vport->state) && + !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) { + set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); + dev_info(&hdev->pdev->dev, "VF %u is alive!", + vport->vport_id - HCLGE_VF_VPORT_START_NUM); + hclge_notify_vf_config(vport); + } } static int hclge_set_vf_mtu(struct hclge_vport *vport, @@ -954,6 +1006,7 @@ static int hclge_mbx_vf_uninit_handler(struct hclge_mbx_ops_param *param) hclge_rm_vport_all_mac_table(param->vport, true, HCLGE_MAC_ADDR_MC); hclge_rm_vport_all_vlan_table(param->vport, true); + param->vport->mps = 0; return 0; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index db6f7cdba958..081bd2c3f289 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -2767,7 +2767,8 @@ static int hclgevf_pci_reset(struct hclgevf_dev *hdev) struct pci_dev *pdev = hdev->pdev; int ret = 0; - if (hdev->reset_type == HNAE3_VF_FULL_RESET && + if ((hdev->reset_type == HNAE3_VF_FULL_RESET || + hdev->reset_type == HNAE3_FLR_RESET) && test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { hclgevf_misc_irq_uninit(hdev); hclgevf_uninit_msi(hdev); diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index 907055b77af0..7105de6fb344 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -783,7 +783,7 @@ construct_skb: static void ice_clean_xdp_tx_buf(struct ice_tx_ring *xdp_ring, struct ice_tx_buf *tx_buf) { - xdp_return_frame((struct xdp_frame *)tx_buf->raw_buf); + page_frag_free(tx_buf->raw_buf); xdp_ring->xdp_tx_active--; dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma), dma_unmap_len(tx_buf, len), DMA_TO_DEVICE); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c index fa8029a94068..eb25e458266c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c @@ -589,7 +589,7 @@ int rvu_mbox_handler_mcs_free_resources(struct rvu *rvu, u16 pcifunc = req->hdr.pcifunc; struct mcs_rsrc_map *map; struct mcs *mcs; - int rc; + int rc = 0; if (req->mcs_id >= rvu->mcs_blk_cnt) return MCS_AF_ERR_INVALID_MCSID; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c index 9e10e7471b88..88f8772a61cd 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c @@ -1012,6 +1012,7 @@ static void otx2_pool_refill_task(struct work_struct *work) rbpool = cq->rbpool; free_ptrs = cq->pool_ptrs; + get_cpu(); while (cq->pool_ptrs) { if (otx2_alloc_rbuf(pfvf, rbpool, &bufptr)) { /* Schedule a WQ if we fails to free atleast half of the 
@@ -1031,6 +1032,7 @@ static void otx2_pool_refill_task(struct work_struct *work) pfvf->hw_ops->aura_freeptr(pfvf, qidx, bufptr + OTX2_HEAD_ROOM); cq->pool_ptrs--; } + put_cpu(); cq->refill_task_sched = false; } @@ -1368,6 +1370,7 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf) if (err) goto fail; + get_cpu(); /* Allocate pointers and free them to aura/pool */ for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) { pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx); @@ -1376,18 +1379,24 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf) sq = &qset->sq[qidx]; sq->sqb_count = 0; sq->sqb_ptrs = kcalloc(num_sqbs, sizeof(*sq->sqb_ptrs), GFP_KERNEL); - if (!sq->sqb_ptrs) - return -ENOMEM; + if (!sq->sqb_ptrs) { + err = -ENOMEM; + goto err_mem; + } for (ptr = 0; ptr < num_sqbs; ptr++) { - if (otx2_alloc_rbuf(pfvf, pool, &bufptr)) - return -ENOMEM; + err = otx2_alloc_rbuf(pfvf, pool, &bufptr); + if (err) + goto err_mem; pfvf->hw_ops->aura_freeptr(pfvf, pool_id, bufptr); sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr; } } - return 0; +err_mem: + put_cpu(); + return err ? -ENOMEM : 0; + fail: otx2_mbox_reset(&pfvf->mbox.mbox, 0); otx2_aura_pool_free(pfvf); @@ -1426,18 +1435,21 @@ int otx2_rq_aura_pool_init(struct otx2_nic *pfvf) if (err) goto fail; + get_cpu(); /* Allocate pointers and free them to aura/pool */ for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) { pool = &pfvf->qset.pool[pool_id]; for (ptr = 0; ptr < num_ptrs; ptr++) { - if (otx2_alloc_rbuf(pfvf, pool, &bufptr)) - return -ENOMEM; + err = otx2_alloc_rbuf(pfvf, pool, &bufptr); + if (err) + goto err_mem; pfvf->hw_ops->aura_freeptr(pfvf, pool_id, bufptr + OTX2_HEAD_ROOM); } } - - return 0; +err_mem: + put_cpu(); + return err ? -ENOMEM : 0; fail: otx2_mbox_reset(&pfvf->mbox.mbox, 0); otx2_aura_pool_free(pfvf); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c index ddb197970c22..5bd83c0275f8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c @@ -468,7 +468,7 @@ static int mlx5_devlink_enable_roce_validate(struct devlink *devlink, u32 id, bool new_state = val.vbool; if (new_state && !MLX5_CAP_GEN(dev, roce) && - !MLX5_CAP_GEN(dev, roce_rw_supported)) { + !(MLX5_CAP_GEN(dev, roce_rw_supported) && MLX5_CAP_GEN_MAX(dev, roce))) { NL_SET_ERR_MSG_MOD(extack, "Device doesn't support RoCE"); return -EOPNOTSUPP; } @@ -563,7 +563,7 @@ static int mlx5_devlink_eq_depth_validate(struct devlink *devlink, u32 id, union devlink_param_value val, struct netlink_ext_ack *extack) { - return (val.vu16 >= 64 && val.vu16 <= 4096) ? 0 : -EINVAL; + return (val.vu32 >= 64 && val.vu32 <= 4096) ? 0 : -EINVAL; } static const struct devlink_param mlx5_devlink_params[] = { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c index 5f6f95ad6888..1ae15b8536a8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c @@ -459,7 +459,11 @@ static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter, goto unlock; for (i = 0; i < priv->channels.num; i++) { - struct mlx5e_rq *rq = &priv->channels.c[i]->rq; + struct mlx5e_channel *c = priv->channels.c[i]; + struct mlx5e_rq *rq; + + rq = test_bit(MLX5E_CHANNEL_STATE_XSK, c->state) ? 
+ &c->xskrq : &c->rq; err = mlx5e_rx_reporter_build_diagnose_output(rq, fmsg); if (err) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c index a69849e0deed..313df8232db7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c @@ -2103,14 +2103,9 @@ out_err: static void mlx5_ct_tc_create_dbgfs(struct mlx5_tc_ct_priv *ct_priv) { - bool is_fdb = ct_priv->ns_type == MLX5_FLOW_NAMESPACE_FDB; struct mlx5_tc_ct_debugfs *ct_dbgfs = &ct_priv->debugfs; - char dirname[16] = {}; - if (sscanf(dirname, "ct_%s", is_fdb ? "fdb" : "nic") < 0) - return; - - ct_dbgfs->root = debugfs_create_dir(dirname, mlx5_debugfs_get_dev_root(ct_priv->dev)); + ct_dbgfs->root = debugfs_create_dir("ct", mlx5_debugfs_get_dev_root(ct_priv->dev)); debugfs_create_atomic_t("offloaded", 0400, ct_dbgfs->root, &ct_dbgfs->stats.offloaded); debugfs_create_atomic_t("rx_dropped", 0400, ct_dbgfs->root, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c index ff73d25bc6eb..2aaf8ab857b8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c @@ -222,7 +222,7 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv, int err; list_for_each_entry(flow, flow_list, tmp_list) { - if (!mlx5e_is_offloaded_flow(flow) || flow_flag_test(flow, SLOW)) + if (!mlx5e_is_offloaded_flow(flow)) continue; attr = mlx5e_tc_get_encap_attr(flow); @@ -231,6 +231,13 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv, esw_attr->dests[flow->tmp_entry_index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID; esw_attr->dests[flow->tmp_entry_index].pkt_reformat = NULL; + /* Clear pkt_reformat before checking slow path flag. Because + * in next iteration, the same flow is already set slow path + * flag, but still need to clear the pkt_reformat. 
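/*
 * Illustrative sketch only, not part of the mlx5 patch above: the
 * tc_tun_encap.c hunk reorders the loop so shared encap state is
 * cleared unconditionally, and only afterwards is the slow-path flag
 * used to skip the re-offload step, so flows already demoted to the
 * slow path still drop their stale pkt_reformat. All names prefixed
 * example_ or EXAMPLE_ are placeholders, not the real mlx5 structures.
 */
#define EXAMPLE_FLOW_SLOW	(1UL << 0)

struct example_flow {
	unsigned long flags;
	void *pkt_reformat;	/* stands in for the per-dest encap state */
};

static void example_encap_flows_del(struct example_flow *flows, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		/* always clear the encap state, even for slow-path flows */
		flows[i].pkt_reformat = NULL;

		if (flows[i].flags & EXAMPLE_FLOW_SLOW)
			continue;	/* slow-path rule already installed */

		/* ... demote the remaining offloaded flows here ... */
	}
}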
+ */ + if (flow_flag_test(flow, SLOW)) + continue; + /* update from encap rule to slow path rule */ spec = &flow->attr->parse_attr->spec; rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c index f5b26f5a7de4..054d80c4e65c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c @@ -273,6 +273,11 @@ static int mlx5e_tc_tun_parse_geneve_options(struct mlx5e_priv *priv, geneve_tlv_option_0_data, be32_to_cpu(opt_data_key)); MLX5_SET(fte_match_set_misc3, misc_3_c, geneve_tlv_option_0_data, be32_to_cpu(opt_data_mask)); + if (MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, + ft_field_support.geneve_tlv_option_0_exist)) { + MLX5_SET_TO_ONES(fte_match_set_misc, misc_c, geneve_tlv_option_0_exist); + MLX5_SET_TO_ONES(fte_match_set_misc, misc_v, geneve_tlv_option_0_exist); + } spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_3; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 8d36e2de53a9..cff5f2e29e1e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -1305,7 +1305,7 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c, sq->channel = c; sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map; sq->min_inline_mode = params->tx_min_inline_mode; - sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); + sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu) - ETH_FCS_LEN; sq->xsk_pool = xsk_pool; sq->stats = sq->xsk_pool ? diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c index 60a73990017c..6b4c9ffad95b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c @@ -67,6 +67,7 @@ static void esw_acl_egress_lgcy_groups_destroy(struct mlx5_vport *vport) int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport) { + bool vst_mode_steering = esw_vst_mode_is_steering(esw); struct mlx5_flow_destination drop_ctr_dst = {}; struct mlx5_flow_destination *dst = NULL; struct mlx5_fc *drop_counter = NULL; @@ -77,6 +78,7 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw, */ int table_size = 2; int dest_num = 0; + int actions_flag; int err = 0; if (vport->egress.legacy.drop_counter) { @@ -119,8 +121,11 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw, vport->vport, vport->info.vlan, vport->info.qos); /* Allowed vlan rule */ + actions_flag = MLX5_FLOW_CONTEXT_ACTION_ALLOW; + if (vst_mode_steering) + actions_flag |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP; err = esw_egress_acl_vlan_create(esw, vport, NULL, vport->info.vlan, - MLX5_FLOW_CONTEXT_ACTION_ALLOW); + actions_flag); if (err) goto out; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c index b1a5199260f6..093ed86a0acd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c @@ -139,11 +139,14 @@ static void esw_acl_ingress_lgcy_groups_destroy(struct mlx5_vport *vport) int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport) { + bool vst_mode_steering = esw_vst_mode_is_steering(esw); struct mlx5_flow_destination 
drop_ctr_dst = {}; struct mlx5_flow_destination *dst = NULL; struct mlx5_flow_act flow_act = {}; struct mlx5_flow_spec *spec = NULL; struct mlx5_fc *counter = NULL; + bool vst_check_cvlan = false; + bool vst_push_cvlan = false; /* The ingress acl table contains 4 groups * (2 active rules at the same time - * 1 allow rule from one of the first 3 groups. @@ -203,7 +206,26 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw, goto out; } - if (vport->info.vlan || vport->info.qos) + if ((vport->info.vlan || vport->info.qos)) { + if (vst_mode_steering) + vst_push_cvlan = true; + else if (!MLX5_CAP_ESW(esw->dev, vport_cvlan_insert_always)) + vst_check_cvlan = true; + } + + if (vst_check_cvlan || vport->info.spoofchk) + spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + + /* Create ingress allow rule */ + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW; + if (vst_push_cvlan) { + flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH; + flow_act.vlan[0].prio = vport->info.qos; + flow_act.vlan[0].vid = vport->info.vlan; + flow_act.vlan[0].ethtype = ETH_P_8021Q; + } + + if (vst_check_cvlan) MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag); @@ -218,9 +240,6 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw, ether_addr_copy(smac_v, vport->info.mac); } - /* Create ingress allow rule */ - spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW; vport->ingress.allow_rule = mlx5_add_flow_rules(vport->ingress.acl, spec, &flow_act, NULL, 0); if (IS_ERR(vport->ingress.allow_rule)) { @@ -232,6 +251,9 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw, goto out; } + if (!vst_check_cvlan && !vport->info.spoofchk) + goto out; + memset(&flow_act, 0, sizeof(flow_act)); flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP; /* Attach drop flow counter */ @@ -257,7 +279,8 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw, return 0; out: - esw_acl_ingress_lgcy_cleanup(esw, vport); + if (err) + esw_acl_ingress_lgcy_cleanup(esw, vport); kvfree(spec); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 527e4bffda8d..0dfd5742c6fe 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -161,10 +161,17 @@ static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u16 vport, esw_vport_context.vport_cvlan_strip, 1); if (set_flags & SET_VLAN_INSERT) { - /* insert only if no vlan in packet */ - MLX5_SET(modify_esw_vport_context_in, in, - esw_vport_context.vport_cvlan_insert, 1); - + if (MLX5_CAP_ESW(dev, vport_cvlan_insert_always)) { + /* insert either if vlan exist in packet or not */ + MLX5_SET(modify_esw_vport_context_in, in, + esw_vport_context.vport_cvlan_insert, + MLX5_VPORT_CVLAN_INSERT_ALWAYS); + } else { + /* insert only if no vlan in packet */ + MLX5_SET(modify_esw_vport_context_in, in, + esw_vport_context.vport_cvlan_insert, + MLX5_VPORT_CVLAN_INSERT_WHEN_NO_CVLAN); + } MLX5_SET(modify_esw_vport_context_in, in, esw_vport_context.cvlan_pcp, qos); MLX5_SET(modify_esw_vport_context_in, in, @@ -809,6 +816,7 @@ out_free: static int esw_vport_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport) { + bool vst_mode_steering = esw_vst_mode_is_steering(esw); u16 vport_num = vport->vport; int flags; int err; @@ -839,8 +847,9 @@ static int esw_vport_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport) flags = (vport->info.vlan || vport->info.qos) ? 
SET_VLAN_STRIP | SET_VLAN_INSERT : 0; - modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan, - vport->info.qos, flags); + if (esw->mode == MLX5_ESWITCH_OFFLOADS || !vst_mode_steering) + modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan, + vport->info.qos, flags); return 0; @@ -1848,6 +1857,7 @@ int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, u16 vport, u16 vlan, u8 qos, u8 set_flags) { struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); + bool vst_mode_steering = esw_vst_mode_is_steering(esw); int err = 0; if (IS_ERR(evport)) @@ -1855,9 +1865,11 @@ int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, if (vlan > 4095 || qos > 7) return -EINVAL; - err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set_flags); - if (err) - return err; + if (esw->mode == MLX5_ESWITCH_OFFLOADS || !vst_mode_steering) { + err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set_flags); + if (err) + return err; + } evport->info.vlan = vlan; evport->info.qos = qos; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 5a85a5d32be7..92644fbb5081 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -527,6 +527,12 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw, int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, u16 vport, u16 vlan, u8 qos, u8 set_flags); +static inline bool esw_vst_mode_is_steering(struct mlx5_eswitch *esw) +{ + return (MLX5_CAP_ESW_EGRESS_ACL(esw->dev, pop_vlan) && + MLX5_CAP_ESW_INGRESS_ACL(esw->dev, push_vlan)); +} + static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev, u8 vlan_depth) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 86ed87d704f7..96417c5feed7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -674,6 +674,12 @@ static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work) dev = container_of(priv, struct mlx5_core_dev, priv); devlink = priv_to_devlink(dev); + mutex_lock(&dev->intf_state_mutex); + if (test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags)) { + mlx5_core_err(dev, "health works are not permitted at this stage\n"); + return; + } + mutex_unlock(&dev->intf_state_mutex); enter_error_state(dev, false); if (IS_ERR_OR_NULL(health->fw_fatal_reporter)) { devl_lock(devlink); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index 7c5c500fd215..2c73c8445e63 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -71,6 +71,10 @@ static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev, params->packet_merge.type = MLX5E_PACKET_MERGE_NONE; params->hard_mtu = MLX5_IB_GRH_BYTES + MLX5_IPOIB_HARD_LEN; params->tunneled_offload_en = false; + + /* CQE compression is not supported for IPoIB */ + params->rx_cqe_compress_def = false; + MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def); } /* Called directly after IPoIB netdevice was created to initialize SW structs */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c index 32c3e0a649a7..ad32b80e8501 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c @@ -228,6 +228,7 @@ static void mlx5_ldev_free(struct kref *ref) if (ldev->nb.notifier_call) unregister_netdevice_notifier_net(&init_net, &ldev->nb); mlx5_lag_mp_cleanup(ldev); + cancel_delayed_work_sync(&ldev->bond_work); destroy_workqueue(ldev->wq); mlx5_lag_mpesw_cleanup(ldev); mutex_destroy(&ldev->lock); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 7f5db13e3550..df134f6d32dc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -613,7 +613,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx) MLX5_SET(cmd_hca_cap, set_hca_cap, num_total_dynamic_vf_msix, MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix)); - if (MLX5_CAP_GEN(dev, roce_rw_supported)) + if (MLX5_CAP_GEN(dev, roce_rw_supported) && MLX5_CAP_GEN_MAX(dev, roce)) MLX5_SET(cmd_hca_cap, set_hca_cap, roce, mlx5_is_roce_on(dev)); @@ -1050,6 +1050,8 @@ err_rl_cleanup: err_tables_cleanup: mlx5_geneve_destroy(dev->geneve); mlx5_vxlan_destroy(dev->vxlan); + mlx5_cleanup_clock(dev); + mlx5_cleanup_reserved_gids(dev); mlx5_cq_debugfs_cleanup(dev); mlx5_fw_reset_cleanup(dev); err_events_cleanup: diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c index 1a61c6cdb077..0050fcb988b7 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c @@ -381,7 +381,7 @@ int lan966x_port_pcs_set(struct lan966x_port *port, } /* Take PCS out of reset */ - lan_rmw(DEV_CLOCK_CFG_LINK_SPEED_SET(2) | + lan_rmw(DEV_CLOCK_CFG_LINK_SPEED_SET(LAN966X_SPEED_1000) | DEV_CLOCK_CFG_PCS_RX_RST_SET(0) | DEV_CLOCK_CFG_PCS_TX_RST_SET(0), DEV_CLOCK_CFG_LINK_SPEED | diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c index d25f4f09faa0..3c5d4fe99373 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c @@ -834,7 +834,7 @@ static int mchp_sparx5_probe(struct platform_device *pdev) if (err) goto cleanup_config; - if (!of_get_mac_address(np, sparx5->base_mac)) { + if (of_get_mac_address(np, sparx5->base_mac)) { dev_info(sparx5->dev, "MAC addr was not set, use random MAC\n"); eth_random_addr(sparx5->base_mac); sparx5->base_mac[5] = 0; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h index da33f09facb9..432d79d691c2 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h @@ -617,6 +617,9 @@ struct nfp_net_dp { * @vnic_no_name: For non-port PF vNIC make ndo_get_phys_port_name return * -EOPNOTSUPP to keep backwards compatibility (set by app) * @port: Pointer to nfp_port structure if vNIC is a port + * @mc_lock: Protect mc_addrs list + * @mc_addrs: List of mc addrs to add/del to HW + * @mc_work: Work to update mc addrs * @app_priv: APP private data for this vNIC */ struct nfp_net { @@ -718,6 +721,10 @@ struct nfp_net { struct nfp_port *port; + spinlock_t mc_lock; + struct list_head mc_addrs; + struct work_struct mc_work; + void *app_priv; }; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 09053373288f..18fc9971f1c8 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ 
b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -1334,9 +1334,14 @@ err_unlock: return err; } -static int nfp_net_mc_cfg(struct net_device *netdev, const unsigned char *addr, const u32 cmd) +struct nfp_mc_addr_entry { + u8 addr[ETH_ALEN]; + u32 cmd; + struct list_head list; +}; + +static int nfp_net_mc_cfg(struct nfp_net *nn, const unsigned char *addr, const u32 cmd) { - struct nfp_net *nn = netdev_priv(netdev); int ret; ret = nfp_net_mbox_lock(nn, NFP_NET_CFG_MULTICAST_SZ); @@ -1351,6 +1356,25 @@ static int nfp_net_mc_cfg(struct net_device *netdev, const unsigned char *addr, return nfp_net_mbox_reconfig_and_unlock(nn, cmd); } +static int nfp_net_mc_prep(struct nfp_net *nn, const unsigned char *addr, const u32 cmd) +{ + struct nfp_mc_addr_entry *entry; + + entry = kmalloc(sizeof(*entry), GFP_ATOMIC); + if (!entry) + return -ENOMEM; + + ether_addr_copy(entry->addr, addr); + entry->cmd = cmd; + spin_lock_bh(&nn->mc_lock); + list_add_tail(&entry->list, &nn->mc_addrs); + spin_unlock_bh(&nn->mc_lock); + + schedule_work(&nn->mc_work); + + return 0; +} + static int nfp_net_mc_sync(struct net_device *netdev, const unsigned char *addr) { struct nfp_net *nn = netdev_priv(netdev); @@ -1361,12 +1385,35 @@ static int nfp_net_mc_sync(struct net_device *netdev, const unsigned char *addr) return -EINVAL; } - return nfp_net_mc_cfg(netdev, addr, NFP_NET_CFG_MBOX_CMD_MULTICAST_ADD); + return nfp_net_mc_prep(nn, addr, NFP_NET_CFG_MBOX_CMD_MULTICAST_ADD); } static int nfp_net_mc_unsync(struct net_device *netdev, const unsigned char *addr) { - return nfp_net_mc_cfg(netdev, addr, NFP_NET_CFG_MBOX_CMD_MULTICAST_DEL); + struct nfp_net *nn = netdev_priv(netdev); + + return nfp_net_mc_prep(nn, addr, NFP_NET_CFG_MBOX_CMD_MULTICAST_DEL); +} + +static void nfp_net_mc_addr_config(struct work_struct *work) +{ + struct nfp_net *nn = container_of(work, struct nfp_net, mc_work); + struct nfp_mc_addr_entry *entry, *tmp; + struct list_head tmp_list; + + INIT_LIST_HEAD(&tmp_list); + + spin_lock_bh(&nn->mc_lock); + list_splice_init(&nn->mc_addrs, &tmp_list); + spin_unlock_bh(&nn->mc_lock); + + list_for_each_entry_safe(entry, tmp, &tmp_list, list) { + if (nfp_net_mc_cfg(nn, entry->addr, entry->cmd)) + nn_err(nn, "Config mc address to HW failed.\n"); + + list_del(&entry->list); + kfree(entry); + } } static void nfp_net_set_rx_mode(struct net_device *netdev) @@ -2633,6 +2680,11 @@ int nfp_net_init(struct nfp_net *nn) if (!nn->dp.netdev) return 0; + + spin_lock_init(&nn->mc_lock); + INIT_LIST_HEAD(&nn->mc_addrs); + INIT_WORK(&nn->mc_work, nfp_net_mc_addr_config); + return register_netdev(nn->dp.netdev); err_clean_mbox: @@ -2652,5 +2704,6 @@ void nfp_net_clean(struct nfp_net *nn) unregister_netdev(nn->dp.netdev); nfp_net_ipsec_clean(nn); nfp_ccm_mbox_clean(nn); + flush_work(&nn->mc_work); nfp_net_reconfig_wait_posted(nn); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c index 86ecb080b153..cdcead614e9f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.c +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c @@ -1832,7 +1832,8 @@ static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 image_type, u32 *nvram_offset_bytes, - u32 *nvram_size_bytes) + u32 *nvram_size_bytes, + bool b_can_sleep) { u32 ret_mcp_resp, ret_mcp_param, ret_txn_size; struct mcp_file_att file_att; @@ -1846,7 +1847,8 @@ static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn, &ret_mcp_resp, &ret_mcp_param, &ret_txn_size, - (u32 *)&file_att, false); + 
(u32 *)&file_att, + b_can_sleep); /* Check response */ if (nvm_result || (ret_mcp_resp & FW_MSG_CODE_MASK) != @@ -1873,7 +1875,9 @@ static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn, static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 nvram_offset_bytes, - u32 nvram_size_bytes, u32 *ret_buf) + u32 nvram_size_bytes, + u32 *ret_buf, + bool b_can_sleep) { u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy; s32 bytes_left = nvram_size_bytes; @@ -1899,7 +1903,7 @@ static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn, &ret_mcp_resp, &ret_mcp_param, &ret_read_size, (u32 *)((u8 *)ret_buf + read_offset), - false)) + b_can_sleep)) return DBG_STATUS_NVRAM_READ_FAILED; /* Check response */ @@ -3380,7 +3384,8 @@ static u32 qed_grc_dump_mcp_hw_dump(struct qed_hwfn *p_hwfn, p_ptt, NVM_TYPE_HW_DUMP_OUT, &hw_dump_offset_bytes, - &hw_dump_size_bytes); + &hw_dump_size_bytes, + false); if (status != DBG_STATUS_OK) return 0; @@ -3397,7 +3402,9 @@ static u32 qed_grc_dump_mcp_hw_dump(struct qed_hwfn *p_hwfn, status = qed_nvram_read(p_hwfn, p_ptt, hw_dump_offset_bytes, - hw_dump_size_bytes, dump_buf + offset); + hw_dump_size_bytes, + dump_buf + offset, + false); if (status != DBG_STATUS_OK) { DP_NOTICE(p_hwfn, "Failed to read MCP HW Dump image from NVRAM\n"); @@ -4123,7 +4130,9 @@ static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn, return qed_find_nvram_image(p_hwfn, p_ptt, nvram_image_type, - trace_meta_offset, trace_meta_size); + trace_meta_offset, + trace_meta_size, + true); } /* Reads the MCP Trace meta data from NVRAM into the specified buffer */ @@ -4139,7 +4148,10 @@ static enum dbg_status qed_mcp_trace_read_meta(struct qed_hwfn *p_hwfn, /* Read meta data from NVRAM */ status = qed_nvram_read(p_hwfn, p_ptt, - nvram_offset_in_bytes, size_in_bytes, buf); + nvram_offset_in_bytes, + size_in_bytes, + buf, + true); if (status != DBG_STATUS_OK) return status; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index dbb800769cb6..c95d56e56c59 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -2505,7 +2505,13 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter) goto disable_mbx_intr; qlcnic_83xx_clear_function_resources(adapter); - qlcnic_dcb_enable(adapter->dcb); + + err = qlcnic_dcb_enable(adapter->dcb); + if (err) { + qlcnic_dcb_free(adapter->dcb); + goto disable_mbx_intr; + } + qlcnic_83xx_initialize_nic(adapter, 1); qlcnic_dcb_get_info(adapter->dcb); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h index 7519773eaca6..22afa2be85fd 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h @@ -41,11 +41,6 @@ struct qlcnic_dcb { unsigned long state; }; -static inline void qlcnic_clear_dcb_ops(struct qlcnic_dcb *dcb) -{ - kfree(dcb); -} - static inline int qlcnic_dcb_get_hw_capability(struct qlcnic_dcb *dcb) { if (dcb && dcb->ops->get_hw_capability) @@ -112,9 +107,8 @@ static inline void qlcnic_dcb_init_dcbnl_ops(struct qlcnic_dcb *dcb) dcb->ops->init_dcbnl_ops(dcb); } -static inline void qlcnic_dcb_enable(struct qlcnic_dcb *dcb) +static inline int qlcnic_dcb_enable(struct qlcnic_dcb *dcb) { - if (dcb && qlcnic_dcb_attach(dcb)) - qlcnic_clear_dcb_ops(dcb); + return dcb ? 
qlcnic_dcb_attach(dcb) : 0; } #endif diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 28476b982bab..44dac3c0908e 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -2599,7 +2599,13 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) "Device does not support MSI interrupts\n"); if (qlcnic_82xx_check(adapter)) { - qlcnic_dcb_enable(adapter->dcb); + err = qlcnic_dcb_enable(adapter->dcb); + if (err) { + qlcnic_dcb_free(adapter->dcb); + dev_err(&pdev->dev, "Failed to enable DCB\n"); + goto err_out_free_hw; + } + qlcnic_dcb_get_info(adapter->dcb); err = qlcnic_setup_intr(adapter); diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index a9dcc98b6af1..24592d972523 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -2210,28 +2210,6 @@ static int rtl_set_mac_address(struct net_device *dev, void *p) return 0; } -static void rtl_wol_enable_rx(struct rtl8169_private *tp) -{ - if (tp->mac_version >= RTL_GIGA_MAC_VER_25) - RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) | - AcceptBroadcast | AcceptMulticast | AcceptMyPhys); -} - -static void rtl_prepare_power_down(struct rtl8169_private *tp) -{ - if (tp->dash_type != RTL_DASH_NONE) - return; - - if (tp->mac_version == RTL_GIGA_MAC_VER_32 || - tp->mac_version == RTL_GIGA_MAC_VER_33) - rtl_ephy_write(tp, 0x19, 0xff64); - - if (device_may_wakeup(tp_to_dev(tp))) { - phy_speed_down(tp->phydev, false); - rtl_wol_enable_rx(tp); - } -} - static void rtl_init_rxcfg(struct rtl8169_private *tp) { switch (tp->mac_version) { @@ -2455,6 +2433,31 @@ static void rtl_enable_rxdvgate(struct rtl8169_private *tp) rtl_wait_txrx_fifo_empty(tp); } +static void rtl_wol_enable_rx(struct rtl8169_private *tp) +{ + if (tp->mac_version >= RTL_GIGA_MAC_VER_25) + RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) | + AcceptBroadcast | AcceptMulticast | AcceptMyPhys); + + if (tp->mac_version >= RTL_GIGA_MAC_VER_40) + rtl_disable_rxdvgate(tp); +} + +static void rtl_prepare_power_down(struct rtl8169_private *tp) +{ + if (tp->dash_type != RTL_DASH_NONE) + return; + + if (tp->mac_version == RTL_GIGA_MAC_VER_32 || + tp->mac_version == RTL_GIGA_MAC_VER_33) + rtl_ephy_write(tp, 0x19, 0xff64); + + if (device_may_wakeup(tp_to_dev(tp))) { + phy_speed_down(tp->phydev, false); + rtl_wol_enable_rx(tp); + } +} + static void rtl_set_tx_config_registers(struct rtl8169_private *tp) { u32 val = TX_DMA_BURST << TxDMAShift | @@ -3872,7 +3875,7 @@ static void rtl8169_tx_clear(struct rtl8169_private *tp) netdev_reset_queue(tp->dev); } -static void rtl8169_cleanup(struct rtl8169_private *tp, bool going_down) +static void rtl8169_cleanup(struct rtl8169_private *tp) { napi_disable(&tp->napi); @@ -3884,9 +3887,6 @@ static void rtl8169_cleanup(struct rtl8169_private *tp, bool going_down) rtl_rx_close(tp); - if (going_down && tp->dev->wol_enabled) - goto no_reset; - switch (tp->mac_version) { case RTL_GIGA_MAC_VER_28: case RTL_GIGA_MAC_VER_31: @@ -3907,7 +3907,7 @@ static void rtl8169_cleanup(struct rtl8169_private *tp, bool going_down) } rtl_hw_reset(tp); -no_reset: + rtl8169_tx_clear(tp); rtl8169_init_ring_indexes(tp); } @@ -3918,7 +3918,7 @@ static void rtl_reset_work(struct rtl8169_private *tp) netif_stop_queue(tp->dev); - rtl8169_cleanup(tp, false); + rtl8169_cleanup(tp); for (i = 0; i < NUM_RX_DESC; i++) rtl8169_mark_to_asic(tp->RxDescArray + i); @@ -4605,7 
+4605,7 @@ static void rtl8169_down(struct rtl8169_private *tp) pci_clear_master(tp->pci_dev); rtl_pci_commit(tp); - rtl8169_cleanup(tp, true); + rtl8169_cleanup(tp); rtl_disable_exit_l1(tp); rtl_prepare_power_down(tp); } diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c index e42ceaa0099f..6441892636db 100644 --- a/drivers/net/ethernet/renesas/rswitch.c +++ b/drivers/net/ethernet/renesas/rswitch.c @@ -1578,6 +1578,7 @@ static int rswitch_device_alloc(struct rswitch_private *priv, int index) { struct platform_device *pdev = priv->pdev; struct rswitch_device *rdev; + struct device_node *port; struct net_device *ndev; int err; @@ -1606,7 +1607,9 @@ static int rswitch_device_alloc(struct rswitch_private *priv, int index) netif_napi_add(ndev, &rdev->napi, rswitch_poll); - err = of_get_ethdev_address(pdev->dev.of_node, ndev); + port = rswitch_get_port_node(rdev); + err = of_get_ethdev_address(port, ndev); + of_node_put(port); if (err) { if (is_valid_ether_addr(rdev->etha->mac_addr)) eth_hw_addr_set(ndev, rdev->etha->mac_addr); @@ -1786,6 +1789,11 @@ static int renesas_eth_sw_probe(struct platform_device *pdev) pm_runtime_get_sync(&pdev->dev); ret = rswitch_init(priv); + if (ret < 0) { + pm_runtime_put(&pdev->dev); + pm_runtime_disable(&pdev->dev); + return ret; + } device_set_wakeup_capable(&pdev->dev, 1); diff --git a/drivers/net/ipa/ipa_interrupt.c b/drivers/net/ipa/ipa_interrupt.c index a49f66efacb8..d458a35839cc 100644 --- a/drivers/net/ipa/ipa_interrupt.c +++ b/drivers/net/ipa/ipa_interrupt.c @@ -132,10 +132,10 @@ static void ipa_interrupt_suspend_control(struct ipa_interrupt *interrupt, u32 endpoint_id, bool enable) { struct ipa *ipa = interrupt->ipa; + u32 mask = BIT(endpoint_id % 32); u32 unit = endpoint_id / 32; const struct ipa_reg *reg; u32 offset; - u32 mask; u32 val; WARN_ON(!test_bit(endpoint_id, ipa->available)); @@ -148,7 +148,6 @@ static void ipa_interrupt_suspend_control(struct ipa_interrupt *interrupt, offset = ipa_reg_n_offset(reg, unit); val = ioread32(ipa->reg_virt + offset); - mask = BIT(endpoint_id); if (enable) val |= mask; else diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c index 8dcb49ed1f3d..7fd9fe6a602b 100644 --- a/drivers/net/phy/xilinx_gmii2rgmii.c +++ b/drivers/net/phy/xilinx_gmii2rgmii.c @@ -105,6 +105,7 @@ static int xgmiitorgmii_probe(struct mdio_device *mdiodev) if (!priv->phy_dev->drv) { dev_info(dev, "Attached phy not ready\n"); + put_device(&priv->phy_dev->mdio.dev); return -EPROBE_DEFER; } diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c index f79333fe1783..7b3739b29c8f 100644 --- a/drivers/net/usb/rndis_host.c +++ b/drivers/net/usb/rndis_host.c @@ -255,7 +255,8 @@ static int rndis_query(struct usbnet *dev, struct usb_interface *intf, off = le32_to_cpu(u.get_c->offset); len = le32_to_cpu(u.get_c->len); - if (unlikely((8 + off + len) > CONTROL_BUFFER_SIZE)) + if (unlikely((off > CONTROL_BUFFER_SIZE - 8) || + (len > CONTROL_BUFFER_SIZE - 8 - off))) goto response_error; if (*reply_len != -1 && len != *reply_len) diff --git a/drivers/net/veth.c b/drivers/net/veth.c index ac7c0653695f..dfc7d87fad59 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -974,6 +974,9 @@ static int veth_poll(struct napi_struct *napi, int budget) xdp_set_return_frame_no_direct(); done = veth_xdp_rcv(rq, budget, &bq, &stats); + if (stats.xdp_redirect > 0) + xdp_do_flush(); + if (done < budget && napi_complete_done(napi, done)) { /* Write rx_notify_masked before reading 
ptr_ring */ smp_store_mb(rq->rx_notify_masked, false); @@ -987,8 +990,6 @@ static int veth_poll(struct napi_struct *napi, int budget) if (stats.xdp_tx > 0) veth_xdp_flush(rq, &bq); - if (stats.xdp_redirect > 0) - xdp_do_flush(); xdp_clear_return_frame_no_direct(); return done; diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 6f1e560fb15c..56267c327f0b 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -1288,6 +1288,10 @@ vmxnet3_rx_csum(struct vmxnet3_adapter *adapter, (le32_to_cpu(gdesc->dword[3]) & VMXNET3_RCD_CSUM_OK) == VMXNET3_RCD_CSUM_OK) { skb->ip_summed = CHECKSUM_UNNECESSARY; + if ((le32_to_cpu(gdesc->dword[0]) & + (1UL << VMXNET3_RCD_HDR_INNER_SHIFT))) { + skb->csum_level = 1; + } WARN_ON_ONCE(!(gdesc->rcd.tcp || gdesc->rcd.udp) && !(le32_to_cpu(gdesc->dword[0]) & (1UL << VMXNET3_RCD_HDR_INNER_SHIFT))); @@ -1297,6 +1301,10 @@ vmxnet3_rx_csum(struct vmxnet3_adapter *adapter, } else if (gdesc->rcd.v6 && (le32_to_cpu(gdesc->dword[3]) & (1 << VMXNET3_RCD_TUC_SHIFT))) { skb->ip_summed = CHECKSUM_UNNECESSARY; + if ((le32_to_cpu(gdesc->dword[0]) & + (1UL << VMXNET3_RCD_HDR_INNER_SHIFT))) { + skb->csum_level = 1; + } WARN_ON_ONCE(!(gdesc->rcd.tcp || gdesc->rcd.udp) && !(le32_to_cpu(gdesc->dword[0]) & (1UL << VMXNET3_RCD_HDR_INNER_SHIFT))); diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 6b5a4d036d15..bdb3a76a352e 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -1385,8 +1385,8 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev, /* loopback, multicast & non-ND link-local traffic; do not push through * packet taps again. Reset pkt_type for upper layers to process skb. - * For strict packets with a source LLA, determine the dst using the - * original ifindex. + * For non-loopback strict packets, determine the dst using the original + * ifindex. */ if (skb->pkt_type == PACKET_LOOPBACK || (need_strict && !is_ndisc)) { skb->dev = vrf_dev; @@ -1395,7 +1395,7 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev, if (skb->pkt_type == PACKET_LOOPBACK) skb->pkt_type = PACKET_HOST; - else if (ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL) + else vrf_ip6_input_dst(skb, vrf_dev, orig_iif); goto out; diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c index 92224b36787a..b1b179effe2a 100644 --- a/drivers/net/vxlan/vxlan_core.c +++ b/drivers/net/vxlan/vxlan_core.c @@ -2917,16 +2917,23 @@ static int vxlan_init(struct net_device *dev) vxlan_vnigroup_init(vxlan); dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); - if (!dev->tstats) - return -ENOMEM; + if (!dev->tstats) { + err = -ENOMEM; + goto err_vnigroup_uninit; + } err = gro_cells_init(&vxlan->gro_cells, dev); - if (err) { - free_percpu(dev->tstats); - return err; - } + if (err) + goto err_free_percpu; return 0; + +err_free_percpu: + free_percpu(dev->tstats); +err_vnigroup_uninit: + if (vxlan->cfg.flags & VXLAN_F_VNIFILTER) + vxlan_vnigroup_uninit(vxlan); + return err; } static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan, __be32 vni) diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h index 30f0765fb9fd..237f4ec2cffd 100644 --- a/drivers/net/wireless/ath/ath9k/htc.h +++ b/drivers/net/wireless/ath/ath9k/htc.h @@ -327,9 +327,9 @@ static inline struct ath9k_htc_tx_ctl *HTC_SKB_CB(struct sk_buff *skb) } #ifdef CONFIG_ATH9K_HTC_DEBUGFS -#define __STAT_SAFE(hif_dev, expr) ((hif_dev)->htc_handle->drv_priv ? 
(expr) : 0) -#define CAB_STAT_INC(priv) ((priv)->debug.tx_stats.cab_queued++) -#define TX_QSTAT_INC(priv, q) ((priv)->debug.tx_stats.queue_stats[q]++) +#define __STAT_SAFE(hif_dev, expr) do { ((hif_dev)->htc_handle->drv_priv ? (expr) : 0); } while (0) +#define CAB_STAT_INC(priv) do { ((priv)->debug.tx_stats.cab_queued++); } while (0) +#define TX_QSTAT_INC(priv, q) do { ((priv)->debug.tx_stats.queue_stats[q]++); } while (0) #define TX_STAT_INC(hif_dev, c) \ __STAT_SAFE((hif_dev), (hif_dev)->htc_handle->drv_priv->debug.tx_stats.c++) @@ -378,10 +378,10 @@ void ath9k_htc_get_et_stats(struct ieee80211_hw *hw, struct ethtool_stats *stats, u64 *data); #else -#define TX_STAT_INC(hif_dev, c) -#define TX_STAT_ADD(hif_dev, c, a) -#define RX_STAT_INC(hif_dev, c) -#define RX_STAT_ADD(hif_dev, c, a) +#define TX_STAT_INC(hif_dev, c) do { } while (0) +#define TX_STAT_ADD(hif_dev, c, a) do { } while (0) +#define RX_STAT_INC(hif_dev, c) do { } while (0) +#define RX_STAT_ADD(hif_dev, c, a) do { } while (0) #define CAB_STAT_INC(priv) #define TX_QSTAT_INC(priv, c) diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c index e6d64152c81a..a02e5a67b706 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c @@ -1106,6 +1106,11 @@ int iwl_read_ppag_table(struct iwl_fw_runtime *fwrt, union iwl_ppag_table_cmd *c int i, j, num_sub_bands; s8 *gain; + /* many firmware images for JF lie about this */ + if (CSR_HW_RFID_TYPE(fwrt->trans->hw_rf_id) == + CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_JF)) + return -EOPNOTSUPP; + if (!fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_PPAG)) { IWL_DEBUG_RADIO(fwrt, "PPAG capability not supported by FW, command not sent.\n"); diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/Kconfig b/drivers/net/wireless/mediatek/mt76/mt7996/Kconfig index 5c5fc569e6d5..79fb47a73c91 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/Kconfig +++ b/drivers/net/wireless/mediatek/mt76/mt7996/Kconfig @@ -2,6 +2,7 @@ config MT7996E tristate "MediaTek MT7996 (PCIe) support" select MT76_CONNAC_LIB + select RELAY depends on MAC80211 depends on PCI help diff --git a/drivers/net/wireless/ti/Makefile b/drivers/net/wireless/ti/Makefile index 0530dd744275..05ee016594f8 100644 --- a/drivers/net/wireless/ti/Makefile +++ b/drivers/net/wireless/ti/Makefile @@ -3,6 +3,3 @@ obj-$(CONFIG_WLCORE) += wlcore/ obj-$(CONFIG_WL12XX) += wl12xx/ obj-$(CONFIG_WL1251) += wl1251/ obj-$(CONFIG_WL18XX) += wl18xx/ - -# small builtin driver bit -obj-$(CONFIG_WILINK_PLATFORM_DATA) += wilink_platform_data.o diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c index bb0abbe4491c..4424f53a8a0a 100644 --- a/drivers/nvme/host/auth.c +++ b/drivers/nvme/host/auth.c @@ -953,7 +953,7 @@ int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl) goto err_free_dhchap_secret; if (!ctrl->opts->dhchap_secret && !ctrl->opts->dhchap_ctrl_secret) - return ret; + return 0; ctrl->dhchap_ctxs = kvcalloc(ctrl_max_dhchaps(ctrl), sizeof(*chap), GFP_KERNEL); diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 95c488ea91c3..7be562a4e1aa 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -1074,6 +1074,18 @@ static u32 nvme_known_admin_effects(u8 opcode) return 0; } +static u32 nvme_known_nvm_effects(u8 opcode) +{ + switch (opcode) { + case nvme_cmd_write: + case nvme_cmd_write_zeroes: + case nvme_cmd_write_uncor: + return NVME_CMD_EFFECTS_LBCC; + default: + return 0; + } +} + u32 nvme_command_effects(struct 
nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode) { u32 effects = 0; @@ -1081,16 +1093,24 @@ u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode) if (ns) { if (ns->head->effects) effects = le32_to_cpu(ns->head->effects->iocs[opcode]); + if (ns->head->ids.csi == NVME_CAP_CSS_NVM) + effects |= nvme_known_nvm_effects(opcode); if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC)) dev_warn_once(ctrl->device, - "IO command:%02x has unhandled effects:%08x\n", + "IO command:%02x has unusual effects:%08x\n", opcode, effects); - return 0; - } - if (ctrl->effects) - effects = le32_to_cpu(ctrl->effects->acs[opcode]); - effects |= nvme_known_admin_effects(opcode); + /* + * NVME_CMD_EFFECTS_CSE_MASK causes a freeze all I/O queues, + * which would deadlock when done on an I/O command. Note that + * We already warn about an unusual effect above. + */ + effects &= ~NVME_CMD_EFFECTS_CSE_MASK; + } else { + if (ctrl->effects) + effects = le32_to_cpu(ctrl->effects->acs[opcode]); + effects |= nvme_known_admin_effects(opcode); + } return effects; } @@ -4926,7 +4946,7 @@ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set, memset(set, 0, sizeof(*set)); set->ops = ops; - set->queue_depth = ctrl->sqsize + 1; + set->queue_depth = min_t(unsigned, ctrl->sqsize, BLK_MQ_MAX_DEPTH - 1); /* * Some Apple controllers requires tags to be unique across admin and * the (only) I/O queue, so reserve the first 32 tags of the I/O queue. diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c index 9ddda571f046..a8639919237e 100644 --- a/drivers/nvme/host/ioctl.c +++ b/drivers/nvme/host/ioctl.c @@ -11,6 +11,8 @@ static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c, fmode_t mode) { + u32 effects; + if (capable(CAP_SYS_ADMIN)) return true; @@ -43,11 +45,29 @@ static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c, } /* - * Only allow I/O commands that transfer data to the controller if the - * special file is open for writing, but always allow I/O commands that - * transfer data from the controller. + * Check if the controller provides a Commands Supported and Effects log + * and marks this command as supported. If not reject unprivileged + * passthrough. + */ + effects = nvme_command_effects(ns->ctrl, ns, c->common.opcode); + if (!(effects & NVME_CMD_EFFECTS_CSUPP)) + return false; + + /* + * Don't allow passthrough for command that have intrusive (or unknown) + * effects. + */ + if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC | + NVME_CMD_EFFECTS_UUID_SEL | + NVME_CMD_EFFECTS_SCOPE_MASK)) + return false; + + /* + * Only allow I/O commands that transfer data to the controller or that + * change the logical block contents if the file descriptor is open for + * writing. */ - if (nvme_is_write(c)) + if (nvme_is_write(c) || (effects & NVME_CMD_EFFECTS_LBCC)) return mode & FMODE_WRITE; return true; } diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index c03093b6813c..fc39d01e7b63 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -376,6 +376,8 @@ static void nvme_ns_head_submit_bio(struct bio *bio) * pool from the original queue to allocate the bvecs from. 
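/*
 * Condensed sketch, not the patch itself, of the policy the nvme
 * ioctl.c hunk above adopts for unprivileged passthrough: the opcode
 * must be marked supported in the Commands Supported and Effects log,
 * must carry no intrusive effects, and any write or LBA-content change
 * requires the file to be open for writing. The NVME_CMD_EFFECTS_*
 * constants and FMODE_WRITE are the real kernel definitions from
 * <linux/nvme.h> and <linux/fs.h>; the helper name is illustrative.
 */
static bool example_passthru_allowed(u32 effects, fmode_t mode, bool is_write)
{
	if (!(effects & NVME_CMD_EFFECTS_CSUPP))
		return false;

	if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC |
			NVME_CMD_EFFECTS_UUID_SEL |
			NVME_CMD_EFFECTS_SCOPE_MASK))
		return false;

	if (is_write || (effects & NVME_CMD_EFFECTS_LBCC))
		return mode & FMODE_WRITE;

	return true;
}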
*/ bio = bio_split_to_limits(bio); + if (!bio) + return; srcu_idx = srcu_read_lock(&head->srcu); ns = nvme_find_path(head); diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 6bbb73ef8b25..424c8a467a0c 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -893,7 +893,7 @@ static inline void nvme_trace_bio_complete(struct request *req) { struct nvme_ns *ns = req->q->queuedata; - if (req->cmd_flags & REQ_NVME_MPATH) + if ((req->cmd_flags & REQ_NVME_MPATH) && req->bio) trace_block_bio_complete(ns->head->disk->queue, req->bio); } diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index f0f8027644bb..b13baccedb4a 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -36,7 +36,7 @@ #define SQ_SIZE(q) ((q)->q_depth << (q)->sqes) #define CQ_SIZE(q) ((q)->q_depth * sizeof(struct nvme_completion)) -#define SGES_PER_PAGE (PAGE_SIZE / sizeof(struct nvme_sgl_desc)) +#define SGES_PER_PAGE (NVME_CTRL_PAGE_SIZE / sizeof(struct nvme_sgl_desc)) /* * These can be higher, but we need to ensure that any command doesn't @@ -144,9 +144,9 @@ struct nvme_dev { mempool_t *iod_mempool; /* shadow doorbell buffer support: */ - u32 *dbbuf_dbs; + __le32 *dbbuf_dbs; dma_addr_t dbbuf_dbs_dma_addr; - u32 *dbbuf_eis; + __le32 *dbbuf_eis; dma_addr_t dbbuf_eis_dma_addr; /* host memory buffer support: */ @@ -208,10 +208,10 @@ struct nvme_queue { #define NVMEQ_SQ_CMB 1 #define NVMEQ_DELETE_ERROR 2 #define NVMEQ_POLLED 3 - u32 *dbbuf_sq_db; - u32 *dbbuf_cq_db; - u32 *dbbuf_sq_ei; - u32 *dbbuf_cq_ei; + __le32 *dbbuf_sq_db; + __le32 *dbbuf_cq_db; + __le32 *dbbuf_sq_ei; + __le32 *dbbuf_cq_ei; struct completion delete_done; }; @@ -343,11 +343,11 @@ static inline int nvme_dbbuf_need_event(u16 event_idx, u16 new_idx, u16 old) } /* Update dbbuf and return true if an MMIO is required */ -static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db, - volatile u32 *dbbuf_ei) +static bool nvme_dbbuf_update_and_check_event(u16 value, __le32 *dbbuf_db, + volatile __le32 *dbbuf_ei) { if (dbbuf_db) { - u16 old_value; + u16 old_value, event_idx; /* * Ensure that the queue is written before updating @@ -355,8 +355,8 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db, */ wmb(); - old_value = *dbbuf_db; - *dbbuf_db = value; + old_value = le32_to_cpu(*dbbuf_db); + *dbbuf_db = cpu_to_le32(value); /* * Ensure that the doorbell is updated before reading the event @@ -366,7 +366,8 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db, */ mb(); - if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value)) + event_idx = le32_to_cpu(*dbbuf_ei); + if (!nvme_dbbuf_need_event(event_idx, value, old_value)) return false; } @@ -380,9 +381,9 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db, */ static int nvme_pci_npages_prp(void) { - unsigned nprps = DIV_ROUND_UP(NVME_MAX_KB_SZ + NVME_CTRL_PAGE_SIZE, - NVME_CTRL_PAGE_SIZE); - return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8); + unsigned max_bytes = (NVME_MAX_KB_SZ * 1024) + NVME_CTRL_PAGE_SIZE; + unsigned nprps = DIV_ROUND_UP(max_bytes, NVME_CTRL_PAGE_SIZE); + return DIV_ROUND_UP(8 * nprps, NVME_CTRL_PAGE_SIZE - 8); } /* @@ -392,7 +393,7 @@ static int nvme_pci_npages_prp(void) static int nvme_pci_npages_sgl(void) { return DIV_ROUND_UP(NVME_MAX_SEGS * sizeof(struct nvme_sgl_desc), - PAGE_SIZE); + NVME_CTRL_PAGE_SIZE); } static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, @@ -708,7 +709,7 @@ static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge, 
sge->length = cpu_to_le32(entries * sizeof(*sge)); sge->type = NVME_SGL_FMT_LAST_SEG_DESC << 4; } else { - sge->length = cpu_to_le32(PAGE_SIZE); + sge->length = cpu_to_le32(NVME_CTRL_PAGE_SIZE); sge->type = NVME_SGL_FMT_SEG_DESC << 4; } } @@ -2332,10 +2333,12 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) if (dev->cmb_use_sqes) { result = nvme_cmb_qdepth(dev, nr_io_queues, sizeof(struct nvme_command)); - if (result > 0) + if (result > 0) { dev->q_depth = result; - else + dev->ctrl.sqsize = result - 1; + } else { dev->cmb_use_sqes = false; + } } do { @@ -2536,7 +2539,6 @@ static int nvme_pci_enable(struct nvme_dev *dev) dev->q_depth = min_t(u32, NVME_CAP_MQES(dev->ctrl.cap) + 1, io_queue_depth); - dev->ctrl.sqsize = dev->q_depth - 1; /* 0's based queue depth */ dev->db_stride = 1 << NVME_CAP_STRIDE(dev->ctrl.cap); dev->dbs = dev->bar + 4096; @@ -2577,7 +2579,7 @@ static int nvme_pci_enable(struct nvme_dev *dev) dev_warn(dev->ctrl.device, "IO queue depth clamped to %d\n", dev->q_depth); } - + dev->ctrl.sqsize = dev->q_depth - 1; /* 0's based queue depth */ nvme_map_cmb(dev); diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c index 53a004ea320c..6a54ed6fb121 100644 --- a/drivers/nvme/target/admin-cmd.c +++ b/drivers/nvme/target/admin-cmd.c @@ -164,26 +164,31 @@ out: static void nvmet_get_cmd_effects_nvm(struct nvme_effects_log *log) { - log->acs[nvme_admin_get_log_page] = cpu_to_le32(1 << 0); - log->acs[nvme_admin_identify] = cpu_to_le32(1 << 0); - log->acs[nvme_admin_abort_cmd] = cpu_to_le32(1 << 0); - log->acs[nvme_admin_set_features] = cpu_to_le32(1 << 0); - log->acs[nvme_admin_get_features] = cpu_to_le32(1 << 0); - log->acs[nvme_admin_async_event] = cpu_to_le32(1 << 0); - log->acs[nvme_admin_keep_alive] = cpu_to_le32(1 << 0); - - log->iocs[nvme_cmd_read] = cpu_to_le32(1 << 0); - log->iocs[nvme_cmd_write] = cpu_to_le32(1 << 0); - log->iocs[nvme_cmd_flush] = cpu_to_le32(1 << 0); - log->iocs[nvme_cmd_dsm] = cpu_to_le32(1 << 0); - log->iocs[nvme_cmd_write_zeroes] = cpu_to_le32(1 << 0); + log->acs[nvme_admin_get_log_page] = + log->acs[nvme_admin_identify] = + log->acs[nvme_admin_abort_cmd] = + log->acs[nvme_admin_set_features] = + log->acs[nvme_admin_get_features] = + log->acs[nvme_admin_async_event] = + log->acs[nvme_admin_keep_alive] = + cpu_to_le32(NVME_CMD_EFFECTS_CSUPP); + + log->iocs[nvme_cmd_read] = + log->iocs[nvme_cmd_flush] = + log->iocs[nvme_cmd_dsm] = + cpu_to_le32(NVME_CMD_EFFECTS_CSUPP); + log->iocs[nvme_cmd_write] = + log->iocs[nvme_cmd_write_zeroes] = + cpu_to_le32(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC); } static void nvmet_get_cmd_effects_zns(struct nvme_effects_log *log) { - log->iocs[nvme_cmd_zone_append] = cpu_to_le32(1 << 0); - log->iocs[nvme_cmd_zone_mgmt_send] = cpu_to_le32(1 << 0); - log->iocs[nvme_cmd_zone_mgmt_recv] = cpu_to_le32(1 << 0); + log->iocs[nvme_cmd_zone_append] = + log->iocs[nvme_cmd_zone_mgmt_send] = + cpu_to_le32(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC); + log->iocs[nvme_cmd_zone_mgmt_recv] = + cpu_to_le32(NVME_CMD_EFFECTS_CSUPP); } static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req) diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c index 79af5140af8b..adc0958755d6 100644 --- a/drivers/nvme/target/passthru.c +++ b/drivers/nvme/target/passthru.c @@ -334,14 +334,13 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req) } /* - * If there are effects for the command we are about to execute, or - * an end_req function we need to use 
nvme_execute_passthru_rq() - * synchronously in a work item seeing the end_req function and - * nvme_passthru_end() can't be called in the request done callback - * which is typically in interrupt context. + * If a command needs post-execution fixups, or there are any + * non-trivial effects, make sure to execute the command synchronously + * in a workqueue so that nvme_passthru_end gets called. */ effects = nvme_command_effects(ctrl, ns, req->cmd->common.opcode); - if (req->p.use_workqueue || effects) { + if (req->p.use_workqueue || + (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))) { INIT_WORK(&req->p.work, nvmet_passthru_execute_cmd_work); req->p.rq = rq; queue_work(nvmet_wq, &req->p.work); diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index b2272bccf85c..f08b25195ae7 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c @@ -1099,7 +1099,7 @@ u64 __init dt_mem_next_cell(int s, const __be32 **cellp) */ int __init early_init_dt_scan_memory(void) { - int node; + int node, found_memory = 0; const void *fdt = initial_boot_params; fdt_for_each_subnode(node, fdt, 0) { @@ -1139,6 +1139,8 @@ int __init early_init_dt_scan_memory(void) early_init_dt_add_memory_arch(base, size); + found_memory = 1; + if (!hotpluggable) continue; @@ -1147,7 +1149,7 @@ int __init early_init_dt_scan_memory(void) base, base + size); } } - return 0; + return found_memory; } int __init early_init_dt_scan_chosen(char *cmdline) @@ -1161,18 +1163,14 @@ int __init early_init_dt_scan_chosen(char *cmdline) if (node < 0) node = fdt_path_offset(fdt, "/chosen@0"); if (node < 0) - return -ENOENT; + /* Handle the cmdline config options even if no /chosen node */ + goto handle_cmdline; chosen_node_offset = node; early_init_dt_check_for_initrd(node); early_init_dt_check_for_elfcorehdr(node); - /* Retrieve command line */ - p = of_get_flat_dt_prop(node, "bootargs", &l); - if (p != NULL && l > 0) - strscpy(cmdline, p, min(l, COMMAND_LINE_SIZE)); - rng_seed = of_get_flat_dt_prop(node, "rng-seed", &l); if (rng_seed && l > 0) { add_bootloader_randomness(rng_seed, l); @@ -1185,6 +1183,32 @@ int __init early_init_dt_scan_chosen(char *cmdline) fdt_totalsize(initial_boot_params)); } + /* Retrieve command line */ + p = of_get_flat_dt_prop(node, "bootargs", &l); + if (p != NULL && l > 0) + strscpy(cmdline, p, min(l, COMMAND_LINE_SIZE)); + +handle_cmdline: + /* + * CONFIG_CMDLINE is meant to be a default in case nothing else + * managed to set the command line, unless CONFIG_CMDLINE_FORCE + * is set in which case we override whatever was found earlier. + */ +#ifdef CONFIG_CMDLINE +#if defined(CONFIG_CMDLINE_EXTEND) + strlcat(cmdline, " ", COMMAND_LINE_SIZE); + strlcat(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE); +#elif defined(CONFIG_CMDLINE_FORCE) + strscpy(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE); +#else + /* No arguments from boot loader, use kernel's cmdl*/ + if (!((char *)cmdline)[0]) + strscpy(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE); +#endif +#endif /* CONFIG_CMDLINE */ + + pr_debug("Command line is: %s\n", (char *)cmdline); + return 0; } @@ -1277,26 +1301,6 @@ void __init early_init_dt_scan_nodes(void) if (rc) pr_warn("No chosen node found, continuing without\n"); - /* - * CONFIG_CMDLINE is meant to be a default in case nothing else - * managed to set the command line, unless CONFIG_CMDLINE_FORCE - * is set in which case we override whatever was found earlier. 
- */ -#ifdef CONFIG_CMDLINE -#if defined(CONFIG_CMDLINE_EXTEND) - strlcat(boot_command_line, " ", COMMAND_LINE_SIZE); - strlcat(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE); -#elif defined(CONFIG_CMDLINE_FORCE) - strscpy(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE); -#else - /* No arguments from boot loader, use kernel's cmdl */ - if (!boot_command_line[0]) - strscpy(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE); -#endif -#endif /* CONFIG_CMDLINE */ - - pr_debug("Command line is: %s\n", boot_command_line); - /* Setup memory, calling early_init_dt_add_memory_arch */ early_init_dt_scan_memory(); diff --git a/drivers/phy/samsung/phy-exynos-dp-video.c b/drivers/phy/samsung/phy-exynos-dp-video.c index 2b670ef91deb..6069fedbd8f3 100644 --- a/drivers/phy/samsung/phy-exynos-dp-video.c +++ b/drivers/phy/samsung/phy-exynos-dp-video.c @@ -83,8 +83,11 @@ static int exynos_dp_video_phy_probe(struct platform_device *pdev) if (!state) return -ENOMEM; - state->regs = syscon_regmap_lookup_by_phandle(dev->of_node, - "samsung,pmu-syscon"); + state->regs = syscon_node_to_regmap(dev->parent->of_node); + if (IS_ERR(state->regs)) + /* Backwards compatible way */ + state->regs = syscon_regmap_lookup_by_phandle(dev->of_node, + "samsung,pmu-syscon"); if (IS_ERR(state->regs)) { dev_err(dev, "Failed to lookup PMU regmap\n"); return PTR_ERR(state->regs); diff --git a/drivers/phy/samsung/phy-exynos-mipi-video.c b/drivers/phy/samsung/phy-exynos-mipi-video.c index c1df1ef3ee3c..a7f67857e5b2 100644 --- a/drivers/phy/samsung/phy-exynos-mipi-video.c +++ b/drivers/phy/samsung/phy-exynos-mipi-video.c @@ -298,7 +298,7 @@ static int exynos_mipi_video_phy_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; struct phy_provider *phy_provider; - unsigned int i; + unsigned int i = 0; phy_dev = of_device_get_match_data(dev); if (!phy_dev) @@ -308,7 +308,10 @@ static int exynos_mipi_video_phy_probe(struct platform_device *pdev) if (!state) return -ENOMEM; - for (i = 0; i < phy_dev->num_regmaps; i++) { + state->regmaps[i] = syscon_node_to_regmap(dev->parent->of_node); + if (!IS_ERR(state->regmaps[i])) + i++; + for (; i < phy_dev->num_regmaps; i++) { state->regmaps[i] = syscon_regmap_lookup_by_phandle(np, phy_dev->regmap_names[i]); if (IS_ERR(state->regmaps[i])) diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index b392b9f5482e..c0f85ffb2b62 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c @@ -865,6 +865,8 @@ dcssblk_submit_bio(struct bio *bio) unsigned long bytes_done; bio = bio_split_to_limits(bio); + if (!bio) + return; bytes_done = 0; dev_info = bio->bi_bdev->bd_disk->private_data; diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c index 406be169173c..d1adc4b83193 100644 --- a/drivers/s390/net/qeth_core_sys.c +++ b/drivers/s390/net/qeth_core_sys.c @@ -410,13 +410,13 @@ static ssize_t qeth_dev_isolation_show(struct device *dev, switch (card->options.isolation) { case ISOLATION_MODE_NONE: - return snprintf(buf, 6, "%s\n", ATTR_QETH_ISOLATION_NONE); + return sysfs_emit(buf, "%s\n", ATTR_QETH_ISOLATION_NONE); case ISOLATION_MODE_FWD: - return snprintf(buf, 9, "%s\n", ATTR_QETH_ISOLATION_FWD); + return sysfs_emit(buf, "%s\n", ATTR_QETH_ISOLATION_FWD); case ISOLATION_MODE_DROP: - return snprintf(buf, 6, "%s\n", ATTR_QETH_ISOLATION_DROP); + return sysfs_emit(buf, "%s\n", ATTR_QETH_ISOLATION_DROP); default: - return snprintf(buf, 5, "%s\n", "N/A"); + return sysfs_emit(buf, 
"%s\n", "N/A"); } } @@ -500,9 +500,9 @@ static ssize_t qeth_hw_trap_show(struct device *dev, struct qeth_card *card = dev_get_drvdata(dev); if (card->info.hwtrap) - return snprintf(buf, 5, "arm\n"); + return sysfs_emit(buf, "arm\n"); else - return snprintf(buf, 8, "disarm\n"); + return sysfs_emit(buf, "disarm\n"); } static ssize_t qeth_hw_trap_store(struct device *dev, diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig index 5dbb09f843f7..4e176280113a 100644 --- a/drivers/soc/Kconfig +++ b/drivers/soc/Kconfig @@ -16,12 +16,14 @@ source "drivers/soc/litex/Kconfig" source "drivers/soc/loongson/Kconfig" source "drivers/soc/mediatek/Kconfig" source "drivers/soc/microchip/Kconfig" +source "drivers/soc/nuvoton/Kconfig" source "drivers/soc/pxa/Kconfig" source "drivers/soc/qcom/Kconfig" source "drivers/soc/renesas/Kconfig" source "drivers/soc/rockchip/Kconfig" source "drivers/soc/samsung/Kconfig" source "drivers/soc/sifive/Kconfig" +source "drivers/soc/starfive/Kconfig" source "drivers/soc/sunxi/Kconfig" source "drivers/soc/tegra/Kconfig" source "drivers/soc/ti/Kconfig" diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile index fff513bd522d..3b0f9fb3b5c8 100644 --- a/drivers/soc/Makefile +++ b/drivers/soc/Makefile @@ -21,13 +21,15 @@ obj-$(CONFIG_LITEX_SOC_CONTROLLER) += litex/ obj-y += loongson/ obj-y += mediatek/ obj-y += microchip/ +obj-y += nuvoton/ obj-y += pxa/ obj-y += amlogic/ obj-y += qcom/ obj-y += renesas/ obj-y += rockchip/ obj-$(CONFIG_SOC_SAMSUNG) += samsung/ -obj-$(CONFIG_SOC_SIFIVE) += sifive/ +obj-y += sifive/ +obj-$(CONFIG_SOC_STARFIVE) += starfive/ obj-y += sunxi/ obj-$(CONFIG_ARCH_TEGRA) += tegra/ obj-y += ti/ diff --git a/drivers/soc/amlogic/meson-ee-pwrc.c b/drivers/soc/amlogic/meson-ee-pwrc.c index dd5f2a13ceb5..f54acffc83f9 100644 --- a/drivers/soc/amlogic/meson-ee-pwrc.c +++ b/drivers/soc/amlogic/meson-ee-pwrc.c @@ -46,6 +46,9 @@ #define HHI_NANOQ_MEM_PD_REG1 (0x47 << 2) #define HHI_VPU_MEM_PD_REG2 (0x4d << 2) +#define G12A_HHI_NANOQ_MEM_PD_REG0 (0x43 << 2) +#define G12A_HHI_NANOQ_MEM_PD_REG1 (0x44 << 2) + struct meson_ee_pwrc; struct meson_ee_pwrc_domain; @@ -106,6 +109,13 @@ static struct meson_ee_pwrc_top_domain sm1_pwrc_usb = SM1_EE_PD(17); static struct meson_ee_pwrc_top_domain sm1_pwrc_pci = SM1_EE_PD(18); static struct meson_ee_pwrc_top_domain sm1_pwrc_ge2d = SM1_EE_PD(19); +static struct meson_ee_pwrc_top_domain g12a_pwrc_nna = { + .sleep_reg = GX_AO_RTI_GEN_PWR_SLEEP0, + .sleep_mask = BIT(16) | BIT(17), + .iso_reg = GX_AO_RTI_GEN_PWR_ISO0, + .iso_mask = BIT(16) | BIT(17), +}; + /* Memory PD Domains */ #define VPU_MEMPD(__reg) \ @@ -217,6 +227,11 @@ static struct meson_ee_pwrc_mem_domain sm1_pwrc_mem_audio[] = { { HHI_AUDIO_MEM_PD_REG0, GENMASK(27, 26) }, }; +static struct meson_ee_pwrc_mem_domain g12a_pwrc_mem_nna[] = { + { G12A_HHI_NANOQ_MEM_PD_REG0, GENMASK(31, 0) }, + { G12A_HHI_NANOQ_MEM_PD_REG1, GENMASK(23, 0) }, +}; + #define VPU_PD(__name, __top_pd, __mem, __is_pwr_off, __resets, __clks) \ { \ .name = __name, \ @@ -253,6 +268,8 @@ static struct meson_ee_pwrc_domain_desc g12a_pwrc_domains[] = { [PWRC_G12A_VPU_ID] = VPU_PD("VPU", &gx_pwrc_vpu, g12a_pwrc_mem_vpu, pwrc_ee_is_powered_off, 11, 2), [PWRC_G12A_ETH_ID] = MEM_PD("ETH", meson_pwrc_mem_eth), + [PWRC_G12A_NNA_ID] = TOP_PD("NNA", &g12a_pwrc_nna, g12a_pwrc_mem_nna, + pwrc_ee_is_powered_off), }; static struct meson_ee_pwrc_domain_desc gxbb_pwrc_domains[] = { diff --git a/drivers/soc/bcm/bcm2835-power.c b/drivers/soc/bcm/bcm2835-power.c index 5bcd047768b6..bf51f03f77d6 100644 --- 
a/drivers/soc/bcm/bcm2835-power.c +++ b/drivers/soc/bcm/bcm2835-power.c @@ -701,14 +701,8 @@ fail: return ret; } -static int bcm2835_power_remove(struct platform_device *pdev) -{ - return 0; -} - static struct platform_driver bcm2835_power_driver = { .probe = bcm2835_power_probe, - .remove = bcm2835_power_remove, .driver = { .name = "bcm2835-power", }, diff --git a/drivers/soc/imx/Kconfig b/drivers/soc/imx/Kconfig index 4b906791d6c7..a8742fc58f01 100644 --- a/drivers/soc/imx/Kconfig +++ b/drivers/soc/imx/Kconfig @@ -28,4 +28,15 @@ config SOC_IMX9 help If you say yes here, you get support for the NXP i.MX9 family +config IMX8M_BLK_CTRL + bool + default SOC_IMX8M && IMX_GPCV2_PM_DOMAINS + depends on PM_GENERIC_DOMAINS + depends on COMMON_CLK + +config IMX9_BLK_CTRL + bool + default SOC_IMX9 && IMX_GPCV2_PM_DOMAINS + depends on PM_GENERIC_DOMAINS + endmenu diff --git a/drivers/soc/imx/Makefile b/drivers/soc/imx/Makefile index 7b4099ceafd6..a28c44a1f16a 100644 --- a/drivers/soc/imx/Makefile +++ b/drivers/soc/imx/Makefile @@ -5,7 +5,7 @@ endif obj-$(CONFIG_HAVE_IMX_GPC) += gpc.o obj-$(CONFIG_IMX_GPCV2_PM_DOMAINS) += gpcv2.o obj-$(CONFIG_SOC_IMX8M) += soc-imx8m.o -obj-$(CONFIG_SOC_IMX8M) += imx8m-blk-ctrl.o -obj-$(CONFIG_SOC_IMX8M) += imx8mp-blk-ctrl.o +obj-$(CONFIG_IMX8M_BLK_CTRL) += imx8m-blk-ctrl.o +obj-$(CONFIG_IMX8M_BLK_CTRL) += imx8mp-blk-ctrl.o obj-$(CONFIG_SOC_IMX9) += imx93-src.o imx93-pd.o -obj-$(CONFIG_SOC_IMX9) += imx93-blk-ctrl.o +obj-$(CONFIG_IMX9_BLK_CTRL) += imx93-blk-ctrl.o diff --git a/drivers/soc/imx/imx8m-blk-ctrl.c b/drivers/soc/imx/imx8m-blk-ctrl.c index ddcf6be3d8b4..399cb85105a1 100644 --- a/drivers/soc/imx/imx8m-blk-ctrl.c +++ b/drivers/soc/imx/imx8m-blk-ctrl.c @@ -4,6 +4,7 @@ * Copyright 2021 Pengutronix, Lucas Stach <kernel@pengutronix.de> */ +#include <linux/bitfield.h> #include <linux/device.h> #include <linux/interconnect.h> #include <linux/module.h> @@ -654,6 +655,10 @@ static const struct imx8m_blk_ctrl_data imx8mn_disp_blk_ctl_dev_data = { .num_domains = ARRAY_SIZE(imx8mn_disp_blk_ctl_domain_data), }; +#define LCDIF_ARCACHE_CTRL 0x4c +#define LCDIF_1_RD_HURRY GENMASK(15, 13) +#define LCDIF_0_RD_HURRY GENMASK(12, 10) + static int imx8mp_media_power_notifier(struct notifier_block *nb, unsigned long action, void *data) { @@ -667,14 +672,24 @@ static int imx8mp_media_power_notifier(struct notifier_block *nb, regmap_set_bits(bc->regmap, BLK_CLK_EN, BIT(8)); regmap_set_bits(bc->regmap, BLK_SFT_RSTN, BIT(8)); - /* - * On power up we have no software backchannel to the GPC to - * wait for the ADB handshake to happen, so we just delay for a - * bit. On power down the GPC driver waits for the handshake. - */ - if (action == GENPD_NOTIFY_ON) + if (action == GENPD_NOTIFY_ON) { + /* + * On power up we have no software backchannel to the GPC to + * wait for the ADB handshake to happen, so we just delay for a + * bit. On power down the GPC driver waits for the handshake. + */ udelay(5); + /* + * Set panic read hurry level for both LCDIF interfaces to + * maximum priority to minimize chances of display FIFO + * underflow. 
+ */ + regmap_set_bits(bc->regmap, LCDIF_ARCACHE_CTRL, + FIELD_PREP(LCDIF_1_RD_HURRY, 7) | + FIELD_PREP(LCDIF_0_RD_HURRY, 7)); + } + return NOTIFY_OK; } diff --git a/drivers/soc/imx/imx8mp-blk-ctrl.c b/drivers/soc/imx/imx8mp-blk-ctrl.c index 0e3b6ba22f94..28458ed1793b 100644 --- a/drivers/soc/imx/imx8mp-blk-ctrl.c +++ b/drivers/soc/imx/imx8mp-blk-ctrl.c @@ -4,7 +4,9 @@ * Copyright 2022 Pengutronix, Lucas Stach <kernel@pengutronix.de> */ +#include <linux/bitfield.h> #include <linux/clk.h> +#include <linux/clk-provider.h> #include <linux/device.h> #include <linux/interconnect.h> #include <linux/module.h> @@ -21,6 +23,15 @@ #define USB_CLOCK_MODULE_EN BIT(1) #define PCIE_PHY_APB_RST BIT(4) #define PCIE_PHY_INIT_RST BIT(5) +#define GPR_REG1 0x4 +#define PLL_LOCK BIT(13) +#define GPR_REG2 0x8 +#define P_PLL_MASK GENMASK(5, 0) +#define M_PLL_MASK GENMASK(15, 6) +#define S_PLL_MASK GENMASK(18, 16) +#define GPR_REG3 0xc +#define PLL_CKE BIT(17) +#define PLL_RST BIT(31) struct imx8mp_blk_ctrl_domain; @@ -60,6 +71,7 @@ struct imx8mp_blk_ctrl_domain { struct imx8mp_blk_ctrl_data { int max_reg; + int (*probe) (struct imx8mp_blk_ctrl *bc); notifier_fn_t power_notifier_fn; void (*power_off) (struct imx8mp_blk_ctrl *bc, struct imx8mp_blk_ctrl_domain *domain); void (*power_on) (struct imx8mp_blk_ctrl *bc, struct imx8mp_blk_ctrl_domain *domain); @@ -73,6 +85,92 @@ to_imx8mp_blk_ctrl_domain(struct generic_pm_domain *genpd) return container_of(genpd, struct imx8mp_blk_ctrl_domain, genpd); } +struct clk_hsio_pll { + struct clk_hw hw; + struct regmap *regmap; +}; + +static inline struct clk_hsio_pll *to_clk_hsio_pll(struct clk_hw *hw) +{ + return container_of(hw, struct clk_hsio_pll, hw); +} + +static int clk_hsio_pll_prepare(struct clk_hw *hw) +{ + struct clk_hsio_pll *clk = to_clk_hsio_pll(hw); + u32 val; + + /* set the PLL configuration */ + regmap_update_bits(clk->regmap, GPR_REG2, + P_PLL_MASK | M_PLL_MASK | S_PLL_MASK, + FIELD_PREP(P_PLL_MASK, 12) | + FIELD_PREP(M_PLL_MASK, 800) | + FIELD_PREP(S_PLL_MASK, 4)); + + /* de-assert PLL reset */ + regmap_update_bits(clk->regmap, GPR_REG3, PLL_RST, PLL_RST); + + /* enable PLL */ + regmap_update_bits(clk->regmap, GPR_REG3, PLL_CKE, PLL_CKE); + + return regmap_read_poll_timeout(clk->regmap, GPR_REG1, val, + val & PLL_LOCK, 10, 100); +} + +static void clk_hsio_pll_unprepare(struct clk_hw *hw) +{ + struct clk_hsio_pll *clk = to_clk_hsio_pll(hw); + + regmap_update_bits(clk->regmap, GPR_REG3, PLL_RST | PLL_CKE, 0); +} + +static int clk_hsio_pll_is_prepared(struct clk_hw *hw) +{ + struct clk_hsio_pll *clk = to_clk_hsio_pll(hw); + + return regmap_test_bits(clk->regmap, GPR_REG1, PLL_LOCK); +} + +static unsigned long clk_hsio_pll_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + return 100000000; +} + +static const struct clk_ops clk_hsio_pll_ops = { + .prepare = clk_hsio_pll_prepare, + .unprepare = clk_hsio_pll_unprepare, + .is_prepared = clk_hsio_pll_is_prepared, + .recalc_rate = clk_hsio_pll_recalc_rate, +}; + +static int imx8mp_hsio_blk_ctrl_probe(struct imx8mp_blk_ctrl *bc) +{ + struct clk_hsio_pll *clk_hsio_pll; + struct clk_hw *hw; + struct clk_init_data init = {}; + int ret; + + clk_hsio_pll = devm_kzalloc(bc->dev, sizeof(*clk_hsio_pll), GFP_KERNEL); + if (!clk_hsio_pll) + return -ENOMEM; + + init.name = "hsio_pll"; + init.ops = &clk_hsio_pll_ops; + init.parent_names = (const char *[]){"osc_24m"}; + init.num_parents = 1; + + clk_hsio_pll->regmap = bc->regmap; + clk_hsio_pll->hw.init = &init; + + hw = &clk_hsio_pll->hw; + ret = 
devm_clk_hw_register(bc->dev, hw); + if (ret) + return ret; + + return devm_of_clk_add_hw_provider(bc->dev, of_clk_hw_simple_get, hw); +} + static void imx8mp_hsio_blk_ctrl_power_on(struct imx8mp_blk_ctrl *bc, struct imx8mp_blk_ctrl_domain *domain) { @@ -187,6 +285,7 @@ static const struct imx8mp_blk_ctrl_domain_data imx8mp_hsio_domain_data[] = { static const struct imx8mp_blk_ctrl_data imx8mp_hsio_blk_ctl_dev_data = { .max_reg = 0x24, + .probe = imx8mp_hsio_blk_ctrl_probe, .power_on = imx8mp_hsio_blk_ctrl_power_on, .power_off = imx8mp_hsio_blk_ctrl_power_off, .power_notifier_fn = imx8mp_hsio_power_notifier, @@ -201,6 +300,7 @@ static const struct imx8mp_blk_ctrl_data imx8mp_hsio_blk_ctl_dev_data = { #define HDMI_RTX_CLK_CTL3 0x70 #define HDMI_RTX_CLK_CTL4 0x80 #define HDMI_TX_CONTROL0 0x200 +#define HDMI_LCDIF_NOC_HURRY_MASK GENMASK(14, 12) static void imx8mp_hdmi_blk_ctrl_power_on(struct imx8mp_blk_ctrl *bc, struct imx8mp_blk_ctrl_domain *domain) @@ -217,6 +317,8 @@ static void imx8mp_hdmi_blk_ctrl_power_on(struct imx8mp_blk_ctrl *bc, regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(11)); regmap_set_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(4) | BIT(5) | BIT(6)); + regmap_set_bits(bc->regmap, HDMI_TX_CONTROL0, + FIELD_PREP(HDMI_LCDIF_NOC_HURRY_MASK, 7)); break; case IMX8MP_HDMIBLK_PD_PAI: regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(17)); @@ -634,6 +736,12 @@ static int imx8mp_blk_ctrl_probe(struct platform_device *pdev) goto cleanup_provider; } + if (bc_data->probe) { + ret = bc_data->probe(bc); + if (ret) + goto cleanup_provider; + } + dev_set_drvdata(dev, bc); return 0; diff --git a/drivers/soc/imx/imx93-pd.c b/drivers/soc/imx/imx93-pd.c index 4d235c8c4924..832deeed8fd6 100644 --- a/drivers/soc/imx/imx93-pd.c +++ b/drivers/soc/imx/imx93-pd.c @@ -164,7 +164,6 @@ MODULE_DEVICE_TABLE(of, imx93_pd_ids); static struct platform_driver imx93_power_domain_driver = { .driver = { .name = "imx93_power_domain", - .owner = THIS_MODULE, .of_match_table = imx93_pd_ids, }, .probe = imx93_pd_probe, diff --git a/drivers/soc/imx/imx93-src.c b/drivers/soc/imx/imx93-src.c index 4d74921cae0f..f1c2e22d5cbd 100644 --- a/drivers/soc/imx/imx93-src.c +++ b/drivers/soc/imx/imx93-src.c @@ -21,7 +21,6 @@ MODULE_DEVICE_TABLE(of, imx93_src_ids); static struct platform_driver imx93_src_driver = { .driver = { .name = "imx93_src", - .owner = THIS_MODULE, .of_match_table = imx93_src_ids, }, .probe = imx93_src_probe, diff --git a/drivers/soc/mediatek/Kconfig b/drivers/soc/mediatek/Kconfig index 40d0cc600cae..d6b83a5508ca 100644 --- a/drivers/soc/mediatek/Kconfig +++ b/drivers/soc/mediatek/Kconfig @@ -44,6 +44,11 @@ config MTK_PMIC_WRAP on different MediaTek SoCs. The PMIC wrapper is a proprietary hardware to connect the PMIC. +config MTK_REGULATOR_COUPLER + bool "MediaTek SoC Regulator Coupler" if COMPILE_TEST + default ARCH_MEDIATEK + depends on REGULATOR + config MTK_SCPSYS bool "MediaTek SCPSYS Support" default ARCH_MEDIATEK @@ -68,7 +73,7 @@ config MTK_SCPSYS_PM_DOMAINS tasks in the system. 
config MTK_MMSYS - bool "MediaTek MMSYS Support" + tristate "MediaTek MMSYS Support" default ARCH_MEDIATEK depends on HAS_IOMEM help diff --git a/drivers/soc/mediatek/Makefile b/drivers/soc/mediatek/Makefile index 0e9e703c931a..8c0ddacbcde8 100644 --- a/drivers/soc/mediatek/Makefile +++ b/drivers/soc/mediatek/Makefile @@ -3,6 +3,7 @@ obj-$(CONFIG_MTK_CMDQ) += mtk-cmdq-helper.o obj-$(CONFIG_MTK_DEVAPC) += mtk-devapc.o obj-$(CONFIG_MTK_INFRACFG) += mtk-infracfg.o obj-$(CONFIG_MTK_PMIC_WRAP) += mtk-pmic-wrap.o +obj-$(CONFIG_MTK_REGULATOR_COUPLER) += mtk-regulator-coupler.o obj-$(CONFIG_MTK_SCPSYS) += mtk-scpsys.o obj-$(CONFIG_MTK_SCPSYS_PM_DOMAINS) += mtk-pm-domains.o obj-$(CONFIG_MTK_MMSYS) += mtk-mmsys.o diff --git a/drivers/soc/mediatek/mt8186-pm-domains.h b/drivers/soc/mediatek/mt8186-pm-domains.h index 108af61854a3..fce86f79c505 100644 --- a/drivers/soc/mediatek/mt8186-pm-domains.h +++ b/drivers/soc/mediatek/mt8186-pm-domains.h @@ -304,7 +304,6 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8186[] = { .ctl_offs = 0x9FC, .pwr_sta_offs = 0x16C, .pwr_sta2nd_offs = 0x170, - .caps = MTK_SCPD_KEEP_DEFAULT_OFF, }, [MT8186_POWER_DOMAIN_ADSP_INFRA] = { .name = "adsp_infra", @@ -312,7 +311,6 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8186[] = { .ctl_offs = 0x9F8, .pwr_sta_offs = 0x16C, .pwr_sta2nd_offs = 0x170, - .caps = MTK_SCPD_KEEP_DEFAULT_OFF, }, [MT8186_POWER_DOMAIN_ADSP_TOP] = { .name = "adsp_top", @@ -332,7 +330,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8186[] = { MT8186_TOP_AXI_PROT_EN_3_CLR, MT8186_TOP_AXI_PROT_EN_3_STA), }, - .caps = MTK_SCPD_SRAM_ISO | MTK_SCPD_KEEP_DEFAULT_OFF | MTK_SCPD_ACTIVE_WAKEUP, + .caps = MTK_SCPD_SRAM_ISO | MTK_SCPD_ACTIVE_WAKEUP, }, }; diff --git a/drivers/soc/mediatek/mt8188-mmsys.h b/drivers/soc/mediatek/mt8188-mmsys.h new file mode 100644 index 000000000000..448cc3761b43 --- /dev/null +++ b/drivers/soc/mediatek/mt8188-mmsys.h @@ -0,0 +1,149 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef __SOC_MEDIATEK_MT8188_MMSYS_H +#define __SOC_MEDIATEK_MT8188_MMSYS_H + +#define MT8188_VDO0_OVL_MOUT_EN 0xf14 +#define MT8188_MOUT_DISP_OVL0_TO_DISP_RDMA0 BIT(0) +#define MT8188_MOUT_DISP_OVL0_TO_DISP_WDMA0 BIT(1) +#define MT8188_MOUT_DISP_OVL0_TO_DISP_OVL1 BIT(2) +#define MT8188_MOUT_DISP_OVL1_TO_DISP_RDMA1 BIT(4) +#define MT8188_MOUT_DISP_OVL1_TO_DISP_WDMA1 BIT(5) +#define MT8188_MOUT_DISP_OVL1_TO_DISP_OVL0 BIT(6) + +#define MT8188_VDO0_SEL_IN 0xf34 +#define MT8188_VDO0_SEL_OUT 0xf38 + +#define MT8188_VDO0_DISP_RDMA_SEL 0xf40 +#define MT8188_SOUT_DISP_RDMA0_TO_MASK GENMASK(2, 0) +#define MT8188_SOUT_DISP_RDMA0_TO_DISP_COLOR0 (0 << 0) +#define MT8188_SOUT_DISP_RDMA0_TO_DISP_DSI0 (1 << 0) +#define MT8188_SOUT_DISP_RDMA0_TO_DISP_DP_INTF0 (5 << 0) +#define MT8188_SEL_IN_DISP_RDMA0_FROM_MASK GENMASK(8, 8) +#define MT8188_SEL_IN_DISP_RDMA0_FROM_DISP_OVL0 (0 << 8) +#define MT8188_SEL_IN_DISP_RDMA0_FROM_DISP_RSZ0 (1 << 8) + + +#define MT8188_VDO0_DSI0_SEL_IN 0xf44 +#define MT8188_SEL_IN_DSI0_FROM_MASK BIT(0) +#define MT8188_SEL_IN_DSI0_FROM_DSC_WRAP0_OUT (0 << 0) +#define MT8188_SEL_IN_DSI0_FROM_DISP_DITHER0 (1 << 0) + +#define MT8188_VDO0_DP_INTF0_SEL_IN 0xf4C +#define MT8188_SEL_IN_DP_INTF0_FROM_MASK GENMASK(2, 0) +#define MT8188_SEL_IN_DP_INTF0_FROM_DSC_WRAP0C1_OUT (0 << 0) +#define MT8188_SEL_IN_DP_INTF0_FROM_VPP_MERGE (1 << 0) +#define MT8188_SEL_IN_DP_INTF0_FROM_DISP_DITHER0 (3 << 0) + +#define MT8188_VDO0_DISP_DITHER0_SEL_OUT 0xf58 +#define MT8188_SOUT_DISP_DITHER0_TO_MASK GENMASK(2, 0) +#define 
MT8188_SOUT_DISP_DITHER0_TO_DSC_WRAP0_IN (0 << 0) +#define MT8188_SOUT_DISP_DITHER0_TO_DSI0 (1 << 0) +#define MT8188_SOUT_DISP_DITHER0_TO_VPP_MERGE0 (6 << 0) +#define MT8188_SOUT_DISP_DITHER0_TO_DP_INTF0 (7 << 0) + +#define MT8188_VDO0_VPP_MERGE_SEL 0xf60 +#define MT8188_SEL_IN_VPP_MERGE_FROM_MASK GENMASK(1, 0) +#define MT8188_SEL_IN_VPP_MERGE_FROM_DSC_WRAP0_OUT (0 << 0) +#define MT8188_SEL_IN_VPP_MERGE_FROM_DITHER0_OUT (3 << 0) + +#define MT8188_SOUT_VPP_MERGE_TO_MASK GENMASK(6, 4) +#define MT8188_SOUT_VPP_MERGE_TO_DSI1 (0 << 4) +#define MT8188_SOUT_VPP_MERGE_TO_DP_INTF0 (1 << 4) +#define MT8188_SOUT_VPP_MERGE_TO_SINA_VIRTUAL0 (2 << 4) +#define MT8188_SOUT_VPP_MERGE_TO_DISP_WDMA1 (3 << 4) +#define MT8188_SOUT_VPP_MERGE_TO_DSC_WRAP0_IN (4 << 4) +#define MT8188_SOUT_VPP_MERGE_TO_DISP_WDMA0 (5 << 4) +#define MT8188_SOUT_VPP_MERGE_TO_DSC_WRAP1_IN_MASK GENMASK(11, 11) +#define MT8188_SOUT_VPP_MERGE_TO_DSC_WRAP1_IN (0 << 11) + +#define MT8188_VDO0_DSC_WARP_SEL 0xf64 +#define MT8188_SEL_IN_DSC_WRAP0C0_IN_FROM_MASK GENMASK(0, 0) +#define MT8188_SEL_IN_DSC_WRAP0C0_IN_FROM_DISP_DITHER0 (0 << 0) +#define MT8188_SEL_IN_DSC_WRAP0C0_IN_FROM_VPP_MERGE (1 << 0) +#define MT8188_SOUT_DSC_WRAP0_OUT_TO_MASK GENMASK(19, 16) +#define MT8188_SOUT_DSC_WRAP0_OUT_TO_DSI0 BIT(16) +#define MT8188_SOUT_DSC_WRAP0_OUT_TO_SINB_VIRTUAL0 BIT(17) +#define MT8188_SOUT_DSC_WRAP0_OUT_TO_VPP_MERGE BIT(18) +#define MT8188_SOUT_DSC_WRAP0_OUT_TO_DISP_WDMA0 BIT(19) + +static const struct mtk_mmsys_routes mmsys_mt8188_routing_table[] = { + { + DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0, + MT8188_VDO0_OVL_MOUT_EN, MT8188_MOUT_DISP_OVL0_TO_DISP_RDMA0, + MT8188_MOUT_DISP_OVL0_TO_DISP_RDMA0 + }, { + DDP_COMPONENT_OVL0, DDP_COMPONENT_WDMA0, + MT8188_VDO0_OVL_MOUT_EN, MT8188_MOUT_DISP_OVL0_TO_DISP_WDMA0, + MT8188_MOUT_DISP_OVL0_TO_DISP_WDMA0 + }, { + DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0, + MT8188_VDO0_DISP_RDMA_SEL, MT8188_SEL_IN_DISP_RDMA0_FROM_MASK, + MT8188_SEL_IN_DISP_RDMA0_FROM_DISP_OVL0 + }, { + DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0, + MT8188_VDO0_DSI0_SEL_IN, MT8188_SEL_IN_DSI0_FROM_MASK, + MT8188_SEL_IN_DSI0_FROM_DISP_DITHER0 + }, { + DDP_COMPONENT_DITHER0, DDP_COMPONENT_MERGE0, + MT8188_VDO0_VPP_MERGE_SEL, MT8188_SEL_IN_VPP_MERGE_FROM_MASK, + MT8188_SEL_IN_VPP_MERGE_FROM_DITHER0_OUT + }, { + DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSC0, + MT8188_VDO0_DSC_WARP_SEL, + MT8188_SEL_IN_DSC_WRAP0C0_IN_FROM_MASK, + MT8188_SEL_IN_DSC_WRAP0C0_IN_FROM_DISP_DITHER0 + }, { + DDP_COMPONENT_DITHER0, DDP_COMPONENT_DP_INTF0, + MT8188_VDO0_DP_INTF0_SEL_IN, MT8188_SEL_IN_DP_INTF0_FROM_MASK, + MT8188_SEL_IN_DP_INTF0_FROM_DISP_DITHER0 + }, { + DDP_COMPONENT_DSC0, DDP_COMPONENT_MERGE0, + MT8188_VDO0_VPP_MERGE_SEL, MT8188_SEL_IN_VPP_MERGE_FROM_MASK, + MT8188_SEL_IN_VPP_MERGE_FROM_DSC_WRAP0_OUT + }, { + DDP_COMPONENT_DSC0, DDP_COMPONENT_DSI0, + MT8188_VDO0_DSI0_SEL_IN, MT8188_SEL_IN_DSI0_FROM_MASK, + MT8188_SEL_IN_DSI0_FROM_DSC_WRAP0_OUT + }, { + DDP_COMPONENT_RDMA0, DDP_COMPONENT_COLOR0, + MT8188_VDO0_DISP_RDMA_SEL, MT8188_SOUT_DISP_RDMA0_TO_MASK, + MT8188_SOUT_DISP_RDMA0_TO_DISP_COLOR0 + }, { + DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0, + MT8188_VDO0_DISP_DITHER0_SEL_OUT, + MT8188_SOUT_DISP_DITHER0_TO_MASK, + MT8188_SOUT_DISP_DITHER0_TO_DSI0 + }, { + DDP_COMPONENT_DITHER0, DDP_COMPONENT_DP_INTF0, + MT8188_VDO0_DISP_DITHER0_SEL_OUT, + MT8188_SOUT_DISP_DITHER0_TO_MASK, + MT8188_SOUT_DISP_DITHER0_TO_DP_INTF0 + }, { + DDP_COMPONENT_MERGE0, DDP_COMPONENT_DP_INTF0, + MT8188_VDO0_VPP_MERGE_SEL, MT8188_SOUT_VPP_MERGE_TO_MASK, + 
MT8188_SOUT_VPP_MERGE_TO_DP_INTF0 + }, { + DDP_COMPONENT_MERGE0, DDP_COMPONENT_DPI0, + MT8188_VDO0_VPP_MERGE_SEL, MT8188_SOUT_VPP_MERGE_TO_MASK, + MT8188_SOUT_VPP_MERGE_TO_SINA_VIRTUAL0 + }, { + DDP_COMPONENT_MERGE0, DDP_COMPONENT_WDMA0, + MT8188_VDO0_VPP_MERGE_SEL, MT8188_SOUT_VPP_MERGE_TO_MASK, + MT8188_SOUT_VPP_MERGE_TO_DISP_WDMA0 + }, { + DDP_COMPONENT_MERGE0, DDP_COMPONENT_DSC0, + MT8188_VDO0_VPP_MERGE_SEL, MT8188_SOUT_VPP_MERGE_TO_MASK, + MT8188_SOUT_VPP_MERGE_TO_DSC_WRAP0_IN + }, { + DDP_COMPONENT_DSC0, DDP_COMPONENT_DSI0, + MT8188_VDO0_DSC_WARP_SEL, MT8188_SOUT_DSC_WRAP0_OUT_TO_MASK, + MT8188_SOUT_DSC_WRAP0_OUT_TO_DSI0 + }, { + DDP_COMPONENT_DSC0, DDP_COMPONENT_MERGE0, + MT8188_VDO0_DSC_WARP_SEL, MT8188_SOUT_DSC_WRAP0_OUT_TO_MASK, + MT8188_SOUT_DSC_WRAP0_OUT_TO_VPP_MERGE + }, +}; + +#endif /* __SOC_MEDIATEK_MT8188_MMSYS_H */ diff --git a/drivers/soc/mediatek/mt8188-pm-domains.h b/drivers/soc/mediatek/mt8188-pm-domains.h new file mode 100644 index 000000000000..0692cb444ed0 --- /dev/null +++ b/drivers/soc/mediatek/mt8188-pm-domains.h @@ -0,0 +1,623 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022 MediaTek Inc. + * Author: Garmin Chang <garmin.chang@mediatek.com> + */ + +#ifndef __SOC_MEDIATEK_MT8188_PM_DOMAINS_H +#define __SOC_MEDIATEK_MT8188_PM_DOMAINS_H + +#include "mtk-pm-domains.h" +#include <dt-bindings/power/mediatek,mt8188-power.h> + +/* + * MT8188 power domain support + */ + +static const struct scpsys_domain_data scpsys_domain_data_mt8188[] = { + [MT8188_POWER_DOMAIN_MFG0] = { + .name = "mfg0", + .sta_mask = BIT(1), + .ctl_offs = 0x300, + .pwr_sta_offs = 0x174, + .pwr_sta2nd_offs = 0x178, + .sram_pdn_bits = BIT(8), + .sram_pdn_ack_bits = BIT(12), + .caps = MTK_SCPD_KEEP_DEFAULT_OFF | MTK_SCPD_DOMAIN_SUPPLY, + }, + [MT8188_POWER_DOMAIN_MFG1] = { + .name = "mfg1", + .sta_mask = BIT(2), + .ctl_offs = 0x304, + .pwr_sta_offs = 0x174, + .pwr_sta2nd_offs = 0x178, + .sram_pdn_bits = BIT(8), + .sram_pdn_ack_bits = BIT(12), + .bp_infracfg = { + BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_MFG1_STEP1, + MT8188_TOP_AXI_PROT_EN_SET, + MT8188_TOP_AXI_PROT_EN_CLR, + MT8188_TOP_AXI_PROT_EN_STA), + BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_2_MFG1_STEP2, + MT8188_TOP_AXI_PROT_EN_2_SET, + MT8188_TOP_AXI_PROT_EN_2_CLR, + MT8188_TOP_AXI_PROT_EN_2_STA), + BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_1_MFG1_STEP3, + MT8188_TOP_AXI_PROT_EN_1_SET, + MT8188_TOP_AXI_PROT_EN_1_CLR, + MT8188_TOP_AXI_PROT_EN_1_STA), + BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_2_MFG1_STEP4, + MT8188_TOP_AXI_PROT_EN_2_SET, + MT8188_TOP_AXI_PROT_EN_2_CLR, + MT8188_TOP_AXI_PROT_EN_2_STA), + BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_MFG1_STEP5, + MT8188_TOP_AXI_PROT_EN_SET, + MT8188_TOP_AXI_PROT_EN_CLR, + MT8188_TOP_AXI_PROT_EN_STA), + BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_MFG1_STEP6, + MT8188_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_SET, + MT8188_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_CLR, + MT8188_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_STA), + }, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF | MTK_SCPD_DOMAIN_SUPPLY, + }, + [MT8188_POWER_DOMAIN_MFG2] = { + .name = "mfg2", + .sta_mask = BIT(3), + .ctl_offs = 0x308, + .pwr_sta_offs = 0x174, + .pwr_sta2nd_offs = 0x178, + .sram_pdn_bits = BIT(8), + .sram_pdn_ack_bits = BIT(12), + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8188_POWER_DOMAIN_MFG3] = { + .name = "mfg3", + .sta_mask = BIT(4), + .ctl_offs = 0x30C, + .pwr_sta_offs = 0x174, + .pwr_sta2nd_offs = 0x178, + .sram_pdn_bits = BIT(8), + .sram_pdn_ack_bits = BIT(12), + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8188_POWER_DOMAIN_MFG4] = { + .name = "mfg4", + 
.sta_mask = BIT(5), + .ctl_offs = 0x310, + .pwr_sta_offs = 0x174, + .pwr_sta2nd_offs = 0x178, + .sram_pdn_bits = BIT(8), + .sram_pdn_ack_bits = BIT(12), + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8188_POWER_DOMAIN_PEXTP_MAC_P0] = { + .name = "pextp_mac_p0", + .sta_mask = BIT(10), + .ctl_offs = 0x324, + .pwr_sta_offs = 0x174, + .pwr_sta2nd_offs = 0x178, + .sram_pdn_bits = BIT(8), + .sram_pdn_ack_bits = BIT(12), + .bp_infracfg = { + BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_PEXTP_MAC_P0_STEP1, + MT8188_TOP_AXI_PROT_EN_SET, + MT8188_TOP_AXI_PROT_EN_CLR, + MT8188_TOP_AXI_PROT_EN_STA), + BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_PEXTP_MAC_P0_STEP2, + MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_SET, + MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_CLR, + MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_STA), + }, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8188_POWER_DOMAIN_PEXTP_PHY_TOP] = { + .name = "pextp_phy_top", + .sta_mask = BIT(12), + .ctl_offs = 0x328, + .pwr_sta_offs = 0x174, + .pwr_sta2nd_offs = 0x178, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8188_POWER_DOMAIN_CSIRX_TOP] = { + .name = "pextp_csirx_top", + .sta_mask = BIT(17), + .ctl_offs = 0x3C4, + .pwr_sta_offs = 0x174, + .pwr_sta2nd_offs = 0x178, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8188_POWER_DOMAIN_ETHER] = { + .name = "ether", + .sta_mask = BIT(1), + .ctl_offs = 0x338, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = BIT(8), + .sram_pdn_ack_bits = BIT(12), + .bp_infracfg = { + BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_ETHER_STEP1, + MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_SET, + MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_CLR, + MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_STA), + }, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF | MTK_SCPD_ACTIVE_WAKEUP, + }, + [MT8188_POWER_DOMAIN_HDMI_TX] = { + .name = "hdmi_tx", + .sta_mask = BIT(18), + .ctl_offs = 0x37C, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = BIT(8), + .sram_pdn_ack_bits = BIT(12), + .bp_infracfg = { + BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_HDMI_TX_STEP1, + MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_SET, + MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_CLR, + MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_STA), + }, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF | MTK_SCPD_ACTIVE_WAKEUP, + }, + [MT8188_POWER_DOMAIN_ADSP_AO] = { + .name = "adsp_ao", + .sta_mask = BIT(10), + .ctl_offs = 0x35C, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .bp_infracfg = { + BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_2_ADSP_AO_STEP1, + MT8188_TOP_AXI_PROT_EN_2_SET, + MT8188_TOP_AXI_PROT_EN_2_CLR, + MT8188_TOP_AXI_PROT_EN_2_STA), + BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_2_ADSP_AO_STEP2, + MT8188_TOP_AXI_PROT_EN_2_SET, + MT8188_TOP_AXI_PROT_EN_2_CLR, + MT8188_TOP_AXI_PROT_EN_2_STA), + }, + .caps = MTK_SCPD_ALWAYS_ON, + }, + [MT8188_POWER_DOMAIN_ADSP_INFRA] = { + .name = "adsp_infra", + .sta_mask = BIT(9), + .ctl_offs = 0x358, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = BIT(8), + .sram_pdn_ack_bits = BIT(12), + .bp_infracfg = { + BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_2_ADSP_INFRA_STEP1, + MT8188_TOP_AXI_PROT_EN_2_SET, + MT8188_TOP_AXI_PROT_EN_2_CLR, + MT8188_TOP_AXI_PROT_EN_2_STA), + BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_2_ADSP_INFRA_STEP2, + MT8188_TOP_AXI_PROT_EN_2_SET, + MT8188_TOP_AXI_PROT_EN_2_CLR, + MT8188_TOP_AXI_PROT_EN_2_STA), + }, + .caps = MTK_SCPD_SRAM_ISO | MTK_SCPD_ALWAYS_ON, + }, + [MT8188_POWER_DOMAIN_ADSP] = { + .name = "adsp", + .sta_mask = BIT(8), + .ctl_offs = 0x354, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = BIT(8), + .sram_pdn_ack_bits = BIT(12), + .bp_infracfg 
= { + BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_2_ADSP_STEP1, + MT8188_TOP_AXI_PROT_EN_2_SET, + MT8188_TOP_AXI_PROT_EN_2_CLR, + MT8188_TOP_AXI_PROT_EN_2_STA), + BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_2_ADSP_STEP2, + MT8188_TOP_AXI_PROT_EN_2_SET, + MT8188_TOP_AXI_PROT_EN_2_CLR, + MT8188_TOP_AXI_PROT_EN_2_STA), + }, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF | MTK_SCPD_SRAM_ISO | MTK_SCPD_ACTIVE_WAKEUP, + }, + [MT8188_POWER_DOMAIN_AUDIO] = { + .name = "audio", + .sta_mask = BIT(6), + .ctl_offs = 0x34C, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = BIT(8), + .sram_pdn_ack_bits = BIT(12), + .bp_infracfg = { + BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_2_AUDIO_STEP1, + MT8188_TOP_AXI_PROT_EN_2_SET, + MT8188_TOP_AXI_PROT_EN_2_CLR, + MT8188_TOP_AXI_PROT_EN_2_STA), + BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_2_AUDIO_STEP2, + MT8188_TOP_AXI_PROT_EN_2_SET, + MT8188_TOP_AXI_PROT_EN_2_CLR, + MT8188_TOP_AXI_PROT_EN_2_STA), + }, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF | MTK_SCPD_ACTIVE_WAKEUP, + }, + [MT8188_POWER_DOMAIN_AUDIO_ASRC] = { + .name = "audio_asrc", + .sta_mask = BIT(7), + .ctl_offs = 0x350, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = BIT(8), + .sram_pdn_ack_bits = BIT(12), + .bp_infracfg = { + BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_2_AUDIO_ASRC_STEP1, + MT8188_TOP_AXI_PROT_EN_2_SET, + MT8188_TOP_AXI_PROT_EN_2_CLR, + MT8188_TOP_AXI_PROT_EN_2_STA), + BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_2_AUDIO_ASRC_STEP2, + MT8188_TOP_AXI_PROT_EN_2_SET, + MT8188_TOP_AXI_PROT_EN_2_CLR, + MT8188_TOP_AXI_PROT_EN_2_STA), + }, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8188_POWER_DOMAIN_VPPSYS0] = { + .name = "vppsys0", + .sta_mask = BIT(11), + .ctl_offs = 0x360, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = BIT(8), + .sram_pdn_ack_bits = BIT(12), + .bp_infracfg = { + BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_VPPSYS0_STEP1, + MT8188_TOP_AXI_PROT_EN_SET, + MT8188_TOP_AXI_PROT_EN_CLR, + MT8188_TOP_AXI_PROT_EN_STA), + BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_MM_2_VPPSYS0_STEP2, + MT8188_TOP_AXI_PROT_EN_MM_2_SET, + MT8188_TOP_AXI_PROT_EN_MM_2_CLR, + MT8188_TOP_AXI_PROT_EN_MM_2_STA), + BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_VPPSYS0_STEP3, + MT8188_TOP_AXI_PROT_EN_SET, + MT8188_TOP_AXI_PROT_EN_CLR, + MT8188_TOP_AXI_PROT_EN_STA), + BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_MM_2_VPPSYS0_STEP4, + MT8188_TOP_AXI_PROT_EN_MM_2_SET, + MT8188_TOP_AXI_PROT_EN_MM_2_CLR, + MT8188_TOP_AXI_PROT_EN_MM_2_STA), + BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_VPPSYS0_STEP5, + MT8188_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_SET, + MT8188_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_CLR, + MT8188_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_STA), + }, + }, + [MT8188_POWER_DOMAIN_VDOSYS0] = { + .name = "vdosys0", + .sta_mask = BIT(13), + .ctl_offs = 0x368, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = BIT(8), + .sram_pdn_ack_bits = BIT(12), + .bp_infracfg = { + BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_MM_VDOSYS0_STEP1, + MT8188_TOP_AXI_PROT_EN_MM_SET, + MT8188_TOP_AXI_PROT_EN_MM_CLR, + MT8188_TOP_AXI_PROT_EN_MM_STA), + BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_VDOSYS0_STEP2, + MT8188_TOP_AXI_PROT_EN_SET, + MT8188_TOP_AXI_PROT_EN_CLR, + MT8188_TOP_AXI_PROT_EN_STA), + BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_VDOSYS0_STEP3, + MT8188_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_SET, + MT8188_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_CLR, + MT8188_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_STA), + }, + }, + [MT8188_POWER_DOMAIN_VDOSYS1] = { + .name = "vdosys1", + .sta_mask = BIT(14), + .ctl_offs = 0x36C, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = 
BIT(8), + .sram_pdn_ack_bits = BIT(12), + .bp_infracfg = { + BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_MM_VDOSYS1_STEP1, + MT8188_TOP_AXI_PROT_EN_MM_SET, + MT8188_TOP_AXI_PROT_EN_MM_CLR, + MT8188_TOP_AXI_PROT_EN_MM_STA), + BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_MM_VDOSYS1_STEP2, + MT8188_TOP_AXI_PROT_EN_MM_SET, + MT8188_TOP_AXI_PROT_EN_MM_CLR, + MT8188_TOP_AXI_PROT_EN_MM_STA), + BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_MM_2_VDOSYS1_STEP3, + MT8188_TOP_AXI_PROT_EN_MM_2_SET, + MT8188_TOP_AXI_PROT_EN_MM_2_CLR, + MT8188_TOP_AXI_PROT_EN_MM_2_STA), + }, + }, + [MT8188_POWER_DOMAIN_DP_TX] = { + .name = "dp_tx", + .sta_mask = BIT(16), + .ctl_offs = 0x374, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = BIT(8), + .sram_pdn_ack_bits = BIT(12), + .bp_infracfg = { + BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_DP_TX_STEP1, + MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_SET, + MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_CLR, + MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_STA), + }, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8188_POWER_DOMAIN_EDP_TX] = { + .name = "edp_tx", + .sta_mask = BIT(17), + .ctl_offs = 0x378, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = BIT(8), + .sram_pdn_ack_bits = BIT(12), + .bp_infracfg = { + BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_EDP_TX_STEP1, + MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_SET, + MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_CLR, + MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_STA), + }, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8188_POWER_DOMAIN_VPPSYS1] = { + .name = "vppsys1", + .sta_mask = BIT(12), + .ctl_offs = 0x364, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = BIT(8), + .sram_pdn_ack_bits = BIT(12), + .bp_infracfg = { + BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_MM_VPPSYS1_STEP1, + MT8188_TOP_AXI_PROT_EN_MM_SET, + MT8188_TOP_AXI_PROT_EN_MM_CLR, + MT8188_TOP_AXI_PROT_EN_MM_STA), + BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_MM_VPPSYS1_STEP2, + MT8188_TOP_AXI_PROT_EN_MM_SET, + MT8188_TOP_AXI_PROT_EN_MM_CLR, + MT8188_TOP_AXI_PROT_EN_MM_STA), + BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_MM_2_VPPSYS1_STEP3, + MT8188_TOP_AXI_PROT_EN_MM_2_SET, + MT8188_TOP_AXI_PROT_EN_MM_2_CLR, + MT8188_TOP_AXI_PROT_EN_MM_2_STA), + }, + }, + [MT8188_POWER_DOMAIN_WPE] = { + .name = "wpe", + .sta_mask = BIT(15), + .ctl_offs = 0x370, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = BIT(8), + .sram_pdn_ack_bits = BIT(12), + .bp_infracfg = { + BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_MM_2_WPE_STEP1, + MT8188_TOP_AXI_PROT_EN_MM_2_SET, + MT8188_TOP_AXI_PROT_EN_MM_2_CLR, + MT8188_TOP_AXI_PROT_EN_MM_2_STA), + BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_MM_2_WPE_STEP2, + MT8188_TOP_AXI_PROT_EN_MM_2_SET, + MT8188_TOP_AXI_PROT_EN_MM_2_CLR, + MT8188_TOP_AXI_PROT_EN_MM_2_STA), + }, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8188_POWER_DOMAIN_VDEC0] = { + .name = "vdec0", + .sta_mask = BIT(19), + .ctl_offs = 0x380, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = BIT(8), + .sram_pdn_ack_bits = BIT(12), + .bp_infracfg = { + BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_MM_VDEC0_STEP1, + MT8188_TOP_AXI_PROT_EN_MM_SET, + MT8188_TOP_AXI_PROT_EN_MM_CLR, + MT8188_TOP_AXI_PROT_EN_MM_STA), + BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_MM_2_VDEC0_STEP2, + MT8188_TOP_AXI_PROT_EN_MM_2_SET, + MT8188_TOP_AXI_PROT_EN_MM_2_CLR, + MT8188_TOP_AXI_PROT_EN_MM_2_STA), + }, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8188_POWER_DOMAIN_VDEC1] = { + .name = "vdec1", + .sta_mask = BIT(20), + .ctl_offs = 0x384, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = BIT(8), + 
.sram_pdn_ack_bits = BIT(12), + .bp_infracfg = { + BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_MM_VDEC1_STEP1, + MT8188_TOP_AXI_PROT_EN_MM_SET, + MT8188_TOP_AXI_PROT_EN_MM_CLR, + MT8188_TOP_AXI_PROT_EN_MM_STA), + BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_MM_VDEC1_STEP2, + MT8188_TOP_AXI_PROT_EN_MM_SET, + MT8188_TOP_AXI_PROT_EN_MM_CLR, + MT8188_TOP_AXI_PROT_EN_MM_STA), + }, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8188_POWER_DOMAIN_VENC] = { + .name = "venc", + .sta_mask = BIT(22), + .ctl_offs = 0x38C, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = BIT(8), + .sram_pdn_ack_bits = BIT(12), + .bp_infracfg = { + BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_MM_VENC_STEP1, + MT8188_TOP_AXI_PROT_EN_MM_SET, + MT8188_TOP_AXI_PROT_EN_MM_CLR, + MT8188_TOP_AXI_PROT_EN_MM_STA), + BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_MM_VENC_STEP2, + MT8188_TOP_AXI_PROT_EN_MM_SET, + MT8188_TOP_AXI_PROT_EN_MM_CLR, + MT8188_TOP_AXI_PROT_EN_MM_STA), + BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_MM_2_VENC_STEP3, + MT8188_TOP_AXI_PROT_EN_MM_2_SET, + MT8188_TOP_AXI_PROT_EN_MM_2_CLR, + MT8188_TOP_AXI_PROT_EN_MM_2_STA), + }, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8188_POWER_DOMAIN_IMG_VCORE] = { + .name = "vcore", + .sta_mask = BIT(28), + .ctl_offs = 0x3A4, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .bp_infracfg = { + BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_MM_IMG_VCORE_STEP1, + MT8188_TOP_AXI_PROT_EN_MM_SET, + MT8188_TOP_AXI_PROT_EN_MM_CLR, + MT8188_TOP_AXI_PROT_EN_MM_STA), + BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_MM_IMG_VCORE_STEP2, + MT8188_TOP_AXI_PROT_EN_MM_SET, + MT8188_TOP_AXI_PROT_EN_MM_CLR, + MT8188_TOP_AXI_PROT_EN_MM_STA), + BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_MM_2_IMG_VCORE_STEP3, + MT8188_TOP_AXI_PROT_EN_MM_2_SET, + MT8188_TOP_AXI_PROT_EN_MM_2_CLR, + MT8188_TOP_AXI_PROT_EN_MM_2_STA), + }, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF | MTK_SCPD_DOMAIN_SUPPLY, + }, + [MT8188_POWER_DOMAIN_IMG_MAIN] = { + .name = "img_main", + .sta_mask = BIT(29), + .ctl_offs = 0x3A8, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = BIT(8), + .sram_pdn_ack_bits = BIT(12), + .bp_infracfg = { + BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_MM_2_IMG_MAIN_STEP1, + MT8188_TOP_AXI_PROT_EN_MM_2_SET, + MT8188_TOP_AXI_PROT_EN_MM_2_CLR, + MT8188_TOP_AXI_PROT_EN_MM_2_STA), + BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_MM_2_IMG_MAIN_STEP2, + MT8188_TOP_AXI_PROT_EN_MM_2_SET, + MT8188_TOP_AXI_PROT_EN_MM_2_CLR, + MT8188_TOP_AXI_PROT_EN_MM_2_STA), + }, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8188_POWER_DOMAIN_DIP] = { + .name = "dip", + .sta_mask = BIT(30), + .ctl_offs = 0x3AC, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = BIT(8), + .sram_pdn_ack_bits = BIT(12), + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8188_POWER_DOMAIN_IPE] = { + .name = "ipe", + .sta_mask = BIT(31), + .ctl_offs = 0x3B0, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = BIT(8), + .sram_pdn_ack_bits = BIT(12), + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8188_POWER_DOMAIN_CAM_VCORE] = { + .name = "cam_vcore", + .sta_mask = BIT(27), + .ctl_offs = 0x3A0, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .bp_infracfg = { + BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_MM_CAM_VCORE_STEP1, + MT8188_TOP_AXI_PROT_EN_MM_SET, + MT8188_TOP_AXI_PROT_EN_MM_CLR, + MT8188_TOP_AXI_PROT_EN_MM_STA), + BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_2_CAM_VCORE_STEP2, + MT8188_TOP_AXI_PROT_EN_2_SET, + MT8188_TOP_AXI_PROT_EN_2_CLR, + MT8188_TOP_AXI_PROT_EN_2_STA), + BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_1_CAM_VCORE_STEP3, + MT8188_TOP_AXI_PROT_EN_1_SET, + 
MT8188_TOP_AXI_PROT_EN_1_CLR, + MT8188_TOP_AXI_PROT_EN_1_STA), + BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_MM_CAM_VCORE_STEP4, + MT8188_TOP_AXI_PROT_EN_MM_SET, + MT8188_TOP_AXI_PROT_EN_MM_CLR, + MT8188_TOP_AXI_PROT_EN_MM_STA), + BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_MM_2_CAM_VCORE_STEP5, + MT8188_TOP_AXI_PROT_EN_MM_2_SET, + MT8188_TOP_AXI_PROT_EN_MM_2_CLR, + MT8188_TOP_AXI_PROT_EN_MM_2_STA), + }, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF | MTK_SCPD_DOMAIN_SUPPLY, + }, + [MT8188_POWER_DOMAIN_CAM_MAIN] = { + .name = "cam_main", + .sta_mask = BIT(24), + .ctl_offs = 0x394, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = BIT(8), + .sram_pdn_ack_bits = BIT(12), + .bp_infracfg = { + BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_MM_2_CAM_MAIN_STEP1, + MT8188_TOP_AXI_PROT_EN_MM_2_SET, + MT8188_TOP_AXI_PROT_EN_MM_2_CLR, + MT8188_TOP_AXI_PROT_EN_MM_2_STA), + BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_2_CAM_MAIN_STEP2, + MT8188_TOP_AXI_PROT_EN_2_SET, + MT8188_TOP_AXI_PROT_EN_2_CLR, + MT8188_TOP_AXI_PROT_EN_2_STA), + BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_MM_2_CAM_MAIN_STEP3, + MT8188_TOP_AXI_PROT_EN_MM_2_SET, + MT8188_TOP_AXI_PROT_EN_MM_2_CLR, + MT8188_TOP_AXI_PROT_EN_MM_2_STA), + BUS_PROT_WR(MT8188_TOP_AXI_PROT_EN_2_CAM_MAIN_STEP4, + MT8188_TOP_AXI_PROT_EN_2_SET, + MT8188_TOP_AXI_PROT_EN_2_CLR, + MT8188_TOP_AXI_PROT_EN_2_STA), + }, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8188_POWER_DOMAIN_CAM_SUBA] = { + .name = "cam_suba", + .sta_mask = BIT(25), + .ctl_offs = 0x398, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = BIT(8), + .sram_pdn_ack_bits = BIT(12), + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8188_POWER_DOMAIN_CAM_SUBB] = { + .name = "cam_subb", + .sta_mask = BIT(26), + .ctl_offs = 0x39C, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = BIT(8), + .sram_pdn_ack_bits = BIT(12), + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, +}; + +static const struct scpsys_soc_data mt8188_scpsys_data = { + .domains_data = scpsys_domain_data_mt8188, + .num_domains = ARRAY_SIZE(scpsys_domain_data_mt8188), +}; + +#endif /* __SOC_MEDIATEK_MT8188_PM_DOMAINS_H */ diff --git a/drivers/soc/mediatek/mt8195-mmsys.h b/drivers/soc/mediatek/mt8195-mmsys.h index abfe94a30248..a6652ae63431 100644 --- a/drivers/soc/mediatek/mt8195-mmsys.h +++ b/drivers/soc/mediatek/mt8195-mmsys.h @@ -75,6 +75,77 @@ #define MT8195_SOUT_DSC_WRAP1_OUT_TO_SINA_VIRTUAL0 (2 << 16) #define MT8195_SOUT_DSC_WRAP1_OUT_TO_VPP_MERGE (3 << 16) +#define MT8195_VDO1_SW0_RST_B 0x1d0 +#define MT8195_VDO1_MERGE0_ASYNC_CFG_WD 0xe30 +#define MT8195_VDO1_HDRBE_ASYNC_CFG_WD 0xe70 +#define MT8195_VDO1_HDR_TOP_CFG 0xd00 +#define MT8195_VDO1_MIXER_IN1_ALPHA 0xd30 +#define MT8195_VDO1_MIXER_IN1_PAD 0xd40 + +#define MT8195_VDO1_VPP_MERGE0_P0_SEL_IN 0xf04 +#define MT8195_VPP_MERGE0_P0_SEL_IN_FROM_MDP_RDMA0 1 + +#define MT8195_VDO1_VPP_MERGE0_P1_SEL_IN 0xf08 +#define MT8195_VPP_MERGE0_P1_SEL_IN_FROM_MDP_RDMA1 1 + +#define MT8195_VDO1_DISP_DPI1_SEL_IN 0xf10 +#define MT8195_DISP_DPI1_SEL_IN_FROM_VPP_MERGE4_MOUT 0 + +#define MT8195_VDO1_DISP_DP_INTF0_SEL_IN 0xf14 +#define MT8195_DISP_DP_INTF0_SEL_IN_FROM_VPP_MERGE4_MOUT 0 + +#define MT8195_VDO1_MERGE4_SOUT_SEL 0xf18 +#define MT8195_MERGE4_SOUT_TO_DPI1_SEL 2 +#define MT8195_MERGE4_SOUT_TO_DP_INTF0_SEL 3 + +#define MT8195_VDO1_MIXER_IN1_SEL_IN 0xf24 +#define MT8195_MIXER_IN1_SEL_IN_FROM_MERGE0_ASYNC_SOUT 1 + +#define MT8195_VDO1_MIXER_IN2_SEL_IN 0xf28 +#define MT8195_MIXER_IN2_SEL_IN_FROM_MERGE1_ASYNC_SOUT 1 + +#define MT8195_VDO1_MIXER_IN3_SEL_IN 0xf2c +#define 
MT8195_MIXER_IN3_SEL_IN_FROM_MERGE2_ASYNC_SOUT 1 + +#define MT8195_VDO1_MIXER_IN4_SEL_IN 0xf30 +#define MT8195_MIXER_IN4_SEL_IN_FROM_MERGE3_ASYNC_SOUT 1 + +#define MT8195_VDO1_MIXER_OUT_SOUT_SEL 0xf34 +#define MT8195_MIXER_SOUT_TO_MERGE4_ASYNC_SEL 1 + +#define MT8195_VDO1_VPP_MERGE1_P0_SEL_IN 0xf3c +#define MT8195_VPP_MERGE1_P0_SEL_IN_FROM_MDP_RDMA2 1 + +#define MT8195_VDO1_MERGE0_ASYNC_SOUT_SEL 0xf40 +#define MT8195_SOUT_TO_MIXER_IN1_SEL 1 + +#define MT8195_VDO1_MERGE1_ASYNC_SOUT_SEL 0xf44 +#define MT8195_SOUT_TO_MIXER_IN2_SEL 1 + +#define MT8195_VDO1_MERGE2_ASYNC_SOUT_SEL 0xf48 +#define MT8195_SOUT_TO_MIXER_IN3_SEL 1 + +#define MT8195_VDO1_MERGE3_ASYNC_SOUT_SEL 0xf4c +#define MT8195_SOUT_TO_MIXER_IN4_SEL 1 + +#define MT8195_VDO1_MERGE4_ASYNC_SEL_IN 0xf50 +#define MT8195_MERGE4_ASYNC_SEL_IN_FROM_MIXER_OUT_SOUT 1 + +#define MT8195_VDO1_MIXER_IN1_SOUT_SEL 0xf58 +#define MT8195_MIXER_IN1_SOUT_TO_DISP_MIXER 0 + +#define MT8195_VDO1_MIXER_IN2_SOUT_SEL 0xf5c +#define MT8195_MIXER_IN2_SOUT_TO_DISP_MIXER 0 + +#define MT8195_VDO1_MIXER_IN3_SOUT_SEL 0xf60 +#define MT8195_MIXER_IN3_SOUT_TO_DISP_MIXER 0 + +#define MT8195_VDO1_MIXER_IN4_SOUT_SEL 0xf64 +#define MT8195_MIXER_IN4_SOUT_TO_DISP_MIXER 0 + +#define MT8195_VDO1_MIXER_SOUT_SEL_IN 0xf68 +#define MT8195_MIXER_SOUT_SEL_IN_FROM_DISP_MIXER 0 + static const struct mtk_mmsys_routes mmsys_mt8195_routing_table[] = { { DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0, @@ -367,4 +438,79 @@ static const struct mtk_mmsys_routes mmsys_mt8195_routing_table[] = { } }; +static const struct mtk_mmsys_routes mmsys_mt8195_vdo1_routing_table[] = { + { + DDP_COMPONENT_MDP_RDMA0, DDP_COMPONENT_MERGE1, + MT8195_VDO1_VPP_MERGE0_P0_SEL_IN, GENMASK(0, 0), + MT8195_VPP_MERGE0_P0_SEL_IN_FROM_MDP_RDMA0 + }, { + DDP_COMPONENT_MDP_RDMA1, DDP_COMPONENT_MERGE1, + MT8195_VDO1_VPP_MERGE0_P1_SEL_IN, GENMASK(0, 0), + MT8195_VPP_MERGE0_P1_SEL_IN_FROM_MDP_RDMA1 + }, { + DDP_COMPONENT_MDP_RDMA2, DDP_COMPONENT_MERGE2, + MT8195_VDO1_VPP_MERGE1_P0_SEL_IN, GENMASK(0, 0), + MT8195_VPP_MERGE1_P0_SEL_IN_FROM_MDP_RDMA2 + }, { + DDP_COMPONENT_MERGE1, DDP_COMPONENT_ETHDR_MIXER, + MT8195_VDO1_MERGE0_ASYNC_SOUT_SEL, GENMASK(1, 0), + MT8195_SOUT_TO_MIXER_IN1_SEL + }, { + DDP_COMPONENT_MERGE2, DDP_COMPONENT_ETHDR_MIXER, + MT8195_VDO1_MERGE1_ASYNC_SOUT_SEL, GENMASK(1, 0), + MT8195_SOUT_TO_MIXER_IN2_SEL + }, { + DDP_COMPONENT_MERGE3, DDP_COMPONENT_ETHDR_MIXER, + MT8195_VDO1_MERGE2_ASYNC_SOUT_SEL, GENMASK(1, 0), + MT8195_SOUT_TO_MIXER_IN3_SEL + }, { + DDP_COMPONENT_MERGE4, DDP_COMPONENT_ETHDR_MIXER, + MT8195_VDO1_MERGE3_ASYNC_SOUT_SEL, GENMASK(1, 0), + MT8195_SOUT_TO_MIXER_IN4_SEL + }, { + DDP_COMPONENT_ETHDR_MIXER, DDP_COMPONENT_MERGE5, + MT8195_VDO1_MIXER_OUT_SOUT_SEL, GENMASK(0, 0), + MT8195_MIXER_SOUT_TO_MERGE4_ASYNC_SEL + }, { + DDP_COMPONENT_MERGE1, DDP_COMPONENT_ETHDR_MIXER, + MT8195_VDO1_MIXER_IN1_SEL_IN, GENMASK(0, 0), + MT8195_MIXER_IN1_SEL_IN_FROM_MERGE0_ASYNC_SOUT + }, { + DDP_COMPONENT_MERGE2, DDP_COMPONENT_ETHDR_MIXER, + MT8195_VDO1_MIXER_IN2_SEL_IN, GENMASK(0, 0), + MT8195_MIXER_IN2_SEL_IN_FROM_MERGE1_ASYNC_SOUT + }, { + DDP_COMPONENT_MERGE3, DDP_COMPONENT_ETHDR_MIXER, + MT8195_VDO1_MIXER_IN3_SEL_IN, GENMASK(0, 0), + MT8195_MIXER_IN3_SEL_IN_FROM_MERGE2_ASYNC_SOUT + }, { + DDP_COMPONENT_MERGE4, DDP_COMPONENT_ETHDR_MIXER, + MT8195_VDO1_MIXER_IN4_SEL_IN, GENMASK(0, 0), + MT8195_MIXER_IN4_SEL_IN_FROM_MERGE3_ASYNC_SOUT + }, { + DDP_COMPONENT_ETHDR_MIXER, DDP_COMPONENT_MERGE5, + MT8195_VDO1_MIXER_SOUT_SEL_IN, GENMASK(2, 0), + MT8195_MIXER_SOUT_SEL_IN_FROM_DISP_MIXER + }, { + 
DDP_COMPONENT_ETHDR_MIXER, DDP_COMPONENT_MERGE5, + MT8195_VDO1_MERGE4_ASYNC_SEL_IN, GENMASK(2, 0), + MT8195_MERGE4_ASYNC_SEL_IN_FROM_MIXER_OUT_SOUT + }, { + DDP_COMPONENT_MERGE5, DDP_COMPONENT_DPI1, + MT8195_VDO1_DISP_DPI1_SEL_IN, GENMASK(1, 0), + MT8195_DISP_DPI1_SEL_IN_FROM_VPP_MERGE4_MOUT + }, { + DDP_COMPONENT_MERGE5, DDP_COMPONENT_DPI1, + MT8195_VDO1_MERGE4_SOUT_SEL, GENMASK(1, 0), + MT8195_MERGE4_SOUT_TO_DPI1_SEL + }, { + DDP_COMPONENT_MERGE5, DDP_COMPONENT_DP_INTF1, + MT8195_VDO1_DISP_DP_INTF0_SEL_IN, GENMASK(1, 0), + MT8195_DISP_DP_INTF0_SEL_IN_FROM_VPP_MERGE4_MOUT + }, { + DDP_COMPONENT_MERGE5, DDP_COMPONENT_DP_INTF1, + MT8195_VDO1_MERGE4_SOUT_SEL, GENMASK(1, 0), + MT8195_MERGE4_SOUT_TO_DP_INTF0_SEL + } +}; #endif /* __SOC_MEDIATEK_MT8195_MMSYS_H */ diff --git a/drivers/soc/mediatek/mtk-devapc.c b/drivers/soc/mediatek/mtk-devapc.c index fc13334db1b1..bad139cb117e 100644 --- a/drivers/soc/mediatek/mtk-devapc.c +++ b/drivers/soc/mediatek/mtk-devapc.c @@ -276,19 +276,14 @@ static int mtk_devapc_probe(struct platform_device *pdev) if (!devapc_irq) return -EINVAL; - ctx->infra_clk = devm_clk_get(&pdev->dev, "devapc-infra-clock"); + ctx->infra_clk = devm_clk_get_enabled(&pdev->dev, "devapc-infra-clock"); if (IS_ERR(ctx->infra_clk)) return -EINVAL; - if (clk_prepare_enable(ctx->infra_clk)) - return -EINVAL; - ret = devm_request_irq(&pdev->dev, devapc_irq, devapc_violation_irq, IRQF_TRIGGER_NONE, "devapc", ctx); - if (ret) { - clk_disable_unprepare(ctx->infra_clk); + if (ret) return ret; - } platform_set_drvdata(pdev, ctx); @@ -303,8 +298,6 @@ static int mtk_devapc_remove(struct platform_device *pdev) stop_devapc(ctx); - clk_disable_unprepare(ctx->infra_clk); - return 0; } diff --git a/drivers/soc/mediatek/mtk-mmsys.c b/drivers/soc/mediatek/mtk-mmsys.c index f3431448e843..eb4c7e57896c 100644 --- a/drivers/soc/mediatek/mtk-mmsys.c +++ b/drivers/soc/mediatek/mtk-mmsys.c @@ -7,6 +7,7 @@ #include <linux/delay.h> #include <linux/device.h> #include <linux/io.h> +#include <linux/module.h> #include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/reset-controller.h> @@ -16,10 +17,13 @@ #include "mt8167-mmsys.h" #include "mt8183-mmsys.h" #include "mt8186-mmsys.h" +#include "mt8188-mmsys.h" #include "mt8192-mmsys.h" #include "mt8195-mmsys.h" #include "mt8365-mmsys.h" +#define MMSYS_SW_RESET_PER_REG 32 + static const struct mtk_mmsys_driver_data mt2701_mmsys_driver_data = { .clk_driver = "clk-mt2701-mm", .routes = mmsys_default_routing_table, @@ -51,6 +55,7 @@ static const struct mtk_mmsys_driver_data mt8173_mmsys_driver_data = { .routes = mmsys_default_routing_table, .num_routes = ARRAY_SIZE(mmsys_default_routing_table), .sw0_rst_offset = MT8183_MMSYS_SW0_RST_B, + .num_resets = 32, }; static const struct mtk_mmsys_driver_data mt8183_mmsys_driver_data = { @@ -58,6 +63,7 @@ static const struct mtk_mmsys_driver_data mt8183_mmsys_driver_data = { .routes = mmsys_mt8183_routing_table, .num_routes = ARRAY_SIZE(mmsys_mt8183_routing_table), .sw0_rst_offset = MT8183_MMSYS_SW0_RST_B, + .num_resets = 32, }; static const struct mtk_mmsys_driver_data mt8186_mmsys_driver_data = { @@ -65,6 +71,13 @@ static const struct mtk_mmsys_driver_data mt8186_mmsys_driver_data = { .routes = mmsys_mt8186_routing_table, .num_routes = ARRAY_SIZE(mmsys_mt8186_routing_table), .sw0_rst_offset = MT8186_MMSYS_SW0_RST_B, + .num_resets = 32, +}; + +static const struct mtk_mmsys_driver_data mt8188_vdosys0_driver_data = { + .clk_driver = "clk-mt8188-vdo0", + .routes = mmsys_mt8188_routing_table, + .num_routes = 
ARRAY_SIZE(mmsys_mt8188_routing_table), }; static const struct mtk_mmsys_driver_data mt8192_mmsys_driver_data = { @@ -72,6 +85,7 @@ static const struct mtk_mmsys_driver_data mt8192_mmsys_driver_data = { .routes = mmsys_mt8192_routing_table, .num_routes = ARRAY_SIZE(mmsys_mt8192_routing_table), .sw0_rst_offset = MT8186_MMSYS_SW0_RST_B, + .num_resets = 32, }; static const struct mtk_mmsys_driver_data mt8195_vdosys0_driver_data = { @@ -80,6 +94,24 @@ static const struct mtk_mmsys_driver_data mt8195_vdosys0_driver_data = { .num_routes = ARRAY_SIZE(mmsys_mt8195_routing_table), }; +static const struct mtk_mmsys_driver_data mt8195_vdosys1_driver_data = { + .clk_driver = "clk-mt8195-vdo1", + .routes = mmsys_mt8195_vdo1_routing_table, + .num_routes = ARRAY_SIZE(mmsys_mt8195_vdo1_routing_table), + .sw0_rst_offset = MT8195_VDO1_SW0_RST_B, + .num_resets = 64, +}; + +static const struct mtk_mmsys_driver_data mt8195_vppsys0_driver_data = { + .clk_driver = "clk-mt8195-vpp0", + .is_vppsys = true, +}; + +static const struct mtk_mmsys_driver_data mt8195_vppsys1_driver_data = { + .clk_driver = "clk-mt8195-vpp1", + .is_vppsys = true, +}; + static const struct mtk_mmsys_driver_data mt8365_mmsys_driver_data = { .clk_driver = "clk-mt8365-mm", .routes = mt8365_mmsys_routing_table, @@ -91,24 +123,44 @@ struct mtk_mmsys { const struct mtk_mmsys_driver_data *data; spinlock_t lock; /* protects mmsys_sw_rst_b reg */ struct reset_controller_dev rcdev; + struct cmdq_client_reg cmdq_base; }; +static void mtk_mmsys_update_bits(struct mtk_mmsys *mmsys, u32 offset, u32 mask, u32 val, + struct cmdq_pkt *cmdq_pkt) +{ + u32 tmp; + +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + if (cmdq_pkt) { + if (mmsys->cmdq_base.size == 0) { + pr_err("mmsys lose gce property, failed to update mmsys bits with cmdq"); + return; + } + cmdq_pkt_write_mask(cmdq_pkt, mmsys->cmdq_base.subsys, + mmsys->cmdq_base.offset + offset, val, + mask); + return; + } +#endif + + tmp = readl_relaxed(mmsys->regs + offset); + tmp = (tmp & ~mask) | (val & mask); + writel_relaxed(tmp, mmsys->regs + offset); +} + void mtk_mmsys_ddp_connect(struct device *dev, enum mtk_ddp_comp_id cur, enum mtk_ddp_comp_id next) { struct mtk_mmsys *mmsys = dev_get_drvdata(dev); const struct mtk_mmsys_routes *routes = mmsys->data->routes; - u32 reg; int i; for (i = 0; i < mmsys->data->num_routes; i++) - if (cur == routes[i].from_comp && next == routes[i].to_comp) { - reg = readl_relaxed(mmsys->regs + routes[i].addr); - reg &= ~routes[i].mask; - reg |= routes[i].val; - writel_relaxed(reg, mmsys->regs + routes[i].addr); - } + if (cur == routes[i].from_comp && next == routes[i].to_comp) + mtk_mmsys_update_bits(mmsys, routes[i].addr, routes[i].mask, + routes[i].val, NULL); } EXPORT_SYMBOL_GPL(mtk_mmsys_ddp_connect); @@ -118,26 +170,51 @@ void mtk_mmsys_ddp_disconnect(struct device *dev, { struct mtk_mmsys *mmsys = dev_get_drvdata(dev); const struct mtk_mmsys_routes *routes = mmsys->data->routes; - u32 reg; int i; for (i = 0; i < mmsys->data->num_routes; i++) - if (cur == routes[i].from_comp && next == routes[i].to_comp) { - reg = readl_relaxed(mmsys->regs + routes[i].addr); - reg &= ~routes[i].mask; - writel_relaxed(reg, mmsys->regs + routes[i].addr); - } + if (cur == routes[i].from_comp && next == routes[i].to_comp) + mtk_mmsys_update_bits(mmsys, routes[i].addr, routes[i].mask, 0, NULL); } EXPORT_SYMBOL_GPL(mtk_mmsys_ddp_disconnect); -static void mtk_mmsys_update_bits(struct mtk_mmsys *mmsys, u32 offset, u32 mask, u32 val) +void mtk_mmsys_merge_async_config(struct device *dev, int idx, int width, 
int height, + struct cmdq_pkt *cmdq_pkt) { - u32 tmp; + mtk_mmsys_update_bits(dev_get_drvdata(dev), MT8195_VDO1_MERGE0_ASYNC_CFG_WD + 0x10 * idx, + ~0, height << 16 | width, cmdq_pkt); +} +EXPORT_SYMBOL_GPL(mtk_mmsys_merge_async_config); - tmp = readl_relaxed(mmsys->regs + offset); - tmp = (tmp & ~mask) | val; - writel_relaxed(tmp, mmsys->regs + offset); +void mtk_mmsys_hdr_config(struct device *dev, int be_width, int be_height, + struct cmdq_pkt *cmdq_pkt) +{ + mtk_mmsys_update_bits(dev_get_drvdata(dev), MT8195_VDO1_HDRBE_ASYNC_CFG_WD, ~0, + be_height << 16 | be_width, cmdq_pkt); } +EXPORT_SYMBOL_GPL(mtk_mmsys_hdr_config); + +void mtk_mmsys_mixer_in_config(struct device *dev, int idx, bool alpha_sel, u16 alpha, + u8 mode, u32 biwidth, struct cmdq_pkt *cmdq_pkt) +{ + struct mtk_mmsys *mmsys = dev_get_drvdata(dev); + + mtk_mmsys_update_bits(mmsys, MT8195_VDO1_MIXER_IN1_ALPHA + (idx - 1) * 4, ~0, + alpha << 16 | alpha, cmdq_pkt); + mtk_mmsys_update_bits(mmsys, MT8195_VDO1_HDR_TOP_CFG, BIT(19 + idx), + alpha_sel << (19 + idx), cmdq_pkt); + mtk_mmsys_update_bits(mmsys, MT8195_VDO1_MIXER_IN1_PAD + (idx - 1) * 4, + GENMASK(31, 16) | GENMASK(1, 0), biwidth << 16 | mode, cmdq_pkt); +} +EXPORT_SYMBOL_GPL(mtk_mmsys_mixer_in_config); + +void mtk_mmsys_mixer_in_channel_swap(struct device *dev, int idx, bool channel_swap, + struct cmdq_pkt *cmdq_pkt) +{ + mtk_mmsys_update_bits(dev_get_drvdata(dev), MT8195_VDO1_MIXER_IN1_PAD + (idx - 1) * 4, + BIT(4), channel_swap << 4, cmdq_pkt); +} +EXPORT_SYMBOL_GPL(mtk_mmsys_mixer_in_channel_swap); void mtk_mmsys_ddp_dpi_fmt_config(struct device *dev, u32 val) { @@ -146,20 +223,20 @@ void mtk_mmsys_ddp_dpi_fmt_config(struct device *dev, u32 val) switch (val) { case MTK_DPI_RGB888_SDR_CON: mtk_mmsys_update_bits(mmsys, MT8186_MMSYS_DPI_OUTPUT_FORMAT, - MT8186_DPI_FORMAT_MASK, MT8186_DPI_RGB888_SDR_CON); + MT8186_DPI_FORMAT_MASK, MT8186_DPI_RGB888_SDR_CON, NULL); break; case MTK_DPI_RGB565_SDR_CON: mtk_mmsys_update_bits(mmsys, MT8186_MMSYS_DPI_OUTPUT_FORMAT, - MT8186_DPI_FORMAT_MASK, MT8186_DPI_RGB565_SDR_CON); + MT8186_DPI_FORMAT_MASK, MT8186_DPI_RGB565_SDR_CON, NULL); break; case MTK_DPI_RGB565_DDR_CON: mtk_mmsys_update_bits(mmsys, MT8186_MMSYS_DPI_OUTPUT_FORMAT, - MT8186_DPI_FORMAT_MASK, MT8186_DPI_RGB565_DDR_CON); + MT8186_DPI_FORMAT_MASK, MT8186_DPI_RGB565_DDR_CON, NULL); break; case MTK_DPI_RGB888_DDR_CON: default: mtk_mmsys_update_bits(mmsys, MT8186_MMSYS_DPI_OUTPUT_FORMAT, - MT8186_DPI_FORMAT_MASK, MT8186_DPI_RGB888_DDR_CON); + MT8186_DPI_FORMAT_MASK, MT8186_DPI_RGB888_DDR_CON, NULL); break; } } @@ -170,18 +247,19 @@ static int mtk_mmsys_reset_update(struct reset_controller_dev *rcdev, unsigned l { struct mtk_mmsys *mmsys = container_of(rcdev, struct mtk_mmsys, rcdev); unsigned long flags; + u32 offset; u32 reg; - spin_lock_irqsave(&mmsys->lock, flags); + offset = (id / MMSYS_SW_RESET_PER_REG) * sizeof(u32); + id = id % MMSYS_SW_RESET_PER_REG; + reg = mmsys->data->sw0_rst_offset + offset; - reg = readl_relaxed(mmsys->regs + mmsys->data->sw0_rst_offset); + spin_lock_irqsave(&mmsys->lock, flags); if (assert) - reg &= ~BIT(id); + mtk_mmsys_update_bits(mmsys, reg, BIT(id), 0, NULL); else - reg |= BIT(id); - - writel_relaxed(reg, mmsys->regs + mmsys->data->sw0_rst_offset); + mtk_mmsys_update_bits(mmsys, reg, BIT(id), BIT(id), NULL); spin_unlock_irqrestore(&mmsys->lock, flags); @@ -236,19 +314,28 @@ static int mtk_mmsys_probe(struct platform_device *pdev) return ret; } - spin_lock_init(&mmsys->lock); + mmsys->data = of_device_get_match_data(&pdev->dev); - 
mmsys->rcdev.owner = THIS_MODULE; - mmsys->rcdev.nr_resets = 32; - mmsys->rcdev.ops = &mtk_mmsys_reset_ops; - mmsys->rcdev.of_node = pdev->dev.of_node; - ret = devm_reset_controller_register(&pdev->dev, &mmsys->rcdev); - if (ret) { - dev_err(&pdev->dev, "Couldn't register mmsys reset controller: %d\n", ret); - return ret; + if (mmsys->data->num_resets > 0) { + spin_lock_init(&mmsys->lock); + + mmsys->rcdev.owner = THIS_MODULE; + mmsys->rcdev.nr_resets = mmsys->data->num_resets; + mmsys->rcdev.ops = &mtk_mmsys_reset_ops; + mmsys->rcdev.of_node = pdev->dev.of_node; + ret = devm_reset_controller_register(&pdev->dev, &mmsys->rcdev); + if (ret) { + dev_err(&pdev->dev, "Couldn't register mmsys reset controller: %d\n", ret); + return ret; + } } - mmsys->data = of_device_get_match_data(&pdev->dev); +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + ret = cmdq_dev_get_client_reg(dev, &mmsys->cmdq_base, 0); + if (ret) + dev_dbg(dev, "No mediatek,gce-client-reg!\n"); +#endif + platform_set_drvdata(pdev, mmsys); clks = platform_device_register_data(&pdev->dev, mmsys->data->clk_driver, @@ -256,6 +343,9 @@ static int mtk_mmsys_probe(struct platform_device *pdev) if (IS_ERR(clks)) return PTR_ERR(clks); + if (mmsys->data->is_vppsys) + goto out_probe_done; + drm = platform_device_register_data(&pdev->dev, "mediatek-drm", PLATFORM_DEVID_AUTO, NULL, 0); if (IS_ERR(drm)) { @@ -263,6 +353,7 @@ static int mtk_mmsys_probe(struct platform_device *pdev) return PTR_ERR(drm); } +out_probe_done: return 0; } @@ -300,6 +391,10 @@ static const struct of_device_id of_match_mtk_mmsys[] = { .data = &mt8186_mmsys_driver_data, }, { + .compatible = "mediatek,mt8188-vdosys0", + .data = &mt8188_vdosys0_driver_data, + }, + { .compatible = "mediatek,mt8192-mmsys", .data = &mt8192_mmsys_driver_data, }, @@ -312,6 +407,18 @@ static const struct of_device_id of_match_mtk_mmsys[] = { .data = &mt8195_vdosys0_driver_data, }, { + .compatible = "mediatek,mt8195-vdosys1", + .data = &mt8195_vdosys1_driver_data, + }, + { + .compatible = "mediatek,mt8195-vppsys0", + .data = &mt8195_vppsys0_driver_data, + }, + { + .compatible = "mediatek,mt8195-vppsys1", + .data = &mt8195_vppsys1_driver_data, + }, + { .compatible = "mediatek,mt8365-mmsys", .data = &mt8365_mmsys_driver_data, }, @@ -326,4 +433,19 @@ static struct platform_driver mtk_mmsys_drv = { .probe = mtk_mmsys_probe, }; -builtin_platform_driver(mtk_mmsys_drv); +static int __init mtk_mmsys_init(void) +{ + return platform_driver_register(&mtk_mmsys_drv); +} + +static void __exit mtk_mmsys_exit(void) +{ + platform_driver_unregister(&mtk_mmsys_drv); +} + +module_init(mtk_mmsys_init); +module_exit(mtk_mmsys_exit); + +MODULE_AUTHOR("Yongqiang Niu <yongqiang.niu@mediatek.com>"); +MODULE_DESCRIPTION("MediaTek SoC MMSYS driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/soc/mediatek/mtk-mmsys.h b/drivers/soc/mediatek/mtk-mmsys.h index 77f37f8c715b..56f8cc3a97b7 100644 --- a/drivers/soc/mediatek/mtk-mmsys.h +++ b/drivers/soc/mediatek/mtk-mmsys.h @@ -91,6 +91,8 @@ struct mtk_mmsys_driver_data { const struct mtk_mmsys_routes *routes; const unsigned int num_routes; const u16 sw0_rst_offset; + const u32 num_resets; + const bool is_vppsys; }; /* diff --git a/drivers/soc/mediatek/mtk-mutex.c b/drivers/soc/mediatek/mtk-mutex.c index c1a33d52038e..c5b1b42303ac 100644 --- a/drivers/soc/mediatek/mtk-mutex.c +++ b/drivers/soc/mediatek/mtk-mutex.c @@ -116,6 +116,21 @@ #define MT8173_MUTEX_MOD_DISP_PWM1 24 #define MT8173_MUTEX_MOD_DISP_OD 25 +#define MT8188_MUTEX_MOD_DISP_OVL0 0 +#define MT8188_MUTEX_MOD_DISP_WDMA0 1 
+#define MT8188_MUTEX_MOD_DISP_RDMA0 2 +#define MT8188_MUTEX_MOD_DISP_COLOR0 3 +#define MT8188_MUTEX_MOD_DISP_CCORR0 4 +#define MT8188_MUTEX_MOD_DISP_AAL0 5 +#define MT8188_MUTEX_MOD_DISP_GAMMA0 6 +#define MT8188_MUTEX_MOD_DISP_DITHER0 7 +#define MT8188_MUTEX_MOD_DISP_DSI0 8 +#define MT8188_MUTEX_MOD_DISP_DSC_WRAP0_CORE0 9 +#define MT8188_MUTEX_MOD_DISP_VPP_MERGE 20 +#define MT8188_MUTEX_MOD_DISP_DP_INTF0 21 +#define MT8188_MUTEX_MOD_DISP_POSTMASK0 24 +#define MT8188_MUTEX_MOD2_DISP_PWM0 33 + #define MT8195_MUTEX_MOD_DISP_OVL0 0 #define MT8195_MUTEX_MOD_DISP_WDMA0 1 #define MT8195_MUTEX_MOD_DISP_RDMA0 2 @@ -130,6 +145,24 @@ #define MT8195_MUTEX_MOD_DISP_DP_INTF0 21 #define MT8195_MUTEX_MOD_DISP_PWM0 27 +#define MT8195_MUTEX_MOD_DISP1_MDP_RDMA0 0 +#define MT8195_MUTEX_MOD_DISP1_MDP_RDMA1 1 +#define MT8195_MUTEX_MOD_DISP1_MDP_RDMA2 2 +#define MT8195_MUTEX_MOD_DISP1_MDP_RDMA3 3 +#define MT8195_MUTEX_MOD_DISP1_MDP_RDMA4 4 +#define MT8195_MUTEX_MOD_DISP1_MDP_RDMA5 5 +#define MT8195_MUTEX_MOD_DISP1_MDP_RDMA6 6 +#define MT8195_MUTEX_MOD_DISP1_MDP_RDMA7 7 +#define MT8195_MUTEX_MOD_DISP1_VPP_MERGE0 8 +#define MT8195_MUTEX_MOD_DISP1_VPP_MERGE1 9 +#define MT8195_MUTEX_MOD_DISP1_VPP_MERGE2 10 +#define MT8195_MUTEX_MOD_DISP1_VPP_MERGE3 11 +#define MT8195_MUTEX_MOD_DISP1_VPP_MERGE4 12 +#define MT8195_MUTEX_MOD_DISP1_DISP_MIXER 18 +#define MT8195_MUTEX_MOD_DISP1_DPI0 25 +#define MT8195_MUTEX_MOD_DISP1_DPI1 26 +#define MT8195_MUTEX_MOD_DISP1_DP_INTF0 27 + #define MT8365_MUTEX_MOD_DISP_OVL0 7 #define MT8365_MUTEX_MOD_DISP_OVL0_2L 8 #define MT8365_MUTEX_MOD_DISP_RDMA0 9 @@ -180,6 +213,8 @@ #define MT8167_MUTEX_SOF_DPI1 3 #define MT8183_MUTEX_SOF_DSI0 1 #define MT8183_MUTEX_SOF_DPI0 2 +#define MT8188_MUTEX_SOF_DSI0 1 +#define MT8188_MUTEX_SOF_DP_INTF0 3 #define MT8195_MUTEX_SOF_DSI0 1 #define MT8195_MUTEX_SOF_DSI1 2 #define MT8195_MUTEX_SOF_DP_INTF0 3 @@ -189,6 +224,8 @@ #define MT8183_MUTEX_EOF_DSI0 (MT8183_MUTEX_SOF_DSI0 << 6) #define MT8183_MUTEX_EOF_DPI0 (MT8183_MUTEX_SOF_DPI0 << 6) +#define MT8188_MUTEX_EOF_DSI0 (MT8188_MUTEX_SOF_DSI0 << 7) +#define MT8188_MUTEX_EOF_DP_INTF0 (MT8188_MUTEX_SOF_DP_INTF0 << 7) #define MT8195_MUTEX_EOF_DSI0 (MT8195_MUTEX_SOF_DSI0 << 7) #define MT8195_MUTEX_EOF_DSI1 (MT8195_MUTEX_SOF_DSI1 << 7) #define MT8195_MUTEX_EOF_DP_INTF0 (MT8195_MUTEX_SOF_DP_INTF0 << 7) @@ -344,6 +381,23 @@ static const unsigned int mt8186_mdp_mutex_table_mod[MUTEX_MOD_IDX_MAX] = { [MUTEX_MOD_IDX_MDP_COLOR0] = MT8186_MUTEX_MOD_MDP_COLOR0, }; +static const unsigned int mt8188_mutex_mod[DDP_COMPONENT_ID_MAX] = { + [DDP_COMPONENT_OVL0] = MT8188_MUTEX_MOD_DISP_OVL0, + [DDP_COMPONENT_WDMA0] = MT8188_MUTEX_MOD_DISP_WDMA0, + [DDP_COMPONENT_RDMA0] = MT8188_MUTEX_MOD_DISP_RDMA0, + [DDP_COMPONENT_COLOR0] = MT8188_MUTEX_MOD_DISP_COLOR0, + [DDP_COMPONENT_CCORR] = MT8188_MUTEX_MOD_DISP_CCORR0, + [DDP_COMPONENT_AAL0] = MT8188_MUTEX_MOD_DISP_AAL0, + [DDP_COMPONENT_GAMMA] = MT8188_MUTEX_MOD_DISP_GAMMA0, + [DDP_COMPONENT_POSTMASK0] = MT8188_MUTEX_MOD_DISP_POSTMASK0, + [DDP_COMPONENT_DITHER0] = MT8188_MUTEX_MOD_DISP_DITHER0, + [DDP_COMPONENT_MERGE0] = MT8188_MUTEX_MOD_DISP_VPP_MERGE, + [DDP_COMPONENT_DSC0] = MT8188_MUTEX_MOD_DISP_DSC_WRAP0_CORE0, + [DDP_COMPONENT_DSI0] = MT8188_MUTEX_MOD_DISP_DSI0, + [DDP_COMPONENT_PWM0] = MT8188_MUTEX_MOD2_DISP_PWM0, + [DDP_COMPONENT_DP_INTF0] = MT8188_MUTEX_MOD_DISP_DP_INTF0, +}; + static const unsigned int mt8192_mutex_mod[DDP_COMPONENT_ID_MAX] = { [DDP_COMPONENT_AAL0] = MT8192_MUTEX_MOD_DISP_AAL0, [DDP_COMPONENT_CCORR] = MT8192_MUTEX_MOD_DISP_CCORR0, @@ -372,6 +426,21 @@ static 
const unsigned int mt8195_mutex_mod[DDP_COMPONENT_ID_MAX] = { [DDP_COMPONENT_DSI0] = MT8195_MUTEX_MOD_DISP_DSI0, [DDP_COMPONENT_PWM0] = MT8195_MUTEX_MOD_DISP_PWM0, [DDP_COMPONENT_DP_INTF0] = MT8195_MUTEX_MOD_DISP_DP_INTF0, + [DDP_COMPONENT_MDP_RDMA0] = MT8195_MUTEX_MOD_DISP1_MDP_RDMA0, + [DDP_COMPONENT_MDP_RDMA1] = MT8195_MUTEX_MOD_DISP1_MDP_RDMA1, + [DDP_COMPONENT_MDP_RDMA2] = MT8195_MUTEX_MOD_DISP1_MDP_RDMA2, + [DDP_COMPONENT_MDP_RDMA3] = MT8195_MUTEX_MOD_DISP1_MDP_RDMA3, + [DDP_COMPONENT_MDP_RDMA4] = MT8195_MUTEX_MOD_DISP1_MDP_RDMA4, + [DDP_COMPONENT_MDP_RDMA5] = MT8195_MUTEX_MOD_DISP1_MDP_RDMA5, + [DDP_COMPONENT_MDP_RDMA6] = MT8195_MUTEX_MOD_DISP1_MDP_RDMA6, + [DDP_COMPONENT_MDP_RDMA7] = MT8195_MUTEX_MOD_DISP1_MDP_RDMA7, + [DDP_COMPONENT_MERGE1] = MT8195_MUTEX_MOD_DISP1_VPP_MERGE0, + [DDP_COMPONENT_MERGE2] = MT8195_MUTEX_MOD_DISP1_VPP_MERGE1, + [DDP_COMPONENT_MERGE3] = MT8195_MUTEX_MOD_DISP1_VPP_MERGE2, + [DDP_COMPONENT_MERGE4] = MT8195_MUTEX_MOD_DISP1_VPP_MERGE3, + [DDP_COMPONENT_ETHDR_MIXER] = MT8195_MUTEX_MOD_DISP1_DISP_MIXER, + [DDP_COMPONENT_MERGE5] = MT8195_MUTEX_MOD_DISP1_VPP_MERGE4, + [DDP_COMPONENT_DP_INTF1] = MT8195_MUTEX_MOD_DISP1_DP_INTF0, }; static const unsigned int mt8365_mutex_mod[DDP_COMPONENT_ID_MAX] = { @@ -435,6 +504,14 @@ static const unsigned int mt8186_mutex_sof[MUTEX_SOF_DSI3 + 1] = { * but also detect the error at end of frame(EAEOF) when EOF signal * arrives. */ +static const unsigned int mt8188_mutex_sof[DDP_MUTEX_SOF_MAX] = { + [MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE, + [MUTEX_SOF_DSI0] = + MT8188_MUTEX_SOF_DSI0 | MT8188_MUTEX_EOF_DSI0, + [MUTEX_SOF_DP_INTF0] = + MT8188_MUTEX_SOF_DP_INTF0 | MT8188_MUTEX_EOF_DP_INTF0, +}; + static const unsigned int mt8195_mutex_sof[DDP_MUTEX_SOF_MAX] = { [MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE, [MUTEX_SOF_DSI0] = MT8195_MUTEX_SOF_DSI0 | MT8195_MUTEX_EOF_DSI0, @@ -505,6 +582,13 @@ static const struct mtk_mutex_data mt8186_mutex_driver_data = { .mutex_sof_reg = MT8183_MUTEX0_SOF0, }; +static const struct mtk_mutex_data mt8188_mutex_driver_data = { + .mutex_mod = mt8188_mutex_mod, + .mutex_sof = mt8188_mutex_sof, + .mutex_mod_reg = MT8183_MUTEX0_MOD0, + .mutex_sof_reg = MT8183_MUTEX0_SOF0, +}; + static const struct mtk_mutex_data mt8192_mutex_driver_data = { .mutex_mod = mt8192_mutex_mod, .mutex_sof = mt8183_mutex_sof, @@ -602,6 +686,9 @@ void mtk_mutex_add_comp(struct mtk_mutex *mutex, case DDP_COMPONENT_DP_INTF0: sof_id = MUTEX_SOF_DP_INTF0; break; + case DDP_COMPONENT_DP_INTF1: + sof_id = MUTEX_SOF_DP_INTF1; + break; default: if (mtx->data->mutex_mod[id] < 32) { offset = DISP_REG_MUTEX_MOD(mtx->data->mutex_mod_reg, @@ -642,6 +729,7 @@ void mtk_mutex_remove_comp(struct mtk_mutex *mutex, case DDP_COMPONENT_DPI0: case DDP_COMPONENT_DPI1: case DDP_COMPONENT_DP_INTF0: + case DDP_COMPONENT_DP_INTF1: writel_relaxed(MUTEX_SOF_SINGLE_MODE, mtx->regs + DISP_REG_MUTEX_SOF(mtx->data->mutex_sof_reg, @@ -832,11 +920,6 @@ static int mtk_mutex_probe(struct platform_device *pdev) return 0; } -static int mtk_mutex_remove(struct platform_device *pdev) -{ - return 0; -} - static const struct of_device_id mutex_driver_dt_match[] = { { .compatible = "mediatek,mt2701-disp-mutex", .data = &mt2701_mutex_driver_data}, @@ -854,6 +937,8 @@ static const struct of_device_id mutex_driver_dt_match[] = { .data = &mt8186_mutex_driver_data}, { .compatible = "mediatek,mt8186-mdp3-mutex", .data = &mt8186_mdp_mutex_driver_data}, + { .compatible = "mediatek,mt8188-disp-mutex", + .data = &mt8188_mutex_driver_data}, { .compatible = 
"mediatek,mt8192-disp-mutex", .data = &mt8192_mutex_driver_data}, { .compatible = "mediatek,mt8195-disp-mutex", @@ -866,7 +951,6 @@ MODULE_DEVICE_TABLE(of, mutex_driver_dt_match); static struct platform_driver mtk_mutex_driver = { .probe = mtk_mutex_probe, - .remove = mtk_mutex_remove, .driver = { .name = "mediatek-mutex", .owner = THIS_MODULE, @@ -874,4 +958,19 @@ static struct platform_driver mtk_mutex_driver = { }, }; -builtin_platform_driver(mtk_mutex_driver); +static int __init mtk_mutex_init(void) +{ + return platform_driver_register(&mtk_mutex_driver); +} + +static void __exit mtk_mutex_exit(void) +{ + platform_driver_unregister(&mtk_mutex_driver); +} + +module_init(mtk_mutex_init); +module_exit(mtk_mutex_exit); + +MODULE_AUTHOR("Yongqiang Niu <yongqiang.niu@mediatek.com>"); +MODULE_DESCRIPTION("MediaTek SoC MUTEX driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/soc/mediatek/mtk-pm-domains.c b/drivers/soc/mediatek/mtk-pm-domains.c index 474b272f9b02..354249cc1b12 100644 --- a/drivers/soc/mediatek/mtk-pm-domains.c +++ b/drivers/soc/mediatek/mtk-pm-domains.c @@ -21,6 +21,7 @@ #include "mt8173-pm-domains.h" #include "mt8183-pm-domains.h" #include "mt8186-pm-domains.h" +#include "mt8188-pm-domains.h" #include "mt8192-pm-domains.h" #include "mt8195-pm-domains.h" @@ -218,6 +219,10 @@ static int scpsys_power_on(struct generic_pm_domain *genpd) if (ret) goto err_reg; + if (pd->data->ext_buck_iso_offs && MTK_SCPD_CAPS(pd, MTK_SCPD_EXT_BUCK_ISO)) + regmap_clear_bits(scpsys->base, pd->data->ext_buck_iso_offs, + pd->data->ext_buck_iso_mask); + /* subsys power on */ regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_ON_BIT); regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_ON_2ND_BIT); @@ -272,6 +277,10 @@ static int scpsys_power_off(struct generic_pm_domain *genpd) if (ret < 0) return ret; + if (pd->data->ext_buck_iso_offs && MTK_SCPD_CAPS(pd, MTK_SCPD_EXT_BUCK_ISO)) + regmap_set_bits(scpsys->base, pd->data->ext_buck_iso_offs, + pd->data->ext_buck_iso_mask); + clk_bulk_disable_unprepare(pd->num_subsys_clks, pd->subsys_clks); /* subsys power off */ @@ -580,6 +589,10 @@ static const struct of_device_id scpsys_of_match[] = { .data = &mt8186_scpsys_data, }, { + .compatible = "mediatek,mt8188-power-controller", + .data = &mt8188_scpsys_data, + }, + { .compatible = "mediatek,mt8192-power-controller", .data = &mt8192_scpsys_data, }, diff --git a/drivers/soc/mediatek/mtk-pm-domains.h b/drivers/soc/mediatek/mtk-pm-domains.h index 7d3c0c36316c..5ec53ee073c4 100644 --- a/drivers/soc/mediatek/mtk-pm-domains.h +++ b/drivers/soc/mediatek/mtk-pm-domains.h @@ -10,6 +10,7 @@ #define MTK_SCPD_DOMAIN_SUPPLY BIT(4) /* can't set MTK_SCPD_KEEP_DEFAULT_OFF at the same time */ #define MTK_SCPD_ALWAYS_ON BIT(5) +#define MTK_SCPD_EXT_BUCK_ISO BIT(6) #define MTK_SCPD_CAPS(_scpd, _x) ((_scpd)->data->caps & (_x)) #define SPM_VDE_PWR_CON 0x0210 @@ -81,6 +82,8 @@ struct scpsys_bus_prot_data { * @ctl_offs: The offset for main power control register. * @sram_pdn_bits: The mask for sram power control bits. * @sram_pdn_ack_bits: The mask for sram power control acked bits. + * @ext_buck_iso_offs: The offset for external buck isolation + * @ext_buck_iso_mask: The mask for external buck isolation * @caps: The flag for active wake-up action. 
* @bp_infracfg: bus protection for infracfg subsystem * @bp_smi: bus protection for smi subsystem @@ -91,6 +94,8 @@ struct scpsys_domain_data { int ctl_offs; u32 sram_pdn_bits; u32 sram_pdn_ack_bits; + int ext_buck_iso_offs; + u32 ext_buck_iso_mask; u8 caps; const struct scpsys_bus_prot_data bp_infracfg[SPM_MAX_BUS_PROT_DATA]; const struct scpsys_bus_prot_data bp_smi[SPM_MAX_BUS_PROT_DATA]; diff --git a/drivers/soc/mediatek/mtk-regulator-coupler.c b/drivers/soc/mediatek/mtk-regulator-coupler.c new file mode 100644 index 000000000000..ad2ed42aa697 --- /dev/null +++ b/drivers/soc/mediatek/mtk-regulator-coupler.c @@ -0,0 +1,159 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Voltage regulators coupler for MediaTek SoCs + * + * Copyright (C) 2022 Collabora, Ltd. + * Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com> + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/of.h> +#include <linux/regulator/coupler.h> +#include <linux/regulator/driver.h> +#include <linux/regulator/machine.h> +#include <linux/suspend.h> + +#define to_mediatek_coupler(x) container_of(x, struct mediatek_regulator_coupler, coupler) + +struct mediatek_regulator_coupler { + struct regulator_coupler coupler; + struct regulator_dev *vsram_rdev; +}; + +/* + * We currently support only couples of not more than two vregs and + * modify the vsram voltage only when changing voltage of vgpu. + * + * This function is limited to the GPU<->SRAM voltages relationships. + */ +static int mediatek_regulator_balance_voltage(struct regulator_coupler *coupler, + struct regulator_dev *rdev, + suspend_state_t state) +{ + struct mediatek_regulator_coupler *mrc = to_mediatek_coupler(coupler); + int max_spread = rdev->constraints->max_spread[0]; + int vsram_min_uV = mrc->vsram_rdev->constraints->min_uV; + int vsram_max_uV = mrc->vsram_rdev->constraints->max_uV; + int vsram_target_min_uV, vsram_target_max_uV; + int min_uV = 0; + int max_uV = INT_MAX; + int ret; + + /* + * If the target device is on, setting the SRAM voltage directly + * is not supported as it scales through its coupled supply voltage. + * + * An exception is made in case the use_count is zero: this means + * that this is the first time we power up the SRAM regulator, which + * implies that the target device has yet to perform initialization + * and setting a voltage at that time is harmless. + */ + if (rdev == mrc->vsram_rdev) { + if (rdev->use_count == 0) + return regulator_do_balance_voltage(rdev, state, true); + + return -EPERM; + } + + ret = regulator_check_consumers(rdev, &min_uV, &max_uV, state); + if (ret < 0) + return ret; + + if (min_uV == 0) { + ret = regulator_get_voltage_rdev(rdev); + if (ret < 0) + return ret; + min_uV = ret; + } + + ret = regulator_check_voltage(rdev, &min_uV, &max_uV); + if (ret < 0) + return ret; + + /* + * If we're asked to set a voltage less than VSRAM min_uV, set + * the minimum allowed voltage on VSRAM, as in this case it is + * safe to ignore the max_spread parameter. 
+ */ + vsram_target_min_uV = max(vsram_min_uV, min_uV + max_spread); + vsram_target_max_uV = min(vsram_max_uV, vsram_target_min_uV + max_spread); + + /* Make sure we're not out of range */ + vsram_target_min_uV = min(vsram_target_min_uV, vsram_max_uV); + + pr_debug("Setting voltage %d-%duV on %s (minuV %d)\n", + vsram_target_min_uV, vsram_target_max_uV, + rdev_get_name(mrc->vsram_rdev), min_uV); + + ret = regulator_set_voltage_rdev(mrc->vsram_rdev, vsram_target_min_uV, + vsram_target_max_uV, state); + if (ret) + return ret; + + /* The sram voltage is now balanced: update the target vreg voltage */ + return regulator_do_balance_voltage(rdev, state, true); +} + +static int mediatek_regulator_attach(struct regulator_coupler *coupler, + struct regulator_dev *rdev) +{ + struct mediatek_regulator_coupler *mrc = to_mediatek_coupler(coupler); + const char *rdev_name = rdev_get_name(rdev); + + /* + * If we're getting a coupling of more than two regulators here and + * this means that this is surely not a GPU<->SRAM couple: in that + * case, we may want to use another coupler implementation, if any, + * or the generic one: the regulator core will keep walking through + * the list of couplers when any .attach_regulator() cb returns 1. + */ + if (rdev->coupling_desc.n_coupled > 2) + return 1; + + if (strstr(rdev_name, "sram")) { + if (mrc->vsram_rdev) + return -EINVAL; + mrc->vsram_rdev = rdev; + } else if (!strstr(rdev_name, "vgpu") && !strstr(rdev_name, "Vgpu")) { + return 1; + } + + return 0; +} + +static int mediatek_regulator_detach(struct regulator_coupler *coupler, + struct regulator_dev *rdev) +{ + struct mediatek_regulator_coupler *mrc = to_mediatek_coupler(coupler); + + if (rdev == mrc->vsram_rdev) + mrc->vsram_rdev = NULL; + + return 0; +} + +static struct mediatek_regulator_coupler mediatek_coupler = { + .coupler = { + .attach_regulator = mediatek_regulator_attach, + .detach_regulator = mediatek_regulator_detach, + .balance_voltage = mediatek_regulator_balance_voltage, + }, +}; + +static int mediatek_regulator_coupler_init(void) +{ + if (!of_machine_is_compatible("mediatek,mt8183") && + !of_machine_is_compatible("mediatek,mt8186") && + !of_machine_is_compatible("mediatek,mt8192")) + return 0; + + return regulator_coupler_register(&mediatek_coupler.coupler); +} +arch_initcall(mediatek_regulator_coupler_init); + +MODULE_AUTHOR("AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>"); +MODULE_DESCRIPTION("MediaTek Regulator Coupler driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/soc/mediatek/mtk-svs.c b/drivers/soc/mediatek/mtk-svs.c index 0469c9dfeb04..f26eb2f637d5 100644 --- a/drivers/soc/mediatek/mtk-svs.c +++ b/drivers/soc/mediatek/mtk-svs.c @@ -138,6 +138,7 @@ static DEFINE_SPINLOCK(svs_lock); +#ifdef CONFIG_DEBUG_FS #define debug_fops_ro(name) \ static int svs_##name##_debug_open(struct inode *inode, \ struct file *filp) \ @@ -170,6 +171,7 @@ static DEFINE_SPINLOCK(svs_lock); } #define svs_dentry_data(name) {__stringify(name), &svs_##name##_debug_fops} +#endif /** * enum svsb_phase - svs bank phase enumeration @@ -311,15 +313,12 @@ static const u32 svs_regs_v2[] = { /** * struct svs_platform - svs platform control - * @name: svs platform name * @base: svs platform register base * @dev: svs platform device * @main_clk: main clock for svs bank * @pbank: svs bank pointer needing to be protected by spin_lock section * @banks: svs banks that svs platform supports * @rst: svs platform reset control - * @efuse_parsing: svs platform efuse parsing function pointer - * 
@probe: svs platform probe function pointer * @efuse_max: total number of svs efuse * @tefuse_max: total number of thermal efuse * @regs: svs platform registers map @@ -328,15 +327,12 @@ static const u32 svs_regs_v2[] = { * @tefuse: thermal efuse data received from NVMEM framework */ struct svs_platform { - char *name; void __iomem *base; struct device *dev; struct clk *main_clk; struct svs_bank *pbank; struct svs_bank *banks; struct reset_control *rst; - bool (*efuse_parsing)(struct svs_platform *svsp); - int (*probe)(struct svs_platform *svsp); size_t efuse_max; size_t tefuse_max; const u32 *regs; @@ -628,6 +624,7 @@ unlock_mutex: return ret; } +#ifdef CONFIG_DEBUG_FS static int svs_dump_debug_show(struct seq_file *m, void *p) { struct svs_platform *svsp = (struct svs_platform *)m->private; @@ -843,6 +840,7 @@ static int svs_create_debug_cmds(struct svs_platform *svsp) return 0; } +#endif /* CONFIG_DEBUG_FS */ static u32 interpolate(u32 f0, u32 f1, u32 v0, u32 v1, u32 fx) { @@ -1324,7 +1322,7 @@ static int svs_init01(struct svs_platform *svsp) svsb->pm_runtime_enabled_count++; } - ret = pm_runtime_get_sync(svsb->opp_dev); + ret = pm_runtime_resume_and_get(svsb->opp_dev); if (ret < 0) { dev_err(svsb->dev, "mtcmos on fail: %d\n", ret); goto svs_init01_resume_cpuidle; @@ -1461,6 +1459,7 @@ static int svs_init02(struct svs_platform *svsp) { struct svs_bank *svsb; unsigned long flags, time_left; + int ret; u32 idx; for (idx = 0; idx < svsp->bank_max; idx++) { @@ -1479,7 +1478,8 @@ static int svs_init02(struct svs_platform *svsp) msecs_to_jiffies(5000)); if (!time_left) { dev_err(svsb->dev, "init02 completion timeout\n"); - return -EBUSY; + ret = -EBUSY; + goto out_of_init02; } } @@ -1497,12 +1497,30 @@ static int svs_init02(struct svs_platform *svsp) if (svsb->type == SVSB_HIGH || svsb->type == SVSB_LOW) { if (svs_sync_bank_volts_from_opp(svsb)) { dev_err(svsb->dev, "sync volt fail\n"); - return -EPERM; + ret = -EPERM; + goto out_of_init02; } } } return 0; + +out_of_init02: + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + + spin_lock_irqsave(&svs_lock, flags); + svsp->pbank = svsb; + svs_switch_bank(svsp); + svs_writel_relaxed(svsp, SVSB_PTPEN_OFF, SVSEN); + svs_writel_relaxed(svsp, SVSB_INTSTS_VAL_CLEAN, INTSTS); + spin_unlock_irqrestore(&svs_lock, flags); + + svsb->phase = SVSB_PHASE_ERROR; + svs_adjust_pm_opp_volts(svsb); + } + + return ret; } static void svs_mon_mode(struct svs_platform *svsp) @@ -1594,12 +1612,16 @@ static int svs_resume(struct device *dev) ret = svs_init02(svsp); if (ret) - goto out_of_resume; + goto svs_resume_reset_assert; svs_mon_mode(svsp); return 0; +svs_resume_reset_assert: + dev_err(svsp->dev, "assert reset: %d\n", + reset_control_assert(svsp->rst)); + out_of_resume: clk_disable_unprepare(svsp->main_clk); return ret; @@ -1899,26 +1921,27 @@ static bool svs_mt8183_efuse_parsing(struct svs_platform *svsp) o_slope_sign = (svsp->tefuse[0] >> 7) & BIT(0); ts_id = (svsp->tefuse[1] >> 9) & BIT(0); - o_slope = (svsp->tefuse[0] >> 26) & GENMASK(5, 0); - - if (adc_cali_en_t == 1) { - if (!ts_id) - o_slope = 0; - - if (adc_ge_t < 265 || adc_ge_t > 758 || - adc_oe_t < 265 || adc_oe_t > 758 || - o_vtsmcu[0] < -8 || o_vtsmcu[0] > 484 || - o_vtsmcu[1] < -8 || o_vtsmcu[1] > 484 || - o_vtsmcu[2] < -8 || o_vtsmcu[2] > 484 || - o_vtsmcu[3] < -8 || o_vtsmcu[3] > 484 || - o_vtsmcu[4] < -8 || o_vtsmcu[4] > 484 || - o_vtsabb < -8 || o_vtsabb > 484 || - degc_cali < 1 || degc_cali > 63) { - dev_err(svsp->dev, "bad thermal efuse, no mon mode\n"); - goto 
remove_mt8183_svsb_mon_mode; - } + if (!ts_id) { + o_slope = 1534; } else { - dev_err(svsp->dev, "no thermal efuse, no mon mode\n"); + o_slope = (svsp->tefuse[0] >> 26) & GENMASK(5, 0); + if (!o_slope_sign) + o_slope = 1534 + o_slope * 10; + else + o_slope = 1534 - o_slope * 10; + } + + if (adc_cali_en_t == 0 || + adc_ge_t < 265 || adc_ge_t > 758 || + adc_oe_t < 265 || adc_oe_t > 758 || + o_vtsmcu[0] < -8 || o_vtsmcu[0] > 484 || + o_vtsmcu[1] < -8 || o_vtsmcu[1] > 484 || + o_vtsmcu[2] < -8 || o_vtsmcu[2] > 484 || + o_vtsmcu[3] < -8 || o_vtsmcu[3] > 484 || + o_vtsmcu[4] < -8 || o_vtsmcu[4] > 484 || + o_vtsabb < -8 || o_vtsabb > 484 || + degc_cali < 1 || degc_cali > 63) { + dev_err(svsp->dev, "bad thermal efuse, no mon mode\n"); goto remove_mt8183_svsb_mon_mode; } @@ -1937,11 +1960,7 @@ static bool svs_mt8183_efuse_parsing(struct svs_platform *svsp) x_roomt[i] = (((format[i] * 10000) / 4096) * 10000) / gain; temp0 = (10000 * 100000 / gain) * 15 / 18; - - if (!o_slope_sign) - mts = (temp0 * 10) / (1534 + o_slope * 10); - else - mts = (temp0 * 10) / (1534 - o_slope * 10); + mts = (temp0 * 10) / o_slope; for (idx = 0; idx < svsp->bank_max; idx++) { svsb = &svsp->banks[idx]; @@ -1968,11 +1987,7 @@ static bool svs_mt8183_efuse_parsing(struct svs_platform *svsp) temp0 = (degc_cali * 10 / 2); temp1 = ((10000 * 100000 / 4096 / gain) * oe + tb_roomt * 10) * 15 / 18; - - if (!o_slope_sign) - temp2 = temp1 * 100 / (1534 + o_slope * 10); - else - temp2 = temp1 * 100 / (1534 - o_slope * 10); + temp2 = temp1 * 100 / o_slope; svsb->bts = (temp0 + temp2 - 250) * 4 / 10; } @@ -2011,7 +2026,7 @@ static bool svs_is_efuse_data_correct(struct svs_platform *svsp) svsp->efuse_max /= sizeof(u32); nvmem_cell_put(cell); - return svsp->efuse_parsing(svsp); + return true; } static struct device *svs_get_subsys_device(struct svs_platform *svsp, @@ -2326,50 +2341,38 @@ static const struct of_device_id svs_of_match[] = { /* Sentinel */ }, }; +MODULE_DEVICE_TABLE(of, svs_of_match); -static struct svs_platform *svs_platform_probe(struct platform_device *pdev) +static int svs_probe(struct platform_device *pdev) { struct svs_platform *svsp; const struct svs_platform_data *svsp_data; - int ret; + int ret, svsp_irq; svsp_data = of_device_get_match_data(&pdev->dev); - if (!svsp_data) { - dev_err(&pdev->dev, "no svs platform data?\n"); - return ERR_PTR(-EPERM); - } svsp = devm_kzalloc(&pdev->dev, sizeof(*svsp), GFP_KERNEL); if (!svsp) - return ERR_PTR(-ENOMEM); + return -ENOMEM; svsp->dev = &pdev->dev; - svsp->name = svsp_data->name; svsp->banks = svsp_data->banks; - svsp->efuse_parsing = svsp_data->efuse_parsing; - svsp->probe = svsp_data->probe; svsp->regs = svsp_data->regs; svsp->bank_max = svsp_data->bank_max; - ret = svsp->probe(svsp); + ret = svsp_data->probe(svsp); if (ret) - return ERR_PTR(ret); - - return svsp; -} - -static int svs_probe(struct platform_device *pdev) -{ - struct svs_platform *svsp; - int svsp_irq, ret; - - svsp = svs_platform_probe(pdev); - if (IS_ERR(svsp)) - return PTR_ERR(svsp); + return ret; if (!svs_is_efuse_data_correct(svsp)) { dev_notice(svsp->dev, "efuse data isn't correct\n"); ret = -EPERM; + goto svs_probe_free_efuse; + } + + if (!svsp_data->efuse_parsing(svsp)) { + dev_err(svsp->dev, "efuse data parsing failed\n"); + ret = -EPERM; goto svs_probe_free_resource; } @@ -2385,14 +2388,6 @@ static int svs_probe(struct platform_device *pdev) goto svs_probe_free_resource; } - ret = devm_request_threaded_irq(svsp->dev, svsp_irq, NULL, svs_isr, - IRQF_ONESHOT, svsp->name, svsp); - if (ret) { - 
dev_err(svsp->dev, "register irq(%d) failed: %d\n", - svsp_irq, ret); - goto svs_probe_free_resource; - } - svsp->main_clk = devm_clk_get(svsp->dev, "main"); if (IS_ERR(svsp->main_clk)) { dev_err(svsp->dev, "failed to get clock: %ld\n", @@ -2414,17 +2409,27 @@ static int svs_probe(struct platform_device *pdev) goto svs_probe_clk_disable; } + ret = devm_request_threaded_irq(svsp->dev, svsp_irq, NULL, svs_isr, + IRQF_ONESHOT, svsp_data->name, svsp); + if (ret) { + dev_err(svsp->dev, "register irq(%d) failed: %d\n", + svsp_irq, ret); + goto svs_probe_iounmap; + } + ret = svs_start(svsp); if (ret) { dev_err(svsp->dev, "svs start fail: %d\n", ret); goto svs_probe_iounmap; } +#ifdef CONFIG_DEBUG_FS ret = svs_create_debug_cmds(svsp); if (ret) { dev_err(svsp->dev, "svs create debug cmds fail: %d\n", ret); goto svs_probe_iounmap; } +#endif return 0; @@ -2435,11 +2440,13 @@ svs_probe_clk_disable: clk_disable_unprepare(svsp->main_clk); svs_probe_free_resource: - if (!IS_ERR_OR_NULL(svsp->efuse)) - kfree(svsp->efuse); if (!IS_ERR_OR_NULL(svsp->tefuse)) kfree(svsp->tefuse); +svs_probe_free_efuse: + if (!IS_ERR_OR_NULL(svsp->efuse)) + kfree(svsp->efuse); + return ret; } diff --git a/drivers/soc/nuvoton/Kconfig b/drivers/soc/nuvoton/Kconfig new file mode 100644 index 000000000000..df46182088ec --- /dev/null +++ b/drivers/soc/nuvoton/Kconfig @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: GPL-2.0 +menuconfig WPCM450_SOC + tristate "Nuvoton WPCM450 SoC driver" + default y if ARCH_WPCM450 + select SOC_BUS + help + Say Y here to compile the SoC information driver for Nuvoton + WPCM450 SoCs. + + This driver provides information such as the SoC model and + revision. diff --git a/drivers/soc/nuvoton/Makefile b/drivers/soc/nuvoton/Makefile new file mode 100644 index 000000000000..e30317b4e829 --- /dev/null +++ b/drivers/soc/nuvoton/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_WPCM450_SOC) += wpcm450-soc.o diff --git a/drivers/soc/nuvoton/wpcm450-soc.c b/drivers/soc/nuvoton/wpcm450-soc.c new file mode 100644 index 000000000000..c5e0d11c383b --- /dev/null +++ b/drivers/soc/nuvoton/wpcm450-soc.c @@ -0,0 +1,109 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Nuvoton WPCM450 SoC Identification + * + * Copyright (C) 2022 Jonathan Neuschäfer + */ + +#include <linux/mfd/syscon.h> +#include <linux/of.h> +#include <linux/regmap.h> +#include <linux/slab.h> +#include <linux/sys_soc.h> + +#define GCR_PDID 0 +#define PDID_CHIP(x) ((x) & 0x00ffffff) +#define CHIP_WPCM450 0x926450 +#define PDID_REV(x) ((x) >> 24) + +struct revision { + u8 number; + const char *name; +}; + +static const struct revision revisions[] __initconst = { + { 0x00, "Z1" }, + { 0x03, "Z2" }, + { 0x04, "Z21" }, + { 0x08, "A1" }, + { 0x09, "A2" }, + { 0x0a, "A3" }, + {} +}; + +static const char * __init get_revision(unsigned int rev) +{ + int i; + + for (i = 0; revisions[i].name; i++) + if (revisions[i].number == rev) + return revisions[i].name; + return NULL; +} + +static struct soc_device_attribute *wpcm450_attr; +static struct soc_device *wpcm450_soc; + +static int __init wpcm450_soc_init(void) +{ + struct soc_device_attribute *attr; + struct soc_device *soc; + const char *revision; + struct regmap *gcr; + u32 pdid; + int ret; + + if (!of_machine_is_compatible("nuvoton,wpcm450")) + return 0; + + gcr = syscon_regmap_lookup_by_compatible("nuvoton,wpcm450-gcr"); + if (IS_ERR(gcr)) + return PTR_ERR(gcr); + ret = regmap_read(gcr, GCR_PDID, &pdid); + if (ret) + return ret; + + if (PDID_CHIP(pdid) != CHIP_WPCM450) { + 
pr_warn("Unknown chip ID in GCR.PDID: 0x%06x\n", PDID_CHIP(pdid)); + return -ENODEV; + } + + revision = get_revision(PDID_REV(pdid)); + if (!revision) { + pr_warn("Unknown chip revision in GCR.PDID: 0x%02x\n", PDID_REV(pdid)); + return -ENODEV; + } + + attr = kzalloc(sizeof(*attr), GFP_KERNEL); + if (!attr) + return -ENOMEM; + + attr->family = "Nuvoton NPCM"; + attr->soc_id = "WPCM450"; + attr->revision = revision; + soc = soc_device_register(attr); + if (IS_ERR(soc)) { + kfree(attr); + pr_warn("Could not register SoC device\n"); + return PTR_ERR(soc); + } + + wpcm450_soc = soc; + wpcm450_attr = attr; + return 0; +} +module_init(wpcm450_soc_init); + +static void __exit wpcm450_soc_exit(void) +{ + if (wpcm450_soc) { + soc_device_unregister(wpcm450_soc); + wpcm450_soc = NULL; + kfree(wpcm450_attr); + } +} +module_exit(wpcm450_soc_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Jonathan Neuschäfer"); +MODULE_DESCRIPTION("Nuvoton WPCM450 SoC Identification driver"); diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig index 660498252ec5..4e8b51ba2266 100644 --- a/drivers/soc/renesas/Kconfig +++ b/drivers/soc/renesas/Kconfig @@ -330,6 +330,7 @@ config ARCH_R9A09G011 bool "ARM64 Platform support for RZ/V2M" select PM select PM_GENERIC_DOMAINS + select PWC_RZV2M help This enables support for the Renesas RZ/V2M SoC. @@ -345,6 +346,9 @@ config ARCH_R9A07G043 endif # RISCV +config PWC_RZV2M + bool "Renesas RZ/V2M PWC support" if COMPILE_TEST + config RST_RCAR bool "Reset Controller support for R-Car" if COMPILE_TEST diff --git a/drivers/soc/renesas/Makefile b/drivers/soc/renesas/Makefile index 535868c9c7e4..6e4e77b0afff 100644 --- a/drivers/soc/renesas/Makefile +++ b/drivers/soc/renesas/Makefile @@ -32,6 +32,7 @@ obj-$(CONFIG_ARCH_R9A06G032) += r9a06g032-smp.o endif # Family +obj-$(CONFIG_PWC_RZV2M) += pwc-rzv2m.o obj-$(CONFIG_RST_RCAR) += rcar-rst.o obj-$(CONFIG_SYSC_RCAR) += rcar-sysc.o obj-$(CONFIG_SYSC_RCAR_GEN4) += rcar-gen4-sysc.o diff --git a/drivers/soc/renesas/pwc-rzv2m.c b/drivers/soc/renesas/pwc-rzv2m.c new file mode 100644 index 000000000000..c83bdbdabb64 --- /dev/null +++ b/drivers/soc/renesas/pwc-rzv2m.c @@ -0,0 +1,141 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2023 Renesas Electronics Corporation + */ + +#include <linux/delay.h> +#include <linux/gpio/driver.h> +#include <linux/platform_device.h> +#include <linux/reboot.h> + +#define PWC_PWCRST 0x00 +#define PWC_PWCCKEN 0x04 +#define PWC_PWCCTL 0x50 +#define PWC_GPIO 0x80 + +#define PWC_PWCRST_RSTSOFTAX 0x1 +#define PWC_PWCCKEN_ENGCKMAIN 0x1 +#define PWC_PWCCTL_PWOFF 0x1 + +struct rzv2m_pwc_priv { + void __iomem *base; + struct device *dev; + struct gpio_chip gp; + DECLARE_BITMAP(ch_en_bits, 2); +}; + +static void rzv2m_pwc_gpio_set(struct gpio_chip *chip, unsigned int offset, + int value) +{ + struct rzv2m_pwc_priv *priv = gpiochip_get_data(chip); + u32 reg; + + /* BIT 16 enables write to BIT 0, and BIT 17 enables write to BIT 1 */ + reg = BIT(offset + 16); + if (value) + reg |= BIT(offset); + + writel(reg, priv->base + PWC_GPIO); + + assign_bit(offset, priv->ch_en_bits, value); +} + +static int rzv2m_pwc_gpio_get(struct gpio_chip *chip, unsigned int offset) +{ + struct rzv2m_pwc_priv *priv = gpiochip_get_data(chip); + + return test_bit(offset, priv->ch_en_bits); +} + +static int rzv2m_pwc_gpio_direction_output(struct gpio_chip *gc, + unsigned int nr, int value) +{ + if (nr > 1) + return -EINVAL; + + rzv2m_pwc_gpio_set(gc, nr, value); + + return 0; +} + +static const struct gpio_chip 
rzv2m_pwc_gc = { + .label = "gpio_rzv2m_pwc", + .owner = THIS_MODULE, + .get = rzv2m_pwc_gpio_get, + .set = rzv2m_pwc_gpio_set, + .direction_output = rzv2m_pwc_gpio_direction_output, + .can_sleep = false, + .ngpio = 2, + .base = -1, +}; + +static int rzv2m_pwc_poweroff(struct sys_off_data *data) +{ + struct rzv2m_pwc_priv *priv = data->cb_data; + + writel(PWC_PWCRST_RSTSOFTAX, priv->base + PWC_PWCRST); + writel(PWC_PWCCKEN_ENGCKMAIN, priv->base + PWC_PWCCKEN); + writel(PWC_PWCCTL_PWOFF, priv->base + PWC_PWCCTL); + + mdelay(150); + + dev_err(priv->dev, "Failed to power off the system"); + + return NOTIFY_DONE; +} + +static int rzv2m_pwc_probe(struct platform_device *pdev) +{ + struct rzv2m_pwc_priv *priv; + int ret; + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(priv->base)) + return PTR_ERR(priv->base); + + /* + * The register used by this driver cannot be read, therefore set the + * outputs to their default values and initialize priv->ch_en_bits + * accordingly. BIT 16 enables write to BIT 0, BIT 17 enables write to + * BIT 1, and the default value of both BIT 0 and BIT 1 is 0. + */ + writel(BIT(17) | BIT(16), priv->base + PWC_GPIO); + bitmap_zero(priv->ch_en_bits, 2); + + priv->gp = rzv2m_pwc_gc; + priv->gp.parent = pdev->dev.parent; + priv->gp.fwnode = dev_fwnode(&pdev->dev); + + ret = devm_gpiochip_add_data(&pdev->dev, &priv->gp, priv); + if (ret) + return ret; + + if (device_property_read_bool(&pdev->dev, "renesas,rzv2m-pwc-power")) + ret = devm_register_power_off_handler(&pdev->dev, + rzv2m_pwc_poweroff, priv); + + return ret; +} + +static const struct of_device_id rzv2m_pwc_of_match[] = { + { .compatible = "renesas,rzv2m-pwc" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, rzv2m_pwc_of_match); + +static struct platform_driver rzv2m_pwc_driver = { + .probe = rzv2m_pwc_probe, + .driver = { + .name = "rzv2m_pwc", + .of_match_table = of_match_ptr(rzv2m_pwc_of_match), + }, +}; +module_platform_driver(rzv2m_pwc_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Fabrizio Castro <castro.fabrizio.jz@renesas.com>"); +MODULE_DESCRIPTION("Renesas RZ/V2M PWC driver"); diff --git a/drivers/soc/renesas/r8a779g0-sysc.c b/drivers/soc/renesas/r8a779g0-sysc.c index a452709f066d..b932eba1b804 100644 --- a/drivers/soc/renesas/r8a779g0-sysc.c +++ b/drivers/soc/renesas/r8a779g0-sysc.c @@ -37,6 +37,7 @@ static struct rcar_gen4_sysc_area r8a779g0_areas[] __initdata = { { "a3vip0", R8A779G0_PD_A3VIP0, R8A779G0_PD_ALWAYS_ON }, { "a3vip1", R8A779G0_PD_A3VIP1, R8A779G0_PD_ALWAYS_ON }, { "a3vip2", R8A779G0_PD_A3VIP2, R8A779G0_PD_ALWAYS_ON }, + { "a3dul", R8A779G0_PD_A3DUL, R8A779G0_PD_ALWAYS_ON }, { "a3isp0", R8A779G0_PD_A3ISP0, R8A779G0_PD_ALWAYS_ON }, { "a3isp1", R8A779G0_PD_A3ISP1, R8A779G0_PD_ALWAYS_ON }, { "a3ir", R8A779G0_PD_A3IR, R8A779G0_PD_ALWAYS_ON }, diff --git a/drivers/soc/sifive/Kconfig b/drivers/soc/sifive/Kconfig index ed4c571f8771..e86870be34c9 100644 --- a/drivers/soc/sifive/Kconfig +++ b/drivers/soc/sifive/Kconfig @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 -if SOC_SIFIVE +if SOC_SIFIVE || SOC_STARFIVE config SIFIVE_CCACHE bool "Sifive Composable Cache controller" diff --git a/drivers/soc/starfive/Kconfig b/drivers/soc/starfive/Kconfig new file mode 100644 index 000000000000..bdb96dc4c989 --- /dev/null +++ b/drivers/soc/starfive/Kconfig @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: GPL-2.0 + +config JH71XX_PMU + bool "Support PMU for StarFive JH71XX Soc" + depends 
on PM + depends on SOC_STARFIVE || COMPILE_TEST + default SOC_STARFIVE + select PM_GENERIC_DOMAINS + help + Say 'y' here to enable power domain support. + In order to meet low power requirements, a Power Management Unit (PMU) + is designed for controlling power resources in StarFive JH71XX SoCs. diff --git a/drivers/soc/starfive/Makefile b/drivers/soc/starfive/Makefile new file mode 100644 index 000000000000..13b589d6b5f3 --- /dev/null +++ b/drivers/soc/starfive/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_JH71XX_PMU) += jh71xx_pmu.o diff --git a/drivers/soc/starfive/jh71xx_pmu.c b/drivers/soc/starfive/jh71xx_pmu.c new file mode 100644 index 000000000000..7d5f50d71c0d --- /dev/null +++ b/drivers/soc/starfive/jh71xx_pmu.c @@ -0,0 +1,383 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * StarFive JH71XX PMU (Power Management Unit) Controller Driver + * + * Copyright (C) 2022 StarFive Technology Co., Ltd. + */ + +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/pm_domain.h> +#include <dt-bindings/power/starfive,jh7110-pmu.h> + +/* register offset */ +#define JH71XX_PMU_SW_TURN_ON_POWER 0x0C +#define JH71XX_PMU_SW_TURN_OFF_POWER 0x10 +#define JH71XX_PMU_SW_ENCOURAGE 0x44 +#define JH71XX_PMU_TIMER_INT_MASK 0x48 +#define JH71XX_PMU_CURR_POWER_MODE 0x80 +#define JH71XX_PMU_EVENT_STATUS 0x88 +#define JH71XX_PMU_INT_STATUS 0x8C + +/* sw encourage cfg */ +#define JH71XX_PMU_SW_ENCOURAGE_EN_LO 0x05 +#define JH71XX_PMU_SW_ENCOURAGE_EN_HI 0x50 +#define JH71XX_PMU_SW_ENCOURAGE_DIS_LO 0x0A +#define JH71XX_PMU_SW_ENCOURAGE_DIS_HI 0xA0 +#define JH71XX_PMU_SW_ENCOURAGE_ON 0xFF + +/* pmu int status */ +#define JH71XX_PMU_INT_SEQ_DONE BIT(0) +#define JH71XX_PMU_INT_HW_REQ BIT(1) +#define JH71XX_PMU_INT_SW_FAIL GENMASK(3, 2) +#define JH71XX_PMU_INT_HW_FAIL GENMASK(5, 4) +#define JH71XX_PMU_INT_PCH_FAIL GENMASK(8, 6) +#define JH71XX_PMU_INT_ALL_MASK GENMASK(8, 0) + +/* + * The time required for switching power status is based on the time + * to turn on the largest domain's power, which is at microsecond level + */ +#define JH71XX_PMU_TIMEOUT_US 100 + +struct jh71xx_domain_info { + const char * const name; + unsigned int flags; + u8 bit; +}; + +struct jh71xx_pmu_match_data { + const struct jh71xx_domain_info *domain_info; + int num_domains; +}; + +struct jh71xx_pmu { + struct device *dev; + const struct jh71xx_pmu_match_data *match_data; + void __iomem *base; + struct generic_pm_domain **genpd; + struct genpd_onecell_data genpd_data; + int irq; + spinlock_t lock; /* protects pmu reg */ +}; + +struct jh71xx_pmu_dev { + const struct jh71xx_domain_info *domain_info; + struct jh71xx_pmu *pmu; + struct generic_pm_domain genpd; +}; + +static int jh71xx_pmu_get_state(struct jh71xx_pmu_dev *pmd, u32 mask, bool *is_on) +{ + struct jh71xx_pmu *pmu = pmd->pmu; + + if (!mask) + return -EINVAL; + + *is_on = readl(pmu->base + JH71XX_PMU_CURR_POWER_MODE) & mask; + + return 0; +} + +static int jh71xx_pmu_set_state(struct jh71xx_pmu_dev *pmd, u32 mask, bool on) +{ + struct jh71xx_pmu *pmu = pmd->pmu; + unsigned long flags; + u32 val; + u32 mode; + u32 encourage_lo; + u32 encourage_hi; + bool is_on; + int ret; + + ret = jh71xx_pmu_get_state(pmd, mask, &is_on); + if (ret) { + dev_dbg(pmu->dev, "unable to get current state for %s\n", + pmd->genpd.name); + return ret; + } + + if (is_on == on) { + dev_dbg(pmu->dev, "pm domain 
[%s] is already %sable status.\n", + pmd->genpd.name, on ? "en" : "dis"); + return 0; + } + + spin_lock_irqsave(&pmu->lock, flags); + + /* + * The PMU accepts software encourage to switch power mode in the following 2 steps: + * + * 1.Configure the register SW_TURN_ON_POWER (offset 0x0c) by writing 1 to + * the bit corresponding to the power domain that will be turned on + * and writing 0 to the others. + * Likewise, configure the register SW_TURN_OFF_POWER (offset 0x10) by + * writing 1 to the bit corresponding to the power domain that will be + * turned off and writing 0 to the others. + */ + if (on) { + mode = JH71XX_PMU_SW_TURN_ON_POWER; + encourage_lo = JH71XX_PMU_SW_ENCOURAGE_EN_LO; + encourage_hi = JH71XX_PMU_SW_ENCOURAGE_EN_HI; + } else { + mode = JH71XX_PMU_SW_TURN_OFF_POWER; + encourage_lo = JH71XX_PMU_SW_ENCOURAGE_DIS_LO; + encourage_hi = JH71XX_PMU_SW_ENCOURAGE_DIS_HI; + } + + writel(mask, pmu->base + mode); + + /* + * 2.Write SW encourage command sequence to the Software Encourage Reg (offset 0x44) + * First write SW_MODE_ENCOURAGE_ON to JH71XX_PMU_SW_ENCOURAGE. This will reset + * the state machine which parses the command sequence. This register must be + * written every time software wants to power on/off a domain. + * Then write the lower bits of the command sequence, followed by the upper + * bits. The sequence differs between powering on & off a domain. + */ + writel(JH71XX_PMU_SW_ENCOURAGE_ON, pmu->base + JH71XX_PMU_SW_ENCOURAGE); + writel(encourage_lo, pmu->base + JH71XX_PMU_SW_ENCOURAGE); + writel(encourage_hi, pmu->base + JH71XX_PMU_SW_ENCOURAGE); + + spin_unlock_irqrestore(&pmu->lock, flags); + + /* Wait for the power domain bit to be enabled / disabled */ + if (on) { + ret = readl_poll_timeout_atomic(pmu->base + JH71XX_PMU_CURR_POWER_MODE, + val, val & mask, + 1, JH71XX_PMU_TIMEOUT_US); + } else { + ret = readl_poll_timeout_atomic(pmu->base + JH71XX_PMU_CURR_POWER_MODE, + val, !(val & mask), + 1, JH71XX_PMU_TIMEOUT_US); + } + + if (ret) { + dev_err(pmu->dev, "%s: failed to power %s\n", + pmd->genpd.name, on ? 
"on" : "off"); + return -ETIMEDOUT; + } + + return 0; +} + +static int jh71xx_pmu_on(struct generic_pm_domain *genpd) +{ + struct jh71xx_pmu_dev *pmd = container_of(genpd, + struct jh71xx_pmu_dev, genpd); + u32 pwr_mask = BIT(pmd->domain_info->bit); + + return jh71xx_pmu_set_state(pmd, pwr_mask, true); +} + +static int jh71xx_pmu_off(struct generic_pm_domain *genpd) +{ + struct jh71xx_pmu_dev *pmd = container_of(genpd, + struct jh71xx_pmu_dev, genpd); + u32 pwr_mask = BIT(pmd->domain_info->bit); + + return jh71xx_pmu_set_state(pmd, pwr_mask, false); +} + +static void jh71xx_pmu_int_enable(struct jh71xx_pmu *pmu, u32 mask, bool enable) +{ + u32 val; + unsigned long flags; + + spin_lock_irqsave(&pmu->lock, flags); + val = readl(pmu->base + JH71XX_PMU_TIMER_INT_MASK); + + if (enable) + val &= ~mask; + else + val |= mask; + + writel(val, pmu->base + JH71XX_PMU_TIMER_INT_MASK); + spin_unlock_irqrestore(&pmu->lock, flags); +} + +static irqreturn_t jh71xx_pmu_interrupt(int irq, void *data) +{ + struct jh71xx_pmu *pmu = data; + u32 val; + + val = readl(pmu->base + JH71XX_PMU_INT_STATUS); + + if (val & JH71XX_PMU_INT_SEQ_DONE) + dev_dbg(pmu->dev, "sequence done.\n"); + if (val & JH71XX_PMU_INT_HW_REQ) + dev_dbg(pmu->dev, "hardware encourage requestion.\n"); + if (val & JH71XX_PMU_INT_SW_FAIL) + dev_err(pmu->dev, "software encourage fail.\n"); + if (val & JH71XX_PMU_INT_HW_FAIL) + dev_err(pmu->dev, "hardware encourage fail.\n"); + if (val & JH71XX_PMU_INT_PCH_FAIL) + dev_err(pmu->dev, "p-channel fail event.\n"); + + /* clear interrupts */ + writel(val, pmu->base + JH71XX_PMU_INT_STATUS); + writel(val, pmu->base + JH71XX_PMU_EVENT_STATUS); + + return IRQ_HANDLED; +} + +static int jh71xx_pmu_init_domain(struct jh71xx_pmu *pmu, int index) +{ + struct jh71xx_pmu_dev *pmd; + u32 pwr_mask; + int ret; + bool is_on = false; + + pmd = devm_kzalloc(pmu->dev, sizeof(*pmd), GFP_KERNEL); + if (!pmd) + return -ENOMEM; + + pmd->domain_info = &pmu->match_data->domain_info[index]; + pmd->pmu = pmu; + pwr_mask = BIT(pmd->domain_info->bit); + + pmd->genpd.name = pmd->domain_info->name; + pmd->genpd.flags = pmd->domain_info->flags; + + ret = jh71xx_pmu_get_state(pmd, pwr_mask, &is_on); + if (ret) + dev_warn(pmu->dev, "unable to get current state for %s\n", + pmd->genpd.name); + + pmd->genpd.power_on = jh71xx_pmu_on; + pmd->genpd.power_off = jh71xx_pmu_off; + pm_genpd_init(&pmd->genpd, NULL, !is_on); + + pmu->genpd_data.domains[index] = &pmd->genpd; + + return 0; +} + +static int jh71xx_pmu_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + const struct jh71xx_pmu_match_data *match_data; + struct jh71xx_pmu *pmu; + unsigned int i; + int ret; + + pmu = devm_kzalloc(dev, sizeof(*pmu), GFP_KERNEL); + if (!pmu) + return -ENOMEM; + + pmu->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(pmu->base)) + return PTR_ERR(pmu->base); + + pmu->irq = platform_get_irq(pdev, 0); + if (pmu->irq < 0) + return pmu->irq; + + ret = devm_request_irq(dev, pmu->irq, jh71xx_pmu_interrupt, + 0, pdev->name, pmu); + if (ret) + dev_err(dev, "failed to request irq\n"); + + match_data = of_device_get_match_data(dev); + if (!match_data) + return -EINVAL; + + pmu->genpd = devm_kcalloc(dev, match_data->num_domains, + sizeof(struct generic_pm_domain *), + GFP_KERNEL); + if (!pmu->genpd) + return -ENOMEM; + + pmu->dev = dev; + pmu->match_data = match_data; + pmu->genpd_data.domains = pmu->genpd; + pmu->genpd_data.num_domains = match_data->num_domains; + + for (i = 0; i < 
match_data->num_domains; i++) { + ret = jh71xx_pmu_init_domain(pmu, i); + if (ret) { + dev_err(dev, "failed to initialize power domain\n"); + return ret; + } + } + + spin_lock_init(&pmu->lock); + jh71xx_pmu_int_enable(pmu, JH71XX_PMU_INT_ALL_MASK & ~JH71XX_PMU_INT_PCH_FAIL, true); + + ret = of_genpd_add_provider_onecell(np, &pmu->genpd_data); + if (ret) { + dev_err(dev, "failed to register genpd driver: %d\n", ret); + return ret; + } + + dev_dbg(dev, "registered %u power domains\n", i); + + return 0; +} + +static const struct jh71xx_domain_info jh7110_power_domains[] = { + [JH7110_PD_SYSTOP] = { + .name = "SYSTOP", + .bit = 0, + .flags = GENPD_FLAG_ALWAYS_ON, + }, + [JH7110_PD_CPU] = { + .name = "CPU", + .bit = 1, + .flags = GENPD_FLAG_ALWAYS_ON, + }, + [JH7110_PD_GPUA] = { + .name = "GPUA", + .bit = 2, + }, + [JH7110_PD_VDEC] = { + .name = "VDEC", + .bit = 3, + }, + [JH7110_PD_VOUT] = { + .name = "VOUT", + .bit = 4, + }, + [JH7110_PD_ISP] = { + .name = "ISP", + .bit = 5, + }, + [JH7110_PD_VENC] = { + .name = "VENC", + .bit = 6, + }, +}; + +static const struct jh71xx_pmu_match_data jh7110_pmu = { + .num_domains = ARRAY_SIZE(jh7110_power_domains), + .domain_info = jh7110_power_domains, +}; + +static const struct of_device_id jh71xx_pmu_of_match[] = { + { + .compatible = "starfive,jh7110-pmu", + .data = (void *)&jh7110_pmu, + }, { + /* sentinel */ + } +}; + +static struct platform_driver jh71xx_pmu_driver = { + .probe = jh71xx_pmu_probe, + .driver = { + .name = "jh71xx-pmu", + .of_match_table = jh71xx_pmu_of_match, + .suppress_bind_attrs = true, + }, +}; +builtin_platform_driver(jh71xx_pmu_driver); + +MODULE_AUTHOR("Walker Chen <walker.chen@starfivetech.com>"); +MODULE_DESCRIPTION("StarFive JH71XX PMU Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/soc/sunxi/Kconfig b/drivers/soc/sunxi/Kconfig index 8aecbc9b1976..02d0b338feb3 100644 --- a/drivers/soc/sunxi/Kconfig +++ b/drivers/soc/sunxi/Kconfig @@ -19,3 +19,12 @@ config SUNXI_SRAM Say y here to enable the SRAM controller support. This device is responsible on mapping the SRAM in the sunXi SoCs whether to the CPU/DMA, or to the devices. + +config SUN20I_PPU + bool "Allwinner D1 PPU power domain driver" + depends on ARCH_SUNXI || COMPILE_TEST + select PM + select PM_GENERIC_DOMAINS + help + Say y to enable the PPU power domain driver. This saves power + when certain peripherals, such as the video engine, are idle. 
diff --git a/drivers/soc/sunxi/Makefile b/drivers/soc/sunxi/Makefile index 549159571d4f..90ff2ebe7655 100644 --- a/drivers/soc/sunxi/Makefile +++ b/drivers/soc/sunxi/Makefile @@ -1,3 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only obj-$(CONFIG_SUNXI_MBUS) += sunxi_mbus.o obj-$(CONFIG_SUNXI_SRAM) += sunxi_sram.o +obj-$(CONFIG_SUN20I_PPU) += sun20i-ppu.o diff --git a/drivers/soc/sunxi/sun20i-ppu.c b/drivers/soc/sunxi/sun20i-ppu.c new file mode 100644 index 000000000000..98cb41d36560 --- /dev/null +++ b/drivers/soc/sunxi/sun20i-ppu.c @@ -0,0 +1,207 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <linux/bitfield.h> +#include <linux/clk.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/pm_domain.h> +#include <linux/reset.h> + +#define PD_STATE_ON 1 +#define PD_STATE_OFF 2 + +#define PD_RSTN_REG 0x00 +#define PD_CLK_GATE_REG 0x04 +#define PD_PWROFF_GATE_REG 0x08 +#define PD_PSW_ON_REG 0x0c +#define PD_PSW_OFF_REG 0x10 +#define PD_PSW_DELAY_REG 0x14 +#define PD_OFF_DELAY_REG 0x18 +#define PD_ON_DELAY_REG 0x1c +#define PD_COMMAND_REG 0x20 +#define PD_STATUS_REG 0x24 +#define PD_STATUS_COMPLETE BIT(1) +#define PD_STATUS_BUSY BIT(3) +#define PD_STATUS_STATE GENMASK(17, 16) +#define PD_ACTIVE_CTRL_REG 0x2c +#define PD_GATE_STATUS_REG 0x30 +#define PD_RSTN_STATUS BIT(0) +#define PD_CLK_GATE_STATUS BIT(1) +#define PD_PWROFF_GATE_STATUS BIT(2) +#define PD_PSW_STATUS_REG 0x34 + +#define PD_REGS_SIZE 0x80 + +struct sun20i_ppu_desc { + const char *const *names; + unsigned int num_domains; +}; + +struct sun20i_ppu_pd { + struct generic_pm_domain genpd; + void __iomem *base; +}; + +#define to_sun20i_ppu_pd(_genpd) \ + container_of(_genpd, struct sun20i_ppu_pd, genpd) + +static bool sun20i_ppu_pd_is_on(const struct sun20i_ppu_pd *pd) +{ + u32 status = readl(pd->base + PD_STATUS_REG); + + return FIELD_GET(PD_STATUS_STATE, status) == PD_STATE_ON; +} + +static int sun20i_ppu_pd_set_power(const struct sun20i_ppu_pd *pd, bool power_on) +{ + u32 state, status; + int ret; + + if (sun20i_ppu_pd_is_on(pd) == power_on) + return 0; + + /* Wait for the power controller to be idle. */ + ret = readl_poll_timeout(pd->base + PD_STATUS_REG, status, + !(status & PD_STATUS_BUSY), 100, 1000); + if (ret) + return ret; + + state = power_on ? PD_STATE_ON : PD_STATE_OFF; + writel(state, pd->base + PD_COMMAND_REG); + + /* Wait for the state transition to complete. */ + ret = readl_poll_timeout(pd->base + PD_STATUS_REG, status, + FIELD_GET(PD_STATUS_STATE, status) == state && + (status & PD_STATUS_COMPLETE), 100, 1000); + if (ret) + return ret; + + /* Clear the completion flag. 
*/ + writel(status, pd->base + PD_STATUS_REG); + + return 0; +} + +static int sun20i_ppu_pd_power_on(struct generic_pm_domain *genpd) +{ + const struct sun20i_ppu_pd *pd = to_sun20i_ppu_pd(genpd); + + return sun20i_ppu_pd_set_power(pd, true); +} + +static int sun20i_ppu_pd_power_off(struct generic_pm_domain *genpd) +{ + const struct sun20i_ppu_pd *pd = to_sun20i_ppu_pd(genpd); + + return sun20i_ppu_pd_set_power(pd, false); +} + +static int sun20i_ppu_probe(struct platform_device *pdev) +{ + const struct sun20i_ppu_desc *desc; + struct device *dev = &pdev->dev; + struct genpd_onecell_data *ppu; + struct sun20i_ppu_pd *pds; + struct reset_control *rst; + void __iomem *base; + struct clk *clk; + int ret; + + desc = of_device_get_match_data(dev); + if (!desc) + return -EINVAL; + + pds = devm_kcalloc(dev, desc->num_domains, sizeof(*pds), GFP_KERNEL); + if (!pds) + return -ENOMEM; + + ppu = devm_kzalloc(dev, sizeof(*ppu), GFP_KERNEL); + if (!ppu) + return -ENOMEM; + + ppu->domains = devm_kcalloc(dev, desc->num_domains, + sizeof(*ppu->domains), GFP_KERNEL); + if (!ppu->domains) + return -ENOMEM; + + ppu->num_domains = desc->num_domains; + platform_set_drvdata(pdev, ppu); + + base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(base)) + return PTR_ERR(base); + + clk = devm_clk_get_enabled(dev, NULL); + if (IS_ERR(clk)) + return PTR_ERR(clk); + + rst = devm_reset_control_get_exclusive(dev, NULL); + if (IS_ERR(rst)) + return PTR_ERR(rst); + + ret = reset_control_deassert(rst); + if (ret) + return ret; + + for (unsigned int i = 0; i < ppu->num_domains; ++i) { + struct sun20i_ppu_pd *pd = &pds[i]; + + pd->genpd.name = desc->names[i]; + pd->genpd.power_off = sun20i_ppu_pd_power_off; + pd->genpd.power_on = sun20i_ppu_pd_power_on; + pd->base = base + PD_REGS_SIZE * i; + + ret = pm_genpd_init(&pd->genpd, NULL, sun20i_ppu_pd_is_on(pd)); + if (ret) { + dev_warn(dev, "Failed to add '%s' domain: %d\n", + pd->genpd.name, ret); + continue; + } + + ppu->domains[i] = &pd->genpd; + } + + ret = of_genpd_add_provider_onecell(dev->of_node, ppu); + if (ret) + dev_warn(dev, "Failed to add provider: %d\n", ret); + + return 0; +} + +static const char *const sun20i_d1_ppu_pd_names[] = { + "CPU", + "VE", + "DSP", +}; + +static const struct sun20i_ppu_desc sun20i_d1_ppu_desc = { + .names = sun20i_d1_ppu_pd_names, + .num_domains = ARRAY_SIZE(sun20i_d1_ppu_pd_names), +}; + +static const struct of_device_id sun20i_ppu_of_match[] = { + { + .compatible = "allwinner,sun20i-d1-ppu", + .data = &sun20i_d1_ppu_desc, + }, + { } +}; +MODULE_DEVICE_TABLE(of, sun20i_ppu_of_match); + +static struct platform_driver sun20i_ppu_driver = { + .probe = sun20i_ppu_probe, + .driver = { + .name = "sun20i-ppu", + .of_match_table = sun20i_ppu_of_match, + /* Power domains cannot be removed while they are in use. 
*/ + .suppress_bind_attrs = true, + }, +}; +module_platform_driver(sun20i_ppu_driver); + +MODULE_AUTHOR("Samuel Holland <samuel@sholland.org>"); +MODULE_DESCRIPTION("Allwinner D1 PPU power domain driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/soc/sunxi/sunxi_sram.c b/drivers/soc/sunxi/sunxi_sram.c index 92f9186c1c42..f09918c59042 100644 --- a/drivers/soc/sunxi/sunxi_sram.c +++ b/drivers/soc/sunxi/sunxi_sram.c @@ -120,6 +120,9 @@ static int sunxi_sram_show(struct seq_file *s, void *data) seq_puts(s, "--------------------\n\n"); for_each_child_of_node(sram_dev->of_node, sram_node) { + if (!of_device_is_compatible(sram_node, "mmio-sram")) + continue; + sram_addr_p = of_get_address(sram_node, 0, NULL, NULL); seq_printf(s, "sram@%08x\n", diff --git a/drivers/soc/xilinx/xlnx_event_manager.c b/drivers/soc/xilinx/xlnx_event_manager.c index 2de082765bef..c76381899ef4 100644 --- a/drivers/soc/xilinx/xlnx_event_manager.c +++ b/drivers/soc/xilinx/xlnx_event_manager.c @@ -116,8 +116,10 @@ static int xlnx_add_cb_for_notify_event(const u32 node_id, const u32 event, cons INIT_LIST_HEAD(&eve_data->cb_list_head); cb_data = kmalloc(sizeof(*cb_data), GFP_KERNEL); - if (!cb_data) + if (!cb_data) { + kfree(eve_data); return -ENOMEM; + } cb_data->eve_cb = cb_fun; cb_data->agent_data = data; diff --git a/drivers/soc/xilinx/zynqmp_pm_domains.c b/drivers/soc/xilinx/zynqmp_pm_domains.c index fcce2433bd6d..69d03ad4cf1e 100644 --- a/drivers/soc/xilinx/zynqmp_pm_domains.c +++ b/drivers/soc/xilinx/zynqmp_pm_domains.c @@ -227,7 +227,7 @@ static struct generic_pm_domain *zynqmp_gpd_xlate goto done; } - /** + /* * Add index in empty node_id of power domain list as no existing * power domain found for current index. */ diff --git a/drivers/spi/spi-rpc-if.c b/drivers/spi/spi-rpc-if.c index 24ec1c83f379..ec0904faf3a1 100644 --- a/drivers/spi/spi-rpc-if.c +++ b/drivers/spi/spi-rpc-if.c @@ -58,7 +58,7 @@ static void rpcif_spi_mem_prepare(struct spi_device *spi_dev, rpc_op.data.dir = RPCIF_NO_DATA; } - rpcif_prepare(rpc, &rpc_op, offs, len); + rpcif_prepare(rpc->dev, &rpc_op, offs, len); } static bool rpcif_spi_mem_supports_op(struct spi_mem *mem, @@ -86,7 +86,7 @@ static ssize_t rpcif_spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc, rpcif_spi_mem_prepare(desc->mem->spi, &desc->info.op_tmpl, &offs, &len); - return rpcif_dirmap_read(rpc, offs, len, buf); + return rpcif_dirmap_read(rpc->dev, offs, len, buf); } static int rpcif_spi_mem_dirmap_create(struct spi_mem_dirmap_desc *desc) @@ -117,7 +117,7 @@ static int rpcif_spi_mem_exec_op(struct spi_mem *mem, rpcif_spi_mem_prepare(mem->spi, op, NULL, NULL); - return rpcif_manual_xfer(rpc); + return rpcif_manual_xfer(rpc->dev); } static const struct spi_controller_mem_ops rpcif_spi_mem_ops = { @@ -147,7 +147,7 @@ static int rpcif_spi_probe(struct platform_device *pdev) ctlr->dev.of_node = parent->of_node; - rpcif_enable_rpm(rpc); + pm_runtime_enable(rpc->dev); ctlr->num_chipselect = 1; ctlr->mem_ops = &rpcif_spi_mem_ops; @@ -156,7 +156,7 @@ static int rpcif_spi_probe(struct platform_device *pdev) ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_TX_QUAD | SPI_RX_QUAD; ctlr->flags = SPI_CONTROLLER_HALF_DUPLEX; - error = rpcif_hw_init(rpc, false); + error = rpcif_hw_init(rpc->dev, false); if (error) goto out_disable_rpm; @@ -169,7 +169,7 @@ static int rpcif_spi_probe(struct platform_device *pdev) return 0; out_disable_rpm: - rpcif_disable_rpm(rpc); + pm_runtime_disable(rpc->dev); return error; } @@ -179,7 +179,7 @@ static int rpcif_spi_remove(struct platform_device *pdev) struct 
rpcif *rpc = spi_controller_get_devdata(ctlr);

 	spi_unregister_controller(ctlr);
-	rpcif_disable_rpm(rpc);
+	pm_runtime_disable(rpc->dev);

 	return 0;
 }
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
index 8c42e7662033..92ed1213fe37 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
@@ -172,6 +172,7 @@ static const struct attribute_group fivr_attribute_group = {
 RFIM_SHOW(rfi_restriction_run_busy, 1)
 RFIM_SHOW(rfi_restriction_err_code, 1)
 RFIM_SHOW(rfi_restriction_data_rate, 1)
+RFIM_SHOW(rfi_restriction_data_rate_base, 1)
 RFIM_SHOW(ddr_data_rate_point_0, 1)
 RFIM_SHOW(ddr_data_rate_point_1, 1)
 RFIM_SHOW(ddr_data_rate_point_2, 1)
@@ -181,11 +182,13 @@ RFIM_SHOW(rfi_disable, 1)
 RFIM_STORE(rfi_restriction_run_busy, 1)
 RFIM_STORE(rfi_restriction_err_code, 1)
 RFIM_STORE(rfi_restriction_data_rate, 1)
+RFIM_STORE(rfi_restriction_data_rate_base, 1)
 RFIM_STORE(rfi_disable, 1)

 static DEVICE_ATTR_RW(rfi_restriction_run_busy);
 static DEVICE_ATTR_RW(rfi_restriction_err_code);
 static DEVICE_ATTR_RW(rfi_restriction_data_rate);
+static DEVICE_ATTR_RW(rfi_restriction_data_rate_base);
 static DEVICE_ATTR_RO(ddr_data_rate_point_0);
 static DEVICE_ATTR_RO(ddr_data_rate_point_1);
 static DEVICE_ATTR_RO(ddr_data_rate_point_2);
@@ -248,6 +251,7 @@ static struct attribute *dvfs_attrs[] = {
 	&dev_attr_rfi_restriction_run_busy.attr,
 	&dev_attr_rfi_restriction_err_code.attr,
 	&dev_attr_rfi_restriction_data_rate.attr,
+	&dev_attr_rfi_restriction_data_rate_base.attr,
 	&dev_attr_ddr_data_rate_point_0.attr,
 	&dev_attr_ddr_data_rate_point_1.attr,
 	&dev_attr_ddr_data_rate_point_2.attr,
diff --git a/drivers/usb/common/ulpi.c b/drivers/usb/common/ulpi.c
index 60e8174686a1..d7c8461976ce 100644
--- a/drivers/usb/common/ulpi.c
+++ b/drivers/usb/common/ulpi.c
@@ -207,7 +207,7 @@ static int ulpi_read_id(struct ulpi *ulpi)
 	/* Test the interface */
 	ret = ulpi_write(ulpi, ULPI_SCRATCH, 0xaa);
 	if (ret < 0)
-		return ret;
+		goto err;

 	ret = ulpi_read(ulpi, ULPI_SCRATCH);
 	if (ret < 0)
diff --git a/drivers/usb/dwc3/dwc3-xilinx.c b/drivers/usb/dwc3/dwc3-xilinx.c
index 8607d4c23283..0745e9f11b2e 100644
--- a/drivers/usb/dwc3/dwc3-xilinx.c
+++ b/drivers/usb/dwc3/dwc3-xilinx.c
@@ -13,6 +13,7 @@
 #include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
+#include <linux/gpio/consumer.h>
 #include <linux/of_gpio.h>
 #include <linux/of_platform.h>
 #include <linux/pm_runtime.h>
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 789976567f9f..89dcfac01235 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1727,6 +1727,7 @@ static int __dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, bool int
 	else if (!ret)
 		dep->flags |= DWC3_EP_END_TRANSFER_PENDING;

+	dep->flags &= ~DWC3_EP_DELAY_STOP;
 	return ret;
 }

@@ -3732,8 +3733,10 @@ void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
 	if (dep->number <= 1 && dwc->ep0state != EP0_DATA_PHASE)
 		return;

+	if (interrupt && (dep->flags & DWC3_EP_DELAY_STOP))
+		return;
+
 	if (!(dep->flags & DWC3_EP_TRANSFER_STARTED) ||
-	    (dep->flags & DWC3_EP_DELAY_STOP) ||
 	    (dep->flags & DWC3_EP_END_TRANSFER_PENDING))
 		return;
diff --git a/drivers/usb/fotg210/fotg210-core.c b/drivers/usb/fotg210/fotg210-core.c
index 8a54edf921ac..ee740a6da463 100644
--- a/drivers/usb/fotg210/fotg210-core.c
+++ b/drivers/usb/fotg210/fotg210-core.c
@@ -144,10 +144,7 @@ static struct platform_driver fotg210_driver = {

 static int __init fotg210_init(void)
 {
-	if (usb_disabled())
-		return -ENODEV;
-
-	if (IS_ENABLED(CONFIG_USB_FOTG210_HCD))
+	if (IS_ENABLED(CONFIG_USB_FOTG210_HCD) && !usb_disabled())
 		fotg210_hcd_init();
 	return platform_driver_register(&fotg210_driver);
 }
diff --git a/drivers/usb/fotg210/fotg210-udc.c b/drivers/usb/fotg210/fotg210-udc.c
index 66e1b7ee3346..87cca81bf4ac 100644
--- a/drivers/usb/fotg210/fotg210-udc.c
+++ b/drivers/usb/fotg210/fotg210-udc.c
@@ -1201,6 +1201,8 @@ int fotg210_udc_probe(struct platform_device *pdev)
 		dev_info(dev, "found and initialized PHY\n");
 	}

+	ret = -ENOMEM;
+
 	for (i = 0; i < FOTG210_MAX_NUM_EP; i++) {
 		fotg210->ep[i] = kzalloc(sizeof(struct fotg210_ep), GFP_KERNEL);
 		if (!fotg210->ep[i])
diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
index 6af9fdbb86b7..058fbe28107e 100644
--- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
@@ -116,8 +116,9 @@ int mlx5_vdpa_create_mkey(struct mlx5_vdpa_dev *mvdev, u32 *mkey, u32 *in,
 			  int inlen);
 int mlx5_vdpa_destroy_mkey(struct mlx5_vdpa_dev *mvdev, u32 mkey);
 int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
-			     bool *change_map);
-int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb);
+			     bool *change_map, unsigned int asid);
+int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
+			unsigned int asid);
 void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev);

 #define mlx5_vdpa_warn(__dev, format, ...) \
diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c
index a639b9208d41..0a1e0b0dc37e 100644
--- a/drivers/vdpa/mlx5/core/mr.c
+++ b/drivers/vdpa/mlx5/core/mr.c
@@ -311,7 +311,6 @@ static int add_direct_chain(struct mlx5_vdpa_dev *mvdev, u64 start, u64 size, u8
 	u64 st;
 	u64 sz;
 	int err;
-	int i = 0;

 	st = start;
 	while (size) {
@@ -336,7 +335,6 @@ static int add_direct_chain(struct mlx5_vdpa_dev *mvdev, u64 start, u64 size, u8
 		mr->num_directs++;
 		mr->num_klms++;
 		st += sz;
-		i++;
 	}
 	list_splice_tail(&tmp, &mr->head);
 	return 0;
@@ -511,7 +509,8 @@ out:
 	mutex_unlock(&mr->mkey_mtx);
 }

-static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
+static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
+				struct vhost_iotlb *iotlb, unsigned int asid)
 {
 	struct mlx5_vdpa_mr *mr = &mvdev->mr;
 	int err;
@@ -519,42 +518,49 @@ static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb
 	if (mr->initialized)
 		return 0;

-	if (iotlb)
-		err = create_user_mr(mvdev, iotlb);
-	else
-		err = create_dma_mr(mvdev, mr);
+	if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] == asid) {
+		if (iotlb)
+			err = create_user_mr(mvdev, iotlb);
+		else
+			err = create_dma_mr(mvdev, mr);

-	if (err)
-		return err;
+		if (err)
+			return err;
+	}

-	err = dup_iotlb(mvdev, iotlb);
-	if (err)
-		goto out_err;
+	if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] == asid) {
+		err = dup_iotlb(mvdev, iotlb);
+		if (err)
+			goto out_err;
+	}

 	mr->initialized = true;
 	return 0;

 out_err:
-	if (iotlb)
-		destroy_user_mr(mvdev, mr);
-	else
-		destroy_dma_mr(mvdev, mr);
+	if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] == asid) {
+		if (iotlb)
+			destroy_user_mr(mvdev, mr);
+		else
+			destroy_dma_mr(mvdev, mr);
+	}

 	return err;
 }

-int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
+int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
+			unsigned int asid)
 {
 	int err;

 	mutex_lock(&mvdev->mr.mkey_mtx);
-	err = _mlx5_vdpa_create_mr(mvdev, iotlb);
+	err = _mlx5_vdpa_create_mr(mvdev, iotlb, asid);
 	mutex_unlock(&mvdev->mr.mkey_mtx);

 	return err;
 }

 int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
-			     bool *change_map)
+			     bool *change_map, unsigned int asid)
 {
 	struct mlx5_vdpa_mr *mr = &mvdev->mr;
 	int err = 0;
@@ -566,7 +572,7 @@ int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *io
 		*change_map = true;
 	}
 	if (!*change_map)
-		err = _mlx5_vdpa_create_mr(mvdev, iotlb);
+		err = _mlx5_vdpa_create_mr(mvdev, iotlb, asid);
 	mutex_unlock(&mr->mkey_mtx);

 	return err;
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index 90913365def4..3a6dbbc6440d 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -1468,11 +1468,13 @@ static int mlx5_vdpa_add_mac_vlan_rules(struct mlx5_vdpa_net *ndev, u8 *mac,
 	dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v, outer_headers.dmac_47_16);
 	eth_broadcast_addr(dmac_c);
 	ether_addr_copy(dmac_v, mac);
-	MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
+	if (ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VLAN)) {
+		MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
+		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, first_vid);
+	}
 	if (tagged) {
 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
-		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, first_vid);
-		MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, vid);
+		MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, vid);
 	}
 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
 	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
@@ -1684,7 +1686,7 @@ static virtio_net_ctrl_ack handle_ctrl_mac(struct mlx5_vdpa_dev *mvdev, u8 cmd)

 		/* Need recreate the flow table entry, so that the packet could forward back */
-		mac_vlan_del(ndev, ndev->config.mac, 0, false);
+		mac_vlan_del(ndev, mac_back, 0, false);

 		if (mac_vlan_add(ndev, ndev->config.mac, 0, false)) {
 			mlx5_vdpa_warn(mvdev, "failed to insert forward rules, try to restore\n");
@@ -1821,6 +1823,9 @@ static virtio_net_ctrl_ack handle_ctrl_vlan(struct mlx5_vdpa_dev *mvdev, u8 cmd)
 	size_t read;
 	u16 id;

+	if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VLAN)))
+		return status;
+
 	switch (cmd) {
 	case VIRTIO_NET_CTRL_VLAN_ADD:
 		read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, &vlan, sizeof(vlan));
@@ -2389,7 +2394,8 @@ static void restore_channels_info(struct mlx5_vdpa_net *ndev)
 	}
 }

-static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
+static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev,
+				struct vhost_iotlb *iotlb, unsigned int asid)
 {
 	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
 	int err;
@@ -2401,7 +2407,7 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb
 	teardown_driver(ndev);
 	mlx5_vdpa_destroy_mr(mvdev);
-	err = mlx5_vdpa_create_mr(mvdev, iotlb);
+	err = mlx5_vdpa_create_mr(mvdev, iotlb, asid);
 	if (err)
 		goto err_mr;
@@ -2582,7 +2588,7 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
 	++mvdev->generation;

 	if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
-		if (mlx5_vdpa_create_mr(mvdev, NULL))
+		if (mlx5_vdpa_create_mr(mvdev, NULL, 0))
 			mlx5_vdpa_warn(mvdev, "create MR failed\n");
 	}
 	up_write(&ndev->reslock);
@@ -2618,41 +2624,20 @@ static u32 mlx5_vdpa_get_generation(struct vdpa_device *vdev)
 	return mvdev->generation;
 }

-static int set_map_control(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
-{
-	u64 start = 0ULL, last = 0ULL - 1;
-	struct vhost_iotlb_map *map;
-	int err = 0;
-
-	spin_lock(&mvdev->cvq.iommu_lock);
-	vhost_iotlb_reset(mvdev->cvq.iotlb);
-
-	for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
-	     map = vhost_iotlb_itree_next(map, start, last)) {
-		err = vhost_iotlb_add_range(mvdev->cvq.iotlb, map->start,
-					    map->last, map->addr, map->perm);
-		if (err)
-			goto out;
-	}
-
-out:
-	spin_unlock(&mvdev->cvq.iommu_lock);
-	return err;
-}
-
-static int set_map_data(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
+static int set_map_data(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
+			unsigned int asid)
 {
 	bool change_map;
 	int err;

-	err = mlx5_vdpa_handle_set_map(mvdev, iotlb, &change_map);
+	err = mlx5_vdpa_handle_set_map(mvdev, iotlb, &change_map, asid);
 	if (err) {
 		mlx5_vdpa_warn(mvdev, "set map failed(%d)\n", err);
 		return err;
 	}

 	if (change_map)
-		err = mlx5_vdpa_change_map(mvdev, iotlb);
+		err = mlx5_vdpa_change_map(mvdev, iotlb, asid);

 	return err;
 }
@@ -2665,16 +2650,7 @@ static int mlx5_vdpa_set_map(struct vdpa_device *vdev, unsigned int asid,
 	int err = -EINVAL;

 	down_write(&ndev->reslock);
-	if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] == asid) {
-		err = set_map_data(mvdev, iotlb);
-		if (err)
-			goto out;
-	}
-
-	if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] == asid)
-		err = set_map_control(mvdev, iotlb);
-
-out:
+	err = set_map_data(mvdev, iotlb, asid);
 	up_write(&ndev->reslock);
 	return err;
 }
@@ -2840,8 +2816,8 @@ static int mlx5_vdpa_suspend(struct vdpa_device *vdev)
 	int i;

 	down_write(&ndev->reslock);
-	mlx5_notifier_unregister(mvdev->mdev, &ndev->nb);
 	ndev->nb_registered = false;
+	mlx5_notifier_unregister(mvdev->mdev, &ndev->nb);
 	flush_workqueue(ndev->mvdev.wq);
 	for (i = 0; i < ndev->cur_num_vqs; i++) {
 		mvq = &ndev->vqs[i];
@@ -3019,7 +2995,7 @@ static void update_carrier(struct work_struct *work)
 	else
 		ndev->config.status &= cpu_to_mlx5vdpa16(mvdev, ~VIRTIO_NET_S_LINK_UP);

-	if (ndev->config_cb.callback)
+	if (ndev->nb_registered && ndev->config_cb.callback)
 		ndev->config_cb.callback(ndev->config_cb.private);

 	kfree(wqent);
@@ -3036,21 +3012,13 @@ static int event_handler(struct notifier_block *nb, unsigned long event, void *p
 		switch (eqe->sub_type) {
 		case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
 		case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
-			down_read(&ndev->reslock);
-			if (!ndev->nb_registered) {
-				up_read(&ndev->reslock);
-				return NOTIFY_DONE;
-			}
 			wqent = kzalloc(sizeof(*wqent), GFP_ATOMIC);
-			if (!wqent) {
-				up_read(&ndev->reslock);
+			if (!wqent)
 				return NOTIFY_DONE;
-			}
 			wqent->mvdev = &ndev->mvdev;
 			INIT_WORK(&wqent->work, update_carrier);
 			queue_work(ndev->mvdev.wq, &wqent->work);
-			up_read(&ndev->reslock);
 			ret = NOTIFY_OK;
 			break;
 		default:
@@ -3185,7 +3153,7 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
 		goto err_mpfs;

 	if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
-		err = mlx5_vdpa_create_mr(mvdev, NULL);
+		err = mlx5_vdpa_create_mr(mvdev, NULL, 0);
 		if (err)
 			goto err_res;
 	}
@@ -3237,8 +3205,8 @@ static void mlx5_vdpa_dev_del(struct vdpa_mgmt_dev *v_mdev, struct vdpa_device *
 	struct workqueue_struct *wq;

 	if (ndev->nb_registered) {
-		mlx5_notifier_unregister(mvdev->mdev, &ndev->nb);
 		ndev->nb_registered = false;
+		mlx5_notifier_unregister(mvdev->mdev, &ndev->nb);
 	}
 	wq = mvdev->wq;
 	mvdev->wq = NULL;
diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
index febdc99b51a7..8ef7aa1365cc 100644
--- a/drivers/vdpa/vdpa.c
+++ b/drivers/vdpa/vdpa.c
@@ -855,7 +855,7 @@ static int vdpa_dev_net_config_fill(struct vdpa_device *vdev, struct sk_buff *ms
 	features_device = vdev->config->get_device_features(vdev);

-	if (nla_put_u64_64bit(msg, VDPA_ATTR_VDPA_DEV_SUPPORTED_FEATURES, features_device,
+	if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_FEATURES, features_device,
 			      VDPA_ATTR_PAD))
 		return -EMSGSIZE;
@@ -935,7 +935,6 @@ static int vdpa_fill_stats_rec(struct vdpa_device *vdev, struct sk_buff *msg,
 {
 	struct virtio_net_config config = {};
 	u64 features;
-	u16 max_vqp;
 	u8 status;
 	int err;
@@ -946,15 +945,15 @@ static int vdpa_fill_stats_rec(struct vdpa_device *vdev, struct sk_buff *msg,
 	}
 	vdpa_get_config_unlocked(vdev, 0, &config, sizeof(config));

-	max_vqp = __virtio16_to_cpu(true, config.max_virtqueue_pairs);
-	if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MAX_VQP, max_vqp))
-		return -EMSGSIZE;
-
 	features = vdev->config->get_driver_features(vdev);
 	if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_NEGOTIATED_FEATURES, features,
 			      VDPA_ATTR_PAD))
 		return -EMSGSIZE;

+	err = vdpa_dev_net_mq_config_fill(msg, features, &config);
+	if (err)
+		return err;
+
 	if (nla_put_u32(msg, VDPA_ATTR_DEV_QUEUE_INDEX, index))
 		return -EMSGSIZE;
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
index b071f0d842fb..cb88891b44a8 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
@@ -67,8 +67,7 @@ static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx)
 {
 	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

-	vringh_init_iotlb(&vq->vring, vdpasim->dev_attr.supported_features,
-			  VDPASIM_QUEUE_MAX, false,
+	vringh_init_iotlb(&vq->vring, vdpasim->features, vq->num, false,
 			  (struct vring_desc *)(uintptr_t)vq->desc_addr,
 			  (struct vring_avail *)
 			  (uintptr_t)vq->driver_addr,
@@ -690,7 +689,9 @@ static void vdpasim_free(struct vdpa_device *vdpa)
 	}

 	kvfree(vdpasim->buffer);
-	vhost_iotlb_free(vdpasim->iommu);
+	for (i = 0; i < vdpasim->dev_attr.nas; i++)
+		vhost_iotlb_reset(&vdpasim->iommu[i]);
+	kfree(vdpasim->iommu);
 	kfree(vdpasim->vqs);
 	kfree(vdpasim->config);
 }
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
index c6db1a1baf76..f745926237a8 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
@@ -427,8 +427,10 @@ static int __init vdpasim_blk_init(void)
 	int ret;

 	ret = device_register(&vdpasim_blk_mgmtdev);
-	if (ret)
+	if (ret) {
+		put_device(&vdpasim_blk_mgmtdev);
 		return ret;
+	}

 	ret = vdpa_mgmtdev_register(&mgmt_dev);
 	if (ret)
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
index c3cb225ea469..584b975a98a7 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
@@ -62,6 +62,9 @@ static bool receive_filter(struct vdpasim *vdpasim, size_t len)
 	if (len < ETH_ALEN + hdr_len)
 		return false;

+	if (is_broadcast_ether_addr(vdpasim->buffer + hdr_len) ||
+	    is_multicast_ether_addr(vdpasim->buffer + hdr_len))
+		return true;
 	if (!strncmp(vdpasim->buffer + hdr_len, vio_config->mac, ETH_ALEN))
 		return true;

@@ -305,8 +308,10 @@ static int __init vdpasim_net_init(void)
 	int ret;

 	ret = device_register(&vdpasim_net_mgmtdev);
-	if (ret)
+	if (ret) {
+		put_device(&vdpasim_net_mgmtdev);
 		return ret;
+	}

 	ret = vdpa_mgmtdev_register(&mgmt_dev);
 	if (ret)
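The vdpa_sim_net receive_filter() hunk above starts accepting broadcast and multicast destinations via the <linux/etherdevice.h> helpers. A rough userspace sketch of what those two helpers test (the helper bodies mirror the kernel definitions; the main() harness and its sample addresses are purely illustrative):

/*
 * Userspace approximation of the linux/etherdevice.h checks used in the
 * vdpa_sim_net hunk above; the kernel versions are inline helpers taking
 * a const u8 *, this harness only illustrates their semantics.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6

static bool is_multicast_ether_addr(const uint8_t *addr)
{
	return addr[0] & 0x01;	/* I/G bit set: group (multicast) address */
}

static bool is_broadcast_ether_addr(const uint8_t *addr)
{
	/* broadcast is the all-ones address, which is also a multicast address */
	return (addr[0] & addr[1] & addr[2] & addr[3] & addr[4] & addr[5]) == 0xff;
}

int main(void)
{
	const uint8_t bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	const uint8_t ucast[ETH_ALEN] = { 0x52, 0x54, 0x00, 0x12, 0x34, 0x56 };

	printf("bcast: multicast=%d broadcast=%d\n",
	       is_multicast_ether_addr(bcast), is_broadcast_ether_addr(bcast));
	printf("ucast: multicast=%d broadcast=%d\n",
	       is_multicast_ether_addr(ucast), is_broadcast_ether_addr(ucast));
	return 0;
}

Since the broadcast address is all-ones, every frame that passes the broadcast test also passes the multicast (I/G bit) test; the filter simply checks both explicitly.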
diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
index 0dd3c1f291da..0c3b48616a9f 100644
--- a/drivers/vdpa/vdpa_user/vduse_dev.c
+++ b/drivers/vdpa/vdpa_user/vduse_dev.c
@@ -1440,6 +1440,9 @@ static bool vduse_validate_config(struct vduse_dev_config *config)
 	if (config->config_size > PAGE_SIZE)
 		return false;

+	if (config->vq_num > 0xffff)
+		return false;
+
 	if (!device_is_allowed(config->device_id))
 		return false;
diff --git a/drivers/vdpa/virtio_pci/vp_vdpa.c b/drivers/vdpa/virtio_pci/vp_vdpa.c
index d448db0c4de3..8fe267ca3e76 100644
--- a/drivers/vdpa/virtio_pci/vp_vdpa.c
+++ b/drivers/vdpa/virtio_pci/vp_vdpa.c
@@ -647,7 +647,7 @@ static void vp_vdpa_remove(struct pci_dev *pdev)
 	mdev = vp_vdpa_mgtdev->mdev;
 	vp_modern_remove(mdev);
 	vdpa_mgmtdev_unregister(&vp_vdpa_mgtdev->mgtdev);
-	kfree(&vp_vdpa_mgtdev->mgtdev.id_table);
+	kfree(vp_vdpa_mgtdev->mgtdev.id_table);
 	kfree(mdev);
 	kfree(vp_vdpa_mgtdev);
 }
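The vp_vdpa_remove() hunk above is a plain pointer fix: kfree() must be handed the pointer that the allocator returned, not the address of the struct field that stores it. A minimal userspace analogue with invented struct names, using malloc()/free() in place of kmalloc()/kfree():

/*
 * Illustration of the vp_vdpa kfree() fix above; struct names are made up
 * for the example. The allocator must be given back exactly the pointer it
 * handed out, never the address of the variable holding that pointer.
 */
#include <stdlib.h>

struct id_entry {
	int vendor;
	int device;
};

struct mgmt_dev {
	struct id_entry *id_table;	/* separately allocated array */
};

int main(void)
{
	struct mgmt_dev m;

	m.id_table = calloc(2, sizeof(*m.id_table));
	if (!m.id_table)
		return 1;

	/* free(&m.id_table) would pass a stack address to the allocator: bug */
	free(m.id_table);		/* correct: the pointer calloc() returned */
	return 0;
}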
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index 166044642fd5..ec32f785dfde 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -65,6 +65,10 @@ static DEFINE_IDA(vhost_vdpa_ida);

 static dev_t vhost_vdpa_major;

+static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v,
+				   struct vhost_iotlb *iotlb, u64 start,
+				   u64 last, u32 asid);
+
 static inline u32 iotlb_to_asid(struct vhost_iotlb *iotlb)
 {
 	struct vhost_vdpa_as *as = container_of(iotlb, struct
@@ -135,7 +139,7 @@ static int vhost_vdpa_remove_as(struct vhost_vdpa *v, u32 asid)
 		return -EINVAL;

 	hlist_del(&as->hash_link);
-	vhost_iotlb_reset(&as->iotlb);
+	vhost_vdpa_iotlb_unmap(v, &as->iotlb, 0ULL, 0ULL - 1, asid);
 	kfree(as);

 	return 0;
@@ -683,10 +687,20 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
 	mutex_unlock(&d->mutex);
 	return r;
 }
+static void vhost_vdpa_general_unmap(struct vhost_vdpa *v,
+				     struct vhost_iotlb_map *map, u32 asid)
+{
+	struct vdpa_device *vdpa = v->vdpa;
+	const struct vdpa_config_ops *ops = vdpa->config;
+	if (ops->dma_map) {
+		ops->dma_unmap(vdpa, asid, map->start, map->size);
+	} else if (ops->set_map == NULL) {
+		iommu_unmap(v->domain, map->start, map->size);
+	}
+}

-static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v,
-				struct vhost_iotlb *iotlb,
-				u64 start, u64 last)
+static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
+				u64 start, u64 last, u32 asid)
 {
 	struct vhost_dev *dev = &v->vdev;
 	struct vhost_iotlb_map *map;
@@ -703,13 +717,13 @@ static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v,
 			unpin_user_page(page);
 		}
 		atomic64_sub(PFN_DOWN(map->size), &dev->mm->pinned_vm);
+		vhost_vdpa_general_unmap(v, map, asid);
 		vhost_iotlb_map_free(iotlb, map);
 	}
 }

-static void vhost_vdpa_va_unmap(struct vhost_vdpa *v,
-				struct vhost_iotlb *iotlb,
-				u64 start, u64 last)
+static void vhost_vdpa_va_unmap(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
+				u64 start, u64 last, u32 asid)
 {
 	struct vhost_iotlb_map *map;
 	struct vdpa_map_file *map_file;
@@ -718,20 +732,21 @@ static void vhost_vdpa_va_unmap(struct vhost_vdpa *v,
 		map_file = (struct vdpa_map_file *)map->opaque;
 		fput(map_file->file);
 		kfree(map_file);
+		vhost_vdpa_general_unmap(v, map, asid);
 		vhost_iotlb_map_free(iotlb, map);
 	}
 }

 static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v,
-				   struct vhost_iotlb *iotlb,
-				   u64 start, u64 last)
+				   struct vhost_iotlb *iotlb, u64 start,
+				   u64 last, u32 asid)
 {
 	struct vdpa_device *vdpa = v->vdpa;

 	if (vdpa->use_va)
-		return vhost_vdpa_va_unmap(v, iotlb, start, last);
+		return vhost_vdpa_va_unmap(v, iotlb, start, last, asid);

-	return vhost_vdpa_pa_unmap(v, iotlb, start, last);
+	return vhost_vdpa_pa_unmap(v, iotlb, start, last, asid);
 }

 static int perm_to_iommu_flags(u32 perm)
@@ -798,17 +813,12 @@ static void vhost_vdpa_unmap(struct vhost_vdpa *v,
 	const struct vdpa_config_ops *ops = vdpa->config;
 	u32 asid = iotlb_to_asid(iotlb);

-	vhost_vdpa_iotlb_unmap(v, iotlb, iova, iova + size - 1);
+	vhost_vdpa_iotlb_unmap(v, iotlb, iova, iova + size - 1, asid);

-	if (ops->dma_map) {
-		ops->dma_unmap(vdpa, asid, iova, size);
-	} else if (ops->set_map) {
+	if (ops->set_map) {
 		if (!v->in_batch)
 			ops->set_map(vdpa, asid, iotlb);
-	} else {
-		iommu_unmap(v->domain, iova, size);
 	}
-
 	/* If we are in the middle of batch processing, delay the free
 	 * of AS until BATCH_END.
 	 */
@@ -1162,14 +1172,14 @@ static void vhost_vdpa_cleanup(struct vhost_vdpa *v)
 	struct vhost_vdpa_as *as;
 	u32 asid;

-	vhost_dev_cleanup(&v->vdev);
-	kfree(v->vdev.vqs);
-
 	for (asid = 0; asid < v->vdpa->nas; asid++) {
 		as = asid_to_as(v, asid);
 		if (as)
 			vhost_vdpa_remove_as(v, asid);
 	}
+
+	vhost_dev_cleanup(&v->vdev);
+	kfree(v->vdev.vqs);
 }

 static int vhost_vdpa_open(struct inode *inode, struct file *filep)
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 5c9fe3c9c364..cbe72bfd2f1f 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -2053,7 +2053,7 @@ static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
 	struct vhost_dev *dev = vq->dev;
 	struct vhost_iotlb *umem = dev->iotlb ? dev->iotlb : dev->umem;
 	struct iovec *_iov;
-	u64 s = 0;
+	u64 s = 0, last = addr + len - 1;
 	int ret = 0;

 	while ((u64)len > s) {
@@ -2063,7 +2063,7 @@ static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
 			break;
 		}

-		map = vhost_iotlb_itree_first(umem, addr, addr + len - 1);
+		map = vhost_iotlb_itree_first(umem, addr, last);
 		if (map == NULL || map->start > addr) {
 			if (umem != dev->iotlb) {
 				ret = -EFAULT;
diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
index c9f5c8ea3afb..33eb941fcf15 100644
--- a/drivers/vhost/vringh.c
+++ b/drivers/vhost/vringh.c
@@ -1102,7 +1102,7 @@ static int iotlb_translate(const struct vringh *vrh,
 	struct vhost_iotlb_map *map;
 	struct vhost_iotlb *iotlb = vrh->iotlb;
 	int ret = 0;
-	u64 s = 0;
+	u64 s = 0, last = addr + len - 1;

 	spin_lock(vrh->iotlb_lock);

@@ -1114,8 +1114,7 @@ static int iotlb_translate(const struct vringh *vrh,
 			break;
 		}

-		map = vhost_iotlb_itree_first(iotlb, addr,
-					      addr + len - 1);
+		map = vhost_iotlb_itree_first(iotlb, addr, last);
 		if (!map || map->start > addr) {
 			ret = -EINVAL;
 			break;
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index cd6f7776013a..a2b374372363 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -959,7 +959,14 @@ static int __init vhost_vsock_init(void)
 				  VSOCK_TRANSPORT_F_H2G);
 	if (ret < 0)
 		return ret;
-	return misc_register(&vhost_vsock_misc);
+
+	ret = misc_register(&vhost_vsock_misc);
+	if (ret) {
+		vsock_core_unregister(&vhost_transport.transport);
+		return ret;
+	}
+
+	return 0;
 };

 static void __exit vhost_vsock_exit(void)
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index df6e09f7d242..b2bed599e6c6 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -456,8 +456,8 @@ config FB_ATARI
 	  chipset found in Ataris.

 config FB_OF
-	bool "Open Firmware frame buffer device support"
-	depends on (FB = y) && PPC && (!PPC_PSERIES || PCI)
+	tristate "Open Firmware frame buffer device support"
+	depends on FB && PPC && (!PPC_PSERIES || PCI)
 	depends on !DRM_OFDRM
 	select APERTURE_HELPERS
 	select FB_CFB_FILLRECT
diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
index 0ccf5d401ecb..d59215a4992e 100644
--- a/drivers/video/fbdev/aty/atyfb_base.c
+++ b/drivers/video/fbdev/aty/atyfb_base.c
@@ -3192,8 +3192,7 @@ static void aty_init_lcd(struct atyfb_par *par, u32 bios_base)
 	 * which we print to the screen.
 	 */
 	id = *(u8 *)par->lcd_table;
-	strncpy(model, (char *)par->lcd_table+1, 24);
-	model[23] = 0;
+	strscpy(model, (char *)par->lcd_table+1, sizeof(model));

 	width = par->lcd_width = *(u16 *)(par->lcd_table+25);
 	height = par->lcd_height = *(u16 *)(par->lcd_table+27);
diff --git a/drivers/video/fbdev/matrox/matroxfb_base.c b/drivers/video/fbdev/matrox/matroxfb_base.c
index 0d3cee7ae726..a043a737ea9f 100644
--- a/drivers/video/fbdev/matrox/matroxfb_base.c
+++ b/drivers/video/fbdev/matrox/matroxfb_base.c
@@ -1378,8 +1378,8 @@ static struct video_board vbG200 = {
 	.lowlevel = &matrox_G100
 };
 static struct video_board vbG200eW = {
-	.maxvram = 0x100000,
-	.maxdisplayable = 0x800000,
+	.maxvram = 0x1000000,
+	.maxdisplayable = 0x0800000,
 	.accelID = FB_ACCEL_MATROX_MGAG200,
 	.lowlevel = &matrox_G100
 };
diff --git a/drivers/video/fbdev/omap/omapfb_main.c b/drivers/video/fbdev/omap/omapfb_main.c
index 17cda5765683..1f3df2055ff0 100644
--- a/drivers/video/fbdev/omap/omapfb_main.c
+++ b/drivers/video/fbdev/omap/omapfb_main.c
@@ -1447,7 +1447,7 @@ static int fbinfo_init(struct omapfb_device *fbdev, struct fb_info *info)
 	info->fbops = &omapfb_ops;
 	info->flags = FBINFO_FLAG_DEFAULT;

-	strncpy(fix->id, MODULE_NAME, sizeof(fix->id));
+	strscpy(fix->id, MODULE_NAME, sizeof(fix->id));

 	info->pseudo_palette = fbdev->pseudo_palette;

@@ -1573,8 +1573,7 @@ static int omapfb_find_ctrl(struct omapfb_device *fbdev)

 	fbdev->ctrl = NULL;

-	strncpy(name, conf->lcd.ctrl_name, sizeof(name) - 1);
-	name[sizeof(name) - 1] = '\0';
+	strscpy(name, conf->lcd.ctrl_name, sizeof(name));

 	if (strcmp(name, "internal") == 0) {
 		fbdev->ctrl = fbdev->int_ctrl;
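The atyfb and omapfb hunks above convert strncpy() calls to strscpy(), which always NUL-terminates the destination and reports truncation, so the manual "buf[size - 1] = 0" lines are no longer needed. A minimal userspace stand-in that only mirrors that termination behaviour; the real helper lives in lib/string.c, and its -E2BIG return value is simplified to -1 here:

/*
 * Minimal userspace stand-in for the kernel's strscpy(), showing why the
 * explicit NUL-termination removed in the hunks above is unnecessary.
 * Returns the number of characters copied, or -1 on truncation
 * (the kernel helper returns -E2BIG).
 */
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

static ssize_t strscpy_like(char *dest, const char *src, size_t count)
{
	size_t len = 0;

	if (count == 0)
		return -1;

	while (len < count - 1 && src[len] != '\0')
		len++;			/* copy at most count - 1 bytes */
	memcpy(dest, src, len);
	dest[len] = '\0';		/* always NUL-terminated */

	return (src[len] == '\0') ? (ssize_t)len : -1;
}

int main(void)
{
	char model[8];

	printf("%zd '%s'\n", strscpy_like(model, "MGA-G200eW", sizeof(model)), model);
	printf("%zd '%s'\n", strscpy_like(model, "G200", sizeof(model)), model);
	return 0;
}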
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
index 54b0f034c2ed..7cddb7b8ae34 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
@@ -1536,22 +1536,28 @@ static void dsi_dump_dsidev_irqs(struct platform_device *dsidev,
 {
 	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
 	unsigned long flags;
-	struct dsi_irq_stats stats;
+	struct dsi_irq_stats *stats;
+
+	stats = kzalloc(sizeof(*stats), GFP_KERNEL);
+	if (!stats) {
+		seq_printf(s, "out of memory\n");
+		return;
+	}

 	spin_lock_irqsave(&dsi->irq_stats_lock, flags);

-	stats = dsi->irq_stats;
+	*stats = dsi->irq_stats;
 	memset(&dsi->irq_stats, 0, sizeof(dsi->irq_stats));
 	dsi->irq_stats.last_reset = jiffies;

 	spin_unlock_irqrestore(&dsi->irq_stats_lock, flags);

 	seq_printf(s, "period %u ms\n",
-			jiffies_to_msecs(jiffies - stats.last_reset));
+			jiffies_to_msecs(jiffies - stats->last_reset));

-	seq_printf(s, "irqs %d\n", stats.irq_count);
+	seq_printf(s, "irqs %d\n", stats->irq_count);
 #define PIS(x) \
-	seq_printf(s, "%-20s %10d\n", #x, stats.dsi_irqs[ffs(DSI_IRQ_##x)-1])
+	seq_printf(s, "%-20s %10d\n", #x, stats->dsi_irqs[ffs(DSI_IRQ_##x)-1])

 	seq_printf(s, "-- DSI%d interrupts --\n", dsi->module_id + 1);
 	PIS(VC0);
@@ -1575,10 +1581,10 @@ static void dsi_dump_dsidev_irqs(struct platform_device *dsidev,

 #define PIS(x) \
 	seq_printf(s, "%-20s %10d %10d %10d %10d\n", #x, \
-			stats.vc_irqs[0][ffs(DSI_VC_IRQ_##x)-1], \
-			stats.vc_irqs[1][ffs(DSI_VC_IRQ_##x)-1], \
-			stats.vc_irqs[2][ffs(DSI_VC_IRQ_##x)-1], \
-			stats.vc_irqs[3][ffs(DSI_VC_IRQ_##x)-1]);
+			stats->vc_irqs[0][ffs(DSI_VC_IRQ_##x)-1], \
+			stats->vc_irqs[1][ffs(DSI_VC_IRQ_##x)-1], \
+			stats->vc_irqs[2][ffs(DSI_VC_IRQ_##x)-1], \
+			stats->vc_irqs[3][ffs(DSI_VC_IRQ_##x)-1]);

 	seq_printf(s, "-- VC interrupts --\n");
 	PIS(CS);
@@ -1594,7 +1600,7 @@ static void dsi_dump_dsidev_irqs(struct platform_device *dsidev,

 #define PIS(x) \
 	seq_printf(s, "%-20s %10d\n", #x, \
-			stats.cio_irqs[ffs(DSI_CIO_IRQ_##x)-1]);
+			stats->cio_irqs[ffs(DSI_CIO_IRQ_##x)-1]);

 	seq_printf(s, "-- CIO interrupts --\n");
 	PIS(ERRSYNCESC1);
@@ -1618,6 +1624,8 @@ static void dsi_dump_dsidev_irqs(struct platform_device *dsidev,
 	PIS(ULPSACTIVENOT_ALL0);
 	PIS(ULPSACTIVENOT_ALL1);
 #undef PIS
+
+	kfree(stats);
 }

 static void dsi1_dump_irqs(struct seq_file *s)
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index 828ced060742..b9a80aedee1b 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -15,7 +15,7 @@ static ssize_t device_show(struct device *_d,
 			   struct device_attribute *attr, char *buf)
 {
 	struct virtio_device *dev = dev_to_virtio(_d);
-	return sprintf(buf, "0x%04x\n", dev->id.device);
+	return sysfs_emit(buf, "0x%04x\n", dev->id.device);
 }
 static DEVICE_ATTR_RO(device);

@@ -23,7 +23,7 @@ static ssize_t vendor_show(struct device *_d,
 			   struct device_attribute *attr, char *buf)
 {
 	struct virtio_device *dev = dev_to_virtio(_d);
-	return sprintf(buf, "0x%04x\n", dev->id.vendor);
+	return sysfs_emit(buf, "0x%04x\n", dev->id.vendor);
 }
 static DEVICE_ATTR_RO(vendor);

@@ -31,7 +31,7 @@ static ssize_t status_show(struct device *_d,
 			   struct device_attribute *attr, char *buf)
 {
 	struct virtio_device *dev = dev_to_virtio(_d);
-	return sprintf(buf, "0x%08x\n", dev->config->get_status(dev));
+	return sysfs_emit(buf, "0x%08x\n", dev->config->get_status(dev));
 }
 static DEVICE_ATTR_RO(status);

@@ -39,7 +39,7 @@ static ssize_t modalias_show(struct device *_d,
 			     struct device_attribute *attr, char *buf)
 {
 	struct virtio_device *dev = dev_to_virtio(_d);
-	return sprintf(buf, "virtio:d%08Xv%08X\n",
+	return sysfs_emit(buf, "virtio:d%08Xv%08X\n",
 		       dev->id.device, dev->id.vendor);
 }
 static DEVICE_ATTR_RO(modalias);

@@ -54,9 +54,9 @@ static ssize_t features_show(struct device *_d,
 	/* We actually represent this as a bitstring, as it could be
 	 * arbitrary length in future. */
 	for (i = 0; i < sizeof(dev->features)*8; i++)
-		len += sprintf(buf+len, "%c",
+		len += sysfs_emit_at(buf, len, "%c",
 			       __virtio_test_bit(dev, i) ? '1' : '0');
-	len += sprintf(buf+len, "\n");
+	len += sysfs_emit_at(buf, len, "\n");
 	return len;
 }
 static DEVICE_ATTR_RO(features);
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index c3b9f2761849..9e496e288cfa 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -303,14 +303,14 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
 	int err;

 	if (index >= vp_modern_get_num_queues(mdev))
-		return ERR_PTR(-ENOENT);
+		return ERR_PTR(-EINVAL);

 	/* Check if queue is either not available or already active. */
 	num = vp_modern_get_queue_size(mdev, index);
 	if (!num || vp_modern_get_queue_enable(mdev, index))
 		return ERR_PTR(-ENOENT);

-	if (num & (num - 1)) {
+	if (!is_power_of_2(num)) {
 		dev_warn(&vp_dev->pci_dev->dev, "bad queue size %u", num);
 		return ERR_PTR(-EINVAL);
 	}
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 2e7689bb933b..723c4e29e1d3 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -1052,7 +1052,7 @@ static int vring_alloc_queue_split(struct vring_virtqueue_split *vring_split,
 	dma_addr_t dma_addr;

 	/* We assume num is a power of 2. */
-	if (num & (num - 1)) {
+	if (!is_power_of_2(num)) {
 		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
 		return -EINVAL;
 	}
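The last two virtio hunks replace the open-coded "num & (num - 1)" test with is_power_of_2() from <linux/log2.h>. For non-zero n the two checks are equivalent (a power of two has exactly one set bit), and the helper additionally rejects 0. A standalone sketch of that equivalence; the popcount reference and the main() harness are invented for the demonstration:

/*
 * Standalone check that is_power_of_2() (as defined in linux/log2.h)
 * matches a "exactly one bit set" reference, i.e. the property the
 * open-coded `num & (num - 1)` test in the old virtio code relied on.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static bool is_power_of_2(unsigned long n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

static int popcount_ref(unsigned long n)
{
	int bits = 0;

	while (n) {
		bits += n & 1;
		n >>= 1;
	}
	return bits;
}

int main(void)
{
	for (unsigned long n = 0; n < 4096; n++)
		assert(is_power_of_2(n) == (popcount_ref(n) == 1));

	printf("256 -> %d, 384 -> %d, 0 -> %d\n",
	       is_power_of_2(256), is_power_of_2(384), is_power_of_2(0));
	return 0;
}

Using the named helper also makes the intent of the virtqueue-size check explicit instead of relying on the bit trick being recognised by the reader.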