Diffstat (limited to 'drivers'): 1048 files changed, 36733 insertions, 9535 deletions
diff --git a/drivers/accessibility/speakup/speakup_dectlk.c b/drivers/accessibility/speakup/speakup_dectlk.c index 580ec796816b..78ca4987e619 100644 --- a/drivers/accessibility/speakup/speakup_dectlk.c +++ b/drivers/accessibility/speakup/speakup_dectlk.c @@ -44,6 +44,7 @@ static struct var_t vars[] = { { CAPS_START, .u.s = {"[:dv ap 160] " } }, { CAPS_STOP, .u.s = {"[:dv ap 100 ] " } }, { RATE, .u.n = {"[:ra %d] ", 180, 75, 650, 0, 0, NULL } }, + { PITCH, .u.n = {"[:dv ap %d] ", 122, 50, 350, 0, 0, NULL } }, { INFLECTION, .u.n = {"[:dv pr %d] ", 100, 0, 10000, 0, 0, NULL } }, { VOL, .u.n = {"[:dv g5 %d] ", 86, 60, 86, 0, 0, NULL } }, { PUNCT, .u.n = {"[:pu %c] ", 0, 0, 2, 0, 0, "nsa" } }, diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c index 3b23fb775ac4..f2f8f05662de 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c @@ -1361,9 +1361,17 @@ static void __init arm_smmu_v3_pmcg_init_resources(struct resource *res, res[0].start = pmcg->page0_base_address; res[0].end = pmcg->page0_base_address + SZ_4K - 1; res[0].flags = IORESOURCE_MEM; - res[1].start = pmcg->page1_base_address; - res[1].end = pmcg->page1_base_address + SZ_4K - 1; - res[1].flags = IORESOURCE_MEM; + /* + * The initial version in DEN0049C lacked a way to describe register + * page 1, which makes it broken for most PMCG implementations; in + * that case, just let the driver fail gracefully if it expects to + * find a second memory resource. + */ + if (node->revision > 0) { + res[1].start = pmcg->page1_base_address; + res[1].end = pmcg->page1_base_address + SZ_4K - 1; + res[1].flags = IORESOURCE_MEM; + } if (pmcg->overflow_gsiv) acpi_iort_register_irq(pmcg->overflow_gsiv, "overflow", diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 0077d2c85df8..46710380a402 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -2066,6 +2066,16 @@ bool acpi_ec_dispatch_gpe(void) return true; /* + * Cancel the SCI wakeup and process all pending events in case there + * are any wakeup ones in there. + * + * Note that if any non-EC GPEs are active at this point, the SCI will + * retrigger after the rearming in acpi_s2idle_wake(), so no events + * should be missed by canceling the wakeup here. + */ + pm_system_cancel_wakeup(); + + /* * Dispatch the EC GPE in-band, but do not report wakeup in any case * to allow the caller to process events properly after that. */ diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 86560a28751b..f8e9fa82cb9b 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -96,6 +96,11 @@ static const struct dmi_system_id processor_power_dmi_table[] = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")}, (void *)1}, + /* T40 can not handle C3 idle state */ + { set_max_cstate, "IBM ThinkPad T40", { + DMI_MATCH(DMI_SYS_VENDOR, "IBM"), + DMI_MATCH(DMI_PRODUCT_NAME, "23737CU")}, + (void *)2}, {}, }; diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index a60ff5dfed3a..d4fbea91ab6b 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -736,21 +736,15 @@ bool acpi_s2idle_wake(void) return true; } - /* Check non-EC GPE wakeups and dispatch the EC GPE. */ + /* + * Check non-EC GPE wakeups and if there are none, cancel the + * SCI-related wakeup and dispatch the EC GPE. 
+ */ if (acpi_ec_dispatch_gpe()) { pm_pr_dbg("ACPI non-EC GPE wakeup\n"); return true; } - /* - * Cancel the SCI wakeup and process all pending events in case - * there are any wakeup ones in there. - * - * Note that if any non-EC GPEs are active at this point, the - * SCI will retrigger after the rearming below, so no events - * should be missed by canceling the wakeup here. - */ - pm_system_cancel_wakeup(); acpi_os_wait_events_complete(); /* @@ -764,6 +758,7 @@ bool acpi_s2idle_wake(void) return true; } + pm_wakeup_clear(acpi_sci_irq); rearm_wake_irq(acpi_sci_irq); } diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c index 0741a4933f62..34600b5b9d8e 100644 --- a/drivers/acpi/tables.c +++ b/drivers/acpi/tables.c @@ -400,7 +400,7 @@ int __init_or_acpilib acpi_table_parse_entries_array( acpi_get_table(id, instance, &table_header); if (!table_header) { - pr_warn("%4.4s not present\n", id); + pr_debug("%4.4s not present\n", id); return -ENODEV; } diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c index abc06e7f89d8..ed889f827f53 100644 --- a/drivers/acpi/x86/s2idle.c +++ b/drivers/acpi/x86/s2idle.c @@ -424,15 +424,11 @@ static int lps0_device_attach(struct acpi_device *adev, mem_sleep_current = PM_SUSPEND_TO_IDLE; /* - * Some Intel based LPS0 systems, like ASUS Zenbook UX430UNR/i7-8550U don't - * use intel-hid or intel-vbtn but require the EC GPE to be enabled while - * suspended for certain wakeup devices to work, so mark it as wakeup-capable. - * - * Only enable on !AMD as enabling this universally causes problems for a number - * of AMD based systems. + * Some LPS0 systems, like ASUS Zenbook UX430UNR/i7-8550U, require the + * EC GPE to be enabled while suspended for certain wakeup devices to + * work, so mark it as wakeup-capable. */ - if (!acpi_s2idle_vendor_amd()) - acpi_ec_mark_gpe_for_wake(); + acpi_ec_mark_gpe_for_wake(); return 0; } diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index ba9273f80069..0c854aebfe0b 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -4029,6 +4029,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { /* devices that don't properly handle TRIM commands */ { "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM, }, + { "M88V29*", NULL, ATA_HORKAGE_NOTRIM, }, /* * As defined, the DRAT (Deterministic Read After Trim) and RZAT diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c index 7abc7e04f656..6fa4a2faf49c 100644 --- a/drivers/ata/pata_hpt37x.c +++ b/drivers/ata/pata_hpt37x.c @@ -920,6 +920,20 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id) pci_write_config_byte(dev, 0x5a, irqmask); /* + * HPT371 chips physically have only one channel, the secondary one, + * but the primary channel registers do exist! Go figure... + * So, we manually disable the non-existing channel here + * (if the BIOS hasn't done this already). + */ + if (dev->device == PCI_DEVICE_ID_TTI_HPT371) { + u8 mcr1; + + pci_read_config_byte(dev, 0x50, &mcr1); + mcr1 &= ~0x04; + pci_write_config_byte(dev, 0x50, mcr1); + } + + /* * default to pci clock. make sure MA15/16 are set to output * to prevent drives having problems with 40-pin cables. 
Needed * for some drives such as IBM-DTLA which will not enter ready @@ -950,14 +964,14 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id) if ((freq >> 12) != 0xABCDE) { int i; - u8 sr; + u16 sr; u32 total = 0; dev_warn(&dev->dev, "BIOS has not set timing clocks\n"); /* This is the process the HPT371 BIOS is reported to use */ for (i = 0; i < 128; i++) { - pci_read_config_byte(dev, 0x78, &sr); + pci_read_config_word(dev, 0x78, &sr); total += sr & 0x1FF; udelay(15); } diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c index da0152116d9f..556034a15430 100644 --- a/drivers/ata/sata_fsl.c +++ b/drivers/ata/sata_fsl.c @@ -322,7 +322,7 @@ static void fsl_sata_set_irq_coalescing(struct ata_host *host, static ssize_t fsl_sata_intr_coalescing_show(struct device *dev, struct device_attribute *attr, char *buf) { - return sysfs_emit(buf, "%d %d\n", + return sysfs_emit(buf, "%u %u\n", intr_coalescing_count, intr_coalescing_ticks); } @@ -332,10 +332,8 @@ static ssize_t fsl_sata_intr_coalescing_store(struct device *dev, { unsigned int coalescing_count, coalescing_ticks; - if (sscanf(buf, "%d%d", - &coalescing_count, - &coalescing_ticks) != 2) { - printk(KERN_ERR "fsl-sata: wrong parameter format.\n"); + if (sscanf(buf, "%u%u", &coalescing_count, &coalescing_ticks) != 2) { + dev_err(dev, "fsl-sata: wrong parameter format.\n"); return -EINVAL; } @@ -359,7 +357,7 @@ static ssize_t fsl_sata_rx_watermark_show(struct device *dev, rx_watermark &= 0x1f; spin_unlock_irqrestore(&host->lock, flags); - return sysfs_emit(buf, "%d\n", rx_watermark); + return sysfs_emit(buf, "%u\n", rx_watermark); } static ssize_t fsl_sata_rx_watermark_store(struct device *dev, @@ -373,8 +371,8 @@ static ssize_t fsl_sata_rx_watermark_store(struct device *dev, void __iomem *csr_base = host_priv->csr_base; u32 temp; - if (sscanf(buf, "%d", &rx_watermark) != 1) { - printk(KERN_ERR "fsl-sata: wrong parameter format.\n"); + if (kstrtouint(buf, 10, &rx_watermark) < 0) { + dev_err(dev, "fsl-sata: wrong parameter format.\n"); return -EINVAL; } @@ -382,8 +380,8 @@ static ssize_t fsl_sata_rx_watermark_store(struct device *dev, temp = ioread32(csr_base + TRANSCFG); temp &= 0xffffffe0; iowrite32(temp | rx_watermark, csr_base + TRANSCFG); - spin_unlock_irqrestore(&host->lock, flags); + return strlen(buf); } diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c index 3bc3c314a467..4f67404fe64c 100644 --- a/drivers/atm/firestream.c +++ b/drivers/atm/firestream.c @@ -1676,6 +1676,8 @@ static int fs_init(struct fs_dev *dev) dev->hw_base = pci_resource_start(pci_dev, 0); dev->base = ioremap(dev->hw_base, 0x1000); + if (!dev->base) + return 1; reset_chip (dev); diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c index bc5a6ab6fa4b..1a50de39f5b5 100644 --- a/drivers/atm/nicstar.c +++ b/drivers/atm/nicstar.c @@ -861,7 +861,6 @@ static void ns_init_card_error(ns_dev *card, int error) static scq_info *get_scq(ns_dev *card, int size, u32 scd) { scq_info *scq; - int i; if (size != VBR_SCQSIZE && size != CBR_SCQSIZE) return NULL; @@ -875,9 +874,8 @@ static scq_info *get_scq(ns_dev *card, int size, u32 scd) kfree(scq); return NULL; } - scq->skb = kmalloc_array(size / NS_SCQE_SIZE, - sizeof(*scq->skb), - GFP_KERNEL); + scq->skb = kcalloc(size / NS_SCQE_SIZE, sizeof(*scq->skb), + GFP_KERNEL); if (!scq->skb) { dma_free_coherent(&card->pcidev->dev, 2 * size, scq->org, scq->dma); @@ -890,15 +888,11 @@ static scq_info *get_scq(ns_dev *card, int size, u32 scd) scq->last = scq->base + (scq->num_entries - 1); 
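/*
 * A minimal sketch (not part of the diff above) of the sysfs-store
 * pattern the sata_fsl hunks converge on: kstrtouint() for strict
 * unsigned parsing plus dev_err() for device-prefixed diagnostics.
 * The attribute name "watermark" and the 0x1f bound are illustrative
 * assumptions, not taken from the driver.
 */
#include <linux/device.h>
#include <linux/kernel.h>

static ssize_t watermark_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	unsigned int val;

	/* Rejects signs, overflow and trailing garbage, unlike sscanf("%u"). */
	if (kstrtouint(buf, 10, &val) < 0) {
		dev_err(dev, "wrong parameter format\n");
		return -EINVAL;
	}
	if (val > 0x1f)	/* illustrative bounds check */
		return -ERANGE;

	/*
	 * ... program the hardware, then report the bytes consumed.
	 * Returning count is the conventional idiom; the hunk above
	 * keeps the driver's existing strlen(buf) instead.
	 */
	return count;
}
static DEVICE_ATTR_WO(watermark);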
scq->tail = scq->last; scq->scd = scd; - scq->num_entries = size / NS_SCQE_SIZE; scq->tbd_count = 0; init_waitqueue_head(&scq->scqfull_waitq); scq->full = 0; spin_lock_init(&scq->lock); - for (i = 0; i < scq->num_entries; i++) - scq->skb[i] = NULL; - return scq; } diff --git a/drivers/auxdisplay/lcd2s.c b/drivers/auxdisplay/lcd2s.c index 38ba08628ccb..2578b2d45439 100644 --- a/drivers/auxdisplay/lcd2s.c +++ b/drivers/auxdisplay/lcd2s.c @@ -238,7 +238,7 @@ static int lcd2s_redefine_char(struct charlcd *lcd, char *esc) if (buf[1] > 7) return 1; - i = 0; + i = 2; shift = 0; value = 0; while (*esc && i < LCD2S_CHARACTER_SIZE + 2) { @@ -298,6 +298,10 @@ static int lcd2s_i2c_probe(struct i2c_client *i2c, I2C_FUNC_SMBUS_WRITE_BLOCK_DATA)) return -EIO; + lcd2s = devm_kzalloc(&i2c->dev, sizeof(*lcd2s), GFP_KERNEL); + if (!lcd2s) + return -ENOMEM; + /* Test, if the display is responding */ err = lcd2s_i2c_smbus_write_byte(i2c, LCD2S_CMD_DISPLAY_OFF); if (err < 0) @@ -307,12 +311,6 @@ static int lcd2s_i2c_probe(struct i2c_client *i2c, if (!lcd) return -ENOMEM; - lcd2s = kzalloc(sizeof(struct lcd2s_data), GFP_KERNEL); - if (!lcd2s) { - err = -ENOMEM; - goto fail1; - } - lcd->drvdata = lcd2s; lcd2s->i2c = i2c; lcd2s->charlcd = lcd; @@ -321,26 +319,24 @@ static int lcd2s_i2c_probe(struct i2c_client *i2c, err = device_property_read_u32(&i2c->dev, "display-height-chars", &lcd->height); if (err) - goto fail2; + goto fail1; err = device_property_read_u32(&i2c->dev, "display-width-chars", &lcd->width); if (err) - goto fail2; + goto fail1; lcd->ops = &lcd2s_ops; err = charlcd_register(lcd2s->charlcd); if (err) - goto fail2; + goto fail1; i2c_set_clientdata(i2c, lcd2s); return 0; -fail2: - kfree(lcd2s); fail1: - kfree(lcd); + charlcd_free(lcd2s->charlcd); return err; } @@ -349,7 +345,7 @@ static int lcd2s_i2c_remove(struct i2c_client *i2c) struct lcd2s_data *lcd2s = i2c_get_clientdata(i2c); charlcd_unregister(lcd2s->charlcd); - kfree(lcd2s->charlcd); + charlcd_free(lcd2s->charlcd); return 0; } diff --git a/drivers/base/dd.c b/drivers/base/dd.c index 9eaaff2f556c..f47cab21430f 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -629,6 +629,9 @@ re_probe: drv->remove(dev); devres_release_all(dev); + arch_teardown_dma_ops(dev); + kfree(dev->dma_range_map); + dev->dma_range_map = NULL; driver_sysfs_remove(dev); dev->driver = NULL; dev_set_drvdata(dev, NULL); @@ -1209,6 +1212,8 @@ static void __device_release_driver(struct device *dev, struct device *parent) devres_release_all(dev); arch_teardown_dma_ops(dev); + kfree(dev->dma_range_map); + dev->dma_range_map = NULL; dev->driver = NULL; dev_set_drvdata(dev, NULL); if (dev->pm_domain && dev->pm_domain->dismiss) diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index 99bda0da23a8..8666590201c9 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -34,7 +34,8 @@ suspend_state_t pm_suspend_target_state; bool events_check_enabled __read_mostly; /* First wakeup IRQ seen by the kernel in the last cycle. */ -unsigned int pm_wakeup_irq __read_mostly; +static unsigned int wakeup_irq[2] __read_mostly; +static DEFINE_RAW_SPINLOCK(wakeup_irq_lock); /* If greater than 0 and the system is suspending, terminate the suspend. 
*/ static atomic_t pm_abort_suspend __read_mostly; @@ -942,19 +943,45 @@ void pm_system_cancel_wakeup(void) atomic_dec_if_positive(&pm_abort_suspend); } -void pm_wakeup_clear(bool reset) +void pm_wakeup_clear(unsigned int irq_number) { - pm_wakeup_irq = 0; - if (reset) + raw_spin_lock_irq(&wakeup_irq_lock); + + if (irq_number && wakeup_irq[0] == irq_number) + wakeup_irq[0] = wakeup_irq[1]; + else + wakeup_irq[0] = 0; + + wakeup_irq[1] = 0; + + raw_spin_unlock_irq(&wakeup_irq_lock); + + if (!irq_number) atomic_set(&pm_abort_suspend, 0); } void pm_system_irq_wakeup(unsigned int irq_number) { - if (pm_wakeup_irq == 0) { - pm_wakeup_irq = irq_number; + unsigned long flags; + + raw_spin_lock_irqsave(&wakeup_irq_lock, flags); + + if (wakeup_irq[0] == 0) + wakeup_irq[0] = irq_number; + else if (wakeup_irq[1] == 0) + wakeup_irq[1] = irq_number; + else + irq_number = 0; + + raw_spin_unlock_irqrestore(&wakeup_irq_lock, flags); + + if (irq_number) pm_system_wakeup(); - } +} + +unsigned int pm_wakeup_irq(void) +{ + return wakeup_irq[0]; } /** diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c index d2656581a608..4a446259a184 100644 --- a/drivers/base/regmap/regmap-irq.c +++ b/drivers/base/regmap/regmap-irq.c @@ -189,11 +189,9 @@ static void regmap_irq_sync_unlock(struct irq_data *data) ret = regmap_write(map, reg, d->mask_buf[i]); if (d->chip->clear_ack) { if (d->chip->ack_invert && !ret) - ret = regmap_write(map, reg, - d->mask_buf[i]); + ret = regmap_write(map, reg, UINT_MAX); else if (!ret) - ret = regmap_write(map, reg, - ~d->mask_buf[i]); + ret = regmap_write(map, reg, 0); } if (ret != 0) dev_err(d->map->dev, "Failed to ack 0x%x: %d\n", @@ -556,11 +554,9 @@ static irqreturn_t regmap_irq_thread(int irq, void *d) data->status_buf[i]); if (chip->clear_ack) { if (chip->ack_invert && !ret) - ret = regmap_write(map, reg, - data->status_buf[i]); + ret = regmap_write(map, reg, UINT_MAX); else if (!ret) - ret = regmap_write(map, reg, - ~data->status_buf[i]); + ret = regmap_write(map, reg, 0); } if (ret != 0) dev_err(map->dev, "Failed to ack 0x%x: %d\n", @@ -817,13 +813,9 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, d->status_buf[i] & d->mask_buf[i]); if (chip->clear_ack) { if (chip->ack_invert && !ret) - ret = regmap_write(map, reg, - (d->status_buf[i] & - d->mask_buf[i])); + ret = regmap_write(map, reg, UINT_MAX); else if (!ret) - ret = regmap_write(map, reg, - ~(d->status_buf[i] & - d->mask_buf[i])); + ret = regmap_write(map, reg, 0); } if (ret != 0) { dev_err(map->dev, "Failed to ack 0x%x: %d\n", diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 01cbbfc4e9e2..19fe19eaa50e 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -79,6 +79,7 @@ #include <linux/ioprio.h> #include <linux/blk-cgroup.h> #include <linux/sched/mm.h> +#include <linux/statfs.h> #include "loop.h" @@ -774,8 +775,13 @@ static void loop_config_discard(struct loop_device *lo) granularity = 0; } else { + struct kstatfs sbuf; + max_discard_sectors = UINT_MAX >> 9; - granularity = inode->i_sb->s_blocksize; + if (!vfs_statfs(&file->f_path, &sbuf)) + granularity = sbuf.f_bsize; + else + max_discard_sectors = 0; } if (max_discard_sectors) { @@ -1082,7 +1088,7 @@ out_putf: return error; } -static void __loop_clr_fd(struct loop_device *lo) +static void __loop_clr_fd(struct loop_device *lo, bool release) { struct file *filp; gfp_t gfp = lo->old_gfp_mask; @@ -1144,6 +1150,8 @@ static void __loop_clr_fd(struct loop_device *lo) /* let user-space know about this change */ 
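/*
 * A condensed model (not part of the diff) of the two-slot bookkeeping
 * the wakeup.c hunk above introduces: recording the first *two* wakeup
 * IRQs lets the clear path drop only the SCI and still promote a
 * second, "real" wakeup source. Names are illustrative; the real code
 * also pairs the record path with pm_system_wakeup().
 */
#include <linux/spinlock.h>

static unsigned int demo_wakeup_irq[2];
static DEFINE_RAW_SPINLOCK(demo_wakeup_lock);

static void demo_record_irq(unsigned int irq)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&demo_wakeup_lock, flags);
	if (!demo_wakeup_irq[0])
		demo_wakeup_irq[0] = irq;	/* first wakeup source */
	else if (!demo_wakeup_irq[1])
		demo_wakeup_irq[1] = irq;	/* remember one runner-up */
	raw_spin_unlock_irqrestore(&demo_wakeup_lock, flags);
}

static void demo_clear_irq(unsigned int irq)
{
	raw_spin_lock_irq(&demo_wakeup_lock);
	/* Dropping a known IRQ promotes the runner-up to first place. */
	if (irq && demo_wakeup_irq[0] == irq)
		demo_wakeup_irq[0] = demo_wakeup_irq[1];
	else
		demo_wakeup_irq[0] = 0;
	demo_wakeup_irq[1] = 0;
	raw_spin_unlock_irq(&demo_wakeup_lock);
}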
kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE); mapping_set_gfp_mask(filp->f_mapping, gfp); + /* This is safe: open() is still holding a reference. */ + module_put(THIS_MODULE); blk_mq_unfreeze_queue(lo->lo_queue); disk_force_media_change(lo->lo_disk, DISK_EVENT_MEDIA_CHANGE); @@ -1151,52 +1159,44 @@ static void __loop_clr_fd(struct loop_device *lo) if (lo->lo_flags & LO_FLAGS_PARTSCAN) { int err; - mutex_lock(&lo->lo_disk->open_mutex); + /* + * open_mutex has been held already in release path, so don't + * acquire it if this function is called in such case. + * + * If the reread partition isn't from release path, lo_refcnt + * must be at least one and it can only become zero when the + * current holder is released. + */ + if (!release) + mutex_lock(&lo->lo_disk->open_mutex); err = bdev_disk_changed(lo->lo_disk, false); - mutex_unlock(&lo->lo_disk->open_mutex); + if (!release) + mutex_unlock(&lo->lo_disk->open_mutex); if (err) pr_warn("%s: partition scan of loop%d failed (rc=%d)\n", __func__, lo->lo_number, err); /* Device is gone, no point in returning error */ } + /* + * lo->lo_state is set to Lo_unbound here after above partscan has + * finished. There cannot be anybody else entering __loop_clr_fd() as + * Lo_rundown state protects us from all the other places trying to + * change the 'lo' device. + */ lo->lo_flags = 0; if (!part_shift) lo->lo_disk->flags |= GENHD_FL_NO_PART; - - fput(filp); -} - -static void loop_rundown_completed(struct loop_device *lo) -{ mutex_lock(&lo->lo_mutex); lo->lo_state = Lo_unbound; mutex_unlock(&lo->lo_mutex); - module_put(THIS_MODULE); -} - -static void loop_rundown_workfn(struct work_struct *work) -{ - struct loop_device *lo = container_of(work, struct loop_device, - rundown_work); - struct block_device *bdev = lo->lo_device; - struct gendisk *disk = lo->lo_disk; - - __loop_clr_fd(lo); - kobject_put(&bdev->bd_device.kobj); - module_put(disk->fops->owner); - loop_rundown_completed(lo); -} - -static void loop_schedule_rundown(struct loop_device *lo) -{ - struct block_device *bdev = lo->lo_device; - struct gendisk *disk = lo->lo_disk; - __module_get(disk->fops->owner); - kobject_get(&bdev->bd_device.kobj); - INIT_WORK(&lo->rundown_work, loop_rundown_workfn); - queue_work(system_long_wq, &lo->rundown_work); + /* + * Need not hold lo_mutex to fput backing file. Calling fput holding + * lo_mutex triggers a circular lock dependency possibility warning as + * fput can take open_mutex which is usually taken before lo_mutex. + */ + fput(filp); } static int loop_clr_fd(struct loop_device *lo) @@ -1228,8 +1228,7 @@ static int loop_clr_fd(struct loop_device *lo) lo->lo_state = Lo_rundown; mutex_unlock(&lo->lo_mutex); - __loop_clr_fd(lo); - loop_rundown_completed(lo); + __loop_clr_fd(lo, false); return 0; } @@ -1754,7 +1753,7 @@ static void lo_release(struct gendisk *disk, fmode_t mode) * In autoclear mode, stop the loop thread * and remove configuration after last close. 
*/ - loop_schedule_rundown(lo); + __loop_clr_fd(lo, true); return; } else if (lo->lo_state == Lo_bound) { /* diff --git a/drivers/block/loop.h b/drivers/block/loop.h index 918a7a2dc025..082d4b6bfc6a 100644 --- a/drivers/block/loop.h +++ b/drivers/block/loop.h @@ -56,7 +56,6 @@ struct loop_device { struct gendisk *lo_disk; struct mutex lo_mutex; bool idr_visible; - struct work_struct rundown_work; }; struct loop_cmd { diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index e6005c232328..2b588b62cbbb 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -4112,7 +4112,7 @@ static void mtip_pci_remove(struct pci_dev *pdev) "Completion workers still active!\n"); } - blk_set_queue_dying(dd->queue); + blk_mark_disk_dead(dd->disk); set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag); /* Clean up the block layer. */ diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 4203cdab8abf..b844432bad20 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -7185,7 +7185,7 @@ static ssize_t do_rbd_remove(struct bus_type *bus, * IO to complete/fail. */ blk_mq_freeze_queue(rbd_dev->disk->queue); - blk_set_queue_dying(rbd_dev->disk->queue); + blk_mark_disk_dead(rbd_dev->disk); } del_gendisk(rbd_dev->disk); diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index c443cd64fc9b..8c415be86732 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -76,9 +76,6 @@ struct virtio_blk { */ refcount_t refs; - /* What host tells us, plus 2 for header & tailer. */ - unsigned int sg_elems; - /* Ida index - used to track minor number allocations. */ int index; @@ -322,8 +319,6 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx, blk_status_t status; int err; - BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems); - status = virtblk_setup_cmd(vblk->vdev, req, vbr); if (unlikely(status)) return status; @@ -783,8 +778,6 @@ static int virtblk_probe(struct virtio_device *vdev) /* Prevent integer overflows and honor max vq size */ sg_elems = min_t(u32, sg_elems, VIRTIO_BLK_MAX_SG_ELEMS - 2); - /* We need extra sg elements at head and tail. */ - sg_elems += 2; vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL); if (!vblk) { err = -ENOMEM; @@ -796,7 +789,6 @@ static int virtblk_probe(struct virtio_device *vdev) mutex_init(&vblk->vdev_mutex); vblk->vdev = vdev; - vblk->sg_elems = sg_elems; INIT_WORK(&vblk->config_work, virtblk_config_changed_work); @@ -853,7 +845,7 @@ static int virtblk_probe(struct virtio_device *vdev) set_disk_ro(vblk->disk, 1); /* We can handle whatever the host told us to handle. */ - blk_queue_max_segments(q, vblk->sg_elems-2); + blk_queue_max_segments(q, sg_elems); /* No real sector limit. */ blk_queue_max_hw_sectors(q, -1U); @@ -925,9 +917,15 @@ static int virtblk_probe(struct virtio_device *vdev) virtio_cread(vdev, struct virtio_blk_config, max_discard_seg, &v); + + /* + * max_discard_seg == 0 is out of spec but we always + * handled it. 
+ */ + if (!v) + v = sg_elems; blk_queue_max_discard_segments(q, - min_not_zero(v, - MAX_DISCARD_SEGMENTS)); + min(v, MAX_DISCARD_SEGMENTS)); blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); } diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index ccd0dd0c6b83..03b5fb341e58 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -1288,7 +1288,8 @@ free_shadow: rinfo->ring_ref[i] = GRANT_INVALID_REF; } } - free_pages((unsigned long)rinfo->ring.sring, get_order(info->nr_ring_pages * XEN_PAGE_SIZE)); + free_pages_exact(rinfo->ring.sring, + info->nr_ring_pages * XEN_PAGE_SIZE); rinfo->ring.sring = NULL; if (rinfo->irq) @@ -1372,9 +1373,15 @@ static int blkif_get_final_status(enum blk_req_status s1, return BLKIF_RSP_OKAY; } -static bool blkif_completion(unsigned long *id, - struct blkfront_ring_info *rinfo, - struct blkif_response *bret) +/* + * Return values: + * 1 response processed. + * 0 missing further responses. + * -1 error while processing. + */ +static int blkif_completion(unsigned long *id, + struct blkfront_ring_info *rinfo, + struct blkif_response *bret) { int i = 0; struct scatterlist *sg; @@ -1397,7 +1404,7 @@ static bool blkif_completion(unsigned long *id, /* Wait the second response if not yet here. */ if (s2->status < REQ_DONE) - return false; + return 0; bret->status = blkif_get_final_status(s->status, s2->status); @@ -1448,42 +1455,43 @@ static bool blkif_completion(unsigned long *id, } /* Add the persistent grant into the list of free grants */ for (i = 0; i < num_grant; i++) { - if (gnttab_query_foreign_access(s->grants_used[i]->gref)) { + if (!gnttab_try_end_foreign_access(s->grants_used[i]->gref)) { /* * If the grant is still mapped by the backend (the * backend has chosen to make this grant persistent) * we add it at the head of the list, so it will be * reused first. */ - if (!info->feature_persistent) - pr_alert_ratelimited("backed has not unmapped grant: %u\n", - s->grants_used[i]->gref); + if (!info->feature_persistent) { + pr_alert("backed has not unmapped grant: %u\n", + s->grants_used[i]->gref); + return -1; + } list_add(&s->grants_used[i]->node, &rinfo->grants); rinfo->persistent_gnts_c++; } else { /* - * If the grant is not mapped by the backend we end the - * foreign access and add it to the tail of the list, - * so it will not be picked again unless we run out of - * persistent grants. + * If the grant is not mapped by the backend we add it + * to the tail of the list, so it will not be picked + * again unless we run out of persistent grants. */ - gnttab_end_foreign_access(s->grants_used[i]->gref, 0, 0UL); s->grants_used[i]->gref = GRANT_INVALID_REF; list_add_tail(&s->grants_used[i]->node, &rinfo->grants); } } if (s->req.operation == BLKIF_OP_INDIRECT) { for (i = 0; i < INDIRECT_GREFS(num_grant); i++) { - if (gnttab_query_foreign_access(s->indirect_grants[i]->gref)) { - if (!info->feature_persistent) - pr_alert_ratelimited("backed has not unmapped grant: %u\n", - s->indirect_grants[i]->gref); + if (!gnttab_try_end_foreign_access(s->indirect_grants[i]->gref)) { + if (!info->feature_persistent) { + pr_alert("backed has not unmapped grant: %u\n", + s->indirect_grants[i]->gref); + return -1; + } list_add(&s->indirect_grants[i]->node, &rinfo->grants); rinfo->persistent_gnts_c++; } else { struct page *indirect_page; - gnttab_end_foreign_access(s->indirect_grants[i]->gref, 0, 0UL); /* * Add the used indirect page back to the list of * available pages for indirect grefs. 
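/*
 * An aside (not part of the diff) on why the xen-blkfront hunks swap
 * __get_free_pages()/free_pages() for alloc_pages_exact()/
 * free_pages_exact(): get_order() rounds the request up to a
 * power-of-two number of pages, so a three-page ring would consume
 * four; alloc_pages_exact() returns the excess tail pages to the page
 * allocator. Sizes below are illustrative.
 */
#include <linux/gfp.h>

static void *ring_alloc(size_t ring_size)	/* e.g. 3 * PAGE_SIZE */
{
	/*
	 * Old style: __get_free_pages(GFP_NOIO, get_order(ring_size))
	 * hands back 2^get_order(ring_size) pages (4 pages for a
	 * 3-page request).
	 */
	return alloc_pages_exact(ring_size, GFP_NOIO);
}

static void ring_free(void *ring, size_t ring_size)
{
	/* Must pass the same size that was allocated. */
	free_pages_exact(ring, ring_size);
}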
@@ -1498,7 +1506,7 @@ static bool blkif_completion(unsigned long *id, } } - return true; + return 1; } static irqreturn_t blkif_interrupt(int irq, void *dev_id) @@ -1564,12 +1572,17 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) } if (bret.operation != BLKIF_OP_DISCARD) { + int ret; + /* * We may need to wait for an extra response if the * I/O request is split in 2 */ - if (!blkif_completion(&id, rinfo, &bret)) + ret = blkif_completion(&id, rinfo, &bret); + if (!ret) continue; + if (unlikely(ret < 0)) + goto err; } if (add_id_to_freelist(rinfo, id)) { @@ -1676,8 +1689,7 @@ static int setup_blkring(struct xenbus_device *dev, for (i = 0; i < info->nr_ring_pages; i++) rinfo->ring_ref[i] = GRANT_INVALID_REF; - sring = (struct blkif_sring *)__get_free_pages(GFP_NOIO | __GFP_HIGH, - get_order(ring_size)); + sring = alloc_pages_exact(ring_size, GFP_NOIO); if (!sring) { xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring"); return -ENOMEM; @@ -1687,7 +1699,7 @@ static int setup_blkring(struct xenbus_device *dev, err = xenbus_grant_ring(dev, rinfo->ring.sring, info->nr_ring_pages, gref); if (err < 0) { - free_pages((unsigned long)sring, get_order(ring_size)); + free_pages_exact(sring, ring_size); rinfo->ring.sring = NULL; goto fail; } @@ -2126,7 +2138,7 @@ static void blkfront_closing(struct blkfront_info *info) /* No more blkif_request(). */ blk_mq_stop_hw_queues(info->rq); - blk_set_queue_dying(info->rq); + blk_mark_disk_dead(info->gd); set_capacity(info->gd, 0); for_each_rinfo(info, rinfo, i) { @@ -2532,11 +2544,10 @@ static void purge_persistent_grants(struct blkfront_info *info) list_for_each_entry_safe(gnt_list_entry, tmp, &rinfo->grants, node) { if (gnt_list_entry->gref == GRANT_INVALID_REF || - gnttab_query_foreign_access(gnt_list_entry->gref)) + !gnttab_try_end_foreign_access(gnt_list_entry->gref)) continue; list_del(&gnt_list_entry->node); - gnttab_end_foreign_access(gnt_list_entry->gref, 0, 0UL); rinfo->persistent_gnts_c--; gnt_list_entry->gref = GRANT_INVALID_REF; list_add_tail(&gnt_list_entry->node, &rinfo->grants); diff --git a/drivers/bluetooth/btmtk.h b/drivers/bluetooth/btmtk.h index fb76d9765ce0..013850fd2055 100644 --- a/drivers/bluetooth/btmtk.h +++ b/drivers/bluetooth/btmtk.h @@ -5,14 +5,21 @@ #define FIRMWARE_MT7668 "mediatek/mt7668pr2h.bin" #define FIRMWARE_MT7961 "mediatek/BT_RAM_CODE_MT7961_1_2_hdr.bin" +#define HCI_EV_WMT 0xe4 #define HCI_WMT_MAX_EVENT_SIZE 64 #define BTMTK_WMT_REG_WRITE 0x1 #define BTMTK_WMT_REG_READ 0x2 +#define MT7921_BTSYS_RST 0x70002610 +#define MT7921_BTSYS_RST_WITH_GPIO BIT(7) + #define MT7921_PINMUX_0 0x70005050 #define MT7921_PINMUX_1 0x70005054 +#define MT7921_DLSTATUS 0x7c053c10 +#define BT_DL_STATE BIT(1) + enum { BTMTK_WMT_PATCH_DWNLD = 0x1, BTMTK_WMT_TEST = 0x2, diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c index 8be763ab3bf4..df3f9d090529 100644 --- a/drivers/bluetooth/btmtksdio.c +++ b/drivers/bluetooth/btmtksdio.c @@ -12,10 +12,12 @@ #include <asm/unaligned.h> #include <linux/atomic.h> +#include <linux/gpio/consumer.h> #include <linux/init.h> #include <linux/iopoll.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/of.h> #include <linux/pm_runtime.h> #include <linux/skbuff.h> @@ -83,6 +85,7 @@ MODULE_DEVICE_TABLE(sdio, btmtksdio_table); #define MTK_REG_CHCR 0xc #define C_INT_CLR_CTRL BIT(1) +#define BT_RST_DONE BIT(8) /* CHISR have the same bits field definition with CHIER */ #define MTK_REG_CHISR 0x10 @@ -114,6 +117,7 @@ MODULE_DEVICE_TABLE(sdio, btmtksdio_table); #define 
BTMTKSDIO_HW_TX_READY 2 #define BTMTKSDIO_FUNC_ENABLED 3 #define BTMTKSDIO_PATCH_ENABLED 4 +#define BTMTKSDIO_HW_RESET_ACTIVE 5 struct mtkbtsdio_hdr { __le16 len; @@ -133,6 +137,8 @@ struct btmtksdio_dev { struct sk_buff *evt_skb; const struct btmtksdio_data *data; + + struct gpio_desc *reset; }; static int mtk_hci_wmt_sync(struct hci_dev *hdev, @@ -297,6 +303,11 @@ static u32 btmtksdio_drv_own_query_79xx(struct btmtksdio_dev *bdev) return sdio_readl(bdev->func, MTK_REG_PD2HRM0R, NULL); } +static u32 btmtksdio_chcr_query(struct btmtksdio_dev *bdev) +{ + return sdio_readl(bdev->func, MTK_REG_CHCR, NULL); +} + static int btmtksdio_fw_pmctrl(struct btmtksdio_dev *bdev) { u32 status; @@ -370,13 +381,6 @@ static int btmtksdio_recv_event(struct hci_dev *hdev, struct sk_buff *skb) struct hci_event_hdr *hdr = (void *)skb->data; int err; - /* Fix up the vendor event id with 0xff for vendor specific instead - * of 0xe4 so that event send via monitoring socket can be parsed - * properly. - */ - if (hdr->evt == 0xe4) - hdr->evt = HCI_EV_VENDOR; - /* When someone waits for the WMT event, the skb is being cloned * and being processed the events from there then. */ @@ -392,7 +396,7 @@ static int btmtksdio_recv_event(struct hci_dev *hdev, struct sk_buff *skb) if (err < 0) goto err_free_skb; - if (hdr->evt == HCI_EV_VENDOR) { + if (hdr->evt == HCI_EV_WMT) { if (test_and_clear_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state)) { /* Barrier to sync with other CPUs */ @@ -967,6 +971,28 @@ static int btmtksdio_sco_setting(struct hci_dev *hdev) return btmtksdio_mtk_reg_write(hdev, MT7921_PINMUX_1, val, ~0); } +static int btmtksdio_reset_setting(struct hci_dev *hdev) +{ + int err; + u32 val; + + err = btmtksdio_mtk_reg_read(hdev, MT7921_PINMUX_1, &val); + if (err < 0) + return err; + + val |= 0x20; /* set the pin (bit field 11:8) work as GPIO mode */ + err = btmtksdio_mtk_reg_write(hdev, MT7921_PINMUX_1, val, ~0); + if (err < 0) + return err; + + err = btmtksdio_mtk_reg_read(hdev, MT7921_BTSYS_RST, &val); + if (err < 0) + return err; + + val |= MT7921_BTSYS_RST_WITH_GPIO; + return btmtksdio_mtk_reg_write(hdev, MT7921_BTSYS_RST, val, ~0); +} + static int btmtksdio_setup(struct hci_dev *hdev) { struct btmtksdio_dev *bdev = hci_get_drvdata(hdev); @@ -974,13 +1000,32 @@ static int btmtksdio_setup(struct hci_dev *hdev) unsigned long long duration; char fwname[64]; int err, dev_id; - u32 fw_version = 0; + u32 fw_version = 0, val; calltime = ktime_get(); set_bit(BTMTKSDIO_HW_TX_READY, &bdev->tx_state); switch (bdev->data->chipid) { case 0x7921: + if (test_bit(BTMTKSDIO_HW_RESET_ACTIVE, &bdev->tx_state)) { + err = btmtksdio_mtk_reg_read(hdev, MT7921_DLSTATUS, + &val); + if (err < 0) + return err; + + val &= ~BT_DL_STATE; + err = btmtksdio_mtk_reg_write(hdev, MT7921_DLSTATUS, + val, ~0); + if (err < 0) + return err; + + btmtksdio_fw_pmctrl(bdev); + msleep(20); + btmtksdio_drv_pmctrl(bdev); + + clear_bit(BTMTKSDIO_HW_RESET_ACTIVE, &bdev->tx_state); + } + err = btmtksdio_mtk_reg_read(hdev, 0x70010200, &dev_id); if (err < 0) { bt_dev_err(hdev, "Failed to get device id (%d)", err); @@ -1015,6 +1060,16 @@ static int btmtksdio_setup(struct hci_dev *hdev) return err; } + /* Enable GPIO reset mechanism */ + if (bdev->reset) { + err = btmtksdio_reset_setting(hdev); + if (err < 0) { + bt_dev_err(hdev, "Failed to enable Reset setting (%d)", err); + devm_gpiod_put(bdev->dev, bdev->reset); + bdev->reset = NULL; + } + } + break; case 0x7663: case 0x7668: @@ -1111,6 +1166,47 @@ static int btmtksdio_send_frame(struct hci_dev *hdev, struct 
sk_buff *skb) return 0; } +static void btmtksdio_cmd_timeout(struct hci_dev *hdev) +{ + struct btmtksdio_dev *bdev = hci_get_drvdata(hdev); + u32 status; + int err; + + if (!bdev->reset || bdev->data->chipid != 0x7921) + return; + + pm_runtime_get_sync(bdev->dev); + + if (test_and_set_bit(BTMTKSDIO_HW_RESET_ACTIVE, &bdev->tx_state)) + return; + + sdio_claim_host(bdev->func); + + sdio_writel(bdev->func, C_INT_EN_CLR, MTK_REG_CHLPCR, NULL); + skb_queue_purge(&bdev->txq); + cancel_work_sync(&bdev->txrx_work); + + gpiod_set_value_cansleep(bdev->reset, 1); + msleep(100); + gpiod_set_value_cansleep(bdev->reset, 0); + + err = readx_poll_timeout(btmtksdio_chcr_query, bdev, status, + status & BT_RST_DONE, 100000, 2000000); + if (err < 0) { + bt_dev_err(hdev, "Failed to reset (%d)", err); + goto err; + } + + clear_bit(BTMTKSDIO_PATCH_ENABLED, &bdev->tx_state); +err: + sdio_release_host(bdev->func); + + pm_runtime_put_noidle(bdev->dev); + pm_runtime_disable(bdev->dev); + + hci_reset_dev(hdev); +} + static bool btmtksdio_sdio_wakeup(struct hci_dev *hdev) { struct btmtksdio_dev *bdev = hci_get_drvdata(hdev); @@ -1130,8 +1226,8 @@ static bool btmtksdio_sdio_wakeup(struct hci_dev *hdev) &bt_awake, HCI_CMD_TIMEOUT); if (IS_ERR(skb)) may_wakeup = false; - - kfree_skb(skb); + else + kfree_skb(skb); } return may_wakeup; @@ -1172,6 +1268,7 @@ static int btmtksdio_probe(struct sdio_func *func, hdev->open = btmtksdio_open; hdev->close = btmtksdio_close; + hdev->cmd_timeout = btmtksdio_cmd_timeout; hdev->flush = btmtksdio_flush; hdev->setup = btmtksdio_setup; hdev->shutdown = btmtksdio_shutdown; @@ -1216,6 +1313,13 @@ static int btmtksdio_probe(struct sdio_func *func, if (err) bt_dev_err(hdev, "failed to initialize device wakeup"); + bdev->dev->of_node = of_find_compatible_node(NULL, NULL, + "mediatek,mt7921s-bluetooth"); + bdev->reset = devm_gpiod_get_optional(bdev->dev, "reset", + GPIOD_OUT_LOW); + if (IS_ERR(bdev->reset)) + err = PTR_ERR(bdev->reset); + return err; } diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index aefa0ee293f3..1bb00b7547fb 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -36,33 +36,33 @@ static bool reset = true; static struct usb_driver btusb_driver; -#define BTUSB_IGNORE 0x01 -#define BTUSB_DIGIANSWER 0x02 -#define BTUSB_CSR 0x04 -#define BTUSB_SNIFFER 0x08 -#define BTUSB_BCM92035 0x10 -#define BTUSB_BROKEN_ISOC 0x20 -#define BTUSB_WRONG_SCO_MTU 0x40 -#define BTUSB_ATH3012 0x80 -#define BTUSB_INTEL_COMBINED 0x100 -#define BTUSB_INTEL_BOOT 0x200 -#define BTUSB_BCM_PATCHRAM 0x400 -#define BTUSB_MARVELL 0x800 -#define BTUSB_SWAVE 0x1000 -#define BTUSB_AMP 0x4000 -#define BTUSB_QCA_ROME 0x8000 -#define BTUSB_BCM_APPLE 0x10000 -#define BTUSB_REALTEK 0x20000 -#define BTUSB_BCM2045 0x40000 -#define BTUSB_IFNUM_2 0x80000 -#define BTUSB_CW6622 0x100000 -#define BTUSB_MEDIATEK 0x200000 -#define BTUSB_WIDEBAND_SPEECH 0x400000 -#define BTUSB_VALID_LE_STATES 0x800000 -#define BTUSB_QCA_WCN6855 0x1000000 -#define BTUSB_INTEL_BROKEN_SHUTDOWN_LED 0x2000000 -#define BTUSB_INTEL_BROKEN_INITIAL_NCMD 0x4000000 -#define BTUSB_INTEL_NO_WBS_SUPPORT 0x8000000 +#define BTUSB_IGNORE BIT(0) +#define BTUSB_DIGIANSWER BIT(1) +#define BTUSB_CSR BIT(2) +#define BTUSB_SNIFFER BIT(3) +#define BTUSB_BCM92035 BIT(4) +#define BTUSB_BROKEN_ISOC BIT(5) +#define BTUSB_WRONG_SCO_MTU BIT(6) +#define BTUSB_ATH3012 BIT(7) +#define BTUSB_INTEL_COMBINED BIT(8) +#define BTUSB_INTEL_BOOT BIT(9) +#define BTUSB_BCM_PATCHRAM BIT(10) +#define BTUSB_MARVELL BIT(11) +#define BTUSB_SWAVE 
BIT(12) +#define BTUSB_AMP BIT(13) +#define BTUSB_QCA_ROME BIT(14) +#define BTUSB_BCM_APPLE BIT(15) +#define BTUSB_REALTEK BIT(16) +#define BTUSB_BCM2045 BIT(17) +#define BTUSB_IFNUM_2 BIT(18) +#define BTUSB_CW6622 BIT(19) +#define BTUSB_MEDIATEK BIT(20) +#define BTUSB_WIDEBAND_SPEECH BIT(21) +#define BTUSB_VALID_LE_STATES BIT(22) +#define BTUSB_QCA_WCN6855 BIT(23) +#define BTUSB_INTEL_BROKEN_SHUTDOWN_LED BIT(24) +#define BTUSB_INTEL_BROKEN_INITIAL_NCMD BIT(25) +#define BTUSB_INTEL_NO_WBS_SUPPORT BIT(26) static const struct usb_device_id btusb_table[] = { /* Generic Bluetooth USB device */ @@ -384,6 +384,7 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x8087, 0x0029), .driver_info = BTUSB_INTEL_COMBINED }, { USB_DEVICE(0x8087, 0x0032), .driver_info = BTUSB_INTEL_COMBINED }, { USB_DEVICE(0x8087, 0x0033), .driver_info = BTUSB_INTEL_COMBINED }, + { USB_DEVICE(0x8087, 0x0035), .driver_info = BTUSB_INTEL_COMBINED }, { USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR }, { USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL_COMBINED | BTUSB_INTEL_NO_WBS_SUPPORT | @@ -434,6 +435,11 @@ static const struct usb_device_id blacklist_table[] = { /* Additional MediaTek MT7615E Bluetooth devices */ { USB_DEVICE(0x13d3, 0x3560), .driver_info = BTUSB_MEDIATEK}, + /* Additional MediaTek MT7663 Bluetooth devices */ + { USB_DEVICE(0x043e, 0x310c), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + /* Additional MediaTek MT7668 Bluetooth devices */ { USB_DEVICE(0x043e, 0x3109), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH | @@ -449,6 +455,9 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x13d3, 0x3564), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x13d3, 0x3567), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x0489, 0xe0cd), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, @@ -487,6 +496,8 @@ static const struct usb_device_id blacklist_table[] = { /* Additional Realtek 8761BU Bluetooth devices */ { USB_DEVICE(0x0b05, 0x190e), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x2550, 0x8761), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, /* Additional Realtek 8821AE Bluetooth devices */ { USB_DEVICE(0x0b05, 0x17dc), .driver_info = BTUSB_REALTEK }, @@ -2250,7 +2261,6 @@ static void btusb_mtk_wmt_recv(struct urb *urb) { struct hci_dev *hdev = urb->context; struct btusb_data *data = hci_get_drvdata(hdev); - struct hci_event_hdr *hdr; struct sk_buff *skb; int err; @@ -2270,13 +2280,6 @@ static void btusb_mtk_wmt_recv(struct urb *urb) hci_skb_pkt_type(skb) = HCI_EVENT_PKT; skb_put_data(skb, urb->transfer_buffer, urb->actual_length); - hdr = (void *)skb->data; - /* Fix up the vendor event id with 0xff for vendor specific - * instead of 0xe4 so that event send via monitoring socket can - * be parsed properly. - */ - hdr->evt = 0xff; - /* When someone waits for the WMT event, the skb is being cloned * and being processed the events from there then. 
*/ @@ -2993,6 +2996,7 @@ static int btusb_set_bdaddr_wcn6855(struct hci_dev *hdev, #define QCA_PATCH_UPDATED 0x80 #define QCA_DFU_TIMEOUT 3000 #define QCA_FLAG_MULTI_NVM 0x80 +#define QCA_BT_RESET_WAIT_MS 100 #define WCN6855_2_0_RAM_VERSION_GF 0x400c1200 #define WCN6855_2_1_RAM_VERSION_GF 0x400c1211 @@ -3319,6 +3323,13 @@ static int btusb_setup_qca(struct hci_dev *hdev) err = btusb_setup_qca_load_nvm(hdev, &ver, info); if (err < 0) return err; + + /* WCN6855 2.1 will reset to apply firmware downloaded here, so + * wait ~100ms for reset Done then go ahead, otherwise, it maybe + * cause potential enable failure. + */ + if (info->rom_version == 0x00130201) + msleep(QCA_BT_RESET_WAIT_MS); } return 0; diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c index d634a27bc850..bd090d92a548 100644 --- a/drivers/bluetooth/hci_bcm.c +++ b/drivers/bluetooth/hci_bcm.c @@ -1513,6 +1513,8 @@ static const struct of_device_id bcm_bluetooth_of_match[] = { { .compatible = "brcm,bcm4330-bt" }, { .compatible = "brcm,bcm4334-bt" }, { .compatible = "brcm,bcm4345c5" }, + { .compatible = "brcm,bcm43430a0-bt" }, + { .compatible = "brcm,bcm43430a1-bt" }, { .compatible = "brcm,bcm43438-bt", .data = &bcm43438_device_data }, { .compatible = "brcm,bcm43540-bt", .data = &bcm4354_device_data }, { .compatible = "brcm,bcm4335a0" }, diff --git a/drivers/bus/mhi/pci_generic.c b/drivers/bus/mhi/pci_generic.c index 3a258a677df8..b79895810c52 100644 --- a/drivers/bus/mhi/pci_generic.c +++ b/drivers/bus/mhi/pci_generic.c @@ -366,6 +366,7 @@ static const struct mhi_pci_dev_info mhi_foxconn_sdx55_info = { .config = &modem_foxconn_sdx55_config, .bar_num = MHI_PCI_DEFAULT_BAR_NUM, .dma_data_width = 32, + .mru_default = 32768, .sideband_wake = false, }; @@ -401,6 +402,7 @@ static const struct mhi_pci_dev_info mhi_mv31_info = { .config = &modem_mv31_config, .bar_num = MHI_PCI_DEFAULT_BAR_NUM, .dma_data_width = 32, + .mru_default = 32768, }; static const struct mhi_channel_config mhi_sierra_em919x_channels[] = { diff --git a/drivers/bus/moxtet.c b/drivers/bus/moxtet.c index fd87a59837fa..5eb0fe73ddc4 100644 --- a/drivers/bus/moxtet.c +++ b/drivers/bus/moxtet.c @@ -815,7 +815,7 @@ static int moxtet_probe(struct spi_device *spi) return 0; } -static int moxtet_remove(struct spi_device *spi) +static void moxtet_remove(struct spi_device *spi) { struct moxtet *moxtet = spi_get_drvdata(spi); @@ -828,8 +828,6 @@ static int moxtet_remove(struct spi_device *spi) device_for_each_child(moxtet->dev, NULL, __unregister); mutex_destroy(&moxtet->lock); - - return 0; } static const struct of_device_id moxtet_dt_ids[] = { diff --git a/drivers/char/tpm/st33zp24/i2c.c b/drivers/char/tpm/st33zp24/i2c.c index 7c617edff4ca..3170d59d660c 100644 --- a/drivers/char/tpm/st33zp24/i2c.c +++ b/drivers/char/tpm/st33zp24/i2c.c @@ -267,11 +267,8 @@ static int st33zp24_i2c_probe(struct i2c_client *client, static int st33zp24_i2c_remove(struct i2c_client *client) { struct tpm_chip *chip = i2c_get_clientdata(client); - int ret; - ret = st33zp24_remove(chip); - if (ret) - return ret; + st33zp24_remove(chip); return 0; } diff --git a/drivers/char/tpm/st33zp24/spi.c b/drivers/char/tpm/st33zp24/spi.c index a75dafd39445..22d184884694 100644 --- a/drivers/char/tpm/st33zp24/spi.c +++ b/drivers/char/tpm/st33zp24/spi.c @@ -381,16 +381,11 @@ static int st33zp24_spi_probe(struct spi_device *dev) * @param: client, the spi_device description (TPM SPI description). * @return: 0 in case of success. 
*/ -static int st33zp24_spi_remove(struct spi_device *dev) +static void st33zp24_spi_remove(struct spi_device *dev) { struct tpm_chip *chip = spi_get_drvdata(dev); - int ret; - ret = st33zp24_remove(chip); - if (ret) - return ret; - - return 0; + st33zp24_remove(chip); } static const struct spi_device_id st33zp24_spi_id[] = { diff --git a/drivers/char/tpm/st33zp24/st33zp24.c b/drivers/char/tpm/st33zp24/st33zp24.c index ce9efb73c144..15b393e92c8e 100644 --- a/drivers/char/tpm/st33zp24/st33zp24.c +++ b/drivers/char/tpm/st33zp24/st33zp24.c @@ -511,10 +511,9 @@ _tpm_clean_answer: } EXPORT_SYMBOL(st33zp24_probe); -int st33zp24_remove(struct tpm_chip *chip) +void st33zp24_remove(struct tpm_chip *chip) { tpm_chip_unregister(chip); - return 0; } EXPORT_SYMBOL(st33zp24_remove); diff --git a/drivers/char/tpm/st33zp24/st33zp24.h b/drivers/char/tpm/st33zp24/st33zp24.h index 6747be1e2502..b387a476c555 100644 --- a/drivers/char/tpm/st33zp24/st33zp24.h +++ b/drivers/char/tpm/st33zp24/st33zp24.h @@ -34,5 +34,5 @@ int st33zp24_pm_resume(struct device *dev); int st33zp24_probe(void *phy_id, const struct st33zp24_phy_ops *ops, struct device *dev, int irq, int io_lpcpd); -int st33zp24_remove(struct tpm_chip *chip); +void st33zp24_remove(struct tpm_chip *chip); #endif /* __LOCAL_ST33ZP24_H__ */ diff --git a/drivers/char/tpm/tpm_tis_spi_main.c b/drivers/char/tpm/tpm_tis_spi_main.c index aaa59a00eeae..184396b3af50 100644 --- a/drivers/char/tpm/tpm_tis_spi_main.c +++ b/drivers/char/tpm/tpm_tis_spi_main.c @@ -254,13 +254,12 @@ static int tpm_tis_spi_driver_probe(struct spi_device *spi) static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_spi_resume); -static int tpm_tis_spi_remove(struct spi_device *dev) +static void tpm_tis_spi_remove(struct spi_device *dev) { struct tpm_chip *chip = spi_get_drvdata(dev); tpm_chip_unregister(chip); tpm_tis_remove(chip); - return 0; } static const struct spi_device_id tpm_tis_spi_id[] = { diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 2359889a35a0..e3c430539a17 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -1957,6 +1957,13 @@ static void virtcons_remove(struct virtio_device *vdev) list_del(&portdev->list); spin_unlock_irq(&pdrvdata_lock); + /* Device is going away, exit any polling for buffers */ + virtio_break_device(vdev); + if (use_multiport(portdev)) + flush_work(&portdev->control_work); + else + flush_work(&portdev->config_work); + /* Disable interrupts for vqs */ virtio_reset_device(vdev); /* Finish up work that's lined up */ diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig index ad4256d54361..d4d67fbae869 100644 --- a/drivers/clk/Kconfig +++ b/drivers/clk/Kconfig @@ -231,6 +231,8 @@ config COMMON_CLK_GEMINI config COMMON_CLK_LAN966X bool "Generic Clock Controller driver for LAN966X SoC" + depends on HAS_IOMEM + depends on OF help This driver provides support for Generic Clock Controller(GCK) on LAN966X SoC. 
GCK generates and supplies clock to various peripherals diff --git a/drivers/clk/clk-lmk04832.c b/drivers/clk/clk-lmk04832.c index 8f02c0b88000..f416f8bc2898 100644 --- a/drivers/clk/clk-lmk04832.c +++ b/drivers/clk/clk-lmk04832.c @@ -1544,14 +1544,12 @@ err_disable_oscin: return ret; } -static int lmk04832_remove(struct spi_device *spi) +static void lmk04832_remove(struct spi_device *spi) { struct lmk04832 *lmk = spi_get_drvdata(spi); clk_disable_unprepare(lmk->oscin); of_clk_del_provider(spi->dev.of_node); - - return 0; } static const struct spi_device_id lmk04832_id[] = { { "lmk04832", LMK04832 }, diff --git a/drivers/clk/ingenic/jz4725b-cgu.c b/drivers/clk/ingenic/jz4725b-cgu.c index 744d136b721b..15d61793f53b 100644 --- a/drivers/clk/ingenic/jz4725b-cgu.c +++ b/drivers/clk/ingenic/jz4725b-cgu.c @@ -139,11 +139,10 @@ static const struct ingenic_cgu_clk_info jz4725b_cgu_clocks[] = { }, [JZ4725B_CLK_I2S] = { - "i2s", CGU_CLK_MUX | CGU_CLK_DIV | CGU_CLK_GATE, + "i2s", CGU_CLK_MUX | CGU_CLK_DIV, .parents = { JZ4725B_CLK_EXT, JZ4725B_CLK_PLL_HALF, -1, -1 }, .mux = { CGU_REG_CPCCR, 31, 1 }, .div = { CGU_REG_I2SCDR, 0, 1, 9, -1, -1, -1 }, - .gate = { CGU_REG_CLKGR, 6 }, }, [JZ4725B_CLK_SPI] = { diff --git a/drivers/clk/qcom/dispcc-sc7180.c b/drivers/clk/qcom/dispcc-sc7180.c index 538e4963c915..5d2ae297e741 100644 --- a/drivers/clk/qcom/dispcc-sc7180.c +++ b/drivers/clk/qcom/dispcc-sc7180.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2019, 2022, The Linux Foundation. All rights reserved. */ #include <linux/clk-provider.h> @@ -625,6 +625,9 @@ static struct clk_branch disp_cc_mdss_vsync_clk = { static struct gdsc mdss_gdsc = { .gdscr = 0x3000, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, .pd = { .name = "mdss_gdsc", }, diff --git a/drivers/clk/qcom/dispcc-sc7280.c b/drivers/clk/qcom/dispcc-sc7280.c index 4ef4ae231794..ad596d567f6a 100644 --- a/drivers/clk/qcom/dispcc-sc7280.c +++ b/drivers/clk/qcom/dispcc-sc7280.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2022, The Linux Foundation. All rights reserved. */ #include <linux/clk-provider.h> @@ -787,6 +787,9 @@ static struct clk_branch disp_cc_sleep_clk = { static struct gdsc disp_cc_mdss_core_gdsc = { .gdscr = 0x1004, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, .pd = { .name = "disp_cc_mdss_core_gdsc", }, diff --git a/drivers/clk/qcom/dispcc-sm8250.c b/drivers/clk/qcom/dispcc-sm8250.c index 566fdfa0a15b..db9379634fb2 100644 --- a/drivers/clk/qcom/dispcc-sm8250.c +++ b/drivers/clk/qcom/dispcc-sm8250.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2018-2020, 2022, The Linux Foundation. All rights reserved. 
*/ #include <linux/clk-provider.h> @@ -1126,6 +1126,9 @@ static struct clk_branch disp_cc_mdss_vsync_clk = { static struct gdsc mdss_gdsc = { .gdscr = 0x3000, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, .pd = { .name = "mdss_gdsc", }, diff --git a/drivers/clk/qcom/gcc-msm8994.c b/drivers/clk/qcom/gcc-msm8994.c index 71aa630fa4bd..f09499999eb3 100644 --- a/drivers/clk/qcom/gcc-msm8994.c +++ b/drivers/clk/qcom/gcc-msm8994.c @@ -108,42 +108,6 @@ static const struct clk_parent_data gcc_xo_gpll0_gpll4[] = { { .hw = &gpll4.clkr.hw }, }; -static struct clk_rcg2 system_noc_clk_src = { - .cmd_rcgr = 0x0120, - .hid_width = 5, - .parent_map = gcc_xo_gpll0_map, - .clkr.hw.init = &(struct clk_init_data){ - .name = "system_noc_clk_src", - .parent_data = gcc_xo_gpll0, - .num_parents = ARRAY_SIZE(gcc_xo_gpll0), - .ops = &clk_rcg2_ops, - }, -}; - -static struct clk_rcg2 config_noc_clk_src = { - .cmd_rcgr = 0x0150, - .hid_width = 5, - .parent_map = gcc_xo_gpll0_map, - .clkr.hw.init = &(struct clk_init_data){ - .name = "config_noc_clk_src", - .parent_data = gcc_xo_gpll0, - .num_parents = ARRAY_SIZE(gcc_xo_gpll0), - .ops = &clk_rcg2_ops, - }, -}; - -static struct clk_rcg2 periph_noc_clk_src = { - .cmd_rcgr = 0x0190, - .hid_width = 5, - .parent_map = gcc_xo_gpll0_map, - .clkr.hw.init = &(struct clk_init_data){ - .name = "periph_noc_clk_src", - .parent_data = gcc_xo_gpll0, - .num_parents = ARRAY_SIZE(gcc_xo_gpll0), - .ops = &clk_rcg2_ops, - }, -}; - static struct freq_tbl ftbl_ufs_axi_clk_src[] = { F(50000000, P_GPLL0, 12, 0, 0), F(100000000, P_GPLL0, 6, 0, 0), @@ -1150,8 +1114,6 @@ static struct clk_branch gcc_blsp1_ahb_clk = { .enable_mask = BIT(17), .hw.init = &(struct clk_init_data){ .name = "gcc_blsp1_ahb_clk", - .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw }, - .num_parents = 1, .ops = &clk_branch2_ops, }, }, @@ -1435,8 +1397,6 @@ static struct clk_branch gcc_blsp2_ahb_clk = { .enable_mask = BIT(15), .hw.init = &(struct clk_init_data){ .name = "gcc_blsp2_ahb_clk", - .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw }, - .num_parents = 1, .ops = &clk_branch2_ops, }, }, @@ -1764,8 +1724,6 @@ static struct clk_branch gcc_lpass_q6_axi_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_lpass_q6_axi_clk", - .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw }, - .num_parents = 1, .ops = &clk_branch2_ops, }, }, @@ -1778,8 +1736,6 @@ static struct clk_branch gcc_mss_q6_bimc_axi_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_mss_q6_bimc_axi_clk", - .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw }, - .num_parents = 1, .ops = &clk_branch2_ops, }, }, @@ -1807,9 +1763,6 @@ static struct clk_branch gcc_pcie_0_cfg_ahb_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_pcie_0_cfg_ahb_clk", - .parent_hws = (const struct clk_hw *[]){ &config_noc_clk_src.clkr.hw }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, @@ -1822,9 +1775,6 @@ static struct clk_branch gcc_pcie_0_mstr_axi_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_pcie_0_mstr_axi_clk", - .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, @@ -1854,9 +1804,6 @@ static struct clk_branch gcc_pcie_0_slv_axi_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = 
"gcc_pcie_0_slv_axi_clk", - .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, @@ -1884,9 +1831,6 @@ static struct clk_branch gcc_pcie_1_cfg_ahb_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_pcie_1_cfg_ahb_clk", - .parent_hws = (const struct clk_hw *[]){ &config_noc_clk_src.clkr.hw }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, @@ -1899,9 +1843,6 @@ static struct clk_branch gcc_pcie_1_mstr_axi_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_pcie_1_mstr_axi_clk", - .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, @@ -1930,9 +1871,6 @@ static struct clk_branch gcc_pcie_1_slv_axi_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_pcie_1_slv_axi_clk", - .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, @@ -1960,8 +1898,6 @@ static struct clk_branch gcc_pdm_ahb_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_pdm_ahb_clk", - .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw }, - .num_parents = 1, .ops = &clk_branch2_ops, }, }, @@ -1989,9 +1925,6 @@ static struct clk_branch gcc_sdcc1_ahb_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_sdcc1_ahb_clk", - .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, @@ -2004,9 +1937,6 @@ static struct clk_branch gcc_sdcc2_ahb_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_sdcc2_ahb_clk", - .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, @@ -2034,9 +1964,6 @@ static struct clk_branch gcc_sdcc3_ahb_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_sdcc3_ahb_clk", - .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, @@ -2064,9 +1991,6 @@ static struct clk_branch gcc_sdcc4_ahb_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_sdcc4_ahb_clk", - .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, @@ -2124,8 +2048,6 @@ static struct clk_branch gcc_tsif_ahb_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_tsif_ahb_clk", - .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw }, - .num_parents = 1, .ops = &clk_branch2_ops, }, }, @@ -2153,8 +2075,6 @@ static struct clk_branch gcc_ufs_ahb_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_ufs_ahb_clk", - .parent_hws = (const struct clk_hw *[]){ &config_noc_clk_src.clkr.hw }, - .num_parents = 1, .ops = &clk_branch2_ops, }, }, @@ -2198,8 +2118,6 @@ static struct clk_branch gcc_ufs_rx_symbol_0_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_ufs_rx_symbol_0_clk", - .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw }, - .num_parents = 1, .ops = &clk_branch2_ops, }, }, @@ -2213,8 
+2131,6 @@ static struct clk_branch gcc_ufs_rx_symbol_1_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_ufs_rx_symbol_1_clk", - .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw }, - .num_parents = 1, .ops = &clk_branch2_ops, }, }, @@ -2243,8 +2159,6 @@ static struct clk_branch gcc_ufs_tx_symbol_0_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_ufs_tx_symbol_0_clk", - .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw }, - .num_parents = 1, .ops = &clk_branch2_ops, }, }, @@ -2258,8 +2172,6 @@ static struct clk_branch gcc_ufs_tx_symbol_1_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_ufs_tx_symbol_1_clk", - .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw }, - .num_parents = 1, .ops = &clk_branch2_ops, }, }, @@ -2364,8 +2276,6 @@ static struct clk_branch gcc_usb_hs_ahb_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_usb_hs_ahb_clk", - .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw }, - .num_parents = 1, .ops = &clk_branch2_ops, }, }, @@ -2488,8 +2398,6 @@ static struct clk_branch gcc_boot_rom_ahb_clk = { .enable_mask = BIT(10), .hw.init = &(struct clk_init_data){ .name = "gcc_boot_rom_ahb_clk", - .parent_hws = (const struct clk_hw *[]){ &config_noc_clk_src.clkr.hw }, - .num_parents = 1, .ops = &clk_branch2_ops, }, }, @@ -2503,8 +2411,6 @@ static struct clk_branch gcc_prng_ahb_clk = { .enable_mask = BIT(13), .hw.init = &(struct clk_init_data){ .name = "gcc_prng_ahb_clk", - .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw }, - .num_parents = 1, .ops = &clk_branch2_ops, }, }, @@ -2547,9 +2453,6 @@ static struct clk_regmap *gcc_msm8994_clocks[] = { [GPLL0] = &gpll0.clkr, [GPLL4_EARLY] = &gpll4_early.clkr, [GPLL4] = &gpll4.clkr, - [CONFIG_NOC_CLK_SRC] = &config_noc_clk_src.clkr, - [PERIPH_NOC_CLK_SRC] = &periph_noc_clk_src.clkr, - [SYSTEM_NOC_CLK_SRC] = &system_noc_clk_src.clkr, [UFS_AXI_CLK_SRC] = &ufs_axi_clk_src.clkr, [USB30_MASTER_CLK_SRC] = &usb30_master_clk_src.clkr, [BLSP1_QUP1_I2C_APPS_CLK_SRC] = &blsp1_qup1_i2c_apps_clk_src.clkr, @@ -2696,6 +2599,15 @@ static struct clk_regmap *gcc_msm8994_clocks[] = { [USB_SS_PHY_LDO] = &usb_ss_phy_ldo.clkr, [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr, [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr, + + /* + * The following clocks should NOT be managed by this driver, but they once were + * mistakengly added. Now they are only here to indicate that they are not defined + * on purpose, even though the names will stay in the header file (for ABI sanity). + */ + [CONFIG_NOC_CLK_SRC] = NULL, + [PERIPH_NOC_CLK_SRC] = NULL, + [SYSTEM_NOC_CLK_SRC] = NULL, }; static struct gdsc *gcc_msm8994_gdscs[] = { diff --git a/drivers/clk/qcom/gdsc.c b/drivers/clk/qcom/gdsc.c index 7e1dd8ccfa38..44520efc6c72 100644 --- a/drivers/clk/qcom/gdsc.c +++ b/drivers/clk/qcom/gdsc.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2015, 2017-2018, The Linux Foundation. All rights reserved. + * Copyright (c) 2015, 2017-2018, 2022, The Linux Foundation. All rights reserved. */ #include <linux/bitops.h> @@ -35,9 +35,14 @@ #define CFG_GDSCR_OFFSET 0x4 /* Wait 2^n CXO cycles between all states. Here, n=2 (4 cycles). 
*/ -#define EN_REST_WAIT_VAL (0x2 << 20) -#define EN_FEW_WAIT_VAL (0x8 << 16) -#define CLK_DIS_WAIT_VAL (0x2 << 12) +#define EN_REST_WAIT_VAL 0x2 +#define EN_FEW_WAIT_VAL 0x8 +#define CLK_DIS_WAIT_VAL 0x2 + +/* Transition delay shifts */ +#define EN_REST_WAIT_SHIFT 20 +#define EN_FEW_WAIT_SHIFT 16 +#define CLK_DIS_WAIT_SHIFT 12 #define RETAIN_MEM BIT(14) #define RETAIN_PERIPH BIT(13) @@ -380,7 +385,18 @@ static int gdsc_init(struct gdsc *sc) */ mask = HW_CONTROL_MASK | SW_OVERRIDE_MASK | EN_REST_WAIT_MASK | EN_FEW_WAIT_MASK | CLK_DIS_WAIT_MASK; - val = EN_REST_WAIT_VAL | EN_FEW_WAIT_VAL | CLK_DIS_WAIT_VAL; + + if (!sc->en_rest_wait_val) + sc->en_rest_wait_val = EN_REST_WAIT_VAL; + if (!sc->en_few_wait_val) + sc->en_few_wait_val = EN_FEW_WAIT_VAL; + if (!sc->clk_dis_wait_val) + sc->clk_dis_wait_val = CLK_DIS_WAIT_VAL; + + val = sc->en_rest_wait_val << EN_REST_WAIT_SHIFT | + sc->en_few_wait_val << EN_FEW_WAIT_SHIFT | + sc->clk_dis_wait_val << CLK_DIS_WAIT_SHIFT; + ret = regmap_update_bits(sc->regmap, sc->gdscr, mask, val); if (ret) return ret; diff --git a/drivers/clk/qcom/gdsc.h b/drivers/clk/qcom/gdsc.h index d7cc4c21a9d4..ad313d7210bd 100644 --- a/drivers/clk/qcom/gdsc.h +++ b/drivers/clk/qcom/gdsc.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2015, 2017-2018, The Linux Foundation. All rights reserved. + * Copyright (c) 2015, 2017-2018, 2022, The Linux Foundation. All rights reserved. */ #ifndef __QCOM_GDSC_H__ @@ -22,6 +22,9 @@ struct reset_controller_dev; * @cxcs: offsets of branch registers to toggle mem/periph bits in * @cxc_count: number of @cxcs * @pwrsts: Possible powerdomain power states + * @en_rest_wait_val: transition delay value for receiving enr ack signal + * @en_few_wait_val: transition delay value for receiving enf ack signal + * @clk_dis_wait_val: transition delay value for halting clock * @resets: ids of resets associated with this gdsc * @reset_count: number of @resets * @rcdev: reset controller @@ -36,6 +39,9 @@ struct gdsc { unsigned int clamp_io_ctrl; unsigned int *cxcs; unsigned int cxc_count; + unsigned int en_rest_wait_val; + unsigned int en_few_wait_val; + unsigned int clk_dis_wait_val; const u8 pwrsts; /* Powerdomain allowable state bitfields */ #define PWRSTS_OFF BIT(0) diff --git a/drivers/clocksource/timer-ti-dm-systimer.c b/drivers/clocksource/timer-ti-dm-systimer.c index b6f97960d8ee..1fccb457fcc5 100644 --- a/drivers/clocksource/timer-ti-dm-systimer.c +++ b/drivers/clocksource/timer-ti-dm-systimer.c @@ -241,8 +241,7 @@ static void __init dmtimer_systimer_assign_alwon(void) bool quirk_unreliable_oscillator = false; /* Quirk unreliable 32 KiHz oscillator with incomplete dts */ - if (of_machine_is_compatible("ti,omap3-beagle") || - of_machine_is_compatible("timll,omap3-devkit8000")) { + if (of_machine_is_compatible("ti,omap3-beagle-ab4")) { quirk_unreliable_oscillator = true; counter_32k = -ENODEV; } diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index b8d95536ee22..80f535cc8a75 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -1518,6 +1518,10 @@ static int cpufreq_online(unsigned int cpu) kobject_uevent(&policy->kobj, KOBJ_ADD); + /* Callback for handling stuff after policy is ready */ + if (cpufreq_driver->ready) + cpufreq_driver->ready(policy); + if (cpufreq_thermal_control_enabled(cpufreq_driver)) policy->cdev = of_cpufreq_cooling_register(policy); diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c index 05f3d7876e44..effbb680b453 100644 --- 
a/drivers/cpufreq/qcom-cpufreq-hw.c +++ b/drivers/cpufreq/qcom-cpufreq-hw.c @@ -388,7 +388,7 @@ static int qcom_cpufreq_hw_lmh_init(struct cpufreq_policy *policy, int index) snprintf(data->irq_name, sizeof(data->irq_name), "dcvsh-irq-%u", policy->cpu); ret = request_threaded_irq(data->throttle_irq, NULL, qcom_lmh_dcvs_handle_irq, - IRQF_ONESHOT, data->irq_name, data); + IRQF_ONESHOT | IRQF_NO_AUTOEN, data->irq_name, data); if (ret) { dev_err(&pdev->dev, "Error registering %s: %d\n", data->irq_name, ret); return 0; @@ -542,6 +542,14 @@ static int qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy) return 0; } +static void qcom_cpufreq_ready(struct cpufreq_policy *policy) +{ + struct qcom_cpufreq_data *data = policy->driver_data; + + if (data->throttle_irq >= 0) + enable_irq(data->throttle_irq); +} + static struct freq_attr *qcom_cpufreq_hw_attr[] = { &cpufreq_freq_attr_scaling_available_freqs, &cpufreq_freq_attr_scaling_boost_freqs, @@ -561,6 +569,7 @@ static struct cpufreq_driver cpufreq_qcom_hw_driver = { .fast_switch = qcom_cpufreq_hw_fast_switch, .name = "qcom-cpufreq-hw", .attr = qcom_cpufreq_hw_attr, + .ready = qcom_cpufreq_ready, }; static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev) diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index a1da2b4b6d73..1476156af74b 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -1681,8 +1681,10 @@ static void at_xdmac_tasklet(struct tasklet_struct *t) __func__, atchan->irq_status); if (!(atchan->irq_status & AT_XDMAC_CIS_LIS) && - !(atchan->irq_status & error_mask)) + !(atchan->irq_status & error_mask)) { + spin_unlock_irq(&atchan->lock); return; + } if (atchan->irq_status & error_mask) at_xdmac_handle_error(atchan); diff --git a/drivers/dma/ptdma/ptdma-dev.c b/drivers/dma/ptdma/ptdma-dev.c index 8a6bf291a73f..daafea5bc35d 100644 --- a/drivers/dma/ptdma/ptdma-dev.c +++ b/drivers/dma/ptdma/ptdma-dev.c @@ -207,7 +207,7 @@ int pt_core_init(struct pt_device *pt) if (!cmd_q->qbase) { dev_err(dev, "unable to allocate command queue\n"); ret = -ENOMEM; - goto e_dma_alloc; + goto e_destroy_pool; } cmd_q->qidx = 0; @@ -229,8 +229,10 @@ int pt_core_init(struct pt_device *pt) /* Request an irq */ ret = request_irq(pt->pt_irq, pt_core_irq_handler, 0, dev_name(pt->dev), pt); - if (ret) - goto e_pool; + if (ret) { + dev_err(dev, "unable to allocate an IRQ\n"); + goto e_free_dma; + } /* Update the device registers with queue information. 
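Returning to the qcom-cpufreq-hw hunks above: IRQF_NO_AUTOEN makes request_threaded_irq() leave the line masked, and the new ->ready() callback enables it only once the cpufreq policy is fully initialized, so the throttle handler can never run against a half-built policy. The pattern in isolation, as a sketch with hypothetical demo_* names:

	#include <linux/interrupt.h>

	struct demo_data {
		int irq;
	};

	static irqreturn_t demo_handler(int irq, void *data);

	/* Request the line masked; nothing fires until demo_ready() runs. */
	static int demo_request(struct demo_data *d)
	{
		return request_threaded_irq(d->irq, NULL, demo_handler,
					    IRQF_ONESHOT | IRQF_NO_AUTOEN,
					    "demo-irq", d);
	}

	/* Called once driver state is fully set up, cf. the ->ready() hook. */
	static void demo_ready(struct demo_data *d)
	{
		if (d->irq >= 0)
			enable_irq(d->irq);
	}

IRQF_NO_AUTOEN also avoids the older, racy request-then-disable_irq() sequence, where the handler could fire in the window between the two calls.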
*/ cmd_q->qcontrol &= ~CMD_Q_SIZE; @@ -250,21 +252,20 @@ int pt_core_init(struct pt_device *pt) /* Register the DMA engine support */ ret = pt_dmaengine_register(pt); if (ret) - goto e_dmaengine; + goto e_free_irq; /* Set up debugfs entries */ ptdma_debugfs_setup(pt); return 0; -e_dmaengine: +e_free_irq: free_irq(pt->pt_irq, pt); -e_dma_alloc: +e_free_dma: dma_free_coherent(dev, cmd_q->qsize, cmd_q->qbase, cmd_q->qbase_dma); -e_pool: - dev_err(dev, "unable to allocate an IRQ\n"); +e_destroy_pool: dma_pool_destroy(pt->cmd_q.dma_pool); return ret; diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c index 481f45c77ce1..13d12d660cc2 100644 --- a/drivers/dma/sh/rcar-dmac.c +++ b/drivers/dma/sh/rcar-dmac.c @@ -1868,8 +1868,13 @@ static int rcar_dmac_probe(struct platform_device *pdev) dmac->dev = &pdev->dev; platform_set_drvdata(pdev, dmac); - dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK); - dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40)); + ret = dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK); + if (ret) + return ret; + + ret = dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40)); + if (ret) + return ret; ret = rcar_dmac_parse_of(&pdev->dev, dmac); if (ret < 0) diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c index 158e5e7defae..b26ed690f03c 100644 --- a/drivers/dma/sh/shdma-base.c +++ b/drivers/dma/sh/shdma-base.c @@ -115,8 +115,10 @@ static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx) ret = pm_runtime_get(schan->dev); spin_unlock_irq(&schan->chan_lock); - if (ret < 0) + if (ret < 0) { dev_err(schan->dev, "%s(): GET = %d\n", __func__, ret); + pm_runtime_put(schan->dev); + } pm_runtime_barrier(schan->dev); diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32-dmamux.c index a42164389ebc..d5d55732adba 100644 --- a/drivers/dma/stm32-dmamux.c +++ b/drivers/dma/stm32-dmamux.c @@ -292,10 +292,12 @@ static int stm32_dmamux_probe(struct platform_device *pdev) ret = of_dma_router_register(node, stm32_dmamux_route_allocate, &stm32_dmamux->dmarouter); if (ret) - goto err_clk; + goto pm_disable; return 0; +pm_disable: + pm_runtime_disable(&pdev->dev); err_clk: clk_disable_unprepare(stm32_dmamux->clk); diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c index 9d9aabdec96b..f5677d81bd2d 100644 --- a/drivers/edac/edac_mc.c +++ b/drivers/edac/edac_mc.c @@ -215,7 +215,7 @@ void *edac_align_ptr(void **p, unsigned int size, int n_elems) else return (char *)ptr; - r = (unsigned long)p % align; + r = (unsigned long)ptr % align; if (r == 0) return (char *)ptr; diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index b406b3f78f46..d76bab3aaac4 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -2112,7 +2112,7 @@ static void __exit scmi_driver_exit(void) } module_exit(scmi_driver_exit); -MODULE_ALIAS("platform: arm-scmi"); +MODULE_ALIAS("platform:arm-scmi"); MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>"); MODULE_DESCRIPTION("ARM SCMI protocol driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/firmware/efi/libstub/riscv-stub.c b/drivers/firmware/efi/libstub/riscv-stub.c index 380e4e251399..9c460843442f 100644 --- a/drivers/firmware/efi/libstub/riscv-stub.c +++ b/drivers/firmware/efi/libstub/riscv-stub.c @@ -25,7 +25,7 @@ typedef void __noreturn (*jump_kernel_func)(unsigned int, unsigned long); static u32 hartid; -static u32 get_boot_hartid_from_fdt(void) +static int get_boot_hartid_from_fdt(void) { const void *fdt; int chosen_node, len; @@ 
-33,23 +33,26 @@ static u32 get_boot_hartid_from_fdt(void) fdt = get_efi_config_table(DEVICE_TREE_GUID); if (!fdt) - return U32_MAX; + return -EINVAL; chosen_node = fdt_path_offset(fdt, "/chosen"); if (chosen_node < 0) - return U32_MAX; + return -EINVAL; prop = fdt_getprop((void *)fdt, chosen_node, "boot-hartid", &len); if (!prop || len != sizeof(u32)) - return U32_MAX; + return -EINVAL; - return fdt32_to_cpu(*prop); + hartid = fdt32_to_cpu(*prop); + return 0; } efi_status_t check_platform_features(void) { - hartid = get_boot_hartid_from_fdt(); - if (hartid == U32_MAX) { + int ret; + + ret = get_boot_hartid_from_fdt(); + if (ret) { efi_err("/chosen/boot-hartid missing or invalid!\n"); return EFI_UNSUPPORTED; } diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c index abdc8a6a3963..cae590bd08f2 100644 --- a/drivers/firmware/efi/vars.c +++ b/drivers/firmware/efi/vars.c @@ -742,6 +742,7 @@ int efivar_entry_set_safe(efi_char16_t *name, efi_guid_t vendor, u32 attributes, { const struct efivar_operations *ops; efi_status_t status; + unsigned long varsize; if (!__efivars) return -EINVAL; @@ -764,15 +765,17 @@ int efivar_entry_set_safe(efi_char16_t *name, efi_guid_t vendor, u32 attributes, return efivar_entry_set_nonblocking(name, vendor, attributes, size, data); + varsize = size + ucs2_strsize(name, 1024); if (!block) { if (down_trylock(&efivars_lock)) return -EBUSY; + status = check_var_size_nonblocking(attributes, varsize); } else { if (down_interruptible(&efivars_lock)) return -EINTR; + status = check_var_size(attributes, varsize); } - status = check_var_size(attributes, size + ucs2_strsize(name, 1024)); if (status != EFI_SUCCESS) { up(&efivars_lock); return -ENOSPC; diff --git a/drivers/gpio/gpio-74x164.c b/drivers/gpio/gpio-74x164.c index 4a55cdf089d6..e00c33310517 100644 --- a/drivers/gpio/gpio-74x164.c +++ b/drivers/gpio/gpio-74x164.c @@ -163,15 +163,13 @@ exit_destroy: return ret; } -static int gen_74x164_remove(struct spi_device *spi) +static void gen_74x164_remove(struct spi_device *spi) { struct gen_74x164_chip *chip = spi_get_drvdata(spi); gpiod_set_value_cansleep(chip->gpiod_oe, 0); gpiochip_remove(&chip->gpio_chip); mutex_destroy(&chip->lock); - - return 0; } static const struct spi_device_id gen_74x164_spi_ids[] = { diff --git a/drivers/gpio/gpio-aggregator.c b/drivers/gpio/gpio-aggregator.c index 869dc952cf45..0cb2664085cf 100644 --- a/drivers/gpio/gpio-aggregator.c +++ b/drivers/gpio/gpio-aggregator.c @@ -278,7 +278,8 @@ static int gpio_fwd_get(struct gpio_chip *chip, unsigned int offset) { struct gpiochip_fwd *fwd = gpiochip_get_data(chip); - return gpiod_get_value(fwd->descs[offset]); + return chip->can_sleep ? 
gpiod_get_value_cansleep(fwd->descs[offset]) + : gpiod_get_value(fwd->descs[offset]); } static int gpio_fwd_get_multiple(struct gpiochip_fwd *fwd, unsigned long *mask, @@ -293,7 +294,10 @@ static int gpio_fwd_get_multiple(struct gpiochip_fwd *fwd, unsigned long *mask, for_each_set_bit(i, mask, fwd->chip.ngpio) descs[j++] = fwd->descs[i]; - error = gpiod_get_array_value(j, descs, NULL, values); + if (fwd->chip.can_sleep) + error = gpiod_get_array_value_cansleep(j, descs, NULL, values); + else + error = gpiod_get_array_value(j, descs, NULL, values); if (error) return error; @@ -328,7 +332,10 @@ static void gpio_fwd_set(struct gpio_chip *chip, unsigned int offset, int value) { struct gpiochip_fwd *fwd = gpiochip_get_data(chip); - gpiod_set_value(fwd->descs[offset], value); + if (chip->can_sleep) + gpiod_set_value_cansleep(fwd->descs[offset], value); + else + gpiod_set_value(fwd->descs[offset], value); } static void gpio_fwd_set_multiple(struct gpiochip_fwd *fwd, unsigned long *mask, @@ -343,7 +350,10 @@ static void gpio_fwd_set_multiple(struct gpiochip_fwd *fwd, unsigned long *mask, descs[j++] = fwd->descs[i]; } - gpiod_set_array_value(j, descs, NULL, values); + if (fwd->chip.can_sleep) + gpiod_set_array_value_cansleep(j, descs, NULL, values); + else + gpiod_set_array_value(j, descs, NULL, values); } static void gpio_fwd_set_multiple_locked(struct gpio_chip *chip, diff --git a/drivers/gpio/gpio-max3191x.c b/drivers/gpio/gpio-max3191x.c index 51cd6f98d1c7..161c4751c5f7 100644 --- a/drivers/gpio/gpio-max3191x.c +++ b/drivers/gpio/gpio-max3191x.c @@ -443,14 +443,12 @@ static int max3191x_probe(struct spi_device *spi) return 0; } -static int max3191x_remove(struct spi_device *spi) +static void max3191x_remove(struct spi_device *spi) { struct max3191x_chip *max3191x = spi_get_drvdata(spi); gpiochip_remove(&max3191x->gpio); mutex_destroy(&max3191x->lock); - - return 0; } static int __init max3191x_register_driver(struct spi_driver *sdrv) diff --git a/drivers/gpio/gpio-max7301.c b/drivers/gpio/gpio-max7301.c index 5862d73bf325..11813f41d460 100644 --- a/drivers/gpio/gpio-max7301.c +++ b/drivers/gpio/gpio-max7301.c @@ -64,11 +64,9 @@ static int max7301_probe(struct spi_device *spi) return ret; } -static int max7301_remove(struct spi_device *spi) +static void max7301_remove(struct spi_device *spi) { __max730x_remove(&spi->dev); - - return 0; } static const struct spi_device_id max7301_id[] = { diff --git a/drivers/gpio/gpio-mc33880.c b/drivers/gpio/gpio-mc33880.c index 31d2be1bebc8..cd9b16dbe1a9 100644 --- a/drivers/gpio/gpio-mc33880.c +++ b/drivers/gpio/gpio-mc33880.c @@ -134,7 +134,7 @@ exit_destroy: return ret; } -static int mc33880_remove(struct spi_device *spi) +static void mc33880_remove(struct spi_device *spi) { struct mc33880 *mc; @@ -142,8 +142,6 @@ static int mc33880_remove(struct spi_device *spi) gpiochip_remove(&mc->chip); mutex_destroy(&mc->lock); - - return 0; } static struct spi_driver mc33880_driver = { diff --git a/drivers/gpio/gpio-pisosr.c b/drivers/gpio/gpio-pisosr.c index 8e04054cf07e..81a47ae09ff8 100644 --- a/drivers/gpio/gpio-pisosr.c +++ b/drivers/gpio/gpio-pisosr.c @@ -163,15 +163,13 @@ static int pisosr_gpio_probe(struct spi_device *spi) return 0; } -static int pisosr_gpio_remove(struct spi_device *spi) +static void pisosr_gpio_remove(struct spi_device *spi) { struct pisosr_gpio *gpio = spi_get_drvdata(spi); gpiochip_remove(&gpio->chip); mutex_destroy(&gpio->lock); - - return 0; } static const struct spi_device_id pisosr_gpio_id_table[] = { diff --git 
a/drivers/gpio/gpio-rockchip.c b/drivers/gpio/gpio-rockchip.c index a4c4e4584f5b..099e358d2491 100644 --- a/drivers/gpio/gpio-rockchip.c +++ b/drivers/gpio/gpio-rockchip.c @@ -410,10 +410,8 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type) level = rockchip_gpio_readl(bank, bank->gpio_regs->int_type); polarity = rockchip_gpio_readl(bank, bank->gpio_regs->int_polarity); - switch (type) { - case IRQ_TYPE_EDGE_BOTH: + if (type == IRQ_TYPE_EDGE_BOTH) { if (bank->gpio_type == GPIO_TYPE_V2) { - bank->toggle_edge_mode &= ~mask; rockchip_gpio_writel_bit(bank, d->hwirq, 1, bank->gpio_regs->int_bothedge); goto out; @@ -431,30 +429,34 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type) else polarity |= mask; } - break; - case IRQ_TYPE_EDGE_RISING: - bank->toggle_edge_mode &= ~mask; - level |= mask; - polarity |= mask; - break; - case IRQ_TYPE_EDGE_FALLING: - bank->toggle_edge_mode &= ~mask; - level |= mask; - polarity &= ~mask; - break; - case IRQ_TYPE_LEVEL_HIGH: - bank->toggle_edge_mode &= ~mask; - level &= ~mask; - polarity |= mask; - break; - case IRQ_TYPE_LEVEL_LOW: - bank->toggle_edge_mode &= ~mask; - level &= ~mask; - polarity &= ~mask; - break; - default: - ret = -EINVAL; - goto out; + } else { + if (bank->gpio_type == GPIO_TYPE_V2) { + rockchip_gpio_writel_bit(bank, d->hwirq, 0, + bank->gpio_regs->int_bothedge); + } else { + bank->toggle_edge_mode &= ~mask; + } + switch (type) { + case IRQ_TYPE_EDGE_RISING: + level |= mask; + polarity |= mask; + break; + case IRQ_TYPE_EDGE_FALLING: + level |= mask; + polarity &= ~mask; + break; + case IRQ_TYPE_LEVEL_HIGH: + level &= ~mask; + polarity |= mask; + break; + case IRQ_TYPE_LEVEL_LOW: + level &= ~mask; + polarity &= ~mask; + break; + default: + ret = -EINVAL; + goto out; + } } rockchip_gpio_writel(bank, level, bank->gpio_regs->int_type); diff --git a/drivers/gpio/gpio-sifive.c b/drivers/gpio/gpio-sifive.c index 403f9e833d6a..7d82388b4ab7 100644 --- a/drivers/gpio/gpio-sifive.c +++ b/drivers/gpio/gpio-sifive.c @@ -223,7 +223,7 @@ static int sifive_gpio_probe(struct platform_device *pdev) NULL, chip->base + SIFIVE_GPIO_OUTPUT_EN, chip->base + SIFIVE_GPIO_INPUT_EN, - 0); + BGPIOF_READ_OUTPUT_REG_SET); if (ret) { dev_err(dev, "unable to init generic GPIO\n"); return ret; diff --git a/drivers/gpio/gpio-sim.c b/drivers/gpio/gpio-sim.c index 04b137eca8da..8e5d87984a48 100644 --- a/drivers/gpio/gpio-sim.c +++ b/drivers/gpio/gpio-sim.c @@ -547,7 +547,7 @@ struct gpio_sim_bank { * * So we need to store the pointer to the parent struct here. We can * dereference it anywhere we need with no checks and no locking as - * it's guaranteed to survive the childred and protected by configfs + * it's guaranteed to survive the children and protected by configfs * locks. * * Same for other structures. @@ -570,6 +570,11 @@ static struct gpio_sim_bank *to_gpio_sim_bank(struct config_item *item) return container_of(group, struct gpio_sim_bank, group); } +static bool gpio_sim_bank_has_label(struct gpio_sim_bank *bank) +{ + return bank->label && *bank->label; +} + static struct gpio_sim_device * gpio_sim_bank_get_device(struct gpio_sim_bank *bank) { @@ -770,9 +775,15 @@ static int gpio_sim_add_hogs(struct gpio_sim_device *dev) * point the device doesn't exist yet and so dev_name() * is not available. 
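The new gpio_sim_bank_has_label() helper gives the swnode property code and the hog-naming code below one definition of "the user supplied a label". The resulting naming rule, reduced to a sketch with a hypothetical demo_* name:

	#include <linux/slab.h>
	#include <linux/string.h>

	/* Prefer the user-supplied label, else the predictable generated name. */
	static char *demo_chip_label(const char *label, unsigned int dev_id,
				     const char *node_name)
	{
		if (label && *label)
			return kstrdup(label, GFP_KERNEL);

		return kasprintf(GFP_KERNEL, "gpio-sim.%u-%s", dev_id, node_name);
	}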
*/ - hog->chip_label = kasprintf(GFP_KERNEL, - "gpio-sim.%u-%s", dev->id, - fwnode_get_name(bank->swnode)); + if (gpio_sim_bank_has_label(bank)) + hog->chip_label = kstrdup(bank->label, + GFP_KERNEL); + else + hog->chip_label = kasprintf(GFP_KERNEL, + "gpio-sim.%u-%s", + dev->id, + fwnode_get_name( + bank->swnode)); if (!hog->chip_label) { gpio_sim_remove_hogs(dev); return -ENOMEM; @@ -816,7 +827,7 @@ gpio_sim_make_bank_swnode(struct gpio_sim_bank *bank, properties[prop_idx++] = PROPERTY_ENTRY_U32("ngpios", bank->num_lines); - if (bank->label && (strlen(bank->label) > 0)) + if (gpio_sim_bank_has_label(bank)) properties[prop_idx++] = PROPERTY_ENTRY_STRING("gpio-sim,label", bank->label); @@ -1311,7 +1322,7 @@ static void gpio_sim_hog_config_item_release(struct config_item *item) kfree(hog); } -struct configfs_item_operations gpio_sim_hog_config_item_ops = { +static struct configfs_item_operations gpio_sim_hog_config_item_ops = { .release = gpio_sim_hog_config_item_release, }; diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c index 34b36a8c035f..031fe105b58e 100644 --- a/drivers/gpio/gpio-tegra186.c +++ b/drivers/gpio/gpio-tegra186.c @@ -343,9 +343,12 @@ static int tegra186_gpio_of_xlate(struct gpio_chip *chip, return offset + pin; } +#define to_tegra_gpio(x) container_of((x), struct tegra_gpio, gpio) + static void tegra186_irq_ack(struct irq_data *data) { - struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data); + struct gpio_chip *gc = irq_data_get_irq_chip_data(data); + struct tegra_gpio *gpio = to_tegra_gpio(gc); void __iomem *base; base = tegra186_gpio_get_base(gpio, data->hwirq); @@ -357,7 +360,8 @@ static void tegra186_irq_ack(struct irq_data *data) static void tegra186_irq_mask(struct irq_data *data) { - struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data); + struct gpio_chip *gc = irq_data_get_irq_chip_data(data); + struct tegra_gpio *gpio = to_tegra_gpio(gc); void __iomem *base; u32 value; @@ -372,7 +376,8 @@ static void tegra186_irq_mask(struct irq_data *data) static void tegra186_irq_unmask(struct irq_data *data) { - struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data); + struct gpio_chip *gc = irq_data_get_irq_chip_data(data); + struct tegra_gpio *gpio = to_tegra_gpio(gc); void __iomem *base; u32 value; @@ -387,7 +392,8 @@ static void tegra186_irq_unmask(struct irq_data *data) static int tegra186_irq_set_type(struct irq_data *data, unsigned int type) { - struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data); + struct gpio_chip *gc = irq_data_get_irq_chip_data(data); + struct tegra_gpio *gpio = to_tegra_gpio(gc); void __iomem *base; u32 value; @@ -1069,6 +1075,7 @@ static const struct tegra_gpio_soc tegra241_main_soc = { .ports = tegra241_main_ports, .name = "tegra241-gpio", .instance = 0, + .num_irqs_per_bank = 8, }; #define TEGRA241_AON_GPIO_PORT(_name, _bank, _port, _pins) \ @@ -1089,6 +1096,7 @@ static const struct tegra_gpio_soc tegra241_aon_soc = { .ports = tegra241_aon_ports, .name = "tegra241-gpio-aon", .instance = 1, + .num_irqs_per_bank = 8, }; static const struct of_device_id tegra186_gpio_of_match[] = { diff --git a/drivers/gpio/gpio-ts4900.c b/drivers/gpio/gpio-ts4900.c index d885032cf814..d918d2df4de2 100644 --- a/drivers/gpio/gpio-ts4900.c +++ b/drivers/gpio/gpio-ts4900.c @@ -1,7 +1,7 @@ /* * Digital I/O driver for Technologic Systems I2C FPGA Core * - * Copyright (C) 2015 Technologic Systems + * Copyright (C) 2015, 2018 Technologic Systems * Copyright (C) 2016 Savoir-Faire Linux * * This program is free software; you can 
redistribute it and/or @@ -55,19 +55,33 @@ static int ts4900_gpio_direction_input(struct gpio_chip *chip, { struct ts4900_gpio_priv *priv = gpiochip_get_data(chip); - /* - * This will clear the output enable bit, the other bits are - * dontcare when this is cleared + /* Only clear the OE bit here, requires a RMW. Prevents potential issue + * with OE and data getting to the physical pin at different times. */ - return regmap_write(priv->regmap, offset, 0); + return regmap_update_bits(priv->regmap, offset, TS4900_GPIO_OE, 0); } static int ts4900_gpio_direction_output(struct gpio_chip *chip, unsigned int offset, int value) { struct ts4900_gpio_priv *priv = gpiochip_get_data(chip); + unsigned int reg; int ret; + /* If changing from an input to an output, we need to first set the + * proper data bit to what is requested and then set OE bit. This + * prevents a glitch that can occur on the IO line + */ + regmap_read(priv->regmap, offset, ®); + if (!(reg & TS4900_GPIO_OE)) { + if (value) + reg = TS4900_GPIO_OUT; + else + reg &= ~TS4900_GPIO_OUT; + + regmap_write(priv->regmap, offset, reg); + } + if (value) ret = regmap_write(priv->regmap, offset, TS4900_GPIO_OE | TS4900_GPIO_OUT); diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index c0f6a25c3279..a5495ad31c9c 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c @@ -307,7 +307,8 @@ static struct gpio_desc *acpi_request_own_gpiod(struct gpio_chip *chip, if (IS_ERR(desc)) return desc; - ret = gpio_set_debounce_timeout(desc, agpio->debounce_timeout); + /* ACPI uses hundredths of milliseconds units */ + ret = gpio_set_debounce_timeout(desc, agpio->debounce_timeout * 10); if (ret) dev_warn(chip->parent, "Failed to set debounce-timeout for pin 0x%04X, err %d\n", @@ -1035,7 +1036,8 @@ int acpi_dev_gpio_irq_get_by(struct acpi_device *adev, const char *name, int ind if (ret < 0) return ret; - ret = gpio_set_debounce_timeout(desc, info.debounce); + /* ACPI uses hundredths of milliseconds units */ + ret = gpio_set_debounce_timeout(desc, info.debounce * 10); if (ret) return ret; diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c index c7b5446d01fd..ffa0256cad5a 100644 --- a/drivers/gpio/gpiolib-cdev.c +++ b/drivers/gpio/gpiolib-cdev.c @@ -330,7 +330,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip) goto out_free_lh; } - ret = gpiod_request(desc, lh->label); + ret = gpiod_request_user(desc, lh->label); if (ret) goto out_free_lh; lh->descs[i] = desc; @@ -1378,7 +1378,7 @@ static int linereq_create(struct gpio_device *gdev, void __user *ip) goto out_free_linereq; } - ret = gpiod_request(desc, lr->label); + ret = gpiod_request_user(desc, lr->label); if (ret) goto out_free_linereq; @@ -1764,7 +1764,7 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip) } } - ret = gpiod_request(desc, le->label); + ret = gpiod_request_user(desc, le->label); if (ret) goto out_free_le; le->desc = desc; diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c index 4098bc7f88b7..44c1ad51b3fe 100644 --- a/drivers/gpio/gpiolib-sysfs.c +++ b/drivers/gpio/gpiolib-sysfs.c @@ -475,12 +475,9 @@ static ssize_t export_store(struct class *class, * they may be undone on its behalf too. 
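Back in the gpiolib-acpi hunks above: ACPI encodes debounce in hundredths of a millisecond, while gpio_set_debounce_timeout() takes microseconds, hence the multiplication by 10. A worked conversion as a sketch (function name illustrative):

	/* ACPI debounce units (1/100 ms) to microseconds. */
	static unsigned int acpi_debounce_to_us(unsigned int hundredths_ms)
	{
		/* e.g. an ACPI value of 500 is 5 ms, i.e. 5000 us */
		return hundredths_ms * 10;
	}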
*/ - status = gpiod_request(desc, "sysfs"); - if (status) { - if (status == -EPROBE_DEFER) - status = -ENODEV; + status = gpiod_request_user(desc, "sysfs"); + if (status) goto done; - } status = gpiod_set_transitory(desc, false); if (!status) { diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 3859911b61e9..defb7c464b87 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -1701,11 +1701,6 @@ static inline void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gc) */ int gpiochip_generic_request(struct gpio_chip *gc, unsigned int offset) { -#ifdef CONFIG_PINCTRL - if (list_empty(&gc->gpiodev->pin_ranges)) - return 0; -#endif - return pinctrl_gpio_request(gc->gpiodev->base + offset); } EXPORT_SYMBOL_GPL(gpiochip_generic_request); @@ -1717,11 +1712,6 @@ EXPORT_SYMBOL_GPL(gpiochip_generic_request); */ void gpiochip_generic_free(struct gpio_chip *gc, unsigned int offset) { -#ifdef CONFIG_PINCTRL - if (list_empty(&gc->gpiodev->pin_ranges)) - return; -#endif - pinctrl_gpio_free(gc->gpiodev->base + offset); } EXPORT_SYMBOL_GPL(gpiochip_generic_free); @@ -2227,6 +2217,16 @@ static int gpio_set_bias(struct gpio_desc *desc) return gpio_set_config_with_argument_optional(desc, bias, arg); } +/** + * gpio_set_debounce_timeout() - Set debounce timeout + * @desc: GPIO descriptor to set the debounce timeout + * @debounce: Debounce timeout in microseconds + * + * The function asks the underlying GPIO driver to set the debounce timeout + * in the hardware. + * + * Returns 0 on success, or negative error code otherwise. + */ int gpio_set_debounce_timeout(struct gpio_desc *desc, unsigned int debounce) { return gpio_set_config_with_argument_optional(desc, @@ -3147,6 +3147,16 @@ int gpiod_to_irq(const struct gpio_desc *desc) return retirq; } +#ifdef CONFIG_GPIOLIB_IRQCHIP + if (gc->irq.chip) { + /* + * Avoid a race condition with other code, which tries to look up + * an IRQ before the irqchip has been properly registered, + * i.e. while gpiochip is still being brought up.
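With gpiod_to_irq() now returning -EPROBE_DEFER during bring-up, a consumer only needs to propagate the error and the driver core retries its probe later. A typical consumer shape, sketched with hypothetical demo_* names:

	#include <linux/err.h>
	#include <linux/gpio/consumer.h>
	#include <linux/interrupt.h>
	#include <linux/platform_device.h>

	static irqreturn_t demo_isr(int irq, void *data);

	static int demo_probe(struct platform_device *pdev)
	{
		struct gpio_desc *gpiod;
		int irq;

		gpiod = devm_gpiod_get(&pdev->dev, "demo", GPIOD_IN);
		if (IS_ERR(gpiod))
			return PTR_ERR(gpiod);

		irq = gpiod_to_irq(gpiod);
		if (irq < 0)
			return irq;	/* may be -EPROBE_DEFER: probe is retried */

		return devm_request_irq(&pdev->dev, irq, demo_isr, 0, "demo", NULL);
	}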
+ */ + return -EPROBE_DEFER; + } +#endif return -ENXIO; } EXPORT_SYMBOL_GPL(gpiod_to_irq); diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h index 30bc3f80f83e..c31f4626915d 100644 --- a/drivers/gpio/gpiolib.h +++ b/drivers/gpio/gpiolib.h @@ -135,6 +135,18 @@ struct gpio_desc { int gpiod_request(struct gpio_desc *desc, const char *label); void gpiod_free(struct gpio_desc *desc); + +static inline int gpiod_request_user(struct gpio_desc *desc, const char *label) +{ + int ret; + + ret = gpiod_request(desc, label); + if (ret == -EPROBE_DEFER) + ret = -ENODEV; + + return ret; +} + int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id, unsigned long lflags, enum gpiod_flags dflags); int gpio_set_debounce_timeout(struct gpio_desc *desc, unsigned int debounce); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 82011e75ed85..c4387b38229c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -1141,7 +1141,7 @@ int amdgpu_display_framebuffer_init(struct drm_device *dev, if (ret) return ret; - if (!dev->mode_config.allow_fb_modifiers) { + if (!dev->mode_config.allow_fb_modifiers && !adev->enable_virtual_display) { drm_WARN_ONCE(dev, adev->family >= AMDGPU_FAMILY_AI, "GFX9+ requires FB check based on format modifier\n"); ret = check_tiling_flags_gfx6(rfb); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 63a089992645..0ead08ba58c2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -2011,6 +2011,9 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, return -ENODEV; } + if (amdgpu_aspm == -1 && !pcie_aspm_enabled(pdev)) + amdgpu_aspm = 0; + if (amdgpu_virtual_display || amdgpu_device_asic_has_dc_support(flags & AMD_ASIC_MASK)) supports_atomic = true; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c index d99c8779b51e..5224d9a39737 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c @@ -391,7 +391,6 @@ static struct drm_plane *amdgpu_vkms_plane_init(struct drm_device *dev, int index) { struct drm_plane *plane; - uint64_t modifiers[] = {DRM_FORMAT_MOD_LINEAR, DRM_FORMAT_MOD_INVALID}; int ret; plane = kzalloc(sizeof(*plane), GFP_KERNEL); @@ -402,7 +401,7 @@ static struct drm_plane *amdgpu_vkms_plane_init(struct drm_device *dev, &amdgpu_vkms_plane_funcs, amdgpu_vkms_formats, ARRAY_SIZE(amdgpu_vkms_formats), - modifiers, type, NULL); + NULL, type, NULL); if (ret) { kfree(plane); return ERR_PTR(ret); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index b37fc7d7d2c7..418341a67517 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -768,11 +768,17 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, * Check if all VM PDs/PTs are ready for updates * * Returns: - * True if eviction list is empty. + * True if VM is not evicting. 
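The amdgpu_vm_ready() rework below samples vm->evicting under the eviction lock: a flag that another thread flips under a lock must also be read under that lock, or callers can act on a stale "ready" and race with eviction. The generic shape, sketched with hypothetical demo_* names and a plain mutex:

	#include <linux/mutex.h>

	struct demo_vm {
		struct mutex eviction_lock;
		bool evicting;
	};

	/* Take the writer's lock before sampling the flag. */
	static bool demo_vm_ready(struct demo_vm *vm)
	{
		bool ret;

		mutex_lock(&vm->eviction_lock);
		ret = !vm->evicting;
		mutex_unlock(&vm->eviction_lock);

		return ret;
	}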
*/ bool amdgpu_vm_ready(struct amdgpu_vm *vm) { - return list_empty(&vm->evicted); + bool ret; + + amdgpu_vm_eviction_lock(vm); + ret = !vm->evicting; + amdgpu_vm_eviction_unlock(vm); + + return ret && list_empty(&vm->evicted); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c index b4eddf6e98a6..ff738e9725ee 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c @@ -543,7 +543,9 @@ static void gfxhub_v2_1_utcl2_harvest(struct amdgpu_device *adev) adev->gfx.config.max_sh_per_se * adev->gfx.config.max_shader_engines); - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 3)) { + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(10, 3, 1): + case IP_VERSION(10, 3, 3): /* Get SA disabled bitmap from eFuse setting */ efuse_setting = RREG32_SOC15(GC, 0, mmCC_GC_SA_UNIT_DISABLE); efuse_setting &= CC_GC_SA_UNIT_DISABLE__SA_DISABLE_MASK; @@ -566,6 +568,9 @@ static void gfxhub_v2_1_utcl2_harvest(struct amdgpu_device *adev) disabled_sa = tmp; WREG32_SOC15(GC, 0, mmGCUTCL2_HARVEST_BYPASS_GROUPS_YELLOW_CARP, disabled_sa); + break; + default: + break; } } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index e8e4749e9c79..f0638db57111 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -2057,6 +2057,10 @@ static int sdma_v4_0_suspend(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + /* SMU saves SDMA state for us */ + if (adev->in_s0ix) + return 0; + return sdma_v4_0_hw_fini(adev); } @@ -2064,6 +2068,10 @@ static int sdma_v4_0_resume(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + /* SMU restores SDMA state for us */ + if (adev->in_s0ix) + return 0; + return sdma_v4_0_hw_init(adev); } diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 0fc1747e4a70..12f80fdc1fbc 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -619,8 +619,8 @@ soc15_asic_reset_method(struct amdgpu_device *adev) static int soc15_asic_reset(struct amdgpu_device *adev) { /* original raven doesn't have full asic reset */ - if ((adev->apu_flags & AMD_APU_IS_RAVEN) && - !(adev->apu_flags & AMD_APU_IS_RAVEN2)) + if ((adev->apu_flags & AMD_APU_IS_RAVEN) || + (adev->apu_flags & AMD_APU_IS_RAVEN2)) return 0; switch (soc15_asic_reset_method(adev)) { @@ -1114,8 +1114,11 @@ static int soc15_common_early_init(void *handle) AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_VCN_MGCG; + /* + * MMHUB PG needs to be disabled for Picasso for + * stability reasons. 
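A few hunks up, sdma_v4_0 skips hw_fini()/hw_init() in S0ix because the SMU firmware saves and restores SDMA state itself; tearing the block down from the driver would fight the firmware. The guard generalizes to a small sketch with hypothetical demo_* names:

	struct demo_adev {
		bool in_s0ix;
	};

	static int demo_hw_fini(struct demo_adev *adev);

	/* Leave suspend-time save/restore to firmware when it owns the state. */
	static int demo_suspend(struct demo_adev *adev)
	{
		if (adev->in_s0ix)
			return 0;	/* SMU saves this IP block's state */

		return demo_hw_fini(adev);
	}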
+ */ adev->pg_flags = AMD_PG_SUPPORT_SDMA | - AMD_PG_SUPPORT_MMHUB | AMD_PG_SUPPORT_VCN; } else { adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 7f9773f8dab6..075429bea427 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -3653,7 +3653,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev) /* Use GRPH_PFLIP interrupt */ for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT; - i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1; + i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1; i++) { r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq); if (r) { @@ -4256,6 +4256,9 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) } #endif + /* Disable vblank IRQs aggressively for power-saving. */ + adev_to_drm(adev)->vblank_disable_immediate = true; + /* loops over all connectors on the board */ for (i = 0; i < link_cnt; i++) { struct dc_link *link = NULL; @@ -4301,19 +4304,17 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) update_connector_ext_caps(aconnector); if (psr_feature_enabled) amdgpu_dm_set_psr_caps(link); + + /* TODO: Fix vblank control helpers to delay PSR entry to allow this when + * PSR is also supported. + */ + if (link->psr_settings.psr_feature_enabled) + adev_to_drm(adev)->vblank_disable_immediate = false; } } - /* - * Disable vblank IRQs aggressively for power-saving. - * - * TODO: Fix vblank control helpers to delay PSR entry to allow this when PSR - * is also supported. - */ - adev_to_drm(adev)->vblank_disable_immediate = !psr_feature_enabled; - /* Software is initialized. Now we can register interrupt handlers. 
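The vblank_disable_immediate change just above replaces a single after-the-loop assignment with a default-then-veto scheme: set the aggressive power-saving default before the connector walk, then let any PSR-capable link clear it. Reduced to its essentials, with hypothetical demo_* names:

	struct demo_drm {
		bool vblank_disable_immediate;
	};

	/* Aggressive default, vetoed per link capability while iterating. */
	static void demo_init_vblank_policy(struct demo_drm *drm,
					    const bool *link_has_psr, int nlinks)
	{
		int i;

		drm->vblank_disable_immediate = true;

		for (i = 0; i < nlinks; i++)
			if (link_has_psr[i])
				drm->vblank_disable_immediate = false;
	}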
*/ switch (adev->asic_type) { #if defined(CONFIG_DRM_AMD_DC_SI) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c index f977f29907df..10c7be40dfb0 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c @@ -473,8 +473,10 @@ static void dcn3_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base) clk_mgr_base->bw_params->dc_mode_softmax_memclk = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_UCLK); /* Refresh bounding box */ + DC_FP_START(); clk_mgr_base->ctx->dc->res_pool->funcs->update_bw_bounding_box( clk_mgr->base.ctx->dc, clk_mgr_base->bw_params); + DC_FP_END(); } static bool dcn3_is_smu_present(struct clk_mgr *clk_mgr_base) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c index a1011f3273f3..de3f4643eeef 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c @@ -120,7 +120,11 @@ static int dcn31_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, result = dcn31_smu_wait_for_response(clk_mgr, 10, 200000); if (result == VBIOSSMC_Result_Failed) { - ASSERT(0); + if (msg_id == VBIOSSMC_MSG_TransferTableDram2Smu && + param == TABLE_WATERMARKS) + DC_LOG_WARNING("Watermarks table not configured properly by SMU"); + else + ASSERT(0); REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Result_OK); return -1; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 6f5528d34093..ba1aa994db4b 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -985,10 +985,13 @@ static bool dc_construct(struct dc *dc, goto fail; #ifdef CONFIG_DRM_AMD_DC_DCN dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present; -#endif - if (dc->res_pool->funcs->update_bw_bounding_box) + if (dc->res_pool->funcs->update_bw_bounding_box) { + DC_FP_START(); dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params); + DC_FP_END(); + } +#endif /* Creation of current_state must occur after dc->dml * is initialized in dc_create_resource_pool because @@ -1220,6 +1223,8 @@ struct dc *dc_create(const struct dc_init_data *init_params) dc->caps.max_dp_protocol_version = DP_VERSION_1_4; + dc->caps.max_otg_num = dc->res_pool->res_cap->num_timing_generator; + if (dc->res_pool->dmcu != NULL) dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index b3912ff9dc91..18757c158523 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -1964,10 +1964,6 @@ enum dc_status dc_remove_stream_from_ctx( dc->res_pool, del_pipe->stream_res.stream_enc, false); - /* Release link encoder from stream in new dc_state. 
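Just above, both dc.c and dcn30_clk_mgr.c now bracket update_bw_bounding_box() with DC_FP_START()/DC_FP_END(): kernel code must not touch FPU/SIMD registers outside an explicit protected section, and the DML bandwidth math is floating point. The discipline, sketched as a hypothetical demo_* wrapper around the real DC macros:

	/* DC_FP_START() saves FPU state and disables preemption;
	 * DC_FP_END() restores it. All FP math stays inside the pair.
	 */
	static void demo_update_bounding_box(struct dc *dc,
					     struct clk_bw_params *bw_params)
	{
		DC_FP_START();
		dc->res_pool->funcs->update_bw_bounding_box(dc, bw_params);
		DC_FP_END();
	}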
*/ - if (dc->res_pool->funcs->link_enc_unassign) - dc->res_pool->funcs->link_enc_unassign(new_ctx, del_pipe->stream); - #if defined(CONFIG_DRM_AMD_DC_DCN) if (is_dp_128b_132b_signal(del_pipe)) { update_hpo_dp_stream_engine_usage( diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 288e7b01f561..b51864890621 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -202,6 +202,7 @@ struct dc_caps { bool edp_dsc_support; bool vbios_lttpr_aware; bool vbios_lttpr_enable; + uint32_t max_otg_num; }; struct dc_bug_wa { diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 26ec69bb5db9..eb2755bdb30e 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -1834,9 +1834,29 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context) break; } } - // We are trying to enable eDP, don't power down VDD - if (can_apply_edp_fast_boot) + + /* + * TO-DO: So far the code logic below only addresses single eDP case. + * For dual eDP case, there are a few things that need to be + * implemented first: + * + * 1. Change the fastboot logic above, so eDP link[0 or 1]'s + * stream[0 or 1] will all be checked. + * + * 2. Change keep_edp_vdd_on to an array, and maintain keep_edp_vdd_on + * for each eDP. + * + * Once above 2 things are completed, we can then change the logic below + * correspondingly, so dual eDP case will be fully covered. + */ + + // We are trying to enable eDP, don't power down VDD if eDP stream is existing + if ((edp_stream_num == 1 && edp_streams[0] != NULL) || can_apply_edp_fast_boot) { keep_edp_vdd_on = true; + DC_LOG_EVENT_LINK_TRAINING("Keep eDP Vdd on\n"); + } else { + DC_LOG_EVENT_LINK_TRAINING("No eDP stream enabled, turn eDP Vdd off\n"); + } } // Check seamless boot support diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index 2bc93df023ad..2a72517e2b28 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -1069,7 +1069,7 @@ static const struct dc_debug_options debug_defaults_drv = { .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = true, - .pipe_split_policy = MPC_SPLIT_DYNAMIC, + .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP, .force_single_disp_pipe_split = false, .disable_dcc = DCC_ENABLE, .vsr_support = true, diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c index 90c73a1cb986..5e3bcaf12cac 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c @@ -138,8 +138,11 @@ static uint32_t convert_and_clamp( ret_val = wm_ns * refclk_mhz; ret_val /= 1000; - if (ret_val > clamp_value) + if (ret_val > clamp_value) { + /* clamping WMs is abnormal, unexpected and may lead to underflow*/ + ASSERT(0); ret_val = clamp_value; + } return ret_val; } @@ -159,7 +162,7 @@ static bool hubbub31_program_urgent_watermarks( if (safe_to_lower || watermarks->a.urgent_ns > hubbub2->watermarks.a.urgent_ns) { hubbub2->watermarks.a.urgent_ns = watermarks->a.urgent_ns; prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0x3fff); REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0, 
DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value); @@ -193,7 +196,7 @@ static bool hubbub31_program_urgent_watermarks( if (safe_to_lower || watermarks->a.urgent_latency_ns > hubbub2->watermarks.a.urgent_latency_ns) { hubbub2->watermarks.a.urgent_latency_ns = watermarks->a.urgent_latency_ns; prog_wm_value = convert_and_clamp(watermarks->a.urgent_latency_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0x3fff); REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, prog_wm_value); } else if (watermarks->a.urgent_latency_ns < hubbub2->watermarks.a.urgent_latency_ns) @@ -203,7 +206,7 @@ static bool hubbub31_program_urgent_watermarks( if (safe_to_lower || watermarks->b.urgent_ns > hubbub2->watermarks.b.urgent_ns) { hubbub2->watermarks.b.urgent_ns = watermarks->b.urgent_ns; prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0x3fff); REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value); @@ -237,7 +240,7 @@ static bool hubbub31_program_urgent_watermarks( if (safe_to_lower || watermarks->b.urgent_latency_ns > hubbub2->watermarks.b.urgent_latency_ns) { hubbub2->watermarks.b.urgent_latency_ns = watermarks->b.urgent_latency_ns; prog_wm_value = convert_and_clamp(watermarks->b.urgent_latency_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0x3fff); REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, prog_wm_value); } else if (watermarks->b.urgent_latency_ns < hubbub2->watermarks.b.urgent_latency_ns) @@ -247,7 +250,7 @@ static bool hubbub31_program_urgent_watermarks( if (safe_to_lower || watermarks->c.urgent_ns > hubbub2->watermarks.c.urgent_ns) { hubbub2->watermarks.c.urgent_ns = watermarks->c.urgent_ns; prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0x3fff); REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, 0, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value); @@ -281,7 +284,7 @@ static bool hubbub31_program_urgent_watermarks( if (safe_to_lower || watermarks->c.urgent_latency_ns > hubbub2->watermarks.c.urgent_latency_ns) { hubbub2->watermarks.c.urgent_latency_ns = watermarks->c.urgent_latency_ns; prog_wm_value = convert_and_clamp(watermarks->c.urgent_latency_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0x3fff); REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, 0, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, prog_wm_value); } else if (watermarks->c.urgent_latency_ns < hubbub2->watermarks.c.urgent_latency_ns) @@ -291,7 +294,7 @@ static bool hubbub31_program_urgent_watermarks( if (safe_to_lower || watermarks->d.urgent_ns > hubbub2->watermarks.d.urgent_ns) { hubbub2->watermarks.d.urgent_ns = watermarks->d.urgent_ns; prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0x3fff); REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, 0, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value); @@ -325,7 +328,7 @@ static bool hubbub31_program_urgent_watermarks( if (safe_to_lower || watermarks->d.urgent_latency_ns > hubbub2->watermarks.d.urgent_latency_ns) { hubbub2->watermarks.d.urgent_latency_ns = watermarks->d.urgent_latency_ns; prog_wm_value = convert_and_clamp(watermarks->d.urgent_latency_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0x3fff); REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, 0, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, prog_wm_value); } else if (watermarks->d.urgent_latency_ns < hubbub2->watermarks.d.urgent_latency_ns) @@ -351,7 +354,7 @@ 
static bool hubbub31_program_stutter_watermarks( watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns; prog_wm_value = convert_and_clamp( watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n" @@ -367,7 +370,7 @@ static bool hubbub31_program_stutter_watermarks( watermarks->a.cstate_pstate.cstate_exit_ns; prog_wm_value = convert_and_clamp( watermarks->a.cstate_pstate.cstate_exit_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n" @@ -383,7 +386,7 @@ static bool hubbub31_program_stutter_watermarks( watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns; prog_wm_value = convert_and_clamp( watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, 0, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_A calculated =%d\n" @@ -399,7 +402,7 @@ static bool hubbub31_program_stutter_watermarks( watermarks->a.cstate_pstate.cstate_exit_z8_ns; prog_wm_value = convert_and_clamp( watermarks->a.cstate_pstate.cstate_exit_z8_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, 0, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_A calculated =%d\n" @@ -416,7 +419,7 @@ static bool hubbub31_program_stutter_watermarks( watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns; prog_wm_value = convert_and_clamp( watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n" @@ -432,7 +435,7 @@ static bool hubbub31_program_stutter_watermarks( watermarks->b.cstate_pstate.cstate_exit_ns; prog_wm_value = convert_and_clamp( watermarks->b.cstate_pstate.cstate_exit_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n" @@ -448,7 +451,7 @@ static bool hubbub31_program_stutter_watermarks( watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns; prog_wm_value = convert_and_clamp( watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, 0, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_B calculated =%d\n" @@ -464,7 +467,7 @@ static bool hubbub31_program_stutter_watermarks( watermarks->b.cstate_pstate.cstate_exit_z8_ns; prog_wm_value = convert_and_clamp( watermarks->b.cstate_pstate.cstate_exit_z8_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, 0, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_B calculated =%d\n" @@ -481,7 +484,7 @@ static bool hubbub31_program_stutter_watermarks( watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns; 
prog_wm_value = convert_and_clamp( watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, 0, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n" @@ -497,7 +500,7 @@ static bool hubbub31_program_stutter_watermarks( watermarks->c.cstate_pstate.cstate_exit_ns; prog_wm_value = convert_and_clamp( watermarks->c.cstate_pstate.cstate_exit_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, 0, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n" @@ -513,7 +516,7 @@ static bool hubbub31_program_stutter_watermarks( watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns; prog_wm_value = convert_and_clamp( watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, 0, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_C calculated =%d\n" @@ -529,7 +532,7 @@ static bool hubbub31_program_stutter_watermarks( watermarks->c.cstate_pstate.cstate_exit_z8_ns; prog_wm_value = convert_and_clamp( watermarks->c.cstate_pstate.cstate_exit_z8_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, 0, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_C calculated =%d\n" @@ -546,7 +549,7 @@ static bool hubbub31_program_stutter_watermarks( watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns; prog_wm_value = convert_and_clamp( watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, 0, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n" @@ -562,7 +565,7 @@ static bool hubbub31_program_stutter_watermarks( watermarks->d.cstate_pstate.cstate_exit_ns; prog_wm_value = convert_and_clamp( watermarks->d.cstate_pstate.cstate_exit_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, 0, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n" @@ -578,7 +581,7 @@ static bool hubbub31_program_stutter_watermarks( watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns; prog_wm_value = convert_and_clamp( watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, 0, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_D calculated =%d\n" @@ -594,7 +597,7 @@ static bool hubbub31_program_stutter_watermarks( watermarks->d.cstate_pstate.cstate_exit_z8_ns; prog_wm_value = convert_and_clamp( watermarks->d.cstate_pstate.cstate_exit_z8_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, 0, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_D calculated =%d\n" @@ -625,7 +628,7 @@ static bool hubbub31_program_pstate_watermarks( watermarks->a.cstate_pstate.pstate_change_ns; prog_wm_value = convert_and_clamp( watermarks->a.cstate_pstate.pstate_change_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 
0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, 0, DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n" @@ -642,7 +645,7 @@ static bool hubbub31_program_pstate_watermarks( watermarks->b.cstate_pstate.pstate_change_ns; prog_wm_value = convert_and_clamp( watermarks->b.cstate_pstate.pstate_change_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, 0, DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n" @@ -659,7 +662,7 @@ static bool hubbub31_program_pstate_watermarks( watermarks->c.cstate_pstate.pstate_change_ns; prog_wm_value = convert_and_clamp( watermarks->c.cstate_pstate.pstate_change_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, 0, DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n" @@ -676,7 +679,7 @@ static bool hubbub31_program_pstate_watermarks( watermarks->d.cstate_pstate.pstate_change_ns; prog_wm_value = convert_and_clamp( watermarks->d.cstate_pstate.pstate_change_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, 0, DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n" diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index e2cae97f4ff1..48cc009d9bdf 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -3462,8 +3462,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, attr == &sensor_dev_attr_power2_cap_min.dev_attr.attr || attr == &sensor_dev_attr_power2_cap.dev_attr.attr || attr == &sensor_dev_attr_power2_cap_default.dev_attr.attr || - attr == &sensor_dev_attr_power2_label.dev_attr.attr || - attr == &sensor_dev_attr_power1_label.dev_attr.attr)) + attr == &sensor_dev_attr_power2_label.dev_attr.attr)) return 0; return effective_mode; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index a4207293158c..5488a0edb942 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -421,6 +421,36 @@ static int sienna_cichlid_store_powerplay_table(struct smu_context *smu) return 0; } +static int sienna_cichlid_patch_pptable_quirk(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + uint32_t *board_reserved; + uint16_t *freq_table_gfx; + uint32_t i; + + /* Fix some OEM SKU specific stability issues */ + GET_PPTABLE_MEMBER(BoardReserved, &board_reserved); + if ((adev->pdev->device == 0x73DF) && + (adev->pdev->revision == 0XC3) && + (adev->pdev->subsystem_device == 0x16C2) && + (adev->pdev->subsystem_vendor == 0x1043)) + board_reserved[0] = 1387; + + GET_PPTABLE_MEMBER(FreqTableGfx, &freq_table_gfx); + if ((adev->pdev->device == 0x73DF) && + (adev->pdev->revision == 0XC3) && + ((adev->pdev->subsystem_device == 0x16C2) || + (adev->pdev->subsystem_device == 0x133C)) && + (adev->pdev->subsystem_vendor == 0x1043)) { + for (i = 0; i < NUM_GFXCLK_DPM_LEVELS; i++) { + if (freq_table_gfx[i] > 2500) + freq_table_gfx[i] = 2500; + } + } + + return 0; +} + static int sienna_cichlid_setup_pptable(struct smu_context *smu) { int ret = 0; @@ -441,7 
+471,7 @@ static int sienna_cichlid_setup_pptable(struct smu_context *smu) if (ret) return ret; - return ret; + return sienna_cichlid_patch_pptable_quirk(smu); } static int sienna_cichlid_tables_init(struct smu_context *smu) @@ -1238,21 +1268,37 @@ static int sienna_cichlid_populate_umd_state_clk(struct smu_context *smu) &dpm_context->dpm_tables.soc_table; struct smu_umd_pstate_table *pstate_table = &smu->pstate_table; + struct amdgpu_device *adev = smu->adev; pstate_table->gfxclk_pstate.min = gfx_table->min; pstate_table->gfxclk_pstate.peak = gfx_table->max; - if (gfx_table->max >= SIENNA_CICHLID_UMD_PSTATE_PROFILING_GFXCLK) - pstate_table->gfxclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_GFXCLK; pstate_table->uclk_pstate.min = mem_table->min; pstate_table->uclk_pstate.peak = mem_table->max; - if (mem_table->max >= SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK) - pstate_table->uclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK; pstate_table->socclk_pstate.min = soc_table->min; pstate_table->socclk_pstate.peak = soc_table->max; - if (soc_table->max >= SIENNA_CICHLID_UMD_PSTATE_PROFILING_SOCCLK) + + switch (adev->asic_type) { + case CHIP_SIENNA_CICHLID: + case CHIP_NAVY_FLOUNDER: + pstate_table->gfxclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_GFXCLK; + pstate_table->uclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK; pstate_table->socclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_SOCCLK; + break; + case CHIP_DIMGREY_CAVEFISH: + pstate_table->gfxclk_pstate.standard = DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_GFXCLK; + pstate_table->uclk_pstate.standard = DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_MEMCLK; + pstate_table->socclk_pstate.standard = DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_SOCCLK; + break; + case CHIP_BEIGE_GOBY: + pstate_table->gfxclk_pstate.standard = BEIGE_GOBY_UMD_PSTATE_PROFILING_GFXCLK; + pstate_table->uclk_pstate.standard = BEIGE_GOBY_UMD_PSTATE_PROFILING_MEMCLK; + pstate_table->socclk_pstate.standard = BEIGE_GOBY_UMD_PSTATE_PROFILING_SOCCLK; + break; + default: + break; + } return 0; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h index 38cd0ece24f6..42f705c7a36f 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h @@ -33,6 +33,14 @@ typedef enum { #define SIENNA_CICHLID_UMD_PSTATE_PROFILING_SOCCLK 960 #define SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK 1000 +#define DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_GFXCLK 1950 +#define DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_SOCCLK 960 +#define DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_MEMCLK 676 + +#define BEIGE_GOBY_UMD_PSTATE_PROFILING_GFXCLK 2200 +#define BEIGE_GOBY_UMD_PSTATE_PROFILING_SOCCLK 960 +#define BEIGE_GOBY_UMD_PSTATE_PROFILING_MEMCLK 1000 + extern void sienna_cichlid_set_ppt_funcs(struct smu_context *smu); #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c index caf1775d48ef..0bc84b709a93 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c @@ -282,14 +282,9 @@ static int yellow_carp_post_smu_init(struct smu_context *smu) static int yellow_carp_mode_reset(struct smu_context *smu, int type) { - int ret = 0, index = 0; - - index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, - SMU_MSG_GfxDeviceDriverReset); - if (index < 0) - return index == -EACCES ? 
0 : index; + int ret = 0; - ret = smu_cmn_send_smc_msg_with_param(smu, (uint16_t)index, type, NULL); + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, type, NULL); if (ret) dev_err(smu->adev->dev, "Failed to mode reset!\n"); diff --git a/drivers/gpu/drm/arm/Kconfig b/drivers/gpu/drm/arm/Kconfig index 58a242871b28..6e3f1d600541 100644 --- a/drivers/gpu/drm/arm/Kconfig +++ b/drivers/gpu/drm/arm/Kconfig @@ -6,6 +6,7 @@ config DRM_HDLCD depends on DRM && OF && (ARM || ARM64 || COMPILE_TEST) depends on COMMON_CLK select DRM_KMS_HELPER + select DRM_GEM_CMA_HELPER help Choose this option if you have an ARM High Definition Colour LCD controller. diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c index a7389a0facfb..af07eeb47ca0 100644 --- a/drivers/gpu/drm/bridge/nwl-dsi.c +++ b/drivers/gpu/drm/bridge/nwl-dsi.c @@ -7,6 +7,7 @@ */ #include <linux/bitfield.h> +#include <linux/bits.h> #include <linux/clk.h> #include <linux/irq.h> #include <linux/math64.h> @@ -196,12 +197,9 @@ static u32 ps2bc(struct nwl_dsi *dsi, unsigned long long ps) /* * ui2bc - UI time periods to byte clock cycles */ -static u32 ui2bc(struct nwl_dsi *dsi, unsigned long long ui) +static u32 ui2bc(unsigned int ui) { - u32 bpp = mipi_dsi_pixel_format_to_bpp(dsi->format); - - return DIV64_U64_ROUND_UP(ui * dsi->lanes, - dsi->mode.clock * 1000 * bpp); + return DIV_ROUND_UP(ui, BITS_PER_BYTE); } /* @@ -232,12 +230,12 @@ static int nwl_dsi_config_host(struct nwl_dsi *dsi) } /* values in byte clock cycles */ - cycles = ui2bc(dsi, cfg->clk_pre); + cycles = ui2bc(cfg->clk_pre); DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_t_pre: 0x%x\n", cycles); nwl_dsi_write(dsi, NWL_DSI_CFG_T_PRE, cycles); cycles = ps2bc(dsi, cfg->lpx + cfg->clk_prepare + cfg->clk_zero); DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_tx_gap (pre): 0x%x\n", cycles); - cycles += ui2bc(dsi, cfg->clk_pre); + cycles += ui2bc(cfg->clk_pre); DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_t_post: 0x%x\n", cycles); nwl_dsi_write(dsi, NWL_DSI_CFG_T_POST, cycles); cycles = ps2bc(dsi, cfg->hs_exit); diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c index dab8f76618f3..68d8415e6c28 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c @@ -1802,6 +1802,7 @@ static inline void ti_sn_gpio_unregister(void) {} static void ti_sn65dsi86_runtime_disable(void *data) { + pm_runtime_dont_use_autosuspend(data); pm_runtime_disable(data); } @@ -1861,11 +1862,11 @@ static int ti_sn65dsi86_probe(struct i2c_client *client, "failed to get reference clock\n"); pm_runtime_enable(dev); + pm_runtime_set_autosuspend_delay(pdata->dev, 500); + pm_runtime_use_autosuspend(pdata->dev); ret = devm_add_action_or_reset(dev, ti_sn65dsi86_runtime_disable, dev); if (ret) return ret; - pm_runtime_set_autosuspend_delay(pdata->dev, 500); - pm_runtime_use_autosuspend(pdata->dev); ti_sn65dsi86_debugfs_init(pdata); diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c index 9781722519c3..54d62fdb4ef9 100644 --- a/drivers/gpu/drm/drm_atomic_uapi.c +++ b/drivers/gpu/drm/drm_atomic_uapi.c @@ -76,15 +76,17 @@ int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state, state->mode_blob = NULL; if (mode) { + struct drm_property_blob *blob; + drm_mode_convert_to_umode(&umode, mode); - state->mode_blob = - drm_property_create_blob(state->crtc->dev, - sizeof(umode), - &umode); - if (IS_ERR(state->mode_blob)) - return PTR_ERR(state->mode_blob); + blob = drm_property_create_blob(crtc->dev, + 
sizeof(umode), &umode); + if (IS_ERR(blob)) + return PTR_ERR(blob); drm_mode_copy(&state->mode, mode); + + state->mode_blob = blob; state->enable = true; drm_dbg_atomic(crtc->dev, "Set [MODE:%s] for [CRTC:%d:%s] state %p\n", diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index a50c82bc2b2f..76a8c707c34b 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -2330,6 +2330,9 @@ EXPORT_SYMBOL(drm_connector_atomic_hdr_metadata_equal); void drm_connector_set_vrr_capable_property( struct drm_connector *connector, bool capable) { + if (!connector->vrr_capable_property) + return; + drm_object_property_set_value(&connector->base, connector->vrr_capable_property, capable); diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 12893e7be89b..f5f5de362ff2 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -5345,6 +5345,7 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi if (!(edid->input & DRM_EDID_INPUT_DIGITAL)) return quirks; + info->color_formats |= DRM_COLOR_FORMAT_RGB444; drm_parse_cea_ext(connector, edid); /* @@ -5393,7 +5394,6 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi DRM_DEBUG("%s: Assigning EDID-1.4 digital sink color depth as %d bpc.\n", connector->name, info->bpc); - info->color_formats |= DRM_COLOR_FORMAT_RGB444; if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB444) info->color_formats |= DRM_COLOR_FORMAT_YCRCB444; if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB422) diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c index cefd0cbf9deb..dc275c466c9c 100644 --- a/drivers/gpu/drm/drm_gem_cma_helper.c +++ b/drivers/gpu/drm/drm_gem_cma_helper.c @@ -512,6 +512,7 @@ int drm_gem_cma_mmap(struct drm_gem_cma_object *cma_obj, struct vm_area_struct * */ vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node); vma->vm_flags &= ~VM_PFNMAP; + vma->vm_flags |= VM_DONTEXPAND; if (cma_obj->map_noncoherent) { vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); diff --git a/drivers/gpu/drm/drm_privacy_screen.c b/drivers/gpu/drm/drm_privacy_screen.c index beaf99e9120a..b688841c18e4 100644 --- a/drivers/gpu/drm/drm_privacy_screen.c +++ b/drivers/gpu/drm/drm_privacy_screen.c @@ -269,7 +269,7 @@ EXPORT_SYMBOL(drm_privacy_screen_get_state); * * The notifier is called with no locks held. The new hw_state and sw_state * can be retrieved using the drm_privacy_screen_get_state() function. - * A pointer to the drm_privacy_screen's struct is passed as the void *data + * A pointer to the drm_privacy_screen's struct is passed as the ``void *data`` * argument of the notifier_block's notifier_call. * * The notifier will NOT be called when changes are made through diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c index 12571ac45540..c04264f70ad1 100644 --- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c @@ -678,7 +678,6 @@ static int decon_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct decon_context *ctx; struct device_node *i80_if_timings; - struct resource *res; int ret; if (!dev->of_node) @@ -728,16 +727,11 @@ static int decon_probe(struct platform_device *pdev) goto err_iounmap; } - res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, - ctx->i80_if ? 
"lcd_sys" : "vsync"); - if (!res) { - dev_err(dev, "irq request failed.\n"); - ret = -ENXIO; + ret = platform_get_irq_byname(pdev, ctx->i80_if ? "lcd_sys" : "vsync"); + if (ret < 0) goto err_iounmap; - } - ret = devm_request_irq(dev, res->start, decon_irq_handler, - 0, "drm_decon", ctx); + ret = devm_request_irq(dev, ret, decon_irq_handler, 0, "drm_decon", ctx); if (ret) { dev_err(dev, "irq request failed.\n"); goto err_iounmap; diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c index 32a36572b894..d13f5e3a030d 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c @@ -1334,8 +1334,10 @@ static int exynos_dsi_register_te_irq(struct exynos_dsi *dsi, int ret; int te_gpio_irq; - dsi->te_gpio = devm_gpiod_get_optional(dsi->dev, "te", GPIOD_IN); - if (IS_ERR(dsi->te_gpio)) { + dsi->te_gpio = gpiod_get_optional(panel, "te", GPIOD_IN); + if (!dsi->te_gpio) { + return 0; + } else if (IS_ERR(dsi->te_gpio)) { dev_err(dsi->dev, "gpio request failed with %ld\n", PTR_ERR(dsi->te_gpio)); return PTR_ERR(dsi->te_gpio); diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c index 023f54ee61a8..0ee32e4b1e43 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c @@ -1267,7 +1267,6 @@ static int fimc_probe(struct platform_device *pdev) struct exynos_drm_ipp_formats *formats; struct device *dev = &pdev->dev; struct fimc_context *ctx; - struct resource *res; int ret; int i, j, num_limits, num_formats; @@ -1330,14 +1329,12 @@ static int fimc_probe(struct platform_device *pdev) return PTR_ERR(ctx->regs); /* resource irq */ - res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (!res) { - dev_err(dev, "failed to request irq resource.\n"); - return -ENOENT; - } + ret = platform_get_irq(pdev, 0); + if (ret < 0) + return ret; - ret = devm_request_irq(dev, res->start, fimc_irq_handler, - 0, dev_name(dev), ctx); + ret = devm_request_irq(dev, ret, fimc_irq_handler, + 0, dev_name(dev), ctx); if (ret < 0) { dev_err(dev, "failed to request irq.\n"); return ret; diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index c735e53939d8..7d5a483a54de 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c @@ -1133,7 +1133,6 @@ static int fimd_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct fimd_context *ctx; struct device_node *i80_if_timings; - struct resource *res; int ret; if (!dev->of_node) @@ -1206,15 +1205,11 @@ static int fimd_probe(struct platform_device *pdev) if (IS_ERR(ctx->regs)) return PTR_ERR(ctx->regs); - res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, - ctx->i80_if ? "lcd_sys" : "vsync"); - if (!res) { - dev_err(dev, "irq request failed.\n"); - return -ENXIO; - } + ret = platform_get_irq_byname(pdev, ctx->i80_if ? 
"lcd_sys" : "vsync"); + if (ret < 0) + return ret; - ret = devm_request_irq(dev, res->start, fimd_irq_handler, - 0, "drm_fimd", ctx); + ret = devm_request_irq(dev, ret, fimd_irq_handler, 0, "drm_fimd", ctx); if (ret) { dev_err(dev, "irq request failed.\n"); return ret; diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c index 166a80262896..964dceb28c1e 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c @@ -1220,7 +1220,6 @@ static int gsc_probe(struct platform_device *pdev) struct gsc_driverdata *driver_data; struct exynos_drm_ipp_formats *formats; struct gsc_context *ctx; - struct resource *res; int num_formats, ret, i, j; ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); @@ -1275,13 +1274,10 @@ static int gsc_probe(struct platform_device *pdev) return PTR_ERR(ctx->regs); /* resource irq */ - res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (!res) { - dev_err(dev, "failed to request irq resource.\n"); - return -ENOENT; - } + ctx->irq = platform_get_irq(pdev, 0); + if (ctx->irq < 0) + return ctx->irq; - ctx->irq = res->start; ret = devm_request_irq(dev, ctx->irq, gsc_irq_handler, 0, dev_name(dev), ctx); if (ret < 0) { diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index 41c54f1f60bc..e5204be86093 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c @@ -809,19 +809,17 @@ static int mixer_resources_init(struct mixer_context *mixer_ctx) return -ENXIO; } - res = platform_get_resource(mixer_ctx->pdev, IORESOURCE_IRQ, 0); - if (res == NULL) { - dev_err(dev, "get interrupt resource failed.\n"); - return -ENXIO; - } + ret = platform_get_irq(mixer_ctx->pdev, 0); + if (ret < 0) + return ret; + mixer_ctx->irq = ret; - ret = devm_request_irq(dev, res->start, mixer_irq_handler, - 0, "drm_mixer", mixer_ctx); + ret = devm_request_irq(dev, mixer_ctx->irq, mixer_irq_handler, + 0, "drm_mixer", mixer_ctx); if (ret) { dev_err(dev, "request interrupt failed.\n"); return ret; } - mixer_ctx->irq = res->start; return 0; } diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig index a4c94dc2e216..cfd932514da2 100644 --- a/drivers/gpu/drm/i915/Kconfig +++ b/drivers/gpu/drm/i915/Kconfig @@ -101,6 +101,7 @@ config DRM_I915_USERPTR config DRM_I915_GVT bool "Enable Intel GVT-g graphics virtualization host support" depends on DRM_I915 + depends on X86 depends on 64BIT default n help diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c index 2da4aacc956b..8ac196e814d5 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.c +++ b/drivers/gpu/drm/i915/display/intel_bw.c @@ -825,6 +825,7 @@ int intel_bw_atomic_check(struct intel_atomic_state *state) unsigned int max_bw_point = 0, max_bw = 0; unsigned int num_qgv_points = dev_priv->max_bw[0].num_qgv_points; unsigned int num_psf_gv_points = dev_priv->max_bw[0].num_psf_gv_points; + bool changed = false; u32 mask = 0; /* FIXME earlier gens need some checks too */ @@ -868,6 +869,8 @@ int intel_bw_atomic_check(struct intel_atomic_state *state) new_bw_state->data_rate[crtc->pipe] = new_data_rate; new_bw_state->num_active_planes[crtc->pipe] = new_active_planes; + changed = true; + drm_dbg_kms(&dev_priv->drm, "pipe %c data rate %u num active planes %u\n", pipe_name(crtc->pipe), @@ -875,7 +878,19 @@ int intel_bw_atomic_check(struct intel_atomic_state *state) new_bw_state->num_active_planes[crtc->pipe]); } - if (!new_bw_state) + old_bw_state = 
intel_atomic_get_old_bw_state(state); + new_bw_state = intel_atomic_get_new_bw_state(state); + + if (new_bw_state && + intel_can_enable_sagv(dev_priv, old_bw_state) != + intel_can_enable_sagv(dev_priv, new_bw_state)) + changed = true; + + /* + * If none of our inputs (data rates, number of active + * planes, SAGV yes/no) changed then nothing to do here. + */ + if (!changed) return 0; ret = intel_atomic_lock_global_state(&new_bw_state->base); @@ -961,7 +976,6 @@ int intel_bw_atomic_check(struct intel_atomic_state *state) */ new_bw_state->qgv_points_mask = ~allowed_points & mask; - old_bw_state = intel_atomic_get_old_bw_state(state); /* * If the actual mask had changed we need to make sure that * the commits are serialized(in case this is a nomodeset, nonblocking) diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h index 46c6eecbd917..0ceaed1c9656 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.h +++ b/drivers/gpu/drm/i915/display/intel_bw.h @@ -30,19 +30,19 @@ struct intel_bw_state { */ u8 pipe_sagv_reject; + /* bitmask of active pipes */ + u8 active_pipes; + /* * Current QGV points mask, which restricts * some particular SAGV states, not to confuse * with pipe_sagv_mask. */ - u8 qgv_points_mask; + u16 qgv_points_mask; unsigned int data_rate[I915_MAX_PIPES]; u8 num_active_planes[I915_MAX_PIPES]; - /* bitmask of active pipes */ - u8 active_pipes; - int min_cdclk; }; diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index bf7ce684dd8e..bb4a85445fc6 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -10673,6 +10673,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev, vlv_wm_sanitize(dev_priv); } else if (DISPLAY_VER(dev_priv) >= 9) { skl_wm_get_hw_state(dev_priv); + skl_wm_sanitize(dev_priv); } else if (HAS_PCH_SPLIT(dev_priv)) { ilk_wm_get_hw_state(dev_priv); } diff --git a/drivers/gpu/drm/i915/display/intel_drrs.c b/drivers/gpu/drm/i915/display/intel_drrs.c index c1439fcb5a95..3ff149df4a77 100644 --- a/drivers/gpu/drm/i915/display/intel_drrs.c +++ b/drivers/gpu/drm/i915/display/intel_drrs.c @@ -405,6 +405,7 @@ intel_drrs_init(struct intel_connector *connector, struct drm_display_mode *fixed_mode) { struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_encoder *encoder = connector->encoder; struct drm_display_mode *downclock_mode = NULL; INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_drrs_downclock_work); @@ -416,6 +417,13 @@ intel_drrs_init(struct intel_connector *connector, return NULL; } + if ((DISPLAY_VER(dev_priv) < 8 && !HAS_GMCH(dev_priv)) && + encoder->port != PORT_A) { + drm_dbg_kms(&dev_priv->drm, + "DRRS only supported on eDP port A\n"); + return NULL; + } + if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) { drm_dbg_kms(&dev_priv->drm, "VBT doesn't support DRRS\n"); return NULL; diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c index 160fd2bdafe5..957feeccff3f 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.c +++ b/drivers/gpu/drm/i915/display/intel_fbc.c @@ -1115,7 +1115,8 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state, /* Wa_22010751166: icl, ehl, tgl, dg1, rkl */ if (DISPLAY_VER(i915) >= 11 && - (plane_state->view.color_plane[0].y + drm_rect_height(&plane_state->uapi.src)) & 3) { + (plane_state->view.color_plane[0].y + + (drm_rect_height(&plane_state->uapi.src) >> 16)) & 3) { plane_state->no_fbc_reason = "plane 
end Y offset misaligned"; return false; } diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c index 0065111593a6..4a2662838cd8 100644 --- a/drivers/gpu/drm/i915/display/intel_opregion.c +++ b/drivers/gpu/drm/i915/display/intel_opregion.c @@ -360,6 +360,21 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, port++; } + /* + * The port numbering and mapping here is bizarre. The now-obsolete + * swsci spec supports ports numbered [0..4]. Port E is handled as a + * special case, but port F and beyond are not. The functionality is + * supposed to be obsolete for new platforms. Just bail out if the port + * number is out of bounds after mapping. + */ + if (port > 4) { + drm_dbg_kms(&dev_priv->drm, + "[ENCODER:%d:%s] port %c (index %u) out of bounds for display power state notification\n", + intel_encoder->base.base.id, intel_encoder->base.name, + port_name(intel_encoder->port), port); + return -EINVAL; + } + if (!enable) parm |= 4 << 8; diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c index 09f405e4d363..92ff654f54f5 100644 --- a/drivers/gpu/drm/i915/display/intel_snps_phy.c +++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c @@ -34,7 +34,7 @@ void intel_snps_phy_wait_for_calibration(struct drm_i915_private *dev_priv) if (intel_de_wait_for_clear(dev_priv, ICL_PHY_MISC(phy), DG2_PHY_DP_TX_ACK_MASK, 25)) DRM_ERROR("SNPS PHY %c failed to calibrate after 25ms.\n", - phy); + phy_name(phy)); } } diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c index dbd7d0d83a14..7784c30fe893 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.c +++ b/drivers/gpu/drm/i915/display/intel_tc.c @@ -691,6 +691,8 @@ void intel_tc_port_sanitize(struct intel_digital_port *dig_port) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); struct intel_encoder *encoder = &dig_port->base; + intel_wakeref_t tc_cold_wref; + enum intel_display_power_domain domain; int active_links = 0; mutex_lock(&dig_port->tc_lock); @@ -702,12 +704,11 @@ void intel_tc_port_sanitize(struct intel_digital_port *dig_port) drm_WARN_ON(&i915->drm, dig_port->tc_mode != TC_PORT_DISCONNECTED); drm_WARN_ON(&i915->drm, dig_port->tc_lock_wakeref); - if (active_links) { - enum intel_display_power_domain domain; - intel_wakeref_t tc_cold_wref = tc_cold_block(dig_port, &domain); - dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port); + tc_cold_wref = tc_cold_block(dig_port, &domain); + dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port); + if (active_links) { if (!icl_tc_phy_is_connected(dig_port)) drm_dbg_kms(&i915->drm, "Port %s: PHY disconnected with %d active link(s)\n", @@ -716,10 +717,23 @@ void intel_tc_port_sanitize(struct intel_digital_port *dig_port) dig_port->tc_lock_wakeref = tc_cold_block(dig_port, &dig_port->tc_lock_power_domain); - - tc_cold_unblock(dig_port, domain, tc_cold_wref); + } else { + /* + * TBT-alt is the default mode in any case the PHY ownership is not + * held (regardless of the sink's connected live state), so + * we'll just switch to disconnected mode from it here without + * a note. 
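Annotation: the intel_tc.c sanitize rework above pulls the tc_cold_block()/tc_cold_unblock() pair out of the active_links branch, so the current PHY mode is always read under the cold-block reference and that reference is released exactly once on the way out, whichever branch runs. Below is a minimal, runnable sketch of that acquire-once/branch/release-once shape; every name in it is a toy stand-in for illustration, not the driver's actual API.

    #include <stdio.h>
    #include <stdbool.h>

    /* Toy stand-ins for tc_cold_block()/tc_cold_unblock(): acquire the
     * reference once, up front, so both branches run under it and there is
     * exactly one matching release on the single exit path. */
    static int acquire(void)   { puts("block tc-cold");   return 1; }
    static void release(int r) { puts("unblock tc-cold"); (void)r;  }

    static void sanitize(bool active_links)
    {
        int wakeref = acquire();      /* unconditional, as in the rework */

        if (active_links)
            puts("keep port connected; take long-term reference");
        else
            puts("PHY ownership not held; disconnect");

        release(wakeref);             /* one release for every path */
    }

    int main(void)
    {
        sanitize(true);
        sanitize(false);
        return 0;
    }

Keeping the acquisition unconditional means neither branch can leak the reference or release it twice, which is what the previous branch-local block/unblock structure made easy to get wrong.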
+ */ + if (dig_port->tc_mode != TC_PORT_TBT_ALT) + drm_dbg_kms(&i915->drm, + "Port %s: PHY left in %s mode on disabled port, disconnecting it\n", + dig_port->tc_port_name, + tc_port_mode_name(dig_port->tc_mode)); + icl_tc_phy_disconnect(dig_port); } + tc_cold_unblock(dig_port, domain, tc_cold_wref); + drm_dbg_kms(&i915->drm, "Port %s: sanitize mode (%s)\n", dig_port->tc_port_name, tc_port_mode_name(dig_port->tc_mode)); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c index de3fe79b665a..1f880c8c66e7 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c @@ -842,11 +842,9 @@ void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj) } else if (obj->mm.madv != I915_MADV_WILLNEED) { bo->priority = I915_TTM_PRIO_PURGE; } else if (!i915_gem_object_has_pages(obj)) { - if (bo->priority < I915_TTM_PRIO_HAS_PAGES) - bo->priority = I915_TTM_PRIO_HAS_PAGES; + bo->priority = I915_TTM_PRIO_NO_PAGES; } else { - if (bo->priority > I915_TTM_PRIO_NO_PAGES) - bo->priority = I915_TTM_PRIO_NO_PAGES; + bo->priority = I915_TTM_PRIO_HAS_PAGES; } ttm_bo_move_to_lru_tail(bo, bo->resource, NULL); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c index ee9612a3ee5e..e130c820ae4e 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c @@ -427,11 +427,17 @@ __i915_ttm_move(struct ttm_buffer_object *bo, if (!IS_ERR(fence)) goto out; - } else if (move_deps) { - int err = i915_deps_sync(move_deps, ctx); + } else { + int err = PTR_ERR(fence); + + if (err == -EINTR || err == -ERESTARTSYS || err == -EAGAIN) + return fence; - if (err) - return ERR_PTR(err); + if (move_deps) { + err = i915_deps_sync(move_deps, ctx); + if (err) + return ERR_PTR(err); + } } /* Error intercept failed or no accelerated migration to start with */ diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c index 13b27b8ff74e..ba21ace973da 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c @@ -110,7 +110,7 @@ static int guc_action_slpc_unset_param(struct intel_guc *guc, u8 id) { u32 request[] = { GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST, - SLPC_EVENT(SLPC_EVENT_PARAMETER_UNSET, 2), + SLPC_EVENT(SLPC_EVENT_PARAMETER_UNSET, 1), id, }; diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 99d1781fa5f0..af79b39048f7 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -1148,7 +1148,7 @@ static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se, ops->set_pfn(se, s->shadow_page.mfn); } -/** +/* * Check if can do 2M page * @vgpu: target vgpu * @entry: target pfn's gtt entry @@ -2193,7 +2193,7 @@ static int emulate_ggtt_mmio_read(struct intel_vgpu *vgpu, } /** - * intel_vgpu_emulate_gtt_mmio_read - emulate GTT MMIO register read + * intel_vgpu_emulate_ggtt_mmio_read - emulate GTT MMIO register read * @vgpu: a vGPU * @off: register offset * @p_data: data will be returned to guest diff --git a/drivers/gpu/drm/i915/i915_mm.h b/drivers/gpu/drm/i915/i915_mm.h index 76f1d53bdf34..3ad22bbe80eb 100644 --- a/drivers/gpu/drm/i915/i915_mm.h +++ b/drivers/gpu/drm/i915/i915_mm.h @@ -6,6 +6,7 @@ #ifndef __I915_MM_H__ #define __I915_MM_H__ +#include <linux/bug.h> #include <linux/types.h> struct vm_area_struct; diff --git a/drivers/gpu/drm/i915/intel_pch.c b/drivers/gpu/drm/i915/intel_pch.c index 
da8f82c2342f..fc8a68f3a2ed 100644 --- a/drivers/gpu/drm/i915/intel_pch.c +++ b/drivers/gpu/drm/i915/intel_pch.c @@ -108,6 +108,7 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id) /* Comet Lake V PCH is based on KBP, which is SPT compatible */ return PCH_SPT; case INTEL_PCH_ICP_DEVICE_ID_TYPE: + case INTEL_PCH_ICP2_DEVICE_ID_TYPE: drm_dbg_kms(&dev_priv->drm, "Found Ice Lake PCH\n"); drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv)); return PCH_ICP; @@ -123,7 +124,6 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id) !IS_GEN9_BC(dev_priv)); return PCH_TGP; case INTEL_PCH_JSP_DEVICE_ID_TYPE: - case INTEL_PCH_JSP2_DEVICE_ID_TYPE: drm_dbg_kms(&dev_priv->drm, "Found Jasper Lake PCH\n"); drm_WARN_ON(&dev_priv->drm, !IS_JSL_EHL(dev_priv)); return PCH_JSP; diff --git a/drivers/gpu/drm/i915/intel_pch.h b/drivers/gpu/drm/i915/intel_pch.h index 6bff77521094..4ba0f1967cca 100644 --- a/drivers/gpu/drm/i915/intel_pch.h +++ b/drivers/gpu/drm/i915/intel_pch.h @@ -50,11 +50,11 @@ enum intel_pch { #define INTEL_PCH_CMP2_DEVICE_ID_TYPE 0x0680 #define INTEL_PCH_CMP_V_DEVICE_ID_TYPE 0xA380 #define INTEL_PCH_ICP_DEVICE_ID_TYPE 0x3480 +#define INTEL_PCH_ICP2_DEVICE_ID_TYPE 0x3880 #define INTEL_PCH_MCC_DEVICE_ID_TYPE 0x4B00 #define INTEL_PCH_TGP_DEVICE_ID_TYPE 0xA080 #define INTEL_PCH_TGP2_DEVICE_ID_TYPE 0x4380 #define INTEL_PCH_JSP_DEVICE_ID_TYPE 0x4D80 -#define INTEL_PCH_JSP2_DEVICE_ID_TYPE 0x3880 #define INTEL_PCH_ADP_DEVICE_ID_TYPE 0x7A80 #define INTEL_PCH_ADP2_DEVICE_ID_TYPE 0x5180 #define INTEL_PCH_ADP3_DEVICE_ID_TYPE 0x7A00 diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 434b1f8b7fe3..fae4f7818d28 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -4029,6 +4029,17 @@ static int intel_compute_sagv_mask(struct intel_atomic_state *state) return ret; } + if (intel_can_enable_sagv(dev_priv, new_bw_state) != + intel_can_enable_sagv(dev_priv, old_bw_state)) { + ret = intel_atomic_serialize_global_state(&new_bw_state->base); + if (ret) + return ret; + } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) { + ret = intel_atomic_lock_global_state(&new_bw_state->base); + if (ret) + return ret; + } + for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal; @@ -4044,17 +4055,6 @@ static int intel_compute_sagv_mask(struct intel_atomic_state *state) intel_can_enable_sagv(dev_priv, new_bw_state); } - if (intel_can_enable_sagv(dev_priv, new_bw_state) != - intel_can_enable_sagv(dev_priv, old_bw_state)) { - ret = intel_atomic_serialize_global_state(&new_bw_state->base); - if (ret) - return ret; - } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) { - ret = intel_atomic_lock_global_state(&new_bw_state->base); - if (ret) - return ret; - } - return 0; } @@ -4717,6 +4717,10 @@ static const struct dbuf_slice_conf_entry dg2_allowed_dbufs[] = { }; static const struct dbuf_slice_conf_entry adlp_allowed_dbufs[] = { + /* + * Keep the join_mbus cases first so check_mbus_joined() + * will prefer them over the !join_mbus cases. 
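Annotation: the comment above relies on a property of first-match lookup tables: when two entries share the same key (the active_pipes mask here), their relative order decides which one a linear scan returns, so placing the join_mbus entries first makes check_mbus_joined() prefer them. A small runnable illustration with hypothetical entries (not the real ADL-P dbuf data) follows; compute_dbuf_slices() avoids depending on this ordering by also matching join_mbus, as the following hunks show.

    #include <stdio.h>
    #include <stdbool.h>
    #include <stdint.h>

    struct dbuf_conf {
        uint8_t active_pipes;   /* 0 terminates the table */
        bool    join_mbus;
    };

    /* Entries with join_mbus = true come first, so a first-match scan
     * prefers them over the duplicate !join_mbus rows further down. */
    static const struct dbuf_conf confs[] = {
        { .active_pipes = 0x1, .join_mbus = true  },
        { .active_pipes = 0x1, .join_mbus = false },
        { 0 }                   /* sentinel */
    };

    static bool check_joined(uint8_t active_pipes)
    {
        for (int i = 0; confs[i].active_pipes != 0; i++)
            if (confs[i].active_pipes == active_pipes)
                return confs[i].join_mbus;
        return false;
    }

    int main(void)
    {
        /* prints 1: the earlier join_mbus = true entry wins */
        printf("pipes 0x1 -> join_mbus=%d\n", check_joined(0x1));
        return 0;
    }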
+ */ { .active_pipes = BIT(PIPE_A), .dbuf_mask = { @@ -4732,6 +4736,20 @@ static const struct dbuf_slice_conf_entry adlp_allowed_dbufs[] = { .join_mbus = true, }, { + .active_pipes = BIT(PIPE_A), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), + }, + .join_mbus = false, + }, + { + .active_pipes = BIT(PIPE_B), + .dbuf_mask = { + [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4), + }, + .join_mbus = false, + }, + { .active_pipes = BIT(PIPE_A) | BIT(PIPE_B), .dbuf_mask = { [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), @@ -4835,7 +4853,7 @@ static bool check_mbus_joined(u8 active_pipes, { int i; - for (i = 0; i < dbuf_slices[i].active_pipes; i++) { + for (i = 0; dbuf_slices[i].active_pipes != 0; i++) { if (dbuf_slices[i].active_pipes == active_pipes) return dbuf_slices[i].join_mbus; } @@ -4847,13 +4865,14 @@ static bool adlp_check_mbus_joined(u8 active_pipes) return check_mbus_joined(active_pipes, adlp_allowed_dbufs); } -static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes, +static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus, const struct dbuf_slice_conf_entry *dbuf_slices) { int i; - for (i = 0; i < dbuf_slices[i].active_pipes; i++) { - if (dbuf_slices[i].active_pipes == active_pipes) + for (i = 0; dbuf_slices[i].active_pipes != 0; i++) { + if (dbuf_slices[i].active_pipes == active_pipes && + dbuf_slices[i].join_mbus == join_mbus) return dbuf_slices[i].dbuf_mask[pipe]; } return 0; @@ -4864,7 +4883,7 @@ static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes, * returns correspondent DBuf slice mask as stated in BSpec for particular * platform. */ -static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes) +static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus) { /* * FIXME: For ICL this is still a bit unclear as prev BSpec revision @@ -4878,37 +4897,41 @@ static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes) * still here - we will need it once those additional constraints * pop up. 
*/ - return compute_dbuf_slices(pipe, active_pipes, icl_allowed_dbufs); + return compute_dbuf_slices(pipe, active_pipes, join_mbus, + icl_allowed_dbufs); } -static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes) +static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus) { - return compute_dbuf_slices(pipe, active_pipes, tgl_allowed_dbufs); + return compute_dbuf_slices(pipe, active_pipes, join_mbus, + tgl_allowed_dbufs); } -static u32 adlp_compute_dbuf_slices(enum pipe pipe, u32 active_pipes) +static u8 adlp_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus) { - return compute_dbuf_slices(pipe, active_pipes, adlp_allowed_dbufs); + return compute_dbuf_slices(pipe, active_pipes, join_mbus, + adlp_allowed_dbufs); } -static u32 dg2_compute_dbuf_slices(enum pipe pipe, u32 active_pipes) +static u8 dg2_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus) { - return compute_dbuf_slices(pipe, active_pipes, dg2_allowed_dbufs); + return compute_dbuf_slices(pipe, active_pipes, join_mbus, + dg2_allowed_dbufs); } -static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes) +static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes, bool join_mbus) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; if (IS_DG2(dev_priv)) - return dg2_compute_dbuf_slices(pipe, active_pipes); + return dg2_compute_dbuf_slices(pipe, active_pipes, join_mbus); else if (IS_ALDERLAKE_P(dev_priv)) - return adlp_compute_dbuf_slices(pipe, active_pipes); + return adlp_compute_dbuf_slices(pipe, active_pipes, join_mbus); else if (DISPLAY_VER(dev_priv) == 12) - return tgl_compute_dbuf_slices(pipe, active_pipes); + return tgl_compute_dbuf_slices(pipe, active_pipes, join_mbus); else if (DISPLAY_VER(dev_priv) == 11) - return icl_compute_dbuf_slices(pipe, active_pipes); + return icl_compute_dbuf_slices(pipe, active_pipes, join_mbus); /* * For anything else just return one slice yet. * Should be extended for other platforms. 
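Annotation: two of the hunks above also fix how check_mbus_joined() and compute_dbuf_slices() terminate their table walk. The old condition compared the loop index against the *value* of the current entry's active_pipes mask; that happened to work while every entry's mask exceeded its index, but it stops early once the new duplicate !join_mbus rows shift entries down the table. The fixed condition walks until the all-zero sentinel. A runnable before/after comparison with a toy table (not the real dbuf data):

    #include <stdio.h>
    #include <stdint.h>

    struct entry { uint8_t active_pipes; };

    /* A duplicated first entry, like the new !join_mbus rows create, is
     * enough to trip the old loop condition. */
    static const struct entry table[] = {
        { .active_pipes = 0x1 },
        { .active_pipes = 0x1 },
        { .active_pipes = 0x2 },
        { 0 }                             /* all-zero sentinel */
    };

    int main(void)
    {
        int old_visits = 0, new_visits = 0;

        /* Old condition: keep going while the index is below the value of
         * the current entry. Here 1 < table[1].active_pipes (1) is false,
         * so the scan stops at i = 1 and never examines the 0x2 entry. */
        for (int i = 0; i < table[i].active_pipes; i++)
            old_visits++;

        /* Fixed condition: walk until the all-zero sentinel. */
        for (int i = 0; table[i].active_pipes != 0; i++)
            new_visits++;

        printf("old loop visited %d entries, fixed loop visited %d\n",
               old_visits, new_visits);   /* 1 vs 3 */
        return 0;
    }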
@@ -6127,11 +6150,16 @@ skl_compute_ddb(struct intel_atomic_state *state) return ret; } + if (IS_ALDERLAKE_P(dev_priv)) + new_dbuf_state->joined_mbus = + adlp_check_mbus_joined(new_dbuf_state->active_pipes); + for_each_intel_crtc(&dev_priv->drm, crtc) { enum pipe pipe = crtc->pipe; new_dbuf_state->slices[pipe] = - skl_compute_dbuf_slices(crtc, new_dbuf_state->active_pipes); + skl_compute_dbuf_slices(crtc, new_dbuf_state->active_pipes, + new_dbuf_state->joined_mbus); if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe]) continue; @@ -6143,9 +6171,6 @@ skl_compute_ddb(struct intel_atomic_state *state) new_dbuf_state->enabled_slices = intel_dbuf_enabled_slices(new_dbuf_state); - if (IS_ALDERLAKE_P(dev_priv)) - new_dbuf_state->joined_mbus = adlp_check_mbus_joined(new_dbuf_state->active_pipes); - if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices || old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) { ret = intel_atomic_serialize_global_state(&new_dbuf_state->base); @@ -6626,6 +6651,7 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv) enum pipe pipe = crtc->pipe; unsigned int mbus_offset; enum plane_id plane_id; + u8 slices; skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal); crtc_state->wm.skl.raw = crtc_state->wm.skl.optimal; @@ -6645,19 +6671,22 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv) skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb_uv); } - dbuf_state->slices[pipe] = - skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes); - dbuf_state->weight[pipe] = intel_crtc_ddb_weight(crtc_state); /* * Used for checking overlaps, so we need absolute * offsets instead of MBUS relative offsets. */ - mbus_offset = mbus_ddb_offset(dev_priv, dbuf_state->slices[pipe]); + slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes, + dbuf_state->joined_mbus); + mbus_offset = mbus_ddb_offset(dev_priv, slices); crtc_state->wm.skl.ddb.start = mbus_offset + dbuf_state->ddb[pipe].start; crtc_state->wm.skl.ddb.end = mbus_offset + dbuf_state->ddb[pipe].end; + /* The slices actually used by the planes on the pipe */ + dbuf_state->slices[pipe] = + skl_ddb_dbuf_slice_mask(dev_priv, &crtc_state->wm.skl.ddb); + drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x, mbus joined: %s\n", crtc->base.base.id, crtc->base.name, @@ -6669,6 +6698,74 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv) dbuf_state->enabled_slices = dev_priv->dbuf.enabled_slices; } +static bool skl_dbuf_is_misconfigured(struct drm_i915_private *i915) +{ + const struct intel_dbuf_state *dbuf_state = + to_intel_dbuf_state(i915->dbuf.obj.state); + struct skl_ddb_entry entries[I915_MAX_PIPES] = {}; + struct intel_crtc *crtc; + + for_each_intel_crtc(&i915->drm, crtc) { + const struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + + entries[crtc->pipe] = crtc_state->wm.skl.ddb; + } + + for_each_intel_crtc(&i915->drm, crtc) { + const struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + u8 slices; + + slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes, + dbuf_state->joined_mbus); + if (dbuf_state->slices[crtc->pipe] & ~slices) + return true; + + if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.ddb, entries, + I915_MAX_PIPES, crtc->pipe)) + return true; + } + + return false; +} + +void skl_wm_sanitize(struct drm_i915_private *i915) +{ + struct intel_crtc *crtc; + + /* + * On TGL/RKL (at least) the BIOS likes to assign the planes + * to the wrong DBUF 
slices. This will cause an infinite loop + * in skl_commit_modeset_enables() as it can't find a way to + * transition between the old bogus DBUF layout to the new + * proper DBUF layout without DBUF allocation overlaps between + * the planes (which cannot be allowed or else the hardware + * may hang). If we detect a bogus DBUF layout just turn off + * all the planes so that skl_commit_modeset_enables() can + * simply ignore them. + */ + if (!skl_dbuf_is_misconfigured(i915)) + return; + + drm_dbg_kms(&i915->drm, "BIOS has misprogrammed the DBUF, disabling all planes\n"); + + for_each_intel_crtc(&i915->drm, crtc) { + struct intel_plane *plane = to_intel_plane(crtc->base.primary); + const struct intel_plane_state *plane_state = + to_intel_plane_state(plane->base.state); + struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + + if (plane_state->uapi.visible) + intel_plane_disable_noatomic(crtc, plane); + + drm_WARN_ON(&i915->drm, crtc_state->active_planes != 0); + + memset(&crtc_state->wm.skl.ddb, 0, sizeof(crtc_state->wm.skl.ddb)); + } +} + static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h index 990cdcaf85ce..d2243653a893 100644 --- a/drivers/gpu/drm/i915/intel_pm.h +++ b/drivers/gpu/drm/i915/intel_pm.h @@ -47,6 +47,7 @@ void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc, struct skl_pipe_wm *out); void g4x_wm_sanitize(struct drm_i915_private *dev_priv); void vlv_wm_sanitize(struct drm_i915_private *dev_priv); +void skl_wm_sanitize(struct drm_i915_private *dev_priv); bool intel_can_enable_sagv(struct drm_i915_private *dev_priv, const struct intel_bw_state *bw_state); void intel_sagv_pre_plane_update(struct intel_atomic_state *state); diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 53f1ccb78849..64c2708efc9e 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -68,9 +68,7 @@ static noinline depot_stack_handle_t __save_depot_stack(void) static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm) { spin_lock_init(&rpm->debug.lock); - - if (rpm->available) - stack_depot_init(); + stack_depot_init(); } static noinline depot_stack_handle_t diff --git a/drivers/gpu/drm/imx/dcss/Kconfig b/drivers/gpu/drm/imx/dcss/Kconfig index 7374f1952762..5c2b2277afbf 100644 --- a/drivers/gpu/drm/imx/dcss/Kconfig +++ b/drivers/gpu/drm/imx/dcss/Kconfig @@ -2,6 +2,7 @@ config DRM_IMX_DCSS tristate "i.MX8MQ DCSS" select IMX_IRQSTEER select DRM_KMS_HELPER + select DRM_GEM_CMA_HELPER select VIDEOMODE_HELPERS depends on DRM && ARCH_MXC && ARM64 help diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c index 5d90d2eb0019..bced4c7d668e 100644 --- a/drivers/gpu/drm/mediatek/mtk_dsi.c +++ b/drivers/gpu/drm/mediatek/mtk_dsi.c @@ -786,18 +786,101 @@ void mtk_dsi_ddp_stop(struct device *dev) mtk_dsi_poweroff(dsi); } +static int mtk_dsi_encoder_init(struct drm_device *drm, struct mtk_dsi *dsi) +{ + int ret; + + ret = drm_simple_encoder_init(drm, &dsi->encoder, + DRM_MODE_ENCODER_DSI); + if (ret) { + DRM_ERROR("Failed to encoder init to drm\n"); + return ret; + } + + dsi->encoder.possible_crtcs = mtk_drm_find_possible_crtc_by_comp(drm, dsi->host.dev); + + ret = drm_bridge_attach(&dsi->encoder, &dsi->bridge, NULL, + DRM_BRIDGE_ATTACH_NO_CONNECTOR); + if (ret) + goto err_cleanup_encoder; + + dsi->connector = 
drm_bridge_connector_init(drm, &dsi->encoder); + if (IS_ERR(dsi->connector)) { + DRM_ERROR("Unable to create bridge connector\n"); + ret = PTR_ERR(dsi->connector); + goto err_cleanup_encoder; + } + drm_connector_attach_encoder(dsi->connector, &dsi->encoder); + + return 0; + +err_cleanup_encoder: + drm_encoder_cleanup(&dsi->encoder); + return ret; +} + +static int mtk_dsi_bind(struct device *dev, struct device *master, void *data) +{ + int ret; + struct drm_device *drm = data; + struct mtk_dsi *dsi = dev_get_drvdata(dev); + + ret = mtk_dsi_encoder_init(drm, dsi); + if (ret) + return ret; + + return device_reset_optional(dev); +} + +static void mtk_dsi_unbind(struct device *dev, struct device *master, + void *data) +{ + struct mtk_dsi *dsi = dev_get_drvdata(dev); + + drm_encoder_cleanup(&dsi->encoder); +} + +static const struct component_ops mtk_dsi_component_ops = { + .bind = mtk_dsi_bind, + .unbind = mtk_dsi_unbind, +}; + static int mtk_dsi_host_attach(struct mipi_dsi_host *host, struct mipi_dsi_device *device) { struct mtk_dsi *dsi = host_to_dsi(host); + struct device *dev = host->dev; + int ret; dsi->lanes = device->lanes; dsi->format = device->format; dsi->mode_flags = device->mode_flags; + dsi->next_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0); + if (IS_ERR(dsi->next_bridge)) + return PTR_ERR(dsi->next_bridge); + + drm_bridge_add(&dsi->bridge); + + ret = component_add(host->dev, &mtk_dsi_component_ops); + if (ret) { + DRM_ERROR("failed to add dsi_host component: %d\n", ret); + drm_bridge_remove(&dsi->bridge); + return ret; + } return 0; } +static int mtk_dsi_host_detach(struct mipi_dsi_host *host, + struct mipi_dsi_device *device) +{ + struct mtk_dsi *dsi = host_to_dsi(host); + + component_del(host->dev, &mtk_dsi_component_ops); + drm_bridge_remove(&dsi->bridge); + return 0; +} + static void mtk_dsi_wait_for_idle(struct mtk_dsi *dsi) { int ret; @@ -938,73 +1021,14 @@ static ssize_t mtk_dsi_host_transfer(struct mipi_dsi_host *host, static const struct mipi_dsi_host_ops mtk_dsi_ops = { .attach = mtk_dsi_host_attach, + .detach = mtk_dsi_host_detach, .transfer = mtk_dsi_host_transfer, }; -static int mtk_dsi_encoder_init(struct drm_device *drm, struct mtk_dsi *dsi) -{ - int ret; - - ret = drm_simple_encoder_init(drm, &dsi->encoder, - DRM_MODE_ENCODER_DSI); - if (ret) { - DRM_ERROR("Failed to encoder init to drm\n"); - return ret; - } - - dsi->encoder.possible_crtcs = mtk_drm_find_possible_crtc_by_comp(drm, dsi->host.dev); - - ret = drm_bridge_attach(&dsi->encoder, &dsi->bridge, NULL, - DRM_BRIDGE_ATTACH_NO_CONNECTOR); - if (ret) - goto err_cleanup_encoder; - - dsi->connector = drm_bridge_connector_init(drm, &dsi->encoder); - if (IS_ERR(dsi->connector)) { - DRM_ERROR("Unable to create bridge connector\n"); - ret = PTR_ERR(dsi->connector); - goto err_cleanup_encoder; - } - drm_connector_attach_encoder(dsi->connector, &dsi->encoder); - - return 0; - -err_cleanup_encoder: - drm_encoder_cleanup(&dsi->encoder); - return ret; -} - -static int mtk_dsi_bind(struct device *dev, struct device *master, void *data) -{ - int ret; - struct drm_device *drm = data; - struct mtk_dsi *dsi = dev_get_drvdata(dev); - - ret = mtk_dsi_encoder_init(drm, dsi); - if (ret) - return ret; - - return device_reset_optional(dev); -} - -static void mtk_dsi_unbind(struct device *dev, struct device *master, - void *data) -{ - struct mtk_dsi *dsi = dev_get_drvdata(dev); - - drm_encoder_cleanup(&dsi->encoder); -} - -static const struct component_ops mtk_dsi_component_ops = { - .bind = mtk_dsi_bind, - .unbind = 
mtk_dsi_unbind, -}; - static int mtk_dsi_probe(struct platform_device *pdev) { struct mtk_dsi *dsi; struct device *dev = &pdev->dev; - struct drm_panel *panel; struct resource *regs; int irq_num; int ret; @@ -1021,19 +1045,6 @@ static int mtk_dsi_probe(struct platform_device *pdev) return ret; } - ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0, - &panel, &dsi->next_bridge); - if (ret) - goto err_unregister_host; - - if (panel) { - dsi->next_bridge = devm_drm_panel_bridge_add(dev, panel); - if (IS_ERR(dsi->next_bridge)) { - ret = PTR_ERR(dsi->next_bridge); - goto err_unregister_host; - } - } - dsi->driver_data = of_device_get_match_data(dev); dsi->engine_clk = devm_clk_get(dev, "engine"); @@ -1098,14 +1109,6 @@ static int mtk_dsi_probe(struct platform_device *pdev) dsi->bridge.of_node = dev->of_node; dsi->bridge.type = DRM_MODE_CONNECTOR_DSI; - drm_bridge_add(&dsi->bridge); - - ret = component_add(&pdev->dev, &mtk_dsi_component_ops); - if (ret) { - dev_err(&pdev->dev, "failed to add component: %d\n", ret); - goto err_unregister_host; - } - return 0; err_unregister_host: @@ -1118,8 +1121,6 @@ static int mtk_dsi_remove(struct platform_device *pdev) struct mtk_dsi *dsi = platform_get_drvdata(pdev); mtk_output_dsi_disable(dsi); - drm_bridge_remove(&dsi->bridge); - component_del(&pdev->dev, &mtk_dsi_component_ops); mipi_dsi_host_unregister(&dsi->host); return 0; diff --git a/drivers/gpu/drm/panel/panel-abt-y030xx067a.c b/drivers/gpu/drm/panel/panel-abt-y030xx067a.c index f043b484055b..ed626fdc08e8 100644 --- a/drivers/gpu/drm/panel/panel-abt-y030xx067a.c +++ b/drivers/gpu/drm/panel/panel-abt-y030xx067a.c @@ -293,15 +293,13 @@ static int y030xx067a_probe(struct spi_device *spi) return 0; } -static int y030xx067a_remove(struct spi_device *spi) +static void y030xx067a_remove(struct spi_device *spi) { struct y030xx067a *priv = spi_get_drvdata(spi); drm_panel_remove(&priv->panel); drm_panel_disable(&priv->panel); drm_panel_unprepare(&priv->panel); - - return 0; } static const struct drm_display_mode y030xx067a_modes[] = { diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c index 8e84df9a0033..3dfafa585127 100644 --- a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c +++ b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c @@ -896,14 +896,12 @@ static int ili9322_probe(struct spi_device *spi) return 0; } -static int ili9322_remove(struct spi_device *spi) +static void ili9322_remove(struct spi_device *spi) { struct ili9322 *ili = spi_get_drvdata(spi); ili9322_power_off(ili); drm_panel_remove(&ili->panel); - - return 0; } /* diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c index 2c3378a259b1..a07ef26234e5 100644 --- a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c +++ b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c @@ -728,7 +728,7 @@ static int ili9341_probe(struct spi_device *spi) return -1; } -static int ili9341_remove(struct spi_device *spi) +static void ili9341_remove(struct spi_device *spi) { const struct spi_device_id *id = spi_get_device_id(spi); struct ili9341 *ili = spi_get_drvdata(spi); @@ -741,7 +741,6 @@ static int ili9341_remove(struct spi_device *spi) drm_dev_unplug(drm); drm_atomic_helper_shutdown(drm); } - return 0; } static void ili9341_shutdown(struct spi_device *spi) diff --git a/drivers/gpu/drm/panel/panel-innolux-ej030na.c b/drivers/gpu/drm/panel/panel-innolux-ej030na.c index c558de3f99be..e3b1daa0cb72 100644 --- a/drivers/gpu/drm/panel/panel-innolux-ej030na.c +++ 
b/drivers/gpu/drm/panel/panel-innolux-ej030na.c @@ -219,15 +219,13 @@ static int ej030na_probe(struct spi_device *spi) return 0; } -static int ej030na_remove(struct spi_device *spi) +static void ej030na_remove(struct spi_device *spi) { struct ej030na *priv = spi_get_drvdata(spi); drm_panel_remove(&priv->panel); drm_panel_disable(&priv->panel); drm_panel_unprepare(&priv->panel); - - return 0; } static const struct drm_display_mode ej030na_modes[] = { diff --git a/drivers/gpu/drm/panel/panel-lg-lb035q02.c b/drivers/gpu/drm/panel/panel-lg-lb035q02.c index f3183b68704f..9d0d4faa3f58 100644 --- a/drivers/gpu/drm/panel/panel-lg-lb035q02.c +++ b/drivers/gpu/drm/panel/panel-lg-lb035q02.c @@ -203,14 +203,12 @@ static int lb035q02_probe(struct spi_device *spi) return 0; } -static int lb035q02_remove(struct spi_device *spi) +static void lb035q02_remove(struct spi_device *spi) { struct lb035q02_device *lcd = spi_get_drvdata(spi); drm_panel_remove(&lcd->panel); drm_panel_disable(&lcd->panel); - - return 0; } static const struct of_device_id lb035q02_of_match[] = { diff --git a/drivers/gpu/drm/panel/panel-lg-lg4573.c b/drivers/gpu/drm/panel/panel-lg-lg4573.c index 8e5160af1de5..cf246d15b7b6 100644 --- a/drivers/gpu/drm/panel/panel-lg-lg4573.c +++ b/drivers/gpu/drm/panel/panel-lg-lg4573.c @@ -266,14 +266,12 @@ static int lg4573_probe(struct spi_device *spi) return 0; } -static int lg4573_remove(struct spi_device *spi) +static void lg4573_remove(struct spi_device *spi) { struct lg4573 *ctx = spi_get_drvdata(spi); lg4573_display_off(ctx); drm_panel_remove(&ctx->panel); - - return 0; } static const struct of_device_id lg4573_of_match[] = { diff --git a/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c b/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c index 6e5ab1debc8b..81c5c541a351 100644 --- a/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c +++ b/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c @@ -212,15 +212,13 @@ static int nl8048_probe(struct spi_device *spi) return 0; } -static int nl8048_remove(struct spi_device *spi) +static void nl8048_remove(struct spi_device *spi) { struct nl8048_panel *lcd = spi_get_drvdata(spi); drm_panel_remove(&lcd->panel); drm_panel_disable(&lcd->panel); drm_panel_unprepare(&lcd->panel); - - return 0; } static const struct of_device_id nl8048_of_match[] = { diff --git a/drivers/gpu/drm/panel/panel-novatek-nt39016.c b/drivers/gpu/drm/panel/panel-novatek-nt39016.c index d036853db865..f58cfb10b58a 100644 --- a/drivers/gpu/drm/panel/panel-novatek-nt39016.c +++ b/drivers/gpu/drm/panel/panel-novatek-nt39016.c @@ -292,7 +292,7 @@ static int nt39016_probe(struct spi_device *spi) return 0; } -static int nt39016_remove(struct spi_device *spi) +static void nt39016_remove(struct spi_device *spi) { struct nt39016 *panel = spi_get_drvdata(spi); @@ -300,8 +300,6 @@ static int nt39016_remove(struct spi_device *spi) nt39016_disable(&panel->drm_panel); nt39016_unprepare(&panel->drm_panel); - - return 0; } static const struct drm_display_mode kd035g6_display_modes[] = { diff --git a/drivers/gpu/drm/panel/panel-samsung-db7430.c b/drivers/gpu/drm/panel/panel-samsung-db7430.c index ead479719f00..04640c5256a8 100644 --- a/drivers/gpu/drm/panel/panel-samsung-db7430.c +++ b/drivers/gpu/drm/panel/panel-samsung-db7430.c @@ -314,12 +314,11 @@ static int db7430_probe(struct spi_device *spi) return 0; } -static int db7430_remove(struct spi_device *spi) +static void db7430_remove(struct spi_device *spi) { struct db7430 *db = spi_get_drvdata(spi); drm_panel_remove(&db->panel); - return 0; } /* diff --git 
a/drivers/gpu/drm/panel/panel-samsung-ld9040.c b/drivers/gpu/drm/panel/panel-samsung-ld9040.c index c4b388850a13..01eb211f32f7 100644 --- a/drivers/gpu/drm/panel/panel-samsung-ld9040.c +++ b/drivers/gpu/drm/panel/panel-samsung-ld9040.c @@ -358,14 +358,12 @@ static int ld9040_probe(struct spi_device *spi) return 0; } -static int ld9040_remove(struct spi_device *spi) +static void ld9040_remove(struct spi_device *spi) { struct ld9040 *ctx = spi_get_drvdata(spi); ld9040_power_off(ctx); drm_panel_remove(&ctx->panel); - - return 0; } static const struct of_device_id ld9040_of_match[] = { diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d27a1.c b/drivers/gpu/drm/panel/panel-samsung-s6d27a1.c index 1696ceb36aa0..2adb223a895c 100644 --- a/drivers/gpu/drm/panel/panel-samsung-s6d27a1.c +++ b/drivers/gpu/drm/panel/panel-samsung-s6d27a1.c @@ -291,12 +291,11 @@ static int s6d27a1_probe(struct spi_device *spi) return 0; } -static int s6d27a1_remove(struct spi_device *spi) +static void s6d27a1_remove(struct spi_device *spi) { struct s6d27a1 *ctx = spi_get_drvdata(spi); drm_panel_remove(&ctx->panel); - return 0; } static const struct of_device_id s6d27a1_match[] = { diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0-spi.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0-spi.c index c178d962b0d5..d99afcc672ca 100644 --- a/drivers/gpu/drm/panel/panel-samsung-s6e63m0-spi.c +++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0-spi.c @@ -62,10 +62,9 @@ static int s6e63m0_spi_probe(struct spi_device *spi) s6e63m0_spi_dcs_write, false); } -static int s6e63m0_spi_remove(struct spi_device *spi) +static void s6e63m0_spi_remove(struct spi_device *spi) { s6e63m0_remove(&spi->dev); - return 0; } static const struct of_device_id s6e63m0_spi_of_match[] = { diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index 9e46db5e359c..3c08f9827acf 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -588,6 +588,7 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc) err = panel_dpi_probe(dev, panel); if (err) goto free_ddc; + desc = panel->desc; } else { if (!of_get_display_timing(dev->of_node, "panel-timing", &dt)) panel_simple_parse_panel_timing_node(dev, panel, &dt); diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c index 61e565524542..bbc4569cbcdc 100644 --- a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c +++ b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c @@ -387,13 +387,11 @@ static int st7789v_probe(struct spi_device *spi) return 0; } -static int st7789v_remove(struct spi_device *spi) +static void st7789v_remove(struct spi_device *spi) { struct st7789v *ctx = spi_get_drvdata(spi); drm_panel_remove(&ctx->panel); - - return 0; } static const struct of_device_id st7789v_of_match[] = { diff --git a/drivers/gpu/drm/panel/panel-sony-acx565akm.c b/drivers/gpu/drm/panel/panel-sony-acx565akm.c index ba0b3ead150f..0d7541a33f87 100644 --- a/drivers/gpu/drm/panel/panel-sony-acx565akm.c +++ b/drivers/gpu/drm/panel/panel-sony-acx565akm.c @@ -655,7 +655,7 @@ static int acx565akm_probe(struct spi_device *spi) return 0; } -static int acx565akm_remove(struct spi_device *spi) +static void acx565akm_remove(struct spi_device *spi) { struct acx565akm_panel *lcd = spi_get_drvdata(spi); @@ -666,8 +666,6 @@ static int acx565akm_remove(struct spi_device *spi) drm_panel_disable(&lcd->panel); drm_panel_unprepare(&lcd->panel); - - return 0; } static const struct of_device_id 
acx565akm_of_match[] = { diff --git a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c index ba0c00d1a001..4dbf8b88f264 100644 --- a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c +++ b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c @@ -350,15 +350,13 @@ static int td028ttec1_probe(struct spi_device *spi) return 0; } -static int td028ttec1_remove(struct spi_device *spi) +static void td028ttec1_remove(struct spi_device *spi) { struct td028ttec1_panel *lcd = spi_get_drvdata(spi); drm_panel_remove(&lcd->panel); drm_panel_disable(&lcd->panel); drm_panel_unprepare(&lcd->panel); - - return 0; } static const struct of_device_id td028ttec1_of_match[] = { diff --git a/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c b/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c index 1866cdb8f9c1..cf4609bb9b1d 100644 --- a/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c +++ b/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c @@ -463,7 +463,7 @@ static int td043mtea1_probe(struct spi_device *spi) return 0; } -static int td043mtea1_remove(struct spi_device *spi) +static void td043mtea1_remove(struct spi_device *spi) { struct td043mtea1_panel *lcd = spi_get_drvdata(spi); @@ -472,8 +472,6 @@ static int td043mtea1_remove(struct spi_device *spi) drm_panel_unprepare(&lcd->panel); sysfs_remove_group(&spi->dev.kobj, &td043mtea1_attr_group); - - return 0; } static const struct of_device_id td043mtea1_of_match[] = { diff --git a/drivers/gpu/drm/panel/panel-tpo-tpg110.c b/drivers/gpu/drm/panel/panel-tpo-tpg110.c index e3791dad6830..0b1f5a11a055 100644 --- a/drivers/gpu/drm/panel/panel-tpo-tpg110.c +++ b/drivers/gpu/drm/panel/panel-tpo-tpg110.c @@ -450,12 +450,11 @@ static int tpg110_probe(struct spi_device *spi) return 0; } -static int tpg110_remove(struct spi_device *spi) +static void tpg110_remove(struct spi_device *spi) { struct tpg110 *tpg = spi_get_drvdata(spi); drm_panel_remove(&tpg->panel); - return 0; } static const struct of_device_id tpg110_match[] = { diff --git a/drivers/gpu/drm/panel/panel-widechips-ws2401.c b/drivers/gpu/drm/panel/panel-widechips-ws2401.c index 8bc976f54b80..236f3cb2b594 100644 --- a/drivers/gpu/drm/panel/panel-widechips-ws2401.c +++ b/drivers/gpu/drm/panel/panel-widechips-ws2401.c @@ -407,12 +407,11 @@ static int ws2401_probe(struct spi_device *spi) return 0; } -static int ws2401_remove(struct spi_device *spi) +static void ws2401_remove(struct spi_device *spi) { struct ws2401 *ws = spi_get_drvdata(spi); drm_panel_remove(&ws->panel); - return 0; } /* diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index 0fce73b9a646..70bd84b7ef2b 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c @@ -198,7 +198,8 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder, * so don't register a backlight device */ if ((rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) && - (rdev->pdev->device == 0x6741)) + (rdev->pdev->device == 0x6741) && + !dmi_match(DMI_PRODUCT_NAME, "iMac12,1")) return; if (!radeon_encoder->enc_priv) diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index 377f9cdb5b53..84013faa4756 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c @@ -470,8 +470,8 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, int32_t *msg, msg_type, handle; unsigned img_size = 0; void *ptr; - - int i, r; + long r; + int i; if (offset & 0x3F) { DRM_ERROR("UVD 
messages must be 64 byte aligned!\n"); @@ -481,13 +481,13 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, r = dma_resv_wait_timeout(bo->tbo.base.resv, false, false, MAX_SCHEDULE_TIMEOUT); if (r <= 0) { - DRM_ERROR("Failed waiting for UVD message (%d)!\n", r); + DRM_ERROR("Failed waiting for UVD message (%ld)!\n", r); return r ? r : -ETIME; } r = radeon_bo_kmap(bo, &ptr); if (r) { - DRM_ERROR("Failed mapping the UVD message (%d)!\n", r); + DRM_ERROR("Failed mapping the UVD message (%ld)!\n", r); return r; } diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c index 830bdd5e9b7c..8677c8271678 100644 --- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c +++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c @@ -529,13 +529,6 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master, return ret; } - ret = clk_prepare_enable(hdmi->vpll_clk); - if (ret) { - DRM_DEV_ERROR(hdmi->dev, "Failed to enable HDMI vpll: %d\n", - ret); - return ret; - } - hdmi->phy = devm_phy_optional_get(dev, "hdmi"); if (IS_ERR(hdmi->phy)) { ret = PTR_ERR(hdmi->phy); @@ -544,6 +537,13 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master, return ret; } + ret = clk_prepare_enable(hdmi->vpll_clk); + if (ret) { + DRM_DEV_ERROR(hdmi->dev, "Failed to enable HDMI vpll: %d\n", + ret); + return ret; + } + drm_encoder_helper_add(encoder, &dw_hdmi_rockchip_encoder_helper_funcs); drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS); diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c index 1f7353f0684a..798b542e5916 100644 --- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c +++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c @@ -902,6 +902,7 @@ static const struct vop_win_phy rk3399_win01_data = { .enable = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 0), .format = VOP_REG(RK3288_WIN0_CTRL0, 0x7, 1), .rb_swap = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 12), + .x_mir_en = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 21), .y_mir_en = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 22), .act_info = VOP_REG(RK3288_WIN0_ACT_INFO, 0x1fff1fff, 0), .dsp_info = VOP_REG(RK3288_WIN0_DSP_INFO, 0x0fff0fff, 0), @@ -912,6 +913,7 @@ static const struct vop_win_phy rk3399_win01_data = { .uv_vir = VOP_REG(RK3288_WIN0_VIR, 0x3fff, 16), .src_alpha_ctl = VOP_REG(RK3288_WIN0_SRC_ALPHA_CTRL, 0xff, 0), .dst_alpha_ctl = VOP_REG(RK3288_WIN0_DST_ALPHA_CTRL, 0xff, 0), + .channel = VOP_REG(RK3288_WIN0_CTRL2, 0xff, 0), }; /* @@ -922,11 +924,11 @@ static const struct vop_win_phy rk3399_win01_data = { static const struct vop_win_data rk3399_vop_win_data[] = { { .base = 0x00, .phy = &rk3399_win01_data, .type = DRM_PLANE_TYPE_PRIMARY }, - { .base = 0x40, .phy = &rk3288_win01_data, + { .base = 0x40, .phy = &rk3368_win01_data, .type = DRM_PLANE_TYPE_OVERLAY }, - { .base = 0x00, .phy = &rk3288_win23_data, + { .base = 0x00, .phy = &rk3368_win23_data, .type = DRM_PLANE_TYPE_OVERLAY }, - { .base = 0x50, .phy = &rk3288_win23_data, + { .base = 0x50, .phy = &rk3368_win23_data, .type = DRM_PLANE_TYPE_CURSOR }, }; diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig index 8cf5aeb9db6c..201f5175ecfe 100644 --- a/drivers/gpu/drm/tegra/Kconfig +++ b/drivers/gpu/drm/tegra/Kconfig @@ -5,6 +5,7 @@ config DRM_TEGRA depends on COMMON_CLK depends on DRM depends on OF + select DRM_DP_AUX_BUS select DRM_KMS_HELPER select DRM_MIPI_DSI select DRM_PANEL diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c index 
1f96e416fa08..d7a731d287d2 100644 --- a/drivers/gpu/drm/tegra/dpaux.c +++ b/drivers/gpu/drm/tegra/dpaux.c @@ -19,6 +19,7 @@ #include <linux/workqueue.h> #include <drm/drm_dp_helper.h> +#include <drm/drm_dp_aux_bus.h> #include <drm/drm_panel.h> #include "dp.h" @@ -570,6 +571,12 @@ static int tegra_dpaux_probe(struct platform_device *pdev) list_add_tail(&dpaux->list, &dpaux_list); mutex_unlock(&dpaux_lock); + err = devm_of_dp_aux_populate_ep_devices(&dpaux->aux); + if (err < 0) { + dev_err(dpaux->dev, "failed to populate AUX bus: %d\n", err); + return err; + } + return 0; } diff --git a/drivers/gpu/drm/tegra/falcon.c b/drivers/gpu/drm/tegra/falcon.c index 223ab2ceb7e6..3762d87759d9 100644 --- a/drivers/gpu/drm/tegra/falcon.c +++ b/drivers/gpu/drm/tegra/falcon.c @@ -63,7 +63,7 @@ static void falcon_copy_firmware_image(struct falcon *falcon, /* copy the whole thing taking into account endianness */ for (i = 0; i < firmware->size / sizeof(u32); i++) - virt[i] = le32_to_cpu(((u32 *)firmware->data)[i]); + virt[i] = le32_to_cpu(((__le32 *)firmware->data)[i]); } static int falcon_parse_firmware_image(struct falcon *falcon) diff --git a/drivers/gpu/drm/tiny/hx8357d.c b/drivers/gpu/drm/tiny/hx8357d.c index 9b33c05732aa..ebb025543f8d 100644 --- a/drivers/gpu/drm/tiny/hx8357d.c +++ b/drivers/gpu/drm/tiny/hx8357d.c @@ -263,14 +263,12 @@ static int hx8357d_probe(struct spi_device *spi) return 0; } -static int hx8357d_remove(struct spi_device *spi) +static void hx8357d_remove(struct spi_device *spi) { struct drm_device *drm = spi_get_drvdata(spi); drm_dev_unplug(drm); drm_atomic_helper_shutdown(drm); - - return 0; } static void hx8357d_shutdown(struct spi_device *spi) diff --git a/drivers/gpu/drm/tiny/ili9163.c b/drivers/gpu/drm/tiny/ili9163.c index bcc181351236..fc8ed245b0bc 100644 --- a/drivers/gpu/drm/tiny/ili9163.c +++ b/drivers/gpu/drm/tiny/ili9163.c @@ -193,14 +193,12 @@ static int ili9163_probe(struct spi_device *spi) return 0; } -static int ili9163_remove(struct spi_device *spi) +static void ili9163_remove(struct spi_device *spi) { struct drm_device *drm = spi_get_drvdata(spi); drm_dev_unplug(drm); drm_atomic_helper_shutdown(drm); - - return 0; } static void ili9163_shutdown(struct spi_device *spi) diff --git a/drivers/gpu/drm/tiny/ili9225.c b/drivers/gpu/drm/tiny/ili9225.c index 976d3209f164..cc92eb9f2a07 100644 --- a/drivers/gpu/drm/tiny/ili9225.c +++ b/drivers/gpu/drm/tiny/ili9225.c @@ -411,14 +411,12 @@ static int ili9225_probe(struct spi_device *spi) return 0; } -static int ili9225_remove(struct spi_device *spi) +static void ili9225_remove(struct spi_device *spi) { struct drm_device *drm = spi_get_drvdata(spi); drm_dev_unplug(drm); drm_atomic_helper_shutdown(drm); - - return 0; } static void ili9225_shutdown(struct spi_device *spi) diff --git a/drivers/gpu/drm/tiny/ili9341.c b/drivers/gpu/drm/tiny/ili9341.c index 37e0c33399c8..5b8cc770ee7b 100644 --- a/drivers/gpu/drm/tiny/ili9341.c +++ b/drivers/gpu/drm/tiny/ili9341.c @@ -225,14 +225,12 @@ static int ili9341_probe(struct spi_device *spi) return 0; } -static int ili9341_remove(struct spi_device *spi) +static void ili9341_remove(struct spi_device *spi) { struct drm_device *drm = spi_get_drvdata(spi); drm_dev_unplug(drm); drm_atomic_helper_shutdown(drm); - - return 0; } static void ili9341_shutdown(struct spi_device *spi) diff --git a/drivers/gpu/drm/tiny/ili9486.c b/drivers/gpu/drm/tiny/ili9486.c index e9a63f4b2993..6d655e18e0aa 100644 --- a/drivers/gpu/drm/tiny/ili9486.c +++ b/drivers/gpu/drm/tiny/ili9486.c @@ -243,14 +243,12 @@ static int 
ili9486_probe(struct spi_device *spi) return 0; } -static int ili9486_remove(struct spi_device *spi) +static void ili9486_remove(struct spi_device *spi) { struct drm_device *drm = spi_get_drvdata(spi); drm_dev_unplug(drm); drm_atomic_helper_shutdown(drm); - - return 0; } static void ili9486_shutdown(struct spi_device *spi) diff --git a/drivers/gpu/drm/tiny/mi0283qt.c b/drivers/gpu/drm/tiny/mi0283qt.c index 023de49e7a8e..5e060f6910bb 100644 --- a/drivers/gpu/drm/tiny/mi0283qt.c +++ b/drivers/gpu/drm/tiny/mi0283qt.c @@ -233,14 +233,12 @@ static int mi0283qt_probe(struct spi_device *spi) return 0; } -static int mi0283qt_remove(struct spi_device *spi) +static void mi0283qt_remove(struct spi_device *spi) { struct drm_device *drm = spi_get_drvdata(spi); drm_dev_unplug(drm); drm_atomic_helper_shutdown(drm); - - return 0; } static void mi0283qt_shutdown(struct spi_device *spi) diff --git a/drivers/gpu/drm/tiny/repaper.c b/drivers/gpu/drm/tiny/repaper.c index 97a775c48cea..beeeb170d0b1 100644 --- a/drivers/gpu/drm/tiny/repaper.c +++ b/drivers/gpu/drm/tiny/repaper.c @@ -1140,14 +1140,12 @@ static int repaper_probe(struct spi_device *spi) return 0; } -static int repaper_remove(struct spi_device *spi) +static void repaper_remove(struct spi_device *spi) { struct drm_device *drm = spi_get_drvdata(spi); drm_dev_unplug(drm); drm_atomic_helper_shutdown(drm); - - return 0; } static void repaper_shutdown(struct spi_device *spi) diff --git a/drivers/gpu/drm/tiny/st7586.c b/drivers/gpu/drm/tiny/st7586.c index 51b9b9fb3ead..3f38faa1cd8c 100644 --- a/drivers/gpu/drm/tiny/st7586.c +++ b/drivers/gpu/drm/tiny/st7586.c @@ -360,14 +360,12 @@ static int st7586_probe(struct spi_device *spi) return 0; } -static int st7586_remove(struct spi_device *spi) +static void st7586_remove(struct spi_device *spi) { struct drm_device *drm = spi_get_drvdata(spi); drm_dev_unplug(drm); drm_atomic_helper_shutdown(drm); - - return 0; } static void st7586_shutdown(struct spi_device *spi) diff --git a/drivers/gpu/drm/tiny/st7735r.c b/drivers/gpu/drm/tiny/st7735r.c index fc40dd10efa8..29d618093e94 100644 --- a/drivers/gpu/drm/tiny/st7735r.c +++ b/drivers/gpu/drm/tiny/st7735r.c @@ -247,14 +247,12 @@ static int st7735r_probe(struct spi_device *spi) return 0; } -static int st7735r_remove(struct spi_device *spi) +static void st7735r_remove(struct spi_device *spi) { struct drm_device *drm = spi_get_drvdata(spi); drm_dev_unplug(drm); drm_atomic_helper_shutdown(drm); - - return 0; } static void st7735r_shutdown(struct spi_device *spi) diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c index 287dbc89ad64..783890e8d43a 100644 --- a/drivers/gpu/drm/vc4/vc4_crtc.c +++ b/drivers/gpu/drm/vc4/vc4_crtc.c @@ -525,9 +525,11 @@ int vc4_crtc_disable_at_boot(struct drm_crtc *crtc) if (ret) return ret; - ret = pm_runtime_put(&vc4_hdmi->pdev->dev); - if (ret) - return ret; + /* + * post_crtc_powerdown will have called pm_runtime_put, so we + * don't need it here; otherwise we'll get the reference counting + * wrong.
+ */ return 0; } @@ -671,7 +673,6 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc, const struct drm_display_mode *mode = &crtc_state->adjusted_mode; struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder); - mode = &crtc_state->adjusted_mode; if (vc4_encoder->type == VC4_ENCODER_TYPE_HDMI0) { vc4_state->hvs_load = max(mode->clock * mode->hdisplay / mode->htotal + 1000, mode->clock * 9 / 10) * 1000; diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index 053fbaf765ca..3a1626f261e5 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -196,14 +196,8 @@ vc4_hdmi_connector_detect(struct drm_connector *connector, bool force) if (gpiod_get_value_cansleep(vc4_hdmi->hpd_gpio)) connected = true; } else { - unsigned long flags; - u32 hotplug; - - spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); - hotplug = HDMI_READ(HDMI_HOTPLUG); - spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); - - if (hotplug & VC4_HDMI_HOTPLUG_CONNECTED) + if (vc4_hdmi->variant->hp_detect && + vc4_hdmi->variant->hp_detect(vc4_hdmi)) connected = true; } @@ -1251,6 +1245,7 @@ static int vc4_hdmi_encoder_atomic_check(struct drm_encoder *encoder, unsigned long long tmds_rate; if (vc4_hdmi->variant->unsupported_odd_h_timings && + !(mode->flags & DRM_MODE_FLAG_DBLCLK) && ((mode->hdisplay % 2) || (mode->hsync_start % 2) || (mode->hsync_end % 2) || (mode->htotal % 2))) return -EINVAL; @@ -1298,6 +1293,7 @@ vc4_hdmi_encoder_mode_valid(struct drm_encoder *encoder, struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); if (vc4_hdmi->variant->unsupported_odd_h_timings && + !(mode->flags & DRM_MODE_FLAG_DBLCLK) && ((mode->hdisplay % 2) || (mode->hsync_start % 2) || (mode->hsync_end % 2) || (mode->htotal % 2))) return MODE_H_ILLEGAL; @@ -1343,6 +1339,18 @@ static u32 vc5_hdmi_channel_map(struct vc4_hdmi *vc4_hdmi, u32 channel_mask) return channel_map; } +static bool vc5_hdmi_hp_detect(struct vc4_hdmi *vc4_hdmi) +{ + unsigned long flags; + u32 hotplug; + + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); + hotplug = HDMI_READ(HDMI_HOTPLUG); + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); + + return !!(hotplug & VC4_HDMI_HOTPLUG_CONNECTED); +} + /* HDMI audio codec callbacks */ static void vc4_hdmi_audio_set_mai_clock(struct vc4_hdmi *vc4_hdmi, unsigned int samplerate) @@ -1741,6 +1749,7 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi) dev_err(dev, "Couldn't register the HDMI codec: %ld\n", PTR_ERR(codec_pdev)); return PTR_ERR(codec_pdev); } + vc4_hdmi->audio.codec_pdev = codec_pdev; dai_link->cpus = &vc4_hdmi->audio.cpu; dai_link->codecs = &vc4_hdmi->audio.codec; @@ -1780,6 +1789,12 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi) } +static void vc4_hdmi_audio_exit(struct vc4_hdmi *vc4_hdmi) +{ + platform_device_unregister(vc4_hdmi->audio.codec_pdev); + vc4_hdmi->audio.codec_pdev = NULL; +} + static irqreturn_t vc4_hdmi_hpd_irq_thread(int irq, void *priv) { struct vc4_hdmi *vc4_hdmi = priv; @@ -2504,7 +2519,8 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data) * vc4_hdmi_disable_scrambling() will thus run at boot, make * sure it's disabled, and avoid any inconsistency. 
*/ - vc4_hdmi->scdc_enabled = true; + if (variant->max_pixel_clock > HDMI_14_MAX_TMDS_CLK) + vc4_hdmi->scdc_enabled = true; ret = variant->init_resources(vc4_hdmi); if (ret) @@ -2651,6 +2667,7 @@ static void vc4_hdmi_unbind(struct device *dev, struct device *master, kfree(vc4_hdmi->hdmi_regset.regs); kfree(vc4_hdmi->hd_regset.regs); + vc4_hdmi_audio_exit(vc4_hdmi); vc4_hdmi_cec_exit(vc4_hdmi); vc4_hdmi_hotplug_exit(vc4_hdmi); vc4_hdmi_connector_destroy(&vc4_hdmi->connector); @@ -2723,6 +2740,7 @@ static const struct vc4_hdmi_variant bcm2711_hdmi0_variant = { .phy_rng_disable = vc5_hdmi_phy_rng_disable, .channel_map = vc5_hdmi_channel_map, .supports_hdr = true, + .hp_detect = vc5_hdmi_hp_detect, }; static const struct vc4_hdmi_variant bcm2711_hdmi1_variant = { @@ -2751,6 +2769,7 @@ static const struct vc4_hdmi_variant bcm2711_hdmi1_variant = { .phy_rng_disable = vc5_hdmi_phy_rng_disable, .channel_map = vc5_hdmi_channel_map, .supports_hdr = true, + .hp_detect = vc5_hdmi_hp_detect, }; static const struct of_device_id vc4_hdmi_dt_match[] = { diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.h b/drivers/gpu/drm/vc4/vc4_hdmi.h index 36c0b082a43b..6ffdd4ec5fb6 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.h +++ b/drivers/gpu/drm/vc4/vc4_hdmi.h @@ -102,6 +102,9 @@ struct vc4_hdmi_variant { /* Enables HDR metadata */ bool supports_hdr; + + /* Callback for hardware specific hotplug detect */ + bool (*hp_detect)(struct vc4_hdmi *vc4_hdmi); }; /* HDMI audio information */ @@ -113,6 +116,7 @@ struct vc4_hdmi_audio { struct snd_soc_dai_link_component platform; struct snd_dmaengine_dai_dma_data dma_data; struct hdmi_audio_infoframe infoframe; + struct platform_device *codec_pdev; bool streaming; }; diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c index e08e331e46ae..f87a8705f518 100644 --- a/drivers/gpu/host1x/syncpt.c +++ b/drivers/gpu/host1x/syncpt.c @@ -137,8 +137,15 @@ void host1x_syncpt_restore(struct host1x *host) struct host1x_syncpt *sp_base = host->syncpt; unsigned int i; - for (i = 0; i < host1x_syncpt_nb_pts(host); i++) + for (i = 0; i < host1x_syncpt_nb_pts(host); i++) { + /* + * Unassign syncpt from channels for purposes of Tegra186 + * syncpoint protection. This prevents any channel from + * accessing it until it is reassigned. + */ + host1x_hw_syncpt_assign_to_channel(host, sp_base + i, NULL); host1x_hw_syncpt_restore(host, sp_base + i); + } for (i = 0; i < host1x_syncpt_nb_bases(host); i++) host1x_hw_syncpt_restore_wait_base(host, sp_base + i); @@ -227,27 +234,12 @@ int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout, void *ref; struct host1x_waitlist *waiter; int err = 0, check_count = 0; - u32 val; if (value) - *value = 0; - - /* first check cache */ - if (host1x_syncpt_is_expired(sp, thresh)) { - if (value) - *value = host1x_syncpt_load(sp); + *value = host1x_syncpt_load(sp); + if (host1x_syncpt_is_expired(sp, thresh)) return 0; - } - - /* try to read from register */ - val = host1x_hw_syncpt_load(sp->host, sp); - if (host1x_syncpt_is_expired(sp, thresh)) { - if (value) - *value = val; - - goto done; - } if (!timeout) { err = -EAGAIN; @@ -352,13 +344,6 @@ int host1x_syncpt_init(struct host1x *host) for (i = 0; i < host->info->nb_pts; i++) { syncpt[i].id = i; syncpt[i].host = host; - - /* - * Unassign syncpt from channels for purposes of Tegra186 - * syncpoint protection. This prevents any channel from - * accessing it until it is reassigned. 
- */ - host1x_hw_syncpt_assign_to_channel(host, &syncpt[i], NULL); } for (i = 0; i < host->info->nb_bases; i++) diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c index 2503be0253d3..19fa734a9a79 100644 --- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c +++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c @@ -37,11 +37,11 @@ static int amd_sfh_wait_response_v2(struct amd_mp2_dev *mp2, u8 sid, u32 sensor_ { union cmd_response cmd_resp; - /* Get response with status within a max of 800 ms timeout */ + /* Get response with status within a max of 1600 ms timeout */ if (!readl_poll_timeout(mp2->mmio + AMD_P2C_MSG(0), cmd_resp.resp, (cmd_resp.response_v2.response == sensor_sts && cmd_resp.response_v2.status == 0 && (sid == 0xff || - cmd_resp.response_v2.sensor_id == sid)), 500, 800000)) + cmd_resp.response_v2.sensor_id == sid)), 500, 1600000)) return cmd_resp.response_v2.response; return SENSOR_DISABLED; @@ -53,6 +53,7 @@ static void amd_start_sensor_v2(struct amd_mp2_dev *privdata, struct amd_mp2_sen cmd_base.ul = 0; cmd_base.cmd_v2.cmd_id = ENABLE_SENSOR; + cmd_base.cmd_v2.intr_disable = 1; cmd_base.cmd_v2.period = info.period; cmd_base.cmd_v2.sensor_id = info.sensor_idx; cmd_base.cmd_v2.length = 16; @@ -70,6 +71,7 @@ static void amd_stop_sensor_v2(struct amd_mp2_dev *privdata, u16 sensor_idx) cmd_base.ul = 0; cmd_base.cmd_v2.cmd_id = DISABLE_SENSOR; + cmd_base.cmd_v2.intr_disable = 1; cmd_base.cmd_v2.period = 0; cmd_base.cmd_v2.sensor_id = sensor_idx; cmd_base.cmd_v2.length = 16; @@ -83,12 +85,51 @@ static void amd_stop_all_sensor_v2(struct amd_mp2_dev *privdata) union sfh_cmd_base cmd_base; cmd_base.cmd_v2.cmd_id = STOP_ALL_SENSORS; + cmd_base.cmd_v2.intr_disable = 1; cmd_base.cmd_v2.period = 0; cmd_base.cmd_v2.sensor_id = 0; writel(cmd_base.ul, privdata->mmio + AMD_C2P_MSG0); } +static void amd_sfh_clear_intr_v2(struct amd_mp2_dev *privdata) +{ + if (readl(privdata->mmio + AMD_P2C_MSG(4))) { + writel(0, privdata->mmio + AMD_P2C_MSG(4)); + writel(0xf, privdata->mmio + AMD_P2C_MSG(5)); + } +} + +static void amd_sfh_clear_intr(struct amd_mp2_dev *privdata) +{ + if (privdata->mp2_ops->clear_intr) + privdata->mp2_ops->clear_intr(privdata); +} + +static irqreturn_t amd_sfh_irq_handler(int irq, void *data) +{ + amd_sfh_clear_intr(data); + + return IRQ_HANDLED; +} + +static int amd_sfh_irq_init_v2(struct amd_mp2_dev *privdata) +{ + int rc; + + pci_intx(privdata->pdev, true); + + rc = devm_request_irq(&privdata->pdev->dev, privdata->pdev->irq, + amd_sfh_irq_handler, 0, DRIVER_NAME, privdata); + if (rc) { + dev_err(&privdata->pdev->dev, "failed to request irq %d err=%d\n", + privdata->pdev->irq, rc); + return rc; + } + + return 0; +} + void amd_start_sensor(struct amd_mp2_dev *privdata, struct amd_mp2_sensor_info info) { union sfh_cmd_param cmd_param; @@ -193,6 +234,8 @@ static void amd_mp2_pci_remove(void *privdata) struct amd_mp2_dev *mp2 = privdata; amd_sfh_hid_client_deinit(privdata); mp2->mp2_ops->stop_all(mp2); + pci_intx(mp2->pdev, false); + amd_sfh_clear_intr(mp2); } static const struct amd_mp2_ops amd_sfh_ops_v2 = { @@ -200,6 +243,8 @@ static const struct amd_mp2_ops amd_sfh_ops_v2 = { .stop = amd_stop_sensor_v2, .stop_all = amd_stop_all_sensor_v2, .response = amd_sfh_wait_response_v2, + .clear_intr = amd_sfh_clear_intr_v2, + .init_intr = amd_sfh_irq_init_v2, }; static const struct amd_mp2_ops amd_sfh_ops = { @@ -225,6 +270,14 @@ static void mp2_select_ops(struct amd_mp2_dev *privdata) } } +static int amd_sfh_irq_init(struct amd_mp2_dev *privdata) +{ + if 
(privdata->mp2_ops->init_intr) + return privdata->mp2_ops->init_intr(privdata); + + return 0; +} + static int amd_mp2_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct amd_mp2_dev *privdata; @@ -261,9 +314,20 @@ static int amd_mp2_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i mp2_select_ops(privdata); + rc = amd_sfh_irq_init(privdata); + if (rc) { + dev_err(&pdev->dev, "amd_sfh_irq_init failed\n"); + return rc; + } + rc = amd_sfh_hid_client_init(privdata); - if (rc) + if (rc) { + amd_sfh_clear_intr(privdata); + dev_err(&pdev->dev, "amd_sfh_hid_client_init failed\n"); return rc; + } + + amd_sfh_clear_intr(privdata); return devm_add_action_or_reset(&pdev->dev, amd_mp2_pci_remove, privdata); } @@ -290,6 +354,9 @@ static int __maybe_unused amd_mp2_pci_resume(struct device *dev) } } + schedule_delayed_work(&cl_data->work_buffer, msecs_to_jiffies(AMD_SFH_IDLE_LOOP)); + amd_sfh_clear_intr(mp2); + return 0; } @@ -312,6 +379,9 @@ static int __maybe_unused amd_mp2_pci_suspend(struct device *dev) } } + cancel_delayed_work_sync(&cl_data->work_buffer); + amd_sfh_clear_intr(mp2); + return 0; } diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h index ae30e059f847..97b99861fae2 100644 --- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h +++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h @@ -49,7 +49,7 @@ union sfh_cmd_base { } s; struct { u32 cmd_id : 4; - u32 intr_enable : 1; + u32 intr_disable : 1; u32 rsvd1 : 3; u32 length : 7; u32 mem_type : 1; @@ -141,5 +141,7 @@ struct amd_mp2_ops { void (*stop)(struct amd_mp2_dev *privdata, u16 sensor_idx); void (*stop_all)(struct amd_mp2_dev *privdata); int (*response)(struct amd_mp2_dev *mp2, u8 sid, u32 sensor_sts); + void (*clear_intr)(struct amd_mp2_dev *privdata); + int (*init_intr)(struct amd_mp2_dev *privdata); }; #endif diff --git a/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c b/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c index be41f83b0289..76095bd53c65 100644 --- a/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c +++ b/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c @@ -27,6 +27,7 @@ #define HID_USAGE_SENSOR_STATE_READY_ENUM 0x02 #define HID_USAGE_SENSOR_STATE_INITIALIZING_ENUM 0x05 #define HID_USAGE_SENSOR_EVENT_DATA_UPDATED_ENUM 0x04 +#define ILLUMINANCE_MASK GENMASK(14, 0) int get_report_descriptor(int sensor_idx, u8 *rep_desc) { @@ -246,7 +247,8 @@ u8 get_input_report(u8 current_index, int sensor_idx, int report_id, struct amd_ get_common_inputs(&als_input.common_property, report_id); /* For ALS ,V2 Platforms uses C2P_MSG5 register instead of DRAM access method */ if (supported_input == V2_STATUS) - als_input.illuminance_value = (int)readl(privdata->mmio + AMD_C2P_MSG(5)); + als_input.illuminance_value = + readl(privdata->mmio + AMD_C2P_MSG(5)) & ILLUMINANCE_MASK; else als_input.illuminance_value = (int)sensor_virt_addr[0] / AMD_SFH_FW_MULTIPLIER; diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c index 24802a4a636e..7dc89dc6b0f0 100644 --- a/drivers/hid/hid-apple.c +++ b/drivers/hid/hid-apple.c @@ -691,49 +691,49 @@ static const struct hid_device_id apple_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI), .driver_data = APPLE_HAS_FN }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO), - .driver_data = APPLE_HAS_FN }, + .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS), .driver_data = 
APPLE_HAS_FN | APPLE_RDESC_JIS }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI), .driver_data = APPLE_HAS_FN }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ISO), - .driver_data = APPLE_HAS_FN }, + .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_JIS), .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI), .driver_data = APPLE_HAS_FN }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO), - .driver_data = APPLE_HAS_FN }, + .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS), .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI), .driver_data = APPLE_HAS_FN }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO), - .driver_data = APPLE_HAS_FN }, + .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS), .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI), .driver_data = APPLE_HAS_FN }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO), - .driver_data = APPLE_HAS_FN }, + .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS), .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI), .driver_data = APPLE_HAS_FN }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO), - .driver_data = APPLE_HAS_FN }, + .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS), .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI), .driver_data = APPLE_HAS_FN }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO), - .driver_data = APPLE_HAS_FN }, + .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS), .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI), .driver_data = APPLE_HAS_FN }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO), - .driver_data = APPLE_HAS_FN }, + .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS), .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI), diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c index 26c31d759914..81e7e404a5fc 100644 --- a/drivers/hid/hid-debug.c +++ b/drivers/hid/hid-debug.c @@ -860,7 +860,9 @@ static const char *keys[KEY_MAX + 1] = { [KEY_F22] = "F22", [KEY_F23] = "F23", [KEY_F24] = "F24", [KEY_PLAYCD] = "PlayCD", [KEY_PAUSECD] = "PauseCD", [KEY_PROG3] = "Prog3", - [KEY_PROG4] = "Prog4", [KEY_SUSPEND] = "Suspend", + [KEY_PROG4] = "Prog4", + [KEY_ALL_APPLICATIONS] = "AllApplications", + [KEY_SUSPEND] = "Suspend", [KEY_CLOSE] = "Close", [KEY_PLAY] = "Play", [KEY_FASTFORWARD] = "FastForward", [KEY_BASSBOOST] = "BassBoost", [KEY_PRINT] 
= "Print", [KEY_HP] = "HP", @@ -969,6 +971,7 @@ static const char *keys[KEY_MAX + 1] = { [KEY_ASSISTANT] = "Assistant", [KEY_KBD_LAYOUT_NEXT] = "KbdLayoutNext", [KEY_EMOJI_PICKER] = "EmojiPicker", + [KEY_DICTATE] = "Dictate", [KEY_BRIGHTNESS_MIN] = "BrightnessMin", [KEY_BRIGHTNESS_MAX] = "BrightnessMax", [KEY_BRIGHTNESS_AUTO] = "BrightnessAuto", diff --git a/drivers/hid/hid-elo.c b/drivers/hid/hid-elo.c index 8e960d7b233b..2876cb6a7dca 100644 --- a/drivers/hid/hid-elo.c +++ b/drivers/hid/hid-elo.c @@ -228,7 +228,6 @@ static int elo_probe(struct hid_device *hdev, const struct hid_device_id *id) { struct elo_priv *priv; int ret; - struct usb_device *udev; if (!hid_is_usb(hdev)) return -EINVAL; @@ -238,8 +237,7 @@ static int elo_probe(struct hid_device *hdev, const struct hid_device_id *id) return -ENOMEM; INIT_DELAYED_WORK(&priv->work, elo_work); - udev = interface_to_usbdev(to_usb_interface(hdev->dev.parent)); - priv->usbdev = usb_get_dev(udev); + priv->usbdev = interface_to_usbdev(to_usb_interface(hdev->dev.parent)); hid_set_drvdata(hdev, priv); @@ -270,8 +268,6 @@ static void elo_remove(struct hid_device *hdev) { struct elo_priv *priv = hid_get_drvdata(hdev); - usb_put_dev(priv->usbdev); - hid_hw_stop(hdev); cancel_delayed_work_sync(&priv->work); kfree(priv); diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 85975031389b..78bd3ddda442 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -1370,6 +1370,7 @@ #define USB_VENDOR_ID_UGTIZER 0x2179 #define USB_DEVICE_ID_UGTIZER_TABLET_GP0610 0x0053 #define USB_DEVICE_ID_UGTIZER_TABLET_GT5040 0x0077 +#define USB_DEVICE_ID_UGTIZER_TABLET_WP5540 0x0004 #define USB_VENDOR_ID_VIEWSONIC 0x0543 #define USB_DEVICE_ID_VIEWSONIC_PD1011 0xe621 diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index 112901d2d8d2..56ec27398a00 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -992,6 +992,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel case 0x0cd: map_key_clear(KEY_PLAYPAUSE); break; case 0x0cf: map_key_clear(KEY_VOICECOMMAND); break; + case 0x0d8: map_key_clear(KEY_DICTATE); break; case 0x0d9: map_key_clear(KEY_EMOJI_PICKER); break; case 0x0e0: map_abs_clear(ABS_VOLUME); break; @@ -1083,6 +1084,8 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel case 0x29d: map_key_clear(KEY_KBD_LAYOUT_NEXT); break; + case 0x2a2: map_key_clear(KEY_ALL_APPLICATIONS); break; + case 0x2c7: map_key_clear(KEY_KBDINPUTASSIST_PREV); break; case 0x2c8: map_key_clear(KEY_KBDINPUTASSIST_NEXT); break; case 0x2c9: map_key_clear(KEY_KBDINPUTASSIST_PREVGROUP); break; diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c index 7106b921b53c..c358778e070b 100644 --- a/drivers/hid/hid-logitech-dj.c +++ b/drivers/hid/hid-logitech-dj.c @@ -1068,6 +1068,7 @@ static void logi_hidpp_recv_queue_notif(struct hid_device *hdev, workitem.reports_supported |= STD_KEYBOARD; break; case 0x0f: + case 0x11: device_type = "eQUAD Lightspeed 1.2"; logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem); workitem.reports_supported |= STD_KEYBOARD; diff --git a/drivers/hid/hid-nintendo.c b/drivers/hid/hid-nintendo.c index b6a9a0f3966e..2204de889739 100644 --- a/drivers/hid/hid-nintendo.c +++ b/drivers/hid/hid-nintendo.c @@ -2128,6 +2128,10 @@ static int nintendo_hid_probe(struct hid_device *hdev, spin_lock_init(&ctlr->lock); ctlr->rumble_queue = alloc_workqueue("hid-nintendo-rumble_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM, 0); + if (!ctlr->rumble_queue) { + 
ret = -ENOMEM; + goto err; + } INIT_WORK(&ctlr->rumble_worker, joycon_rumble_worker); ret = hid_parse(hdev); diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c index 9af1dc8ae3a2..c066ba901867 100644 --- a/drivers/hid/hid-quirks.c +++ b/drivers/hid/hid-quirks.c @@ -187,6 +187,7 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD), HID_QUIRK_NOGET }, { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWA60), HID_QUIRK_MULTI_INPUT }, + { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_WP5540), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET), HID_QUIRK_MULTI_INPUT }, diff --git a/drivers/hid/hid-thrustmaster.c b/drivers/hid/hid-thrustmaster.c index 03b935ff02d5..c3e6d69fdfbd 100644 --- a/drivers/hid/hid-thrustmaster.c +++ b/drivers/hid/hid-thrustmaster.c @@ -64,7 +64,9 @@ struct tm_wheel_info { */ static const struct tm_wheel_info tm_wheels_infos[] = { {0x0306, 0x0006, "Thrustmaster T150RS"}, + {0x0200, 0x0005, "Thrustmaster T300RS (Missing Attachment)"}, {0x0206, 0x0005, "Thrustmaster T300RS"}, + {0x0209, 0x0005, "Thrustmaster T300RS (Open Wheel Attachment)"}, {0x0204, 0x0005, "Thrustmaster T300 Ferrari Alcantara Edition"}, {0x0002, 0x0002, "Thrustmaster T500RS"} //{0x0407, 0x0001, "Thrustmaster TMX"} @@ -158,6 +160,12 @@ static void thrustmaster_interrupts(struct hid_device *hdev) return; } + if (usbif->cur_altsetting->desc.bNumEndpoints < 2) { + kfree(send_buf); + hid_err(hdev, "Wrong number of endpoints?\n"); + return; + } + ep = &usbif->cur_altsetting->endpoint[1]; b_ep = ep->desc.bEndpointAddress; diff --git a/drivers/hid/hid-vivaldi.c b/drivers/hid/hid-vivaldi.c index efa6140915f4..42ceb2058a09 100644 --- a/drivers/hid/hid-vivaldi.c +++ b/drivers/hid/hid-vivaldi.c @@ -144,7 +144,7 @@ out: static int vivaldi_input_configured(struct hid_device *hdev, struct hid_input *hidinput) { - return sysfs_create_group(&hdev->dev.kobj, &input_attribute_group); + return devm_device_add_group(&hdev->dev, &input_attribute_group); } static const struct hid_device_id vivaldi_table[] = { diff --git a/drivers/hid/i2c-hid/i2c-hid-of-goodix.c b/drivers/hid/i2c-hid/i2c-hid-of-goodix.c index b4dad66fa954..ec6c73f75ffe 100644 --- a/drivers/hid/i2c-hid/i2c-hid-of-goodix.c +++ b/drivers/hid/i2c-hid/i2c-hid-of-goodix.c @@ -27,7 +27,6 @@ struct i2c_hid_of_goodix { struct regulator *vdd; struct notifier_block nb; - struct mutex regulator_mutex; struct gpio_desc *reset_gpio; const struct goodix_i2c_hid_timing_data *timings; }; @@ -67,8 +66,6 @@ static int ihid_goodix_vdd_notify(struct notifier_block *nb, container_of(nb, struct i2c_hid_of_goodix, nb); int ret = NOTIFY_OK; - mutex_lock(&ihid_goodix->regulator_mutex); - switch (event) { case REGULATOR_EVENT_PRE_DISABLE: gpiod_set_value_cansleep(ihid_goodix->reset_gpio, 1); @@ -87,8 +84,6 @@ static int ihid_goodix_vdd_notify(struct notifier_block *nb, break; } - mutex_unlock(&ihid_goodix->regulator_mutex); - return ret; } @@ -102,8 +97,6 @@ static int i2c_hid_of_goodix_probe(struct i2c_client *client, if (!ihid_goodix) return -ENOMEM; - 
mutex_init(&ihid_goodix->regulator_mutex); - ihid_goodix->ops.power_up = goodix_i2c_hid_power_up; ihid_goodix->ops.power_down = goodix_i2c_hid_power_down; @@ -130,25 +123,28 @@ static int i2c_hid_of_goodix_probe(struct i2c_client *client, * long. Holding the controller in reset apparently draws extra * power. */ - mutex_lock(&ihid_goodix->regulator_mutex); ihid_goodix->nb.notifier_call = ihid_goodix_vdd_notify; ret = devm_regulator_register_notifier(ihid_goodix->vdd, &ihid_goodix->nb); - if (ret) { - mutex_unlock(&ihid_goodix->regulator_mutex); + if (ret) return dev_err_probe(&client->dev, ret, "regulator notifier request failed\n"); - } /* * If someone else is holding the regulator on (or the regulator is * an always-on one) we might never be told to deassert reset. Do it - * now. Here we'll assume that someone else might have _just - * barely_ turned the regulator on so we'll do the full - * "post_power_delay" just in case. + * now... and temporarily bump the regulator reference count just to + * make sure it is impossible for this to race with our own notifier! + * We also assume that someone else might have _just barely_ turned + * the regulator on so we'll do the full "post_power_delay" just in + * case. */ - if (ihid_goodix->reset_gpio && regulator_is_enabled(ihid_goodix->vdd)) + if (ihid_goodix->reset_gpio && regulator_is_enabled(ihid_goodix->vdd)) { + ret = regulator_enable(ihid_goodix->vdd); + if (ret) + return ret; goodix_i2c_hid_deassert_reset(ihid_goodix, true); - mutex_unlock(&ihid_goodix->regulator_mutex); + regulator_disable(ihid_goodix->vdd); + } return i2c_hid_core_probe(client, &ihid_goodix->ops, 0x0001, 0); } diff --git a/drivers/hv/hv_utils_transport.c b/drivers/hv/hv_utils_transport.c index eb2833d2b5d0..832885198643 100644 --- a/drivers/hv/hv_utils_transport.c +++ b/drivers/hv/hv_utils_transport.c @@ -13,7 +13,7 @@ #include "hv_utils_transport.h" static DEFINE_SPINLOCK(hvt_list_lock); -static struct list_head hvt_list = LIST_HEAD_INIT(hvt_list); +static LIST_HEAD(hvt_list); static void hvt_reset(struct hvutil_transport *hvt) { diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index 17bf55fe3169..12a2b37e87f3 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -2028,8 +2028,10 @@ int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel) kobj->kset = dev->channels_kset; ret = kobject_init_and_add(kobj, &vmbus_chan_ktype, NULL, "%u", relid); - if (ret) + if (ret) { + kobject_put(kobj); return ret; + } ret = sysfs_create_group(kobj, &vmbus_chan_group); @@ -2038,6 +2040,7 @@ int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel) * The calling functions' error handling paths will cleanup the * empty channel directory. 
*/ + kobject_put(kobj); dev_err(device, "Unable to set up channel sysfs files\n"); return ret; } @@ -2079,7 +2082,6 @@ struct hv_device *vmbus_device_create(const guid_t *type, return child_device_obj; } -static u64 vmbus_dma_mask = DMA_BIT_MASK(64); /* * vmbus_device_register - Register the child device */ @@ -2120,8 +2122,9 @@ int vmbus_device_register(struct hv_device *child_device_obj) } hv_debug_add_dev_dir(child_device_obj); - child_device_obj->device.dma_mask = &vmbus_dma_mask; child_device_obj->device.dma_parms = &child_device_obj->dma_parms; + child_device_obj->device.dma_mask = &child_device_obj->dma_mask; + dma_set_mask(&child_device_obj->device, DMA_BIT_MASK(64)); return 0; err_kset_unregister: diff --git a/drivers/hwmon/adcxx.c b/drivers/hwmon/adcxx.c index e5bc5ce09f4e..de37bce24fa6 100644 --- a/drivers/hwmon/adcxx.c +++ b/drivers/hwmon/adcxx.c @@ -194,7 +194,7 @@ out_err: return status; } -static int adcxx_remove(struct spi_device *spi) +static void adcxx_remove(struct spi_device *spi) { struct adcxx *adc = spi_get_drvdata(spi); int i; @@ -205,8 +205,6 @@ static int adcxx_remove(struct spi_device *spi) device_remove_file(&spi->dev, &ad_input[i].dev_attr); mutex_unlock(&adc->lock); - - return 0; } static const struct spi_device_id adcxx_ids[] = { diff --git a/drivers/hwmon/adt7310.c b/drivers/hwmon/adt7310.c index c40cac16af68..832d9ec64934 100644 --- a/drivers/hwmon/adt7310.c +++ b/drivers/hwmon/adt7310.c @@ -88,10 +88,9 @@ static int adt7310_spi_probe(struct spi_device *spi) &adt7310_spi_ops); } -static int adt7310_spi_remove(struct spi_device *spi) +static void adt7310_spi_remove(struct spi_device *spi) { adt7x10_remove(&spi->dev, spi->irq); - return 0; } static const struct spi_device_id adt7310_id[] = { diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c index 3501a3ead4ba..3ae961986fc3 100644 --- a/drivers/hwmon/hwmon.c +++ b/drivers/hwmon/hwmon.c @@ -214,12 +214,14 @@ static int hwmon_thermal_add_sensor(struct device *dev, int index) tzd = devm_thermal_zone_of_sensor_register(dev, index, tdata, &hwmon_thermal_ops); - /* - * If CONFIG_THERMAL_OF is disabled, this returns -ENODEV, - * so ignore that error but forward any other error. 
- */ - if (IS_ERR(tzd) && (PTR_ERR(tzd) != -ENODEV)) - return PTR_ERR(tzd); + if (IS_ERR(tzd)) { + if (PTR_ERR(tzd) != -ENODEV) + return PTR_ERR(tzd); + dev_info(dev, "temp%d_input not attached to any thermal zone\n", + index + 1); + devm_kfree(dev, tdata); + return 0; + } err = devm_add_action(dev, hwmon_thermal_remove_sensor, &tdata->node); if (err) diff --git a/drivers/hwmon/max1111.c b/drivers/hwmon/max1111.c index 5fcfd57df61e..4c5487aeb3cf 100644 --- a/drivers/hwmon/max1111.c +++ b/drivers/hwmon/max1111.c @@ -254,7 +254,7 @@ err_remove: return err; } -static int max1111_remove(struct spi_device *spi) +static void max1111_remove(struct spi_device *spi) { struct max1111_data *data = spi_get_drvdata(spi); @@ -265,7 +265,6 @@ static int max1111_remove(struct spi_device *spi) sysfs_remove_group(&spi->dev.kobj, &max1110_attr_group); sysfs_remove_group(&spi->dev.kobj, &max1111_attr_group); mutex_destroy(&data->drvdata_lock); - return 0; } static const struct spi_device_id max1111_ids[] = { diff --git a/drivers/hwmon/max31722.c b/drivers/hwmon/max31722.c index 4cf4fe6809a3..93e048ee4955 100644 --- a/drivers/hwmon/max31722.c +++ b/drivers/hwmon/max31722.c @@ -100,7 +100,7 @@ static int max31722_probe(struct spi_device *spi) return 0; } -static int max31722_remove(struct spi_device *spi) +static void max31722_remove(struct spi_device *spi) { struct max31722_data *data = spi_get_drvdata(spi); int ret; @@ -111,8 +111,6 @@ static int max31722_remove(struct spi_device *spi) if (ret) /* There is nothing we can do about this ... */ dev_warn(&spi->dev, "Failed to put device in stand-by mode\n"); - - return 0; } static int __maybe_unused max31722_suspend(struct device *dev) diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c index 414204f5704c..9c9e9f4ccb9e 100644 --- a/drivers/hwmon/ntc_thermistor.c +++ b/drivers/hwmon/ntc_thermistor.c @@ -59,7 +59,7 @@ static const struct platform_device_id ntc_thermistor_id[] = { [NTC_NCP15XH103] = { "ncp15xh103", TYPE_NCPXXXH103 }, [NTC_NCP18WB473] = { "ncp18wb473", TYPE_NCPXXWB473 }, [NTC_NCP21WB473] = { "ncp21wb473", TYPE_NCPXXWB473 }, - [NTC_SSG1404001221] = { "ssg1404-001221", TYPE_NCPXXWB473 }, + [NTC_SSG1404001221] = { "ssg1404_001221", TYPE_NCPXXWB473 }, [NTC_LAST] = { }, }; diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c index 776ee2237be2..ac2fbee1ba9c 100644 --- a/drivers/hwmon/pmbus/pmbus_core.c +++ b/drivers/hwmon/pmbus/pmbus_core.c @@ -911,6 +911,11 @@ static int pmbus_get_boolean(struct i2c_client *client, struct pmbus_boolean *b, pmbus_update_sensor_data(client, s2); regval = status & mask; + if (regval) { + ret = pmbus_write_byte_data(client, page, reg, regval); + if (ret) + goto unlock; + } if (s1 && s2) { s64 v1, v2; diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 42da31c1ab70..8a6c6ee28556 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -488,7 +488,7 @@ config I2C_BRCMSTB config I2C_CADENCE tristate "Cadence I2C Controller" - depends on ARCH_ZYNQ || ARM64 || XTENSA + depends on ARCH_ZYNQ || ARM64 || XTENSA || COMPILE_TEST help Say yes here to select Cadence I2C Host Controller. This controller is e.g. used by Xilinx Zynq. 
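The I2C_CADENCE hunk above and the I2C_IMX and I2C_QUP hunks below all add "|| COMPILE_TEST" to the dependency list, so these controllers can be build-tested on architectures that will never run them. A minimal sketch of the C-side idiom that usually accompanies such a Kconfig relaxation, using IS_ENABLED() so the compiler still parses and type-checks the architecture-specific branch; the helper and messages here are hypothetical, not taken from these drivers:

	#include <linux/kconfig.h>
	#include <linux/printk.h>

	/* Hypothetical helper, for illustration only. */
	static void example_apply_soc_setup(void)
	{
		/*
		 * IS_ENABLED() evaluates to 0 or 1 at compile time, but both
		 * branches stay visible to the compiler. That is what makes
		 * COMPILE_TEST coverage useful: dead paths are still parsed
		 * and type-checked instead of being hidden behind #ifdef.
		 */
		if (IS_ENABLED(CONFIG_ARCH_ZYNQ))
			pr_info("Zynq-specific setup\n");
		else
			pr_info("generic setup\n");
	}
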
@@ -680,7 +680,7 @@ config I2C_IMG config I2C_IMX tristate "IMX I2C interface" - depends on ARCH_MXC || ARCH_LAYERSCAPE || COLDFIRE + depends on ARCH_MXC || ARCH_LAYERSCAPE || COLDFIRE || COMPILE_TEST select I2C_SLAVE help Say Y here if you want to use the IIC bus controller on @@ -935,7 +935,7 @@ config I2C_QCOM_GENI config I2C_QUP tristate "Qualcomm QUP based I2C controller" - depends on ARCH_QCOM + depends on ARCH_QCOM || COMPILE_TEST help If you say yes to this option, support will be included for the built-in I2C interface on the Qualcomm SoCs. diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c index dfc534065595..5149454eef4a 100644 --- a/drivers/i2c/busses/i2c-bcm2835.c +++ b/drivers/i2c/busses/i2c-bcm2835.c @@ -23,6 +23,11 @@ #define BCM2835_I2C_FIFO 0x10 #define BCM2835_I2C_DIV 0x14 #define BCM2835_I2C_DEL 0x18 +/* + * 16-bit field for the number of SCL cycles to wait after rising SCL + * before deciding the slave is not responding. 0 disables the + * timeout detection. + */ #define BCM2835_I2C_CLKT 0x1c #define BCM2835_I2C_C_READ BIT(0) @@ -474,6 +479,12 @@ static int bcm2835_i2c_probe(struct platform_device *pdev) adap->dev.of_node = pdev->dev.of_node; adap->quirks = of_device_get_match_data(&pdev->dev); + /* + * Disable the hardware clock stretching timeout. SMBUS + * specifies a limit for how long the device can stretch the + * clock, but core I2C doesn't. + */ + bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_CLKT, 0); bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, 0); ret = i2c_add_adapter(adap); diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c index 490ee3962645..b00f35c0b066 100644 --- a/drivers/i2c/busses/i2c-brcmstb.c +++ b/drivers/i2c/busses/i2c-brcmstb.c @@ -673,7 +673,7 @@ static int brcmstb_i2c_probe(struct platform_device *pdev) /* set the data in/out register size for compatible SoCs */ if (of_device_is_compatible(dev->device->of_node, - "brcmstb,brcmper-i2c")) + "brcm,brcmper-i2c")) dev->data_regsz = sizeof(u8); else dev->data_regsz = sizeof(u32); diff --git a/drivers/i2c/busses/i2c-qcom-cci.c b/drivers/i2c/busses/i2c-qcom-cci.c index c1de8eb66169..cf54f1cb4c57 100644 --- a/drivers/i2c/busses/i2c-qcom-cci.c +++ b/drivers/i2c/busses/i2c-qcom-cci.c @@ -558,7 +558,7 @@ static int cci_probe(struct platform_device *pdev) cci->master[idx].adap.quirks = &cci->data->quirks; cci->master[idx].adap.algo = &cci_algo; cci->master[idx].adap.dev.parent = dev; - cci->master[idx].adap.dev.of_node = child; + cci->master[idx].adap.dev.of_node = of_node_get(child); cci->master[idx].master = idx; cci->master[idx].cci = cci; @@ -643,8 +643,10 @@ static int cci_probe(struct platform_device *pdev) continue; ret = i2c_add_adapter(&cci->master[i].adap); - if (ret < 0) + if (ret < 0) { + of_node_put(cci->master[i].adap.dev.of_node); goto error_i2c; + } } pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC); @@ -655,9 +657,11 @@ static int cci_probe(struct platform_device *pdev) return 0; error_i2c: - for (; i >= 0; i--) { - if (cci->master[i].cci) + for (--i ; i >= 0; i--) { + if (cci->master[i].cci) { i2c_del_adapter(&cci->master[i].adap); + of_node_put(cci->master[i].adap.dev.of_node); + } } error: disable_irq(cci->irq); @@ -673,8 +677,10 @@ static int cci_remove(struct platform_device *pdev) int i; for (i = 0; i < cci->data->num_masters; i++) { - if (cci->master[i].cci) + if (cci->master[i].cci) { i2c_del_adapter(&cci->master[i].adap); + of_node_put(cci->master[i].adap.dev.of_node); + } cci_halt(cci, i); } diff --git 
a/drivers/iio/accel/bma400_spi.c b/drivers/iio/accel/bma400_spi.c index 9f622e37477b..9040a717b247 100644 --- a/drivers/iio/accel/bma400_spi.c +++ b/drivers/iio/accel/bma400_spi.c @@ -87,11 +87,9 @@ static int bma400_spi_probe(struct spi_device *spi) return bma400_probe(&spi->dev, regmap, id->name); } -static int bma400_spi_remove(struct spi_device *spi) +static void bma400_spi_remove(struct spi_device *spi) { bma400_remove(&spi->dev); - - return 0; } static const struct spi_device_id bma400_spi_ids[] = { diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c index e6081dd0a880..d11f668016a6 100644 --- a/drivers/iio/accel/bmc150-accel-core.c +++ b/drivers/iio/accel/bmc150-accel-core.c @@ -1783,11 +1783,14 @@ int bmc150_accel_core_probe(struct device *dev, struct regmap *regmap, int irq, ret = iio_device_register(indio_dev); if (ret < 0) { dev_err(dev, "Unable to register iio device\n"); - goto err_trigger_unregister; + goto err_pm_cleanup; } return 0; +err_pm_cleanup: + pm_runtime_dont_use_autosuspend(dev); + pm_runtime_disable(dev); err_trigger_unregister: bmc150_accel_unregister_triggers(data, BMC150_ACCEL_TRIGGERS - 1); err_buffer_cleanup: diff --git a/drivers/iio/accel/bmc150-accel-spi.c b/drivers/iio/accel/bmc150-accel-spi.c index 11559567cb39..80007cc2d044 100644 --- a/drivers/iio/accel/bmc150-accel-spi.c +++ b/drivers/iio/accel/bmc150-accel-spi.c @@ -35,11 +35,9 @@ static int bmc150_accel_probe(struct spi_device *spi) true); } -static int bmc150_accel_remove(struct spi_device *spi) +static void bmc150_accel_remove(struct spi_device *spi) { bmc150_accel_core_remove(&spi->dev); - - return 0; } static const struct acpi_device_id bmc150_accel_acpi_match[] = { diff --git a/drivers/iio/accel/bmi088-accel-spi.c b/drivers/iio/accel/bmi088-accel-spi.c index 758ad2f12896..06d99d9949f3 100644 --- a/drivers/iio/accel/bmi088-accel-spi.c +++ b/drivers/iio/accel/bmi088-accel-spi.c @@ -56,11 +56,9 @@ static int bmi088_accel_probe(struct spi_device *spi) true); } -static int bmi088_accel_remove(struct spi_device *spi) +static void bmi088_accel_remove(struct spi_device *spi) { bmi088_accel_core_remove(&spi->dev); - - return 0; } static const struct spi_device_id bmi088_accel_id[] = { diff --git a/drivers/iio/accel/fxls8962af-core.c b/drivers/iio/accel/fxls8962af-core.c index 32989d91b982..f7fd9e046588 100644 --- a/drivers/iio/accel/fxls8962af-core.c +++ b/drivers/iio/accel/fxls8962af-core.c @@ -173,12 +173,20 @@ struct fxls8962af_data { u16 upper_thres; }; -const struct regmap_config fxls8962af_regmap_conf = { +const struct regmap_config fxls8962af_i2c_regmap_conf = { .reg_bits = 8, .val_bits = 8, .max_register = FXLS8962AF_MAX_REG, }; -EXPORT_SYMBOL_GPL(fxls8962af_regmap_conf); +EXPORT_SYMBOL_GPL(fxls8962af_i2c_regmap_conf); + +const struct regmap_config fxls8962af_spi_regmap_conf = { + .reg_bits = 8, + .pad_bits = 8, + .val_bits = 8, + .max_register = FXLS8962AF_MAX_REG, +}; +EXPORT_SYMBOL_GPL(fxls8962af_spi_regmap_conf); enum { fxls8962af_idx_x, diff --git a/drivers/iio/accel/fxls8962af-i2c.c b/drivers/iio/accel/fxls8962af-i2c.c index cfb004b20455..6bde9891effb 100644 --- a/drivers/iio/accel/fxls8962af-i2c.c +++ b/drivers/iio/accel/fxls8962af-i2c.c @@ -18,7 +18,7 @@ static int fxls8962af_probe(struct i2c_client *client) { struct regmap *regmap; - regmap = devm_regmap_init_i2c(client, &fxls8962af_regmap_conf); + regmap = devm_regmap_init_i2c(client, &fxls8962af_i2c_regmap_conf); if (IS_ERR(regmap)) { dev_err(&client->dev, "Failed to initialize i2c regmap\n"); 
return PTR_ERR(regmap); diff --git a/drivers/iio/accel/fxls8962af-spi.c b/drivers/iio/accel/fxls8962af-spi.c index 57108d3d480b..6f4dff3238d3 100644 --- a/drivers/iio/accel/fxls8962af-spi.c +++ b/drivers/iio/accel/fxls8962af-spi.c @@ -18,7 +18,7 @@ static int fxls8962af_probe(struct spi_device *spi) { struct regmap *regmap; - regmap = devm_regmap_init_spi(spi, &fxls8962af_regmap_conf); + regmap = devm_regmap_init_spi(spi, &fxls8962af_spi_regmap_conf); if (IS_ERR(regmap)) { dev_err(&spi->dev, "Failed to initialize spi regmap\n"); return PTR_ERR(regmap); diff --git a/drivers/iio/accel/fxls8962af.h b/drivers/iio/accel/fxls8962af.h index b67572c3ef06..9cbe98c3ba9a 100644 --- a/drivers/iio/accel/fxls8962af.h +++ b/drivers/iio/accel/fxls8962af.h @@ -17,6 +17,7 @@ int fxls8962af_core_probe(struct device *dev, struct regmap *regmap, int irq); int fxls8962af_core_remove(struct device *dev); extern const struct dev_pm_ops fxls8962af_pm_ops; -extern const struct regmap_config fxls8962af_regmap_conf; +extern const struct regmap_config fxls8962af_i2c_regmap_conf; +extern const struct regmap_config fxls8962af_spi_regmap_conf; #endif /* _FXLS8962AF_H_ */ diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c index 0fe570316848..ac74cdcd2bc8 100644 --- a/drivers/iio/accel/kxcjk-1013.c +++ b/drivers/iio/accel/kxcjk-1013.c @@ -1590,11 +1590,14 @@ static int kxcjk1013_probe(struct i2c_client *client, ret = iio_device_register(indio_dev); if (ret < 0) { dev_err(&client->dev, "unable to register iio device\n"); - goto err_buffer_cleanup; + goto err_pm_cleanup; } return 0; +err_pm_cleanup: + pm_runtime_dont_use_autosuspend(&client->dev); + pm_runtime_disable(&client->dev); err_buffer_cleanup: iio_triggered_buffer_cleanup(indio_dev); err_trigger_unregister: diff --git a/drivers/iio/accel/kxsd9-spi.c b/drivers/iio/accel/kxsd9-spi.c index 441e6b764281..57c451cfb9e5 100644 --- a/drivers/iio/accel/kxsd9-spi.c +++ b/drivers/iio/accel/kxsd9-spi.c @@ -32,11 +32,9 @@ static int kxsd9_spi_probe(struct spi_device *spi) spi_get_device_id(spi)->name); } -static int kxsd9_spi_remove(struct spi_device *spi) +static void kxsd9_spi_remove(struct spi_device *spi) { kxsd9_common_remove(&spi->dev); - - return 0; } static const struct spi_device_id kxsd9_spi_id[] = { diff --git a/drivers/iio/accel/mma7455_spi.c b/drivers/iio/accel/mma7455_spi.c index ecf690692dcc..b746031551a3 100644 --- a/drivers/iio/accel/mma7455_spi.c +++ b/drivers/iio/accel/mma7455_spi.c @@ -22,11 +22,9 @@ static int mma7455_spi_probe(struct spi_device *spi) return mma7455_core_probe(&spi->dev, regmap, id->name); } -static int mma7455_spi_remove(struct spi_device *spi) +static void mma7455_spi_remove(struct spi_device *spi) { mma7455_core_remove(&spi->dev); - - return 0; } static const struct spi_device_id mma7455_spi_ids[] = { diff --git a/drivers/iio/accel/mma9551.c b/drivers/iio/accel/mma9551.c index 4c359fb05480..c53a3398b14c 100644 --- a/drivers/iio/accel/mma9551.c +++ b/drivers/iio/accel/mma9551.c @@ -495,11 +495,14 @@ static int mma9551_probe(struct i2c_client *client, ret = iio_device_register(indio_dev); if (ret < 0) { dev_err(&client->dev, "unable to register iio device\n"); - goto out_poweroff; + goto err_pm_cleanup; } return 0; +err_pm_cleanup: + pm_runtime_dont_use_autosuspend(&client->dev); + pm_runtime_disable(&client->dev); out_poweroff: mma9551_set_device_state(client, false); diff --git a/drivers/iio/accel/mma9553.c b/drivers/iio/accel/mma9553.c index 0570ab1cc064..5ff6bc70708b 100644 --- a/drivers/iio/accel/mma9553.c +++ 
b/drivers/iio/accel/mma9553.c @@ -1134,12 +1134,15 @@ static int mma9553_probe(struct i2c_client *client, ret = iio_device_register(indio_dev); if (ret < 0) { dev_err(&client->dev, "unable to register iio device\n"); - goto out_poweroff; + goto err_pm_cleanup; } dev_dbg(&indio_dev->dev, "Registered device %s\n", name); return 0; +err_pm_cleanup: + pm_runtime_dont_use_autosuspend(&client->dev); + pm_runtime_disable(&client->dev); out_poweroff: mma9551_set_device_state(client, false); return ret; diff --git a/drivers/iio/accel/sca3000.c b/drivers/iio/accel/sca3000.c index 43ecacbdc95a..83c81072511e 100644 --- a/drivers/iio/accel/sca3000.c +++ b/drivers/iio/accel/sca3000.c @@ -1524,7 +1524,7 @@ error_ret: return ret; } -static int sca3000_remove(struct spi_device *spi) +static void sca3000_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); struct sca3000_state *st = iio_priv(indio_dev); @@ -1535,8 +1535,6 @@ static int sca3000_remove(struct spi_device *spi) sca3000_stop_all_interrupts(st); if (spi->irq) free_irq(spi->irq, indio_dev); - - return 0; } static const struct spi_device_id sca3000_id[] = { diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c index bc2cfa5f9592..b400bbe291aa 100644 --- a/drivers/iio/adc/ad7124.c +++ b/drivers/iio/adc/ad7124.c @@ -76,7 +76,7 @@ #define AD7124_CONFIG_REF_SEL(x) FIELD_PREP(AD7124_CONFIG_REF_SEL_MSK, x) #define AD7124_CONFIG_PGA_MSK GENMASK(2, 0) #define AD7124_CONFIG_PGA(x) FIELD_PREP(AD7124_CONFIG_PGA_MSK, x) -#define AD7124_CONFIG_IN_BUFF_MSK GENMASK(7, 6) +#define AD7124_CONFIG_IN_BUFF_MSK GENMASK(6, 5) #define AD7124_CONFIG_IN_BUFF(x) FIELD_PREP(AD7124_CONFIG_IN_BUFF_MSK, x) /* AD7124_FILTER_X */ diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c index 1d345d66742d..c17d9b5fbaf6 100644 --- a/drivers/iio/adc/ad7266.c +++ b/drivers/iio/adc/ad7266.c @@ -479,7 +479,7 @@ error_disable_reg: return ret; } -static int ad7266_remove(struct spi_device *spi) +static void ad7266_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); struct ad7266_state *st = iio_priv(indio_dev); @@ -488,8 +488,6 @@ static int ad7266_remove(struct spi_device *spi) iio_triggered_buffer_cleanup(indio_dev); if (!IS_ERR(st->reg)) regulator_disable(st->reg); - - return 0; } static const struct spi_device_id ad7266_id[] = { diff --git a/drivers/iio/adc/ltc2496.c b/drivers/iio/adc/ltc2496.c index dd956a7c216e..5a55f79f2574 100644 --- a/drivers/iio/adc/ltc2496.c +++ b/drivers/iio/adc/ltc2496.c @@ -78,13 +78,11 @@ static int ltc2496_probe(struct spi_device *spi) return ltc2497core_probe(dev, indio_dev); } -static int ltc2496_remove(struct spi_device *spi) +static void ltc2496_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); ltc2497core_remove(indio_dev); - - return 0; } static const struct of_device_id ltc2496_of_match[] = { diff --git a/drivers/iio/adc/mcp320x.c b/drivers/iio/adc/mcp320x.c index 8d1cff28cae0..b4c69acb33e3 100644 --- a/drivers/iio/adc/mcp320x.c +++ b/drivers/iio/adc/mcp320x.c @@ -459,15 +459,13 @@ reg_disable: return ret; } -static int mcp320x_remove(struct spi_device *spi) +static void mcp320x_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); struct mcp320x *adc = iio_priv(indio_dev); iio_device_unregister(indio_dev); regulator_disable(adc->reg); - - return 0; } static const struct of_device_id mcp320x_dt_ids[] = { diff --git a/drivers/iio/adc/mcp3911.c b/drivers/iio/adc/mcp3911.c index 13535f148c4c..1cb4590fe412 100644 
--- a/drivers/iio/adc/mcp3911.c +++ b/drivers/iio/adc/mcp3911.c @@ -321,7 +321,7 @@ reg_disable: return ret; } -static int mcp3911_remove(struct spi_device *spi) +static void mcp3911_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); struct mcp3911 *adc = iio_priv(indio_dev); @@ -331,8 +331,6 @@ static int mcp3911_remove(struct spi_device *spi) clk_disable_unprepare(adc->clki); if (adc->vref) regulator_disable(adc->vref); - - return 0; } static const struct of_device_id mcp3911_dt_ids[] = { diff --git a/drivers/iio/adc/men_z188_adc.c b/drivers/iio/adc/men_z188_adc.c index 42ea8bc7e780..adc5ceaef8c9 100644 --- a/drivers/iio/adc/men_z188_adc.c +++ b/drivers/iio/adc/men_z188_adc.c @@ -103,6 +103,7 @@ static int men_z188_probe(struct mcb_device *dev, struct z188_adc *adc; struct iio_dev *indio_dev; struct resource *mem; + int ret; indio_dev = devm_iio_device_alloc(&dev->dev, sizeof(struct z188_adc)); if (!indio_dev) @@ -128,8 +129,14 @@ static int men_z188_probe(struct mcb_device *dev, adc->mem = mem; mcb_set_drvdata(dev, indio_dev); - return iio_device_register(indio_dev); + ret = iio_device_register(indio_dev); + if (ret) + goto err_unmap; + + return 0; +err_unmap: + iounmap(adc->base); err: mcb_release_mem(mem); return -ENXIO; diff --git a/drivers/iio/adc/ti-adc12138.c b/drivers/iio/adc/ti-adc12138.c index 6eb62b564dae..59d75d09604f 100644 --- a/drivers/iio/adc/ti-adc12138.c +++ b/drivers/iio/adc/ti-adc12138.c @@ -503,7 +503,7 @@ err_clk_disable: return ret; } -static int adc12138_remove(struct spi_device *spi) +static void adc12138_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); struct adc12138 *adc = iio_priv(indio_dev); @@ -514,8 +514,6 @@ static int adc12138_remove(struct spi_device *spi) regulator_disable(adc->vref_n); regulator_disable(adc->vref_p); clk_disable_unprepare(adc->cclk); - - return 0; } static const struct of_device_id adc12138_dt_ids[] = { diff --git a/drivers/iio/adc/ti-ads7950.c b/drivers/iio/adc/ti-ads7950.c index a7efa3eada2c..e3658b969c5b 100644 --- a/drivers/iio/adc/ti-ads7950.c +++ b/drivers/iio/adc/ti-ads7950.c @@ -662,7 +662,7 @@ error_destroy_mutex: return ret; } -static int ti_ads7950_remove(struct spi_device *spi) +static void ti_ads7950_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); struct ti_ads7950_state *st = iio_priv(indio_dev); @@ -672,8 +672,6 @@ static int ti_ads7950_remove(struct spi_device *spi) iio_triggered_buffer_cleanup(indio_dev); regulator_disable(st->reg); mutex_destroy(&st->slock); - - return 0; } static const struct spi_device_id ti_ads7950_id[] = { diff --git a/drivers/iio/adc/ti-ads8688.c b/drivers/iio/adc/ti-ads8688.c index 2e24717d7f55..22c2583eedd0 100644 --- a/drivers/iio/adc/ti-ads8688.c +++ b/drivers/iio/adc/ti-ads8688.c @@ -479,7 +479,7 @@ err_regulator_disable: return ret; } -static int ads8688_remove(struct spi_device *spi) +static void ads8688_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); struct ads8688_state *st = iio_priv(indio_dev); @@ -489,8 +489,6 @@ static int ads8688_remove(struct spi_device *spi) if (!IS_ERR(st->reg)) regulator_disable(st->reg); - - return 0; } static const struct spi_device_id ads8688_id[] = { diff --git a/drivers/iio/adc/ti-tlc4541.c b/drivers/iio/adc/ti-tlc4541.c index 403b787f9f7e..2406eda9dfc6 100644 --- a/drivers/iio/adc/ti-tlc4541.c +++ b/drivers/iio/adc/ti-tlc4541.c @@ -224,7 +224,7 @@ error_disable_reg: return ret; } -static int tlc4541_remove(struct 
spi_device *spi) +static void tlc4541_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); struct tlc4541_state *st = iio_priv(indio_dev); @@ -232,8 +232,6 @@ static int tlc4541_remove(struct spi_device *spi) iio_device_unregister(indio_dev); iio_triggered_buffer_cleanup(indio_dev); regulator_disable(st->reg); - - return 0; } static const struct of_device_id tlc4541_dt_ids[] = { diff --git a/drivers/iio/adc/ti-tsc2046.c b/drivers/iio/adc/ti-tsc2046.c index d84ae6b008c1..e8fc4d01f30b 100644 --- a/drivers/iio/adc/ti-tsc2046.c +++ b/drivers/iio/adc/ti-tsc2046.c @@ -388,7 +388,7 @@ static int tsc2046_adc_update_scan_mode(struct iio_dev *indio_dev, mutex_lock(&priv->slock); size = 0; - for_each_set_bit(ch_idx, active_scan_mask, indio_dev->num_channels) { + for_each_set_bit(ch_idx, active_scan_mask, ARRAY_SIZE(priv->l)) { size += tsc2046_adc_group_set_layout(priv, group, ch_idx); tsc2046_adc_group_set_cmd(priv, group, ch_idx); group++; @@ -548,7 +548,7 @@ static int tsc2046_adc_setup_spi_msg(struct tsc2046_adc_priv *priv) * enabled. */ size = 0; - for (ch_idx = 0; ch_idx < priv->dcfg->num_channels; ch_idx++) + for (ch_idx = 0; ch_idx < ARRAY_SIZE(priv->l); ch_idx++) size += tsc2046_adc_group_set_layout(priv, ch_idx, ch_idx); priv->tx = devm_kzalloc(&priv->spi->dev, size, GFP_KERNEL); diff --git a/drivers/iio/addac/ad74413r.c b/drivers/iio/addac/ad74413r.c index 5271073bb74e..acd230a6af35 100644 --- a/drivers/iio/addac/ad74413r.c +++ b/drivers/iio/addac/ad74413r.c @@ -134,7 +134,6 @@ struct ad74413r_state { #define AD74413R_CH_EN_MASK(x) BIT(x) #define AD74413R_REG_DIN_COMP_OUT 0x25 -#define AD74413R_DIN_COMP_OUT_SHIFT_X(x) x #define AD74413R_REG_ADC_RESULT_X(x) (0x26 + (x)) #define AD74413R_ADC_RESULT_MAX GENMASK(15, 0) @@ -288,7 +287,7 @@ static void ad74413r_gpio_set_multiple(struct gpio_chip *chip, unsigned int offset = 0; int ret; - for_each_set_bit_from(offset, mask, AD74413R_CHANNEL_MAX) { + for_each_set_bit_from(offset, mask, chip->ngpio) { unsigned int real_offset = st->gpo_gpio_offsets[offset]; ret = ad74413r_set_gpo_config(st, real_offset, @@ -316,7 +315,7 @@ static int ad74413r_gpio_get(struct gpio_chip *chip, unsigned int offset) if (ret) return ret; - status &= AD74413R_DIN_COMP_OUT_SHIFT_X(real_offset); + status &= BIT(real_offset); return status ? 
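
The tsc2046 and ad74413r hunks around this point fix the same class of bug: the nbits limit passed to for_each_set_bit()/for_each_set_bit_from() must be the width of the bitmap actually being walked (ARRAY_SIZE(priv->l), chip->ngpio), not an unrelated channel count. The ad74413r get_multiple fix below also swaps an OR-only update for __assign_bit(), which can clear bits as well as set them. An illustrative sketch with made-up names:

	#include <linux/bitops.h>

	#define FOO_NGPIO 8	/* hypothetical width of the mask/bits bitmaps */

	static void foo_read_lines(const unsigned long *mask, unsigned long *bits,
				   unsigned long hw_status)
	{
		unsigned int offset = 0;

		/* Bound the walk by the bitmap's real width... */
		for_each_set_bit_from(offset, mask, FOO_NGPIO) {
			/* ...and let __assign_bit() both set and clear a bit,
			 * instead of only OR-ing bits in as the old code did. */
			__assign_bit(offset, bits, hw_status & BIT(offset));
		}
	}
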
1 : 0; } @@ -334,11 +333,10 @@ static int ad74413r_gpio_get_multiple(struct gpio_chip *chip, if (ret) return ret; - for_each_set_bit_from(offset, mask, AD74413R_CHANNEL_MAX) { + for_each_set_bit_from(offset, mask, chip->ngpio) { unsigned int real_offset = st->comp_gpio_offsets[offset]; - if (val & BIT(real_offset)) - *bits |= offset; + __assign_bit(offset, bits, val & BIT(real_offset)); } return ret; @@ -840,7 +838,7 @@ static int ad74413r_update_scan_mode(struct iio_dev *indio_dev, { struct ad74413r_state *st = iio_priv(indio_dev); struct spi_transfer *xfer = st->adc_samples_xfer; - u8 *rx_buf = &st->adc_samples_buf.rx_buf[-1 * AD74413R_FRAME_SIZE]; + u8 *rx_buf = st->adc_samples_buf.rx_buf; u8 *tx_buf = st->adc_samples_tx_buf; unsigned int channel; int ret = -EINVAL; @@ -894,9 +892,10 @@ static int ad74413r_update_scan_mode(struct iio_dev *indio_dev, spi_message_add_tail(xfer, &st->adc_samples_msg); - xfer++; tx_buf += AD74413R_FRAME_SIZE; - rx_buf += AD74413R_FRAME_SIZE; + if (xfer != st->adc_samples_xfer) + rx_buf += AD74413R_FRAME_SIZE; + xfer++; } xfer->rx_buf = rx_buf; diff --git a/drivers/iio/amplifiers/ad8366.c b/drivers/iio/amplifiers/ad8366.c index cfcf18a0bce8..1134ae12e531 100644 --- a/drivers/iio/amplifiers/ad8366.c +++ b/drivers/iio/amplifiers/ad8366.c @@ -298,7 +298,7 @@ error_disable_reg: return ret; } -static int ad8366_remove(struct spi_device *spi) +static void ad8366_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); struct ad8366_state *st = iio_priv(indio_dev); @@ -308,8 +308,6 @@ static int ad8366_remove(struct spi_device *spi) if (!IS_ERR(reg)) regulator_disable(reg); - - return 0; } static const struct spi_device_id ad8366_id[] = { diff --git a/drivers/iio/common/ssp_sensors/ssp_dev.c b/drivers/iio/common/ssp_sensors/ssp_dev.c index 1aee87100038..eafaf4529df5 100644 --- a/drivers/iio/common/ssp_sensors/ssp_dev.c +++ b/drivers/iio/common/ssp_sensors/ssp_dev.c @@ -586,7 +586,7 @@ err_setup_irq: return ret; } -static int ssp_remove(struct spi_device *spi) +static void ssp_remove(struct spi_device *spi) { struct ssp_data *data = spi_get_drvdata(spi); @@ -608,8 +608,6 @@ static int ssp_remove(struct spi_device *spi) mutex_destroy(&data->pending_lock); mfd_remove_devices(&spi->dev); - - return 0; } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/iio/dac/ad5360.c b/drivers/iio/dac/ad5360.c index 2d3b14c407d8..ecbc6a51d60f 100644 --- a/drivers/iio/dac/ad5360.c +++ b/drivers/iio/dac/ad5360.c @@ -521,7 +521,7 @@ error_free_channels: return ret; } -static int ad5360_remove(struct spi_device *spi) +static void ad5360_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); struct ad5360_state *st = iio_priv(indio_dev); @@ -531,8 +531,6 @@ static int ad5360_remove(struct spi_device *spi) kfree(indio_dev->channels); regulator_bulk_disable(st->chip_info->num_vrefs, st->vref_reg); - - return 0; } static const struct spi_device_id ad5360_ids[] = { diff --git a/drivers/iio/dac/ad5380.c b/drivers/iio/dac/ad5380.c index e38860a6a9f3..82e1d9bd773e 100644 --- a/drivers/iio/dac/ad5380.c +++ b/drivers/iio/dac/ad5380.c @@ -488,11 +488,9 @@ static int ad5380_spi_probe(struct spi_device *spi) return ad5380_probe(&spi->dev, regmap, id->driver_data, id->name); } -static int ad5380_spi_remove(struct spi_device *spi) +static void ad5380_spi_remove(struct spi_device *spi) { ad5380_remove(&spi->dev); - - return 0; } static const struct spi_device_id ad5380_spi_ids[] = { diff --git a/drivers/iio/dac/ad5446.c b/drivers/iio/dac/ad5446.c index 
1c9b54c012a7..14cfabacbea5 100644 --- a/drivers/iio/dac/ad5446.c +++ b/drivers/iio/dac/ad5446.c @@ -491,11 +491,9 @@ static int ad5446_spi_probe(struct spi_device *spi) &ad5446_spi_chip_info[id->driver_data]); } -static int ad5446_spi_remove(struct spi_device *spi) +static void ad5446_spi_remove(struct spi_device *spi) { ad5446_remove(&spi->dev); - - return 0; } static struct spi_driver ad5446_spi_driver = { diff --git a/drivers/iio/dac/ad5449.c b/drivers/iio/dac/ad5449.c index f5e93c6acc9d..bad9bdaafa94 100644 --- a/drivers/iio/dac/ad5449.c +++ b/drivers/iio/dac/ad5449.c @@ -330,7 +330,7 @@ error_disable_reg: return ret; } -static int ad5449_spi_remove(struct spi_device *spi) +static void ad5449_spi_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); struct ad5449 *st = iio_priv(indio_dev); @@ -338,8 +338,6 @@ static int ad5449_spi_remove(struct spi_device *spi) iio_device_unregister(indio_dev); regulator_bulk_disable(st->chip_info->num_channels, st->vref_reg); - - return 0; } static const struct spi_device_id ad5449_spi_ids[] = { diff --git a/drivers/iio/dac/ad5504.c b/drivers/iio/dac/ad5504.c index b631261efa97..8507573aa13e 100644 --- a/drivers/iio/dac/ad5504.c +++ b/drivers/iio/dac/ad5504.c @@ -336,7 +336,7 @@ error_disable_reg: return ret; } -static int ad5504_remove(struct spi_device *spi) +static void ad5504_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); struct ad5504_state *st = iio_priv(indio_dev); @@ -345,8 +345,6 @@ static int ad5504_remove(struct spi_device *spi) if (!IS_ERR(st->reg)) regulator_disable(st->reg); - - return 0; } static const struct spi_device_id ad5504_id[] = { diff --git a/drivers/iio/dac/ad5592r.c b/drivers/iio/dac/ad5592r.c index 6bfd7951e18c..0f7abfa75bec 100644 --- a/drivers/iio/dac/ad5592r.c +++ b/drivers/iio/dac/ad5592r.c @@ -130,11 +130,9 @@ static int ad5592r_spi_probe(struct spi_device *spi) return ad5592r_probe(&spi->dev, id->name, &ad5592r_rw_ops); } -static int ad5592r_spi_remove(struct spi_device *spi) +static void ad5592r_spi_remove(struct spi_device *spi) { ad5592r_remove(&spi->dev); - - return 0; } static const struct spi_device_id ad5592r_spi_ids[] = { diff --git a/drivers/iio/dac/ad5624r_spi.c b/drivers/iio/dac/ad5624r_spi.c index 3c98941b9f99..371e812850eb 100644 --- a/drivers/iio/dac/ad5624r_spi.c +++ b/drivers/iio/dac/ad5624r_spi.c @@ -293,7 +293,7 @@ error_disable_reg: return ret; } -static int ad5624r_remove(struct spi_device *spi) +static void ad5624r_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); struct ad5624r_state *st = iio_priv(indio_dev); @@ -301,8 +301,6 @@ static int ad5624r_remove(struct spi_device *spi) iio_device_unregister(indio_dev); if (!IS_ERR(st->reg)) regulator_disable(st->reg); - - return 0; } static const struct spi_device_id ad5624r_id[] = { diff --git a/drivers/iio/dac/ad5686-spi.c b/drivers/iio/dac/ad5686-spi.c index 2628810fdbb1..d26fb29b6b04 100644 --- a/drivers/iio/dac/ad5686-spi.c +++ b/drivers/iio/dac/ad5686-spi.c @@ -95,11 +95,9 @@ static int ad5686_spi_probe(struct spi_device *spi) ad5686_spi_write, ad5686_spi_read); } -static int ad5686_spi_remove(struct spi_device *spi) +static void ad5686_spi_remove(struct spi_device *spi) { ad5686_remove(&spi->dev); - - return 0; } static const struct spi_device_id ad5686_spi_id[] = { diff --git a/drivers/iio/dac/ad5761.c b/drivers/iio/dac/ad5761.c index e37e095e94fc..4cb8471db81e 100644 --- a/drivers/iio/dac/ad5761.c +++ b/drivers/iio/dac/ad5761.c @@ -394,7 +394,7 @@ 
disable_regulator_err: return ret; } -static int ad5761_remove(struct spi_device *spi) +static void ad5761_remove(struct spi_device *spi) { struct iio_dev *iio_dev = spi_get_drvdata(spi); struct ad5761_state *st = iio_priv(iio_dev); @@ -403,8 +403,6 @@ static int ad5761_remove(struct spi_device *spi) if (!IS_ERR_OR_NULL(st->vref_reg)) regulator_disable(st->vref_reg); - - return 0; } static const struct spi_device_id ad5761_id[] = { diff --git a/drivers/iio/dac/ad5764.c b/drivers/iio/dac/ad5764.c index ae089b9145cb..d235a8047ba0 100644 --- a/drivers/iio/dac/ad5764.c +++ b/drivers/iio/dac/ad5764.c @@ -332,7 +332,7 @@ error_disable_reg: return ret; } -static int ad5764_remove(struct spi_device *spi) +static void ad5764_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); struct ad5764_state *st = iio_priv(indio_dev); @@ -341,8 +341,6 @@ static int ad5764_remove(struct spi_device *spi) if (st->chip_info->int_vref == 0) regulator_bulk_disable(ARRAY_SIZE(st->vref_reg), st->vref_reg); - - return 0; } static const struct spi_device_id ad5764_ids[] = { diff --git a/drivers/iio/dac/ad5791.c b/drivers/iio/dac/ad5791.c index 7b4579d73d18..2b14914b4050 100644 --- a/drivers/iio/dac/ad5791.c +++ b/drivers/iio/dac/ad5791.c @@ -428,7 +428,7 @@ error_disable_reg_pos: return ret; } -static int ad5791_remove(struct spi_device *spi) +static void ad5791_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); struct ad5791_state *st = iio_priv(indio_dev); @@ -439,8 +439,6 @@ static int ad5791_remove(struct spi_device *spi) if (!IS_ERR(st->reg_vss)) regulator_disable(st->reg_vss); - - return 0; } static const struct spi_device_id ad5791_id[] = { diff --git a/drivers/iio/dac/ad8801.c b/drivers/iio/dac/ad8801.c index 5ecfdad54dec..6be35c92d435 100644 --- a/drivers/iio/dac/ad8801.c +++ b/drivers/iio/dac/ad8801.c @@ -193,7 +193,7 @@ error_disable_vrefh_reg: return ret; } -static int ad8801_remove(struct spi_device *spi) +static void ad8801_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); struct ad8801_state *state = iio_priv(indio_dev); @@ -202,8 +202,6 @@ static int ad8801_remove(struct spi_device *spi) if (state->vrefl_reg) regulator_disable(state->vrefl_reg); regulator_disable(state->vrefh_reg); - - return 0; } static const struct spi_device_id ad8801_ids[] = { diff --git a/drivers/iio/dac/ltc1660.c b/drivers/iio/dac/ltc1660.c index f6ec9bf5815e..c76233c9bb72 100644 --- a/drivers/iio/dac/ltc1660.c +++ b/drivers/iio/dac/ltc1660.c @@ -206,15 +206,13 @@ error_disable_reg: return ret; } -static int ltc1660_remove(struct spi_device *spi) +static void ltc1660_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); struct ltc1660_priv *priv = iio_priv(indio_dev); iio_device_unregister(indio_dev); regulator_disable(priv->vref_reg); - - return 0; } static const struct of_device_id ltc1660_dt_ids[] = { diff --git a/drivers/iio/dac/ltc2632.c b/drivers/iio/dac/ltc2632.c index 53e4b887d372..aed46c80757e 100644 --- a/drivers/iio/dac/ltc2632.c +++ b/drivers/iio/dac/ltc2632.c @@ -372,7 +372,7 @@ static int ltc2632_probe(struct spi_device *spi) return iio_device_register(indio_dev); } -static int ltc2632_remove(struct spi_device *spi) +static void ltc2632_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); struct ltc2632_state *st = iio_priv(indio_dev); @@ -381,8 +381,6 @@ static int ltc2632_remove(struct spi_device *spi) if (st->vref_reg) regulator_disable(st->vref_reg); - - return 
0; } static const struct spi_device_id ltc2632_id[] = { diff --git a/drivers/iio/dac/mcp4922.c b/drivers/iio/dac/mcp4922.c index 0ae414ee1716..cb9e60e71b91 100644 --- a/drivers/iio/dac/mcp4922.c +++ b/drivers/iio/dac/mcp4922.c @@ -172,7 +172,7 @@ error_disable_reg: return ret; } -static int mcp4922_remove(struct spi_device *spi) +static void mcp4922_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); struct mcp4922_state *state; @@ -180,8 +180,6 @@ static int mcp4922_remove(struct spi_device *spi) iio_device_unregister(indio_dev); state = iio_priv(indio_dev); regulator_disable(state->vref_reg); - - return 0; } static const struct spi_device_id mcp4922_id[] = { diff --git a/drivers/iio/dac/ti-dac082s085.c b/drivers/iio/dac/ti-dac082s085.c index 6beda2193683..4e1156e6deb2 100644 --- a/drivers/iio/dac/ti-dac082s085.c +++ b/drivers/iio/dac/ti-dac082s085.c @@ -313,7 +313,7 @@ err: return ret; } -static int ti_dac_remove(struct spi_device *spi) +static void ti_dac_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); struct ti_dac_chip *ti_dac = iio_priv(indio_dev); @@ -321,8 +321,6 @@ static int ti_dac_remove(struct spi_device *spi) iio_device_unregister(indio_dev); mutex_destroy(&ti_dac->lock); regulator_disable(ti_dac->vref); - - return 0; } static const struct of_device_id ti_dac_of_id[] = { diff --git a/drivers/iio/dac/ti-dac7311.c b/drivers/iio/dac/ti-dac7311.c index 99f275829ec2..e10d17e60ed3 100644 --- a/drivers/iio/dac/ti-dac7311.c +++ b/drivers/iio/dac/ti-dac7311.c @@ -292,7 +292,7 @@ err: return ret; } -static int ti_dac_remove(struct spi_device *spi) +static void ti_dac_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); struct ti_dac_chip *ti_dac = iio_priv(indio_dev); @@ -300,7 +300,6 @@ static int ti_dac_remove(struct spi_device *spi) iio_device_unregister(indio_dev); mutex_destroy(&ti_dac->lock); regulator_disable(ti_dac->vref); - return 0; } static const struct of_device_id ti_dac_of_id[] = { diff --git a/drivers/iio/frequency/adf4350.c b/drivers/iio/frequency/adf4350.c index 3d9eba716b69..f3521330f6fb 100644 --- a/drivers/iio/frequency/adf4350.c +++ b/drivers/iio/frequency/adf4350.c @@ -589,7 +589,7 @@ error_disable_clk: return ret; } -static int adf4350_remove(struct spi_device *spi) +static void adf4350_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); struct adf4350_state *st = iio_priv(indio_dev); @@ -604,8 +604,6 @@ static int adf4350_remove(struct spi_device *spi) if (!IS_ERR(reg)) regulator_disable(reg); - - return 0; } static const struct of_device_id adf4350_of_match[] = { diff --git a/drivers/iio/frequency/admv1013.c b/drivers/iio/frequency/admv1013.c index 6cdeb50143af..3f3c478e9baa 100644 --- a/drivers/iio/frequency/admv1013.c +++ b/drivers/iio/frequency/admv1013.c @@ -348,7 +348,7 @@ static int admv1013_update_mixer_vgate(struct admv1013_state *st) vcm = regulator_get_voltage(st->reg); - if (vcm >= 0 && vcm < 1800000) + if (vcm < 1800000) mixer_vgate = (2389 * vcm / 1000000 + 8100) / 100; else if (vcm > 1800000 && vcm < 2600000) mixer_vgate = (2375 * vcm / 1000000 + 125) / 100; diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c index 17b939a367ad..81a6d09788bd 100644 --- a/drivers/iio/gyro/bmg160_core.c +++ b/drivers/iio/gyro/bmg160_core.c @@ -1188,11 +1188,14 @@ int bmg160_core_probe(struct device *dev, struct regmap *regmap, int irq, ret = iio_device_register(indio_dev); if (ret < 0) { dev_err(dev, "unable to 
register iio device\n"); - goto err_buffer_cleanup; + goto err_pm_cleanup; } return 0; +err_pm_cleanup: + pm_runtime_dont_use_autosuspend(dev); + pm_runtime_disable(dev); err_buffer_cleanup: iio_triggered_buffer_cleanup(indio_dev); err_trigger_unregister: diff --git a/drivers/iio/gyro/bmg160_spi.c b/drivers/iio/gyro/bmg160_spi.c index 745962e1e423..fc2e453527b9 100644 --- a/drivers/iio/gyro/bmg160_spi.c +++ b/drivers/iio/gyro/bmg160_spi.c @@ -27,11 +27,9 @@ static int bmg160_spi_probe(struct spi_device *spi) return bmg160_core_probe(&spi->dev, regmap, spi->irq, id->name); } -static int bmg160_spi_remove(struct spi_device *spi) +static void bmg160_spi_remove(struct spi_device *spi) { bmg160_core_remove(&spi->dev); - - return 0; } static const struct spi_device_id bmg160_spi_id[] = { diff --git a/drivers/iio/gyro/fxas21002c_spi.c b/drivers/iio/gyro/fxas21002c_spi.c index 77ceebef4e34..c3ac169facf9 100644 --- a/drivers/iio/gyro/fxas21002c_spi.c +++ b/drivers/iio/gyro/fxas21002c_spi.c @@ -34,11 +34,9 @@ static int fxas21002c_spi_probe(struct spi_device *spi) return fxas21002c_core_probe(&spi->dev, regmap, spi->irq, id->name); } -static int fxas21002c_spi_remove(struct spi_device *spi) +static void fxas21002c_spi_remove(struct spi_device *spi) { fxas21002c_core_remove(&spi->dev); - - return 0; } static const struct spi_device_id fxas21002c_spi_id[] = { diff --git a/drivers/iio/health/afe4403.c b/drivers/iio/health/afe4403.c index 273f16dcaff8..856ec901b091 100644 --- a/drivers/iio/health/afe4403.c +++ b/drivers/iio/health/afe4403.c @@ -570,7 +570,7 @@ err_disable_reg: return ret; } -static int afe4403_remove(struct spi_device *spi) +static void afe4403_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); struct afe4403_data *afe = iio_priv(indio_dev); @@ -586,8 +586,6 @@ static int afe4403_remove(struct spi_device *spi) ret = regulator_disable(afe->regulator); if (ret) dev_warn(afe->dev, "Unable to disable regulator\n"); - - return 0; } static const struct spi_device_id afe4403_ids[] = { diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c index ed129321a14d..f9b4540db1f4 100644 --- a/drivers/iio/imu/adis16480.c +++ b/drivers/iio/imu/adis16480.c @@ -1403,6 +1403,7 @@ static int adis16480_probe(struct spi_device *spi) { const struct spi_device_id *id = spi_get_device_id(spi); const struct adis_data *adis16480_data; + irq_handler_t trigger_handler = NULL; struct iio_dev *indio_dev; struct adis16480 *st; int ret; @@ -1474,8 +1475,12 @@ static int adis16480_probe(struct spi_device *spi) st->clk_freq = st->chip_info->int_clk; } + /* Only use our trigger handler if burst mode is supported */ + if (adis16480_data->burst_len) + trigger_handler = adis16480_trigger_handler; + ret = devm_adis_setup_buffer_and_trigger(&st->adis, indio_dev, - adis16480_trigger_handler); + trigger_handler); if (ret) return ret; diff --git a/drivers/iio/imu/kmx61.c b/drivers/iio/imu/kmx61.c index 1dabfd615dab..f89724481df9 100644 --- a/drivers/iio/imu/kmx61.c +++ b/drivers/iio/imu/kmx61.c @@ -1385,7 +1385,7 @@ static int kmx61_probe(struct i2c_client *client, ret = iio_device_register(data->acc_indio_dev); if (ret < 0) { dev_err(&client->dev, "Failed to register acc iio device\n"); - goto err_buffer_cleanup_mag; + goto err_pm_cleanup; } ret = iio_device_register(data->mag_indio_dev); @@ -1398,6 +1398,9 @@ static int kmx61_probe(struct i2c_client *client, err_iio_unregister_acc: iio_device_unregister(data->acc_indio_dev); +err_pm_cleanup: + 
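
The mma9553, bmg160 and kmx61 hunks all plug the same probe() leak: once pm_runtime_enable() and pm_runtime_use_autosuspend() have run, every later failure path must undo both, otherwise the device is left runtime-PM-enabled after the driver bails out. The resulting probe() tail, sketched with illustrative labels:

	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);
	pm_runtime_use_autosuspend(dev);

	ret = iio_device_register(indio_dev);
	if (ret)
		goto err_pm_cleanup;

	return 0;

err_pm_cleanup:
	/* mirror the pm_runtime_* setup above, in reverse */
	pm_runtime_dont_use_autosuspend(dev);
	pm_runtime_disable(dev);
err_buffer_cleanup:
	iio_triggered_buffer_cleanup(indio_dev);
	return ret;
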
pm_runtime_dont_use_autosuspend(&client->dev); + pm_runtime_disable(&client->dev); err_buffer_cleanup_mag: if (client->irq > 0) iio_triggered_buffer_cleanup(data->mag_indio_dev); diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c index 727b4b6ac696..93f0c6bce502 100644 --- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c +++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c @@ -1374,8 +1374,12 @@ static int st_lsm6dsx_read_oneshot(struct st_lsm6dsx_sensor *sensor, if (err < 0) return err; + /* + * we need to wait for sensor settling time before + * reading data in order to avoid corrupted samples + */ delay = 1000000000 / sensor->odr; - usleep_range(delay, 2 * delay); + usleep_range(3 * delay, 4 * delay); err = st_lsm6dsx_read_locked(hw, addr, &data, sizeof(data)); if (err < 0) diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c index 94eb9f6cf128..208b5193c621 100644 --- a/drivers/iio/industrialio-buffer.c +++ b/drivers/iio/industrialio-buffer.c @@ -1569,9 +1569,17 @@ static long iio_device_buffer_getfd(struct iio_dev *indio_dev, unsigned long arg } if (copy_to_user(ival, &fd, sizeof(fd))) { - put_unused_fd(fd); - ret = -EFAULT; - goto error_free_ib; + /* + * "Leak" the fd, as there's not much we can do about this + * anyway. 'fd' might have been closed already, as + * anon_inode_getfd() called fd_install() on it, which made + * it reachable by userland. + * + * Instead of allowing a malicious user to play tricks with + * us, rely on the process exit path to do any necessary + * cleanup, as in releasing the file, if still needed. + */ + return -EFAULT; } return 0; diff --git a/drivers/iio/magnetometer/bmc150_magn.c b/drivers/iio/magnetometer/bmc150_magn.c index f96f53175349..3d4d21f979fa 100644 --- a/drivers/iio/magnetometer/bmc150_magn.c +++ b/drivers/iio/magnetometer/bmc150_magn.c @@ -962,13 +962,14 @@ int bmc150_magn_probe(struct device *dev, struct regmap *regmap, ret = iio_device_register(indio_dev); if (ret < 0) { dev_err(dev, "unable to register iio device\n"); - goto err_disable_runtime_pm; + goto err_pm_cleanup; } dev_dbg(dev, "Registered device %s\n", name); return 0; -err_disable_runtime_pm: +err_pm_cleanup: + pm_runtime_dont_use_autosuspend(dev); pm_runtime_disable(dev); err_buffer_cleanup: iio_triggered_buffer_cleanup(indio_dev); diff --git a/drivers/iio/magnetometer/bmc150_magn_spi.c b/drivers/iio/magnetometer/bmc150_magn_spi.c index c6ed3ea8460a..4c570412d65c 100644 --- a/drivers/iio/magnetometer/bmc150_magn_spi.c +++ b/drivers/iio/magnetometer/bmc150_magn_spi.c @@ -29,11 +29,9 @@ static int bmc150_magn_spi_probe(struct spi_device *spi) return bmc150_magn_probe(&spi->dev, regmap, spi->irq, id->name); } -static int bmc150_magn_spi_remove(struct spi_device *spi) +static void bmc150_magn_spi_remove(struct spi_device *spi) { bmc150_magn_remove(&spi->dev); - - return 0; } static const struct spi_device_id bmc150_magn_spi_id[] = { diff --git a/drivers/iio/magnetometer/hmc5843_spi.c b/drivers/iio/magnetometer/hmc5843_spi.c index 89cf59a62c28..a99dd9b33e95 100644 --- a/drivers/iio/magnetometer/hmc5843_spi.c +++ b/drivers/iio/magnetometer/hmc5843_spi.c @@ -74,11 +74,9 @@ static int hmc5843_spi_probe(struct spi_device *spi) id->driver_data, id->name); } -static int hmc5843_spi_remove(struct spi_device *spi) +static void hmc5843_spi_remove(struct spi_device *spi) { hmc5843_common_remove(&spi->dev); - - return 0; } static const struct spi_device_id hmc5843_id[] = { diff --git 
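
The industrialio-buffer comment above reflects a general rule: anon_inode_getfd() creates the file *and* installs it, so once it returns, userspace may already own, or even have closed, the descriptor, and put_unused_fd() is no longer legal. When cleanup after a late failure is required, the fd must be reserved first and installed last. A sketch of that safe ordering, with hypothetical names ("[foo]", foo_fops, priv, uptr):

	struct file *file;
	int fd;

	fd = get_unused_fd_flags(O_CLOEXEC);	/* reserved, not installed */
	if (fd < 0)
		return fd;

	file = anon_inode_getfile("[foo]", &foo_fops, priv, O_RDWR);
	if (IS_ERR(file)) {
		put_unused_fd(fd);	/* still ours to revoke */
		return PTR_ERR(file);
	}

	if (copy_to_user(uptr, &fd, sizeof(fd))) {
		put_unused_fd(fd);
		fput(file);
		return -EFAULT;
	}

	fd_install(fd, file);	/* point of no return */
	return 0;
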
a/drivers/iio/potentiometer/max5487.c b/drivers/iio/potentiometer/max5487.c index 007c2bd324cb..42723c996c9f 100644 --- a/drivers/iio/potentiometer/max5487.c +++ b/drivers/iio/potentiometer/max5487.c @@ -112,7 +112,7 @@ static int max5487_spi_probe(struct spi_device *spi) return iio_device_register(indio_dev); } -static int max5487_spi_remove(struct spi_device *spi) +static void max5487_spi_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); int ret; @@ -123,8 +123,6 @@ static int max5487_spi_remove(struct spi_device *spi) ret = max5487_write_cmd(spi, MAX5487_COPY_AB_TO_NV); if (ret) dev_warn(&spi->dev, "Failed to save wiper regs to NV regs\n"); - - return 0; } static const struct spi_device_id max5487_id[] = { diff --git a/drivers/iio/pressure/ms5611_spi.c b/drivers/iio/pressure/ms5611_spi.c index 9fa2dcd71760..7ccd960ced5d 100644 --- a/drivers/iio/pressure/ms5611_spi.c +++ b/drivers/iio/pressure/ms5611_spi.c @@ -107,11 +107,9 @@ static int ms5611_spi_probe(struct spi_device *spi) spi_get_device_id(spi)->driver_data); } -static int ms5611_spi_remove(struct spi_device *spi) +static void ms5611_spi_remove(struct spi_device *spi) { ms5611_remove(spi_get_drvdata(spi)); - - return 0; } static const struct of_device_id ms5611_spi_matches[] = { diff --git a/drivers/iio/pressure/zpa2326_spi.c b/drivers/iio/pressure/zpa2326_spi.c index 85201a4bae44..ee8ed77536ca 100644 --- a/drivers/iio/pressure/zpa2326_spi.c +++ b/drivers/iio/pressure/zpa2326_spi.c @@ -57,11 +57,9 @@ static int zpa2326_probe_spi(struct spi_device *spi) spi->irq, ZPA2326_DEVICE_ID, regmap); } -static int zpa2326_remove_spi(struct spi_device *spi) +static void zpa2326_remove_spi(struct spi_device *spi) { zpa2326_remove(&spi->dev); - - return 0; } static const struct spi_device_id zpa2326_spi_ids[] = { diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index c447526288f4..50c53409ceb6 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -3370,22 +3370,30 @@ err: static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, const struct sockaddr *dst_addr) { - if (!src_addr || !src_addr->sa_family) { - src_addr = (struct sockaddr *) &id->route.addr.src_addr; - src_addr->sa_family = dst_addr->sa_family; - if (IS_ENABLED(CONFIG_IPV6) && - dst_addr->sa_family == AF_INET6) { - struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *) src_addr; - struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *) dst_addr; - src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id; - if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL) - id->route.addr.dev_addr.bound_dev_if = dst_addr6->sin6_scope_id; - } else if (dst_addr->sa_family == AF_IB) { - ((struct sockaddr_ib *) src_addr)->sib_pkey = - ((struct sockaddr_ib *) dst_addr)->sib_pkey; - } - } - return rdma_bind_addr(id, src_addr); + struct sockaddr_storage zero_sock = {}; + + if (src_addr && src_addr->sa_family) + return rdma_bind_addr(id, src_addr); + + /* + * When the src_addr is not specified, automatically supply an any addr + */ + zero_sock.ss_family = dst_addr->sa_family; + if (IS_ENABLED(CONFIG_IPV6) && dst_addr->sa_family == AF_INET6) { + struct sockaddr_in6 *src_addr6 = + (struct sockaddr_in6 *)&zero_sock; + struct sockaddr_in6 *dst_addr6 = + (struct sockaddr_in6 *)dst_addr; + + src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id; + if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL) + id->route.addr.dev_addr.bound_dev_if = + dst_addr6->sin6_scope_id; + } else if 
(dst_addr->sa_family == AF_IB) { + ((struct sockaddr_ib *)&zero_sock)->sib_pkey = + ((struct sockaddr_ib *)dst_addr)->sib_pkey; + } + return rdma_bind_addr(id, (struct sockaddr *)&zero_sock); } /* diff --git a/drivers/infiniband/hw/mlx5/cong.c b/drivers/infiniband/hw/mlx5/cong.c index 0b61df52332a..290ea8ac3838 100644 --- a/drivers/infiniband/hw/mlx5/cong.c +++ b/drivers/infiniband/hw/mlx5/cong.c @@ -433,8 +433,7 @@ void mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u32 port_num) dev->port[port_num].dbg_cc_params = dbg_cc_params; - dbg_cc_params->root = debugfs_create_dir("cc_params", - mdev->priv.dbg_root); + dbg_cc_params->root = debugfs_create_dir("cc_params", mlx5_debugfs_get_dev_root(mdev)); for (i = 0; i < MLX5_IB_DBG_CC_MAX; i++) { dbg_cc_params->params[i].offset = i; diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c index 08b7f6bc56c3..fc036b4794fd 100644 --- a/drivers/infiniband/hw/mlx5/devx.c +++ b/drivers/infiniband/hw/mlx5/devx.c @@ -1055,7 +1055,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OTHER)( int cmd_out_len = uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT); void *cmd_out; - int err; + int err, err2; int uid; c = devx_ufile2uctx(attrs); @@ -1076,14 +1076,16 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OTHER)( return PTR_ERR(cmd_out); MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid); - err = mlx5_cmd_exec(dev->mdev, cmd_in, - uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN), - cmd_out, cmd_out_len); - if (err) + err = mlx5_cmd_do(dev->mdev, cmd_in, + uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN), + cmd_out, cmd_out_len); + if (err && err != -EREMOTEIO) return err; - return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT, cmd_out, + err2 = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT, cmd_out, cmd_out_len); + + return err2 ?: err; } static void devx_obj_build_destroy_cmd(void *in, void *out, void *din, @@ -1457,7 +1459,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)( u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)]; struct devx_obj *obj; u16 obj_type = 0; - int err; + int err, err2 = 0; int uid; u32 obj_id; u16 opcode; @@ -1497,15 +1499,18 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)( !is_apu_cq(dev, cmd_in)) { obj->flags |= DEVX_OBJ_FLAGS_CQ; obj->core_cq.comp = devx_cq_comp; - err = mlx5_core_create_cq(dev->mdev, &obj->core_cq, - cmd_in, cmd_in_len, cmd_out, - cmd_out_len); + err = mlx5_create_cq(dev->mdev, &obj->core_cq, + cmd_in, cmd_in_len, cmd_out, + cmd_out_len); } else { - err = mlx5_cmd_exec(dev->mdev, cmd_in, - cmd_in_len, - cmd_out, cmd_out_len); + err = mlx5_cmd_do(dev->mdev, cmd_in, cmd_in_len, + cmd_out, cmd_out_len); } + if (err == -EREMOTEIO) + err2 = uverbs_copy_to(attrs, + MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, + cmd_out, cmd_out_len); if (err) goto obj_free; @@ -1548,7 +1553,7 @@ obj_destroy: sizeof(out)); obj_free: kfree(obj); - return err; + return err2 ?: err; } static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_MODIFY)( @@ -1563,7 +1568,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_MODIFY)( &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext); struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device); void *cmd_out; - int err; + int err, err2; int uid; if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id)) @@ -1586,14 +1591,16 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_MODIFY)( MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid); devx_set_umem_valid(cmd_in); - err = mlx5_cmd_exec(mdev->mdev, 
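
The devx conversions in this hunk (OTHER, OBJ_CREATE, OBJ_MODIFY, OBJ_QUERY) all follow one pattern: mlx5_cmd_do(), unlike mlx5_cmd_exec(), returns -EREMOTEIO when the command reached the firmware but was rejected, and in that case cmd_out still holds the firmware's status and syndrome. The handler therefore copies cmd_out back to userspace even on that error, and only then folds the two results together. Condensed sketch (the attribute name is a placeholder):

	err = mlx5_cmd_do(mdev, cmd_in, in_len, cmd_out, out_len);
	if (err && err != -EREMOTEIO)
		return err;		/* command never reached FW */

	/* on -EREMOTEIO, cmd_out carries the FW failure details */
	err2 = uverbs_copy_to(attrs, FOO_CMD_OUT_ATTR, cmd_out, out_len);

	return err2 ?: err;		/* copy error wins, else FW status */
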
cmd_in, - uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN), - cmd_out, cmd_out_len); - if (err) + err = mlx5_cmd_do(mdev->mdev, cmd_in, + uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN), + cmd_out, cmd_out_len); + if (err && err != -EREMOTEIO) return err; - return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT, + err2 = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT, cmd_out, cmd_out_len); + + return err2 ?: err; } static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_QUERY)( @@ -1607,7 +1614,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_QUERY)( struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context( &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext); void *cmd_out; - int err; + int err, err2; int uid; struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device); @@ -1629,14 +1636,16 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_QUERY)( return PTR_ERR(cmd_out); MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid); - err = mlx5_cmd_exec(mdev->mdev, cmd_in, - uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN), - cmd_out, cmd_out_len); - if (err) + err = mlx5_cmd_do(mdev->mdev, cmd_in, + uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN), + cmd_out, cmd_out_len); + if (err && err != -EREMOTEIO) return err; - return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT, + err2 = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT, cmd_out, cmd_out_len); + + return err2 ?: err; } struct devx_async_event_queue { diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 85f526c861e9..32a0ea820573 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -4178,7 +4178,7 @@ static int mlx5_ib_stage_delay_drop_init(struct mlx5_ib_dev *dev) if (!mlx5_debugfs_root) return 0; - root = debugfs_create_dir("delay_drop", dev->mdev->priv.dbg_root); + root = debugfs_create_dir("delay_drop", mlx5_debugfs_get_dev_root(dev->mdev)); dev->delay_drop.dir_debugfs = root; debugfs_create_atomic_t("num_timeout_events", 0400, root, diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 157d862fb864..32cb7068f0ca 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -140,6 +140,19 @@ static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) return mlx5_core_destroy_mkey(dev->mdev, mr->mmkey.key); } +static void create_mkey_warn(struct mlx5_ib_dev *dev, int status, void *out) +{ + if (status == -ENXIO) /* core driver is not available */ + return; + + mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status); + if (status != -EREMOTEIO) /* driver specific failure */ + return; + + /* Failed in FW, print cmd out failure details */ + mlx5_cmd_out_err(dev->mdev, MLX5_CMD_OP_CREATE_MKEY, 0, out); +} + static void create_mkey_callback(int status, struct mlx5_async_work *context) { struct mlx5_ib_mr *mr = @@ -149,7 +162,7 @@ static void create_mkey_callback(int status, struct mlx5_async_work *context) unsigned long flags; if (status) { - mlx5_ib_warn(dev, "async reg mr failed. 
status %d\n", status); + create_mkey_warn(dev, status, mr->out); kfree(mr); spin_lock_irqsave(&ent->lock, flags); ent->pending--; @@ -683,7 +696,7 @@ static void mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev) if (!mlx5_debugfs_root || dev->is_rep) return; - cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root); + cache->root = debugfs_create_dir("mr_cache", mlx5_debugfs_get_dev_root(dev->mdev)); for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) { ent = &cache->ent[i]; diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 29475cf8c7c3..b7fe47107d76 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -4465,6 +4465,7 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr, err = mlx5_core_create_dct(dev, &qp->dct.mdct, qp->dct.in, MLX5_ST_SZ_BYTES(create_dct_in), out, sizeof(out)); + err = mlx5_cmd_check(dev->mdev, err, qp->dct.in, out); if (err) return err; resp.dctn = qp->dct.mdct.mqp.qpn; diff --git a/drivers/infiniband/hw/mlx5/qpc.c b/drivers/infiniband/hw/mlx5/qpc.c index 8844eacf2380..542e4c63a8de 100644 --- a/drivers/infiniband/hw/mlx5/qpc.c +++ b/drivers/infiniband/hw/mlx5/qpc.c @@ -220,7 +220,7 @@ int mlx5_core_create_dct(struct mlx5_ib_dev *dev, struct mlx5_core_dct *dct, init_completion(&dct->drained); MLX5_SET(create_dct_in, in, opcode, MLX5_CMD_OP_CREATE_DCT); - err = mlx5_cmd_exec(dev->mdev, in, inlen, out, outlen); + err = mlx5_cmd_do(dev->mdev, in, inlen, out, outlen); if (err) return err; diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c index 0a3b28142c05..41c272980f91 100644 --- a/drivers/infiniband/hw/qib/qib_sysfs.c +++ b/drivers/infiniband/hw/qib/qib_sysfs.c @@ -541,7 +541,7 @@ static struct attribute *port_diagc_attributes[] = { }; static const struct attribute_group port_diagc_group = { - .name = "linkcontrol", + .name = "diag_counters", .attrs = port_diagc_attributes, }; diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c index 7c3f98e57889..759b85f03331 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c +++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c @@ -2682,6 +2682,8 @@ static void rtrs_clt_dev_release(struct device *dev) struct rtrs_clt_sess *clt = container_of(dev, struct rtrs_clt_sess, dev); + mutex_destroy(&clt->paths_ev_mutex); + mutex_destroy(&clt->paths_mutex); kfree(clt); } @@ -2711,6 +2713,8 @@ static struct rtrs_clt_sess *alloc_clt(const char *sessname, size_t paths_num, return ERR_PTR(-ENOMEM); } + clt->dev.class = rtrs_clt_dev_class; + clt->dev.release = rtrs_clt_dev_release; uuid_gen(&clt->paths_uuid); INIT_LIST_HEAD_RCU(&clt->paths_list); clt->paths_num = paths_num; @@ -2727,53 +2731,51 @@ static struct rtrs_clt_sess *alloc_clt(const char *sessname, size_t paths_num, init_waitqueue_head(&clt->permits_wait); mutex_init(&clt->paths_ev_mutex); mutex_init(&clt->paths_mutex); + device_initialize(&clt->dev); - clt->dev.class = rtrs_clt_dev_class; - clt->dev.release = rtrs_clt_dev_release; err = dev_set_name(&clt->dev, "%s", sessname); if (err) - goto err; + goto err_put; + /* * Suppress user space notification until * sysfs files are created */ dev_set_uevent_suppress(&clt->dev, true); - err = device_register(&clt->dev); - if (err) { - put_device(&clt->dev); - goto err; - } + err = device_add(&clt->dev); + if (err) + goto err_put; clt->kobj_paths = kobject_create_and_add("paths", &clt->dev.kobj); if (!clt->kobj_paths) { err = -ENOMEM; - goto err_dev; + goto err_del; } err = 
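
The alloc_clt() rework below is the standard split of device_register() into device_initialize() + device_add(): after device_initialize() the refcount is live, so every failure must end in put_device(), which runs the ->release() callback and frees the object, rather than kfree(). Skeleton of the resulting error handling (foo_create_sysfs() stands in for the later setup steps):

	device_initialize(&clt->dev);	/* refcount is live from here on */

	err = dev_set_name(&clt->dev, "%s", sessname);
	if (err)
		goto err_put;

	err = device_add(&clt->dev);
	if (err)
		goto err_put;

	err = foo_create_sysfs(clt);
	if (err)
		goto err_del;

	return clt;

err_del:
	device_del(&clt->dev);		/* undo device_add() only */
err_put:
	put_device(&clt->dev);		/* ->release() frees clt */
	return ERR_PTR(err);
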
rtrs_clt_create_sysfs_root_files(clt); if (err) { kobject_del(clt->kobj_paths); kobject_put(clt->kobj_paths); - goto err_dev; + goto err_del; } dev_set_uevent_suppress(&clt->dev, false); kobject_uevent(&clt->dev.kobj, KOBJ_ADD); return clt; -err_dev: - device_unregister(&clt->dev); -err: +err_del: + device_del(&clt->dev); +err_put: free_percpu(clt->pcpu_path); - kfree(clt); + put_device(&clt->dev); return ERR_PTR(err); } static void free_clt(struct rtrs_clt_sess *clt) { - free_permits(clt); free_percpu(clt->pcpu_path); - mutex_destroy(&clt->paths_ev_mutex); - mutex_destroy(&clt->paths_mutex); - /* release callback will free clt in last put */ + + /* + * release callback will free clt and destroy mutexes in last put + */ device_unregister(&clt->dev); } @@ -2890,6 +2892,7 @@ void rtrs_clt_close(struct rtrs_clt_sess *clt) rtrs_clt_destroy_path_files(clt_path, NULL); kobject_put(&clt_path->kobj); } + free_permits(clt); free_clt(clt); } EXPORT_SYMBOL(rtrs_clt_close); diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index e174e853f8a4..285b766e4e70 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -4047,9 +4047,11 @@ static void srp_remove_one(struct ib_device *device, void *client_data) spin_unlock(&host->target_lock); /* - * Wait for tl_err and target port removal tasks. + * srp_queue_remove_work() queues a call to + * srp_remove_target(). The latter function cancels + * target->tl_err_work so waiting for the remove works to + * finish is sufficient. */ - flush_workqueue(system_long_wq); flush_workqueue(srp_remove_wq); kfree(host); diff --git a/drivers/input/input.c b/drivers/input/input.c index ccaeb2426385..c3139bc2aa0d 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c @@ -2285,6 +2285,12 @@ int input_register_device(struct input_dev *dev) /* KEY_RESERVED is not supposed to be transmitted to userspace. */ __clear_bit(KEY_RESERVED, dev->keybit); + /* Buttonpads should not map BTN_RIGHT and/or BTN_MIDDLE. */ + if (test_bit(INPUT_PROP_BUTTONPAD, dev->propbit)) { + __clear_bit(BTN_RIGHT, dev->keybit); + __clear_bit(BTN_MIDDLE, dev->keybit); + } + /* Make sure that bitmasks not mentioned in dev->evbit are clean. 
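
The input core hunk above moves a policy into one place: a device that declares INPUT_PROP_BUTTONPAD by definition has no separate right or middle buttons, so input_register_device() now strips BTN_RIGHT and BTN_MIDDLE regardless of what the driver set. From a hypothetical touchpad driver this looks like:

	__set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
	input_set_capability(input, EV_KEY, BTN_LEFT);

	/* Even if shared init code also set BTN_RIGHT/BTN_MIDDLE, the
	 * core clears them for buttonpads at registration time. */
	error = input_register_device(input);
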
*/ input_cleanse_bitmasks(dev); diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig index 0c607da9ee10..9417ee0b1eff 100644 --- a/drivers/input/keyboard/Kconfig +++ b/drivers/input/keyboard/Kconfig @@ -556,7 +556,7 @@ config KEYBOARD_PMIC8XXX config KEYBOARD_SAMSUNG tristate "Samsung keypad support" - depends on HAVE_CLK + depends on HAS_IOMEM && HAVE_CLK select INPUT_MATRIXKMAP help Say Y here if you want to use the keypad on your Samsung mobile diff --git a/drivers/input/keyboard/applespi.c b/drivers/input/keyboard/applespi.c index eda1b23002b5..d1f5354d5ea2 100644 --- a/drivers/input/keyboard/applespi.c +++ b/drivers/input/keyboard/applespi.c @@ -1858,7 +1858,7 @@ static void applespi_drain_reads(struct applespi_data *applespi) spin_unlock_irqrestore(&applespi->cmd_msg_lock, flags); } -static int applespi_remove(struct spi_device *spi) +static void applespi_remove(struct spi_device *spi) { struct applespi_data *applespi = spi_get_drvdata(spi); @@ -1871,8 +1871,6 @@ static int applespi_remove(struct spi_device *spi) applespi_drain_reads(applespi); debugfs_remove_recursive(applespi->debugfs_root); - - return 0; } static void applespi_shutdown(struct spi_device *spi) diff --git a/drivers/input/misc/adxl34x-spi.c b/drivers/input/misc/adxl34x-spi.c index 6e51c9bc619f..91e44d4c66f7 100644 --- a/drivers/input/misc/adxl34x-spi.c +++ b/drivers/input/misc/adxl34x-spi.c @@ -87,13 +87,11 @@ static int adxl34x_spi_probe(struct spi_device *spi) return 0; } -static int adxl34x_spi_remove(struct spi_device *spi) +static void adxl34x_spi_remove(struct spi_device *spi) { struct adxl34x *ac = spi_get_drvdata(spi); adxl34x_remove(ac); - - return 0; } static int __maybe_unused adxl34x_spi_suspend(struct device *dev) diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c index 47af62c12267..e1758d5ffe42 100644 --- a/drivers/input/mouse/elan_i2c_core.c +++ b/drivers/input/mouse/elan_i2c_core.c @@ -186,55 +186,21 @@ static int elan_get_fwinfo(u16 ic_type, u8 iap_version, u16 *validpage_count, return 0; } -static int elan_enable_power(struct elan_tp_data *data) +static int elan_set_power(struct elan_tp_data *data, bool on) { int repeat = ETP_RETRY_COUNT; int error; - error = regulator_enable(data->vcc); - if (error) { - dev_err(&data->client->dev, - "failed to enable regulator: %d\n", error); - return error; - } - do { - error = data->ops->power_control(data->client, true); + error = data->ops->power_control(data->client, on); if (error >= 0) return 0; msleep(30); } while (--repeat > 0); - dev_err(&data->client->dev, "failed to enable power: %d\n", error); - return error; -} - -static int elan_disable_power(struct elan_tp_data *data) -{ - int repeat = ETP_RETRY_COUNT; - int error; - - do { - error = data->ops->power_control(data->client, false); - if (!error) { - error = regulator_disable(data->vcc); - if (error) { - dev_err(&data->client->dev, - "failed to disable regulator: %d\n", - error); - /* Attempt to power the chip back up */ - data->ops->power_control(data->client, true); - break; - } - - return 0; - } - - msleep(30); - } while (--repeat > 0); - - dev_err(&data->client->dev, "failed to disable power: %d\n", error); + dev_err(&data->client->dev, "failed to set power %s: %d\n", + on ? 
"on" : "off", error); return error; } @@ -1399,9 +1365,19 @@ static int __maybe_unused elan_suspend(struct device *dev) /* Enable wake from IRQ */ data->irq_wake = (enable_irq_wake(client->irq) == 0); } else { - ret = elan_disable_power(data); + ret = elan_set_power(data, false); + if (ret) + goto err; + + ret = regulator_disable(data->vcc); + if (ret) { + dev_err(dev, "error %d disabling regulator\n", ret); + /* Attempt to power the chip back up */ + elan_set_power(data, true); + } } +err: mutex_unlock(&data->sysfs_mutex); return ret; } @@ -1412,12 +1388,18 @@ static int __maybe_unused elan_resume(struct device *dev) struct elan_tp_data *data = i2c_get_clientdata(client); int error; - if (device_may_wakeup(dev) && data->irq_wake) { + if (!device_may_wakeup(dev)) { + error = regulator_enable(data->vcc); + if (error) { + dev_err(dev, "error %d enabling regulator\n", error); + goto err; + } + } else if (data->irq_wake) { disable_irq_wake(client->irq); data->irq_wake = false; } - error = elan_enable_power(data); + error = elan_set_power(data, true); if (error) { dev_err(dev, "power up when resuming failed: %d\n", error); goto err; diff --git a/drivers/input/mouse/psmouse-smbus.c b/drivers/input/mouse/psmouse-smbus.c index a472489ccbad..164f6c757f6b 100644 --- a/drivers/input/mouse/psmouse-smbus.c +++ b/drivers/input/mouse/psmouse-smbus.c @@ -75,6 +75,8 @@ static void psmouse_smbus_detach_i2c_client(struct i2c_client *client) "Marking SMBus companion %s as gone\n", dev_name(&smbdev->client->dev)); smbdev->dead = true; + device_link_remove(&smbdev->client->dev, + &smbdev->psmouse->ps2dev.serio->dev); serio_rescan(smbdev->psmouse->ps2dev.serio); } else { list_del(&smbdev->node); @@ -174,6 +176,8 @@ static void psmouse_smbus_disconnect(struct psmouse *psmouse) kfree(smbdev); } else { smbdev->dead = true; + device_link_remove(&smbdev->client->dev, + &psmouse->ps2dev.serio->dev); psmouse_dbg(smbdev->psmouse, "posting removal request for SMBus companion %s\n", dev_name(&smbdev->client->dev)); @@ -270,6 +274,12 @@ int psmouse_smbus_init(struct psmouse *psmouse, if (smbdev->client) { /* We have our companion device */ + if (!device_link_add(&smbdev->client->dev, + &psmouse->ps2dev.serio->dev, + DL_FLAG_STATELESS)) + psmouse_warn(psmouse, + "failed to set up link with iSMBus companion %s\n", + dev_name(&smbdev->client->dev)); return 0; } diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c index a25a77dd9a32..bed68a68f330 100644 --- a/drivers/input/touchscreen/ads7846.c +++ b/drivers/input/touchscreen/ads7846.c @@ -1411,13 +1411,11 @@ static int ads7846_probe(struct spi_device *spi) return 0; } -static int ads7846_remove(struct spi_device *spi) +static void ads7846_remove(struct spi_device *spi) { struct ads7846 *ts = spi_get_drvdata(spi); ads7846_stop(ts); - - return 0; } static struct spi_driver ads7846_driver = { diff --git a/drivers/input/touchscreen/cyttsp4_spi.c b/drivers/input/touchscreen/cyttsp4_spi.c index 2aec41eb76b7..5d7db84f2749 100644 --- a/drivers/input/touchscreen/cyttsp4_spi.c +++ b/drivers/input/touchscreen/cyttsp4_spi.c @@ -164,12 +164,10 @@ static int cyttsp4_spi_probe(struct spi_device *spi) return PTR_ERR_OR_ZERO(ts); } -static int cyttsp4_spi_remove(struct spi_device *spi) +static void cyttsp4_spi_remove(struct spi_device *spi) { struct cyttsp4 *ts = spi_get_drvdata(spi); cyttsp4_remove(ts); - - return 0; } static struct spi_driver cyttsp4_spi_driver = { diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c index 
a3bfc7a41679..752e8ba4fecb 100644 --- a/drivers/input/touchscreen/goodix.c +++ b/drivers/input/touchscreen/goodix.c @@ -18,6 +18,7 @@ #include <linux/delay.h> #include <linux/irq.h> #include <linux/interrupt.h> +#include <linux/platform_data/x86/soc.h> #include <linux/slab.h> #include <linux/acpi.h> #include <linux/of.h> @@ -805,21 +806,6 @@ static int goodix_reset(struct goodix_ts_data *ts) } #ifdef ACPI_GPIO_SUPPORT -#include <asm/cpu_device_id.h> -#include <asm/intel-family.h> - -static const struct x86_cpu_id baytrail_cpu_ids[] = { - { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT, X86_FEATURE_ANY, }, - {} -}; - -static inline bool is_byt(void) -{ - const struct x86_cpu_id *id = x86_match_cpu(baytrail_cpu_ids); - - return !!id; -} - static const struct acpi_gpio_params first_gpio = { 0, 0, false }; static const struct acpi_gpio_params second_gpio = { 1, 0, false }; @@ -878,7 +864,7 @@ static int goodix_add_acpi_gpio_mappings(struct goodix_ts_data *ts) const struct acpi_gpio_mapping *gpio_mapping = NULL; struct device *dev = &ts->client->dev; LIST_HEAD(resources); - int ret; + int irq, ret; ts->gpio_count = 0; ts->gpio_int_idx = -1; @@ -891,6 +877,20 @@ static int goodix_add_acpi_gpio_mappings(struct goodix_ts_data *ts) acpi_dev_free_resource_list(&resources); + /* + * CHT devices should have a GpioInt + a regular GPIO ACPI resource. + * Some CHT devices have a bug (where there is also a bogus Interrupt + * resource copied from a previous BYT-based generation). i2c-core-acpi + * will use the non-working Interrupt resource, fix this up. + */ + if (soc_intel_is_cht() && ts->gpio_count == 2 && ts->gpio_int_idx != -1) { + irq = acpi_dev_gpio_irq_get(ACPI_COMPANION(dev), 0); + if (irq > 0 && irq != ts->client->irq) { + dev_warn(dev, "Overriding IRQ %d -> %d\n", ts->client->irq, irq); + ts->client->irq = irq; + } + } + if (ts->gpio_count == 2 && ts->gpio_int_idx == 0) { ts->irq_pin_access_method = IRQ_PIN_ACCESS_ACPI_GPIO; gpio_mapping = acpi_goodix_int_first_gpios; @@ -903,7 +903,7 @@ static int goodix_add_acpi_gpio_mappings(struct goodix_ts_data *ts) dev_info(dev, "Using ACPI INTI and INTO methods for IRQ pin access\n"); ts->irq_pin_access_method = IRQ_PIN_ACCESS_ACPI_METHOD; gpio_mapping = acpi_goodix_reset_only_gpios; - } else if (is_byt() && ts->gpio_count == 2 && ts->gpio_int_idx == -1) { + } else if (soc_intel_is_byt() && ts->gpio_count == 2 && ts->gpio_int_idx == -1) { dev_info(dev, "No ACPI GpioInt resource, assuming that the GPIO order is reset, int\n"); ts->irq_pin_access_method = IRQ_PIN_ACCESS_ACPI_GPIO; gpio_mapping = acpi_goodix_int_last_gpios; diff --git a/drivers/input/touchscreen/tsc2005.c b/drivers/input/touchscreen/tsc2005.c index a2f55920b9b2..555dfe98b3c4 100644 --- a/drivers/input/touchscreen/tsc2005.c +++ b/drivers/input/touchscreen/tsc2005.c @@ -64,11 +64,9 @@ static int tsc2005_probe(struct spi_device *spi) tsc2005_cmd); } -static int tsc2005_remove(struct spi_device *spi) +static void tsc2005_remove(struct spi_device *spi) { tsc200x_remove(&spi->dev); - - return 0; } #ifdef CONFIG_OF diff --git a/drivers/input/touchscreen/zinitix.c b/drivers/input/touchscreen/zinitix.c index 7c82c4f5fa6b..129ebc810de8 100644 --- a/drivers/input/touchscreen/zinitix.c +++ b/drivers/input/touchscreen/zinitix.c @@ -571,8 +571,20 @@ static SIMPLE_DEV_PM_OPS(zinitix_pm_ops, zinitix_suspend, zinitix_resume); #ifdef CONFIG_OF static const struct of_device_id zinitix_of_match[] = { + { .compatible = "zinitix,bt402" }, + { .compatible = "zinitix,bt403" }, + { .compatible = "zinitix,bt404" }, + 
{ .compatible = "zinitix,bt412" }, + { .compatible = "zinitix,bt413" }, + { .compatible = "zinitix,bt431" }, + { .compatible = "zinitix,bt432" }, + { .compatible = "zinitix,bt531" }, { .compatible = "zinitix,bt532" }, + { .compatible = "zinitix,bt538" }, { .compatible = "zinitix,bt541" }, + { .compatible = "zinitix,bt548" }, + { .compatible = "zinitix,bt554" }, + { .compatible = "zinitix,at100" }, { } }; MODULE_DEVICE_TABLE(of, zinitix_of_match); diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h index 416815a525d6..bb95edf74415 100644 --- a/drivers/iommu/amd/amd_iommu.h +++ b/drivers/iommu/amd/amd_iommu.h @@ -14,6 +14,7 @@ extern irqreturn_t amd_iommu_int_thread(int irq, void *data); extern irqreturn_t amd_iommu_int_handler(int irq, void *data); extern void amd_iommu_apply_erratum_63(u16 devid); +extern void amd_iommu_restart_event_logging(struct amd_iommu *iommu); extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu); extern int amd_iommu_init_devices(void); extern void amd_iommu_uninit_devices(void); diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h index ffc89c4fb120..47108ed44fbb 100644 --- a/drivers/iommu/amd/amd_iommu_types.h +++ b/drivers/iommu/amd/amd_iommu_types.h @@ -110,6 +110,7 @@ #define PASID_MASK 0x0000ffff /* MMIO status bits */ +#define MMIO_STATUS_EVT_OVERFLOW_INT_MASK (1 << 0) #define MMIO_STATUS_EVT_INT_MASK (1 << 1) #define MMIO_STATUS_COM_WAIT_INT_MASK (1 << 2) #define MMIO_STATUS_PPR_INT_MASK (1 << 6) diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c index b10fb52ea442..7bfe37e52e21 100644 --- a/drivers/iommu/amd/init.c +++ b/drivers/iommu/amd/init.c @@ -658,6 +658,16 @@ static int __init alloc_command_buffer(struct amd_iommu *iommu) } /* + * This function restarts event logging in case the IOMMU experienced + * an event log buffer overflow. + */ +void amd_iommu_restart_event_logging(struct amd_iommu *iommu) +{ + iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN); + iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN); +} + +/* * This function resets the command buffer if the IOMMU stopped fetching * commands from it. 
*/ diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c index b1bf4125b0f7..6608d1717574 100644 --- a/drivers/iommu/amd/io_pgtable.c +++ b/drivers/iommu/amd/io_pgtable.c @@ -492,18 +492,18 @@ static void v1_free_pgtable(struct io_pgtable *iop) dom = container_of(pgtable, struct protection_domain, iop); - /* Update data structure */ - amd_iommu_domain_clr_pt_root(dom); - - /* Make changes visible to IOMMUs */ - amd_iommu_domain_update(dom); - /* Page-table is not visible to IOMMU anymore, so free it */ BUG_ON(pgtable->mode < PAGE_MODE_NONE || pgtable->mode > PAGE_MODE_6_LEVEL); free_sub_pt(pgtable->root, pgtable->mode, &freelist); + /* Update data structure */ + amd_iommu_domain_clr_pt_root(dom); + + /* Make changes visible to IOMMUs */ + amd_iommu_domain_update(dom); + put_pages_list(&freelist); } diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index 461f1844ed1f..a18b549951bb 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -764,7 +764,8 @@ amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) { } #endif /* !CONFIG_IRQ_REMAP */ #define AMD_IOMMU_INT_MASK \ - (MMIO_STATUS_EVT_INT_MASK | \ + (MMIO_STATUS_EVT_OVERFLOW_INT_MASK | \ + MMIO_STATUS_EVT_INT_MASK | \ MMIO_STATUS_PPR_INT_MASK | \ MMIO_STATUS_GALOG_INT_MASK) @@ -774,7 +775,7 @@ irqreturn_t amd_iommu_int_thread(int irq, void *data) u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); while (status & AMD_IOMMU_INT_MASK) { - /* Enable EVT and PPR and GA interrupts again */ + /* Enable interrupt sources again */ writel(AMD_IOMMU_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET); @@ -795,6 +796,11 @@ irqreturn_t amd_iommu_int_thread(int irq, void *data) } #endif + if (status & MMIO_STATUS_EVT_OVERFLOW_INT_MASK) { + pr_info_ratelimited("IOMMU event log overflow\n"); + amd_iommu_restart_event_logging(iommu); + } + /* * Hardware bug: ERBT1312 * When re-enabling interrupt (by writing 1 diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 92fea3fbbb11..5b196cfe9ed2 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -2738,7 +2738,7 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu, spin_unlock_irqrestore(&device_domain_lock, flags); /* PASID table is mandatory for a PCI device in scalable mode. 
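
The three AMD IOMMU hunks above work as one fix: MMIO_STATUS_EVT_OVERFLOW_INT_MASK joins the handled-interrupt mask, the IRQ thread tests for it, and recovery is simply toggling event logging off and on, which makes the hardware reset its log pointers. Read together (an editorially condensed view using the same calls as the diff):

	u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);

	if (status & MMIO_STATUS_EVT_OVERFLOW_INT_MASK) {
		pr_info_ratelimited("IOMMU event log overflow\n");
		/* amd_iommu_restart_event_logging(iommu) does: */
		iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
		iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
	}
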
*/ - if (dev && dev_is_pci(dev) && sm_supported(iommu)) { + if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) { ret = intel_pasid_alloc_table(dev); if (ret) { dev_err(dev, "PASID table allocation failed\n"); diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c index e900e3c46903..2561ce8a2ce8 100644 --- a/drivers/iommu/tegra-smmu.c +++ b/drivers/iommu/tegra-smmu.c @@ -808,8 +808,10 @@ static struct tegra_smmu *tegra_smmu_find(struct device_node *np) return NULL; mc = platform_get_drvdata(pdev); - if (!mc) + if (!mc) { + put_device(&pdev->dev); return NULL; + } return mc->smmu; } diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 9e93ff2b6375..cd772973114a 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -5517,6 +5517,9 @@ int __init its_lpi_memreserve_init(void) if (!efi_enabled(EFI_CONFIG_TABLES)) return 0; + if (list_empty(&its_nodes)) + return 0; + gic_rdists->cpuhp_memreserve_state = CPUHP_INVALID; state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "irqchip/arm/gicv3/memreserve:online", diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c index 259065d271ef..09cc98266d30 100644 --- a/drivers/irqchip/irq-sifive-plic.c +++ b/drivers/irqchip/irq-sifive-plic.c @@ -398,3 +398,4 @@ out_free_priv: IRQCHIP_DECLARE(sifive_plic, "sifive,plic-1.0.0", plic_init); IRQCHIP_DECLARE(riscv_plic0, "riscv,plic0", plic_init); /* for legacy systems */ +IRQCHIP_DECLARE(thead_c900_plic, "thead,c900-plic", plic_init); /* for firmware driver */ diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c index bd087cca1c1d..af17459c1a5c 100644 --- a/drivers/isdn/hardware/mISDN/hfcpci.c +++ b/drivers/isdn/hardware/mISDN/hfcpci.c @@ -2005,7 +2005,11 @@ setup_hw(struct hfc_pci *hc) } /* Allocate memory for FIFOS */ /* the memory needs to be on a 32k boundary within the first 4G */ - dma_set_mask(&hc->pdev->dev, 0xFFFF8000); + if (dma_set_mask(&hc->pdev->dev, 0xFFFF8000)) { + printk(KERN_WARNING + "HFC-PCI: No usable DMA configuration!\n"); + return -EIO; + } buffer = dma_alloc_coherent(&hc->pdev->dev, 0x8000, &hc->hw.dmahandle, GFP_KERNEL); /* We silently assume the address is okay if nonzero */ diff --git a/drivers/isdn/mISDN/dsp_pipeline.c b/drivers/isdn/mISDN/dsp_pipeline.c index e11ca6bbc7f4..c3b2c99b5cd5 100644 --- a/drivers/isdn/mISDN/dsp_pipeline.c +++ b/drivers/isdn/mISDN/dsp_pipeline.c @@ -192,7 +192,7 @@ void dsp_pipeline_destroy(struct dsp_pipeline *pipeline) int dsp_pipeline_build(struct dsp_pipeline *pipeline, const char *cfg) { int found = 0; - char *dup, *tok, *name, *args; + char *dup, *next, *tok, *name, *args; struct dsp_element_entry *entry, *n; struct dsp_pipeline_entry *pipeline_entry; struct mISDN_dsp_element *elem; @@ -203,10 +203,10 @@ int dsp_pipeline_build(struct dsp_pipeline *pipeline, const char *cfg) if (!list_empty(&pipeline->list)) _dsp_pipeline_destroy(pipeline); - dup = kstrdup(cfg, GFP_ATOMIC); + dup = next = kstrdup(cfg, GFP_ATOMIC); if (!dup) return 0; - while ((tok = strsep(&dup, "|"))) { + while ((tok = strsep(&next, "|"))) { if (!strlen(tok)) continue; name = strsep(&tok, "("); diff --git a/drivers/leds/leds-cr0014114.c b/drivers/leds/leds-cr0014114.c index d03cfd3c0bfb..c87686bd7c18 100644 --- a/drivers/leds/leds-cr0014114.c +++ b/drivers/leds/leds-cr0014114.c @@ -266,14 +266,12 @@ static int cr0014114_probe(struct spi_device *spi) return 0; } -static int cr0014114_remove(struct spi_device *spi) +static void 
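
The dsp_pipeline fix above is the canonical strsep() idiom: strsep() advances the pointer it is handed, so the kstrdup() result has to be remembered in a second variable for kfree(), or the allocation is leaked (and freeing the advanced pointer would corrupt the heap). Minimal illustration, with the GFP flag and delimiter as in the driver:

	char *dup, *next, *tok;

	dup = next = kstrdup(cfg, GFP_ATOMIC);
	if (!dup)
		return -ENOMEM;

	while ((tok = strsep(&next, "|"))) {
		/* parse tok; next has been advanced past it */
	}

	kfree(dup);	/* dup still points at the start of the copy */
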
cr0014114_remove(struct spi_device *spi) { struct cr0014114 *priv = spi_get_drvdata(spi); cancel_delayed_work_sync(&priv->work); mutex_destroy(&priv->lock); - - return 0; } static const struct of_device_id cr0014114_dt_ids[] = { diff --git a/drivers/leds/leds-dac124s085.c b/drivers/leds/leds-dac124s085.c index 20dc9b9d7dea..cf5fb1195f87 100644 --- a/drivers/leds/leds-dac124s085.c +++ b/drivers/leds/leds-dac124s085.c @@ -85,15 +85,13 @@ eledcr: return ret; } -static int dac124s085_remove(struct spi_device *spi) +static void dac124s085_remove(struct spi_device *spi) { struct dac124s085 *dac = spi_get_drvdata(spi); int i; for (i = 0; i < ARRAY_SIZE(dac->leds); i++) led_classdev_unregister(&dac->leds[i].ldev); - - return 0; } static struct spi_driver dac124s085_driver = { diff --git a/drivers/leds/leds-el15203000.c b/drivers/leds/leds-el15203000.c index f9eb59a25570..7e7b617bcd56 100644 --- a/drivers/leds/leds-el15203000.c +++ b/drivers/leds/leds-el15203000.c @@ -315,13 +315,11 @@ static int el15203000_probe(struct spi_device *spi) return el15203000_probe_dt(priv); } -static int el15203000_remove(struct spi_device *spi) +static void el15203000_remove(struct spi_device *spi) { struct el15203000 *priv = spi_get_drvdata(spi); mutex_destroy(&priv->lock); - - return 0; } static const struct of_device_id el15203000_dt_ids[] = { diff --git a/drivers/leds/leds-spi-byte.c b/drivers/leds/leds-spi-byte.c index f1964c96fb15..2bc5c99daf51 100644 --- a/drivers/leds/leds-spi-byte.c +++ b/drivers/leds/leds-spi-byte.c @@ -130,13 +130,11 @@ static int spi_byte_probe(struct spi_device *spi) return 0; } -static int spi_byte_remove(struct spi_device *spi) +static void spi_byte_remove(struct spi_device *spi) { struct spi_byte_led *led = spi_get_drvdata(spi); mutex_destroy(&led->mutex); - - return 0; } static struct spi_driver spi_byte_driver = { diff --git a/drivers/md/dm.c b/drivers/md/dm.c index dcbd6d201619..997ace47bbd5 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -2077,7 +2077,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait) set_bit(DMF_FREEING, &md->flags); spin_unlock(&_minor_lock); - blk_set_queue_dying(md->queue); + blk_mark_disk_dead(md->disk); /* * Take suspend_lock so that presuspend and postsuspend methods diff --git a/drivers/media/spi/cxd2880-spi.c b/drivers/media/spi/cxd2880-spi.c index 6f2a66bc87fb..6be4e5528879 100644 --- a/drivers/media/spi/cxd2880-spi.c +++ b/drivers/media/spi/cxd2880-spi.c @@ -625,7 +625,7 @@ fail_regulator: return ret; } -static int +static void cxd2880_spi_remove(struct spi_device *spi) { struct cxd2880_dvb_spi *dvb_spi = spi_get_drvdata(spi); @@ -643,8 +643,6 @@ cxd2880_spi_remove(struct spi_device *spi) kfree(dvb_spi); pr_info("cxd2880_spi remove ok.\n"); - - return 0; } static const struct spi_device_id cxd2880_spi_id[] = { diff --git a/drivers/media/spi/gs1662.c b/drivers/media/spi/gs1662.c index f86ef1ca1288..75c21a93e6d0 100644 --- a/drivers/media/spi/gs1662.c +++ b/drivers/media/spi/gs1662.c @@ -458,13 +458,11 @@ static int gs_probe(struct spi_device *spi) return ret; } -static int gs_remove(struct spi_device *spi) +static void gs_remove(struct spi_device *spi) { struct v4l2_subdev *sd = spi_get_drvdata(spi); v4l2_device_unregister_subdev(sd); - - return 0; } static struct spi_driver gs_driver = { diff --git a/drivers/media/tuners/msi001.c b/drivers/media/tuners/msi001.c index 44247049a319..ad6c72c1ed04 100644 --- a/drivers/media/tuners/msi001.c +++ b/drivers/media/tuners/msi001.c @@ -472,7 +472,7 @@ err: return ret; } -static int 
msi001_remove(struct spi_device *spi) +static void msi001_remove(struct spi_device *spi) { struct v4l2_subdev *sd = spi_get_drvdata(spi); struct msi001_dev *dev = sd_to_msi001_dev(sd); @@ -486,7 +486,6 @@ static int msi001_remove(struct spi_device *spi) v4l2_device_unregister_subdev(&dev->sd); v4l2_ctrl_handler_free(&dev->hdl); kfree(dev); - return 0; } static const struct spi_device_id msi001_id_table[] = { diff --git a/drivers/mfd/arizona-spi.c b/drivers/mfd/arizona-spi.c index 9fe06dda3782..03620c8efe34 100644 --- a/drivers/mfd/arizona-spi.c +++ b/drivers/mfd/arizona-spi.c @@ -206,13 +206,11 @@ static int arizona_spi_probe(struct spi_device *spi) return arizona_dev_init(arizona); } -static int arizona_spi_remove(struct spi_device *spi) +static void arizona_spi_remove(struct spi_device *spi) { struct arizona *arizona = spi_get_drvdata(spi); arizona_dev_exit(arizona); - - return 0; } static const struct spi_device_id arizona_spi_ids[] = { diff --git a/drivers/mfd/da9052-spi.c b/drivers/mfd/da9052-spi.c index 5faf3766a5e2..b79a57b45c1e 100644 --- a/drivers/mfd/da9052-spi.c +++ b/drivers/mfd/da9052-spi.c @@ -55,12 +55,11 @@ static int da9052_spi_probe(struct spi_device *spi) return da9052_device_init(da9052, id->driver_data); } -static int da9052_spi_remove(struct spi_device *spi) +static void da9052_spi_remove(struct spi_device *spi) { struct da9052 *da9052 = spi_get_drvdata(spi); da9052_device_exit(da9052); - return 0; } static const struct spi_device_id da9052_spi_id[] = { diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c index 70fa18b04ad2..2280f756f422 100644 --- a/drivers/mfd/ezx-pcap.c +++ b/drivers/mfd/ezx-pcap.c @@ -392,7 +392,7 @@ static int pcap_add_subdev(struct pcap_chip *pcap, return ret; } -static int ezx_pcap_remove(struct spi_device *spi) +static void ezx_pcap_remove(struct spi_device *spi) { struct pcap_chip *pcap = spi_get_drvdata(spi); unsigned long flags; @@ -412,8 +412,6 @@ static int ezx_pcap_remove(struct spi_device *spi) irq_set_chip_and_handler(i, NULL, NULL); destroy_workqueue(pcap->workqueue); - - return 0; } static int ezx_pcap_probe(struct spi_device *spi) diff --git a/drivers/mfd/madera-spi.c b/drivers/mfd/madera-spi.c index e860f5ff0933..da84eb50e53a 100644 --- a/drivers/mfd/madera-spi.c +++ b/drivers/mfd/madera-spi.c @@ -112,13 +112,11 @@ static int madera_spi_probe(struct spi_device *spi) return madera_dev_init(madera); } -static int madera_spi_remove(struct spi_device *spi) +static void madera_spi_remove(struct spi_device *spi) { struct madera *madera = spi_get_drvdata(spi); madera_dev_exit(madera); - - return 0; } static const struct spi_device_id madera_spi_ids[] = { diff --git a/drivers/mfd/mc13xxx-spi.c b/drivers/mfd/mc13xxx-spi.c index 4d8913d647e6..f803527e5819 100644 --- a/drivers/mfd/mc13xxx-spi.c +++ b/drivers/mfd/mc13xxx-spi.c @@ -166,10 +166,9 @@ static int mc13xxx_spi_probe(struct spi_device *spi) return mc13xxx_common_init(&spi->dev); } -static int mc13xxx_spi_remove(struct spi_device *spi) +static void mc13xxx_spi_remove(struct spi_device *spi) { mc13xxx_common_exit(&spi->dev); - return 0; } static struct spi_driver mc13xxx_spi_driver = { diff --git a/drivers/mfd/rsmu_spi.c b/drivers/mfd/rsmu_spi.c index fec2b4ec477c..d2f3d8f1e05a 100644 --- a/drivers/mfd/rsmu_spi.c +++ b/drivers/mfd/rsmu_spi.c @@ -220,13 +220,11 @@ static int rsmu_spi_probe(struct spi_device *client) return rsmu_core_init(rsmu); } -static int rsmu_spi_remove(struct spi_device *client) +static void rsmu_spi_remove(struct spi_device *client) { struct rsmu_ddata 
*rsmu = spi_get_drvdata(client); rsmu_core_exit(rsmu); - - return 0; } static const struct spi_device_id rsmu_spi_id[] = { diff --git a/drivers/mfd/stmpe-spi.c b/drivers/mfd/stmpe-spi.c index 6c5915016be5..ad8055a0e286 100644 --- a/drivers/mfd/stmpe-spi.c +++ b/drivers/mfd/stmpe-spi.c @@ -102,13 +102,11 @@ stmpe_spi_probe(struct spi_device *spi) return stmpe_probe(&spi_ci, id->driver_data); } -static int stmpe_spi_remove(struct spi_device *spi) +static void stmpe_spi_remove(struct spi_device *spi) { struct stmpe *stmpe = spi_get_drvdata(spi); stmpe_remove(stmpe); - - return 0; } static const struct of_device_id stmpe_spi_of_match[] = { diff --git a/drivers/mfd/tps65912-spi.c b/drivers/mfd/tps65912-spi.c index d701926aa46e..bba38fbc781d 100644 --- a/drivers/mfd/tps65912-spi.c +++ b/drivers/mfd/tps65912-spi.c @@ -50,13 +50,11 @@ static int tps65912_spi_probe(struct spi_device *spi) return tps65912_device_init(tps); } -static int tps65912_spi_remove(struct spi_device *spi) +static void tps65912_spi_remove(struct spi_device *spi) { struct tps65912 *tps = spi_get_drvdata(spi); tps65912_device_exit(tps); - - return 0; } static const struct spi_device_id tps65912_spi_id_table[] = { diff --git a/drivers/misc/ad525x_dpot-spi.c b/drivers/misc/ad525x_dpot-spi.c index a9e75d80ad36..263055bda48b 100644 --- a/drivers/misc/ad525x_dpot-spi.c +++ b/drivers/misc/ad525x_dpot-spi.c @@ -90,10 +90,9 @@ static int ad_dpot_spi_probe(struct spi_device *spi) spi_get_device_id(spi)->name); } -static int ad_dpot_spi_remove(struct spi_device *spi) +static void ad_dpot_spi_remove(struct spi_device *spi) { ad_dpot_remove(&spi->dev); - return 0; } static const struct spi_device_id ad_dpot_spi_id[] = { diff --git a/drivers/misc/eeprom/ee1004.c b/drivers/misc/eeprom/ee1004.c index bb9c4512c968..9fbfe784d710 100644 --- a/drivers/misc/eeprom/ee1004.c +++ b/drivers/misc/eeprom/ee1004.c @@ -114,6 +114,9 @@ static ssize_t ee1004_eeprom_read(struct i2c_client *client, char *buf, if (offset + count > EE1004_PAGE_SIZE) count = EE1004_PAGE_SIZE - offset; + if (count > I2C_SMBUS_BLOCK_MAX) + count = I2C_SMBUS_BLOCK_MAX; + return i2c_smbus_read_i2c_block_data_or_emulated(client, offset, count, buf); } diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c index 1f15399e5cb4..b630625b3024 100644 --- a/drivers/misc/eeprom/eeprom_93xx46.c +++ b/drivers/misc/eeprom/eeprom_93xx46.c @@ -555,14 +555,12 @@ static int eeprom_93xx46_probe(struct spi_device *spi) return 0; } -static int eeprom_93xx46_remove(struct spi_device *spi) +static void eeprom_93xx46_remove(struct spi_device *spi) { struct eeprom_93xx46_dev *edev = spi_get_drvdata(spi); if (!(edev->pdata->flags & EE_READONLY)) device_remove_file(&spi->dev, &dev_attr_erase); - - return 0; } static struct spi_driver eeprom_93xx46_driver = { diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c index 4ccbf43e6bfa..aa1682b94a23 100644 --- a/drivers/misc/fastrpc.c +++ b/drivers/misc/fastrpc.c @@ -1288,7 +1288,14 @@ static int fastrpc_dmabuf_alloc(struct fastrpc_user *fl, char __user *argp) } if (copy_to_user(argp, &bp, sizeof(bp))) { - dma_buf_put(buf->dmabuf); + /* + * The usercopy failed, but we can't do much about it, as + * dma_buf_fd() already called fd_install() and made the + * file descriptor accessible for the current process. It + * might already be closed and dmabuf no longer valid when + * we reach this point. Therefore "leak" the fd and rely on + * the process exit path to do any required cleanup. 
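+ * Put differently: once fd_install() has run, the file descriptor
+ * table owns the dmabuf reference, so a dma_buf_put() here could drop
+ * a reference that now belongs to userspace or to a racing close().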
+ */ return -EFAULT; } diff --git a/drivers/misc/lattice-ecp3-config.c b/drivers/misc/lattice-ecp3-config.c index 98828030b5a4..bac4df2e5231 100644 --- a/drivers/misc/lattice-ecp3-config.c +++ b/drivers/misc/lattice-ecp3-config.c @@ -211,13 +211,11 @@ static int lattice_ecp3_probe(struct spi_device *spi) return 0; } -static int lattice_ecp3_remove(struct spi_device *spi) +static void lattice_ecp3_remove(struct spi_device *spi) { struct fpga_data *data = spi_get_drvdata(spi); wait_for_completion(&data->fw_loaded); - - return 0; } static const struct spi_device_id lattice_ecp3_id[] = { diff --git a/drivers/misc/lis3lv02d/lis3lv02d_spi.c b/drivers/misc/lis3lv02d/lis3lv02d_spi.c index 9e40dfb60742..203a108b8883 100644 --- a/drivers/misc/lis3lv02d/lis3lv02d_spi.c +++ b/drivers/misc/lis3lv02d/lis3lv02d_spi.c @@ -96,15 +96,13 @@ static int lis302dl_spi_probe(struct spi_device *spi) return lis3lv02d_init_device(&lis3_dev); } -static int lis302dl_spi_remove(struct spi_device *spi) +static void lis302dl_spi_remove(struct spi_device *spi) { struct lis3lv02d *lis3 = spi_get_drvdata(spi); lis3lv02d_joystick_disable(lis3); lis3lv02d_poweroff(lis3); lis3lv02d_remove_fs(&lis3_dev); - - return 0; } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c index dab7b92db790..50644f83e78c 100644 --- a/drivers/misc/sgi-xp/xpnet.c +++ b/drivers/misc/sgi-xp/xpnet.c @@ -247,7 +247,7 @@ xpnet_receive(short partid, int channel, struct xpnet_message *msg) xpnet_device->stats.rx_packets++; xpnet_device->stats.rx_bytes += skb->len + ETH_HLEN; - netif_rx_ni(skb); + netif_rx(skb); xpc_received(partid, channel, (void *)msg); } diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 4e61b28a002f..8d718aa56d33 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -1682,31 +1682,31 @@ static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req) struct mmc_card *card = mq->card; struct mmc_host *host = card->host; blk_status_t error = BLK_STS_OK; - int retries = 0; do { u32 status; int err; + int retries = 0; - mmc_blk_rw_rq_prep(mqrq, card, 1, mq); + while (retries++ <= MMC_READ_SINGLE_RETRIES) { + mmc_blk_rw_rq_prep(mqrq, card, 1, mq); - mmc_wait_for_req(host, mrq); + mmc_wait_for_req(host, mrq); - err = mmc_send_status(card, &status); - if (err) - goto error_exit; - - if (!mmc_host_is_spi(host) && - !mmc_ready_for_data(status)) { - err = mmc_blk_fix_state(card, req); + err = mmc_send_status(card, &status); if (err) goto error_exit; - } - if (mrq->cmd->error && retries++ < MMC_READ_SINGLE_RETRIES) - continue; + if (!mmc_host_is_spi(host) && + !mmc_ready_for_data(status)) { + err = mmc_blk_fix_state(card, req); + if (err) + goto error_exit; + } - retries = 0; + if (!mrq->cmd->error) + break; + } if (mrq->cmd->error || mrq->data->error || diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c index a576181e9db0..106dd204b1a7 100644 --- a/drivers/mmc/host/mmc_spi.c +++ b/drivers/mmc/host/mmc_spi.c @@ -1489,7 +1489,7 @@ nomem: } -static int mmc_spi_remove(struct spi_device *spi) +static void mmc_spi_remove(struct spi_device *spi) { struct mmc_host *mmc = dev_get_drvdata(&spi->dev); struct mmc_spi_host *host = mmc_priv(mmc); @@ -1507,7 +1507,6 @@ static int mmc_spi_remove(struct spi_device *spi) spi->max_speed_hz = mmc->f_max; mmc_spi_put_pdata(spi); mmc_free_host(mmc); - return 0; } static const struct spi_device_id mmc_spi_dev_ids[] = { diff --git a/drivers/mtd/devices/mchp23k256.c b/drivers/mtd/devices/mchp23k256.c index 
a8b31bddf14b..008df9d8898d 100644 --- a/drivers/mtd/devices/mchp23k256.c +++ b/drivers/mtd/devices/mchp23k256.c @@ -209,13 +209,11 @@ static int mchp23k256_probe(struct spi_device *spi) return 0; } -static int mchp23k256_remove(struct spi_device *spi) +static void mchp23k256_remove(struct spi_device *spi) { struct mchp23k256_flash *flash = spi_get_drvdata(spi); WARN_ON(mtd_device_unregister(&flash->mtd)); - - return 0; } static const struct of_device_id mchp23k256_of_table[] = { diff --git a/drivers/mtd/devices/mchp48l640.c b/drivers/mtd/devices/mchp48l640.c index 231a10790196..a3fd426df74b 100644 --- a/drivers/mtd/devices/mchp48l640.c +++ b/drivers/mtd/devices/mchp48l640.c @@ -341,13 +341,11 @@ static int mchp48l640_probe(struct spi_device *spi) return 0; } -static int mchp48l640_remove(struct spi_device *spi) +static void mchp48l640_remove(struct spi_device *spi) { struct mchp48l640_flash *flash = spi_get_drvdata(spi); WARN_ON(mtd_device_unregister(&flash->mtd)); - - return 0; } static const struct of_device_id mchp48l640_of_table[] = { diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c index 734878abaa23..134e27328597 100644 --- a/drivers/mtd/devices/mtd_dataflash.c +++ b/drivers/mtd/devices/mtd_dataflash.c @@ -916,7 +916,7 @@ static int dataflash_probe(struct spi_device *spi) return status; } -static int dataflash_remove(struct spi_device *spi) +static void dataflash_remove(struct spi_device *spi) { struct dataflash *flash = spi_get_drvdata(spi); @@ -925,8 +925,6 @@ static int dataflash_remove(struct spi_device *spi) WARN_ON(mtd_device_unregister(&flash->mtd)); kfree(flash); - - return 0; } static struct spi_driver dataflash_driver = { diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c index 6ed6c51fac69..d503821a3e60 100644 --- a/drivers/mtd/devices/phram.c +++ b/drivers/mtd/devices/phram.c @@ -264,16 +264,20 @@ static int phram_setup(const char *val) } } - if (erasesize) - div_u64_rem(len, (uint32_t)erasesize, &rem); - if (len == 0 || erasesize == 0 || erasesize > len - || erasesize > UINT_MAX || rem) { + || erasesize > UINT_MAX) { parse_err("illegal erasesize or len\n"); ret = -EINVAL; goto error; } + div_u64_rem(len, (uint32_t)erasesize, &rem); + if (rem) { + parse_err("len is not multiple of erasesize\n"); + ret = -EINVAL; + goto error; + } + ret = register_device(name, start, len, (uint32_t)erasesize); if (ret) goto error; diff --git a/drivers/mtd/devices/sst25l.c b/drivers/mtd/devices/sst25l.c index 7f124c1bfa40..8813994ce9f4 100644 --- a/drivers/mtd/devices/sst25l.c +++ b/drivers/mtd/devices/sst25l.c @@ -398,13 +398,11 @@ static int sst25l_probe(struct spi_device *spi) return 0; } -static int sst25l_remove(struct spi_device *spi) +static void sst25l_remove(struct spi_device *spi) { struct sst25l_flash *flash = spi_get_drvdata(spi); WARN_ON(mtd_device_unregister(&flash->mtd)); - - return 0; } static struct spi_driver sst25l_driver = { diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index 70f492dce158..eef87b28d6c8 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c @@ -546,6 +546,7 @@ static int mtd_nvmem_add(struct mtd_info *mtd) config.stride = 1; config.read_only = true; config.root_only = true; + config.ignore_wp = true; config.no_of_node = !of_device_is_compatible(node, "nvmem-cells"); config.priv = mtd; @@ -833,6 +834,7 @@ static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd, config.owner = THIS_MODULE; config.type = NVMEM_TYPE_OTP; config.root_only = true; + config.ignore_wp = 
true; config.reg_read = reg_read; config.size = size; config.of_node = np; diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig index 20408b7db540..820e5dc3bc9b 100644 --- a/drivers/mtd/nand/raw/Kconfig +++ b/drivers/mtd/nand/raw/Kconfig @@ -42,7 +42,7 @@ config MTD_NAND_OMAP2 tristate "OMAP2, OMAP3, OMAP4 and Keystone NAND controller" depends on ARCH_OMAP2PLUS || ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST depends on HAS_IOMEM - select OMAP_GPMC if ARCH_K3 + depends on OMAP_GPMC help Support for NAND flash on Texas Instruments OMAP2, OMAP3, OMAP4 and Keystone platforms. diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c index f75929783b94..aee78f5f4f15 100644 --- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c +++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c @@ -2106,7 +2106,7 @@ static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip, mtd->oobsize / trans, host->hwcfg.sector_size_1k); - if (!ret) { + if (ret != -EBADMSG) { *err_addr = brcmnand_get_uncorrecc_addr(ctrl); if (*err_addr) diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c index 1b64c5a5140d..ded4df473928 100644 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c @@ -2285,7 +2285,7 @@ static int gpmi_nfc_exec_op(struct nand_chip *chip, this->hw.must_apply_timings = false; ret = gpmi_nfc_apply_timings(this); if (ret) - return ret; + goto out_pm; } dev_dbg(this->dev, "%s: %d instructions\n", __func__, op->ninstrs); @@ -2414,6 +2414,7 @@ unmap: this->bch = false; +out_pm: pm_runtime_mark_last_busy(this->dev); pm_runtime_put_autosuspend(this->dev); diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c index efe0ffe4f1ab..9054559e52dd 100644 --- a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c +++ b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c @@ -68,9 +68,14 @@ static struct ingenic_ecc *ingenic_ecc_get(struct device_node *np) struct ingenic_ecc *ecc; pdev = of_find_device_by_node(np); - if (!pdev || !platform_get_drvdata(pdev)) + if (!pdev) return ERR_PTR(-EPROBE_DEFER); + if (!platform_get_drvdata(pdev)) { + put_device(&pdev->dev); + return ERR_PTR(-EPROBE_DEFER); + } + ecc = platform_get_drvdata(pdev); clk_prepare_enable(ecc->clk); diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c index 7c6efa3b6255..1a77542c6d67 100644 --- a/drivers/mtd/nand/raw/qcom_nandc.c +++ b/drivers/mtd/nand/raw/qcom_nandc.c @@ -2,7 +2,6 @@ /* * Copyright (c) 2016, The Linux Foundation. All rights reserved. 
*/ - #include <linux/clk.h> #include <linux/slab.h> #include <linux/bitops.h> @@ -3073,10 +3072,6 @@ static int qcom_nandc_probe(struct platform_device *pdev) if (dma_mapping_error(dev, nandc->base_dma)) return -ENXIO; - ret = qcom_nandc_alloc(nandc); - if (ret) - goto err_nandc_alloc; - ret = clk_prepare_enable(nandc->core_clk); if (ret) goto err_core_clk; @@ -3085,6 +3080,10 @@ static int qcom_nandc_probe(struct platform_device *pdev) if (ret) goto err_aon_clk; + ret = qcom_nandc_alloc(nandc); + if (ret) + goto err_nandc_alloc; + ret = qcom_nandc_setup(nandc); if (ret) goto err_setup; @@ -3096,15 +3095,14 @@ static int qcom_nandc_probe(struct platform_device *pdev) return 0; err_setup: + qcom_nandc_unalloc(nandc); +err_nandc_alloc: clk_disable_unprepare(nandc->aon_clk); err_aon_clk: clk_disable_unprepare(nandc->core_clk); err_core_clk: - qcom_nandc_unalloc(nandc); -err_nandc_alloc: dma_unmap_resource(dev, res->start, resource_size(res), DMA_BIDIRECTIONAL, 0); - return ret; } diff --git a/drivers/mtd/parsers/qcomsmempart.c b/drivers/mtd/parsers/qcomsmempart.c index 06a818cd2433..4311b89d8df0 100644 --- a/drivers/mtd/parsers/qcomsmempart.c +++ b/drivers/mtd/parsers/qcomsmempart.c @@ -58,11 +58,11 @@ static int parse_qcomsmem_part(struct mtd_info *mtd, const struct mtd_partition **pparts, struct mtd_part_parser_data *data) { + size_t len = SMEM_FLASH_PTABLE_HDR_LEN; + int ret, i, j, tmpparts, numparts = 0; struct smem_flash_pentry *pentry; struct smem_flash_ptable *ptable; - size_t len = SMEM_FLASH_PTABLE_HDR_LEN; struct mtd_partition *parts; - int ret, i, numparts; char *name, *c; if (IS_ENABLED(CONFIG_MTD_SPI_NOR_USE_4K_SECTORS) @@ -75,7 +75,8 @@ static int parse_qcomsmem_part(struct mtd_info *mtd, pr_debug("Parsing partition table info from SMEM\n"); ptable = qcom_smem_get(SMEM_APPS, SMEM_AARM_PARTITION_TABLE, &len); if (IS_ERR(ptable)) { - pr_err("Error reading partition table header\n"); + if (PTR_ERR(ptable) != -EPROBE_DEFER) + pr_err("Error reading partition table header\n"); return PTR_ERR(ptable); } @@ -87,8 +88,8 @@ static int parse_qcomsmem_part(struct mtd_info *mtd, } /* Ensure that # of partitions is less than the max we have allocated */ - numparts = le32_to_cpu(ptable->numparts); - if (numparts > SMEM_FLASH_PTABLE_MAX_PARTS_V4) { + tmpparts = le32_to_cpu(ptable->numparts); + if (tmpparts > SMEM_FLASH_PTABLE_MAX_PARTS_V4) { pr_err("Partition numbers exceed the max limit\n"); return -EINVAL; } @@ -116,11 +117,17 @@ static int parse_qcomsmem_part(struct mtd_info *mtd, return PTR_ERR(ptable); } + for (i = 0; i < tmpparts; i++) { + pentry = &ptable->pentry[i]; + if (pentry->name[0] != '\0') + numparts++; + } + parts = kcalloc(numparts, sizeof(*parts), GFP_KERNEL); if (!parts) return -ENOMEM; - for (i = 0; i < numparts; i++) { + for (i = 0, j = 0; i < tmpparts; i++) { pentry = &ptable->pentry[i]; if (pentry->name[0] == '\0') continue; @@ -135,24 +142,25 @@ static int parse_qcomsmem_part(struct mtd_info *mtd, for (c = name; *c != '\0'; c++) *c = tolower(*c); - parts[i].name = name; - parts[i].offset = le32_to_cpu(pentry->offset) * mtd->erasesize; - parts[i].mask_flags = pentry->attr; - parts[i].size = le32_to_cpu(pentry->length) * mtd->erasesize; + parts[j].name = name; + parts[j].offset = le32_to_cpu(pentry->offset) * mtd->erasesize; + parts[j].mask_flags = pentry->attr; + parts[j].size = le32_to_cpu(pentry->length) * mtd->erasesize; pr_debug("%d: %s offs=0x%08x size=0x%08x attr:0x%08x\n", i, pentry->name, le32_to_cpu(pentry->offset), le32_to_cpu(pentry->length), pentry->attr); + 
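/* j advances only for named entries, keeping parts[] dense */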
j++; } pr_debug("SMEM partition table found: ver: %d len: %d\n", - le32_to_cpu(ptable->version), numparts); + le32_to_cpu(ptable->version), tmpparts); *pparts = parts; return numparts; out_free_parts: - while (--i >= 0) - kfree(parts[i].name); + while (--j >= 0) + kfree(parts[j].name); kfree(parts); *pparts = NULL; @@ -166,6 +174,8 @@ static void parse_qcomsmem_cleanup(const struct mtd_partition *pparts, for (i = 0; i < nr_parts; i++) kfree(pparts[i].name); + + kfree(pparts); } static const struct of_device_id qcomsmem_of_match_table[] = { diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 50b23e71065f..3f1192d3c52d 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -31,7 +31,7 @@ obj-$(CONFIG_TUN) += tun.o obj-$(CONFIG_TAP) += tap.o obj-$(CONFIG_VETH) += veth.o obj-$(CONFIG_VIRTIO_NET) += virtio_net.o -obj-$(CONFIG_VXLAN) += vxlan.o +obj-$(CONFIG_VXLAN) += vxlan/ obj-$(CONFIG_GENEVE) += geneve.o obj-$(CONFIG_BAREUDP) += bareudp.o obj-$(CONFIG_GTP) += gtp.o diff --git a/drivers/net/amt.c b/drivers/net/amt.c index f1a36d7e2151..10455c9b9da0 100644 --- a/drivers/net/amt.c +++ b/drivers/net/amt.c @@ -2373,7 +2373,7 @@ static bool amt_membership_query_handler(struct amt_dev *amt, skb->pkt_type = PACKET_MULTICAST; skb->ip_summed = CHECKSUM_NONE; len = skb->len; - if (netif_rx(skb) == NET_RX_SUCCESS) { + if (__netif_rx(skb) == NET_RX_SUCCESS) { amt_update_gw_status(amt, AMT_STATUS_RECEIVED_QUERY, true); dev_sw_netstats_rx_add(amt->dev, len); } else { @@ -2470,7 +2470,7 @@ report: skb->pkt_type = PACKET_MULTICAST; skb->ip_summed = CHECKSUM_NONE; len = skb->len; - if (netif_rx(skb) == NET_RX_SUCCESS) { + if (__netif_rx(skb) == NET_RX_SUCCESS) { amt_update_relay_status(tunnel, AMT_STATUS_RECEIVED_UPDATE, true); dev_sw_netstats_rx_add(amt->dev, len); diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c index 6382e1937cca..c580acb8b1d3 100644 --- a/drivers/net/arcnet/com20020-pci.c +++ b/drivers/net/arcnet/com20020-pci.c @@ -138,6 +138,9 @@ static int com20020pci_probe(struct pci_dev *pdev, return -ENOMEM; ci = (struct com20020_pci_card_info *)id->driver_data; + if (!ci) + return -EINVAL; + priv->ci = ci; mm = &ci->misc_map; diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index 9fd1d6cba3cd..a86b1f71762e 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -225,7 +225,7 @@ static inline int __check_agg_selection_timer(struct port *port) if (bond == NULL) return 0; - return BOND_AD_INFO(bond).agg_select_timer ? 1 : 0; + return atomic_read(&BOND_AD_INFO(bond).agg_select_timer) ? 1 : 0; } /** @@ -1995,7 +1995,7 @@ static void ad_marker_response_received(struct bond_marker *marker, */ void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout) { - BOND_AD_INFO(bond).agg_select_timer = timeout; + atomic_set(&BOND_AD_INFO(bond).agg_select_timer, timeout); } /** @@ -2279,6 +2279,28 @@ void bond_3ad_update_ad_actor_settings(struct bonding *bond) } /** + * bond_agg_timer_advance - advance agg_select_timer + * @bond: bonding structure + * + * Return true when agg_select_timer reaches 0. 
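+ * The countdown uses an atomic_cmpxchg() loop rather than a plain
+ * atomic_dec() so that concurrent runs can neither push the counter
+ * below zero nor both observe the final 1 -> 0 transition.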
+ */ +static bool bond_agg_timer_advance(struct bonding *bond) +{ + int val, nval; + + while (1) { + val = atomic_read(&BOND_AD_INFO(bond).agg_select_timer); + if (!val) + return false; + nval = val - 1; + if (atomic_cmpxchg(&BOND_AD_INFO(bond).agg_select_timer, + val, nval) == val) + break; + } + return nval == 0; +} + +/** * bond_3ad_state_machine_handler - handle state machines timeout * @work: work context to fetch bonding struct to work on from * @@ -2313,9 +2335,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work) if (!bond_has_slaves(bond)) goto re_arm; - /* check if agg_select_timer timer after initialize is timed out */ - if (BOND_AD_INFO(bond).agg_select_timer && - !(--BOND_AD_INFO(bond).agg_select_timer)) { + if (bond_agg_timer_advance(bond)) { slave = bond_first_slave_rcu(bond); port = slave ? &(SLAVE_AD_INFO(slave)->port) : NULL; diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 617c2bf8c5a7..55e0ba2a163d 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -88,6 +88,7 @@ #if IS_ENABLED(CONFIG_TLS_DEVICE) #include <net/tls.h> #endif +#include <net/ip6_route.h> #include "bonding_priv.h" @@ -2379,10 +2380,9 @@ static int __bond_release_one(struct net_device *bond_dev, bond_select_active_slave(bond); } - if (!bond_has_slaves(bond)) { - bond_set_carrier(bond); + bond_set_carrier(bond); + if (!bond_has_slaves(bond)) eth_hw_addr_random(bond_dev); - } unblock_netpoll_tx(); synchronize_rcu(); @@ -2794,31 +2794,15 @@ static bool bond_has_this_ip(struct bonding *bond, __be32 ip) return ret; } -/* We go to the (large) trouble of VLAN tagging ARP frames because - * switches in VLAN mode (especially if ports are configured as - * "native" to a VLAN) might not pass non-tagged frames. - */ -static void bond_arp_send(struct slave *slave, int arp_op, __be32 dest_ip, - __be32 src_ip, struct bond_vlan_tag *tags) +static bool bond_handle_vlan(struct slave *slave, struct bond_vlan_tag *tags, + struct sk_buff *skb) { - struct sk_buff *skb; - struct bond_vlan_tag *outer_tag = tags; - struct net_device *slave_dev = slave->dev; struct net_device *bond_dev = slave->bond->dev; - - slave_dbg(bond_dev, slave_dev, "arp %d on slave: dst %pI4 src %pI4\n", - arp_op, &dest_ip, &src_ip); - - skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip, - NULL, slave_dev->dev_addr, NULL); - - if (!skb) { - net_err_ratelimited("ARP packet allocation failed\n"); - return; - } + struct net_device *slave_dev = slave->dev; + struct bond_vlan_tag *outer_tag = tags; if (!tags || tags->vlan_proto == VLAN_N_VID) - goto xmit; + return true; tags++; @@ -2835,7 +2819,7 @@ static void bond_arp_send(struct slave *slave, int arp_op, __be32 dest_ip, tags->vlan_id); if (!skb) { net_err_ratelimited("failed to insert inner VLAN tag\n"); - return; + return false; } tags++; @@ -2848,8 +2832,34 @@ static void bond_arp_send(struct slave *slave, int arp_op, __be32 dest_ip, outer_tag->vlan_id); } -xmit: - arp_xmit(skb); + return true; +} + +/* We go to the (large) trouble of VLAN tagging ARP frames because + * switches in VLAN mode (especially if ports are configured as + * "native" to a VLAN) might not pass non-tagged frames. 
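+ * The tag handling itself now lives in bond_handle_vlan(), shared with
+ * the IPv6 NS path below: inner tags are pushed into the skb with
+ * vlan_insert_tag(), while the outermost tag is handed to the normal
+ * hw-accel transmit path.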
+ */ +static void bond_arp_send(struct slave *slave, int arp_op, __be32 dest_ip, + __be32 src_ip, struct bond_vlan_tag *tags) +{ + struct net_device *bond_dev = slave->bond->dev; + struct net_device *slave_dev = slave->dev; + struct sk_buff *skb; + + slave_dbg(bond_dev, slave_dev, "arp %d on slave: dst %pI4 src %pI4\n", + arp_op, &dest_ip, &src_ip); + + skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip, + NULL, slave_dev->dev_addr, NULL); + + if (!skb) { + net_err_ratelimited("ARP packet allocation failed\n"); + return; + } + + if (bond_handle_vlan(slave, tags, skb)) + arp_xmit(skb); + return; } /* Validate the device path between the @start_dev and the @end_dev. @@ -2966,30 +2976,17 @@ static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32 slave->target_last_arp_rx[i] = jiffies; } -int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond, - struct slave *slave) +static int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond, + struct slave *slave) { struct arphdr *arp = (struct arphdr *)skb->data; struct slave *curr_active_slave, *curr_arp_slave; unsigned char *arp_ptr; __be32 sip, tip; - int is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP); unsigned int alen; - if (!slave_do_arp_validate(bond, slave)) { - if ((slave_do_arp_validate_only(bond) && is_arp) || - !slave_do_arp_validate_only(bond)) - slave->last_rx = jiffies; - return RX_HANDLER_ANOTHER; - } else if (!is_arp) { - return RX_HANDLER_ANOTHER; - } - alen = arp_hdr_len(bond->dev); - slave_dbg(bond->dev, slave->dev, "%s: skb->dev %s\n", - __func__, skb->dev->name); - if (alen > skb_headlen(skb)) { arp = kmalloc(alen, GFP_ATOMIC); if (!arp) @@ -3060,6 +3057,216 @@ out_unlock: return RX_HANDLER_ANOTHER; } +#if IS_ENABLED(CONFIG_IPV6) +static void bond_ns_send(struct slave *slave, const struct in6_addr *daddr, + const struct in6_addr *saddr, struct bond_vlan_tag *tags) +{ + struct net_device *bond_dev = slave->bond->dev; + struct net_device *slave_dev = slave->dev; + struct in6_addr mcaddr; + struct sk_buff *skb; + + slave_dbg(bond_dev, slave_dev, "NS on slave: dst %pI6c src %pI6c\n", + daddr, saddr); + + skb = ndisc_ns_create(slave_dev, daddr, saddr, 0); + if (!skb) { + net_err_ratelimited("NS packet allocation failed\n"); + return; + } + + addrconf_addr_solict_mult(daddr, &mcaddr); + if (bond_handle_vlan(slave, tags, skb)) + ndisc_send_skb(skb, &mcaddr, saddr); +} + +static void bond_ns_send_all(struct bonding *bond, struct slave *slave) +{ + struct in6_addr *targets = bond->params.ns_targets; + struct bond_vlan_tag *tags; + struct dst_entry *dst; + struct in6_addr saddr; + struct flowi6 fl6; + int i; + + for (i = 0; i < BOND_MAX_NS_TARGETS && !ipv6_addr_any(&targets[i]); i++) { + slave_dbg(bond->dev, slave->dev, "%s: target %pI6c\n", + __func__, &targets[i]); + tags = NULL; + + /* Find out through which dev should the packet go */ + memset(&fl6, 0, sizeof(struct flowi6)); + fl6.daddr = targets[i]; + fl6.flowi6_oif = bond->dev->ifindex; + + dst = ip6_route_output(dev_net(bond->dev), NULL, &fl6); + if (dst->error) { + dst_release(dst); + /* there's no route to target - try to send arp + * probe to generate any traffic (arp_validate=0) + */ + if (bond->params.arp_validate) + pr_warn_once("%s: no route to ns_ip6_target %pI6c and arp_validate is set\n", + bond->dev->name, + &targets[i]); + bond_ns_send(slave, &targets[i], &in6addr_any, tags); + continue; + } + + /* bond device itself */ + if (dst->dev == bond->dev) + goto found; + + rcu_read_lock(); + tags = 
bond_verify_device_path(bond->dev, dst->dev, 0); + rcu_read_unlock(); + + if (!IS_ERR_OR_NULL(tags)) + goto found; + + /* Not our device - skip */ + slave_dbg(bond->dev, slave->dev, "no path to ns_ip6_target %pI6c via dst->dev %s\n", + &targets[i], dst->dev ? dst->dev->name : "NULL"); + + dst_release(dst); + continue; + +found: + if (!ipv6_dev_get_saddr(dev_net(dst->dev), dst->dev, &targets[i], 0, &saddr)) + bond_ns_send(slave, &targets[i], &saddr, tags); + dst_release(dst); + kfree(tags); + } +} + +static int bond_confirm_addr6(struct net_device *dev, + struct netdev_nested_priv *priv) +{ + struct in6_addr *addr = (struct in6_addr *)priv->data; + + return ipv6_chk_addr(dev_net(dev), addr, dev, 0); +} + +static bool bond_has_this_ip6(struct bonding *bond, struct in6_addr *addr) +{ + struct netdev_nested_priv priv = { + .data = addr, + }; + int ret = false; + + if (bond_confirm_addr6(bond->dev, &priv)) + return true; + + rcu_read_lock(); + if (netdev_walk_all_upper_dev_rcu(bond->dev, bond_confirm_addr6, &priv)) + ret = true; + rcu_read_unlock(); + + return ret; +} + +static void bond_validate_ns(struct bonding *bond, struct slave *slave, + struct in6_addr *saddr, struct in6_addr *daddr) +{ + int i; + + if (ipv6_addr_any(saddr) || !bond_has_this_ip6(bond, daddr)) { + slave_dbg(bond->dev, slave->dev, "%s: sip %pI6c tip %pI6c not found\n", + __func__, saddr, daddr); + return; + } + + i = bond_get_targets_ip6(bond->params.ns_targets, saddr); + if (i == -1) { + slave_dbg(bond->dev, slave->dev, "%s: sip %pI6c not found in targets\n", + __func__, saddr); + return; + } + slave->last_rx = jiffies; + slave->target_last_arp_rx[i] = jiffies; +} + +static int bond_na_rcv(const struct sk_buff *skb, struct bonding *bond, + struct slave *slave) +{ + struct slave *curr_active_slave, *curr_arp_slave; + struct icmp6hdr *hdr = icmp6_hdr(skb); + struct in6_addr *saddr, *daddr; + + if (skb->pkt_type == PACKET_OTHERHOST || + skb->pkt_type == PACKET_LOOPBACK || + hdr->icmp6_type != NDISC_NEIGHBOUR_ADVERTISEMENT) + goto out; + + saddr = &ipv6_hdr(skb)->saddr; + daddr = &ipv6_hdr(skb)->daddr; + + slave_dbg(bond->dev, slave->dev, "%s: %s/%d av %d sv %d sip %pI6c tip %pI6c\n", + __func__, slave->dev->name, bond_slave_state(slave), + bond->params.arp_validate, slave_do_arp_validate(bond, slave), + saddr, daddr); + + curr_active_slave = rcu_dereference(bond->curr_active_slave); + curr_arp_slave = rcu_dereference(bond->current_arp_slave); + + /* We 'trust' the received ARP enough to validate it if: + * see bond_arp_rcv(). 
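+ * - the slave is itself an active slave, or
+ * - the current active slave received traffic more recently than its
+ *   last link-up, or
+ * - the current probing slave (curr_arp_slave) transmitted within the
+ *   last arp_interval.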
+ */ + if (bond_is_active_slave(slave)) + bond_validate_ns(bond, slave, saddr, daddr); + else if (curr_active_slave && + time_after(slave_last_rx(bond, curr_active_slave), + curr_active_slave->last_link_up)) + bond_validate_ns(bond, slave, saddr, daddr); + else if (curr_arp_slave && + bond_time_in_interval(bond, + dev_trans_start(curr_arp_slave->dev), 1)) + bond_validate_ns(bond, slave, saddr, daddr); + +out: + return RX_HANDLER_ANOTHER; +} +#endif + +int bond_rcv_validate(const struct sk_buff *skb, struct bonding *bond, + struct slave *slave) +{ +#if IS_ENABLED(CONFIG_IPV6) + bool is_ipv6 = skb->protocol == __cpu_to_be16(ETH_P_IPV6); +#endif + bool is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP); + + slave_dbg(bond->dev, slave->dev, "%s: skb->dev %s\n", + __func__, skb->dev->name); + + /* Use arp validate logic for both ARP and NS */ + if (!slave_do_arp_validate(bond, slave)) { + if ((slave_do_arp_validate_only(bond) && is_arp) || +#if IS_ENABLED(CONFIG_IPV6) + (slave_do_arp_validate_only(bond) && is_ipv6) || +#endif + !slave_do_arp_validate_only(bond)) + slave->last_rx = jiffies; + return RX_HANDLER_ANOTHER; + } else if (is_arp) { + return bond_arp_rcv(skb, bond, slave); +#if IS_ENABLED(CONFIG_IPV6) + } else if (is_ipv6) { + return bond_na_rcv(skb, bond, slave); +#endif + } else { + return RX_HANDLER_ANOTHER; + } +} + +static void bond_send_validate(struct bonding *bond, struct slave *slave) +{ + bond_arp_send_all(bond, slave); +#if IS_ENABLED(CONFIG_IPV6) + bond_ns_send_all(bond, slave); +#endif +} + /* function to verify if we're in the arp_interval timeslice, returns true if * (last_act - arp_interval) <= jiffies <= (last_act + mod * arp_interval + * arp_interval/2) . the arp_interval/2 is needed for really fast networks. @@ -3155,7 +3362,7 @@ static void bond_loadbalance_arp_mon(struct bonding *bond) * to be unstable during low/no traffic periods */ if (bond_slave_is_up(slave)) - bond_arp_send_all(bond, slave); + bond_send_validate(bond, slave); } rcu_read_unlock(); @@ -3369,7 +3576,7 @@ static bool bond_ab_arp_probe(struct bonding *bond) curr_active_slave->dev->name); if (curr_active_slave) { - bond_arp_send_all(bond, curr_active_slave); + bond_send_validate(bond, curr_active_slave); return should_notify_rtnl; } @@ -3421,7 +3628,7 @@ static bool bond_ab_arp_probe(struct bonding *bond) bond_set_slave_link_state(new_slave, BOND_LINK_BACK, BOND_SLAVE_NOTIFY_LATER); bond_set_slave_active_flags(new_slave, BOND_SLAVE_NOTIFY_LATER); - bond_arp_send_all(bond, new_slave); + bond_send_validate(bond, new_slave); new_slave->last_link_up = jiffies; rcu_assign_pointer(bond->current_arp_slave, new_slave); @@ -3957,7 +4164,7 @@ static int bond_open(struct net_device *bond_dev) if (bond->params.arp_interval) { /* arp interval, in milliseconds. 
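* A non-zero interval arms the periodic probe worker, and recv_probe
* is pointed at bond_rcv_validate() so that ARP replies and IPv6
* neighbour advertisements both refresh the slave's last_rx.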
*/ queue_delayed_work(bond->wq, &bond->arp_work, 0); - bond->recv_probe = bond_arp_rcv; + bond->recv_probe = bond_rcv_validate; } if (BOND_MODE(bond) == BOND_MODE_8023AD) { @@ -5938,6 +6145,7 @@ static int bond_check_params(struct bond_params *params) strscpy_pad(params->primary, primary, sizeof(params->primary)); memcpy(params->arp_targets, arp_target, sizeof(arp_target)); + memset(params->ns_targets, 0, sizeof(struct in6_addr) * BOND_MAX_NS_TARGETS); return 0; } diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c index 1007bf6d385d..f427fa1737c7 100644 --- a/drivers/net/bonding/bond_netlink.c +++ b/drivers/net/bonding/bond_netlink.c @@ -14,6 +14,7 @@ #include <net/netlink.h> #include <net/rtnetlink.h> #include <net/bonding.h> +#include <net/ipv6.h> static size_t bond_get_slave_size(const struct net_device *bond_dev, const struct net_device *slave_dev) @@ -111,6 +112,7 @@ static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = { [IFLA_BOND_TLB_DYNAMIC_LB] = { .type = NLA_U8 }, [IFLA_BOND_PEER_NOTIF_DELAY] = { .type = NLA_U32 }, [IFLA_BOND_MISSED_MAX] = { .type = NLA_U8 }, + [IFLA_BOND_NS_IP6_TARGET] = { .type = NLA_NESTED }, }; static const struct nla_policy bond_slave_policy[IFLA_BOND_SLAVE_MAX + 1] = { @@ -272,6 +274,40 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[], if (err) return err; } +#if IS_ENABLED(CONFIG_IPV6) + if (data[IFLA_BOND_NS_IP6_TARGET]) { + struct nlattr *attr; + int i = 0, rem; + + bond_option_ns_ip6_targets_clear(bond); + nla_for_each_nested(attr, data[IFLA_BOND_NS_IP6_TARGET], rem) { + struct in6_addr addr6; + + if (nla_len(attr) < sizeof(addr6)) { + NL_SET_ERR_MSG(extack, "Invalid IPv6 address"); + return -EINVAL; + } + + addr6 = nla_get_in6_addr(attr); + + if (ipv6_addr_type(&addr6) & IPV6_ADDR_LINKLOCAL) { + NL_SET_ERR_MSG(extack, "Invalid IPv6 addr6"); + return -EINVAL; + } + + bond_opt_initextra(&newval, &addr6, sizeof(addr6)); + err = __bond_opt_set(bond, BOND_OPT_NS_TARGETS, + &newval); + if (err) + break; + i++; + } + if (i == 0 && bond->params.arp_interval) + netdev_warn(bond->dev, "Removing last ns target with arp_interval on\n"); + if (err) + return err; + } +#endif if (data[IFLA_BOND_ARP_VALIDATE]) { int arp_validate = nla_get_u32(data[IFLA_BOND_ARP_VALIDATE]); @@ -526,6 +562,9 @@ static size_t bond_get_size(const struct net_device *bond_dev) nla_total_size(sizeof(u8)) + /* IFLA_BOND_TLB_DYNAMIC_LB */ nla_total_size(sizeof(u32)) + /* IFLA_BOND_PEER_NOTIF_DELAY */ nla_total_size(sizeof(u8)) + /* IFLA_BOND_MISSED_MAX */ + /* IFLA_BOND_NS_IP6_TARGET */ + nla_total_size(sizeof(struct nlattr)) + + nla_total_size(sizeof(struct in6_addr)) * BOND_MAX_NS_TARGETS + 0; } @@ -603,6 +642,26 @@ static int bond_fill_info(struct sk_buff *skb, bond->params.arp_all_targets)) goto nla_put_failure; +#if IS_ENABLED(CONFIG_IPV6) + targets = nla_nest_start(skb, IFLA_BOND_NS_IP6_TARGET); + if (!targets) + goto nla_put_failure; + + targets_added = 0; + for (i = 0; i < BOND_MAX_NS_TARGETS; i++) { + if (!ipv6_addr_any(&bond->params.ns_targets[i])) { + if (nla_put_in6_addr(skb, i, &bond->params.ns_targets[i])) + goto nla_put_failure; + targets_added = 1; + } + } + + if (targets_added) + nla_nest_end(skb, targets); + else + nla_nest_cancel(skb, targets); +#endif + primary = rtnl_dereference(bond->primary_slave); if (primary && nla_put_u32(skb, IFLA_BOND_PRIMARY, primary->dev->ifindex)) diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index 2e8484a91a0e..64f7db2627ce 100644 
--- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c @@ -34,6 +34,10 @@ static int bond_option_arp_ip_target_add(struct bonding *bond, __be32 target); static int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target); static int bond_option_arp_ip_targets_set(struct bonding *bond, const struct bond_opt_value *newval); +#if IS_ENABLED(CONFIG_IPV6) +static int bond_option_ns_ip6_targets_set(struct bonding *bond, + const struct bond_opt_value *newval); +#endif static int bond_option_arp_validate_set(struct bonding *bond, const struct bond_opt_value *newval); static int bond_option_arp_all_targets_set(struct bonding *bond, @@ -295,6 +299,15 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = { .flags = BOND_OPTFLAG_RAWVAL, .set = bond_option_arp_ip_targets_set }, +#if IS_ENABLED(CONFIG_IPV6) + [BOND_OPT_NS_TARGETS] = { + .id = BOND_OPT_NS_TARGETS, + .name = "ns_ip6_target", + .desc = "NS targets in ffff:ffff::ffff:ffff form", + .flags = BOND_OPTFLAG_RAWVAL, + .set = bond_option_ns_ip6_targets_set + }, +#endif [BOND_OPT_DOWNDELAY] = { .id = BOND_OPT_DOWNDELAY, .name = "downdelay", @@ -1052,7 +1065,7 @@ static int bond_option_arp_interval_set(struct bonding *bond, cancel_delayed_work_sync(&bond->arp_work); } else { /* arp_validate can be set only in active-backup mode */ - bond->recv_probe = bond_arp_rcv; + bond->recv_probe = bond_rcv_validate; cancel_delayed_work_sync(&bond->mii_work); queue_delayed_work(bond->wq, &bond->arp_work, 0); } @@ -1184,6 +1197,65 @@ static int bond_option_arp_ip_targets_set(struct bonding *bond, return ret; } +#if IS_ENABLED(CONFIG_IPV6) +static void _bond_options_ns_ip6_target_set(struct bonding *bond, int slot, + struct in6_addr *target, + unsigned long last_rx) +{ + struct in6_addr *targets = bond->params.ns_targets; + struct list_head *iter; + struct slave *slave; + + if (slot >= 0 && slot < BOND_MAX_NS_TARGETS) { + bond_for_each_slave(bond, slave, iter) + slave->target_last_arp_rx[slot] = last_rx; + targets[slot] = *target; + } +} + +void bond_option_ns_ip6_targets_clear(struct bonding *bond) +{ + struct in6_addr addr_any = in6addr_any; + int i; + + for (i = 0; i < BOND_MAX_NS_TARGETS; i++) + _bond_options_ns_ip6_target_set(bond, i, &addr_any, 0); +} + +static int bond_option_ns_ip6_targets_set(struct bonding *bond, + const struct bond_opt_value *newval) +{ + struct in6_addr *target = (struct in6_addr *)newval->extra; + struct in6_addr *targets = bond->params.ns_targets; + struct in6_addr addr_any = in6addr_any; + int index; + + if (!bond_is_ip6_target_ok(target)) { + netdev_err(bond->dev, "invalid NS target %pI6c specified for addition\n", + target); + return -EINVAL; + } + + if (bond_get_targets_ip6(targets, target) != -1) { /* dup */ + netdev_err(bond->dev, "NS target %pI6c is already present\n", + target); + return -EINVAL; + } + + index = bond_get_targets_ip6(targets, &addr_any); /* first free slot */ + if (index == -1) { + netdev_err(bond->dev, "NS target table is full!\n"); + return -EINVAL; + } + + netdev_dbg(bond->dev, "Adding NS target %pI6c\n", target); + + _bond_options_ns_ip6_target_set(bond, index, target, jiffies); + + return 0; +} +#endif + static int bond_option_arp_validate_set(struct bonding *bond, const struct bond_opt_value *newval) { diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c index 6a6cdd0bb258..69b0a3751dff 100644 --- a/drivers/net/bonding/bond_sysfs_slave.c +++ b/drivers/net/bonding/bond_sysfs_slave.c @@ -15,14 +15,8 @@ struct 
slave_attribute { ssize_t (*show)(struct slave *, char *); }; -#define SLAVE_ATTR(_name, _mode, _show) \ -const struct slave_attribute slave_attr_##_name = { \ - .attr = {.name = __stringify(_name), \ - .mode = _mode }, \ - .show = _show, \ -}; #define SLAVE_ATTR_RO(_name) \ - SLAVE_ATTR(_name, 0444, _name##_show) +const struct slave_attribute slave_attr_##_name = __ATTR_RO(_name) static ssize_t state_show(struct slave *slave, char *buf) { diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c index 2a7af611d43a..688075859ae4 100644 --- a/drivers/net/caif/caif_serial.c +++ b/drivers/net/caif/caif_serial.c @@ -196,7 +196,7 @@ static void ldisc_receive(struct tty_struct *tty, const u8 *data, skb_reset_mac_header(skb); debugfs_rx(ser, data, count); /* Push received packet up the stack. */ - ret = netif_rx_ni(skb); + ret = netif_rx(skb); if (!ret) { ser->dev->stats.rx_packets++; ser->dev->stats.rx_bytes += count; diff --git a/drivers/net/can/c_can/c_can_ethtool.c b/drivers/net/can/c_can/c_can_ethtool.c index 6655146294fc..8a826a6813bd 100644 --- a/drivers/net/can/c_can/c_can_ethtool.c +++ b/drivers/net/can/c_can/c_can_ethtool.c @@ -11,14 +11,6 @@ #include "c_can.h" -static void c_can_get_drvinfo(struct net_device *netdev, - struct ethtool_drvinfo *info) -{ - struct c_can_priv *priv = netdev_priv(netdev); - strscpy(info->driver, "c_can", sizeof(info->driver)); - strscpy(info->bus_info, dev_name(priv->device), sizeof(info->bus_info)); -} - static void c_can_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kernel_ring, @@ -33,7 +25,6 @@ static void c_can_get_ringparam(struct net_device *netdev, } static const struct ethtool_ops c_can_ethtool_ops = { - .get_drvinfo = c_can_get_drvinfo, .get_ringparam = c_can_get_ringparam, }; diff --git a/drivers/net/can/dev/bittiming.c b/drivers/net/can/dev/bittiming.c index d5fca3bfaf9a..2103bcca9012 100644 --- a/drivers/net/can/dev/bittiming.c +++ b/drivers/net/can/dev/bittiming.c @@ -24,7 +24,7 @@ */ static int can_update_sample_point(const struct can_bittiming_const *btc, - unsigned int sample_point_nominal, unsigned int tseg, + const unsigned int sample_point_nominal, const unsigned int tseg, unsigned int *tseg1_ptr, unsigned int *tseg2_ptr, unsigned int *sample_point_error_ptr) { @@ -63,7 +63,7 @@ can_update_sample_point(const struct can_bittiming_const *btc, return best_sample_point; } -int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt, +int can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt, const struct can_bittiming_const *btc) { struct can_priv *priv = netdev_priv(dev); @@ -208,10 +208,10 @@ void can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const, * prescaler value brp. You can find more information in the header * file linux/can/netlink.h. 
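* Roughly: the prescaler is derived from the user-supplied tq and the
* CAN clock frequency, then rejected if it falls outside the
* controller's bittiming-const limits.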
*/ -static int can_fixup_bittiming(struct net_device *dev, struct can_bittiming *bt, +static int can_fixup_bittiming(const struct net_device *dev, struct can_bittiming *bt, const struct can_bittiming_const *btc) { - struct can_priv *priv = netdev_priv(dev); + const struct can_priv *priv = netdev_priv(dev); unsigned int tseg1, alltseg; u64 brp64; @@ -244,25 +244,21 @@ static int can_fixup_bittiming(struct net_device *dev, struct can_bittiming *bt, /* Checks the validity of predefined bitrate settings */ static int -can_validate_bitrate(struct net_device *dev, struct can_bittiming *bt, +can_validate_bitrate(const struct net_device *dev, const struct can_bittiming *bt, const u32 *bitrate_const, const unsigned int bitrate_const_cnt) { - struct can_priv *priv = netdev_priv(dev); unsigned int i; for (i = 0; i < bitrate_const_cnt; i++) { if (bt->bitrate == bitrate_const[i]) - break; + return 0; } - if (i >= priv->bitrate_const_cnt) - return -EINVAL; - - return 0; + return -EINVAL; } -int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt, +int can_get_bittiming(const struct net_device *dev, struct can_bittiming *bt, const struct can_bittiming_const *btc, const u32 *bitrate_const, const unsigned int bitrate_const_cnt) diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c index c192f25f9695..e7ab45f1c43b 100644 --- a/drivers/net/can/dev/dev.c +++ b/drivers/net/can/dev/dev.c @@ -154,7 +154,7 @@ static void can_restart(struct net_device *dev) cf->can_id |= CAN_ERR_RESTARTED; - netif_rx_ni(skb); + netif_rx(skb); restart: netdev_dbg(dev, "restarted\n"); diff --git a/drivers/net/can/m_can/tcan4x5x-core.c b/drivers/net/can/m_can/tcan4x5x-core.c index 04687b15b250..41645a24384c 100644 --- a/drivers/net/can/m_can/tcan4x5x-core.c +++ b/drivers/net/can/m_can/tcan4x5x-core.c @@ -388,7 +388,7 @@ out_power: return ret; } -static int tcan4x5x_can_remove(struct spi_device *spi) +static void tcan4x5x_can_remove(struct spi_device *spi) { struct tcan4x5x_priv *priv = spi_get_drvdata(spi); @@ -397,8 +397,6 @@ static int tcan4x5x_can_remove(struct spi_device *spi) tcan4x5x_power_enable(priv->power, 0); m_can_class_free_dev(priv->cdev.net); - - return 0; } static const struct of_device_id tcan4x5x_of_match[] = { diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c index b7dc1c32875f..1e121e04208c 100644 --- a/drivers/net/can/rcar/rcar_canfd.c +++ b/drivers/net/can/rcar/rcar_canfd.c @@ -44,6 +44,7 @@ enum rcanfd_chip_id { RENESAS_RCAR_GEN3 = 0, RENESAS_RZG2L, + RENESAS_R8A779A0, }; /* Global register bits */ @@ -79,6 +80,7 @@ enum rcanfd_chip_id { #define RCANFD_GSTS_GNOPM (BIT(0) | BIT(1) | BIT(2) | BIT(3)) /* RSCFDnCFDGERFL / RSCFDnGERFL */ +#define RCANFD_GERFL_EEF0_7 GENMASK(23, 16) #define RCANFD_GERFL_EEF1 BIT(17) #define RCANFD_GERFL_EEF0 BIT(16) #define RCANFD_GERFL_CMPOF BIT(3) /* CAN FD only */ @@ -86,20 +88,26 @@ enum rcanfd_chip_id { #define RCANFD_GERFL_MES BIT(1) #define RCANFD_GERFL_DEF BIT(0) -#define RCANFD_GERFL_ERR(gpriv, x) ((x) & (RCANFD_GERFL_EEF1 |\ - RCANFD_GERFL_EEF0 | RCANFD_GERFL_MES |\ - (gpriv->fdmode ?\ - RCANFD_GERFL_CMPOF : 0))) +#define RCANFD_GERFL_ERR(gpriv, x) \ + ((x) & (reg_v3u(gpriv, RCANFD_GERFL_EEF0_7, \ + RCANFD_GERFL_EEF0 | RCANFD_GERFL_EEF1) | \ + RCANFD_GERFL_MES | \ + ((gpriv)->fdmode ? 
RCANFD_GERFL_CMPOF : 0))) /* AFL Rx rules registers */ /* RSCFDnCFDGAFLCFG0 / RSCFDnGAFLCFG0 */ -#define RCANFD_GAFLCFG_SETRNC(n, x) (((x) & 0xff) << (24 - n * 8)) -#define RCANFD_GAFLCFG_GETRNC(n, x) (((x) >> (24 - n * 8)) & 0xff) +#define RCANFD_GAFLCFG_SETRNC(gpriv, n, x) \ + (((x) & reg_v3u(gpriv, 0x1ff, 0xff)) << \ + (reg_v3u(gpriv, 16, 24) - (n) * reg_v3u(gpriv, 16, 8))) + +#define RCANFD_GAFLCFG_GETRNC(gpriv, n, x) \ + (((x) >> (reg_v3u(gpriv, 16, 24) - (n) * reg_v3u(gpriv, 16, 8))) & \ + reg_v3u(gpriv, 0x1ff, 0xff)) /* RSCFDnCFDGAFLECTR / RSCFDnGAFLECTR */ #define RCANFD_GAFLECTR_AFLDAE BIT(8) -#define RCANFD_GAFLECTR_AFLPN(x) ((x) & 0x1f) +#define RCANFD_GAFLECTR_AFLPN(gpriv, x) ((x) & reg_v3u(gpriv, 0x7f, 0x1f)) /* RSCFDnCFDGAFLIDj / RSCFDnGAFLIDj */ #define RCANFD_GAFLID_GAFLLB BIT(29) @@ -116,9 +124,15 @@ enum rcanfd_chip_id { #define RCANFD_CFG_BRP(x) (((x) & 0x3ff) << 0) /* RSCFDnCFDCmNCFG - CAN FD only */ -#define RCANFD_NCFG_NTSEG2(x) (((x) & 0x1f) << 24) -#define RCANFD_NCFG_NTSEG1(x) (((x) & 0x7f) << 16) -#define RCANFD_NCFG_NSJW(x) (((x) & 0x1f) << 11) +#define RCANFD_NCFG_NTSEG2(gpriv, x) \ + (((x) & reg_v3u(gpriv, 0x7f, 0x1f)) << reg_v3u(gpriv, 25, 24)) + +#define RCANFD_NCFG_NTSEG1(gpriv, x) \ + (((x) & reg_v3u(gpriv, 0xff, 0x7f)) << reg_v3u(gpriv, 17, 16)) + +#define RCANFD_NCFG_NSJW(gpriv, x) \ + (((x) & reg_v3u(gpriv, 0x7f, 0x1f)) << reg_v3u(gpriv, 10, 11)) + #define RCANFD_NCFG_NBRP(x) (((x) & 0x3ff) << 0) /* RSCFDnCFDCmCTR / RSCFDnCmCTR */ @@ -180,11 +194,18 @@ enum rcanfd_chip_id { /* RSCFDnCFDCmDCFG */ #define RCANFD_DCFG_DSJW(x) (((x) & 0x7) << 24) -#define RCANFD_DCFG_DTSEG2(x) (((x) & 0x7) << 20) -#define RCANFD_DCFG_DTSEG1(x) (((x) & 0xf) << 16) + +#define RCANFD_DCFG_DTSEG2(gpriv, x) \ + (((x) & reg_v3u(gpriv, 0x0f, 0x7)) << reg_v3u(gpriv, 16, 20)) + +#define RCANFD_DCFG_DTSEG1(gpriv, x) \ + (((x) & reg_v3u(gpriv, 0x1f, 0xf)) << reg_v3u(gpriv, 8, 16)) + #define RCANFD_DCFG_DBRP(x) (((x) & 0xff) << 0) /* RSCFDnCFDCmFDCFG */ +#define RCANFD_FDCFG_CLOE BIT(30) +#define RCANFD_FDCFG_FDOE BIT(28) #define RCANFD_FDCFG_TDCE BIT(9) #define RCANFD_FDCFG_TDCOC BIT(8) #define RCANFD_FDCFG_TDCO(x) (((x) & 0x7f) >> 16) @@ -219,10 +240,10 @@ enum rcanfd_chip_id { /* Common FIFO bits */ /* RSCFDnCFDCFCCk */ -#define RCANFD_CFCC_CFTML(x) (((x) & 0xf) << 20) -#define RCANFD_CFCC_CFM(x) (((x) & 0x3) << 16) +#define RCANFD_CFCC_CFTML(gpriv, x) (((x) & 0xf) << reg_v3u(gpriv, 16, 20)) +#define RCANFD_CFCC_CFM(gpriv, x) (((x) & 0x3) << reg_v3u(gpriv, 8, 16)) #define RCANFD_CFCC_CFIM BIT(12) -#define RCANFD_CFCC_CFDC(x) (((x) & 0x7) << 8) +#define RCANFD_CFCC_CFDC(gpriv, x) (((x) & 0x7) << reg_v3u(gpriv, 21, 8)) #define RCANFD_CFCC_CFPLS(x) (((x) & 0x7) << 4) #define RCANFD_CFCC_CFTXIE BIT(2) #define RCANFD_CFCC_CFE BIT(0) @@ -282,33 +303,31 @@ enum rcanfd_chip_id { #define RCANFD_GTSC (0x0094) /* RSCFDnCFDGAFLECTR / RSCFDnGAFLECTR */ #define RCANFD_GAFLECTR (0x0098) -/* RSCFDnCFDGAFLCFG0 / RSCFDnGAFLCFG0 */ -#define RCANFD_GAFLCFG0 (0x009c) -/* RSCFDnCFDGAFLCFG1 / RSCFDnGAFLCFG1 */ -#define RCANFD_GAFLCFG1 (0x00a0) +/* RSCFDnCFDGAFLCFG / RSCFDnGAFLCFG */ +#define RCANFD_GAFLCFG(ch) (0x009c + (0x04 * ((ch) / 2))) /* RSCFDnCFDRMNB / RSCFDnRMNB */ #define RCANFD_RMNB (0x00a4) /* RSCFDnCFDRMND / RSCFDnRMND */ #define RCANFD_RMND(y) (0x00a8 + (0x04 * (y))) /* RSCFDnCFDRFCCx / RSCFDnRFCCx */ -#define RCANFD_RFCC(x) (0x00b8 + (0x04 * (x))) +#define RCANFD_RFCC(gpriv, x) (reg_v3u(gpriv, 0x00c0, 0x00b8) + (0x04 * (x))) /* RSCFDnCFDRFSTSx / RSCFDnRFSTSx */ -#define RCANFD_RFSTS(x) 
(0x00d8 + (0x04 * (x))) +#define RCANFD_RFSTS(gpriv, x) (RCANFD_RFCC(gpriv, x) + 0x20) /* RSCFDnCFDRFPCTRx / RSCFDnRFPCTRx */ -#define RCANFD_RFPCTR(x) (0x00f8 + (0x04 * (x))) +#define RCANFD_RFPCTR(gpriv, x) (RCANFD_RFCC(gpriv, x) + 0x40) /* Common FIFO Control registers */ /* RSCFDnCFDCFCCx / RSCFDnCFCCx */ -#define RCANFD_CFCC(ch, idx) (0x0118 + (0x0c * (ch)) + \ - (0x04 * (idx))) +#define RCANFD_CFCC(gpriv, ch, idx) \ + (reg_v3u(gpriv, 0x0120, 0x0118) + (0x0c * (ch)) + (0x04 * (idx))) /* RSCFDnCFDCFSTSx / RSCFDnCFSTSx */ -#define RCANFD_CFSTS(ch, idx) (0x0178 + (0x0c * (ch)) + \ - (0x04 * (idx))) +#define RCANFD_CFSTS(gpriv, ch, idx) \ + (reg_v3u(gpriv, 0x01e0, 0x0178) + (0x0c * (ch)) + (0x04 * (idx))) /* RSCFDnCFDCFPCTRx / RSCFDnCFPCTRx */ -#define RCANFD_CFPCTR(ch, idx) (0x01d8 + (0x0c * (ch)) + \ - (0x04 * (idx))) +#define RCANFD_CFPCTR(gpriv, ch, idx) \ + (reg_v3u(gpriv, 0x0240, 0x01d8) + (0x0c * (ch)) + (0x04 * (idx))) /* RSCFDnCFDFESTS / RSCFDnFESTS */ #define RCANFD_FESTS (0x0238) @@ -387,22 +406,23 @@ enum rcanfd_chip_id { #define RCANFD_C_RMDF1(q) (0x060c + (0x10 * (q))) /* RSCFDnRFXXx -> RCANFD_C_RFXX(x) */ -#define RCANFD_C_RFOFFSET (0x0e00) -#define RCANFD_C_RFID(x) (RCANFD_C_RFOFFSET + (0x10 * (x))) -#define RCANFD_C_RFPTR(x) (RCANFD_C_RFOFFSET + 0x04 + \ - (0x10 * (x))) -#define RCANFD_C_RFDF(x, df) (RCANFD_C_RFOFFSET + 0x08 + \ - (0x10 * (x)) + (0x04 * (df))) +#define RCANFD_C_RFOFFSET (0x0e00) +#define RCANFD_C_RFID(x) (RCANFD_C_RFOFFSET + (0x10 * (x))) +#define RCANFD_C_RFPTR(x) (RCANFD_C_RFOFFSET + 0x04 + (0x10 * (x))) +#define RCANFD_C_RFDF(x, df) \ + (RCANFD_C_RFOFFSET + 0x08 + (0x10 * (x)) + (0x04 * (df))) /* RSCFDnCFXXk -> RCANFD_C_CFXX(ch, k) */ #define RCANFD_C_CFOFFSET (0x0e80) -#define RCANFD_C_CFID(ch, idx) (RCANFD_C_CFOFFSET + (0x30 * (ch)) + \ - (0x10 * (idx))) -#define RCANFD_C_CFPTR(ch, idx) (RCANFD_C_CFOFFSET + 0x04 + \ - (0x30 * (ch)) + (0x10 * (idx))) -#define RCANFD_C_CFDF(ch, idx, df) (RCANFD_C_CFOFFSET + 0x08 + \ - (0x30 * (ch)) + (0x10 * (idx)) + \ - (0x04 * (df))) + +#define RCANFD_C_CFID(ch, idx) \ + (RCANFD_C_CFOFFSET + (0x30 * (ch)) + (0x10 * (idx))) + +#define RCANFD_C_CFPTR(ch, idx) \ + (RCANFD_C_CFOFFSET + 0x04 + (0x30 * (ch)) + (0x10 * (idx))) + +#define RCANFD_C_CFDF(ch, idx, df) \ + (RCANFD_C_CFOFFSET + 0x08 + (0x30 * (ch)) + (0x10 * (idx)) + (0x04 * (df))) /* RSCFDnTMXXp -> RCANFD_C_TMXX(p) */ #define RCANFD_C_TMID(p) (0x1000 + (0x10 * (p))) @@ -415,6 +435,12 @@ enum rcanfd_chip_id { /* RSCFDnRPGACCr */ #define RCANFD_C_RPGACC(r) (0x1900 + (0x04 * (r))) +/* R-Car V3U Classical and CAN FD mode specific register map */ +#define RCANFD_V3U_CFDCFG (0x1314) +#define RCANFD_V3U_DCFG(m) (0x1400 + (0x20 * (m))) + +#define RCANFD_V3U_GAFL_OFFSET (0x1800) + /* CAN FD mode specific register map */ /* RSCFDnCFDCmXXX -> RCANFD_F_XXX(m) */ @@ -434,26 +460,28 @@ enum rcanfd_chip_id { #define RCANFD_F_RMDF(q, b) (0x200c + (0x04 * (b)) + (0x20 * (q))) /* RSCFDnCFDRFXXx -> RCANFD_F_RFXX(x) */ -#define RCANFD_F_RFOFFSET (0x3000) -#define RCANFD_F_RFID(x) (RCANFD_F_RFOFFSET + (0x80 * (x))) -#define RCANFD_F_RFPTR(x) (RCANFD_F_RFOFFSET + 0x04 + \ - (0x80 * (x))) -#define RCANFD_F_RFFDSTS(x) (RCANFD_F_RFOFFSET + 0x08 + \ - (0x80 * (x))) -#define RCANFD_F_RFDF(x, df) (RCANFD_F_RFOFFSET + 0x0c + \ - (0x80 * (x)) + (0x04 * (df))) +#define RCANFD_F_RFOFFSET(gpriv) reg_v3u(gpriv, 0x6000, 0x3000) +#define RCANFD_F_RFID(gpriv, x) (RCANFD_F_RFOFFSET(gpriv) + (0x80 * (x))) +#define RCANFD_F_RFPTR(gpriv, x) (RCANFD_F_RFOFFSET(gpriv) + 0x04 + (0x80 * (x))) +#define 
RCANFD_F_RFFDSTS(gpriv, x) (RCANFD_F_RFOFFSET(gpriv) + 0x08 + (0x80 * (x))) +#define RCANFD_F_RFDF(gpriv, x, df) \ + (RCANFD_F_RFOFFSET(gpriv) + 0x0c + (0x80 * (x)) + (0x04 * (df))) /* RSCFDnCFDCFXXk -> RCANFD_F_CFXX(ch, k) */ -#define RCANFD_F_CFOFFSET (0x3400) -#define RCANFD_F_CFID(ch, idx) (RCANFD_F_CFOFFSET + (0x180 * (ch)) + \ - (0x80 * (idx))) -#define RCANFD_F_CFPTR(ch, idx) (RCANFD_F_CFOFFSET + 0x04 + \ - (0x180 * (ch)) + (0x80 * (idx))) -#define RCANFD_F_CFFDCSTS(ch, idx) (RCANFD_F_CFOFFSET + 0x08 + \ - (0x180 * (ch)) + (0x80 * (idx))) -#define RCANFD_F_CFDF(ch, idx, df) (RCANFD_F_CFOFFSET + 0x0c + \ - (0x180 * (ch)) + (0x80 * (idx)) + \ - (0x04 * (df))) +#define RCANFD_F_CFOFFSET(gpriv) reg_v3u(gpriv, 0x6400, 0x3400) + +#define RCANFD_F_CFID(gpriv, ch, idx) \ + (RCANFD_F_CFOFFSET(gpriv) + (0x180 * (ch)) + (0x80 * (idx))) + +#define RCANFD_F_CFPTR(gpriv, ch, idx) \ + (RCANFD_F_CFOFFSET(gpriv) + 0x04 + (0x180 * (ch)) + (0x80 * (idx))) + +#define RCANFD_F_CFFDCSTS(gpriv, ch, idx) \ + (RCANFD_F_CFOFFSET(gpriv) + 0x08 + (0x180 * (ch)) + (0x80 * (idx))) + +#define RCANFD_F_CFDF(gpriv, ch, idx, df) \ + (RCANFD_F_CFOFFSET(gpriv) + 0x0c + (0x180 * (ch)) + (0x80 * (idx)) + \ + (0x04 * (df))) /* RSCFDnCFDTMXXp -> RCANFD_F_TMXX(p) */ #define RCANFD_F_TMID(p) (0x4000 + (0x20 * (p))) @@ -470,7 +498,7 @@ enum rcanfd_chip_id { #define RCANFD_FIFO_DEPTH 8 /* Tx FIFO depth */ #define RCANFD_NAPI_WEIGHT 8 /* Rx poll quota */ -#define RCANFD_NUM_CHANNELS 2 /* Two channels max */ +#define RCANFD_NUM_CHANNELS 8 /* Eight channels max */ #define RCANFD_CHANNELS_MASK BIT((RCANFD_NUM_CHANNELS) - 1) #define RCANFD_GAFL_PAGENUM(entry) ((entry) / 16) @@ -521,6 +549,7 @@ struct rcar_canfd_global { struct reset_control *rstc1; struct reset_control *rstc2; enum rcanfd_chip_id chip_id; + u32 max_channels; }; /* CAN FD mode nominal rate constants */ @@ -563,6 +592,17 @@ static const struct can_bittiming_const rcar_canfd_bittiming_const = { }; /* Helper functions */ +static inline bool is_v3u(struct rcar_canfd_global *gpriv) +{ + return gpriv->chip_id == RENESAS_R8A779A0; +} + +static inline u32 reg_v3u(struct rcar_canfd_global *gpriv, + u32 v3u, u32 not_v3u) +{ + return is_v3u(gpriv) ? 
v3u : not_v3u; +} + static inline void rcar_canfd_update(u32 mask, u32 val, u32 __iomem *reg) { u32 data = readl(reg); @@ -628,6 +668,25 @@ static void rcar_canfd_tx_failure_cleanup(struct net_device *ndev) can_free_echo_skb(ndev, i, NULL); } +static void rcar_canfd_set_mode(struct rcar_canfd_global *gpriv) +{ + if (is_v3u(gpriv)) { + if (gpriv->fdmode) + rcar_canfd_set_bit(gpriv->base, RCANFD_V3U_CFDCFG, + RCANFD_FDCFG_FDOE); + else + rcar_canfd_set_bit(gpriv->base, RCANFD_V3U_CFDCFG, + RCANFD_FDCFG_CLOE); + } else { + if (gpriv->fdmode) + rcar_canfd_set_bit(gpriv->base, RCANFD_GRMCFG, + RCANFD_GRMCFG_RCMC); + else + rcar_canfd_clear_bit(gpriv->base, RCANFD_GRMCFG, + RCANFD_GRMCFG_RCMC); + } +} + static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv) { u32 sts, ch; @@ -660,15 +719,10 @@ static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv) rcar_canfd_write(gpriv->base, RCANFD_GERFL, 0x0); /* Set the controller into appropriate mode */ - if (gpriv->fdmode) - rcar_canfd_set_bit(gpriv->base, RCANFD_GRMCFG, - RCANFD_GRMCFG_RCMC); - else - rcar_canfd_clear_bit(gpriv->base, RCANFD_GRMCFG, - RCANFD_GRMCFG_RCMC); + rcar_canfd_set_mode(gpriv); /* Transition all Channels to reset mode */ - for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) { + for_each_set_bit(ch, &gpriv->channels_mask, gpriv->max_channels) { rcar_canfd_clear_bit(gpriv->base, RCANFD_CCTR(ch), RCANFD_CCTR_CSLPR); @@ -709,7 +763,7 @@ static void rcar_canfd_configure_controller(struct rcar_canfd_global *gpriv) rcar_canfd_set_bit(gpriv->base, RCANFD_GCFG, cfg); /* Channel configuration settings */ - for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) { + for_each_set_bit(ch, &gpriv->channels_mask, gpriv->max_channels) { rcar_canfd_set_bit(gpriv->base, RCANFD_CCTR(ch), RCANFD_CCTR_ERRD); rcar_canfd_update_bit(gpriv->base, RCANFD_CCTR(ch), @@ -729,20 +783,22 @@ static void rcar_canfd_configure_afl_rules(struct rcar_canfd_global *gpriv, start = 0; /* Channel 0 always starts from 0th rule */ } else { /* Get number of Channel 0 rules and adjust */ - cfg = rcar_canfd_read(gpriv->base, RCANFD_GAFLCFG0); - start = RCANFD_GAFLCFG_GETRNC(0, cfg); + cfg = rcar_canfd_read(gpriv->base, RCANFD_GAFLCFG(ch)); + start = RCANFD_GAFLCFG_GETRNC(gpriv, 0, cfg); } /* Enable write access to entry */ page = RCANFD_GAFL_PAGENUM(start); rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLECTR, - (RCANFD_GAFLECTR_AFLPN(page) | + (RCANFD_GAFLECTR_AFLPN(gpriv, page) | RCANFD_GAFLECTR_AFLDAE)); /* Write number of rules for channel */ - rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLCFG0, - RCANFD_GAFLCFG_SETRNC(ch, num_rules)); - if (gpriv->fdmode) + rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLCFG(ch), + RCANFD_GAFLCFG_SETRNC(gpriv, ch, num_rules)); + if (is_v3u(gpriv)) + offset = RCANFD_V3U_GAFL_OFFSET; + else if (gpriv->fdmode) offset = RCANFD_F_GAFL_OFFSET; else offset = RCANFD_C_GAFL_OFFSET; @@ -754,8 +810,8 @@ static void rcar_canfd_configure_afl_rules(struct rcar_canfd_global *gpriv, /* Any data length accepted */ rcar_canfd_write(gpriv->base, RCANFD_GAFLP0(offset, start), 0); /* Place the msg in corresponding Rx FIFO entry */ - rcar_canfd_write(gpriv->base, RCANFD_GAFLP1(offset, start), - RCANFD_GAFLP1_GAFLFDP(ridx)); + rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLP1(offset, start), + RCANFD_GAFLP1_GAFLFDP(ridx)); /* Disable write access to page */ rcar_canfd_clear_bit(gpriv->base, @@ -779,7 +835,7 @@ static void rcar_canfd_configure_rx(struct rcar_canfd_global *gpriv, u32 ch) cfg = (RCANFD_RFCC_RFIM | 
RCANFD_RFCC_RFDC(rfdc) | RCANFD_RFCC_RFPLS(rfpls) | RCANFD_RFCC_RFIE); - rcar_canfd_write(gpriv->base, RCANFD_RFCC(ridx), cfg); + rcar_canfd_write(gpriv->base, RCANFD_RFCC(gpriv, ridx), cfg); } static void rcar_canfd_configure_tx(struct rcar_canfd_global *gpriv, u32 ch) @@ -801,15 +857,15 @@ static void rcar_canfd_configure_tx(struct rcar_canfd_global *gpriv, u32 ch) else cfpls = 0; /* b000 - Max 8 bytes payload */ - cfg = (RCANFD_CFCC_CFTML(cftml) | RCANFD_CFCC_CFM(cfm) | - RCANFD_CFCC_CFIM | RCANFD_CFCC_CFDC(cfdc) | + cfg = (RCANFD_CFCC_CFTML(gpriv, cftml) | RCANFD_CFCC_CFM(gpriv, cfm) | + RCANFD_CFCC_CFIM | RCANFD_CFCC_CFDC(gpriv, cfdc) | RCANFD_CFCC_CFPLS(cfpls) | RCANFD_CFCC_CFTXIE); - rcar_canfd_write(gpriv->base, RCANFD_CFCC(ch, RCANFD_CFFIFO_IDX), cfg); + rcar_canfd_write(gpriv->base, RCANFD_CFCC(gpriv, ch, RCANFD_CFFIFO_IDX), cfg); if (gpriv->fdmode) /* Clear FD mode specific control/status register */ rcar_canfd_write(gpriv->base, - RCANFD_F_CFFDCSTS(ch, RCANFD_CFFIFO_IDX), 0); + RCANFD_F_CFFDCSTS(gpriv, ch, RCANFD_CFFIFO_IDX), 0); } static void rcar_canfd_enable_global_interrupts(struct rcar_canfd_global *gpriv) @@ -890,20 +946,20 @@ static void rcar_canfd_global_error(struct net_device *ndev) } if (gerfl & RCANFD_GERFL_MES) { sts = rcar_canfd_read(priv->base, - RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX)); + RCANFD_CFSTS(gpriv, ch, RCANFD_CFFIFO_IDX)); if (sts & RCANFD_CFSTS_CFMLT) { netdev_dbg(ndev, "Tx Message Lost flag\n"); stats->tx_dropped++; rcar_canfd_write(priv->base, - RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX), + RCANFD_CFSTS(gpriv, ch, RCANFD_CFFIFO_IDX), sts & ~RCANFD_CFSTS_CFMLT); } - sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(ridx)); + sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(gpriv, ridx)); if (sts & RCANFD_RFSTS_RFMLT) { netdev_dbg(ndev, "Rx Message Lost flag\n"); stats->rx_dropped++; - rcar_canfd_write(priv->base, RCANFD_RFSTS(ridx), + rcar_canfd_write(priv->base, RCANFD_RFSTS(gpriv, ridx), sts & ~RCANFD_RFSTS_RFMLT); } } @@ -1038,6 +1094,7 @@ static void rcar_canfd_error(struct net_device *ndev, u32 cerfl, static void rcar_canfd_tx_done(struct net_device *ndev) { struct rcar_canfd_channel *priv = netdev_priv(ndev); + struct rcar_canfd_global *gpriv = priv->gpriv; struct net_device_stats *stats = &ndev->stats; u32 sts; unsigned long flags; @@ -1053,7 +1110,7 @@ static void rcar_canfd_tx_done(struct net_device *ndev) spin_lock_irqsave(&priv->tx_lock, flags); priv->tx_tail++; sts = rcar_canfd_read(priv->base, - RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX)); + RCANFD_CFSTS(gpriv, ch, RCANFD_CFFIFO_IDX)); unsent = RCANFD_CFSTS_CFMC(sts); /* Wake producer only when there is room */ @@ -1069,7 +1126,7 @@ static void rcar_canfd_tx_done(struct net_device *ndev) } while (1); /* Clear interrupt */ - rcar_canfd_write(priv->base, RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX), + rcar_canfd_write(priv->base, RCANFD_CFSTS(gpriv, ch, RCANFD_CFFIFO_IDX), sts & ~RCANFD_CFSTS_CFTXIF); can_led_event(ndev, CAN_LED_EVENT_TX); } @@ -1091,7 +1148,7 @@ static irqreturn_t rcar_canfd_global_err_interrupt(int irq, void *dev_id) struct rcar_canfd_global *gpriv = dev_id; u32 ch; - for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) + for_each_set_bit(ch, &gpriv->channels_mask, gpriv->max_channels) rcar_canfd_handle_global_err(gpriv, ch); return IRQ_HANDLED; @@ -1104,12 +1161,12 @@ static void rcar_canfd_handle_global_receive(struct rcar_canfd_global *gpriv, u3 u32 sts; /* Handle Rx interrupts */ - sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(ridx)); + sts = rcar_canfd_read(priv->base, 
RCANFD_RFSTS(gpriv, ridx)); if (likely(sts & RCANFD_RFSTS_RFIF)) { if (napi_schedule_prep(&priv->napi)) { /* Disable Rx FIFO interrupts */ rcar_canfd_clear_bit(priv->base, - RCANFD_RFCC(ridx), + RCANFD_RFCC(gpriv, ridx), RCANFD_RFCC_RFIE); __napi_schedule(&priv->napi); } @@ -1121,7 +1178,7 @@ static irqreturn_t rcar_canfd_global_receive_fifo_interrupt(int irq, void *dev_i struct rcar_canfd_global *gpriv = dev_id; u32 ch; - for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) + for_each_set_bit(ch, &gpriv->channels_mask, gpriv->max_channels) rcar_canfd_handle_global_receive(gpriv, ch); return IRQ_HANDLED; @@ -1135,7 +1192,7 @@ static irqreturn_t rcar_canfd_global_interrupt(int irq, void *dev_id) /* Global error interrupts still indicate a condition specific * to a channel. RxFIFO interrupt is a global interrupt. */ - for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) { + for_each_set_bit(ch, &gpriv->channels_mask, gpriv->max_channels) { rcar_canfd_handle_global_err(gpriv, ch); rcar_canfd_handle_global_receive(gpriv, ch); } @@ -1181,7 +1238,7 @@ static void rcar_canfd_handle_channel_tx(struct rcar_canfd_global *gpriv, u32 ch /* Handle Tx interrupts */ sts = rcar_canfd_read(priv->base, - RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX)); + RCANFD_CFSTS(gpriv, ch, RCANFD_CFFIFO_IDX)); if (likely(sts & RCANFD_CFSTS_CFTXIF)) rcar_canfd_tx_done(ndev); } @@ -1191,7 +1248,7 @@ static irqreturn_t rcar_canfd_channel_tx_interrupt(int irq, void *dev_id) struct rcar_canfd_global *gpriv = dev_id; u32 ch; - for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) + for_each_set_bit(ch, &gpriv->channels_mask, gpriv->max_channels) rcar_canfd_handle_channel_tx(gpriv, ch); return IRQ_HANDLED; @@ -1223,7 +1280,7 @@ static irqreturn_t rcar_canfd_channel_err_interrupt(int irq, void *dev_id) struct rcar_canfd_global *gpriv = dev_id; u32 ch; - for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) + for_each_set_bit(ch, &gpriv->channels_mask, gpriv->max_channels) rcar_canfd_handle_channel_err(gpriv, ch); return IRQ_HANDLED; @@ -1235,7 +1292,7 @@ static irqreturn_t rcar_canfd_channel_interrupt(int irq, void *dev_id) u32 ch; /* Common FIFO is a per channel resource */ - for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) { + for_each_set_bit(ch, &gpriv->channels_mask, gpriv->max_channels) { rcar_canfd_handle_channel_err(gpriv, ch); rcar_canfd_handle_channel_tx(gpriv, ch); } @@ -1246,6 +1303,7 @@ static irqreturn_t rcar_canfd_channel_interrupt(int irq, void *dev_id) static void rcar_canfd_set_bittiming(struct net_device *dev) { struct rcar_canfd_channel *priv = netdev_priv(dev); + struct rcar_canfd_global *gpriv = priv->gpriv; const struct can_bittiming *bt = &priv->can.bittiming; const struct can_bittiming *dbt = &priv->can.data_bittiming; u16 brp, sjw, tseg1, tseg2; @@ -1260,8 +1318,8 @@ static void rcar_canfd_set_bittiming(struct net_device *dev) if (priv->can.ctrlmode & CAN_CTRLMODE_FD) { /* CAN FD only mode */ - cfg = (RCANFD_NCFG_NTSEG1(tseg1) | RCANFD_NCFG_NBRP(brp) | - RCANFD_NCFG_NSJW(sjw) | RCANFD_NCFG_NTSEG2(tseg2)); + cfg = (RCANFD_NCFG_NTSEG1(gpriv, tseg1) | RCANFD_NCFG_NBRP(brp) | + RCANFD_NCFG_NSJW(gpriv, sjw) | RCANFD_NCFG_NTSEG2(gpriv, tseg2)); rcar_canfd_write(priv->base, RCANFD_CCFG(ch), cfg); netdev_dbg(priv->ndev, "nrate: brp %u, sjw %u, tseg1 %u, tseg2 %u\n", @@ -1273,16 +1331,25 @@ static void rcar_canfd_set_bittiming(struct net_device *dev) tseg1 = dbt->prop_seg + dbt->phase_seg1 - 1; tseg2 = dbt->phase_seg2 - 1; - cfg = (RCANFD_DCFG_DTSEG1(tseg1) | 
RCANFD_DCFG_DBRP(brp) | - RCANFD_DCFG_DSJW(sjw) | RCANFD_DCFG_DTSEG2(tseg2)); + cfg = (RCANFD_DCFG_DTSEG1(gpriv, tseg1) | RCANFD_DCFG_DBRP(brp) | + RCANFD_DCFG_DSJW(sjw) | RCANFD_DCFG_DTSEG2(gpriv, tseg2)); rcar_canfd_write(priv->base, RCANFD_F_DCFG(ch), cfg); netdev_dbg(priv->ndev, "drate: brp %u, sjw %u, tseg1 %u, tseg2 %u\n", brp, sjw, tseg1, tseg2); } else { /* Classical CAN only mode */ - cfg = (RCANFD_CFG_TSEG1(tseg1) | RCANFD_CFG_BRP(brp) | - RCANFD_CFG_SJW(sjw) | RCANFD_CFG_TSEG2(tseg2)); + if (is_v3u(gpriv)) { + cfg = (RCANFD_NCFG_NTSEG1(gpriv, tseg1) | + RCANFD_NCFG_NBRP(brp) | + RCANFD_NCFG_NSJW(gpriv, sjw) | + RCANFD_NCFG_NTSEG2(gpriv, tseg2)); + } else { + cfg = (RCANFD_CFG_TSEG1(tseg1) | + RCANFD_CFG_BRP(brp) | + RCANFD_CFG_SJW(sjw) | + RCANFD_CFG_TSEG2(tseg2)); + } rcar_canfd_write(priv->base, RCANFD_CCFG(ch), cfg); netdev_dbg(priv->ndev, @@ -1294,6 +1361,7 @@ static void rcar_canfd_set_bittiming(struct net_device *dev) static int rcar_canfd_start(struct net_device *ndev) { struct rcar_canfd_channel *priv = netdev_priv(ndev); + struct rcar_canfd_global *gpriv = priv->gpriv; int err = -EOPNOTSUPP; u32 sts, ch = priv->channel; u32 ridx = ch + RCANFD_RFFIFO_IDX; @@ -1315,9 +1383,9 @@ static int rcar_canfd_start(struct net_device *ndev) } /* Enable Common & Rx FIFO */ - rcar_canfd_set_bit(priv->base, RCANFD_CFCC(ch, RCANFD_CFFIFO_IDX), + rcar_canfd_set_bit(priv->base, RCANFD_CFCC(gpriv, ch, RCANFD_CFFIFO_IDX), RCANFD_CFCC_CFE); - rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx), RCANFD_RFCC_RFE); + rcar_canfd_set_bit(priv->base, RCANFD_RFCC(gpriv, ridx), RCANFD_RFCC_RFE); priv->can.state = CAN_STATE_ERROR_ACTIVE; return 0; @@ -1365,6 +1433,7 @@ out_clock: static void rcar_canfd_stop(struct net_device *ndev) { struct rcar_canfd_channel *priv = netdev_priv(ndev); + struct rcar_canfd_global *gpriv = priv->gpriv; int err; u32 sts, ch = priv->channel; u32 ridx = ch + RCANFD_RFFIFO_IDX; @@ -1382,9 +1451,9 @@ static void rcar_canfd_stop(struct net_device *ndev) rcar_canfd_disable_channel_interrupts(priv); /* Disable Common & Rx FIFO */ - rcar_canfd_clear_bit(priv->base, RCANFD_CFCC(ch, RCANFD_CFFIFO_IDX), + rcar_canfd_clear_bit(priv->base, RCANFD_CFCC(gpriv, ch, RCANFD_CFFIFO_IDX), RCANFD_CFCC_CFE); - rcar_canfd_clear_bit(priv->base, RCANFD_RFCC(ridx), RCANFD_RFCC_RFE); + rcar_canfd_clear_bit(priv->base, RCANFD_RFCC(gpriv, ridx), RCANFD_RFCC_RFE); /* Set the state as STOPPED */ priv->can.state = CAN_STATE_STOPPED; @@ -1408,6 +1477,7 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct rcar_canfd_channel *priv = netdev_priv(ndev); + struct rcar_canfd_global *gpriv = priv->gpriv; struct canfd_frame *cf = (struct canfd_frame *)skb->data; u32 sts = 0, id, dlc; unsigned long flags; @@ -1428,11 +1498,11 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb, dlc = RCANFD_CFPTR_CFDLC(can_fd_len2dlc(cf->len)); - if (priv->can.ctrlmode & CAN_CTRLMODE_FD) { + if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) || is_v3u(gpriv)) { rcar_canfd_write(priv->base, - RCANFD_F_CFID(ch, RCANFD_CFFIFO_IDX), id); + RCANFD_F_CFID(gpriv, ch, RCANFD_CFFIFO_IDX), id); rcar_canfd_write(priv->base, - RCANFD_F_CFPTR(ch, RCANFD_CFFIFO_IDX), dlc); + RCANFD_F_CFPTR(gpriv, ch, RCANFD_CFFIFO_IDX), dlc); if (can_is_canfd_skb(skb)) { /* CAN FD frame format */ @@ -1445,10 +1515,10 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb, } rcar_canfd_write(priv->base, - RCANFD_F_CFFDCSTS(ch, RCANFD_CFFIFO_IDX), sts); + RCANFD_F_CFFDCSTS(gpriv, ch, RCANFD_CFFIFO_IDX), 
sts); rcar_canfd_put_data(priv, cf, - RCANFD_F_CFDF(ch, RCANFD_CFFIFO_IDX, 0)); + RCANFD_F_CFDF(gpriv, ch, RCANFD_CFFIFO_IDX, 0)); } else { rcar_canfd_write(priv->base, RCANFD_C_CFID(ch, RCANFD_CFFIFO_IDX), id); @@ -1471,7 +1541,7 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb, * pointer for the Common FIFO */ rcar_canfd_write(priv->base, - RCANFD_CFPCTR(ch, RCANFD_CFFIFO_IDX), 0xff); + RCANFD_CFPCTR(gpriv, ch, RCANFD_CFFIFO_IDX), 0xff); spin_unlock_irqrestore(&priv->tx_lock, flags); return NETDEV_TX_OK; @@ -1480,18 +1550,21 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb, static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv) { struct net_device_stats *stats = &priv->ndev->stats; + struct rcar_canfd_global *gpriv = priv->gpriv; struct canfd_frame *cf; struct sk_buff *skb; u32 sts = 0, id, dlc; u32 ch = priv->channel; u32 ridx = ch + RCANFD_RFFIFO_IDX; - if (priv->can.ctrlmode & CAN_CTRLMODE_FD) { - id = rcar_canfd_read(priv->base, RCANFD_F_RFID(ridx)); - dlc = rcar_canfd_read(priv->base, RCANFD_F_RFPTR(ridx)); + if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) || is_v3u(gpriv)) { + id = rcar_canfd_read(priv->base, RCANFD_F_RFID(gpriv, ridx)); + dlc = rcar_canfd_read(priv->base, RCANFD_F_RFPTR(gpriv, ridx)); - sts = rcar_canfd_read(priv->base, RCANFD_F_RFFDSTS(ridx)); - if (sts & RCANFD_RFFDSTS_RFFDF) + sts = rcar_canfd_read(priv->base, RCANFD_F_RFFDSTS(gpriv, ridx)); + + if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) && + sts & RCANFD_RFFDSTS_RFFDF) skb = alloc_canfd_skb(priv->ndev, &cf); else skb = alloc_can_skb(priv->ndev, @@ -1529,12 +1602,14 @@ static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv) if (sts & RCANFD_RFFDSTS_RFBRS) cf->flags |= CANFD_BRS; - rcar_canfd_get_data(priv, cf, RCANFD_F_RFDF(ridx, 0)); + rcar_canfd_get_data(priv, cf, RCANFD_F_RFDF(gpriv, ridx, 0)); } } else { cf->len = can_cc_dlc2len(RCANFD_RFPTR_RFDLC(dlc)); if (id & RCANFD_RFID_RFRTR) cf->can_id |= CAN_RTR_FLAG; + else if (is_v3u(gpriv)) + rcar_canfd_get_data(priv, cf, RCANFD_F_RFDF(gpriv, ridx, 0)); else rcar_canfd_get_data(priv, cf, RCANFD_C_RFDF(ridx, 0)); } @@ -1542,7 +1617,7 @@ static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv) /* Write 0xff to RFPC to increment the CPU-side * pointer of the Rx FIFO */ - rcar_canfd_write(priv->base, RCANFD_RFPCTR(ridx), 0xff); + rcar_canfd_write(priv->base, RCANFD_RFPCTR(gpriv, ridx), 0xff); can_led_event(priv->ndev, CAN_LED_EVENT_RX); @@ -1556,13 +1631,14 @@ static int rcar_canfd_rx_poll(struct napi_struct *napi, int quota) { struct rcar_canfd_channel *priv = container_of(napi, struct rcar_canfd_channel, napi); + struct rcar_canfd_global *gpriv = priv->gpriv; int num_pkts; u32 sts; u32 ch = priv->channel; u32 ridx = ch + RCANFD_RFFIFO_IDX; for (num_pkts = 0; num_pkts < quota; num_pkts++) { - sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(ridx)); + sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(gpriv, ridx)); /* Check FIFO empty condition */ if (sts & RCANFD_RFSTS_RFEMP) break; @@ -1571,7 +1647,7 @@ static int rcar_canfd_rx_poll(struct napi_struct *napi, int quota) /* Clear interrupt bit */ if (sts & RCANFD_RFSTS_RFIF) - rcar_canfd_write(priv->base, RCANFD_RFSTS(ridx), + rcar_canfd_write(priv->base, RCANFD_RFSTS(gpriv, ridx), sts & ~RCANFD_RFSTS_RFIF); } @@ -1579,7 +1655,7 @@ static int rcar_canfd_rx_poll(struct napi_struct *napi, int quota) if (num_pkts < quota) { if (napi_complete_done(napi, num_pkts)) { /* Enable Rx FIFO interrupts */ - rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx), + rcar_canfd_set_bit(priv->base, 
RCANFD_RFCC(gpriv, ridx), RCANFD_RFCC_RFIE); } } @@ -1715,15 +1791,15 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch, netif_napi_add(ndev, &priv->napi, rcar_canfd_rx_poll, RCANFD_NAPI_WEIGHT); + spin_lock_init(&priv->tx_lock); + devm_can_led_init(ndev); + gpriv->ch[priv->channel] = priv; err = register_candev(ndev); if (err) { dev_err(&pdev->dev, "register_candev() failed, error %d\n", err); goto fail_candev; } - spin_lock_init(&priv->tx_lock); - devm_can_led_init(ndev); - gpriv->ch[priv->channel] = priv; dev_info(&pdev->dev, "device registered (channel %u)\n", priv->channel); return 0; @@ -1756,21 +1832,24 @@ static int rcar_canfd_probe(struct platform_device *pdev) int g_err_irq, g_recc_irq; bool fdmode = true; /* CAN FD only mode - default */ enum rcanfd_chip_id chip_id; + int max_channels; + char name[9] = "channelX"; + int i; chip_id = (uintptr_t)of_device_get_match_data(&pdev->dev); + max_channels = chip_id == RENESAS_R8A779A0 ? 8 : 2; if (of_property_read_bool(pdev->dev.of_node, "renesas,no-can-fd")) fdmode = false; /* Classical CAN only mode */ - of_child = of_get_child_by_name(pdev->dev.of_node, "channel0"); - if (of_child && of_device_is_available(of_child)) - channels_mask |= BIT(0); /* Channel 0 */ - - of_child = of_get_child_by_name(pdev->dev.of_node, "channel1"); - if (of_child && of_device_is_available(of_child)) - channels_mask |= BIT(1); /* Channel 1 */ + for (i = 0; i < max_channels; ++i) { + name[7] = '0' + i; + of_child = of_get_child_by_name(pdev->dev.of_node, name); + if (of_child && of_device_is_available(of_child)) + channels_mask |= BIT(i); + } - if (chip_id == RENESAS_RCAR_GEN3) { + if (chip_id != RENESAS_RZG2L) { ch_irq = platform_get_irq_byname_optional(pdev, "ch_int"); if (ch_irq < 0) { /* For backward compatibility get irq by index */ @@ -1806,6 +1885,7 @@ static int rcar_canfd_probe(struct platform_device *pdev) gpriv->channels_mask = channels_mask; gpriv->fdmode = fdmode; gpriv->chip_id = chip_id; + gpriv->max_channels = max_channels; if (gpriv->chip_id == RENESAS_RZG2L) { gpriv->rstc1 = devm_reset_control_get_exclusive(&pdev->dev, "rstp_n"); @@ -1847,7 +1927,7 @@ static int rcar_canfd_probe(struct platform_device *pdev) } fcan_freq = clk_get_rate(gpriv->can_clk); - if (gpriv->fcan == RCANFD_CANFDCLK && gpriv->chip_id == RENESAS_RCAR_GEN3) + if (gpriv->fcan == RCANFD_CANFDCLK && gpriv->chip_id != RENESAS_RZG2L) /* CANFD clock is further divided by (1/2) within the IP */ fcan_freq /= 2; @@ -1859,7 +1939,7 @@ static int rcar_canfd_probe(struct platform_device *pdev) gpriv->base = addr; /* Request IRQ that's common for both channels */ - if (gpriv->chip_id == RENESAS_RCAR_GEN3) { + if (gpriv->chip_id != RENESAS_RZG2L) { err = devm_request_irq(&pdev->dev, ch_irq, rcar_canfd_channel_interrupt, 0, "canfd.ch_int", gpriv); @@ -1925,7 +2005,7 @@ static int rcar_canfd_probe(struct platform_device *pdev) rcar_canfd_configure_controller(gpriv); /* Configure per channel attributes */ - for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) { + for_each_set_bit(ch, &gpriv->channels_mask, max_channels) { /* Configure Channel's Rx fifo */ rcar_canfd_configure_rx(gpriv, ch); @@ -1951,7 +2031,7 @@ static int rcar_canfd_probe(struct platform_device *pdev) goto fail_mode; } - for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) { + for_each_set_bit(ch, &gpriv->channels_mask, max_channels) { err = rcar_canfd_channel_probe(gpriv, ch, fcan_freq); if (err) goto fail_channel; @@ -1963,7 +2043,7 @@ static int 
rcar_canfd_probe(struct platform_device *pdev) return 0; fail_channel: - for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) + for_each_set_bit(ch, &gpriv->channels_mask, max_channels) rcar_canfd_channel_remove(gpriv, ch); fail_mode: rcar_canfd_disable_global_interrupts(gpriv); @@ -1984,7 +2064,7 @@ static int rcar_canfd_remove(struct platform_device *pdev) rcar_canfd_reset_controller(gpriv); rcar_canfd_disable_global_interrupts(gpriv); - for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) { + for_each_set_bit(ch, &gpriv->channels_mask, gpriv->max_channels) { rcar_canfd_disable_channel_interrupts(gpriv->ch[ch]); rcar_canfd_channel_remove(gpriv, ch); } @@ -2014,6 +2094,7 @@ static SIMPLE_DEV_PM_OPS(rcar_canfd_pm_ops, rcar_canfd_suspend, static const __maybe_unused struct of_device_id rcar_canfd_of_table[] = { { .compatible = "renesas,rcar-gen3-canfd", .data = (void *)RENESAS_RCAR_GEN3 }, { .compatible = "renesas,rzg2l-canfd", .data = (void *)RENESAS_RZG2L }, + { .compatible = "renesas,r8a779a0-canfd", .data = (void *)RENESAS_R8A779A0 }, { } }; diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c index 27783fbf011f..ec294d0c5722 100644 --- a/drivers/net/can/slcan.c +++ b/drivers/net/can/slcan.c @@ -221,7 +221,7 @@ static void slc_bump(struct slcan *sl) if (!(cf.can_id & CAN_RTR_FLAG)) sl->dev->stats.rx_bytes += cf.len; - netif_rx_ni(skb); + netif_rx(skb); } /* parse tty input stream */ diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c index d74e895bddf7..8d27ac66ca7f 100644 --- a/drivers/net/can/softing/softing_main.c +++ b/drivers/net/can/softing/softing_main.c @@ -392,13 +392,10 @@ static int softing_netdev_open(struct net_device *ndev) static int softing_netdev_stop(struct net_device *ndev) { - int ret; - netif_stop_queue(ndev); /* softing cycle does close_candev() */ - ret = softing_startstop(ndev, 0); - return ret; + return softing_startstop(ndev, 0); } static int softing_candev_set_mode(struct net_device *ndev, enum can_mode mode) diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c index cfcc14fe3e42..a5b2952b8d0f 100644 --- a/drivers/net/can/spi/hi311x.c +++ b/drivers/net/can/spi/hi311x.c @@ -356,7 +356,7 @@ static void hi3110_hw_rx(struct spi_device *spi) can_led_event(priv->net, CAN_LED_EVENT_RX); - netif_rx_ni(skb); + netif_rx(skb); } static void hi3110_hw_sleep(struct spi_device *spi) @@ -677,7 +677,7 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id) tx_state = txerr >= rxerr ? new_state : 0; rx_state = txerr <= rxerr ? 
new_state : 0; can_change_state(net, cf, tx_state, rx_state); - netif_rx_ni(skb); + netif_rx(skb); if (new_state == CAN_STATE_BUS_OFF) { can_bus_off(net); @@ -718,7 +718,7 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id) cf->data[6] = hi3110_read(spi, HI3110_READ_TEC); cf->data[7] = hi3110_read(spi, HI3110_READ_REC); netdev_dbg(priv->net, "Bus Error\n"); - netif_rx_ni(skb); + netif_rx(skb); } } @@ -948,7 +948,7 @@ static int hi3110_can_probe(struct spi_device *spi) return dev_err_probe(dev, ret, "Probe failed\n"); } -static int hi3110_can_remove(struct spi_device *spi) +static void hi3110_can_remove(struct spi_device *spi) { struct hi3110_priv *priv = spi_get_drvdata(spi); struct net_device *net = priv->net; @@ -960,8 +960,6 @@ static int hi3110_can_remove(struct spi_device *spi) clk_disable_unprepare(priv->clk); free_candev(net); - - return 0; } static int __maybe_unused hi3110_can_suspend(struct device *dev) diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c index 025e07cb7439..fc747bff5eeb 100644 --- a/drivers/net/can/spi/mcp251x.c +++ b/drivers/net/can/spi/mcp251x.c @@ -740,7 +740,7 @@ static void mcp251x_hw_rx(struct spi_device *spi, int buf_idx) can_led_event(priv->net, CAN_LED_EVENT_RX); - netif_rx_ni(skb); + netif_rx(skb); } static void mcp251x_hw_sleep(struct spi_device *spi) @@ -987,7 +987,7 @@ static void mcp251x_error_skb(struct net_device *net, int can_id, int data1) if (skb) { frame->can_id |= can_id; frame->data[1] = data1; - netif_rx_ni(skb); + netif_rx(skb); } else { netdev_err(net, "cannot allocate error skb\n"); } @@ -1427,7 +1427,7 @@ out_free: return ret; } -static int mcp251x_can_remove(struct spi_device *spi) +static void mcp251x_can_remove(struct spi_device *spi) { struct mcp251x_priv *priv = spi_get_drvdata(spi); struct net_device *net = priv->net; @@ -1442,8 +1442,6 @@ static int mcp251x_can_remove(struct spi_device *spi) clk_disable_unprepare(priv->clk); free_candev(net); - - return 0; } static int __maybe_unused mcp251x_can_suspend(struct device *dev) diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-chip-fifo.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-chip-fifo.c index 2f9a623d381d..0d96097a2547 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-chip-fifo.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-chip-fifo.c @@ -78,7 +78,7 @@ int mcp251xfd_chip_fifo_init(const struct mcp251xfd_priv *priv) if (err) return err; - /* FIFO 1 - TX */ + /* TX FIFO */ val = FIELD_PREP(MCP251XFD_REG_FIFOCON_FSIZE_MASK, tx_ring->obj_num - 1) | MCP251XFD_REG_FIFOCON_TXEN | @@ -99,7 +99,7 @@ int mcp251xfd_chip_fifo_init(const struct mcp251xfd_priv *priv) MCP251XFD_REG_FIFOCON_TXAT_UNLIMITED); err = regmap_write(priv->map_reg, - MCP251XFD_REG_FIFOCON(MCP251XFD_TX_FIFO), + MCP251XFD_REG_FIFOCON(priv->tx->fifo_nr), val); if (err) return err; diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c index b5986df6eca0..3da17cadbd63 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c @@ -112,6 +112,22 @@ static const char *mcp251xfd_get_mode_str(const u8 mode) return "<unknown>"; } +static const char * +mcp251xfd_get_osc_str(const u32 osc, const u32 osc_reference) +{ + switch (~osc & osc_reference & + (MCP251XFD_REG_OSC_OSCRDY | MCP251XFD_REG_OSC_PLLRDY)) { + case MCP251XFD_REG_OSC_PLLRDY: + return "PLL"; + case MCP251XFD_REG_OSC_OSCRDY: + return "Oscillator"; + case MCP251XFD_REG_OSC_PLLRDY | MCP251XFD_REG_OSC_OSCRDY: + return 
"Oscillator/PLL"; + } + + return "<unknown>"; +} + static inline int mcp251xfd_vdd_enable(const struct mcp251xfd_priv *priv) { if (!priv->reg_vdd) @@ -178,6 +194,11 @@ static int mcp251xfd_clks_and_vdd_disable(const struct mcp251xfd_priv *priv) return 0; } +static inline bool mcp251xfd_reg_invalid(u32 reg) +{ + return reg == 0x0 || reg == 0xffffffff; +} + static inline int mcp251xfd_chip_get_mode(const struct mcp251xfd_priv *priv, u8 *mode) { @@ -197,34 +218,55 @@ static int __mcp251xfd_chip_set_mode(const struct mcp251xfd_priv *priv, const u8 mode_req, bool nowait) { - u32 con, con_reqop; + u32 con = 0, con_reqop, osc = 0; + u8 mode; int err; con_reqop = FIELD_PREP(MCP251XFD_REG_CON_REQOP_MASK, mode_req); err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_CON, MCP251XFD_REG_CON_REQOP_MASK, con_reqop); - if (err) + if (err == -EBADMSG) { + netdev_err(priv->ndev, + "Failed to set Requested Operation Mode.\n"); + + return -ENODEV; + } else if (err) { return err; + } if (mode_req == MCP251XFD_REG_CON_MODE_SLEEP || nowait) return 0; err = regmap_read_poll_timeout(priv->map_reg, MCP251XFD_REG_CON, con, + !mcp251xfd_reg_invalid(con) && FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK, con) == mode_req, MCP251XFD_POLL_SLEEP_US, MCP251XFD_POLL_TIMEOUT_US); - if (err) { - u8 mode = FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK, con); + if (err != -ETIMEDOUT && err != -EBADMSG) + return err; + + /* Ignore return value. + * Print below error messages, even if this fails. + */ + regmap_read(priv->map_reg, MCP251XFD_REG_OSC, &osc); + if (mcp251xfd_reg_invalid(con)) { netdev_err(priv->ndev, - "Controller failed to enter mode %s Mode (%u) and stays in %s Mode (%u).\n", - mcp251xfd_get_mode_str(mode_req), mode_req, - mcp251xfd_get_mode_str(mode), mode); - return err; + "Failed to read CAN Control Register (con=0x%08x, osc=0x%08x).\n", + con, osc); + + return -ENODEV; } - return 0; + mode = FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK, con); + netdev_err(priv->ndev, + "Controller failed to enter mode %s Mode (%u) and stays in %s Mode (%u) (con=0x%08x, osc=0x%08x).\n", + mcp251xfd_get_mode_str(mode_req), mode_req, + mcp251xfd_get_mode_str(mode), mode, + con, osc); + + return -ETIMEDOUT; } static inline int @@ -241,27 +283,58 @@ mcp251xfd_chip_set_mode_nowait(const struct mcp251xfd_priv *priv, return __mcp251xfd_chip_set_mode(priv, mode_req, true); } -static inline bool mcp251xfd_osc_invalid(u32 reg) +static int +mcp251xfd_chip_wait_for_osc_ready(const struct mcp251xfd_priv *priv, + u32 osc_reference, u32 osc_mask) { - return reg == 0x0 || reg == 0xffffffff; + u32 osc; + int err; + + err = regmap_read_poll_timeout(priv->map_reg, MCP251XFD_REG_OSC, osc, + !mcp251xfd_reg_invalid(osc) && + (osc & osc_mask) == osc_reference, + MCP251XFD_OSC_STAB_SLEEP_US, + MCP251XFD_OSC_STAB_TIMEOUT_US); + if (err != -ETIMEDOUT) + return err; + + if (mcp251xfd_reg_invalid(osc)) { + netdev_err(priv->ndev, + "Failed to read Oscillator Configuration Register (osc=0x%08x).\n", + osc); + return -ENODEV; + } + + netdev_err(priv->ndev, + "Timeout waiting for %s ready (osc=0x%08x, osc_reference=0x%08x, osc_mask=0x%08x).\n", + mcp251xfd_get_osc_str(osc, osc_reference), + osc, osc_reference, osc_mask); + + return -ETIMEDOUT; } -static int mcp251xfd_chip_clock_enable(const struct mcp251xfd_priv *priv) +static int mcp251xfd_chip_wake(const struct mcp251xfd_priv *priv) { u32 osc, osc_reference, osc_mask; int err; - /* Set Power On Defaults for "Clock Output Divisor" and remove - * "Oscillator Disable" bit. 
+ /* For normal sleep on MCP2517FD and MCP2518FD, clearing + * "Oscillator Disable" will wake the chip. For low power mode + * on MCP2518FD, asserting the chip select will wake the + * chip. Writing to the Oscillator register will wake it in + * both cases. */ osc = FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK, MCP251XFD_REG_OSC_CLKODIV_10); + + /* We cannot check for the PLL ready bit (either set or + * unset), as the PLL might be enabled. This can happen if the + * system reboots, while the mcp251xfd stays powered. + */ osc_reference = MCP251XFD_REG_OSC_OSCRDY; - osc_mask = MCP251XFD_REG_OSC_OSCRDY | MCP251XFD_REG_OSC_PLLRDY; + osc_mask = MCP251XFD_REG_OSC_OSCRDY; - /* Note: - * - * If the controller is in Sleep Mode the following write only + /* If the controller is in Sleep Mode the following write only * removes the "Oscillator Disable" bit and powers it up. All * other bits are unaffected. */ @@ -269,24 +342,31 @@ static int mcp251xfd_chip_clock_enable(const struct mcp251xfd_priv *priv) if (err) return err; - /* Wait for "Oscillator Ready" bit */ - err = regmap_read_poll_timeout(priv->map_reg, MCP251XFD_REG_OSC, osc, - (osc & osc_mask) == osc_reference, - MCP251XFD_OSC_STAB_SLEEP_US, - MCP251XFD_OSC_STAB_TIMEOUT_US); - if (mcp251xfd_osc_invalid(osc)) { - netdev_err(priv->ndev, - "Failed to detect %s (osc=0x%08x).\n", - mcp251xfd_get_model_str(priv), osc); - return -ENODEV; - } else if (err == -ETIMEDOUT) { - netdev_err(priv->ndev, - "Timeout waiting for Oscillator Ready (osc=0x%08x, osc_reference=0x%08x)\n", - osc, osc_reference); - return -ETIMEDOUT; + /* Sometimes the PLL is stuck enabled, the controller never + * sets the OSC Ready bit, and we get an -ETIMEDOUT. Our + * caller takes care of retry. + */ + return mcp251xfd_chip_wait_for_osc_ready(priv, osc_reference, osc_mask); +} + +static inline int mcp251xfd_chip_sleep(const struct mcp251xfd_priv *priv) +{ + if (priv->pll_enable) { + u32 osc; + int err; + + /* Turn off PLL */ + osc = FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK, + MCP251XFD_REG_OSC_CLKODIV_10); + err = regmap_write(priv->map_reg, MCP251XFD_REG_OSC, osc); + if (err) + netdev_err(priv->ndev, + "Failed to disable PLL.\n"); + + priv->spi->max_speed_hz = priv->spi_max_speed_hz_slow; } - return err; + return mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP); } static int mcp251xfd_chip_softreset_do(const struct mcp251xfd_priv *priv) @@ -294,10 +374,10 @@ static int mcp251xfd_chip_softreset_do(const struct mcp251xfd_priv *priv) const __be16 cmd = mcp251xfd_cmd_reset(); int err; - /* The Set Mode and SPI Reset command only seems to works if - * the controller is not in Sleep Mode. + /* The Set Mode and SPI Reset command only works if the + * controller is not in Sleep Mode. */ - err = mcp251xfd_chip_clock_enable(priv); + err = mcp251xfd_chip_wake(priv); if (err) return err; @@ -311,34 +391,29 @@ static int mcp251xfd_chip_softreset_do(const struct mcp251xfd_priv *priv) static int mcp251xfd_chip_softreset_check(const struct mcp251xfd_priv *priv) { - u32 osc, osc_reference; + u32 osc_reference, osc_mask; u8 mode; int err; - err = mcp251xfd_chip_get_mode(priv, &mode); - if (err) - return err; - - if (mode != MCP251XFD_REG_CON_MODE_CONFIG) { - netdev_info(priv->ndev, - "Controller not in Config Mode after reset, but in %s Mode (%u).\n", - mcp251xfd_get_mode_str(mode), mode); - return -ETIMEDOUT; - } - + /* Check for reset defaults of OSC reg. + * This will take care of stabilization period. 
+ */ osc_reference = MCP251XFD_REG_OSC_OSCRDY | FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK, MCP251XFD_REG_OSC_CLKODIV_10); + osc_mask = osc_reference | MCP251XFD_REG_OSC_PLLRDY; + err = mcp251xfd_chip_wait_for_osc_ready(priv, osc_reference, osc_mask); + if (err) + return err; - /* check reset defaults of OSC reg */ - err = regmap_read(priv->map_reg, MCP251XFD_REG_OSC, &osc); + err = mcp251xfd_chip_get_mode(priv, &mode); if (err) return err; - if (osc != osc_reference) { + if (mode != MCP251XFD_REG_CON_MODE_CONFIG) { netdev_info(priv->ndev, - "Controller failed to reset. osc=0x%08x, reference value=0x%08x.\n", - osc, osc_reference); + "Controller not in Config Mode after reset, but in %s Mode (%u).\n", + mcp251xfd_get_mode_str(mode), mode); return -ETIMEDOUT; } @@ -374,7 +449,7 @@ static int mcp251xfd_chip_softreset(const struct mcp251xfd_priv *priv) static int mcp251xfd_chip_clock_init(const struct mcp251xfd_priv *priv) { - u32 osc; + u32 osc, osc_reference, osc_mask; int err; /* Activate Low Power Mode on Oscillator Disable. This only @@ -384,10 +459,29 @@ static int mcp251xfd_chip_clock_init(const struct mcp251xfd_priv *priv) osc = MCP251XFD_REG_OSC_LPMEN | FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK, MCP251XFD_REG_OSC_CLKODIV_10); + osc_reference = MCP251XFD_REG_OSC_OSCRDY; + osc_mask = MCP251XFD_REG_OSC_OSCRDY | MCP251XFD_REG_OSC_PLLRDY; + + if (priv->pll_enable) { + osc |= MCP251XFD_REG_OSC_PLLEN; + osc_reference |= MCP251XFD_REG_OSC_PLLRDY; + } + err = regmap_write(priv->map_reg, MCP251XFD_REG_OSC, osc); if (err) return err; + err = mcp251xfd_chip_wait_for_osc_ready(priv, osc_reference, osc_mask); + if (err) + return err; + + priv->spi->max_speed_hz = priv->spi_max_speed_hz_fast; + + return 0; +} + +static int mcp251xfd_chip_timestamp_init(const struct mcp251xfd_priv *priv) +{ /* Set Time Base Counter Prescaler to 1. * * This means an overflow of the 32 bit Time Base Counter @@ -628,14 +722,14 @@ static int mcp251xfd_chip_interrupts_disable(const struct mcp251xfd_priv *priv) return regmap_write(priv->map_reg, MCP251XFD_REG_CRC, 0); } -static int mcp251xfd_chip_stop(struct mcp251xfd_priv *priv, - const enum can_state state) +static void mcp251xfd_chip_stop(struct mcp251xfd_priv *priv, + const enum can_state state) { priv->can.state = state; mcp251xfd_chip_interrupts_disable(priv); mcp251xfd_chip_rx_int_disable(priv); - return mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP); + mcp251xfd_chip_sleep(priv); } static int mcp251xfd_chip_start(struct mcp251xfd_priv *priv) @@ -650,6 +744,10 @@ static int mcp251xfd_chip_start(struct mcp251xfd_priv *priv) if (err) goto out_chip_stop; + err = mcp251xfd_chip_timestamp_init(priv); + if (err) + goto out_chip_stop; + err = mcp251xfd_set_bittiming(priv); if (err) goto out_chip_stop; @@ -662,7 +760,9 @@ static int mcp251xfd_chip_start(struct mcp251xfd_priv *priv) if (err) goto out_chip_stop; - mcp251xfd_ring_init(priv); + err = mcp251xfd_ring_init(priv); + if (err) + goto out_chip_stop; err = mcp251xfd_chip_fifo_init(priv); if (err) @@ -1284,6 +1384,20 @@ static int mcp251xfd_handle_spicrcif(struct mcp251xfd_priv *priv) return 0; } +static int mcp251xfd_read_regs_status(struct mcp251xfd_priv *priv) +{ + const int val_bytes = regmap_get_val_bytes(priv->map_reg); + size_t len; + + if (priv->rx_ring_num == 1) + len = sizeof(priv->regs_status.intf); + else + len = sizeof(priv->regs_status); + + return regmap_bulk_read(priv->map_reg, MCP251XFD_REG_INT, + &priv->regs_status, len / val_bytes); +} + #define mcp251xfd_handle(priv, irq, ...) 
\ ({ \ struct mcp251xfd_priv *_priv = (priv); \ @@ -1300,7 +1414,6 @@ static int mcp251xfd_handle_spicrcif(struct mcp251xfd_priv *priv) static irqreturn_t mcp251xfd_irq(int irq, void *dev_id) { struct mcp251xfd_priv *priv = dev_id; - const int val_bytes = regmap_get_val_bytes(priv->map_reg); irqreturn_t handled = IRQ_NONE; int err; @@ -1312,21 +1425,28 @@ static irqreturn_t mcp251xfd_irq(int irq, void *dev_id) if (!rx_pending) break; + /* Assume 1st RX-FIFO pending, if other FIFOs + * are pending the main IRQ handler will take + * care. + */ + priv->regs_status.rxif = BIT(priv->rx[0]->fifo_nr); err = mcp251xfd_handle(priv, rxif); if (err) goto out_fail; handled = IRQ_HANDLED; - } while (1); + + /* We don't know which RX-FIFO is pending, but only + * handle the 1st RX-FIFO. Leave loop here if we have + * more than 1 RX-FIFO to avoid starvation. + */ + } while (priv->rx_ring_num == 1); do { u32 intf_pending, intf_pending_clearable; bool set_normal_mode = false; - err = regmap_bulk_read(priv->map_reg, MCP251XFD_REG_INT, - &priv->regs_status, - sizeof(priv->regs_status) / - val_bytes); + err = mcp251xfd_read_regs_status(priv); if (err) goto out_fail; @@ -1621,8 +1741,9 @@ static int mcp251xfd_register_check_rx_int(struct mcp251xfd_priv *priv) } static int -mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv, - u32 *dev_id, u32 *effective_speed_hz) +mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv, u32 *dev_id, + u32 *effective_speed_hz_slow, + u32 *effective_speed_hz_fast) { struct mcp251xfd_map_buf_nocrc *buf_rx; struct mcp251xfd_map_buf_nocrc *buf_tx; @@ -1641,16 +1762,20 @@ mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv, xfer[0].tx_buf = buf_tx; xfer[0].len = sizeof(buf_tx->cmd); + xfer[0].speed_hz = priv->spi_max_speed_hz_slow; xfer[1].rx_buf = buf_rx->data; xfer[1].len = sizeof(dev_id); + xfer[1].speed_hz = priv->spi_max_speed_hz_fast; mcp251xfd_spi_cmd_read_nocrc(&buf_tx->cmd, MCP251XFD_REG_DEVID); + err = spi_sync_transfer(priv->spi, xfer, ARRAY_SIZE(xfer)); if (err) goto out_kfree_buf_tx; *dev_id = be32_to_cpup((__be32 *)buf_rx->data); - *effective_speed_hz = xfer->effective_speed_hz; + *effective_speed_hz_slow = xfer[0].effective_speed_hz; + *effective_speed_hz_fast = xfer[1].effective_speed_hz; out_kfree_buf_tx: kfree(buf_tx); @@ -1666,34 +1791,45 @@ mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv, static int mcp251xfd_register_done(const struct mcp251xfd_priv *priv) { - u32 dev_id, effective_speed_hz; + u32 dev_id, effective_speed_hz_slow, effective_speed_hz_fast; + unsigned long clk_rate; int err; err = mcp251xfd_register_get_dev_id(priv, &dev_id, - &effective_speed_hz); + &effective_speed_hz_slow, + &effective_speed_hz_fast); if (err) return err; + clk_rate = clk_get_rate(priv->clk); + netdev_info(priv->ndev, - "%s rev%lu.%lu (%cRX_INT %cMAB_NO_WARN %cCRC_REG %cCRC_RX %cCRC_TX %cECC %cHD c:%u.%02uMHz m:%u.%02uMHz r:%u.%02uMHz e:%u.%02uMHz) successfully initialized.\n", + "%s rev%lu.%lu (%cRX_INT %cPLL %cMAB_NO_WARN %cCRC_REG %cCRC_RX %cCRC_TX %cECC %cHD o:%lu.%02luMHz c:%u.%02uMHz m:%u.%02uMHz rs:%u.%02uMHz es:%u.%02uMHz rf:%u.%02uMHz ef:%u.%02uMHz) successfully initialized.\n", mcp251xfd_get_model_str(priv), FIELD_GET(MCP251XFD_REG_DEVID_ID_MASK, dev_id), FIELD_GET(MCP251XFD_REG_DEVID_REV_MASK, dev_id), priv->rx_int ? '+' : '-', + priv->pll_enable ? 
'+' : '-', MCP251XFD_QUIRK_ACTIVE(MAB_NO_WARN), MCP251XFD_QUIRK_ACTIVE(CRC_REG), MCP251XFD_QUIRK_ACTIVE(CRC_RX), MCP251XFD_QUIRK_ACTIVE(CRC_TX), MCP251XFD_QUIRK_ACTIVE(ECC), MCP251XFD_QUIRK_ACTIVE(HALF_DUPLEX), + clk_rate / 1000000, + clk_rate % 1000000 / 1000 / 10, priv->can.clock.freq / 1000000, priv->can.clock.freq % 1000000 / 1000 / 10, priv->spi_max_speed_hz_orig / 1000000, priv->spi_max_speed_hz_orig % 1000000 / 1000 / 10, - priv->spi->max_speed_hz / 1000000, - priv->spi->max_speed_hz % 1000000 / 1000 / 10, - effective_speed_hz / 1000000, - effective_speed_hz % 1000000 / 1000 / 10); + priv->spi_max_speed_hz_slow / 1000000, + priv->spi_max_speed_hz_slow % 1000000 / 1000 / 10, + effective_speed_hz_slow / 1000000, + effective_speed_hz_slow % 1000000 / 1000 / 10, + priv->spi_max_speed_hz_fast / 1000000, + priv->spi_max_speed_hz_fast % 1000000 / 1000 / 10, + effective_speed_hz_fast / 1000000, + effective_speed_hz_fast % 1000000 / 1000 / 10); return 0; } @@ -1719,19 +1855,25 @@ static int mcp251xfd_register(struct mcp251xfd_priv *priv) if (err == -ENODEV) goto out_runtime_disable; if (err) - goto out_chip_set_mode_sleep; + goto out_chip_sleep; + + err = mcp251xfd_chip_clock_init(priv); + if (err == -ENODEV) + goto out_runtime_disable; + if (err) + goto out_chip_sleep; err = mcp251xfd_register_chip_detect(priv); if (err) - goto out_chip_set_mode_sleep; + goto out_chip_sleep; err = mcp251xfd_register_check_rx_int(priv); if (err) - goto out_chip_set_mode_sleep; + goto out_chip_sleep; err = register_candev(ndev); if (err) - goto out_chip_set_mode_sleep; + goto out_chip_sleep; err = mcp251xfd_register_done(priv); if (err) @@ -1741,7 +1883,7 @@ static int mcp251xfd_register(struct mcp251xfd_priv *priv) * disable the clocks and vdd. If CONFIG_PM is not enabled, * the clocks and vdd will stay powered. 
*/ - err = mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP); + err = mcp251xfd_chip_sleep(priv); if (err) goto out_unregister_candev; @@ -1751,8 +1893,8 @@ static int mcp251xfd_register(struct mcp251xfd_priv *priv) out_unregister_candev: unregister_candev(ndev); - out_chip_set_mode_sleep: - mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP); + out_chip_sleep: + mcp251xfd_chip_sleep(priv); out_runtime_disable: pm_runtime_disable(ndev->dev.parent); out_runtime_put_noidle: @@ -1768,10 +1910,10 @@ static inline void mcp251xfd_unregister(struct mcp251xfd_priv *priv) unregister_candev(ndev); - pm_runtime_get_sync(ndev->dev.parent); - pm_runtime_put_noidle(ndev->dev.parent); - mcp251xfd_clks_and_vdd_disable(priv); - pm_runtime_disable(ndev->dev.parent); + if (pm_runtime_enabled(ndev->dev.parent)) + pm_runtime_disable(ndev->dev.parent); + else + mcp251xfd_clks_and_vdd_disable(priv); } static const struct of_device_id mcp251xfd_of_match[] = { @@ -1814,6 +1956,7 @@ static int mcp251xfd_probe(struct spi_device *spi) struct gpio_desc *rx_int; struct regulator *reg_vdd, *reg_xceiver; struct clk *clk; + bool pll_enable = false; u32 freq = 0; int err; @@ -1864,12 +2007,8 @@ static int mcp251xfd_probe(struct spi_device *spi) return -ERANGE; } - if (freq <= MCP251XFD_SYSCLOCK_HZ_MAX / MCP251XFD_OSC_PLL_MULTIPLIER) { - dev_err(&spi->dev, - "Oscillator frequency (%u Hz) is too low and PLL is not supported.\n", - freq); - return -ERANGE; - } + if (freq <= MCP251XFD_SYSCLOCK_HZ_MAX / MCP251XFD_OSC_PLL_MULTIPLIER) + pll_enable = true; ndev = alloc_candev(sizeof(struct mcp251xfd_priv), MCP251XFD_TX_OBJ_NUM_MAX); @@ -1885,6 +2024,8 @@ static int mcp251xfd_probe(struct spi_device *spi) priv = netdev_priv(ndev); spi_set_drvdata(spi, priv); priv->can.clock.freq = freq; + if (pll_enable) + priv->can.clock.freq *= MCP251XFD_OSC_PLL_MULTIPLIER; priv->can.do_set_mode = mcp251xfd_set_mode; priv->can.do_get_berr_counter = mcp251xfd_get_berr_counter; priv->can.bittiming_const = &mcp251xfd_bittiming_const; @@ -1897,6 +2038,7 @@ static int mcp251xfd_probe(struct spi_device *spi) priv->spi = spi; priv->rx_int = rx_int; priv->clk = clk; + priv->pll_enable = pll_enable; priv->reg_vdd = reg_vdd; priv->reg_xceiver = reg_xceiver; @@ -1934,7 +2076,16 @@ static int mcp251xfd_probe(struct spi_device *spi) * */ priv->spi_max_speed_hz_orig = spi->max_speed_hz; - spi->max_speed_hz = min(spi->max_speed_hz, freq / 2 / 1000 * 850); + priv->spi_max_speed_hz_slow = min(spi->max_speed_hz, + freq / 2 / 1000 * 850); + if (priv->pll_enable) + priv->spi_max_speed_hz_fast = min(spi->max_speed_hz, + freq * + MCP251XFD_OSC_PLL_MULTIPLIER / + 2 / 1000 * 850); + else + priv->spi_max_speed_hz_fast = priv->spi_max_speed_hz_slow; + spi->max_speed_hz = priv->spi_max_speed_hz_slow; spi->bits_per_word = 8; spi->rt = true; err = spi_setup(spi); @@ -1951,8 +2102,11 @@ static int mcp251xfd_probe(struct spi_device *spi) goto out_free_candev; err = mcp251xfd_register(priv); - if (err) + if (err) { + dev_err_probe(&spi->dev, err, "Failed to detect %s.\n", + mcp251xfd_get_model_str(priv)); goto out_can_rx_offload_del; + } return 0; @@ -1966,7 +2120,7 @@ static int mcp251xfd_probe(struct spi_device *spi) return err; } -static int mcp251xfd_remove(struct spi_device *spi) +static void mcp251xfd_remove(struct spi_device *spi) { struct mcp251xfd_priv *priv = spi_get_drvdata(spi); struct net_device *ndev = priv->ndev; @@ -1975,8 +2129,6 @@ static int mcp251xfd_remove(struct spi_device *spi) mcp251xfd_unregister(priv); spi->max_speed_hz = 
priv->spi_max_speed_hz_orig; free_candev(ndev); - - return 0; } static int __maybe_unused mcp251xfd_runtime_suspend(struct device *device) diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c index ffae8fdd3af0..c991b30bc9f0 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c @@ -207,10 +207,10 @@ static void mcp251xfd_dump_tx_ring(const struct mcp251xfd_priv *priv, .val = tx->base, }, { .key = MCP251XFD_DUMP_OBJECT_RING_KEY_NR, - .val = 0, + .val = tx->nr, }, { .key = MCP251XFD_DUMP_OBJECT_RING_KEY_FIFO_NR, - .val = MCP251XFD_TX_FIFO, + .val = tx->fifo_nr, }, { .key = MCP251XFD_DUMP_OBJECT_RING_KEY_OBJ_NUM, .val = tx->obj_num, diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c index 7b120c716228..217510c12af5 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c @@ -2,8 +2,8 @@ // // mcp251xfd - Microchip MCP251xFD Family CAN controller driver // -// Copyright (c) 2019, 2020 Pengutronix, -// Marc Kleine-Budde <kernel@pengutronix.de> +// Copyright (c) 2019, 2020, 2021 Pengutronix, +// Marc Kleine-Budde <kernel@pengutronix.de> // #include "mcp251xfd.h" @@ -47,22 +47,32 @@ mcp251xfd_regmap_nocrc_gather_write(void *context, return spi_sync_transfer(spi, xfer, ARRAY_SIZE(xfer)); } -static inline bool mcp251xfd_update_bits_read_reg(unsigned int reg) +static inline bool +mcp251xfd_update_bits_read_reg(const struct mcp251xfd_priv *priv, + unsigned int reg) { + struct mcp251xfd_rx_ring *ring; + int n; + switch (reg) { case MCP251XFD_REG_INT: case MCP251XFD_REG_TEFCON: - case MCP251XFD_REG_FIFOCON(MCP251XFD_RX_FIFO(0)): case MCP251XFD_REG_FLTCON(0): case MCP251XFD_REG_ECCSTAT: case MCP251XFD_REG_CRC: return false; case MCP251XFD_REG_CON: - case MCP251XFD_REG_FIFOSTA(MCP251XFD_RX_FIFO(0)): case MCP251XFD_REG_OSC: case MCP251XFD_REG_ECCCON: return true; default: + mcp251xfd_for_each_rx_ring(priv, ring, n) { + if (reg == MCP251XFD_REG_FIFOCON(ring->fifo_nr)) + return false; + if (reg == MCP251XFD_REG_FIFOSTA(ring->fifo_nr)) + return true; + } + WARN(1, "Status of reg 0x%04x unknown.\n", reg); } @@ -92,7 +102,7 @@ mcp251xfd_regmap_nocrc_update_bits(void *context, unsigned int reg, last_byte = mcp251xfd_last_byte_set(mask); len = last_byte - first_byte + 1; - if (mcp251xfd_update_bits_read_reg(reg)) { + if (mcp251xfd_update_bits_read_reg(priv, reg)) { struct spi_transfer xfer[2] = { }; struct spi_message msg; @@ -368,7 +378,7 @@ mcp251xfd_regmap_crc_read(void *context, * to the caller. It will take care of both cases. 
* */ - if (reg == MCP251XFD_REG_OSC) { + if (reg == MCP251XFD_REG_OSC && val_len == sizeof(__le32)) { err = 0; goto out; } diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c index 92f9e9b01289..848b8b2ecb5f 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c @@ -53,6 +53,49 @@ mcp251xfd_cmd_prepare_write_reg(const struct mcp251xfd_priv *priv, } static void +mcp251xfd_ring_init_tef(struct mcp251xfd_priv *priv, u16 *base) +{ + struct mcp251xfd_tef_ring *tef_ring; + struct spi_transfer *xfer; + u32 val; + u16 addr; + u8 len; + int i; + + /* TEF */ + tef_ring = priv->tef; + tef_ring->head = 0; + tef_ring->tail = 0; + + /* TEF- and TX-FIFO have same number of objects */ + *base = mcp251xfd_get_tef_obj_addr(priv->tx->obj_num); + + /* FIFO increment TEF tail pointer */ + addr = MCP251XFD_REG_TEFCON; + val = MCP251XFD_REG_TEFCON_UINC; + len = mcp251xfd_cmd_prepare_write_reg(priv, &tef_ring->uinc_buf, + addr, val, val); + + for (i = 0; i < ARRAY_SIZE(tef_ring->uinc_xfer); i++) { + xfer = &tef_ring->uinc_xfer[i]; + xfer->tx_buf = &tef_ring->uinc_buf; + xfer->len = len; + xfer->cs_change = 1; + xfer->cs_change_delay.value = 0; + xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS; + } + + /* "cs_change == 1" on the last transfer results in an active + * chip select after the complete SPI message. This causes the + * controller to interpret the next register access as + * data. Set "cs_change" of the last transfer to "0" to + * properly deactivate the chip select at the end of the + * message. + */ + xfer->cs_change = 0; +} + +static void mcp251xfd_tx_ring_init_tx_obj(const struct mcp251xfd_priv *priv, const struct mcp251xfd_tx_ring *ring, struct mcp251xfd_tx_obj *tx_obj, @@ -88,81 +131,55 @@ mcp251xfd_tx_ring_init_tx_obj(const struct mcp251xfd_priv *priv, ARRAY_SIZE(tx_obj->xfer)); } -void mcp251xfd_ring_init(struct mcp251xfd_priv *priv) +static void +mcp251xfd_ring_init_tx(struct mcp251xfd_priv *priv, u16 *base, u8 *fifo_nr) { - struct mcp251xfd_tef_ring *tef_ring; struct mcp251xfd_tx_ring *tx_ring; - struct mcp251xfd_rx_ring *rx_ring, *prev_rx_ring = NULL; struct mcp251xfd_tx_obj *tx_obj; - struct spi_transfer *xfer; u32 val; u16 addr; u8 len; - int i, j; - - netdev_reset_queue(priv->ndev); - - /* TEF */ - tef_ring = priv->tef; - tef_ring->head = 0; - tef_ring->tail = 0; - - /* FIFO increment TEF tail pointer */ - addr = MCP251XFD_REG_TEFCON; - val = MCP251XFD_REG_TEFCON_UINC; - len = mcp251xfd_cmd_prepare_write_reg(priv, &tef_ring->uinc_buf, - addr, val, val); - - for (j = 0; j < ARRAY_SIZE(tef_ring->uinc_xfer); j++) { - xfer = &tef_ring->uinc_xfer[j]; - xfer->tx_buf = &tef_ring->uinc_buf; - xfer->len = len; - xfer->cs_change = 1; - xfer->cs_change_delay.value = 0; - xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS; - } - - /* "cs_change == 1" on the last transfer results in an active - * chip select after the complete SPI message. This causes the - * controller to interpret the next register access as - * data. Set "cs_change" of the last transfer to "0" to - * properly deactivate the chip select at the end of the - * message. 
- */ - xfer->cs_change = 0; + int i; - /* TX */ tx_ring = priv->tx; tx_ring->head = 0; tx_ring->tail = 0; - tx_ring->base = mcp251xfd_get_tef_obj_addr(tx_ring->obj_num); + tx_ring->base = *base; + tx_ring->nr = 0; + tx_ring->fifo_nr = *fifo_nr; + + *base = mcp251xfd_get_tx_obj_addr(tx_ring, tx_ring->obj_num); + *fifo_nr += 1; /* FIFO request to send */ - addr = MCP251XFD_REG_FIFOCON(MCP251XFD_TX_FIFO); + addr = MCP251XFD_REG_FIFOCON(tx_ring->fifo_nr); val = MCP251XFD_REG_FIFOCON_TXREQ | MCP251XFD_REG_FIFOCON_UINC; len = mcp251xfd_cmd_prepare_write_reg(priv, &tx_ring->rts_buf, addr, val, val); mcp251xfd_for_each_tx_obj(tx_ring, tx_obj, i) mcp251xfd_tx_ring_init_tx_obj(priv, tx_ring, tx_obj, len, i); +} + +static void +mcp251xfd_ring_init_rx(struct mcp251xfd_priv *priv, u16 *base, u8 *fifo_nr) +{ + struct mcp251xfd_rx_ring *rx_ring; + struct spi_transfer *xfer; + u32 val; + u16 addr; + u8 len; + int i, j; - /* RX */ mcp251xfd_for_each_rx_ring(priv, rx_ring, i) { rx_ring->head = 0; rx_ring->tail = 0; + rx_ring->base = *base; rx_ring->nr = i; - rx_ring->fifo_nr = MCP251XFD_RX_FIFO(i); - - if (!prev_rx_ring) - rx_ring->base = - mcp251xfd_get_tx_obj_addr(tx_ring, - tx_ring->obj_num); - else - rx_ring->base = prev_rx_ring->base + - prev_rx_ring->obj_size * - prev_rx_ring->obj_num; + rx_ring->fifo_nr = *fifo_nr; - prev_rx_ring = rx_ring; + *base = mcp251xfd_get_rx_obj_addr(rx_ring, rx_ring->obj_num); + *fifo_nr += 1; /* FIFO increment RX tail pointer */ addr = MCP251XFD_REG_FIFOCON(rx_ring->fifo_nr); @@ -190,6 +207,74 @@ void mcp251xfd_ring_init(struct mcp251xfd_priv *priv) } } +int mcp251xfd_ring_init(struct mcp251xfd_priv *priv) +{ + const struct mcp251xfd_rx_ring *rx_ring; + u16 base = 0, ram_used; + u8 fifo_nr = 1; + int i; + + netdev_reset_queue(priv->ndev); + + mcp251xfd_ring_init_tef(priv, &base); + mcp251xfd_ring_init_rx(priv, &base, &fifo_nr); + mcp251xfd_ring_init_tx(priv, &base, &fifo_nr); + + /* mcp251xfd_handle_rxif() will iterate over all RX rings. + * Rings with their corresponding bit set in + * priv->regs_status.rxif are read out. + * + * If the chip is configured for only 1 RX-FIFO, and if there + * is an RX interrupt pending (RXIF in INT register is set), + * it must be the 1st RX-FIFO. + * + * We mark the RXIF of the 1st FIFO as pending here, so that + * we can skip the read of the RXIF register in + * mcp251xfd_read_regs_status() for the 1 RX-FIFO only case. + * + * If we use more than 1 RX-FIFO, this value gets overwritten + * in mcp251xfd_read_regs_status(), so set it unconditionally + * here. 
+ */ + priv->regs_status.rxif = BIT(priv->rx[0]->fifo_nr); + + netdev_dbg(priv->ndev, + "FIFO setup: TEF: 0x%03x: %2d*%zu bytes = %4zu bytes\n", + mcp251xfd_get_tef_obj_addr(0), + priv->tx->obj_num, sizeof(struct mcp251xfd_hw_tef_obj), + priv->tx->obj_num * sizeof(struct mcp251xfd_hw_tef_obj)); + + mcp251xfd_for_each_rx_ring(priv, rx_ring, i) { + netdev_dbg(priv->ndev, + "FIFO setup: RX-%u: FIFO %u/0x%03x: %2u*%u bytes = %4u bytes\n", + rx_ring->nr, rx_ring->fifo_nr, + mcp251xfd_get_rx_obj_addr(rx_ring, 0), + rx_ring->obj_num, rx_ring->obj_size, + rx_ring->obj_num * rx_ring->obj_size); + } + + netdev_dbg(priv->ndev, + "FIFO setup: TX: FIFO %u/0x%03x: %2u*%u bytes = %4u bytes\n", + priv->tx->fifo_nr, + mcp251xfd_get_tx_obj_addr(priv->tx, 0), + priv->tx->obj_num, priv->tx->obj_size, + priv->tx->obj_num * priv->tx->obj_size); + + netdev_dbg(priv->ndev, + "FIFO setup: free: %4u bytes\n", + MCP251XFD_RAM_SIZE - (base - MCP251XFD_RAM_START)); + + ram_used = base - MCP251XFD_RAM_START; + if (ram_used > MCP251XFD_RAM_SIZE) { + netdev_err(priv->ndev, + "Error during ring configuration, using more RAM (%u bytes) than available (%u bytes).\n", + ram_used, MCP251XFD_RAM_SIZE); + return -ENOMEM; + } + + return 0; +} + void mcp251xfd_ring_free(struct mcp251xfd_priv *priv) { int i; @@ -249,21 +334,5 @@ int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv) } priv->rx_ring_num = i; - netdev_dbg(priv->ndev, - "FIFO setup: TEF: %d*%d bytes = %d bytes, TX: %d*%d bytes = %d bytes\n", - tx_obj_num, tef_obj_size, tef_obj_size * tx_obj_num, - tx_obj_num, tx_obj_size, tx_obj_size * tx_obj_num); - - mcp251xfd_for_each_rx_ring(priv, rx_ring, i) { - netdev_dbg(priv->ndev, - "FIFO setup: RX-%d: %d*%d bytes = %d bytes\n", - i, rx_ring->obj_num, rx_ring->obj_size, - rx_ring->obj_size * rx_ring->obj_num); - } - - netdev_dbg(priv->ndev, - "FIFO setup: free: %d bytes\n", - ram_free); - return 0; } diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c index 63f2526464b3..e6d39876065a 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c @@ -19,7 +19,7 @@ static inline int mcp251xfd_rx_head_get_from_chip(const struct mcp251xfd_priv *priv, const struct mcp251xfd_rx_ring *ring, - u8 *rx_head) + u8 *rx_head, bool *fifo_empty) { u32 fifo_sta; int err; @@ -30,6 +30,7 @@ mcp251xfd_rx_head_get_from_chip(const struct mcp251xfd_priv *priv, return err; *rx_head = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta); + *fifo_empty = !(fifo_sta & MCP251XFD_REG_FIFOSTA_TFNRFNIF); return 0; } @@ -84,10 +85,12 @@ mcp251xfd_rx_ring_update(const struct mcp251xfd_priv *priv, { u32 new_head; u8 chip_rx_head; + bool fifo_empty; int err; - err = mcp251xfd_rx_head_get_from_chip(priv, ring, &chip_rx_head); - if (err) + err = mcp251xfd_rx_head_get_from_chip(priv, ring, &chip_rx_head, + &fifo_empty); + if (err || fifo_empty) return err; /* chip_rx_head, is the next RX-Object filled by the HW. 
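
The arithmetic behind this head update is compact enough to model on its own. An editorial sketch in plain C (illustrative names; it mirrors mcp251xfd_rx_ring_update() above):

#include <assert.h>

/* The software head runs free and never wraps, while the index the
 * chip reports wraps at obj_num. Rebase the software head to the
 * start of its current lap, add the chip's offset, and account for a
 * wrap. Assumes the FIFO is non-empty (the caller just checked the
 * not-empty flag) and the chip is at most one lap ahead.
 */
static unsigned int sketch_new_head(unsigned int head,
				    unsigned int chip_rx_head,
				    unsigned int obj_num)
{
	unsigned int new_head = head - (head % obj_num) + chip_rx_head;

	/* no forward progress within the lap means the chip wrapped */
	if (new_head <= head)
		new_head += obj_num;

	return new_head;
}

int main(void)
{
	/* 8 objects per ring: software head 6, chip index 2 -> 10 */
	assert(sketch_new_head(6, 2, 8) == 10);
	/* same lap, plain advance: software head 9, chip index 3 -> 11 */
	assert(sketch_new_head(9, 3, 8) == 11);
	return 0;
}
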
@@ -251,6 +254,9 @@ int mcp251xfd_handle_rxif(struct mcp251xfd_priv *priv) int err, n; mcp251xfd_for_each_rx_ring(priv, ring, n) { + if (!(priv->regs_status.rxif & BIT(ring->fifo_nr))) + continue; + err = mcp251xfd_handle_rxif_ring(priv, ring); if (err) return err; diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h index f551c900803e..87cc13d455c1 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h @@ -2,8 +2,8 @@ * * mcp251xfd - Microchip MCP251xFD Family CAN controller driver * - * Copyright (c) 2019 Pengutronix, - * Marc Kleine-Budde <kernel@pengutronix.de> + * Copyright (c) 2019, 2020, 2021 Pengutronix, + * Marc Kleine-Budde <kernel@pengutronix.de> * Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org> */ @@ -383,8 +383,6 @@ #endif #define MCP251XFD_NAPI_WEIGHT 32 -#define MCP251XFD_TX_FIFO 1 -#define MCP251XFD_RX_FIFO(x) (MCP251XFD_TX_FIFO + 1 + (x)) /* SPI commands */ #define MCP251XFD_SPI_INSTRUCTION_RESET 0x0000 @@ -412,6 +410,15 @@ static_assert(MCP251XFD_TIMESTAMP_WORK_DELAY_SEC < #define MCP251XFD_SANITIZE_SPI 1 #define MCP251XFD_SANITIZE_CAN 1 +/* FIFO and Ring */ +#define MCP251XFD_FIFO_TEF_NUM 1U +#define MCP251XFD_FIFO_RX_NUM_MAX 1U +#define MCP251XFD_FIFO_TX_NUM 1U + +static_assert(MCP251XFD_FIFO_TEF_NUM == 1U); +static_assert(MCP251XFD_FIFO_TEF_NUM == MCP251XFD_FIFO_TX_NUM); +static_assert(MCP251XFD_FIFO_RX_NUM_MAX <= 4U); + /* Silence TX MAB overflow warnings */ #define MCP251XFD_QUIRK_MAB_NO_WARN BIT(0) /* Use CRC to access registers */ @@ -521,6 +528,8 @@ struct mcp251xfd_tx_ring { unsigned int tail; u16 base; + u8 nr; + u8 fifo_nr; u8 obj_num; u8 obj_size; @@ -561,6 +570,7 @@ struct mcp251xfd_ecc { struct mcp251xfd_regs_status { u32 intf; + u32 rxif; }; enum mcp251xfd_model { @@ -592,10 +602,12 @@ struct mcp251xfd_priv { struct spi_device *spi; u32 spi_max_speed_hz_orig; + u32 spi_max_speed_hz_fast; + u32 spi_max_speed_hz_slow; - struct mcp251xfd_tef_ring tef[1]; - struct mcp251xfd_tx_ring tx[1]; - struct mcp251xfd_rx_ring *rx[1]; + struct mcp251xfd_tef_ring tef[MCP251XFD_FIFO_TEF_NUM]; + struct mcp251xfd_rx_ring *rx[MCP251XFD_FIFO_RX_NUM_MAX]; + struct mcp251xfd_tx_ring tx[MCP251XFD_FIFO_TX_NUM]; u8 rx_ring_num; @@ -608,6 +620,7 @@ struct mcp251xfd_priv { struct gpio_desc *rx_int; struct clk *clk; + bool pll_enable; struct regulator *reg_vdd; struct regulator *reg_xceiver; @@ -776,7 +789,7 @@ mcp251xfd_tx_tail_get_from_chip(const struct mcp251xfd_priv *priv, int err; err = regmap_read(priv->map_reg, - MCP251XFD_REG_FIFOSTA(MCP251XFD_TX_FIFO), + MCP251XFD_REG_FIFOSTA(priv->tx->fifo_nr), &fifo_sta); if (err) return err; @@ -879,7 +892,7 @@ u16 mcp251xfd_crc16_compute2(const void *cmd, size_t cmd_size, const void *data, size_t data_size); u16 mcp251xfd_crc16_compute(const void *data, size_t data_size); int mcp251xfd_regmap_init(struct mcp251xfd_priv *priv); -void mcp251xfd_ring_init(struct mcp251xfd_priv *priv); +int mcp251xfd_ring_init(struct mcp251xfd_priv *priv); void mcp251xfd_ring_free(struct mcp251xfd_priv *priv); int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv); int mcp251xfd_handle_rxif(struct mcp251xfd_priv *priv); diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.c b/drivers/net/can/usb/etas_es58x/es58x_core.c index 2ed2370a3166..2d73ebbf3836 100644 --- a/drivers/net/can/usb/etas_es58x/es58x_core.c +++ b/drivers/net/can/usb/etas_es58x/es58x_core.c @@ -1787,7 +1787,7 @@ static int es58x_open(struct net_device *netdev) struct es58x_device *es58x_dev = 
es58x_priv(netdev)->es58x_dev;
 	int ret;
 
-	if (atomic_inc_return(&es58x_dev->opened_channel_cnt) == 1) {
+	if (!es58x_dev->opened_channel_cnt) {
 		ret = es58x_alloc_rx_urbs(es58x_dev);
 		if (ret)
 			return ret;
@@ -1805,12 +1805,13 @@ static int es58x_open(struct net_device *netdev)
 	if (ret)
 		goto free_urbs;
 
+	es58x_dev->opened_channel_cnt++;
 	netif_start_queue(netdev);
 
 	return ret;
 
 free_urbs:
-	if (atomic_dec_and_test(&es58x_dev->opened_channel_cnt))
+	if (!es58x_dev->opened_channel_cnt)
 		es58x_free_urbs(es58x_dev);
 	netdev_err(netdev, "%s: Could not open the network device: %pe\n",
 		   __func__, ERR_PTR(ret));
@@ -1845,7 +1846,8 @@ static int es58x_stop(struct net_device *netdev)
 
 	es58x_flush_pending_tx_msg(netdev);
 
-	if (atomic_dec_and_test(&es58x_dev->opened_channel_cnt))
+	es58x_dev->opened_channel_cnt--;
+	if (!es58x_dev->opened_channel_cnt)
 		es58x_free_urbs(es58x_dev);
 
 	return 0;
@@ -2215,7 +2217,6 @@ static struct es58x_device *es58x_init_es58x_dev(struct usb_interface *intf,
 	init_usb_anchor(&es58x_dev->tx_urbs_idle);
 	init_usb_anchor(&es58x_dev->tx_urbs_busy);
 	atomic_set(&es58x_dev->tx_urbs_idle_cnt, 0);
-	atomic_set(&es58x_dev->opened_channel_cnt, 0);
 	usb_set_intfdata(intf, es58x_dev);
 
 	es58x_dev->rx_pipe = usb_rcvbulkpipe(es58x_dev->udev,
diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.h b/drivers/net/can/usb/etas_es58x/es58x_core.h
index 826a15871573..e5033cb5e695 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_core.h
+++ b/drivers/net/can/usb/etas_es58x/es58x_core.h
@@ -373,8 +373,6 @@ struct es58x_operators {
 *	queue wake/stop logic should prevent this URB from getting
 *	empty. Please refer to es58x_get_tx_urb() for more details.
 * @tx_urbs_idle_cnt: number of urbs in @tx_urbs_idle.
- * @opened_channel_cnt: number of channels opened (c.f. es58x_open()
- *	and es58x_stop()).
 * @ktime_req_ns: kernel timestamp when es58x_set_realtime_diff_ns()
 *	was called.
 * @realtime_diff_ns: difference in nanoseconds between the clocks of
@@ -384,6 +382,10 @@ struct es58x_operators {
 *	in RX branches.
 * @rx_max_packet_size: Maximum length of bulk-in URB.
 * @num_can_ch: Number of CAN channel (i.e. number of elements of @netdev).
+ * @opened_channel_cnt: number of channels opened. Free of race
+ *	conditions because its two users (net_device_ops:ndo_open()
+ *	and net_device_ops:ndo_close()) guarantee that the network
+ *	stack big kernel lock (a.k.a. rtnl_mutex) is being held.
 * @rx_cmd_buf_len: Length of @rx_cmd_buf.
 * @rx_cmd_buf: The device might split the URB commands in an
 *	arbitrary amount of pieces.
This buffer is used to concatenate @@ -406,7 +408,6 @@ struct es58x_device { struct usb_anchor tx_urbs_busy; struct usb_anchor tx_urbs_idle; atomic_t tx_urbs_idle_cnt; - atomic_t opened_channel_cnt; u64 ktime_req_ns; s64 realtime_diff_ns; @@ -415,6 +416,7 @@ struct es58x_device { u16 rx_max_packet_size; u8 num_can_ch; + u8 opened_channel_cnt; u16 rx_cmd_buf_len; union es58x_urb_cmd rx_cmd_buf; diff --git a/drivers/net/can/usb/etas_es58x/es58x_fd.c b/drivers/net/can/usb/etas_es58x/es58x_fd.c index ec87126e1a7d..c97ffa71fd75 100644 --- a/drivers/net/can/usb/etas_es58x/es58x_fd.c +++ b/drivers/net/can/usb/etas_es58x/es58x_fd.c @@ -69,7 +69,8 @@ static int es58x_fd_echo_msg(struct net_device *netdev, int i, num_element; u32 rcv_packet_idx; - const u32 mask = GENMASK(31, sizeof(echo_msg->packet_idx) * 8); + const u32 mask = GENMASK(BITS_PER_TYPE(mask) - 1, + BITS_PER_TYPE(echo_msg->packet_idx)); num_element = es58x_msg_num_element(es58x_dev->dev, es58x_fd_urb_cmd->echo_msg, @@ -172,12 +173,11 @@ static int es58x_fd_rx_event_msg(struct net_device *netdev, const struct es58x_fd_rx_event_msg *rx_event_msg; int ret; + rx_event_msg = &es58x_fd_urb_cmd->rx_event_msg; ret = es58x_check_msg_len(es58x_dev->dev, *rx_event_msg, msg_len); if (ret) return ret; - rx_event_msg = &es58x_fd_urb_cmd->rx_event_msg; - return es58x_rx_err_msg(netdev, rx_event_msg->error_code, rx_event_msg->event_code, get_unaligned_le64(&rx_event_msg->timestamp)); diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index b487e3fe770a..67408e316062 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@ -9,11 +9,12 @@ * Many thanks to all socketcan devs! */ +#include <linux/bitfield.h> #include <linux/ethtool.h> #include <linux/init.h> -#include <linux/signal.h> #include <linux/module.h> #include <linux/netdevice.h> +#include <linux/signal.h> #include <linux/usb.h> #include <linux/can.h> @@ -21,14 +22,20 @@ #include <linux/can/error.h> /* Device specific constants */ -#define USB_GSUSB_1_VENDOR_ID 0x1d50 -#define USB_GSUSB_1_PRODUCT_ID 0x606f +#define USB_GSUSB_1_VENDOR_ID 0x1d50 +#define USB_GSUSB_1_PRODUCT_ID 0x606f -#define USB_CANDLELIGHT_VENDOR_ID 0x1209 +#define USB_CANDLELIGHT_VENDOR_ID 0x1209 #define USB_CANDLELIGHT_PRODUCT_ID 0x2323 -#define GSUSB_ENDPOINT_IN 1 -#define GSUSB_ENDPOINT_OUT 2 +#define USB_CES_CANEXT_FD_VENDOR_ID 0x1cd2 +#define USB_CES_CANEXT_FD_PRODUCT_ID 0x606f + +#define USB_ABE_CANDEBUGGER_FD_VENDOR_ID 0x16d0 +#define USB_ABE_CANDEBUGGER_FD_PRODUCT_ID 0x10b8 + +#define GSUSB_ENDPOINT_IN 1 +#define GSUSB_ENDPOINT_OUT 2 /* Device specific constants */ enum gs_usb_breq { @@ -40,6 +47,11 @@ enum gs_usb_breq { GS_USB_BREQ_DEVICE_CONFIG, GS_USB_BREQ_TIMESTAMP, GS_USB_BREQ_IDENTIFY, + GS_USB_BREQ_GET_USER_ID, + GS_USB_BREQ_QUIRK_CANTACT_PRO_DATA_BITTIMING = GS_USB_BREQ_GET_USER_ID, + GS_USB_BREQ_SET_USER_ID, + GS_USB_BREQ_DATA_BITTIMING, + GS_USB_BREQ_BT_CONST_EXT, }; enum gs_can_mode { @@ -87,11 +99,18 @@ struct gs_device_config { __le32 hw_version; } __packed; -#define GS_CAN_MODE_NORMAL 0 -#define GS_CAN_MODE_LISTEN_ONLY BIT(0) -#define GS_CAN_MODE_LOOP_BACK BIT(1) -#define GS_CAN_MODE_TRIPLE_SAMPLE BIT(2) -#define GS_CAN_MODE_ONE_SHOT BIT(3) +#define GS_CAN_MODE_NORMAL 0 +#define GS_CAN_MODE_LISTEN_ONLY BIT(0) +#define GS_CAN_MODE_LOOP_BACK BIT(1) +#define GS_CAN_MODE_TRIPLE_SAMPLE BIT(2) +#define GS_CAN_MODE_ONE_SHOT BIT(3) +#define GS_CAN_MODE_HW_TIMESTAMP BIT(4) +/* GS_CAN_FEATURE_IDENTIFY BIT(5) */ +/* GS_CAN_FEATURE_USER_ID BIT(6) */ +#define 
GS_CAN_MODE_PAD_PKTS_TO_MAX_PKT_SIZE BIT(7) +#define GS_CAN_MODE_FD BIT(8) +/* GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX BIT(9) */ +/* GS_CAN_FEATURE_BT_CONST_EXT BIT(10) */ struct gs_device_mode { __le32 mode; @@ -116,12 +135,25 @@ struct gs_identify_mode { __le32 mode; } __packed; -#define GS_CAN_FEATURE_LISTEN_ONLY BIT(0) -#define GS_CAN_FEATURE_LOOP_BACK BIT(1) -#define GS_CAN_FEATURE_TRIPLE_SAMPLE BIT(2) -#define GS_CAN_FEATURE_ONE_SHOT BIT(3) -#define GS_CAN_FEATURE_HW_TIMESTAMP BIT(4) -#define GS_CAN_FEATURE_IDENTIFY BIT(5) +#define GS_CAN_FEATURE_LISTEN_ONLY BIT(0) +#define GS_CAN_FEATURE_LOOP_BACK BIT(1) +#define GS_CAN_FEATURE_TRIPLE_SAMPLE BIT(2) +#define GS_CAN_FEATURE_ONE_SHOT BIT(3) +#define GS_CAN_FEATURE_HW_TIMESTAMP BIT(4) +#define GS_CAN_FEATURE_IDENTIFY BIT(5) +#define GS_CAN_FEATURE_USER_ID BIT(6) +#define GS_CAN_FEATURE_PAD_PKTS_TO_MAX_PKT_SIZE BIT(7) +#define GS_CAN_FEATURE_FD BIT(8) +#define GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX BIT(9) +#define GS_CAN_FEATURE_BT_CONST_EXT BIT(10) +#define GS_CAN_FEATURE_MASK GENMASK(10, 0) + +/* internal quirks - keep in GS_CAN_FEATURE space for now */ + +/* CANtact Pro original firmware: + * BREQ DATA_BITTIMING overlaps with GET_USER_ID + */ +#define GS_CAN_FEATURE_QUIRK_BREQ_CANTACT_PRO BIT(31) struct gs_device_bt_const { __le32 feature; @@ -136,7 +168,50 @@ struct gs_device_bt_const { __le32 brp_inc; } __packed; -#define GS_CAN_FLAG_OVERFLOW 1 +struct gs_device_bt_const_extended { + __le32 feature; + __le32 fclk_can; + __le32 tseg1_min; + __le32 tseg1_max; + __le32 tseg2_min; + __le32 tseg2_max; + __le32 sjw_max; + __le32 brp_min; + __le32 brp_max; + __le32 brp_inc; + + __le32 dtseg1_min; + __le32 dtseg1_max; + __le32 dtseg2_min; + __le32 dtseg2_max; + __le32 dsjw_max; + __le32 dbrp_min; + __le32 dbrp_max; + __le32 dbrp_inc; +} __packed; + +#define GS_CAN_FLAG_OVERFLOW BIT(0) +#define GS_CAN_FLAG_FD BIT(1) +#define GS_CAN_FLAG_BRS BIT(2) +#define GS_CAN_FLAG_ESI BIT(3) + +struct classic_can { + u8 data[8]; +} __packed; + +struct classic_can_quirk { + u8 data[8]; + u8 quirk; +} __packed; + +struct canfd { + u8 data[64]; +} __packed; + +struct canfd_quirk { + u8 data[64]; + u8 quirk; +} __packed; struct gs_host_frame { u32 echo_id; @@ -147,7 +222,12 @@ struct gs_host_frame { u8 flags; u8 reserved; - u8 data[8]; + union { + DECLARE_FLEX_ARRAY(struct classic_can, classic_can); + DECLARE_FLEX_ARRAY(struct classic_can_quirk, classic_can_quirk); + DECLARE_FLEX_ARRAY(struct canfd, canfd); + DECLARE_FLEX_ARRAY(struct canfd_quirk, canfd_quirk); + }; } __packed; /* The GS USB devices make use of the same flags and masks as in * linux/can.h and linux/can/error.h, and no additional mapping is necessary. @@ -158,9 +238,9 @@ struct gs_host_frame { /* Only launch a max of GS_MAX_RX_URBS usb requests at a time. */ #define GS_MAX_RX_URBS 30 /* Maximum number of interfaces the driver supports per device. - * Current hardware only supports 2 interfaces. The future may vary. + * Current hardware only supports 3 interfaces. The future may vary. */ -#define GS_MAX_INTF 2 +#define GS_MAX_INTF 3 struct gs_tx_context { struct gs_can *dev; @@ -176,9 +256,12 @@ struct gs_can { struct usb_device *udev; struct usb_interface *iface; - struct can_bittiming_const bt_const; + struct can_bittiming_const bt_const, data_bt_const; unsigned int channel; /* channel number */ + u32 feature; + unsigned int hf_size_tx; + /* This lock prevents a race condition between xmit and receive. 
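
The hf_size_tx field above, together with the payload union added to struct gs_host_frame, determines how many bytes each URB carries. A plain-C mock-up of that arithmetic (editorial sketch, not the kernel structure; the driver computes the same values with struct_size()):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* 12-byte fixed header followed by a mode-dependent payload */
struct sketch_host_frame {
	uint32_t echo_id;
	uint32_t can_id;
	uint8_t can_dlc;
	uint8_t channel;
	uint8_t flags;
	uint8_t reserved;
	uint8_t data[];
};

int main(void)
{
	size_t hdr = offsetof(struct sketch_host_frame, data);

	printf("classic CAN frame: %zu bytes\n", hdr + 8);	/* 20 */
	printf("CAN FD frame: %zu bytes\n", hdr + 64);		/* 76 */
	/* LPC546xx-based devices append one quirk byte on top */
	printf("CAN FD + quirk: %zu bytes\n", hdr + 64 + 1);	/* 77 */
	return 0;
}
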
*/ spinlock_t tx_ctx_lock; struct gs_tx_context tx_context[GS_MAX_TX_URBS]; @@ -191,8 +274,9 @@ struct gs_can { struct gs_usb { struct gs_can *canch[GS_MAX_INTF]; struct usb_anchor rx_submitted; - atomic_t active_channels; struct usb_device *udev; + unsigned int hf_size_rx; + u8 active_channels; }; /* 'allocate' a tx context. @@ -258,11 +342,7 @@ static int gs_cmd_reset(struct gs_can *gsdev) usb_sndctrlpipe(interface_to_usbdev(intf), 0), GS_USB_BREQ_MODE, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, - gsdev->channel, - 0, - dm, - sizeof(*dm), - 1000); + gsdev->channel, 0, dm, sizeof(*dm), 1000); kfree(dm); @@ -304,6 +384,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb) struct gs_host_frame *hf = urb->transfer_buffer; struct gs_tx_context *txc; struct can_frame *cf; + struct canfd_frame *cfd; struct sk_buff *skb; BUG_ON(!usbcan); @@ -332,18 +413,33 @@ static void gs_usb_receive_bulk_callback(struct urb *urb) return; if (hf->echo_id == -1) { /* normal rx */ - skb = alloc_can_skb(dev->netdev, &cf); - if (!skb) - return; + if (hf->flags & GS_CAN_FLAG_FD) { + skb = alloc_canfd_skb(dev->netdev, &cfd); + if (!skb) + return; + + cfd->can_id = le32_to_cpu(hf->can_id); + cfd->len = can_fd_dlc2len(hf->can_dlc); + if (hf->flags & GS_CAN_FLAG_BRS) + cfd->flags |= CANFD_BRS; + if (hf->flags & GS_CAN_FLAG_ESI) + cfd->flags |= CANFD_ESI; + + memcpy(cfd->data, hf->canfd->data, cfd->len); + } else { + skb = alloc_can_skb(dev->netdev, &cf); + if (!skb) + return; - cf->can_id = le32_to_cpu(hf->can_id); + cf->can_id = le32_to_cpu(hf->can_id); + can_frame_set_cc_len(cf, hf->can_dlc, dev->can.ctrlmode); - can_frame_set_cc_len(cf, hf->can_dlc, dev->can.ctrlmode); - memcpy(cf->data, hf->data, 8); + memcpy(cf->data, hf->classic_can->data, 8); - /* ERROR frames tell us information about the controller */ - if (le32_to_cpu(hf->can_id) & CAN_ERR_FLAG) - gs_update_state(dev, cf); + /* ERROR frames tell us information about the controller */ + if (le32_to_cpu(hf->can_id) & CAN_ERR_FLAG) + gs_update_state(dev, cf); + } netdev->stats.rx_packets++; netdev->stats.rx_bytes += hf->can_dlc; @@ -392,14 +488,10 @@ static void gs_usb_receive_bulk_callback(struct urb *urb) } resubmit_urb: - usb_fill_bulk_urb(urb, - usbcan->udev, + usb_fill_bulk_urb(urb, usbcan->udev, usb_rcvbulkpipe(usbcan->udev, GSUSB_ENDPOINT_IN), - hf, - sizeof(struct gs_host_frame), - gs_usb_receive_bulk_callback, - usbcan - ); + hf, dev->parent->hf_size_rx, + gs_usb_receive_bulk_callback, usbcan); rc = usb_submit_urb(urb, GFP_ATOMIC); @@ -436,11 +528,7 @@ static int gs_usb_set_bittiming(struct net_device *netdev) usb_sndctrlpipe(interface_to_usbdev(intf), 0), GS_USB_BREQ_BITTIMING, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, - dev->channel, - 0, - dbt, - sizeof(*dbt), - 1000); + dev->channel, 0, dbt, sizeof(*dbt), 1000); kfree(dbt); @@ -451,6 +539,44 @@ static int gs_usb_set_bittiming(struct net_device *netdev) return (rc > 0) ? 
0 : rc; } +static int gs_usb_set_data_bittiming(struct net_device *netdev) +{ + struct gs_can *dev = netdev_priv(netdev); + struct can_bittiming *bt = &dev->can.data_bittiming; + struct usb_interface *intf = dev->iface; + struct gs_device_bittiming *dbt; + u8 request = GS_USB_BREQ_DATA_BITTIMING; + int rc; + + dbt = kmalloc(sizeof(*dbt), GFP_KERNEL); + if (!dbt) + return -ENOMEM; + + dbt->prop_seg = cpu_to_le32(bt->prop_seg); + dbt->phase_seg1 = cpu_to_le32(bt->phase_seg1); + dbt->phase_seg2 = cpu_to_le32(bt->phase_seg2); + dbt->sjw = cpu_to_le32(bt->sjw); + dbt->brp = cpu_to_le32(bt->brp); + + if (dev->feature & GS_CAN_FEATURE_QUIRK_BREQ_CANTACT_PRO) + request = GS_USB_BREQ_QUIRK_CANTACT_PRO_DATA_BITTIMING; + + /* request bit timings */ + rc = usb_control_msg(interface_to_usbdev(intf), + usb_sndctrlpipe(interface_to_usbdev(intf), 0), + request, + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, + dev->channel, 0, dbt, sizeof(*dbt), 1000); + + kfree(dbt); + + if (rc < 0) + dev_err(netdev->dev.parent, + "Couldn't set data bittimings (err=%d)", rc); + + return (rc > 0) ? 0 : rc; +} + static void gs_usb_xmit_callback(struct urb *urb) { struct gs_tx_context *txc = urb->context; @@ -460,10 +586,8 @@ static void gs_usb_xmit_callback(struct urb *urb) if (urb->status) netdev_info(netdev, "usb xmit fail %u\n", txc->echo_id); - usb_free_coherent(urb->dev, - urb->transfer_buffer_length, - urb->transfer_buffer, - urb->transfer_dma); + usb_free_coherent(urb->dev, urb->transfer_buffer_length, + urb->transfer_buffer, urb->transfer_dma); } static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb, @@ -474,6 +598,7 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb, struct urb *urb; struct gs_host_frame *hf; struct can_frame *cf; + struct canfd_frame *cfd; int rc; unsigned int idx; struct gs_tx_context *txc; @@ -491,7 +616,7 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb, if (!urb) goto nomem_urb; - hf = usb_alloc_coherent(dev->udev, sizeof(*hf), GFP_ATOMIC, + hf = usb_alloc_coherent(dev->udev, dev->hf_size_tx, GFP_ATOMIC, &urb->transfer_dma); if (!hf) { netdev_err(netdev, "No memory left for USB buffer\n"); @@ -510,19 +635,31 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb, hf->flags = 0; hf->reserved = 0; - cf = (struct can_frame *)skb->data; + if (can_is_canfd_skb(skb)) { + cfd = (struct canfd_frame *)skb->data; - hf->can_id = cpu_to_le32(cf->can_id); - hf->can_dlc = can_get_cc_dlc(cf, dev->can.ctrlmode); + hf->can_id = cpu_to_le32(cfd->can_id); + hf->can_dlc = can_fd_len2dlc(cfd->len); + hf->flags |= GS_CAN_FLAG_FD; + if (cfd->flags & CANFD_BRS) + hf->flags |= GS_CAN_FLAG_BRS; + if (cfd->flags & CANFD_ESI) + hf->flags |= GS_CAN_FLAG_ESI; - memcpy(hf->data, cf->data, cf->len); + memcpy(hf->canfd->data, cfd->data, cfd->len); + } else { + cf = (struct can_frame *)skb->data; + + hf->can_id = cpu_to_le32(cf->can_id); + hf->can_dlc = can_get_cc_dlc(cf, dev->can.ctrlmode); + + memcpy(hf->classic_can->data, cf->data, cf->len); + } usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, GSUSB_ENDPOINT_OUT), - hf, - sizeof(*hf), - gs_usb_xmit_callback, - txc); + hf, dev->hf_size_tx, + gs_usb_xmit_callback, txc); urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; usb_anchor_urb(urb, &dev->tx_submitted); @@ -539,10 +676,8 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb, gs_free_tx_context(txc); usb_unanchor_urb(urb); - usb_free_coherent(dev->udev, - sizeof(*hf), - hf, - urb->transfer_dma); + usb_free_coherent(dev->udev, urb->transfer_buffer_length, + 
urb->transfer_buffer, urb->transfer_dma); if (rc == -ENODEV) { netif_device_detach(netdev); @@ -562,10 +697,8 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb, return NETDEV_TX_OK; badidx: - usb_free_coherent(dev->udev, - sizeof(*hf), - hf, - urb->transfer_dma); + usb_free_coherent(dev->udev, urb->transfer_buffer_length, + urb->transfer_buffer, urb->transfer_dma); nomem_hf: usb_free_urb(urb); @@ -582,6 +715,7 @@ static int gs_can_open(struct net_device *netdev) struct gs_usb *parent = dev->parent; int rc, i; struct gs_device_mode *dm; + struct gs_host_frame *hf; u32 ctrlmode; u32 flags = 0; @@ -589,7 +723,22 @@ static int gs_can_open(struct net_device *netdev) if (rc) return rc; - if (atomic_add_return(1, &parent->active_channels) == 1) { + ctrlmode = dev->can.ctrlmode; + if (ctrlmode & CAN_CTRLMODE_FD) { + flags |= GS_CAN_MODE_FD; + + if (dev->feature & GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX) + dev->hf_size_tx = struct_size(hf, canfd_quirk, 1); + else + dev->hf_size_tx = struct_size(hf, canfd, 1); + } else { + if (dev->feature & GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX) + dev->hf_size_tx = struct_size(hf, classic_can_quirk, 1); + else + dev->hf_size_tx = struct_size(hf, classic_can, 1); + } + + if (!parent->active_channels) { for (i = 0; i < GS_MAX_RX_URBS; i++) { struct urb *urb; u8 *buf; @@ -601,7 +750,7 @@ static int gs_can_open(struct net_device *netdev) /* alloc rx buffer */ buf = usb_alloc_coherent(dev->udev, - sizeof(struct gs_host_frame), + dev->parent->hf_size_rx, GFP_KERNEL, &urb->transfer_dma); if (!buf) { @@ -617,9 +766,8 @@ static int gs_can_open(struct net_device *netdev) usb_rcvbulkpipe(dev->udev, GSUSB_ENDPOINT_IN), buf, - sizeof(struct gs_host_frame), - gs_usb_receive_bulk_callback, - parent); + dev->parent->hf_size_rx, + gs_usb_receive_bulk_callback, parent); urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; usb_anchor_urb(urb, &parent->rx_submitted); @@ -630,8 +778,7 @@ static int gs_can_open(struct net_device *netdev) netif_device_detach(dev->netdev); netdev_err(netdev, - "usb_submit failed (err=%d)\n", - rc); + "usb_submit failed (err=%d)\n", rc); usb_unanchor_urb(urb); usb_free_urb(urb); @@ -650,8 +797,6 @@ static int gs_can_open(struct net_device *netdev) return -ENOMEM; /* flags */ - ctrlmode = dev->can.ctrlmode; - if (ctrlmode & CAN_CTRLMODE_LOOPBACK) flags |= GS_CAN_MODE_LOOP_BACK; else if (ctrlmode & CAN_CTRLMODE_LISTENONLY) @@ -672,13 +817,8 @@ static int gs_can_open(struct net_device *netdev) rc = usb_control_msg(interface_to_usbdev(dev->iface), usb_sndctrlpipe(interface_to_usbdev(dev->iface), 0), GS_USB_BREQ_MODE, - USB_DIR_OUT | USB_TYPE_VENDOR | - USB_RECIP_INTERFACE, - dev->channel, - 0, - dm, - sizeof(*dm), - 1000); + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, + dev->channel, 0, dm, sizeof(*dm), 1000); if (rc < 0) { netdev_err(netdev, "Couldn't start device (err=%d)\n", rc); @@ -690,6 +830,7 @@ static int gs_can_open(struct net_device *netdev) dev->can.state = CAN_STATE_ERROR_ACTIVE; + parent->active_channels++; if (!(dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)) netif_start_queue(netdev); @@ -705,7 +846,8 @@ static int gs_can_close(struct net_device *netdev) netif_stop_queue(netdev); /* Stop polling */ - if (atomic_dec_and_test(&parent->active_channels)) + parent->active_channels--; + if (!parent->active_channels) usb_kill_anchored_urbs(&parent->rx_submitted); /* Stop sending URBs */ @@ -753,16 +895,10 @@ static int gs_usb_set_identify(struct net_device *netdev, bool do_identify) imode->mode = cpu_to_le32(GS_CAN_IDENTIFY_OFF); rc = 
usb_control_msg(interface_to_usbdev(dev->iface), - usb_sndctrlpipe(interface_to_usbdev(dev->iface), - 0), + usb_sndctrlpipe(interface_to_usbdev(dev->iface), 0), GS_USB_BREQ_IDENTIFY, - USB_DIR_OUT | USB_TYPE_VENDOR | - USB_RECIP_INTERFACE, - dev->channel, - 0, - imode, - sizeof(*imode), - 100); + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, + dev->channel, 0, imode, sizeof(*imode), 100); kfree(imode); @@ -801,6 +937,7 @@ static struct gs_can *gs_make_candev(unsigned int channel, struct net_device *netdev; int rc; struct gs_device_bt_const *bt_const; + struct gs_device_bt_const_extended *bt_const_extended; u32 feature; bt_const = kmalloc(sizeof(*bt_const), GFP_KERNEL); @@ -812,11 +949,7 @@ static struct gs_can *gs_make_candev(unsigned int channel, usb_rcvctrlpipe(interface_to_usbdev(intf), 0), GS_USB_BREQ_BT_CONST, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, - channel, - 0, - bt_const, - sizeof(*bt_const), - 1000); + channel, 0, bt_const, sizeof(*bt_const), 1000); if (rc < 0) { dev_err(&intf->dev, @@ -873,6 +1006,7 @@ static struct gs_can *gs_make_candev(unsigned int channel, dev->can.ctrlmode_supported = CAN_CTRLMODE_CC_LEN8_DLC; feature = le32_to_cpu(bt_const->feature); + dev->feature = FIELD_GET(GS_CAN_FEATURE_MASK, feature); if (feature & GS_CAN_FEATURE_LISTEN_ONLY) dev->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY; @@ -885,7 +1019,37 @@ static struct gs_can *gs_make_candev(unsigned int channel, if (feature & GS_CAN_FEATURE_ONE_SHOT) dev->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT; - SET_NETDEV_DEV(netdev, &intf->dev); + if (feature & GS_CAN_FEATURE_FD) { + dev->can.ctrlmode_supported |= CAN_CTRLMODE_FD; + /* The data bit timing will be overwritten, if + * GS_CAN_FEATURE_BT_CONST_EXT is set. + */ + dev->can.data_bittiming_const = &dev->bt_const; + dev->can.do_set_data_bittiming = gs_usb_set_data_bittiming; + } + + /* The CANtact Pro from LinkLayer Labs is based on the + * LPC54616 µC, which is affected by the NXP LPC USB transfer + * erratum. However, the current firmware (version 2) doesn't + * set the GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX bit. Set the + * feature GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX to workaround + * this issue. + * + * For the GS_USB_BREQ_DATA_BITTIMING USB control message the + * CANtact Pro firmware uses a request value, which is already + * used by the candleLight firmware for a different purpose + * (GS_USB_BREQ_GET_USER_ID). Set the feature + * GS_CAN_FEATURE_QUIRK_BREQ_CANTACT_PRO to workaround this + * issue. 
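
Because GS_CAN_FEATURE_QUIRK_BREQ_CANTACT_PRO sits in BIT(31) while the device only ever reports bits inside GS_CAN_FEATURE_MASK, reported and driver-internal feature bits cannot collide. An editorial sketch of that split in plain C:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define SKETCH_FEATURE_MASK		0x000007ffu	/* GENMASK(10, 0) */
#define SKETCH_QUIRK_CANTACT_PRO	(1u << 31)	/* internal only */

int main(void)
{
	/* device reply with a stray reserved bit set */
	uint32_t reported = (1u << 30) | 0x13fu;
	/* only the defined feature range is taken over... */
	uint32_t feature = reported & SKETCH_FEATURE_MASK;

	/* ...and internal quirk flags are or-ed in by the driver */
	feature |= SKETCH_QUIRK_CANTACT_PRO;

	printf("reported 0x%08" PRIx32 " -> feature 0x%08" PRIx32 "\n",
	       reported, feature);
	return 0;
}
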
+ */ + if (dev->udev->descriptor.idVendor == cpu_to_le16(USB_GSUSB_1_VENDOR_ID) && + dev->udev->descriptor.idProduct == cpu_to_le16(USB_GSUSB_1_PRODUCT_ID) && + dev->udev->manufacturer && dev->udev->product && + !strcmp(dev->udev->manufacturer, "LinkLayer Labs") && + !strcmp(dev->udev->product, "CANtact Pro") && + (le32_to_cpu(dconf->sw_version) <= 2)) + dev->feature |= GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX | + GS_CAN_FEATURE_QUIRK_BREQ_CANTACT_PRO; if (le32_to_cpu(dconf->sw_version) > 1) if (feature & GS_CAN_FEATURE_IDENTIFY) @@ -893,6 +1057,45 @@ static struct gs_can *gs_make_candev(unsigned int channel, kfree(bt_const); + /* fetch extended bit timing constants if device has feature + * GS_CAN_FEATURE_FD and GS_CAN_FEATURE_BT_CONST_EXT + */ + if (feature & GS_CAN_FEATURE_FD && + feature & GS_CAN_FEATURE_BT_CONST_EXT) { + bt_const_extended = kmalloc(sizeof(*bt_const_extended), GFP_KERNEL); + if (!bt_const_extended) + return ERR_PTR(-ENOMEM); + + rc = usb_control_msg(interface_to_usbdev(intf), + usb_rcvctrlpipe(interface_to_usbdev(intf), 0), + GS_USB_BREQ_BT_CONST_EXT, + USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, + channel, 0, bt_const_extended, + sizeof(*bt_const_extended), + 1000); + if (rc < 0) { + dev_err(&intf->dev, + "Couldn't get extended bit timing const for channel (err=%d)\n", + rc); + kfree(bt_const_extended); + return ERR_PTR(rc); + } + + strcpy(dev->data_bt_const.name, "gs_usb"); + dev->data_bt_const.tseg1_min = le32_to_cpu(bt_const_extended->dtseg1_min); + dev->data_bt_const.tseg1_max = le32_to_cpu(bt_const_extended->dtseg1_max); + dev->data_bt_const.tseg2_min = le32_to_cpu(bt_const_extended->dtseg2_min); + dev->data_bt_const.tseg2_max = le32_to_cpu(bt_const_extended->dtseg2_max); + dev->data_bt_const.sjw_max = le32_to_cpu(bt_const_extended->dsjw_max); + dev->data_bt_const.brp_min = le32_to_cpu(bt_const_extended->dbrp_min); + dev->data_bt_const.brp_max = le32_to_cpu(bt_const_extended->dbrp_max); + dev->data_bt_const.brp_inc = le32_to_cpu(bt_const_extended->dbrp_inc); + + dev->can.data_bittiming_const = &dev->data_bt_const; + } + + SET_NETDEV_DEV(netdev, &intf->dev); + rc = register_candev(dev->netdev); if (rc) { free_candev(dev->netdev); @@ -913,6 +1116,8 @@ static void gs_destroy_candev(struct gs_can *dev) static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { + struct usb_device *udev = interface_to_usbdev(intf); + struct gs_host_frame *hf; struct gs_usb *dev; int rc = -ENOMEM; unsigned int icount, i; @@ -926,21 +1131,16 @@ static int gs_usb_probe(struct usb_interface *intf, hconf->byte_order = cpu_to_le32(0x0000beef); /* send host config */ - rc = usb_control_msg(interface_to_usbdev(intf), - usb_sndctrlpipe(interface_to_usbdev(intf), 0), + rc = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), GS_USB_BREQ_HOST_FORMAT, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, - 1, - intf->cur_altsetting->desc.bInterfaceNumber, - hconf, - sizeof(*hconf), - 1000); + 1, intf->cur_altsetting->desc.bInterfaceNumber, + hconf, sizeof(*hconf), 1000); kfree(hconf); if (rc < 0) { - dev_err(&intf->dev, "Couldn't send data format (err=%d)\n", - rc); + dev_err(&intf->dev, "Couldn't send data format (err=%d)\n", rc); return rc; } @@ -949,15 +1149,11 @@ static int gs_usb_probe(struct usb_interface *intf, return -ENOMEM; /* read device config */ - rc = usb_control_msg(interface_to_usbdev(intf), - usb_rcvctrlpipe(interface_to_usbdev(intf), 0), + rc = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), GS_USB_BREQ_DEVICE_CONFIG, USB_DIR_IN | USB_TYPE_VENDOR | 
USB_RECIP_INTERFACE, - 1, - intf->cur_altsetting->desc.bInterfaceNumber, - dconf, - sizeof(*dconf), - 1000); + 1, intf->cur_altsetting->desc.bInterfaceNumber, + dconf, sizeof(*dconf), 1000); if (rc < 0) { dev_err(&intf->dev, "Couldn't get device config: (err=%d)\n", rc); @@ -983,11 +1179,13 @@ static int gs_usb_probe(struct usb_interface *intf, } init_usb_anchor(&dev->rx_submitted); - - atomic_set(&dev->active_channels, 0); + /* default to classic CAN, switch to CAN-FD if at least one of + * our channels support CAN-FD. + */ + dev->hf_size_rx = struct_size(hf, classic_can, 1); usb_set_intfdata(intf, dev); - dev->udev = interface_to_usbdev(intf); + dev->udev = udev; for (i = 0; i < icount; i++) { dev->canch[i] = gs_make_candev(i, intf, dconf); @@ -1006,6 +1204,9 @@ static int gs_usb_probe(struct usb_interface *intf, return rc; } dev->canch[i]->parent = dev; + + if (dev->canch[i]->can.ctrlmode_supported & CAN_CTRLMODE_FD) + dev->hf_size_rx = struct_size(hf, canfd, 1); } kfree(dconf); @@ -1015,8 +1216,9 @@ static int gs_usb_probe(struct usb_interface *intf, static void gs_usb_disconnect(struct usb_interface *intf) { - unsigned i; struct gs_usb *dev = usb_get_intfdata(intf); + unsigned int i; + usb_set_intfdata(intf, NULL); if (!dev) { @@ -1037,16 +1239,20 @@ static const struct usb_device_id gs_usb_table[] = { USB_GSUSB_1_PRODUCT_ID, 0) }, { USB_DEVICE_INTERFACE_NUMBER(USB_CANDLELIGHT_VENDOR_ID, USB_CANDLELIGHT_PRODUCT_ID, 0) }, + { USB_DEVICE_INTERFACE_NUMBER(USB_CES_CANEXT_FD_VENDOR_ID, + USB_CES_CANEXT_FD_PRODUCT_ID, 0) }, + { USB_DEVICE_INTERFACE_NUMBER(USB_ABE_CANDEBUGGER_FD_VENDOR_ID, + USB_ABE_CANDEBUGGER_FD_PRODUCT_ID, 0) }, {} /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, gs_usb_table); static struct usb_driver gs_usb_driver = { - .name = "gs_usb", - .probe = gs_usb_probe, + .name = "gs_usb", + .probe = gs_usb_probe, .disconnect = gs_usb_disconnect, - .id_table = gs_usb_table, + .id_table = gs_usb_table, }; module_usb_driver(gs_usb_driver); diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c index c4b4d3d0a387..e67658b53d02 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c @@ -205,12 +205,10 @@ MODULE_DEVICE_TABLE(usb, kvaser_usb_table); int kvaser_usb_send_cmd(const struct kvaser_usb *dev, void *cmd, int len) { - int actual_len; /* Not used */ - return usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, dev->bulk_out->bEndpointAddress), - cmd, len, &actual_len, KVASER_USB_TIMEOUT); + cmd, len, NULL, KVASER_USB_TIMEOUT); } int kvaser_usb_recv_cmd(const struct kvaser_usb *dev, void *cmd, int len, diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c index c42f18845b02..a15619d883ec 100644 --- a/drivers/net/can/vcan.c +++ b/drivers/net/can/vcan.c @@ -80,7 +80,7 @@ static void vcan_rx(struct sk_buff *skb, struct net_device *dev) skb->dev = dev; skb->ip_summed = CHECKSUM_UNNECESSARY; - netif_rx_ni(skb); + netif_rx(skb); } static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev) diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c index 47ccc15a3486..94a0c9c6a509 100644 --- a/drivers/net/can/vxcan.c +++ b/drivers/net/can/vxcan.c @@ -33,28 +33,33 @@ struct vxcan_priv { struct net_device __rcu *peer; }; -static netdev_tx_t vxcan_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t vxcan_xmit(struct sk_buff *oskb, struct net_device *dev) { struct vxcan_priv *priv = netdev_priv(dev); struct net_device *peer; - 
struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
+	struct canfd_frame *cfd = (struct canfd_frame *)oskb->data;
 	struct net_device_stats *peerstats, *srcstats = &dev->stats;
+	struct sk_buff *skb;
 	u8 len;
 
-	if (can_dropped_invalid_skb(dev, skb))
+	if (can_dropped_invalid_skb(dev, oskb))
 		return NETDEV_TX_OK;
 
 	rcu_read_lock();
 	peer = rcu_dereference(priv->peer);
 	if (unlikely(!peer)) {
-		kfree_skb(skb);
+		kfree_skb(oskb);
 		dev->stats.tx_dropped++;
 		goto out_unlock;
 	}
 
-	skb = can_create_echo_skb(skb);
-	if (!skb)
+	skb = skb_clone(oskb, GFP_ATOMIC);
+	if (skb) {
+		consume_skb(oskb);
+	} else {
+		kfree_skb(oskb);
 		goto out_unlock;
+	}
 
 	/* reset CAN GW hop counter */
 	skb->csum_start = 0;
@@ -63,7 +68,7 @@ static netdev_tx_t vxcan_xmit(struct sk_buff *skb, struct net_device *dev)
 	skb->ip_summed = CHECKSUM_UNNECESSARY;
 	len = cfd->can_id & CAN_RTR_FLAG ? 0 : cfd->len;
 
-	if (netif_rx_ni(skb) == NET_RX_SUCCESS) {
+	if (netif_rx(skb) == NET_RX_SUCCESS) {
 		srcstats->tx_packets++;
 		srcstats->tx_bytes += len;
 		peerstats = &peer->stats;
@@ -148,7 +153,7 @@ static void vxcan_setup(struct net_device *dev)
 	dev->hard_header_len = 0;
 	dev->addr_len = 0;
 	dev->tx_queue_len = 0;
-	dev->flags = (IFF_NOARP|IFF_ECHO);
+	dev->flags = IFF_NOARP;
 	dev->netdev_ops = &vxcan_netdev_ops;
 	dev->needs_free_netdev = true;
 
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index 1674b561c9a2..e562c5ab1149 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -1215,10 +1215,11 @@ static int xcan_rx_poll(struct napi_struct *napi, int quota)
 	}
 
 	if (work_done < quota) {
-		napi_complete_done(napi, work_done);
-		ier = priv->read_reg(priv, XCAN_IER_OFFSET);
-		ier |= xcan_rx_int_mask(priv);
-		priv->write_reg(priv, XCAN_IER_OFFSET, ier);
+		if (napi_complete_done(napi, work_done)) {
+			ier = priv->read_reg(priv, XCAN_IER_OFFSET);
+			ier |= xcan_rx_int_mask(priv);
+			priv->write_reg(priv, XCAN_IER_OFFSET, ier);
+		}
 	}
 	return work_done;
 }
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 8d51c1019dcd..37a3dabdce31 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -72,6 +72,7 @@ source "drivers/net/dsa/realtek/Kconfig"
 
 config NET_DSA_SMSC_LAN9303
 	tristate
+	depends on VLAN_8021Q || VLAN_8021Q=n
 	select NET_DSA_TAG_LAN9303
 	select REGMAP
 	help
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index a3b98992f180..122e63762979 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -1309,46 +1309,50 @@ void b53_port_event(struct dsa_switch *ds, int port)
 }
 EXPORT_SYMBOL(b53_port_event);
 
-void b53_phylink_validate(struct dsa_switch *ds, int port,
-			  unsigned long *supported,
-			  struct phylink_link_state *state)
+static void b53_phylink_get_caps(struct dsa_switch *ds, int port,
+				 struct phylink_config *config)
 {
 	struct b53_device *dev = ds->priv;
-	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
 
-	if (dev->ops->serdes_phylink_validate)
-		dev->ops->serdes_phylink_validate(dev, port, mask, state);
+	/* Internal ports need GMII for PHYLIB */
+	__set_bit(PHY_INTERFACE_MODE_GMII, config->supported_interfaces);
+
+	/* These switches appear to support MII and RevMII too, but beyond
+	 * this, the code gives very few clues. FIXME: We probably need more
+	 * interface modes here.
+	 *
+	 * According to b53_srab_mux_init(), ports 3..5 can support:
+	 * SGMII, MII, GMII, RGMII or INTERNAL depending on the MUX setting.
+	 * However, the interface mode read from the MUX configuration is
+	 * not passed back to DSA, so phylink uses NA.
+	 * DT can specify RGMII for ports 0, 1.
+	 * For MDIO, port 8 can be RGMII_TXID.
+	 */
+	__set_bit(PHY_INTERFACE_MODE_MII, config->supported_interfaces);
+	__set_bit(PHY_INTERFACE_MODE_REVMII, config->supported_interfaces);
 
-	/* Allow all the expected bits */
-	phylink_set(mask, Autoneg);
-	phylink_set_port_modes(mask);
-	phylink_set(mask, Pause);
-	phylink_set(mask, Asym_Pause);
+	config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+				   MAC_10 | MAC_100;
 
-	/* With the exclusion of 5325/5365, MII, Reverse MII and 802.3z, we
-	 * support Gigabit, including Half duplex.
+	/* 5325/5365 are not capable of gigabit speeds, everything else is.
+	 * Note: the original code also excluded Gigabit for MII, RevMII
+	 * and 802.3z modes. MII and RevMII are not able to work above 100M,
+	 * so will be excluded by the generic validator implementation.
+	 * However, the exclusion of Gigabit for 802.3z just seems wrong.
 	 */
-	if (state->interface != PHY_INTERFACE_MODE_MII &&
-	    state->interface != PHY_INTERFACE_MODE_REVMII &&
-	    !phy_interface_mode_is_8023z(state->interface) &&
-	    !(is5325(dev) || is5365(dev))) {
-		phylink_set(mask, 1000baseT_Full);
-		phylink_set(mask, 1000baseT_Half);
-	}
+	if (!(is5325(dev) || is5365(dev)))
+		config->mac_capabilities |= MAC_1000;
 
-	if (!phy_interface_mode_is_8023z(state->interface)) {
-		phylink_set(mask, 10baseT_Half);
-		phylink_set(mask, 10baseT_Full);
-		phylink_set(mask, 100baseT_Half);
-		phylink_set(mask, 100baseT_Full);
-	}
+	/* Get the implementation specific capabilities */
+	if (dev->ops->phylink_get_caps)
+		dev->ops->phylink_get_caps(dev, port, config);
 
-	linkmode_and(supported, supported, mask);
-	linkmode_and(state->advertising, state->advertising, mask);
-
-	phylink_helper_basex_speed(state);
+	/* This driver does not make use of the speed, duplex, pause or the
+	 * advertisement in its mac_config, so it is safe to mark this driver
+	 * as non-legacy.
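
The shape of this conversion generalizes to other DSA drivers. A minimal get_caps along the same lines (editorial sketch against the phylink API; names and the chosen modes are illustrative, not this driver's):

#include <linux/phy.h>
#include <linux/phylink.h>
#include <net/dsa.h>

/* Declare which MAC-side interface modes exist and which generic
 * capabilities the MAC has; phylink derives the legal ethtool link
 * modes from this, so no hand-written validate mask is needed.
 */
static void sketch_phylink_get_caps(struct dsa_switch *ds, int port,
				    struct phylink_config *config)
{
	__set_bit(PHY_INTERFACE_MODE_GMII, config->supported_interfaces);
	__set_bit(PHY_INTERFACE_MODE_MII, config->supported_interfaces);

	config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
				   MAC_10 | MAC_100 | MAC_1000;
}
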
+ */ + config->legacy_pre_march2020 = false; } -EXPORT_SYMBOL(b53_phylink_validate); int b53_phylink_mac_link_state(struct dsa_switch *ds, int port, struct phylink_link_state *state) @@ -1704,7 +1708,8 @@ static int b53_arl_op(struct b53_device *dev, int op, int port, } int b53_fdb_add(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid) + const unsigned char *addr, u16 vid, + struct dsa_db db) { struct b53_device *priv = ds->priv; int ret; @@ -1724,7 +1729,8 @@ int b53_fdb_add(struct dsa_switch *ds, int port, EXPORT_SYMBOL(b53_fdb_add); int b53_fdb_del(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid) + const unsigned char *addr, u16 vid, + struct dsa_db db) { struct b53_device *priv = ds->priv; int ret; @@ -1825,7 +1831,8 @@ int b53_fdb_dump(struct dsa_switch *ds, int port, EXPORT_SYMBOL(b53_fdb_dump); int b53_mdb_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_mdb *mdb) + const struct switchdev_obj_port_mdb *mdb, + struct dsa_db db) { struct b53_device *priv = ds->priv; int ret; @@ -1845,7 +1852,8 @@ int b53_mdb_add(struct dsa_switch *ds, int port, EXPORT_SYMBOL(b53_mdb_add); int b53_mdb_del(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_mdb *mdb) + const struct switchdev_obj_port_mdb *mdb, + struct dsa_db db) { struct b53_device *priv = ds->priv; int ret; @@ -1861,7 +1869,7 @@ int b53_mdb_del(struct dsa_switch *ds, int port, EXPORT_SYMBOL(b53_mdb_del); int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge, - bool *tx_fwd_offload) + bool *tx_fwd_offload, struct netlink_ext_ack *extack) { struct b53_device *dev = ds->priv; s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index; @@ -2259,7 +2267,7 @@ static const struct dsa_switch_ops b53_switch_ops = { .phy_read = b53_phy_read16, .phy_write = b53_phy_write16, .adjust_link = b53_adjust_link, - .phylink_validate = b53_phylink_validate, + .phylink_get_caps = b53_phylink_get_caps, .phylink_mac_link_state = b53_phylink_mac_link_state, .phylink_mac_config = b53_phylink_mac_config, .phylink_mac_an_restart = b53_phylink_mac_an_restart, diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h index b41dc8ac2ca8..86e7eb7924e7 100644 --- a/drivers/net/dsa/b53/b53_priv.h +++ b/drivers/net/dsa/b53/b53_priv.h @@ -46,6 +46,8 @@ struct b53_io_ops { int (*phy_write16)(struct b53_device *dev, int addr, int reg, u16 value); int (*irq_enable)(struct b53_device *dev, int port); void (*irq_disable)(struct b53_device *dev, int port); + void (*phylink_get_caps)(struct b53_device *dev, int port, + struct phylink_config *config); u8 (*serdes_map_lane)(struct b53_device *dev, int port); int (*serdes_link_state)(struct b53_device *dev, int port, struct phylink_link_state *state); @@ -56,9 +58,6 @@ struct b53_io_ops { void (*serdes_link_set)(struct b53_device *dev, int port, unsigned int mode, phy_interface_t interface, bool link_up); - void (*serdes_phylink_validate)(struct b53_device *dev, int port, - unsigned long *supported, - struct phylink_link_state *state); }; #define B53_INVALID_LANE 0xff @@ -325,7 +324,7 @@ void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data); int b53_get_sset_count(struct dsa_switch *ds, int port, int sset); void b53_get_ethtool_phy_stats(struct dsa_switch *ds, int port, uint64_t *data); int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge, - bool *tx_fwd_offload); + bool *tx_fwd_offload, struct netlink_ext_ack *extack); void b53_br_leave(struct dsa_switch *ds, int port, struct 
dsa_bridge bridge); void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state); void b53_br_fast_age(struct dsa_switch *ds, int port); @@ -337,9 +336,6 @@ int b53_br_flags(struct dsa_switch *ds, int port, struct netlink_ext_ack *extack); int b53_setup_devlink_resources(struct dsa_switch *ds); void b53_port_event(struct dsa_switch *ds, int port); -void b53_phylink_validate(struct dsa_switch *ds, int port, - unsigned long *supported, - struct phylink_link_state *state); int b53_phylink_mac_link_state(struct dsa_switch *ds, int port, struct phylink_link_state *state); void b53_phylink_mac_config(struct dsa_switch *ds, int port, @@ -363,15 +359,19 @@ int b53_vlan_add(struct dsa_switch *ds, int port, int b53_vlan_del(struct dsa_switch *ds, int port, const struct switchdev_obj_port_vlan *vlan); int b53_fdb_add(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid); + const unsigned char *addr, u16 vid, + struct dsa_db db); int b53_fdb_del(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid); + const unsigned char *addr, u16 vid, + struct dsa_db db); int b53_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb, void *data); int b53_mdb_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_mdb *mdb); + const struct switchdev_obj_port_mdb *mdb, + struct dsa_db db); int b53_mdb_del(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_mdb *mdb); + const struct switchdev_obj_port_mdb *mdb, + struct dsa_db db); int b53_mirror_add(struct dsa_switch *ds, int port, struct dsa_mall_mirror_tc_entry *mirror, bool ingress); enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port, diff --git a/drivers/net/dsa/b53/b53_serdes.c b/drivers/net/dsa/b53/b53_serdes.c index 5ae3d9783b68..555e5b372321 100644 --- a/drivers/net/dsa/b53/b53_serdes.c +++ b/drivers/net/dsa/b53/b53_serdes.c @@ -158,9 +158,8 @@ void b53_serdes_link_set(struct b53_device *dev, int port, unsigned int mode, } EXPORT_SYMBOL(b53_serdes_link_set); -void b53_serdes_phylink_validate(struct b53_device *dev, int port, - unsigned long *supported, - struct phylink_link_state *state) +void b53_serdes_phylink_get_caps(struct b53_device *dev, int port, + struct phylink_config *config) { u8 lane = b53_serdes_map_lane(dev, port); @@ -169,16 +168,24 @@ void b53_serdes_phylink_validate(struct b53_device *dev, int port, switch (lane) { case 0: - phylink_set(supported, 2500baseX_Full); + /* It appears lane 0 supports 2500base-X and 1000base-X */ + __set_bit(PHY_INTERFACE_MODE_2500BASEX, + config->supported_interfaces); + config->mac_capabilities |= MAC_2500FD; fallthrough; case 1: - phylink_set(supported, 1000baseX_Full); + /* It appears lane 1 only supports 1000base-X and SGMII */ + __set_bit(PHY_INTERFACE_MODE_1000BASEX, + config->supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_SGMII, + config->supported_interfaces); + config->mac_capabilities |= MAC_1000FD; break; default: break; } } -EXPORT_SYMBOL(b53_serdes_phylink_validate); +EXPORT_SYMBOL(b53_serdes_phylink_get_caps); int b53_serdes_init(struct b53_device *dev, int port) { diff --git a/drivers/net/dsa/b53/b53_serdes.h b/drivers/net/dsa/b53/b53_serdes.h index 55d280fe38e4..f47d5caa7557 100644 --- a/drivers/net/dsa/b53/b53_serdes.h +++ b/drivers/net/dsa/b53/b53_serdes.h @@ -115,9 +115,8 @@ void b53_serdes_config(struct b53_device *dev, int port, unsigned int mode, void b53_serdes_an_restart(struct b53_device *dev, int port); void b53_serdes_link_set(struct b53_device *dev, int port, unsigned int 
mode, phy_interface_t interface, bool link_up);
-void b53_serdes_phylink_validate(struct b53_device *dev, int port,
-				 unsigned long *supported,
-				 struct phylink_link_state *state);
+void b53_serdes_phylink_get_caps(struct b53_device *dev, int port,
+				 struct phylink_config *config);
 #if IS_ENABLED(CONFIG_B53_SERDES)
 int b53_serdes_init(struct b53_device *dev, int port);
 #else
diff --git a/drivers/net/dsa/b53/b53_spi.c b/drivers/net/dsa/b53/b53_spi.c
index 2b88f03e5252..0e54b2a0c211 100644
--- a/drivers/net/dsa/b53/b53_spi.c
+++ b/drivers/net/dsa/b53/b53_spi.c
@@ -314,7 +314,7 @@ static int b53_spi_probe(struct spi_device *spi)
 	return 0;
 }
 
-static int b53_spi_remove(struct spi_device *spi)
+static void b53_spi_remove(struct spi_device *spi)
 {
 	struct b53_device *dev = spi_get_drvdata(spi);
 
@@ -322,8 +322,6 @@ static int b53_spi_remove(struct spi_device *spi)
 		b53_switch_remove(dev);
 
 	spi_set_drvdata(spi, NULL);
-
-	return 0;
 }
 
 static void b53_spi_shutdown(struct spi_device *spi)
diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c
index 4591bb1c05d2..c51b716657db 100644
--- a/drivers/net/dsa/b53/b53_srab.c
+++ b/drivers/net/dsa/b53/b53_srab.c
@@ -443,6 +443,39 @@ static void b53_srab_irq_disable(struct b53_device *dev, int port)
 	}
 }
 
+static void b53_srab_phylink_get_caps(struct b53_device *dev, int port,
+				      struct phylink_config *config)
+{
+	struct b53_srab_priv *priv = dev->priv;
+	struct b53_srab_port_priv *p = &priv->port_intrs[port];
+
+	switch (p->mode) {
+	case PHY_INTERFACE_MODE_SGMII:
+#if IS_ENABLED(CONFIG_B53_SERDES)
+		/* If p->mode indicates SGMII mode, that essentially means we
+		 * are using a serdes. Ask the serdes for the capabilities.
+		 */
+		b53_serdes_phylink_get_caps(dev, port, config);
+#endif
+		break;
+
+	case PHY_INTERFACE_MODE_NA:
+		break;
+
+	case PHY_INTERFACE_MODE_RGMII:
+		/* If we support RGMII, support all RGMII modes, since
+		 * that dictates the PHY delay settings.
+		 */
+		phy_interface_set_rgmii(config->supported_interfaces);
+		break;
+
+	default:
+		/* Some other mode (e.g.
MII, GMII etc) */ + __set_bit(p->mode, config->supported_interfaces); + break; + } +} + static const struct b53_io_ops b53_srab_ops = { .read8 = b53_srab_read8, .read16 = b53_srab_read16, @@ -456,13 +489,13 @@ static const struct b53_io_ops b53_srab_ops = { .write64 = b53_srab_write64, .irq_enable = b53_srab_irq_enable, .irq_disable = b53_srab_irq_disable, + .phylink_get_caps = b53_srab_phylink_get_caps, #if IS_ENABLED(CONFIG_B53_SERDES) .serdes_map_lane = b53_srab_serdes_map_lane, .serdes_link_state = b53_serdes_link_state, .serdes_config = b53_serdes_config, .serdes_an_restart = b53_serdes_an_restart, .serdes_link_set = b53_serdes_link_set, - .serdes_phylink_validate = b53_serdes_phylink_validate, #endif }; diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c index 33daaf10c488..263e41191c29 100644 --- a/drivers/net/dsa/dsa_loop.c +++ b/drivers/net/dsa/dsa_loop.c @@ -168,7 +168,8 @@ static int dsa_loop_phy_write(struct dsa_switch *ds, int port, static int dsa_loop_port_bridge_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge, - bool *tx_fwd_offload) + bool *tx_fwd_offload, + struct netlink_ext_ack *extack) { dev_dbg(ds->dev, "%s: port: %d, bridge: %s\n", __func__, port, bridge.dev->name); diff --git a/drivers/net/dsa/hirschmann/hellcreek.c b/drivers/net/dsa/hirschmann/hellcreek.c index 726f267cb228..ac1f3b3a7040 100644 --- a/drivers/net/dsa/hirschmann/hellcreek.c +++ b/drivers/net/dsa/hirschmann/hellcreek.c @@ -675,7 +675,8 @@ static int hellcreek_bridge_flags(struct dsa_switch *ds, int port, static int hellcreek_port_bridge_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge, - bool *tx_fwd_offload) + bool *tx_fwd_offload, + struct netlink_ext_ack *extack) { struct hellcreek *hellcreek = ds->priv; @@ -827,7 +828,8 @@ static int hellcreek_fdb_get(struct hellcreek *hellcreek, } static int hellcreek_fdb_add(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid) + const unsigned char *addr, u16 vid, + struct dsa_db db) { struct hellcreek_fdb_entry entry = { 0 }; struct hellcreek *hellcreek = ds->priv; @@ -872,7 +874,8 @@ out: } static int hellcreek_fdb_del(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid) + const unsigned char *addr, u16 vid, + struct dsa_db db) { struct hellcreek_fdb_entry entry = { 0 }; struct hellcreek *hellcreek = ds->priv; diff --git a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c index b3bc948d6145..ffd06cf8c44f 100644 --- a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c +++ b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c @@ -331,7 +331,7 @@ static void hellcreek_get_rxts(struct hellcreek *hellcreek, shwt = skb_hwtstamps(skb); memset(shwt, 0, sizeof(*shwt)); shwt->hwtstamp = ns_to_ktime(ns); - netif_rx_ni(skb); + netif_rx(skb); } } diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c index d55784d19fa4..e03ff1f267bb 100644 --- a/drivers/net/dsa/lan9303-core.c +++ b/drivers/net/dsa/lan9303-core.c @@ -10,6 +10,7 @@ #include <linux/mii.h> #include <linux/phy.h> #include <linux/if_bridge.h> +#include <linux/if_vlan.h> #include <linux/etherdevice.h> #include "lan9303.h" @@ -1083,28 +1084,35 @@ static void lan9303_adjust_link(struct dsa_switch *ds, int port, static int lan9303_port_enable(struct dsa_switch *ds, int port, struct phy_device *phy) { + struct dsa_port *dp = dsa_to_port(ds, port); struct lan9303 *chip = ds->priv; - if (!dsa_is_user_port(ds, port)) + if (!dsa_port_is_user(dp)) return 0; + 
vlan_vid_add(dp->cpu_dp->master, htons(ETH_P_8021Q), port); + return lan9303_enable_processing_port(chip, port); } static void lan9303_port_disable(struct dsa_switch *ds, int port) { + struct dsa_port *dp = dsa_to_port(ds, port); struct lan9303 *chip = ds->priv; - if (!dsa_is_user_port(ds, port)) + if (!dsa_port_is_user(dp)) return; + vlan_vid_del(dp->cpu_dp->master, htons(ETH_P_8021Q), port); + lan9303_disable_processing_port(chip, port); lan9303_phy_write(ds, chip->phy_addr_base + port, MII_BMCR, BMCR_PDOWN); } static int lan9303_port_bridge_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge, - bool *tx_fwd_offload) + bool *tx_fwd_offload, + struct netlink_ext_ack *extack) { struct lan9303 *chip = ds->priv; @@ -1181,7 +1189,8 @@ static void lan9303_port_fast_age(struct dsa_switch *ds, int port) } static int lan9303_port_fdb_add(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid) + const unsigned char *addr, u16 vid, + struct dsa_db db) { struct lan9303 *chip = ds->priv; @@ -1193,8 +1202,8 @@ static int lan9303_port_fdb_add(struct dsa_switch *ds, int port, } static int lan9303_port_fdb_del(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid) - + const unsigned char *addr, u16 vid, + struct dsa_db db) { struct lan9303 *chip = ds->priv; @@ -1238,7 +1247,8 @@ static int lan9303_port_mdb_prepare(struct dsa_switch *ds, int port, } static int lan9303_port_mdb_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_mdb *mdb) + const struct switchdev_obj_port_mdb *mdb, + struct dsa_db db) { struct lan9303 *chip = ds->priv; int err; @@ -1253,7 +1263,8 @@ static int lan9303_port_mdb_add(struct dsa_switch *ds, int port, } static int lan9303_port_mdb_del(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_mdb *mdb) + const struct switchdev_obj_port_mdb *mdb, + struct dsa_db db) { struct lan9303 *chip = ds->priv; @@ -1310,7 +1321,7 @@ static int lan9303_probe_reset_gpio(struct lan9303 *chip, struct device_node *np) { chip->reset_gpio = devm_gpiod_get_optional(chip->dev, "reset", - GPIOD_OUT_LOW); + GPIOD_OUT_HIGH); if (IS_ERR(chip->reset_gpio)) return PTR_ERR(chip->reset_gpio); diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c index 320ee7fe91a8..a416240d001b 100644 --- a/drivers/net/dsa/lantiq_gswip.c +++ b/drivers/net/dsa/lantiq_gswip.c @@ -213,6 +213,7 @@ #define GSWIP_MAC_CTRL_0_GMII_MII 0x0001 #define GSWIP_MAC_CTRL_0_GMII_RGMII 0x0002 #define GSWIP_MAC_CTRL_2p(p) (0x905 + ((p) * 0xC)) +#define GSWIP_MAC_CTRL_2_LCHKL BIT(2) /* Frame Length Check Long Enable */ #define GSWIP_MAC_CTRL_2_MLEN BIT(3) /* Maximum Untagged Frame Lnegth */ /* Ethernet Switch Fetch DMA Port Control Register */ @@ -239,6 +240,15 @@ #define XRX200_GPHY_FW_ALIGN (16 * 1024) +/* Maximum packet size supported by the switch. In theory this should be 10240, + * but long packets currently cause lock-ups with an MTU of over 2526. Medium + * packets are sometimes dropped (e.g. TCP over 2477, UDP over 2516-2519, ICMP + * over 2526), hence an MTU value of 2400 seems safe. This issue only affects + * packet reception. This is probably caused by the PPA engine, which is on the + * RX part of the device. Packet transmission works properly up to 10240. 
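
Worked out, that 2400-byte budget yields the MTU limit used by the new callbacks below (editorial arithmetic; the constants mirror VLAN_ETH_HLEN and ETH_FCS_LEN):

#include <stdio.h>

#define SKETCH_MAX_PACKET	2400	/* GSWIP_MAX_PACKET_LENGTH */
#define SKETCH_VLAN_ETH_HLEN	18	/* Ethernet + 802.1Q header */
#define SKETCH_ETH_FCS_LEN	4	/* frame checksum */

int main(void)
{
	/* the figure gswip_port_max_mtu() reports to DSA; the 8-byte
	 * special tag rides within the same packet budget
	 */
	printf("max user port MTU: %d\n",
	       SKETCH_MAX_PACKET - SKETCH_VLAN_ETH_HLEN -
	       SKETCH_ETH_FCS_LEN);	/* 2378 */
	return 0;
}
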
+ */ +#define GSWIP_MAX_PACKET_LENGTH 2400 + struct gswip_hw_info { int max_ports; int cpu_port; @@ -863,10 +873,6 @@ static int gswip_setup(struct dsa_switch *ds) gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_INGRESS, GSWIP_PCE_PCTRL_0p(cpu_port)); - gswip_switch_mask(priv, 0, GSWIP_MAC_CTRL_2_MLEN, - GSWIP_MAC_CTRL_2p(cpu_port)); - gswip_switch_w(priv, VLAN_ETH_FRAME_LEN + 8 + ETH_FCS_LEN, - GSWIP_MAC_FLEN); gswip_switch_mask(priv, 0, GSWIP_BM_QUEUE_GCTRL_GL_MOD, GSWIP_BM_QUEUE_GCTRL); @@ -883,6 +889,8 @@ static int gswip_setup(struct dsa_switch *ds) return err; } + ds->mtu_enforcement_ingress = true; + gswip_port_enable(ds, cpu_port, NULL); ds->configure_vlan_while_not_filtering = false; @@ -1152,7 +1160,8 @@ static int gswip_vlan_remove(struct gswip_priv *priv, static int gswip_port_bridge_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge, - bool *tx_fwd_offload) + bool *tx_fwd_offload, + struct netlink_ext_ack *extack) { struct net_device *br = bridge.dev; struct gswip_priv *priv = ds->priv; @@ -1389,13 +1398,15 @@ static int gswip_port_fdb(struct dsa_switch *ds, int port, } static int gswip_port_fdb_add(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid) + const unsigned char *addr, u16 vid, + struct dsa_db db) { return gswip_port_fdb(ds, port, addr, vid, true); } static int gswip_port_fdb_del(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid) + const unsigned char *addr, u16 vid, + struct dsa_db db) { return gswip_port_fdb(ds, port, addr, vid, false); } @@ -1446,6 +1457,39 @@ static int gswip_port_fdb_dump(struct dsa_switch *ds, int port, return 0; } +static int gswip_port_max_mtu(struct dsa_switch *ds, int port) +{ + /* Includes 8 bytes for special header. */ + return GSWIP_MAX_PACKET_LENGTH - VLAN_ETH_HLEN - ETH_FCS_LEN; +} + +static int gswip_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu) +{ + struct gswip_priv *priv = ds->priv; + int cpu_port = priv->hw_info->cpu_port; + + /* CPU port always has maximum mtu of user ports, so use it to set + * switch frame size, including 8 byte special header. + */ + if (port == cpu_port) { + new_mtu += 8; + gswip_switch_w(priv, VLAN_ETH_HLEN + new_mtu + ETH_FCS_LEN, + GSWIP_MAC_FLEN); + } + + /* Enable MLEN for ports with non-standard MTUs, including the special + * header on the CPU port added above. 
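
The new gswip MTU handlers replace the fixed GSWIP_MAC_FLEN programming from gswip_setup(): the CPU port follows the largest user MTU plus the 8-byte special tag, and MLEN is raised only on ports running a non-standard MTU. A compilable model of the arithmetic, with the constants taken from the hunks above:

#include <stdio.h>

#define GSWIP_MAX_PACKET_LENGTH	2400	/* empirical RX limit, per the comment above */
#define VLAN_ETH_HLEN		18
#define ETH_FCS_LEN		4
#define GSWIP_SPECIAL_TAG_LEN	8	/* DSA special header on the CPU port */

static int gswip_port_max_mtu(void)
{
	return GSWIP_MAX_PACKET_LENGTH - VLAN_ETH_HLEN - ETH_FCS_LEN;
}

/* Frame length programmed into GSWIP_MAC_FLEN for a given CPU-port MTU */
static int gswip_cpu_flen(int new_mtu)
{
	new_mtu += GSWIP_SPECIAL_TAG_LEN;
	return VLAN_ETH_HLEN + new_mtu + ETH_FCS_LEN;
}

int main(void)
{
	printf("max user MTU: %d\n", gswip_port_max_mtu());	 /* 2378 */
	printf("FLEN for MTU 1500: %d\n", gswip_cpu_flen(1500)); /* 1530 */
	return 0;
}
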
+ */ + if (new_mtu != ETH_DATA_LEN) + gswip_switch_mask(priv, 0, GSWIP_MAC_CTRL_2_MLEN, + GSWIP_MAC_CTRL_2p(port)); + else + gswip_switch_mask(priv, GSWIP_MAC_CTRL_2_MLEN, 0, + GSWIP_MAC_CTRL_2p(port)); + + return 0; +} + static void gswip_xrx200_phylink_get_caps(struct dsa_switch *ds, int port, struct phylink_config *config) { @@ -1791,6 +1835,8 @@ static const struct dsa_switch_ops gswip_xrx200_switch_ops = { .port_fdb_add = gswip_port_fdb_add, .port_fdb_del = gswip_port_fdb_del, .port_fdb_dump = gswip_port_fdb_dump, + .port_change_mtu = gswip_port_change_mtu, + .port_max_mtu = gswip_port_max_mtu, .phylink_get_caps = gswip_xrx200_phylink_get_caps, .phylink_mac_config = gswip_phylink_mac_config, .phylink_mac_link_down = gswip_phylink_mac_link_down, @@ -1815,6 +1861,8 @@ static const struct dsa_switch_ops gswip_xrx300_switch_ops = { .port_fdb_add = gswip_port_fdb_add, .port_fdb_del = gswip_port_fdb_del, .port_fdb_dump = gswip_port_fdb_dump, + .port_change_mtu = gswip_port_change_mtu, + .port_max_mtu = gswip_port_max_mtu, .phylink_get_caps = gswip_xrx300_phylink_get_caps, .phylink_mac_config = gswip_phylink_mac_config, .phylink_mac_link_down = gswip_phylink_mac_link_down, @@ -2176,8 +2224,8 @@ static int gswip_remove(struct platform_device *pdev) if (priv->ds->slave_mii_bus) { mdiobus_unregister(priv->ds->slave_mii_bus); - mdiobus_free(priv->ds->slave_mii_bus); of_node_put(priv->ds->slave_mii_bus->dev.of_node); + mdiobus_free(priv->ds->slave_mii_bus); } for (i = 0; i < priv->num_gphy_fw; i++) diff --git a/drivers/net/dsa/microchip/ksz8795_spi.c b/drivers/net/dsa/microchip/ksz8795_spi.c index 866767b70d65..673589dc88ab 100644 --- a/drivers/net/dsa/microchip/ksz8795_spi.c +++ b/drivers/net/dsa/microchip/ksz8795_spi.c @@ -87,7 +87,7 @@ static int ksz8795_spi_probe(struct spi_device *spi) return 0; } -static int ksz8795_spi_remove(struct spi_device *spi) +static void ksz8795_spi_remove(struct spi_device *spi) { struct ksz_device *dev = spi_get_drvdata(spi); @@ -95,8 +95,6 @@ static int ksz8795_spi_remove(struct spi_device *spi) ksz_switch_remove(dev); spi_set_drvdata(spi, NULL); - - return 0; } static void ksz8795_spi_shutdown(struct spi_device *spi) diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c index a85d990896b0..a4699481c746 100644 --- a/drivers/net/dsa/microchip/ksz9477.c +++ b/drivers/net/dsa/microchip/ksz9477.c @@ -11,6 +11,7 @@ #include <linux/platform_data/microchip-ksz.h> #include <linux/phy.h> #include <linux/if_bridge.h> +#include <linux/if_vlan.h> #include <net/dsa.h> #include <net/switchdev.h> @@ -64,6 +65,100 @@ static const struct { { 0x83, "tx_discards" }, }; +struct ksz9477_stats_raw { + u64 rx_hi; + u64 rx_undersize; + u64 rx_fragments; + u64 rx_oversize; + u64 rx_jabbers; + u64 rx_symbol_err; + u64 rx_crc_err; + u64 rx_align_err; + u64 rx_mac_ctrl; + u64 rx_pause; + u64 rx_bcast; + u64 rx_mcast; + u64 rx_ucast; + u64 rx_64_or_less; + u64 rx_65_127; + u64 rx_128_255; + u64 rx_256_511; + u64 rx_512_1023; + u64 rx_1024_1522; + u64 rx_1523_2000; + u64 rx_2001; + u64 tx_hi; + u64 tx_late_col; + u64 tx_pause; + u64 tx_bcast; + u64 tx_mcast; + u64 tx_ucast; + u64 tx_deferred; + u64 tx_total_col; + u64 tx_exc_col; + u64 tx_single_col; + u64 tx_mult_col; + u64 rx_total; + u64 tx_total; + u64 rx_discards; + u64 tx_discards; +}; + +static void ksz9477_r_mib_stats64(struct ksz_device *dev, int port) +{ + struct rtnl_link_stats64 *stats; + struct ksz9477_stats_raw *raw; + struct ksz_port_mib *mib; + + mib = &dev->ports[port].mib; + stats = 
&mib->stats64; + raw = (struct ksz9477_stats_raw *)mib->counters; + + spin_lock(&mib->stats64_lock); + + stats->rx_packets = raw->rx_bcast + raw->rx_mcast + raw->rx_ucast; + stats->tx_packets = raw->tx_bcast + raw->tx_mcast + raw->tx_ucast; + + /* HW counters are counting bytes + FCS which is not acceptable + * for rtnl_link_stats64 interface + */ + stats->rx_bytes = raw->rx_total - stats->rx_packets * ETH_FCS_LEN; + stats->tx_bytes = raw->tx_total - stats->tx_packets * ETH_FCS_LEN; + + stats->rx_length_errors = raw->rx_undersize + raw->rx_fragments + + raw->rx_oversize; + + stats->rx_crc_errors = raw->rx_crc_err; + stats->rx_frame_errors = raw->rx_align_err; + stats->rx_dropped = raw->rx_discards; + stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors + + stats->rx_frame_errors + stats->rx_dropped; + + stats->tx_window_errors = raw->tx_late_col; + stats->tx_fifo_errors = raw->tx_discards; + stats->tx_aborted_errors = raw->tx_exc_col; + stats->tx_errors = stats->tx_window_errors + stats->tx_fifo_errors + + stats->tx_aborted_errors; + + stats->multicast = raw->rx_mcast; + stats->collisions = raw->tx_total_col; + + spin_unlock(&mib->stats64_lock); +} + +static void ksz9477_get_stats64(struct dsa_switch *ds, int port, + struct rtnl_link_stats64 *s) +{ + struct ksz_device *dev = ds->priv; + struct ksz_port_mib *mib; + + mib = &dev->ports[port].mib; + + spin_lock(&mib->stats64_lock); + memcpy(s, &mib->stats64, sizeof(*s)); + spin_unlock(&mib->stats64_lock); +} + static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set) { regmap_update_bits(dev->regmap[0], addr, bits, set ? bits : 0); @@ -88,6 +183,29 @@ static void ksz9477_port_cfg32(struct ksz_device *dev, int port, int offset, bits, set ? bits : 0); } +static int ksz9477_change_mtu(struct dsa_switch *ds, int port, int mtu) +{ + struct ksz_device *dev = ds->priv; + u16 frame_size, max_frame = 0; + int i; + + frame_size = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN; + + /* Cache the per-port MTU setting */ + dev->ports[port].max_frame = frame_size; + + for (i = 0; i < dev->port_cnt; i++) + max_frame = max(max_frame, dev->ports[i].max_frame); + + return regmap_update_bits(dev->regmap[1], REG_SW_MTU__2, + REG_SW_MTU_MASK, max_frame); +} + +static int ksz9477_max_mtu(struct dsa_switch *ds, int port) +{ + return KSZ9477_MAX_FRAME_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN; +} + static int ksz9477_wait_vlan_ctrl_ready(struct ksz_device *dev) { unsigned int val; @@ -546,7 +664,8 @@ static int ksz9477_port_vlan_del(struct dsa_switch *ds, int port, } static int ksz9477_port_fdb_add(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid) + const unsigned char *addr, u16 vid, + struct dsa_db db) { struct ksz_device *dev = ds->priv; u32 alu_table[4]; @@ -603,7 +722,8 @@ exit: } static int ksz9477_port_fdb_del(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid) + const unsigned char *addr, u16 vid, + struct dsa_db db) { struct ksz_device *dev = ds->priv; u32 alu_table[4]; @@ -745,7 +865,8 @@ exit: } static int ksz9477_port_mdb_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_mdb *mdb) + const struct switchdev_obj_port_mdb *mdb, + struct dsa_db db) { struct ksz_device *dev = ds->priv; u32 static_table[4]; @@ -820,7 +941,8 @@ exit: } static int ksz9477_port_mdb_del(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_mdb *mdb) + const struct switchdev_obj_port_mdb *mdb, + struct dsa_db db) { struct ksz_device *dev = ds->priv; u32 static_table[4]; @@ -1318,8 +1440,14 @@ static int 
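
ksz9477_r_mib_stats64() above derives rtnl_link_stats64 values by casting the raw counter array onto a struct whose field order mirrors the hardware MIB layout; note the FCS subtraction, since the chip's byte counters include the 4-byte FCS that rtnl semantics exclude. A simplified standalone model of the arithmetic (the real function also folds length and window errors into the totals):

#include <stdint.h>
#include <stdio.h>

#define ETH_FCS_LEN 4

struct raw_mib {	/* subset of the on-chip counters */
	uint64_t rx_bcast, rx_mcast, rx_ucast, rx_total;
	uint64_t rx_crc_err, rx_align_err, rx_discards;
};

struct link_stats {
	uint64_t rx_packets, rx_bytes, rx_errors;
};

static void derive_stats(const struct raw_mib *raw, struct link_stats *s)
{
	s->rx_packets = raw->rx_bcast + raw->rx_mcast + raw->rx_ucast;
	/* hardware counts bytes including FCS; rtnl stats must not */
	s->rx_bytes = raw->rx_total - s->rx_packets * ETH_FCS_LEN;
	s->rx_errors = raw->rx_crc_err + raw->rx_align_err + raw->rx_discards;
}

int main(void)
{
	struct raw_mib raw = { 1, 2, 97, 70000, 1, 0, 2 };
	struct link_stats s;

	derive_stats(&raw, &s);
	printf("pkts=%llu bytes=%llu errs=%llu\n",
	       (unsigned long long)s.rx_packets,
	       (unsigned long long)s.rx_bytes,
	       (unsigned long long)s.rx_errors);
	return 0;
}
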
ksz9477_setup(struct dsa_switch *ds) /* Do not work correctly with tail tagging. */ ksz_cfg(dev, REG_SW_MAC_CTRL_0, SW_CHECK_LENGTH, false); - /* accept packet up to 2000bytes */ - ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_LEGAL_PACKET_DISABLE, true); + /* Enable REG_SW_MTU__2 reg by setting SW_JUMBO_PACKET */ + ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_JUMBO_PACKET, true); + + /* Now we can configure default MTU value */ + ret = regmap_update_bits(dev->regmap[1], REG_SW_MTU__2, REG_SW_MTU_MASK, + VLAN_ETH_FRAME_LEN + ETH_FCS_LEN); + if (ret) + return ret; ksz9477_config_cpu_port(ds); @@ -1365,6 +1493,9 @@ static const struct dsa_switch_ops ksz9477_switch_ops = { .port_mdb_del = ksz9477_port_mdb_del, .port_mirror_add = ksz9477_port_mirror_add, .port_mirror_del = ksz9477_port_mirror_del, + .get_stats64 = ksz9477_get_stats64, + .port_change_mtu = ksz9477_change_mtu, + .port_max_mtu = ksz9477_max_mtu, }; static u32 ksz9477_get_port_addr(int port, int offset) @@ -1524,6 +1655,7 @@ static int ksz9477_switch_init(struct ksz_device *dev) if (!dev->ports) return -ENOMEM; for (i = 0; i < dev->port_cnt; i++) { + spin_lock_init(&dev->ports[i].mib.stats64_lock); mutex_init(&dev->ports[i].mib.cnt_mutex); dev->ports[i].mib.counters = devm_kzalloc(dev->dev, @@ -1552,6 +1684,7 @@ static const struct ksz_dev_ops ksz9477_dev_ops = { .port_setup = ksz9477_port_setup, .r_mib_cnt = ksz9477_r_mib_cnt, .r_mib_pkt = ksz9477_r_mib_pkt, + .r_mib_stat64 = ksz9477_r_mib_stats64, .freeze_mib = ksz9477_freeze_mib, .port_init_cnt = ksz9477_port_init_cnt, .shutdown = ksz9477_reset_switch, diff --git a/drivers/net/dsa/microchip/ksz9477_i2c.c b/drivers/net/dsa/microchip/ksz9477_i2c.c index f3afb8b8c4cc..cbc0b20e7e1b 100644 --- a/drivers/net/dsa/microchip/ksz9477_i2c.c +++ b/drivers/net/dsa/microchip/ksz9477_i2c.c @@ -92,6 +92,7 @@ static const struct of_device_id ksz9477_dt_ids[] = { { .compatible = "microchip,ksz9893" }, { .compatible = "microchip,ksz9563" }, { .compatible = "microchip,ksz9567" }, + { .compatible = "microchip,ksz8563" }, {}, }; MODULE_DEVICE_TABLE(of, ksz9477_dt_ids); diff --git a/drivers/net/dsa/microchip/ksz9477_reg.h b/drivers/net/dsa/microchip/ksz9477_reg.h index 16939f29faa5..0bd58467181f 100644 --- a/drivers/net/dsa/microchip/ksz9477_reg.h +++ b/drivers/net/dsa/microchip/ksz9477_reg.h @@ -176,6 +176,7 @@ #define REG_SW_MAC_ADDR_5 0x0307 #define REG_SW_MTU__2 0x0308 +#define REG_SW_MTU_MASK GENMASK(13, 0) #define REG_SW_ISP_TPID__2 0x030A @@ -1662,4 +1663,6 @@ /* 148,800 frames * 67 ms / 100 */ #define BROADCAST_STORM_VALUE 9969 +#define KSZ9477_MAX_FRAME_SIZE 9000 + #endif /* KSZ9477_REGS_H */ diff --git a/drivers/net/dsa/microchip/ksz9477_spi.c b/drivers/net/dsa/microchip/ksz9477_spi.c index e3cb0e6c9f6f..940bb9665f15 100644 --- a/drivers/net/dsa/microchip/ksz9477_spi.c +++ b/drivers/net/dsa/microchip/ksz9477_spi.c @@ -65,7 +65,7 @@ static int ksz9477_spi_probe(struct spi_device *spi) return 0; } -static int ksz9477_spi_remove(struct spi_device *spi) +static void ksz9477_spi_remove(struct spi_device *spi) { struct ksz_device *dev = spi_get_drvdata(spi); @@ -73,8 +73,6 @@ static int ksz9477_spi_remove(struct spi_device *spi) ksz_switch_remove(dev); spi_set_drvdata(spi, NULL); - - return 0; } static void ksz9477_spi_shutdown(struct spi_device *spi) diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index 7e33ec73f803..8014b18d9391 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -26,7 +26,7 @@ void 
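
The KSZ9477 exposes a single switch-wide REG_SW_MTU__2 register, so ksz9477_change_mtu() caches each port's requested frame size and programs the maximum across all ports; setup enables SW_JUMBO_PACKET and seeds the register with the standard 1522-byte frame (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN). A compilable model of the max-aggregation:

#include <stdio.h>

#define VLAN_ETH_HLEN	18
#define ETH_FCS_LEN	4
#define PORT_CNT	7

static unsigned int max_frame[PORT_CNT];	/* cached per-port frame sizes */

/* Returns the value to program into the single switch-wide MTU register */
static unsigned int change_mtu(int port, int mtu)
{
	unsigned int max = 0;
	int i;

	max_frame[port] = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
	for (i = 0; i < PORT_CNT; i++)
		if (max_frame[i] > max)
			max = max_frame[i];
	return max;
}

int main(void)
{
	/* max user MTU is KSZ9477_MAX_FRAME_SIZE - 18 - 4 = 8978 */
	printf("after port 1 -> 1500: %u\n", change_mtu(1, 1500)); /* 1522 */
	printf("after port 2 -> 8978: %u\n", change_mtu(2, 8978)); /* 9000 */
	return 0;
}
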
ksz_update_port_member(struct ksz_device *dev, int port) struct dsa_switch *ds = dev->ds; u8 port_member = 0, cpu_port; const struct dsa_port *dp; - int i; + int i, j; if (!dsa_is_user_port(ds, port)) return; @@ -45,13 +45,33 @@ void ksz_update_port_member(struct ksz_device *dev, int port) continue; if (!dsa_port_bridge_same(dp, other_dp)) continue; + if (other_p->stp_state != BR_STATE_FORWARDING) + continue; - if (other_p->stp_state == BR_STATE_FORWARDING && - p->stp_state == BR_STATE_FORWARDING) { + if (p->stp_state == BR_STATE_FORWARDING) { val |= BIT(port); port_member |= BIT(i); } + /* Retain port [i]'s relationship to other ports than [port] */ + for (j = 0; j < ds->num_ports; j++) { + const struct dsa_port *third_dp; + struct ksz_port *third_p; + + if (j == i) + continue; + if (j == port) + continue; + if (!dsa_is_user_port(ds, j)) + continue; + third_p = &dev->ports[j]; + if (third_p->stp_state != BR_STATE_FORWARDING) + continue; + third_dp = dsa_to_port(ds, j); + if (dsa_port_bridge_same(other_dp, third_dp)) + val |= BIT(j); + } + dev->dev_ops->cfg_port_member(dev, i, val | cpu_port); } @@ -110,6 +130,10 @@ static void ksz_mib_read_work(struct work_struct *work) } port_r_cnt(dev, i); p->read = false; + + if (dev->dev_ops->r_mib_stat64) + dev->dev_ops->r_mib_stat64(dev, i); + mutex_unlock(&mib->cnt_mutex); } @@ -193,7 +217,8 @@ EXPORT_SYMBOL_GPL(ksz_get_ethtool_stats); int ksz_port_bridge_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge, - bool *tx_fwd_offload) + bool *tx_fwd_offload, + struct netlink_ext_ack *extack) { /* port_stp_state_set() will be called after to put the port in * appropriate state so there is no need to do anything. @@ -252,7 +277,8 @@ int ksz_port_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb, EXPORT_SYMBOL_GPL(ksz_port_fdb_dump); int ksz_port_mdb_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_mdb *mdb) + const struct switchdev_obj_port_mdb *mdb, + struct dsa_db db) { struct ksz_device *dev = ds->priv; struct alu_struct alu; @@ -297,7 +323,8 @@ int ksz_port_mdb_add(struct dsa_switch *ds, int port, EXPORT_SYMBOL_GPL(ksz_port_mdb_add); int ksz_port_mdb_del(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_mdb *mdb) + const struct switchdev_obj_port_mdb *mdb, + struct dsa_db db) { struct ksz_device *dev = ds->priv; struct alu_struct alu; @@ -449,7 +476,7 @@ int ksz_switch_register(struct ksz_device *dev, } /* Read MIB counters every 30 seconds to avoid overflow. */ - dev->mib_read_interval = msecs_to_jiffies(30000); + dev->mib_read_interval = msecs_to_jiffies(5000); /* Start the MIB timer. 
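
The reworked ksz_update_port_member() fixes a case where updating one port's STP state clobbered the forwarding relationships between the other ports: the inner loop now re-derives each port's membership against every third port as well. The net effect is that two user ports forward to each other only when both are in the same bridge and both are in the forwarding state, plus the CPU port unconditionally. (The MIB poll interval also drops from 30 s to 5 s, evidently to keep the newly cached stats64 reasonably fresh.) A standalone model of the resulting rule:

#include <stdio.h>

#define N_PORTS	5
#define CPU_BIT	(1u << 4)	/* index 4 models the CPU port */

struct port {
	int bridge;		/* bridge id, -1 if standalone */
	int forwarding;		/* in BR_STATE_FORWARDING? */
};

/* Membership mask for user port i: all other forwarding ports in the
 * same bridge, plus the CPU port.
 */
static unsigned int member_mask(const struct port *p, int i)
{
	unsigned int mask = CPU_BIT;
	int j;

	if (p[i].bridge < 0 || !p[i].forwarding)
		return mask;
	for (j = 0; j < N_PORTS - 1; j++)
		if (j != i && p[j].bridge == p[i].bridge && p[j].forwarding)
			mask |= 1u << j;
	return mask;
}

int main(void)
{
	struct port p[N_PORTS] = {
		{ 0, 1 }, { 0, 1 }, { 0, 0 }, { -1, 1 }, { 0, 0 }
	};
	int i;

	for (i = 0; i < N_PORTS - 1; i++)
		printf("port %d member mask 0x%x\n", i, member_mask(p, i));
	return 0;
}
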
*/ schedule_delayed_work(&dev->mib_read, 0); diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h index 3db63f62f0a1..fa39ee73cbd2 100644 --- a/drivers/net/dsa/microchip/ksz_common.h +++ b/drivers/net/dsa/microchip/ksz_common.h @@ -22,6 +22,8 @@ struct ksz_port_mib { struct mutex cnt_mutex; /* structure access */ u8 cnt_ptr; u64 *counters; + struct rtnl_link_stats64 stats64; + struct spinlock stats64_lock; }; struct ksz_port { @@ -39,6 +41,7 @@ struct ksz_port { struct ksz_port_mib mib; phy_interface_t interface; + u16 max_frame; }; struct ksz_device { @@ -128,6 +131,7 @@ struct ksz_dev_ops { u64 *cnt); void (*r_mib_pkt)(struct ksz_device *dev, int port, u16 addr, u64 *dropped, u64 *cnt); + void (*r_mib_stat64)(struct ksz_device *dev, int port); void (*freeze_mib)(struct ksz_device *dev, int port, bool freeze); void (*port_init_cnt)(struct ksz_device *dev, int port); int (*shutdown)(struct ksz_device *dev); @@ -156,16 +160,19 @@ void ksz_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode, int ksz_sset_count(struct dsa_switch *ds, int port, int sset); void ksz_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *buf); int ksz_port_bridge_join(struct dsa_switch *ds, int port, - struct dsa_bridge bridge, bool *tx_fwd_offload); + struct dsa_bridge bridge, bool *tx_fwd_offload, + struct netlink_ext_ack *extack); void ksz_port_bridge_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge); void ksz_port_fast_age(struct dsa_switch *ds, int port); int ksz_port_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb, void *data); int ksz_port_mdb_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_mdb *mdb); + const struct switchdev_obj_port_mdb *mdb, + struct dsa_db db); int ksz_port_mdb_del(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_mdb *mdb); + const struct switchdev_obj_port_mdb *mdb, + struct dsa_db db); int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy); /* Common register access functions */ diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index f74f25f479ed..669f008528ec 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -1186,7 +1186,8 @@ mt7530_port_bridge_flags(struct dsa_switch *ds, int port, static int mt7530_port_bridge_join(struct dsa_switch *ds, int port, - struct dsa_bridge bridge, bool *tx_fwd_offload) + struct dsa_bridge bridge, bool *tx_fwd_offload, + struct netlink_ext_ack *extack) { struct dsa_port *dp = dsa_to_port(ds, port), *other_dp; u32 port_bitmap = BIT(MT7530_CPU_PORT); @@ -1349,7 +1350,8 @@ mt7530_port_bridge_leave(struct dsa_switch *ds, int port, static int mt7530_port_fdb_add(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid) + const unsigned char *addr, u16 vid, + struct dsa_db db) { struct mt7530_priv *priv = ds->priv; int ret; @@ -1365,7 +1367,8 @@ mt7530_port_fdb_add(struct dsa_switch *ds, int port, static int mt7530_port_fdb_del(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid) + const unsigned char *addr, u16 vid, + struct dsa_db db) { struct mt7530_priv *priv = ds->priv; int ret; @@ -1416,7 +1419,8 @@ err: static int mt7530_port_mdb_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_mdb *mdb) + const struct switchdev_obj_port_mdb *mdb, + struct dsa_db db) { struct mt7530_priv *priv = ds->priv; const u8 *addr = mdb->addr; @@ -1442,7 +1446,8 @@ mt7530_port_mdb_add(struct dsa_switch *ds, int port, static int 
mt7530_port_mdb_del(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_mdb *mdb) + const struct switchdev_obj_port_mdb *mdb, + struct dsa_db db) { struct mt7530_priv *priv = ds->priv; const u8 *addr = mdb->addr; @@ -2936,7 +2941,7 @@ mt753x_phylink_validate(struct dsa_switch *ds, int port, phylink_set_port_modes(mask); - if (state->interface != PHY_INTERFACE_MODE_TRGMII || + if (state->interface != PHY_INTERFACE_MODE_TRGMII && !phy_interface_mode_is_8023z(state->interface)) { phylink_set(mask, 10baseT_Half); phylink_set(mask, 10baseT_Full); diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 85527fe4fcc8..84b90fc36c58 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -580,6 +580,25 @@ static const u8 mv88e6185_phy_interface_modes[] = { [MV88E6185_PORT_STS_CMODE_PHY] = PHY_INTERFACE_MODE_SGMII, }; +static void mv88e6095_phylink_get_caps(struct mv88e6xxx_chip *chip, int port, + struct phylink_config *config) +{ + u8 cmode = chip->ports[port].cmode; + + config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100; + + if (mv88e6xxx_phy_is_internal(chip->ds, port)) { + __set_bit(PHY_INTERFACE_MODE_MII, config->supported_interfaces); + } else { + if (cmode < ARRAY_SIZE(mv88e6185_phy_interface_modes) && + mv88e6185_phy_interface_modes[cmode]) + __set_bit(mv88e6185_phy_interface_modes[cmode], + config->supported_interfaces); + + config->mac_capabilities |= MAC_1000FD; + } +} + static void mv88e6185_phylink_get_caps(struct mv88e6xxx_chip *chip, int port, struct phylink_config *config) { @@ -1606,15 +1625,16 @@ static int mv88e6xxx_pvt_map(struct mv88e6xxx_chip *chip, int dev, int port) ds = dsa_switch_find(dst->index, dev); dp = ds ? dsa_to_port(ds, port) : NULL; - if (dp && dp->lag_dev) { + if (dp && dp->lag) { /* As the PVT is used to limit flooding of * FORWARD frames, which use the LAG ID as the * source port, we must translate dev/port to * the special "LAG device" in the PVT, using - * the LAG ID as the port number. + * the LAG ID (one-based) as the port number + * (zero-based). */ dev = MV88E6XXX_G2_PVT_ADDR_DEV_TRUNK; - port = dsa_lag_id(dst, dp->lag_dev); + port = dsa_port_lag_id_get(dp) - 1; } } @@ -1652,7 +1672,7 @@ static void mv88e6xxx_port_fast_age(struct dsa_switch *ds, int port) struct mv88e6xxx_chip *chip = ds->priv; int err; - if (dsa_to_port(ds, port)->lag_dev) + if (dsa_to_port(ds, port)->lag) /* Hardware is incapable of fast-aging a LAG through a * regular ATU move operation. Until we have something * more fancy in place this is a no-op. @@ -2404,6 +2424,13 @@ static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, if (!mv88e6xxx_max_vid(chip)) return -EOPNOTSUPP; + /* The ATU removal procedure needs the FID to be mapped in the VTU, + * but FDB deletion runs concurrently with VLAN deletion. Flush the DSA + * switchdev workqueue to ensure that all FDB entries are deleted + * before we remove the VLAN. 
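
The one-character mt753x_phylink_validate() change above is a De Morgan fix. The intent is "advertise 10/100 link modes only when the interface is neither TRGMII nor an 802.3z mode", but `!= TRGMII || !8023z` is true for every interface that can actually occur (no mode is both), so 10/100 leaked onto 1000base-X/2500base-X links. A two-function truth-table check:

#include <stdbool.h>
#include <stdio.h>

static bool old_cond(bool trgmii, bool is_8023z)
{
	return !trgmii || !is_8023z;	/* buggy: true unless both hold */
}

static bool new_cond(bool trgmii, bool is_8023z)
{
	return !trgmii && !is_8023z;	/* intended: true only if neither */
}

int main(void)
{
	/* (trgmii, 8023z) = (1,1) cannot occur for a real interface */
	printf("plain MII : old=%d new=%d\n", old_cond(0, 0), new_cond(0, 0));
	printf("TRGMII    : old=%d new=%d\n", old_cond(1, 0), new_cond(1, 0));
	printf("802.3z    : old=%d new=%d\n", old_cond(0, 1), new_cond(0, 1));
	return 0;
}
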
+ */ + dsa_flush_workqueue(); + mv88e6xxx_reg_lock(chip); err = mv88e6xxx_port_get_pvid(chip, port, &pvid); @@ -2429,7 +2456,8 @@ unlock: } static int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid) + const unsigned char *addr, u16 vid, + struct dsa_db db) { struct mv88e6xxx_chip *chip = ds->priv; int err; @@ -2443,7 +2471,8 @@ static int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port, } static int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid) + const unsigned char *addr, u16 vid, + struct dsa_db db) { struct mv88e6xxx_chip *chip = ds->priv; int err; @@ -2589,7 +2618,8 @@ static int mv88e6xxx_map_virtual_bridge_to_pvt(struct dsa_switch *ds, static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge, - bool *tx_fwd_offload) + bool *tx_fwd_offload, + struct netlink_ext_ack *extack) { struct mv88e6xxx_chip *chip = ds->priv; int err; @@ -2655,7 +2685,8 @@ static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port, static int mv88e6xxx_crosschip_bridge_join(struct dsa_switch *ds, int tree_index, int sw_index, - int port, struct dsa_bridge bridge) + int port, struct dsa_bridge bridge, + struct netlink_ext_ack *extack) { struct mv88e6xxx_chip *chip = ds->priv; int err; @@ -3803,7 +3834,7 @@ static const struct mv88e6xxx_ops mv88e6095_ops = { .reset = mv88e6185_g1_reset, .vtu_getnext = mv88e6185_g1_vtu_getnext, .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge, - .phylink_get_caps = mv88e6185_phylink_get_caps, + .phylink_get_caps = mv88e6095_phylink_get_caps, .set_max_frame_size = mv88e6185_g1_set_max_frame_size, }; @@ -3850,7 +3881,7 @@ static const struct mv88e6xxx_ops mv88e6097_ops = { .rmu_disable = mv88e6085_g1_rmu_disable, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, - .phylink_get_caps = mv88e6185_phylink_get_caps, + .phylink_get_caps = mv88e6095_phylink_get_caps, .set_max_frame_size = mv88e6185_g1_set_max_frame_size, }; @@ -5975,7 +6006,8 @@ static int mv88e6xxx_change_tag_protocol(struct dsa_switch *ds, int port, } static int mv88e6xxx_port_mdb_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_mdb *mdb) + const struct switchdev_obj_port_mdb *mdb, + struct dsa_db db) { struct mv88e6xxx_chip *chip = ds->priv; int err; @@ -5989,7 +6021,8 @@ static int mv88e6xxx_port_mdb_add(struct dsa_switch *ds, int port, } static int mv88e6xxx_port_mdb_del(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_mdb *mdb) + const struct switchdev_obj_port_mdb *mdb, + struct dsa_db db) { struct mv88e6xxx_chip *chip = ds->priv; int err; @@ -6077,7 +6110,7 @@ static int mv88e6xxx_port_pre_bridge_flags(struct dsa_switch *ds, int port, const struct mv88e6xxx_ops *ops; if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | - BR_BCAST_FLOOD)) + BR_BCAST_FLOOD | BR_PORT_LOCKED)) return -EINVAL; ops = chip->info->ops; @@ -6135,6 +6168,13 @@ static int mv88e6xxx_port_bridge_flags(struct dsa_switch *ds, int port, goto out; } + if (flags.mask & BR_PORT_LOCKED) { + bool locked = !!(flags.val & BR_PORT_LOCKED); + + err = mv88e6xxx_port_set_lock(chip, port, locked); + if (err) + goto out; + } out: mv88e6xxx_reg_unlock(chip); @@ -6142,21 +6182,20 @@ out: } static bool mv88e6xxx_lag_can_offload(struct dsa_switch *ds, - struct net_device *lag, + struct dsa_lag lag, struct netdev_lag_upper_info *info) { struct mv88e6xxx_chip *chip = ds->priv; struct dsa_port *dp; - int id, members = 0; + int members = 0; 
if (!mv88e6xxx_has_lag(chip)) return false; - id = dsa_lag_id(ds->dst, lag); - if (id < 0 || id >= ds->num_lag_ids) + if (!lag.id) return false; - dsa_lag_foreach_port(dp, ds->dst, lag) + dsa_lag_foreach_port(dp, ds->dst, &lag) /* Includes the port joining the LAG */ members++; @@ -6176,20 +6215,21 @@ static bool mv88e6xxx_lag_can_offload(struct dsa_switch *ds, return true; } -static int mv88e6xxx_lag_sync_map(struct dsa_switch *ds, struct net_device *lag) +static int mv88e6xxx_lag_sync_map(struct dsa_switch *ds, struct dsa_lag lag) { struct mv88e6xxx_chip *chip = ds->priv; struct dsa_port *dp; u16 map = 0; int id; - id = dsa_lag_id(ds->dst, lag); + /* DSA LAG IDs are one-based, hardware is zero-based */ + id = lag.id - 1; /* Build the map of all ports to distribute flows destined for * this LAG. This can be either a local user port, or a DSA * port if the LAG port is on a remote chip. */ - dsa_lag_foreach_port(dp, ds->dst, lag) + dsa_lag_foreach_port(dp, ds->dst, &lag) map |= BIT(dsa_towards_port(ds, dp->ds->index, dp->index)); return mv88e6xxx_g2_trunk_mapping_write(chip, id, map); @@ -6234,8 +6274,8 @@ static int mv88e6xxx_lag_sync_masks(struct dsa_switch *ds) { struct mv88e6xxx_chip *chip = ds->priv; unsigned int id, num_tx; - struct net_device *lag; struct dsa_port *dp; + struct dsa_lag *lag; int i, err, nth; u16 mask[8]; u16 ivec; @@ -6244,8 +6284,8 @@ static int mv88e6xxx_lag_sync_masks(struct dsa_switch *ds) ivec = BIT(mv88e6xxx_num_ports(chip)) - 1; /* Disable all masks for ports that _are_ members of a LAG. */ - list_for_each_entry(dp, &ds->dst->ports, list) { - if (!dp->lag_dev || dp->ds != ds) + dsa_switch_for_each_port(dp, ds) { + if (!dp->lag) continue; ivec &= ~BIT(dp->index); @@ -6258,7 +6298,7 @@ static int mv88e6xxx_lag_sync_masks(struct dsa_switch *ds) * are in the Tx set. 
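
With the move from `struct net_device *lag` to `struct dsa_lag`, the DSA core hands drivers a one-based LAG ID (`lag.id`, where 0 means "not in a LAG"), while the mv88e6xxx trunk registers are zero-based, hence the recurring `id = lag.id - 1` in the hunks above. A tiny model of the convention:

#include <stdio.h>

struct dsa_lag { int id; };	/* 1-based; 0 = no LAG (modeled on the new API) */

/* Map a DSA LAG to a hardware trunk ID, or -1 if the port has no LAG */
static int hw_trunk_id(struct dsa_lag lag)
{
	if (!lag.id)
		return -1;
	return lag.id - 1;
}

int main(void)
{
	struct dsa_lag none = { 0 }, first = { 1 };

	printf("no lag -> %d, first lag -> trunk %d\n",
	       hw_trunk_id(none), hw_trunk_id(first));
	return 0;
}
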
*/ dsa_lags_foreach_id(id, ds->dst) { - lag = dsa_lag_dev(ds->dst, id); + lag = dsa_lag_by_id(ds->dst, id); if (!lag) continue; @@ -6294,7 +6334,7 @@ static int mv88e6xxx_lag_sync_masks(struct dsa_switch *ds) } static int mv88e6xxx_lag_sync_masks_map(struct dsa_switch *ds, - struct net_device *lag) + struct dsa_lag lag) { int err; @@ -6318,7 +6358,7 @@ static int mv88e6xxx_port_lag_change(struct dsa_switch *ds, int port) } static int mv88e6xxx_port_lag_join(struct dsa_switch *ds, int port, - struct net_device *lag, + struct dsa_lag lag, struct netdev_lag_upper_info *info) { struct mv88e6xxx_chip *chip = ds->priv; @@ -6327,7 +6367,8 @@ static int mv88e6xxx_port_lag_join(struct dsa_switch *ds, int port, if (!mv88e6xxx_lag_can_offload(ds, lag, info)) return -EOPNOTSUPP; - id = dsa_lag_id(ds->dst, lag); + /* DSA LAG IDs are one-based */ + id = lag.id - 1; mv88e6xxx_reg_lock(chip); @@ -6350,7 +6391,7 @@ err_unlock: } static int mv88e6xxx_port_lag_leave(struct dsa_switch *ds, int port, - struct net_device *lag) + struct dsa_lag lag) { struct mv88e6xxx_chip *chip = ds->priv; int err_sync, err_trunk; @@ -6375,7 +6416,7 @@ static int mv88e6xxx_crosschip_lag_change(struct dsa_switch *ds, int sw_index, } static int mv88e6xxx_crosschip_lag_join(struct dsa_switch *ds, int sw_index, - int port, struct net_device *lag, + int port, struct dsa_lag lag, struct netdev_lag_upper_info *info) { struct mv88e6xxx_chip *chip = ds->priv; @@ -6398,7 +6439,7 @@ unlock: } static int mv88e6xxx_crosschip_lag_leave(struct dsa_switch *ds, int sw_index, - int port, struct net_device *lag) + int port, struct dsa_lag lag) { struct mv88e6xxx_chip *chip = ds->priv; int err_sync, err_pvt; diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.c b/drivers/net/dsa/mv88e6xxx/hwtstamp.c index 389f8a6ec0ab..331b4ca089ff 100644 --- a/drivers/net/dsa/mv88e6xxx/hwtstamp.c +++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.c @@ -301,7 +301,7 @@ static void mv88e6xxx_get_rxts(struct mv88e6xxx_chip *chip, shwt->hwtstamp = ns_to_ktime(ns); status &= ~MV88E6XXX_PTP_TS_VALID; } - netif_rx_ni(skb); + netif_rx(skb); } } diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c index ceb450113f88..795b3128768f 100644 --- a/drivers/net/dsa/mv88e6xxx/port.c +++ b/drivers/net/dsa/mv88e6xxx/port.c @@ -550,6 +550,9 @@ static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port, mode = PHY_INTERFACE_MODE_1000BASEX; switch (mode) { + case PHY_INTERFACE_MODE_RMII: + cmode = MV88E6XXX_PORT_STS_CMODE_RMII; + break; case PHY_INTERFACE_MODE_1000BASEX: cmode = MV88E6XXX_PORT_STS_CMODE_1000BASEX; break; @@ -610,6 +613,8 @@ static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port, chip->ports[port].cmode = cmode; lane = mv88e6xxx_serdes_get_lane(chip, port); + if (lane == -ENODEV) + return 0; if (lane < 0) return lane; @@ -1234,6 +1239,35 @@ int mv88e6xxx_port_set_mirror(struct mv88e6xxx_chip *chip, int port, return err; } +int mv88e6xxx_port_set_lock(struct mv88e6xxx_chip *chip, int port, + bool locked) +{ + u16 reg; + int err; + + err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL0, ®); + if (err) + return err; + + reg &= ~MV88E6XXX_PORT_CTL0_SA_FILT_MASK; + if (locked) + reg |= MV88E6XXX_PORT_CTL0_SA_FILT_DROP_ON_LOCK; + + err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL0, reg); + if (err) + return err; + + err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_ASSOC_VECTOR, ®); + if (err) + return err; + + reg &= ~MV88E6XXX_PORT_ASSOC_VECTOR_LOCKED_PORT; + if (locked) + reg |= 
MV88E6XXX_PORT_ASSOC_VECTOR_LOCKED_PORT; + + return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_ASSOC_VECTOR, reg); +} + int mv88e6xxx_port_set_8021q_mode(struct mv88e6xxx_chip *chip, int port, u16 mode) { diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h index 3a13db2ec27b..e0a705d82019 100644 --- a/drivers/net/dsa/mv88e6xxx/port.h +++ b/drivers/net/dsa/mv88e6xxx/port.h @@ -147,7 +147,11 @@ /* Offset 0x04: Port Control Register */ #define MV88E6XXX_PORT_CTL0 0x04 #define MV88E6XXX_PORT_CTL0_USE_CORE_TAG 0x8000 -#define MV88E6XXX_PORT_CTL0_DROP_ON_LOCK 0x4000 +#define MV88E6XXX_PORT_CTL0_SA_FILT_MASK 0xc000 +#define MV88E6XXX_PORT_CTL0_SA_FILT_DISABLED 0x0000 +#define MV88E6XXX_PORT_CTL0_SA_FILT_DROP_ON_LOCK 0x4000 +#define MV88E6XXX_PORT_CTL0_SA_FILT_DROP_ON_UNLOCK 0x8000 +#define MV88E6XXX_PORT_CTL0_SA_FILT_DROP_ON_CPU 0xc000 #define MV88E6XXX_PORT_CTL0_EGRESS_MODE_MASK 0x3000 #define MV88E6XXX_PORT_CTL0_EGRESS_MODE_UNMODIFIED 0x0000 #define MV88E6XXX_PORT_CTL0_EGRESS_MODE_UNTAGGED 0x1000 @@ -370,6 +374,9 @@ int mv88e6xxx_port_set_fid(struct mv88e6xxx_chip *chip, int port, u16 fid); int mv88e6xxx_port_get_pvid(struct mv88e6xxx_chip *chip, int port, u16 *pvid); int mv88e6xxx_port_set_pvid(struct mv88e6xxx_chip *chip, int port, u16 pvid); +int mv88e6xxx_port_set_lock(struct mv88e6xxx_chip *chip, int port, + bool locked); + int mv88e6xxx_port_set_8021q_mode(struct mv88e6xxx_chip *chip, int port, u16 mode); int mv88e6095_port_tag_remap(struct mv88e6xxx_chip *chip, int port); diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index 9957772201d5..35b436a491e1 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -25,21 +25,151 @@ #include <net/dsa.h> #include "felix.h" -static int felix_tag_8021q_rxvlan_add(struct felix *felix, int port, u16 vid, - bool pvid, bool untagged) +/* Translate the DSA database API into the ocelot switch library API, + * which uses VID 0 for all ports that aren't part of a bridge, + * and expects the bridge_dev to be NULL in that case. + */ +static struct net_device *felix_classify_db(struct dsa_db db) +{ + switch (db.type) { + case DSA_DB_PORT: + case DSA_DB_LAG: + return NULL; + case DSA_DB_BRIDGE: + return db.bridge.dev; + default: + return ERR_PTR(-EOPNOTSUPP); + } +} + +/* We are called before felix_npi_port_init(), so ocelot->npi is -1. 
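
mv88e6xxx_port_set_lock() above turns BR_PORT_LOCKED into two register updates: the SA-filtering field of Port Control 0 (now treated as a 2-bit field, of which drop-on-lock is one encoding) and the LockedPort bit of the Port Association Vector. A standalone model of the read-modify-write; the CTL0 encodings are copied from the port.h hunk, while the ASSOC_VECTOR bit value is an assumption for illustration:

#include <stdint.h>
#include <stdio.h>

#define SA_FILT_MASK		0xc000	/* PORT_CTL0 bits 15:14 */
#define SA_FILT_DROP_ON_LOCK	0x4000
#define ASSOC_LOCKED_PORT	0x2000	/* assumed bit value, illustration only */

static uint16_t ctl0 = 0x007f, assoc = 0x0001;	/* fake register contents */

static void set_lock(int locked)
{
	ctl0 &= ~SA_FILT_MASK;		/* clear the whole 2-bit field first */
	if (locked)
		ctl0 |= SA_FILT_DROP_ON_LOCK;

	assoc &= ~ASSOC_LOCKED_PORT;
	if (locked)
		assoc |= ASSOC_LOCKED_PORT;
}

int main(void)
{
	set_lock(1);
	printf("locked:   ctl0=0x%04x assoc=0x%04x\n", ctl0, assoc);
	set_lock(0);
	printf("unlocked: ctl0=0x%04x assoc=0x%04x\n", ctl0, assoc);
	return 0;
}
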
*/ +static int felix_migrate_fdbs_to_npi_port(struct dsa_switch *ds, int port, + const unsigned char *addr, u16 vid, + struct dsa_db db) +{ + struct net_device *bridge_dev = felix_classify_db(db); + struct ocelot *ocelot = ds->priv; + int cpu = ocelot->num_phys_ports; + int err; + + err = ocelot_fdb_del(ocelot, port, addr, vid, bridge_dev); + if (err) + return err; + + return ocelot_fdb_add(ocelot, cpu, addr, vid, bridge_dev); +} + +static int felix_migrate_mdbs_to_npi_port(struct dsa_switch *ds, int port, + const unsigned char *addr, u16 vid, + struct dsa_db db) +{ + struct net_device *bridge_dev = felix_classify_db(db); + struct switchdev_obj_port_mdb mdb; + struct ocelot *ocelot = ds->priv; + int cpu = ocelot->num_phys_ports; + int err; + + memset(&mdb, 0, sizeof(mdb)); + ether_addr_copy(mdb.addr, addr); + mdb.vid = vid; + + err = ocelot_port_mdb_del(ocelot, port, &mdb, bridge_dev); + if (err) + return err; + + return ocelot_port_mdb_add(ocelot, cpu, &mdb, bridge_dev); +} + +static void felix_migrate_pgid_bit(struct dsa_switch *ds, int from, int to, + int pgid) +{ + struct ocelot *ocelot = ds->priv; + bool on; + u32 val; + + val = ocelot_read_rix(ocelot, ANA_PGID_PGID, pgid); + on = !!(val & BIT(from)); + val &= ~BIT(from); + if (on) + val |= BIT(to); + else + val &= ~BIT(to); + + ocelot_write_rix(ocelot, val, ANA_PGID_PGID, pgid); +} + +static void felix_migrate_flood_to_npi_port(struct dsa_switch *ds, int port) +{ + struct ocelot *ocelot = ds->priv; + + felix_migrate_pgid_bit(ds, port, ocelot->num_phys_ports, PGID_UC); + felix_migrate_pgid_bit(ds, port, ocelot->num_phys_ports, PGID_MC); + felix_migrate_pgid_bit(ds, port, ocelot->num_phys_ports, PGID_BC); +} + +static void +felix_migrate_flood_to_tag_8021q_port(struct dsa_switch *ds, int port) +{ + struct ocelot *ocelot = ds->priv; + + felix_migrate_pgid_bit(ds, ocelot->num_phys_ports, port, PGID_UC); + felix_migrate_pgid_bit(ds, ocelot->num_phys_ports, port, PGID_MC); + felix_migrate_pgid_bit(ds, ocelot->num_phys_ports, port, PGID_BC); +} + +/* ocelot->npi was already set to -1 by felix_npi_port_deinit, so + * ocelot_fdb_add() will not redirect FDB entries towards the + * CPU port module here, which is what we want. + */ +static int +felix_migrate_fdbs_to_tag_8021q_port(struct dsa_switch *ds, int port, + const unsigned char *addr, u16 vid, + struct dsa_db db) +{ + struct net_device *bridge_dev = felix_classify_db(db); + struct ocelot *ocelot = ds->priv; + int cpu = ocelot->num_phys_ports; + int err; + + err = ocelot_fdb_del(ocelot, cpu, addr, vid, bridge_dev); + if (err) + return err; + + return ocelot_fdb_add(ocelot, port, addr, vid, bridge_dev); +} + +static int +felix_migrate_mdbs_to_tag_8021q_port(struct dsa_switch *ds, int port, + const unsigned char *addr, u16 vid, + struct dsa_db db) +{ + struct net_device *bridge_dev = felix_classify_db(db); + struct switchdev_obj_port_mdb mdb; + struct ocelot *ocelot = ds->priv; + int cpu = ocelot->num_phys_ports; + int err; + + memset(&mdb, 0, sizeof(mdb)); + ether_addr_copy(mdb.addr, addr); + mdb.vid = vid; + + err = ocelot_port_mdb_del(ocelot, cpu, &mdb, bridge_dev); + if (err) + return err; + + return ocelot_port_mdb_add(ocelot, port, &mdb, bridge_dev); +} + +/* Set up VCAP ES0 rules for pushing a tag_8021q VLAN towards the CPU such that + * the tagger can perform RX source port identification. 
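
felix_migrate_pgid_bit() above moves one port's bit to another position within a PGID flood mask, which is how flooding ownership follows the CPU port when felix switches between the NPI-based and tag_8021q taggers. The bit move in isolation:

#include <stdint.h>
#include <stdio.h>

/* Transfer bit 'from' to bit 'to', preserving its current value */
static uint32_t move_bit(uint32_t val, int from, int to)
{
	int on = !!(val & (1u << from));

	val &= ~(1u << from);
	if (on)
		val |= 1u << to;
	else
		val &= ~(1u << to);
	return val;
}

int main(void)
{
	/* port 2 is flooded; migrate its role to port 6 (e.g. the CPU port) */
	printf("0x%02x -> 0x%02x\n", 0x04u, move_bit(0x04, 2, 6));
	return 0;
}
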
+ */ +static int felix_tag_8021q_vlan_add_rx(struct felix *felix, int port, u16 vid) { struct ocelot_vcap_filter *outer_tagging_rule; struct ocelot *ocelot = &felix->ocelot; struct dsa_switch *ds = felix->ds; int key_length, upstream, err; - /* We don't need to install the rxvlan into the other ports' filtering - * tables, because we're just pushing the rxvlan when sending towards - * the CPU - */ - if (!pvid) - return 0; - key_length = ocelot->vcap[VCAP_ES0].keys[VCAP_ES0_IGR_PORT].length; upstream = dsa_upstream_port(ds, port); @@ -50,7 +180,7 @@ static int felix_tag_8021q_rxvlan_add(struct felix *felix, int port, u16 vid, outer_tagging_rule->key_type = OCELOT_VCAP_KEY_ANY; outer_tagging_rule->prio = 1; - outer_tagging_rule->id.cookie = port; + outer_tagging_rule->id.cookie = OCELOT_VCAP_ES0_TAG_8021Q_RXVLAN(ocelot, port); outer_tagging_rule->id.tc_offload = false; outer_tagging_rule->block_id = VCAP_ES0; outer_tagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD; @@ -71,21 +201,32 @@ static int felix_tag_8021q_rxvlan_add(struct felix *felix, int port, u16 vid, return err; } -static int felix_tag_8021q_txvlan_add(struct felix *felix, int port, u16 vid, - bool pvid, bool untagged) +static int felix_tag_8021q_vlan_del_rx(struct felix *felix, int port, u16 vid) +{ + struct ocelot_vcap_filter *outer_tagging_rule; + struct ocelot_vcap_block *block_vcap_es0; + struct ocelot *ocelot = &felix->ocelot; + + block_vcap_es0 = &ocelot->block[VCAP_ES0]; + + outer_tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_es0, + port, false); + if (!outer_tagging_rule) + return -ENOENT; + + return ocelot_vcap_filter_del(ocelot, outer_tagging_rule); +} + +/* Set up VCAP IS1 rules for stripping the tag_8021q VLAN on TX and VCAP IS2 + * rules for steering those tagged packets towards the correct destination port + */ +static int felix_tag_8021q_vlan_add_tx(struct felix *felix, int port, u16 vid) { struct ocelot_vcap_filter *untagging_rule, *redirect_rule; struct ocelot *ocelot = &felix->ocelot; struct dsa_switch *ds = felix->ds; int upstream, err; - /* tag_8021q.c assumes we are implementing this via port VLAN - * membership, which we aren't. So we don't need to add any VCAP filter - * for the CPU port. 
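
Note the cookie change in the ES0/IS1/IS2 rules above: previously every tag_8021q rule used the bare port number as its cookie, so rules from different blocks or features could collide when looked up by ID. The OCELOT_VCAP_*_TAG_8021Q_* macros carve out disjoint cookie ranges per rule family. A hypothetical layout illustrating the idea; the real macros live in ocelot's headers and may compute the ranges differently:

#include <stdio.h>

#define NUM_PHYS_PORTS	6

/* Hypothetical disjoint cookie ranges, one stride per rule family */
#define COOKIE_ES0_RXVLAN(port)	(0 * NUM_PHYS_PORTS + (port))
#define COOKIE_IS1_TXVLAN(port)	(1 * NUM_PHYS_PORTS + (port))
#define COOKIE_IS2_TXVLAN(port)	(2 * NUM_PHYS_PORTS + (port))

int main(void)
{
	int port;

	for (port = 0; port < 3; port++)
		printf("port %d: es0=%d is1=%d is2=%d\n", port,
		       COOKIE_ES0_RXVLAN(port), COOKIE_IS1_TXVLAN(port),
		       COOKIE_IS2_TXVLAN(port));
	return 0;
}
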
- */ - if (ocelot->ports[port]->is_dsa_8021q_cpu) - return 0; - untagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL); if (!untagging_rule) return -ENOMEM; @@ -103,7 +244,7 @@ static int felix_tag_8021q_txvlan_add(struct felix *felix, int port, u16 vid, untagging_rule->vlan.vid.value = vid; untagging_rule->vlan.vid.mask = VLAN_VID_MASK; untagging_rule->prio = 1; - untagging_rule->id.cookie = port; + untagging_rule->id.cookie = OCELOT_VCAP_IS1_TAG_8021Q_TXVLAN(ocelot, port); untagging_rule->id.tc_offload = false; untagging_rule->block_id = VCAP_IS1; untagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD; @@ -124,7 +265,7 @@ static int felix_tag_8021q_txvlan_add(struct felix *felix, int port, u16 vid, redirect_rule->ingress_port_mask = BIT(upstream); redirect_rule->pag = port; redirect_rule->prio = 1; - redirect_rule->id.cookie = port; + redirect_rule->id.cookie = OCELOT_VCAP_IS2_TAG_8021Q_TXVLAN(ocelot, port); redirect_rule->id.tc_offload = false; redirect_rule->block_id = VCAP_IS2; redirect_rule->type = OCELOT_VCAP_FILTER_OFFLOAD; @@ -142,49 +283,7 @@ static int felix_tag_8021q_txvlan_add(struct felix *felix, int port, u16 vid, return 0; } -static int felix_tag_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid, - u16 flags) -{ - bool untagged = flags & BRIDGE_VLAN_INFO_UNTAGGED; - bool pvid = flags & BRIDGE_VLAN_INFO_PVID; - struct ocelot *ocelot = ds->priv; - - if (vid_is_dsa_8021q_rxvlan(vid)) - return felix_tag_8021q_rxvlan_add(ocelot_to_felix(ocelot), - port, vid, pvid, untagged); - - if (vid_is_dsa_8021q_txvlan(vid)) - return felix_tag_8021q_txvlan_add(ocelot_to_felix(ocelot), - port, vid, pvid, untagged); - - return 0; -} - -static int felix_tag_8021q_rxvlan_del(struct felix *felix, int port, u16 vid) -{ - struct ocelot_vcap_filter *outer_tagging_rule; - struct ocelot_vcap_block *block_vcap_es0; - struct ocelot *ocelot = &felix->ocelot; - - block_vcap_es0 = &ocelot->block[VCAP_ES0]; - - outer_tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_es0, - port, false); - /* In rxvlan_add, we had the "if (!pvid) return 0" logic to avoid - * installing outer tagging ES0 rules where they weren't needed. - * But in rxvlan_del, the API doesn't give us the "flags" anymore, - * so that forces us to be slightly sloppy here, and just assume that - * if we didn't find an outer_tagging_rule it means that there was - * none in the first place, i.e. rxvlan_del is called on a non-pvid - * port. This is most probably true though. 
- */ - if (!outer_tagging_rule) - return 0; - - return ocelot_vcap_filter_del(ocelot, outer_tagging_rule); -} - -static int felix_tag_8021q_txvlan_del(struct felix *felix, int port, u16 vid) +static int felix_tag_8021q_vlan_del_tx(struct felix *felix, int port, u16 vid) { struct ocelot_vcap_filter *untagging_rule, *redirect_rule; struct ocelot_vcap_block *block_vcap_is1; @@ -192,16 +291,13 @@ static int felix_tag_8021q_txvlan_del(struct felix *felix, int port, u16 vid) struct ocelot *ocelot = &felix->ocelot; int err; - if (ocelot->ports[port]->is_dsa_8021q_cpu) - return 0; - block_vcap_is1 = &ocelot->block[VCAP_IS1]; block_vcap_is2 = &ocelot->block[VCAP_IS2]; untagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is1, port, false); if (!untagging_rule) - return 0; + return -ENOENT; err = ocelot_vcap_filter_del(ocelot, untagging_rule); if (err) @@ -210,22 +306,54 @@ static int felix_tag_8021q_txvlan_del(struct felix *felix, int port, u16 vid) redirect_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is2, port, false); if (!redirect_rule) - return 0; + return -ENOENT; return ocelot_vcap_filter_del(ocelot, redirect_rule); } +static int felix_tag_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid, + u16 flags) +{ + struct ocelot *ocelot = ds->priv; + int err; + + /* tag_8021q.c assumes we are implementing this via port VLAN + * membership, which we aren't. So we don't need to add any VCAP filter + * for the CPU port. + */ + if (!dsa_is_user_port(ds, port)) + return 0; + + err = felix_tag_8021q_vlan_add_rx(ocelot_to_felix(ocelot), port, vid); + if (err) + return err; + + err = felix_tag_8021q_vlan_add_tx(ocelot_to_felix(ocelot), port, vid); + if (err) { + felix_tag_8021q_vlan_del_rx(ocelot_to_felix(ocelot), port, vid); + return err; + } + + return 0; +} + static int felix_tag_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid) { struct ocelot *ocelot = ds->priv; + int err; + + if (!dsa_is_user_port(ds, port)) + return 0; - if (vid_is_dsa_8021q_rxvlan(vid)) - return felix_tag_8021q_rxvlan_del(ocelot_to_felix(ocelot), - port, vid); + err = felix_tag_8021q_vlan_del_rx(ocelot_to_felix(ocelot), port, vid); + if (err) + return err; - if (vid_is_dsa_8021q_txvlan(vid)) - return felix_tag_8021q_txvlan_del(ocelot_to_felix(ocelot), - port, vid); + err = felix_tag_8021q_vlan_del_tx(ocelot_to_felix(ocelot), port, vid); + if (err) { + felix_tag_8021q_vlan_add_rx(ocelot_to_felix(ocelot), port, vid); + return err; + } return 0; } @@ -241,8 +369,7 @@ static void felix_8021q_cpu_port_init(struct ocelot *ocelot, int port) { mutex_lock(&ocelot->fwd_domain_lock); - ocelot->ports[port]->is_dsa_8021q_cpu = true; - ocelot->npi = -1; + ocelot_port_set_dsa_8021q_cpu(ocelot, port); /* Overwrite PGID_CPU with the non-tagging port */ ocelot_write_rix(ocelot, BIT(port), ANA_PGID_PGID, PGID_CPU); @@ -256,7 +383,7 @@ static void felix_8021q_cpu_port_deinit(struct ocelot *ocelot, int port) { mutex_lock(&ocelot->fwd_domain_lock); - ocelot->ports[port]->is_dsa_8021q_cpu = false; + ocelot_port_unset_dsa_8021q_cpu(ocelot, port); /* Restore PGID_CPU */ ocelot_write_rix(ocelot, BIT(ocelot->num_phys_ports), ANA_PGID_PGID, @@ -267,148 +394,81 @@ static void felix_8021q_cpu_port_deinit(struct ocelot *ocelot, int port) mutex_unlock(&ocelot->fwd_domain_lock); } -/* Set up a VCAP IS2 rule for delivering PTP frames to the CPU port module. - * If the quirk_no_xtr_irq is in place, then also copy those PTP frames to the - * tag_8021q CPU port. 
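
The restructured felix_tag_8021q_vlan_add()/_del() treat the RX and TX rule sets as one transaction: if installing the TX half fails, the RX half is rolled back (and symmetrically on delete), and the lookup helpers now return -ENOENT instead of silently succeeding when a rule is missing. The shape of that pattern, standalone:

#include <errno.h>
#include <stdio.h>

static int add_rx(int port) { (void)port; return 0; }
static int del_rx(int port) { (void)port; return 0; }
static int add_tx(int port) { (void)port; return -ENOMEM; } /* simulate failure */

static int vlan_add(int port)
{
	int err = add_rx(port);

	if (err)
		return err;
	err = add_tx(port);
	if (err) {
		del_rx(port);	/* roll back the half that succeeded */
		return err;
	}
	return 0;
}

int main(void)
{
	printf("vlan_add -> %d\n", vlan_add(0));
	return 0;
}
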
+/* On switches with no extraction IRQ wired, trapped packets need to be + * replicated over Ethernet as well, otherwise we'd get no notification of + * their arrival when using the ocelot-8021q tagging protocol. */ -static int felix_setup_mmio_filtering(struct felix *felix) +static int felix_update_trapping_destinations(struct dsa_switch *ds, + bool using_tag_8021q) { - unsigned long user_ports = dsa_user_ports(felix->ds); - struct ocelot_vcap_filter *redirect_rule; - struct ocelot_vcap_filter *tagging_rule; - struct ocelot *ocelot = &felix->ocelot; - struct dsa_switch *ds = felix->ds; - int cpu = -1, port, ret; + struct ocelot *ocelot = ds->priv; + struct felix *felix = ocelot_to_felix(ocelot); + struct ocelot_vcap_filter *trap; + enum ocelot_mask_mode mask_mode; + unsigned long port_mask; + struct dsa_port *dp; + bool cpu_copy_ena; + int cpu = -1, err; - tagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL); - if (!tagging_rule) - return -ENOMEM; + if (!felix->info->quirk_no_xtr_irq) + return 0; - redirect_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL); - if (!redirect_rule) { - kfree(tagging_rule); - return -ENOMEM; + /* Figure out the current CPU port */ + dsa_switch_for_each_cpu_port(dp, ds) { + cpu = dp->index; + break; } - for (port = 0; port < ocelot->num_phys_ports; port++) { - if (dsa_is_cpu_port(ds, port)) { - cpu = port; - break; - } - } + /* We are sure that "cpu" was found, otherwise + * dsa_tree_setup_default_cpu() would have failed earlier. + */ - if (cpu < 0) { - kfree(tagging_rule); - kfree(redirect_rule); - return -EINVAL; - } + /* Make sure all traps are set up for that destination */ + list_for_each_entry(trap, &ocelot->traps, trap_list) { + /* Figure out the current trapping destination */ + if (using_tag_8021q) { + /* Redirect to the tag_8021q CPU port. If timestamps + * are necessary, also copy trapped packets to the CPU + * port module. 
+ */ + mask_mode = OCELOT_MASK_MODE_REDIRECT; + port_mask = BIT(cpu); + cpu_copy_ena = !!trap->take_ts; + } else { + /* Trap packets only to the CPU port module, which is + * redirected to the NPI port (the DSA CPU port) + */ + mask_mode = OCELOT_MASK_MODE_PERMIT_DENY; + port_mask = 0; + cpu_copy_ena = true; + } - tagging_rule->key_type = OCELOT_VCAP_KEY_ETYPE; - *(__be16 *)tagging_rule->key.etype.etype.value = htons(ETH_P_1588); - *(__be16 *)tagging_rule->key.etype.etype.mask = htons(0xffff); - tagging_rule->ingress_port_mask = user_ports; - tagging_rule->prio = 1; - tagging_rule->id.cookie = ocelot->num_phys_ports; - tagging_rule->id.tc_offload = false; - tagging_rule->block_id = VCAP_IS1; - tagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD; - tagging_rule->lookup = 0; - tagging_rule->action.pag_override_mask = 0xff; - tagging_rule->action.pag_val = ocelot->num_phys_ports; - - ret = ocelot_vcap_filter_add(ocelot, tagging_rule, NULL); - if (ret) { - kfree(tagging_rule); - kfree(redirect_rule); - return ret; - } + if (trap->action.mask_mode == mask_mode && + trap->action.port_mask == port_mask && + trap->action.cpu_copy_ena == cpu_copy_ena) + continue; - redirect_rule->key_type = OCELOT_VCAP_KEY_ANY; - redirect_rule->ingress_port_mask = user_ports; - redirect_rule->pag = ocelot->num_phys_ports; - redirect_rule->prio = 1; - redirect_rule->id.cookie = ocelot->num_phys_ports; - redirect_rule->id.tc_offload = false; - redirect_rule->block_id = VCAP_IS2; - redirect_rule->type = OCELOT_VCAP_FILTER_OFFLOAD; - redirect_rule->lookup = 0; - redirect_rule->action.cpu_copy_ena = true; - if (felix->info->quirk_no_xtr_irq) { - /* Redirect to the tag_8021q CPU but also copy PTP packets to - * the CPU port module - */ - redirect_rule->action.mask_mode = OCELOT_MASK_MODE_REDIRECT; - redirect_rule->action.port_mask = BIT(cpu); - } else { - /* Trap PTP packets only to the CPU port module (which is - * redirected to the NPI port) - */ - redirect_rule->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY; - redirect_rule->action.port_mask = 0; - } + trap->action.mask_mode = mask_mode; + trap->action.port_mask = port_mask; + trap->action.cpu_copy_ena = cpu_copy_ena; - ret = ocelot_vcap_filter_add(ocelot, redirect_rule, NULL); - if (ret) { - ocelot_vcap_filter_del(ocelot, tagging_rule); - kfree(redirect_rule); - return ret; + err = ocelot_vcap_filter_replace(ocelot, trap); + if (err) + return err; } - /* The ownership of the CPU port module's queues might have just been - * transferred to the tag_8021q tagger from the NPI-based tagger. - * So there might still be all sorts of crap in the queues. On the - * other hand, the MMIO-based matching of PTP frames is very brittle, - * so we need to be careful that there are no extra frames to be - * dequeued over MMIO, since we would never know to discard them. 
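
felix_update_trapping_destinations() generalizes the old PTP-only MMIO filtering: every VCAP trap on ocelot->traps is rewritten in place according to the active tagger, and rules that already carry the desired action are skipped before calling ocelot_vcap_filter_replace(). The decision table, modeled standalone:

#include <stdbool.h>
#include <stdio.h>

enum mask_mode { PERMIT_DENY, REDIRECT };

struct action {
	enum mask_mode mode;
	unsigned long port_mask;
	bool cpu_copy;
};

static struct action trap_action(bool using_tag_8021q, bool take_ts, int cpu)
{
	struct action a;

	if (using_tag_8021q) {
		/* redirect over Ethernet; copy to the CPU port module
		 * only when a timestamp must be read back
		 */
		a.mode = REDIRECT;
		a.port_mask = 1ul << cpu;
		a.cpu_copy = take_ts;
	} else {
		/* NPI mode: the CPU port module already reaches Linux */
		a.mode = PERMIT_DENY;
		a.port_mask = 0;
		a.cpu_copy = true;
	}
	return a;
}

int main(void)
{
	struct action a = trap_action(true, false, 4);

	printf("mode=%d mask=0x%lx copy=%d\n", a.mode, a.port_mask, a.cpu_copy);
	return 0;
}
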
- */ - ocelot_drain_cpu_queue(ocelot, 0); - return 0; } -static int felix_teardown_mmio_filtering(struct felix *felix) -{ - struct ocelot_vcap_filter *tagging_rule, *redirect_rule; - struct ocelot_vcap_block *block_vcap_is1; - struct ocelot_vcap_block *block_vcap_is2; - struct ocelot *ocelot = &felix->ocelot; - int err; - - block_vcap_is1 = &ocelot->block[VCAP_IS1]; - block_vcap_is2 = &ocelot->block[VCAP_IS2]; - - tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is1, - ocelot->num_phys_ports, - false); - if (!tagging_rule) - return -ENOENT; - - err = ocelot_vcap_filter_del(ocelot, tagging_rule); - if (err) - return err; - - redirect_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is2, - ocelot->num_phys_ports, - false); - if (!redirect_rule) - return -ENOENT; - - return ocelot_vcap_filter_del(ocelot, redirect_rule); -} - static int felix_setup_tag_8021q(struct dsa_switch *ds, int cpu) { struct ocelot *ocelot = ds->priv; - struct felix *felix = ocelot_to_felix(ocelot); - unsigned long cpu_flood; - int port, err; + struct dsa_port *dp; + int err; felix_8021q_cpu_port_init(ocelot, cpu); - for (port = 0; port < ds->num_ports; port++) { - if (dsa_is_unused_port(ds, port)) - continue; - + dsa_switch_for_each_available_port(dp, ds) { /* This overwrites ocelot_init(): * Do not forward BPDU frames to the CPU port module, * for 2 reasons: @@ -421,28 +481,43 @@ static int felix_setup_tag_8021q(struct dsa_switch *ds, int cpu) */ ocelot_write_gix(ocelot, ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0), - ANA_PORT_CPU_FWD_BPDU_CFG, port); + ANA_PORT_CPU_FWD_BPDU_CFG, dp->index); } - /* In tag_8021q mode, the CPU port module is unused, except for PTP - * frames. So we want to disable flooding of any kind to the CPU port - * module, since packets going there will end in a black hole. - */ - cpu_flood = ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)); - ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_UC); - ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_MC); - ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_BC); - err = dsa_tag_8021q_register(ds, htons(ETH_P_8021AD)); if (err) return err; - err = felix_setup_mmio_filtering(felix); + err = dsa_port_walk_fdbs(ds, cpu, felix_migrate_fdbs_to_tag_8021q_port); if (err) goto out_tag_8021q_unregister; + err = dsa_port_walk_mdbs(ds, cpu, felix_migrate_mdbs_to_tag_8021q_port); + if (err) + goto out_migrate_fdbs; + + felix_migrate_flood_to_tag_8021q_port(ds, cpu); + + err = felix_update_trapping_destinations(ds, true); + if (err) + goto out_migrate_flood; + + /* The ownership of the CPU port module's queues might have just been + * transferred to the tag_8021q tagger from the NPI-based tagger. + * So there might still be all sorts of crap in the queues. On the + * other hand, the MMIO-based matching of PTP frames is very brittle, + * so we need to be careful that there are no extra frames to be + * dequeued over MMIO, since we would never know to discard them. 
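
felix_setup_tag_8021q() above is a textbook instance of the kernel's goto-unwind idiom: each migration step (FDBs, MDBs, flood masks, traps) gets a matching label that undoes everything completed so far, in reverse order, so a failure at any point leaves the switch in its previous NPI configuration. The skeleton of that ladder:

#include <errno.h>
#include <stdio.h>

static int migrate_fdbs(int fwd) { (void)fwd; return 0; }
static int migrate_mdbs(int fwd) { (void)fwd; return 0; }
static int update_traps(int on)  { (void)on; return -EIO; } /* simulate failure */

static int setup_tag_8021q(void)
{
	int err;

	err = migrate_fdbs(1);
	if (err)
		return err;
	err = migrate_mdbs(1);
	if (err)
		goto out_unwind_fdbs;
	err = update_traps(1);
	if (err)
		goto out_unwind_mdbs;
	return 0;

out_unwind_mdbs:
	migrate_mdbs(0);	/* reverse order of setup */
out_unwind_fdbs:
	migrate_fdbs(0);
	return err;
}

int main(void)
{
	printf("setup -> %d\n", setup_tag_8021q());
	return 0;
}
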
+ */ + ocelot_drain_cpu_queue(ocelot, 0); + return 0; +out_migrate_flood: + felix_migrate_flood_to_npi_port(ds, cpu); + dsa_port_walk_mdbs(ds, cpu, felix_migrate_mdbs_to_npi_port); +out_migrate_fdbs: + dsa_port_walk_fdbs(ds, cpu, felix_migrate_fdbs_to_npi_port); out_tag_8021q_unregister: dsa_tag_8021q_unregister(ds); return err; @@ -451,27 +526,24 @@ out_tag_8021q_unregister: static void felix_teardown_tag_8021q(struct dsa_switch *ds, int cpu) { struct ocelot *ocelot = ds->priv; - struct felix *felix = ocelot_to_felix(ocelot); - int err, port; + struct dsa_port *dp; + int err; - err = felix_teardown_mmio_filtering(felix); + err = felix_update_trapping_destinations(ds, false); if (err) dev_err(ds->dev, "felix_teardown_mmio_filtering returned %d", err); dsa_tag_8021q_unregister(ds); - for (port = 0; port < ds->num_ports; port++) { - if (dsa_is_unused_port(ds, port)) - continue; - + dsa_switch_for_each_available_port(dp, ds) { /* Restore the logic from ocelot_init: * do not forward BPDU frames to the front ports. */ ocelot_write_gix(ocelot, ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0xffff), ANA_PORT_CPU_FWD_BPDU_CFG, - port); + dp->index); } felix_8021q_cpu_port_deinit(ocelot, cpu); @@ -523,27 +595,26 @@ static void felix_npi_port_deinit(struct ocelot *ocelot, int port) static int felix_setup_tag_npi(struct dsa_switch *ds, int cpu) { struct ocelot *ocelot = ds->priv; - unsigned long cpu_flood; + int err; - felix_npi_port_init(ocelot, cpu); + err = dsa_port_walk_fdbs(ds, cpu, felix_migrate_fdbs_to_npi_port); + if (err) + return err; - /* Include the CPU port module (and indirectly, the NPI port) - * in the forwarding mask for unknown unicast - the hardware - * default value for ANA_FLOODING_FLD_UNICAST excludes - * BIT(ocelot->num_phys_ports), and so does ocelot_init, - * since Ocelot relies on whitelisting MAC addresses towards - * PGID_CPU. - * We do this because DSA does not yet perform RX filtering, - * and the NPI port does not perform source address learning, - * so traffic sent to Linux is effectively unknown from the - * switch's perspective. 
- */ - cpu_flood = ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)); - ocelot_rmw_rix(ocelot, cpu_flood, cpu_flood, ANA_PGID_PGID, PGID_UC); - ocelot_rmw_rix(ocelot, cpu_flood, cpu_flood, ANA_PGID_PGID, PGID_MC); - ocelot_rmw_rix(ocelot, cpu_flood, cpu_flood, ANA_PGID_PGID, PGID_BC); + err = dsa_port_walk_mdbs(ds, cpu, felix_migrate_mdbs_to_npi_port); + if (err) + goto out_migrate_fdbs; + + felix_migrate_flood_to_npi_port(ds, cpu); + + felix_npi_port_init(ocelot, cpu); return 0; + +out_migrate_fdbs: + dsa_port_walk_fdbs(ds, cpu, felix_migrate_fdbs_to_tag_8021q_port); + + return err; } static void felix_teardown_tag_npi(struct dsa_switch *ds, int cpu) @@ -659,35 +730,97 @@ static int felix_fdb_dump(struct dsa_switch *ds, int port, } static int felix_fdb_add(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid) + const unsigned char *addr, u16 vid, + struct dsa_db db) { + struct net_device *bridge_dev = felix_classify_db(db); struct ocelot *ocelot = ds->priv; - return ocelot_fdb_add(ocelot, port, addr, vid); + if (IS_ERR(bridge_dev)) + return PTR_ERR(bridge_dev); + + if (dsa_is_cpu_port(ds, port) && !bridge_dev && + dsa_fdb_present_in_other_db(ds, port, addr, vid, db)) + return 0; + + return ocelot_fdb_add(ocelot, port, addr, vid, bridge_dev); } static int felix_fdb_del(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid) + const unsigned char *addr, u16 vid, + struct dsa_db db) +{ + struct net_device *bridge_dev = felix_classify_db(db); + struct ocelot *ocelot = ds->priv; + + if (IS_ERR(bridge_dev)) + return PTR_ERR(bridge_dev); + + if (dsa_is_cpu_port(ds, port) && !bridge_dev && + dsa_fdb_present_in_other_db(ds, port, addr, vid, db)) + return 0; + + return ocelot_fdb_del(ocelot, port, addr, vid, bridge_dev); +} + +static int felix_lag_fdb_add(struct dsa_switch *ds, struct dsa_lag lag, + const unsigned char *addr, u16 vid, + struct dsa_db db) +{ + struct net_device *bridge_dev = felix_classify_db(db); + struct ocelot *ocelot = ds->priv; + + if (IS_ERR(bridge_dev)) + return PTR_ERR(bridge_dev); + + return ocelot_lag_fdb_add(ocelot, lag.dev, addr, vid, bridge_dev); +} + +static int felix_lag_fdb_del(struct dsa_switch *ds, struct dsa_lag lag, + const unsigned char *addr, u16 vid, + struct dsa_db db) { + struct net_device *bridge_dev = felix_classify_db(db); struct ocelot *ocelot = ds->priv; - return ocelot_fdb_del(ocelot, port, addr, vid); + if (IS_ERR(bridge_dev)) + return PTR_ERR(bridge_dev); + + return ocelot_lag_fdb_del(ocelot, lag.dev, addr, vid, bridge_dev); } static int felix_mdb_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_mdb *mdb) + const struct switchdev_obj_port_mdb *mdb, + struct dsa_db db) { + struct net_device *bridge_dev = felix_classify_db(db); struct ocelot *ocelot = ds->priv; - return ocelot_port_mdb_add(ocelot, port, mdb); + if (IS_ERR(bridge_dev)) + return PTR_ERR(bridge_dev); + + if (dsa_is_cpu_port(ds, port) && !bridge_dev && + dsa_mdb_present_in_other_db(ds, port, mdb, db)) + return 0; + + return ocelot_port_mdb_add(ocelot, port, mdb, bridge_dev); } static int felix_mdb_del(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_mdb *mdb) + const struct switchdev_obj_port_mdb *mdb, + struct dsa_db db) { + struct net_device *bridge_dev = felix_classify_db(db); struct ocelot *ocelot = ds->priv; - return ocelot_port_mdb_del(ocelot, port, mdb); + if (IS_ERR(bridge_dev)) + return PTR_ERR(bridge_dev); + + if (dsa_is_cpu_port(ds, port) && !bridge_dev && + dsa_mdb_present_in_other_db(ds, port, mdb, db)) + 
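
The CPU-port checks in felix_fdb_add()/felix_mdb_add() exist because, with per-database learning, several standalone ports may each request the same host address on the shared CPU port; dsa_fdb_present_in_other_db() lets the driver install it once and skip redundant copies that would otherwise conflict. The gist of the check, as a standalone predicate:

#include <stdbool.h>
#include <stdio.h>

/* Decide whether a host FDB entry actually needs programming */
static bool need_install(bool cpu_port, bool bridge_scoped,
			 bool present_in_other_db)
{
	/* user-port entries and bridge-scoped entries are always unique;
	 * only standalone host entries on the CPU port can be duplicates
	 */
	if (!cpu_port || bridge_scoped)
		return true;
	return !present_in_other_db;
}

int main(void)
{
	printf("duplicate standalone host entry: install=%d\n",
	       need_install(true, false, true));
	printf("first standalone host entry:     install=%d\n",
	       need_install(true, false, false));
	return 0;
}
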
return 0; + + return ocelot_port_mdb_del(ocelot, port, mdb, bridge_dev); } static void felix_bridge_stp_state_set(struct dsa_switch *ds, int port, @@ -719,13 +852,13 @@ static int felix_bridge_flags(struct dsa_switch *ds, int port, } static int felix_bridge_join(struct dsa_switch *ds, int port, - struct dsa_bridge bridge, bool *tx_fwd_offload) + struct dsa_bridge bridge, bool *tx_fwd_offload, + struct netlink_ext_ack *extack) { struct ocelot *ocelot = ds->priv; - ocelot_port_bridge_join(ocelot, port, bridge.dev); - - return 0; + return ocelot_port_bridge_join(ocelot, port, bridge.dev, bridge.num, + extack); } static void felix_bridge_leave(struct dsa_switch *ds, int port, @@ -737,20 +870,20 @@ static void felix_bridge_leave(struct dsa_switch *ds, int port, } static int felix_lag_join(struct dsa_switch *ds, int port, - struct net_device *bond, + struct dsa_lag lag, struct netdev_lag_upper_info *info) { struct ocelot *ocelot = ds->priv; - return ocelot_port_lag_join(ocelot, port, bond, info); + return ocelot_port_lag_join(ocelot, port, lag.dev, info); } static int felix_lag_leave(struct dsa_switch *ds, int port, - struct net_device *bond) + struct dsa_lag lag) { struct ocelot *ocelot = ds->priv; - ocelot_port_lag_leave(ocelot, port, bond); + ocelot_port_lag_leave(ocelot, port, lag.dev); return 0; } @@ -822,6 +955,21 @@ static int felix_vlan_del(struct dsa_switch *ds, int port, return ocelot_vlan_del(ocelot, port, vlan->vid); } +static void felix_phylink_get_caps(struct dsa_switch *ds, int port, + struct phylink_config *config) +{ + struct ocelot *ocelot = ds->priv; + + /* This driver does not make use of the speed, duplex, pause or the + * advertisement in its mac_config, so it is safe to mark this driver + * as non-legacy. + */ + config->legacy_pre_march2020 = false; + + __set_bit(ocelot->ports[port]->phy_mode, + config->supported_interfaces); +} + static void felix_phylink_validate(struct dsa_switch *ds, int port, unsigned long *supported, struct phylink_link_state *state) @@ -833,16 +981,18 @@ static void felix_phylink_validate(struct dsa_switch *ds, int port, felix->info->phylink_validate(ocelot, port, supported, state); } -static void felix_phylink_mac_config(struct dsa_switch *ds, int port, - unsigned int link_an_mode, - const struct phylink_link_state *state) +static struct phylink_pcs *felix_phylink_mac_select_pcs(struct dsa_switch *ds, + int port, + phy_interface_t iface) { struct ocelot *ocelot = ds->priv; struct felix *felix = ocelot_to_felix(ocelot); - struct dsa_port *dp = dsa_to_port(ds, port); + struct phylink_pcs *pcs = NULL; if (felix->pcs && felix->pcs[port]) - phylink_set_pcs(dp->pl, felix->pcs[port]); + pcs = felix->pcs[port]; + + return pcs; } static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port, @@ -924,11 +1074,28 @@ static int felix_get_ts_info(struct dsa_switch *ds, int port, return ocelot_get_ts_info(ocelot, port, info); } +static const u32 felix_phy_match_table[PHY_INTERFACE_MODE_MAX] = { + [PHY_INTERFACE_MODE_INTERNAL] = OCELOT_PORT_MODE_INTERNAL, + [PHY_INTERFACE_MODE_SGMII] = OCELOT_PORT_MODE_SGMII, + [PHY_INTERFACE_MODE_QSGMII] = OCELOT_PORT_MODE_QSGMII, + [PHY_INTERFACE_MODE_USXGMII] = OCELOT_PORT_MODE_USXGMII, + [PHY_INTERFACE_MODE_2500BASEX] = OCELOT_PORT_MODE_2500BASEX, +}; + +static int felix_validate_phy_mode(struct felix *felix, int port, + phy_interface_t phy_mode) +{ + u32 modes = felix->info->port_modes[port]; + + if (felix_phy_match_table[phy_mode] & modes) + return 0; + return -EOPNOTSUPP; +} + static int 
felix_parse_ports_node(struct felix *felix, struct device_node *ports_node, phy_interface_t *port_phy_modes) { - struct ocelot *ocelot = &felix->ocelot; struct device *dev = felix->ocelot.dev; struct device_node *child; @@ -955,7 +1122,7 @@ static int felix_parse_ports_node(struct felix *felix, return -ENODEV; } - err = felix->info->prevalidate_phy_mode(ocelot, port, phy_mode); + err = felix_validate_phy_mode(felix, port, phy_mode); if (err < 0) { dev_err(dev, "Unsupported PHY mode %s on port %d\n", phy_modes(phy_mode), port); @@ -1192,7 +1359,9 @@ static int felix_setup(struct dsa_switch *ds) { struct ocelot *ocelot = ds->priv; struct felix *felix = ocelot_to_felix(ocelot); - int port, err; + unsigned long cpu_flood; + struct dsa_port *dp; + int err; err = felix_init_structs(felix, ds->num_ports); if (err) @@ -1211,45 +1380,45 @@ static int felix_setup(struct dsa_switch *ds) } } - for (port = 0; port < ds->num_ports; port++) { - if (dsa_is_unused_port(ds, port)) - continue; - - ocelot_init_port(ocelot, port); + dsa_switch_for_each_available_port(dp, ds) { + ocelot_init_port(ocelot, dp->index); /* Set the default QoS Classification based on PCP and DEI * bits of vlan tag. */ - felix_port_qos_map_init(ocelot, port); + felix_port_qos_map_init(ocelot, dp->index); } err = ocelot_devlink_sb_register(ocelot); if (err) goto out_deinit_ports; - for (port = 0; port < ds->num_ports; port++) { - if (!dsa_is_cpu_port(ds, port)) - continue; - + dsa_switch_for_each_cpu_port(dp, ds) { /* The initial tag protocol is NPI which always returns 0, so * there's no real point in checking for errors. */ - felix_set_tag_protocol(ds, port, felix->tag_proto); + felix_set_tag_protocol(ds, dp->index, felix->tag_proto); + + /* Start off with flooding disabled towards the NPI port + * (actually CPU port module). 
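The dsa_switch_for_each_available_port() and dsa_switch_for_each_cpu_port() iterators used in this conversion are, roughly speaking, filters over the switch tree's port list (sketch of the idea; the authoritative definitions live in include/net/dsa.h):

#define dsa_switch_for_each_port(_dp, _ds)			\
	list_for_each_entry((_dp), &(_ds)->dst->ports, list)	\
		if ((_dp)->ds == (_ds))

#define dsa_switch_for_each_available_port(_dp, _ds)		\
	dsa_switch_for_each_port((_dp), (_ds))			\
		if (!dsa_port_is_unused((_dp)))

#define dsa_switch_for_each_cpu_port(_dp, _ds)		\
	dsa_switch_for_each_port((_dp), (_ds))			\
		if (dsa_port_is_cpu((_dp)))

This is why the converted loops index the hardware with dp->index rather than a plain loop counter.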
+ */ + cpu_flood = ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)); + ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_UC); + ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_MC); + break; } ds->mtu_enforcement_ingress = true; ds->assisted_learning_on_cpu_port = true; + ds->fdb_isolation = true; + ds->max_num_bridges = ds->num_ports; return 0; out_deinit_ports: - for (port = 0; port < ocelot->num_phys_ports; port++) { - if (dsa_is_unused_port(ds, port)) - continue; - - ocelot_deinit_port(ocelot, port); - } + dsa_switch_for_each_available_port(dp, ds) + ocelot_deinit_port(ocelot, dp->index); ocelot_deinit_timestamp(ocelot); ocelot_deinit(ocelot); @@ -1265,22 +1434,15 @@ static void felix_teardown(struct dsa_switch *ds) { struct ocelot *ocelot = ds->priv; struct felix *felix = ocelot_to_felix(ocelot); - int port; - - for (port = 0; port < ds->num_ports; port++) { - if (!dsa_is_cpu_port(ds, port)) - continue; + struct dsa_port *dp; - felix_del_tag_protocol(ds, port, felix->tag_proto); + dsa_switch_for_each_cpu_port(dp, ds) { + felix_del_tag_protocol(ds, dp->index, felix->tag_proto); break; } - for (port = 0; port < ocelot->num_phys_ports; port++) { - if (dsa_is_unused_port(ds, port)) - continue; - - ocelot_deinit_port(ocelot, port); - } + dsa_switch_for_each_available_port(dp, ds) + ocelot_deinit_port(ocelot, dp->index); ocelot_devlink_sb_unregister(ocelot); ocelot_deinit_timestamp(ocelot); @@ -1302,14 +1464,23 @@ static int felix_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr) { struct ocelot *ocelot = ds->priv; + struct felix *felix = ocelot_to_felix(ocelot); + bool using_tag_8021q; + int err; + + err = ocelot_hwstamp_set(ocelot, port, ifr); + if (err) + return err; - return ocelot_hwstamp_set(ocelot, port, ifr); + using_tag_8021q = felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q; + + return felix_update_trapping_destinations(ds, using_tag_8021q); } -static bool felix_check_xtr_pkt(struct ocelot *ocelot, unsigned int ptp_type) +static bool felix_check_xtr_pkt(struct ocelot *ocelot) { struct felix *felix = ocelot_to_felix(ocelot); - int err, grp = 0; + int err = 0, grp = 0; if (felix->tag_proto != DSA_TAG_PROTO_OCELOT_8021Q) return false; @@ -1317,9 +1488,6 @@ static bool felix_check_xtr_pkt(struct ocelot *ocelot, unsigned int ptp_type) if (!felix->info->quirk_no_xtr_irq) return false; - if (ptp_type == PTP_CLASS_NONE) - return false; - while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)) { struct sk_buff *skb; unsigned int type; @@ -1349,8 +1517,12 @@ static bool felix_check_xtr_pkt(struct ocelot *ocelot, unsigned int ptp_type) } out: - if (err < 0) + if (err < 0) { + dev_err_ratelimited(ocelot->dev, + "Error during packet extraction: %pe\n", + ERR_PTR(err)); ocelot_drain_cpu_queue(ocelot, 0); + } return true; } @@ -1370,7 +1542,7 @@ static bool felix_rxtstamp(struct dsa_switch *ds, int port, * MMIO in the CPU port module, and inject that into the stack from * ocelot_xtr_poll(). 
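felix_check_xtr_pkt() drains the CPU port module over register-based MMIO before re-injecting PTP frames into the stack. Stripped of the PTP specifics, the extraction loop follows this pattern (illustrative helper; the real loop also classifies the extracted frame):

static int felix_drain_xtr_grp(struct ocelot *ocelot, int grp)
{
	int err = 0;

	while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)) {
		struct sk_buff *skb;

		err = ocelot_xtr_poll_frame(ocelot, grp, &skb);
		if (err)
			break;

		/* A real consumer hands the skb to the stack here */
		kfree_skb(skb);
	}

	if (err < 0)
		ocelot_drain_cpu_queue(ocelot, 0);

	return err;
}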
*/ - if (felix_check_xtr_pkt(ocelot, type)) { + if (felix_check_xtr_pkt(ocelot)) { kfree_skb(skb); return true; } @@ -1430,8 +1602,17 @@ static int felix_cls_flower_add(struct dsa_switch *ds, int port, struct flow_cls_offload *cls, bool ingress) { struct ocelot *ocelot = ds->priv; + struct felix *felix = ocelot_to_felix(ocelot); + bool using_tag_8021q; + int err; + + err = ocelot_cls_flower_replace(ocelot, port, cls, ingress); + if (err) + return err; + + using_tag_8021q = felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q; - return ocelot_cls_flower_replace(ocelot, port, cls, ingress); + return felix_update_trapping_destinations(ds, using_tag_8021q); } static int felix_cls_flower_del(struct dsa_switch *ds, int port, @@ -1629,14 +1810,17 @@ const struct dsa_switch_ops felix_switch_ops = { .get_ethtool_stats = felix_get_ethtool_stats, .get_sset_count = felix_get_sset_count, .get_ts_info = felix_get_ts_info, + .phylink_get_caps = felix_phylink_get_caps, .phylink_validate = felix_phylink_validate, - .phylink_mac_config = felix_phylink_mac_config, + .phylink_mac_select_pcs = felix_phylink_mac_select_pcs, .phylink_mac_link_down = felix_phylink_mac_link_down, .phylink_mac_link_up = felix_phylink_mac_link_up, .port_fast_age = felix_port_fast_age, .port_fdb_dump = felix_fdb_dump, .port_fdb_add = felix_fdb_add, .port_fdb_del = felix_fdb_del, + .lag_fdb_add = felix_lag_fdb_add, + .lag_fdb_del = felix_lag_fdb_del, .port_mdb_add = felix_mdb_add, .port_mdb_del = felix_mdb_del, .port_pre_bridge_flags = felix_pre_bridge_flags, diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h index 9395ac119d33..f083b06fdfe9 100644 --- a/drivers/net/dsa/ocelot/felix.h +++ b/drivers/net/dsa/ocelot/felix.h @@ -7,6 +7,12 @@ #define ocelot_to_felix(o) container_of((o), struct felix, ocelot) #define FELIX_MAC_QUIRKS OCELOT_QUIRK_PCS_PERFORMS_RATE_ADAPTATION +#define OCELOT_PORT_MODE_INTERNAL BIT(0) +#define OCELOT_PORT_MODE_SGMII BIT(1) +#define OCELOT_PORT_MODE_QSGMII BIT(2) +#define OCELOT_PORT_MODE_2500BASEX BIT(3) +#define OCELOT_PORT_MODE_USXGMII BIT(4) + /* Platform-specific information */ struct felix_info { const struct resource *target_io_res; @@ -15,6 +21,7 @@ struct felix_info { const struct reg_field *regfields; const u32 *const *map; const struct ocelot_ops *ops; + const u32 *port_modes; int num_mact_rows; const struct ocelot_stat_layout *stats_layout; unsigned int num_stats; @@ -44,8 +51,6 @@ struct felix_info { void (*phylink_validate)(struct ocelot *ocelot, int port, unsigned long *supported, struct phylink_link_state *state); - int (*prevalidate_phy_mode)(struct ocelot *ocelot, int port, - phy_interface_t phy_mode); int (*port_setup_tc)(struct dsa_switch *ds, int port, enum tc_setup_type type, void *type_data); void (*port_sched_speed_set)(struct ocelot *ocelot, int port, diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c index 33f0ceae381d..ead3316742f6 100644 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -18,12 +18,27 @@ #include <linux/pci.h> #include "felix.h" +#define VSC9959_NUM_PORTS 6 + #define VSC9959_TAS_GCL_ENTRY_MAX 63 #define VSC9959_VCAP_POLICER_BASE 63 #define VSC9959_VCAP_POLICER_MAX 383 #define VSC9959_SWITCH_PCI_BAR 4 #define VSC9959_IMDIO_PCI_BAR 0 +#define VSC9959_PORT_MODE_SERDES (OCELOT_PORT_MODE_SGMII | \ + OCELOT_PORT_MODE_QSGMII | \ + OCELOT_PORT_MODE_2500BASEX | \ + OCELOT_PORT_MODE_USXGMII) + +static const u32 vsc9959_port_modes[VSC9959_NUM_PORTS] = { + 
VSC9959_PORT_MODE_SERDES, + VSC9959_PORT_MODE_SERDES, + VSC9959_PORT_MODE_SERDES, + VSC9959_PORT_MODE_SERDES, + OCELOT_PORT_MODE_INTERNAL, +}; + static const u32 vsc9959_ana_regmap[] = { REG(ANA_ADVLEARN, 0x0089a0), REG(ANA_VLANMASK, 0x0089a4), @@ -944,15 +959,8 @@ static void vsc9959_phylink_validate(struct ocelot *ocelot, int port, unsigned long *supported, struct phylink_link_state *state) { - struct ocelot_port *ocelot_port = ocelot->ports[port]; __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; - if (state->interface != PHY_INTERFACE_MODE_NA && - state->interface != ocelot_port->phy_mode) { - linkmode_zero(supported); - return; - } - phylink_set_port_modes(mask); phylink_set(mask, Autoneg); phylink_set(mask, Pause); @@ -975,27 +983,6 @@ static void vsc9959_phylink_validate(struct ocelot *ocelot, int port, linkmode_and(state->advertising, state->advertising, mask); } -static int vsc9959_prevalidate_phy_mode(struct ocelot *ocelot, int port, - phy_interface_t phy_mode) -{ - switch (phy_mode) { - case PHY_INTERFACE_MODE_INTERNAL: - if (port != 4 && port != 5) - return -ENOTSUPP; - return 0; - case PHY_INTERFACE_MODE_SGMII: - case PHY_INTERFACE_MODE_QSGMII: - case PHY_INTERFACE_MODE_USXGMII: - case PHY_INTERFACE_MODE_2500BASEX: - /* Not supported on internal to-CPU ports */ - if (port == 4 || port == 5) - return -ENOTSUPP; - return 0; - default: - return -ENOTSUPP; - } -} - /* Watermark encode * Bit 8: Unit; 0:1, 1:16 * Bit 7-0: Value to be multiplied with unit @@ -2231,14 +2218,14 @@ static const struct felix_info felix_info_vsc9959 = { .vcap_pol_base2 = 0, .vcap_pol_max2 = 0, .num_mact_rows = 2048, - .num_ports = 6, + .num_ports = VSC9959_NUM_PORTS, .num_tx_queues = OCELOT_NUM_TC, .quirk_no_xtr_irq = true, .ptp_caps = &vsc9959_ptp_caps, .mdio_bus_alloc = vsc9959_mdio_bus_alloc, .mdio_bus_free = vsc9959_mdio_bus_free, .phylink_validate = vsc9959_phylink_validate, - .prevalidate_phy_mode = vsc9959_prevalidate_phy_mode, + .port_modes = vsc9959_port_modes, .port_setup_tc = vsc9959_port_setup_tc, .port_sched_speed_set = vsc9959_sched_speed_set, .init_regmap = ocelot_regmap_init, diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c index f2f1608a476c..68ef8f111bbe 100644 --- a/drivers/net/dsa/ocelot/seville_vsc9953.c +++ b/drivers/net/dsa/ocelot/seville_vsc9953.c @@ -14,11 +14,29 @@ #include <linux/iopoll.h> #include "felix.h" +#define VSC9953_NUM_PORTS 10 + #define VSC9953_VCAP_POLICER_BASE 11 #define VSC9953_VCAP_POLICER_MAX 31 #define VSC9953_VCAP_POLICER_BASE2 120 #define VSC9953_VCAP_POLICER_MAX2 161 +#define VSC9953_PORT_MODE_SERDES (OCELOT_PORT_MODE_SGMII | \ + OCELOT_PORT_MODE_QSGMII) + +static const u32 vsc9953_port_modes[VSC9953_NUM_PORTS] = { + VSC9953_PORT_MODE_SERDES, + VSC9953_PORT_MODE_SERDES, + VSC9953_PORT_MODE_SERDES, + VSC9953_PORT_MODE_SERDES, + VSC9953_PORT_MODE_SERDES, + VSC9953_PORT_MODE_SERDES, + VSC9953_PORT_MODE_SERDES, + VSC9953_PORT_MODE_SERDES, + OCELOT_PORT_MODE_INTERNAL, + OCELOT_PORT_MODE_INTERNAL, +}; + static const u32 vsc9953_ana_regmap[] = { REG(ANA_ADVLEARN, 0x00b500), REG(ANA_VLANMASK, 0x00b504), @@ -917,15 +935,8 @@ static void vsc9953_phylink_validate(struct ocelot *ocelot, int port, unsigned long *supported, struct phylink_link_state *state) { - struct ocelot_port *ocelot_port = ocelot->ports[port]; __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; - if (state->interface != PHY_INTERFACE_MODE_NA && - state->interface != ocelot_port->phy_mode) { - linkmode_zero(supported); - return; - } - 
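With the prevalidate_phy_mode() hooks gone, interface validation reduces to a table lookup: a PHY mode is acceptable on a port iff its translated OCELOT_PORT_MODE_* bit is set in that port's mask. Note that, for parity with the removed hooks, every internal to-CPU port (4 and 5 on VSC9959, 8 and 9 on VSC9953) needs OCELOT_PORT_MODE_INTERNAL in its table slot. A sketch of the check (felix_phy_match_table is private to felix.c, so this helper is purely illustrative):

static bool vsc9959_port_mode_supported(int port, phy_interface_t phy_mode)
{
	if (port >= VSC9959_NUM_PORTS || phy_mode >= PHY_INTERFACE_MODE_MAX)
		return false;

	return vsc9959_port_modes[port] & felix_phy_match_table[phy_mode];
}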
phylink_set_port_modes(mask); phylink_set(mask, Autoneg); phylink_set(mask, Pause); @@ -945,25 +956,6 @@ static void vsc9953_phylink_validate(struct ocelot *ocelot, int port, linkmode_and(state->advertising, state->advertising, mask); } -static int vsc9953_prevalidate_phy_mode(struct ocelot *ocelot, int port, - phy_interface_t phy_mode) -{ - switch (phy_mode) { - case PHY_INTERFACE_MODE_INTERNAL: - if (port != 8 && port != 9) - return -ENOTSUPP; - return 0; - case PHY_INTERFACE_MODE_SGMII: - case PHY_INTERFACE_MODE_QSGMII: - /* Not supported on internal to-CPU ports */ - if (port == 8 || port == 9) - return -ENOTSUPP; - return 0; - default: - return -ENOTSUPP; - } -} - /* Watermark encode * Bit 9: Unit; 0:1, 1:16 * Bit 8-0: Value to be multiplied with unit @@ -1101,12 +1093,12 @@ static const struct felix_info seville_info_vsc9953 = { .vcap_pol_base2 = VSC9953_VCAP_POLICER_BASE2, .vcap_pol_max2 = VSC9953_VCAP_POLICER_MAX2, .num_mact_rows = 2048, - .num_ports = 10, + .num_ports = VSC9953_NUM_PORTS, .num_tx_queues = OCELOT_NUM_TC, .mdio_bus_alloc = vsc9953_mdio_bus_alloc, .mdio_bus_free = vsc9953_mdio_bus_free, .phylink_validate = vsc9953_phylink_validate, - .prevalidate_phy_mode = vsc9953_prevalidate_phy_mode, + .port_modes = vsc9953_port_modes, .init_regmap = ocelot_regmap_init, }; diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index c09d1569e66b..ee0dbf324268 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -1632,220 +1632,6 @@ qca8k_parse_port_config(struct qca8k_priv *priv) return 0; } -static int -qca8k_setup(struct dsa_switch *ds) -{ - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; - int cpu_port, ret, i; - u32 mask; - - cpu_port = qca8k_find_cpu_port(ds); - if (cpu_port < 0) { - dev_err(priv->dev, "No cpu port configured in both cpu port0 and port6"); - return cpu_port; - } - - /* Parse CPU port config to be later used in phy_link mac_config */ - ret = qca8k_parse_port_config(priv); - if (ret) - return ret; - - ret = qca8k_setup_mdio_bus(priv); - if (ret) - return ret; - - ret = qca8k_setup_of_pws_reg(priv); - if (ret) - return ret; - - ret = qca8k_setup_mac_pwr_sel(priv); - if (ret) - return ret; - - /* Make sure MAC06 is disabled */ - ret = regmap_clear_bits(priv->regmap, QCA8K_REG_PORT0_PAD_CTRL, - QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN); - if (ret) { - dev_err(priv->dev, "failed disabling MAC06 exchange"); - return ret; - } - - /* Enable CPU Port */ - ret = regmap_set_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, - QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN); - if (ret) { - dev_err(priv->dev, "failed enabling CPU port"); - return ret; - } - - /* Enable MIB counters */ - ret = qca8k_mib_init(priv); - if (ret) - dev_warn(priv->dev, "mib init failed"); - - /* Initial setup of all ports */ - for (i = 0; i < QCA8K_NUM_PORTS; i++) { - /* Disable forwarding by default on all ports */ - ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i), - QCA8K_PORT_LOOKUP_MEMBER, 0); - if (ret) - return ret; - - /* Enable QCA header mode on all cpu ports */ - if (dsa_is_cpu_port(ds, i)) { - ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(i), - FIELD_PREP(QCA8K_PORT_HDR_CTRL_TX_MASK, QCA8K_PORT_HDR_CTRL_ALL) | - FIELD_PREP(QCA8K_PORT_HDR_CTRL_RX_MASK, QCA8K_PORT_HDR_CTRL_ALL)); - if (ret) { - dev_err(priv->dev, "failed enabling QCA header mode"); - return ret; - } - } - - /* Disable MAC by default on all user ports */ - if (dsa_is_user_port(ds, i)) - qca8k_port_set_status(priv, i, 0); - } - - /* Forward all unknown frames to CPU port for Linux processing - * Notice that in 
multi-cpu config only one port should be set - * for igmp, unknown, multicast and broadcast packet - */ - ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1, - FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK, BIT(cpu_port)) | - FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK, BIT(cpu_port)) | - FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK, BIT(cpu_port)) | - FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK, BIT(cpu_port))); - if (ret) - return ret; - - /* Setup connection between CPU port & user ports - * Configure specific switch configuration for ports - */ - for (i = 0; i < QCA8K_NUM_PORTS; i++) { - /* CPU port gets connected to all user ports of the switch */ - if (dsa_is_cpu_port(ds, i)) { - ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i), - QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds)); - if (ret) - return ret; - } - - /* Individual user ports get connected to CPU port only */ - if (dsa_is_user_port(ds, i)) { - ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i), - QCA8K_PORT_LOOKUP_MEMBER, - BIT(cpu_port)); - if (ret) - return ret; - - /* Enable ARP Auto-learning by default */ - ret = regmap_set_bits(priv->regmap, QCA8K_PORT_LOOKUP_CTRL(i), - QCA8K_PORT_LOOKUP_LEARN); - if (ret) - return ret; - - /* For port based vlans to work we need to set the - * default egress vid - */ - ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(i), - QCA8K_EGREES_VLAN_PORT_MASK(i), - QCA8K_EGREES_VLAN_PORT(i, QCA8K_PORT_VID_DEF)); - if (ret) - return ret; - - ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(i), - QCA8K_PORT_VLAN_CVID(QCA8K_PORT_VID_DEF) | - QCA8K_PORT_VLAN_SVID(QCA8K_PORT_VID_DEF)); - if (ret) - return ret; - } - - /* The port 5 of the qca8337 have some problem in flood condition. The - * original legacy driver had some specific buffer and priority settings - * for the different port suggested by the QCA switch team. Add this - * missing settings to improve switch stability under load condition. - * This problem is limited to qca8337 and other qca8k switch are not affected. - */ - if (priv->switch_id == QCA8K_ID_QCA8337) { - switch (i) { - /* The 2 CPU port and port 5 requires some different - * priority than any other ports. - */ - case 0: - case 5: - case 6: - mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) | - QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) | - QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x4) | - QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x4) | - QCA8K_PORT_HOL_CTRL0_EG_PRI4(0x6) | - QCA8K_PORT_HOL_CTRL0_EG_PRI5(0x8) | - QCA8K_PORT_HOL_CTRL0_EG_PORT(0x1e); - break; - default: - mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) | - QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) | - QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x6) | - QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x8) | - QCA8K_PORT_HOL_CTRL0_EG_PORT(0x19); - } - qca8k_write(priv, QCA8K_REG_PORT_HOL_CTRL0(i), mask); - - mask = QCA8K_PORT_HOL_CTRL1_ING(0x6) | - QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN | - QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN | - QCA8K_PORT_HOL_CTRL1_WRED_EN; - qca8k_rmw(priv, QCA8K_REG_PORT_HOL_CTRL1(i), - QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK | - QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN | - QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN | - QCA8K_PORT_HOL_CTRL1_WRED_EN, - mask); - } - - /* Set initial MTU for every port. - * We have only have a general MTU setting. So track - * every port and set the max across all port. 
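The MTU handling being moved here feeds a single global register, so the change-MTU path has to recompute the maximum over all ports; a hypothetical helper showing the bookkeeping the comment describes (the real logic lives in the driver's MTU change callback):

static int qca8k_update_max_frame_size(struct qca8k_priv *priv)
{
	int i, mtu = 0;

	/* One MAX_FRAME_SIZE register serves all ports, so program it
	 * from the largest per-port MTU being tracked.
	 */
	for (i = 0; i < QCA8K_NUM_PORTS; i++)
		if (priv->port_mtu[i] > mtu)
			mtu = priv->port_mtu[i];

	/* Add the L2 header and FCS overhead exactly once */
	return qca8k_write(priv, QCA8K_MAX_FRAME_SIZE,
			   mtu + ETH_HLEN + ETH_FCS_LEN);
}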
- * Set per port MTU to 1500 as the MTU change function - * will add the overhead and if its set to 1518 then it - * will apply the overhead again and we will end up with - * MTU of 1536 instead of 1518 - */ - priv->port_mtu[i] = ETH_DATA_LEN; - } - - /* Special GLOBAL_FC_THRESH value are needed for ar8327 switch */ - if (priv->switch_id == QCA8K_ID_QCA8327) { - mask = QCA8K_GLOBAL_FC_GOL_XON_THRES(288) | - QCA8K_GLOBAL_FC_GOL_XOFF_THRES(496); - qca8k_rmw(priv, QCA8K_REG_GLOBAL_FC_THRESH, - QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK | - QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK, - mask); - } - - /* Setup our port MTUs to match power on defaults */ - ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, ETH_FRAME_LEN + ETH_FCS_LEN); - if (ret) - dev_warn(priv->dev, "failed setting MTU settings"); - - /* Flush the FDB table */ - qca8k_fdb_flush(priv); - - /* We don't have interrupts for link changes, so we need to poll */ - ds->pcs_poll = true; - - /* Set min a max ageing value supported */ - ds->ageing_time_min = 7000; - ds->ageing_time_max = 458745000; - - /* Set max number of LAGs supported */ - ds->num_lag_ids = QCA8K_NUM_LAGS; - - return 0; -} - static void qca8k_mac_config_setup_internal_delay(struct qca8k_priv *priv, int cpu_port_index, u32 reg) @@ -1887,13 +1673,41 @@ qca8k_mac_config_setup_internal_delay(struct qca8k_priv *priv, int cpu_port_inde cpu_port_index == QCA8K_CPU_PORT0 ? 0 : 6); } +static struct phylink_pcs * +qca8k_phylink_mac_select_pcs(struct dsa_switch *ds, int port, + phy_interface_t interface) +{ + struct qca8k_priv *priv = ds->priv; + struct phylink_pcs *pcs = NULL; + + switch (interface) { + case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_1000BASEX: + switch (port) { + case 0: + pcs = &priv->pcs_port_0.pcs; + break; + + case 6: + pcs = &priv->pcs_port_6.pcs; + break; + } + break; + + default: + break; + } + + return pcs; +} + static void qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode, const struct phylink_link_state *state) { struct qca8k_priv *priv = ds->priv; - int cpu_port_index, ret; - u32 reg, val; + int cpu_port_index; + u32 reg; switch (port) { case 0: /* 1st CPU port */ @@ -1959,70 +1773,6 @@ qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode, case PHY_INTERFACE_MODE_1000BASEX: /* Enable SGMII on the port */ qca8k_write(priv, reg, QCA8K_PORT_PAD_SGMII_EN); - - /* Enable/disable SerDes auto-negotiation as necessary */ - ret = qca8k_read(priv, QCA8K_REG_PWS, &val); - if (ret) - return; - if (phylink_autoneg_inband(mode)) - val &= ~QCA8K_PWS_SERDES_AEN_DIS; - else - val |= QCA8K_PWS_SERDES_AEN_DIS; - qca8k_write(priv, QCA8K_REG_PWS, val); - - /* Configure the SGMII parameters */ - ret = qca8k_read(priv, QCA8K_REG_SGMII_CTRL, &val); - if (ret) - return; - - val |= QCA8K_SGMII_EN_SD; - - if (priv->ports_config.sgmii_enable_pll) - val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX | - QCA8K_SGMII_EN_TX; - - if (dsa_is_cpu_port(ds, port)) { - /* CPU port, we're talking to the CPU MAC, be a PHY */ - val &= ~QCA8K_SGMII_MODE_CTRL_MASK; - val |= QCA8K_SGMII_MODE_CTRL_PHY; - } else if (state->interface == PHY_INTERFACE_MODE_SGMII) { - val &= ~QCA8K_SGMII_MODE_CTRL_MASK; - val |= QCA8K_SGMII_MODE_CTRL_MAC; - } else if (state->interface == PHY_INTERFACE_MODE_1000BASEX) { - val &= ~QCA8K_SGMII_MODE_CTRL_MASK; - val |= QCA8K_SGMII_MODE_CTRL_BASEX; - } - - qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val); - - /* From original code is reported port instability as SGMII also - * require delay set. 
Apply advised values here or take them from DT. - */ - if (state->interface == PHY_INTERFACE_MODE_SGMII) - qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg); - - /* For qca8327/qca8328/qca8334/qca8338 sgmii is unique and - * falling edge is set writing in the PORT0 PAD reg - */ - if (priv->switch_id == QCA8K_ID_QCA8327 || - priv->switch_id == QCA8K_ID_QCA8337) - reg = QCA8K_REG_PORT0_PAD_CTRL; - - val = 0; - - /* SGMII Clock phase configuration */ - if (priv->ports_config.sgmii_rx_clk_falling_edge) - val |= QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE; - - if (priv->ports_config.sgmii_tx_clk_falling_edge) - val |= QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE; - - if (val) - ret = qca8k_rmw(priv, reg, - QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE | - QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE, - val); - break; default: dev_err(ds->dev, "xMII mode %s not supported for port %d\n", @@ -2064,48 +1814,8 @@ static void qca8k_phylink_get_caps(struct dsa_switch *ds, int port, config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD; -} - -static int -qca8k_phylink_mac_link_state(struct dsa_switch *ds, int port, - struct phylink_link_state *state) -{ - struct qca8k_priv *priv = ds->priv; - u32 reg; - int ret; - - ret = qca8k_read(priv, QCA8K_REG_PORT_STATUS(port), ®); - if (ret < 0) - return ret; - - state->link = !!(reg & QCA8K_PORT_STATUS_LINK_UP); - state->an_complete = state->link; - state->an_enabled = !!(reg & QCA8K_PORT_STATUS_LINK_AUTO); - state->duplex = (reg & QCA8K_PORT_STATUS_DUPLEX) ? DUPLEX_FULL : - DUPLEX_HALF; - switch (reg & QCA8K_PORT_STATUS_SPEED) { - case QCA8K_PORT_STATUS_SPEED_10: - state->speed = SPEED_10; - break; - case QCA8K_PORT_STATUS_SPEED_100: - state->speed = SPEED_100; - break; - case QCA8K_PORT_STATUS_SPEED_1000: - state->speed = SPEED_1000; - break; - default: - state->speed = SPEED_UNKNOWN; - break; - } - - state->pause = MLO_PAUSE_NONE; - if (reg & QCA8K_PORT_STATUS_RXFLOW) - state->pause |= MLO_PAUSE_RX; - if (reg & QCA8K_PORT_STATUS_TXFLOW) - state->pause |= MLO_PAUSE_TX; - - return 1; + config->legacy_pre_march2020 = false; } static void @@ -2158,6 +1868,164 @@ qca8k_phylink_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode, qca8k_write(priv, QCA8K_REG_PORT_STATUS(port), reg); } +static struct qca8k_pcs *pcs_to_qca8k_pcs(struct phylink_pcs *pcs) +{ + return container_of(pcs, struct qca8k_pcs, pcs); +} + +static void qca8k_pcs_get_state(struct phylink_pcs *pcs, + struct phylink_link_state *state) +{ + struct qca8k_priv *priv = pcs_to_qca8k_pcs(pcs)->priv; + int port = pcs_to_qca8k_pcs(pcs)->port; + u32 reg; + int ret; + + ret = qca8k_read(priv, QCA8K_REG_PORT_STATUS(port), ®); + if (ret < 0) { + state->link = false; + return; + } + + state->link = !!(reg & QCA8K_PORT_STATUS_LINK_UP); + state->an_complete = state->link; + state->an_enabled = !!(reg & QCA8K_PORT_STATUS_LINK_AUTO); + state->duplex = (reg & QCA8K_PORT_STATUS_DUPLEX) ? 
DUPLEX_FULL : + DUPLEX_HALF; + + switch (reg & QCA8K_PORT_STATUS_SPEED) { + case QCA8K_PORT_STATUS_SPEED_10: + state->speed = SPEED_10; + break; + case QCA8K_PORT_STATUS_SPEED_100: + state->speed = SPEED_100; + break; + case QCA8K_PORT_STATUS_SPEED_1000: + state->speed = SPEED_1000; + break; + default: + state->speed = SPEED_UNKNOWN; + break; + } + + if (reg & QCA8K_PORT_STATUS_RXFLOW) + state->pause |= MLO_PAUSE_RX; + if (reg & QCA8K_PORT_STATUS_TXFLOW) + state->pause |= MLO_PAUSE_TX; +} + +static int qca8k_pcs_config(struct phylink_pcs *pcs, unsigned int mode, + phy_interface_t interface, + const unsigned long *advertising, + bool permit_pause_to_mac) +{ + struct qca8k_priv *priv = pcs_to_qca8k_pcs(pcs)->priv; + int cpu_port_index, ret, port; + u32 reg, val; + + port = pcs_to_qca8k_pcs(pcs)->port; + switch (port) { + case 0: + reg = QCA8K_REG_PORT0_PAD_CTRL; + cpu_port_index = QCA8K_CPU_PORT0; + break; + + case 6: + reg = QCA8K_REG_PORT6_PAD_CTRL; + cpu_port_index = QCA8K_CPU_PORT6; + break; + + default: + WARN_ON(1); + return -EINVAL; + } + + /* Enable/disable SerDes auto-negotiation as necessary */ + ret = qca8k_read(priv, QCA8K_REG_PWS, &val); + if (ret) + return ret; + if (phylink_autoneg_inband(mode)) + val &= ~QCA8K_PWS_SERDES_AEN_DIS; + else + val |= QCA8K_PWS_SERDES_AEN_DIS; + qca8k_write(priv, QCA8K_REG_PWS, val); + + /* Configure the SGMII parameters */ + ret = qca8k_read(priv, QCA8K_REG_SGMII_CTRL, &val); + if (ret) + return ret; + + val |= QCA8K_SGMII_EN_SD; + + if (priv->ports_config.sgmii_enable_pll) + val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX | + QCA8K_SGMII_EN_TX; + + if (dsa_is_cpu_port(priv->ds, port)) { + /* CPU port, we're talking to the CPU MAC, be a PHY */ + val &= ~QCA8K_SGMII_MODE_CTRL_MASK; + val |= QCA8K_SGMII_MODE_CTRL_PHY; + } else if (interface == PHY_INTERFACE_MODE_SGMII) { + val &= ~QCA8K_SGMII_MODE_CTRL_MASK; + val |= QCA8K_SGMII_MODE_CTRL_MAC; + } else if (interface == PHY_INTERFACE_MODE_1000BASEX) { + val &= ~QCA8K_SGMII_MODE_CTRL_MASK; + val |= QCA8K_SGMII_MODE_CTRL_BASEX; + } + + qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val); + + /* The original code reported port instability when SGMII is used + * without the delays set. Apply the advised values here, or take them from the DT. + */ + if (interface == PHY_INTERFACE_MODE_SGMII) + qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg); + + /* For qca8327/qca8328/qca8334/qca8338, SGMII is unique and the + * falling edge is configured by writing to the PORT0 PAD reg. + */ + if (priv->switch_id == QCA8K_ID_QCA8327 || + priv->switch_id == QCA8K_ID_QCA8337) + reg = QCA8K_REG_PORT0_PAD_CTRL; + + val = 0; + + /* SGMII Clock phase configuration */ + if (priv->ports_config.sgmii_rx_clk_falling_edge) + val |= QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE; + + if (priv->ports_config.sgmii_tx_clk_falling_edge) + val |= QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE; + + if (val) + ret = qca8k_rmw(priv, reg, + QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE | + QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE, + val); + + return 0; +} + +static void qca8k_pcs_an_restart(struct phylink_pcs *pcs) +{ +} + +static const struct phylink_pcs_ops qca8k_pcs_ops = { + .pcs_get_state = qca8k_pcs_get_state, + .pcs_config = qca8k_pcs_config, + .pcs_an_restart = qca8k_pcs_an_restart, +}; + +static void qca8k_setup_pcs(struct qca8k_priv *priv, struct qca8k_pcs *qpcs, + int port) +{ + qpcs->pcs.ops = &qca8k_pcs_ops; + + /* We don't have interrupts for link changes, so we need to poll */ + qpcs->pcs.poll = true; + qpcs->priv = priv; + qpcs->port = port; +} + static void qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data) { @@ -2379,7 +2247,8 @@ qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state) static int qca8k_port_bridge_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge, - bool *tx_fwd_offload) + bool *tx_fwd_offload, + struct netlink_ext_ack *extack) { struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; int port_mask, cpu_port; @@ -2530,7 +2399,8 @@ qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr, static int qca8k_port_fdb_add(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid) + const unsigned char *addr, u16 vid, + struct dsa_db db) { struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; u16 port_mask = BIT(port); @@ -2540,7 +2410,8 @@ qca8k_port_fdb_add(struct dsa_switch *ds, int port, static int qca8k_port_fdb_del(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid) + const unsigned char *addr, u16 vid, + struct dsa_db db) { struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; u16 port_mask = BIT(port); @@ -2577,7 +2448,8 @@ qca8k_port_fdb_dump(struct dsa_switch *ds, int port, static int qca8k_port_mdb_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_mdb *mdb) + const struct switchdev_obj_port_mdb *mdb, + struct dsa_db db) { struct qca8k_priv *priv = ds->priv; const u8 *addr = mdb->addr; @@ -2588,7 +2460,8 @@ qca8k_port_mdb_add(struct dsa_switch *ds, int port, static int qca8k_port_mdb_del(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_mdb *mdb) + const struct switchdev_obj_port_mdb *mdb, + struct dsa_db db) { struct qca8k_priv *priv = ds->priv; const u8 *addr = mdb->addr; @@ -2779,18 +2652,16 @@ qca8k_get_tag_protocol(struct dsa_switch *ds, int port, } static bool -qca8k_lag_can_offload(struct dsa_switch *ds, - struct net_device *lag, +qca8k_lag_can_offload(struct dsa_switch *ds, struct dsa_lag lag, struct netdev_lag_upper_info *info) { struct dsa_port *dp; - int id, members = 0; + int members = 0; - id = dsa_lag_id(ds->dst, lag); - if (id < 0 || id >= ds->num_lag_ids) + if (!lag.id) return false; - dsa_lag_foreach_port(dp, ds->dst, lag) + dsa_lag_foreach_port(dp, ds->dst, &lag) /* Includes the port joining 
the LAG */ members++; @@ -2808,16 +2679,14 @@ qca8k_lag_can_offload(struct dsa_switch *ds, } static int -qca8k_lag_setup_hash(struct dsa_switch *ds, - struct net_device *lag, +qca8k_lag_setup_hash(struct dsa_switch *ds, struct dsa_lag lag, struct netdev_lag_upper_info *info) { + struct net_device *lag_dev = lag.dev; struct qca8k_priv *priv = ds->priv; bool unique_lag = true; + unsigned int i; u32 hash = 0; - int i, id; - - id = dsa_lag_id(ds->dst, lag); switch (info->hash_type) { case NETDEV_LAG_HASH_L23: @@ -2834,7 +2703,7 @@ qca8k_lag_setup_hash(struct dsa_switch *ds, /* Check if we are the unique configured LAG */ dsa_lags_foreach_id(i, ds->dst) - if (i != id && dsa_lag_dev(ds->dst, i)) { + if (i != lag.id && dsa_lag_by_id(ds->dst, i)) { unique_lag = false; break; } @@ -2849,7 +2718,7 @@ qca8k_lag_setup_hash(struct dsa_switch *ds, if (unique_lag) { priv->lag_hash_mode = hash; } else if (priv->lag_hash_mode != hash) { - netdev_err(lag, "Error: Mismatched Hash Mode across different lag is not supported\n"); + netdev_err(lag_dev, "Error: Mismatched Hash Mode across different lag is not supported\n"); return -EOPNOTSUPP; } @@ -2859,13 +2728,14 @@ qca8k_lag_setup_hash(struct dsa_switch *ds, static int qca8k_lag_refresh_portmap(struct dsa_switch *ds, int port, - struct net_device *lag, bool delete) + struct dsa_lag lag, bool delete) { struct qca8k_priv *priv = ds->priv; int ret, id, i; u32 val; - id = dsa_lag_id(ds->dst, lag); + /* DSA LAG IDs are one-based, hardware is zero-based */ + id = lag.id - 1; /* Read current port member */ ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0, &val); @@ -2927,8 +2797,7 @@ qca8k_lag_refresh_portmap(struct dsa_switch *ds, int port, } static int -qca8k_port_lag_join(struct dsa_switch *ds, int port, - struct net_device *lag, +qca8k_port_lag_join(struct dsa_switch *ds, int port, struct dsa_lag lag, struct netdev_lag_upper_info *info) { int ret; @@ -2945,7 +2814,7 @@ qca8k_port_lag_join(struct dsa_switch *ds, int port, static int qca8k_port_lag_leave(struct dsa_switch *ds, int port, - struct net_device *lag) + struct dsa_lag lag) { return qca8k_lag_refresh_portmap(ds, port, lag, true); } @@ -2990,6 +2859,220 @@ static int qca8k_connect_tag_protocol(struct dsa_switch *ds, return 0; } +static int +qca8k_setup(struct dsa_switch *ds) +{ + struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; + int cpu_port, ret, i; + u32 mask; + + cpu_port = qca8k_find_cpu_port(ds); + if (cpu_port < 0) { + dev_err(priv->dev, "No cpu port configured in both cpu port0 and port6"); + return cpu_port; + } + + /* Parse CPU port config to be later used in phy_link mac_config */ + ret = qca8k_parse_port_config(priv); + if (ret) + return ret; + + ret = qca8k_setup_mdio_bus(priv); + if (ret) + return ret; + + ret = qca8k_setup_of_pws_reg(priv); + if (ret) + return ret; + + ret = qca8k_setup_mac_pwr_sel(priv); + if (ret) + return ret; + + qca8k_setup_pcs(priv, &priv->pcs_port_0, 0); + qca8k_setup_pcs(priv, &priv->pcs_port_6, 6); + + /* Make sure MAC06 is disabled */ + ret = regmap_clear_bits(priv->regmap, QCA8K_REG_PORT0_PAD_CTRL, + QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN); + if (ret) { + dev_err(priv->dev, "failed disabling MAC06 exchange"); + return ret; + } + + /* Enable CPU Port */ + ret = regmap_set_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, + QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN); + if (ret) { + dev_err(priv->dev, "failed enabling CPU port"); + return ret; + } + + /* Enable MIB counters */ + ret = qca8k_mib_init(priv); + if (ret) + dev_warn(priv->dev, "mib init failed"); + + /* 
Initial setup of all ports */ + for (i = 0; i < QCA8K_NUM_PORTS; i++) { + /* Disable forwarding by default on all ports */ + ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i), + QCA8K_PORT_LOOKUP_MEMBER, 0); + if (ret) + return ret; + + /* Enable QCA header mode on all CPU ports */ + if (dsa_is_cpu_port(ds, i)) { + ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(i), + FIELD_PREP(QCA8K_PORT_HDR_CTRL_TX_MASK, QCA8K_PORT_HDR_CTRL_ALL) | + FIELD_PREP(QCA8K_PORT_HDR_CTRL_RX_MASK, QCA8K_PORT_HDR_CTRL_ALL)); + if (ret) { + dev_err(priv->dev, "failed enabling QCA header mode"); + return ret; + } + } + + /* Disable MAC by default on all user ports */ + if (dsa_is_user_port(ds, i)) + qca8k_port_set_status(priv, i, 0); + } + + /* Forward all unknown frames to the CPU port for Linux processing. + * Note that in a multi-CPU config, only one port should be set + * for IGMP, unknown unicast, multicast and broadcast packets. + */ + ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1, + FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK, BIT(cpu_port)) | + FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK, BIT(cpu_port)) | + FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK, BIT(cpu_port)) | + FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK, BIT(cpu_port))); + if (ret) + return ret; + + /* Set up the connection between the CPU port and the user ports, + * and apply port-specific switch configuration. + */ + for (i = 0; i < QCA8K_NUM_PORTS; i++) { + /* CPU port gets connected to all user ports of the switch */ + if (dsa_is_cpu_port(ds, i)) { + ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i), + QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds)); + if (ret) + return ret; + } + + /* Individual user ports get connected to CPU port only */ + if (dsa_is_user_port(ds, i)) { + ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i), + QCA8K_PORT_LOOKUP_MEMBER, + BIT(cpu_port)); + if (ret) + return ret; + + /* Enable ARP Auto-learning by default */ + ret = regmap_set_bits(priv->regmap, QCA8K_PORT_LOOKUP_CTRL(i), + QCA8K_PORT_LOOKUP_LEARN); + if (ret) + return ret; + + /* For port-based VLANs to work, we need to set the + * default egress VID. + */ + ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(i), + QCA8K_EGREES_VLAN_PORT_MASK(i), + QCA8K_EGREES_VLAN_PORT(i, QCA8K_PORT_VID_DEF)); + if (ret) + return ret; + + ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(i), + QCA8K_PORT_VLAN_CVID(QCA8K_PORT_VID_DEF) | + QCA8K_PORT_VLAN_SVID(QCA8K_PORT_VID_DEF)); + if (ret) + return ret; + } + + /* Port 5 of the qca8337 has problems under flood conditions. The + * original legacy driver had specific buffer and priority settings + * for the different ports, suggested by the QCA switch team. Add these + * missing settings to improve switch stability under load. + * This problem is limited to the qca8337; other qca8k switches are not affected. + */ + if (priv->switch_id == QCA8K_ID_QCA8337) { + switch (i) { + /* The 2 CPU ports and port 5 require different + * priorities than the other ports. + */ + case 0: + case 5: + case 6: + mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) | + QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) | + QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x4) | + QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x4) | + QCA8K_PORT_HOL_CTRL0_EG_PRI4(0x6) | + QCA8K_PORT_HOL_CTRL0_EG_PRI5(0x8) | + QCA8K_PORT_HOL_CTRL0_EG_PORT(0x1e); + break; + default: + mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) | + QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) | + QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x6) | + QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x8) | + QCA8K_PORT_HOL_CTRL0_EG_PORT(0x19); + } + qca8k_write(priv, QCA8K_REG_PORT_HOL_CTRL0(i), mask); + + mask = QCA8K_PORT_HOL_CTRL1_ING(0x6) | + QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN | + QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN | + QCA8K_PORT_HOL_CTRL1_WRED_EN; + qca8k_rmw(priv, QCA8K_REG_PORT_HOL_CTRL1(i), + QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK | + QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN | + QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN | + QCA8K_PORT_HOL_CTRL1_WRED_EN, + mask); + } + + /* Set the initial MTU for every port. + * We only have a general MTU setting, so track + * every port and set the max across all ports. + * Set the per-port MTU to 1500, as the MTU change function + * will add the overhead; if it were set to 1518, it + * would apply the overhead again and we would end up with + * an MTU of 1536 instead of 1518. + */ + priv->port_mtu[i] = ETH_DATA_LEN; + } + + /* Special GLOBAL_FC_THRESH values are needed for the ar8327 switch */ + if (priv->switch_id == QCA8K_ID_QCA8327) { + mask = QCA8K_GLOBAL_FC_GOL_XON_THRES(288) | + QCA8K_GLOBAL_FC_GOL_XOFF_THRES(496); + qca8k_rmw(priv, QCA8K_REG_GLOBAL_FC_THRESH, + QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK | + QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK, + mask); + } + + /* Set up our port MTUs to match the power-on defaults */ + ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, ETH_FRAME_LEN + ETH_FCS_LEN); + if (ret) + dev_warn(priv->dev, "failed setting MTU settings"); + + /* Flush the FDB table */ + qca8k_fdb_flush(priv); + + /* Set the min and max ageing values supported */ + ds->ageing_time_min = 7000; + ds->ageing_time_max = 458745000; + + /* Set max number of LAGs supported */ + ds->num_lag_ids = QCA8K_NUM_LAGS; + + return 0; +} + static const struct dsa_switch_ops qca8k_switch_ops = { .get_tag_protocol = qca8k_get_tag_protocol, .setup = qca8k_setup, @@ -3018,7 +3101,7 @@ static const struct dsa_switch_ops qca8k_switch_ops = { .port_vlan_add = qca8k_port_vlan_add, .port_vlan_del = qca8k_port_vlan_del, .phylink_get_caps = qca8k_phylink_get_caps, - .phylink_mac_link_state = qca8k_phylink_mac_link_state, + .phylink_mac_select_pcs = qca8k_phylink_mac_select_pcs, .phylink_mac_config = qca8k_phylink_mac_config, .phylink_mac_link_down = qca8k_phylink_mac_link_down, .phylink_mac_link_up = qca8k_phylink_mac_link_up, diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h index c3d3c2269b1d..f375627174c8 100644 --- a/drivers/net/dsa/qca8k.h +++ b/drivers/net/dsa/qca8k.h @@ -376,6 +376,12 @@ struct qca8k_mdio_cache { u16 hi; }; +struct qca8k_pcs { + struct phylink_pcs pcs; + struct qca8k_priv *priv; + int port; +}; + struct qca8k_priv { u8 switch_id; u8 switch_revision; @@ -397,6 +403,8 @@ struct qca8k_priv { struct qca8k_mgmt_eth_data mgmt_eth_data; struct qca8k_mib_eth_data mib_eth_data; struct qca8k_mdio_cache mdio_cache; + struct qca8k_pcs pcs_port_0; + struct qca8k_pcs pcs_port_6; }; struct qca8k_mib_desc { diff --git a/drivers/net/dsa/realtek/realtek-mdio.c b/drivers/net/dsa/realtek/realtek-mdio.c index 0c5f2bdced9d..31e1f100e48e 100644 --- a/drivers/net/dsa/realtek/realtek-mdio.c +++ 
b/drivers/net/dsa/realtek/realtek-mdio.c @@ -98,6 +98,20 @@ out_unlock: return ret; } +static void realtek_mdio_lock(void *ctx) +{ + struct realtek_priv *priv = ctx; + + mutex_lock(&priv->map_lock); +} + +static void realtek_mdio_unlock(void *ctx) +{ + struct realtek_priv *priv = ctx; + + mutex_unlock(&priv->map_lock); +} + static const struct regmap_config realtek_mdio_regmap_config = { .reg_bits = 10, /* A4..A0 R4..R0 */ .val_bits = 16, @@ -108,6 +122,21 @@ static const struct regmap_config realtek_mdio_regmap_config = { .reg_read = realtek_mdio_read, .reg_write = realtek_mdio_write, .cache_type = REGCACHE_NONE, + .lock = realtek_mdio_lock, + .unlock = realtek_mdio_unlock, +}; + +static const struct regmap_config realtek_mdio_nolock_regmap_config = { + .reg_bits = 10, /* A4..A0 R4..R0 */ + .val_bits = 16, + .reg_stride = 1, + /* PHY regs are at 0x8000 */ + .max_register = 0xffff, + .reg_format_endian = REGMAP_ENDIAN_BIG, + .reg_read = realtek_mdio_read, + .reg_write = realtek_mdio_write, + .cache_type = REGCACHE_NONE, + .disable_locking = true, }; static int realtek_mdio_probe(struct mdio_device *mdiodev) @@ -115,8 +144,9 @@ static int realtek_mdio_probe(struct mdio_device *mdiodev) struct realtek_priv *priv; struct device *dev = &mdiodev->dev; const struct realtek_variant *var; - int ret; + struct regmap_config rc; struct device_node *np; + int ret; var = of_device_get_match_data(dev); if (!var) @@ -126,13 +156,25 @@ static int realtek_mdio_probe(struct mdio_device *mdiodev) if (!priv) return -ENOMEM; - priv->map = devm_regmap_init(dev, NULL, priv, &realtek_mdio_regmap_config); + mutex_init(&priv->map_lock); + + rc = realtek_mdio_regmap_config; + rc.lock_arg = priv; + priv->map = devm_regmap_init(dev, NULL, priv, &rc); if (IS_ERR(priv->map)) { ret = PTR_ERR(priv->map); dev_err(dev, "regmap init failed: %d\n", ret); return ret; } + rc = realtek_mdio_nolock_regmap_config; + priv->map_nolock = devm_regmap_init(dev, NULL, priv, &rc); + if (IS_ERR(priv->map_nolock)) { + ret = PTR_ERR(priv->map_nolock); + dev_err(dev, "regmap init failed: %d\n", ret); + return ret; + } + priv->mdio_addr = mdiodev->addr; priv->bus = mdiodev->bus; priv->dev = &mdiodev->dev; @@ -152,6 +194,21 @@ static int realtek_mdio_probe(struct mdio_device *mdiodev) /* TODO: if power is software controlled, set up any regulators here */ priv->leds_disabled = of_property_read_bool(np, "realtek,disable-leds"); + priv->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); + if (IS_ERR(priv->reset)) { + dev_err(dev, "failed to get RESET GPIO\n"); + return PTR_ERR(priv->reset); + } + + if (priv->reset) { + gpiod_set_value(priv->reset, 1); + dev_dbg(dev, "asserted RESET\n"); + msleep(REALTEK_HW_STOP_DELAY); + gpiod_set_value(priv->reset, 0); + msleep(REALTEK_HW_START_DELAY); + dev_dbg(dev, "deasserted RESET\n"); + } + ret = priv->ops->detect(priv); if (ret) { dev_err(dev, "unable to detect switch\n"); @@ -185,6 +242,10 @@ static void realtek_mdio_remove(struct mdio_device *mdiodev) dsa_unregister_switch(priv->ds); + /* leave the device reset asserted */ + if (priv->reset) + gpiod_set_value(priv->reset, 1); + dev_set_drvdata(&mdiodev->dev, NULL); } diff --git a/drivers/net/dsa/realtek/realtek-smi.c b/drivers/net/dsa/realtek/realtek-smi.c index 946fbbd70153..2243d3da55b2 100644 --- a/drivers/net/dsa/realtek/realtek-smi.c +++ b/drivers/net/dsa/realtek/realtek-smi.c @@ -43,8 +43,6 @@ #include "realtek.h" #define REALTEK_SMI_ACK_RETRY_COUNT 5 -#define REALTEK_SMI_HW_STOP_DELAY 25 /* msecs */ -#define REALTEK_SMI_HW_START_DELAY 
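Both the MDIO and the SMI probe paths now share the same optional reset sequence, with the delays centralized in realtek.h. Factored out, it amounts to this (hypothetical helper; the drivers currently open-code it in each probe function):

static void realtek_hw_reset(struct realtek_priv *priv)
{
	/* devm_gpiod_get_optional() returns NULL when the device tree
	 * describes no reset GPIO, in which case there is nothing to do.
	 */
	if (!priv->reset)
		return;

	gpiod_set_value(priv->reset, 1);	/* assert RESET */
	msleep(REALTEK_HW_STOP_DELAY);
	gpiod_set_value(priv->reset, 0);	/* release RESET */
	msleep(REALTEK_HW_START_DELAY);
}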
100 /* msecs */ static inline void realtek_smi_clk_delay(struct realtek_priv *priv) { @@ -313,7 +311,21 @@ static int realtek_smi_read(void *ctx, u32 reg, u32 *val) return realtek_smi_read_reg(priv, reg, val); } -static const struct regmap_config realtek_smi_mdio_regmap_config = { +static void realtek_smi_lock(void *ctx) +{ + struct realtek_priv *priv = ctx; + + mutex_lock(&priv->map_lock); +} + +static void realtek_smi_unlock(void *ctx) +{ + struct realtek_priv *priv = ctx; + + mutex_unlock(&priv->map_lock); +} + +static const struct regmap_config realtek_smi_regmap_config = { .reg_bits = 10, /* A4..A0 R4..R0 */ .val_bits = 16, .reg_stride = 1, @@ -323,6 +335,21 @@ static const struct regmap_config realtek_smi_mdio_regmap_config = { .reg_read = realtek_smi_read, .reg_write = realtek_smi_write, .cache_type = REGCACHE_NONE, + .lock = realtek_smi_lock, + .unlock = realtek_smi_unlock, +}; + +static const struct regmap_config realtek_smi_nolock_regmap_config = { + .reg_bits = 10, /* A4..A0 R4..R0 */ + .val_bits = 16, + .reg_stride = 1, + /* PHY regs are at 0x8000 */ + .max_register = 0xffff, + .reg_format_endian = REGMAP_ENDIAN_BIG, + .reg_read = realtek_smi_read, + .reg_write = realtek_smi_write, + .cache_type = REGCACHE_NONE, + .disable_locking = true, }; static int realtek_smi_mdio_read(struct mii_bus *bus, int addr, int regnum) @@ -387,6 +414,7 @@ static int realtek_smi_probe(struct platform_device *pdev) const struct realtek_variant *var; struct device *dev = &pdev->dev; struct realtek_priv *priv; + struct regmap_config rc; struct device_node *np; int ret; @@ -397,14 +425,26 @@ static int realtek_smi_probe(struct platform_device *pdev) if (!priv) return -ENOMEM; priv->chip_data = (void *)priv + sizeof(*priv); - priv->map = devm_regmap_init(dev, NULL, priv, - &realtek_smi_mdio_regmap_config); + + mutex_init(&priv->map_lock); + + rc = realtek_smi_regmap_config; + rc.lock_arg = priv; + priv->map = devm_regmap_init(dev, NULL, priv, &rc); if (IS_ERR(priv->map)) { ret = PTR_ERR(priv->map); dev_err(dev, "regmap init failed: %d\n", ret); return ret; } + rc = realtek_smi_nolock_regmap_config; + priv->map_nolock = devm_regmap_init(dev, NULL, priv, &rc); + if (IS_ERR(priv->map_nolock)) { + ret = PTR_ERR(priv->map_nolock); + dev_err(dev, "regmap init failed: %d\n", ret); + return ret; + } + /* Link forward and backward */ priv->dev = dev; priv->clk_delay = var->clk_delay; @@ -420,16 +460,19 @@ static int realtek_smi_probe(struct platform_device *pdev) /* TODO: if power is software controlled, set up any regulators here */ - /* Assert then deassert RESET */ - priv->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); + priv->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); if (IS_ERR(priv->reset)) { dev_err(dev, "failed to get RESET GPIO\n"); return PTR_ERR(priv->reset); } - msleep(REALTEK_SMI_HW_STOP_DELAY); - gpiod_set_value(priv->reset, 0); - msleep(REALTEK_SMI_HW_START_DELAY); - dev_info(dev, "deasserted RESET\n"); + if (priv->reset) { + gpiod_set_value(priv->reset, 1); + dev_dbg(dev, "asserted RESET\n"); + msleep(REALTEK_HW_STOP_DELAY); + gpiod_set_value(priv->reset, 0); + msleep(REALTEK_HW_START_DELAY); + dev_dbg(dev, "deasserted RESET\n"); + } /* Fetch MDIO pins */ priv->mdc = devm_gpiod_get_optional(dev, "mdc", GPIOD_OUT_LOW); @@ -474,7 +517,10 @@ static int realtek_smi_remove(struct platform_device *pdev) dsa_unregister_switch(priv->ds); if (priv->slave_mii_bus) of_node_put(priv->slave_mii_bus->dev.of_node); - gpiod_set_value(priv->reset, 1); + + /* leave the device 
reset asserted */ + if (priv->reset) + gpiod_set_value(priv->reset, 1); platform_set_drvdata(pdev, NULL); diff --git a/drivers/net/dsa/realtek/realtek.h b/drivers/net/dsa/realtek/realtek.h index ed5abf6cb3d6..4fa7c6ba874a 100644 --- a/drivers/net/dsa/realtek/realtek.h +++ b/drivers/net/dsa/realtek/realtek.h @@ -5,14 +5,17 @@ * Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org> */ -#ifndef _REALTEK_SMI_H -#define _REALTEK_SMI_H +#ifndef _REALTEK_H +#define _REALTEK_H #include <linux/phy.h> #include <linux/platform_device.h> #include <linux/gpio/consumer.h> #include <net/dsa.h> +#define REALTEK_HW_STOP_DELAY 25 /* msecs */ +#define REALTEK_HW_START_DELAY 100 /* msecs */ + struct realtek_ops; struct dentry; struct inode; @@ -49,6 +52,8 @@ struct realtek_priv { struct gpio_desc *mdc; struct gpio_desc *mdio; struct regmap *map; + struct regmap *map_nolock; + struct mutex map_lock; struct mii_bus *slave_mii_bus; struct mii_bus *bus; int mdio_addr; @@ -142,4 +147,4 @@ void rtl8366_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data); extern const struct realtek_variant rtl8366rb_variant; extern const struct realtek_variant rtl8365mb_variant; -#endif /* _REALTEK_SMI_H */ +#endif /* _REALTEK_H */ diff --git a/drivers/net/dsa/realtek/rtl8365mb.c b/drivers/net/dsa/realtek/rtl8365mb.c index 2ed592147c20..3d70e8a77ecf 100644 --- a/drivers/net/dsa/realtek/rtl8365mb.c +++ b/drivers/net/dsa/realtek/rtl8365mb.c @@ -566,6 +566,7 @@ struct rtl8365mb_port { * @chip_ver: chip silicon revision * @port_mask: mask of all ports * @learn_limit_max: maximum number of L2 addresses the chip can learn + * @cpu: CPU tagging and CPU port configuration for this chip * @mib_lock: prevent concurrent reads of MIB counters * @ports: per-port data * @jam_table: chip-specific initialization jam table @@ -580,6 +581,7 @@ struct rtl8365mb { u32 chip_ver; u32 port_mask; u32 learn_limit_max; + struct rtl8365mb_cpu cpu; struct mutex mib_lock; struct rtl8365mb_port ports[RTL8365MB_MAX_NUM_PORTS]; const struct rtl8365mb_jam_tbl_entry *jam_table; @@ -590,7 +592,7 @@ static int rtl8365mb_phy_poll_busy(struct realtek_priv *priv) { u32 val; - return regmap_read_poll_timeout(priv->map, + return regmap_read_poll_timeout(priv->map_nolock, RTL8365MB_INDIRECT_ACCESS_STATUS_REG, val, !val, 10, 100); } @@ -604,7 +606,7 @@ static int rtl8365mb_phy_ocp_prepare(struct realtek_priv *priv, int phy, /* Set OCP prefix */ val = FIELD_GET(RTL8365MB_PHY_OCP_ADDR_PREFIX_MASK, ocp_addr); ret = regmap_update_bits( - priv->map, RTL8365MB_GPHY_OCP_MSB_0_REG, + priv->map_nolock, RTL8365MB_GPHY_OCP_MSB_0_REG, RTL8365MB_GPHY_OCP_MSB_0_CFG_CPU_OCPADR_MASK, FIELD_PREP(RTL8365MB_GPHY_OCP_MSB_0_CFG_CPU_OCPADR_MASK, val)); if (ret) @@ -617,8 +619,8 @@ static int rtl8365mb_phy_ocp_prepare(struct realtek_priv *priv, int phy, ocp_addr >> 1); val |= FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_9_6_MASK, ocp_addr >> 6); - ret = regmap_write(priv->map, RTL8365MB_INDIRECT_ACCESS_ADDRESS_REG, - val); + ret = regmap_write(priv->map_nolock, + RTL8365MB_INDIRECT_ACCESS_ADDRESS_REG, val); if (ret) return ret; @@ -631,36 +633,42 @@ static int rtl8365mb_phy_ocp_read(struct realtek_priv *priv, int phy, u32 val; int ret; + mutex_lock(&priv->map_lock); + ret = rtl8365mb_phy_poll_busy(priv); if (ret) - return ret; + goto out; ret = rtl8365mb_phy_ocp_prepare(priv, phy, ocp_addr); if (ret) - return ret; + goto out; /* Execute read operation */ val = FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_MASK, RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_VALUE) | 
FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_RW_MASK, RTL8365MB_INDIRECT_ACCESS_CTRL_RW_READ); - ret = regmap_write(priv->map, RTL8365MB_INDIRECT_ACCESS_CTRL_REG, val); + ret = regmap_write(priv->map_nolock, RTL8365MB_INDIRECT_ACCESS_CTRL_REG, + val); if (ret) - return ret; + goto out; ret = rtl8365mb_phy_poll_busy(priv); if (ret) - return ret; + goto out; /* Get PHY register data */ - ret = regmap_read(priv->map, RTL8365MB_INDIRECT_ACCESS_READ_DATA_REG, - &val); + ret = regmap_read(priv->map_nolock, + RTL8365MB_INDIRECT_ACCESS_READ_DATA_REG, &val); if (ret) - return ret; + goto out; *data = val & 0xFFFF; - return 0; +out: + mutex_unlock(&priv->map_lock); + + return ret; } static int rtl8365mb_phy_ocp_write(struct realtek_priv *priv, int phy, @@ -669,32 +677,38 @@ u32 val; int ret; + mutex_lock(&priv->map_lock); + ret = rtl8365mb_phy_poll_busy(priv); if (ret) - return ret; + goto out; ret = rtl8365mb_phy_ocp_prepare(priv, phy, ocp_addr); if (ret) - return ret; + goto out; /* Set PHY register data */ - ret = regmap_write(priv->map, RTL8365MB_INDIRECT_ACCESS_WRITE_DATA_REG, - data); + ret = regmap_write(priv->map_nolock, + RTL8365MB_INDIRECT_ACCESS_WRITE_DATA_REG, data); if (ret) - return ret; + goto out; /* Execute write operation */ val = FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_MASK, RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_VALUE) | FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_RW_MASK, RTL8365MB_INDIRECT_ACCESS_CTRL_RW_WRITE); - ret = regmap_write(priv->map, RTL8365MB_INDIRECT_ACCESS_CTRL_REG, val); + ret = regmap_write(priv->map_nolock, RTL8365MB_INDIRECT_ACCESS_CTRL_REG, + val); if (ret) - return ret; + goto out; ret = rtl8365mb_phy_poll_busy(priv); if (ret) - return ret; + goto out; + +out: + mutex_unlock(&priv->map_lock); - return 0; + return ret; } @@ -770,6 +784,16 @@ static enum dsa_tag_protocol rtl8365mb_get_tag_protocol(struct dsa_switch *ds, int port, enum dsa_tag_protocol mp) { + struct realtek_priv *priv = ds->priv; + struct rtl8365mb_cpu *cpu; + struct rtl8365mb *mb; + + mb = priv->chip_data; + cpu = &mb->cpu; + + if (cpu->position == RTL8365MB_CPU_POS_BEFORE_CRC) + return DSA_TAG_PROTO_RTL8_4T; + return DSA_TAG_PROTO_RTL8_4; } @@ -1725,8 +1749,10 @@ static void rtl8365mb_irq_teardown(struct realtek_priv *priv) } } -static int rtl8365mb_cpu_config(struct realtek_priv *priv, const struct rtl8365mb_cpu *cpu) +static int rtl8365mb_cpu_config(struct realtek_priv *priv) { + struct rtl8365mb *mb = priv->chip_data; + struct rtl8365mb_cpu *cpu = &mb->cpu; u32 val; int ret; @@ -1752,6 +1778,37 @@ static int rtl8365mb_cpu_config(struct realtek_priv *priv, const struct rtl8365m return 0; } +static int rtl8365mb_change_tag_protocol(struct dsa_switch *ds, int cpu_index, + enum dsa_tag_protocol proto) +{ + struct realtek_priv *priv = ds->priv; + struct rtl8365mb_cpu *cpu; + struct rtl8365mb *mb; + + mb = priv->chip_data; + cpu = &mb->cpu; + + switch (proto) { + case DSA_TAG_PROTO_RTL8_4: + cpu->format = RTL8365MB_CPU_FORMAT_8BYTES; + cpu->position = RTL8365MB_CPU_POS_AFTER_SA; + break; + case DSA_TAG_PROTO_RTL8_4T: + cpu->format = RTL8365MB_CPU_FORMAT_8BYTES; + cpu->position = RTL8365MB_CPU_POS_BEFORE_CRC; + break; + /* The switch also supports a 4-byte format, similar to rtl4a but with + * the same 0x04 8-bit version and probably 8-bit port source/dest. + * There is no public doc about it. Not supported yet and it will probably + * never be. 
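The map/map_nolock pair exists so that multi-register sequences such as the OCP accessors can hold priv->map_lock across the whole transaction while still going through regmap. Reduced to its core, the pattern is (illustrative helper, not driver code):

static int realtek_locked_rmw(struct realtek_priv *priv, u32 reg,
			      u32 mask, u32 val)
{
	u32 tmp;
	int ret;

	mutex_lock(&priv->map_lock);

	/* Inside the section, use the lockless regmap: the locked one
	 * would try to take map_lock again and deadlock.
	 */
	ret = regmap_read(priv->map_nolock, reg, &tmp);
	if (ret)
		goto out;

	tmp = (tmp & ~mask) | (val & mask);
	ret = regmap_write(priv->map_nolock, reg, tmp);

out:
	mutex_unlock(&priv->map_lock);

	return ret;
}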
+ */ + default: + return -EPROTONOSUPPORT; + } + + return rtl8365mb_cpu_config(priv); +} + static int rtl8365mb_switch_init(struct realtek_priv *priv) { struct rtl8365mb *mb = priv->chip_data; @@ -1798,13 +1855,14 @@ static int rtl8365mb_reset_chip(struct realtek_priv *priv) static int rtl8365mb_setup(struct dsa_switch *ds) { struct realtek_priv *priv = ds->priv; - struct rtl8365mb_cpu cpu = {0}; + struct rtl8365mb_cpu *cpu; struct dsa_port *cpu_dp; struct rtl8365mb *mb; int ret; int i; mb = priv->chip_data; + cpu = &mb->cpu; ret = rtl8365mb_reset_chip(priv); if (ret) { @@ -1827,21 +1885,14 @@ static int rtl8365mb_setup(struct dsa_switch *ds) dev_info(priv->dev, "no interrupt support\n"); /* Configure CPU tagging */ - cpu.trap_port = RTL8365MB_MAX_NUM_PORTS; dsa_switch_for_each_cpu_port(cpu_dp, priv->ds) { - cpu.mask |= BIT(cpu_dp->index); + cpu->mask |= BIT(cpu_dp->index); - if (cpu.trap_port == RTL8365MB_MAX_NUM_PORTS) - cpu.trap_port = cpu_dp->index; + if (cpu->trap_port == RTL8365MB_MAX_NUM_PORTS) + cpu->trap_port = cpu_dp->index; } - - cpu.enable = cpu.mask > 0; - cpu.insert = RTL8365MB_CPU_INSERT_TO_ALL; - cpu.position = RTL8365MB_CPU_POS_AFTER_SA; - cpu.rx_length = RTL8365MB_CPU_RXLEN_64BYTES; - cpu.format = RTL8365MB_CPU_FORMAT_8BYTES; - - ret = rtl8365mb_cpu_config(priv, &cpu); + cpu->enable = cpu->mask > 0; + ret = rtl8365mb_cpu_config(priv); if (ret) goto out_teardown_irq; @@ -1853,7 +1904,7 @@ static int rtl8365mb_setup(struct dsa_switch *ds) continue; /* Forward only to the CPU */ - ret = rtl8365mb_port_set_isolation(priv, i, cpu.mask); + ret = rtl8365mb_port_set_isolation(priv, i, cpu->mask); if (ret) goto out_teardown_irq; @@ -1983,6 +2034,12 @@ static int rtl8365mb_detect(struct realtek_priv *priv) mb->jam_table = rtl8365mb_init_jam_8365mb_vc; mb->jam_size = ARRAY_SIZE(rtl8365mb_init_jam_8365mb_vc); + mb->cpu.trap_port = RTL8365MB_MAX_NUM_PORTS; + mb->cpu.insert = RTL8365MB_CPU_INSERT_TO_ALL; + mb->cpu.position = RTL8365MB_CPU_POS_AFTER_SA; + mb->cpu.rx_length = RTL8365MB_CPU_RXLEN_64BYTES; + mb->cpu.format = RTL8365MB_CPU_FORMAT_8BYTES; + break; default: dev_err(priv->dev, @@ -1996,6 +2053,7 @@ static int rtl8365mb_detect(struct realtek_priv *priv) static const struct dsa_switch_ops rtl8365mb_switch_ops_smi = { .get_tag_protocol = rtl8365mb_get_tag_protocol, + .change_tag_protocol = rtl8365mb_change_tag_protocol, .setup = rtl8365mb_setup, .teardown = rtl8365mb_teardown, .phylink_get_caps = rtl8365mb_phylink_get_caps, @@ -2014,6 +2072,7 @@ static const struct dsa_switch_ops rtl8365mb_switch_ops_smi = { static const struct dsa_switch_ops rtl8365mb_switch_ops_mdio = { .get_tag_protocol = rtl8365mb_get_tag_protocol, + .change_tag_protocol = rtl8365mb_change_tag_protocol, .setup = rtl8365mb_setup, .teardown = rtl8365mb_teardown, .phylink_get_caps = rtl8365mb_phylink_get_caps, diff --git a/drivers/net/dsa/realtek/rtl8366rb.c b/drivers/net/dsa/realtek/rtl8366rb.c index fb6565e68401..1a3406b9e64c 100644 --- a/drivers/net/dsa/realtek/rtl8366rb.c +++ b/drivers/net/dsa/realtek/rtl8366rb.c @@ -1189,7 +1189,8 @@ rtl8366rb_port_disable(struct dsa_switch *ds, int port) static int rtl8366rb_port_bridge_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge, - bool *tx_fwd_offload) + bool *tx_fwd_offload, + struct netlink_ext_ack *extack) { struct realtek_priv *priv = ds->priv; unsigned int port_bitmap = 0; diff --git a/drivers/net/dsa/sja1105/sja1105_flower.c b/drivers/net/dsa/sja1105/sja1105_flower.c index 7dcdd784aea4..fad5afe3819c 100644 --- 
a/drivers/net/dsa/sja1105/sja1105_flower.c +++ b/drivers/net/dsa/sja1105/sja1105_flower.c @@ -300,6 +300,46 @@ static int sja1105_flower_parse_key(struct sja1105_private *priv, return -EOPNOTSUPP; } +static int sja1105_policer_validate(const struct flow_action *action, + const struct flow_action_entry *act, + struct netlink_ext_ack *extack) +{ + if (act->police.exceed.act_id != FLOW_ACTION_DROP) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when exceed action is not drop"); + return -EOPNOTSUPP; + } + + if (act->police.notexceed.act_id != FLOW_ACTION_PIPE && + act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when conform action is not pipe or ok"); + return -EOPNOTSUPP; + } + + if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT && + !flow_action_is_last_entry(action, act)) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when conform action is ok, but action is not last"); + return -EOPNOTSUPP; + } + + if (act->police.peakrate_bytes_ps || + act->police.avrate || act->police.overhead) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when peakrate/avrate/overhead is configured"); + return -EOPNOTSUPP; + } + + if (act->police.rate_pkt_ps) { + NL_SET_ERR_MSG_MOD(extack, + "QoS offload not support packets per second"); + return -EOPNOTSUPP; + } + + return 0; +} + int sja1105_cls_flower_add(struct dsa_switch *ds, int port, struct flow_cls_offload *cls, bool ingress) { @@ -321,12 +361,9 @@ int sja1105_cls_flower_add(struct dsa_switch *ds, int port, flow_action_for_each(i, act, &rule->action) { switch (act->id) { case FLOW_ACTION_POLICE: - if (act->police.rate_pkt_ps) { - NL_SET_ERR_MSG_MOD(extack, - "QoS offload not support packets per second"); - rc = -EOPNOTSUPP; + rc = sja1105_policer_validate(&rule->action, act, extack); + if (rc) goto out; - } rc = sja1105_flower_policer(priv, port, extack, cookie, &key, diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c index b513713be610..3358e979342c 100644 --- a/drivers/net/dsa/sja1105/sja1105_main.c +++ b/drivers/net/dsa/sja1105/sja1105_main.c @@ -393,10 +393,8 @@ static int sja1105_init_l2_lookup_params(struct sja1105_private *priv) .start_dynspc = 0, /* 2^8 + 2^5 + 2^3 + 2^2 + 2^1 + 1 in Koopman notation */ .poly = 0x97, - /* This selects between Independent VLAN Learning (IVL) and - * Shared VLAN Learning (SVL) - */ - .shared_learn = true, + /* Always use Independent VLAN Learning (IVL) */ + .shared_learn = false, /* Don't discard management traffic based on ENFPORT - * we don't perform SMAC port enforcement anyway, so * what we are setting here doesn't matter. @@ -1358,37 +1356,16 @@ static int sja1105_adjust_port_config(struct sja1105_private *priv, int port, return sja1105_clocking_setup_port(priv, port); } -/* The SJA1105 MAC programming model is through the static config (the xMII - * Mode table cannot be dynamically reconfigured), and we have to program - * that early (earlier than PHYLINK calls us, anyway). - * So just error out in case the connected PHY attempts to change the initial - * system interface MII protocol from what is defined in the DT, at least for - * now. 
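The shared_learn flip above is easier to see with the FDB lookup key written out. In shared VLAN learning (SVL) the switch masks the VID on lookup; in independent VLAN learning (IVL) the VID is part of the key, which is what per-port and per-bridge tag_8021q VIDs require. A toy model of the lookup, assuming nothing about the real silicon:

#include <linux/etherdevice.h>
#include <linux/types.h>

struct fdb_key {
	u8  mac[ETH_ALEN];
	u16 vid;
};

static bool fdb_match(const struct fdb_key *entry,
		      const struct fdb_key *frame, bool shared_learn)
{
	if (!ether_addr_equal(entry->mac, frame->mac))
		return false;

	/* SVL ignores the VID: one database serves every VLAN, so an
	 * entry learned under one port's private VID shadows all
	 * others. IVL keys on the VID too, keeping the per-port and
	 * per-bridge databases apart.
	 */
	return shared_learn || entry->vid == frame->vid;
}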
- */ -static bool sja1105_phy_mode_mismatch(struct sja1105_private *priv, int port, - phy_interface_t interface) -{ - return priv->phy_mode[port] != interface; -} - -static void sja1105_mac_config(struct dsa_switch *ds, int port, - unsigned int mode, - const struct phylink_link_state *state) +static struct phylink_pcs * +sja1105_mac_select_pcs(struct dsa_switch *ds, int port, phy_interface_t iface) { - struct dsa_port *dp = dsa_to_port(ds, port); struct sja1105_private *priv = ds->priv; - struct dw_xpcs *xpcs; - - if (sja1105_phy_mode_mismatch(priv, port, state->interface)) { - dev_err(ds->dev, "Changing PHY mode to %s not supported!\n", - phy_modes(state->interface)); - return; - } - - xpcs = priv->xpcs[port]; + struct dw_xpcs *xpcs = priv->xpcs[port]; if (xpcs) - phylink_set_pcs(dp->pl, &xpcs->pcs); + return &xpcs->pcs; + + return NULL; } static void sja1105_mac_link_down(struct dsa_switch *ds, int port, @@ -1412,48 +1389,53 @@ static void sja1105_mac_link_up(struct dsa_switch *ds, int port, sja1105_inhibit_tx(priv, BIT(port), false); } -static void sja1105_phylink_validate(struct dsa_switch *ds, int port, - unsigned long *supported, - struct phylink_link_state *state) +static void sja1105_phylink_get_caps(struct dsa_switch *ds, int port, + struct phylink_config *config) { - /* Construct a new mask which exhaustively contains all link features - * supported by the MAC, and then apply that (logical AND) to what will - * be sent to the PHY for "marketing". - */ - __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; struct sja1105_private *priv = ds->priv; struct sja1105_xmii_params_entry *mii; + phy_interface_t phy_mode; - mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries; - - /* include/linux/phylink.h says: - * When @state->interface is %PHY_INTERFACE_MODE_NA, phylink - * expects the MAC driver to return all supported link modes. + /* This driver does not make use of the speed, duplex, pause or the + * advertisement in its mac_config, so it is safe to mark this driver + * as non-legacy. */ - if (state->interface != PHY_INTERFACE_MODE_NA && - sja1105_phy_mode_mismatch(priv, port, state->interface)) { - linkmode_zero(supported); - return; + config->legacy_pre_march2020 = false; + + phy_mode = priv->phy_mode[port]; + if (phy_mode == PHY_INTERFACE_MODE_SGMII || + phy_mode == PHY_INTERFACE_MODE_2500BASEX) { + /* Changing the PHY mode on SERDES ports is possible and makes + * sense, because that is done through the XPCS. We allow + * changes between SGMII and 2500base-X. + */ + if (priv->info->supports_sgmii[port]) + __set_bit(PHY_INTERFACE_MODE_SGMII, + config->supported_interfaces); + + if (priv->info->supports_2500basex[port]) + __set_bit(PHY_INTERFACE_MODE_2500BASEX, + config->supported_interfaces); + } else { + /* The SJA1105 MAC programming model is through the static + * config (the xMII Mode table cannot be dynamically + * reconfigured), and we have to program that early. + */ + __set_bit(phy_mode, config->supported_interfaces); } /* The MAC does not support pause frames, and also doesn't * support half-duplex traffic modes. 
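The replacement callback is much smaller than the mask juggling it retires: the driver only declares which PHY interface modes and MAC capabilities the port supports, and phylink derives the ethtool link modes itself. A minimal sketch for a hypothetical RGMII-only port (the example_ name is not from the driver):

#include <linux/phylink.h>
#include <net/dsa.h>

static void example_phylink_get_caps(struct dsa_switch *ds, int port,
				     struct phylink_config *config)
{
	/* Declare what the port hardware can do; phylink computes the
	 * advertised link modes from this, replacing the old
	 * hand-rolled .phylink_validate mask construction.
	 */
	__set_bit(PHY_INTERFACE_MODE_RGMII, config->supported_interfaces);
	config->mac_capabilities = MAC_10FD | MAC_100FD | MAC_1000FD;
}

Ports whose mode is fixed by the static config simply set the one interface bit they were probed with, exactly as the hunk above does for the non-SERDES case.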
*/ - phylink_set(mask, Autoneg); - phylink_set(mask, MII); - phylink_set(mask, 10baseT_Full); - phylink_set(mask, 100baseT_Full); - phylink_set(mask, 100baseT1_Full); + config->mac_capabilities = MAC_10FD | MAC_100FD; + + mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries; if (mii->xmii_mode[port] == XMII_MODE_RGMII || mii->xmii_mode[port] == XMII_MODE_SGMII) - phylink_set(mask, 1000baseT_Full); - if (priv->info->supports_2500basex[port]) { - phylink_set(mask, 2500baseT_Full); - phylink_set(mask, 2500baseX_Full); - } + config->mac_capabilities |= MAC_1000FD; - linkmode_and(supported, supported, mask); - linkmode_and(state->advertising, state->advertising, mask); + if (priv->info->supports_2500basex[port]) + config->mac_capabilities |= MAC_2500FD; } static int @@ -1819,25 +1801,52 @@ int sja1105pqrs_fdb_del(struct dsa_switch *ds, int port, } static int sja1105_fdb_add(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid) + const unsigned char *addr, u16 vid, + struct dsa_db db) { struct sja1105_private *priv = ds->priv; + if (!vid) { + switch (db.type) { + case DSA_DB_PORT: + vid = dsa_tag_8021q_standalone_vid(db.dp); + break; + case DSA_DB_BRIDGE: + vid = dsa_tag_8021q_bridge_vid(db.bridge.num); + break; + default: + return -EOPNOTSUPP; + } + } + return priv->info->fdb_add_cmd(ds, port, addr, vid); } static int sja1105_fdb_del(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid) + const unsigned char *addr, u16 vid, + struct dsa_db db) { struct sja1105_private *priv = ds->priv; + if (!vid) { + switch (db.type) { + case DSA_DB_PORT: + vid = dsa_tag_8021q_standalone_vid(db.dp); + break; + case DSA_DB_BRIDGE: + vid = dsa_tag_8021q_bridge_vid(db.bridge.num); + break; + default: + return -EOPNOTSUPP; + } + } + return priv->info->fdb_del_cmd(ds, port, addr, vid); } static int sja1105_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb, void *data) { - struct dsa_port *dp = dsa_to_port(ds, port); struct sja1105_private *priv = ds->priv; struct device *dev = ds->dev; int i; @@ -1874,7 +1883,7 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port, u64_to_ether_addr(l2_lookup.macaddr, macaddr); /* We need to hide the dsa_8021q VLANs from the user. 
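With FDB isolation, the database argument decides which tag_8021q VID a VLAN-unaware (vid == 0) entry really lands in. The rule the two callbacks above share, distilled into a hypothetical helper (the dsa_tag_8021q_* calls are the ones used in the hunk):

#include <linux/dsa/8021q.h>
#include <net/dsa.h>

static u16 example_db_vid(struct dsa_db db)
{
	switch (db.type) {
	case DSA_DB_PORT:	/* standalone port: its private VID */
		return dsa_tag_8021q_standalone_vid(db.dp);
	case DSA_DB_BRIDGE:	/* bridged port: one VID per bridge */
		return dsa_tag_8021q_bridge_vid(db.bridge.num);
	default:
		return 0;	/* other databases are not offloaded */
	}
}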
*/ - if (!dsa_port_is_vlan_filtering(dp)) + if (vid_is_dsa_8021q(l2_lookup.vlanid)) l2_lookup.vlanid = 0; rc = cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data); if (rc) @@ -1885,7 +1894,15 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port, static void sja1105_fast_age(struct dsa_switch *ds, int port) { + struct dsa_port *dp = dsa_to_port(ds, port); struct sja1105_private *priv = ds->priv; + struct dsa_db db = { + .type = DSA_DB_BRIDGE, + .bridge = { + .dev = dsa_port_bridge_dev_get(dp), + .num = dsa_port_bridge_num_get(dp), + }, + }; int i; for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) { @@ -1913,7 +1930,7 @@ static void sja1105_fast_age(struct dsa_switch *ds, int port) u64_to_ether_addr(l2_lookup.macaddr, macaddr); - rc = sja1105_fdb_del(ds, port, macaddr, l2_lookup.vlanid); + rc = sja1105_fdb_del(ds, port, macaddr, l2_lookup.vlanid, db); if (rc) { dev_err(ds->dev, "Failed to delete FDB entry %pM vid %lld: %pe\n", @@ -1924,15 +1941,17 @@ static void sja1105_fast_age(struct dsa_switch *ds, int port) } static int sja1105_mdb_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_mdb *mdb) + const struct switchdev_obj_port_mdb *mdb, + struct dsa_db db) { - return sja1105_fdb_add(ds, port, mdb->addr, mdb->vid); + return sja1105_fdb_add(ds, port, mdb->addr, mdb->vid, db); } static int sja1105_mdb_del(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_mdb *mdb) + const struct switchdev_obj_port_mdb *mdb, + struct dsa_db db) { - return sja1105_fdb_del(ds, port, mdb->addr, mdb->vid); + return sja1105_fdb_del(ds, port, mdb->addr, mdb->vid, db); } /* Common function for unicast and broadcast flood configuration. @@ -2075,7 +2094,8 @@ static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port, static int sja1105_bridge_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge, - bool *tx_fwd_offload) + bool *tx_fwd_offload, + struct netlink_ext_ack *extack) { int rc; @@ -2083,7 +2103,7 @@ static int sja1105_bridge_join(struct dsa_switch *ds, int port, if (rc) return rc; - rc = dsa_tag_8021q_bridge_tx_fwd_offload(ds, port, bridge); + rc = dsa_tag_8021q_bridge_join(ds, port, bridge); if (rc) { sja1105_bridge_member(ds, port, bridge, false); return rc; @@ -2097,7 +2117,7 @@ static int sja1105_bridge_join(struct dsa_switch *ds, int port, static void sja1105_bridge_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge) { - dsa_tag_8021q_bridge_tx_fwd_unoffload(ds, port, bridge); + dsa_tag_8021q_bridge_leave(ds, port, bridge); sja1105_bridge_member(ds, port, bridge, false); } @@ -2357,7 +2377,6 @@ sja1105_get_tag_protocol(struct dsa_switch *ds, int port, int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled, struct netlink_ext_ack *extack) { - struct sja1105_l2_lookup_params_entry *l2_lookup_params; struct sja1105_general_params_entry *general_params; struct sja1105_private *priv = ds->priv; struct sja1105_table *table; @@ -2395,28 +2414,6 @@ int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled, general_params->incl_srcpt1 = enabled; general_params->incl_srcpt0 = enabled; - /* VLAN filtering => independent VLAN learning. - * No VLAN filtering (or best effort) => shared VLAN learning. - * - * In shared VLAN learning mode, untagged traffic still gets - * pvid-tagged, and the FDB table gets populated with entries - * containing the "real" (pvid or from VLAN tag) VLAN ID. 
- * However the switch performs a masked L2 lookup in the FDB, - * effectively only looking up a frame's DMAC (and not VID) for the - * forwarding decision. - * - * This is extremely convenient for us, because in modes with - * vlan_filtering=0, dsa_8021q actually installs unique pvid's into - * each front panel port. This is good for identification but breaks - * learning badly - the VID of the learnt FDB entry is unique, aka - * no frames coming from any other port are going to have it. So - * for forwarding purposes, this is as though learning was broken - * (all frames get flooded). - */ - table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS]; - l2_lookup_params = table->entries; - l2_lookup_params->shared_learn = !enabled; - for (port = 0; port < ds->num_ports; port++) { if (dsa_is_unused_port(ds, port)) continue; @@ -2525,7 +2522,7 @@ static int sja1105_bridge_vlan_add(struct dsa_switch *ds, int port, */ if (vid_is_dsa_8021q(vlan->vid)) { NL_SET_ERR_MSG_MOD(extack, - "Range 1024-3071 reserved for dsa_8021q operation"); + "Range 3072-4095 reserved for dsa_8021q operation"); return -EBUSY; } @@ -3102,6 +3099,7 @@ static int sja1105_setup(struct dsa_switch *ds) */ ds->vlan_filtering_is_global = true; ds->untag_bridge_pvid = true; + ds->fdb_isolation = true; /* tag_8021q has 3 bits for the VBID, and the value 0 is reserved */ ds->max_num_bridges = 7; @@ -3152,8 +3150,8 @@ static const struct dsa_switch_ops sja1105_switch_ops = { .set_ageing_time = sja1105_set_ageing_time, .port_change_mtu = sja1105_change_mtu, .port_max_mtu = sja1105_get_max_mtu, - .phylink_validate = sja1105_phylink_validate, - .phylink_mac_config = sja1105_mac_config, + .phylink_get_caps = sja1105_phylink_get_caps, + .phylink_mac_select_pcs = sja1105_mac_select_pcs, .phylink_mac_link_up = sja1105_mac_link_up, .phylink_mac_link_down = sja1105_mac_link_down, .get_strings = sja1105_get_strings, @@ -3346,18 +3344,16 @@ static int sja1105_probe(struct spi_device *spi) return dsa_register_switch(priv->ds); } -static int sja1105_remove(struct spi_device *spi) +static void sja1105_remove(struct spi_device *spi) { struct sja1105_private *priv = spi_get_drvdata(spi); if (!priv) - return 0; + return; dsa_unregister_switch(priv->ds); spi_set_drvdata(spi, NULL); - - return 0; } static void sja1105_shutdown(struct spi_device *spi) diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.c b/drivers/net/dsa/sja1105/sja1105_ptp.c index be3068a935af..30fb2cc40164 100644 --- a/drivers/net/dsa/sja1105/sja1105_ptp.c +++ b/drivers/net/dsa/sja1105/sja1105_ptp.c @@ -399,7 +399,7 @@ static long sja1105_rxtstamp_work(struct ptp_clock_info *ptp) ts = sja1105_tstamp_reconstruct(ds, ticks, ts); shwt->hwtstamp = ns_to_ktime(sja1105_ticks_to_ns(ts)); - netif_rx_ni(skb); + netif_rx(skb); } if (ptp_data->extts_enabled) diff --git a/drivers/net/dsa/sja1105/sja1105_vl.c b/drivers/net/dsa/sja1105/sja1105_vl.c index f5dca6a9b0f9..b7e95d60a6e4 100644 --- a/drivers/net/dsa/sja1105/sja1105_vl.c +++ b/drivers/net/dsa/sja1105/sja1105_vl.c @@ -296,6 +296,19 @@ static bool sja1105_vl_key_lower(struct sja1105_vl_lookup_entry *a, return false; } +/* FIXME: this should change when the bridge upper of the port changes. 
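The corrected extack message a few hunks below reflects where tag_8021q VIDs now live: 3 VBID bits plus the standalone VIDs occupy the top of the 12-bit VLAN space. As a sanity check, the reservation test amounts to the following (illustrative only; the real helper lives in the DSA core):

#include <linux/types.h>

static bool example_vid_is_dsa_8021q(u16 vid)
{
	/* 3072-4095 is reserved for dsa_8021q operation, replacing the
	 * old 1024-3071 window quoted in the stale error message.
	 */
	return vid >= 3072 && vid <= 4095;
}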
*/ +static u16 sja1105_port_get_tag_8021q_vid(struct dsa_port *dp) +{ + unsigned long bridge_num; + + if (!dp->bridge) + return dsa_tag_8021q_standalone_vid(dp); + + bridge_num = dsa_port_bridge_num_get(dp); + + return dsa_tag_8021q_bridge_vid(bridge_num); +} + static int sja1105_init_virtual_links(struct sja1105_private *priv, struct netlink_ext_ack *extack) { @@ -394,8 +407,9 @@ static int sja1105_init_virtual_links(struct sja1105_private *priv, vl_lookup[k].vlanid = rule->key.vl.vid; vl_lookup[k].vlanprior = rule->key.vl.pcp; } else { + /* FIXME */ struct dsa_port *dp = dsa_to_port(priv->ds, port); - u16 vid = dsa_tag_8021q_rx_vid(dp); + u16 vid = sja1105_port_get_tag_8021q_vid(dp); vl_lookup[k].vlanid = vid; vl_lookup[k].vlanprior = 0; diff --git a/drivers/net/dsa/vitesse-vsc73xx-spi.c b/drivers/net/dsa/vitesse-vsc73xx-spi.c index 645398901e05..3110895358d8 100644 --- a/drivers/net/dsa/vitesse-vsc73xx-spi.c +++ b/drivers/net/dsa/vitesse-vsc73xx-spi.c @@ -159,18 +159,16 @@ static int vsc73xx_spi_probe(struct spi_device *spi) return vsc73xx_probe(&vsc_spi->vsc); } -static int vsc73xx_spi_remove(struct spi_device *spi) +static void vsc73xx_spi_remove(struct spi_device *spi) { struct vsc73xx_spi *vsc_spi = spi_get_drvdata(spi); if (!vsc_spi) - return 0; + return; vsc73xx_remove(&vsc_spi->vsc); spi_set_drvdata(spi, NULL); - - return 0; } static void vsc73xx_spi_shutdown(struct spi_device *spi) diff --git a/drivers/net/dsa/xrs700x/xrs700x.c b/drivers/net/dsa/xrs700x/xrs700x.c index bc06fe6bac6b..3887ed33c5fe 100644 --- a/drivers/net/dsa/xrs700x/xrs700x.c +++ b/drivers/net/dsa/xrs700x/xrs700x.c @@ -534,7 +534,8 @@ static int xrs700x_bridge_common(struct dsa_switch *ds, int port, } static int xrs700x_bridge_join(struct dsa_switch *ds, int port, - struct dsa_bridge bridge, bool *tx_fwd_offload) + struct dsa_bridge bridge, bool *tx_fwd_offload, + struct netlink_ext_ack *extack) { return xrs700x_bridge_common(ds, port, bridge, true); } diff --git a/drivers/net/ethernet/8390/mcf8390.c b/drivers/net/ethernet/8390/mcf8390.c index e320cccba61a..90cd7bdf06f5 100644 --- a/drivers/net/ethernet/8390/mcf8390.c +++ b/drivers/net/ethernet/8390/mcf8390.c @@ -405,12 +405,12 @@ static int mcf8390_init(struct net_device *dev) static int mcf8390_probe(struct platform_device *pdev) { struct net_device *dev; - struct resource *mem, *irq; + struct resource *mem; resource_size_t msize; - int ret; + int ret, irq; - irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (irq == NULL) { + irq = platform_get_irq(pdev, 0); + if (irq < 0) { dev_err(&pdev->dev, "no IRQ specified?\n"); return -ENXIO; } @@ -433,7 +433,7 @@ static int mcf8390_probe(struct platform_device *pdev) SET_NETDEV_DEV(dev, &pdev->dev); platform_set_drvdata(pdev, dev); - dev->irq = irq->start; + dev->irq = irq; dev->base_addr = mem->start; ret = mcf8390_init(dev); diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index db3ec4768159..bd4cb9d7c35d 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -78,6 +78,7 @@ source "drivers/net/ethernet/ezchip/Kconfig" source "drivers/net/ethernet/faraday/Kconfig" source "drivers/net/ethernet/freescale/Kconfig" source "drivers/net/ethernet/fujitsu/Kconfig" +source "drivers/net/ethernet/fungible/Kconfig" source "drivers/net/ethernet/google/Kconfig" source "drivers/net/ethernet/hisilicon/Kconfig" source "drivers/net/ethernet/huawei/Kconfig" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 8a87c1083d1d..8ef43e0c33c0 100644 --- 
a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -41,6 +41,7 @@ obj-$(CONFIG_NET_VENDOR_EZCHIP) += ezchip/ obj-$(CONFIG_NET_VENDOR_FARADAY) += faraday/ obj-$(CONFIG_NET_VENDOR_FREESCALE) += freescale/ obj-$(CONFIG_NET_VENDOR_FUJITSU) += fujitsu/ +obj-$(CONFIG_NET_VENDOR_FUNGIBLE) += fungible/ obj-$(CONFIG_NET_VENDOR_GOOGLE) += google/ obj-$(CONFIG_NET_VENDOR_HISILICON) += hisilicon/ obj-$(CONFIG_NET_VENDOR_HUAWEI) += huawei/ diff --git a/drivers/net/ethernet/altera/altera_sgdma.c b/drivers/net/ethernet/altera/altera_sgdma.c index db97170da8c7..7f247ccbe6ba 100644 --- a/drivers/net/ethernet/altera/altera_sgdma.c +++ b/drivers/net/ethernet/altera/altera_sgdma.c @@ -513,7 +513,7 @@ static int sgdma_txbusy(struct altera_tse_private *priv) { int delay = 0; - /* if DMA is busy, wait for current transactino to finish */ + /* if DMA is busy, wait for current transaction to finish */ while ((csrrd32(priv->tx_dma_csr, sgdma_csroffs(status)) & SGDMA_STSREG_BUSY) && (delay++ < 100)) udelay(1); diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index 993b2fb42961..a3816264c35c 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -72,7 +72,7 @@ MODULE_PARM_DESC(dma_tx_num, "Number of descriptors in the TX list"); */ #define ALTERA_RXDMABUFFER_SIZE 2048 -/* Allow network stack to resume queueing packets after we've +/* Allow network stack to resume queuing packets after we've * finished transmitting at least 1/4 of the packets in the queue. */ #define TSE_TX_THRESH(x) (x->tx_ring_size / 4) @@ -390,7 +390,7 @@ static int tse_rx(struct altera_tse_private *priv, int limit) "RCV pktstatus %08X pktlength %08X\n", pktstatus, pktlength); - /* DMA trasfer from TSE starts with 2 aditional bytes for + /* DMA transfer from TSE starts with 2 additional bytes for * IP payload alignment. Status returned by get_rx_status() * contains DMA transfer length. Packet is 2 bytes shorter. 
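The mcf8390 conversion a few hunks up (and the bcm63xx_enet one further below) follow the same recipe: fetch interrupts with platform_get_irq(), which performs the irqdomain mapping and can return -EPROBE_DEFER, instead of peeking at a raw IORESOURCE_IRQ resource. The probe-time shape, reduced to a hypothetical minimal driver:

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t example_isr(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int example_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;	/* may be -EPROBE_DEFER; pass it through */

	return devm_request_irq(&pdev->dev, irq, example_isr, 0,
				dev_name(&pdev->dev), NULL);
}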
*/ @@ -1044,7 +1044,7 @@ static void altera_tse_set_mcfilterall(struct net_device *dev) csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + i * 4); } -/* Set or clear the multicast filter for this adaptor +/* Set or clear the multicast filter for this adapter */ static void tse_set_rx_mode_hashfilter(struct net_device *dev) { @@ -1064,7 +1064,7 @@ static void tse_set_rx_mode_hashfilter(struct net_device *dev) spin_unlock(&priv->mac_cfg_lock); } -/* Set or clear the multicast filter for this adaptor +/* Set or clear the multicast filter for this adapter */ static void tse_set_rx_mode(struct net_device *dev) { diff --git a/drivers/net/ethernet/arc/emac_mdio.c b/drivers/net/ethernet/arc/emac_mdio.c index 9acf589b1178..87f40c2ba904 100644 --- a/drivers/net/ethernet/arc/emac_mdio.c +++ b/drivers/net/ethernet/arc/emac_mdio.c @@ -132,6 +132,7 @@ int arc_mdio_probe(struct arc_emac_priv *priv) { struct arc_emac_mdio_bus_data *data = &priv->bus_data; struct device_node *np = priv->dev->of_node; + const char *name = "Synopsys MII Bus"; struct mii_bus *bus; int error; @@ -142,7 +143,7 @@ int arc_mdio_probe(struct arc_emac_priv *priv) priv->bus = bus; bus->priv = priv; bus->parent = priv->dev; - bus->name = "Synopsys MII Bus"; + bus->name = name; bus->read = &arc_mdio_read; bus->write = &arc_mdio_write; bus->reset = &arc_mdio_reset; @@ -167,7 +168,7 @@ int arc_mdio_probe(struct arc_emac_priv *priv) if (error) { mdiobus_free(bus); return dev_err_probe(priv->dev, error, - "cannot register MDIO bus %s\n", bus->name); + "cannot register MDIO bus %s\n", name); } return 0; diff --git a/drivers/net/ethernet/asix/ax88796c_main.c b/drivers/net/ethernet/asix/ax88796c_main.c index e7a9f9863258..6ba5b024a7be 100644 --- a/drivers/net/ethernet/asix/ax88796c_main.c +++ b/drivers/net/ethernet/asix/ax88796c_main.c @@ -433,7 +433,7 @@ ax88796c_skb_return(struct ax88796c_device *ax_local, netif_info(ax_local, rx_status, ndev, "< rx, len %zu, type 0x%x\n", skb->len + sizeof(struct ethhdr), skb->protocol); - status = netif_rx_ni(skb); + status = netif_rx(skb); if (status != NET_RX_SUCCESS && net_ratelimit()) netif_info(ax_local, rx_err, ndev, "netif_rx status %d\n", status); @@ -1102,7 +1102,7 @@ err: return ret; } -static int ax88796c_remove(struct spi_device *spi) +static void ax88796c_remove(struct spi_device *spi) { struct ax88796c_device *ax_local = dev_get_drvdata(&spi->dev); struct net_device *ndev = ax_local->ndev; @@ -1112,8 +1112,6 @@ static int ax88796c_remove(struct spi_device *spi) netif_info(ax_local, probe, ndev, "removing network device %s %s\n", dev_driver_string(&spi->dev), dev_name(&spi->dev)); - - return 0; } #ifdef CONFIG_OF diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index da595242bc13..f50604f3e541 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -900,7 +900,7 @@ static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter, atl1c_clean_buffer(pdev, buffer_info); } - netdev_reset_queue(adapter->netdev); + netdev_tx_reset_queue(netdev_get_tx_queue(adapter->netdev, queue)); /* Zero out Tx-buffers */ memset(tpd_ring->desc, 0, sizeof(struct atl1c_tpd_desc) * diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index b04e423c446a..c1b97e8c55ef 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -1716,17 +1716,17 @@ static int bcm_enet_probe(struct 
platform_device *pdev) struct bcm_enet_priv *priv; struct net_device *dev; struct bcm63xx_enet_platform_data *pd; - struct resource *res_irq, *res_irq_rx, *res_irq_tx; + int irq, irq_rx, irq_tx; struct mii_bus *bus; int i, ret; if (!bcm_enet_shared_base[0]) return -EPROBE_DEFER; - res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - res_irq_rx = platform_get_resource(pdev, IORESOURCE_IRQ, 1); - res_irq_tx = platform_get_resource(pdev, IORESOURCE_IRQ, 2); - if (!res_irq || !res_irq_rx || !res_irq_tx) + irq = platform_get_irq(pdev, 0); + irq_rx = platform_get_irq(pdev, 1); + irq_tx = platform_get_irq(pdev, 2); + if (irq < 0 || irq_rx < 0 || irq_tx < 0) return -ENODEV; dev = alloc_etherdev(sizeof(*priv)); @@ -1748,9 +1748,9 @@ static int bcm_enet_probe(struct platform_device *pdev) goto out; } - dev->irq = priv->irq = res_irq->start; - priv->irq_rx = res_irq_rx->start; - priv->irq_tx = res_irq_tx->start; + dev->irq = priv->irq = irq; + priv->irq_rx = irq_rx; + priv->irq_tx = irq_tx; priv->mac_clk = devm_clk_get(&pdev->dev, "enet"); if (IS_ERR(priv->mac_clk)) { diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c index c6412c523637..b4381cd41979 100644 --- a/drivers/net/ethernet/broadcom/bgmac-platform.c +++ b/drivers/net/ethernet/broadcom/bgmac-platform.c @@ -172,6 +172,7 @@ static int bgmac_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct bgmac *bgmac; + struct resource *regs; int ret; bgmac = bgmac_alloc(&pdev->dev); @@ -208,15 +209,23 @@ static int bgmac_probe(struct platform_device *pdev) if (IS_ERR(bgmac->plat.base)) return PTR_ERR(bgmac->plat.base); - bgmac->plat.idm_base = devm_platform_ioremap_resource_byname(pdev, "idm_base"); - if (IS_ERR(bgmac->plat.idm_base)) - return PTR_ERR(bgmac->plat.idm_base); - else + /* The idm_base resource is optional for some platforms */ + regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "idm_base"); + if (regs) { + bgmac->plat.idm_base = devm_ioremap_resource(&pdev->dev, regs); + if (IS_ERR(bgmac->plat.idm_base)) + return PTR_ERR(bgmac->plat.idm_base); bgmac->feature_flags &= ~BGMAC_FEAT_IDM_MASK; + } - bgmac->plat.nicpm_base = devm_platform_ioremap_resource_byname(pdev, "nicpm_base"); - if (IS_ERR(bgmac->plat.nicpm_base)) - return PTR_ERR(bgmac->plat.nicpm_base); + /* The nicpm_base resource is optional for some platforms */ + regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nicpm_base"); + if (regs) { + bgmac->plat.nicpm_base = devm_ioremap_resource(&pdev->dev, + regs); + if (IS_ERR(bgmac->plat.nicpm_base)) + return PTR_ERR(bgmac->plat.nicpm_base); + } bgmac->read = platform_bgmac_read; bgmac->write = platform_bgmac_write; diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index e20aafeb4ca9..b97ed9b5f685 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -8216,7 +8216,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) rc = dma_set_coherent_mask(&pdev->dev, persist_dma_mask); if (rc) { dev_err(&pdev->dev, - "pci_set_consistent_dma_mask failed, aborting\n"); + "dma_set_coherent_mask failed, aborting\n"); goto err_out_unmap; } } else if ((rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) != 0) { diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 774c1f1a57c3..eedb48d945ed 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ 
b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -100,6 +100,9 @@ MODULE_LICENSE("GPL"); MODULE_FIRMWARE(FW_FILE_NAME_E1); MODULE_FIRMWARE(FW_FILE_NAME_E1H); MODULE_FIRMWARE(FW_FILE_NAME_E2); +MODULE_FIRMWARE(FW_FILE_NAME_E1_V15); +MODULE_FIRMWARE(FW_FILE_NAME_E1H_V15); +MODULE_FIRMWARE(FW_FILE_NAME_E2_V15); int bnx2x_num_queues; module_param_named(num_queues, bnx2x_num_queues, int, 0444); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index c313221348c5..2de02950086f 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -4776,8 +4776,10 @@ static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id) return rc; req->vnic_id = cpu_to_le32(vnic->fw_vnic_id); - req->num_mc_entries = cpu_to_le32(vnic->mc_list_count); - req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping); + if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) { + req->num_mc_entries = cpu_to_le32(vnic->mc_list_count); + req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping); + } req->mask = cpu_to_le32(vnic->rx_mask); return hwrm_req_send_silent(bp, req); } @@ -7820,6 +7822,19 @@ static int bnxt_map_fw_health_regs(struct bnxt *bp) return 0; } +static void bnxt_remap_fw_health_regs(struct bnxt *bp) +{ + if (!bp->fw_health) + return; + + if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) { + bp->fw_health->status_reliable = true; + bp->fw_health->resets_reliable = true; + } else { + bnxt_try_map_fw_health_reg(bp); + } +} + static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp) { struct bnxt_fw_health *fw_health = bp->fw_health; @@ -8672,6 +8687,9 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) vnic->uc_filter_count = 1; vnic->rx_mask = 0; + if (test_bit(BNXT_STATE_HALF_OPEN, &bp->state)) + goto skip_rx_mask; + if (bp->dev->flags & IFF_BROADCAST) vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; @@ -8681,7 +8699,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) if (bp->dev->flags & IFF_ALLMULTI) { vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; vnic->mc_list_count = 0; - } else { + } else if (bp->dev->flags & IFF_MULTICAST) { u32 mask = 0; bnxt_mc_list_updated(bp, &mask); @@ -8692,6 +8710,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) if (rc) goto err_out; +skip_rx_mask: rc = bnxt_hwrm_set_coal(bp); if (rc) netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n", @@ -9281,7 +9300,7 @@ void bnxt_tx_enable(struct bnxt *bp) /* Make sure napi polls see @dev_state change */ synchronize_net(); netif_tx_wake_all_queues(bp->dev); - if (bp->link_info.link_up) + if (BNXT_LINK_IS_UP(bp)) netif_carrier_on(bp->dev); } @@ -9311,7 +9330,7 @@ static char *bnxt_report_fec(struct bnxt_link_info *link_info) void bnxt_report_link(struct bnxt *bp) { - if (bp->link_info.link_up) { + if (BNXT_LINK_IS_UP(bp)) { const char *signal = ""; const char *flow_ctrl; const char *duplex; @@ -9397,7 +9416,7 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp) if (rc) goto hwrm_phy_qcaps_exit; - bp->phy_flags = resp->flags; + bp->phy_flags = resp->flags | (le16_to_cpu(resp->flags2) << 8); if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) { struct ethtool_eee *eee = &bp->eee; u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode); @@ -9447,7 +9466,7 @@ int bnxt_update_link(struct bnxt *bp, bool chng_link_state) struct bnxt_link_info *link_info = &bp->link_info; struct hwrm_port_phy_qcfg_output *resp; struct hwrm_port_phy_qcfg_input *req; - u8 link_up = link_info->link_up; + 
u8 link_state = link_info->link_state; bool support_changed = false; int rc; @@ -9548,14 +9567,14 @@ int bnxt_update_link(struct bnxt *bp, bool chng_link_state) /* TODO: need to add more logic to report VF link */ if (chng_link_state) { if (link_info->phy_link_status == BNXT_LINK_LINK) - link_info->link_up = 1; + link_info->link_state = BNXT_LINK_STATE_UP; else - link_info->link_up = 0; - if (link_up != link_info->link_up) + link_info->link_state = BNXT_LINK_STATE_DOWN; + if (link_state != link_info->link_state) bnxt_report_link(bp); } else { - /* alwasy link down if not require to update link state */ - link_info->link_up = 0; + /* always link down if not required to update link state */ + link_info->link_state = BNXT_LINK_STATE_DOWN; } hwrm_req_drop(bp, req); @@ -9755,7 +9774,18 @@ static int bnxt_hwrm_shutdown_link(struct bnxt *bp) return rc; req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN); - return hwrm_req_send(bp, req); + rc = hwrm_req_send(bp, req); + if (!rc) { + mutex_lock(&bp->link_lock); + /* Device is not obliged to link down in certain scenarios, even + * when forced. Setting the state unknown is consistent with + * driver startup and will force link state to be reported + * during subsequent open based on PORT_PHY_QCFG. + */ + bp->link_info.link_state = BNXT_LINK_STATE_UNKNOWN; + mutex_unlock(&bp->link_lock); + } + return rc; } static int bnxt_fw_reset_via_optee(struct bnxt *bp) @@ -9883,8 +9913,8 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) resc_reinit = true; if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE) fw_reset = true; - else if (bp->fw_health && !bp->fw_health->status_reliable) - bnxt_try_map_fw_health_reg(bp); + else + bnxt_remap_fw_health_regs(bp); if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) { netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n"); @@ -10186,7 +10216,7 @@ static int bnxt_update_phy_setting(struct bnxt *bp) /* The last close may have shut down the link, so we need to call * PHY_CFG to bring it back up.
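The switch from a link_up bool to a tri-state is the subtle part of this series: after a forced shutdown the device may legitimately keep the link up, so the driver records "unknown" and lets the next PORT_PHY_QCFG re-derive, and unconditionally re-report, the real state. A compressed illustration (the example_ names are not the driver's):

#include <linux/printk.h>

enum example_link {
	EXAMPLE_LINK_UNKNOWN,	/* e.g. after a forced shutdown */
	EXAMPLE_LINK_DOWN,
	EXAMPLE_LINK_UP,
};

static void example_update(enum example_link *state, bool phy_up)
{
	enum example_link new_state = phy_up ? EXAMPLE_LINK_UP
					     : EXAMPLE_LINK_DOWN;

	/* UNKNOWN compares unequal to both UP and DOWN, so the first
	 * query after open always triggers a fresh link report.
	 */
	if (*state != new_state)
		pr_info("link is %s\n", phy_up ? "up" : "down");

	*state = new_state;
}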
*/ - if (!bp->link_info.link_up) + if (!BNXT_LINK_IS_UP(bp)) update_link = true; if (!bnxt_eee_config_ok(bp)) @@ -10364,13 +10394,15 @@ int bnxt_half_open_nic(struct bnxt *bp) goto half_open_err; } - rc = bnxt_alloc_mem(bp, false); + rc = bnxt_alloc_mem(bp, true); if (rc) { netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); goto half_open_err; } - rc = bnxt_init_nic(bp, false); + set_bit(BNXT_STATE_HALF_OPEN, &bp->state); + rc = bnxt_init_nic(bp, true); if (rc) { + clear_bit(BNXT_STATE_HALF_OPEN, &bp->state); netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); goto half_open_err; } @@ -10378,7 +10410,7 @@ int bnxt_half_open_nic(struct bnxt *bp) half_open_err: bnxt_free_skbs(bp); - bnxt_free_mem(bp, false); + bnxt_free_mem(bp, true); dev_close(bp->dev); return rc; } @@ -10388,9 +10420,10 @@ half_open_err: */ void bnxt_half_close_nic(struct bnxt *bp) { - bnxt_hwrm_resource_free(bp, false, false); + bnxt_hwrm_resource_free(bp, false, true); bnxt_free_skbs(bp); - bnxt_free_mem(bp, false); + bnxt_free_mem(bp, true); + clear_bit(BNXT_STATE_HALF_OPEN, &bp->state); } void bnxt_reenable_sriov(struct bnxt *bp) @@ -10806,7 +10839,7 @@ static void bnxt_set_rx_mode(struct net_device *dev) if (dev->flags & IFF_ALLMULTI) { mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; vnic->mc_list_count = 0; - } else { + } else if (dev->flags & IFF_MULTICAST) { mc_update = bnxt_mc_list_updated(bp, &mask); } @@ -10883,9 +10916,10 @@ skip_uc: !bnxt_promisc_ok(bp)) vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); - if (rc && vnic->mc_list_count) { + if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) { netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n", rc); + vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST; vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; vnic->mc_list_count = 0; rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); @@ -11414,7 +11448,7 @@ static void bnxt_timer(struct timer_list *t) if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) bnxt_fw_health_check(bp); - if (bp->link_info.link_up && bp->stats_coal_ticks) { + if (BNXT_LINK_IS_UP(bp) && bp->stats_coal_ticks) { set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event); bnxt_queue_sp_work(bp); } @@ -12115,11 +12149,6 @@ int bnxt_fw_init_one(struct bnxt *bp) if (rc) return rc; - /* In case fw capabilities have changed, destroy the unneeded - * reporters and create newly capable ones. 
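The multicast handling above also gains a graceful degradation path: if firmware rejects the exact MC list, the driver clears the MCAST bit, falls back to ALL_MCAST and retries, rather than silently losing multicast reception. The shape of that fallback, with hypothetical example_ names standing in for the bnxt internals:

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/types.h>

#define RX_MASK_MCAST     BIT(0)	/* hypothetical mask bits */
#define RX_MASK_ALL_MCAST BIT(1)

struct example_nic {
	u32 rx_mask;
	u32 mc_list_count;
};

/* Stand-in for the firmware call; "fails" on exact lists here so the
 * sketch exercises the fallback path.
 */
static int example_hw_set_rx_mask(struct example_nic *nic)
{
	return (nic->rx_mask & RX_MASK_MCAST) ? -EINVAL : 0;
}

static int example_set_multicast(struct example_nic *nic)
{
	int rc = example_hw_set_rx_mask(nic);

	if (rc && (nic->rx_mask & RX_MASK_MCAST)) {
		nic->rx_mask &= ~RX_MASK_MCAST;	/* drop the exact list */
		nic->rx_mask |= RX_MASK_ALL_MCAST;
		nic->mc_list_count = 0;
		rc = example_hw_set_rx_mask(nic);	/* retry degraded */
	}
	return rc;
}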
- */ - bnxt_dl_fw_reporters_destroy(bp, false); - bnxt_dl_fw_reporters_create(bp); bnxt_fw_init_one_p3(bp); return 0; } @@ -12948,7 +12977,7 @@ static void bnxt_remove_one(struct pci_dev *pdev) cancel_delayed_work_sync(&bp->fw_reset_task); bp->sp_event = 0; - bnxt_dl_fw_reporters_destroy(bp, true); + bnxt_dl_fw_reporters_destroy(bp); bnxt_dl_unregister(bp); bnxt_shutdown_tc(bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 4b023e35c765..447a9406b8a2 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1175,7 +1175,11 @@ struct bnxt_link_info { #define BNXT_PHY_STATE_ENABLED 0 #define BNXT_PHY_STATE_DISABLED 1 - u8 link_up; + u8 link_state; +#define BNXT_LINK_STATE_UNKNOWN 0 +#define BNXT_LINK_STATE_DOWN 1 +#define BNXT_LINK_STATE_UP 2 +#define BNXT_LINK_IS_UP(bp) ((bp)->link_info.link_state == BNXT_LINK_STATE_UP) u8 duplex; #define BNXT_LINK_DUPLEX_HALF PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF #define BNXT_LINK_DUPLEX_FULL PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL @@ -1921,6 +1925,7 @@ struct bnxt { #define BNXT_STATE_RECOVER 12 #define BNXT_STATE_FW_NON_FATAL_COND 13 #define BNXT_STATE_FW_ACTIVATE_RESET 14 +#define BNXT_STATE_HALF_OPEN 15 /* For offline ethtool tests */ #define BNXT_NO_FW_ACCESS(bp) \ (test_bit(BNXT_STATE_FW_FATAL_COND, &(bp)->state) || \ @@ -2099,8 +2104,8 @@ struct bnxt { u32 lpi_tmr_lo; u32 lpi_tmr_hi; - /* copied from flags in hwrm_port_phy_qcaps_output */ - u8 phy_flags; + /* copied from flags and flags2 in hwrm_port_phy_qcaps_output */ + u32 phy_flags; #define BNXT_PHY_FL_EEE_CAP PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED #define BNXT_PHY_FL_EXT_LPBK PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED #define BNXT_PHY_FL_AN_PHY_LPBK PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED @@ -2109,6 +2114,8 @@ struct bnxt { #define BNXT_PHY_FL_NO_PHY_LPBK PORT_PHY_QCAPS_RESP_FLAGS_LOCAL_LPBK_NOT_SUPPORTED #define BNXT_PHY_FL_FW_MANAGED_LKDN PORT_PHY_QCAPS_RESP_FLAGS_FW_MANAGED_LINK_DOWN #define BNXT_PHY_FL_NO_FCS PORT_PHY_QCAPS_RESP_FLAGS_NO_FCS +#define BNXT_PHY_FL_NO_PAUSE (PORT_PHY_QCAPS_RESP_FLAGS2_PAUSE_UNSUPPORTED << 8) +#define BNXT_PHY_FL_NO_PFC (PORT_PHY_QCAPS_RESP_FLAGS2_PFC_UNSUPPORTED << 8) u8 num_tests; struct bnxt_test_info *test_info; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c index 217ff597cdf2..caab3d626a2a 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c @@ -627,7 +627,8 @@ static int bnxt_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc) int rc; if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) || - !(bp->dcbx_cap & DCB_CAP_DCBX_HOST)) + !(bp->dcbx_cap & DCB_CAP_DCBX_HOST) || + (bp->phy_flags & BNXT_PHY_FL_NO_PAUSE)) return -EINVAL; if (!my_pfc) { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index 4da31b1b84f9..0c17f90d44a2 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c @@ -241,37 +241,37 @@ static const struct devlink_health_reporter_ops bnxt_dl_fw_reporter_ops = { .recover = bnxt_fw_recover, }; -void bnxt_dl_fw_reporters_create(struct bnxt *bp) +static struct devlink_health_reporter * +__bnxt_dl_reporter_create(struct bnxt *bp, + const struct devlink_health_reporter_ops *ops) { - struct bnxt_fw_health *health = bp->fw_health; - - if (!health || health->fw_reporter) - return; + struct 
devlink_health_reporter *reporter; - health->fw_reporter = - devlink_health_reporter_create(bp->dl, &bnxt_dl_fw_reporter_ops, - 0, bp); - if (IS_ERR(health->fw_reporter)) { - netdev_warn(bp->dev, "Failed to create FW health reporter, rc = %ld\n", - PTR_ERR(health->fw_reporter)); - health->fw_reporter = NULL; - bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; + reporter = devlink_health_reporter_create(bp->dl, ops, 0, bp); + if (IS_ERR(reporter)) { + netdev_warn(bp->dev, "Failed to create %s health reporter, rc = %ld\n", + ops->name, PTR_ERR(reporter)); + return NULL; } + + return reporter; } -void bnxt_dl_fw_reporters_destroy(struct bnxt *bp, bool all) +void bnxt_dl_fw_reporters_create(struct bnxt *bp) { - struct bnxt_fw_health *health = bp->fw_health; + struct bnxt_fw_health *fw_health = bp->fw_health; - if (!health) - return; + if (fw_health && !fw_health->fw_reporter) + fw_health->fw_reporter = __bnxt_dl_reporter_create(bp, &bnxt_dl_fw_reporter_ops); +} - if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) && !all) - return; +void bnxt_dl_fw_reporters_destroy(struct bnxt *bp) +{ + struct bnxt_fw_health *fw_health = bp->fw_health; - if (health->fw_reporter) { - devlink_health_reporter_destroy(health->fw_reporter); - health->fw_reporter = NULL; + if (fw_health && fw_health->fw_reporter) { + devlink_health_reporter_destroy(fw_health->fw_reporter); + fw_health->fw_reporter = NULL; } } @@ -367,6 +367,16 @@ bnxt_dl_livepatch_report_err(struct bnxt *bp, struct netlink_ext_ack *extack, } } +/* Live patch status in NVM */ +#define BNXT_LIVEPATCH_NOT_INSTALLED 0 +#define BNXT_LIVEPATCH_INSTALLED FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL +#define BNXT_LIVEPATCH_REMOVED FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE +#define BNXT_LIVEPATCH_MASK (FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL | \ + FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE) +#define BNXT_LIVEPATCH_ACTIVATED BNXT_LIVEPATCH_MASK + +#define BNXT_LIVEPATCH_STATE(flags) ((flags) & BNXT_LIVEPATCH_MASK) + static int bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack) { @@ -374,8 +384,9 @@ bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack) struct hwrm_fw_livepatch_query_input *query_req; struct hwrm_fw_livepatch_output *patch_resp; struct hwrm_fw_livepatch_input *patch_req; + u16 flags, live_patch_state; + bool activated = false; u32 installed = 0; - u16 flags; u8 target; int rc; @@ -394,7 +405,6 @@ bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack) hwrm_req_drop(bp, query_req); return rc; } - patch_req->opcode = FW_LIVEPATCH_REQ_OPCODE_ACTIVATE; patch_req->loadtype = FW_LIVEPATCH_REQ_LOADTYPE_NVM_INSTALL; patch_resp = hwrm_req_hold(bp, patch_req); @@ -407,12 +417,20 @@ bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack) } flags = le16_to_cpu(query_resp->status_flags); - if (~flags & FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL) + live_patch_state = BNXT_LIVEPATCH_STATE(flags); + + if (live_patch_state == BNXT_LIVEPATCH_NOT_INSTALLED) continue; - if ((flags & FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE) && - !strncmp(query_resp->active_ver, query_resp->install_ver, - sizeof(query_resp->active_ver))) + + if (live_patch_state == BNXT_LIVEPATCH_ACTIVATED) { + activated = true; continue; + } + + if (live_patch_state == BNXT_LIVEPATCH_INSTALLED) + patch_req->opcode = FW_LIVEPATCH_REQ_OPCODE_ACTIVATE; + else if (live_patch_state == BNXT_LIVEPATCH_REMOVED) + patch_req->opcode = FW_LIVEPATCH_REQ_OPCODE_DEACTIVATE; patch_req->fw_target = target; rc = 
hwrm_req_send(bp, patch_req); @@ -424,8 +442,13 @@ bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack) } if (!rc && !installed) { - NL_SET_ERR_MSG_MOD(extack, "No live patches found"); - rc = -ENOENT; + if (activated) { + NL_SET_ERR_MSG_MOD(extack, "Live patch already activated"); + rc = -EEXIST; + } else { + NL_SET_ERR_MSG_MOD(extack, "No live patches found"); + rc = -ENOENT; + } } hwrm_req_drop(bp, query_req); hwrm_req_drop(bp, patch_req); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h index a715458abc30..b8105065367b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h @@ -75,7 +75,7 @@ void bnxt_devlink_health_fw_report(struct bnxt *bp); void bnxt_dl_health_fw_status_update(struct bnxt *bp, bool healthy); void bnxt_dl_health_fw_recovery_done(struct bnxt *bp); void bnxt_dl_fw_reporters_create(struct bnxt *bp); -void bnxt_dl_fw_reporters_destroy(struct bnxt *bp, bool all); +void bnxt_dl_fw_reporters_destroy(struct bnxt *bp); int bnxt_dl_register(struct bnxt *bp); void bnxt_dl_unregister(struct bnxt *bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 5edbee92f5c4..22e965e18fbc 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -26,6 +26,7 @@ #include "bnxt_hsi.h" #include "bnxt.h" #include "bnxt_hwrm.h" +#include "bnxt_ulp.h" #include "bnxt_xdp.h" #include "bnxt_ptp.h" #include "bnxt_ethtool.h" @@ -1661,15 +1662,19 @@ static void bnxt_fw_to_ethtool_support_fec(struct bnxt_link_info *link_info, static void bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info, struct ethtool_link_ksettings *lk_ksettings) { + struct bnxt *bp = container_of(link_info, struct bnxt, link_info); u16 fw_speeds = link_info->support_speeds; BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, 0, lk_ksettings, supported); fw_speeds = link_info->support_pam4_speeds; BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, supported); - ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, Pause); - ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, - Asym_Pause); + if (!(bp->phy_flags & BNXT_PHY_FL_NO_PAUSE)) { + ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, + Pause); + ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, + Asym_Pause); + } if (link_info->support_auto_speeds || link_info->support_pam4_auto_speeds) @@ -1900,7 +1905,8 @@ static int bnxt_set_link_ksettings(struct net_device *dev, /* any change to autoneg will cause link change, therefore the * driver should put back the original pause setting in autoneg */ - set_pause = true; + if (!(bp->phy_flags & BNXT_PHY_FL_NO_PAUSE)) + set_pause = true; } else { u8 phy_type = link_info->phy_type; @@ -1972,6 +1978,9 @@ static int bnxt_get_fecparam(struct net_device *dev, case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE: fec->active_fec |= ETHTOOL_FEC_LLRS; break; + case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE: + fec->active_fec |= ETHTOOL_FEC_OFF; + break; } return 0; } @@ -2089,7 +2098,7 @@ static int bnxt_set_pauseparam(struct net_device *dev, struct bnxt *bp = netdev_priv(dev); struct bnxt_link_info *link_info = &bp->link_info; - if (!BNXT_PHY_CFG_ABLE(bp)) + if (!BNXT_PHY_CFG_ABLE(bp) || (bp->phy_flags & BNXT_PHY_FL_NO_PAUSE)) return -EOPNOTSUPP; mutex_lock(&bp->link_lock); @@ -2100,9 +2109,7 @@ static int 
bnxt_set_pauseparam(struct net_device *dev, } link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; - if (bp->hwrm_spec_code >= 0x10201) - link_info->req_flow_ctrl = - PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE; + link_info->req_flow_ctrl = 0; } else { /* when transition from auto pause to force pause, * force a link change @@ -2131,7 +2138,7 @@ static u32 bnxt_get_link(struct net_device *dev) struct bnxt *bp = netdev_priv(dev); /* TODO: handle MF, VF, driver close case */ - return bp->link_info.link_up; + return BNXT_LINK_IS_UP(bp); } int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp, @@ -2508,6 +2515,7 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware u8 *kmem = NULL; u32 modify_len; u32 item_len; + u8 cmd_err; u16 index; int rc; @@ -2591,6 +2599,8 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware } rc = hwrm_req_send_silent(bp, install); + if (!rc) + break; if (defrag_attempted) { /* We have tried to defragment already in the previous @@ -2599,15 +2609,24 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware break; } - if (rc && ((struct hwrm_err_output *)resp)->cmd_err == - NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) { + cmd_err = ((struct hwrm_err_output *)resp)->cmd_err; + + switch (cmd_err) { + case NVM_INSTALL_UPDATE_CMD_ERR_CODE_ANTI_ROLLBACK: + netdev_err(dev, "HWRM_NVM_INSTALL_UPDATE failure Anti-rollback detected\n"); + rc = -EALREADY; + break; + case NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR: install->flags = cpu_to_le16(NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG); rc = hwrm_req_send_silent(bp, install); + if (!rc) + break; + + cmd_err = ((struct hwrm_err_output *)resp)->cmd_err; - if (rc && ((struct hwrm_err_output *)resp)->cmd_err == - NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE) { + if (cmd_err == NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE) { /* FW has cleared NVM area, driver will create * UPDATE directory and try the flash again */ @@ -2617,11 +2636,13 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware BNX_DIR_TYPE_UPDATE, BNX_DIR_ORDINAL_FIRST, 0, 0, item_len, NULL, 0); - } else if (rc) { - netdev_err(dev, "HWRM_NVM_INSTALL_UPDATE failure rc :%x\n", rc); + if (!rc) + break; } - } else if (rc) { - netdev_err(dev, "HWRM_NVM_INSTALL_UPDATE failure rc :%x\n", rc); + fallthrough; + default: + netdev_err(dev, "HWRM_NVM_INSTALL_UPDATE failure rc :%x cmd_err :%x\n", + rc, cmd_err); } } while (defrag_attempted && !rc); @@ -3323,7 +3344,7 @@ static int bnxt_disable_an_for_lpbk(struct bnxt *bp, return rc; fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB; - if (bp->link_info.link_up) + if (BNXT_LINK_IS_UP(bp)) fw_speed = bp->link_info.link_speed; else if (fw_advertising & BNXT_LINK_SPEED_MSK_10GB) fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB; @@ -3457,7 +3478,7 @@ static int bnxt_run_loopback(struct bnxt *bp) if (!skb) return -ENOMEM; data = skb_put(skb, pkt_size); - eth_broadcast_addr(data); + ether_addr_copy(&data[i], bp->dev->dev_addr); i += ETH_ALEN; ether_addr_copy(&data[i], bp->dev->dev_addr); i += ETH_ALEN; @@ -3551,9 +3572,12 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest, if (!offline) { bnxt_run_fw_tests(bp, test_mask, &test_results); } else { - rc = bnxt_close_nic(bp, false, false); - if (rc) + bnxt_ulp_stop(bp); + rc = bnxt_close_nic(bp, true, false); + if (rc) { + bnxt_ulp_start(bp, rc); return; + } bnxt_run_fw_tests(bp, test_mask, &test_results); buf[BNXT_MACLPBK_TEST_IDX] = 1; @@ -3563,6 +3587,7 @@ static void 
bnxt_self_test(struct net_device *dev, struct ethtool_test *etest, if (rc) { bnxt_hwrm_mac_loopback(bp, false); etest->flags |= ETH_TEST_FL_FAILED; + bnxt_ulp_start(bp, rc); return; } if (bnxt_run_loopback(bp)) @@ -3588,7 +3613,8 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest, } bnxt_hwrm_phy_loopback(bp, false, false); bnxt_half_close_nic(bp); - rc = bnxt_open_nic(bp, false, true); + rc = bnxt_open_nic(bp, true, true); + bnxt_ulp_start(bp, rc); } if (rc || bnxt_test_irq(bp)) { buf[BNXT_IRQ_TEST_IDX] = 1; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c index 566c9487ef55..b01d42928a53 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c @@ -644,17 +644,23 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx) /* Last byte of resp contains valid bit */ valid = ((u8 *)ctx->resp) + len - 1; - for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) { + for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; ) { /* make sure we read from updated DMA memory */ dma_rmb(); if (*valid) break; - usleep_range(1, 5); + if (j < 10) { + udelay(1); + j++; + } else { + usleep_range(20, 30); + j += 20; + } } if (j >= HWRM_VALID_BIT_DELAY_USEC) { hwrm_err(bp, ctx, "Error (timeout: %u) msg {0x%x 0x%x} len:%d v:%d\n", - hwrm_total_timeout(i), req_type, + hwrm_total_timeout(i) + j, req_type, le16_to_cpu(ctx->req->seq_id), len, *valid); goto exit; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h index d52bd2d63aec..c98032e38188 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h @@ -90,7 +90,7 @@ static inline unsigned int hwrm_total_timeout(unsigned int n) } -#define HWRM_VALID_BIT_DELAY_USEC 150 +#define HWRM_VALID_BIT_DELAY_USEC 50000 static inline bool bnxt_cfa_hwrm_message(u16 req_type) { diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c index e31a5a397f11..f55d9d9c01a8 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c @@ -40,6 +40,13 @@ void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct bcmgenet_priv *priv = netdev_priv(dev); + struct device *kdev = &priv->pdev->dev; + + if (!device_can_wakeup(kdev)) { + wol->supported = 0; + wol->wolopts = 0; + return; + } wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER; wol->wolopts = priv->wolopts; diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 4c231159b562..800d5ced5800 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -1575,7 +1575,14 @@ static int macb_poll(struct napi_struct *napi, int budget) if (work_done < budget) { napi_complete_done(napi, work_done); - /* Packets received while interrupts were disabled */ + /* RSR bits only seem to propagate to raise interrupts when + * interrupts are enabled at the time, so if bits are already + * set due to packets received while interrupts were disabled, + * they will not cause another interrupt to be generated when + * interrupts are re-enabled. + * Check for this case here. This has been seen to happen + * around 30% of the time under heavy network load. 
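The reworked HWRM valid-bit wait above pairs a much larger budget (HWRM_VALID_BIT_DELAY_USEC goes from 150 to 50000) with a two-phase poll: busy-wait with udelay() for the first ~10 µs so fast completions stay fast, then back off to usleep_range() so a slow firmware response does not spin the CPU for 50 ms. A self-contained sketch of the idea, polling a hypothetical MMIO flag rather than the driver's DMA-backed response buffer:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>

static int example_poll_valid(void __iomem *reg, unsigned int budget_us)
{
	unsigned int waited = 0;

	while (waited < budget_us) {
		if (readl(reg))
			return 0;
		if (waited < 10) {
			udelay(1);		/* fast path: most responses land in µs */
			waited += 1;
		} else {
			usleep_range(20, 30);	/* back off, let the CPU do other work */
			waited += 20;
		}
	}
	return -ETIMEDOUT;
}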
+ */ status = macb_readl(bp, RSR); if (status) { if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) @@ -1583,6 +1590,22 @@ static int macb_poll(struct napi_struct *napi, int budget) napi_reschedule(napi); } else { queue_writel(queue, IER, bp->rx_intr_mask); + + /* In rare cases, packets could have been received in + * the window between the check above and re-enabling + * interrupts. Therefore, a double-check is required + * to avoid losing a wakeup. This can potentially race + * with the interrupt handler doing the same actions + * if an interrupt is raised just after enabling them, + * but this should be harmless. + */ + status = macb_readl(bp, RSR); + if (unlikely(status)) { + queue_writel(queue, IDR, bp->rx_intr_mask); + if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) + queue_writel(queue, ISR, MACB_BIT(RCOMP)); + napi_schedule(napi); + } } } diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 8e07192e409f..ba28aa444e5a 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -1539,7 +1539,7 @@ static int liquidio_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) * compute the delta in terms of coprocessor clocks. */ delta = (u64)ppb << 32; - div64_u64(delta, oct->coproc_clock_rate); + do_div(delta, oct->coproc_clock_rate); spin_lock_irqsave(&lio->ptp_lock, flags); comp = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_COMP); @@ -1672,7 +1672,7 @@ static void liquidio_ptp_init(struct octeon_device *oct) u64 clock_comp, cfg; clock_comp = (u64)NSEC_PER_SEC << 32; - div64_u64(clock_comp, oct->coproc_clock_rate); + do_div(clock_comp, oct->coproc_clock_rate); lio_pci_writeq(oct, clock_comp, CN6XXX_MIO_PTP_CLOCK_COMP); /* Enable */ diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c index 63521312cb90..174b1e156669 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c @@ -3349,6 +3349,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) } if (!adapter->registered_device_map) { dev_err(&pdev->dev, "could not register any net devices\n"); + err = -ENODEV; goto out_free_dev; } diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c index da41eee2f25c..a06003bfa04b 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c @@ -3613,6 +3613,8 @@ int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai, MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10); adapter->params.pci.vpd_cap_addr = pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD); + if (!adapter->params.pci.vpd_cap_addr) + return -ENODEV; ret = get_vpd_params(adapter, &adapter->params.vpd); if (ret < 0) return ret; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c index 28fd2de9e4cf..1672d3afe5be 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c @@ -8,6 +8,46 @@ #include "cxgb4_filter.h" #include "cxgb4_tc_flower.h" +static int cxgb4_policer_validate(const struct flow_action *action, + const struct flow_action_entry *act, + struct netlink_ext_ack *extack) +{ + if (act->police.exceed.act_id != FLOW_ACTION_DROP) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when exceed action is not drop"); + return -EOPNOTSUPP; + } + + if 
(act->police.notexceed.act_id != FLOW_ACTION_PIPE && + act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when conform action is not pipe or ok"); + return -EOPNOTSUPP; + } + + if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT && + !flow_action_is_last_entry(action, act)) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when conform action is ok, but action is not last"); + return -EOPNOTSUPP; + } + + if (act->police.peakrate_bytes_ps || + act->police.avrate || act->police.overhead) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when peakrate/avrate/overhead is configured"); + return -EOPNOTSUPP; + } + + if (act->police.rate_pkt_ps) { + NL_SET_ERR_MSG_MOD(extack, + "QoS offload not support packets per second"); + return -EOPNOTSUPP; + } + + return 0; +} + static int cxgb4_matchall_egress_validate(struct net_device *dev, struct tc_cls_matchall_offload *cls) { @@ -48,11 +88,10 @@ static int cxgb4_matchall_egress_validate(struct net_device *dev, flow_action_for_each(i, entry, actions) { switch (entry->id) { case FLOW_ACTION_POLICE: - if (entry->police.rate_pkt_ps) { - NL_SET_ERR_MSG_MOD(extack, - "QoS offload not support packets per second"); - return -EOPNOTSUPP; - } + ret = cxgb4_policer_validate(actions, entry, extack); + if (ret) + return ret; + /* Convert bytes per second to bits per second */ if (entry->police.rate_bytes_ps * 8 > max_link_rate) { NL_SET_ERR_MSG_MOD(extack, @@ -150,11 +189,11 @@ static int cxgb4_matchall_alloc_tc(struct net_device *dev, flow_action_for_each(i, entry, &cls->rule->action) if (entry->id == FLOW_ACTION_POLICE) break; - if (entry->police.rate_pkt_ps) { - NL_SET_ERR_MSG_MOD(extack, - "QoS offload not support packets per second"); - return -EOPNOTSUPP; - } + + ret = cxgb4_policer_validate(&cls->rule->action, entry, extack); + if (ret) + return ret; + /* Convert from bytes per second to Kbps */ p.u.params.maxrate = div_u64(entry->police.rate_bytes_ps * 8, 1000); p.u.params.channel = pi->tx_chan; diff --git a/drivers/net/ethernet/davicom/Kconfig b/drivers/net/ethernet/davicom/Kconfig index 7af86b6d4150..02e0caff98e3 100644 --- a/drivers/net/ethernet/davicom/Kconfig +++ b/drivers/net/ethernet/davicom/Kconfig @@ -3,6 +3,19 @@ # Davicom device configuration # +config NET_VENDOR_DAVICOM + bool "Davicom devices" + default y + help + If you have a network (Ethernet) card belonging to this class, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Davicom devices. If you say Y, you will be asked + for your specific card in the following selections. + +if NET_VENDOR_DAVICOM + config DM9000 tristate "DM9000 support" depends on ARM || MIPS || COLDFIRE || NIOS2 || COMPILE_TEST @@ -22,3 +35,21 @@ config DM9000_FORCE_SIMPLE_PHY_POLL bit to determine if the link is up or down instead of the more costly MII PHY reads. Note, this will not work if the chip is operating with an external PHY. + +config DM9051 + tristate "DM9051 SPI support" + depends on SPI + select CRC32 + select MDIO + select PHYLIB + select REGMAP_SPI + help + Support for DM9051 SPI chipset. + + To compile this driver as a module, choose M here. The module + will be called dm9051. + + The SPI mode for the host's SPI master to access DM9051 is mode + 0 on the SPI bus. 
+ +endif # NET_VENDOR_DAVICOM diff --git a/drivers/net/ethernet/davicom/Makefile b/drivers/net/ethernet/davicom/Makefile index 173c87d21076..225f85bc1f53 100644 --- a/drivers/net/ethernet/davicom/Makefile +++ b/drivers/net/ethernet/davicom/Makefile @@ -4,3 +4,4 @@ # obj-$(CONFIG_DM9000) += dm9000.o +obj-$(CONFIG_DM9051) += dm9051.o diff --git a/drivers/net/ethernet/davicom/dm9051.c b/drivers/net/ethernet/davicom/dm9051.c new file mode 100644 index 000000000000..a523ddda7609 --- /dev/null +++ b/drivers/net/ethernet/davicom/dm9051.c @@ -0,0 +1,1260 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022 Davicom Semiconductor,Inc. + * Davicom DM9051 SPI Fast Ethernet Linux driver + */ + +#include <linux/etherdevice.h> +#include <linux/ethtool.h> +#include <linux/interrupt.h> +#include <linux/iopoll.h> +#include <linux/irq.h> +#include <linux/mii.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/phy.h> +#include <linux/regmap.h> +#include <linux/skbuff.h> +#include <linux/spinlock.h> +#include <linux/spi/spi.h> +#include <linux/types.h> + +#include "dm9051.h" + +#define DRVNAME_9051 "dm9051" + +/** + * struct rx_ctl_mach - rx activities record + * @status_err_counter: rx status error counter + * @large_err_counter: rx get large packet length error counter + * @rx_err_counter: receive packet error counter + * @tx_err_counter: transmit packet error counter + * @fifo_rst_counter: reset operation counter + * + * To keep track for the driver operation statistics + */ +struct rx_ctl_mach { + u16 status_err_counter; + u16 large_err_counter; + u16 rx_err_counter; + u16 tx_err_counter; + u16 fifo_rst_counter; +}; + +/** + * struct dm9051_rxctrl - dm9051 driver rx control + * @hash_table: Multicast hash-table data + * @rcr_all: KS_RXCR1 register setting + * + * The settings needs to control the receive filtering + * such as the multicast hash-filter and the receive register settings + */ +struct dm9051_rxctrl { + u16 hash_table[4]; + u8 rcr_all; +}; + +/** + * struct dm9051_rxhdr - rx packet data header + * @headbyte: lead byte equal to 0x01 notifies a valid packet + * @status: status bits for the received packet + * @rxlen: packet length + * + * The Rx packed, entered into the FIFO memory, start with these + * four bytes which is the Rx header, followed by the ethernet + * packet data and ends with an appended 4-byte CRC data. 
+ * Both Rx packet and CRC data are for check purpose and finally + * are dropped by this driver + */ +struct dm9051_rxhdr { + u8 headbyte; + u8 status; + __le16 rxlen; +}; + +/** + * struct board_info - maintain the saved data + * @spidev: spi device structure + * @ndev: net device structure + * @mdiobus: mii bus structure + * @phydev: phy device structure + * @txq: tx queue structure + * @regmap_dm: regmap for register read/write + * @regmap_dmbulk: extra regmap for bulk read/write + * @rxctrl_work: Work queue for updating RX mode and multicast lists + * @tx_work: Work queue for tx packets + * @pause: ethtool pause parameter structure + * @spi_lockm: between threads lock structure + * @reg_mutex: regmap access lock structure + * @bc: rx control statistics structure + * @rxhdr: rx header structure + * @rctl: rx control setting structure + * @msg_enable: message level value + * @imr_all: to store operating imr value for register DM9051_IMR + * @lcr_all: to store operating rcr value for register DM9051_LMCR + * + * The saved data variables, keep up to date for retrieval back to use + */ +struct board_info { + u32 msg_enable; + struct spi_device *spidev; + struct net_device *ndev; + struct mii_bus *mdiobus; + struct phy_device *phydev; + struct sk_buff_head txq; + struct regmap *regmap_dm; + struct regmap *regmap_dmbulk; + struct work_struct rxctrl_work; + struct work_struct tx_work; + struct ethtool_pauseparam pause; + struct mutex spi_lockm; + struct mutex reg_mutex; + struct rx_ctl_mach bc; + struct dm9051_rxhdr rxhdr; + struct dm9051_rxctrl rctl; + u8 imr_all; + u8 lcr_all; +}; + +static int dm9051_set_reg(struct board_info *db, unsigned int reg, unsigned int val) +{ + int ret; + + ret = regmap_write(db->regmap_dm, reg, val); + if (ret < 0) + netif_err(db, drv, db->ndev, "%s: error %d set reg %02x\n", + __func__, ret, reg); + return ret; +} + +static int dm9051_update_bits(struct board_info *db, unsigned int reg, unsigned int mask, + unsigned int val) +{ + int ret; + + ret = regmap_update_bits(db->regmap_dm, reg, mask, val); + if (ret < 0) + netif_err(db, drv, db->ndev, "%s: error %d update bits reg %02x\n", + __func__, ret, reg); + return ret; +} + +/* skb buffer exhausted, just discard the received data + */ +static int dm9051_dumpblk(struct board_info *db, u8 reg, size_t count) +{ + struct net_device *ndev = db->ndev; + unsigned int rb; + int ret; + + /* no skb buffer, + * both reg and &rb must be noinc, + * read once one byte via regmap_read + */ + do { + ret = regmap_read(db->regmap_dm, reg, &rb); + if (ret < 0) { + netif_err(db, drv, ndev, "%s: error %d dumping read reg %02x\n", + __func__, ret, reg); + break; + } + } while (--count); + + return ret; +} + +static int dm9051_set_regs(struct board_info *db, unsigned int reg, const void *val, + size_t val_count) +{ + int ret; + + ret = regmap_bulk_write(db->regmap_dmbulk, reg, val, val_count); + if (ret < 0) + netif_err(db, drv, db->ndev, "%s: error %d bulk writing regs %02x\n", + __func__, ret, reg); + return ret; +} + +static int dm9051_get_regs(struct board_info *db, unsigned int reg, void *val, + size_t val_count) +{ + int ret; + + ret = regmap_bulk_read(db->regmap_dmbulk, reg, val, val_count); + if (ret < 0) + netif_err(db, drv, db->ndev, "%s: error %d bulk reading regs %02x\n", + __func__, ret, reg); + return ret; +} + +static int dm9051_write_mem(struct board_info *db, unsigned int reg, const void *buff, + size_t len) +{ + int ret; + + ret = regmap_noinc_write(db->regmap_dm, reg, buff, len); + if (ret < 0) + netif_err(db, drv, 
db->ndev, "%s: error %d noinc writing regs %02x\n", + __func__, ret, reg); + return ret; +} + +static int dm9051_read_mem(struct board_info *db, unsigned int reg, void *buff, + size_t len) +{ + int ret; + + ret = regmap_noinc_read(db->regmap_dm, reg, buff, len); + if (ret < 0) + netif_err(db, drv, db->ndev, "%s: error %d noinc reading regs %02x\n", + __func__, ret, reg); + return ret; +} + +/* waiting tx-end rather than tx-req + * got faster + */ +static int dm9051_nsr_poll(struct board_info *db) +{ + unsigned int mval; + int ret; + + ret = regmap_read_poll_timeout(db->regmap_dm, DM9051_NSR, mval, + mval & (NSR_TX2END | NSR_TX1END), 1, 20); + if (ret == -ETIMEDOUT) + netdev_err(db->ndev, "timeout in checking for tx end\n"); + return ret; +} + +static int dm9051_epcr_poll(struct board_info *db) +{ + unsigned int mval; + int ret; + + ret = regmap_read_poll_timeout(db->regmap_dm, DM9051_EPCR, mval, + !(mval & EPCR_ERRE), 100, 10000); + if (ret == -ETIMEDOUT) + netdev_err(db->ndev, "eeprom/phy in processing get timeout\n"); + return ret; +} + +static int dm9051_irq_flag(struct board_info *db) +{ + struct spi_device *spi = db->spidev; + int irq_type = irq_get_trigger_type(spi->irq); + + if (irq_type) + return irq_type; + + return IRQF_TRIGGER_LOW; +} + +static unsigned int dm9051_intcr_value(struct board_info *db) +{ + return (dm9051_irq_flag(db) == IRQF_TRIGGER_LOW) ? + INTCR_POL_LOW : INTCR_POL_HIGH; +} + +static int dm9051_set_fcr(struct board_info *db) +{ + u8 fcr = 0; + + if (db->pause.rx_pause) + fcr |= FCR_BKPM | FCR_FLCE; + if (db->pause.tx_pause) + fcr |= FCR_TXPEN; + + return dm9051_set_reg(db, DM9051_FCR, fcr); +} + +static int dm9051_set_recv(struct board_info *db) +{ + int ret; + + ret = dm9051_set_regs(db, DM9051_MAR, db->rctl.hash_table, sizeof(db->rctl.hash_table)); + if (ret) + return ret; + + return dm9051_set_reg(db, DM9051_RCR, db->rctl.rcr_all); /* enable rx */ +} + +static int dm9051_core_reset(struct board_info *db) +{ + int ret; + + db->bc.fifo_rst_counter++; + + ret = regmap_write(db->regmap_dm, DM9051_NCR, NCR_RST); /* NCR reset */ + if (ret) + return ret; + ret = regmap_write(db->regmap_dm, DM9051_MBNDRY, MBNDRY_BYTE); /* MemBound */ + if (ret) + return ret; + ret = regmap_write(db->regmap_dm, DM9051_PPCR, PPCR_PAUSE_COUNT); /* Pause Count */ + if (ret) + return ret; + ret = regmap_write(db->regmap_dm, DM9051_LMCR, db->lcr_all); /* LEDMode1 */ + if (ret) + return ret; + + return dm9051_set_reg(db, DM9051_INTCR, dm9051_intcr_value(db)); +} + +static int dm9051_update_fcr(struct board_info *db) +{ + u8 fcr = 0; + + if (db->pause.rx_pause) + fcr |= FCR_BKPM | FCR_FLCE; + if (db->pause.tx_pause) + fcr |= FCR_TXPEN; + + return dm9051_update_bits(db, DM9051_FCR, FCR_RXTX_BITS, fcr); +} + +static int dm9051_disable_interrupt(struct board_info *db) +{ + return dm9051_set_reg(db, DM9051_IMR, IMR_PAR); /* disable int */ +} + +static int dm9051_enable_interrupt(struct board_info *db) +{ + return dm9051_set_reg(db, DM9051_IMR, db->imr_all); /* enable int */ +} + +static int dm9051_stop_mrcmd(struct board_info *db) +{ + return dm9051_set_reg(db, DM9051_ISR, ISR_STOP_MRCMD); /* to stop mrcmd */ +} + +static int dm9051_clear_interrupt(struct board_info *db) +{ + return dm9051_update_bits(db, DM9051_ISR, ISR_CLR_INT, ISR_CLR_INT); +} + +static int dm9051_eeprom_read(struct board_info *db, int offset, u8 *to) +{ + int ret; + + ret = regmap_write(db->regmap_dm, DM9051_EPAR, offset); + if (ret) + return ret; + + ret = regmap_write(db->regmap_dm, DM9051_EPCR, EPCR_ERPRR); + if (ret) + 
return ret; + + ret = dm9051_epcr_poll(db); + if (ret) + return ret; + + ret = regmap_write(db->regmap_dm, DM9051_EPCR, 0); + if (ret) + return ret; + + return regmap_bulk_read(db->regmap_dmbulk, DM9051_EPDRL, to, 2); +} + +static int dm9051_eeprom_write(struct board_info *db, int offset, u8 *data) +{ + int ret; + + ret = regmap_write(db->regmap_dm, DM9051_EPAR, offset); + if (ret) + return ret; + + ret = regmap_bulk_write(db->regmap_dmbulk, DM9051_EPDRL, data, 2); + if (ret < 0) + return ret; + + ret = regmap_write(db->regmap_dm, DM9051_EPCR, EPCR_WEP | EPCR_ERPRW); + if (ret) + return ret; + + ret = dm9051_epcr_poll(db); + if (ret) + return ret; + + return regmap_write(db->regmap_dm, DM9051_EPCR, 0); +} + +static int dm9051_phyread(void *context, unsigned int reg, unsigned int *val) +{ + struct board_info *db = context; + int ret; + + ret = regmap_write(db->regmap_dm, DM9051_EPAR, DM9051_PHY | reg); + if (ret) + return ret; + + ret = regmap_write(db->regmap_dm, DM9051_EPCR, EPCR_ERPRR | EPCR_EPOS); + if (ret) + return ret; + + ret = dm9051_epcr_poll(db); + if (ret) + return ret; + + ret = regmap_write(db->regmap_dm, DM9051_EPCR, 0); + if (ret) + return ret; + + /* this is a 4 bytes data, clear to zero since following regmap_bulk_read + * only fill lower 2 bytes + */ + *val = 0; + return regmap_bulk_read(db->regmap_dmbulk, DM9051_EPDRL, val, 2); +} + +static int dm9051_phywrite(void *context, unsigned int reg, unsigned int val) +{ + struct board_info *db = context; + int ret; + + ret = regmap_write(db->regmap_dm, DM9051_EPAR, DM9051_PHY | reg); + if (ret) + return ret; + + ret = regmap_bulk_write(db->regmap_dmbulk, DM9051_EPDRL, &val, 2); + if (ret < 0) + return ret; + + ret = regmap_write(db->regmap_dm, DM9051_EPCR, EPCR_EPOS | EPCR_ERPRW); + if (ret) + return ret; + + ret = dm9051_epcr_poll(db); + if (ret) + return ret; + + return regmap_write(db->regmap_dm, DM9051_EPCR, 0); +} + +static int dm9051_mdio_read(struct mii_bus *bus, int addr, int regnum) +{ + struct board_info *db = bus->priv; + unsigned int val = 0xffff; + int ret; + + if (addr == DM9051_PHY_ADDR) { + ret = dm9051_phyread(db, regnum, &val); + if (ret) + return ret; + } + + return val; +} + +static int dm9051_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val) +{ + struct board_info *db = bus->priv; + + if (addr == DM9051_PHY_ADDR) + return dm9051_phywrite(db, regnum, val); + + return -ENODEV; +} + +static void dm9051_reg_lock_mutex(void *dbcontext) +{ + struct board_info *db = dbcontext; + + mutex_lock(&db->reg_mutex); +} + +static void dm9051_reg_unlock_mutex(void *dbcontext) +{ + struct board_info *db = dbcontext; + + mutex_unlock(&db->reg_mutex); +} + +static struct regmap_config regconfigdm = { + .reg_bits = 8, + .val_bits = 8, + .max_register = 0xff, + .reg_stride = 1, + .cache_type = REGCACHE_NONE, + .read_flag_mask = 0, + .write_flag_mask = DM_SPI_WR, + .val_format_endian = REGMAP_ENDIAN_LITTLE, + .lock = dm9051_reg_lock_mutex, + .unlock = dm9051_reg_unlock_mutex, +}; + +static struct regmap_config regconfigdmbulk = { + .reg_bits = 8, + .val_bits = 8, + .max_register = 0xff, + .reg_stride = 1, + .cache_type = REGCACHE_NONE, + .read_flag_mask = 0, + .write_flag_mask = DM_SPI_WR, + .val_format_endian = REGMAP_ENDIAN_LITTLE, + .lock = dm9051_reg_lock_mutex, + .unlock = dm9051_reg_unlock_mutex, + .use_single_read = true, + .use_single_write = true, +}; + +static int dm9051_map_init(struct spi_device *spi, struct board_info *db) +{ + /* create two regmap instances, + * split read/write and bulk_read/bulk_write 
to individual regmap + * to resolve regmap execution confliction problem + */ + regconfigdm.lock_arg = db; + db->regmap_dm = devm_regmap_init_spi(db->spidev, ®configdm); + if (IS_ERR(db->regmap_dm)) + return PTR_ERR(db->regmap_dm); + + regconfigdmbulk.lock_arg = db; + db->regmap_dmbulk = devm_regmap_init_spi(db->spidev, ®configdmbulk); + if (IS_ERR(db->regmap_dmbulk)) + return PTR_ERR(db->regmap_dmbulk); + + return 0; +} + +static int dm9051_map_chipid(struct board_info *db) +{ + struct device *dev = &db->spidev->dev; + unsigned short wid; + u8 buff[6]; + int ret; + + ret = dm9051_get_regs(db, DM9051_VIDL, buff, sizeof(buff)); + if (ret < 0) + return ret; + + wid = get_unaligned_le16(buff + 2); + if (wid != DM9051_ID) { + dev_err(dev, "chipid error as %04x !\n", wid); + return -ENODEV; + } + + dev_info(dev, "chip %04x found\n", wid); + return 0; +} + +/* Read DM9051_PAR registers which is the mac address loaded from EEPROM while power-on + */ +static int dm9051_map_etherdev_par(struct net_device *ndev, struct board_info *db) +{ + u8 addr[ETH_ALEN]; + int ret; + + ret = dm9051_get_regs(db, DM9051_PAR, addr, sizeof(addr)); + if (ret < 0) + return ret; + + if (!is_valid_ether_addr(addr)) { + eth_hw_addr_random(ndev); + + ret = dm9051_set_regs(db, DM9051_PAR, ndev->dev_addr, sizeof(ndev->dev_addr)); + if (ret < 0) + return ret; + + dev_dbg(&db->spidev->dev, "Use random MAC address\n"); + return 0; + } + + eth_hw_addr_set(ndev, addr); + return 0; +} + +/* ethtool-ops + */ +static void dm9051_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + strscpy(info->driver, DRVNAME_9051, sizeof(info->driver)); +} + +static void dm9051_set_msglevel(struct net_device *ndev, u32 value) +{ + struct board_info *db = to_dm9051_board(ndev); + + db->msg_enable = value; +} + +static u32 dm9051_get_msglevel(struct net_device *ndev) +{ + struct board_info *db = to_dm9051_board(ndev); + + return db->msg_enable; +} + +static int dm9051_get_eeprom_len(struct net_device *dev) +{ + return 128; +} + +static int dm9051_get_eeprom(struct net_device *ndev, + struct ethtool_eeprom *ee, u8 *data) +{ + struct board_info *db = to_dm9051_board(ndev); + int offset = ee->offset; + int len = ee->len; + int i, ret; + + if ((len | offset) & 1) + return -EINVAL; + + ee->magic = DM_EEPROM_MAGIC; + + for (i = 0; i < len; i += 2) { + ret = dm9051_eeprom_read(db, (offset + i) / 2, data + i); + if (ret) + break; + } + return ret; +} + +static int dm9051_set_eeprom(struct net_device *ndev, + struct ethtool_eeprom *ee, u8 *data) +{ + struct board_info *db = to_dm9051_board(ndev); + int offset = ee->offset; + int len = ee->len; + int i, ret; + + if ((len | offset) & 1) + return -EINVAL; + + if (ee->magic != DM_EEPROM_MAGIC) + return -EINVAL; + + for (i = 0; i < len; i += 2) { + ret = dm9051_eeprom_write(db, (offset + i) / 2, data + i); + if (ret) + break; + } + return ret; +} + +static void dm9051_get_pauseparam(struct net_device *ndev, + struct ethtool_pauseparam *pause) +{ + struct board_info *db = to_dm9051_board(ndev); + + *pause = db->pause; +} + +static int dm9051_set_pauseparam(struct net_device *ndev, + struct ethtool_pauseparam *pause) +{ + struct board_info *db = to_dm9051_board(ndev); + + db->pause = *pause; + + if (pause->autoneg == AUTONEG_DISABLE) + return dm9051_update_fcr(db); + + phy_set_sym_pause(db->phydev, pause->rx_pause, pause->tx_pause, + pause->autoneg); + phy_start_aneg(db->phydev); + return 0; +} + +static const struct ethtool_ops dm9051_ethtool_ops = { + .get_drvinfo = dm9051_get_drvinfo, + 
.get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, + .get_msglevel = dm9051_get_msglevel, + .set_msglevel = dm9051_set_msglevel, + .nway_reset = phy_ethtool_nway_reset, + .get_link = ethtool_op_get_link, + .get_eeprom_len = dm9051_get_eeprom_len, + .get_eeprom = dm9051_get_eeprom, + .set_eeprom = dm9051_set_eeprom, + .get_pauseparam = dm9051_get_pauseparam, + .set_pauseparam = dm9051_set_pauseparam, +}; + +static int dm9051_all_start(struct board_info *db) +{ + int ret; + + /* GPR power on of the internal phy + */ + ret = dm9051_set_reg(db, DM9051_GPR, 0); + if (ret) + return ret; + + /* dm9051 chip registers could not be accessed within 1 ms + * after GPR power on, delay 1 ms is essential + */ + msleep(1); + + ret = dm9051_core_reset(db); + if (ret) + return ret; + + return dm9051_enable_interrupt(db); +} + +static int dm9051_all_stop(struct board_info *db) +{ + int ret; + + /* GPR power off of the internal phy, + * The internal phy still could be accessed after this GPR power off control + */ + ret = dm9051_set_reg(db, DM9051_GPR, GPR_PHY_OFF); + if (ret) + return ret; + + return dm9051_set_reg(db, DM9051_RCR, RCR_RX_DISABLE); +} + +/* fifo reset while rx error found + */ +static int dm9051_all_restart(struct board_info *db) +{ + struct net_device *ndev = db->ndev; + int ret; + + ret = dm9051_core_reset(db); + if (ret) + return ret; + + ret = dm9051_enable_interrupt(db); + if (ret) + return ret; + + netdev_dbg(ndev, " rxstatus_Er & rxlen_Er %d, RST_c %d\n", + db->bc.status_err_counter + db->bc.large_err_counter, + db->bc.fifo_rst_counter); + + ret = dm9051_set_recv(db); + if (ret) + return ret; + + return dm9051_set_fcr(db); +} + +/* read packets from the fifo memory + * return value, + * > 0 - read packet number, caller can repeat the rx operation + * 0 - no error, caller need stop further rx operation + * -EBUSY - read data error, caller escape from rx operation + */ +static int dm9051_loop_rx(struct board_info *db) +{ + struct net_device *ndev = db->ndev; + unsigned int rxbyte; + int ret, rxlen; + struct sk_buff *skb; + u8 *rdptr; + int scanrr = 0; + + do { + ret = dm9051_read_mem(db, DM_SPI_MRCMDX, &rxbyte, 2); + if (ret) + return ret; + + if ((rxbyte & GENMASK(7, 0)) != DM9051_PKT_RDY) + break; /* exhaust-empty */ + + ret = dm9051_read_mem(db, DM_SPI_MRCMD, &db->rxhdr, DM_RXHDR_SIZE); + if (ret) + return ret; + + ret = dm9051_stop_mrcmd(db); + if (ret) + return ret; + + rxlen = le16_to_cpu(db->rxhdr.rxlen); + if (db->rxhdr.status & RSR_ERR_BITS || rxlen > DM9051_PKT_MAX) { + netdev_dbg(ndev, "rxhdr-byte (%02x)\n", + db->rxhdr.headbyte); + + if (db->rxhdr.status & RSR_ERR_BITS) { + db->bc.status_err_counter++; + netdev_dbg(ndev, "check rxstatus-error (%02x)\n", + db->rxhdr.status); + } else { + db->bc.large_err_counter++; + netdev_dbg(ndev, "check rxlen large-error (%d > %d)\n", + rxlen, DM9051_PKT_MAX); + } + return dm9051_all_restart(db); + } + + skb = dev_alloc_skb(rxlen); + if (!skb) { + ret = dm9051_dumpblk(db, DM_SPI_MRCMD, rxlen); + if (ret) + return ret; + return scanrr; + } + + rdptr = skb_put(skb, rxlen - 4); + ret = dm9051_read_mem(db, DM_SPI_MRCMD, rdptr, rxlen); + if (ret) { + db->bc.rx_err_counter++; + dev_kfree_skb(skb); + return ret; + } + + ret = dm9051_stop_mrcmd(db); + if (ret) + return ret; + + skb->protocol = eth_type_trans(skb, db->ndev); + if (db->ndev->features & NETIF_F_RXCSUM) + skb_checksum_none_assert(skb); + netif_rx(skb); + db->ndev->stats.rx_bytes += rxlen; + db->ndev->stats.rx_packets++; + 
scanrr++; + } while (!ret); + + return scanrr; +} + +/* transmit a packet, + * return value, + * 0 - success + * -ETIMEDOUT - timeout error + */ +static int dm9051_single_tx(struct board_info *db, u8 *buff, unsigned int len) +{ + int ret; + + ret = dm9051_nsr_poll(db); + if (ret) + return ret; + + ret = dm9051_write_mem(db, DM_SPI_MWCMD, buff, len); + if (ret) + return ret; + + ret = dm9051_set_regs(db, DM9051_TXPLL, &len, 2); + if (ret < 0) + return ret; + + return dm9051_set_reg(db, DM9051_TCR, TCR_TXREQ); +} + +static int dm9051_loop_tx(struct board_info *db) +{ + struct net_device *ndev = db->ndev; + int ntx = 0; + int ret; + + while (!skb_queue_empty(&db->txq)) { + struct sk_buff *skb; + unsigned int len; + + skb = skb_dequeue(&db->txq); + if (skb) { + ntx++; + ret = dm9051_single_tx(db, skb->data, skb->len); + len = skb->len; + dev_kfree_skb(skb); + if (ret < 0) { + db->bc.tx_err_counter++; + return 0; + } + ndev->stats.tx_bytes += len; + ndev->stats.tx_packets++; + } + + if (netif_queue_stopped(ndev) && + (skb_queue_len(&db->txq) < DM9051_TX_QUE_LO_WATER)) + netif_wake_queue(ndev); + } + + return ntx; +} + +static irqreturn_t dm9051_rx_threaded_irq(int irq, void *pw) +{ + struct board_info *db = pw; + int result, result_tx; + + mutex_lock(&db->spi_lockm); + + result = dm9051_disable_interrupt(db); + if (result) + goto out_unlock; + + result = dm9051_clear_interrupt(db); + if (result) + goto out_unlock; + + do { + result = dm9051_loop_rx(db); /* threaded irq rx */ + if (result < 0) + goto out_unlock; + result_tx = dm9051_loop_tx(db); /* also service tx, for better performance */ + if (result_tx < 0) + goto out_unlock; + } while (result > 0); + + dm9051_enable_interrupt(db); + + /* Exit here with the mutex unlocked when an rx or tx error occurs + */ +out_unlock: + mutex_unlock(&db->spi_lockm); + + return IRQ_HANDLED; +} + +static void dm9051_tx_delay(struct work_struct *work) +{ + struct board_info *db = container_of(work, struct board_info, tx_work); + int result; + + mutex_lock(&db->spi_lockm); + + result = dm9051_loop_tx(db); + if (result < 0) + netdev_err(db->ndev, "transmit packet error\n"); + + mutex_unlock(&db->spi_lockm); +} + +static void dm9051_rxctl_delay(struct work_struct *work) +{ + struct board_info *db = container_of(work, struct board_info, rxctrl_work); + struct net_device *ndev = db->ndev; + int result; + + mutex_lock(&db->spi_lockm); + + result = dm9051_set_regs(db, DM9051_PAR, ndev->dev_addr, sizeof(ndev->dev_addr)); + if (result < 0) + goto out_unlock; + + dm9051_set_recv(db); + + /* Unlock the mutex and return from this function when a regmap access fails + */ +out_unlock: + mutex_unlock(&db->spi_lockm); +} + +/* Open network device + * Called when the network device is marked active, such as a user executing + * 'ifconfig up' on the device + */ +static int dm9051_open(struct net_device *ndev) +{ + struct board_info *db = to_dm9051_board(ndev); + struct spi_device *spi = db->spidev; + int ret; + + db->imr_all = IMR_PAR | IMR_PRM; + db->lcr_all = LMCR_MODE1; + db->rctl.rcr_all = RCR_DIS_LONG | RCR_DIS_CRC | RCR_RXEN; + memset(db->rctl.hash_table, 0, sizeof(db->rctl.hash_table)); + + ndev->irq = spi->irq; /* by dts */ + ret = request_threaded_irq(spi->irq, NULL, dm9051_rx_threaded_irq, + dm9051_irq_flag(db) | IRQF_ONESHOT, + ndev->name, db); + if (ret < 0) { + netdev_err(ndev, "failed to get irq\n"); + return ret; + } + + phy_support_sym_pause(db->phydev); + phy_start(db->phydev); + + /* flow control parameters init */ + db->pause.rx_pause = true; + db->pause.tx_pause = true; + 
db->pause.autoneg = AUTONEG_DISABLE; + + if (db->phydev->autoneg) + db->pause.autoneg = AUTONEG_ENABLE; + + ret = dm9051_all_start(db); + if (ret) { + phy_stop(db->phydev); + free_irq(spi->irq, db); + return ret; + } + + netif_wake_queue(ndev); + + return 0; +} + +/* Close network device + * Called to close down a network device which has been active. Cancel any + * work, shutdown the RX and TX process and then place the chip into a low + * power state while it is not being used + */ +static int dm9051_stop(struct net_device *ndev) +{ + struct board_info *db = to_dm9051_board(ndev); + int ret; + + ret = dm9051_all_stop(db); + if (ret) + return ret; + + flush_work(&db->tx_work); + flush_work(&db->rxctrl_work); + + phy_stop(db->phydev); + + free_irq(db->spidev->irq, db); + + netif_stop_queue(ndev); + + skb_queue_purge(&db->txq); + + return 0; +} + +/* event: play a schedule starter in condition + */ +static netdev_tx_t dm9051_start_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct board_info *db = to_dm9051_board(ndev); + + skb_queue_tail(&db->txq, skb); + if (skb_queue_len(&db->txq) > DM9051_TX_QUE_HI_WATER) + netif_stop_queue(ndev); /* enforce limit queue size */ + + schedule_work(&db->tx_work); + + return NETDEV_TX_OK; +} + +/* event: play with a schedule starter + */ +static void dm9051_set_rx_mode(struct net_device *ndev) +{ + struct board_info *db = to_dm9051_board(ndev); + struct dm9051_rxctrl rxctrl; + struct netdev_hw_addr *ha; + u8 rcr = RCR_DIS_LONG | RCR_DIS_CRC | RCR_RXEN; + u32 hash_val; + + memset(&rxctrl, 0, sizeof(rxctrl)); + + /* rx control */ + if (ndev->flags & IFF_PROMISC) { + rcr |= RCR_PRMSC; + netdev_dbg(ndev, "set_multicast rcr |= RCR_PRMSC, rcr= %02x\n", rcr); + } + + if (ndev->flags & IFF_ALLMULTI) { + rcr |= RCR_ALL; + netdev_dbg(ndev, "set_multicast rcr |= RCR_ALLMULTI, rcr= %02x\n", rcr); + } + + rxctrl.rcr_all = rcr; + + /* broadcast address */ + rxctrl.hash_table[0] = 0; + rxctrl.hash_table[1] = 0; + rxctrl.hash_table[2] = 0; + rxctrl.hash_table[3] = 0x8000; + + /* the multicast address in Hash Table : 64 bits */ + netdev_for_each_mc_addr(ha, ndev) { + hash_val = ether_crc_le(ETH_ALEN, ha->addr) & GENMASK(5, 0); + rxctrl.hash_table[hash_val / 16] |= BIT(0) << (hash_val % 16); + } + + /* schedule work to do the actual set of the data if needed */ + + if (memcmp(&db->rctl, &rxctrl, sizeof(rxctrl))) { + memcpy(&db->rctl, &rxctrl, sizeof(rxctrl)); + schedule_work(&db->rxctrl_work); + } +} + +/* event: write into the mac registers and eeprom directly + */ +static int dm9051_set_mac_address(struct net_device *ndev, void *p) +{ + struct board_info *db = to_dm9051_board(ndev); + int ret; + + ret = eth_prepare_mac_addr_change(ndev, p); + if (ret < 0) + return ret; + + eth_commit_mac_addr_change(ndev, p); + return dm9051_set_regs(db, DM9051_PAR, ndev->dev_addr, sizeof(ndev->dev_addr)); +} + +static const struct net_device_ops dm9051_netdev_ops = { + .ndo_open = dm9051_open, + .ndo_stop = dm9051_stop, + .ndo_start_xmit = dm9051_start_xmit, + .ndo_set_rx_mode = dm9051_set_rx_mode, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = dm9051_set_mac_address, +}; + +static void dm9051_operation_clear(struct board_info *db) +{ + db->bc.status_err_counter = 0; + db->bc.large_err_counter = 0; + db->bc.rx_err_counter = 0; + db->bc.tx_err_counter = 0; + db->bc.fifo_rst_counter = 0; +} + +static int dm9051_mdio_register(struct board_info *db) +{ + struct spi_device *spi = db->spidev; + int ret; + + db->mdiobus = devm_mdiobus_alloc(&spi->dev); + if 
(!db->mdiobus) + return -ENOMEM; + + db->mdiobus->priv = db; + db->mdiobus->read = dm9051_mdio_read; + db->mdiobus->write = dm9051_mdio_write; + db->mdiobus->name = "dm9051-mdiobus"; + db->mdiobus->phy_mask = (u32)~BIT(1); + db->mdiobus->parent = &spi->dev; + snprintf(db->mdiobus->id, MII_BUS_ID_SIZE, + "dm9051-%s.%u", dev_name(&spi->dev), spi->chip_select); + + ret = devm_mdiobus_register(&spi->dev, db->mdiobus); + if (ret) + dev_err(&spi->dev, "Could not register MDIO bus\n"); + + return ret; +} + +static void dm9051_handle_link_change(struct net_device *ndev) +{ + struct board_info *db = to_dm9051_board(ndev); + + phy_print_status(db->phydev); + + /* only write pause settings to mac. since mac and phy are integrated + * together, such as link state, speed and duplex are sync already + */ + if (db->phydev->link) { + if (db->phydev->pause) { + db->pause.rx_pause = true; + db->pause.tx_pause = true; + } + dm9051_update_fcr(db); + } +} + +/* phy connect as poll mode + */ +static int dm9051_phy_connect(struct board_info *db) +{ + char phy_id[MII_BUS_ID_SIZE + 3]; + + snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, + db->mdiobus->id, DM9051_PHY_ADDR); + + db->phydev = phy_connect(db->ndev, phy_id, dm9051_handle_link_change, + PHY_INTERFACE_MODE_MII); + if (IS_ERR(db->phydev)) + return PTR_ERR_OR_ZERO(db->phydev); + return 0; +} + +static int dm9051_probe(struct spi_device *spi) +{ + struct device *dev = &spi->dev; + struct net_device *ndev; + struct board_info *db; + int ret; + + ndev = devm_alloc_etherdev(dev, sizeof(struct board_info)); + if (!ndev) + return -ENOMEM; + + SET_NETDEV_DEV(ndev, dev); + dev_set_drvdata(dev, ndev); + + db = netdev_priv(ndev); + + db->msg_enable = 0; + db->spidev = spi; + db->ndev = ndev; + + ndev->netdev_ops = &dm9051_netdev_ops; + ndev->ethtool_ops = &dm9051_ethtool_ops; + + mutex_init(&db->spi_lockm); + mutex_init(&db->reg_mutex); + + INIT_WORK(&db->rxctrl_work, dm9051_rxctl_delay); + INIT_WORK(&db->tx_work, dm9051_tx_delay); + + ret = dm9051_map_init(spi, db); + if (ret) + return ret; + + ret = dm9051_map_chipid(db); + if (ret) + return ret; + + ret = dm9051_map_etherdev_par(ndev, db); + if (ret < 0) + return ret; + + ret = dm9051_mdio_register(db); + if (ret) + return ret; + + ret = dm9051_phy_connect(db); + if (ret) + return ret; + + dm9051_operation_clear(db); + skb_queue_head_init(&db->txq); + + ret = devm_register_netdev(dev, ndev); + if (ret) { + phy_disconnect(db->phydev); + return dev_err_probe(dev, ret, "device register failed"); + } + + return 0; +} + +static void dm9051_drv_remove(struct spi_device *spi) +{ + struct device *dev = &spi->dev; + struct net_device *ndev = dev_get_drvdata(dev); + struct board_info *db = to_dm9051_board(ndev); + + phy_disconnect(db->phydev); +} + +static const struct of_device_id dm9051_match_table[] = { + { .compatible = "davicom,dm9051" }, + {} +}; + +static const struct spi_device_id dm9051_id_table[] = { + { "dm9051", 0 }, + {} +}; + +static struct spi_driver dm9051_driver = { + .driver = { + .name = DRVNAME_9051, + .of_match_table = dm9051_match_table, + }, + .probe = dm9051_probe, + .remove = dm9051_drv_remove, + .id_table = dm9051_id_table, +}; +module_spi_driver(dm9051_driver); + +MODULE_AUTHOR("Joseph CHANG <joseph_chang@davicom.com.tw>"); +MODULE_DESCRIPTION("Davicom DM9051 network SPI driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/davicom/dm9051.h b/drivers/net/ethernet/davicom/dm9051.h new file mode 100644 index 000000000000..fef3120edd7c --- /dev/null +++ 
b/drivers/net/ethernet/davicom/dm9051.h @@ -0,0 +1,162 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022 Davicom Semiconductor,Inc. + * Davicom DM9051 SPI Fast Ethernet Linux driver + */ + +#ifndef _DM9051_H_ +#define _DM9051_H_ + +#include <linux/bits.h> +#include <linux/netdevice.h> +#include <linux/types.h> + +#define DM9051_ID 0x9051 + +#define DM9051_NCR 0x00 +#define DM9051_NSR 0x01 +#define DM9051_TCR 0x02 +#define DM9051_RCR 0x05 +#define DM9051_BPTR 0x08 +#define DM9051_FCR 0x0A +#define DM9051_EPCR 0x0B +#define DM9051_EPAR 0x0C +#define DM9051_EPDRL 0x0D +#define DM9051_EPDRH 0x0E +#define DM9051_PAR 0x10 +#define DM9051_MAR 0x16 +#define DM9051_GPCR 0x1E +#define DM9051_GPR 0x1F + +#define DM9051_VIDL 0x28 +#define DM9051_VIDH 0x29 +#define DM9051_PIDL 0x2A +#define DM9051_PIDH 0x2B +#define DM9051_SMCR 0x2F +#define DM9051_ATCR 0x30 +#define DM9051_SPIBCR 0x38 +#define DM9051_INTCR 0x39 +#define DM9051_PPCR 0x3D + +#define DM9051_MPCR 0x55 +#define DM9051_LMCR 0x57 +#define DM9051_MBNDRY 0x5E + +#define DM9051_MRRL 0x74 +#define DM9051_MRRH 0x75 +#define DM9051_MWRL 0x7A +#define DM9051_MWRH 0x7B +#define DM9051_TXPLL 0x7C +#define DM9051_TXPLH 0x7D +#define DM9051_ISR 0x7E +#define DM9051_IMR 0x7F + +#define DM_SPI_MRCMDX 0x70 +#define DM_SPI_MRCMD 0x72 +#define DM_SPI_MWCMD 0x78 + +#define DM_SPI_WR 0x80 + +/* dm9051 Ethernet controller registers bits + */ +/* 0x00 */ +#define NCR_WAKEEN BIT(6) +#define NCR_FDX BIT(3) +#define NCR_RST BIT(0) +/* 0x01 */ +#define NSR_SPEED BIT(7) +#define NSR_LINKST BIT(6) +#define NSR_WAKEST BIT(5) +#define NSR_TX2END BIT(3) +#define NSR_TX1END BIT(2) +/* 0x02 */ +#define TCR_DIS_JABBER_TIMER BIT(6) /* for Jabber Packet support */ +#define TCR_TXREQ BIT(0) +/* 0x05 */ +#define RCR_DIS_WATCHDOG_TIMER BIT(6) /* for Jabber Packet support */ +#define RCR_DIS_LONG BIT(5) +#define RCR_DIS_CRC BIT(4) +#define RCR_ALL BIT(3) +#define RCR_PRMSC BIT(1) +#define RCR_RXEN BIT(0) +#define RCR_RX_DISABLE (RCR_DIS_LONG | RCR_DIS_CRC) +/* 0x06 */ +#define RSR_RF BIT(7) +#define RSR_MF BIT(6) +#define RSR_LCS BIT(5) +#define RSR_RWTO BIT(4) +#define RSR_PLE BIT(3) +#define RSR_AE BIT(2) +#define RSR_CE BIT(1) +#define RSR_FOE BIT(0) +#define RSR_ERR_BITS (RSR_RF | RSR_LCS | RSR_RWTO | RSR_PLE | \ + RSR_AE | RSR_CE | RSR_FOE) +/* 0x0A */ +#define FCR_TXPEN BIT(5) +#define FCR_BKPM BIT(3) +#define FCR_FLCE BIT(0) +#define FCR_RXTX_BITS (FCR_TXPEN | FCR_BKPM | FCR_FLCE) +/* 0x0B */ +#define EPCR_WEP BIT(4) +#define EPCR_EPOS BIT(3) +#define EPCR_ERPRR BIT(2) +#define EPCR_ERPRW BIT(1) +#define EPCR_ERRE BIT(0) +/* 0x1E */ +#define GPCR_GEP_CNTL BIT(0) +/* 0x1F */ +#define GPR_PHY_OFF BIT(0) +/* 0x30 */ +#define ATCR_AUTO_TX BIT(7) +/* 0x39 */ +#define INTCR_POL_LOW (1 << 0) +#define INTCR_POL_HIGH (0 << 0) +/* 0x3D */ +/* Pause Packet Control Register - default = 1 */ +#define PPCR_PAUSE_COUNT 0x08 +/* 0x55 */ +#define MPCR_RSTTX BIT(1) +#define MPCR_RSTRX BIT(0) +/* 0x57 */ +/* LEDMode Control Register - LEDMode1 */ +/* Value 0x81 : bit[7] = 1, bit[2] = 0, bit[1:0] = 01b */ +#define LMCR_NEWMOD BIT(7) +#define LMCR_TYPED1 BIT(1) +#define LMCR_TYPED0 BIT(0) +#define LMCR_MODE1 (LMCR_NEWMOD | LMCR_TYPED0) +/* 0x5E */ +#define MBNDRY_BYTE BIT(7) +/* 0xFE */ +#define ISR_MBS BIT(7) +#define ISR_LNKCHG BIT(5) +#define ISR_ROOS BIT(3) +#define ISR_ROS BIT(2) +#define ISR_PTS BIT(1) +#define ISR_PRS BIT(0) +#define ISR_CLR_INT (ISR_LNKCHG | ISR_ROOS | ISR_ROS | \ + ISR_PTS | ISR_PRS) +#define ISR_STOP_MRCMD (ISR_MBS) +/* 0xFF */ +#define 
IMR_PAR BIT(7) +#define IMR_LNKCHGI BIT(5) +#define IMR_PTM BIT(1) +#define IMR_PRM BIT(0) + +/* Const + */ +#define DM9051_PHY_ADDR 1 /* PHY id */ +#define DM9051_PHY 0x40 /* PHY address 0x01 */ +#define DM9051_PKT_RDY 0x01 /* Packet ready to receive */ +#define DM9051_PKT_MAX 1536 /* Received packet max size */ +#define DM9051_TX_QUE_HI_WATER 50 +#define DM9051_TX_QUE_LO_WATER 25 +#define DM_EEPROM_MAGIC 0x9051 + +#define DM_RXHDR_SIZE sizeof(struct dm9051_rxhdr) + +static inline struct board_info *to_dm9051_board(struct net_device *ndev) +{ + return netdev_priv(ndev); +} + +#endif /* _DM9051_H_ */ diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c index 323340826dab..69dbf950d451 100644 --- a/drivers/net/ethernet/ezchip/nps_enet.c +++ b/drivers/net/ethernet/ezchip/nps_enet.c @@ -608,7 +608,6 @@ static s32 nps_enet_probe(struct platform_device *pdev) /* Get IRQ number */ priv->irq = platform_get_irq(pdev, 0); if (priv->irq < 0) { - dev_err(dev, "failed to retrieve <irq Rx-Tx> value from device tree\n"); err = -ENODEV; goto out_netdev; } diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index 691605c15265..d5356db7539a 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -989,117 +989,6 @@ static int ftgmac100_alloc_rx_buffers(struct ftgmac100 *priv) return 0; } -static void ftgmac100_adjust_link(struct net_device *netdev) -{ - struct ftgmac100 *priv = netdev_priv(netdev); - struct phy_device *phydev = netdev->phydev; - bool tx_pause, rx_pause; - int new_speed; - - /* We store "no link" as speed 0 */ - if (!phydev->link) - new_speed = 0; - else - new_speed = phydev->speed; - - /* Grab pause settings from PHY if configured to do so */ - if (priv->aneg_pause) { - rx_pause = tx_pause = phydev->pause; - if (phydev->asym_pause) - tx_pause = !rx_pause; - } else { - rx_pause = priv->rx_pause; - tx_pause = priv->tx_pause; - } - - /* Link hasn't changed, do nothing */ - if (phydev->speed == priv->cur_speed && - phydev->duplex == priv->cur_duplex && - rx_pause == priv->rx_pause && - tx_pause == priv->tx_pause) - return; - - /* Print status if we have a link or we had one and just lost it, - * don't print otherwise. - */ - if (new_speed || priv->cur_speed) - phy_print_status(phydev); - - priv->cur_speed = new_speed; - priv->cur_duplex = phydev->duplex; - priv->rx_pause = rx_pause; - priv->tx_pause = tx_pause; - - /* Link is down, do nothing else */ - if (!new_speed) - return; - - /* Disable all interrupts */ - iowrite32(0, priv->base + FTGMAC100_OFFSET_IER); - - /* Reset the adapter asynchronously */ - schedule_work(&priv->reset_task); -} - -static int ftgmac100_mii_probe(struct net_device *netdev) -{ - struct ftgmac100 *priv = netdev_priv(netdev); - struct platform_device *pdev = to_platform_device(priv->dev); - struct device_node *np = pdev->dev.of_node; - struct phy_device *phydev; - phy_interface_t phy_intf; - int err; - - /* Default to RGMII. It's a gigabit part after all */ - err = of_get_phy_mode(np, &phy_intf); - if (err) - phy_intf = PHY_INTERFACE_MODE_RGMII; - - /* Aspeed only supports these. I don't know about other IP - * block vendors so I'm going to just let them through for - * now. Note that this is only a warning if for some obscure - * reason the DT really means to lie about it or it's a newer - * part we don't know about. 
- * - * On the Aspeed SoC there are additionally straps and SCU - * control bits that could tell us what the interface is - * (or allow us to configure it while the IP block is held - * in reset). For now I chose to keep this driver away from - * those SoC specific bits and assume the device-tree is - * right and the SCU has been configured properly by pinmux - * or the firmware. - */ - if (priv->is_aspeed && !(phy_interface_mode_is_rgmii(phy_intf))) { - netdev_warn(netdev, - "Unsupported PHY mode %s !\n", - phy_modes(phy_intf)); - } - - phydev = phy_find_first(priv->mii_bus); - if (!phydev) { - netdev_info(netdev, "%s: no PHY found\n", netdev->name); - return -ENODEV; - } - - phydev = phy_connect(netdev, phydev_name(phydev), - &ftgmac100_adjust_link, phy_intf); - - if (IS_ERR(phydev)) { - netdev_err(netdev, "%s: Could not attach to PHY\n", netdev->name); - return PTR_ERR(phydev); - } - - /* Indicate that we support PAUSE frames (see comment in - * Documentation/networking/phy.rst) - */ - phy_support_asym_pause(phydev); - - /* Display what we found */ - phy_attached_info(phydev); - - return 0; -} - static int ftgmac100_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum) { struct net_device *netdev = bus->priv; @@ -1410,10 +1299,8 @@ static int ftgmac100_init_all(struct ftgmac100 *priv, bool ignore_alloc_err) return err; } -static void ftgmac100_reset_task(struct work_struct *work) +static void ftgmac100_reset(struct ftgmac100 *priv) { - struct ftgmac100 *priv = container_of(work, struct ftgmac100, - reset_task); struct net_device *netdev = priv->netdev; int err; @@ -1459,6 +1346,134 @@ static void ftgmac100_reset_task(struct work_struct *work) rtnl_unlock(); } +static void ftgmac100_reset_task(struct work_struct *work) +{ + struct ftgmac100 *priv = container_of(work, struct ftgmac100, + reset_task); + + ftgmac100_reset(priv); +} + +static void ftgmac100_adjust_link(struct net_device *netdev) +{ + struct ftgmac100 *priv = netdev_priv(netdev); + struct phy_device *phydev = netdev->phydev; + bool tx_pause, rx_pause; + int new_speed; + + /* We store "no link" as speed 0 */ + if (!phydev->link) + new_speed = 0; + else + new_speed = phydev->speed; + + /* Grab pause settings from PHY if configured to do so */ + if (priv->aneg_pause) { + rx_pause = tx_pause = phydev->pause; + if (phydev->asym_pause) + tx_pause = !rx_pause; + } else { + rx_pause = priv->rx_pause; + tx_pause = priv->tx_pause; + } + + /* Link hasn't changed, do nothing */ + if (phydev->speed == priv->cur_speed && + phydev->duplex == priv->cur_duplex && + rx_pause == priv->rx_pause && + tx_pause == priv->tx_pause) + return; + + /* Print status if we have a link or we had one and just lost it, + * don't print otherwise. + */ + if (new_speed || priv->cur_speed) + phy_print_status(phydev); + + priv->cur_speed = new_speed; + priv->cur_duplex = phydev->duplex; + priv->rx_pause = rx_pause; + priv->tx_pause = tx_pause; + + /* Link is down, do nothing else */ + if (!new_speed) + return; + + /* Disable all interrupts */ + iowrite32(0, priv->base + FTGMAC100_OFFSET_IER); + + /* Release phy lock to allow ftgmac100_reset to acquire it, keeping lock + * order consistent to prevent deadlock.
+ */ + if (netdev->phydev) + mutex_unlock(&netdev->phydev->lock); + + ftgmac100_reset(priv); + + if (netdev->phydev) + mutex_lock(&netdev->phydev->lock); + +} + +static int ftgmac100_mii_probe(struct net_device *netdev) +{ + struct ftgmac100 *priv = netdev_priv(netdev); + struct platform_device *pdev = to_platform_device(priv->dev); + struct device_node *np = pdev->dev.of_node; + struct phy_device *phydev; + phy_interface_t phy_intf; + int err; + + /* Default to RGMII. It's a gigabit part after all */ + err = of_get_phy_mode(np, &phy_intf); + if (err) + phy_intf = PHY_INTERFACE_MODE_RGMII; + + /* Aspeed only supports these. I don't know about other IP + * block vendors so I'm going to just let them through for + * now. Note that this is only a warning if for some obscure + * reason the DT really means to lie about it or it's a newer + * part we don't know about. + * + * On the Aspeed SoC there are additionally straps and SCU + * control bits that could tell us what the interface is + * (or allow us to configure it while the IP block is held + * in reset). For now I chose to keep this driver away from + * those SoC specific bits and assume the device-tree is + * right and the SCU has been configured properly by pinmux + * or the firmware. + */ + if (priv->is_aspeed && !(phy_interface_mode_is_rgmii(phy_intf))) { + netdev_warn(netdev, + "Unsupported PHY mode %s !\n", + phy_modes(phy_intf)); + } + + phydev = phy_find_first(priv->mii_bus); + if (!phydev) { + netdev_info(netdev, "%s: no PHY found\n", netdev->name); + return -ENODEV; + } + + phydev = phy_connect(netdev, phydev_name(phydev), + &ftgmac100_adjust_link, phy_intf); + + if (IS_ERR(phydev)) { + netdev_err(netdev, "%s: Could not attach to PHY\n", netdev->name); + return PTR_ERR(phydev); + } + + /* Indicate that we support PAUSE frames (see comment in + * Documentation/networking/phy.rst) + */ + phy_support_asym_pause(phydev); + + /* Display what we found */ + phy_attached_info(phydev); + + return 0; +} + static int ftgmac100_open(struct net_device *netdev) { struct ftgmac100 *priv = netdev_priv(netdev); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index c4a48e6f1758..939fa9db6a2e 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -35,6 +35,75 @@ MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver"); struct ptp_qoriq *dpaa2_ptp; EXPORT_SYMBOL(dpaa2_ptp); +static void dpaa2_eth_detect_features(struct dpaa2_eth_priv *priv) +{ + priv->features = 0; + + if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_PTP_ONESTEP_VER_MAJOR, + DPNI_PTP_ONESTEP_VER_MINOR) >= 0) + priv->features |= DPAA2_ETH_FEATURE_ONESTEP_CFG_DIRECT; +} + +static void dpaa2_update_ptp_onestep_indirect(struct dpaa2_eth_priv *priv, + u32 offset, u8 udp) +{ + struct dpni_single_step_cfg cfg; + + cfg.en = 1; + cfg.ch_update = udp; + cfg.offset = offset; + cfg.peer_delay = 0; + + if (dpni_set_single_step_cfg(priv->mc_io, 0, priv->mc_token, &cfg)) + WARN_ONCE(1, "Failed to set single step register"); +} + +static void dpaa2_update_ptp_onestep_direct(struct dpaa2_eth_priv *priv, + u32 offset, u8 udp) +{ + u32 val = 0; + + val = DPAA2_PTP_SINGLE_STEP_ENABLE | + DPAA2_PTP_SINGLE_CORRECTION_OFF(offset); + + if (udp) + val |= DPAA2_PTP_SINGLE_STEP_CH; + + if (priv->onestep_reg_base) + writel(val, priv->onestep_reg_base); +} + +static void dpaa2_ptp_onestep_reg_update_method(struct dpaa2_eth_priv *priv) +{ + struct device *dev = priv->net_dev->dev.parent; + struct 
dpni_single_step_cfg ptp_cfg; + + priv->dpaa2_set_onestep_params_cb = dpaa2_update_ptp_onestep_indirect; + + if (!(priv->features & DPAA2_ETH_FEATURE_ONESTEP_CFG_DIRECT)) + return; + + if (dpni_get_single_step_cfg(priv->mc_io, 0, + priv->mc_token, &ptp_cfg)) { + dev_err(dev, "dpni_get_single_step_cfg cannot retrieve onestep reg, falling back to indirect update\n"); + return; + } + + if (!ptp_cfg.ptp_onestep_reg_base) { + dev_err(dev, "1588 onestep reg not available, falling back to indirect update\n"); + return; + } + + priv->onestep_reg_base = ioremap(ptp_cfg.ptp_onestep_reg_base, + sizeof(u32)); + if (!priv->onestep_reg_base) { + dev_err(dev, "1588 onestep reg cannot be mapped, falling back to indirect update\n"); + return; + } + + priv->dpaa2_set_onestep_params_cb = dpaa2_update_ptp_onestep_direct; +} + static void *dpaa2_iova_to_virt(struct iommu_domain *domain, dma_addr_t iova_addr) { @@ -696,7 +765,6 @@ static void dpaa2_eth_enable_tx_tstamp(struct dpaa2_eth_priv *priv, struct sk_buff *skb) { struct ptp_tstamp origin_timestamp; - struct dpni_single_step_cfg cfg; u8 msgtype, twostep, udp; struct dpaa2_faead *faead; struct dpaa2_fas *fas; @@ -750,14 +818,12 @@ static void dpaa2_eth_enable_tx_tstamp(struct dpaa2_eth_priv *priv, htonl(origin_timestamp.sec_lsb); *(__be32 *)(data + offset2 + 6) = htonl(origin_timestamp.nsec); - cfg.en = 1; - cfg.ch_update = udp; - cfg.offset = offset1; - cfg.peer_delay = 0; + if (priv->ptp_correction_off == offset1) + return; + + priv->dpaa2_set_onestep_params_cb(priv, offset1, udp); + priv->ptp_correction_off = offset1; - if (dpni_set_single_step_cfg(priv->mc_io, 0, priv->mc_token, - &cfg)) - WARN_ONCE(1, "Failed to set single step register"); } } @@ -1196,7 +1262,7 @@ static int dpaa2_eth_build_gso_fd(struct dpaa2_eth_priv *priv, /* Setup the SG entry for the header */ dpaa2_sg_set_addr(sgt, tso_hdr_dma); dpaa2_sg_set_len(sgt, hdr_len); - dpaa2_sg_set_final(sgt, data_left > 0 ? false : true); + dpaa2_sg_set_final(sgt, data_left <= 0); /* Compose the SG entries for each fragment of data */ num_sge = 1; @@ -1215,7 +1281,7 @@ static int dpaa2_eth_build_gso_fd(struct dpaa2_eth_priv *priv, } dpaa2_sg_set_addr(sgt, addr); dpaa2_sg_set_len(sgt, size); - dpaa2_sg_set_final(sgt, size == data_left ? true : false); + dpaa2_sg_set_final(sgt, size == data_left); num_sge++; @@ -2407,6 +2473,9 @@ static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) config.rx_filter = HWTSTAMP_FILTER_ALL; } + if (priv->tx_tstamp_type == HWTSTAMP_TX_ONESTEP_SYNC) + dpaa2_ptp_onestep_reg_update_method(priv); + return copy_to_user(rq->ifr_data, &config, sizeof(config)) ? 
-EFAULT : 0; } @@ -4300,6 +4369,8 @@ static int dpaa2_eth_netdev_init(struct net_device *net_dev) return err; } + dpaa2_eth_detect_features(priv); + /* Capabilities listing */ supported |= IFF_LIVE_ADDR_CHANGE; @@ -4539,7 +4610,7 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev) } INIT_WORK(&priv->tx_onestep_tstamp, dpaa2_eth_tx_onestep_tstamp); - + mutex_init(&priv->onestep_tstamp_lock); skb_queue_head_init(&priv->tx_skbs); priv->rx_copybreak = DPAA2_ETH_DEFAULT_COPYBREAK; @@ -4758,6 +4829,8 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev) dpaa2_eth_free_dpbp(priv); dpaa2_eth_free_dpio(priv); dpaa2_eth_free_dpni(priv); + if (priv->onestep_reg_base) + iounmap(priv->onestep_reg_base); fsl_mc_portal_free(priv->mc_io); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h index b79831cd1a94..447718483ef4 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h @@ -526,12 +526,15 @@ struct dpaa2_eth_priv { u8 num_channels; struct dpaa2_eth_channel *channel[DPAA2_ETH_MAX_DPCONS]; struct dpaa2_eth_sgt_cache __percpu *sgt_cache; - + unsigned long features; struct dpni_attr dpni_attrs; u16 dpni_ver_major; u16 dpni_ver_minor; u16 tx_data_offset; - + void __iomem *onestep_reg_base; + u8 ptp_correction_off; + void (*dpaa2_set_onestep_params_cb)(struct dpaa2_eth_priv *priv, + u32 offset, u8 udp); struct fsl_mc_device *dpbp_dev; u16 rx_buf_size; u16 bpid; @@ -673,6 +676,13 @@ enum dpaa2_eth_rx_dist { #define DPAA2_ETH_DIST_L4DST BIT(8) #define DPAA2_ETH_DIST_ALL (~0ULL) +#define DPNI_PTP_ONESTEP_VER_MAJOR 8 +#define DPNI_PTP_ONESTEP_VER_MINOR 2 +#define DPAA2_ETH_FEATURE_ONESTEP_CFG_DIRECT BIT(0) +#define DPAA2_PTP_SINGLE_STEP_ENABLE BIT(31) +#define DPAA2_PTP_SINGLE_STEP_CH BIT(7) +#define DPAA2_PTP_SINGLE_CORRECTION_OFF(v) ((v) << 8) + #define DPNI_PAUSE_VER_MAJOR 7 #define DPNI_PAUSE_VER_MINOR 13 #define dpaa2_eth_has_pause_support(priv) \ diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c index d6eefbbf163f..cacd454ac696 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c @@ -532,6 +532,7 @@ static int dpaa2_switch_flower_parse_mirror_key(struct flow_cls_offload *cls, struct flow_rule *rule = flow_cls_offload_flow_rule(cls); struct flow_dissector *dissector = rule->match.dissector; struct netlink_ext_ack *extack = cls->common.extack; + int ret = -EOPNOTSUPP; if (dissector->used_keys & ~(BIT(FLOW_DISSECTOR_KEY_BASIC) | @@ -561,9 +562,10 @@ static int dpaa2_switch_flower_parse_mirror_key(struct flow_cls_offload *cls, } *vlan = (u16)match.key->vlan_id; + ret = 0; } - return 0; + return ret; } static int diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h index 9f80bdfeedec..828f538097af 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h @@ -98,7 +98,7 @@ #define DPNI_CMDID_GET_LINK_CFG DPNI_CMD(0x278) #define DPNI_CMDID_SET_SINGLE_STEP_CFG DPNI_CMD(0x279) -#define DPNI_CMDID_GET_SINGLE_STEP_CFG DPNI_CMD(0x27a) +#define DPNI_CMDID_GET_SINGLE_STEP_CFG DPNI_CMD_V2(0x27a) /* Macros for accessing command fields smaller than 1byte */ #define DPNI_MASK(field) \ @@ -658,12 +658,16 @@ struct dpni_cmd_single_step_cfg { __le16 flags; __le16 offset; __le32 peer_delay; + __le32 
ptp_onestep_reg_base;
+	__le32 pad0;
 };
 
 struct dpni_rsp_single_step_cfg {
 	__le16 flags;
 	__le16 offset;
 	__le32 peer_delay;
+	__le32 ptp_onestep_reg_base;
+	__le32 pad0;
 };
 
 struct dpni_cmd_enable_vlan_filter {
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.c b/drivers/net/ethernet/freescale/dpaa2/dpni.c
index d6afada99fb6..6c3b36f20fb8 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni.c
@@ -2136,6 +2136,8 @@ int dpni_get_single_step_cfg(struct fsl_mc_io *mc_io,
 	ptp_cfg->ch_update = dpni_get_field(le16_to_cpu(rsp_params->flags),
 					    PTP_CH_UPDATE) ? 1 : 0;
 	ptp_cfg->peer_delay = le32_to_cpu(rsp_params->peer_delay);
+	ptp_cfg->ptp_onestep_reg_base =
+		le32_to_cpu(rsp_params->ptp_onestep_reg_base);
 
 	return err;
 }
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.h b/drivers/net/ethernet/freescale/dpaa2/dpni.h
index 7de0562bbf59..6fffd519aa00 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni.h
@@ -1074,12 +1074,18 @@ int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
 * @peer_delay:	For peer-to-peer transparent clocks add this value to the
 *		correction field in addition to the transient time update.
 *		The value expresses nanoseconds.
+ * @ptp_onestep_reg_base: 1588 SINGLE_STEP register base address. This address
+ *			  is used to update the register contents directly.
+ *			  The user has to create an address mapping for it.
 */
 struct dpni_single_step_cfg {
 	u8 en;
 	u8 ch_update;
 	u16 offset;
 	u32 peer_delay;
+	u32 ptp_onestep_reg_base;
 };
 
 int dpni_set_single_step_cfg(struct fsl_mc_io *mc_io,
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
index 5a3eea1a718b..79afb1d7289b 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
@@ -1021,6 +1021,46 @@ static struct actions_fwd *enetc_check_flow_actions(u64 acts,
 	return NULL;
 }
 
+static int enetc_psfp_policer_validate(const struct flow_action *action,
+				       const struct flow_action_entry *act,
+				       struct netlink_ext_ack *extack)
+{
+	if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
+		NL_SET_ERR_MSG_MOD(extack,
+				   "Offload not supported when exceed action is not drop");
+		return -EOPNOTSUPP;
+	}
+
+	if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+	    act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+		NL_SET_ERR_MSG_MOD(extack,
+				   "Offload not supported when conform action is not pipe or ok");
+		return -EOPNOTSUPP;
+	}
+
+	if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
+	    !flow_action_is_last_entry(action, act)) {
+		NL_SET_ERR_MSG_MOD(extack,
+				   "Offload not supported when conform action is ok, but action is not last");
+		return -EOPNOTSUPP;
+	}
+
+	if (act->police.peakrate_bytes_ps ||
+	    act->police.avrate || act->police.overhead) {
+		NL_SET_ERR_MSG_MOD(extack,
+				   "Offload not supported when peakrate/avrate/overhead is configured");
+		return -EOPNOTSUPP;
+	}
+
+	if (act->police.rate_pkt_ps) {
+		NL_SET_ERR_MSG_MOD(extack,
+				   "QoS offload does not support packets per second");
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
 static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
 				      struct flow_cls_offload *f)
 {
@@ -1177,11 +1217,10 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
 
 	/* Flow meter and max frame size */
 	if (entryp) {
-		if (entryp->police.rate_pkt_ps) {
-			NL_SET_ERR_MSG_MOD(extack, "QoS offload not support packets per second");
-			err = -EOPNOTSUPP;
+		err =
enetc_psfp_policer_validate(&rule->action, entryp, extack); + if (err) goto free_sfi; - } + if (entryp->police.burst) { fmi = kzalloc(sizeof(*fmi), GFP_KERNEL); if (!fmi) { diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index ff756265d58f..9a2c16d69e2c 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -1464,6 +1464,7 @@ static int gfar_get_ts_info(struct net_device *dev, ptp_node = of_find_compatible_node(NULL, NULL, "fsl,etsec-ptp"); if (ptp_node) { ptp_dev = of_find_device_by_node(ptp_node); + of_node_put(ptp_node); if (ptp_dev) ptp = platform_get_drvdata(ptp_dev); } diff --git a/drivers/net/ethernet/fungible/Kconfig b/drivers/net/ethernet/fungible/Kconfig new file mode 100644 index 000000000000..1ecedecc0f6c --- /dev/null +++ b/drivers/net/ethernet/fungible/Kconfig @@ -0,0 +1,28 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Fungible network driver configuration +# + +config NET_VENDOR_FUNGIBLE + bool "Fungible devices" + default y + help + If you have a Fungible network device, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Fungible cards. If you say Y, you will be asked + for your specific card in the following questions. + +if NET_VENDOR_FUNGIBLE + +config FUN_CORE + tristate + select SBITMAP + help + A service module offering basic common services to Fungible + device drivers. + +source "drivers/net/ethernet/fungible/funeth/Kconfig" + +endif # NET_VENDOR_FUNGIBLE diff --git a/drivers/net/ethernet/fungible/Makefile b/drivers/net/ethernet/fungible/Makefile new file mode 100644 index 000000000000..df759f1585a1 --- /dev/null +++ b/drivers/net/ethernet/fungible/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) +# +# Makefile for the Fungible network device drivers. 
+# + +obj-$(CONFIG_FUN_CORE) += funcore/ +obj-$(CONFIG_FUN_ETH) += funeth/ diff --git a/drivers/net/ethernet/fungible/funcore/Makefile b/drivers/net/ethernet/fungible/funcore/Makefile new file mode 100644 index 000000000000..bc16b264b53e --- /dev/null +++ b/drivers/net/ethernet/fungible/funcore/Makefile @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) + +obj-$(CONFIG_FUN_CORE) += funcore.o + +funcore-y := fun_dev.o fun_queue.o diff --git a/drivers/net/ethernet/fungible/funcore/fun_dev.c b/drivers/net/ethernet/fungible/funcore/fun_dev.c new file mode 100644 index 000000000000..5d7aef73df61 --- /dev/null +++ b/drivers/net/ethernet/fungible/funcore/fun_dev.c @@ -0,0 +1,843 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) + +#include <linux/aer.h> +#include <linux/bitmap.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/io-64-nonatomic-lo-hi.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/nvme.h> +#include <linux/pci.h> +#include <linux/wait.h> +#include <linux/sched/signal.h> + +#include "fun_queue.h" +#include "fun_dev.h" + +#define FUN_ADMIN_CMD_TO_MS 3000 + +enum { + AQA_ASQS_SHIFT = 0, + AQA_ACQS_SHIFT = 16, + AQA_MIN_QUEUE_SIZE = 2, + AQA_MAX_QUEUE_SIZE = 4096 +}; + +/* context for admin commands */ +struct fun_cmd_ctx { + fun_admin_callback_t cb; /* callback to invoke on completion */ + void *cb_data; /* user data provided to callback */ + int cpu; /* CPU where the cmd's tag was allocated */ +}; + +/* Context for synchronous admin commands. */ +struct fun_sync_cmd_ctx { + struct completion compl; + u8 *rsp_buf; /* caller provided response buffer */ + unsigned int rsp_len; /* response buffer size */ + u8 rsp_status; /* command response status */ +}; + +/* Wait for the CSTS.RDY bit to match @enabled. */ +static int fun_wait_ready(struct fun_dev *fdev, bool enabled) +{ + unsigned int cap_to = NVME_CAP_TIMEOUT(fdev->cap_reg); + u32 bit = enabled ? NVME_CSTS_RDY : 0; + unsigned long deadline; + + deadline = ((cap_to + 1) * HZ / 2) + jiffies; /* CAP.TO is in 500ms */ + + for (;;) { + u32 csts = readl(fdev->bar + NVME_REG_CSTS); + + if (csts == ~0) { + dev_err(fdev->dev, "CSTS register read %#x\n", csts); + return -EIO; + } + + if ((csts & NVME_CSTS_RDY) == bit) + return 0; + + if (time_is_before_jiffies(deadline)) + break; + + msleep(100); + } + + dev_err(fdev->dev, + "Timed out waiting for device to indicate RDY %u; aborting %s\n", + enabled, enabled ? "initialization" : "reset"); + return -ETIMEDOUT; +} + +/* Check CSTS and return an error if it is unreadable or has unexpected + * RDY value. + */ +static int fun_check_csts_rdy(struct fun_dev *fdev, unsigned int expected_rdy) +{ + u32 csts = readl(fdev->bar + NVME_REG_CSTS); + u32 actual_rdy = csts & NVME_CSTS_RDY; + + if (csts == ~0) { + dev_err(fdev->dev, "CSTS register read %#x\n", csts); + return -EIO; + } + if (actual_rdy != expected_rdy) { + dev_err(fdev->dev, "Unexpected CSTS RDY %u\n", actual_rdy); + return -EINVAL; + } + return 0; +} + +/* Check that CSTS RDY has the expected value. Then write a new value to the CC + * register and wait for CSTS RDY to match the new CC ENABLE state. 
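(Annotation, not part of the patch: the CC/CSTS handshake above follows the NVMe controller-enable model, so a full controller reset built from these helpers is simply "disable, then enable". A minimal sketch, assuming the admin queue registers are reprogrammed before re-enabling as fun_enable_admin_queue() below does; fun_reset_ctrl() itself is hypothetical:)

static int fun_reset_ctrl(struct fun_dev *fdev, u32 cqesz_log2, u32 sqesz_log2)
{
	int rc = fun_disable_ctrl(fdev);	/* CC.EN = 0, wait for RDY = 0 */

	if (rc)
		return rc;
	/* CC.EN = 1, wait for RDY = 1 */
	return fun_enable_ctrl(fdev, cqesz_log2, sqesz_log2);
}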
+ */ +static int fun_update_cc_enable(struct fun_dev *fdev, unsigned int initial_rdy) +{ + int rc = fun_check_csts_rdy(fdev, initial_rdy); + + if (rc) + return rc; + writel(fdev->cc_reg, fdev->bar + NVME_REG_CC); + return fun_wait_ready(fdev, !!(fdev->cc_reg & NVME_CC_ENABLE)); +} + +static int fun_disable_ctrl(struct fun_dev *fdev) +{ + fdev->cc_reg &= ~(NVME_CC_SHN_MASK | NVME_CC_ENABLE); + return fun_update_cc_enable(fdev, 1); +} + +static int fun_enable_ctrl(struct fun_dev *fdev, u32 admin_cqesz_log2, + u32 admin_sqesz_log2) +{ + fdev->cc_reg = (admin_cqesz_log2 << NVME_CC_IOCQES_SHIFT) | + (admin_sqesz_log2 << NVME_CC_IOSQES_SHIFT) | + ((PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT) | + NVME_CC_ENABLE; + + return fun_update_cc_enable(fdev, 0); +} + +static int fun_map_bars(struct fun_dev *fdev, const char *name) +{ + struct pci_dev *pdev = to_pci_dev(fdev->dev); + int err; + + err = pci_request_mem_regions(pdev, name); + if (err) { + dev_err(&pdev->dev, + "Couldn't get PCI memory resources, err %d\n", err); + return err; + } + + fdev->bar = pci_ioremap_bar(pdev, 0); + if (!fdev->bar) { + dev_err(&pdev->dev, "Couldn't map BAR 0\n"); + pci_release_mem_regions(pdev); + return -ENOMEM; + } + + return 0; +} + +static void fun_unmap_bars(struct fun_dev *fdev) +{ + struct pci_dev *pdev = to_pci_dev(fdev->dev); + + if (fdev->bar) { + iounmap(fdev->bar); + fdev->bar = NULL; + pci_release_mem_regions(pdev); + } +} + +static int fun_set_dma_masks(struct device *dev) +{ + int err; + + err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); + if (err) + dev_err(dev, "DMA mask configuration failed, err %d\n", err); + return err; +} + +static irqreturn_t fun_admin_irq(int irq, void *data) +{ + struct fun_queue *funq = data; + + return fun_process_cq(funq, 0) ? IRQ_HANDLED : IRQ_NONE; +} + +static void fun_complete_admin_cmd(struct fun_queue *funq, void *data, + void *entry, const struct fun_cqe_info *info) +{ + const struct fun_admin_rsp_common *rsp_common = entry; + struct fun_dev *fdev = funq->fdev; + struct fun_cmd_ctx *cmd_ctx; + int cpu; + u16 cid; + + if (info->sqhd == cpu_to_be16(0xffff)) { + dev_dbg(fdev->dev, "adminq event"); + if (fdev->adminq_cb) + fdev->adminq_cb(fdev, entry); + return; + } + + cid = be16_to_cpu(rsp_common->cid); + dev_dbg(fdev->dev, "admin CQE cid %u, op %u, ret %u\n", cid, + rsp_common->op, rsp_common->ret); + + cmd_ctx = &fdev->cmd_ctx[cid]; + if (cmd_ctx->cpu < 0) { + dev_err(fdev->dev, + "admin CQE with CID=%u, op=%u does not match a pending command\n", + cid, rsp_common->op); + return; + } + + if (cmd_ctx->cb) + cmd_ctx->cb(fdev, entry, xchg(&cmd_ctx->cb_data, NULL)); + + cpu = cmd_ctx->cpu; + cmd_ctx->cpu = -1; + sbitmap_queue_clear(&fdev->admin_sbq, cid, cpu); +} + +static int fun_init_cmd_ctx(struct fun_dev *fdev, unsigned int ntags) +{ + unsigned int i; + + fdev->cmd_ctx = kvcalloc(ntags, sizeof(*fdev->cmd_ctx), GFP_KERNEL); + if (!fdev->cmd_ctx) + return -ENOMEM; + + for (i = 0; i < ntags; i++) + fdev->cmd_ctx[i].cpu = -1; + + return 0; +} + +/* Allocate and enable an admin queue and assign it the first IRQ vector. 
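(Annotation: the admin queue sizes are programmed zero-based into the NVMe-style AQA register, with the SQ count at bit 0 and the CQ count at bit 16, before the ring base addresses are written to ASQ/ACQ. A worked example with made-up 32-entry rings; sq_dma_addr and cq_dma_addr are hypothetical:)

u32 aqa = ((32 - 1) << AQA_ASQS_SHIFT) |	/* 32-entry SQ, encoded 0-based */
	  ((32 - 1) << AQA_ACQS_SHIFT);		/* 32-entry CQ; aqa == 0x001f001f */

writel(aqa, fdev->bar + NVME_REG_AQA);
writeq(sq_dma_addr, fdev->bar + NVME_REG_ASQ);
writeq(cq_dma_addr, fdev->bar + NVME_REG_ACQ);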
*/ +static int fun_enable_admin_queue(struct fun_dev *fdev, + const struct fun_dev_params *areq) +{ + struct fun_queue_alloc_req qreq = { + .cqe_size_log2 = areq->cqe_size_log2, + .sqe_size_log2 = areq->sqe_size_log2, + .cq_depth = areq->cq_depth, + .sq_depth = areq->sq_depth, + .rq_depth = areq->rq_depth, + }; + unsigned int ntags = areq->sq_depth - 1; + struct fun_queue *funq; + int rc; + + if (fdev->admin_q) + return -EEXIST; + + if (areq->sq_depth < AQA_MIN_QUEUE_SIZE || + areq->sq_depth > AQA_MAX_QUEUE_SIZE || + areq->cq_depth < AQA_MIN_QUEUE_SIZE || + areq->cq_depth > AQA_MAX_QUEUE_SIZE) + return -EINVAL; + + fdev->admin_q = fun_alloc_queue(fdev, 0, &qreq); + if (!fdev->admin_q) + return -ENOMEM; + + rc = fun_init_cmd_ctx(fdev, ntags); + if (rc) + goto free_q; + + rc = sbitmap_queue_init_node(&fdev->admin_sbq, ntags, -1, false, + GFP_KERNEL, dev_to_node(fdev->dev)); + if (rc) + goto free_cmd_ctx; + + funq = fdev->admin_q; + funq->cq_vector = 0; + rc = fun_request_irq(funq, dev_name(fdev->dev), fun_admin_irq, funq); + if (rc) + goto free_sbq; + + fun_set_cq_callback(funq, fun_complete_admin_cmd, NULL); + fdev->adminq_cb = areq->event_cb; + + writel((funq->sq_depth - 1) << AQA_ASQS_SHIFT | + (funq->cq_depth - 1) << AQA_ACQS_SHIFT, + fdev->bar + NVME_REG_AQA); + + writeq(funq->sq_dma_addr, fdev->bar + NVME_REG_ASQ); + writeq(funq->cq_dma_addr, fdev->bar + NVME_REG_ACQ); + + rc = fun_enable_ctrl(fdev, areq->cqe_size_log2, areq->sqe_size_log2); + if (rc) + goto free_irq; + + if (areq->rq_depth) { + rc = fun_create_rq(funq); + if (rc) + goto disable_ctrl; + + funq_rq_post(funq); + } + + return 0; + +disable_ctrl: + fun_disable_ctrl(fdev); +free_irq: + fun_free_irq(funq); +free_sbq: + sbitmap_queue_free(&fdev->admin_sbq); +free_cmd_ctx: + kvfree(fdev->cmd_ctx); + fdev->cmd_ctx = NULL; +free_q: + fun_free_queue(fdev->admin_q); + fdev->admin_q = NULL; + return rc; +} + +static void fun_disable_admin_queue(struct fun_dev *fdev) +{ + struct fun_queue *admq = fdev->admin_q; + + if (!admq) + return; + + fun_disable_ctrl(fdev); + + fun_free_irq(admq); + __fun_process_cq(admq, 0); + + sbitmap_queue_free(&fdev->admin_sbq); + + kvfree(fdev->cmd_ctx); + fdev->cmd_ctx = NULL; + + fun_free_queue(admq); + fdev->admin_q = NULL; +} + +/* Return %true if the admin queue has stopped servicing commands as can be + * detected through registers. This isn't exhaustive and may provide false + * negatives. + */ +static bool fun_adminq_stopped(struct fun_dev *fdev) +{ + u32 csts = readl(fdev->bar + NVME_REG_CSTS); + + return (csts & (NVME_CSTS_CFS | NVME_CSTS_RDY)) != NVME_CSTS_RDY; +} + +static int fun_wait_for_tag(struct fun_dev *fdev, int *cpup) +{ + struct sbitmap_queue *sbq = &fdev->admin_sbq; + struct sbq_wait_state *ws = &sbq->ws[0]; + DEFINE_SBQ_WAIT(wait); + int tag; + + for (;;) { + sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_UNINTERRUPTIBLE); + if (fdev->suppress_cmds) { + tag = -ESHUTDOWN; + break; + } + tag = sbitmap_queue_get(sbq, cpup); + if (tag >= 0) + break; + schedule(); + } + + sbitmap_finish_wait(sbq, ws, &wait); + return tag; +} + +/* Submit an asynchronous admin command. Caller is responsible for implementing + * any waiting or timeout. Upon command completion the callback @cb is called. 
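(Annotation: a sketch of asynchronous use under stated assumptions; the handler my_destroy_done() and the request req are hypothetical. With wait_ok == false the submission fails fast with -EAGAIN when no tag is free instead of sleeping:)

static void my_destroy_done(struct fun_dev *fdev, void *rsp, void *cb_data)
{
	const struct fun_admin_rsp_common *r = rsp;

	dev_dbg(fdev->dev, "async admin cmd done, op %u ret %u\n",
		r->op, r->ret);
}

	err = fun_submit_admin_cmd(fdev, &req.common, my_destroy_done,
				   NULL, false);
	if (err == -EAGAIN)
		dev_warn(fdev->dev, "no admin tag free, try again later\n");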
+ */ +int fun_submit_admin_cmd(struct fun_dev *fdev, struct fun_admin_req_common *cmd, + fun_admin_callback_t cb, void *cb_data, bool wait_ok) +{ + struct fun_queue *funq = fdev->admin_q; + unsigned int cmdsize = cmd->len8 * 8; + struct fun_cmd_ctx *cmd_ctx; + int tag, cpu, rc = 0; + + if (WARN_ON(cmdsize > (1 << funq->sqe_size_log2))) + return -EMSGSIZE; + + tag = sbitmap_queue_get(&fdev->admin_sbq, &cpu); + if (tag < 0) { + if (!wait_ok) + return -EAGAIN; + tag = fun_wait_for_tag(fdev, &cpu); + if (tag < 0) + return tag; + } + + cmd->cid = cpu_to_be16(tag); + + cmd_ctx = &fdev->cmd_ctx[tag]; + cmd_ctx->cb = cb; + cmd_ctx->cb_data = cb_data; + + spin_lock(&funq->sq_lock); + + if (unlikely(fdev->suppress_cmds)) { + rc = -ESHUTDOWN; + sbitmap_queue_clear(&fdev->admin_sbq, tag, cpu); + } else { + cmd_ctx->cpu = cpu; + memcpy(fun_sqe_at(funq, funq->sq_tail), cmd, cmdsize); + + dev_dbg(fdev->dev, "admin cmd @ %u: %8ph\n", funq->sq_tail, + cmd); + + if (++funq->sq_tail == funq->sq_depth) + funq->sq_tail = 0; + writel(funq->sq_tail, funq->sq_db); + } + spin_unlock(&funq->sq_lock); + return rc; +} + +/* Abandon a pending admin command by clearing the issuer's callback data. + * Failure indicates that the command either has already completed or its + * completion is racing with this call. + */ +static bool fun_abandon_admin_cmd(struct fun_dev *fd, + const struct fun_admin_req_common *cmd, + void *cb_data) +{ + u16 cid = be16_to_cpu(cmd->cid); + struct fun_cmd_ctx *cmd_ctx = &fd->cmd_ctx[cid]; + + return cmpxchg(&cmd_ctx->cb_data, cb_data, NULL) == cb_data; +} + +/* Stop submission of new admin commands and wake up any processes waiting for + * tags. Already submitted commands are left to complete or time out. + */ +static void fun_admin_stop(struct fun_dev *fdev) +{ + spin_lock(&fdev->admin_q->sq_lock); + fdev->suppress_cmds = true; + spin_unlock(&fdev->admin_q->sq_lock); + sbitmap_queue_wake_all(&fdev->admin_sbq); +} + +/* The callback for synchronous execution of admin commands. It copies the + * command response to the caller's buffer and signals completion. + */ +static void fun_admin_cmd_sync_cb(struct fun_dev *fd, void *rsp, void *cb_data) +{ + const struct fun_admin_rsp_common *rsp_common = rsp; + struct fun_sync_cmd_ctx *ctx = cb_data; + + if (!ctx) + return; /* command issuer timed out and left */ + if (ctx->rsp_buf) { + unsigned int rsp_len = rsp_common->len8 * 8; + + if (unlikely(rsp_len > ctx->rsp_len)) { + dev_err(fd->dev, + "response for op %u is %uB > response buffer %uB\n", + rsp_common->op, rsp_len, ctx->rsp_len); + rsp_len = ctx->rsp_len; + } + memcpy(ctx->rsp_buf, rsp, rsp_len); + } + ctx->rsp_status = rsp_common->ret; + complete(&ctx->compl); +} + +/* Submit a synchronous admin command. */ +int fun_submit_admin_sync_cmd(struct fun_dev *fdev, + struct fun_admin_req_common *cmd, void *rsp, + size_t rspsize, unsigned int timeout) +{ + struct fun_sync_cmd_ctx ctx = { + .compl = COMPLETION_INITIALIZER_ONSTACK(ctx.compl), + .rsp_buf = rsp, + .rsp_len = rspsize, + }; + unsigned int cmdlen = cmd->len8 * 8; + unsigned long jiffies_left; + int ret; + + ret = fun_submit_admin_cmd(fdev, cmd, fun_admin_cmd_sync_cb, &ctx, + true); + if (ret) + return ret; + + if (!timeout) + timeout = FUN_ADMIN_CMD_TO_MS; + + jiffies_left = wait_for_completion_timeout(&ctx.compl, + msecs_to_jiffies(timeout)); + if (!jiffies_left) { + /* The command timed out. Attempt to cancel it so we can return. + * But if the command is in the process of completing we'll + * wait for it. 
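(Annotation: the abandon/complete race here is decided by a pair of atomics on cmd_ctx->cb_data, which is what makes it safe for the timed-out caller to leave its on-stack context behind only after winning that race:)

/* The two sides of the cb_data handoff, as implemented in this file:
 *   completion path: xchg(&cmd_ctx->cb_data, NULL)          - takes the pointer
 *   timeout path:    cmpxchg(&cmd_ctx->cb_data, ctx, NULL)  - tries to revoke it
 * Exactly one side observes the non-NULL value, so the on-stack
 * fun_sync_cmd_ctx is never used after fun_submit_admin_sync_cmd() returns.
 */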
+ */ + if (fun_abandon_admin_cmd(fdev, cmd, &ctx)) { + dev_err(fdev->dev, "admin command timed out: %*ph\n", + cmdlen, cmd); + fun_admin_stop(fdev); + /* see if the timeout was due to a queue failure */ + if (fun_adminq_stopped(fdev)) + dev_err(fdev->dev, + "device does not accept admin commands\n"); + + return -ETIMEDOUT; + } + wait_for_completion(&ctx.compl); + } + + if (ctx.rsp_status) { + dev_err(fdev->dev, "admin command failed, err %d: %*ph\n", + ctx.rsp_status, cmdlen, cmd); + } + + return -ctx.rsp_status; +} +EXPORT_SYMBOL_GPL(fun_submit_admin_sync_cmd); + +/* Return the number of device resources of the requested type. */ +int fun_get_res_count(struct fun_dev *fdev, enum fun_admin_op res) +{ + union { + struct fun_admin_res_count_req req; + struct fun_admin_res_count_rsp rsp; + } cmd; + int rc; + + cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(res, sizeof(cmd.req)); + cmd.req.count = FUN_ADMIN_SIMPLE_SUBOP_INIT(FUN_ADMIN_SUBOP_RES_COUNT, + 0, 0); + + rc = fun_submit_admin_sync_cmd(fdev, &cmd.req.common, &cmd.rsp, + sizeof(cmd), 0); + return rc ? rc : be32_to_cpu(cmd.rsp.count.data); +} +EXPORT_SYMBOL_GPL(fun_get_res_count); + +/* Request that the instance of resource @res with the given id be deleted. */ +int fun_res_destroy(struct fun_dev *fdev, enum fun_admin_op res, + unsigned int flags, u32 id) +{ + struct fun_admin_generic_destroy_req req = { + .common = FUN_ADMIN_REQ_COMMON_INIT2(res, sizeof(req)), + .destroy = FUN_ADMIN_SIMPLE_SUBOP_INIT(FUN_ADMIN_SUBOP_DESTROY, + flags, id) + }; + + return fun_submit_admin_sync_cmd(fdev, &req.common, NULL, 0, 0); +} +EXPORT_SYMBOL_GPL(fun_res_destroy); + +/* Bind two entities of the given types and IDs. */ +int fun_bind(struct fun_dev *fdev, enum fun_admin_bind_type type0, + unsigned int id0, enum fun_admin_bind_type type1, + unsigned int id1) +{ + struct { + struct fun_admin_bind_req req; + struct fun_admin_bind_entry entry[2]; + } cmd = { + .req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_BIND, + sizeof(cmd)), + .entry[0] = FUN_ADMIN_BIND_ENTRY_INIT(type0, id0), + .entry[1] = FUN_ADMIN_BIND_ENTRY_INIT(type1, id1), + }; + + return fun_submit_admin_sync_cmd(fdev, &cmd.req.common, NULL, 0, 0); +} +EXPORT_SYMBOL_GPL(fun_bind); + +static int fun_get_dev_limits(struct fun_dev *fdev) +{ + struct pci_dev *pdev = to_pci_dev(fdev->dev); + unsigned int cq_count, sq_count, num_dbs; + int rc; + + rc = fun_get_res_count(fdev, FUN_ADMIN_OP_EPCQ); + if (rc < 0) + return rc; + cq_count = rc; + + rc = fun_get_res_count(fdev, FUN_ADMIN_OP_EPSQ); + if (rc < 0) + return rc; + sq_count = rc; + + /* The admin queue consumes 1 CQ and at least 1 SQ. To be usable the + * device must provide additional queues. + */ + if (cq_count < 2 || sq_count < 2 + !!fdev->admin_q->rq_depth) + return -EINVAL; + + /* Calculate the max QID based on SQ/CQ/doorbell counts. + * SQ/CQ doorbells alternate. + */ + num_dbs = (pci_resource_len(pdev, 0) - NVME_REG_DBS) / + (fdev->db_stride * 4); + fdev->max_qid = min3(cq_count, sq_count, num_dbs / 2) - 1; + fdev->kern_end_qid = fdev->max_qid + 1; + return 0; +} + +/* Allocate all MSI-X vectors available on a function and at least @min_vecs. 
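(Annotation: passing the [min_vecs, num_msix] range to pci_alloc_irq_vectors() lets the driver opportunistically take every vector the function exposes while still failing cleanly below the minimum. A usage sketch with made-up numbers, mirroring the fun_dev_enable() call that adds one vector for the admin CQ:)

int nvecs = fun_alloc_irqs(pdev, 8 + 1);	/* 8 queue vectors + admin CQ */

if (nvecs < 0)
	return nvecs;	/* fewer than 9 MSI-X vectors available */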
*/ +static int fun_alloc_irqs(struct pci_dev *pdev, unsigned int min_vecs) +{ + int vecs, num_msix = pci_msix_vec_count(pdev); + + if (num_msix < 0) + return num_msix; + if (min_vecs > num_msix) + return -ERANGE; + + vecs = pci_alloc_irq_vectors(pdev, min_vecs, num_msix, PCI_IRQ_MSIX); + if (vecs > 0) { + dev_info(&pdev->dev, + "Allocated %d IRQ vectors of %d requested\n", + vecs, num_msix); + } else { + dev_err(&pdev->dev, + "Unable to allocate at least %u IRQ vectors\n", + min_vecs); + } + return vecs; +} + +/* Allocate and initialize the IRQ manager state. */ +static int fun_alloc_irq_mgr(struct fun_dev *fdev) +{ + fdev->irq_map = bitmap_zalloc(fdev->num_irqs, GFP_KERNEL); + if (!fdev->irq_map) + return -ENOMEM; + + spin_lock_init(&fdev->irqmgr_lock); + /* mark IRQ 0 allocated, it is used by the admin queue */ + __set_bit(0, fdev->irq_map); + fdev->irqs_avail = fdev->num_irqs - 1; + return 0; +} + +/* Reserve @nirqs of the currently available IRQs and return their indices. */ +int fun_reserve_irqs(struct fun_dev *fdev, unsigned int nirqs, u16 *irq_indices) +{ + unsigned int b, n = 0; + int err = -ENOSPC; + + if (!nirqs) + return 0; + + spin_lock(&fdev->irqmgr_lock); + if (nirqs > fdev->irqs_avail) + goto unlock; + + for_each_clear_bit(b, fdev->irq_map, fdev->num_irqs) { + __set_bit(b, fdev->irq_map); + irq_indices[n++] = b; + if (n >= nirqs) + break; + } + + WARN_ON(n < nirqs); + fdev->irqs_avail -= n; + err = n; +unlock: + spin_unlock(&fdev->irqmgr_lock); + return err; +} +EXPORT_SYMBOL(fun_reserve_irqs); + +/* Release @nirqs previously allocated IRQS with the supplied indices. */ +void fun_release_irqs(struct fun_dev *fdev, unsigned int nirqs, + u16 *irq_indices) +{ + unsigned int i; + + spin_lock(&fdev->irqmgr_lock); + for (i = 0; i < nirqs; i++) + __clear_bit(irq_indices[i], fdev->irq_map); + fdev->irqs_avail += nirqs; + spin_unlock(&fdev->irqmgr_lock); +} +EXPORT_SYMBOL(fun_release_irqs); + +static void fun_serv_handler(struct work_struct *work) +{ + struct fun_dev *fd = container_of(work, struct fun_dev, service_task); + + if (test_bit(FUN_SERV_DISABLED, &fd->service_flags)) + return; + if (fd->serv_cb) + fd->serv_cb(fd); +} + +void fun_serv_stop(struct fun_dev *fd) +{ + set_bit(FUN_SERV_DISABLED, &fd->service_flags); + cancel_work_sync(&fd->service_task); +} +EXPORT_SYMBOL_GPL(fun_serv_stop); + +void fun_serv_restart(struct fun_dev *fd) +{ + clear_bit(FUN_SERV_DISABLED, &fd->service_flags); + if (fd->service_flags) + schedule_work(&fd->service_task); +} +EXPORT_SYMBOL_GPL(fun_serv_restart); + +void fun_serv_sched(struct fun_dev *fd) +{ + if (!test_bit(FUN_SERV_DISABLED, &fd->service_flags)) + schedule_work(&fd->service_task); +} +EXPORT_SYMBOL_GPL(fun_serv_sched); + +/* Check and try to get the device into a proper state for initialization, + * i.e., CSTS.RDY = CC.EN = 0. + */ +static int sanitize_dev(struct fun_dev *fdev) +{ + int rc; + + fdev->cap_reg = readq(fdev->bar + NVME_REG_CAP); + fdev->cc_reg = readl(fdev->bar + NVME_REG_CC); + + /* First get RDY to agree with the current EN. Give RDY the opportunity + * to complete a potential recent EN change. + */ + rc = fun_wait_ready(fdev, fdev->cc_reg & NVME_CC_ENABLE); + if (rc) + return rc; + + /* Next, reset the device if EN is currently 1. */ + if (fdev->cc_reg & NVME_CC_ENABLE) + rc = fun_disable_ctrl(fdev); + + return rc; +} + +/* Undo the device initialization of fun_dev_enable(). 
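(Annotation: teardown runs in strict reverse order of fun_dev_enable(); the summary below is descriptive only:)

/* Reverse-order teardown performed by fun_dev_disable():
 *   1. destroy the firmware SWUPGRADE handle, if one was created
 *   2. disable the admin queue (CC.EN = 0, free its IRQ, drain its CQ)
 *   3. free the IRQ-manager bitmap and the MSI-X vectors
 *   4. clear bus mastering and AER reporting, disable the PCI device
 *   5. unmap BAR0 and release the PCI memory regions
 */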
*/ +void fun_dev_disable(struct fun_dev *fdev) +{ + struct pci_dev *pdev = to_pci_dev(fdev->dev); + + pci_set_drvdata(pdev, NULL); + + if (fdev->fw_handle != FUN_HCI_ID_INVALID) { + fun_res_destroy(fdev, FUN_ADMIN_OP_SWUPGRADE, 0, + fdev->fw_handle); + fdev->fw_handle = FUN_HCI_ID_INVALID; + } + + fun_disable_admin_queue(fdev); + + bitmap_free(fdev->irq_map); + pci_free_irq_vectors(pdev); + + pci_clear_master(pdev); + pci_disable_pcie_error_reporting(pdev); + pci_disable_device(pdev); + + fun_unmap_bars(fdev); +} +EXPORT_SYMBOL(fun_dev_disable); + +/* Perform basic initialization of a device, including + * - PCI config space setup and BAR0 mapping + * - interrupt management initialization + * - 1 admin queue setup + * - determination of some device limits, such as number of queues. + */ +int fun_dev_enable(struct fun_dev *fdev, struct pci_dev *pdev, + const struct fun_dev_params *areq, const char *name) +{ + int rc; + + fdev->dev = &pdev->dev; + rc = fun_map_bars(fdev, name); + if (rc) + return rc; + + rc = fun_set_dma_masks(fdev->dev); + if (rc) + goto unmap; + + rc = pci_enable_device_mem(pdev); + if (rc) { + dev_err(&pdev->dev, "Couldn't enable device, err %d\n", rc); + goto unmap; + } + + pci_enable_pcie_error_reporting(pdev); + + rc = sanitize_dev(fdev); + if (rc) + goto disable_dev; + + fdev->fw_handle = FUN_HCI_ID_INVALID; + fdev->q_depth = NVME_CAP_MQES(fdev->cap_reg) + 1; + fdev->db_stride = 1 << NVME_CAP_STRIDE(fdev->cap_reg); + fdev->dbs = fdev->bar + NVME_REG_DBS; + + INIT_WORK(&fdev->service_task, fun_serv_handler); + fdev->service_flags = FUN_SERV_DISABLED; + fdev->serv_cb = areq->serv_cb; + + rc = fun_alloc_irqs(pdev, areq->min_msix + 1); /* +1 for admin CQ */ + if (rc < 0) + goto disable_dev; + fdev->num_irqs = rc; + + rc = fun_alloc_irq_mgr(fdev); + if (rc) + goto free_irqs; + + pci_set_master(pdev); + rc = fun_enable_admin_queue(fdev, areq); + if (rc) + goto free_irq_mgr; + + rc = fun_get_dev_limits(fdev); + if (rc < 0) + goto disable_admin; + + pci_save_state(pdev); + pci_set_drvdata(pdev, fdev); + pcie_print_link_status(pdev); + dev_dbg(fdev->dev, "q_depth %u, db_stride %u, max qid %d kern_end_qid %d\n", + fdev->q_depth, fdev->db_stride, fdev->max_qid, + fdev->kern_end_qid); + return 0; + +disable_admin: + fun_disable_admin_queue(fdev); +free_irq_mgr: + pci_clear_master(pdev); + bitmap_free(fdev->irq_map); +free_irqs: + pci_free_irq_vectors(pdev); +disable_dev: + pci_disable_pcie_error_reporting(pdev); + pci_disable_device(pdev); +unmap: + fun_unmap_bars(fdev); + return rc; +} +EXPORT_SYMBOL(fun_dev_enable); + +MODULE_AUTHOR("Dimitris Michailidis <dmichail@fungible.com>"); +MODULE_DESCRIPTION("Core services driver for Fungible devices"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/ethernet/fungible/funcore/fun_dev.h b/drivers/net/ethernet/fungible/funcore/fun_dev.h new file mode 100644 index 000000000000..9e8c17ce8887 --- /dev/null +++ b/drivers/net/ethernet/fungible/funcore/fun_dev.h @@ -0,0 +1,150 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ + +#ifndef _FUNDEV_H +#define _FUNDEV_H + +#include <linux/sbitmap.h> +#include <linux/spinlock_types.h> +#include <linux/workqueue.h> +#include "fun_hci.h" + +struct pci_dev; +struct fun_dev; +struct fun_queue; +struct fun_cmd_ctx; +struct fun_queue_alloc_req; + +/* doorbell fields */ +enum { + FUN_DB_QIDX_S = 0, + FUN_DB_INTCOAL_ENTRIES_S = 16, + FUN_DB_INTCOAL_ENTRIES_M = 0x7f, + FUN_DB_INTCOAL_USEC_S = 23, + FUN_DB_INTCOAL_USEC_M = 0x7f, + FUN_DB_IRQ_S = 30, + FUN_DB_IRQ_F = 1 << 
FUN_DB_IRQ_S, + FUN_DB_IRQ_ARM_S = 31, + FUN_DB_IRQ_ARM_F = 1U << FUN_DB_IRQ_ARM_S +}; + +/* Callback for asynchronous admin commands. + * Invoked on reception of command response. + */ +typedef void (*fun_admin_callback_t)(struct fun_dev *fdev, void *rsp, + void *cb_data); + +/* Callback for events/notifications received by an admin queue. */ +typedef void (*fun_admin_event_cb)(struct fun_dev *fdev, void *cqe); + +/* Callback for pending work handled by the service task. */ +typedef void (*fun_serv_cb)(struct fun_dev *fd); + +/* service task flags */ +enum { + FUN_SERV_DISABLED, /* service task is disabled */ + FUN_SERV_FIRST_AVAIL +}; + +/* Driver state associated with a PCI function. */ +struct fun_dev { + struct device *dev; + + void __iomem *bar; /* start of BAR0 mapping */ + u32 __iomem *dbs; /* start of doorbells in BAR0 mapping */ + + /* admin queue */ + struct fun_queue *admin_q; + struct sbitmap_queue admin_sbq; + struct fun_cmd_ctx *cmd_ctx; + fun_admin_event_cb adminq_cb; + bool suppress_cmds; /* if set don't write commands to SQ */ + + /* address increment between consecutive doorbells, in 4B units */ + unsigned int db_stride; + + /* SW versions of device registers */ + u32 cc_reg; /* CC register */ + u64 cap_reg; /* CAPability register */ + + unsigned int q_depth; /* max queue depth supported by device */ + unsigned int max_qid; /* = #queues - 1, separately for SQs and CQs */ + unsigned int kern_end_qid; /* last qid in the kernel range + 1 */ + + unsigned int fw_handle; + + /* IRQ manager */ + unsigned int num_irqs; + unsigned int irqs_avail; + spinlock_t irqmgr_lock; + unsigned long *irq_map; + + /* The service task handles work that needs a process context */ + struct work_struct service_task; + unsigned long service_flags; + fun_serv_cb serv_cb; +}; + +struct fun_dev_params { + u8 cqe_size_log2; /* admin q CQE size */ + u8 sqe_size_log2; /* admin q SQE size */ + + /* admin q depths */ + u16 cq_depth; + u16 sq_depth; + u16 rq_depth; + + u16 min_msix; /* min vectors needed by requesting driver */ + + fun_admin_event_cb event_cb; + fun_serv_cb serv_cb; +}; + +/* Return the BAR address of a doorbell. */ +static inline u32 __iomem *fun_db_addr(const struct fun_dev *fdev, + unsigned int db_index) +{ + return &fdev->dbs[db_index * fdev->db_stride]; +} + +/* Return the BAR address of an SQ doorbell. SQ and CQ DBs alternate, + * SQs have even DB indices. 
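(Annotation: a worked example of the alternating doorbell layout, assuming db_stride == 1, i.e. consecutive 4-byte doorbells:)

/* qid 0: SQ DB = dbs[0], CQ DB = dbs[1]
 * qid 1: SQ DB = dbs[2], CQ DB = dbs[3]
 * so fun_sq_db_addr(fdev, 1) == &fdev->dbs[2]
 * and fun_cq_db_addr(fdev, 1) == &fdev->dbs[3]
 */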
+ */ +static inline u32 __iomem *fun_sq_db_addr(const struct fun_dev *fdev, + unsigned int sqid) +{ + return fun_db_addr(fdev, sqid * 2); +} + +static inline u32 __iomem *fun_cq_db_addr(const struct fun_dev *fdev, + unsigned int cqid) +{ + return fun_db_addr(fdev, cqid * 2 + 1); +} + +int fun_get_res_count(struct fun_dev *fdev, enum fun_admin_op res); +int fun_res_destroy(struct fun_dev *fdev, enum fun_admin_op res, + unsigned int flags, u32 id); +int fun_bind(struct fun_dev *fdev, enum fun_admin_bind_type type0, + unsigned int id0, enum fun_admin_bind_type type1, + unsigned int id1); + +int fun_submit_admin_cmd(struct fun_dev *fdev, struct fun_admin_req_common *cmd, + fun_admin_callback_t cb, void *cb_data, bool wait_ok); +int fun_submit_admin_sync_cmd(struct fun_dev *fdev, + struct fun_admin_req_common *cmd, void *rsp, + size_t rspsize, unsigned int timeout); + +int fun_dev_enable(struct fun_dev *fdev, struct pci_dev *pdev, + const struct fun_dev_params *areq, const char *name); +void fun_dev_disable(struct fun_dev *fdev); + +int fun_reserve_irqs(struct fun_dev *fdev, unsigned int nirqs, + u16 *irq_indices); +void fun_release_irqs(struct fun_dev *fdev, unsigned int nirqs, + u16 *irq_indices); + +void fun_serv_stop(struct fun_dev *fd); +void fun_serv_restart(struct fun_dev *fd); +void fun_serv_sched(struct fun_dev *fd); + +#endif /* _FUNDEV_H */ diff --git a/drivers/net/ethernet/fungible/funcore/fun_hci.h b/drivers/net/ethernet/fungible/funcore/fun_hci.h new file mode 100644 index 000000000000..257203e94b68 --- /dev/null +++ b/drivers/net/ethernet/fungible/funcore/fun_hci.h @@ -0,0 +1,1202 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ + +#ifndef __FUN_HCI_H +#define __FUN_HCI_H + +enum { + FUN_HCI_ID_INVALID = 0xffffffff, +}; + +enum fun_admin_op { + FUN_ADMIN_OP_BIND = 0x1, + FUN_ADMIN_OP_EPCQ = 0x11, + FUN_ADMIN_OP_EPSQ = 0x12, + FUN_ADMIN_OP_PORT = 0x13, + FUN_ADMIN_OP_ETH = 0x14, + FUN_ADMIN_OP_VI = 0x15, + FUN_ADMIN_OP_SWUPGRADE = 0x1f, + FUN_ADMIN_OP_RSS = 0x21, + FUN_ADMIN_OP_ADI = 0x25, + FUN_ADMIN_OP_KTLS = 0x26, +}; + +enum { + FUN_REQ_COMMON_FLAG_RSP = 0x1, + FUN_REQ_COMMON_FLAG_HEAD_WB = 0x2, + FUN_REQ_COMMON_FLAG_INT = 0x4, + FUN_REQ_COMMON_FLAG_CQE_IN_RQBUF = 0x8, +}; + +struct fun_admin_req_common { + __u8 op; + __u8 len8; + __be16 flags; + __u8 suboff8; + __u8 rsvd0; + __be16 cid; +}; + +#define FUN_ADMIN_REQ_COMMON_INIT(_op, _len8, _flags, _suboff8, _cid) \ + (struct fun_admin_req_common) { \ + .op = (_op), .len8 = (_len8), .flags = cpu_to_be16(_flags), \ + .suboff8 = (_suboff8), .cid = cpu_to_be16(_cid), \ + } + +#define FUN_ADMIN_REQ_COMMON_INIT2(_op, _len) \ + (struct fun_admin_req_common) { \ + .op = (_op), .len8 = (_len) / 8, \ + } + +struct fun_admin_rsp_common { + __u8 op; + __u8 len8; + __be16 flags; + __u8 suboff8; + __u8 ret; + __be16 cid; +}; + +struct fun_admin_write48_req { + __be64 key_to_data; +}; + +#define FUN_ADMIN_WRITE48_REQ_KEY_S 56U +#define FUN_ADMIN_WRITE48_REQ_KEY_M 0xff +#define FUN_ADMIN_WRITE48_REQ_KEY_P_NOSWAP(x) \ + (((__u64)x) << FUN_ADMIN_WRITE48_REQ_KEY_S) + +#define FUN_ADMIN_WRITE48_REQ_DATA_S 0U +#define FUN_ADMIN_WRITE48_REQ_DATA_M 0xffffffffffff +#define FUN_ADMIN_WRITE48_REQ_DATA_P_NOSWAP(x) \ + (((__u64)x) << FUN_ADMIN_WRITE48_REQ_DATA_S) + +#define FUN_ADMIN_WRITE48_REQ_INIT(key, data) \ + (struct fun_admin_write48_req) { \ + .key_to_data = cpu_to_be64( \ + FUN_ADMIN_WRITE48_REQ_KEY_P_NOSWAP(key) | \ + FUN_ADMIN_WRITE48_REQ_DATA_P_NOSWAP(data)), \ + } + +struct fun_admin_write48_rsp { + __be64 key_to_data; +}; + 
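(Annotation: the write48/read48 formats pack an 8-bit key and 48 bits of data into a single big-endian 64-bit word. A sketch using FUN_ADMIN_PORT_KEY_MTU, defined later in this header; the MTU value itself is made up:)

/* key 0x1, data 1500 -> key_to_data = cpu_to_be64(0x01000000000005dc) */
struct fun_admin_write48_req w =
	FUN_ADMIN_WRITE48_REQ_INIT(FUN_ADMIN_PORT_KEY_MTU, 1500);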
+struct fun_admin_read48_req { + __be64 key_pack; +}; + +#define FUN_ADMIN_READ48_REQ_KEY_S 56U +#define FUN_ADMIN_READ48_REQ_KEY_M 0xff +#define FUN_ADMIN_READ48_REQ_KEY_P_NOSWAP(x) \ + (((__u64)x) << FUN_ADMIN_READ48_REQ_KEY_S) + +#define FUN_ADMIN_READ48_REQ_INIT(key) \ + (struct fun_admin_read48_req) { \ + .key_pack = \ + cpu_to_be64(FUN_ADMIN_READ48_REQ_KEY_P_NOSWAP(key)), \ + } + +struct fun_admin_read48_rsp { + __be64 key_to_data; +}; + +#define FUN_ADMIN_READ48_RSP_KEY_S 56U +#define FUN_ADMIN_READ48_RSP_KEY_M 0xff +#define FUN_ADMIN_READ48_RSP_KEY_G(x) \ + ((be64_to_cpu(x) >> FUN_ADMIN_READ48_RSP_KEY_S) & \ + FUN_ADMIN_READ48_RSP_KEY_M) + +#define FUN_ADMIN_READ48_RSP_RET_S 48U +#define FUN_ADMIN_READ48_RSP_RET_M 0xff +#define FUN_ADMIN_READ48_RSP_RET_G(x) \ + ((be64_to_cpu(x) >> FUN_ADMIN_READ48_RSP_RET_S) & \ + FUN_ADMIN_READ48_RSP_RET_M) + +#define FUN_ADMIN_READ48_RSP_DATA_S 0U +#define FUN_ADMIN_READ48_RSP_DATA_M 0xffffffffffff +#define FUN_ADMIN_READ48_RSP_DATA_G(x) \ + ((be64_to_cpu(x) >> FUN_ADMIN_READ48_RSP_DATA_S) & \ + FUN_ADMIN_READ48_RSP_DATA_M) + +enum fun_admin_bind_type { + FUN_ADMIN_BIND_TYPE_EPCQ = 0x1, + FUN_ADMIN_BIND_TYPE_EPSQ = 0x2, + FUN_ADMIN_BIND_TYPE_PORT = 0x3, + FUN_ADMIN_BIND_TYPE_RSS = 0x4, + FUN_ADMIN_BIND_TYPE_VI = 0x5, + FUN_ADMIN_BIND_TYPE_ETH = 0x6, +}; + +struct fun_admin_bind_entry { + __u8 type; + __u8 rsvd0[3]; + __be32 id; +}; + +#define FUN_ADMIN_BIND_ENTRY_INIT(_type, _id) \ + (struct fun_admin_bind_entry) { \ + .type = (_type), .id = cpu_to_be32(_id), \ + } + +struct fun_admin_bind_req { + struct fun_admin_req_common common; + struct fun_admin_bind_entry entry[]; +}; + +struct fun_admin_bind_rsp { + struct fun_admin_rsp_common bind_rsp_common; +}; + +struct fun_admin_simple_subop { + __u8 subop; + __u8 rsvd0; + __be16 flags; + __be32 data; +}; + +#define FUN_ADMIN_SIMPLE_SUBOP_INIT(_subop, _flags, _data) \ + (struct fun_admin_simple_subop) { \ + .subop = (_subop), .flags = cpu_to_be16(_flags), \ + .data = cpu_to_be32(_data), \ + } + +enum fun_admin_subop { + FUN_ADMIN_SUBOP_CREATE = 0x10, + FUN_ADMIN_SUBOP_DESTROY = 0x11, + FUN_ADMIN_SUBOP_MODIFY = 0x12, + FUN_ADMIN_SUBOP_RES_COUNT = 0x14, + FUN_ADMIN_SUBOP_READ = 0x15, + FUN_ADMIN_SUBOP_WRITE = 0x16, + FUN_ADMIN_SUBOP_NOTIFY = 0x17, +}; + +enum { + FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR = 0x1, +}; + +struct fun_admin_generic_destroy_req { + struct fun_admin_req_common common; + struct fun_admin_simple_subop destroy; +}; + +struct fun_admin_generic_create_rsp { + struct fun_admin_rsp_common common; + + __u8 subop; + __u8 rsvd0; + __be16 flags; + __be32 id; +}; + +struct fun_admin_res_count_req { + struct fun_admin_req_common common; + struct fun_admin_simple_subop count; +}; + +struct fun_admin_res_count_rsp { + struct fun_admin_rsp_common common; + struct fun_admin_simple_subop count; +}; + +enum { + FUN_ADMIN_EPCQ_CREATE_FLAG_INT_EPCQ = 0x2, + FUN_ADMIN_EPCQ_CREATE_FLAG_ENTRY_WR_TPH = 0x4, + FUN_ADMIN_EPCQ_CREATE_FLAG_SL_WR_TPH = 0x8, + FUN_ADMIN_EPCQ_CREATE_FLAG_RQ = 0x80, + FUN_ADMIN_EPCQ_CREATE_FLAG_INT_IQ = 0x100, + FUN_ADMIN_EPCQ_CREATE_FLAG_INT_NOARM = 0x200, + FUN_ADMIN_EPCQ_CREATE_FLAG_DROP_ON_OVERFLOW = 0x400, +}; + +struct fun_admin_epcq_req { + struct fun_admin_req_common common; + union epcq_req_subop { + struct fun_admin_epcq_create_req { + __u8 subop; + __u8 rsvd0; + __be16 flags; + __be32 id; + + __be32 epsqid; + __u8 rsvd1; + __u8 entry_size_log2; + __be16 nentries; + + __be64 address; + + __be16 tailroom; /* per packet tailroom in bytes */ + __u8 headroom; /* per packet 
headroom in 2B units */ + __u8 intcoal_kbytes; + __u8 intcoal_holdoff_nentries; + __u8 intcoal_holdoff_usecs; + __be16 intid; + + __be32 scan_start_id; + __be32 scan_end_id; + + __be16 tph_cpuid; + __u8 rsvd3[6]; + } create; + + struct fun_admin_epcq_modify_req { + __u8 subop; + __u8 rsvd0; + __be16 flags; + __be32 id; + + __be16 headroom; /* headroom in bytes */ + __u8 rsvd1[6]; + } modify; + } u; +}; + +#define FUN_ADMIN_EPCQ_CREATE_REQ_INIT( \ + _subop, _flags, _id, _epsqid, _entry_size_log2, _nentries, _address, \ + _tailroom, _headroom, _intcoal_kbytes, _intcoal_holdoff_nentries, \ + _intcoal_holdoff_usecs, _intid, _scan_start_id, _scan_end_id, \ + _tph_cpuid) \ + (struct fun_admin_epcq_create_req) { \ + .subop = (_subop), .flags = cpu_to_be16(_flags), \ + .id = cpu_to_be32(_id), .epsqid = cpu_to_be32(_epsqid), \ + .entry_size_log2 = _entry_size_log2, \ + .nentries = cpu_to_be16(_nentries), \ + .address = cpu_to_be64(_address), \ + .tailroom = cpu_to_be16(_tailroom), .headroom = _headroom, \ + .intcoal_kbytes = _intcoal_kbytes, \ + .intcoal_holdoff_nentries = _intcoal_holdoff_nentries, \ + .intcoal_holdoff_usecs = _intcoal_holdoff_usecs, \ + .intid = cpu_to_be16(_intid), \ + .scan_start_id = cpu_to_be32(_scan_start_id), \ + .scan_end_id = cpu_to_be32(_scan_end_id), \ + .tph_cpuid = cpu_to_be16(_tph_cpuid), \ + } + +#define FUN_ADMIN_EPCQ_MODIFY_REQ_INIT(_subop, _flags, _id, _headroom) \ + (struct fun_admin_epcq_modify_req) { \ + .subop = (_subop), .flags = cpu_to_be16(_flags), \ + .id = cpu_to_be32(_id), .headroom = cpu_to_be16(_headroom), \ + } + +enum { + FUN_ADMIN_EPSQ_CREATE_FLAG_INT_EPSQ = 0x2, + FUN_ADMIN_EPSQ_CREATE_FLAG_ENTRY_RD_TPH = 0x4, + FUN_ADMIN_EPSQ_CREATE_FLAG_GL_RD_TPH = 0x8, + FUN_ADMIN_EPSQ_CREATE_FLAG_HEAD_WB_ADDRESS = 0x10, + FUN_ADMIN_EPSQ_CREATE_FLAG_HEAD_WB_ADDRESS_TPH = 0x20, + FUN_ADMIN_EPSQ_CREATE_FLAG_HEAD_WB_EPCQ = 0x40, + FUN_ADMIN_EPSQ_CREATE_FLAG_RQ = 0x80, + FUN_ADMIN_EPSQ_CREATE_FLAG_INT_IQ = 0x100, + FUN_ADMIN_EPSQ_CREATE_FLAG_NO_CMPL = 0x200, +}; + +struct fun_admin_epsq_req { + struct fun_admin_req_common common; + + union epsq_req_subop { + struct fun_admin_epsq_create_req { + __u8 subop; + __u8 rsvd0; + __be16 flags; + __be32 id; + + __be32 epcqid; + __u8 rsvd1; + __u8 entry_size_log2; + __be16 nentries; + + __be64 address; /* DMA address of epsq */ + + __u8 rsvd2[3]; + __u8 intcoal_kbytes; + __u8 intcoal_holdoff_nentries; + __u8 intcoal_holdoff_usecs; + __be16 intid; + + __be32 scan_start_id; + __be32 scan_end_id; + + __u8 rsvd3[4]; + __be16 tph_cpuid; + __u8 buf_size_log2; /* log2 of RQ buffer size */ + __u8 head_wb_size_log2; /* log2 of head write back size */ + + __be64 head_wb_address; /* DMA address for head writeback */ + } create; + } u; +}; + +#define FUN_ADMIN_EPSQ_CREATE_REQ_INIT( \ + _subop, _flags, _id, _epcqid, _entry_size_log2, _nentries, _address, \ + _intcoal_kbytes, _intcoal_holdoff_nentries, _intcoal_holdoff_usecs, \ + _intid, _scan_start_id, _scan_end_id, _tph_cpuid, _buf_size_log2, \ + _head_wb_size_log2, _head_wb_address) \ + (struct fun_admin_epsq_create_req) { \ + .subop = (_subop), .flags = cpu_to_be16(_flags), \ + .id = cpu_to_be32(_id), .epcqid = cpu_to_be32(_epcqid), \ + .entry_size_log2 = _entry_size_log2, \ + .nentries = cpu_to_be16(_nentries), \ + .address = cpu_to_be64(_address), \ + .intcoal_kbytes = _intcoal_kbytes, \ + .intcoal_holdoff_nentries = _intcoal_holdoff_nentries, \ + .intcoal_holdoff_usecs = _intcoal_holdoff_usecs, \ + .intid = cpu_to_be16(_intid), \ + .scan_start_id = cpu_to_be32(_scan_start_id), \ + 
.scan_end_id = cpu_to_be32(_scan_end_id), \ + .tph_cpuid = cpu_to_be16(_tph_cpuid), \ + .buf_size_log2 = _buf_size_log2, \ + .head_wb_size_log2 = _head_wb_size_log2, \ + .head_wb_address = cpu_to_be64(_head_wb_address), \ + } + +enum { + FUN_PORT_CAP_OFFLOADS = 0x1, + FUN_PORT_CAP_STATS = 0x2, + FUN_PORT_CAP_LOOPBACK = 0x4, + FUN_PORT_CAP_VPORT = 0x8, + FUN_PORT_CAP_TX_PAUSE = 0x10, + FUN_PORT_CAP_RX_PAUSE = 0x20, + FUN_PORT_CAP_AUTONEG = 0x40, + FUN_PORT_CAP_RSS = 0x80, + FUN_PORT_CAP_VLAN_OFFLOADS = 0x100, + FUN_PORT_CAP_ENCAP_OFFLOADS = 0x200, + FUN_PORT_CAP_1000_X = 0x1000, + FUN_PORT_CAP_10G_R = 0x2000, + FUN_PORT_CAP_40G_R4 = 0x4000, + FUN_PORT_CAP_25G_R = 0x8000, + FUN_PORT_CAP_50G_R2 = 0x10000, + FUN_PORT_CAP_50G_R = 0x20000, + FUN_PORT_CAP_100G_R4 = 0x40000, + FUN_PORT_CAP_100G_R2 = 0x80000, + FUN_PORT_CAP_200G_R4 = 0x100000, + FUN_PORT_CAP_FEC_NONE = 0x10000000, + FUN_PORT_CAP_FEC_FC = 0x20000000, + FUN_PORT_CAP_FEC_RS = 0x40000000, +}; + +enum fun_port_brkout_mode { + FUN_PORT_BRKMODE_NA = 0x0, + FUN_PORT_BRKMODE_NONE = 0x1, + FUN_PORT_BRKMODE_2X = 0x2, + FUN_PORT_BRKMODE_4X = 0x3, +}; + +enum { + FUN_PORT_SPEED_AUTO = 0x0, + FUN_PORT_SPEED_10M = 0x1, + FUN_PORT_SPEED_100M = 0x2, + FUN_PORT_SPEED_1G = 0x4, + FUN_PORT_SPEED_10G = 0x8, + FUN_PORT_SPEED_25G = 0x10, + FUN_PORT_SPEED_40G = 0x20, + FUN_PORT_SPEED_50G = 0x40, + FUN_PORT_SPEED_100G = 0x80, + FUN_PORT_SPEED_200G = 0x100, +}; + +enum fun_port_duplex_mode { + FUN_PORT_FULL_DUPLEX = 0x0, + FUN_PORT_HALF_DUPLEX = 0x1, +}; + +enum { + FUN_PORT_FEC_NA = 0x0, + FUN_PORT_FEC_OFF = 0x1, + FUN_PORT_FEC_RS = 0x2, + FUN_PORT_FEC_FC = 0x4, + FUN_PORT_FEC_AUTO = 0x8, +}; + +enum fun_port_link_status { + FUN_PORT_LINK_UP = 0x0, + FUN_PORT_LINK_UP_WITH_ERR = 0x1, + FUN_PORT_LINK_DOWN = 0x2, +}; + +enum fun_port_led_type { + FUN_PORT_LED_OFF = 0x0, + FUN_PORT_LED_AMBER = 0x1, + FUN_PORT_LED_GREEN = 0x2, + FUN_PORT_LED_BEACON_ON = 0x3, + FUN_PORT_LED_BEACON_OFF = 0x4, +}; + +enum { + FUN_PORT_FLAG_MAC_DOWN = 0x1, + FUN_PORT_FLAG_MAC_UP = 0x2, + FUN_PORT_FLAG_NH_DOWN = 0x4, + FUN_PORT_FLAG_NH_UP = 0x8, +}; + +enum { + FUN_PORT_FLAG_ENABLE_NOTIFY = 0x1, +}; + +enum fun_port_lane_attr { + FUN_PORT_LANE_1 = 0x1, + FUN_PORT_LANE_2 = 0x2, + FUN_PORT_LANE_4 = 0x4, + FUN_PORT_LANE_SPEED_10G = 0x100, + FUN_PORT_LANE_SPEED_25G = 0x200, + FUN_PORT_LANE_SPEED_50G = 0x400, + FUN_PORT_LANE_SPLIT = 0x8000, +}; + +enum fun_admin_port_subop { + FUN_ADMIN_PORT_SUBOP_INETADDR_EVENT = 0x24, +}; + +enum fun_admin_port_key { + FUN_ADMIN_PORT_KEY_ILLEGAL = 0x0, + FUN_ADMIN_PORT_KEY_MTU = 0x1, + FUN_ADMIN_PORT_KEY_FEC = 0x2, + FUN_ADMIN_PORT_KEY_SPEED = 0x3, + FUN_ADMIN_PORT_KEY_DEBOUNCE = 0x4, + FUN_ADMIN_PORT_KEY_DUPLEX = 0x5, + FUN_ADMIN_PORT_KEY_MACADDR = 0x6, + FUN_ADMIN_PORT_KEY_LINKMODE = 0x7, + FUN_ADMIN_PORT_KEY_BREAKOUT = 0x8, + FUN_ADMIN_PORT_KEY_ENABLE = 0x9, + FUN_ADMIN_PORT_KEY_DISABLE = 0xa, + FUN_ADMIN_PORT_KEY_ERR_DISABLE = 0xb, + FUN_ADMIN_PORT_KEY_CAPABILITIES = 0xc, + FUN_ADMIN_PORT_KEY_LP_CAPABILITIES = 0xd, + FUN_ADMIN_PORT_KEY_STATS_DMA_LOW = 0xe, + FUN_ADMIN_PORT_KEY_STATS_DMA_HIGH = 0xf, + FUN_ADMIN_PORT_KEY_LANE_ATTRS = 0x10, + FUN_ADMIN_PORT_KEY_LED = 0x11, + FUN_ADMIN_PORT_KEY_ADVERT = 0x12, +}; + +struct fun_subop_imm { + __u8 subop; /* see fun_data_subop enum */ + __u8 flags; + __u8 nsgl; + __u8 rsvd0; + __be32 len; + + __u8 data[]; +}; + +enum fun_subop_sgl_flags { + FUN_SUBOP_SGL_USE_OFF8 = 0x1, + FUN_SUBOP_FLAG_FREE_BUF = 0x2, + FUN_SUBOP_FLAG_IS_REFBUF = 0x4, + FUN_SUBOP_SGL_FLAG_LOCAL = 0x8, +}; + +enum fun_data_op { + 
FUN_DATAOP_INVALID = 0x0, + FUN_DATAOP_SL = 0x1, /* scatter */ + FUN_DATAOP_GL = 0x2, /* gather */ + FUN_DATAOP_SGL = 0x3, /* scatter-gather */ + FUN_DATAOP_IMM = 0x4, /* immediate data */ + FUN_DATAOP_RQBUF = 0x8, /* rq buffer */ +}; + +struct fun_dataop_gl { + __u8 subop; + __u8 flags; + __be16 sgl_off; + __be32 sgl_len; + + __be64 sgl_data; +}; + +static inline void fun_dataop_gl_init(struct fun_dataop_gl *s, u8 flags, + u16 sgl_off, u32 sgl_len, u64 sgl_data) +{ + s->subop = FUN_DATAOP_GL; + s->flags = flags; + s->sgl_off = cpu_to_be16(sgl_off); + s->sgl_len = cpu_to_be32(sgl_len); + s->sgl_data = cpu_to_be64(sgl_data); +} + +struct fun_dataop_imm { + __u8 subop; + __u8 flags; + __be16 rsvd0; + __be32 sgl_len; +}; + +struct fun_subop_sgl { + __u8 subop; + __u8 flags; + __u8 nsgl; + __u8 rsvd0; + __be32 sgl_len; + + __be64 sgl_data; +}; + +#define FUN_SUBOP_SGL_INIT(_subop, _flags, _nsgl, _sgl_len, _sgl_data) \ + (struct fun_subop_sgl) { \ + .subop = (_subop), .flags = (_flags), .nsgl = (_nsgl), \ + .sgl_len = cpu_to_be32(_sgl_len), \ + .sgl_data = cpu_to_be64(_sgl_data), \ + } + +struct fun_dataop_rqbuf { + __u8 subop; + __u8 rsvd0; + __be16 cid; + __be32 bufoff; +}; + +struct fun_dataop_hdr { + __u8 nsgl; + __u8 flags; + __u8 ngather; + __u8 nscatter; + __be32 total_len; + + struct fun_dataop_imm imm[]; +}; + +#define FUN_DATAOP_HDR_INIT(_nsgl, _flags, _ngather, _nscatter, _total_len) \ + (struct fun_dataop_hdr) { \ + .nsgl = _nsgl, .flags = _flags, .ngather = _ngather, \ + .nscatter = _nscatter, .total_len = cpu_to_be32(_total_len), \ + } + +enum fun_port_inetaddr_event_type { + FUN_PORT_INETADDR_ADD = 0x1, + FUN_PORT_INETADDR_DEL = 0x2, +}; + +enum fun_port_inetaddr_addr_family { + FUN_PORT_INETADDR_IPV4 = 0x1, + FUN_PORT_INETADDR_IPV6 = 0x2, +}; + +struct fun_admin_port_req { + struct fun_admin_req_common common; + + union port_req_subop { + struct fun_admin_port_create_req { + __u8 subop; + __u8 rsvd0; + __be16 flags; + __be32 id; + } create; + struct fun_admin_port_write_req { + __u8 subop; + __u8 rsvd0; + __be16 flags; + __be32 id; /* portid */ + + struct fun_admin_write48_req write48[]; + } write; + struct fun_admin_port_read_req { + __u8 subop; + __u8 rsvd0; + __be16 flags; + __be32 id; /* portid */ + + struct fun_admin_read48_req read48[]; + } read; + struct fun_admin_port_inetaddr_event_req { + __u8 subop; + __u8 rsvd0; + __u8 event_type; + __u8 addr_family; + __be32 id; + + __u8 addr[]; + } inetaddr_event; + } u; +}; + +#define FUN_ADMIN_PORT_CREATE_REQ_INIT(_subop, _flags, _id) \ + (struct fun_admin_port_create_req) { \ + .subop = (_subop), .flags = cpu_to_be16(_flags), \ + .id = cpu_to_be32(_id), \ + } + +#define FUN_ADMIN_PORT_WRITE_REQ_INIT(_subop, _flags, _id) \ + (struct fun_admin_port_write_req) { \ + .subop = (_subop), .flags = cpu_to_be16(_flags), \ + .id = cpu_to_be32(_id), \ + } + +#define FUN_ADMIN_PORT_READ_REQ_INIT(_subop, _flags, _id) \ + (struct fun_admin_port_read_req) { \ + .subop = (_subop), .flags = cpu_to_be16(_flags), \ + .id = cpu_to_be32(_id), \ + } + +struct fun_admin_port_rsp { + struct fun_admin_rsp_common common; + + union port_rsp_subop { + struct fun_admin_port_create_rsp { + __u8 subop; + __u8 rsvd0[3]; + __be32 id; + + __be16 lport; + __u8 rsvd1[6]; + } create; + struct fun_admin_port_write_rsp { + __u8 subop; + __u8 rsvd0[3]; + __be32 id; /* portid */ + + struct fun_admin_write48_rsp write48[]; + } write; + struct fun_admin_port_read_rsp { + __u8 subop; + __u8 rsvd0[3]; + __be32 id; /* portid */ + + struct fun_admin_read48_rsp read48[]; + } 
read; + struct fun_admin_port_inetaddr_event_rsp { + __u8 subop; + __u8 rsvd0[3]; + __be32 id; /* portid */ + } inetaddr_event; + } u; +}; + +enum fun_xcvr_type { + FUN_XCVR_BASET = 0x0, + FUN_XCVR_CU = 0x1, + FUN_XCVR_SMF = 0x2, + FUN_XCVR_MMF = 0x3, + FUN_XCVR_AOC = 0x4, + FUN_XCVR_SFPP = 0x10, /* SFP+ or later */ + FUN_XCVR_QSFPP = 0x11, /* QSFP+ or later */ + FUN_XCVR_QSFPDD = 0x12, /* QSFP-DD */ +}; + +struct fun_admin_port_notif { + struct fun_admin_rsp_common common; + + __u8 subop; + __u8 rsvd0; + __be16 id; + __be32 speed; /* in 10 Mbps units */ + + __u8 link_state; + __u8 missed_events; + __u8 link_down_reason; + __u8 xcvr_type; + __u8 flow_ctrl; + __u8 fec; + __u8 active_lanes; + __u8 rsvd1; + + __be64 advertising; + + __be64 lp_advertising; +}; + +enum fun_eth_rss_const { + FUN_ETH_RSS_MAX_KEY_SIZE = 0x28, + FUN_ETH_RSS_MAX_INDIR_ENT = 0x40, +}; + +enum fun_eth_hash_alg { + FUN_ETH_RSS_ALG_INVALID = 0x0, + FUN_ETH_RSS_ALG_TOEPLITZ = 0x1, + FUN_ETH_RSS_ALG_CRC32 = 0x2, +}; + +struct fun_admin_rss_req { + struct fun_admin_req_common common; + + union rss_req_subop { + struct fun_admin_rss_create_req { + __u8 subop; + __u8 rsvd0; + __be16 flags; + __be32 id; + + __be32 rsvd1; + __be32 viid; /* VI flow id */ + + __be64 metadata[1]; + + __u8 alg; + __u8 keylen; + __u8 indir_nent; + __u8 rsvd2; + __be16 key_off; + __be16 indir_off; + + struct fun_dataop_hdr dataop; + } create; + } u; +}; + +#define FUN_ADMIN_RSS_CREATE_REQ_INIT(_subop, _flags, _id, _viid, _alg, \ + _keylen, _indir_nent, _key_off, \ + _indir_off) \ + (struct fun_admin_rss_create_req) { \ + .subop = (_subop), .flags = cpu_to_be16(_flags), \ + .id = cpu_to_be32(_id), .viid = cpu_to_be32(_viid), \ + .alg = _alg, .keylen = _keylen, .indir_nent = _indir_nent, \ + .key_off = cpu_to_be16(_key_off), \ + .indir_off = cpu_to_be16(_indir_off), \ + } + +struct fun_admin_vi_req { + struct fun_admin_req_common common; + + union vi_req_subop { + struct fun_admin_vi_create_req { + __u8 subop; + __u8 rsvd0; + __be16 flags; + __be32 id; + + __be32 rsvd1; + __be32 portid; /* port flow id */ + } create; + } u; +}; + +#define FUN_ADMIN_VI_CREATE_REQ_INIT(_subop, _flags, _id, _portid) \ + (struct fun_admin_vi_create_req) { \ + .subop = (_subop), .flags = cpu_to_be16(_flags), \ + .id = cpu_to_be32(_id), .portid = cpu_to_be32(_portid), \ + } + +struct fun_admin_eth_req { + struct fun_admin_req_common common; + + union eth_req_subop { + struct fun_admin_eth_create_req { + __u8 subop; + __u8 rsvd0; + __be16 flags; + __be32 id; + + __be32 rsvd1; + __be32 portid; /* port flow id */ + } create; + } u; +}; + +#define FUN_ADMIN_ETH_CREATE_REQ_INIT(_subop, _flags, _id, _portid) \ + (struct fun_admin_eth_create_req) { \ + .subop = (_subop), .flags = cpu_to_be16(_flags), \ + .id = cpu_to_be32(_id), .portid = cpu_to_be32(_portid), \ + } + +enum { + FUN_ADMIN_SWU_UPGRADE_FLAG_INIT = 0x10, + FUN_ADMIN_SWU_UPGRADE_FLAG_COMPLETE = 0x20, + FUN_ADMIN_SWU_UPGRADE_FLAG_DOWNGRADE = 0x40, + FUN_ADMIN_SWU_UPGRADE_FLAG_ACTIVE_IMAGE = 0x80, + FUN_ADMIN_SWU_UPGRADE_FLAG_ASYNC = 0x1, +}; + +enum fun_admin_swu_subop { + FUN_ADMIN_SWU_SUBOP_GET_VERSION = 0x20, + FUN_ADMIN_SWU_SUBOP_UPGRADE = 0x21, + FUN_ADMIN_SWU_SUBOP_UPGRADE_DATA = 0x22, + FUN_ADMIN_SWU_SUBOP_GET_ALL_VERSIONS = 0x23, +}; + +struct fun_admin_swu_req { + struct fun_admin_req_common common; + + union swu_req_subop { + struct fun_admin_swu_create_req { + __u8 subop; + __u8 rsvd0; + __be16 flags; + __be32 id; + } create; + struct fun_admin_swu_upgrade_req { + __u8 subop; + __u8 rsvd0; + __be16 flags; + 
__be32 id; + + __be32 fourcc; + __be32 rsvd1; + + __be64 image_size; /* upgrade image length */ + } upgrade; + struct fun_admin_swu_upgrade_data_req { + __u8 subop; + __u8 rsvd0; + __be16 flags; + __be32 id; + + __be32 offset; /* offset of data in this command */ + __be32 size; /* total size of data in this command */ + } upgrade_data; + } u; + + struct fun_subop_sgl sgl[]; /* in, out buffers through sgl */ +}; + +#define FUN_ADMIN_SWU_CREATE_REQ_INIT(_subop, _flags, _id) \ + (struct fun_admin_swu_create_req) { \ + .subop = (_subop), .flags = cpu_to_be16(_flags), \ + .id = cpu_to_be32(_id), \ + } + +#define FUN_ADMIN_SWU_UPGRADE_REQ_INIT(_subop, _flags, _id, _fourcc, \ + _image_size) \ + (struct fun_admin_swu_upgrade_req) { \ + .subop = (_subop), .flags = cpu_to_be16(_flags), \ + .id = cpu_to_be32(_id), .fourcc = cpu_to_be32(_fourcc), \ + .image_size = cpu_to_be64(_image_size), \ + } + +#define FUN_ADMIN_SWU_UPGRADE_DATA_REQ_INIT(_subop, _flags, _id, _offset, \ + _size) \ + (struct fun_admin_swu_upgrade_data_req) { \ + .subop = (_subop), .flags = cpu_to_be16(_flags), \ + .id = cpu_to_be32(_id), .offset = cpu_to_be32(_offset), \ + .size = cpu_to_be32(_size), \ + } + +struct fun_admin_swu_rsp { + struct fun_admin_rsp_common common; + + union swu_rsp_subop { + struct fun_admin_swu_create_rsp { + __u8 subop; + __u8 rsvd0; + __be16 flags; + __be32 id; + } create; + struct fun_admin_swu_upgrade_rsp { + __u8 subop; + __u8 rsvd0[3]; + __be32 id; + + __be32 fourcc; + __be32 status; + + __be32 progress; + __be32 unused; + } upgrade; + struct fun_admin_swu_upgrade_data_rsp { + __u8 subop; + __u8 rsvd0; + __be16 flags; + __be32 id; + + __be32 offset; + __be32 size; + } upgrade_data; + } u; +}; + +enum fun_ktls_version { + FUN_KTLS_TLSV2 = 0x20, + FUN_KTLS_TLSV3 = 0x30, +}; + +enum fun_ktls_cipher { + FUN_KTLS_CIPHER_AES_GCM_128 = 0x33, + FUN_KTLS_CIPHER_AES_GCM_256 = 0x34, + FUN_KTLS_CIPHER_AES_CCM_128 = 0x35, + FUN_KTLS_CIPHER_CHACHA20_POLY1305 = 0x36, +}; + +enum fun_ktls_modify_flags { + FUN_KTLS_MODIFY_REMOVE = 0x1, +}; + +struct fun_admin_ktls_create_req { + struct fun_admin_req_common common; + + __u8 subop; + __u8 rsvd0; + __be16 flags; + __be32 id; +}; + +#define FUN_ADMIN_KTLS_CREATE_REQ_INIT(_subop, _flags, _id) \ + (struct fun_admin_ktls_create_req) { \ + .subop = (_subop), .flags = cpu_to_be16(_flags), \ + .id = cpu_to_be32(_id), \ + } + +struct fun_admin_ktls_create_rsp { + struct fun_admin_rsp_common common; + + __u8 subop; + __u8 rsvd0[3]; + __be32 id; +}; + +struct fun_admin_ktls_modify_req { + struct fun_admin_req_common common; + + __u8 subop; + __u8 rsvd0; + __be16 flags; + __be32 id; + + __be64 tlsid; + + __be32 tcp_seq; + __u8 version; + __u8 cipher; + __u8 rsvd1[2]; + + __u8 record_seq[8]; + + __u8 key[32]; + + __u8 iv[16]; + + __u8 salt[8]; +}; + +#define FUN_ADMIN_KTLS_MODIFY_REQ_INIT(_subop, _flags, _id, _tlsid, _tcp_seq, \ + _version, _cipher) \ + (struct fun_admin_ktls_modify_req) { \ + .subop = (_subop), .flags = cpu_to_be16(_flags), \ + .id = cpu_to_be32(_id), .tlsid = cpu_to_be64(_tlsid), \ + .tcp_seq = cpu_to_be32(_tcp_seq), .version = _version, \ + .cipher = _cipher, \ + } + +struct fun_admin_ktls_modify_rsp { + struct fun_admin_rsp_common common; + + __u8 subop; + __u8 rsvd0[3]; + __be32 id; + + __be64 tlsid; +}; + +struct fun_req_common { + __u8 op; + __u8 len8; + __be16 flags; + __u8 suboff8; + __u8 rsvd0; + __be16 cid; +}; + +struct fun_rsp_common { + __u8 op; + __u8 len8; + __be16 flags; + __u8 suboff8; + __u8 ret; + __be16 cid; +}; + +struct fun_cqe_info { + 
__be16 sqhd; + __be16 sqid; + __be16 cid; + __be16 sf_p; +}; + +enum fun_eprq_def { + FUN_EPRQ_PKT_ALIGN = 0x80, +}; + +struct fun_eprq_rqbuf { + __be64 bufaddr; +}; + +#define FUN_EPRQ_RQBUF_INIT(_bufaddr) \ + (struct fun_eprq_rqbuf) { \ + .bufaddr = cpu_to_be64(_bufaddr), \ + } + +enum fun_eth_op { + FUN_ETH_OP_TX = 0x1, + FUN_ETH_OP_RX = 0x2, +}; + +enum { + FUN_ETH_OFFLOAD_EN = 0x8000, + FUN_ETH_OUTER_EN = 0x4000, + FUN_ETH_INNER_LSO = 0x2000, + FUN_ETH_INNER_TSO = 0x1000, + FUN_ETH_OUTER_IPV6 = 0x800, + FUN_ETH_OUTER_UDP = 0x400, + FUN_ETH_INNER_IPV6 = 0x200, + FUN_ETH_INNER_UDP = 0x100, + FUN_ETH_UPDATE_OUTER_L3_LEN = 0x80, + FUN_ETH_UPDATE_OUTER_L3_CKSUM = 0x40, + FUN_ETH_UPDATE_OUTER_L4_LEN = 0x20, + FUN_ETH_UPDATE_OUTER_L4_CKSUM = 0x10, + FUN_ETH_UPDATE_INNER_L3_LEN = 0x8, + FUN_ETH_UPDATE_INNER_L3_CKSUM = 0x4, + FUN_ETH_UPDATE_INNER_L4_LEN = 0x2, + FUN_ETH_UPDATE_INNER_L4_CKSUM = 0x1, +}; + +struct fun_eth_offload { + __be16 flags; /* combination of above flags */ + __be16 mss; /* TSO max seg size */ + __be16 tcp_doff_flags; /* TCP data offset + flags 16b word */ + __be16 vlan; + + __be16 inner_l3_off; /* Inner L3 header offset */ + __be16 inner_l4_off; /* Inner L4 header offset */ + __be16 outer_l3_off; /* Outer L3 header offset */ + __be16 outer_l4_off; /* Outer L4 header offset */ +}; + +static inline void fun_eth_offload_init(struct fun_eth_offload *s, u16 flags, + u16 mss, __be16 tcp_doff_flags, + __be16 vlan, u16 inner_l3_off, + u16 inner_l4_off, u16 outer_l3_off, + u16 outer_l4_off) +{ + s->flags = cpu_to_be16(flags); + s->mss = cpu_to_be16(mss); + s->tcp_doff_flags = tcp_doff_flags; + s->vlan = vlan; + s->inner_l3_off = cpu_to_be16(inner_l3_off); + s->inner_l4_off = cpu_to_be16(inner_l4_off); + s->outer_l3_off = cpu_to_be16(outer_l3_off); + s->outer_l4_off = cpu_to_be16(outer_l4_off); +} + +struct fun_eth_tls { + __be64 tlsid; +}; + +enum { + FUN_ETH_TX_TLS = 0x8000, +}; + +struct fun_eth_tx_req { + __u8 op; + __u8 len8; + __be16 flags; + __u8 suboff8; + __u8 repr_idn; + __be16 encap_proto; + + struct fun_eth_offload offload; + + struct fun_dataop_hdr dataop; +}; + +struct fun_eth_rx_cv { + __be16 il4_prot_to_l2_type; +}; + +#define FUN_ETH_RX_CV_IL4_PROT_S 13U +#define FUN_ETH_RX_CV_IL4_PROT_M 0x3 + +#define FUN_ETH_RX_CV_IL3_PROT_S 11U +#define FUN_ETH_RX_CV_IL3_PROT_M 0x3 + +#define FUN_ETH_RX_CV_OL4_PROT_S 8U +#define FUN_ETH_RX_CV_OL4_PROT_M 0x7 + +#define FUN_ETH_RX_CV_ENCAP_TYPE_S 6U +#define FUN_ETH_RX_CV_ENCAP_TYPE_M 0x3 + +#define FUN_ETH_RX_CV_OL3_PROT_S 4U +#define FUN_ETH_RX_CV_OL3_PROT_M 0x3 + +#define FUN_ETH_RX_CV_VLAN_TYPE_S 3U +#define FUN_ETH_RX_CV_VLAN_TYPE_M 0x1 + +#define FUN_ETH_RX_CV_L2_TYPE_S 2U +#define FUN_ETH_RX_CV_L2_TYPE_M 0x1 + +enum fun_rx_cv { + FUN_RX_CV_NONE = 0x0, + FUN_RX_CV_IP = 0x2, + FUN_RX_CV_IP6 = 0x3, + FUN_RX_CV_TCP = 0x2, + FUN_RX_CV_UDP = 0x3, + FUN_RX_CV_VXLAN = 0x2, + FUN_RX_CV_MPLS = 0x3, +}; + +struct fun_eth_cqe { + __u8 op; + __u8 len8; + __u8 nsgl; + __u8 repr_idn; + __be32 pkt_len; + + __be64 timestamp; + + __be16 pkt_cv; + __be16 rsvd0; + __be32 hash; + + __be16 encap_proto; + __be16 vlan; + __be32 rsvd1; + + __be32 buf_offset; + __be16 headroom; + __be16 csum; +}; + +enum fun_admin_adi_attr { + FUN_ADMIN_ADI_ATTR_MACADDR = 0x1, + FUN_ADMIN_ADI_ATTR_VLAN = 0x2, + FUN_ADMIN_ADI_ATTR_RATE = 0x3, +}; + +struct fun_adi_param { + union adi_param { + struct fun_adi_mac { + __be64 addr; + } mac; + struct fun_adi_vlan { + __be32 rsvd; + __be16 eth_type; + __be16 tci; + } vlan; + struct fun_adi_rate { + __be32 rsvd; + __be32 
tx_mbps; + } rate; + } u; +}; + +#define FUN_ADI_MAC_INIT(_addr) \ + (struct fun_adi_mac) { \ + .addr = cpu_to_be64(_addr), \ + } + +#define FUN_ADI_VLAN_INIT(_eth_type, _tci) \ + (struct fun_adi_vlan) { \ + .eth_type = cpu_to_be16(_eth_type), .tci = cpu_to_be16(_tci), \ + } + +#define FUN_ADI_RATE_INIT(_tx_mbps) \ + (struct fun_adi_rate) { \ + .tx_mbps = cpu_to_be32(_tx_mbps), \ + } + +struct fun_admin_adi_req { + struct fun_admin_req_common common; + + union adi_req_subop { + struct fun_admin_adi_write_req { + __u8 subop; + __u8 attribute; + __be16 rsvd; + __be32 id; + + struct fun_adi_param param; + } write; + } u; +}; + +#define FUN_ADMIN_ADI_WRITE_REQ_INIT(_subop, _attribute, _id) \ + (struct fun_admin_adi_write_req) { \ + .subop = (_subop), .attribute = (_attribute), \ + .id = cpu_to_be32(_id), \ + } + +#endif /* __FUN_HCI_H */ diff --git a/drivers/net/ethernet/fungible/funcore/fun_queue.c b/drivers/net/ethernet/fungible/funcore/fun_queue.c new file mode 100644 index 000000000000..8ab9f68434f5 --- /dev/null +++ b/drivers/net/ethernet/fungible/funcore/fun_queue.c @@ -0,0 +1,601 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) + +#include <linux/dma-mapping.h> +#include <linux/interrupt.h> +#include <linux/log2.h> +#include <linux/mm.h> +#include <linux/netdevice.h> +#include <linux/pci.h> +#include <linux/slab.h> + +#include "fun_dev.h" +#include "fun_queue.h" + +/* Allocate memory for a queue. This includes the memory for the HW descriptor + * ring, an optional 64b HW write-back area, and an optional SW state ring. + * Returns the virtual and DMA addresses of the HW ring, the VA of the SW ring, + * and the VA of the write-back area. + */ +void *fun_alloc_ring_mem(struct device *dma_dev, size_t depth, + size_t hw_desc_sz, size_t sw_desc_sz, bool wb, + int numa_node, dma_addr_t *dma_addr, void **sw_va, + volatile __be64 **wb_va) +{ + int dev_node = dev_to_node(dma_dev); + size_t dma_sz; + void *va; + + if (numa_node == NUMA_NO_NODE) + numa_node = dev_node; + + /* Place optional write-back area at end of descriptor ring. */ + dma_sz = hw_desc_sz * depth; + if (wb) + dma_sz += sizeof(u64); + + set_dev_node(dma_dev, numa_node); + va = dma_alloc_coherent(dma_dev, dma_sz, dma_addr, GFP_KERNEL); + set_dev_node(dma_dev, dev_node); + if (!va) + return NULL; + + if (sw_desc_sz) { + *sw_va = kvzalloc_node(sw_desc_sz * depth, GFP_KERNEL, + numa_node); + if (!*sw_va) { + dma_free_coherent(dma_dev, dma_sz, va, *dma_addr); + return NULL; + } + } + + if (wb) + *wb_va = va + dma_sz - sizeof(u64); + return va; +} +EXPORT_SYMBOL_GPL(fun_alloc_ring_mem); + +void fun_free_ring_mem(struct device *dma_dev, size_t depth, size_t hw_desc_sz, + bool wb, void *hw_va, dma_addr_t dma_addr, void *sw_va) +{ + if (hw_va) { + size_t sz = depth * hw_desc_sz; + + if (wb) + sz += sizeof(u64); + dma_free_coherent(dma_dev, sz, hw_va, dma_addr); + } + kvfree(sw_va); +} +EXPORT_SYMBOL_GPL(fun_free_ring_mem); + +/* Prepare and issue an admin command to create an SQ on the device with the + * provided parameters. If the queue ID is auto-allocated by the device it is + * returned in *sqidp. 
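+ * If FUN_ADMIN_EPSQ_CREATE_FLAG_RQ is set the entry size is forced to that + * of an RQ buffer descriptor, and the head write-back slot is taken to sit + * right after the descriptor ring, per fun_alloc_ring_mem()'s layout.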
+ */ +int fun_sq_create(struct fun_dev *fdev, u16 flags, u32 sqid, u32 cqid, + u8 sqe_size_log2, u32 sq_depth, dma_addr_t dma_addr, + u8 coal_nentries, u8 coal_usec, u32 irq_num, + u32 scan_start_id, u32 scan_end_id, + u32 rq_buf_size_log2, u32 *sqidp, u32 __iomem **dbp) +{ + union { + struct fun_admin_epsq_req req; + struct fun_admin_generic_create_rsp rsp; + } cmd; + dma_addr_t wb_addr; + u32 hw_qid; + int rc; + + if (sq_depth > fdev->q_depth) + return -EINVAL; + if (flags & FUN_ADMIN_EPSQ_CREATE_FLAG_RQ) + sqe_size_log2 = ilog2(sizeof(struct fun_eprq_rqbuf)); + + wb_addr = dma_addr + (sq_depth << sqe_size_log2); + + cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_EPSQ, + sizeof(cmd.req)); + cmd.req.u.create = + FUN_ADMIN_EPSQ_CREATE_REQ_INIT(FUN_ADMIN_SUBOP_CREATE, flags, + sqid, cqid, sqe_size_log2, + sq_depth - 1, dma_addr, 0, + coal_nentries, coal_usec, + irq_num, scan_start_id, + scan_end_id, 0, + rq_buf_size_log2, + ilog2(sizeof(u64)), wb_addr); + + rc = fun_submit_admin_sync_cmd(fdev, &cmd.req.common, + &cmd.rsp, sizeof(cmd.rsp), 0); + if (rc) + return rc; + + hw_qid = be32_to_cpu(cmd.rsp.id); + *dbp = fun_sq_db_addr(fdev, hw_qid); + if (flags & FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR) + *sqidp = hw_qid; + return rc; +} +EXPORT_SYMBOL_GPL(fun_sq_create); + +/* Prepare and issue an admin command to create a CQ on the device with the + * provided parameters. If the queue ID is auto-allocated by the device it is + * returned in *cqidp. + */ +int fun_cq_create(struct fun_dev *fdev, u16 flags, u32 cqid, u32 rqid, + u8 cqe_size_log2, u32 cq_depth, dma_addr_t dma_addr, + u16 headroom, u16 tailroom, u8 coal_nentries, u8 coal_usec, + u32 irq_num, u32 scan_start_id, u32 scan_end_id, u32 *cqidp, + u32 __iomem **dbp) +{ + union { + struct fun_admin_epcq_req req; + struct fun_admin_generic_create_rsp rsp; + } cmd; + u32 hw_qid; + int rc; + + if (cq_depth > fdev->q_depth) + return -EINVAL; + + cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_EPCQ, + sizeof(cmd.req)); + cmd.req.u.create = + FUN_ADMIN_EPCQ_CREATE_REQ_INIT(FUN_ADMIN_SUBOP_CREATE, flags, + cqid, rqid, cqe_size_log2, + cq_depth - 1, dma_addr, tailroom, + headroom / 2, 0, coal_nentries, + coal_usec, irq_num, + scan_start_id, scan_end_id, 0); + + rc = fun_submit_admin_sync_cmd(fdev, &cmd.req.common, + &cmd.rsp, sizeof(cmd.rsp), 0); + if (rc) + return rc; + + hw_qid = be32_to_cpu(cmd.rsp.id); + *dbp = fun_cq_db_addr(fdev, hw_qid); + if (flags & FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR) + *cqidp = hw_qid; + return rc; +} +EXPORT_SYMBOL_GPL(fun_cq_create); + +static bool fun_sq_is_head_wb(const struct fun_queue *funq) +{ + return funq->sq_flags & FUN_ADMIN_EPSQ_CREATE_FLAG_HEAD_WB_ADDRESS; +} + +static void fun_clean_rq(struct fun_queue *funq) +{ + struct fun_dev *fdev = funq->fdev; + struct fun_rq_info *rqinfo; + unsigned int i; + + for (i = 0; i < funq->rq_depth; i++) { + rqinfo = &funq->rq_info[i]; + if (rqinfo->page) { + dma_unmap_page(fdev->dev, rqinfo->dma, PAGE_SIZE, + DMA_FROM_DEVICE); + put_page(rqinfo->page); + rqinfo->page = NULL; + } + } +} + +static int fun_fill_rq(struct fun_queue *funq) +{ + struct device *dev = funq->fdev->dev; + int i, node = dev_to_node(dev); + struct fun_rq_info *rqinfo; + + for (i = 0; i < funq->rq_depth; i++) { + rqinfo = &funq->rq_info[i]; + rqinfo->page = alloc_pages_node(node, GFP_KERNEL, 0); + if (unlikely(!rqinfo->page)) + return -ENOMEM; + + rqinfo->dma = dma_map_page(dev, rqinfo->page, 0, + PAGE_SIZE, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(dev, rqinfo->dma))) { + 
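/* Free only this page; pages mapped so far are released + * by the caller via fun_clean_rq(). + */ +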
put_page(rqinfo->page); + rqinfo->page = NULL; + return -ENOMEM; + } + + funq->rqes[i] = FUN_EPRQ_RQBUF_INIT(rqinfo->dma); + } + + funq->rq_tail = funq->rq_depth - 1; + return 0; +} + +static void fun_rq_update_pos(struct fun_queue *funq, int buf_offset) +{ + if (buf_offset <= funq->rq_buf_offset) { + struct fun_rq_info *rqinfo = &funq->rq_info[funq->rq_buf_idx]; + struct device *dev = funq->fdev->dev; + + dma_sync_single_for_device(dev, rqinfo->dma, PAGE_SIZE, + DMA_FROM_DEVICE); + funq->num_rqe_to_fill++; + if (++funq->rq_buf_idx == funq->rq_depth) + funq->rq_buf_idx = 0; + } + funq->rq_buf_offset = buf_offset; +} + +/* Given a command response with data scattered across >= 1 RQ buffers return + * a pointer to a contiguous buffer containing all the data. If the data is in + * one RQ buffer the start address within that buffer is returned, otherwise a + * new buffer is allocated and the data is gathered into it. + */ +static void *fun_data_from_rq(struct fun_queue *funq, + const struct fun_rsp_common *rsp, bool *need_free) +{ + u32 bufoff, total_len, remaining, fragsize, dataoff; + struct device *dma_dev = funq->fdev->dev; + const struct fun_dataop_rqbuf *databuf; + const struct fun_dataop_hdr *dataop; + const struct fun_rq_info *rqinfo; + void *data; + + dataop = (void *)rsp + rsp->suboff8 * 8; + total_len = be32_to_cpu(dataop->total_len); + + if (likely(dataop->nsgl == 1)) { + databuf = (struct fun_dataop_rqbuf *)dataop->imm; + bufoff = be32_to_cpu(databuf->bufoff); + fun_rq_update_pos(funq, bufoff); + rqinfo = &funq->rq_info[funq->rq_buf_idx]; + dma_sync_single_for_cpu(dma_dev, rqinfo->dma + bufoff, + total_len, DMA_FROM_DEVICE); + *need_free = false; + return page_address(rqinfo->page) + bufoff; + } + + /* For scattered completions gather the fragments into one buffer. */ + + data = kmalloc(total_len, GFP_ATOMIC); + /* NULL is OK here. In case of failure we still need to consume the data + * for proper buffer accounting but indicate an error in the response. 
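+ * (__fun_process_cq() substitutes an ENOMEM status when this function + * returns NULL.)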
+ */ + if (likely(data)) + *need_free = true; + + dataoff = 0; + for (remaining = total_len; remaining; remaining -= fragsize) { + fun_rq_update_pos(funq, 0); + fragsize = min_t(unsigned int, PAGE_SIZE, remaining); + if (data) { + rqinfo = &funq->rq_info[funq->rq_buf_idx]; + dma_sync_single_for_cpu(dma_dev, rqinfo->dma, fragsize, + DMA_FROM_DEVICE); + memcpy(data + dataoff, page_address(rqinfo->page), + fragsize); + dataoff += fragsize; + } + } + return data; +} + +unsigned int __fun_process_cq(struct fun_queue *funq, unsigned int max) +{ + const struct fun_cqe_info *info; + struct fun_rsp_common *rsp; + unsigned int new_cqes; + u16 sf_p, flags; + bool need_free; + void *cqe; + + if (!max) + max = funq->cq_depth - 1; + + for (new_cqes = 0; new_cqes < max; new_cqes++) { + cqe = funq->cqes + (funq->cq_head << funq->cqe_size_log2); + info = funq_cqe_info(funq, cqe); + sf_p = be16_to_cpu(info->sf_p); + + if ((sf_p & 1) != funq->cq_phase) + break; + + /* ensure the phase tag is read before other CQE fields */ + dma_rmb(); + + if (++funq->cq_head == funq->cq_depth) { + funq->cq_head = 0; + funq->cq_phase = !funq->cq_phase; + } + + rsp = cqe; + flags = be16_to_cpu(rsp->flags); + + need_free = false; + if (unlikely(flags & FUN_REQ_COMMON_FLAG_CQE_IN_RQBUF)) { + rsp = fun_data_from_rq(funq, rsp, &need_free); + if (!rsp) { + rsp = cqe; + rsp->len8 = 1; + if (rsp->ret == 0) + rsp->ret = ENOMEM; + } + } + + if (funq->cq_cb) + funq->cq_cb(funq, funq->cb_data, rsp, info); + if (need_free) + kfree(rsp); + } + + dev_dbg(funq->fdev->dev, "CQ %u, new CQEs %u/%u, head %u, phase %u\n", + funq->cqid, new_cqes, max, funq->cq_head, funq->cq_phase); + return new_cqes; +} + +unsigned int fun_process_cq(struct fun_queue *funq, unsigned int max) +{ + unsigned int processed; + u32 db; + + processed = __fun_process_cq(funq, max); + + if (funq->num_rqe_to_fill) { + funq->rq_tail = (funq->rq_tail + funq->num_rqe_to_fill) % + funq->rq_depth; + funq->num_rqe_to_fill = 0; + writel(funq->rq_tail, funq->rq_db); + } + + db = funq->cq_head | FUN_DB_IRQ_ARM_F; + writel(db, funq->cq_db); + return processed; +} + +static int fun_alloc_sqes(struct fun_queue *funq) +{ + funq->sq_cmds = fun_alloc_ring_mem(funq->fdev->dev, funq->sq_depth, + 1 << funq->sqe_size_log2, 0, + fun_sq_is_head_wb(funq), + NUMA_NO_NODE, &funq->sq_dma_addr, + NULL, &funq->sq_head); + return funq->sq_cmds ? 0 : -ENOMEM; +} + +static int fun_alloc_cqes(struct fun_queue *funq) +{ + funq->cqes = fun_alloc_ring_mem(funq->fdev->dev, funq->cq_depth, + 1 << funq->cqe_size_log2, 0, false, + NUMA_NO_NODE, &funq->cq_dma_addr, NULL, + NULL); + return funq->cqes ? 0 : -ENOMEM; +} + +static int fun_alloc_rqes(struct fun_queue *funq) +{ + funq->rqes = fun_alloc_ring_mem(funq->fdev->dev, funq->rq_depth, + sizeof(*funq->rqes), + sizeof(*funq->rq_info), false, + NUMA_NO_NODE, &funq->rq_dma_addr, + (void **)&funq->rq_info, NULL); + return funq->rqes ? 0 : -ENOMEM; +} + +/* Free a queue's structures. */ +void fun_free_queue(struct fun_queue *funq) +{ + struct device *dev = funq->fdev->dev; + + fun_free_ring_mem(dev, funq->cq_depth, 1 << funq->cqe_size_log2, false, + funq->cqes, funq->cq_dma_addr, NULL); + fun_free_ring_mem(dev, funq->sq_depth, 1 << funq->sqe_size_log2, + fun_sq_is_head_wb(funq), funq->sq_cmds, + funq->sq_dma_addr, NULL); + + if (funq->rqes) { + fun_clean_rq(funq); + fun_free_ring_mem(dev, funq->rq_depth, sizeof(*funq->rqes), + false, funq->rqes, funq->rq_dma_addr, + funq->rq_info); + } + + kfree(funq); +} + +/* Allocate and initialize a funq's structures. 
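+ * This allocates host memory only; the queues are created on the device + * separately with fun_create_queue().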
*/ +struct fun_queue *fun_alloc_queue(struct fun_dev *fdev, int qid, + const struct fun_queue_alloc_req *req) +{ + struct fun_queue *funq = kzalloc(sizeof(*funq), GFP_KERNEL); + + if (!funq) + return NULL; + + funq->fdev = fdev; + spin_lock_init(&funq->sq_lock); + + funq->qid = qid; + + /* Initial CQ/SQ/RQ ids */ + if (req->rq_depth) { + funq->cqid = 2 * qid; + if (funq->qid) { + /* I/O Q: use rqid = cqid, sqid = +1 */ + funq->rqid = funq->cqid; + funq->sqid = funq->rqid + 1; + } else { + /* Admin Q: sqid is always 0, use ID 1 for RQ */ + funq->sqid = 0; + funq->rqid = 1; + } + } else { + funq->cqid = qid; + funq->sqid = qid; + } + + funq->cq_flags = req->cq_flags; + funq->sq_flags = req->sq_flags; + + funq->cqe_size_log2 = req->cqe_size_log2; + funq->sqe_size_log2 = req->sqe_size_log2; + + funq->cq_depth = req->cq_depth; + funq->sq_depth = req->sq_depth; + + funq->cq_intcoal_nentries = req->cq_intcoal_nentries; + funq->cq_intcoal_usec = req->cq_intcoal_usec; + + funq->sq_intcoal_nentries = req->sq_intcoal_nentries; + funq->sq_intcoal_usec = req->sq_intcoal_usec; + + if (fun_alloc_cqes(funq)) + goto free_funq; + + funq->cq_phase = 1; + + if (fun_alloc_sqes(funq)) + goto free_funq; + + if (req->rq_depth) { + funq->rq_flags = req->rq_flags | FUN_ADMIN_EPSQ_CREATE_FLAG_RQ; + funq->rq_depth = req->rq_depth; + funq->rq_buf_offset = -1; + + if (fun_alloc_rqes(funq) || fun_fill_rq(funq)) + goto free_funq; + } + + funq->cq_vector = -1; + funq->cqe_info_offset = (1 << funq->cqe_size_log2) - sizeof(struct fun_cqe_info); + + /* SQ/CQ 0 are implicitly created, assign their doorbells now. + * Other queues are assigned doorbells at their explicit creation. + */ + if (funq->sqid == 0) + funq->sq_db = fun_sq_db_addr(fdev, 0); + if (funq->cqid == 0) + funq->cq_db = fun_cq_db_addr(fdev, 0); + + return funq; + +free_funq: + fun_free_queue(funq); + return NULL; +} + +/* Create a funq's CQ on the device. */ +static int fun_create_cq(struct fun_queue *funq) +{ + struct fun_dev *fdev = funq->fdev; + unsigned int rqid; + int rc; + + rqid = funq->cq_flags & FUN_ADMIN_EPCQ_CREATE_FLAG_RQ ? + funq->rqid : FUN_HCI_ID_INVALID; + rc = fun_cq_create(fdev, funq->cq_flags, funq->cqid, rqid, + funq->cqe_size_log2, funq->cq_depth, + funq->cq_dma_addr, 0, 0, funq->cq_intcoal_nentries, + funq->cq_intcoal_usec, funq->cq_vector, 0, 0, + &funq->cqid, &funq->cq_db); + if (!rc) + dev_dbg(fdev->dev, "created CQ %u\n", funq->cqid); + + return rc; +} + +/* Create a funq's SQ on the device. */ +static int fun_create_sq(struct fun_queue *funq) +{ + struct fun_dev *fdev = funq->fdev; + int rc; + + rc = fun_sq_create(fdev, funq->sq_flags, funq->sqid, funq->cqid, + funq->sqe_size_log2, funq->sq_depth, + funq->sq_dma_addr, funq->sq_intcoal_nentries, + funq->sq_intcoal_usec, funq->cq_vector, 0, 0, + 0, &funq->sqid, &funq->sq_db); + if (!rc) + dev_dbg(fdev->dev, "created SQ %u\n", funq->sqid); + + return rc; +} + +/* Create a funq's RQ on the device. 
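+ * An RQ is an SQ whose entries are buffer descriptors, so it is created + * through fun_sq_create() with FUN_ADMIN_EPSQ_CREATE_FLAG_RQ set in rq_flags.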
*/ +int fun_create_rq(struct fun_queue *funq) +{ + struct fun_dev *fdev = funq->fdev; + int rc; + + rc = fun_sq_create(fdev, funq->rq_flags, funq->rqid, funq->cqid, 0, + funq->rq_depth, funq->rq_dma_addr, 0, 0, + funq->cq_vector, 0, 0, PAGE_SHIFT, &funq->rqid, + &funq->rq_db); + if (!rc) + dev_dbg(fdev->dev, "created RQ %u\n", funq->rqid); + + return rc; +} + +static unsigned int funq_irq(struct fun_queue *funq) +{ + return pci_irq_vector(to_pci_dev(funq->fdev->dev), funq->cq_vector); +} + +int fun_request_irq(struct fun_queue *funq, const char *devname, + irq_handler_t handler, void *data) +{ + int rc; + + if (funq->cq_vector < 0) + return -EINVAL; + + funq->irq_handler = handler; + funq->irq_data = data; + + snprintf(funq->irqname, sizeof(funq->irqname), + funq->qid ? "%s-q[%d]" : "%s-adminq", devname, funq->qid); + + rc = request_irq(funq_irq(funq), handler, 0, funq->irqname, data); + if (rc) + funq->irq_handler = NULL; + + return rc; +} + +/* Create all component queues of a funq on the device. */ +int fun_create_queue(struct fun_queue *funq) +{ + int rc; + + rc = fun_create_cq(funq); + if (rc) + return rc; + + if (funq->rq_depth) { + rc = fun_create_rq(funq); + if (rc) + goto release_cq; + } + + rc = fun_create_sq(funq); + if (rc) + goto release_rq; + + return 0; + +release_rq: + fun_destroy_sq(funq->fdev, funq->rqid); +release_cq: + fun_destroy_cq(funq->fdev, funq->cqid); + return rc; +} + +void fun_free_irq(struct fun_queue *funq) +{ + if (funq->irq_handler) { + unsigned int vector = funq_irq(funq); + + free_irq(vector, funq->irq_data); + funq->irq_handler = NULL; + funq->irq_data = NULL; + } +} diff --git a/drivers/net/ethernet/fungible/funcore/fun_queue.h b/drivers/net/ethernet/fungible/funcore/fun_queue.h new file mode 100644 index 000000000000..7fb53d0ae8b0 --- /dev/null +++ b/drivers/net/ethernet/fungible/funcore/fun_queue.h @@ -0,0 +1,175 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ + +#ifndef _FUN_QUEUE_H +#define _FUN_QUEUE_H + +#include <linux/interrupt.h> +#include <linux/io.h> + +struct device; +struct fun_dev; +struct fun_queue; +struct fun_cqe_info; +struct fun_rsp_common; + +typedef void (*cq_callback_t)(struct fun_queue *funq, void *data, void *msg, + const struct fun_cqe_info *info); + +struct fun_rq_info { + dma_addr_t dma; + struct page *page; +}; + +/* A queue group consisting of an SQ, a CQ, and an optional RQ.
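+ * The admin queue uses qid 0; I/O queues use qid > 0 (see fun_alloc_queue()).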
*/ +struct fun_queue { + struct fun_dev *fdev; + spinlock_t sq_lock; + + dma_addr_t cq_dma_addr; + dma_addr_t sq_dma_addr; + dma_addr_t rq_dma_addr; + + u32 __iomem *cq_db; + u32 __iomem *sq_db; + u32 __iomem *rq_db; + + void *cqes; + void *sq_cmds; + struct fun_eprq_rqbuf *rqes; + struct fun_rq_info *rq_info; + + u32 cqid; + u32 sqid; + u32 rqid; + + u32 cq_depth; + u32 sq_depth; + u32 rq_depth; + + u16 cq_head; + u16 sq_tail; + u16 rq_tail; + + u8 cqe_size_log2; + u8 sqe_size_log2; + + u16 cqe_info_offset; + + u16 rq_buf_idx; + int rq_buf_offset; + u16 num_rqe_to_fill; + + u8 cq_intcoal_usec; + u8 cq_intcoal_nentries; + u8 sq_intcoal_usec; + u8 sq_intcoal_nentries; + + u16 cq_flags; + u16 sq_flags; + u16 rq_flags; + + /* SQ head writeback */ + u16 sq_comp; + + volatile __be64 *sq_head; + + cq_callback_t cq_cb; + void *cb_data; + + irq_handler_t irq_handler; + void *irq_data; + s16 cq_vector; + u8 cq_phase; + + /* I/O q index */ + u16 qid; + + char irqname[24]; +}; + +static inline void *fun_sqe_at(const struct fun_queue *funq, unsigned int pos) +{ + return funq->sq_cmds + (pos << funq->sqe_size_log2); +} + +static inline void funq_sq_post_tail(struct fun_queue *funq, u16 tail) +{ + if (++tail == funq->sq_depth) + tail = 0; + funq->sq_tail = tail; + writel(tail, funq->sq_db); +} + +static inline struct fun_cqe_info *funq_cqe_info(const struct fun_queue *funq, + void *cqe) +{ + return cqe + funq->cqe_info_offset; +} + +static inline void funq_rq_post(struct fun_queue *funq) +{ + writel(funq->rq_tail, funq->rq_db); +} + +struct fun_queue_alloc_req { + u8 cqe_size_log2; + u8 sqe_size_log2; + + u16 cq_flags; + u16 sq_flags; + u16 rq_flags; + + u32 cq_depth; + u32 sq_depth; + u32 rq_depth; + + u8 cq_intcoal_usec; + u8 cq_intcoal_nentries; + u8 sq_intcoal_usec; + u8 sq_intcoal_nentries; +}; + +int fun_sq_create(struct fun_dev *fdev, u16 flags, u32 sqid, u32 cqid, + u8 sqe_size_log2, u32 sq_depth, dma_addr_t dma_addr, + u8 coal_nentries, u8 coal_usec, u32 irq_num, + u32 scan_start_id, u32 scan_end_id, + u32 rq_buf_size_log2, u32 *sqidp, u32 __iomem **dbp); +int fun_cq_create(struct fun_dev *fdev, u16 flags, u32 cqid, u32 rqid, + u8 cqe_size_log2, u32 cq_depth, dma_addr_t dma_addr, + u16 headroom, u16 tailroom, u8 coal_nentries, u8 coal_usec, + u32 irq_num, u32 scan_start_id, u32 scan_end_id, + u32 *cqidp, u32 __iomem **dbp); +void *fun_alloc_ring_mem(struct device *dma_dev, size_t depth, + size_t hw_desc_sz, size_t sw_desc_size, bool wb, + int numa_node, dma_addr_t *dma_addr, void **sw_va, + volatile __be64 **wb_va); +void fun_free_ring_mem(struct device *dma_dev, size_t depth, size_t hw_desc_sz, + bool wb, void *hw_va, dma_addr_t dma_addr, void *sw_va); + +#define fun_destroy_sq(fdev, sqid) \ + fun_res_destroy((fdev), FUN_ADMIN_OP_EPSQ, 0, (sqid)) +#define fun_destroy_cq(fdev, cqid) \ + fun_res_destroy((fdev), FUN_ADMIN_OP_EPCQ, 0, (cqid)) + +struct fun_queue *fun_alloc_queue(struct fun_dev *fdev, int qid, + const struct fun_queue_alloc_req *req); +void fun_free_queue(struct fun_queue *funq); + +static inline void fun_set_cq_callback(struct fun_queue *funq, cq_callback_t cb, + void *cb_data) +{ + funq->cq_cb = cb; + funq->cb_data = cb_data; +} + +int fun_create_rq(struct fun_queue *funq); +int fun_create_queue(struct fun_queue *funq); + +void fun_free_irq(struct fun_queue *funq); +int fun_request_irq(struct fun_queue *funq, const char *devname, + irq_handler_t handler, void *data); + +unsigned int __fun_process_cq(struct fun_queue *funq, unsigned int max); +unsigned int fun_process_cq(struct 
fun_queue *funq, unsigned int max); + +#endif /* _FUN_QUEUE_H */ diff --git a/drivers/net/ethernet/fungible/funeth/Kconfig b/drivers/net/ethernet/fungible/funeth/Kconfig new file mode 100644 index 000000000000..c72ad9386400 --- /dev/null +++ b/drivers/net/ethernet/fungible/funeth/Kconfig @@ -0,0 +1,17 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Fungible Ethernet driver configuration +# + +config FUN_ETH + tristate "Fungible Ethernet device driver" + depends on PCI && PCI_MSI + depends on TLS && TLS_DEVICE || TLS_DEVICE=n + select NET_DEVLINK + select FUN_CORE + help + This driver supports the Ethernet functionality of Fungible adapters. + It works with both physical and virtual functions. + + To compile this driver as a module, choose M here. The module + will be called funeth. diff --git a/drivers/net/ethernet/fungible/funeth/Makefile b/drivers/net/ethernet/fungible/funeth/Makefile new file mode 100644 index 000000000000..646d69595b4f --- /dev/null +++ b/drivers/net/ethernet/fungible/funeth/Makefile @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) + +ccflags-y += -I$(srctree)/$(src)/../funcore -I$(srctree)/$(src) + +obj-$(CONFIG_FUN_ETH) += funeth.o + +funeth-y := funeth_main.o funeth_rx.o funeth_tx.o funeth_devlink.o \ + funeth_ethtool.o + +funeth-$(CONFIG_TLS_DEVICE) += funeth_ktls.o diff --git a/drivers/net/ethernet/fungible/funeth/fun_port.h b/drivers/net/ethernet/fungible/funeth/fun_port.h new file mode 100644 index 000000000000..0f9da44e3786 --- /dev/null +++ b/drivers/net/ethernet/fungible/funeth/fun_port.h @@ -0,0 +1,97 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ + +#ifndef _FUN_PORT_H +#define _FUN_PORT_H + +enum port_mac_rx_stats { + PORT_MAC_RX_etherStatsOctets = 0x0, + PORT_MAC_RX_OctetsReceivedOK = 0x1, + PORT_MAC_RX_aAlignmentErrors = 0x2, + PORT_MAC_RX_aPAUSEMACCtrlFramesReceived = 0x3, + PORT_MAC_RX_aFrameTooLongErrors = 0x4, + PORT_MAC_RX_aInRangeLengthErrors = 0x5, + PORT_MAC_RX_aFramesReceivedOK = 0x6, + PORT_MAC_RX_aFrameCheckSequenceErrors = 0x7, + PORT_MAC_RX_VLANReceivedOK = 0x8, + PORT_MAC_RX_ifInErrors = 0x9, + PORT_MAC_RX_ifInUcastPkts = 0xa, + PORT_MAC_RX_ifInMulticastPkts = 0xb, + PORT_MAC_RX_ifInBroadcastPkts = 0xc, + PORT_MAC_RX_etherStatsDropEvents = 0xd, + PORT_MAC_RX_etherStatsPkts = 0xe, + PORT_MAC_RX_etherStatsUndersizePkts = 0xf, + PORT_MAC_RX_etherStatsPkts64Octets = 0x10, + PORT_MAC_RX_etherStatsPkts65to127Octets = 0x11, + PORT_MAC_RX_etherStatsPkts128to255Octets = 0x12, + PORT_MAC_RX_etherStatsPkts256to511Octets = 0x13, + PORT_MAC_RX_etherStatsPkts512to1023Octets = 0x14, + PORT_MAC_RX_etherStatsPkts1024to1518Octets = 0x15, + PORT_MAC_RX_etherStatsPkts1519toMaxOctets = 0x16, + PORT_MAC_RX_etherStatsOversizePkts = 0x17, + PORT_MAC_RX_etherStatsJabbers = 0x18, + PORT_MAC_RX_etherStatsFragments = 0x19, + PORT_MAC_RX_CBFCPAUSEFramesReceived_0 = 0x1a, + PORT_MAC_RX_CBFCPAUSEFramesReceived_1 = 0x1b, + PORT_MAC_RX_CBFCPAUSEFramesReceived_2 = 0x1c, + PORT_MAC_RX_CBFCPAUSEFramesReceived_3 = 0x1d, + PORT_MAC_RX_CBFCPAUSEFramesReceived_4 = 0x1e, + PORT_MAC_RX_CBFCPAUSEFramesReceived_5 = 0x1f, + PORT_MAC_RX_CBFCPAUSEFramesReceived_6 = 0x20, + PORT_MAC_RX_CBFCPAUSEFramesReceived_7 = 0x21, + PORT_MAC_RX_CBFCPAUSEFramesReceived_8 = 0x22, + PORT_MAC_RX_CBFCPAUSEFramesReceived_9 = 0x23, + PORT_MAC_RX_CBFCPAUSEFramesReceived_10 = 0x24, + PORT_MAC_RX_CBFCPAUSEFramesReceived_11 = 0x25, + PORT_MAC_RX_CBFCPAUSEFramesReceived_12 = 0x26, + PORT_MAC_RX_CBFCPAUSEFramesReceived_13 = 0x27, +
PORT_MAC_RX_CBFCPAUSEFramesReceived_14 = 0x28, + PORT_MAC_RX_CBFCPAUSEFramesReceived_15 = 0x29, + PORT_MAC_RX_MACControlFramesReceived = 0x2a, + PORT_MAC_RX_STATS_MAX = 0x2b, +}; + +enum port_mac_tx_stats { + PORT_MAC_TX_etherStatsOctets = 0x0, + PORT_MAC_TX_OctetsTransmittedOK = 0x1, + PORT_MAC_TX_aPAUSEMACCtrlFramesTransmitted = 0x2, + PORT_MAC_TX_aFramesTransmittedOK = 0x3, + PORT_MAC_TX_VLANTransmittedOK = 0x4, + PORT_MAC_TX_ifOutErrors = 0x5, + PORT_MAC_TX_ifOutUcastPkts = 0x6, + PORT_MAC_TX_ifOutMulticastPkts = 0x7, + PORT_MAC_TX_ifOutBroadcastPkts = 0x8, + PORT_MAC_TX_etherStatsPkts64Octets = 0x9, + PORT_MAC_TX_etherStatsPkts65to127Octets = 0xa, + PORT_MAC_TX_etherStatsPkts128to255Octets = 0xb, + PORT_MAC_TX_etherStatsPkts256to511Octets = 0xc, + PORT_MAC_TX_etherStatsPkts512to1023Octets = 0xd, + PORT_MAC_TX_etherStatsPkts1024to1518Octets = 0xe, + PORT_MAC_TX_etherStatsPkts1519toMaxOctets = 0xf, + PORT_MAC_TX_CBFCPAUSEFramesTransmitted_0 = 0x10, + PORT_MAC_TX_CBFCPAUSEFramesTransmitted_1 = 0x11, + PORT_MAC_TX_CBFCPAUSEFramesTransmitted_2 = 0x12, + PORT_MAC_TX_CBFCPAUSEFramesTransmitted_3 = 0x13, + PORT_MAC_TX_CBFCPAUSEFramesTransmitted_4 = 0x14, + PORT_MAC_TX_CBFCPAUSEFramesTransmitted_5 = 0x15, + PORT_MAC_TX_CBFCPAUSEFramesTransmitted_6 = 0x16, + PORT_MAC_TX_CBFCPAUSEFramesTransmitted_7 = 0x17, + PORT_MAC_TX_CBFCPAUSEFramesTransmitted_8 = 0x18, + PORT_MAC_TX_CBFCPAUSEFramesTransmitted_9 = 0x19, + PORT_MAC_TX_CBFCPAUSEFramesTransmitted_10 = 0x1a, + PORT_MAC_TX_CBFCPAUSEFramesTransmitted_11 = 0x1b, + PORT_MAC_TX_CBFCPAUSEFramesTransmitted_12 = 0x1c, + PORT_MAC_TX_CBFCPAUSEFramesTransmitted_13 = 0x1d, + PORT_MAC_TX_CBFCPAUSEFramesTransmitted_14 = 0x1e, + PORT_MAC_TX_CBFCPAUSEFramesTransmitted_15 = 0x1f, + PORT_MAC_TX_MACControlFramesTransmitted = 0x20, + PORT_MAC_TX_etherStatsPkts = 0x21, + PORT_MAC_TX_STATS_MAX = 0x22, +}; + +enum port_mac_fec_stats { + PORT_MAC_FEC_Correctable = 0x0, + PORT_MAC_FEC_Uncorrectable = 0x1, + PORT_MAC_FEC_STATS_MAX = 0x2, +}; + +#endif /* _FUN_PORT_H */ diff --git a/drivers/net/ethernet/fungible/funeth/funeth.h b/drivers/net/ethernet/fungible/funeth/funeth.h new file mode 100644 index 000000000000..1250e10d21db --- /dev/null +++ b/drivers/net/ethernet/fungible/funeth/funeth.h @@ -0,0 +1,171 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ + +#ifndef _FUNETH_H +#define _FUNETH_H + +#include <uapi/linux/if_ether.h> +#include <uapi/linux/net_tstamp.h> +#include <linux/mutex.h> +#include <linux/seqlock.h> +#include <linux/xarray.h> +#include <net/devlink.h> +#include "fun_dev.h" + +#define ADMIN_SQE_SIZE SZ_128 +#define ADMIN_CQE_SIZE SZ_64 +#define ADMIN_RSP_MAX_LEN (ADMIN_CQE_SIZE - sizeof(struct fun_cqe_info)) + +#define FUN_MAX_MTU 9024 + +#define SQ_DEPTH 512U +#define CQ_DEPTH 1024U +#define RQ_DEPTH (512U / (PAGE_SIZE / 4096)) + +#define CQ_INTCOAL_USEC 10 +#define CQ_INTCOAL_NPKT 16 +#define SQ_INTCOAL_USEC 10 +#define SQ_INTCOAL_NPKT 16 + +#define INVALID_LPORT 0xffff + +#define FUN_PORT_CAP_PAUSE_MASK (FUN_PORT_CAP_TX_PAUSE | FUN_PORT_CAP_RX_PAUSE) + +struct fun_vport_info { + u8 mac[ETH_ALEN]; + u16 vlan; + __be16 vlan_proto; + u8 qos; + u8 spoofchk:1; + u8 trusted:1; + unsigned int max_rate; +}; + +/* "subclass" of fun_dev for Ethernet functions */ +struct fun_ethdev { + struct fun_dev fdev; + + /* the function's network ports */ + struct net_device **netdevs; + unsigned int num_ports; + + /* configuration for the function's virtual ports */ + unsigned int num_vports; + struct fun_vport_info *vport_info; + + struct mutex 
state_mutex; /* nests inside RTNL if both taken */ + + unsigned int nsqs_per_port; +}; + +static inline struct fun_ethdev *to_fun_ethdev(struct fun_dev *p) +{ + return container_of(p, struct fun_ethdev, fdev); +} + +struct fun_qset { + struct funeth_rxq **rxqs; + struct funeth_txq **txqs; + struct funeth_txq **xdpqs; + unsigned int nrxqs; + unsigned int ntxqs; + unsigned int nxdpqs; + unsigned int rxq_start; + unsigned int txq_start; + unsigned int xdpq_start; + unsigned int cq_depth; + unsigned int rq_depth; + unsigned int sq_depth; + int state; +}; + +/* Per netdevice driver state, i.e., netdev_priv. */ +struct funeth_priv { + struct fun_dev *fdev; + struct pci_dev *pdev; + struct net_device *netdev; + + struct funeth_rxq * __rcu *rxqs; + struct funeth_txq **txqs; + struct funeth_txq * __rcu *xdpqs; + + struct xarray irqs; + unsigned int num_tx_irqs; + unsigned int num_rx_irqs; + unsigned int rx_irq_ofst; + + unsigned int lane_attrs; + u16 lport; + + /* link settings */ + u64 port_caps; + u64 advertising; + u64 lp_advertising; + unsigned int link_speed; + u8 xcvr_type; + u8 active_fc; + u8 active_fec; + u8 link_down_reason; + seqcount_t link_seq; + + u32 msg_enable; + + unsigned int num_xdpqs; + + /* ethtool, etc. config parameters */ + unsigned int sq_depth; + unsigned int rq_depth; + unsigned int cq_depth; + unsigned int cq_irq_db; + u8 tx_coal_usec; + u8 tx_coal_count; + u8 rx_coal_usec; + u8 rx_coal_count; + + struct hwtstamp_config hwtstamp_cfg; + + /* cumulative queue stats from earlier queue instances */ + u64 tx_packets; + u64 tx_bytes; + u64 tx_dropped; + u64 rx_packets; + u64 rx_bytes; + u64 rx_dropped; + + /* RSS */ + unsigned int rss_hw_id; + enum fun_eth_hash_alg hash_algo; + u8 rss_key[FUN_ETH_RSS_MAX_KEY_SIZE]; + unsigned int indir_table_nentries; + u32 indir_table[FUN_ETH_RSS_MAX_INDIR_ENT]; + dma_addr_t rss_dma_addr; + void *rss_cfg; + + /* DMA area for port stats */ + dma_addr_t stats_dma_addr; + __be64 *stats; + + struct bpf_prog *xdp_prog; + + struct devlink_port dl_port; + + /* kTLS state */ + unsigned int ktls_id; + atomic64_t tx_tls_add; + atomic64_t tx_tls_del; + atomic64_t tx_tls_resync; +}; + +void fun_set_ethtool_ops(struct net_device *netdev); +int fun_port_write_cmd(struct funeth_priv *fp, int key, u64 data); +int fun_port_read_cmd(struct funeth_priv *fp, int key, u64 *data); +int fun_create_and_bind_tx(struct funeth_priv *fp, u32 sqid); +int fun_replace_queues(struct net_device *dev, struct fun_qset *newqs, + struct netlink_ext_ack *extack); +int fun_change_num_queues(struct net_device *dev, unsigned int ntx, + unsigned int nrx); +void fun_set_ring_count(struct net_device *netdev, unsigned int ntx, + unsigned int nrx); +int fun_config_rss(struct net_device *dev, int algo, const u8 *key, + const u32 *qtable, u8 op); + +#endif /* _FUNETH_H */ diff --git a/drivers/net/ethernet/fungible/funeth/funeth_devlink.c b/drivers/net/ethernet/fungible/funeth/funeth_devlink.c new file mode 100644 index 000000000000..a849b3c6b01f --- /dev/null +++ b/drivers/net/ethernet/fungible/funeth/funeth_devlink.c @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) + +#include "funeth.h" +#include "funeth_devlink.h" + +static int fun_dl_info_get(struct devlink *dl, struct devlink_info_req *req, + struct netlink_ext_ack *extack) +{ + int err; + + err = devlink_info_driver_name_put(req, KBUILD_MODNAME); + if (err) + return err; + + return 0; +} + +static const struct devlink_ops fun_dl_ops = { + .info_get = fun_dl_info_get, +}; + +struct devlink 
*fun_devlink_alloc(struct device *dev) +{ + return devlink_alloc(&fun_dl_ops, sizeof(struct fun_ethdev), dev); +} + +void fun_devlink_free(struct devlink *devlink) +{ + devlink_free(devlink); +} + +void fun_devlink_register(struct devlink *devlink) +{ + devlink_register(devlink); +} + +void fun_devlink_unregister(struct devlink *devlink) +{ + devlink_unregister(devlink); +} diff --git a/drivers/net/ethernet/fungible/funeth/funeth_devlink.h b/drivers/net/ethernet/fungible/funeth/funeth_devlink.h new file mode 100644 index 000000000000..e40464d57ff4 --- /dev/null +++ b/drivers/net/ethernet/fungible/funeth/funeth_devlink.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ + +#ifndef __FUNETH_DEVLINK_H +#define __FUNETH_DEVLINK_H + +#include <net/devlink.h> + +struct devlink *fun_devlink_alloc(struct device *dev); +void fun_devlink_free(struct devlink *devlink); +void fun_devlink_register(struct devlink *devlink); +void fun_devlink_unregister(struct devlink *devlink); + +#endif /* __FUNETH_DEVLINK_H */ diff --git a/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c b/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c new file mode 100644 index 000000000000..d081168c95fa --- /dev/null +++ b/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c @@ -0,0 +1,1162 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) + +#include <linux/ethtool.h> +#include <linux/linkmode.h> +#include <linux/netdevice.h> +#include <linux/nvme.h> +#include <linux/io.h> +#include <linux/io-64-nonatomic-lo-hi.h> +#include <linux/pci.h> +#include <linux/rtnetlink.h> +#include "funeth.h" +#include "fun_port.h" +#include "funeth_txrx.h" + +/* Min queue depth. The smallest power-of-2 supporting jumbo frames with 4K + * pages is 8. Require it for all types of queues though some could work with + * fewer entries. 
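+ * Depths must also be powers of 2; fun_set_ringparam() enforces both limits.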
+ */ +#define FUNETH_MIN_QDEPTH 8 + +static const char mac_tx_stat_names[][ETH_GSTRING_LEN] = { + "mac_tx_octets_total", + "mac_tx_frames_total", + "mac_tx_vlan_frames_ok", + "mac_tx_unicast_frames", + "mac_tx_multicast_frames", + "mac_tx_broadcast_frames", + "mac_tx_errors", + "mac_tx_CBFCPAUSE0", + "mac_tx_CBFCPAUSE1", + "mac_tx_CBFCPAUSE2", + "mac_tx_CBFCPAUSE3", + "mac_tx_CBFCPAUSE4", + "mac_tx_CBFCPAUSE5", + "mac_tx_CBFCPAUSE6", + "mac_tx_CBFCPAUSE7", + "mac_tx_CBFCPAUSE8", + "mac_tx_CBFCPAUSE9", + "mac_tx_CBFCPAUSE10", + "mac_tx_CBFCPAUSE11", + "mac_tx_CBFCPAUSE12", + "mac_tx_CBFCPAUSE13", + "mac_tx_CBFCPAUSE14", + "mac_tx_CBFCPAUSE15", +}; + +static const char mac_rx_stat_names[][ETH_GSTRING_LEN] = { + "mac_rx_octets_total", + "mac_rx_frames_total", + "mac_rx_VLAN_frames_ok", + "mac_rx_unicast_frames", + "mac_rx_multicast_frames", + "mac_rx_broadcast_frames", + "mac_rx_drop_events", + "mac_rx_errors", + "mac_rx_alignment_errors", + "mac_rx_CBFCPAUSE0", + "mac_rx_CBFCPAUSE1", + "mac_rx_CBFCPAUSE2", + "mac_rx_CBFCPAUSE3", + "mac_rx_CBFCPAUSE4", + "mac_rx_CBFCPAUSE5", + "mac_rx_CBFCPAUSE6", + "mac_rx_CBFCPAUSE7", + "mac_rx_CBFCPAUSE8", + "mac_rx_CBFCPAUSE9", + "mac_rx_CBFCPAUSE10", + "mac_rx_CBFCPAUSE11", + "mac_rx_CBFCPAUSE12", + "mac_rx_CBFCPAUSE13", + "mac_rx_CBFCPAUSE14", + "mac_rx_CBFCPAUSE15", +}; + +static const char * const txq_stat_names[] = { + "tx_pkts", + "tx_bytes", + "tx_cso", + "tx_tso", + "tx_encapsulated_tso", + "tx_more", + "tx_queue_stops", + "tx_queue_restarts", + "tx_mapping_errors", + "tx_tls_encrypted_packets", + "tx_tls_encrypted_bytes", + "tx_tls_ooo", + "tx_tls_drop_no_sync_data", +}; + +static const char * const xdpq_stat_names[] = { + "tx_xdp_pkts", + "tx_xdp_bytes", + "tx_xdp_full", + "tx_xdp_mapping_errors", +}; + +static const char * const rxq_stat_names[] = { + "rx_pkts", + "rx_bytes", + "rx_cso", + "gro_pkts", + "gro_merged", + "rx_xdp_tx", + "rx_xdp_redir", + "rx_xdp_drops", + "rx_buffers", + "rx_page_allocs", + "rx_drops", + "rx_budget_exhausted", + "rx_mapping_errors", +}; + +static const char * const tls_stat_names[] = { + "tx_tls_ctx", + "tx_tls_del", + "tx_tls_resync", +}; + +static void fun_link_modes_to_ethtool(u64 modes, + unsigned long *ethtool_modes_map) +{ +#define ADD_LINK_MODE(mode) \ + __set_bit(ETHTOOL_LINK_MODE_ ## mode ## _BIT, ethtool_modes_map) + + if (modes & FUN_PORT_CAP_AUTONEG) + ADD_LINK_MODE(Autoneg); + if (modes & FUN_PORT_CAP_1000_X) + ADD_LINK_MODE(1000baseX_Full); + if (modes & FUN_PORT_CAP_10G_R) { + ADD_LINK_MODE(10000baseCR_Full); + ADD_LINK_MODE(10000baseSR_Full); + ADD_LINK_MODE(10000baseLR_Full); + ADD_LINK_MODE(10000baseER_Full); + } + if (modes & FUN_PORT_CAP_25G_R) { + ADD_LINK_MODE(25000baseCR_Full); + ADD_LINK_MODE(25000baseSR_Full); + } + if (modes & FUN_PORT_CAP_40G_R4) { + ADD_LINK_MODE(40000baseCR4_Full); + ADD_LINK_MODE(40000baseSR4_Full); + ADD_LINK_MODE(40000baseLR4_Full); + } + if (modes & FUN_PORT_CAP_50G_R2) { + ADD_LINK_MODE(50000baseCR2_Full); + ADD_LINK_MODE(50000baseSR2_Full); + } + if (modes & FUN_PORT_CAP_50G_R) { + ADD_LINK_MODE(50000baseCR_Full); + ADD_LINK_MODE(50000baseSR_Full); + ADD_LINK_MODE(50000baseLR_ER_FR_Full); + } + if (modes & FUN_PORT_CAP_100G_R4) { + ADD_LINK_MODE(100000baseCR4_Full); + ADD_LINK_MODE(100000baseSR4_Full); + ADD_LINK_MODE(100000baseLR4_ER4_Full); + } + if (modes & FUN_PORT_CAP_100G_R2) { + ADD_LINK_MODE(100000baseCR2_Full); + ADD_LINK_MODE(100000baseSR2_Full); + ADD_LINK_MODE(100000baseLR2_ER2_FR2_Full); + } + if (modes & FUN_PORT_CAP_FEC_NONE) + 
ADD_LINK_MODE(FEC_NONE); + if (modes & FUN_PORT_CAP_FEC_FC) + ADD_LINK_MODE(FEC_BASER); + if (modes & FUN_PORT_CAP_FEC_RS) + ADD_LINK_MODE(FEC_RS); + if (modes & FUN_PORT_CAP_RX_PAUSE) + ADD_LINK_MODE(Pause); + +#undef ADD_LINK_MODE +} + +static void set_asym_pause(u64 advertising, struct ethtool_link_ksettings *ks) +{ + bool rx_pause, tx_pause; + + rx_pause = advertising & FUN_PORT_CAP_RX_PAUSE; + tx_pause = advertising & FUN_PORT_CAP_TX_PAUSE; + if (tx_pause ^ rx_pause) + ethtool_link_ksettings_add_link_mode(ks, advertising, + Asym_Pause); +} + +static unsigned int fun_port_type(unsigned int xcvr) +{ + if (!xcvr) + return PORT_NONE; + + switch (xcvr & 7) { + case FUN_XCVR_BASET: + return PORT_TP; + case FUN_XCVR_CU: + return PORT_DA; + default: + return PORT_FIBRE; + } +} + +static int fun_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *ks) +{ + const struct funeth_priv *fp = netdev_priv(netdev); + unsigned int seq, speed, xcvr; + u64 lp_advertising; + bool link_up; + + ethtool_link_ksettings_zero_link_mode(ks, supported); + ethtool_link_ksettings_zero_link_mode(ks, advertising); + ethtool_link_ksettings_zero_link_mode(ks, lp_advertising); + + /* Link settings change asynchronously, take a consistent snapshot */ + do { + seq = read_seqcount_begin(&fp->link_seq); + link_up = netif_carrier_ok(netdev); + speed = fp->link_speed; + xcvr = fp->xcvr_type; + lp_advertising = fp->lp_advertising; + } while (read_seqcount_retry(&fp->link_seq, seq)); + + if (link_up) { + ks->base.speed = speed; + ks->base.duplex = DUPLEX_FULL; + fun_link_modes_to_ethtool(lp_advertising, + ks->link_modes.lp_advertising); + } else { + ks->base.speed = SPEED_UNKNOWN; + ks->base.duplex = DUPLEX_UNKNOWN; + } + + ks->base.autoneg = (fp->advertising & FUN_PORT_CAP_AUTONEG) ? 
+ AUTONEG_ENABLE : AUTONEG_DISABLE; + ks->base.port = fun_port_type(xcvr); + + fun_link_modes_to_ethtool(fp->port_caps, ks->link_modes.supported); + if (fp->port_caps & (FUN_PORT_CAP_RX_PAUSE | FUN_PORT_CAP_TX_PAUSE)) + ethtool_link_ksettings_add_link_mode(ks, supported, Asym_Pause); + + fun_link_modes_to_ethtool(fp->advertising, ks->link_modes.advertising); + set_asym_pause(fp->advertising, ks); + return 0; +} + +static u64 fun_advert_modes(const struct ethtool_link_ksettings *ks) +{ + u64 modes = 0; + +#define HAS_MODE(mode) \ + ethtool_link_ksettings_test_link_mode(ks, advertising, mode) + + if (HAS_MODE(1000baseX_Full)) + modes |= FUN_PORT_CAP_1000_X; + if (HAS_MODE(10000baseCR_Full) || HAS_MODE(10000baseSR_Full) || + HAS_MODE(10000baseLR_Full) || HAS_MODE(10000baseER_Full)) + modes |= FUN_PORT_CAP_10G_R; + if (HAS_MODE(25000baseCR_Full) || HAS_MODE(25000baseSR_Full)) + modes |= FUN_PORT_CAP_25G_R; + if (HAS_MODE(40000baseCR4_Full) || HAS_MODE(40000baseSR4_Full) || + HAS_MODE(40000baseLR4_Full)) + modes |= FUN_PORT_CAP_40G_R4; + if (HAS_MODE(50000baseCR2_Full) || HAS_MODE(50000baseSR2_Full)) + modes |= FUN_PORT_CAP_50G_R2; + if (HAS_MODE(50000baseCR_Full) || HAS_MODE(50000baseSR_Full) || + HAS_MODE(50000baseLR_ER_FR_Full)) + modes |= FUN_PORT_CAP_50G_R; + if (HAS_MODE(100000baseCR4_Full) || HAS_MODE(100000baseSR4_Full) || + HAS_MODE(100000baseLR4_ER4_Full)) + modes |= FUN_PORT_CAP_100G_R4; + if (HAS_MODE(100000baseCR2_Full) || HAS_MODE(100000baseSR2_Full) || + HAS_MODE(100000baseLR2_ER2_FR2_Full)) + modes |= FUN_PORT_CAP_100G_R2; + + return modes; +#undef HAS_MODE +} + +static u64 fun_speed_to_link_mode(unsigned int speed) +{ + switch (speed) { + case SPEED_100000: + return FUN_PORT_CAP_100G_R4 | FUN_PORT_CAP_100G_R2; + case SPEED_50000: + return FUN_PORT_CAP_50G_R | FUN_PORT_CAP_50G_R2; + case SPEED_40000: + return FUN_PORT_CAP_40G_R4; + case SPEED_25000: + return FUN_PORT_CAP_25G_R; + case SPEED_10000: + return FUN_PORT_CAP_10G_R; + case SPEED_1000: + return FUN_PORT_CAP_1000_X; + default: + return 0; + } +} + +static int fun_change_advert(struct funeth_priv *fp, u64 new_advert) +{ + int err; + + if (new_advert == fp->advertising) + return 0; + + err = fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_ADVERT, new_advert); + if (!err) + fp->advertising = new_advert; + return err; +} + +#define FUN_PORT_CAP_FEC_MASK \ + (FUN_PORT_CAP_FEC_NONE | FUN_PORT_CAP_FEC_FC | FUN_PORT_CAP_FEC_RS) + +static int fun_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *ks) +{ + __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = {}; + struct funeth_priv *fp = netdev_priv(netdev); + u64 new_advert; + + /* eswitch ports don't support mode changes */ + if (fp->port_caps & FUN_PORT_CAP_VPORT) + return -EOPNOTSUPP; + + if (ks->base.duplex == DUPLEX_HALF) + return -EINVAL; + if (ks->base.autoneg == AUTONEG_ENABLE && + !(fp->port_caps & FUN_PORT_CAP_AUTONEG)) + return -EINVAL; + + if (ks->base.autoneg == AUTONEG_ENABLE) { + if (linkmode_empty(ks->link_modes.advertising)) + return -EINVAL; + + fun_link_modes_to_ethtool(fp->port_caps, supported); + if (!linkmode_subset(ks->link_modes.advertising, supported)) + return -EINVAL; + + new_advert = fun_advert_modes(ks) | FUN_PORT_CAP_AUTONEG; + } else { + new_advert = fun_speed_to_link_mode(ks->base.speed); + new_advert &= fp->port_caps; + if (!new_advert) + return -EINVAL; + } + new_advert |= fp->advertising & + (FUN_PORT_CAP_PAUSE_MASK | FUN_PORT_CAP_FEC_MASK); + + return fun_change_advert(fp, new_advert); +} + +static void 
fun_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + const struct funeth_priv *fp = netdev_priv(netdev); + u8 active_pause = fp->active_fc; + + pause->rx_pause = !!(active_pause & FUN_PORT_CAP_RX_PAUSE); + pause->tx_pause = !!(active_pause & FUN_PORT_CAP_TX_PAUSE); + pause->autoneg = !!(fp->advertising & FUN_PORT_CAP_AUTONEG); +} + +static int fun_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct funeth_priv *fp = netdev_priv(netdev); + u64 new_advert; + + if (fp->port_caps & FUN_PORT_CAP_VPORT) + return -EOPNOTSUPP; + /* Forcing PAUSE settings with AN enabled is unsupported. */ + if (!pause->autoneg && (fp->advertising & FUN_PORT_CAP_AUTONEG)) + return -EOPNOTSUPP; + if (pause->autoneg && !(fp->advertising & FUN_PORT_CAP_AUTONEG)) + return -EINVAL; + if (pause->tx_pause && !(fp->port_caps & FUN_PORT_CAP_TX_PAUSE)) + return -EINVAL; + if (pause->rx_pause && !(fp->port_caps & FUN_PORT_CAP_RX_PAUSE)) + return -EINVAL; + + new_advert = fp->advertising & ~FUN_PORT_CAP_PAUSE_MASK; + if (pause->tx_pause) + new_advert |= FUN_PORT_CAP_TX_PAUSE; + if (pause->rx_pause) + new_advert |= FUN_PORT_CAP_RX_PAUSE; + + return fun_change_advert(fp, new_advert); +} + +static int fun_restart_an(struct net_device *netdev) +{ + struct funeth_priv *fp = netdev_priv(netdev); + + if (!(fp->advertising & FUN_PORT_CAP_AUTONEG)) + return -EOPNOTSUPP; + + return fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_ADVERT, + FUN_PORT_CAP_AUTONEG); +} + +static int fun_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct funeth_priv *fp = netdev_priv(netdev); + unsigned int beacon; + + if (fp->port_caps & FUN_PORT_CAP_VPORT) + return -EOPNOTSUPP; + if (state != ETHTOOL_ID_ACTIVE && state != ETHTOOL_ID_INACTIVE) + return -EOPNOTSUPP; + + beacon = state == ETHTOOL_ID_ACTIVE ? 
FUN_PORT_LED_BEACON_ON : + FUN_PORT_LED_BEACON_OFF; + return fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_LED, beacon); +} + +static void fun_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *info) +{ + const struct funeth_priv *fp = netdev_priv(netdev); + + strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); + strscpy(info->bus_info, pci_name(fp->pdev), sizeof(info->bus_info)); +} + +static u32 fun_get_msglevel(struct net_device *netdev) +{ + const struct funeth_priv *fp = netdev_priv(netdev); + + return fp->msg_enable; +} + +static void fun_set_msglevel(struct net_device *netdev, u32 value) +{ + struct funeth_priv *fp = netdev_priv(netdev); + + fp->msg_enable = value; +} + +static int fun_get_regs_len(struct net_device *dev) +{ + return NVME_REG_ACQ + sizeof(u64); +} + +static void fun_get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *buf) +{ + const struct funeth_priv *fp = netdev_priv(dev); + void __iomem *bar = fp->fdev->bar; + + regs->version = 0; + *(u64 *)(buf + NVME_REG_CAP) = readq(bar + NVME_REG_CAP); + *(u32 *)(buf + NVME_REG_VS) = readl(bar + NVME_REG_VS); + *(u32 *)(buf + NVME_REG_INTMS) = readl(bar + NVME_REG_INTMS); + *(u32 *)(buf + NVME_REG_INTMC) = readl(bar + NVME_REG_INTMC); + *(u32 *)(buf + NVME_REG_CC) = readl(bar + NVME_REG_CC); + *(u32 *)(buf + NVME_REG_CSTS) = readl(bar + NVME_REG_CSTS); + *(u32 *)(buf + NVME_REG_AQA) = readl(bar + NVME_REG_AQA); + *(u64 *)(buf + NVME_REG_ASQ) = readq(bar + NVME_REG_ASQ); + *(u64 *)(buf + NVME_REG_ACQ) = readq(bar + NVME_REG_ACQ); +} + +static int fun_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kcoal, + struct netlink_ext_ack *ext_ack) +{ + const struct funeth_priv *fp = netdev_priv(netdev); + + coal->rx_coalesce_usecs = fp->rx_coal_usec; + coal->rx_max_coalesced_frames = fp->rx_coal_count; + coal->use_adaptive_rx_coalesce = !fp->cq_irq_db; + coal->tx_coalesce_usecs = fp->tx_coal_usec; + coal->tx_max_coalesced_frames = fp->tx_coal_count; + return 0; +} + +static int fun_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kcoal, + struct netlink_ext_ack *ext_ack) +{ + struct funeth_priv *fp = netdev_priv(netdev); + struct funeth_rxq **rxqs; + unsigned int i, db_val; + + if (coal->rx_coalesce_usecs > FUN_DB_INTCOAL_USEC_M || + coal->rx_max_coalesced_frames > FUN_DB_INTCOAL_ENTRIES_M || + (coal->rx_coalesce_usecs | coal->rx_max_coalesced_frames) == 0 || + coal->tx_coalesce_usecs > FUN_DB_INTCOAL_USEC_M || + coal->tx_max_coalesced_frames > FUN_DB_INTCOAL_ENTRIES_M || + (coal->tx_coalesce_usecs | coal->tx_max_coalesced_frames) == 0) + return -EINVAL; + + /* a timer is required if there's any coalescing */ + if ((coal->rx_max_coalesced_frames > 1 && !coal->rx_coalesce_usecs) || + (coal->tx_max_coalesced_frames > 1 && !coal->tx_coalesce_usecs)) + return -EINVAL; + + fp->rx_coal_usec = coal->rx_coalesce_usecs; + fp->rx_coal_count = coal->rx_max_coalesced_frames; + fp->tx_coal_usec = coal->tx_coalesce_usecs; + fp->tx_coal_count = coal->tx_max_coalesced_frames; + + db_val = FUN_IRQ_CQ_DB(fp->rx_coal_usec, fp->rx_coal_count); + WRITE_ONCE(fp->cq_irq_db, db_val); + + rxqs = rtnl_dereference(fp->rxqs); + if (!rxqs) + return 0; + + for (i = 0; i < netdev->real_num_rx_queues; i++) + WRITE_ONCE(rxqs[i]->irq_db_val, db_val); + + db_val = FUN_IRQ_SQ_DB(fp->tx_coal_usec, fp->tx_coal_count); + for (i = 0; i < netdev->real_num_tx_queues; i++) + WRITE_ONCE(fp->txqs[i]->irq_db_val, db_val); + + 
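/* The queues read irq_db_val locklessly when rearming their IRQ + * doorbells, hence the WRITE_ONCE() updates above. + */ +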
return 0; +} + +static void fun_get_channels(struct net_device *netdev, + struct ethtool_channels *chan) +{ + chan->max_rx = netdev->num_rx_queues; + chan->rx_count = netdev->real_num_rx_queues; + + chan->max_tx = netdev->num_tx_queues; + chan->tx_count = netdev->real_num_tx_queues; +} + +static int fun_set_channels(struct net_device *netdev, + struct ethtool_channels *chan) +{ + if (!chan->tx_count || !chan->rx_count) + return -EINVAL; + + if (chan->tx_count == netdev->real_num_tx_queues && + chan->rx_count == netdev->real_num_rx_queues) + return 0; + + if (netif_running(netdev)) + return fun_change_num_queues(netdev, chan->tx_count, + chan->rx_count); + + fun_set_ring_count(netdev, chan->tx_count, chan->rx_count); + return 0; +} + +static void fun_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kring, + struct netlink_ext_ack *extack) +{ + const struct funeth_priv *fp = netdev_priv(netdev); + unsigned int max_depth = fp->fdev->q_depth; + + /* We size CQs to be twice the RQ depth so max RQ depth is half the + * max queue depth. + */ + ring->rx_max_pending = max_depth / 2; + ring->tx_max_pending = max_depth; + + ring->rx_pending = fp->rq_depth; + ring->tx_pending = fp->sq_depth; + + kring->rx_buf_len = PAGE_SIZE; + kring->cqe_size = FUNETH_CQE_SIZE; +} + +static int fun_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kring, + struct netlink_ext_ack *extack) +{ + struct funeth_priv *fp = netdev_priv(netdev); + int rc; + + if (ring->rx_mini_pending || ring->rx_jumbo_pending) + return -EINVAL; + + /* queue depths must be powers-of-2 */ + if (!is_power_of_2(ring->rx_pending) || + !is_power_of_2(ring->tx_pending)) + return -EINVAL; + + if (ring->rx_pending < FUNETH_MIN_QDEPTH || + ring->tx_pending < FUNETH_MIN_QDEPTH) + return -EINVAL; + + if (fp->sq_depth == ring->tx_pending && + fp->rq_depth == ring->rx_pending) + return 0; + + if (netif_running(netdev)) { + struct fun_qset req = { + .cq_depth = 2 * ring->rx_pending, + .rq_depth = ring->rx_pending, + .sq_depth = ring->tx_pending + }; + + rc = fun_replace_queues(netdev, &req, extack); + if (rc) + return rc; + } + + fp->sq_depth = ring->tx_pending; + fp->rq_depth = ring->rx_pending; + fp->cq_depth = 2 * fp->rq_depth; + return 0; +} + +static int fun_get_sset_count(struct net_device *dev, int sset) +{ + const struct funeth_priv *fp = netdev_priv(dev); + int n; + + switch (sset) { + case ETH_SS_STATS: + n = (dev->real_num_tx_queues + 1) * ARRAY_SIZE(txq_stat_names) + + (dev->real_num_rx_queues + 1) * ARRAY_SIZE(rxq_stat_names) + + (fp->num_xdpqs + 1) * ARRAY_SIZE(xdpq_stat_names) + + ARRAY_SIZE(tls_stat_names); + if (fp->port_caps & FUN_PORT_CAP_STATS) { + n += ARRAY_SIZE(mac_tx_stat_names) + + ARRAY_SIZE(mac_rx_stat_names); + } + return n; + default: + break; + } + return 0; +} + +static void fun_get_strings(struct net_device *netdev, u32 sset, u8 *data) +{ + const struct funeth_priv *fp = netdev_priv(netdev); + unsigned int i, j; + u8 *p = data; + + switch (sset) { + case ETH_SS_STATS: + if (fp->port_caps & FUN_PORT_CAP_STATS) { + memcpy(p, mac_tx_stat_names, sizeof(mac_tx_stat_names)); + p += sizeof(mac_tx_stat_names); + memcpy(p, mac_rx_stat_names, sizeof(mac_rx_stat_names)); + p += sizeof(mac_rx_stat_names); + } + + for (i = 0; i < netdev->real_num_tx_queues; i++) { + for (j = 0; j < ARRAY_SIZE(txq_stat_names); j++) + ethtool_sprintf(&p, "%s[%u]", txq_stat_names[j], + i); + } + for (j = 0; j < 
ARRAY_SIZE(txq_stat_names); j++) + ethtool_sprintf(&p, txq_stat_names[j]); + + for (i = 0; i < fp->num_xdpqs; i++) { + for (j = 0; j < ARRAY_SIZE(xdpq_stat_names); j++) + ethtool_sprintf(&p, "%s[%u]", + xdpq_stat_names[j], i); + } + for (j = 0; j < ARRAY_SIZE(xdpq_stat_names); j++) + ethtool_sprintf(&p, xdpq_stat_names[j]); + + for (i = 0; i < netdev->real_num_rx_queues; i++) { + for (j = 0; j < ARRAY_SIZE(rxq_stat_names); j++) + ethtool_sprintf(&p, "%s[%u]", rxq_stat_names[j], + i); + } + for (j = 0; j < ARRAY_SIZE(rxq_stat_names); j++) + ethtool_sprintf(&p, rxq_stat_names[j]); + + for (j = 0; j < ARRAY_SIZE(tls_stat_names); j++) + ethtool_sprintf(&p, tls_stat_names[j]); + break; + default: + break; + } +} + +static u64 *get_mac_stats(const struct funeth_priv *fp, u64 *data) +{ +#define TX_STAT(s) \ + *data++ = be64_to_cpu(fp->stats[PORT_MAC_RX_STATS_MAX + PORT_MAC_TX_##s]) + + TX_STAT(etherStatsOctets); + TX_STAT(etherStatsPkts); + TX_STAT(VLANTransmittedOK); + TX_STAT(ifOutUcastPkts); + TX_STAT(ifOutMulticastPkts); + TX_STAT(ifOutBroadcastPkts); + TX_STAT(ifOutErrors); + TX_STAT(CBFCPAUSEFramesTransmitted_0); + TX_STAT(CBFCPAUSEFramesTransmitted_1); + TX_STAT(CBFCPAUSEFramesTransmitted_2); + TX_STAT(CBFCPAUSEFramesTransmitted_3); + TX_STAT(CBFCPAUSEFramesTransmitted_4); + TX_STAT(CBFCPAUSEFramesTransmitted_5); + TX_STAT(CBFCPAUSEFramesTransmitted_6); + TX_STAT(CBFCPAUSEFramesTransmitted_7); + TX_STAT(CBFCPAUSEFramesTransmitted_8); + TX_STAT(CBFCPAUSEFramesTransmitted_9); + TX_STAT(CBFCPAUSEFramesTransmitted_10); + TX_STAT(CBFCPAUSEFramesTransmitted_11); + TX_STAT(CBFCPAUSEFramesTransmitted_12); + TX_STAT(CBFCPAUSEFramesTransmitted_13); + TX_STAT(CBFCPAUSEFramesTransmitted_14); + TX_STAT(CBFCPAUSEFramesTransmitted_15); + +#define RX_STAT(s) *data++ = be64_to_cpu(fp->stats[PORT_MAC_RX_##s]) + + RX_STAT(etherStatsOctets); + RX_STAT(etherStatsPkts); + RX_STAT(VLANReceivedOK); + RX_STAT(ifInUcastPkts); + RX_STAT(ifInMulticastPkts); + RX_STAT(ifInBroadcastPkts); + RX_STAT(etherStatsDropEvents); + RX_STAT(ifInErrors); + RX_STAT(aAlignmentErrors); + RX_STAT(CBFCPAUSEFramesReceived_0); + RX_STAT(CBFCPAUSEFramesReceived_1); + RX_STAT(CBFCPAUSEFramesReceived_2); + RX_STAT(CBFCPAUSEFramesReceived_3); + RX_STAT(CBFCPAUSEFramesReceived_4); + RX_STAT(CBFCPAUSEFramesReceived_5); + RX_STAT(CBFCPAUSEFramesReceived_6); + RX_STAT(CBFCPAUSEFramesReceived_7); + RX_STAT(CBFCPAUSEFramesReceived_8); + RX_STAT(CBFCPAUSEFramesReceived_9); + RX_STAT(CBFCPAUSEFramesReceived_10); + RX_STAT(CBFCPAUSEFramesReceived_11); + RX_STAT(CBFCPAUSEFramesReceived_12); + RX_STAT(CBFCPAUSEFramesReceived_13); + RX_STAT(CBFCPAUSEFramesReceived_14); + RX_STAT(CBFCPAUSEFramesReceived_15); + + return data; + +#undef TX_STAT +#undef RX_STAT +} + +static void fun_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + const struct funeth_priv *fp = netdev_priv(netdev); + struct funeth_txq_stats txs; + struct funeth_rxq_stats rxs; + struct funeth_txq **xdpqs; + struct funeth_rxq **rxqs; + unsigned int i, start; + u64 *totals, *tot; + + if (fp->port_caps & FUN_PORT_CAP_STATS) + data = get_mac_stats(fp, data); + + rxqs = rtnl_dereference(fp->rxqs); + if (!rxqs) + return; + +#define ADD_STAT(cnt) do { \ + *data = (cnt); *tot++ += *data++; \ +} while (0) + + /* Tx queues */ + totals = data + netdev->real_num_tx_queues * ARRAY_SIZE(txq_stat_names); + + for (i = 0; i < netdev->real_num_tx_queues; i++) { + tot = totals; + + FUN_QSTAT_READ(fp->txqs[i], start, txs); + + ADD_STAT(txs.tx_pkts); + 
ADD_STAT(txs.tx_bytes); + ADD_STAT(txs.tx_cso); + ADD_STAT(txs.tx_tso); + ADD_STAT(txs.tx_encap_tso); + ADD_STAT(txs.tx_more); + ADD_STAT(txs.tx_nstops); + ADD_STAT(txs.tx_nrestarts); + ADD_STAT(txs.tx_map_err); + ADD_STAT(txs.tx_tls_pkts); + ADD_STAT(txs.tx_tls_bytes); + ADD_STAT(txs.tx_tls_fallback); + ADD_STAT(txs.tx_tls_drops); + } + data += ARRAY_SIZE(txq_stat_names); + + /* XDP Tx queues */ + xdpqs = rtnl_dereference(fp->xdpqs); + totals = data + fp->num_xdpqs * ARRAY_SIZE(xdpq_stat_names); + + for (i = 0; i < fp->num_xdpqs; i++) { + tot = totals; + + FUN_QSTAT_READ(xdpqs[i], start, txs); + + ADD_STAT(txs.tx_pkts); + ADD_STAT(txs.tx_bytes); + ADD_STAT(txs.tx_xdp_full); + ADD_STAT(txs.tx_map_err); + } + data += ARRAY_SIZE(xdpq_stat_names); + + /* Rx queues */ + totals = data + netdev->real_num_rx_queues * ARRAY_SIZE(rxq_stat_names); + + for (i = 0; i < netdev->real_num_rx_queues; i++) { + tot = totals; + + FUN_QSTAT_READ(rxqs[i], start, rxs); + + ADD_STAT(rxs.rx_pkts); + ADD_STAT(rxs.rx_bytes); + ADD_STAT(rxs.rx_cso); + ADD_STAT(rxs.gro_pkts); + ADD_STAT(rxs.gro_merged); + ADD_STAT(rxs.xdp_tx); + ADD_STAT(rxs.xdp_redir); + ADD_STAT(rxs.xdp_drops); + ADD_STAT(rxs.rx_bufs); + ADD_STAT(rxs.rx_page_alloc); + ADD_STAT(rxs.rx_mem_drops + rxs.xdp_err); + ADD_STAT(rxs.rx_budget); + ADD_STAT(rxs.rx_map_err); + } + data += ARRAY_SIZE(rxq_stat_names); +#undef ADD_STAT + + *data++ = atomic64_read(&fp->tx_tls_add); + *data++ = atomic64_read(&fp->tx_tls_del); + *data++ = atomic64_read(&fp->tx_tls_resync); +} + +#define RX_STAT(fp, s) be64_to_cpu((fp)->stats[PORT_MAC_RX_##s]) +#define TX_STAT(fp, s) \ + be64_to_cpu((fp)->stats[PORT_MAC_RX_STATS_MAX + PORT_MAC_TX_##s]) +#define FEC_STAT(fp, s) \ + be64_to_cpu((fp)->stats[PORT_MAC_RX_STATS_MAX + \ + PORT_MAC_TX_STATS_MAX + PORT_MAC_FEC_##s]) + +static void fun_get_pause_stats(struct net_device *netdev, + struct ethtool_pause_stats *stats) +{ + const struct funeth_priv *fp = netdev_priv(netdev); + + if (!(fp->port_caps & FUN_PORT_CAP_STATS)) + return; + + stats->tx_pause_frames = TX_STAT(fp, aPAUSEMACCtrlFramesTransmitted); + stats->rx_pause_frames = RX_STAT(fp, aPAUSEMACCtrlFramesReceived); +} + +static void fun_get_802_3_stats(struct net_device *netdev, + struct ethtool_eth_mac_stats *stats) +{ + const struct funeth_priv *fp = netdev_priv(netdev); + + if (!(fp->port_caps & FUN_PORT_CAP_STATS)) + return; + + stats->FramesTransmittedOK = TX_STAT(fp, aFramesTransmittedOK); + stats->FramesReceivedOK = RX_STAT(fp, aFramesReceivedOK); + stats->FrameCheckSequenceErrors = RX_STAT(fp, aFrameCheckSequenceErrors); + stats->OctetsTransmittedOK = TX_STAT(fp, OctetsTransmittedOK); + stats->OctetsReceivedOK = RX_STAT(fp, OctetsReceivedOK); + stats->InRangeLengthErrors = RX_STAT(fp, aInRangeLengthErrors); + stats->FrameTooLongErrors = RX_STAT(fp, aFrameTooLongErrors); +} + +static void fun_get_802_3_ctrl_stats(struct net_device *netdev, + struct ethtool_eth_ctrl_stats *stats) +{ + const struct funeth_priv *fp = netdev_priv(netdev); + + if (!(fp->port_caps & FUN_PORT_CAP_STATS)) + return; + + stats->MACControlFramesTransmitted = TX_STAT(fp, MACControlFramesTransmitted); + stats->MACControlFramesReceived = RX_STAT(fp, MACControlFramesReceived); +} + +static void fun_get_rmon_stats(struct net_device *netdev, + struct ethtool_rmon_stats *stats, + const struct ethtool_rmon_hist_range **ranges) +{ + static const struct ethtool_rmon_hist_range rmon_ranges[] = { + { 64, 64 }, + { 65, 127 }, + { 128, 255 }, + { 256, 511 }, + { 512, 1023 }, + { 1024, 1518 }, + { 1519, 32767 
}, + {} + }; + + const struct funeth_priv *fp = netdev_priv(netdev); + + if (!(fp->port_caps & FUN_PORT_CAP_STATS)) + return; + + stats->undersize_pkts = RX_STAT(fp, etherStatsUndersizePkts); + stats->oversize_pkts = RX_STAT(fp, etherStatsOversizePkts); + stats->fragments = RX_STAT(fp, etherStatsFragments); + stats->jabbers = RX_STAT(fp, etherStatsJabbers); + + stats->hist[0] = RX_STAT(fp, etherStatsPkts64Octets); + stats->hist[1] = RX_STAT(fp, etherStatsPkts65to127Octets); + stats->hist[2] = RX_STAT(fp, etherStatsPkts128to255Octets); + stats->hist[3] = RX_STAT(fp, etherStatsPkts256to511Octets); + stats->hist[4] = RX_STAT(fp, etherStatsPkts512to1023Octets); + stats->hist[5] = RX_STAT(fp, etherStatsPkts1024to1518Octets); + stats->hist[6] = RX_STAT(fp, etherStatsPkts1519toMaxOctets); + + stats->hist_tx[0] = TX_STAT(fp, etherStatsPkts64Octets); + stats->hist_tx[1] = TX_STAT(fp, etherStatsPkts65to127Octets); + stats->hist_tx[2] = TX_STAT(fp, etherStatsPkts128to255Octets); + stats->hist_tx[3] = TX_STAT(fp, etherStatsPkts256to511Octets); + stats->hist_tx[4] = TX_STAT(fp, etherStatsPkts512to1023Octets); + stats->hist_tx[5] = TX_STAT(fp, etherStatsPkts1024to1518Octets); + stats->hist_tx[6] = TX_STAT(fp, etherStatsPkts1519toMaxOctets); + + *ranges = rmon_ranges; +} + +static void fun_get_fec_stats(struct net_device *netdev, + struct ethtool_fec_stats *stats) +{ + const struct funeth_priv *fp = netdev_priv(netdev); + + if (!(fp->port_caps & FUN_PORT_CAP_STATS)) + return; + + stats->corrected_blocks.total = FEC_STAT(fp, Correctable); + stats->uncorrectable_blocks.total = FEC_STAT(fp, Uncorrectable); +} + +#undef RX_STAT +#undef TX_STAT +#undef FEC_STAT + +static int fun_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = netdev->real_num_rx_queues; + return 0; + default: + break; + } + return -EOPNOTSUPP; +} + +static int fun_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info) +{ + return 0; +} + +static u32 fun_get_rxfh_indir_size(struct net_device *netdev) +{ + const struct funeth_priv *fp = netdev_priv(netdev); + + return fp->indir_table_nentries; +} + +static u32 fun_get_rxfh_key_size(struct net_device *netdev) +{ + const struct funeth_priv *fp = netdev_priv(netdev); + + return sizeof(fp->rss_key); +} + +static int fun_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +{ + const struct funeth_priv *fp = netdev_priv(netdev); + + if (!fp->rss_cfg) + return -EOPNOTSUPP; + + if (indir) + memcpy(indir, fp->indir_table, + sizeof(u32) * fp->indir_table_nentries); + + if (key) + memcpy(key, fp->rss_key, sizeof(fp->rss_key)); + + if (hfunc) + *hfunc = fp->hash_algo == FUN_ETH_RSS_ALG_TOEPLITZ ? + ETH_RSS_HASH_TOP : ETH_RSS_HASH_CRC32; + + return 0; +} + +static int fun_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct funeth_priv *fp = netdev_priv(netdev); + const u32 *rss_indir = indir ? indir : fp->indir_table; + const u8 *rss_key = key ? key : fp->rss_key; + enum fun_eth_hash_alg algo; + + if (!fp->rss_cfg) + return -EOPNOTSUPP; + + if (hfunc == ETH_RSS_HASH_NO_CHANGE) + algo = fp->hash_algo; + else if (hfunc == ETH_RSS_HASH_CRC32) + algo = FUN_ETH_RSS_ALG_CRC32; + else if (hfunc == ETH_RSS_HASH_TOP) + algo = FUN_ETH_RSS_ALG_TOEPLITZ; + else + return -EINVAL; + + /* If the port is enabled try to reconfigure RSS and keep the new + * settings if successful. 
If it is down we update the RSS settings + * and apply them at the next UP time. + */ + if (netif_running(netdev)) { + int rc = fun_config_rss(netdev, algo, rss_key, rss_indir, + FUN_ADMIN_SUBOP_MODIFY); + if (rc) + return rc; + } + + fp->hash_algo = algo; + if (key) + memcpy(fp->rss_key, key, sizeof(fp->rss_key)); + if (indir) + memcpy(fp->indir_table, indir, + sizeof(u32) * fp->indir_table_nentries); + return 0; +} + +static int fun_get_ts_info(struct net_device *netdev, + struct ethtool_ts_info *info) +{ + info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + info->phc_index = -1; + info->tx_types = BIT(HWTSTAMP_TX_OFF); + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL); + return 0; +} + +static unsigned int to_ethtool_fec(unsigned int fun_fec) +{ + unsigned int fec = 0; + + if (fun_fec == FUN_PORT_FEC_NA) + fec |= ETHTOOL_FEC_NONE; + if (fun_fec & FUN_PORT_FEC_OFF) + fec |= ETHTOOL_FEC_OFF; + if (fun_fec & FUN_PORT_FEC_RS) + fec |= ETHTOOL_FEC_RS; + if (fun_fec & FUN_PORT_FEC_FC) + fec |= ETHTOOL_FEC_BASER; + if (fun_fec & FUN_PORT_FEC_AUTO) + fec |= ETHTOOL_FEC_AUTO; + return fec; +} + +static int fun_get_fecparam(struct net_device *netdev, + struct ethtool_fecparam *fec) +{ + struct funeth_priv *fp = netdev_priv(netdev); + u64 fec_data; + int rc; + + rc = fun_port_read_cmd(fp, FUN_ADMIN_PORT_KEY_FEC, &fec_data); + if (rc) + return rc; + + fec->active_fec = to_ethtool_fec(fec_data & 0xff); + fec->fec = to_ethtool_fec(fec_data >> 8); + return 0; +} + +static int fun_set_fecparam(struct net_device *netdev, + struct ethtool_fecparam *fec) +{ + struct funeth_priv *fp = netdev_priv(netdev); + u64 fec_mode; + + switch (fec->fec) { + case ETHTOOL_FEC_AUTO: + fec_mode = FUN_PORT_FEC_AUTO; + break; + case ETHTOOL_FEC_OFF: + if (!(fp->port_caps & FUN_PORT_CAP_FEC_NONE)) + return -EINVAL; + fec_mode = FUN_PORT_FEC_OFF; + break; + case ETHTOOL_FEC_BASER: + if (!(fp->port_caps & FUN_PORT_CAP_FEC_FC)) + return -EINVAL; + fec_mode = FUN_PORT_FEC_FC; + break; + case ETHTOOL_FEC_RS: + if (!(fp->port_caps & FUN_PORT_CAP_FEC_RS)) + return -EINVAL; + fec_mode = FUN_PORT_FEC_RS; + break; + default: + return -EINVAL; + } + + return fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_FEC, fec_mode); +} + +static const struct ethtool_ops fun_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_MAX_FRAMES, + .get_link_ksettings = fun_get_link_ksettings, + .set_link_ksettings = fun_set_link_ksettings, + .set_phys_id = fun_set_phys_id, + .get_drvinfo = fun_get_drvinfo, + .get_msglevel = fun_get_msglevel, + .set_msglevel = fun_set_msglevel, + .get_regs_len = fun_get_regs_len, + .get_regs = fun_get_regs, + .get_link = ethtool_op_get_link, + .get_coalesce = fun_get_coalesce, + .set_coalesce = fun_set_coalesce, + .get_ts_info = fun_get_ts_info, + .get_ringparam = fun_get_ringparam, + .set_ringparam = fun_set_ringparam, + .get_sset_count = fun_get_sset_count, + .get_strings = fun_get_strings, + .get_ethtool_stats = fun_get_ethtool_stats, + .get_rxnfc = fun_get_rxnfc, + .set_rxnfc = fun_set_rxnfc, + .get_rxfh_indir_size = fun_get_rxfh_indir_size, + .get_rxfh_key_size = fun_get_rxfh_key_size, + .get_rxfh = fun_get_rxfh, + .set_rxfh = fun_set_rxfh, + .get_channels = fun_get_channels, + .set_channels = fun_set_channels, + .get_fecparam = fun_get_fecparam, + .set_fecparam = fun_set_fecparam, + .get_pauseparam = fun_get_pauseparam, + 
.set_pauseparam = fun_set_pauseparam, + .nway_reset = fun_restart_an, + .get_pause_stats = fun_get_pause_stats, + .get_fec_stats = fun_get_fec_stats, + .get_eth_mac_stats = fun_get_802_3_stats, + .get_eth_ctrl_stats = fun_get_802_3_ctrl_stats, + .get_rmon_stats = fun_get_rmon_stats, +}; + +void fun_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &fun_ethtool_ops; +} diff --git a/drivers/net/ethernet/fungible/funeth/funeth_ktls.c b/drivers/net/ethernet/fungible/funeth/funeth_ktls.c new file mode 100644 index 000000000000..f871def70d70 --- /dev/null +++ b/drivers/net/ethernet/fungible/funeth/funeth_ktls.c @@ -0,0 +1,155 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) + +#include "funeth.h" +#include "funeth_ktls.h" + +static int fun_admin_ktls_create(struct funeth_priv *fp, unsigned int id) +{ + struct fun_admin_ktls_create_req req = { + .common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_KTLS, + sizeof(req)), + .subop = FUN_ADMIN_SUBOP_CREATE, + .id = cpu_to_be32(id), + }; + + return fun_submit_admin_sync_cmd(fp->fdev, &req.common, NULL, 0, 0); +} + +static int fun_ktls_add(struct net_device *netdev, struct sock *sk, + enum tls_offload_ctx_dir direction, + struct tls_crypto_info *crypto_info, + u32 start_offload_tcp_sn) +{ + struct funeth_priv *fp = netdev_priv(netdev); + struct fun_admin_ktls_modify_req req = { + .common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_KTLS, + sizeof(req)), + .subop = FUN_ADMIN_SUBOP_MODIFY, + .id = cpu_to_be32(fp->ktls_id), + .tcp_seq = cpu_to_be32(start_offload_tcp_sn), + }; + struct fun_admin_ktls_modify_rsp rsp; + struct fun_ktls_tx_ctx *tx_ctx; + int rc; + + if (direction != TLS_OFFLOAD_CTX_DIR_TX) + return -EOPNOTSUPP; + + if (crypto_info->version == TLS_1_2_VERSION) + req.version = FUN_KTLS_TLSV2; + else + return -EOPNOTSUPP; + + switch (crypto_info->cipher_type) { + case TLS_CIPHER_AES_GCM_128: { + struct tls12_crypto_info_aes_gcm_128 *c = (void *)crypto_info; + + req.cipher = FUN_KTLS_CIPHER_AES_GCM_128; + memcpy(req.key, c->key, sizeof(c->key)); + memcpy(req.iv, c->iv, sizeof(c->iv)); + memcpy(req.salt, c->salt, sizeof(c->salt)); + memcpy(req.record_seq, c->rec_seq, sizeof(c->rec_seq)); + break; + } + default: + return -EOPNOTSUPP; + } + + rc = fun_submit_admin_sync_cmd(fp->fdev, &req.common, &rsp, + sizeof(rsp), 0); + memzero_explicit(&req, sizeof(req)); + if (rc) + return rc; + + tx_ctx = tls_driver_ctx(sk, direction); + tx_ctx->tlsid = rsp.tlsid; + tx_ctx->next_seq = start_offload_tcp_sn; + atomic64_inc(&fp->tx_tls_add); + return 0; +} + +static void fun_ktls_del(struct net_device *netdev, + struct tls_context *tls_ctx, + enum tls_offload_ctx_dir direction) +{ + struct funeth_priv *fp = netdev_priv(netdev); + struct fun_admin_ktls_modify_req req; + struct fun_ktls_tx_ctx *tx_ctx; + + if (direction != TLS_OFFLOAD_CTX_DIR_TX) + return; + + tx_ctx = __tls_driver_ctx(tls_ctx, direction); + + req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_KTLS, + offsetof(struct fun_admin_ktls_modify_req, tcp_seq)); + req.subop = FUN_ADMIN_SUBOP_MODIFY; + req.flags = cpu_to_be16(FUN_KTLS_MODIFY_REMOVE); + req.id = cpu_to_be32(fp->ktls_id); + req.tlsid = tx_ctx->tlsid; + + fun_submit_admin_sync_cmd(fp->fdev, &req.common, NULL, 0, 0); + atomic64_inc(&fp->tx_tls_del); +} + +static int fun_ktls_resync(struct net_device *netdev, struct sock *sk, u32 seq, + u8 *rcd_sn, enum tls_offload_ctx_dir direction) +{ + struct funeth_priv *fp = netdev_priv(netdev); + struct fun_admin_ktls_modify_req req; + struct fun_ktls_tx_ctx *tx_ctx; + int rc; 
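+
+ /* A Tx resync rewrites the device's record state in place: the record
+ * sequence number is replaced and transmit offload resumes at TCP
+ * sequence @seq. tx_ctx->next_seq is advanced only if the device
+ * accepts the command.
+ */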
+ + if (direction != TLS_OFFLOAD_CTX_DIR_TX) + return -EOPNOTSUPP; + + tx_ctx = tls_driver_ctx(sk, direction); + + req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_KTLS, + offsetof(struct fun_admin_ktls_modify_req, key)); + req.subop = FUN_ADMIN_SUBOP_MODIFY; + req.flags = 0; + req.id = cpu_to_be32(fp->ktls_id); + req.tlsid = tx_ctx->tlsid; + req.tcp_seq = cpu_to_be32(seq); + req.version = 0; + req.cipher = 0; + memcpy(req.record_seq, rcd_sn, sizeof(req.record_seq)); + + atomic64_inc(&fp->tx_tls_resync); + rc = fun_submit_admin_sync_cmd(fp->fdev, &req.common, NULL, 0, 0); + if (!rc) + tx_ctx->next_seq = seq; + return rc; +} + +static const struct tlsdev_ops fun_ktls_ops = { + .tls_dev_add = fun_ktls_add, + .tls_dev_del = fun_ktls_del, + .tls_dev_resync = fun_ktls_resync, +}; + +int fun_ktls_init(struct net_device *netdev) +{ + struct funeth_priv *fp = netdev_priv(netdev); + int rc; + + rc = fun_admin_ktls_create(fp, netdev->dev_port); + if (rc) + return rc; + + fp->ktls_id = netdev->dev_port; + netdev->tlsdev_ops = &fun_ktls_ops; + netdev->hw_features |= NETIF_F_HW_TLS_TX; + netdev->features |= NETIF_F_HW_TLS_TX; + return 0; +} + +void fun_ktls_cleanup(struct funeth_priv *fp) +{ + if (fp->ktls_id == FUN_HCI_ID_INVALID) + return; + + fun_res_destroy(fp->fdev, FUN_ADMIN_OP_KTLS, 0, fp->ktls_id); + fp->ktls_id = FUN_HCI_ID_INVALID; +} diff --git a/drivers/net/ethernet/fungible/funeth/funeth_ktls.h b/drivers/net/ethernet/fungible/funeth/funeth_ktls.h new file mode 100644 index 000000000000..9d6f2141a959 --- /dev/null +++ b/drivers/net/ethernet/fungible/funeth/funeth_ktls.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ + +#ifndef _FUN_KTLS_H +#define _FUN_KTLS_H + +#include <net/tls.h> + +struct funeth_priv; + +struct fun_ktls_tx_ctx { + __be64 tlsid; + u32 next_seq; +}; + +#if IS_ENABLED(CONFIG_TLS_DEVICE) +int fun_ktls_init(struct net_device *netdev); +void fun_ktls_cleanup(struct funeth_priv *fp); + +#else + +static inline void fun_ktls_init(struct net_device *netdev) +{ +} + +static inline void fun_ktls_cleanup(struct funeth_priv *fp) +{ +} +#endif + +#endif /* _FUN_KTLS_H */ diff --git a/drivers/net/ethernet/fungible/funeth/funeth_main.c b/drivers/net/ethernet/fungible/funeth/funeth_main.c new file mode 100644 index 000000000000..67dd02ed1fa3 --- /dev/null +++ b/drivers/net/ethernet/fungible/funeth/funeth_main.c @@ -0,0 +1,2091 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) + +#include <linux/bpf.h> +#include <linux/crash_dump.h> +#include <linux/etherdevice.h> +#include <linux/ethtool.h> +#include <linux/filter.h> +#include <linux/idr.h> +#include <linux/if_vlan.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/pci.h> +#include <linux/rtnetlink.h> +#include <linux/inetdevice.h> + +#include "funeth.h" +#include "funeth_devlink.h" +#include "funeth_ktls.h" +#include "fun_port.h" +#include "fun_queue.h" +#include "funeth_txrx.h" + +#define ADMIN_SQ_DEPTH 32 +#define ADMIN_CQ_DEPTH 64 +#define ADMIN_RQ_DEPTH 16 + +/* Default number of Tx/Rx queues. */ +#define FUN_DFLT_QUEUES 16U + +enum { + FUN_SERV_RES_CHANGE = FUN_SERV_FIRST_AVAIL, + FUN_SERV_DEL_PORTS, +}; + +static const struct pci_device_id funeth_id_table[] = { + { PCI_VDEVICE(FUNGIBLE, 0x0101) }, + { PCI_VDEVICE(FUNGIBLE, 0x0181) }, + { 0, } +}; + +/* Issue a port write admin command with @n key/value pairs. 
*/ +static int fun_port_write_cmds(struct funeth_priv *fp, unsigned int n, + const int *keys, const u64 *data) +{ + unsigned int cmd_size, i; + union { + struct fun_admin_port_req req; + struct fun_admin_port_rsp rsp; + u8 v[ADMIN_SQE_SIZE]; + } cmd; + + cmd_size = offsetof(struct fun_admin_port_req, u.write.write48) + + n * sizeof(struct fun_admin_write48_req); + if (cmd_size > sizeof(cmd) || cmd_size > ADMIN_RSP_MAX_LEN) + return -EINVAL; + + cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_PORT, + cmd_size); + cmd.req.u.write = + FUN_ADMIN_PORT_WRITE_REQ_INIT(FUN_ADMIN_SUBOP_WRITE, 0, + fp->netdev->dev_port); + for (i = 0; i < n; i++) + cmd.req.u.write.write48[i] = + FUN_ADMIN_WRITE48_REQ_INIT(keys[i], data[i]); + + return fun_submit_admin_sync_cmd(fp->fdev, &cmd.req.common, + &cmd.rsp, cmd_size, 0); +} + +int fun_port_write_cmd(struct funeth_priv *fp, int key, u64 data) +{ + return fun_port_write_cmds(fp, 1, &key, &data); +} + +/* Issue a port read admin command with @n key/value pairs. */ +static int fun_port_read_cmds(struct funeth_priv *fp, unsigned int n, + const int *keys, u64 *data) +{ + const struct fun_admin_read48_rsp *r48rsp; + unsigned int cmd_size, i; + int rc; + union { + struct fun_admin_port_req req; + struct fun_admin_port_rsp rsp; + u8 v[ADMIN_SQE_SIZE]; + } cmd; + + cmd_size = offsetof(struct fun_admin_port_req, u.read.read48) + + n * sizeof(struct fun_admin_read48_req); + if (cmd_size > sizeof(cmd) || cmd_size > ADMIN_RSP_MAX_LEN) + return -EINVAL; + + cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_PORT, + cmd_size); + cmd.req.u.read = + FUN_ADMIN_PORT_READ_REQ_INIT(FUN_ADMIN_SUBOP_READ, 0, + fp->netdev->dev_port); + for (i = 0; i < n; i++) + cmd.req.u.read.read48[i] = FUN_ADMIN_READ48_REQ_INIT(keys[i]); + + rc = fun_submit_admin_sync_cmd(fp->fdev, &cmd.req.common, + &cmd.rsp, cmd_size, 0); + if (rc) + return rc; + + for (r48rsp = cmd.rsp.u.read.read48, i = 0; i < n; i++, r48rsp++) { + data[i] = FUN_ADMIN_READ48_RSP_DATA_G(r48rsp->key_to_data); + dev_dbg(fp->fdev->dev, + "port_read_rsp lport=%u (key_to_data=0x%llx) key=%d data:%lld retval:%lld", + fp->lport, r48rsp->key_to_data, keys[i], data[i], + FUN_ADMIN_READ48_RSP_RET_G(r48rsp->key_to_data)); + } + return 0; +} + +int fun_port_read_cmd(struct funeth_priv *fp, int key, u64 *data) +{ + return fun_port_read_cmds(fp, 1, &key, data); +} + +static void fun_report_link(struct net_device *netdev) +{ + if (netif_carrier_ok(netdev)) { + const struct funeth_priv *fp = netdev_priv(netdev); + const char *fec = "", *pause = ""; + int speed = fp->link_speed; + char unit = 'M'; + + if (fp->link_speed >= SPEED_1000) { + speed /= 1000; + unit = 'G'; + } + + if (fp->active_fec & FUN_PORT_FEC_RS) + fec = ", RS-FEC"; + else if (fp->active_fec & FUN_PORT_FEC_FC) + fec = ", BASER-FEC"; + + if ((fp->active_fc & FUN_PORT_CAP_PAUSE_MASK) == FUN_PORT_CAP_PAUSE_MASK) + pause = ", Tx/Rx PAUSE"; + else if (fp->active_fc & FUN_PORT_CAP_RX_PAUSE) + pause = ", Rx PAUSE"; + else if (fp->active_fc & FUN_PORT_CAP_TX_PAUSE) + pause = ", Tx PAUSE"; + + netdev_info(netdev, "Link up at %d %cb/s full-duplex%s%s\n", + speed, unit, pause, fec); + } else { + netdev_info(netdev, "Link down\n"); + } +} + +static int fun_adi_write(struct fun_dev *fdev, enum fun_admin_adi_attr attr, + unsigned int adi_id, const struct fun_adi_param *param) +{ + struct fun_admin_adi_req req = { + .common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_ADI, + sizeof(req)), + .u.write.subop = FUN_ADMIN_SUBOP_WRITE, + .u.write.attribute = attr, + .u.write.id = 
cpu_to_be32(adi_id),
+ .u.write.param = *param
+ };
+
+ return fun_submit_admin_sync_cmd(fdev, &req.common, NULL, 0, 0);
+}
+
+/* Configure RSS for the given port. @op determines whether a new RSS context
+ * is to be created or whether an existing one should be reconfigured. The
+ * remaining parameters specify the hashing algorithm, key, and indirection
+ * table.
+ *
+ * This initiates packet delivery to the Rx queues set in the indirection
+ * table.
+ */
+int fun_config_rss(struct net_device *dev, int algo, const u8 *key,
+ const u32 *qtable, u8 op)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ unsigned int table_len = fp->indir_table_nentries;
+ unsigned int len = FUN_ETH_RSS_MAX_KEY_SIZE + sizeof(u32) * table_len;
+ struct funeth_rxq **rxqs = rtnl_dereference(fp->rxqs);
+ union {
+ struct {
+ struct fun_admin_rss_req req;
+ struct fun_dataop_gl gl;
+ };
+ struct fun_admin_generic_create_rsp rsp;
+ } cmd;
+ __be32 *indir_tab;
+ u16 flags;
+ int rc;
+
+ if (op != FUN_ADMIN_SUBOP_CREATE && fp->rss_hw_id == FUN_HCI_ID_INVALID)
+ return -EINVAL;
+
+ flags = op == FUN_ADMIN_SUBOP_CREATE ?
+ FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR : 0;
+ cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_RSS,
+ sizeof(cmd));
+ cmd.req.u.create =
+ FUN_ADMIN_RSS_CREATE_REQ_INIT(op, flags, fp->rss_hw_id,
+ dev->dev_port, algo,
+ FUN_ETH_RSS_MAX_KEY_SIZE,
+ table_len, 0,
+ FUN_ETH_RSS_MAX_KEY_SIZE);
+ cmd.req.u.create.dataop = FUN_DATAOP_HDR_INIT(1, 0, 1, 0, len);
+ fun_dataop_gl_init(&cmd.gl, 0, 0, len, fp->rss_dma_addr);
+
+ /* write the key and indirection table into the RSS DMA area */
+ memcpy(fp->rss_cfg, key, FUN_ETH_RSS_MAX_KEY_SIZE);
+ indir_tab = fp->rss_cfg + FUN_ETH_RSS_MAX_KEY_SIZE;
+ for (rc = 0; rc < table_len; rc++)
+ *indir_tab++ = cpu_to_be32(rxqs[*qtable++]->hw_cqid);
+
+ rc = fun_submit_admin_sync_cmd(fp->fdev, &cmd.req.common,
+ &cmd.rsp, sizeof(cmd.rsp), 0);
+ if (!rc && op == FUN_ADMIN_SUBOP_CREATE)
+ fp->rss_hw_id = be32_to_cpu(cmd.rsp.id);
+ return rc;
+}
+
+/* Destroy the HW RSS context associated with the given port. This also stops
+ * all packet delivery to our Rx queues.
+ */
+static void fun_destroy_rss(struct funeth_priv *fp)
+{
+ if (fp->rss_hw_id != FUN_HCI_ID_INVALID) {
+ fun_res_destroy(fp->fdev, FUN_ADMIN_OP_RSS, 0, fp->rss_hw_id);
+ fp->rss_hw_id = FUN_HCI_ID_INVALID;
+ }
+}
+
+static void fun_irq_aff_notify(struct irq_affinity_notify *notify,
+ const cpumask_t *mask)
+{
+ struct fun_irq *p = container_of(notify, struct fun_irq, aff_notify);
+
+ cpumask_copy(&p->affinity_mask, mask);
+}
+
+static void fun_irq_aff_release(struct kref __always_unused *ref)
+{
+}
+
+/* Allocate an IRQ structure, assign an MSI-X index and initial affinity to it,
+ * and add it to the IRQ XArray.
+ */
+static struct fun_irq *fun_alloc_qirq(struct funeth_priv *fp, unsigned int idx,
+ int node, unsigned int xa_idx_offset)
+{
+ struct fun_irq *irq;
+ int cpu, res;
+
+ cpu = cpumask_local_spread(idx, node);
+ node = cpu_to_mem(cpu);
+
+ irq = kzalloc_node(sizeof(*irq), GFP_KERNEL, node);
+ if (!irq)
+ return ERR_PTR(-ENOMEM);
+
+ res = fun_reserve_irqs(fp->fdev, 1, &irq->irq_idx);
+ if (res != 1)
+ goto free_irq;
+
+ res = xa_insert(&fp->irqs, idx + xa_idx_offset, irq, GFP_KERNEL);
+ if (res)
+ goto release_irq;
+
+ irq->irq = pci_irq_vector(fp->pdev, irq->irq_idx);
+ cpumask_set_cpu(cpu, &irq->affinity_mask);
+ irq->aff_notify.notify = fun_irq_aff_notify;
+ irq->aff_notify.release = fun_irq_aff_release;
+ irq->state = FUN_IRQ_INIT;
+ return irq;
+
+release_irq:
+ fun_release_irqs(fp->fdev, 1, &irq->irq_idx);
+free_irq:
+ kfree(irq);
+ return ERR_PTR(res);
+}
+
+static void fun_free_qirq(struct funeth_priv *fp, struct fun_irq *irq)
+{
+ netif_napi_del(&irq->napi);
+ fun_release_irqs(fp->fdev, 1, &irq->irq_idx);
+ kfree(irq);
+}
+
+/* Release the IRQs reserved for Tx/Rx queues that aren't being used. */
+static void fun_prune_queue_irqs(struct net_device *dev)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ unsigned int nreleased = 0;
+ struct fun_irq *irq;
+ unsigned long idx;
+
+ xa_for_each(&fp->irqs, idx, irq) {
+ if (irq->txq || irq->rxq) /* skip those in use */
+ continue;
+
+ xa_erase(&fp->irqs, idx);
+ fun_free_qirq(fp, irq);
+ nreleased++;
+ if (idx < fp->rx_irq_ofst)
+ fp->num_tx_irqs--;
+ else
+ fp->num_rx_irqs--;
+ }
+ netif_info(fp, intr, dev, "Released %u queue IRQs\n", nreleased);
+}
+
+/* Reserve IRQs, one per queue, to accommodate the requested queue numbers @ntx
+ * and @nrx. IRQs are added incrementally to those we already have.
+ * We hold on to allocated IRQs until garbage collection of unused IRQs is
+ * separately requested.
+ */ +static int fun_alloc_queue_irqs(struct net_device *dev, unsigned int ntx, + unsigned int nrx) +{ + struct funeth_priv *fp = netdev_priv(dev); + int node = dev_to_node(&fp->pdev->dev); + struct fun_irq *irq; + unsigned int i; + + for (i = fp->num_tx_irqs; i < ntx; i++) { + irq = fun_alloc_qirq(fp, i, node, 0); + if (IS_ERR(irq)) + return PTR_ERR(irq); + + fp->num_tx_irqs++; + netif_tx_napi_add(dev, &irq->napi, fun_txq_napi_poll, + NAPI_POLL_WEIGHT); + } + + for (i = fp->num_rx_irqs; i < nrx; i++) { + irq = fun_alloc_qirq(fp, i, node, fp->rx_irq_ofst); + if (IS_ERR(irq)) + return PTR_ERR(irq); + + fp->num_rx_irqs++; + netif_napi_add(dev, &irq->napi, fun_rxq_napi_poll, + NAPI_POLL_WEIGHT); + } + + netif_info(fp, intr, dev, "Reserved %u/%u IRQs for Tx/Rx queues\n", + ntx, nrx); + return 0; +} + +static void free_txqs(struct funeth_txq **txqs, unsigned int nqs, + unsigned int start, int state) +{ + unsigned int i; + + for (i = start; i < nqs && txqs[i]; i++) + txqs[i] = funeth_txq_free(txqs[i], state); +} + +static int alloc_txqs(struct net_device *dev, struct funeth_txq **txqs, + unsigned int nqs, unsigned int depth, unsigned int start, + int state) +{ + struct funeth_priv *fp = netdev_priv(dev); + unsigned int i; + int err; + + for (i = start; i < nqs; i++) { + err = funeth_txq_create(dev, i, depth, xa_load(&fp->irqs, i), + state, &txqs[i]); + if (err) { + free_txqs(txqs, nqs, start, FUN_QSTATE_DESTROYED); + return err; + } + } + return 0; +} + +static void free_rxqs(struct funeth_rxq **rxqs, unsigned int nqs, + unsigned int start, int state) +{ + unsigned int i; + + for (i = start; i < nqs && rxqs[i]; i++) + rxqs[i] = funeth_rxq_free(rxqs[i], state); +} + +static int alloc_rxqs(struct net_device *dev, struct funeth_rxq **rxqs, + unsigned int nqs, unsigned int ncqe, unsigned int nrqe, + unsigned int start, int state) +{ + struct funeth_priv *fp = netdev_priv(dev); + unsigned int i; + int err; + + for (i = start; i < nqs; i++) { + err = funeth_rxq_create(dev, i, ncqe, nrqe, + xa_load(&fp->irqs, i + fp->rx_irq_ofst), + state, &rxqs[i]); + if (err) { + free_rxqs(rxqs, nqs, start, FUN_QSTATE_DESTROYED); + return err; + } + } + return 0; +} + +static void free_xdpqs(struct funeth_txq **xdpqs, unsigned int nqs, + unsigned int start, int state) +{ + unsigned int i; + + for (i = start; i < nqs && xdpqs[i]; i++) + xdpqs[i] = funeth_txq_free(xdpqs[i], state); + + if (state == FUN_QSTATE_DESTROYED) + kfree(xdpqs); +} + +static struct funeth_txq **alloc_xdpqs(struct net_device *dev, unsigned int nqs, + unsigned int depth, unsigned int start, + int state) +{ + struct funeth_txq **xdpqs; + unsigned int i; + int err; + + xdpqs = kcalloc(nqs, sizeof(*xdpqs), GFP_KERNEL); + if (!xdpqs) + return ERR_PTR(-ENOMEM); + + for (i = start; i < nqs; i++) { + err = funeth_txq_create(dev, i, depth, NULL, state, &xdpqs[i]); + if (err) { + free_xdpqs(xdpqs, nqs, start, FUN_QSTATE_DESTROYED); + return ERR_PTR(err); + } + } + return xdpqs; +} + +static void fun_free_rings(struct net_device *netdev, struct fun_qset *qset) +{ + struct funeth_priv *fp = netdev_priv(netdev); + struct funeth_txq **xdpqs = qset->xdpqs; + struct funeth_rxq **rxqs = qset->rxqs; + + /* qset may not specify any queues to operate on. In that case the + * currently installed queues are implied. 
+ */ + if (!rxqs) { + rxqs = rtnl_dereference(fp->rxqs); + xdpqs = rtnl_dereference(fp->xdpqs); + qset->txqs = fp->txqs; + qset->nrxqs = netdev->real_num_rx_queues; + qset->ntxqs = netdev->real_num_tx_queues; + qset->nxdpqs = fp->num_xdpqs; + } + if (!rxqs) + return; + + if (rxqs == rtnl_dereference(fp->rxqs)) { + rcu_assign_pointer(fp->rxqs, NULL); + rcu_assign_pointer(fp->xdpqs, NULL); + synchronize_net(); + fp->txqs = NULL; + } + + free_rxqs(rxqs, qset->nrxqs, qset->rxq_start, qset->state); + free_txqs(qset->txqs, qset->ntxqs, qset->txq_start, qset->state); + free_xdpqs(xdpqs, qset->nxdpqs, qset->xdpq_start, qset->state); + if (qset->state == FUN_QSTATE_DESTROYED) + kfree(rxqs); + + /* Tell the caller which queues were operated on. */ + qset->rxqs = rxqs; + qset->xdpqs = xdpqs; +} + +static int fun_alloc_rings(struct net_device *netdev, struct fun_qset *qset) +{ + struct funeth_txq **xdpqs = NULL, **txqs; + struct funeth_rxq **rxqs; + int err; + + err = fun_alloc_queue_irqs(netdev, qset->ntxqs, qset->nrxqs); + if (err) + return err; + + rxqs = kcalloc(qset->ntxqs + qset->nrxqs, sizeof(*rxqs), GFP_KERNEL); + if (!rxqs) + return -ENOMEM; + + if (qset->nxdpqs) { + xdpqs = alloc_xdpqs(netdev, qset->nxdpqs, qset->sq_depth, + qset->xdpq_start, qset->state); + if (IS_ERR(xdpqs)) { + err = PTR_ERR(xdpqs); + goto free_qvec; + } + } + + txqs = (struct funeth_txq **)&rxqs[qset->nrxqs]; + err = alloc_txqs(netdev, txqs, qset->ntxqs, qset->sq_depth, + qset->txq_start, qset->state); + if (err) + goto free_xdpqs; + + err = alloc_rxqs(netdev, rxqs, qset->nrxqs, qset->cq_depth, + qset->rq_depth, qset->rxq_start, qset->state); + if (err) + goto free_txqs; + + qset->rxqs = rxqs; + qset->txqs = txqs; + qset->xdpqs = xdpqs; + return 0; + +free_txqs: + free_txqs(txqs, qset->ntxqs, qset->txq_start, FUN_QSTATE_DESTROYED); +free_xdpqs: + free_xdpqs(xdpqs, qset->nxdpqs, qset->xdpq_start, FUN_QSTATE_DESTROYED); +free_qvec: + kfree(rxqs); + return err; +} + +/* Take queues to the next level. Presently this means creating them on the + * device. 
+ */ +static int fun_advance_ring_state(struct net_device *dev, struct fun_qset *qset) +{ + struct funeth_priv *fp = netdev_priv(dev); + int i, err; + + for (i = 0; i < qset->nrxqs; i++) { + err = fun_rxq_create_dev(qset->rxqs[i], + xa_load(&fp->irqs, + i + fp->rx_irq_ofst)); + if (err) + goto out; + } + + for (i = 0; i < qset->ntxqs; i++) { + err = fun_txq_create_dev(qset->txqs[i], xa_load(&fp->irqs, i)); + if (err) + goto out; + } + + for (i = 0; i < qset->nxdpqs; i++) { + err = fun_txq_create_dev(qset->xdpqs[i], NULL); + if (err) + goto out; + } + + return 0; + +out: + fun_free_rings(dev, qset); + return err; +} + +static int fun_port_create(struct net_device *netdev) +{ + struct funeth_priv *fp = netdev_priv(netdev); + union { + struct fun_admin_port_req req; + struct fun_admin_port_rsp rsp; + } cmd; + int rc; + + if (fp->lport != INVALID_LPORT) + return 0; + + cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_PORT, + sizeof(cmd.req)); + cmd.req.u.create = + FUN_ADMIN_PORT_CREATE_REQ_INIT(FUN_ADMIN_SUBOP_CREATE, 0, + netdev->dev_port); + + rc = fun_submit_admin_sync_cmd(fp->fdev, &cmd.req.common, &cmd.rsp, + sizeof(cmd.rsp), 0); + + if (!rc) + fp->lport = be16_to_cpu(cmd.rsp.u.create.lport); + return rc; +} + +static int fun_port_destroy(struct net_device *netdev) +{ + struct funeth_priv *fp = netdev_priv(netdev); + + if (fp->lport == INVALID_LPORT) + return 0; + + fp->lport = INVALID_LPORT; + return fun_res_destroy(fp->fdev, FUN_ADMIN_OP_PORT, 0, + netdev->dev_port); +} + +static int fun_eth_create(struct funeth_priv *fp) +{ + union { + struct fun_admin_eth_req req; + struct fun_admin_generic_create_rsp rsp; + } cmd; + int rc; + + cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_ETH, + sizeof(cmd.req)); + cmd.req.u.create = FUN_ADMIN_ETH_CREATE_REQ_INIT( + FUN_ADMIN_SUBOP_CREATE, + FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR, + 0, fp->netdev->dev_port); + + rc = fun_submit_admin_sync_cmd(fp->fdev, &cmd.req.common, &cmd.rsp, + sizeof(cmd.rsp), 0); + return rc ? rc : be32_to_cpu(cmd.rsp.id); +} + +static int fun_vi_create(struct funeth_priv *fp) +{ + struct fun_admin_vi_req req = { + .common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_VI, + sizeof(req)), + .u.create = FUN_ADMIN_VI_CREATE_REQ_INIT(FUN_ADMIN_SUBOP_CREATE, + 0, + fp->netdev->dev_port, + fp->netdev->dev_port) + }; + + return fun_submit_admin_sync_cmd(fp->fdev, &req.common, NULL, 0, 0); +} + +/* Helper to create an ETH flow and bind an SQ to it. + * Returns the ETH id (>= 0) on success or a negative error. 
+ */ +int fun_create_and_bind_tx(struct funeth_priv *fp, u32 sqid) +{ + int rc, ethid; + + ethid = fun_eth_create(fp); + if (ethid >= 0) { + rc = fun_bind(fp->fdev, FUN_ADMIN_BIND_TYPE_EPSQ, sqid, + FUN_ADMIN_BIND_TYPE_ETH, ethid); + if (rc) { + fun_res_destroy(fp->fdev, FUN_ADMIN_OP_ETH, 0, ethid); + ethid = rc; + } + } + return ethid; +} + +static irqreturn_t fun_queue_irq_handler(int irq, void *data) +{ + struct fun_irq *p = data; + + if (p->rxq) { + prefetch(p->rxq->next_cqe_info); + p->rxq->irq_cnt++; + } + napi_schedule_irqoff(&p->napi); + return IRQ_HANDLED; +} + +static int fun_enable_irqs(struct net_device *dev) +{ + struct funeth_priv *fp = netdev_priv(dev); + unsigned long idx, last; + unsigned int qidx; + struct fun_irq *p; + const char *qtype; + int err; + + xa_for_each(&fp->irqs, idx, p) { + if (p->txq) { + qtype = "tx"; + qidx = p->txq->qidx; + } else if (p->rxq) { + qtype = "rx"; + qidx = p->rxq->qidx; + } else { + continue; + } + + if (p->state != FUN_IRQ_INIT) + continue; + + snprintf(p->name, sizeof(p->name) - 1, "%s-%s-%u", dev->name, + qtype, qidx); + err = request_irq(p->irq, fun_queue_irq_handler, 0, p->name, p); + if (err) { + netdev_err(dev, "Failed to allocate IRQ %u, err %d\n", + p->irq, err); + goto unroll; + } + p->state = FUN_IRQ_REQUESTED; + } + + xa_for_each(&fp->irqs, idx, p) { + if (p->state != FUN_IRQ_REQUESTED) + continue; + irq_set_affinity_notifier(p->irq, &p->aff_notify); + irq_set_affinity_and_hint(p->irq, &p->affinity_mask); + napi_enable(&p->napi); + p->state = FUN_IRQ_ENABLED; + } + + return 0; + +unroll: + last = idx - 1; + xa_for_each_range(&fp->irqs, idx, p, 0, last) + if (p->state == FUN_IRQ_REQUESTED) { + free_irq(p->irq, p); + p->state = FUN_IRQ_INIT; + } + + return err; +} + +static void fun_disable_one_irq(struct fun_irq *irq) +{ + napi_disable(&irq->napi); + irq_set_affinity_notifier(irq->irq, NULL); + irq_update_affinity_hint(irq->irq, NULL); + free_irq(irq->irq, irq); + irq->state = FUN_IRQ_INIT; +} + +static void fun_disable_irqs(struct net_device *dev) +{ + struct funeth_priv *fp = netdev_priv(dev); + struct fun_irq *p; + unsigned long idx; + + xa_for_each(&fp->irqs, idx, p) + if (p->state == FUN_IRQ_ENABLED) + fun_disable_one_irq(p); +} + +static void fun_down(struct net_device *dev, struct fun_qset *qset) +{ + struct funeth_priv *fp = netdev_priv(dev); + + /* If we don't have queues the data path is already down. + * Note netif_running(dev) may be true. + */ + if (!rcu_access_pointer(fp->rxqs)) + return; + + /* It is also down if the queues aren't on the device. 
*/ + if (fp->txqs[0]->init_state >= FUN_QSTATE_INIT_FULL) { + netif_info(fp, ifdown, dev, + "Tearing down data path on device\n"); + fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_DISABLE, 0); + + netif_carrier_off(dev); + netif_tx_disable(dev); + + fun_destroy_rss(fp); + fun_res_destroy(fp->fdev, FUN_ADMIN_OP_VI, 0, dev->dev_port); + fun_disable_irqs(dev); + } + + fun_free_rings(dev, qset); +} + +static int fun_up(struct net_device *dev, struct fun_qset *qset) +{ + static const int port_keys[] = { + FUN_ADMIN_PORT_KEY_STATS_DMA_LOW, + FUN_ADMIN_PORT_KEY_STATS_DMA_HIGH, + FUN_ADMIN_PORT_KEY_ENABLE + }; + + struct funeth_priv *fp = netdev_priv(dev); + u64 vals[] = { + lower_32_bits(fp->stats_dma_addr), + upper_32_bits(fp->stats_dma_addr), + FUN_PORT_FLAG_ENABLE_NOTIFY + }; + int err; + + netif_info(fp, ifup, dev, "Setting up data path on device\n"); + + if (qset->rxqs[0]->init_state < FUN_QSTATE_INIT_FULL) { + err = fun_advance_ring_state(dev, qset); + if (err) + return err; + } + + err = fun_vi_create(fp); + if (err) + goto free_queues; + + fp->txqs = qset->txqs; + rcu_assign_pointer(fp->rxqs, qset->rxqs); + rcu_assign_pointer(fp->xdpqs, qset->xdpqs); + + err = fun_enable_irqs(dev); + if (err) + goto destroy_vi; + + if (fp->rss_cfg) { + err = fun_config_rss(dev, fp->hash_algo, fp->rss_key, + fp->indir_table, FUN_ADMIN_SUBOP_CREATE); + } else { + /* The non-RSS case has only 1 queue. */ + err = fun_bind(fp->fdev, FUN_ADMIN_BIND_TYPE_VI, dev->dev_port, + FUN_ADMIN_BIND_TYPE_EPCQ, + qset->rxqs[0]->hw_cqid); + } + if (err) + goto disable_irqs; + + err = fun_port_write_cmds(fp, 3, port_keys, vals); + if (err) + goto free_rss; + + netif_tx_start_all_queues(dev); + return 0; + +free_rss: + fun_destroy_rss(fp); +disable_irqs: + fun_disable_irqs(dev); +destroy_vi: + fun_res_destroy(fp->fdev, FUN_ADMIN_OP_VI, 0, dev->dev_port); +free_queues: + fun_free_rings(dev, qset); + return err; +} + +static int funeth_open(struct net_device *netdev) +{ + struct funeth_priv *fp = netdev_priv(netdev); + struct fun_qset qset = { + .nrxqs = netdev->real_num_rx_queues, + .ntxqs = netdev->real_num_tx_queues, + .nxdpqs = fp->num_xdpqs, + .cq_depth = fp->cq_depth, + .rq_depth = fp->rq_depth, + .sq_depth = fp->sq_depth, + .state = FUN_QSTATE_INIT_FULL, + }; + int rc; + + rc = fun_alloc_rings(netdev, &qset); + if (rc) + return rc; + + rc = fun_up(netdev, &qset); + if (rc) { + qset.state = FUN_QSTATE_DESTROYED; + fun_free_rings(netdev, &qset); + } + + return rc; +} + +static int funeth_close(struct net_device *netdev) +{ + struct fun_qset qset = { .state = FUN_QSTATE_DESTROYED }; + + fun_down(netdev, &qset); + return 0; +} + +static void fun_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct funeth_priv *fp = netdev_priv(netdev); + struct funeth_txq **xdpqs; + struct funeth_rxq **rxqs; + unsigned int i, start; + + stats->tx_packets = fp->tx_packets; + stats->tx_bytes = fp->tx_bytes; + stats->tx_dropped = fp->tx_dropped; + + stats->rx_packets = fp->rx_packets; + stats->rx_bytes = fp->rx_bytes; + stats->rx_dropped = fp->rx_dropped; + + rcu_read_lock(); + rxqs = rcu_dereference(fp->rxqs); + if (!rxqs) + goto unlock; + + for (i = 0; i < netdev->real_num_tx_queues; i++) { + struct funeth_txq_stats txs; + + FUN_QSTAT_READ(fp->txqs[i], start, txs); + stats->tx_packets += txs.tx_pkts; + stats->tx_bytes += txs.tx_bytes; + stats->tx_dropped += txs.tx_map_err; + } + + for (i = 0; i < netdev->real_num_rx_queues; i++) { + struct funeth_rxq_stats rxs; + + FUN_QSTAT_READ(rxqs[i], start, rxs); + 
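/* rx_dropped folds together DMA mapping failures (rx_map_err) and
+ * memory-related drops (rx_mem_drops) for this queue.
+ */
+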
stats->rx_packets += rxs.rx_pkts; + stats->rx_bytes += rxs.rx_bytes; + stats->rx_dropped += rxs.rx_map_err + rxs.rx_mem_drops; + } + + xdpqs = rcu_dereference(fp->xdpqs); + if (!xdpqs) + goto unlock; + + for (i = 0; i < fp->num_xdpqs; i++) { + struct funeth_txq_stats txs; + + FUN_QSTAT_READ(xdpqs[i], start, txs); + stats->tx_packets += txs.tx_pkts; + stats->tx_bytes += txs.tx_bytes; + } +unlock: + rcu_read_unlock(); +} + +static int fun_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct funeth_priv *fp = netdev_priv(netdev); + int rc; + + rc = fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_MTU, new_mtu); + if (!rc) + netdev->mtu = new_mtu; + return rc; +} + +static int fun_set_macaddr(struct net_device *netdev, void *addr) +{ + struct funeth_priv *fp = netdev_priv(netdev); + struct sockaddr *saddr = addr; + int rc; + + if (!is_valid_ether_addr(saddr->sa_data)) + return -EADDRNOTAVAIL; + + if (ether_addr_equal(netdev->dev_addr, saddr->sa_data)) + return 0; + + rc = fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_MACADDR, + ether_addr_to_u64(saddr->sa_data)); + if (!rc) + eth_hw_addr_set(netdev, saddr->sa_data); + return rc; +} + +static int fun_get_port_attributes(struct net_device *netdev) +{ + static const int keys[] = { + FUN_ADMIN_PORT_KEY_MACADDR, FUN_ADMIN_PORT_KEY_CAPABILITIES, + FUN_ADMIN_PORT_KEY_ADVERT, FUN_ADMIN_PORT_KEY_MTU + }; + static const int phys_keys[] = { + FUN_ADMIN_PORT_KEY_LANE_ATTRS, + }; + + struct funeth_priv *fp = netdev_priv(netdev); + u64 data[ARRAY_SIZE(keys)]; + u8 mac[ETH_ALEN]; + int i, rc; + + rc = fun_port_read_cmds(fp, ARRAY_SIZE(keys), keys, data); + if (rc) + return rc; + + for (i = 0; i < ARRAY_SIZE(keys); i++) { + switch (keys[i]) { + case FUN_ADMIN_PORT_KEY_MACADDR: + u64_to_ether_addr(data[i], mac); + if (is_zero_ether_addr(mac)) { + eth_hw_addr_random(netdev); + } else if (is_valid_ether_addr(mac)) { + eth_hw_addr_set(netdev, mac); + } else { + netdev_err(netdev, + "device provided a bad MAC address %pM\n", + mac); + return -EINVAL; + } + break; + + case FUN_ADMIN_PORT_KEY_CAPABILITIES: + fp->port_caps = data[i]; + break; + + case FUN_ADMIN_PORT_KEY_ADVERT: + fp->advertising = data[i]; + break; + + case FUN_ADMIN_PORT_KEY_MTU: + netdev->mtu = data[i]; + break; + } + } + + if (!(fp->port_caps & FUN_PORT_CAP_VPORT)) { + rc = fun_port_read_cmds(fp, ARRAY_SIZE(phys_keys), phys_keys, + data); + if (rc) + return rc; + + fp->lane_attrs = data[0]; + } + + if (netdev->addr_assign_type == NET_ADDR_RANDOM) + return fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_MACADDR, + ether_addr_to_u64(netdev->dev_addr)); + return 0; +} + +static int fun_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) +{ + const struct funeth_priv *fp = netdev_priv(dev); + + return copy_to_user(ifr->ifr_data, &fp->hwtstamp_cfg, + sizeof(fp->hwtstamp_cfg)) ? 
-EFAULT : 0; +} + +static int fun_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) +{ + struct funeth_priv *fp = netdev_priv(dev); + struct hwtstamp_config cfg; + + if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) + return -EFAULT; + + /* no TX HW timestamps */ + cfg.tx_type = HWTSTAMP_TX_OFF; + + switch (cfg.rx_filter) { + case HWTSTAMP_FILTER_NONE: + break; + case HWTSTAMP_FILTER_ALL: + case HWTSTAMP_FILTER_SOME: + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + case HWTSTAMP_FILTER_NTP_ALL: + cfg.rx_filter = HWTSTAMP_FILTER_ALL; + break; + default: + return -ERANGE; + } + + fp->hwtstamp_cfg = cfg; + return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; +} + +static int fun_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCSHWTSTAMP: + return fun_hwtstamp_set(dev, ifr); + case SIOCGHWTSTAMP: + return fun_hwtstamp_get(dev, ifr); + default: + return -EOPNOTSUPP; + } +} + +/* Prepare the queues for XDP. */ +static int fun_enter_xdp(struct net_device *dev, struct bpf_prog *prog) +{ + struct funeth_priv *fp = netdev_priv(dev); + unsigned int i, nqs = num_online_cpus(); + struct funeth_txq **xdpqs; + struct funeth_rxq **rxqs; + int err; + + xdpqs = alloc_xdpqs(dev, nqs, fp->sq_depth, 0, FUN_QSTATE_INIT_FULL); + if (IS_ERR(xdpqs)) + return PTR_ERR(xdpqs); + + rxqs = rtnl_dereference(fp->rxqs); + for (i = 0; i < dev->real_num_rx_queues; i++) { + err = fun_rxq_set_bpf(rxqs[i], prog); + if (err) + goto out; + } + + fp->num_xdpqs = nqs; + rcu_assign_pointer(fp->xdpqs, xdpqs); + return 0; +out: + while (i--) + fun_rxq_set_bpf(rxqs[i], NULL); + + free_xdpqs(xdpqs, nqs, 0, FUN_QSTATE_DESTROYED); + return err; +} + +/* Set the queues for non-XDP operation. */ +static void fun_end_xdp(struct net_device *dev) +{ + struct funeth_priv *fp = netdev_priv(dev); + struct funeth_txq **xdpqs; + struct funeth_rxq **rxqs; + unsigned int i; + + xdpqs = rtnl_dereference(fp->xdpqs); + rcu_assign_pointer(fp->xdpqs, NULL); + synchronize_net(); + /* at this point both Rx and Tx XDP processing has ended */ + + free_xdpqs(xdpqs, fp->num_xdpqs, 0, FUN_QSTATE_DESTROYED); + fp->num_xdpqs = 0; + + rxqs = rtnl_dereference(fp->rxqs); + for (i = 0; i < dev->real_num_rx_queues; i++) + fun_rxq_set_bpf(rxqs[i], NULL); +} + +#define XDP_MAX_MTU \ + (PAGE_SIZE - FUN_XDP_HEADROOM - VLAN_ETH_HLEN - FUN_RX_TAILROOM) + +static int fun_xdp_setup(struct net_device *dev, struct netdev_bpf *xdp) +{ + struct bpf_prog *old_prog, *prog = xdp->prog; + struct funeth_priv *fp = netdev_priv(dev); + int i, err; + + /* XDP uses at most one buffer */ + if (prog && dev->mtu > XDP_MAX_MTU) { + netdev_err(dev, "device MTU %u too large for XDP\n", dev->mtu); + NL_SET_ERR_MSG_MOD(xdp->extack, + "Device MTU too large for XDP"); + return -EINVAL; + } + + if (!netif_running(dev)) { + fp->num_xdpqs = prog ? 
num_online_cpus() : 0; + } else if (prog && !fp->xdp_prog) { + err = fun_enter_xdp(dev, prog); + if (err) { + NL_SET_ERR_MSG_MOD(xdp->extack, + "Failed to set queues for XDP."); + return err; + } + } else if (!prog && fp->xdp_prog) { + fun_end_xdp(dev); + } else { + struct funeth_rxq **rxqs = rtnl_dereference(fp->rxqs); + + for (i = 0; i < dev->real_num_rx_queues; i++) + WRITE_ONCE(rxqs[i]->xdp_prog, prog); + } + + dev->max_mtu = prog ? XDP_MAX_MTU : FUN_MAX_MTU; + old_prog = xchg(&fp->xdp_prog, prog); + if (old_prog) + bpf_prog_put(old_prog); + + return 0; +} + +static int fun_xdp(struct net_device *dev, struct netdev_bpf *xdp) +{ + switch (xdp->command) { + case XDP_SETUP_PROG: + return fun_xdp_setup(dev, xdp); + default: + return -EINVAL; + } +} + +static struct devlink_port *fun_get_devlink_port(struct net_device *netdev) +{ + struct funeth_priv *fp = netdev_priv(netdev); + + return &fp->dl_port; +} + +static int fun_init_vports(struct fun_ethdev *ed, unsigned int n) +{ + if (ed->num_vports) + return -EINVAL; + + ed->vport_info = kvcalloc(n, sizeof(*ed->vport_info), GFP_KERNEL); + if (!ed->vport_info) + return -ENOMEM; + ed->num_vports = n; + return 0; +} + +static void fun_free_vports(struct fun_ethdev *ed) +{ + kvfree(ed->vport_info); + ed->vport_info = NULL; + ed->num_vports = 0; +} + +static struct fun_vport_info *fun_get_vport(struct fun_ethdev *ed, + unsigned int vport) +{ + if (!ed->vport_info || vport >= ed->num_vports) + return NULL; + + return ed->vport_info + vport; +} + +static int fun_set_vf_mac(struct net_device *dev, int vf, u8 *mac) +{ + struct funeth_priv *fp = netdev_priv(dev); + struct fun_adi_param mac_param = {}; + struct fun_dev *fdev = fp->fdev; + struct fun_ethdev *ed = to_fun_ethdev(fdev); + struct fun_vport_info *vi; + int rc = -EINVAL; + + if (is_multicast_ether_addr(mac)) + return -EINVAL; + + mutex_lock(&ed->state_mutex); + vi = fun_get_vport(ed, vf); + if (!vi) + goto unlock; + + mac_param.u.mac = FUN_ADI_MAC_INIT(ether_addr_to_u64(mac)); + rc = fun_adi_write(fdev, FUN_ADMIN_ADI_ATTR_MACADDR, vf + 1, + &mac_param); + if (!rc) + ether_addr_copy(vi->mac, mac); +unlock: + mutex_unlock(&ed->state_mutex); + return rc; +} + +static int fun_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos, + __be16 vlan_proto) +{ + struct funeth_priv *fp = netdev_priv(dev); + struct fun_adi_param vlan_param = {}; + struct fun_dev *fdev = fp->fdev; + struct fun_ethdev *ed = to_fun_ethdev(fdev); + struct fun_vport_info *vi; + int rc = -EINVAL; + + if (vlan > 4095 || qos > 7) + return -EINVAL; + if (vlan_proto && vlan_proto != htons(ETH_P_8021Q) && + vlan_proto != htons(ETH_P_8021AD)) + return -EINVAL; + + mutex_lock(&ed->state_mutex); + vi = fun_get_vport(ed, vf); + if (!vi) + goto unlock; + + vlan_param.u.vlan = FUN_ADI_VLAN_INIT(be16_to_cpu(vlan_proto), + ((u16)qos << VLAN_PRIO_SHIFT) | vlan); + rc = fun_adi_write(fdev, FUN_ADMIN_ADI_ATTR_VLAN, vf + 1, &vlan_param); + if (!rc) { + vi->vlan = vlan; + vi->qos = qos; + vi->vlan_proto = vlan_proto; + } +unlock: + mutex_unlock(&ed->state_mutex); + return rc; +} + +static int fun_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, + int max_tx_rate) +{ + struct funeth_priv *fp = netdev_priv(dev); + struct fun_adi_param rate_param = {}; + struct fun_dev *fdev = fp->fdev; + struct fun_ethdev *ed = to_fun_ethdev(fdev); + struct fun_vport_info *vi; + int rc = -EINVAL; + + if (min_tx_rate) + return -EINVAL; + + mutex_lock(&ed->state_mutex); + vi = fun_get_vport(ed, vf); + if (!vi) + goto unlock; + + rate_param.u.rate = 
FUN_ADI_RATE_INIT(max_tx_rate);
+ rc = fun_adi_write(fdev, FUN_ADMIN_ADI_ATTR_RATE, vf + 1, &rate_param);
+ if (!rc)
+ vi->max_rate = max_tx_rate;
+unlock:
+ mutex_unlock(&ed->state_mutex);
+ return rc;
+}
+
+static int fun_get_vf_config(struct net_device *dev, int vf,
+ struct ifla_vf_info *ivi)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ struct fun_ethdev *ed = to_fun_ethdev(fp->fdev);
+ const struct fun_vport_info *vi;
+
+ mutex_lock(&ed->state_mutex);
+ vi = fun_get_vport(ed, vf);
+ if (!vi)
+ goto unlock;
+
+ memset(ivi, 0, sizeof(*ivi));
+ ivi->vf = vf;
+ ether_addr_copy(ivi->mac, vi->mac);
+ ivi->vlan = vi->vlan;
+ ivi->qos = vi->qos;
+ ivi->vlan_proto = vi->vlan_proto;
+ ivi->max_tx_rate = vi->max_rate;
+ ivi->spoofchk = vi->spoofchk;
+unlock:
+ mutex_unlock(&ed->state_mutex);
+ return vi ? 0 : -EINVAL;
+}
+
+static void fun_uninit(struct net_device *dev)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+
+ fun_prune_queue_irqs(dev);
+ xa_destroy(&fp->irqs);
+}
+
+static const struct net_device_ops fun_netdev_ops = {
+ .ndo_open = funeth_open,
+ .ndo_stop = funeth_close,
+ .ndo_start_xmit = fun_start_xmit,
+ .ndo_get_stats64 = fun_get_stats64,
+ .ndo_change_mtu = fun_change_mtu,
+ .ndo_set_mac_address = fun_set_macaddr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_eth_ioctl = fun_ioctl,
+ .ndo_uninit = fun_uninit,
+ .ndo_bpf = fun_xdp,
+ .ndo_xdp_xmit = fun_xdp_xmit_frames,
+ .ndo_set_vf_mac = fun_set_vf_mac,
+ .ndo_set_vf_vlan = fun_set_vf_vlan,
+ .ndo_set_vf_rate = fun_set_vf_rate,
+ .ndo_get_vf_config = fun_get_vf_config,
+ .ndo_get_devlink_port = fun_get_devlink_port,
+};
+
+#define GSO_ENCAP_FLAGS (NETIF_F_GSO_GRE | NETIF_F_GSO_IPXIP4 | \
+ NETIF_F_GSO_IPXIP6 | NETIF_F_GSO_UDP_TUNNEL | \
+ NETIF_F_GSO_UDP_TUNNEL_CSUM)
+#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
+#define VLAN_FEAT (NETIF_F_SG | NETIF_F_HW_CSUM | TSO_FLAGS | \
+ GSO_ENCAP_FLAGS | NETIF_F_HIGHDMA)
+
+static void fun_dflt_rss_indir(struct funeth_priv *fp, unsigned int nrx)
+{
+ unsigned int i;
+
+ for (i = 0; i < fp->indir_table_nentries; i++)
+ fp->indir_table[i] = ethtool_rxfh_indir_default(i, nrx);
+}
+
+/* Reset the RSS indirection table to equal distribution across the current
+ * number of Rx queues. Called at init time and whenever the number of Rx
+ * queues changes subsequently. Note that this may also resize the indirection
+ * table.
+ */
+static void fun_reset_rss_indir(struct net_device *dev, unsigned int nrx)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+
+ if (!fp->rss_cfg)
+ return;
+
+ /* Set the table size to the max possible that allows an equal number
+ * of occurrences of each CQ.
+ */
+ fp->indir_table_nentries = rounddown(FUN_ETH_RSS_MAX_INDIR_ENT, nrx);
+ fun_dflt_rss_indir(fp, nrx);
+}
+
+/* Update the RSS LUT to contain only queues in [0, nrx). Normally this will
+ * update the LUT to an equal distribution among nrx queues. If @only_if_needed
+ * is set the LUT is left unchanged if it already does not reference any queues
+ * >= nrx.
+ */
+static int fun_rss_set_qnum(struct net_device *dev, unsigned int nrx,
+ bool only_if_needed)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ u32 old_lut[FUN_ETH_RSS_MAX_INDIR_ENT];
+ unsigned int i, oldsz;
+ int err;
+
+ if (!fp->rss_cfg)
+ return 0;
+
+ if (only_if_needed) {
+ for (i = 0; i < fp->indir_table_nentries; i++)
+ if (fp->indir_table[i] >= nrx)
+ break;
+
+ if (i >= fp->indir_table_nentries)
+ return 0;
+ }
+
+ memcpy(old_lut, fp->indir_table, sizeof(old_lut));
+ oldsz = fp->indir_table_nentries;
+ fun_reset_rss_indir(dev, nrx);
+
+ err = fun_config_rss(dev, fp->hash_algo, fp->rss_key,
+ fp->indir_table, FUN_ADMIN_SUBOP_MODIFY);
+ if (!err)
+ return 0;
+
+ memcpy(fp->indir_table, old_lut, sizeof(old_lut));
+ fp->indir_table_nentries = oldsz;
+ return err;
+}
+
+/* Allocate the DMA area for the RSS configuration commands to the device, and
+ * initialize the hash algorithm, hash key, indirection table size and its
+ * entries to their defaults. The indirection table defaults to equal
+ * distribution across the Rx queues.
+ */
+static int fun_init_rss(struct net_device *dev)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ size_t size = sizeof(fp->rss_key) + sizeof(fp->indir_table);
+
+ fp->rss_hw_id = FUN_HCI_ID_INVALID;
+ if (!(fp->port_caps & FUN_PORT_CAP_OFFLOADS))
+ return 0;
+
+ fp->rss_cfg = dma_alloc_coherent(&fp->pdev->dev, size,
+ &fp->rss_dma_addr, GFP_KERNEL);
+ if (!fp->rss_cfg)
+ return -ENOMEM;
+
+ fp->hash_algo = FUN_ETH_RSS_ALG_TOEPLITZ;
+ netdev_rss_key_fill(fp->rss_key, sizeof(fp->rss_key));
+ fun_reset_rss_indir(dev, dev->real_num_rx_queues);
+ return 0;
+}
+
+static void fun_free_rss(struct funeth_priv *fp)
+{
+ if (fp->rss_cfg) {
+ dma_free_coherent(&fp->pdev->dev,
+ sizeof(fp->rss_key) + sizeof(fp->indir_table),
+ fp->rss_cfg, fp->rss_dma_addr);
+ fp->rss_cfg = NULL;
+ }
+}
+
+void fun_set_ring_count(struct net_device *netdev, unsigned int ntx,
+ unsigned int nrx)
+{
+ netif_set_real_num_tx_queues(netdev, ntx);
+ if (nrx != netdev->real_num_rx_queues) {
+ netif_set_real_num_rx_queues(netdev, nrx);
+ fun_reset_rss_indir(netdev, nrx);
+ }
+}
+
+static int fun_init_stats_area(struct funeth_priv *fp)
+{
+ unsigned int nstats;
+
+ if (!(fp->port_caps & FUN_PORT_CAP_STATS))
+ return 0;
+
+ nstats = PORT_MAC_RX_STATS_MAX + PORT_MAC_TX_STATS_MAX +
+ PORT_MAC_FEC_STATS_MAX;
+
+ fp->stats = dma_alloc_coherent(&fp->pdev->dev, nstats * sizeof(u64),
+ &fp->stats_dma_addr, GFP_KERNEL);
+ if (!fp->stats)
+ return -ENOMEM;
+ return 0;
+}
+
+static void fun_free_stats_area(struct funeth_priv *fp)
+{
+ unsigned int nstats;
+
+ if (fp->stats) {
+ nstats = PORT_MAC_RX_STATS_MAX + PORT_MAC_TX_STATS_MAX +
+ PORT_MAC_FEC_STATS_MAX;
+ dma_free_coherent(&fp->pdev->dev, nstats * sizeof(u64),
+ fp->stats, fp->stats_dma_addr);
+ fp->stats = NULL;
+ }
+}
+
+static int fun_dl_port_register(struct net_device *netdev)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ struct devlink *dl = priv_to_devlink(fp->fdev);
+ struct devlink_port_attrs attrs = {};
+ unsigned int idx;
+
+ if (fp->port_caps & FUN_PORT_CAP_VPORT) {
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_VIRTUAL;
+ idx = fp->lport;
+ } else {
+ idx = netdev->dev_port;
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ attrs.lanes = fp->lane_attrs & 7;
+ if (fp->lane_attrs & FUN_PORT_LANE_SPLIT) {
+ attrs.split = 1;
+ attrs.phys.port_number = fp->lport & ~3;
+ attrs.phys.split_subport_number = fp->lport & 3;
+ } else {
+ attrs.phys.port_number = fp->lport;
+ }
+ }
+
+ devlink_port_attrs_set(&fp->dl_port, &attrs);
+
+ return devlink_port_register(dl,
&fp->dl_port, idx);
+}
+
+/* Determine the max Tx/Rx queues for a port. */
+static int fun_max_qs(struct fun_ethdev *ed, unsigned int *ntx,
+ unsigned int *nrx)
+{
+ int neth;
+
+ if (ed->num_ports > 1 || is_kdump_kernel()) {
+ *ntx = 1;
+ *nrx = 1;
+ return 0;
+ }
+
+ neth = fun_get_res_count(&ed->fdev, FUN_ADMIN_OP_ETH);
+ if (neth < 0)
+ return neth;
+
+ /* We determine the max number of queues based on the CPU
+ * cores, device interrupts and queues, RSS size, and device Tx flows.
+ *
+ * - At least 1 Rx and 1 Tx queue.
+ * - At most 1 Rx/Tx queue per core.
+ * - Each Rx/Tx queue needs 1 SQ.
+ */
+ *ntx = min(ed->nsqs_per_port - 1, num_online_cpus());
+ *nrx = *ntx;
+ if (*ntx > neth)
+ *ntx = neth;
+ if (*nrx > FUN_ETH_RSS_MAX_INDIR_ENT)
+ *nrx = FUN_ETH_RSS_MAX_INDIR_ENT;
+ return 0;
+}
+
+static void fun_queue_defaults(struct net_device *dev, unsigned int nsqs)
+{
+ unsigned int ntx, nrx;
+
+ ntx = min(dev->num_tx_queues, FUN_DFLT_QUEUES);
+ nrx = min(dev->num_rx_queues, FUN_DFLT_QUEUES);
+ if (ntx <= nrx) {
+ ntx = min(ntx, nsqs / 2);
+ nrx = min(nrx, nsqs - ntx);
+ } else {
+ nrx = min(nrx, nsqs / 2);
+ ntx = min(ntx, nsqs - nrx);
+ }
+
+ netif_set_real_num_tx_queues(dev, ntx);
+ netif_set_real_num_rx_queues(dev, nrx);
+}
+
+/* Replace the existing Rx/Tx/XDP queues with an equal number of queues with
+ * different settings, e.g. depth. This is a disruptive replacement that
+ * temporarily shuts down the data path and should be limited to changes that
+ * can't be applied to live queues. The old queues are always discarded.
+ */
+int fun_replace_queues(struct net_device *dev, struct fun_qset *newqs,
+ struct netlink_ext_ack *extack)
+{
+ struct fun_qset oldqs = { .state = FUN_QSTATE_DESTROYED };
+ struct funeth_priv *fp = netdev_priv(dev);
+ int err;
+
+ newqs->nrxqs = dev->real_num_rx_queues;
+ newqs->ntxqs = dev->real_num_tx_queues;
+ newqs->nxdpqs = fp->num_xdpqs;
+ newqs->state = FUN_QSTATE_INIT_SW;
+ err = fun_alloc_rings(dev, newqs);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unable to allocate memory for new queues, keeping current settings");
+ return err;
+ }
+
+ fun_down(dev, &oldqs);
+
+ err = fun_up(dev, newqs);
+ if (!err)
+ return 0;
+
+ /* The new queues couldn't be installed. We do not retry the old queues
+ * as they look the same to the device as the new queues and would
+ * similarly fail.
+ */
+ newqs->state = FUN_QSTATE_DESTROYED;
+ fun_free_rings(dev, newqs);
+ NL_SET_ERR_MSG_MOD(extack, "Unable to restore the data path with the new queues.");
+ return err;
+}
+
+/* Change the number of Rx/Tx queues of a device while it is up. This is done
+ * by incrementally adding/removing queues to meet the new requirements while
+ * handling ongoing traffic.
+ */ +int fun_change_num_queues(struct net_device *dev, unsigned int ntx, + unsigned int nrx) +{ + unsigned int keep_tx = min(dev->real_num_tx_queues, ntx); + unsigned int keep_rx = min(dev->real_num_rx_queues, nrx); + struct funeth_priv *fp = netdev_priv(dev); + struct fun_qset oldqs = { + .rxqs = rtnl_dereference(fp->rxqs), + .txqs = fp->txqs, + .nrxqs = dev->real_num_rx_queues, + .ntxqs = dev->real_num_tx_queues, + .rxq_start = keep_rx, + .txq_start = keep_tx, + .state = FUN_QSTATE_DESTROYED + }; + struct fun_qset newqs = { + .nrxqs = nrx, + .ntxqs = ntx, + .rxq_start = keep_rx, + .txq_start = keep_tx, + .cq_depth = fp->cq_depth, + .rq_depth = fp->rq_depth, + .sq_depth = fp->sq_depth, + .state = FUN_QSTATE_INIT_FULL + }; + int i, err; + + err = fun_alloc_rings(dev, &newqs); + if (err) + goto free_irqs; + + err = fun_enable_irqs(dev); /* of any newly added queues */ + if (err) + goto free_rings; + + /* copy the queues we are keeping to the new set */ + memcpy(newqs.rxqs, oldqs.rxqs, keep_rx * sizeof(*oldqs.rxqs)); + memcpy(newqs.txqs, fp->txqs, keep_tx * sizeof(*fp->txqs)); + + if (nrx < dev->real_num_rx_queues) { + err = fun_rss_set_qnum(dev, nrx, true); + if (err) + goto disable_tx_irqs; + + for (i = nrx; i < dev->real_num_rx_queues; i++) + fun_disable_one_irq(container_of(oldqs.rxqs[i]->napi, + struct fun_irq, napi)); + + netif_set_real_num_rx_queues(dev, nrx); + } + + if (ntx < dev->real_num_tx_queues) + netif_set_real_num_tx_queues(dev, ntx); + + rcu_assign_pointer(fp->rxqs, newqs.rxqs); + fp->txqs = newqs.txqs; + synchronize_net(); + + if (ntx > dev->real_num_tx_queues) + netif_set_real_num_tx_queues(dev, ntx); + + if (nrx > dev->real_num_rx_queues) { + netif_set_real_num_rx_queues(dev, nrx); + fun_rss_set_qnum(dev, nrx, false); + } + + /* disable interrupts of any excess Tx queues */ + for (i = keep_tx; i < oldqs.ntxqs; i++) + fun_disable_one_irq(oldqs.txqs[i]->irq); + + fun_free_rings(dev, &oldqs); + fun_prune_queue_irqs(dev); + return 0; + +disable_tx_irqs: + for (i = oldqs.ntxqs; i < ntx; i++) + fun_disable_one_irq(newqs.txqs[i]->irq); +free_rings: + newqs.state = FUN_QSTATE_DESTROYED; + fun_free_rings(dev, &newqs); +free_irqs: + fun_prune_queue_irqs(dev); + return err; +} + +static int fun_create_netdev(struct fun_ethdev *ed, unsigned int portid) +{ + struct fun_dev *fdev = &ed->fdev; + struct net_device *netdev; + struct funeth_priv *fp; + unsigned int ntx, nrx; + int rc; + + rc = fun_max_qs(ed, &ntx, &nrx); + if (rc) + return rc; + + netdev = alloc_etherdev_mqs(sizeof(*fp), ntx, nrx); + if (!netdev) { + rc = -ENOMEM; + goto done; + } + + netdev->dev_port = portid; + fun_queue_defaults(netdev, ed->nsqs_per_port); + + fp = netdev_priv(netdev); + fp->fdev = fdev; + fp->pdev = to_pci_dev(fdev->dev); + fp->netdev = netdev; + xa_init(&fp->irqs); + fp->rx_irq_ofst = ntx; + seqcount_init(&fp->link_seq); + + fp->lport = INVALID_LPORT; + rc = fun_port_create(netdev); + if (rc) + goto free_netdev; + + /* bind port to admin CQ for async events */ + rc = fun_bind(fdev, FUN_ADMIN_BIND_TYPE_PORT, portid, + FUN_ADMIN_BIND_TYPE_EPCQ, 0); + if (rc) + goto destroy_port; + + rc = fun_get_port_attributes(netdev); + if (rc) + goto destroy_port; + + rc = fun_init_rss(netdev); + if (rc) + goto destroy_port; + + rc = fun_init_stats_area(fp); + if (rc) + goto free_rss; + + SET_NETDEV_DEV(netdev, fdev->dev); + netdev->netdev_ops = &fun_netdev_ops; + + netdev->hw_features = NETIF_F_SG | NETIF_F_RXHASH | NETIF_F_RXCSUM; + if (fp->port_caps & FUN_PORT_CAP_OFFLOADS) + netdev->hw_features |= 
NETIF_F_HW_CSUM | TSO_FLAGS; + if (fp->port_caps & FUN_PORT_CAP_ENCAP_OFFLOADS) + netdev->hw_features |= GSO_ENCAP_FLAGS; + + netdev->features |= netdev->hw_features | NETIF_F_HIGHDMA; + netdev->vlan_features = netdev->features & VLAN_FEAT; + netdev->mpls_features = netdev->vlan_features; + netdev->hw_enc_features = netdev->hw_features; + + netdev->min_mtu = ETH_MIN_MTU; + netdev->max_mtu = FUN_MAX_MTU; + + fun_set_ethtool_ops(netdev); + + /* configurable parameters */ + fp->sq_depth = min(SQ_DEPTH, fdev->q_depth); + fp->cq_depth = min(CQ_DEPTH, fdev->q_depth); + fp->rq_depth = min_t(unsigned int, RQ_DEPTH, fdev->q_depth); + fp->rx_coal_usec = CQ_INTCOAL_USEC; + fp->rx_coal_count = CQ_INTCOAL_NPKT; + fp->tx_coal_usec = SQ_INTCOAL_USEC; + fp->tx_coal_count = SQ_INTCOAL_NPKT; + fp->cq_irq_db = FUN_IRQ_CQ_DB(fp->rx_coal_usec, fp->rx_coal_count); + + rc = fun_dl_port_register(netdev); + if (rc) + goto free_stats; + + fp->ktls_id = FUN_HCI_ID_INVALID; + fun_ktls_init(netdev); /* optional, failure OK */ + + netif_carrier_off(netdev); + ed->netdevs[portid] = netdev; + rc = register_netdev(netdev); + if (rc) + goto unreg_devlink; + + if (fp->dl_port.devlink) + devlink_port_type_eth_set(&fp->dl_port, netdev); + + return 0; + +unreg_devlink: + ed->netdevs[portid] = NULL; + fun_ktls_cleanup(fp); + if (fp->dl_port.devlink) + devlink_port_unregister(&fp->dl_port); +free_stats: + fun_free_stats_area(fp); +free_rss: + fun_free_rss(fp); +destroy_port: + fun_port_destroy(netdev); +free_netdev: + free_netdev(netdev); +done: + dev_err(fdev->dev, "couldn't allocate port %u, error %d", portid, rc); + return rc; +} + +static void fun_destroy_netdev(struct net_device *netdev) +{ + struct funeth_priv *fp; + + fp = netdev_priv(netdev); + if (fp->dl_port.devlink) { + devlink_port_type_clear(&fp->dl_port); + devlink_port_unregister(&fp->dl_port); + } + unregister_netdev(netdev); + fun_ktls_cleanup(fp); + fun_free_stats_area(fp); + fun_free_rss(fp); + fun_port_destroy(netdev); + free_netdev(netdev); +} + +static int fun_create_ports(struct fun_ethdev *ed, unsigned int nports) +{ + struct fun_dev *fd = &ed->fdev; + int i, rc; + + /* The admin queue takes 1 IRQ and 2 SQs. 
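+ * Hence the "- 1" and "- 2" below. E.g., with 33 MSI-X vectors and
+ * kern_end_qid = 34 (illustrative numbers only), two ports would get
+ * min(33 - 1, 34 - 2) / 2 = 16 SQs each.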
*/ + ed->nsqs_per_port = min(fd->num_irqs - 1, + fd->kern_end_qid - 2) / nports; + if (ed->nsqs_per_port < 2) { + dev_err(fd->dev, "Too few SQs for %u ports", nports); + return -EINVAL; + } + + ed->netdevs = kcalloc(nports, sizeof(*ed->netdevs), GFP_KERNEL); + if (!ed->netdevs) + return -ENOMEM; + + ed->num_ports = nports; + for (i = 0; i < nports; i++) { + rc = fun_create_netdev(ed, i); + if (rc) + goto free_netdevs; + } + + return 0; + +free_netdevs: + while (i) + fun_destroy_netdev(ed->netdevs[--i]); + kfree(ed->netdevs); + ed->netdevs = NULL; + ed->num_ports = 0; + return rc; +} + +static void fun_destroy_ports(struct fun_ethdev *ed) +{ + unsigned int i; + + for (i = 0; i < ed->num_ports; i++) + fun_destroy_netdev(ed->netdevs[i]); + + kfree(ed->netdevs); + ed->netdevs = NULL; + ed->num_ports = 0; +} + +static void fun_update_link_state(const struct fun_ethdev *ed, + const struct fun_admin_port_notif *notif) +{ + unsigned int port_idx = be16_to_cpu(notif->id); + struct net_device *netdev; + struct funeth_priv *fp; + + if (port_idx >= ed->num_ports) + return; + + netdev = ed->netdevs[port_idx]; + fp = netdev_priv(netdev); + + write_seqcount_begin(&fp->link_seq); + fp->link_speed = be32_to_cpu(notif->speed) * 10; /* 10 Mbps->Mbps */ + fp->active_fc = notif->flow_ctrl; + fp->active_fec = notif->fec; + fp->xcvr_type = notif->xcvr_type; + fp->link_down_reason = notif->link_down_reason; + fp->lp_advertising = be64_to_cpu(notif->lp_advertising); + + if ((notif->link_state | notif->missed_events) & FUN_PORT_FLAG_MAC_DOWN) + netif_carrier_off(netdev); + if (notif->link_state & FUN_PORT_FLAG_MAC_UP) + netif_carrier_on(netdev); + + write_seqcount_end(&fp->link_seq); + fun_report_link(netdev); +} + +/* handler for async events delivered through the admin CQ */ +static void fun_event_cb(struct fun_dev *fdev, void *entry) +{ + u8 op = ((struct fun_admin_rsp_common *)entry)->op; + + if (op == FUN_ADMIN_OP_PORT) { + const struct fun_admin_port_notif *rsp = entry; + + if (rsp->subop == FUN_ADMIN_SUBOP_NOTIFY) { + fun_update_link_state(to_fun_ethdev(fdev), rsp); + } else if (rsp->subop == FUN_ADMIN_SUBOP_RES_COUNT) { + const struct fun_admin_res_count_rsp *r = entry; + + if (r->count.data) + set_bit(FUN_SERV_RES_CHANGE, &fdev->service_flags); + else + set_bit(FUN_SERV_DEL_PORTS, &fdev->service_flags); + fun_serv_sched(fdev); + } else { + dev_info(fdev->dev, "adminq event unexpected op %u subop %u", + op, rsp->subop); + } + } else { + dev_info(fdev->dev, "adminq event unexpected op %u", op); + } +} + +/* handler for pending work managed by the service task */ +static void fun_service_cb(struct fun_dev *fdev) +{ + struct fun_ethdev *ed = to_fun_ethdev(fdev); + int rc; + + if (test_and_clear_bit(FUN_SERV_DEL_PORTS, &fdev->service_flags)) + fun_destroy_ports(ed); + + if (!test_and_clear_bit(FUN_SERV_RES_CHANGE, &fdev->service_flags)) + return; + + rc = fun_get_res_count(fdev, FUN_ADMIN_OP_PORT); + if (rc < 0 || rc == ed->num_ports) + return; + + if (ed->num_ports) + fun_destroy_ports(ed); + if (rc) + fun_create_ports(ed, rc); +} + +static int funeth_sriov_configure(struct pci_dev *pdev, int nvfs) +{ + struct fun_dev *fdev = pci_get_drvdata(pdev); + struct fun_ethdev *ed = to_fun_ethdev(fdev); + int rc; + + if (nvfs == 0) { + if (pci_vfs_assigned(pdev)) { + dev_warn(&pdev->dev, + "Cannot disable SR-IOV while VFs are assigned\n"); + return -EPERM; + } + + mutex_lock(&ed->state_mutex); + fun_free_vports(ed); + mutex_unlock(&ed->state_mutex); + pci_disable_sriov(pdev); + return 0; + } + + rc = 
pci_enable_sriov(pdev, nvfs); + if (rc) + return rc; + + mutex_lock(&ed->state_mutex); + rc = fun_init_vports(ed, nvfs); + mutex_unlock(&ed->state_mutex); + if (rc) { + pci_disable_sriov(pdev); + return rc; + } + + return nvfs; +} + +static int funeth_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct fun_dev_params aqreq = { + .cqe_size_log2 = ilog2(ADMIN_CQE_SIZE), + .sqe_size_log2 = ilog2(ADMIN_SQE_SIZE), + .cq_depth = ADMIN_CQ_DEPTH, + .sq_depth = ADMIN_SQ_DEPTH, + .rq_depth = ADMIN_RQ_DEPTH, + .min_msix = 2, /* 1 Rx + 1 Tx */ + .event_cb = fun_event_cb, + .serv_cb = fun_service_cb, + }; + struct devlink *devlink; + struct fun_ethdev *ed; + struct fun_dev *fdev; + int rc; + + devlink = fun_devlink_alloc(&pdev->dev); + if (!devlink) { + dev_err(&pdev->dev, "devlink alloc failed\n"); + return -ENOMEM; + } + + ed = devlink_priv(devlink); + mutex_init(&ed->state_mutex); + + fdev = &ed->fdev; + rc = fun_dev_enable(fdev, pdev, &aqreq, KBUILD_MODNAME); + if (rc) + goto free_devlink; + + rc = fun_get_res_count(fdev, FUN_ADMIN_OP_PORT); + if (rc > 0) + rc = fun_create_ports(ed, rc); + if (rc < 0) + goto disable_dev; + + fun_serv_restart(fdev); + fun_devlink_register(devlink); + return 0; + +disable_dev: + fun_dev_disable(fdev); +free_devlink: + mutex_destroy(&ed->state_mutex); + fun_devlink_free(devlink); + return rc; +} + +static void funeth_remove(struct pci_dev *pdev) +{ + struct fun_dev *fdev = pci_get_drvdata(pdev); + struct devlink *devlink; + struct fun_ethdev *ed; + + ed = to_fun_ethdev(fdev); + devlink = priv_to_devlink(ed); + fun_devlink_unregister(devlink); + +#ifdef CONFIG_PCI_IOV + funeth_sriov_configure(pdev, 0); +#endif + + fun_serv_stop(fdev); + fun_destroy_ports(ed); + fun_dev_disable(fdev); + mutex_destroy(&ed->state_mutex); + + fun_devlink_free(devlink); +} + +static struct pci_driver funeth_driver = { + .name = KBUILD_MODNAME, + .id_table = funeth_id_table, + .probe = funeth_probe, + .remove = funeth_remove, + .shutdown = funeth_remove, + .sriov_configure = funeth_sriov_configure, +}; + +module_pci_driver(funeth_driver); + +MODULE_AUTHOR("Dimitris Michailidis <dmichail@fungible.com>"); +MODULE_DESCRIPTION("Fungible Ethernet Network Driver"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_DEVICE_TABLE(pci, funeth_id_table); diff --git a/drivers/net/ethernet/fungible/funeth/funeth_rx.c b/drivers/net/ethernet/fungible/funeth/funeth_rx.c new file mode 100644 index 000000000000..0f6a549b9f67 --- /dev/null +++ b/drivers/net/ethernet/fungible/funeth/funeth_rx.c @@ -0,0 +1,826 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) + +#include <linux/bpf_trace.h> +#include <linux/dma-mapping.h> +#include <linux/etherdevice.h> +#include <linux/filter.h> +#include <linux/irq.h> +#include <linux/pci.h> +#include <linux/skbuff.h> +#include "funeth_txrx.h" +#include "funeth.h" +#include "fun_queue.h" + +#define CREATE_TRACE_POINTS +#include "funeth_trace.h" + +/* Given the device's max supported MTU and pages of at least 4KB a packet can + * be scattered into at most 4 buffers. + */ +#define RX_MAX_FRAGS 4 + +/* Per packet headroom in non-XDP mode. Present only for 1-frag packets. */ +#define FUN_RX_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN) + +/* We try to reuse pages for our buffers. To avoid frequent page ref writes we + * take EXTRA_PAGE_REFS references at once and then hand them out one per packet + * occupying the buffer. 
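+ * With the values below a page's refcount is written roughly once per
+ * 999000 uses: refs are topped up by 1000000 whenever our stash drops
+ * under 1000.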
+ */ +#define EXTRA_PAGE_REFS 1000000 +#define MIN_PAGE_REFS 1000 + +enum { + FUN_XDP_FLUSH_REDIR = 1, + FUN_XDP_FLUSH_TX = 2, +}; + +/* See if a page is running low on refs we are holding and if so take more. */ +static void refresh_refs(struct funeth_rxbuf *buf) +{ + if (unlikely(buf->pg_refs < MIN_PAGE_REFS)) { + buf->pg_refs += EXTRA_PAGE_REFS; + page_ref_add(buf->page, EXTRA_PAGE_REFS); + } +} + +/* Offer a buffer to the Rx buffer cache. The cache will hold the buffer if its + * page is worth retaining and there's room for it. Otherwise the page is + * unmapped and our references released. + */ +static void cache_offer(struct funeth_rxq *q, const struct funeth_rxbuf *buf) +{ + struct funeth_rx_cache *c = &q->cache; + + if (c->prod_cnt - c->cons_cnt <= c->mask && buf->node == numa_mem_id()) { + c->bufs[c->prod_cnt & c->mask] = *buf; + c->prod_cnt++; + } else { + dma_unmap_page_attrs(q->dma_dev, buf->dma_addr, PAGE_SIZE, + DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); + __page_frag_cache_drain(buf->page, buf->pg_refs); + } +} + +/* Get a page from the Rx buffer cache. We only consider the next available + * page and return it if we own all its references. + */ +static bool cache_get(struct funeth_rxq *q, struct funeth_rxbuf *rb) +{ + struct funeth_rx_cache *c = &q->cache; + struct funeth_rxbuf *buf; + + if (c->prod_cnt == c->cons_cnt) + return false; /* empty cache */ + + buf = &c->bufs[c->cons_cnt & c->mask]; + if (page_ref_count(buf->page) == buf->pg_refs) { + dma_sync_single_for_device(q->dma_dev, buf->dma_addr, + PAGE_SIZE, DMA_FROM_DEVICE); + *rb = *buf; + buf->page = NULL; + refresh_refs(rb); + c->cons_cnt++; + return true; + } + + /* Page can't be reused. If the cache is full drop this page. */ + if (c->prod_cnt - c->cons_cnt > c->mask) { + dma_unmap_page_attrs(q->dma_dev, buf->dma_addr, PAGE_SIZE, + DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); + __page_frag_cache_drain(buf->page, buf->pg_refs); + buf->page = NULL; + c->cons_cnt++; + } + return false; +} + +/* Allocate and DMA-map a page for receive. */ +static int funeth_alloc_page(struct funeth_rxq *q, struct funeth_rxbuf *rb, + int node, gfp_t gfp) +{ + struct page *p; + + if (cache_get(q, rb)) + return 0; + + p = __alloc_pages_node(node, gfp | __GFP_NOWARN, 0); + if (unlikely(!p)) + return -ENOMEM; + + rb->dma_addr = dma_map_page(q->dma_dev, p, 0, PAGE_SIZE, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(q->dma_dev, rb->dma_addr))) { + FUN_QSTAT_INC(q, rx_map_err); + __free_page(p); + return -ENOMEM; + } + + FUN_QSTAT_INC(q, rx_page_alloc); + + rb->page = p; + rb->pg_refs = 1; + refresh_refs(rb); + rb->node = page_is_pfmemalloc(p) ? -1 : page_to_nid(p); + return 0; +} + +static void funeth_free_page(struct funeth_rxq *q, struct funeth_rxbuf *rb) +{ + if (rb->page) { + dma_unmap_page(q->dma_dev, rb->dma_addr, PAGE_SIZE, + DMA_FROM_DEVICE); + __page_frag_cache_drain(rb->page, rb->pg_refs); + rb->page = NULL; + } +} + +/* Run the XDP program assigned to an Rx queue. + * Return %NULL if the buffer is consumed, or the virtual address of the packet + * to turn into an skb. 
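+ * Only XDP_PASS yields a packet address (possibly adjusted by the
+ * program); XDP_TX, XDP_REDIRECT, XDP_DROP and the error paths all
+ * consume the buffer and return NULL.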
+ */ +static void *fun_run_xdp(struct funeth_rxq *q, skb_frag_t *frags, void *buf_va, + int ref_ok, struct funeth_txq *xdp_q) +{ + struct bpf_prog *xdp_prog; + struct xdp_buff xdp; + u32 act; + + /* VA includes the headroom, frag size includes headroom + tailroom */ + xdp_init_buff(&xdp, ALIGN(skb_frag_size(frags), FUN_EPRQ_PKT_ALIGN), + &q->xdp_rxq); + xdp_prepare_buff(&xdp, buf_va, FUN_XDP_HEADROOM, skb_frag_size(frags) - + (FUN_RX_TAILROOM + FUN_XDP_HEADROOM), false); + + xdp_prog = READ_ONCE(q->xdp_prog); + act = bpf_prog_run_xdp(xdp_prog, &xdp); + + switch (act) { + case XDP_PASS: + /* remove headroom, which may not be FUN_XDP_HEADROOM now */ + skb_frag_size_set(frags, xdp.data_end - xdp.data); + skb_frag_off_add(frags, xdp.data - xdp.data_hard_start); + goto pass; + case XDP_TX: + if (unlikely(!ref_ok)) + goto pass; + if (!fun_xdp_tx(xdp_q, xdp.data, xdp.data_end - xdp.data)) + goto xdp_error; + FUN_QSTAT_INC(q, xdp_tx); + q->xdp_flush |= FUN_XDP_FLUSH_TX; + break; + case XDP_REDIRECT: + if (unlikely(!ref_ok)) + goto pass; + if (unlikely(xdp_do_redirect(q->netdev, &xdp, xdp_prog))) + goto xdp_error; + FUN_QSTAT_INC(q, xdp_redir); + q->xdp_flush |= FUN_XDP_FLUSH_REDIR; + break; + default: + bpf_warn_invalid_xdp_action(q->netdev, xdp_prog, act); + fallthrough; + case XDP_ABORTED: + trace_xdp_exception(q->netdev, xdp_prog, act); +xdp_error: + q->cur_buf->pg_refs++; /* return frags' page reference */ + FUN_QSTAT_INC(q, xdp_err); + break; + case XDP_DROP: + q->cur_buf->pg_refs++; + FUN_QSTAT_INC(q, xdp_drops); + break; + } + return NULL; + +pass: + return xdp.data; +} + +/* A CQE contains a fixed completion structure along with optional metadata and + * even packet data. Given the start address of a CQE return the start of the + * contained fixed structure, which lies at the end. + */ +static const void *cqe_to_info(const void *cqe) +{ + return cqe + FUNETH_CQE_INFO_OFFSET; +} + +/* The inverse of cqe_to_info(). */ +static const void *info_to_cqe(const void *cqe_info) +{ + return cqe_info - FUNETH_CQE_INFO_OFFSET; +} + +/* Return the type of hash provided by the device based on the L3 and L4 + * protocols it parsed for the packet. + */ +static enum pkt_hash_types cqe_to_pkt_hash_type(u16 pkt_parse) +{ + static const enum pkt_hash_types htype_map[] = { + PKT_HASH_TYPE_NONE, PKT_HASH_TYPE_L3, + PKT_HASH_TYPE_NONE, PKT_HASH_TYPE_L4, + PKT_HASH_TYPE_NONE, PKT_HASH_TYPE_L3, + PKT_HASH_TYPE_NONE, PKT_HASH_TYPE_L3 + }; + u16 key; + + /* Build the key from the TCP/UDP and IP/IPv6 bits */ + key = ((pkt_parse >> FUN_ETH_RX_CV_OL4_PROT_S) & 6) | + ((pkt_parse >> (FUN_ETH_RX_CV_OL3_PROT_S + 1)) & 1); + + return htype_map[key]; +} + +/* Each received packet can be scattered across several Rx buffers or can + * share a buffer with previously received packets depending on the buffer + * and packet sizes and the room available in the most recently used buffer. + * + * The rules are: + * - If the buffer at the head of an RQ has not been used it gets (part of) the + * next incoming packet. + * - Otherwise, if the packet fully fits in the buffer's remaining space the + * packet is written there. + * - Otherwise, the packet goes into the next Rx buffer. + * + * This function returns the Rx buffer for a packet or fragment thereof of the + * given length. If it isn't @buf it either recycles or frees that buffer + * before advancing the queue to the next buffer. + * + * If called repeatedly with the remaining length of a packet it will walk + * through all the buffers containing the packet. 
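+ *
+ * E.g., with 4KB pages a 9000B packet that doesn't fit in the current
+ * buffer's remaining space starts in a fresh buffer and spans three
+ * buffers as 4096 + 4096 + 808 bytes.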
+ */ +static struct funeth_rxbuf * +get_buf(struct funeth_rxq *q, struct funeth_rxbuf *buf, unsigned int len) +{ + if (q->buf_offset + len <= PAGE_SIZE || !q->buf_offset) + return buf; /* @buf holds (part of) the packet */ + + /* The packet occupies part of the next buffer. Move there after + * replenishing the current buffer slot either with the spare page or + * by reusing the slot's existing page. Note that if a spare page isn't + * available and the current packet occupies @buf it is a multi-frag + * packet that will be dropped leaving @buf available for reuse. + */ + if ((page_ref_count(buf->page) == buf->pg_refs && + buf->node == numa_mem_id()) || !q->spare_buf.page) { + dma_sync_single_for_device(q->dma_dev, buf->dma_addr, + PAGE_SIZE, DMA_FROM_DEVICE); + refresh_refs(buf); + } else { + cache_offer(q, buf); + *buf = q->spare_buf; + q->spare_buf.page = NULL; + q->rqes[q->rq_cons & q->rq_mask] = + FUN_EPRQ_RQBUF_INIT(buf->dma_addr); + } + q->buf_offset = 0; + q->rq_cons++; + return &q->bufs[q->rq_cons & q->rq_mask]; +} + +/* Gather the page fragments making up the first Rx packet on @q. Its total + * length @tot_len includes optional head- and tail-rooms. + * + * Return 0 if the device retains ownership of at least some of the pages. + * In this case the caller may only copy the packet. + * + * A non-zero return value gives the caller permission to use references to the + * pages, e.g., attach them to skbs. Additionally, if the value is <0 at least + * one of the pages is PF_MEMALLOC. + * + * Regardless of outcome the caller is granted a reference to each of the pages. + */ +static int fun_gather_pkt(struct funeth_rxq *q, unsigned int tot_len, + skb_frag_t *frags) +{ + struct funeth_rxbuf *buf = q->cur_buf; + unsigned int frag_len; + int ref_ok = 1; + + for (;;) { + buf = get_buf(q, buf, tot_len); + + /* We always keep the RQ full of buffers so before we can give + * one of our pages to the stack we require that we can obtain + * a replacement page. If we can't the packet will either be + * copied or dropped so we can retain ownership of the page and + * reuse it. + */ + if (!q->spare_buf.page && + funeth_alloc_page(q, &q->spare_buf, numa_mem_id(), + GFP_ATOMIC | __GFP_MEMALLOC)) + ref_ok = 0; + + frag_len = min_t(unsigned int, tot_len, + PAGE_SIZE - q->buf_offset); + dma_sync_single_for_cpu(q->dma_dev, + buf->dma_addr + q->buf_offset, + frag_len, DMA_FROM_DEVICE); + buf->pg_refs--; + if (ref_ok) + ref_ok |= buf->node; + + __skb_frag_set_page(frags, buf->page); + skb_frag_off_set(frags, q->buf_offset); + skb_frag_size_set(frags++, frag_len); + + tot_len -= frag_len; + if (!tot_len) + break; + + q->buf_offset = PAGE_SIZE; + } + q->buf_offset = ALIGN(q->buf_offset + frag_len, FUN_EPRQ_PKT_ALIGN); + q->cur_buf = buf; + return ref_ok; +} + +static bool rx_hwtstamp_enabled(const struct net_device *dev) +{ + const struct funeth_priv *d = netdev_priv(dev); + + return d->hwtstamp_cfg.rx_filter == HWTSTAMP_FILTER_ALL; +} + +/* Advance the CQ pointers and phase tag to the next CQE. */ +static void advance_cq(struct funeth_rxq *q) +{ + if (unlikely(q->cq_head == q->cq_mask)) { + q->cq_head = 0; + q->phase ^= 1; + q->next_cqe_info = cqe_to_info(q->cqes); + } else { + q->cq_head++; + q->next_cqe_info += FUNETH_CQE_SIZE; + } + prefetch(q->next_cqe_info); +} + +/* Process the packet represented by the head CQE of @q. Gather the packet's + * fragments, run it through the optional XDP program, and if needed construct + * an skb and pass it to the stack. 
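+ * Packets that still have headroom become linear skbs via
+ * napi_build_skb(); the rest are attached as frags to an skb obtained
+ * from napi_get_frags().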
+ */ +static void fun_handle_cqe_pkt(struct funeth_rxq *q, struct funeth_txq *xdp_q) +{ + const struct fun_eth_cqe *rxreq = info_to_cqe(q->next_cqe_info); + unsigned int i, tot_len, pkt_len = be32_to_cpu(rxreq->pkt_len); + struct net_device *ndev = q->netdev; + skb_frag_t frags[RX_MAX_FRAGS]; + struct skb_shared_info *si; + unsigned int headroom; + gro_result_t gro_res; + struct sk_buff *skb; + int ref_ok; + void *va; + u16 cv; + + u64_stats_update_begin(&q->syncp); + q->stats.rx_pkts++; + q->stats.rx_bytes += pkt_len; + u64_stats_update_end(&q->syncp); + + advance_cq(q); + + /* account for head- and tail-room, present only for 1-buffer packets */ + tot_len = pkt_len; + headroom = be16_to_cpu(rxreq->headroom); + if (likely(headroom)) + tot_len += FUN_RX_TAILROOM + headroom; + + ref_ok = fun_gather_pkt(q, tot_len, frags); + va = skb_frag_address(frags); + if (xdp_q && headroom == FUN_XDP_HEADROOM) { + va = fun_run_xdp(q, frags, va, ref_ok, xdp_q); + if (!va) + return; + headroom = 0; /* XDP_PASS trims it */ + } + if (unlikely(!ref_ok)) + goto no_mem; + + if (likely(headroom)) { + /* headroom is either FUN_RX_HEADROOM or FUN_XDP_HEADROOM */ + prefetch(va + headroom); + skb = napi_build_skb(va, ALIGN(tot_len, FUN_EPRQ_PKT_ALIGN)); + if (unlikely(!skb)) + goto no_mem; + + skb_reserve(skb, headroom); + __skb_put(skb, pkt_len); + skb->protocol = eth_type_trans(skb, ndev); + } else { + prefetch(va); + skb = napi_get_frags(q->napi); + if (unlikely(!skb)) + goto no_mem; + + if (ref_ok < 0) + skb->pfmemalloc = 1; + + si = skb_shinfo(skb); + si->nr_frags = rxreq->nsgl; + for (i = 0; i < si->nr_frags; i++) + si->frags[i] = frags[i]; + + skb->len = pkt_len; + skb->data_len = pkt_len; + skb->truesize += round_up(pkt_len, FUN_EPRQ_PKT_ALIGN); + } + + skb_record_rx_queue(skb, q->qidx); + cv = be16_to_cpu(rxreq->pkt_cv); + if (likely((q->netdev->features & NETIF_F_RXHASH) && rxreq->hash)) + skb_set_hash(skb, be32_to_cpu(rxreq->hash), + cqe_to_pkt_hash_type(cv)); + if (likely((q->netdev->features & NETIF_F_RXCSUM) && rxreq->csum)) { + FUN_QSTAT_INC(q, rx_cso); + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->csum_level = be16_to_cpu(rxreq->csum) - 1; + } + if (unlikely(rx_hwtstamp_enabled(q->netdev))) + skb_hwtstamps(skb)->hwtstamp = be64_to_cpu(rxreq->timestamp); + + trace_funeth_rx(q, rxreq->nsgl, pkt_len, skb->hash, cv); + + gro_res = skb->data_len ? napi_gro_frags(q->napi) : + napi_gro_receive(q->napi, skb); + if (gro_res == GRO_MERGED || gro_res == GRO_MERGED_FREE) + FUN_QSTAT_INC(q, gro_merged); + else if (gro_res == GRO_HELD) + FUN_QSTAT_INC(q, gro_pkts); + return; + +no_mem: + FUN_QSTAT_INC(q, rx_mem_drops); + + /* Release the references we've been granted for the frag pages. + * We return the ref of the last frag and free the rest. + */ + q->cur_buf->pg_refs++; + for (i = 0; i < rxreq->nsgl - 1; i++) + __free_page(skb_frag_page(frags + i)); +} + +/* Return 0 if the phase tag of the CQE at the CQ's head matches expectations + * indicating the CQE is new. + */ +static u16 cqe_phase_mismatch(const struct fun_cqe_info *ci, u16 phase) +{ + u16 sf_p = be16_to_cpu(ci->sf_p); + + return (sf_p & 1) ^ phase; +} + +/* Walk through a CQ identifying and processing fresh CQEs up to the given + * budget. Return the remaining budget. 
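+ *
+ * Freshness is judged solely by the CQE phase tag: the expected phase
+ * flips each time cq_head wraps, so no head index has to be read from
+ * the device.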
+ */ +static int fun_process_cqes(struct funeth_rxq *q, int budget) +{ + struct funeth_priv *fp = netdev_priv(q->netdev); + struct funeth_txq **xdpqs, *xdp_q = NULL; + + xdpqs = rcu_dereference_bh(fp->xdpqs); + if (xdpqs) + xdp_q = xdpqs[smp_processor_id()]; + + while (budget && !cqe_phase_mismatch(q->next_cqe_info, q->phase)) { + /* access other descriptor fields after the phase check */ + dma_rmb(); + + fun_handle_cqe_pkt(q, xdp_q); + budget--; + } + + if (unlikely(q->xdp_flush)) { + if (q->xdp_flush & FUN_XDP_FLUSH_TX) + fun_txq_wr_db(xdp_q); + if (q->xdp_flush & FUN_XDP_FLUSH_REDIR) + xdp_do_flush(); + q->xdp_flush = 0; + } + + return budget; +} + +/* NAPI handler for Rx queues. Calls the CQE processing loop and writes RQ/CQ + * doorbells as needed. + */ +int fun_rxq_napi_poll(struct napi_struct *napi, int budget) +{ + struct fun_irq *irq = container_of(napi, struct fun_irq, napi); + struct funeth_rxq *q = irq->rxq; + int work_done = budget - fun_process_cqes(q, budget); + u32 cq_db_val = q->cq_head; + + if (unlikely(work_done >= budget)) + FUN_QSTAT_INC(q, rx_budget); + else if (napi_complete_done(napi, work_done)) + cq_db_val |= q->irq_db_val; + + /* check whether to post new Rx buffers */ + if (q->rq_cons - q->rq_cons_db >= q->rq_db_thres) { + u64_stats_update_begin(&q->syncp); + q->stats.rx_bufs += q->rq_cons - q->rq_cons_db; + u64_stats_update_end(&q->syncp); + q->rq_cons_db = q->rq_cons; + writel((q->rq_cons - 1) & q->rq_mask, q->rq_db); + } + + writel(cq_db_val, q->cq_db); + return work_done; +} + +/* Free the Rx buffers of an Rx queue. */ +static void fun_rxq_free_bufs(struct funeth_rxq *q) +{ + struct funeth_rxbuf *b = q->bufs; + unsigned int i; + + for (i = 0; i <= q->rq_mask; i++, b++) + funeth_free_page(q, b); + + funeth_free_page(q, &q->spare_buf); + q->cur_buf = NULL; +} + +/* Initially provision an Rx queue with Rx buffers. */ +static int fun_rxq_alloc_bufs(struct funeth_rxq *q, int node) +{ + struct funeth_rxbuf *b = q->bufs; + unsigned int i; + + for (i = 0; i <= q->rq_mask; i++, b++) { + if (funeth_alloc_page(q, b, node, GFP_KERNEL)) { + fun_rxq_free_bufs(q); + return -ENOMEM; + } + q->rqes[i] = FUN_EPRQ_RQBUF_INIT(b->dma_addr); + } + q->cur_buf = q->bufs; + return 0; +} + +/* Initialize a used-buffer cache of the given depth. */ +static int fun_rxq_init_cache(struct funeth_rx_cache *c, unsigned int depth, + int node) +{ + c->mask = depth - 1; + c->bufs = kvzalloc_node(depth * sizeof(*c->bufs), GFP_KERNEL, node); + return c->bufs ? 0 : -ENOMEM; +} + +/* Deallocate an Rx queue's used-buffer cache and its contents. */ +static void fun_rxq_free_cache(struct funeth_rxq *q) +{ + struct funeth_rxbuf *b = q->cache.bufs; + unsigned int i; + + for (i = 0; i <= q->cache.mask; i++, b++) + funeth_free_page(q, b); + + kvfree(q->cache.bufs); + q->cache.bufs = NULL; +} + +int fun_rxq_set_bpf(struct funeth_rxq *q, struct bpf_prog *prog) +{ + struct funeth_priv *fp = netdev_priv(q->netdev); + struct fun_admin_epcq_req cmd; + u16 headroom; + int err; + + headroom = prog ? FUN_XDP_HEADROOM : FUN_RX_HEADROOM; + if (headroom != q->headroom) { + cmd.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_EPCQ, + sizeof(cmd)); + cmd.u.modify = + FUN_ADMIN_EPCQ_MODIFY_REQ_INIT(FUN_ADMIN_SUBOP_MODIFY, + 0, q->hw_cqid, headroom); + err = fun_submit_admin_sync_cmd(fp->fdev, &cmd.common, NULL, 0, + 0); + if (err) + return err; + q->headroom = headroom; + } + + WRITE_ONCE(q->xdp_prog, prog); + return 0; +} + +/* Create an Rx queue, allocating the host memory it needs. 
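+ * (That is the RQ/CQ rings, buffer state and the used-buffer cache;
+ * the device-side objects are created later by fun_rxq_create_dev().)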
*/ +static struct funeth_rxq *fun_rxq_create_sw(struct net_device *dev, + unsigned int qidx, + unsigned int ncqe, + unsigned int nrqe, + struct fun_irq *irq) +{ + struct funeth_priv *fp = netdev_priv(dev); + struct funeth_rxq *q; + int err = -ENOMEM; + int numa_node; + + numa_node = fun_irq_node(irq); + q = kzalloc_node(sizeof(*q), GFP_KERNEL, numa_node); + if (!q) + goto err; + + q->qidx = qidx; + q->netdev = dev; + q->cq_mask = ncqe - 1; + q->rq_mask = nrqe - 1; + q->numa_node = numa_node; + q->rq_db_thres = nrqe / 4; + u64_stats_init(&q->syncp); + q->dma_dev = &fp->pdev->dev; + + q->rqes = fun_alloc_ring_mem(q->dma_dev, nrqe, sizeof(*q->rqes), + sizeof(*q->bufs), false, numa_node, + &q->rq_dma_addr, (void **)&q->bufs, NULL); + if (!q->rqes) + goto free_q; + + q->cqes = fun_alloc_ring_mem(q->dma_dev, ncqe, FUNETH_CQE_SIZE, 0, + false, numa_node, &q->cq_dma_addr, NULL, + NULL); + if (!q->cqes) + goto free_rqes; + + err = fun_rxq_init_cache(&q->cache, nrqe, numa_node); + if (err) + goto free_cqes; + + err = fun_rxq_alloc_bufs(q, numa_node); + if (err) + goto free_cache; + + q->stats.rx_bufs = q->rq_mask; + q->init_state = FUN_QSTATE_INIT_SW; + return q; + +free_cache: + fun_rxq_free_cache(q); +free_cqes: + dma_free_coherent(q->dma_dev, ncqe * FUNETH_CQE_SIZE, q->cqes, + q->cq_dma_addr); +free_rqes: + fun_free_ring_mem(q->dma_dev, nrqe, sizeof(*q->rqes), false, q->rqes, + q->rq_dma_addr, q->bufs); +free_q: + kfree(q); +err: + netdev_err(dev, "Unable to allocate memory for Rx queue %u\n", qidx); + return ERR_PTR(err); +} + +static void fun_rxq_free_sw(struct funeth_rxq *q) +{ + struct funeth_priv *fp = netdev_priv(q->netdev); + + fun_rxq_free_cache(q); + fun_rxq_free_bufs(q); + fun_free_ring_mem(q->dma_dev, q->rq_mask + 1, sizeof(*q->rqes), false, + q->rqes, q->rq_dma_addr, q->bufs); + dma_free_coherent(q->dma_dev, (q->cq_mask + 1) * FUNETH_CQE_SIZE, + q->cqes, q->cq_dma_addr); + + /* Before freeing the queue transfer key counters to the device. */ + fp->rx_packets += q->stats.rx_pkts; + fp->rx_bytes += q->stats.rx_bytes; + fp->rx_dropped += q->stats.rx_map_err + q->stats.rx_mem_drops; + + kfree(q); +} + +/* Create an Rx queue's resources on the device. */ +int fun_rxq_create_dev(struct funeth_rxq *q, struct fun_irq *irq) +{ + struct funeth_priv *fp = netdev_priv(q->netdev); + unsigned int ncqe = q->cq_mask + 1; + unsigned int nrqe = q->rq_mask + 1; + int err; + + err = xdp_rxq_info_reg(&q->xdp_rxq, q->netdev, q->qidx, + irq->napi.napi_id); + if (err) + goto out; + + err = xdp_rxq_info_reg_mem_model(&q->xdp_rxq, MEM_TYPE_PAGE_SHARED, + NULL); + if (err) + goto xdp_unreg; + + q->phase = 1; + q->irq_cnt = 0; + q->cq_head = 0; + q->rq_cons = 0; + q->rq_cons_db = 0; + q->buf_offset = 0; + q->napi = &irq->napi; + q->irq_db_val = fp->cq_irq_db; + q->next_cqe_info = cqe_to_info(q->cqes); + + q->xdp_prog = fp->xdp_prog; + q->headroom = fp->xdp_prog ? 
FUN_XDP_HEADROOM : FUN_RX_HEADROOM; + + err = fun_sq_create(fp->fdev, FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR | + FUN_ADMIN_EPSQ_CREATE_FLAG_RQ, 0, + FUN_HCI_ID_INVALID, 0, nrqe, q->rq_dma_addr, 0, 0, + 0, 0, fp->fdev->kern_end_qid, PAGE_SHIFT, + &q->hw_sqid, &q->rq_db); + if (err) + goto xdp_unreg; + + err = fun_cq_create(fp->fdev, FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR | + FUN_ADMIN_EPCQ_CREATE_FLAG_RQ, 0, + q->hw_sqid, ilog2(FUNETH_CQE_SIZE), ncqe, + q->cq_dma_addr, q->headroom, FUN_RX_TAILROOM, 0, 0, + irq->irq_idx, 0, fp->fdev->kern_end_qid, + &q->hw_cqid, &q->cq_db); + if (err) + goto free_rq; + + irq->rxq = q; + writel(q->rq_mask, q->rq_db); + q->init_state = FUN_QSTATE_INIT_FULL; + + netif_info(fp, ifup, q->netdev, + "Rx queue %u, depth %u/%u, HW qid %u/%u, IRQ idx %u, node %d, headroom %u\n", + q->qidx, ncqe, nrqe, q->hw_cqid, q->hw_sqid, irq->irq_idx, + q->numa_node, q->headroom); + return 0; + +free_rq: + fun_destroy_sq(fp->fdev, q->hw_sqid); +xdp_unreg: + xdp_rxq_info_unreg(&q->xdp_rxq); +out: + netdev_err(q->netdev, + "Failed to create Rx queue %u on device, error %d\n", + q->qidx, err); + return err; +} + +static void fun_rxq_free_dev(struct funeth_rxq *q) +{ + struct funeth_priv *fp = netdev_priv(q->netdev); + struct fun_irq *irq; + + if (q->init_state < FUN_QSTATE_INIT_FULL) + return; + + irq = container_of(q->napi, struct fun_irq, napi); + netif_info(fp, ifdown, q->netdev, + "Freeing Rx queue %u (id %u/%u), IRQ %u\n", + q->qidx, q->hw_cqid, q->hw_sqid, irq->irq_idx); + + irq->rxq = NULL; + xdp_rxq_info_unreg(&q->xdp_rxq); + fun_destroy_sq(fp->fdev, q->hw_sqid); + fun_destroy_cq(fp->fdev, q->hw_cqid); + q->init_state = FUN_QSTATE_INIT_SW; +} + +/* Create or advance an Rx queue, allocating all the host and device resources + * needed to reach the target state. + */ +int funeth_rxq_create(struct net_device *dev, unsigned int qidx, + unsigned int ncqe, unsigned int nrqe, struct fun_irq *irq, + int state, struct funeth_rxq **qp) +{ + struct funeth_rxq *q = *qp; + int err; + + if (!q) { + q = fun_rxq_create_sw(dev, qidx, ncqe, nrqe, irq); + if (IS_ERR(q)) + return PTR_ERR(q); + } + + if (q->init_state >= state) + goto out; + + err = fun_rxq_create_dev(q, irq); + if (err) { + if (!*qp) + fun_rxq_free_sw(q); + return err; + } + +out: + *qp = q; + return 0; +} + +/* Free Rx queue resources until it reaches the target state. 
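+ * E.g., a target of FUN_QSTATE_INIT_SW releases only the device half
+ * and keeps the host memory; FUN_QSTATE_DESTROYED frees everything,
+ * and NULL is returned.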
*/ +struct funeth_rxq *funeth_rxq_free(struct funeth_rxq *q, int state) +{ + if (state < FUN_QSTATE_INIT_FULL) + fun_rxq_free_dev(q); + + if (state == FUN_QSTATE_DESTROYED) { + fun_rxq_free_sw(q); + q = NULL; + } + + return q; +} diff --git a/drivers/net/ethernet/fungible/funeth/funeth_trace.h b/drivers/net/ethernet/fungible/funeth/funeth_trace.h new file mode 100644 index 000000000000..9e58dfec19d5 --- /dev/null +++ b/drivers/net/ethernet/fungible/funeth/funeth_trace.h @@ -0,0 +1,117 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM funeth + +#if !defined(_TRACE_FUNETH_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_FUNETH_H + +#include <linux/tracepoint.h> + +#include "funeth_txrx.h" + +TRACE_EVENT(funeth_tx, + + TP_PROTO(const struct funeth_txq *txq, + u32 len, + u32 sqe_idx, + u32 ngle), + + TP_ARGS(txq, len, sqe_idx, ngle), + + TP_STRUCT__entry( + __field(u32, qidx) + __field(u32, len) + __field(u32, sqe_idx) + __field(u32, ngle) + __string(devname, txq->netdev->name) + ), + + TP_fast_assign( + __entry->qidx = txq->qidx; + __entry->len = len; + __entry->sqe_idx = sqe_idx; + __entry->ngle = ngle; + __assign_str(devname, txq->netdev->name); + ), + + TP_printk("%s: Txq %u, SQE idx %u, len %u, num GLEs %u", + __get_str(devname), __entry->qidx, __entry->sqe_idx, + __entry->len, __entry->ngle) +); + +TRACE_EVENT(funeth_tx_free, + + TP_PROTO(const struct funeth_txq *txq, + u32 sqe_idx, + u32 num_sqes, + u32 hw_head), + + TP_ARGS(txq, sqe_idx, num_sqes, hw_head), + + TP_STRUCT__entry( + __field(u32, qidx) + __field(u32, sqe_idx) + __field(u32, num_sqes) + __field(u32, hw_head) + __string(devname, txq->netdev->name) + ), + + TP_fast_assign( + __entry->qidx = txq->qidx; + __entry->sqe_idx = sqe_idx; + __entry->num_sqes = num_sqes; + __entry->hw_head = hw_head; + __assign_str(devname, txq->netdev->name); + ), + + TP_printk("%s: Txq %u, SQE idx %u, SQEs %u, HW head %u", + __get_str(devname), __entry->qidx, __entry->sqe_idx, + __entry->num_sqes, __entry->hw_head) +); + +TRACE_EVENT(funeth_rx, + + TP_PROTO(const struct funeth_rxq *rxq, + u32 num_rqes, + u32 pkt_len, + u32 hash, + u32 cls_vec), + + TP_ARGS(rxq, num_rqes, pkt_len, hash, cls_vec), + + TP_STRUCT__entry( + __field(u32, qidx) + __field(u32, cq_head) + __field(u32, num_rqes) + __field(u32, len) + __field(u32, hash) + __field(u32, cls_vec) + __string(devname, rxq->netdev->name) + ), + + TP_fast_assign( + __entry->qidx = rxq->qidx; + __entry->cq_head = rxq->cq_head; + __entry->num_rqes = num_rqes; + __entry->len = pkt_len; + __entry->hash = hash; + __entry->cls_vec = cls_vec; + __assign_str(devname, rxq->netdev->name); + ), + + TP_printk("%s: Rxq %u, CQ head %u, RQEs %u, len %u, hash %u, CV %#x", + __get_str(devname), __entry->qidx, __entry->cq_head, + __entry->num_rqes, __entry->len, __entry->hash, + __entry->cls_vec) +); + +#endif /* _TRACE_FUNETH_H */ + +/* Below must be outside protection. */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . 
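+/* Setting TRACE_INCLUDE_PATH to "." makes define_trace.h look for this
+ * header in the driver's own directory rather than the default
+ * include/trace/events/.
+ */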
+#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE funeth_trace + +#include <trace/define_trace.h> diff --git a/drivers/net/ethernet/fungible/funeth/funeth_tx.c b/drivers/net/ethernet/fungible/funeth/funeth_tx.c new file mode 100644 index 000000000000..ff6e29237253 --- /dev/null +++ b/drivers/net/ethernet/fungible/funeth/funeth_tx.c @@ -0,0 +1,763 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) + +#include <linux/dma-mapping.h> +#include <linux/ip.h> +#include <linux/pci.h> +#include <linux/skbuff.h> +#include <linux/tcp.h> +#include <uapi/linux/udp.h> +#include "funeth.h" +#include "funeth_ktls.h" +#include "funeth_txrx.h" +#include "funeth_trace.h" +#include "fun_queue.h" + +#define FUN_XDP_CLEAN_THRES 32 +#define FUN_XDP_CLEAN_BATCH 16 + +/* DMA-map a packet and return the (length, DMA_address) pairs for its + * segments. If a mapping error occurs -ENOMEM is returned. + */ +static int map_skb(const struct sk_buff *skb, struct device *dev, + dma_addr_t *addr, unsigned int *len) +{ + const struct skb_shared_info *si; + const skb_frag_t *fp, *end; + + *len = skb_headlen(skb); + *addr = dma_map_single(dev, skb->data, *len, DMA_TO_DEVICE); + if (dma_mapping_error(dev, *addr)) + return -ENOMEM; + + si = skb_shinfo(skb); + end = &si->frags[si->nr_frags]; + + for (fp = si->frags; fp < end; fp++) { + *++len = skb_frag_size(fp); + *++addr = skb_frag_dma_map(dev, fp, 0, *len, DMA_TO_DEVICE); + if (dma_mapping_error(dev, *addr)) + goto unwind; + } + return 0; + +unwind: + while (fp-- > si->frags) + dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE); + + dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE); + return -ENOMEM; +} + +/* Return the address just past the end of a Tx queue's descriptor ring. + * It exploits the fact that the HW writeback area is just after the end + * of the descriptor ring. + */ +static void *txq_end(const struct funeth_txq *q) +{ + return (void *)q->hw_wb; +} + +/* Return the amount of space within a Tx ring from the given address to the + * end. + */ +static unsigned int txq_to_end(const struct funeth_txq *q, void *p) +{ + return txq_end(q) - p; +} + +/* Return the number of Tx descriptors occupied by a Tx request. */ +static unsigned int tx_req_ndesc(const struct fun_eth_tx_req *req) +{ + return DIV_ROUND_UP(req->len8, FUNETH_SQE_SIZE / 8); +} + +static __be16 tcp_hdr_doff_flags(const struct tcphdr *th) +{ + return *(__be16 *)&tcp_flag_word(th); +} + +static struct sk_buff *fun_tls_tx(struct sk_buff *skb, struct funeth_txq *q, + unsigned int *tls_len) +{ +#if IS_ENABLED(CONFIG_TLS_DEVICE) + const struct fun_ktls_tx_ctx *tls_ctx; + u32 datalen, seq; + + datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb)); + if (!datalen) + return skb; + + if (likely(!tls_offload_tx_resync_pending(skb->sk))) { + seq = ntohl(tcp_hdr(skb)->seq); + tls_ctx = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX); + + if (likely(tls_ctx->next_seq == seq)) { + *tls_len = datalen; + return skb; + } + if (seq - tls_ctx->next_seq < U32_MAX / 4) { + tls_offload_tx_resync_request(skb->sk, seq, + tls_ctx->next_seq); + } + } + + FUN_QSTAT_INC(q, tx_tls_fallback); + skb = tls_encrypt_skb(skb); + if (!skb) + FUN_QSTAT_INC(q, tx_tls_drops); + + return skb; +#else + return NULL; +#endif +} + +/* Write as many descriptors as needed for the supplied skb starting at the + * current producer location. The caller has made certain enough descriptors + * are available. + * + * Returns the number of descriptors written, 0 on error. 
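+ *
+ * 0 is returned only on a DMA mapping failure. The gather list may
+ * wrap past the end of the ring, in which case the remaining GLEs
+ * continue at the base of the descriptor memory.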
+ */ +static unsigned int write_pkt_desc(struct sk_buff *skb, struct funeth_txq *q, + unsigned int tls_len) +{ + unsigned int extra_bytes = 0, extra_pkts = 0; + unsigned int idx = q->prod_cnt & q->mask; + const struct skb_shared_info *shinfo; + unsigned int lens[MAX_SKB_FRAGS + 1]; + dma_addr_t addrs[MAX_SKB_FRAGS + 1]; + struct fun_eth_tx_req *req; + struct fun_dataop_gl *gle; + const struct tcphdr *th; + unsigned int ngle, i; + u16 flags; + + if (unlikely(map_skb(skb, q->dma_dev, addrs, lens))) { + FUN_QSTAT_INC(q, tx_map_err); + return 0; + } + + req = fun_tx_desc_addr(q, idx); + req->op = FUN_ETH_OP_TX; + req->len8 = 0; + req->flags = 0; + req->suboff8 = offsetof(struct fun_eth_tx_req, dataop); + req->repr_idn = 0; + req->encap_proto = 0; + + shinfo = skb_shinfo(skb); + if (likely(shinfo->gso_size)) { + if (skb->encapsulation) { + u16 ol4_ofst; + + flags = FUN_ETH_OUTER_EN | FUN_ETH_INNER_LSO | + FUN_ETH_UPDATE_INNER_L4_CKSUM | + FUN_ETH_UPDATE_OUTER_L3_LEN; + if (shinfo->gso_type & (SKB_GSO_UDP_TUNNEL | + SKB_GSO_UDP_TUNNEL_CSUM)) { + flags |= FUN_ETH_UPDATE_OUTER_L4_LEN | + FUN_ETH_OUTER_UDP; + if (shinfo->gso_type & SKB_GSO_UDP_TUNNEL_CSUM) + flags |= FUN_ETH_UPDATE_OUTER_L4_CKSUM; + ol4_ofst = skb_transport_offset(skb); + } else { + ol4_ofst = skb_inner_network_offset(skb); + } + + if (ip_hdr(skb)->version == 4) + flags |= FUN_ETH_UPDATE_OUTER_L3_CKSUM; + else + flags |= FUN_ETH_OUTER_IPV6; + + if (skb->inner_network_header) { + if (inner_ip_hdr(skb)->version == 4) + flags |= FUN_ETH_UPDATE_INNER_L3_CKSUM | + FUN_ETH_UPDATE_INNER_L3_LEN; + else + flags |= FUN_ETH_INNER_IPV6 | + FUN_ETH_UPDATE_INNER_L3_LEN; + } + th = inner_tcp_hdr(skb); + fun_eth_offload_init(&req->offload, flags, + shinfo->gso_size, + tcp_hdr_doff_flags(th), 0, + skb_inner_network_offset(skb), + skb_inner_transport_offset(skb), + skb_network_offset(skb), ol4_ofst); + FUN_QSTAT_INC(q, tx_encap_tso); + } else { + /* HW considers one set of headers as inner */ + flags = FUN_ETH_INNER_LSO | + FUN_ETH_UPDATE_INNER_L4_CKSUM | + FUN_ETH_UPDATE_INNER_L3_LEN; + if (shinfo->gso_type & SKB_GSO_TCPV6) + flags |= FUN_ETH_INNER_IPV6; + else + flags |= FUN_ETH_UPDATE_INNER_L3_CKSUM; + th = tcp_hdr(skb); + fun_eth_offload_init(&req->offload, flags, + shinfo->gso_size, + tcp_hdr_doff_flags(th), 0, + skb_network_offset(skb), + skb_transport_offset(skb), 0, 0); + FUN_QSTAT_INC(q, tx_tso); + } + + u64_stats_update_begin(&q->syncp); + q->stats.tx_cso += shinfo->gso_segs; + u64_stats_update_end(&q->syncp); + + extra_pkts = shinfo->gso_segs - 1; + extra_bytes = (be16_to_cpu(req->offload.inner_l4_off) + + __tcp_hdrlen(th)) * extra_pkts; + } else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { + flags = FUN_ETH_UPDATE_INNER_L4_CKSUM; + if (skb->csum_offset == offsetof(struct udphdr, check)) + flags |= FUN_ETH_INNER_UDP; + fun_eth_offload_init(&req->offload, flags, 0, 0, 0, 0, + skb_checksum_start_offset(skb), 0, 0); + FUN_QSTAT_INC(q, tx_cso); + } else { + fun_eth_offload_init(&req->offload, 0, 0, 0, 0, 0, 0, 0, 0); + } + + ngle = shinfo->nr_frags + 1; + req->len8 = (sizeof(*req) + ngle * sizeof(*gle)) / 8; + req->dataop = FUN_DATAOP_HDR_INIT(ngle, 0, ngle, 0, skb->len); + + for (i = 0, gle = (struct fun_dataop_gl *)req->dataop.imm; + i < ngle && txq_to_end(q, gle); i++, gle++) + fun_dataop_gl_init(gle, 0, 0, lens[i], addrs[i]); + + if (txq_to_end(q, gle) == 0) { + gle = (struct fun_dataop_gl *)q->desc; + for ( ; i < ngle; i++, gle++) + fun_dataop_gl_init(gle, 0, 0, lens[i], addrs[i]); + } + + if (IS_ENABLED(CONFIG_TLS_DEVICE) && 
unlikely(tls_len)) { + struct fun_eth_tls *tls = (struct fun_eth_tls *)gle; + struct fun_ktls_tx_ctx *tls_ctx; + + req->len8 += FUNETH_TLS_SZ / 8; + req->flags = cpu_to_be16(FUN_ETH_TX_TLS); + + tls_ctx = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX); + tls->tlsid = tls_ctx->tlsid; + tls_ctx->next_seq += tls_len; + + u64_stats_update_begin(&q->syncp); + q->stats.tx_tls_bytes += tls_len; + q->stats.tx_tls_pkts += 1 + extra_pkts; + u64_stats_update_end(&q->syncp); + } + + u64_stats_update_begin(&q->syncp); + q->stats.tx_bytes += skb->len + extra_bytes; + q->stats.tx_pkts += 1 + extra_pkts; + u64_stats_update_end(&q->syncp); + + q->info[idx].skb = skb; + + trace_funeth_tx(q, skb->len, idx, req->dataop.ngather); + return tx_req_ndesc(req); +} + +/* Return the number of available descriptors of a Tx queue. + * HW assumes head==tail means the ring is empty so we need to keep one + * descriptor unused. + */ +static unsigned int fun_txq_avail(const struct funeth_txq *q) +{ + return q->mask - q->prod_cnt + q->cons_cnt; +} + +/* Stop a queue if it can't handle another worst-case packet. */ +static void fun_tx_check_stop(struct funeth_txq *q) +{ + if (likely(fun_txq_avail(q) >= FUNETH_MAX_PKT_DESC)) + return; + + netif_tx_stop_queue(q->ndq); + + /* NAPI reclaim is freeing packets in parallel with us and we may race. + * We have stopped the queue but check again after synchronizing with + * reclaim. + */ + smp_mb(); + if (likely(fun_txq_avail(q) < FUNETH_MAX_PKT_DESC)) + FUN_QSTAT_INC(q, tx_nstops); + else + netif_tx_start_queue(q->ndq); +} + +/* Return true if a queue has enough space to restart. Current condition is + * that the queue must be >= 1/4 empty. + */ +static bool fun_txq_may_restart(struct funeth_txq *q) +{ + return fun_txq_avail(q) >= q->mask / 4; +} + +netdev_tx_t fun_start_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + struct funeth_priv *fp = netdev_priv(netdev); + unsigned int qid = skb_get_queue_mapping(skb); + struct funeth_txq *q = fp->txqs[qid]; + unsigned int tls_len = 0; + unsigned int ndesc; + + if (IS_ENABLED(CONFIG_TLS_DEVICE) && skb->sk && + tls_is_sk_tx_device_offloaded(skb->sk)) { + skb = fun_tls_tx(skb, q, &tls_len); + if (unlikely(!skb)) + goto dropped; + } + + ndesc = write_pkt_desc(skb, q, tls_len); + if (unlikely(!ndesc)) { + dev_kfree_skb_any(skb); + goto dropped; + } + + q->prod_cnt += ndesc; + fun_tx_check_stop(q); + + skb_tx_timestamp(skb); + + if (__netdev_tx_sent_queue(q->ndq, skb->len, netdev_xmit_more())) + fun_txq_wr_db(q); + else + FUN_QSTAT_INC(q, tx_more); + + return NETDEV_TX_OK; + +dropped: + /* A dropped packet may be the last one in a xmit_more train, + * ring the doorbell just in case. + */ + if (!netdev_xmit_more()) + fun_txq_wr_db(q); + return NETDEV_TX_OK; +} + +/* Return a Tx queue's HW head index written back to host memory. */ +static u16 txq_hw_head(const struct funeth_txq *q) +{ + return (u16)be64_to_cpu(*q->hw_wb); +} + +/* Unmap the Tx packet starting at the given descriptor index and + * return the number of Tx descriptors it occupied. 
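+ * Like write_pkt_desc() it follows gather lists that wrap past the end
+ * of the ring: the head fragment is a single mapping, the remaining
+ * entries are page fragments.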
+ */ +static unsigned int unmap_skb(const struct funeth_txq *q, unsigned int idx) +{ + const struct fun_eth_tx_req *req = fun_tx_desc_addr(q, idx); + unsigned int ngle = req->dataop.ngather; + struct fun_dataop_gl *gle; + + if (ngle) { + gle = (struct fun_dataop_gl *)req->dataop.imm; + dma_unmap_single(q->dma_dev, be64_to_cpu(gle->sgl_data), + be32_to_cpu(gle->sgl_len), DMA_TO_DEVICE); + + for (gle++; --ngle && txq_to_end(q, gle); gle++) + dma_unmap_page(q->dma_dev, be64_to_cpu(gle->sgl_data), + be32_to_cpu(gle->sgl_len), + DMA_TO_DEVICE); + + for (gle = (struct fun_dataop_gl *)q->desc; ngle; ngle--, gle++) + dma_unmap_page(q->dma_dev, be64_to_cpu(gle->sgl_data), + be32_to_cpu(gle->sgl_len), + DMA_TO_DEVICE); + } + + return tx_req_ndesc(req); +} + +/* Reclaim completed Tx descriptors and free their packets. Restart a stopped + * queue if we freed enough descriptors. + * + * Return true if we exhausted the budget while there is more work to be done. + */ +static bool fun_txq_reclaim(struct funeth_txq *q, int budget) +{ + unsigned int npkts = 0, nbytes = 0, ndesc = 0; + unsigned int head, limit, reclaim_idx; + + /* budget may be 0, e.g., netpoll */ + limit = budget ? budget : UINT_MAX; + + for (head = txq_hw_head(q), reclaim_idx = q->cons_cnt & q->mask; + head != reclaim_idx && npkts < limit; head = txq_hw_head(q)) { + /* The HW head is continually updated, ensure we don't read + * descriptor state before the head tells us to reclaim it. + * On the enqueue side the doorbell is an implicit write + * barrier. + */ + rmb(); + + do { + unsigned int pkt_desc = unmap_skb(q, reclaim_idx); + struct sk_buff *skb = q->info[reclaim_idx].skb; + + trace_funeth_tx_free(q, reclaim_idx, pkt_desc, head); + + nbytes += skb->len; + napi_consume_skb(skb, budget); + ndesc += pkt_desc; + reclaim_idx = (reclaim_idx + pkt_desc) & q->mask; + npkts++; + } while (reclaim_idx != head && npkts < limit); + } + + q->cons_cnt += ndesc; + netdev_tx_completed_queue(q->ndq, npkts, nbytes); + smp_mb(); /* pairs with the one in fun_tx_check_stop() */ + + if (unlikely(netif_tx_queue_stopped(q->ndq) && + fun_txq_may_restart(q))) { + netif_tx_wake_queue(q->ndq); + FUN_QSTAT_INC(q, tx_nrestarts); + } + + return reclaim_idx != head; +} + +/* The NAPI handler for Tx queues. */ +int fun_txq_napi_poll(struct napi_struct *napi, int budget) +{ + struct fun_irq *irq = container_of(napi, struct fun_irq, napi); + struct funeth_txq *q = irq->txq; + unsigned int db_val; + + if (fun_txq_reclaim(q, budget)) + return budget; /* exhausted budget */ + + napi_complete(napi); /* exhausted pending work */ + db_val = READ_ONCE(q->irq_db_val) | (q->cons_cnt & q->mask); + writel(db_val, q->db); + return 0; +} + +static void fun_xdp_unmap(const struct funeth_txq *q, unsigned int idx) +{ + const struct fun_eth_tx_req *req = fun_tx_desc_addr(q, idx); + const struct fun_dataop_gl *gle; + + gle = (const struct fun_dataop_gl *)req->dataop.imm; + dma_unmap_single(q->dma_dev, be64_to_cpu(gle->sgl_data), + be32_to_cpu(gle->sgl_len), DMA_TO_DEVICE); +} + +/* Reclaim up to @budget completed Tx descriptors from a TX XDP queue. */ +static unsigned int fun_xdpq_clean(struct funeth_txq *q, unsigned int budget) +{ + unsigned int npkts = 0, head, reclaim_idx; + + for (head = txq_hw_head(q), reclaim_idx = q->cons_cnt & q->mask; + head != reclaim_idx && npkts < budget; head = txq_hw_head(q)) { + /* The HW head is continually updated, ensure we don't read + * descriptor state before the head tells us to reclaim it. 
+ * On the enqueue side the doorbell is an implicit write + * barrier. + */ + rmb(); + + do { + fun_xdp_unmap(q, reclaim_idx); + page_frag_free(q->info[reclaim_idx].vaddr); + + trace_funeth_tx_free(q, reclaim_idx, 1, head); + + reclaim_idx = (reclaim_idx + 1) & q->mask; + npkts++; + } while (reclaim_idx != head && npkts < budget); + } + + q->cons_cnt += npkts; + return npkts; +} + +bool fun_xdp_tx(struct funeth_txq *q, void *data, unsigned int len) +{ + struct fun_eth_tx_req *req; + struct fun_dataop_gl *gle; + unsigned int idx; + dma_addr_t dma; + + if (fun_txq_avail(q) < FUN_XDP_CLEAN_THRES) + fun_xdpq_clean(q, FUN_XDP_CLEAN_BATCH); + + if (!unlikely(fun_txq_avail(q))) { + FUN_QSTAT_INC(q, tx_xdp_full); + return false; + } + + dma = dma_map_single(q->dma_dev, data, len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(q->dma_dev, dma))) { + FUN_QSTAT_INC(q, tx_map_err); + return false; + } + + idx = q->prod_cnt & q->mask; + req = fun_tx_desc_addr(q, idx); + req->op = FUN_ETH_OP_TX; + req->len8 = (sizeof(*req) + sizeof(*gle)) / 8; + req->flags = 0; + req->suboff8 = offsetof(struct fun_eth_tx_req, dataop); + req->repr_idn = 0; + req->encap_proto = 0; + fun_eth_offload_init(&req->offload, 0, 0, 0, 0, 0, 0, 0, 0); + req->dataop = FUN_DATAOP_HDR_INIT(1, 0, 1, 0, len); + + gle = (struct fun_dataop_gl *)req->dataop.imm; + fun_dataop_gl_init(gle, 0, 0, len, dma); + + q->info[idx].vaddr = data; + + u64_stats_update_begin(&q->syncp); + q->stats.tx_bytes += len; + q->stats.tx_pkts++; + u64_stats_update_end(&q->syncp); + + trace_funeth_tx(q, len, idx, 1); + q->prod_cnt++; + + return true; +} + +int fun_xdp_xmit_frames(struct net_device *dev, int n, + struct xdp_frame **frames, u32 flags) +{ + struct funeth_priv *fp = netdev_priv(dev); + struct funeth_txq *q, **xdpqs; + int i, q_idx; + + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) + return -EINVAL; + + xdpqs = rcu_dereference_bh(fp->xdpqs); + if (unlikely(!xdpqs)) + return -ENETDOWN; + + q_idx = smp_processor_id(); + if (unlikely(q_idx >= fp->num_xdpqs)) + return -ENXIO; + + for (q = xdpqs[q_idx], i = 0; i < n; i++) { + const struct xdp_frame *xdpf = frames[i]; + + if (!fun_xdp_tx(q, xdpf->data, xdpf->len)) + break; + } + + if (unlikely(flags & XDP_XMIT_FLUSH)) + fun_txq_wr_db(q); + return i; +} + +/* Purge a Tx queue of any queued packets. Should be called once HW access + * to the packets has been revoked, e.g., after the queue has been disabled. + */ +static void fun_txq_purge(struct funeth_txq *q) +{ + while (q->cons_cnt != q->prod_cnt) { + unsigned int idx = q->cons_cnt & q->mask; + + q->cons_cnt += unmap_skb(q, idx); + dev_kfree_skb_any(q->info[idx].skb); + } + netdev_tx_reset_queue(q->ndq); +} + +static void fun_xdpq_purge(struct funeth_txq *q) +{ + while (q->cons_cnt != q->prod_cnt) { + unsigned int idx = q->cons_cnt & q->mask; + + fun_xdp_unmap(q, idx); + page_frag_free(q->info[idx].vaddr); + q->cons_cnt++; + } +} + +/* Create a Tx queue, allocating all the host resources needed. 
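+ * (@irq != NULL means an skb Tx queue allocated on the IRQ's NUMA
+ * node; @irq == NULL means an XDP Tx queue allocated on the node of
+ * CPU @qidx.)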
*/ +static struct funeth_txq *fun_txq_create_sw(struct net_device *dev, + unsigned int qidx, + unsigned int ndesc, + struct fun_irq *irq) +{ + struct funeth_priv *fp = netdev_priv(dev); + struct funeth_txq *q; + int numa_node; + + if (irq) + numa_node = fun_irq_node(irq); /* skb Tx queue */ + else + numa_node = cpu_to_node(qidx); /* XDP Tx queue */ + + q = kzalloc_node(sizeof(*q), GFP_KERNEL, numa_node); + if (!q) + goto err; + + q->dma_dev = &fp->pdev->dev; + q->desc = fun_alloc_ring_mem(q->dma_dev, ndesc, FUNETH_SQE_SIZE, + sizeof(*q->info), true, numa_node, + &q->dma_addr, (void **)&q->info, + &q->hw_wb); + if (!q->desc) + goto free_q; + + q->netdev = dev; + q->mask = ndesc - 1; + q->qidx = qidx; + q->numa_node = numa_node; + u64_stats_init(&q->syncp); + q->init_state = FUN_QSTATE_INIT_SW; + return q; + +free_q: + kfree(q); +err: + netdev_err(dev, "Can't allocate memory for %s queue %u\n", + irq ? "Tx" : "XDP", qidx); + return NULL; +} + +static void fun_txq_free_sw(struct funeth_txq *q) +{ + struct funeth_priv *fp = netdev_priv(q->netdev); + + fun_free_ring_mem(q->dma_dev, q->mask + 1, FUNETH_SQE_SIZE, true, + q->desc, q->dma_addr, q->info); + + fp->tx_packets += q->stats.tx_pkts; + fp->tx_bytes += q->stats.tx_bytes; + fp->tx_dropped += q->stats.tx_map_err; + + kfree(q); +} + +/* Allocate the device portion of a Tx queue. */ +int fun_txq_create_dev(struct funeth_txq *q, struct fun_irq *irq) +{ + struct funeth_priv *fp = netdev_priv(q->netdev); + unsigned int irq_idx, ndesc = q->mask + 1; + int err; + + q->irq = irq; + *q->hw_wb = 0; + q->prod_cnt = 0; + q->cons_cnt = 0; + irq_idx = irq ? irq->irq_idx : 0; + + err = fun_sq_create(fp->fdev, + FUN_ADMIN_EPSQ_CREATE_FLAG_HEAD_WB_ADDRESS | + FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR, 0, + FUN_HCI_ID_INVALID, ilog2(FUNETH_SQE_SIZE), ndesc, + q->dma_addr, fp->tx_coal_count, fp->tx_coal_usec, + irq_idx, 0, fp->fdev->kern_end_qid, 0, + &q->hw_qid, &q->db); + if (err) + goto out; + + err = fun_create_and_bind_tx(fp, q->hw_qid); + if (err < 0) + goto free_devq; + q->ethid = err; + + if (irq) { + irq->txq = q; + q->ndq = netdev_get_tx_queue(q->netdev, q->qidx); + q->irq_db_val = FUN_IRQ_SQ_DB(fp->tx_coal_usec, + fp->tx_coal_count); + writel(q->irq_db_val, q->db); + } + + q->init_state = FUN_QSTATE_INIT_FULL; + netif_info(fp, ifup, q->netdev, + "%s queue %u, depth %u, HW qid %u, IRQ idx %u, eth id %u, node %d\n", + irq ? "Tx" : "XDP", q->qidx, ndesc, q->hw_qid, irq_idx, + q->ethid, q->numa_node); + return 0; + +free_devq: + fun_destroy_sq(fp->fdev, q->hw_qid); +out: + netdev_err(q->netdev, + "Failed to create %s queue %u on device, error %d\n", + irq ? "Tx" : "XDP", q->qidx, err); + return err; +} + +static void fun_txq_free_dev(struct funeth_txq *q) +{ + struct funeth_priv *fp = netdev_priv(q->netdev); + + if (q->init_state < FUN_QSTATE_INIT_FULL) + return; + + netif_info(fp, ifdown, q->netdev, + "Freeing %s queue %u (id %u), IRQ %u, ethid %u\n", + q->irq ? "Tx" : "XDP", q->qidx, q->hw_qid, + q->irq ? q->irq->irq_idx : 0, q->ethid); + + fun_destroy_sq(fp->fdev, q->hw_qid); + fun_res_destroy(fp->fdev, FUN_ADMIN_OP_ETH, 0, q->ethid); + + if (q->irq) { + q->irq->txq = NULL; + fun_txq_purge(q); + } else { + fun_xdpq_purge(q); + } + + q->init_state = FUN_QSTATE_INIT_SW; +} + +/* Create or advance a Tx queue, allocating all the host and device resources + * needed to reach the target state. 
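+ *
+ * E.g., a target of FUN_QSTATE_INIT_SW allocates only host memory,
+ * while FUN_QSTATE_INIT_FULL also creates the SQ on the device and
+ * binds it to an ethid.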
+ */ +int funeth_txq_create(struct net_device *dev, unsigned int qidx, + unsigned int ndesc, struct fun_irq *irq, int state, + struct funeth_txq **qp) +{ + struct funeth_txq *q = *qp; + int err; + + if (!q) + q = fun_txq_create_sw(dev, qidx, ndesc, irq); + if (!q) + return -ENOMEM; + + if (q->init_state >= state) + goto out; + + err = fun_txq_create_dev(q, irq); + if (err) { + if (!*qp) + fun_txq_free_sw(q); + return err; + } + +out: + *qp = q; + return 0; +} + +/* Free Tx queue resources until it reaches the target state. + * The queue must be already disconnected from the stack. + */ +struct funeth_txq *funeth_txq_free(struct funeth_txq *q, int state) +{ + if (state < FUN_QSTATE_INIT_FULL) + fun_txq_free_dev(q); + + if (state == FUN_QSTATE_DESTROYED) { + fun_txq_free_sw(q); + q = NULL; + } + + return q; +} diff --git a/drivers/net/ethernet/fungible/funeth/funeth_txrx.h b/drivers/net/ethernet/fungible/funeth/funeth_txrx.h new file mode 100644 index 000000000000..04c9f91b7489 --- /dev/null +++ b/drivers/net/ethernet/fungible/funeth/funeth_txrx.h @@ -0,0 +1,264 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ + +#ifndef _FUNETH_TXRX_H +#define _FUNETH_TXRX_H + +#include <linux/netdevice.h> +#include <linux/u64_stats_sync.h> + +/* Tx descriptor size */ +#define FUNETH_SQE_SIZE 64U + +/* Size of device headers per Tx packet */ +#define FUNETH_FUNOS_HDR_SZ (sizeof(struct fun_eth_tx_req)) + +/* Number of gather list entries per Tx descriptor */ +#define FUNETH_GLE_PER_DESC (FUNETH_SQE_SIZE / sizeof(struct fun_dataop_gl)) + +/* Max gather list size in bytes for an sk_buff. */ +#define FUNETH_MAX_GL_SZ ((MAX_SKB_FRAGS + 1) * sizeof(struct fun_dataop_gl)) + +#if IS_ENABLED(CONFIG_TLS_DEVICE) +# define FUNETH_TLS_SZ sizeof(struct fun_eth_tls) +#else +# define FUNETH_TLS_SZ 0 +#endif + +/* Max number of Tx descriptors for an sk_buff using a gather list. */ +#define FUNETH_MAX_GL_DESC \ + DIV_ROUND_UP((FUNETH_FUNOS_HDR_SZ + FUNETH_MAX_GL_SZ + FUNETH_TLS_SZ), \ + FUNETH_SQE_SIZE) + +/* Max number of Tx descriptors for any packet. */ +#define FUNETH_MAX_PKT_DESC FUNETH_MAX_GL_DESC + +/* Rx CQ descriptor size. */ +#define FUNETH_CQE_SIZE 64U + +/* Offset of cqe_info within a CQE. */ +#define FUNETH_CQE_INFO_OFFSET (FUNETH_CQE_SIZE - sizeof(struct fun_cqe_info)) + +/* Construct the IRQ portion of a CQ doorbell. The resulting value arms the + * interrupt with the supplied time delay and packet count moderation settings. + */ +#define FUN_IRQ_CQ_DB(usec, pkts) \ + (FUN_DB_IRQ_ARM_F | ((usec) << FUN_DB_INTCOAL_USEC_S) | \ + ((pkts) << FUN_DB_INTCOAL_ENTRIES_S)) + +/* As above for SQ doorbells. */ +#define FUN_IRQ_SQ_DB(usec, pkts) \ + (FUN_DB_IRQ_ARM_F | \ + ((usec) << FUN_DB_INTCOAL_USEC_S) | \ + ((pkts) << FUN_DB_INTCOAL_ENTRIES_S)) + +/* Per packet tailroom. Present only for 1-frag packets. */ +#define FUN_RX_TAILROOM SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + +/* Per packet headroom for XDP. Preferred over XDP_PACKET_HEADROOM to + * accommodate two packets per buffer for 4K pages and 1500B MTUs. + */ +#define FUN_XDP_HEADROOM 192 + +/* Initialization state of a queue. */ +enum { + FUN_QSTATE_DESTROYED, /* what queue? */ + FUN_QSTATE_INIT_SW, /* exists in SW, not on the device */ + FUN_QSTATE_INIT_FULL, /* exists both in SW and on device */ +}; + +/* Initialization state of an interrupt. 
*/ +enum { + FUN_IRQ_INIT, /* initialized and in the XArray but inactive */ + FUN_IRQ_REQUESTED, /* request_irq() done */ + FUN_IRQ_ENABLED, /* processing enabled */ + FUN_IRQ_DISABLED, /* processing disabled */ +}; + +struct bpf_prog; + +struct funeth_txq_stats { /* per Tx queue SW counters */ + u64 tx_pkts; /* # of Tx packets */ + u64 tx_bytes; /* total bytes of Tx packets */ + u64 tx_cso; /* # of packets with checksum offload */ + u64 tx_tso; /* # of non-encapsulated TSO super-packets */ + u64 tx_encap_tso; /* # of encapsulated TSO super-packets */ + u64 tx_more; /* # of DBs elided due to xmit_more */ + u64 tx_nstops; /* # of times the queue has stopped */ + u64 tx_nrestarts; /* # of times the queue has restarted */ + u64 tx_map_err; /* # of packets dropped due to DMA mapping errors */ + u64 tx_xdp_full; /* # of XDP packets that could not be enqueued */ + u64 tx_tls_pkts; /* # of Tx TLS packets offloaded to HW */ + u64 tx_tls_bytes; /* Tx bytes of HW-handled TLS payload */ + u64 tx_tls_fallback; /* attempted Tx TLS offloads punted to SW */ + u64 tx_tls_drops; /* attempted Tx TLS offloads dropped */ +}; + +struct funeth_tx_info { /* per Tx descriptor state */ + union { + struct sk_buff *skb; /* associated packet */ + void *vaddr; /* start address for XDP */ + }; +}; + +struct funeth_txq { + /* RO cacheline of frequently accessed data */ + u32 mask; /* queue depth - 1 */ + u32 hw_qid; /* device ID of the queue */ + void *desc; /* base address of descriptor ring */ + struct funeth_tx_info *info; + struct device *dma_dev; /* device for DMA mappings */ + volatile __be64 *hw_wb; /* HW write-back location */ + u32 __iomem *db; /* SQ doorbell register address */ + struct netdev_queue *ndq; + dma_addr_t dma_addr; /* DMA address of descriptor ring */ + /* producer R/W cacheline */ + u16 qidx; /* queue index within net_device */ + u16 ethid; + u32 prod_cnt; /* producer counter */ + struct funeth_txq_stats stats; + /* shared R/W cacheline, primarily accessed by consumer */ + u32 irq_db_val; /* value written to IRQ doorbell */ + u32 cons_cnt; /* consumer (cleanup) counter */ + struct net_device *netdev; + struct fun_irq *irq; + int numa_node; + u8 init_state; /* queue initialization state */ + struct u64_stats_sync syncp; +}; + +struct funeth_rxq_stats { /* per Rx queue SW counters */ + u64 rx_pkts; /* # of received packets, including SW drops */ + u64 rx_bytes; /* total size of received packets */ + u64 rx_cso; /* # of packets with checksum offload */ + u64 rx_bufs; /* total # of Rx buffers provided to device */ + u64 gro_pkts; /* # of GRO superpackets */ + u64 gro_merged; /* # of pkts merged into existing GRO superpackets */ + u64 rx_page_alloc; /* # of page allocations for Rx buffers */ + u64 rx_budget; /* NAPI iterations that exhausted their budget */ + u64 rx_mem_drops; /* # of packets dropped due to memory shortage */ + u64 rx_map_err; /* # of page DMA mapping errors */ + u64 xdp_drops; /* XDP_DROPped packets */ + u64 xdp_tx; /* successful XDP transmits */ + u64 xdp_redir; /* successful XDP redirects */ + u64 xdp_err; /* packets dropped due to XDP errors */ +}; + +struct funeth_rxbuf { /* per Rx buffer state */ + struct page *page; /* associated page */ + dma_addr_t dma_addr; /* DMA address of page start */ + int pg_refs; /* page refs held by driver */ + int node; /* page node, or -1 if it is PF_MEMALLOC */ +}; + +struct funeth_rx_cache { /* cache of DMA-mapped previously used buffers */ + struct funeth_rxbuf *bufs; /* base of Rx buffer state ring */ + unsigned int prod_cnt; /* producer 
counter */ + unsigned int cons_cnt; /* consumer counter */ + unsigned int mask; /* depth - 1 */ +}; + +/* An Rx queue consists of a CQ and an SQ used to provide Rx buffers. */ +struct funeth_rxq { + struct net_device *netdev; + struct napi_struct *napi; + struct device *dma_dev; /* device for DMA mappings */ + void *cqes; /* base of CQ descriptor ring */ + const void *next_cqe_info; /* fun_cqe_info of next CQE */ + u32 __iomem *cq_db; /* CQ doorbell register address */ + unsigned int cq_head; /* CQ head index */ + unsigned int cq_mask; /* CQ depth - 1 */ + u16 phase; /* CQ phase tag */ + u16 qidx; /* queue index within net_device */ + unsigned int irq_db_val; /* IRQ info for CQ doorbell */ + struct fun_eprq_rqbuf *rqes; /* base of RQ descriptor ring */ + struct funeth_rxbuf *bufs; /* base of Rx buffer state ring */ + struct funeth_rxbuf *cur_buf; /* currently active buffer */ + u32 __iomem *rq_db; /* RQ doorbell register address */ + unsigned int rq_cons; /* RQ consumer counter */ + unsigned int rq_mask; /* RQ depth - 1 */ + unsigned int buf_offset; /* offset of next pkt in head buffer */ + u8 xdp_flush; /* XDP flush types needed at NAPI end */ + u8 init_state; /* queue initialization state */ + u16 headroom; /* per packet headroom */ + unsigned int rq_cons_db; /* value of rq_cons at last RQ db */ + unsigned int rq_db_thres; /* # of new buffers needed to write RQ db */ + struct funeth_rxbuf spare_buf; /* spare for next buffer replacement */ + struct funeth_rx_cache cache; /* used buffer cache */ + struct bpf_prog *xdp_prog; /* optional XDP BPF program */ + struct funeth_rxq_stats stats; + dma_addr_t cq_dma_addr; /* DMA address of CQE ring */ + dma_addr_t rq_dma_addr; /* DMA address of RQE ring */ + u16 irq_cnt; + u32 hw_cqid; /* device ID of the queue's CQ */ + u32 hw_sqid; /* device ID of the queue's SQ */ + int numa_node; + struct u64_stats_sync syncp; + struct xdp_rxq_info xdp_rxq; +}; + +#define FUN_QSTAT_INC(q, counter) \ + do { \ + u64_stats_update_begin(&(q)->syncp); \ + (q)->stats.counter++; \ + u64_stats_update_end(&(q)->syncp); \ + } while (0) + +#define FUN_QSTAT_READ(q, seq, stats_copy) \ + do { \ + seq = u64_stats_fetch_begin(&(q)->syncp); \ + stats_copy = (q)->stats; \ + } while (u64_stats_fetch_retry(&(q)->syncp, (seq))) + +#define FUN_INT_NAME_LEN (IFNAMSIZ + 16) + +struct fun_irq { + struct napi_struct napi; + struct funeth_txq *txq; + struct funeth_rxq *rxq; + u8 state; + u16 irq_idx; /* index of MSI-X interrupt */ + int irq; /* Linux IRQ vector */ + cpumask_t affinity_mask; /* IRQ affinity */ + struct irq_affinity_notify aff_notify; + char name[FUN_INT_NAME_LEN]; +} ____cacheline_internodealigned_in_smp; + +/* Return the start address of the idx-th Tx descriptor. 
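[Illustrative aside, not part of the patch: a sketch of how a reader would consume the FUN_QSTAT_READ() macro above. The helper name is hypothetical and assumes a file that includes this header; the u64_stats seqcount loop simply re-copies the counters until a snapshot completes without racing a writer:]

static u64 txq_pkts_snapshot(const struct funeth_txq *q)
{
	struct funeth_txq_stats stats;
	unsigned int seq;

	FUN_QSTAT_READ(q, seq, stats);	/* retries internally until stable */
	return stats.tx_pkts;
}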
*/ +static inline void *fun_tx_desc_addr(const struct funeth_txq *q, + unsigned int idx) +{ + return q->desc + idx * FUNETH_SQE_SIZE; +} + +static inline void fun_txq_wr_db(const struct funeth_txq *q) +{ + unsigned int tail = q->prod_cnt & q->mask; + + writel(tail, q->db); +} + +static inline int fun_irq_node(const struct fun_irq *p) +{ + return cpu_to_mem(cpumask_first(&p->affinity_mask)); +} + +int fun_rxq_napi_poll(struct napi_struct *napi, int budget); +int fun_txq_napi_poll(struct napi_struct *napi, int budget); +netdev_tx_t fun_start_xmit(struct sk_buff *skb, struct net_device *netdev); +bool fun_xdp_tx(struct funeth_txq *q, void *data, unsigned int len); +int fun_xdp_xmit_frames(struct net_device *dev, int n, + struct xdp_frame **frames, u32 flags); + +int funeth_txq_create(struct net_device *dev, unsigned int qidx, + unsigned int ndesc, struct fun_irq *irq, int state, + struct funeth_txq **qp); +int fun_txq_create_dev(struct funeth_txq *q, struct fun_irq *irq); +struct funeth_txq *funeth_txq_free(struct funeth_txq *q, int state); +int funeth_rxq_create(struct net_device *dev, unsigned int qidx, + unsigned int ncqe, unsigned int nrqe, struct fun_irq *irq, + int state, struct funeth_rxq **qp); +int fun_rxq_create_dev(struct funeth_rxq *q, struct fun_irq *irq); +struct funeth_rxq *funeth_rxq_free(struct funeth_rxq *q, int state); +int fun_rxq_set_bpf(struct funeth_rxq *q, struct bpf_prog *prog); + +#endif /* _FUNETH_TXRX_H */ diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c index 54e51c8221b8..6cafee55efc3 100644 --- a/drivers/net/ethernet/google/gve/gve_main.c +++ b/drivers/net/ethernet/google/gve/gve_main.c @@ -857,8 +857,7 @@ static int gve_alloc_qpls(struct gve_priv *priv) int i, j; int err; - /* Raw addressing means no QPLs */ - if (priv->queue_format == GVE_GQI_RDA_FORMAT) + if (num_qpls == 0) return 0; priv->qpls = kvcalloc(num_qpls, sizeof(*priv->qpls), GFP_KERNEL); @@ -901,8 +900,7 @@ static void gve_free_qpls(struct gve_priv *priv) int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv); int i; - /* Raw addressing means no QPLs */ - if (priv->queue_format == GVE_GQI_RDA_FORMAT) + if (num_qpls == 0) return; kvfree(priv->qpl_cfg.qpl_id_map); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index f7f5a4b09068..fc92ae385e30 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -1068,11 +1068,6 @@ static inline int hclge_get_queue_id(struct hnae3_queue *queue) return tqp->index; } -static inline bool hclge_is_reset_pending(struct hclge_dev *hdev) -{ - return !!hdev->reset_pending; -} - int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport); int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex); int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto, diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 29617a86b299..869a4fe17c7c 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -60,6 +60,7 @@ #include <asm/hvcall.h> #include <linux/atomic.h> #include <asm/vio.h> +#include <asm/xive.h> #include <asm/iommu.h> #include <linux/uaccess.h> #include <asm/firmware.h> @@ -2213,6 +2214,19 @@ static const char *reset_reason_to_string(enum ibmvnic_reset_reason reason) } /* + * Initialize the init_done completion and return code values. 
We + * can get a transport event just after registering the CRQ and the + * tasklet will use this to communicate the transport event. To ensure + * we don't miss the notification/error, initialize these _before_ + * registering the CRQ. + */ +static inline void reinit_init_done(struct ibmvnic_adapter *adapter) +{ + reinit_completion(&adapter->init_done); + adapter->init_done_rc = 0; +} + +/* * do_reset returns zero if we are able to keep processing reset events, or * non-zero if we hit a fatal error and must halt. */ @@ -2318,6 +2332,8 @@ static int do_reset(struct ibmvnic_adapter *adapter, */ adapter->state = VNIC_PROBED; + reinit_init_done(adapter); + if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) { rc = init_crq_queue(adapter); } else if (adapter->reset_reason == VNIC_RESET_MOBILITY) { @@ -2461,7 +2477,8 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter, */ adapter->state = VNIC_PROBED; - reinit_completion(&adapter->init_done); + reinit_init_done(adapter); + rc = init_crq_queue(adapter); if (rc) { netdev_err(adapter->netdev, @@ -2602,23 +2619,82 @@ out: static void __ibmvnic_reset(struct work_struct *work) { struct ibmvnic_adapter *adapter; - bool saved_state = false; + unsigned int timeout = 5000; struct ibmvnic_rwi *tmprwi; + bool saved_state = false; struct ibmvnic_rwi *rwi; unsigned long flags; - u32 reset_state; + struct device *dev; + bool need_reset; int num_fails = 0; + u32 reset_state; int rc = 0; adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset); + dev = &adapter->vdev->dev; - if (test_and_set_bit_lock(0, &adapter->resetting)) { + /* Wait for ibmvnic_probe() to complete. If probe is taking too long + * or if another reset is in progress, defer work for now. If probe + * eventually fails it will flush and terminate our work. + * + * Three possibilities here: + * 1. Adapter being removed - just return + * 2. Timed out on probe or another reset in progress - delay the work + * 3. Completed probe - perform any resets in queue + */ + if (adapter->state == VNIC_PROBING && + !wait_for_completion_timeout(&adapter->probe_done, timeout)) { + dev_err(dev, "Reset thread timed out on probe"); queue_delayed_work(system_long_wq, &adapter->ibmvnic_delayed_reset, IBMVNIC_RESET_DELAY); return; } + /* adapter is done with probe (i.e. state is never VNIC_PROBING now) */ + if (adapter->state == VNIC_REMOVING) + return; + + /* ->rwi_list is stable now (no one else is removing entries) */ + + /* ibmvnic_probe() may have purged the reset queue after we were + * scheduled to process a reset so there may be no resets to process. + * Before setting the ->resetting bit though, we have to make sure + * that there is in fact a reset to process. Otherwise we may race + * with ibmvnic_open() and end up leaving the vnic down: + * + * __ibmvnic_reset() ibmvnic_open() + * ----------------- -------------- + * + * set ->resetting bit + * find ->resetting bit is set + * set ->state to VNIC_OPEN (i.e. + * assume reset will open device) + * return + * find reset queue empty + * return + * + * Neither performed vnic login/open and vnic stays down + * + * If we hold the lock and conditionally set the bit, either we + * or ibmvnic_open() will complete the open.
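[Illustrative aside, not part of the patch: the fix the comment above argues for, reduced to its bare shape in a hypothetical helper — the queue check and the ->resetting bit acquisition happen under one lock, so the interleaving in the diagram cannot occur:]

static bool claim_reset(struct ibmvnic_adapter *adapter)
{
	bool need_reset = false;

	spin_lock(&adapter->rwi_lock);
	if (!list_empty(&adapter->rwi_list) &&
	    !test_and_set_bit_lock(0, &adapter->resetting))
		need_reset = true;	/* this thread now owns the reset */
	spin_unlock(&adapter->rwi_lock);

	return need_reset;
}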
+ */ + need_reset = false; + spin_lock(&adapter->rwi_lock); + if (!list_empty(&adapter->rwi_list)) { + if (test_and_set_bit_lock(0, &adapter->resetting)) { + queue_delayed_work(system_long_wq, + &adapter->ibmvnic_delayed_reset, + IBMVNIC_RESET_DELAY); + } else { + need_reset = true; + } + } + spin_unlock(&adapter->rwi_lock); + + if (!need_reset) + return; + rwi = get_next_rwi(adapter); while (rwi) { spin_lock_irqsave(&adapter->state_lock, flags); @@ -2735,12 +2811,23 @@ static void __ibmvnic_delayed_reset(struct work_struct *work) __ibmvnic_reset(&adapter->ibmvnic_reset); } +static void flush_reset_queue(struct ibmvnic_adapter *adapter) +{ + struct list_head *entry, *tmp_entry; + + if (!list_empty(&adapter->rwi_list)) { + list_for_each_safe(entry, tmp_entry, &adapter->rwi_list) { + list_del(entry); + kfree(list_entry(entry, struct ibmvnic_rwi, list)); + } + } +} + static int ibmvnic_reset(struct ibmvnic_adapter *adapter, enum ibmvnic_reset_reason reason) { - struct list_head *entry, *tmp_entry; - struct ibmvnic_rwi *rwi, *tmp; struct net_device *netdev = adapter->netdev; + struct ibmvnic_rwi *rwi, *tmp; unsigned long flags; int ret; @@ -2759,13 +2846,6 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter, goto err; } - if (adapter->state == VNIC_PROBING) { - netdev_warn(netdev, "Adapter reset during probe\n"); - adapter->init_done_rc = -EAGAIN; - ret = EAGAIN; - goto err; - } - list_for_each_entry(tmp, &adapter->rwi_list, list) { if (tmp->reset_reason == reason) { netdev_dbg(netdev, "Skipping matching reset, reason=%s\n", @@ -2783,10 +2863,9 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter, /* if we just received a transport event, * flush reset queue and process this reset */ - if (adapter->force_reset_recovery && !list_empty(&adapter->rwi_list)) { - list_for_each_safe(entry, tmp_entry, &adapter->rwi_list) - list_del(entry); - } + if (adapter->force_reset_recovery) + flush_reset_queue(adapter); + rwi->reset_reason = reason; list_add_tail(&rwi->list, &adapter->rwi_list); netdev_dbg(adapter->netdev, "Scheduling reset (reason %s)\n", @@ -3562,6 +3641,30 @@ static int disable_scrq_irq(struct ibmvnic_adapter *adapter, return rc; } +/* We can not use the IRQ chip EOI handler because that has the + * unintended effect of changing the interrupt priority. + */ +static void ibmvnic_xics_eoi(struct device *dev, struct ibmvnic_sub_crq_queue *scrq) +{ + u64 val = 0xff000000 | scrq->hw_irq; + unsigned long rc; + + rc = plpar_hcall_norets(H_EOI, val); + if (rc) + dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n", val, rc); +} + +/* Due to a firmware bug, the hypervisor can send an interrupt to a + * transmit or receive queue just prior to a partition migration. + * Force an EOI after migration. + */ +static void ibmvnic_clear_pending_interrupt(struct device *dev, + struct ibmvnic_sub_crq_queue *scrq) +{ + if (!xive_enabled()) + ibmvnic_xics_eoi(dev, scrq); +} + static int enable_scrq_irq(struct ibmvnic_adapter *adapter, struct ibmvnic_sub_crq_queue *scrq) { @@ -3575,15 +3678,7 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter, if (test_bit(0, &adapter->resetting) && adapter->reset_reason == VNIC_RESET_MOBILITY) { - u64 val = (0xff000000) | scrq->hw_irq; - - rc = plpar_hcall_norets(H_EOI, val); - /* H_EOI would fail with rc = H_FUNCTION when running - * in XIVE mode which is expected, but not an error. - */ - if (rc && (rc != H_FUNCTION)) - dev_err(dev, "H_EOI FAILED irq 0x%llx. 
rc=%ld\n", - val, rc); + ibmvnic_clear_pending_interrupt(dev, scrq); } rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, @@ -5321,9 +5416,9 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq, } if (!completion_done(&adapter->init_done)) { - complete(&adapter->init_done); if (!adapter->init_done_rc) adapter->init_done_rc = -EAGAIN; + complete(&adapter->init_done); } break; @@ -5346,6 +5441,13 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq, adapter->fw_done_rc = -EIO; complete(&adapter->fw_done); } + + /* if we got here during crq-init, retry crq-init */ + if (!completion_done(&adapter->init_done)) { + adapter->init_done_rc = -EAGAIN; + complete(&adapter->init_done); + } + if (!completion_done(&adapter->stats_done)) complete(&adapter->stats_done); if (test_bit(0, &adapter->resetting)) @@ -5662,10 +5764,6 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset) adapter->from_passive_init = false; - if (reset) - reinit_completion(&adapter->init_done); - - adapter->init_done_rc = 0; rc = ibmvnic_send_crq_init(adapter); if (rc) { dev_err(dev, "Send crq init failed with error %d\n", rc); @@ -5679,12 +5777,14 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset) if (adapter->init_done_rc) { release_crq_queue(adapter); + dev_err(dev, "CRQ-init failed, %d\n", adapter->init_done_rc); return adapter->init_done_rc; } if (adapter->from_passive_init) { adapter->state = VNIC_OPEN; adapter->from_passive_init = false; + dev_err(dev, "CRQ-init failed, passive-init\n"); return -EINVAL; } @@ -5724,6 +5824,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) struct ibmvnic_adapter *adapter; struct net_device *netdev; unsigned char *mac_addr_p; + unsigned long flags; bool init_success; int rc; @@ -5768,6 +5869,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) spin_lock_init(&adapter->rwi_lock); spin_lock_init(&adapter->state_lock); mutex_init(&adapter->fw_lock); + init_completion(&adapter->probe_done); init_completion(&adapter->init_done); init_completion(&adapter->fw_done); init_completion(&adapter->reset_done); @@ -5778,6 +5880,33 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) init_success = false; do { + reinit_init_done(adapter); + + /* clear any failovers we got in the previous pass + * since we are reinitializing the CRQ + */ + adapter->failover_pending = false; + + /* If we had already initialized CRQ, we may have one or + * more resets queued already. Discard those and release + * the CRQ before initializing the CRQ again. + */ + release_crq_queue(adapter); + + /* Since we are still in PROBING state, __ibmvnic_reset() + * will not access the ->rwi_list and since we released CRQ, + * we won't get _new_ transport events. But there maybe an + * ongoing ibmvnic_reset() call. So serialize access to + * rwi_list. If we win the race, ibvmnic_reset() could add + * a reset after we purged but thats ok - we just may end + * up with an extra reset (i.e similar to having two or more + * resets in the queue at once). + * CHECK. + */ + spin_lock_irqsave(&adapter->rwi_lock, flags); + flush_reset_queue(adapter); + spin_unlock_irqrestore(&adapter->rwi_lock, flags); + rc = init_crq_queue(adapter); if (rc) { dev_err(&dev->dev, "Couldn't initialize crq. 
rc=%d\n", @@ -5809,12 +5938,6 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) goto ibmvnic_dev_file_err; netif_carrier_off(netdev); - rc = register_netdev(netdev); - if (rc) { - dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc); - goto ibmvnic_register_fail; - } - dev_info(&dev->dev, "ibmvnic registered\n"); if (init_success) { adapter->state = VNIC_PROBED; @@ -5827,6 +5950,16 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) adapter->wait_for_reset = false; adapter->last_reset_time = jiffies; + + rc = register_netdev(netdev); + if (rc) { + dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc); + goto ibmvnic_register_fail; + } + dev_info(&dev->dev, "ibmvnic registered\n"); + + complete(&adapter->probe_done); + return 0; ibmvnic_register_fail: @@ -5841,6 +5974,17 @@ ibmvnic_stats_fail: ibmvnic_init_fail: release_sub_crqs(adapter, 1); release_crq_queue(adapter); + + /* cleanup worker thread after releasing CRQ so we don't get + * transport events (i.e new work items for the worker thread). + */ + adapter->state = VNIC_REMOVING; + complete(&adapter->probe_done); + flush_work(&adapter->ibmvnic_reset); + flush_delayed_work(&adapter->ibmvnic_delayed_reset); + + flush_reset_queue(adapter); + mutex_destroy(&adapter->fw_lock); free_netdev(netdev); @@ -5917,10 +6061,14 @@ static ssize_t failover_store(struct device *dev, struct device_attribute *attr, be64_to_cpu(session_token)); rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, H_SESSION_ERR_DETECTED, session_token, 0, 0); - if (rc) + if (rc) { netdev_err(netdev, "H_VIOCTL initiated failover failed, rc %ld\n", rc); + goto last_resort; + } + + return count; last_resort: netdev_dbg(netdev, "Trying to send CRQ_CMD, the last resort\n"); diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index 4a7a56ff74ce..fa2d607a7b1b 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -930,6 +930,7 @@ struct ibmvnic_adapter { struct ibmvnic_tx_pool *tx_pool; struct ibmvnic_tx_pool *tso_pool; + struct completion probe_done; struct completion init_done; int init_done_rc; diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h index bcf680e83811..13382df2f2ef 100644 --- a/drivers/net/ethernet/intel/e1000e/hw.h +++ b/drivers/net/ethernet/intel/e1000e/hw.h @@ -630,6 +630,7 @@ struct e1000_phy_info { bool disable_polarity_correction; bool is_mdix; bool polarity_correction; + bool reset_disable; bool speed_downgraded; bool autoneg_wait_to_complete; }; diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index c908c84b86d2..d60e2016d03c 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -2050,6 +2050,10 @@ static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw) bool blocked = false; int i = 0; + /* Check the PHY (LCD) reset flag */ + if (hw->phy.reset_disable) + return true; + while ((blocked = !(er32(FWSM) & E1000_ICH_FWSM_RSPCIPHY)) && (i++ < 30)) usleep_range(10000, 11000); @@ -4136,9 +4140,9 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw) return ret_val; if (!(data & valid_csum_mask)) { - e_dbg("NVM Checksum Invalid\n"); + e_dbg("NVM Checksum valid bit not set\n"); - if (hw->mac.type < e1000_pch_cnp) { + if (hw->mac.type < e1000_pch_tgp) { data |= valid_csum_mask; ret_val = e1000_write_nvm(hw, word, 1, &data); if (ret_val) diff 
--git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h index 2504b11c3169..638a3ddd7ada 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.h +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h @@ -271,6 +271,7 @@ #define I217_CGFREG_ENABLE_MTA_RESET 0x0002 #define I217_MEMPWR PHY_REG(772, 26) #define I217_MEMPWR_DISABLE_SMB_RELEASE 0x0010 +#define I217_MEMPWR_MOEM 0x1000 /* Receive Address Initial CRC Calculation */ #define E1000_PCH_RAICC(_n) (0x05F50 + ((_n) * 4)) diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 6fb3437f68e0..fa06f68c8c80 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -6987,8 +6987,21 @@ static __maybe_unused int e1000e_pm_suspend(struct device *dev) struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev)); struct e1000_adapter *adapter = netdev_priv(netdev); struct pci_dev *pdev = to_pci_dev(dev); + struct e1000_hw *hw = &adapter->hw; + u16 phy_data; int rc; + if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID && + hw->mac.type >= e1000_pch_adp) { + /* Mask OEM Bits / Gig Disable / Restart AN (772_26[12] = 1) */ + e1e_rphy(hw, I217_MEMPWR, &phy_data); + phy_data |= I217_MEMPWR_MOEM; + e1e_wphy(hw, I217_MEMPWR, phy_data); + + /* Disable LCD reset */ + hw->phy.reset_disable = true; + } + e1000e_flush_lpic(pdev); e1000e_pm_freeze(dev); @@ -7010,6 +7023,8 @@ static __maybe_unused int e1000e_pm_resume(struct device *dev) struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev)); struct e1000_adapter *adapter = netdev_priv(netdev); struct pci_dev *pdev = to_pci_dev(dev); + struct e1000_hw *hw = &adapter->hw; + u16 phy_data; int rc; /* Introduce S0ix implementation */ @@ -7020,6 +7035,17 @@ static __maybe_unused int e1000e_pm_resume(struct device *dev) if (rc) return rc; + if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID && + hw->mac.type >= e1000_pch_adp) { + /* Unmask OEM Bits / Gig Disable / Restart AN 772_26[12] = 0 */ + e1e_rphy(hw, I217_MEMPWR, &phy_data); + phy_data &= ~I217_MEMPWR_MOEM; + e1e_wphy(hw, I217_MEMPWR, phy_data); + + /* Enable LCD reset */ + hw->phy.reset_disable = false; + } + return e1000e_pm_thaw(dev); } diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c index 0f0efee5fc8e..fd07c3679bb1 100644 --- a/drivers/net/ethernet/intel/e1000e/phy.c +++ b/drivers/net/ethernet/intel/e1000e/phy.c @@ -146,11 +146,11 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) break; } if (!(mdic & E1000_MDIC_READY)) { - e_dbg("MDI Read did not complete\n"); + e_dbg("MDI Read PHY Reg Address %d did not complete\n", offset); return -E1000_ERR_PHY; } if (mdic & E1000_MDIC_ERROR) { - e_dbg("MDI Error\n"); + e_dbg("MDI Read PHY Reg Address %d Error\n", offset); return -E1000_ERR_PHY; } if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) { @@ -210,11 +210,11 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) break; } if (!(mdic & E1000_MDIC_READY)) { - e_dbg("MDI Write did not complete\n"); + e_dbg("MDI Write PHY Reg Address %d did not complete\n", offset); return -E1000_ERR_PHY; } if (mdic & E1000_MDIC_ERROR) { - e_dbg("MDI Error\n"); + e_dbg("MDI Write PHY Reg Address %d Error\n", offset); return -E1000_ERR_PHY; } if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) { diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 
90fff05fbd2b..be7c6f34d45c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -741,10 +741,8 @@ static void i40e_dbg_dump_vf(struct i40e_pf *pf, int vf_id) vsi = pf->vsi[vf->lan_vsi_idx]; dev_info(&pf->pdev->dev, "vf %2d: VSI id=%d, seid=%d, qps=%d\n", vf_id, vf->lan_vsi_id, vsi->seid, vf->num_queue_pairs); - dev_info(&pf->pdev->dev, " num MDD=%lld, invalid msg=%lld, valid msg=%lld\n", - vf->num_mdd_events, - vf->num_invalid_msgs, - vf->num_valid_msgs); + dev_info(&pf->pdev->dev, " num MDD=%lld\n", + vf->num_mdd_events); } else { dev_info(&pf->pdev->dev, "invalid VF id %d\n", vf_id); } diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 9b7ce6d9a92b..6778df2177a1 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -5386,15 +5386,7 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc, /* There is no need to reset BW when mqprio mode is on. */ if (pf->flags & I40E_FLAG_TC_MQPRIO) return 0; - - if (!vsi->mqprio_qopt.qopt.hw) { - if (pf->flags & I40E_FLAG_DCB_ENABLED) - goto skip_reset; - - if (IS_ENABLED(CONFIG_I40E_DCB) && - i40e_dcb_hw_get_num_tc(&pf->hw) == 1) - goto skip_reset; - + if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) { ret = i40e_set_bw_limit(vsi, vsi->seid, 0); if (ret) dev_info(&pf->pdev->dev, @@ -5402,8 +5394,6 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc, vsi->seid); return ret; } - -skip_reset: memset(&bw_data, 0, sizeof(bw_data)); bw_data.tc_valid_bits = enabled_tc; for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index dfdb6e786461..2606e8f0f19b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1917,19 +1917,17 @@ sriov_configure_out: /***********************virtual channel routines******************/ /** - * i40e_vc_send_msg_to_vf_ex + * i40e_vc_send_msg_to_vf * @vf: pointer to the VF info * @v_opcode: virtual channel opcode * @v_retval: virtual channel return value * @msg: pointer to the msg buffer * @msglen: msg length - * @is_quiet: true for not printing unsuccessful return values, false otherwise * * send msg to VF **/ -static int i40e_vc_send_msg_to_vf_ex(struct i40e_vf *vf, u32 v_opcode, - u32 v_retval, u8 *msg, u16 msglen, - bool is_quiet) +static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, + u32 v_retval, u8 *msg, u16 msglen) { struct i40e_pf *pf; struct i40e_hw *hw; @@ -1944,25 +1942,6 @@ static int i40e_vc_send_msg_to_vf_ex(struct i40e_vf *vf, u32 v_opcode, hw = &pf->hw; abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; - /* single place to detect unsuccessful return values */ - if (v_retval && !is_quiet) { - vf->num_invalid_msgs++; - dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n", - vf->vf_id, v_opcode, v_retval); - if (vf->num_invalid_msgs > - I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) { - dev_err(&pf->pdev->dev, - "Number of invalid messages exceeded for VF %d\n", - vf->vf_id); - dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n"); - set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states); - } - } else { - vf->num_valid_msgs++; - /* reset the invalid counter, if a valid message is received. 
*/ - vf->num_invalid_msgs = 0; - } - aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval, msg, msglen, NULL); if (aq_ret) { @@ -1976,23 +1955,6 @@ static int i40e_vc_send_msg_to_vf_ex(struct i40e_vf *vf, u32 v_opcode, } /** - * i40e_vc_send_msg_to_vf - * @vf: pointer to the VF info - * @v_opcode: virtual channel opcode - * @v_retval: virtual channel return value - * @msg: pointer to the msg buffer - * @msglen: msg length - * - * send msg to VF - **/ -static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, - u32 v_retval, u8 *msg, u16 msglen) -{ - return i40e_vc_send_msg_to_vf_ex(vf, v_opcode, v_retval, - msg, msglen, false); -} - -/** * i40e_vc_send_resp_to_vf * @vf: pointer to the VF info * @opcode: operation code @@ -2822,7 +2784,6 @@ error_param: * i40e_check_vf_permission * @vf: pointer to the VF info * @al: MAC address list from virtchnl - * @is_quiet: set true for printing msg without opcode info, false otherwise * * Check that the given list of MAC addresses is allowed. Will return -EPERM * if any address in the list is not valid. Checks the following conditions: @@ -2837,8 +2798,7 @@ error_param: * addresses might not be accurate. **/ static inline int i40e_check_vf_permission(struct i40e_vf *vf, - struct virtchnl_ether_addr_list *al, - bool *is_quiet) + struct virtchnl_ether_addr_list *al) { struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx]; @@ -2846,7 +2806,6 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf, int mac2add_cnt = 0; int i; - *is_quiet = false; for (i = 0; i < al->num_elements; i++) { struct i40e_mac_filter *f; u8 *addr = al->list[i].addr; @@ -2870,7 +2829,6 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf, !ether_addr_equal(addr, vf->default_lan_addr.addr)) { dev_err(&pf->pdev->dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n"); - *is_quiet = true; return -EPERM; } @@ -2921,7 +2879,6 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg) (struct virtchnl_ether_addr_list *)msg; struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = NULL; - bool is_quiet = false; i40e_status ret = 0; int i; @@ -2938,7 +2895,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg) */ spin_lock_bh(&vsi->mac_filter_hash_lock); - ret = i40e_check_vf_permission(vf, al, &is_quiet); + ret = i40e_check_vf_permission(vf, al); if (ret) { spin_unlock_bh(&vsi->mac_filter_hash_lock); goto error_param; @@ -2976,8 +2933,8 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg) error_param: /* send the response to the VF */ - return i40e_vc_send_msg_to_vf_ex(vf, VIRTCHNL_OP_ADD_ETH_ADDR, - ret, NULL, 0, is_quiet); + return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR, + ret, NULL, 0); } /** diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index 03c42fd0fea1..a554d0a0b09b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -10,8 +10,6 @@ #define I40E_VIRTCHNL_SUPPORTED_QTYPES 2 -#define I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED 10 - #define I40E_VLAN_PRIORITY_SHIFT 13 #define I40E_VLAN_MASK 0xFFF #define I40E_PRIORITY_MASK 0xE000 @@ -92,9 +90,6 @@ struct i40e_vf { u8 num_queue_pairs; /* num of qps assigned to VF vsis */ u8 num_req_queues; /* num of requested qps */ u64 num_mdd_events; /* num of mdd events detected */ - /* num of continuous malformed or invalid msgs 
detected */ - u64 num_invalid_msgs; - u64 num_valid_msgs; /* num of valid msgs detected */ unsigned long vf_caps; /* vf's adv. capabilities */ unsigned long vf_states; /* vf's runtime states */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c index 5a997b0d07d8..c1d25b0b0ca2 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c @@ -218,7 +218,6 @@ bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count) ntu += nb_buffs; if (ntu == rx_ring->count) { rx_desc = I40E_RX_DESC(rx_ring, 0); - xdp = i40e_rx_bi(rx_ring, 0); ntu = 0; } @@ -328,11 +327,11 @@ static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring, int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget) { unsigned int total_rx_bytes = 0, total_rx_packets = 0; - u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); u16 next_to_clean = rx_ring->next_to_clean; u16 count_mask = rx_ring->count - 1; unsigned int xdp_res, xdp_xmit = 0; bool failure = false; + u16 cleaned_count; while (likely(total_rx_packets < (unsigned int)budget)) { union i40e_rx_desc *rx_desc; diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h index 59806d1f7e79..49aed3e506a6 100644 --- a/drivers/net/ethernet/intel/iavf/iavf.h +++ b/drivers/net/ethernet/intel/iavf/iavf.h @@ -44,6 +44,9 @@ #define DEFAULT_DEBUG_LEVEL_SHIFT 3 #define PFX "iavf: " +int iavf_status_to_errno(enum iavf_status status); +int virtchnl_status_to_errno(enum virtchnl_status_code v_status); + /* VSI state flags shared with common code */ enum iavf_vsi_state_t { __IAVF_VSI_DOWN, @@ -188,7 +191,7 @@ enum iavf_state_t { __IAVF_REMOVE, /* driver is being unloaded */ __IAVF_INIT_VERSION_CHECK, /* aq msg sent, awaiting reply */ __IAVF_INIT_GET_RESOURCES, /* aq msg sent, awaiting reply */ - __IAVF_INIT_GET_OFFLOAD_VLAN_V2_CAPS, + __IAVF_INIT_EXTENDED_CAPS, /* process extended caps which require aq msg exchange */ __IAVF_INIT_CONFIG_ADAPTER, __IAVF_INIT_SW, /* got resources, setting up structs */ __IAVF_INIT_FAILED, /* init failed, restarting procedure */ @@ -201,6 +204,10 @@ enum iavf_state_t { __IAVF_RUNNING, /* opened, working */ }; +enum iavf_critical_section_t { + __IAVF_IN_REMOVE_TASK, /* device being removed */ +}; + #define IAVF_CLOUD_FIELD_OMAC 0x01 #define IAVF_CLOUD_FIELD_IMAC 0x02 #define IAVF_CLOUD_FIELD_IVLAN 0x04 @@ -246,7 +253,6 @@ struct iavf_adapter { struct list_head mac_filter_list; struct mutex crit_lock; struct mutex client_lock; - struct mutex remove_lock; /* Lock to protect accesses to MAC and VLAN lists */ spinlock_t mac_vlan_list_lock; char misc_vector_name[IFNAMSIZ + 9]; @@ -284,6 +290,8 @@ struct iavf_adapter { #define IAVF_FLAG_LEGACY_RX BIT(15) #define IAVF_FLAG_REINIT_ITR_NEEDED BIT(16) #define IAVF_FLAG_QUEUES_DISABLED BIT(17) +#define IAVF_FLAG_SETUP_NETDEV_FEATURES BIT(18) +#define IAVF_FLAG_REINIT_MSIX_NEEDED BIT(20) /* duplicates for common code */ #define IAVF_FLAG_DCB_ENABLED 0 /* flags for admin queue service task */ @@ -329,6 +337,21 @@ struct iavf_adapter { #define IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION BIT_ULL(37) #define IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION BIT_ULL(38) + /* flags for processing extended capability messages during + * __IAVF_INIT_EXTENDED_CAPS. Each capability exchange requires + * both a SEND and a RECV step, which must be processed in sequence. + * + * During the __IAVF_INIT_EXTENDED_CAPS state, the driver will + * process one flag at a time during each state loop. 
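[Illustrative aside, not part of the patch: the comment above describes a one-step-per-pass negotiation loop driven by the SEND/RECV bit pairs defined just below. Its bare shape, with hypothetical helper names standing in for the real handlers that appear later in this patch:]

static void process_extended_caps(struct iavf_adapter *adapter)
{
	if (adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_VLAN_V2) {
		send_vlan_v2_msg(adapter);	/* handler clears the SEND bit */
		return;				/* exactly one step per pass */
	}
	if (adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_VLAN_V2) {
		recv_vlan_v2_caps(adapter);	/* handler clears the RECV bit */
		return;
	}
	/* no bits left: all extended capabilities are negotiated */
}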
+ */ + u64 extended_caps; +#define IAVF_EXTENDED_CAP_SEND_VLAN_V2 BIT_ULL(0) +#define IAVF_EXTENDED_CAP_RECV_VLAN_V2 BIT_ULL(1) + +#define IAVF_EXTENDED_CAPS \ + (IAVF_EXTENDED_CAP_SEND_VLAN_V2 | \ + IAVF_EXTENDED_CAP_RECV_VLAN_V2) + /* OS defined structs */ struct net_device *netdev; struct pci_dev *pdev; @@ -510,7 +533,7 @@ void iavf_add_vlans(struct iavf_adapter *adapter); void iavf_del_vlans(struct iavf_adapter *adapter); void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags); void iavf_request_stats(struct iavf_adapter *adapter); -void iavf_request_reset(struct iavf_adapter *adapter); +int iavf_request_reset(struct iavf_adapter *adapter); void iavf_get_hena(struct iavf_adapter *adapter); void iavf_set_hena(struct iavf_adapter *adapter); void iavf_set_rss_key(struct iavf_adapter *adapter); diff --git a/drivers/net/ethernet/intel/iavf/iavf_common.c b/drivers/net/ethernet/intel/iavf/iavf_common.c index e9cc7f6ddc46..34e46a23894f 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_common.c +++ b/drivers/net/ethernet/intel/iavf/iavf_common.c @@ -131,8 +131,8 @@ const char *iavf_stat_str(struct iavf_hw *hw, enum iavf_status stat_err) return "IAVF_ERR_INVALID_MAC_ADDR"; case IAVF_ERR_DEVICE_NOT_SUPPORTED: return "IAVF_ERR_DEVICE_NOT_SUPPORTED"; - case IAVF_ERR_MASTER_REQUESTS_PENDING: - return "IAVF_ERR_MASTER_REQUESTS_PENDING"; + case IAVF_ERR_PRIMARY_REQUESTS_PENDING: + return "IAVF_ERR_PRIMARY_REQUESTS_PENDING"; case IAVF_ERR_INVALID_LINK_SETTINGS: return "IAVF_ERR_INVALID_LINK_SETTINGS"; case IAVF_ERR_AUTONEG_NOT_COMPLETE: diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index b0bd95c85480..7c4b75a5e1b5 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -51,6 +51,113 @@ MODULE_LICENSE("GPL v2"); static const struct net_device_ops iavf_netdev_ops; struct workqueue_struct *iavf_wq; +int iavf_status_to_errno(enum iavf_status status) +{ + switch (status) { + case IAVF_SUCCESS: + return 0; + case IAVF_ERR_PARAM: + case IAVF_ERR_MAC_TYPE: + case IAVF_ERR_INVALID_MAC_ADDR: + case IAVF_ERR_INVALID_LINK_SETTINGS: + case IAVF_ERR_INVALID_PD_ID: + case IAVF_ERR_INVALID_QP_ID: + case IAVF_ERR_INVALID_CQ_ID: + case IAVF_ERR_INVALID_CEQ_ID: + case IAVF_ERR_INVALID_AEQ_ID: + case IAVF_ERR_INVALID_SIZE: + case IAVF_ERR_INVALID_ARP_INDEX: + case IAVF_ERR_INVALID_FPM_FUNC_ID: + case IAVF_ERR_QP_INVALID_MSG_SIZE: + case IAVF_ERR_INVALID_FRAG_COUNT: + case IAVF_ERR_INVALID_ALIGNMENT: + case IAVF_ERR_INVALID_PUSH_PAGE_INDEX: + case IAVF_ERR_INVALID_IMM_DATA_SIZE: + case IAVF_ERR_INVALID_VF_ID: + case IAVF_ERR_INVALID_HMCFN_ID: + case IAVF_ERR_INVALID_PBLE_INDEX: + case IAVF_ERR_INVALID_SD_INDEX: + case IAVF_ERR_INVALID_PAGE_DESC_INDEX: + case IAVF_ERR_INVALID_SD_TYPE: + case IAVF_ERR_INVALID_HMC_OBJ_INDEX: + case IAVF_ERR_INVALID_HMC_OBJ_COUNT: + case IAVF_ERR_INVALID_SRQ_ARM_LIMIT: + return -EINVAL; + case IAVF_ERR_NVM: + case IAVF_ERR_NVM_CHECKSUM: + case IAVF_ERR_PHY: + case IAVF_ERR_CONFIG: + case IAVF_ERR_UNKNOWN_PHY: + case IAVF_ERR_LINK_SETUP: + case IAVF_ERR_ADAPTER_STOPPED: + case IAVF_ERR_PRIMARY_REQUESTS_PENDING: + case IAVF_ERR_AUTONEG_NOT_COMPLETE: + case IAVF_ERR_RESET_FAILED: + case IAVF_ERR_BAD_PTR: + case IAVF_ERR_SWFW_SYNC: + case IAVF_ERR_QP_TOOMANY_WRS_POSTED: + case IAVF_ERR_QUEUE_EMPTY: + case IAVF_ERR_FLUSHED_QUEUE: + case IAVF_ERR_OPCODE_MISMATCH: + case IAVF_ERR_CQP_COMPL_ERROR: + case IAVF_ERR_BACKING_PAGE_ERROR: + case IAVF_ERR_NO_PBLCHUNKS_AVAILABLE: + case 
IAVF_ERR_MEMCPY_FAILED: + case IAVF_ERR_SRQ_ENABLED: + case IAVF_ERR_ADMIN_QUEUE_ERROR: + case IAVF_ERR_ADMIN_QUEUE_FULL: + case IAVF_ERR_BAD_IWARP_CQE: + case IAVF_ERR_NVM_BLANK_MODE: + case IAVF_ERR_PE_DOORBELL_NOT_ENABLED: + case IAVF_ERR_DIAG_TEST_FAILED: + case IAVF_ERR_FIRMWARE_API_VERSION: + case IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR: + return -EIO; + case IAVF_ERR_DEVICE_NOT_SUPPORTED: + return -ENODEV; + case IAVF_ERR_NO_AVAILABLE_VSI: + case IAVF_ERR_RING_FULL: + return -ENOSPC; + case IAVF_ERR_NO_MEMORY: + return -ENOMEM; + case IAVF_ERR_TIMEOUT: + case IAVF_ERR_ADMIN_QUEUE_TIMEOUT: + return -ETIMEDOUT; + case IAVF_ERR_NOT_IMPLEMENTED: + case IAVF_NOT_SUPPORTED: + return -EOPNOTSUPP; + case IAVF_ERR_ADMIN_QUEUE_NO_WORK: + return -EALREADY; + case IAVF_ERR_NOT_READY: + return -EBUSY; + case IAVF_ERR_BUF_TOO_SHORT: + return -EMSGSIZE; + } + + return -EIO; +} + +int virtchnl_status_to_errno(enum virtchnl_status_code v_status) +{ + switch (v_status) { + case VIRTCHNL_STATUS_SUCCESS: + return 0; + case VIRTCHNL_STATUS_ERR_PARAM: + case VIRTCHNL_STATUS_ERR_INVALID_VF_ID: + return -EINVAL; + case VIRTCHNL_STATUS_ERR_NO_MEMORY: + return -ENOMEM; + case VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH: + case VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR: + case VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR: + return -EIO; + case VIRTCHNL_STATUS_ERR_NOT_SUPPORTED: + return -EOPNOTSUPP; + } + + return -EIO; +} + /** * iavf_pdev_to_adapter - go from pci_dev to adapter * @pdev: pci_dev pointer @@ -302,8 +409,9 @@ static irqreturn_t iavf_msix_aq(int irq, void *data) rd32(hw, IAVF_VFINT_ICR01); rd32(hw, IAVF_VFINT_ICR0_ENA1); - /* schedule work on the private workqueue */ - queue_work(iavf_wq, &adapter->adminq_task); + if (adapter->state != __IAVF_REMOVE) + /* schedule work on the private workqueue */ + queue_work(iavf_wq, &adapter->adminq_task); return IRQ_HANDLED; } @@ -876,6 +984,7 @@ struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter, list_add_tail(&f->list, &adapter->mac_filter_list); f->add = true; f->is_new_mac = true; + f->is_primary = false; adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER; } else { f->remove = false; @@ -909,17 +1018,22 @@ static int iavf_set_mac(struct net_device *netdev, void *p) f = iavf_find_filter(adapter, hw->mac.addr); if (f) { f->remove = true; + f->is_primary = true; adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER; } f = iavf_add_filter(adapter, addr->sa_data); - - spin_unlock_bh(&adapter->mac_vlan_list_lock); - if (f) { + f->is_primary = true; ether_addr_copy(hw->mac.addr, addr->sa_data); } + spin_unlock_bh(&adapter->mac_vlan_list_lock); + + /* schedule the watchdog task to immediately process the request */ + if (f) + queue_work(iavf_wq, &adapter->watchdog_task.work); + return (f == NULL) ? -ENOMEM : 0; } @@ -1136,8 +1250,7 @@ void iavf_down(struct iavf_adapter *adapter) rss->state = IAVF_ADV_RSS_DEL_REQUEST; spin_unlock_bh(&adapter->adv_rss_lock); - if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) && - adapter->state != __IAVF_RESETTING) { + if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)) { /* cancel any current operation */ adapter->current_op = VIRTCHNL_OP_UNKNOWN; /* Schedule operations to close down the HW. 
Don't wait @@ -1421,7 +1534,7 @@ static int iavf_config_rss_aq(struct iavf_adapter *adapter) struct iavf_aqc_get_set_rss_key_data *rss_key = (struct iavf_aqc_get_set_rss_key_data *)adapter->rss_key; struct iavf_hw *hw = &adapter->hw; - int ret = 0; + enum iavf_status status; if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ @@ -1430,24 +1543,25 @@ static int iavf_config_rss_aq(struct iavf_adapter *adapter) return -EBUSY; } - ret = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key); - if (ret) { + status = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key); + if (status) { dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n", - iavf_stat_str(hw, ret), + iavf_stat_str(hw, status), iavf_aq_str(hw, hw->aq.asq_last_status)); - return ret; + return iavf_status_to_errno(status); } - ret = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false, - adapter->rss_lut, adapter->rss_lut_size); - if (ret) { + status = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false, + adapter->rss_lut, adapter->rss_lut_size); + if (status) { dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n", - iavf_stat_str(hw, ret), + iavf_stat_str(hw, status), iavf_aq_str(hw, hw->aq.asq_last_status)); + return iavf_status_to_errno(status); } - return ret; + return 0; } @@ -1517,7 +1631,6 @@ static void iavf_fill_rss_lut(struct iavf_adapter *adapter) static int iavf_init_rss(struct iavf_adapter *adapter) { struct iavf_hw *hw = &adapter->hw; - int ret; if (!RSS_PF(adapter)) { /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */ @@ -1533,9 +1646,8 @@ static int iavf_init_rss(struct iavf_adapter *adapter) iavf_fill_rss_lut(adapter); netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size); - ret = iavf_config_rss(adapter); - return ret; + return iavf_config_rss(adapter); } /** @@ -2003,23 +2115,24 @@ static void iavf_startup(struct iavf_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; struct iavf_hw *hw = &adapter->hw; - int err; + enum iavf_status status; + int ret; WARN_ON(adapter->state != __IAVF_STARTUP); /* driver loaded, probe complete */ adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED; adapter->flags &= ~IAVF_FLAG_RESET_PENDING; - err = iavf_set_mac_type(hw); - if (err) { - dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", err); + status = iavf_set_mac_type(hw); + if (status) { + dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", status); goto err; } - err = iavf_check_reset_complete(hw); - if (err) { + ret = iavf_check_reset_complete(hw); + if (ret) { dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n", - err); + ret); goto err; } hw->aq.num_arq_entries = IAVF_AQ_LEN; @@ -2027,14 +2140,15 @@ static void iavf_startup(struct iavf_adapter *adapter) hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE; hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE; - err = iavf_init_adminq(hw); - if (err) { - dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n", err); + status = iavf_init_adminq(hw); + if (status) { + dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n", + status); goto err; } - err = iavf_send_api_ver(adapter); - if (err) { - dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err); + ret = iavf_send_api_ver(adapter); + if (ret) { + dev_err(&pdev->dev, "Unable to send to PF (%d)\n", ret); iavf_shutdown_adminq(hw); goto err; } @@ -2070,7 +2184,7 @@ static void iavf_init_version_check(struct iavf_adapter *adapter) /* aq msg sent, awaiting reply */ err = iavf_verify_api_ver(adapter); if (err) { - if (err == 
IAVF_ERR_ADMIN_QUEUE_NO_WORK) + if (err == -EALREADY) err = iavf_send_api_ver(adapter); else dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n", @@ -2120,7 +2234,7 @@ int iavf_parse_vf_resource_msg(struct iavf_adapter *adapter) "Requested %d queues, but PF only gave us %d.\n", num_req_queues, adapter->vsi_res->num_queue_pairs); - adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED; + adapter->flags |= IAVF_FLAG_REINIT_MSIX_NEEDED; adapter->num_req_queues = adapter->vsi_res->num_queue_pairs; iavf_schedule_reset(adapter); @@ -2171,11 +2285,11 @@ static void iavf_init_get_resources(struct iavf_adapter *adapter) } } err = iavf_get_vf_config(adapter); - if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK) { + if (err == -EALREADY) { err = iavf_send_vf_config_msg(adapter); goto err_alloc; - } else if (err == IAVF_ERR_PARAM) { - /* We only get ERR_PARAM if the device is in a very bad + } else if (err == -EINVAL) { + /* We only get -EINVAL if the device is in a very bad * state or if we've been disabled for previous bad * behavior. Either way, we're done now. */ @@ -2189,26 +2303,18 @@ static void iavf_init_get_resources(struct iavf_adapter *adapter) } err = iavf_parse_vf_resource_msg(adapter); - if (err) - goto err_alloc; - - err = iavf_send_vf_offload_vlan_v2_msg(adapter); - if (err == -EOPNOTSUPP) { - /* underlying PF doesn't support VIRTCHNL_VF_OFFLOAD_VLAN_V2, so - * go directly to finishing initialization - */ - iavf_change_state(adapter, __IAVF_INIT_CONFIG_ADAPTER); - return; - } else if (err) { - dev_err(&pdev->dev, "Unable to send offload vlan v2 request (%d)\n", + if (err) { + dev_err(&pdev->dev, "Failed to parse VF resource message from PF (%d)\n", err); goto err_alloc; } - - /* underlying PF supports VIRTCHNL_VF_OFFLOAD_VLAN_V2, so update the - * state accordingly + /* Some features require additional messages to negotiate extended + * capabilities. These are processed in sequence by the + * __IAVF_INIT_EXTENDED_CAPS driver state. */ - iavf_change_state(adapter, __IAVF_INIT_GET_OFFLOAD_VLAN_V2_CAPS); + adapter->extended_caps = IAVF_EXTENDED_CAPS; + + iavf_change_state(adapter, __IAVF_INIT_EXTENDED_CAPS); return; err_alloc: @@ -2219,35 +2325,93 @@ err: } /** - * iavf_init_get_offload_vlan_v2_caps - part of driver startup + * iavf_init_send_offload_vlan_v2_caps - part of initializing VLAN V2 caps + * @adapter: board private structure + * + * Function processes send of the extended VLAN V2 capability message to the + * PF. Must clear IAVF_EXTENDED_CAP_RECV_VLAN_V2 if the message is not sent, + * e.g. due to PF not negotiating VIRTCHNL_VF_OFFLOAD_VLAN_V2. + */ +static void iavf_init_send_offload_vlan_v2_caps(struct iavf_adapter *adapter) +{ + int ret; + + WARN_ON(!(adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_VLAN_V2)); + + ret = iavf_send_vf_offload_vlan_v2_msg(adapter); + if (ret == -EOPNOTSUPP) { + /* PF does not support VIRTCHNL_VF_OFFLOAD_VLAN_V2. In this + * case, we did not send the capability exchange message and + * do not expect a response. + */ + adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_VLAN_V2; + } + + /* We sent the message, so move on to the next step */ + adapter->extended_caps &= ~IAVF_EXTENDED_CAP_SEND_VLAN_V2; +} + +/** + * iavf_init_recv_offload_vlan_v2_caps - part of initializing VLAN V2 caps + * @adapter: board private structure * - * Function processes __IAVF_INIT_GET_OFFLOAD_VLAN_V2_CAPS driver state if the - * VF negotiates VIRTCHNL_VF_OFFLOAD_VLAN_V2. If VIRTCHNL_VF_OFFLOAD_VLAN_V2 is - * not negotiated, then this state will never be entered. 
+ * Function processes receipt of the extended VLAN V2 capability message from + * the PF. **/ -static void iavf_init_get_offload_vlan_v2_caps(struct iavf_adapter *adapter) +static void iavf_init_recv_offload_vlan_v2_caps(struct iavf_adapter *adapter) { int ret; - WARN_ON(adapter->state != __IAVF_INIT_GET_OFFLOAD_VLAN_V2_CAPS); + WARN_ON(!(adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_VLAN_V2)); memset(&adapter->vlan_v2_caps, 0, sizeof(adapter->vlan_v2_caps)); ret = iavf_get_vf_vlan_v2_caps(adapter); - if (ret) { - if (ret == IAVF_ERR_ADMIN_QUEUE_NO_WORK) - iavf_send_vf_offload_vlan_v2_msg(adapter); + if (ret) goto err; - } - iavf_change_state(adapter, __IAVF_INIT_CONFIG_ADAPTER); + /* We've processed receipt of the VLAN V2 caps message */ + adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_VLAN_V2; return; err: + /* We didn't receive a reply. Make sure we try sending again when + * __IAVF_INIT_FAILED attempts to recover. + */ + adapter->extended_caps |= IAVF_EXTENDED_CAP_SEND_VLAN_V2; iavf_change_state(adapter, __IAVF_INIT_FAILED); } /** + * iavf_init_process_extended_caps - Part of driver startup + * @adapter: board private structure + * + * Function processes __IAVF_INIT_EXTENDED_CAPS driver state. This state + * handles negotiating capabilities for features which require an additional + * message. + * + * Once all extended capabilities exchanges are finished, the driver will + * transition into __IAVF_INIT_CONFIG_ADAPTER. + */ +static void iavf_init_process_extended_caps(struct iavf_adapter *adapter) +{ + WARN_ON(adapter->state != __IAVF_INIT_EXTENDED_CAPS); + + /* Process capability exchange for VLAN V2 */ + if (adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_VLAN_V2) { + iavf_init_send_offload_vlan_v2_caps(adapter); + return; + } else if (adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_VLAN_V2) { + iavf_init_recv_offload_vlan_v2_caps(adapter); + return; + } + + /* When we reach here, no further extended capabilities exchanges are + * necessary, so we finally transition into __IAVF_INIT_CONFIG_ADAPTER + */ + iavf_change_state(adapter, __IAVF_INIT_CONFIG_ADAPTER); +} + +/** * iavf_init_config_adapter - last part of driver startup * @adapter: board private structure * @@ -2374,17 +2538,22 @@ static void iavf_watchdog_task(struct work_struct *work) struct iavf_hw *hw = &adapter->hw; u32 reg_val; - if (!mutex_trylock(&adapter->crit_lock)) + if (!mutex_trylock(&adapter->crit_lock)) { + if (adapter->state == __IAVF_REMOVE) + return; + goto restart_watchdog; + } if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) iavf_change_state(adapter, __IAVF_COMM_FAILED); - if (adapter->flags & IAVF_FLAG_RESET_NEEDED && - adapter->state != __IAVF_RESETTING) { - iavf_change_state(adapter, __IAVF_RESETTING); + if (adapter->flags & IAVF_FLAG_RESET_NEEDED) { adapter->aq_required = 0; adapter->current_op = VIRTCHNL_OP_UNKNOWN; + mutex_unlock(&adapter->crit_lock); + queue_work(iavf_wq, &adapter->reset_task); + return; } switch (adapter->state) { @@ -2406,8 +2575,8 @@ static void iavf_watchdog_task(struct work_struct *work) queue_delayed_work(iavf_wq, &adapter->watchdog_task, msecs_to_jiffies(1)); return; - case __IAVF_INIT_GET_OFFLOAD_VLAN_V2_CAPS: - iavf_init_get_offload_vlan_v2_caps(adapter); + case __IAVF_INIT_EXTENDED_CAPS: + iavf_init_process_extended_caps(adapter); mutex_unlock(&adapter->crit_lock); queue_delayed_work(iavf_wq, &adapter->watchdog_task, msecs_to_jiffies(1)); @@ -2419,6 +2588,15 @@ static void iavf_watchdog_task(struct work_struct *work) msecs_to_jiffies(1)); return; case __IAVF_INIT_FAILED: 
+ if (test_bit(__IAVF_IN_REMOVE_TASK, + &adapter->crit_section)) { + /* Do not update the state and do not reschedule + * watchdog task, iavf_remove should handle this state + * as it can loop forever + */ + mutex_unlock(&adapter->crit_lock); + return; + } if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) { dev_err(&adapter->pdev->dev, "Failed to communicate with PF; waiting before retry\n"); @@ -2435,6 +2613,17 @@ static void iavf_watchdog_task(struct work_struct *work) queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ); return; case __IAVF_COMM_FAILED: + if (test_bit(__IAVF_IN_REMOVE_TASK, + &adapter->crit_section)) { + /* Set state to __IAVF_INIT_FAILED and perform remove + * steps. Remove IAVF_FLAG_PF_COMMS_FAILED so the task + * doesn't bring the state back to __IAVF_COMM_FAILED. + */ + iavf_change_state(adapter, __IAVF_INIT_FAILED); + adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED; + mutex_unlock(&adapter->crit_lock); + return; + } reg_val = rd32(hw, IAVF_VFGEN_RSTAT) & IAVF_VFGEN_RSTAT_VFR_STATE_MASK; if (reg_val == VIRTCHNL_VFR_VFACTIVE || @@ -2507,7 +2696,8 @@ static void iavf_watchdog_task(struct work_struct *work) schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5)); mutex_unlock(&adapter->crit_lock); restart_watchdog: - queue_work(iavf_wq, &adapter->adminq_task); + if (adapter->state >= __IAVF_DOWN) + queue_work(iavf_wq, &adapter->adminq_task); if (adapter->aq_required) queue_delayed_work(iavf_wq, &adapter->watchdog_task, msecs_to_jiffies(20)); @@ -2594,6 +2784,7 @@ static void iavf_reset_task(struct work_struct *work) struct iavf_hw *hw = &adapter->hw; struct iavf_mac_filter *f, *ftmp; struct iavf_cloud_filter *cf; + enum iavf_status status; u32 reg_val; int i = 0, err; bool running; @@ -2601,13 +2792,13 @@ static void iavf_reset_task(struct work_struct *work) /* When device is being removed it doesn't make sense to run the reset * task, just return in such a case. */ - if (mutex_is_locked(&adapter->remove_lock)) - return; + if (!mutex_trylock(&adapter->crit_lock)) { + if (adapter->state != __IAVF_REMOVE) + queue_work(iavf_wq, &adapter->reset_task); - if (iavf_lock_timeout(&adapter->crit_lock, 200)) { - schedule_work(&adapter->reset_task); return; } + while (!mutex_trylock(&adapter->client_lock)) usleep_range(500, 1000); if (CLIENT_ENABLED(adapter)) { @@ -2662,6 +2853,7 @@ static void iavf_reset_task(struct work_struct *work) reg_val); iavf_disable_vf(adapter); mutex_unlock(&adapter->client_lock); + mutex_unlock(&adapter->crit_lock); return; /* Do not attempt to reinit. It's dead, Jim. */ } @@ -2670,8 +2862,7 @@ continue_reset: * ndo_open() returning, so we can't assume it means all our open * tasks have finished, since we're not holding the rtnl_lock here. 
*/ - running = ((adapter->state == __IAVF_RUNNING) || - (adapter->state == __IAVF_RESETTING)); + running = adapter->state == __IAVF_RUNNING; if (running) { netdev->flags &= ~IFF_UP; @@ -2695,13 +2886,16 @@ continue_reset: /* kill and reinit the admin queue */ iavf_shutdown_adminq(hw); adapter->current_op = VIRTCHNL_OP_UNKNOWN; - err = iavf_init_adminq(hw); - if (err) + status = iavf_init_adminq(hw); + if (status) { dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n", - err); + status); + goto reset_err; + } adapter->aq_required = 0; - if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) { + if ((adapter->flags & IAVF_FLAG_REINIT_MSIX_NEEDED) || + (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED)) { err = iavf_reinit_interrupt_scheme(adapter); if (err) goto reset_err; @@ -2773,12 +2967,13 @@ continue_reset: if (err) goto reset_err; - if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) { + if ((adapter->flags & IAVF_FLAG_REINIT_MSIX_NEEDED) || + (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED)) { err = iavf_request_traffic_irqs(adapter, netdev->name); if (err) goto reset_err; - adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; + adapter->flags &= ~IAVF_FLAG_REINIT_MSIX_NEEDED; } iavf_configure(adapter); @@ -2793,6 +2988,9 @@ continue_reset: iavf_change_state(adapter, __IAVF_DOWN); wake_up(&adapter->down_waitqueue); } + + adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; + mutex_unlock(&adapter->client_lock); mutex_unlock(&adapter->crit_lock); @@ -2826,13 +3024,19 @@ static void iavf_adminq_task(struct work_struct *work) if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) goto out; + if (!mutex_trylock(&adapter->crit_lock)) { + if (adapter->state == __IAVF_REMOVE) + return; + + queue_work(iavf_wq, &adapter->adminq_task); + goto out; + } + event.buf_len = IAVF_MAX_AQ_BUF_SIZE; event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); if (!event.msg_buf) goto out; - if (iavf_lock_timeout(&adapter->crit_lock, 200)) - goto freedom; do { ret = iavf_clean_arq_element(hw, &event, &pending); v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high); @@ -2848,6 +3052,24 @@ static void iavf_adminq_task(struct work_struct *work) } while (pending); mutex_unlock(&adapter->crit_lock); + if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES)) { + if (adapter->netdev_registered || + !test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) { + struct net_device *netdev = adapter->netdev; + + rtnl_lock(); + netdev_update_features(netdev); + rtnl_unlock(); + /* Request VLAN offload settings */ + if (VLAN_V2_ALLOWED(adapter)) + iavf_set_vlan_offload_features + (adapter, 0, netdev->features); + + iavf_set_queue_vlan_tag_loc(adapter); + } + + adapter->flags &= ~IAVF_FLAG_SETUP_NETDEV_FEATURES; + } if ((adapter->flags & (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) || adapter->state == __IAVF_RESETTING) @@ -3800,11 +4022,12 @@ static int iavf_close(struct net_device *netdev) struct iavf_adapter *adapter = netdev_priv(netdev); int status; - if (adapter->state <= __IAVF_DOWN_PENDING) - return 0; + mutex_lock(&adapter->crit_lock); - while (!mutex_trylock(&adapter->crit_lock)) - usleep_range(500, 1000); + if (adapter->state <= __IAVF_DOWN_PENDING) { + mutex_unlock(&adapter->crit_lock); + return 0; + } set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); if (CLIENT_ENABLED(adapter)) @@ -3853,8 +4076,11 @@ static int iavf_change_mtu(struct net_device *netdev, int new_mtu) iavf_notify_client_l2_params(&adapter->vsi); adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED; } - adapter->flags |= IAVF_FLAG_RESET_NEEDED; - 
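/* Illustrative sketch, not part of the patch: the watchdog, reset, and
 * adminq task hunks above all adopt the same discipline -- never block on
 * crit_lock. If the trylock fails while the adapter is being removed, the
 * remove path owns the lock and cancels the work, so the task simply exits;
 * otherwise it requeues itself. A compilable POSIX-threads approximation
 * with hypothetical names:
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct adapter {
	pthread_mutex_t crit_lock;
	bool removing;		/* set once the remove path takes over */
};

static void requeue(const char *task)
{
	printf("%s: lock busy, retry on a later tick\n", task);
}

static void task_tick(struct adapter *ad, const char *task)
{
	if (pthread_mutex_trylock(&ad->crit_lock) != 0) {
		if (!ad->removing)
			requeue(task);
		return;		/* never sleep waiting for the lock */
	}
	printf("%s: doing protected work\n", task);
	pthread_mutex_unlock(&ad->crit_lock);
}

int main(void)
{
	struct adapter ad = { .crit_lock = PTHREAD_MUTEX_INITIALIZER,
			      .removing = false };

	task_tick(&ad, "watchdog");
	return 0;
}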
queue_work(iavf_wq, &adapter->reset_task); + + if (netif_running(netdev)) { + adapter->flags |= IAVF_FLAG_RESET_NEEDED; + queue_work(iavf_wq, &adapter->reset_task); + } return 0; } @@ -4428,7 +4654,6 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) */ mutex_init(&adapter->crit_lock); mutex_init(&adapter->client_lock); - mutex_init(&adapter->remove_lock); mutex_init(&hw->aq.asq_mutex); mutex_init(&hw->aq.arq_mutex); @@ -4544,7 +4769,6 @@ static int __maybe_unused iavf_resume(struct device *dev_d) static void iavf_remove(struct pci_dev *pdev) { struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev); - enum iavf_state_t prev_state = adapter->last_state; struct net_device *netdev = adapter->netdev; struct iavf_fdir_fltr *fdir, *fdirtmp; struct iavf_vlan_filter *vlf, *vlftmp; @@ -4553,14 +4777,30 @@ static void iavf_remove(struct pci_dev *pdev) struct iavf_cloud_filter *cf, *cftmp; struct iavf_hw *hw = &adapter->hw; int err; - /* Indicate we are in remove and not to run reset_task */ - mutex_lock(&adapter->remove_lock); - cancel_work_sync(&adapter->reset_task); + + set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section); + /* Wait until port initialization is complete. + * There are flows where register/unregister netdev may race. + */ + while (1) { + mutex_lock(&adapter->crit_lock); + if (adapter->state == __IAVF_RUNNING || + adapter->state == __IAVF_DOWN || + adapter->state == __IAVF_INIT_FAILED) { + mutex_unlock(&adapter->crit_lock); + break; + } + + mutex_unlock(&adapter->crit_lock); + usleep_range(500, 1000); + } cancel_delayed_work_sync(&adapter->watchdog_task); - cancel_delayed_work_sync(&adapter->client_task); + if (adapter->netdev_registered) { - unregister_netdev(netdev); + rtnl_lock(); + unregister_netdevice(netdev); adapter->netdev_registered = false; + rtnl_unlock(); } if (CLIENT_ALLOWED(adapter)) { err = iavf_lan_del_device(adapter); @@ -4569,6 +4809,10 @@ static void iavf_remove(struct pci_dev *pdev) err); } + mutex_lock(&adapter->crit_lock); + dev_info(&adapter->pdev->dev, "Remove device\n"); + iavf_change_state(adapter, __IAVF_REMOVE); + iavf_request_reset(adapter); msleep(50); /* If the FW isn't responding, kick it once, but only once. */ @@ -4576,37 +4820,24 @@ static void iavf_remove(struct pci_dev *pdev) iavf_request_reset(adapter); msleep(50); } - if (iavf_lock_timeout(&adapter->crit_lock, 5000)) - dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __FUNCTION__); - dev_info(&adapter->pdev->dev, "Removing device\n"); + iavf_misc_irq_disable(adapter); /* Shut down all the garbage mashers on the detention level */ - iavf_change_state(adapter, __IAVF_REMOVE); + cancel_work_sync(&adapter->reset_task); + cancel_delayed_work_sync(&adapter->watchdog_task); + cancel_work_sync(&adapter->adminq_task); + cancel_delayed_work_sync(&adapter->client_task); + adapter->aq_required = 0; adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; iavf_free_all_tx_resources(adapter); iavf_free_all_rx_resources(adapter); - iavf_misc_irq_disable(adapter); iavf_free_misc_irq(adapter); - /* In case we enter iavf_remove from erroneous state, free traffic irqs - * here, so as to not cause a kernel crash, when calling - * iavf_reset_interrupt_capability. 
- */ - if ((adapter->last_state == __IAVF_RESETTING && - prev_state != __IAVF_DOWN) || - (adapter->last_state == __IAVF_RUNNING && - !(netdev->flags & IFF_UP))) - iavf_free_traffic_irqs(adapter); - iavf_reset_interrupt_capability(adapter); iavf_free_q_vectors(adapter); - cancel_delayed_work_sync(&adapter->watchdog_task); - - cancel_work_sync(&adapter->adminq_task); - iavf_free_rss(adapter); if (hw->aq.asq.count) @@ -4618,8 +4849,6 @@ static void iavf_remove(struct pci_dev *pdev) mutex_destroy(&adapter->client_lock); mutex_unlock(&adapter->crit_lock); mutex_destroy(&adapter->crit_lock); - mutex_unlock(&adapter->remove_lock); - mutex_destroy(&adapter->remove_lock); iounmap(hw->hw_addr); pci_release_regions(pdev); @@ -4689,8 +4918,6 @@ static struct pci_driver iavf_driver = { **/ static int __init iavf_init_module(void) { - int ret; - pr_info("iavf: %s\n", iavf_driver_string); pr_info("%s\n", iavf_copyright); @@ -4701,8 +4928,7 @@ static int __init iavf_init_module(void) pr_err("%s: Failed to create workqueue\n", iavf_driver_name); return -ENOMEM; } - ret = pci_register_driver(&iavf_driver); - return ret; + return pci_register_driver(&iavf_driver); } module_init(iavf_init_module); diff --git a/drivers/net/ethernet/intel/iavf/iavf_status.h b/drivers/net/ethernet/intel/iavf/iavf_status.h index 46e3d1f6b604..2ea5c7c339bc 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_status.h +++ b/drivers/net/ethernet/intel/iavf/iavf_status.h @@ -18,7 +18,7 @@ enum iavf_status { IAVF_ERR_ADAPTER_STOPPED = -9, IAVF_ERR_INVALID_MAC_ADDR = -10, IAVF_ERR_DEVICE_NOT_SUPPORTED = -11, - IAVF_ERR_MASTER_REQUESTS_PENDING = -12, + IAVF_ERR_PRIMARY_REQUESTS_PENDING = -12, IAVF_ERR_INVALID_LINK_SETTINGS = -13, IAVF_ERR_AUTONEG_NOT_COMPLETE = -14, IAVF_ERR_RESET_FAILED = -15, diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c index 8cbe7ad1347c..978f651c6b09 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c +++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c @@ -374,29 +374,60 @@ static inline bool iavf_container_is_rx(struct iavf_q_vector *q_vector, return &q_vector->rx == rc; } -static inline unsigned int iavf_itr_divisor(struct iavf_q_vector *q_vector) +#define IAVF_AIM_MULTIPLIER_100G 2560 +#define IAVF_AIM_MULTIPLIER_50G 1280 +#define IAVF_AIM_MULTIPLIER_40G 1024 +#define IAVF_AIM_MULTIPLIER_20G 512 +#define IAVF_AIM_MULTIPLIER_10G 256 +#define IAVF_AIM_MULTIPLIER_1G 32 + +static unsigned int iavf_mbps_itr_multiplier(u32 speed_mbps) { - unsigned int divisor; + switch (speed_mbps) { + case SPEED_100000: + return IAVF_AIM_MULTIPLIER_100G; + case SPEED_50000: + return IAVF_AIM_MULTIPLIER_50G; + case SPEED_40000: + return IAVF_AIM_MULTIPLIER_40G; + case SPEED_25000: + case SPEED_20000: + return IAVF_AIM_MULTIPLIER_20G; + case SPEED_10000: + default: + return IAVF_AIM_MULTIPLIER_10G; + case SPEED_1000: + case SPEED_100: + return IAVF_AIM_MULTIPLIER_1G; + } +} - switch (q_vector->adapter->link_speed) { +static unsigned int +iavf_virtchnl_itr_multiplier(enum virtchnl_link_speed speed_virtchnl) +{ + switch (speed_virtchnl) { case VIRTCHNL_LINK_SPEED_40GB: - divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 1024; - break; + return IAVF_AIM_MULTIPLIER_40G; case VIRTCHNL_LINK_SPEED_25GB: case VIRTCHNL_LINK_SPEED_20GB: - divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 512; - break; - default: + return IAVF_AIM_MULTIPLIER_20G; case VIRTCHNL_LINK_SPEED_10GB: - divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 256; - break; + default: + return IAVF_AIM_MULTIPLIER_10G; case VIRTCHNL_LINK_SPEED_1GB: case 
VIRTCHNL_LINK_SPEED_100MB: - divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 32; - break; + return IAVF_AIM_MULTIPLIER_1G; } +} - return divisor; +static unsigned int iavf_itr_divisor(struct iavf_adapter *adapter) +{ + if (ADV_LINK_SUPPORT(adapter)) + return IAVF_ITR_ADAPTIVE_MIN_INC * + iavf_mbps_itr_multiplier(adapter->link_speed_mbps); + else + return IAVF_ITR_ADAPTIVE_MIN_INC * + iavf_virtchnl_itr_multiplier(adapter->link_speed); } /** @@ -586,8 +617,9 @@ adjust_by_size: * Use addition as we have already recorded the new latency flag * for the ITR value. */ - itr += DIV_ROUND_UP(avg_wire_size, iavf_itr_divisor(q_vector)) * - IAVF_ITR_ADAPTIVE_MIN_INC; + itr += DIV_ROUND_UP(avg_wire_size, + iavf_itr_divisor(q_vector->adapter)) * + IAVF_ITR_ADAPTIVE_MIN_INC; if ((itr & IAVF_ITR_MASK) > IAVF_ITR_ADAPTIVE_MAX_USECS) { itr &= IAVF_ITR_ADAPTIVE_LATENCY; diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c index 5ee1d118fd30..782450d5c12f 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c @@ -22,17 +22,17 @@ static int iavf_send_pf_msg(struct iavf_adapter *adapter, enum virtchnl_ops op, u8 *msg, u16 len) { struct iavf_hw *hw = &adapter->hw; - enum iavf_status err; + enum iavf_status status; if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) return 0; /* nothing to see here, move along */ - err = iavf_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL); - if (err) - dev_dbg(&adapter->pdev->dev, "Unable to send opcode %d to PF, err %s, aq_err %s\n", - op, iavf_stat_str(hw, err), + status = iavf_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL); + if (status) + dev_dbg(&adapter->pdev->dev, "Unable to send opcode %d to PF, status %s, aq_err %s\n", + op, iavf_stat_str(hw, status), iavf_aq_str(hw, hw->aq.asq_last_status)); - return err; + return iavf_status_to_errno(status); } /** @@ -55,6 +55,41 @@ int iavf_send_api_ver(struct iavf_adapter *adapter) } /** + * iavf_poll_virtchnl_msg + * @hw: HW configuration structure + * @event: event to populate on success + * @op_to_poll: requested virtchnl op to poll for + * + * Initialize poll for virtchnl msg matching the requested_op. Returns 0 + * if a message of the correct opcode is in the queue or an error code + * if no message matching the op code is waiting and other failures. + */ +static int +iavf_poll_virtchnl_msg(struct iavf_hw *hw, struct iavf_arq_event_info *event, + enum virtchnl_ops op_to_poll) +{ + enum virtchnl_ops received_op; + enum iavf_status status; + u32 v_retval; + + while (1) { + /* When the AQ is empty, iavf_clean_arq_element will return + * nonzero and this loop will terminate. 
+ */ + status = iavf_clean_arq_element(hw, event, NULL); + if (status != IAVF_SUCCESS) + return iavf_status_to_errno(status); + received_op = + (enum virtchnl_ops)le32_to_cpu(event->desc.cookie_high); + if (op_to_poll == received_op) + break; + } + + v_retval = le32_to_cpu(event->desc.cookie_low); + return virtchnl_status_to_errno((enum virtchnl_status_code)v_retval); +} + +/** * iavf_verify_api_ver * @adapter: adapter structure * @@ -65,55 +100,28 @@ int iavf_send_api_ver(struct iavf_adapter *adapter) **/ int iavf_verify_api_ver(struct iavf_adapter *adapter) { - struct virtchnl_version_info *pf_vvi; - struct iavf_hw *hw = &adapter->hw; struct iavf_arq_event_info event; - enum virtchnl_ops op; - enum iavf_status err; + int err; event.buf_len = IAVF_MAX_AQ_BUF_SIZE; - event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); - if (!event.msg_buf) { - err = -ENOMEM; - goto out; - } - - while (1) { - err = iavf_clean_arq_element(hw, &event, NULL); - /* When the AQ is empty, iavf_clean_arq_element will return - * nonzero and this loop will terminate. - */ - if (err) - goto out_alloc; - op = - (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high); - if (op == VIRTCHNL_OP_VERSION) - break; - } + event.msg_buf = kzalloc(IAVF_MAX_AQ_BUF_SIZE, GFP_KERNEL); + if (!event.msg_buf) + return -ENOMEM; + err = iavf_poll_virtchnl_msg(&adapter->hw, &event, VIRTCHNL_OP_VERSION); + if (!err) { + struct virtchnl_version_info *pf_vvi = + (struct virtchnl_version_info *)event.msg_buf; + adapter->pf_version = *pf_vvi; - err = (enum iavf_status)le32_to_cpu(event.desc.cookie_low); - if (err) - goto out_alloc; - - if (op != VIRTCHNL_OP_VERSION) { - dev_info(&adapter->pdev->dev, "Invalid reply type %d from PF\n", - op); - err = -EIO; - goto out_alloc; + if (pf_vvi->major > VIRTCHNL_VERSION_MAJOR || + (pf_vvi->major == VIRTCHNL_VERSION_MAJOR && + pf_vvi->minor > VIRTCHNL_VERSION_MINOR)) + err = -EIO; } - pf_vvi = (struct virtchnl_version_info *)event.msg_buf; - adapter->pf_version = *pf_vvi; - - if ((pf_vvi->major > VIRTCHNL_VERSION_MAJOR) || - ((pf_vvi->major == VIRTCHNL_VERSION_MAJOR) && - (pf_vvi->minor > VIRTCHNL_VERSION_MINOR))) - err = -EIO; - -out_alloc: kfree(event.msg_buf); -out: + return err; } @@ -208,33 +216,17 @@ int iavf_get_vf_config(struct iavf_adapter *adapter) { struct iavf_hw *hw = &adapter->hw; struct iavf_arq_event_info event; - enum virtchnl_ops op; - enum iavf_status err; u16 len; + int err; - len = sizeof(struct virtchnl_vf_resource) + + len = sizeof(struct virtchnl_vf_resource) + IAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource); event.buf_len = len; - event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); - if (!event.msg_buf) { - err = -ENOMEM; - goto out; - } + event.msg_buf = kzalloc(len, GFP_KERNEL); + if (!event.msg_buf) + return -ENOMEM; - while (1) { - /* When the AQ is empty, iavf_clean_arq_element will return - * nonzero and this loop will terminate. 
- */ - err = iavf_clean_arq_element(hw, &event, NULL); - if (err) - goto out_alloc; - op = - (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high); - if (op == VIRTCHNL_OP_GET_VF_RESOURCES) - break; - } - - err = (enum iavf_status)le32_to_cpu(event.desc.cookie_low); + err = iavf_poll_virtchnl_msg(hw, &event, VIRTCHNL_OP_GET_VF_RESOURCES); memcpy(adapter->vf_res, event.msg_buf, min(event.msg_len, len)); /* some PFs send more queues than we should have so validate that @@ -243,48 +235,32 @@ int iavf_get_vf_config(struct iavf_adapter *adapter) if (!err) iavf_validate_num_queues(adapter); iavf_vf_parse_hw_config(hw, adapter->vf_res); -out_alloc: + kfree(event.msg_buf); -out: + return err; } int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter) { - struct iavf_hw *hw = &adapter->hw; struct iavf_arq_event_info event; - enum virtchnl_ops op; - enum iavf_status err; + int err; u16 len; - len = sizeof(struct virtchnl_vlan_caps); + len = sizeof(struct virtchnl_vlan_caps); event.buf_len = len; - event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); - if (!event.msg_buf) { - err = -ENOMEM; - goto out; - } - - while (1) { - /* When the AQ is empty, iavf_clean_arq_element will return - * nonzero and this loop will terminate. - */ - err = iavf_clean_arq_element(hw, &event, NULL); - if (err) - goto out_alloc; - op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high); - if (op == VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS) - break; - } + event.msg_buf = kzalloc(len, GFP_KERNEL); + if (!event.msg_buf) + return -ENOMEM; - err = (enum iavf_status)le32_to_cpu(event.desc.cookie_low); - if (err) - goto out_alloc; + err = iavf_poll_virtchnl_msg(&adapter->hw, &event, + VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS); + if (!err) + memcpy(&adapter->vlan_v2_caps, event.msg_buf, + min(event.msg_len, len)); - memcpy(&adapter->vlan_v2_caps, event.msg_buf, min(event.msg_len, len)); -out_alloc: kfree(event.msg_buf); -out: + return err; } @@ -454,6 +430,20 @@ void iavf_map_queues(struct iavf_adapter *adapter) } /** + * iavf_set_mac_addr_type - Set the correct request type from the filter type + * @virtchnl_ether_addr: pointer to requested list element + * @filter: pointer to requested filter + **/ +static void +iavf_set_mac_addr_type(struct virtchnl_ether_addr *virtchnl_ether_addr, + const struct iavf_mac_filter *filter) +{ + virtchnl_ether_addr->type = filter->is_primary ? + VIRTCHNL_ETHER_ADDR_PRIMARY : + VIRTCHNL_ETHER_ADDR_EXTRA; +} + +/** * iavf_add_ether_addrs * @adapter: adapter structure * @@ -508,6 +498,7 @@ void iavf_add_ether_addrs(struct iavf_adapter *adapter) list_for_each_entry(f, &adapter->mac_filter_list, list) { if (f->add) { ether_addr_copy(veal->list[i].addr, f->macaddr); + iavf_set_mac_addr_type(&veal->list[i], f); i++; f->add = false; if (i == count) @@ -577,6 +568,7 @@ void iavf_del_ether_addrs(struct iavf_adapter *adapter) list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { if (f->remove) { ether_addr_copy(veal->list[i].addr, f->macaddr); + iavf_set_mac_addr_type(&veal->list[i], f); i++; list_del(&f->list); kfree(f); @@ -1827,11 +1819,29 @@ void iavf_del_adv_rss_cfg(struct iavf_adapter *adapter) * * Request that the PF reset this VF. No response is expected. 
**/ -void iavf_request_reset(struct iavf_adapter *adapter) +int iavf_request_reset(struct iavf_adapter *adapter) { + int err; /* Don't check CURRENT_OP - this is always higher priority */ - iavf_send_pf_msg(adapter, VIRTCHNL_OP_RESET_VF, NULL, 0); + err = iavf_send_pf_msg(adapter, VIRTCHNL_OP_RESET_VF, NULL, 0); adapter->current_op = VIRTCHNL_OP_UNKNOWN; + return err; +} + +/** + * iavf_netdev_features_vlan_strip_set - update vlan strip status + * @netdev: ptr to netdev being adjusted + * @enable: enable or disable vlan strip + * + * Helper function to change vlan strip status in netdev->features. + */ +static void iavf_netdev_features_vlan_strip_set(struct net_device *netdev, + const bool enable) +{ + if (enable) + netdev->features |= NETIF_F_HW_VLAN_CTAG_RX; + else + netdev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; } /** @@ -2057,8 +2067,18 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, } break; case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: + dev_warn(&adapter->pdev->dev, "Changing VLAN Stripping is not allowed when Port VLAN is configured\n"); + /* Vlan stripping could not be enabled by ethtool. + * Disable it in netdev->features. + */ + iavf_netdev_features_vlan_strip_set(netdev, false); + break; case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: dev_warn(&adapter->pdev->dev, "Changing VLAN Stripping is not allowed when Port VLAN is configured\n"); + /* Vlan stripping could not be disabled by ethtool. + * Enable it in netdev->features. + */ + iavf_netdev_features_vlan_strip_set(netdev, true); break; default: dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n", @@ -2146,29 +2166,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, sizeof(adapter->vlan_v2_caps))); iavf_process_config(adapter); - - /* unlock crit_lock before acquiring rtnl_lock as other - * processes holding rtnl_lock could be waiting for the same - * crit_lock - */ - mutex_unlock(&adapter->crit_lock); - /* VLAN capabilities can change during VFR, so make sure to - * update the netdev features with the new capabilities - */ - rtnl_lock(); - netdev_update_features(netdev); - rtnl_unlock(); - if (iavf_lock_timeout(&adapter->crit_lock, 10000)) - dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", - __FUNCTION__); - - /* Request VLAN offload settings */ - if (VLAN_V2_ALLOWED(adapter)) - iavf_set_vlan_offload_features(adapter, 0, - netdev->features); - - iavf_set_queue_vlan_tag_loc(adapter); - + adapter->flags |= IAVF_FLAG_SETUP_NETDEV_FEATURES; } break; case VIRTCHNL_OP_ENABLE_QUEUES: @@ -2334,6 +2332,20 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, spin_unlock_bh(&adapter->adv_rss_lock); } break; + case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: + /* PF enabled vlan strip on this VF. + * Update netdev->features if needed to be in sync with ethtool. + */ + if (!v_retval) + iavf_netdev_features_vlan_strip_set(netdev, true); + break; + case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: + /* PF disabled vlan strip on this VF. + * Update netdev->features if needed to be in sync with ethtool. 
+ */ + if (!v_retval) + iavf_netdev_features_vlan_strip_set(netdev, false); + break; default: if (adapter->current_op && (v_opcode != adapter->current_op)) dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n", diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile index 389fff70d22e..44b8464b7663 100644 --- a/drivers/net/ethernet/intel/ice/Makefile +++ b/drivers/net/ethernet/intel/ice/Makefile @@ -40,6 +40,7 @@ ice-$(CONFIG_PCI_IOV) += \ ice_vf_vsi_vlan_ops.o \ ice_virtchnl_pf.o ice-$(CONFIG_PTP_1588_CLOCK) += ice_ptp.o ice_ptp_hw.o +ice-$(CONFIG_TTY) += ice_gnss.o ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o ice-$(CONFIG_RFS_ACCEL) += ice_arfs.o ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 827fcb5e0d4c..23c39805a1b0 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -73,6 +73,7 @@ #include "ice_eswitch.h" #include "ice_lag.h" #include "ice_vsi_vlan_ops.h" +#include "ice_gnss.h" #define ICE_BAR0 0 #define ICE_REQ_DESC_MULTIPLE 32 @@ -108,7 +109,6 @@ /* All VF control VSIs share the same IRQ, so assign a unique ID for them */ #define ICE_RES_VF_CTRL_VEC_ID (ICE_RES_RDMA_VEC_ID - 1) #define ICE_INVAL_Q_INDEX 0xffff -#define ICE_INVAL_VFID 256 #define ICE_MAX_RXQS_PER_TC 256 /* Used when setting VSI context per TC Rx queues */ @@ -184,6 +184,7 @@ enum ice_feature { ICE_F_DSCP, ICE_F_SMA_CTRL, + ICE_F_GNSS, ICE_F_MAX }; @@ -281,7 +282,6 @@ enum ice_pf_state { ICE_VFLR_EVENT_PENDING, ICE_FLTR_OVERFLOW_PROMISC, ICE_VF_DIS, - ICE_VF_DEINIT_IN_PROGRESS, ICE_CFG_BUSY, ICE_SERVICE_SCHED, ICE_SERVICE_DIS, @@ -332,7 +332,7 @@ struct ice_vsi { u16 vsi_num; /* HW (absolute) index of this VSI */ u16 idx; /* software index in pf->vsi[] */ - s16 vf_id; /* VF ID for SR-IOV VSIs */ + struct ice_vf *vf; /* VF associated with this VSI */ u16 ethtype; /* Ethernet protocol for pause frame */ u16 num_gfltr; @@ -471,7 +471,6 @@ enum ice_pf_flags { ICE_FLAG_FD_ENA, ICE_FLAG_PTP_SUPPORTED, /* PTP is supported by NVM */ ICE_FLAG_PTP, /* PTP is enabled by software */ - ICE_FLAG_AUX_ENA, ICE_FLAG_ADV_FEATURES, ICE_FLAG_TC_MQPRIO, /* support for Multi queue TC */ ICE_FLAG_CLS_FLOWER, @@ -488,6 +487,8 @@ enum ice_pf_flags { ICE_FLAG_VF_VLAN_PRUNING, ICE_FLAG_LINK_LENIENT_MODE_ENA, ICE_FLAG_PLUG_AUX_DEV, + ICE_FLAG_MTU_CHANGED, + ICE_FLAG_GNSS, /* GNSS successfully initialized */ ICE_PF_FLAGS_NBITS /* must be last */ }; @@ -528,15 +529,7 @@ struct ice_pf { struct ice_vsi **vsi; /* VSIs created by the driver */ struct ice_sw *first_sw; /* first switch created by firmware */ u16 eswitch_mode; /* current mode of eswitch */ - /* Virtchnl/SR-IOV config info */ - struct ice_vf *vf; - u16 num_alloc_vfs; /* actual number of VFs allocated */ - u16 num_vfs_supported; /* num VFs supported for this PF */ - u16 num_qps_per_vf; - u16 num_msix_per_vf; - /* used to ratelimit the MDD event logging */ - unsigned long last_printed_mdd_jiffies; - DECLARE_BITMAP(malvfs, ICE_MAX_VF_COUNT); + struct ice_vfs vfs; DECLARE_BITMAP(features, ICE_F_MAX); DECLARE_BITMAP(state, ICE_STATE_NBITS); DECLARE_BITMAP(flags, ICE_PF_FLAGS_NBITS); @@ -551,6 +544,9 @@ struct ice_pf { struct mutex tc_mutex; /* lock to protect TC changes */ u32 msg_enable; struct ice_ptp ptp; + struct tty_driver *ice_gnss_tty_driver; + struct tty_port gnss_tty_port; + struct gnss_serial *gnss_serial; u16 num_rdma_msix; /* Total MSIX vectors for RDMA driver */ u16 
rdma_base_vector; @@ -838,6 +834,9 @@ u16 ice_get_avail_rxq_count(struct ice_pf *pf); int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx); void ice_update_vsi_stats(struct ice_vsi *vsi); void ice_update_pf_stats(struct ice_pf *pf); +void +ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp, + struct ice_q_stats stats, u64 *pkts, u64 *bytes); int ice_up(struct ice_vsi *vsi); int ice_down(struct ice_vsi *vsi); int ice_vsi_cfg(struct ice_vsi *vsi); @@ -891,7 +890,6 @@ static inline void ice_set_rdma_cap(struct ice_pf *pf) { if (pf->hw.func_caps.common_cap.rdma && pf->num_rdma_msix) { set_bit(ICE_FLAG_RDMA_ENA, pf->flags); - set_bit(ICE_FLAG_AUX_ENA, pf->flags); set_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags); } } @@ -902,8 +900,16 @@ static inline void ice_set_rdma_cap(struct ice_pf *pf) */ static inline void ice_clear_rdma_cap(struct ice_pf *pf) { - ice_unplug_aux_dev(pf); + /* We can directly unplug aux device here only if the flag bit + * ICE_FLAG_PLUG_AUX_DEV is not set because ice_unplug_aux_dev() + * could race with ice_plug_aux_dev() called from + * ice_service_task(). In this case we only clear that bit now and + * aux device will be unplugged later once ice_plug_aux_device() + * called from ice_service_task() finishes (see ice_service_task()). + */ + if (!test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags)) + ice_unplug_aux_dev(pf); + clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); - clear_bit(ICE_FLAG_AUX_ENA, pf->flags); } #endif /* _ICE_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index fd8ee5b7f596..b25e27c4d887 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -1401,6 +1401,24 @@ struct ice_aqc_get_link_topo { u8 rsvd[9]; }; +/* Read I2C (direct, 0x06E2) */ +struct ice_aqc_i2c { + struct ice_aqc_link_topo_addr topo_addr; + __le16 i2c_addr; + u8 i2c_params; +#define ICE_AQC_I2C_DATA_SIZE_M GENMASK(3, 0) +#define ICE_AQC_I2C_USE_REPEATED_START BIT(7) + + u8 rsvd; + __le16 i2c_bus_addr; + u8 rsvd2[4]; +}; + +/* Read I2C Response (direct, 0x06E2) */ +struct ice_aqc_read_i2c_resp { + u8 i2c_data[16]; +}; + /* Set Port Identification LED (direct, 0x06E9) */ struct ice_aqc_set_port_id_led { u8 lport_num; @@ -2112,6 +2130,8 @@ struct ice_aq_desc { struct ice_aqc_get_link_status get_link_status; struct ice_aqc_event_lan_overflow lan_overflow; struct ice_aqc_get_link_topo get_link_topo; + struct ice_aqc_i2c read_i2c; + struct ice_aqc_read_i2c_resp read_i2c_resp; } params; }; @@ -2226,6 +2246,7 @@ enum ice_adminq_opc { ice_aqc_opc_set_event_mask = 0x0613, ice_aqc_opc_set_mac_lb = 0x0620, ice_aqc_opc_get_link_topo = 0x06E0, + ice_aqc_opc_read_i2c = 0x06E2, ice_aqc_opc_set_port_id_led = 0x06E9, ice_aqc_opc_set_gpio = 0x06EC, ice_aqc_opc_get_gpio = 0x06ED, diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c index 2360e6abdb1e..a3094470d31d 100644 --- a/drivers/net/ethernet/intel/ice/ice_base.c +++ b/drivers/net/ethernet/intel/ice/ice_base.c @@ -323,7 +323,7 @@ ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf break; case ICE_VSI_VF: /* Firmware expects vmvf_num to be absolute VF ID */ - tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_id; + tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf->vf_id; tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF; break; case ICE_VSI_SWITCHDEV_CTRL: @@ -429,7 +429,7 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring) */ 
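/* Illustrative sketch, not part of the patch: the ice_clear_rdma_cap()
 * comment above relies on test_and_clear_bit() returning whether a plug
 * request was still pending. If it was, clearing the bit alone is safe --
 * the service task that would have plugged the device observes RDMA gone
 * and unplugs instead. A userspace model with hypothetical names:
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool plug_requested;

static void unplug_aux_dev(void)
{
	puts("aux device unplugged directly");
}

static void clear_rdma_cap(void)
{
	/* atomic fetch-and-clear, like the kernel's test_and_clear_bit() */
	if (!atomic_exchange(&plug_requested, false))
		unplug_aux_dev();
	else
		puts("plug pending: service task will unplug it later");
}

int main(void)
{
	atomic_store(&plug_requested, true);
	clear_rdma_cap();	/* defers to the service task */
	clear_rdma_cap();	/* no plug pending: unplugs directly */
	return 0;
}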
if (ice_is_dvm_ena(hw)) if (vsi->type == ICE_VSI_VF && - ice_vf_is_port_vlan_ena(&vsi->back->vf[vsi->vf_id])) + ice_vf_is_port_vlan_ena(vsi->vf)) rlan_ctx.l2tsel = 1; else rlan_ctx.l2tsel = 0; diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index c57e5fc41cf8..9619bdb9e49a 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -3379,7 +3379,7 @@ ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(hw) && !ice_fw_supports_report_dflt_cfg(hw)) { - struct ice_link_default_override_tlv tlv; + struct ice_link_default_override_tlv tlv = { 0 }; status = ice_get_link_default_override(&tlv, pi); if (status) @@ -4798,6 +4798,59 @@ ice_sched_query_elem(struct ice_hw *hw, u32 node_teid, } /** + * ice_aq_read_i2c + * @hw: pointer to the hw struct + * @topo_addr: topology address for a device to communicate with + * @bus_addr: 7-bit I2C bus address + * @addr: I2C memory address (I2C offset) with up to 16 bits + * @params: I2C parameters: bit [7] - Repeated start, + * bits [6:5] data offset size, + * bit [4] - I2C address type, + * bits [3:0] - data size to read (0-16 bytes) + * @data: pointer to data (0 to 16 bytes) to be read from the I2C device + * @cd: pointer to command details structure or NULL + * + * Read I2C (0x06E2) + */ +int +ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr, + u16 bus_addr, __le16 addr, u8 params, u8 *data, + struct ice_sq_cd *cd) +{ + struct ice_aq_desc desc = { 0 }; + struct ice_aqc_i2c *cmd; + u8 data_size; + int status; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_i2c); + cmd = &desc.params.read_i2c; + + if (!data) + return -EINVAL; + + data_size = FIELD_GET(ICE_AQC_I2C_DATA_SIZE_M, params); + + cmd->i2c_bus_addr = cpu_to_le16(bus_addr); + cmd->topo_addr = topo_addr; + cmd->i2c_params = params; + cmd->i2c_addr = addr; + + status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); + if (!status) { + struct ice_aqc_read_i2c_resp *resp; + u8 i; + + resp = &desc.params.read_i2c_resp; + for (i = 0; i < data_size; i++) { + *data = resp->i2c_data[i]; + data++; + } + } + + return status; +} + +/** * ice_aq_set_driver_param - Set driver parameter to share via firmware * @hw: pointer to the HW struct * @idx: parameter index to set diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index d28749edd92f..1efe6b2c32f0 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -4,6 +4,8 @@ #ifndef _ICE_COMMON_H_ #define _ICE_COMMON_H_ +#include <linux/bitfield.h> + #include "ice.h" #include "ice_type.h" #include "ice_nvm.h" @@ -208,5 +210,9 @@ ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size, bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw); int ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add); +int +ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr, + u16 bus_addr, __le16 addr, u8 params, u8 *data, + struct ice_sq_cd *cd); bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw); #endif /* _ICE_COMMON_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c index e1cb6682eee2..9a84d746a6c4 100644 --- a/drivers/net/ethernet/intel/ice/ice_eswitch.c +++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c @@ -44,6 +44,7 @@ 
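/* Illustrative fragment, not part of the patch: given the bit layout that
 * ice_aq_read_i2c() documents above (bit 7 = repeated start, bits 3:0 =
 * read size) and the ICE_AQC_I2C_* masks added in ice_adminq_cmd.h, a
 * caller issuing a repeated-start, 2-byte read would build 'params' like
 * this. hw, link_topo, bus_addr, offset, and data are assumed to be set
 * up by the caller, as the GNSS code later in this patch does.
 */
	u8 params;
	int err;

	params = ICE_AQC_I2C_USE_REPEATED_START |	 /* BIT(7) */
		 FIELD_PREP(ICE_AQC_I2C_DATA_SIZE_M, 2); /* GENMASK(3, 0) */

	err = ice_aq_read_i2c(hw, link_topo, bus_addr, cpu_to_le16(offset),
			      params, data, NULL);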
ice_eswitch_add_vf_mac_rule(struct ice_pf *pf, struct ice_vf *vf, const u8 *mac)
 			ctrl_vsi->rxq_map[vf->vf_id];
 	rule_info.flags_info.act |= ICE_SINGLE_ACT_LB_ENABLE;
 	rule_info.flags_info.act_valid = true;
+	rule_info.tun_type = ICE_SW_TUN_AND_NON_TUN;
 
 	err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info,
 			       vf->repr->mac_rule);
@@ -175,10 +176,20 @@ static void ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf)
 	int q_id;
 
 	ice_for_each_txq(vsi, q_id) {
-		struct ice_repr *repr = pf->vf[q_id].repr;
-		struct ice_q_vector *q_vector = repr->q_vector;
-		struct ice_tx_ring *tx_ring = vsi->tx_rings[q_id];
-		struct ice_rx_ring *rx_ring = vsi->rx_rings[q_id];
+		struct ice_q_vector *q_vector;
+		struct ice_tx_ring *tx_ring;
+		struct ice_rx_ring *rx_ring;
+		struct ice_repr *repr;
+		struct ice_vf *vf;
+
+		vf = ice_get_vf_by_id(pf, q_id);
+		if (WARN_ON(!vf))
+			continue;
+
+		repr = vf->repr;
+		q_vector = repr->q_vector;
+		tx_ring = vsi->tx_rings[q_id];
+		rx_ring = vsi->rx_rings[q_id];
 
 		q_vector->vsi = vsi;
 		q_vector->reg_idx = vsi->q_vectors[0]->reg_idx;
@@ -198,6 +209,38 @@ static void ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf)
 		rx_ring->q_vector = q_vector;
 		rx_ring->next = NULL;
 		rx_ring->netdev = repr->netdev;
+
+		ice_put_vf(vf);
+	}
+}
+
+/**
+ * ice_eswitch_release_reprs - clear PR VSIs configuration
+ * @pf: pointer to PF struct
+ * @ctrl_vsi: pointer to switchdev control VSI
+ */
+static void
+ice_eswitch_release_reprs(struct ice_pf *pf, struct ice_vsi *ctrl_vsi)
+{
+	struct ice_vf *vf;
+	unsigned int bkt;
+
+	lockdep_assert_held(&pf->vfs.table_lock);
+
+	ice_for_each_vf(pf, bkt, vf) {
+		struct ice_vsi *vsi = vf->repr->src_vsi;
+
+		/* Skip VFs that aren't configured */
+		if (!vf->repr->dst)
+			continue;
+
+		ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
+		metadata_dst_free(vf->repr->dst);
+		vf->repr->dst = NULL;
+		ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr,
+					       ICE_FWD_TO_VSI);
+
+		netif_napi_del(&vf->repr->q_vector->napi);
 	}
 }
 
@@ -209,11 +252,13 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf)
 {
 	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
 	int max_vsi_num = 0;
-	int i;
+	struct ice_vf *vf;
+	unsigned int bkt;
+
+	lockdep_assert_held(&pf->vfs.table_lock);
 
-	ice_for_each_vf(pf, i) {
-		struct ice_vsi *vsi = pf->vf[i].repr->src_vsi;
-		struct ice_vf *vf = &pf->vf[i];
+	ice_for_each_vf(pf, bkt, vf) {
+		struct ice_vsi *vsi = vf->repr->src_vsi;
 
 		ice_remove_vsi_fltr(&pf->hw, vsi->idx);
 		vf->repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
@@ -230,6 +275,7 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf)
 						       vf->hw_lan_addr.addr,
 						       ICE_FWD_TO_VSI);
 			metadata_dst_free(vf->repr->dst);
+			vf->repr->dst = NULL;
 			goto err;
 		}
 
@@ -238,6 +284,7 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf)
 						       vf->hw_lan_addr.addr,
 						       ICE_FWD_TO_VSI);
 			metadata_dst_free(vf->repr->dst);
+			vf->repr->dst = NULL;
 			ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
 			goto err;
 		}
@@ -251,8 +298,8 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf)
 		netif_keep_dst(vf->repr->netdev);
 	}
 
-	ice_for_each_vf(pf, i) {
-		struct ice_repr *repr = pf->vf[i].repr;
+	ice_for_each_vf(pf, bkt, vf) {
+		struct ice_repr *repr = vf->repr;
 		struct ice_vsi *vsi = repr->src_vsi;
 		struct metadata_dst *dst;
 
@@ -265,43 +312,12 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf)
 	return 0;
 
 err:
-	for (i = i - 1; i >= 0; i--) {
-		struct ice_vsi *vsi = pf->vf[i].repr->src_vsi;
-		struct ice_vf *vf = &pf->vf[i];
-
-		ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
-		metadata_dst_free(vf->repr->dst);
-
ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr, - ICE_FWD_TO_VSI); - } + ice_eswitch_release_reprs(pf, ctrl_vsi); return -ENODEV; } /** - * ice_eswitch_release_reprs - clear PR VSIs configuration - * @pf: poiner to PF struct - * @ctrl_vsi: pointer to switchdev control VSI - */ -static void -ice_eswitch_release_reprs(struct ice_pf *pf, struct ice_vsi *ctrl_vsi) -{ - int i; - - ice_for_each_vf(pf, i) { - struct ice_vsi *vsi = pf->vf[i].repr->src_vsi; - struct ice_vf *vf = &pf->vf[i]; - - ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof); - metadata_dst_free(vf->repr->dst); - ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr, - ICE_FWD_TO_VSI); - - netif_napi_del(&vf->repr->q_vector->napi); - } -} - -/** * ice_eswitch_update_repr - reconfigure VF port representor * @vsi: VF VSI for which port representor is configured */ @@ -315,7 +331,7 @@ void ice_eswitch_update_repr(struct ice_vsi *vsi) if (!ice_is_switchdev_running(pf)) return; - vf = &pf->vf[vsi->vf_id]; + vf = vsi->vf; repr = vf->repr; repr->src_vsi = vsi; repr->dst->u.port_info.port_id = vsi->vsi_num; @@ -323,7 +339,8 @@ void ice_eswitch_update_repr(struct ice_vsi *vsi) ret = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof); if (ret) { ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr, ICE_FWD_TO_VSI); - dev_err(ice_pf_to_dev(pf), "Failed to update VF %d port representor", vsi->vf_id); + dev_err(ice_pf_to_dev(pf), "Failed to update VF %d port representor", + vsi->vf->vf_id); } } @@ -407,7 +424,7 @@ static void ice_eswitch_release_env(struct ice_pf *pf) static struct ice_vsi * ice_eswitch_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) { - return ice_vsi_setup(pf, pi, ICE_VSI_SWITCHDEV_CTRL, ICE_INVAL_VFID, NULL); + return ice_vsi_setup(pf, pi, ICE_VSI_SWITCHDEV_CTRL, NULL, NULL); } /** @@ -416,10 +433,13 @@ ice_eswitch_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) */ static void ice_eswitch_napi_del(struct ice_pf *pf) { - int i; + struct ice_vf *vf; + unsigned int bkt; - ice_for_each_vf(pf, i) - netif_napi_del(&pf->vf[i].repr->q_vector->napi); + lockdep_assert_held(&pf->vfs.table_lock); + + ice_for_each_vf(pf, bkt, vf) + netif_napi_del(&vf->repr->q_vector->napi); } /** @@ -428,10 +448,13 @@ static void ice_eswitch_napi_del(struct ice_pf *pf) */ static void ice_eswitch_napi_enable(struct ice_pf *pf) { - int i; + struct ice_vf *vf; + unsigned int bkt; + + lockdep_assert_held(&pf->vfs.table_lock); - ice_for_each_vf(pf, i) - napi_enable(&pf->vf[i].repr->q_vector->napi); + ice_for_each_vf(pf, bkt, vf) + napi_enable(&vf->repr->q_vector->napi); } /** @@ -440,10 +463,13 @@ static void ice_eswitch_napi_enable(struct ice_pf *pf) */ static void ice_eswitch_napi_disable(struct ice_pf *pf) { - int i; + struct ice_vf *vf; + unsigned int bkt; + + lockdep_assert_held(&pf->vfs.table_lock); - ice_for_each_vf(pf, i) - napi_disable(&pf->vf[i].repr->q_vector->napi); + ice_for_each_vf(pf, bkt, vf) + napi_disable(&vf->repr->q_vector->napi); } /** @@ -521,7 +547,7 @@ ice_eswitch_mode_set(struct devlink *devlink, u16 mode, if (pf->eswitch_mode == mode) return 0; - if (pf->num_alloc_vfs) { + if (ice_has_vfs(pf)) { dev_info(ice_pf_to_dev(pf), "Changing eswitch mode is allowed only if there is no VFs created"); NL_SET_ERR_MSG_MOD(extack, "Changing eswitch mode is allowed only if there is no VFs created"); return -EOPNOTSUPP; @@ -612,16 +638,17 @@ int ice_eswitch_configure(struct ice_pf *pf) */ static void ice_eswitch_start_all_tx_queues(struct ice_pf *pf) { - struct ice_repr *repr; - int i; + struct ice_vf 
*vf;
+	unsigned int bkt;
+
+	lockdep_assert_held(&pf->vfs.table_lock);
 
 	if (test_bit(ICE_DOWN, pf->state))
 		return;
 
-	ice_for_each_vf(pf, i) {
-		repr = pf->vf[i].repr;
-		if (repr)
-			ice_repr_start_tx_queues(repr);
+	ice_for_each_vf(pf, bkt, vf) {
+		if (vf->repr)
+			ice_repr_start_tx_queues(vf->repr);
 	}
 }
 
@@ -631,16 +658,17 @@ static void ice_eswitch_start_all_tx_queues(struct ice_pf *pf)
  */
 void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf)
 {
-	struct ice_repr *repr;
-	int i;
+	struct ice_vf *vf;
+	unsigned int bkt;
+
+	lockdep_assert_held(&pf->vfs.table_lock);
 
 	if (test_bit(ICE_DOWN, pf->state))
 		return;
 
-	ice_for_each_vf(pf, i) {
-		repr = pf->vf[i].repr;
-		if (repr)
-			ice_repr_stop_tx_queues(repr);
+	ice_for_each_vf(pf, bkt, vf) {
+		if (vf->repr)
+			ice_repr_stop_tx_queues(vf->repr);
 	}
 }
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index a3492754d0d3..24cda7e1f916 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -316,16 +316,20 @@ out:
  */
 static bool ice_active_vfs(struct ice_pf *pf)
 {
-	unsigned int i;
-
-	ice_for_each_vf(pf, i) {
-		struct ice_vf *vf = &pf->vf[i];
+	bool active = false;
+	struct ice_vf *vf;
+	unsigned int bkt;
 
-		if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
-			return true;
+	rcu_read_lock();
+	ice_for_each_vf_rcu(pf, bkt, vf) {
+		if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+			active = true;
+			break;
+		}
 	}
+	rcu_read_unlock();
 
-	return false;
+	return active;
 }
 
 /**
@@ -1298,7 +1302,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
 	}
 
 	if (test_bit(ICE_FLAG_VF_VLAN_PRUNING, change_flags) &&
-	    pf->num_alloc_vfs) {
+	    ice_has_vfs(pf)) {
 		dev_err(dev, "vf-vlan-pruning: VLAN pruning cannot be changed while VFs are active.\n");
 		/* toggle bit back to previous state */
 		change_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags);
@@ -2307,7 +2311,7 @@ ice_set_link_ksettings(struct net_device *netdev,
 	if (err)
 		goto done;
 
-	curr_link_speed = pi->phy.link_info.link_speed;
+	curr_link_speed = pi->phy.curr_user_speed_req;
 	adv_link_speed = ice_ksettings_find_adv_link_speed(ks);
 
 	/* If speed didn't get set, set it to what it currently is.
diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.c b/drivers/net/ethernet/intel/ice/ice_gnss.c
new file mode 100644
index 000000000000..755e1580f368
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_gnss.c
@@ -0,0 +1,376 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2021, Intel Corporation. */
+
+#include "ice.h"
+#include "ice_lib.h"
+#include <linux/tty_driver.h>
+
+/**
+ * ice_gnss_read - Read data from internal GNSS module
+ * @work: GNSS read work structure
+ *
+ * Read the data from the internal GNSS receiver and push it to the TTY port
+ * buffer for users to read.
+ */ +static void ice_gnss_read(struct kthread_work *work) +{ + struct gnss_serial *gnss = container_of(work, struct gnss_serial, + read_work.work); + struct ice_aqc_link_topo_addr link_topo; + u8 i2c_params, bytes_read; + struct tty_port *port; + struct ice_pf *pf; + struct ice_hw *hw; + __be16 data_len_b; + char *buf = NULL; + u16 i, data_len; + int err = 0; + + pf = gnss->back; + if (!pf || !gnss->tty || !gnss->tty->port) { + err = -EFAULT; + goto exit; + } + + hw = &pf->hw; + port = gnss->tty->port; + + buf = (char *)get_zeroed_page(GFP_KERNEL); + if (!buf) { + err = -ENOMEM; + goto exit; + } + + memset(&link_topo, 0, sizeof(struct ice_aqc_link_topo_addr)); + link_topo.topo_params.index = ICE_E810T_GNSS_I2C_BUS; + link_topo.topo_params.node_type_ctx |= + FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, + ICE_AQC_LINK_TOPO_NODE_CTX_OVERRIDE); + + i2c_params = ICE_GNSS_UBX_DATA_LEN_WIDTH | + ICE_AQC_I2C_USE_REPEATED_START; + + /* Read data length in a loop, when it's not 0 the data is ready */ + for (i = 0; i < ICE_MAX_UBX_READ_TRIES; i++) { + err = ice_aq_read_i2c(hw, link_topo, ICE_GNSS_UBX_I2C_BUS_ADDR, + cpu_to_le16(ICE_GNSS_UBX_DATA_LEN_H), + i2c_params, (u8 *)&data_len_b, NULL); + if (err) + goto exit_buf; + + data_len = be16_to_cpu(data_len_b); + if (data_len != 0 && data_len != U16_MAX) + break; + + mdelay(10); + } + + data_len = min(data_len, (u16)PAGE_SIZE); + data_len = tty_buffer_request_room(port, data_len); + if (!data_len) { + err = -ENOMEM; + goto exit_buf; + } + + /* Read received data */ + for (i = 0; i < data_len; i += bytes_read) { + u16 bytes_left = data_len - i; + + bytes_read = bytes_left < ICE_MAX_I2C_DATA_SIZE ? bytes_left : + ICE_MAX_I2C_DATA_SIZE; + + err = ice_aq_read_i2c(hw, link_topo, ICE_GNSS_UBX_I2C_BUS_ADDR, + cpu_to_le16(ICE_GNSS_UBX_EMPTY_DATA), + bytes_read, &buf[i], NULL); + if (err) + goto exit_buf; + } + + /* Send the data to the tty layer for users to read. This doesn't + * actually push the data through unless tty->low_latency is set. + */ + tty_insert_flip_string(port, buf, i); + tty_flip_buffer_push(port); + +exit_buf: + free_page((unsigned long)buf); + kthread_queue_delayed_work(gnss->kworker, &gnss->read_work, + ICE_GNSS_TIMER_DELAY_TIME); +exit: + if (err) + dev_dbg(ice_pf_to_dev(pf), "GNSS failed to read err=%d\n", err); +} + +/** + * ice_gnss_struct_init - Initialize GNSS structure for the TTY + * @pf: Board private structure + */ +static struct gnss_serial *ice_gnss_struct_init(struct ice_pf *pf) +{ + struct device *dev = ice_pf_to_dev(pf); + struct kthread_worker *kworker; + struct gnss_serial *gnss; + + gnss = kzalloc(sizeof(*gnss), GFP_KERNEL); + if (!gnss) + return NULL; + + mutex_init(&gnss->gnss_mutex); + gnss->open_count = 0; + gnss->back = pf; + pf->gnss_serial = gnss; + + kthread_init_delayed_work(&gnss->read_work, ice_gnss_read); + /* Allocate a kworker for handling work required for the GNSS TTY + * writes. + */ + kworker = kthread_create_worker(0, "ice-gnss-%s", dev_name(dev)); + if (!kworker) { + kfree(gnss); + return NULL; + } + + gnss->kworker = kworker; + + return gnss; +} + +/** + * ice_gnss_tty_open - Initialize GNSS structures on TTY device open + * @tty: pointer to the tty_struct + * @filp: pointer to the file + * + * This routine is mandatory. If this routine is not filled in, the attempted + * open will fail with ENODEV. 
+ */ +static int ice_gnss_tty_open(struct tty_struct *tty, struct file *filp) +{ + struct gnss_serial *gnss; + struct ice_pf *pf; + + pf = (struct ice_pf *)tty->driver->driver_state; + if (!pf) + return -EFAULT; + + /* Clear the pointer in case something fails */ + tty->driver_data = NULL; + + /* Get the serial object associated with this tty pointer */ + gnss = pf->gnss_serial; + if (!gnss) { + /* Initialize GNSS struct on the first device open */ + gnss = ice_gnss_struct_init(pf); + if (!gnss) + return -ENOMEM; + } + + mutex_lock(&gnss->gnss_mutex); + + /* Save our structure within the tty structure */ + tty->driver_data = gnss; + gnss->tty = tty; + gnss->open_count++; + kthread_queue_delayed_work(gnss->kworker, &gnss->read_work, 0); + + mutex_unlock(&gnss->gnss_mutex); + + return 0; +} + +/** + * ice_gnss_tty_close - Cleanup GNSS structures on tty device close + * @tty: pointer to the tty_struct + * @filp: pointer to the file + */ +static void ice_gnss_tty_close(struct tty_struct *tty, struct file *filp) +{ + struct gnss_serial *gnss = tty->driver_data; + struct ice_pf *pf; + + if (!gnss) + return; + + pf = (struct ice_pf *)tty->driver->driver_state; + if (!pf) + return; + + mutex_lock(&gnss->gnss_mutex); + + if (!gnss->open_count) { + /* Port was never opened */ + dev_err(ice_pf_to_dev(pf), "GNSS port not opened\n"); + goto exit; + } + + gnss->open_count--; + if (gnss->open_count <= 0) { + /* Port is in shutdown state */ + kthread_cancel_delayed_work_sync(&gnss->read_work); + } +exit: + mutex_unlock(&gnss->gnss_mutex); +} + +/** + * ice_gnss_tty_write - Dummy TTY write function to avoid kernel panic + * @tty: pointer to the tty_struct + * @buf: pointer to the user data + * @cnt: the number of characters that was able to be sent to the hardware (or + * queued to be sent at a later time) + */ +static int +ice_gnss_tty_write(struct tty_struct *tty, const unsigned char *buf, int cnt) +{ + return 0; +} + +/** + * ice_gnss_tty_write_room - Dummy TTY write_room function to avoid kernel panic + * @tty: pointer to the tty_struct + */ +static unsigned int ice_gnss_tty_write_room(struct tty_struct *tty) +{ + return 0; +} + +static const struct tty_operations tty_gps_ops = { + .open = ice_gnss_tty_open, + .close = ice_gnss_tty_close, + .write = ice_gnss_tty_write, + .write_room = ice_gnss_tty_write_room, +}; + +/** + * ice_gnss_create_tty_driver - Create a TTY driver for GNSS + * @pf: Board private structure + */ +static struct tty_driver *ice_gnss_create_tty_driver(struct ice_pf *pf) +{ + struct device *dev = ice_pf_to_dev(pf); + const int ICE_TTYDRV_NAME_MAX = 14; + struct tty_driver *tty_driver; + char *ttydrv_name; + int err; + + tty_driver = tty_alloc_driver(1, TTY_DRIVER_REAL_RAW); + if (!tty_driver) { + dev_err(ice_pf_to_dev(pf), "Failed to allocate memory for GNSS TTY\n"); + return NULL; + } + + ttydrv_name = kzalloc(ICE_TTYDRV_NAME_MAX, GFP_KERNEL); + if (!ttydrv_name) { + tty_driver_kref_put(tty_driver); + return NULL; + } + + snprintf(ttydrv_name, ICE_TTYDRV_NAME_MAX, "ttyGNSS_%02x%02x_", + (u8)pf->pdev->bus->number, (u8)PCI_SLOT(pf->pdev->devfn)); + + /* Initialize the tty driver*/ + tty_driver->owner = THIS_MODULE; + tty_driver->driver_name = dev_driver_string(dev); + tty_driver->name = (const char *)ttydrv_name; + tty_driver->type = TTY_DRIVER_TYPE_SERIAL; + tty_driver->subtype = SERIAL_TYPE_NORMAL; + tty_driver->init_termios = tty_std_termios; + tty_driver->init_termios.c_iflag &= ~INLCR; + tty_driver->init_termios.c_iflag |= IGNCR; + tty_driver->init_termios.c_oflag &= ~OPOST; + 
tty_driver->init_termios.c_lflag &= ~ICANON; + tty_driver->init_termios.c_cflag &= ~(CSIZE | CBAUD | CBAUDEX); + /* baud rate 9600 */ + tty_termios_encode_baud_rate(&tty_driver->init_termios, 9600, 9600); + tty_driver->driver_state = pf; + tty_set_operations(tty_driver, &tty_gps_ops); + + pf->gnss_serial = NULL; + + tty_port_init(&pf->gnss_tty_port); + tty_port_link_device(&pf->gnss_tty_port, tty_driver, 0); + + err = tty_register_driver(tty_driver); + if (err) { + dev_err(ice_pf_to_dev(pf), "Failed to register TTY driver err=%d\n", + err); + + tty_port_destroy(&pf->gnss_tty_port); + kfree(ttydrv_name); + tty_driver_kref_put(pf->ice_gnss_tty_driver); + + return NULL; + } + + return tty_driver; +} + +/** + * ice_gnss_init - Initialize GNSS TTY support + * @pf: Board private structure + */ +void ice_gnss_init(struct ice_pf *pf) +{ + struct tty_driver *tty_driver; + + tty_driver = ice_gnss_create_tty_driver(pf); + if (!tty_driver) + return; + + pf->ice_gnss_tty_driver = tty_driver; + + set_bit(ICE_FLAG_GNSS, pf->flags); + dev_info(ice_pf_to_dev(pf), "GNSS TTY init successful\n"); +} + +/** + * ice_gnss_exit - Disable GNSS TTY support + * @pf: Board private structure + */ +void ice_gnss_exit(struct ice_pf *pf) +{ + if (!test_bit(ICE_FLAG_GNSS, pf->flags) || !pf->ice_gnss_tty_driver) + return; + + tty_port_destroy(&pf->gnss_tty_port); + + if (pf->gnss_serial) { + struct gnss_serial *gnss = pf->gnss_serial; + + kthread_cancel_delayed_work_sync(&gnss->read_work); + kfree(gnss); + pf->gnss_serial = NULL; + } + + tty_unregister_driver(pf->ice_gnss_tty_driver); + kfree(pf->ice_gnss_tty_driver->name); + tty_driver_kref_put(pf->ice_gnss_tty_driver); + pf->ice_gnss_tty_driver = NULL; +} + +/** + * ice_gnss_is_gps_present - Check if GPS HW is present + * @hw: pointer to HW struct + */ +bool ice_gnss_is_gps_present(struct ice_hw *hw) +{ + if (!hw->func_caps.ts_func_info.src_tmr_owned) + return false; + +#if IS_ENABLED(CONFIG_PTP_1588_CLOCK) + if (ice_is_e810t(hw)) { + int err; + u8 data; + + err = ice_read_pca9575_reg_e810t(hw, ICE_PCA9575_P0_IN, &data); + if (err || !!(data & ICE_E810T_P0_GNSS_PRSNT_N)) + return false; + } else { + return false; + } +#else + if (!ice_is_e810t(hw)) + return false; +#endif /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */ + + return true; +} diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.h b/drivers/net/ethernet/intel/ice/ice_gnss.h new file mode 100644 index 000000000000..9211adb2372c --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_gnss.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018-2021, Intel Corporation. 
*/ + +#ifndef _ICE_GNSS_H_ +#define _ICE_GNSS_H_ + +#include <linux/tty.h> +#include <linux/tty_flip.h> + +#define ICE_E810T_GNSS_I2C_BUS 0x2 +#define ICE_GNSS_UBX_I2C_BUS_ADDR 0x42 +/* Data length register is big endian */ +#define ICE_GNSS_UBX_DATA_LEN_H 0xFD +#define ICE_GNSS_UBX_DATA_LEN_WIDTH 2 +#define ICE_GNSS_UBX_EMPTY_DATA 0xFF +#define ICE_GNSS_TIMER_DELAY_TIME (HZ / 10) /* 0.1 second per message */ +#define ICE_MAX_I2C_DATA_SIZE FIELD_MAX(ICE_AQC_I2C_DATA_SIZE_M) +#define ICE_MAX_UBX_READ_TRIES 255 + +/** + * struct gnss_serial - data used to initialize GNSS TTY port + * @back: back pointer to PF + * @tty: pointer to the tty for this device + * @open_count: number of times this port has been opened + * @gnss_mutex: gnss_mutex used to protect GNSS serial operations + * @kworker: kwork thread for handling periodic work + * @read_work: read_work function for handling GNSS reads + */ +struct gnss_serial { + struct ice_pf *back; + struct tty_struct *tty; + int open_count; + struct mutex gnss_mutex; /* protects GNSS serial structure */ + struct kthread_worker *kworker; + struct kthread_delayed_work read_work; +}; + +#if IS_ENABLED(CONFIG_TTY) +void ice_gnss_init(struct ice_pf *pf); +void ice_gnss_exit(struct ice_pf *pf); +bool ice_gnss_is_gps_present(struct ice_hw *hw); +#else +static inline void ice_gnss_init(struct ice_pf *pf) { } +static inline void ice_gnss_exit(struct ice_pf *pf) { } +static inline bool ice_gnss_is_gps_present(struct ice_hw *hw) +{ + return false; +} +#endif /* IS_ENABLED(CONFIG_TTY) */ +#endif /* _ICE_GNSS_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c index 263a2e7577a2..73aa520317d4 100644 --- a/drivers/net/ethernet/intel/ice/ice_idc.c +++ b/drivers/net/ethernet/intel/ice/ice_idc.c @@ -79,7 +79,7 @@ int ice_add_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset) dev = ice_pf_to_dev(pf); - if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) + if (!ice_is_rdma_ena(pf)) return -EINVAL; vsi = ice_get_main_vsi(pf); @@ -241,7 +241,7 @@ EXPORT_SYMBOL_GPL(ice_get_qos_params); */ static int ice_reserve_rdma_qvector(struct ice_pf *pf) { - if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) { + if (ice_is_rdma_ena(pf)) { int index; index = ice_get_res(pf, pf->irq_tracker, pf->num_rdma_msix, @@ -279,7 +279,7 @@ int ice_plug_aux_dev(struct ice_pf *pf) /* if this PF doesn't support a technology that requires auxiliary * devices, then gracefully exit */ - if (!ice_is_aux_ena(pf)) + if (!ice_is_rdma_ena(pf)) return 0; iadev = kzalloc(sizeof(*iadev), GFP_KERNEL); diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index f23917d6a495..b897926f817d 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -166,21 +166,19 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi) /** * ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI * @vsi: the VSI being configured - * @vf_id: ID of the VF being configured + * @vf: the VF associated with this VSI, if any * * Return 0 on success and a negative value on error */ -static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id) +static void ice_vsi_set_num_qs(struct ice_vsi *vsi, struct ice_vf *vf) { + enum ice_vsi_type vsi_type = vsi->type; struct ice_pf *pf = vsi->back; - struct ice_vf *vf = NULL; - if (vsi->type == ICE_VSI_VF) - vsi->vf_id = vf_id; - else - vsi->vf_id = ICE_INVAL_VFID; + if (WARN_ON(vsi_type == ICE_VSI_VF && !vf)) + return; - switch (vsi->type) { 
+ switch (vsi_type) { case ICE_VSI_PF: if (vsi->req_txq) { vsi->alloc_txq = vsi->req_txq; @@ -217,22 +215,21 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id) /* The number of queues for ctrl VSI is equal to number of VFs. * Each ring is associated to the corresponding VF_PR netdev. */ - vsi->alloc_txq = pf->num_alloc_vfs; - vsi->alloc_rxq = pf->num_alloc_vfs; + vsi->alloc_txq = ice_get_num_vfs(pf); + vsi->alloc_rxq = vsi->alloc_txq; vsi->num_q_vectors = 1; break; case ICE_VSI_VF: - vf = &pf->vf[vsi->vf_id]; if (vf->num_req_qs) vf->num_vf_qs = vf->num_req_qs; vsi->alloc_txq = vf->num_vf_qs; vsi->alloc_rxq = vf->num_vf_qs; - /* pf->num_msix_per_vf includes (VF miscellaneous vector + + /* pf->vfs.num_msix_per includes (VF miscellaneous vector + * data queue interrupts). Since vsi->num_q_vectors is number * of queues vectors, subtract 1 (ICE_NONQ_VECS_VF) from the * original vector count */ - vsi->num_q_vectors = pf->num_msix_per_vf - ICE_NONQ_VECS_VF; + vsi->num_q_vectors = pf->vfs.num_msix_per - ICE_NONQ_VECS_VF; break; case ICE_VSI_CTRL: vsi->alloc_txq = 1; @@ -248,7 +245,7 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id) vsi->alloc_rxq = 1; break; default: - dev_warn(ice_pf_to_dev(pf), "Unknown VSI type %d\n", vsi->type); + dev_warn(ice_pf_to_dev(pf), "Unknown VSI type %d\n", vsi_type); break; } @@ -299,7 +296,7 @@ void ice_vsi_delete(struct ice_vsi *vsi) return; if (vsi->type == ICE_VSI_VF) - ctxt->vf_num = vsi->vf_id; + ctxt->vf_num = vsi->vf->vf_id; ctxt->vsi_num = vsi->vsi_num; memcpy(&ctxt->info, &vsi->info, sizeof(ctxt->info)); @@ -384,8 +381,7 @@ int ice_vsi_clear(struct ice_vsi *vsi) pf->vsi[vsi->idx] = NULL; if (vsi->idx < pf->next_vsi && vsi->type != ICE_VSI_CTRL) pf->next_vsi = vsi->idx; - if (vsi->idx < pf->next_vsi && vsi->type == ICE_VSI_CTRL && - vsi->vf_id != ICE_INVAL_VFID) + if (vsi->idx < pf->next_vsi && vsi->type == ICE_VSI_CTRL && vsi->vf) pf->next_vsi = vsi->idx; ice_vsi_free_arrays(vsi); @@ -437,13 +433,16 @@ static irqreturn_t ice_eswitch_msix_clean_rings(int __always_unused irq, void *d { struct ice_q_vector *q_vector = (struct ice_q_vector *)data; struct ice_pf *pf = q_vector->vsi->back; - int i; + struct ice_vf *vf; + unsigned int bkt; if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring) return IRQ_HANDLED; - ice_for_each_vf(pf, i) - napi_schedule(&pf->vf[i].repr->q_vector->napi); + rcu_read_lock(); + ice_for_each_vf_rcu(pf, bkt, vf) + napi_schedule(&vf->repr->q_vector->napi); + rcu_read_unlock(); return IRQ_HANDLED; } @@ -453,17 +452,24 @@ static irqreturn_t ice_eswitch_msix_clean_rings(int __always_unused irq, void *d * @pf: board private structure * @vsi_type: type of VSI * @ch: ptr to channel - * @vf_id: ID of the VF being configured + * @vf: VF for ICE_VSI_VF and ICE_VSI_CTRL + * + * The VF pointer is used for ICE_VSI_VF and ICE_VSI_CTRL. For ICE_VSI_CTRL, + * it may be NULL in the case there is no association with a VF. For + * ICE_VSI_VF the VF pointer *must not* be NULL. * * returns a pointer to a VSI on success, NULL on failure. 
*/ static struct ice_vsi * ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, - struct ice_channel *ch, u16 vf_id) + struct ice_channel *ch, struct ice_vf *vf) { struct device *dev = ice_pf_to_dev(pf); struct ice_vsi *vsi = NULL; + if (WARN_ON(vsi_type == ICE_VSI_VF && !vf)) + return NULL; + /* Need to protect the allocation of the VSIs at the PF level */ mutex_lock(&pf->sw_mutex); @@ -485,9 +491,9 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, set_bit(ICE_VSI_DOWN, vsi->state); if (vsi_type == ICE_VSI_VF) - ice_vsi_set_num_qs(vsi, vf_id); + ice_vsi_set_num_qs(vsi, vf); else if (vsi_type != ICE_VSI_CHNL) - ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID); + ice_vsi_set_num_qs(vsi, NULL); switch (vsi->type) { case ICE_VSI_SWITCHDEV_CTRL: @@ -510,10 +516,16 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, /* Setup ctrl VSI MSIX irq handler */ vsi->irq_handler = ice_msix_clean_ctrl_vsi; + + /* For the PF control VSI this is NULL, for the VF control VSI + * this will be the first VF to allocate it. + */ + vsi->vf = vf; break; case ICE_VSI_VF: if (ice_vsi_alloc_arrays(vsi)) goto err_rings; + vsi->vf = vf; break; case ICE_VSI_CHNL: if (!ch) @@ -531,7 +543,7 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, goto unlock_pf; } - if (vsi->type == ICE_VSI_CTRL && vf_id == ICE_INVAL_VFID) { + if (vsi->type == ICE_VSI_CTRL && !vf) { /* Use the last VSI slot as the index for PF control VSI */ vsi->idx = pf->num_alloc_vsi - 1; pf->ctrl_vsi_idx = vsi->idx; @@ -546,8 +558,8 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, pf->next_vsi); } - if (vsi->type == ICE_VSI_CTRL && vf_id != ICE_INVAL_VFID) - pf->vf[vf_id].ctrl_vsi_idx = vsi->idx; + if (vsi->type == ICE_VSI_CTRL && vf) + vf->ctrl_vsi_idx = vsi->idx; goto unlock_pf; err_rings: @@ -733,14 +745,14 @@ bool ice_is_safe_mode(struct ice_pf *pf) } /** - * ice_is_aux_ena + * ice_is_rdma_ena * @pf: pointer to the PF struct * - * returns true if AUX devices/drivers are supported, false otherwise + * returns true if RDMA is currently supported, false otherwise */ -bool ice_is_aux_ena(struct ice_pf *pf) +bool ice_is_rdma_ena(struct ice_pf *pf) { - return test_bit(ICE_FLAG_AUX_ENA, pf->flags); + return test_bit(ICE_FLAG_RDMA_ENA, pf->flags); } /** @@ -1130,7 +1142,7 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi) case ICE_VSI_VF: ctxt->flags = ICE_AQ_VSI_TYPE_VF; /* VF number here is the absolute VF number (0-255) */ - ctxt->vf_num = vsi->vf_id + hw->func_caps.vf_base_id; + ctxt->vf_num = vsi->vf->vf_id + hw->func_caps.vf_base_id; break; default: ret = -ENODEV; @@ -1322,6 +1334,36 @@ ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id) } /** + * ice_get_vf_ctrl_res - Get VF control VSI resource + * @pf: pointer to the PF structure + * @vsi: the VSI to allocate a resource for + * + * Look up whether another VF has already allocated the control VSI resource. + * If so, re-use this resource so that we share it among all VFs. + * + * Otherwise, allocate the resource and return it. 
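+ *
+ * The table walk below is done under rcu_read_lock(), so the lookup is safe
+ * against concurrent removal of VF entries.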
+ */ +static int ice_get_vf_ctrl_res(struct ice_pf *pf, struct ice_vsi *vsi) +{ + struct ice_vf *vf; + unsigned int bkt; + int base; + + rcu_read_lock(); + ice_for_each_vf_rcu(pf, bkt, vf) { + if (vf != vsi->vf && vf->ctrl_vsi_idx != ICE_NO_VSI) { + base = pf->vsi[vf->ctrl_vsi_idx]->base_vector; + rcu_read_unlock(); + return base; + } + } + rcu_read_unlock(); + + return ice_get_res(pf, pf->irq_tracker, vsi->num_q_vectors, + ICE_RES_VF_CTRL_VEC_ID); +} + +/** * ice_vsi_setup_vector_base - Set up the base vector for the given VSI * @vsi: ptr to the VSI * @@ -1353,20 +1395,8 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi) num_q_vectors = vsi->num_q_vectors; /* reserve slots from OS requested IRQs */ - if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID) { - int i; - - ice_for_each_vf(pf, i) { - struct ice_vf *vf = &pf->vf[i]; - - if (i != vsi->vf_id && vf->ctrl_vsi_idx != ICE_NO_VSI) { - base = pf->vsi[vf->ctrl_vsi_idx]->base_vector; - break; - } - } - if (i == pf->num_alloc_vfs) - base = ice_get_res(pf, pf->irq_tracker, num_q_vectors, - ICE_RES_VF_CTRL_VEC_ID); + if (vsi->type == ICE_VSI_CTRL && vsi->vf) { + base = ice_get_vf_ctrl_res(pf, vsi); } else { base = ice_get_res(pf, pf->irq_tracker, num_q_vectors, vsi->idx); @@ -1686,6 +1716,12 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi) if (status) dev_dbg(dev, "ice_add_rss_cfg failed for sctp6 flow, vsi = %d, error = %d\n", vsi_num, status); + + status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_ESP_SPI, + ICE_FLOW_SEG_HDR_ESP); + if (status) + dev_dbg(dev, "ice_add_rss_cfg failed for esp/spi flow, vsi = %d, error = %d\n", + vsi_num, status); } /** @@ -2212,7 +2248,7 @@ ice_vsi_set_q_vectors_reg_idx(struct ice_vsi *vsi) } if (vsi->type == ICE_VSI_VF) { - struct ice_vf *vf = &vsi->back->vf[vsi->vf_id]; + struct ice_vf *vf = vsi->vf; q_vector->reg_idx = ice_calc_vf_reg_idx(vf, q_vector); } else { @@ -2397,9 +2433,8 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi) * @pf: board private structure * @pi: pointer to the port_info instance * @vsi_type: VSI type - * @vf_id: defines VF ID to which this VSI connects. This field is meant to be - * used only for ICE_VSI_VF VSI type. For other VSI types, should - * fill-in ICE_INVAL_VFID as input. + * @vf: pointer to VF to which this VSI connects. This field is used primarily + * for the ICE_VSI_VF type. Other VSI types should pass NULL. * @ch: ptr to channel * * This allocates the sw VSI structure and its queue resources. 
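To make the new calling convention concrete, here is a minimal sketch of a caller (commentary, not part of the patch). It assumes the ice_get_vf_by_id()/ice_put_vf() reference-counting helpers introduced later in this series, uses pf->hw.port_info for the port info, and the helper name example_vf_vsi_create is hypothetical:

static struct ice_vsi *example_vf_vsi_create(struct ice_pf *pf, u16 vf_id)
{
	/* Look up the VF; on success this takes a reference that must be
	 * released with ice_put_vf() once we are done with the pointer.
	 */
	struct ice_vf *vf = ice_get_vf_by_id(pf, vf_id);
	struct ice_vsi *vsi;

	if (!vf)
		return NULL;

	/* The old API took a vf_id (or ICE_INVAL_VFID); the new API takes
	 * the VF pointer directly, or NULL for non-VF VSI types.
	 */
	vsi = ice_vsi_setup(pf, pf->hw.port_info, ICE_VSI_VF, vf, NULL);

	ice_put_vf(vf);
	return vsi;
}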
@@ -2409,7 +2444,8 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi) */ struct ice_vsi * ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, - enum ice_vsi_type vsi_type, u16 vf_id, struct ice_channel *ch) + enum ice_vsi_type vsi_type, struct ice_vf *vf, + struct ice_channel *ch) { u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; struct device *dev = ice_pf_to_dev(pf); @@ -2417,11 +2453,11 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, int ret, i; if (vsi_type == ICE_VSI_CHNL) - vsi = ice_vsi_alloc(pf, vsi_type, ch, ICE_INVAL_VFID); + vsi = ice_vsi_alloc(pf, vsi_type, ch, NULL); else if (vsi_type == ICE_VSI_VF || vsi_type == ICE_VSI_CTRL) - vsi = ice_vsi_alloc(pf, vsi_type, NULL, vf_id); + vsi = ice_vsi_alloc(pf, vsi_type, NULL, vf); else - vsi = ice_vsi_alloc(pf, vsi_type, NULL, ICE_INVAL_VFID); + vsi = ice_vsi_alloc(pf, vsi_type, NULL, NULL); if (!vsi) { dev_err(dev, "could not allocate VSI\n"); @@ -2433,9 +2469,6 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, if (vsi->type == ICE_VSI_PF) vsi->ethtype = ETH_P_PAUSE; - if (vsi->type == ICE_VSI_VF || vsi->type == ICE_VSI_CTRL) - vsi->vf_id = vf_id; - ice_alloc_fd_res(vsi); if (vsi_type != ICE_VSI_CHNL) { @@ -2856,6 +2889,37 @@ void ice_napi_del(struct ice_vsi *vsi) } /** + * ice_free_vf_ctrl_res - Free the VF control VSI resource + * @pf: pointer to PF structure + * @vsi: the VSI to free resources for + * + * Check if the VF control VSI resource is still in use. If no VF is using it + * any more, release the VSI resource. Otherwise, leave it to be cleaned up + * once no other VF uses it. + */ +static void ice_free_vf_ctrl_res(struct ice_pf *pf, struct ice_vsi *vsi) +{ + struct ice_vf *vf; + unsigned int bkt; + + rcu_read_lock(); + ice_for_each_vf_rcu(pf, bkt, vf) { + if (vf != vsi->vf && vf->ctrl_vsi_idx != ICE_NO_VSI) { + rcu_read_unlock(); + return; + } + } + rcu_read_unlock(); + + /* No other VFs left that have control VSI. It is now safe to reclaim + * SW interrupts back to the common pool. + */ + ice_free_res(pf->irq_tracker, vsi->base_vector, + ICE_RES_VF_CTRL_VEC_ID); + pf->num_avail_sw_msix += vsi->num_q_vectors; +} + +/** * ice_vsi_release - Delete a VSI and free its resources * @vsi: the VSI being removed * @@ -2898,23 +2962,8 @@ int ice_vsi_release(struct ice_vsi *vsi) * many interrupts each VF needs. SR-IOV MSIX resources are also * cleared in the same manner. 
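+ * The shared VF control VSI case is handled by ice_free_vf_ctrl_res() below.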
*/ - if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID) { - int i; - - ice_for_each_vf(pf, i) { - struct ice_vf *vf = &pf->vf[i]; - - if (i != vsi->vf_id && vf->ctrl_vsi_idx != ICE_NO_VSI) - break; - } - if (i == pf->num_alloc_vfs) { - /* No other VFs left that have control VSI, reclaim SW - * interrupts back to the common pool - */ - ice_free_res(pf->irq_tracker, vsi->base_vector, - ICE_RES_VF_CTRL_VEC_ID); - pf->num_avail_sw_msix += vsi->num_q_vectors; - } + if (vsi->type == ICE_VSI_CTRL && vsi->vf) { + ice_free_vf_ctrl_res(pf, vsi); } else if (vsi->type != ICE_VSI_VF) { /* reclaim SW interrupts back to the common pool */ ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx); @@ -3098,7 +3147,6 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi) u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; struct ice_coalesce_stored *coalesce; int prev_num_q_vectors = 0; enum ice_vsi_type vtype; struct ice_pf *pf; int ret, i; @@ -3108,8 +3156,8 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi) pf = vsi->back; vtype = vsi->type; - if (vtype == ICE_VSI_VF) - vf = &pf->vf[vsi->vf_id]; + if (WARN_ON(vtype == ICE_VSI_VF && !vsi->vf)) + return -EINVAL; ice_vsi_init_vlan_ops(vsi); @@ -3148,9 +3196,9 @@ ice_vsi_clear_rings(vsi); ice_vsi_free_arrays(vsi); if (vtype == ICE_VSI_VF) - ice_vsi_set_num_qs(vsi, vf->vf_id); + ice_vsi_set_num_qs(vsi, vsi->vf); else - ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID); + ice_vsi_set_num_qs(vsi, NULL); ret = ice_vsi_alloc_arrays(vsi); if (ret < 0) @@ -3912,9 +3960,9 @@ int ice_set_link(struct ice_vsi *vsi, bool ena) */ if (status == -EIO) { if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE) - dev_warn(dev, "can't set link to %s, err %d aq_err %s. not fatal, continuing\n", - (ena ? "ON" : "OFF"), status, - ice_aq_str(hw->adminq.sq_last_status)); + dev_dbg(dev, "can't set link to %s, err %d aq_err %s. not fatal, continuing\n", + (ena ? "ON" : "OFF"), status, + ice_aq_str(hw->adminq.sq_last_status)); } else if (status) { dev_err(dev, "can't set link to %s, err %d aq_err %s\n", (ena ? 
"ON" : "OFF"), status, @@ -4007,9 +4055,14 @@ static u16 ice_vsi_num_zero_vlans(struct ice_vsi *vsi) #define ICE_DVM_NUM_ZERO_VLAN_FLTRS 2 #define ICE_SVM_NUM_ZERO_VLAN_FLTRS 1 /* no VLAN 0 filter is created when a port VLAN is active */ - if (vsi->type == ICE_VSI_VF && - ice_vf_is_port_vlan_ena(&vsi->back->vf[vsi->vf_id])) - return 0; + if (vsi->type == ICE_VSI_VF) { + if (WARN_ON(!vsi->vf)) + return 0; + + if (ice_vf_is_port_vlan_ena(vsi->vf)) + return 0; + } + if (ice_is_dvm_ena(&vsi->back->hw)) return ICE_DVM_NUM_ZERO_VLAN_FLTRS; else @@ -4088,8 +4141,11 @@ void ice_init_feature_support(struct ice_pf *pf) case ICE_DEV_ID_E810C_QSFP: case ICE_DEV_ID_E810C_SFP: ice_set_feature_support(pf, ICE_F_DSCP); - if (ice_is_e810t(&pf->hw)) + if (ice_is_e810t(&pf->hw)) { ice_set_feature_support(pf, ICE_F_SMA_CTRL); + if (ice_gnss_is_gps_present(&pf->hw)) + ice_set_feature_support(pf, ICE_F_GNSS); + } break; default: break; diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h index 133fc235141a..0095329949d4 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_lib.h @@ -52,7 +52,8 @@ void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc); struct ice_vsi * ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, - enum ice_vsi_type vsi_type, u16 vf_id, struct ice_channel *ch); + enum ice_vsi_type vsi_type, struct ice_vf *vf, + struct ice_channel *ch); void ice_napi_del(struct ice_vsi *vsi); @@ -100,7 +101,7 @@ void ice_set_q_vector_intrl(struct ice_q_vector *q_vector); int ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set); bool ice_is_safe_mode(struct ice_pf *pf); -bool ice_is_aux_ena(struct ice_pf *pf); +bool ice_is_rdma_ena(struct ice_pf *pf); bool ice_is_dflt_vsi_in_use(struct ice_sw *sw); bool ice_is_vsi_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index ce90ebf4b853..2694acb1aa01 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -505,7 +505,8 @@ ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type) { struct ice_hw *hw = &pf->hw; struct ice_vsi *vsi; - unsigned int i; + struct ice_vf *vf; + unsigned int bkt; dev_dbg(ice_pf_to_dev(pf), "reset_type=%d\n", reset_type); @@ -520,8 +521,10 @@ ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type) ice_vc_notify_reset(pf); /* Disable VFs until reset is completed */ - ice_for_each_vf(pf, i) - ice_set_vf_state_qs_dis(&pf->vf[i]); + mutex_lock(&pf->vfs.table_lock); + ice_for_each_vf(pf, bkt, vf) + ice_set_vf_state_qs_dis(vf); + mutex_unlock(&pf->vfs.table_lock); if (ice_is_eswitch_mode_switchdev(pf)) { if (reset_type != ICE_RESET_PFR) @@ -568,6 +571,9 @@ skip: if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) ice_ptp_prepare_for_reset(pf); + if (ice_is_feature_supported(pf, ICE_F_GNSS)) + ice_gnss_exit(pf); + if (hw->port_info) ice_sched_clear_port(hw->port_info); @@ -1663,7 +1669,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf) { struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; - unsigned int i; + struct ice_vf *vf; + unsigned int bkt; u32 reg; if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) { @@ -1751,47 +1758,46 @@ static void ice_handle_mdd_event(struct ice_pf *pf) /* Check to see if one of the VFs caused an MDD event, and then * increment counters and set print pending */ - ice_for_each_vf(pf, i) { - struct 
ice_vf *vf = &pf->vf[i]; - - reg = rd32(hw, VP_MDET_TX_PQM(i)); + mutex_lock(&pf->vfs.table_lock); + ice_for_each_vf(pf, bkt, vf) { + reg = rd32(hw, VP_MDET_TX_PQM(vf->vf_id)); if (reg & VP_MDET_TX_PQM_VALID_M) { - wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF); + wr32(hw, VP_MDET_TX_PQM(vf->vf_id), 0xFFFF); vf->mdd_tx_events.count++; set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); if (netif_msg_tx_err(pf)) dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n", - i); + vf->vf_id); } - reg = rd32(hw, VP_MDET_TX_TCLAN(i)); + reg = rd32(hw, VP_MDET_TX_TCLAN(vf->vf_id)); if (reg & VP_MDET_TX_TCLAN_VALID_M) { - wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF); + wr32(hw, VP_MDET_TX_TCLAN(vf->vf_id), 0xFFFF); vf->mdd_tx_events.count++; set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); if (netif_msg_tx_err(pf)) dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n", - i); + vf->vf_id); } - reg = rd32(hw, VP_MDET_TX_TDPU(i)); + reg = rd32(hw, VP_MDET_TX_TDPU(vf->vf_id)); if (reg & VP_MDET_TX_TDPU_VALID_M) { - wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF); + wr32(hw, VP_MDET_TX_TDPU(vf->vf_id), 0xFFFF); vf->mdd_tx_events.count++; set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); if (netif_msg_tx_err(pf)) dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n", - i); + vf->vf_id); } - reg = rd32(hw, VP_MDET_RX(i)); + reg = rd32(hw, VP_MDET_RX(vf->vf_id)); if (reg & VP_MDET_RX_VALID_M) { - wr32(hw, VP_MDET_RX(i), 0xFFFF); + wr32(hw, VP_MDET_RX(vf->vf_id), 0xFFFF); vf->mdd_rx_events.count++; set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); if (netif_msg_rx_err(pf)) dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n", - i); + vf->vf_id); /* Since the queue is disabled on VF Rx MDD events, the * PF can be configured to reset the VF through ethtool @@ -1802,10 +1808,13 @@ static void ice_handle_mdd_event(struct ice_pf *pf) * reset, so print the event prior to reset. */ ice_print_vf_rx_mdd_event(vf); - ice_reset_vf(&pf->vf[i], false); + mutex_lock(&vf->cfg_lock); + ice_reset_vf(vf, false); + mutex_unlock(&vf->cfg_lock); } } } + mutex_unlock(&pf->vfs.table_lock); ice_print_vfs_mdd_events(pf); } @@ -2256,9 +2265,30 @@ static void ice_service_task(struct work_struct *work) return; } - if (test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags)) + if (test_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags)) { + /* Plug aux device per request */ ice_plug_aux_dev(pf); + /* Mark plugging as done but check whether unplug was + * requested during ice_plug_aux_dev() call + * (e.g. from ice_clear_rdma_cap()) and if so then + * unplug aux device. 
+ */ + if (!test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags)) + ice_unplug_aux_dev(pf); + } + + if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) { + struct iidc_event *event; + + event = kzalloc(sizeof(*event), GFP_KERNEL); + if (event) { + set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type); + ice_send_event_to_aux(pf, event); + kfree(event); + } + } + ice_clean_adminq_subtask(pf); ice_check_media_subtask(pf); ice_check_for_hang_subtask(pf); @@ -2434,7 +2464,7 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename) /* skip this unused q_vector */ continue; } - if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID) + if (vsi->type == ICE_VSI_CTRL && vsi->vf) err = devm_request_irq(dev, irq_num, vsi->irq_handler, IRQF_SHARED, q_vector->name, q_vector); @@ -3024,7 +3054,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) struct iidc_event *event; ena_mask &= ~ICE_AUX_CRIT_ERR; - event = kzalloc(sizeof(*event), GFP_KERNEL); + event = kzalloc(sizeof(*event), GFP_ATOMIC); if (event) { set_bit(IIDC_EVENT_CRIT_ERR, event->type); /* report the entire OICR value to AUX driver */ @@ -3381,14 +3411,14 @@ void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size) static struct ice_vsi * ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) { - return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID, NULL); + return ice_vsi_setup(pf, pi, ICE_VSI_PF, NULL, NULL); } static struct ice_vsi * ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, struct ice_channel *ch) { - return ice_vsi_setup(pf, pi, ICE_VSI_CHNL, ICE_INVAL_VFID, ch); + return ice_vsi_setup(pf, pi, ICE_VSI_CHNL, NULL, ch); } /** @@ -3402,7 +3432,7 @@ ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, static struct ice_vsi * ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) { - return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, ICE_INVAL_VFID, NULL); + return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, NULL, NULL); } /** @@ -3416,7 +3446,7 @@ ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) struct ice_vsi * ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) { - return ice_vsi_setup(pf, pi, ICE_VSI_LB, ICE_INVAL_VFID, NULL); + return ice_vsi_setup(pf, pi, ICE_VSI_LB, NULL, NULL); } /** @@ -3675,6 +3705,7 @@ static void ice_deinit_pf(struct ice_pf *pf) mutex_destroy(&pf->sw_mutex); mutex_destroy(&pf->tc_mutex); mutex_destroy(&pf->avail_q_mutex); + mutex_destroy(&pf->vfs.table_lock); if (pf->avail_txqs) { bitmap_free(pf->avail_txqs); @@ -3699,18 +3730,15 @@ static void ice_set_pf_caps(struct ice_pf *pf) struct ice_hw_func_caps *func_caps = &pf->hw.func_caps; clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); - clear_bit(ICE_FLAG_AUX_ENA, pf->flags); - if (func_caps->common_cap.rdma) { + if (func_caps->common_cap.rdma) set_bit(ICE_FLAG_RDMA_ENA, pf->flags); - set_bit(ICE_FLAG_AUX_ENA, pf->flags); - } clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); if (func_caps->common_cap.dcb) set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); if (func_caps->common_cap.sr_iov_1_1) { set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); - pf->num_vfs_supported = min_t(int, func_caps->num_allocd_vfs, + pf->vfs.num_supported = min_t(int, func_caps->num_allocd_vfs, ICE_MAX_VF_COUNT); } clear_bit(ICE_FLAG_RSS_ENA, pf->flags); @@ -3777,6 +3805,9 @@ static int ice_init_pf(struct ice_pf *pf) return -ENOMEM; } + mutex_init(&pf->vfs.table_lock); + hash_init(pf->vfs.table); + return 0; } @@ -3831,7 +3862,7 @@ static int 
ice_ena_msix_range(struct ice_pf *pf) v_left -= needed; /* reserve vectors for RDMA auxiliary driver */ - if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) { + if (ice_is_rdma_ena(pf)) { needed = num_cpus + ICE_RDMA_NUM_AEQ_MSIX; if (v_left < needed) goto no_hw_vecs_left_err; @@ -3872,7 +3903,7 @@ static int ice_ena_msix_range(struct ice_pf *pf) int v_remain = v_actual - v_other; int v_rdma = 0, v_min_rdma = 0; - if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) { + if (ice_is_rdma_ena(pf)) { /* Need at least 1 interrupt in addition to * AEQ MSIX */ @@ -3906,7 +3937,7 @@ static int ice_ena_msix_range(struct ice_pf *pf) dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n", pf->num_lan_msix); - if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) + if (ice_is_rdma_ena(pf)) dev_notice(dev, "Enabled %d MSI-X vectors for RDMA.\n", pf->num_rdma_msix); } @@ -4703,6 +4734,9 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) ice_ptp_init(pf); + if (ice_is_feature_supported(pf, ICE_F_GNSS)) + ice_gnss_init(pf); + /* Note: Flow director init failure is non-fatal to load */ if (ice_init_fdir(pf)) dev_err(dev, "could not initialize flow director\n"); @@ -4732,7 +4766,7 @@ probe_done: /* ready to go, so clear down state bit */ clear_bit(ICE_DOWN, pf->state); - if (ice_is_aux_ena(pf)) { + if (ice_is_rdma_ena(pf)) { pf->aux_idx = ida_alloc(&ice_aux_ida, GFP_KERNEL); if (pf->aux_idx < 0) { dev_err(dev, "Failed to allocate device ID for AUX driver\n"); @@ -4878,6 +4912,8 @@ static void ice_remove(struct pci_dev *pdev) ice_deinit_lag(pf); if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) ice_ptp_release(pf); + if (ice_is_feature_supported(pf, ICE_F_GNSS)) + ice_gnss_exit(pf); if (!ice_is_safe_mode(pf)) ice_remove_arfs(pf); ice_setup_mc_magic_wake(pf); @@ -6098,9 +6134,9 @@ int ice_up(struct ice_vsi *vsi) * This function fetches stats from the ring considering the atomic operations * that needs to be performed to read u64 values in 32 bit machine. 
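+ * This is the usual u64_stats fetch/retry loop, so 64-bit counters are
+ * never observed torn by 32-bit readers.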
*/ -static void -ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp, struct ice_q_stats stats, - u64 *pkts, u64 *bytes) +void +ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp, + struct ice_q_stats stats, u64 *pkts, u64 *bytes) { unsigned int start; @@ -6921,6 +6957,9 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) ice_ptp_reset(pf); + if (ice_is_feature_supported(pf, ICE_F_GNSS)) + ice_gnss_init(pf); + /* rebuild PF VSI */ err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF); if (err) { @@ -7019,7 +7058,6 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu) struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; - struct iidc_event *event; u8 count = 0; int err = 0; @@ -7054,14 +7092,6 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu) return -EBUSY; } - event = kzalloc(sizeof(*event), GFP_KERNEL); - if (!event) - return -ENOMEM; - - set_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type); - ice_send_event_to_aux(pf, event); - clear_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type); - netdev->mtu = (unsigned int)new_mtu; /* if VSI is up, bring it down and then back up */ @@ -7069,21 +7099,18 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu) err = ice_down(vsi); if (err) { netdev_err(netdev, "change MTU if_down err %d\n", err); - goto event_after; + return err; } err = ice_up(vsi); if (err) { netdev_err(netdev, "change MTU if_up err %d\n", err); - goto event_after; + return err; } } netdev_dbg(netdev, "changed MTU to %d\n", new_mtu); -event_after: - set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type); - ice_send_event_to_aux(pf, event); - kfree(event); + set_bit(ICE_FLAG_MTU_CHANGED, pf->flags); return err; } diff --git a/drivers/net/ethernet/intel/ice/ice_protocol_type.h b/drivers/net/ethernet/intel/ice/ice_protocol_type.h index dc1b0e9e6df5..385deaa021ac 100644 --- a/drivers/net/ethernet/intel/ice/ice_protocol_type.h +++ b/drivers/net/ethernet/intel/ice/ice_protocol_type.h @@ -29,6 +29,7 @@ enum ice_protocol_type { ICE_MAC_OFOS = 0, ICE_MAC_IL, ICE_ETYPE_OL, + ICE_ETYPE_IL, ICE_VLAN_OFOS, ICE_IPV4_OFOS, ICE_IPV4_IL, @@ -47,6 +48,7 @@ enum ice_protocol_type { enum ice_sw_tunnel_type { ICE_NON_TUN = 0, + ICE_SW_TUN_AND_NON_TUN, ICE_SW_TUN_VXLAN, ICE_SW_TUN_GENEVE, ICE_SW_TUN_NVGRE, @@ -91,6 +93,7 @@ enum ice_prot_id { #define ICE_MAC_OFOS_HW 1 #define ICE_MAC_IL_HW 4 #define ICE_ETYPE_OL_HW 9 +#define ICE_ETYPE_IL_HW 10 #define ICE_VLAN_OF_HW 16 #define ICE_VLAN_OL_HW 17 #define ICE_IPV4_OFOS_HW 32 diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c index ae291d442539..000c39d163a2 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp.c @@ -1533,9 +1533,12 @@ exit: static int ice_ptp_adjtime_nonatomic(struct ptp_clock_info *info, s64 delta) { struct timespec64 now, then; + int ret; then = ns_to_timespec64(delta); - ice_ptp_gettimex64(info, &now, NULL); + ret = ice_ptp_gettimex64(info, &now, NULL); + if (ret) + return ret; now = timespec64_add(now, then); return ice_ptp_settime64(info, (const struct timespec64 *)&now); diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c index ec8450f034e6..6dff97d53d81 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c @@ -3251,6 +3251,37 @@ int ice_write_sma_ctrl_e810t(struct 
ice_hw *hw, u8 data) } /** + * ice_read_pca9575_reg_e810t + * @hw: pointer to the hw struct + * @offset: GPIO controller register offset + * @data: pointer to data to be read from the GPIO controller + * + * Read the register from the GPIO controller + */ +int ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data) +{ + struct ice_aqc_link_topo_addr link_topo; + __le16 addr; + u16 handle; + int err; + + memset(&link_topo, 0, sizeof(link_topo)); + + err = ice_get_pca9575_handle(hw, &handle); + if (err) + return err; + + link_topo.handle = cpu_to_le16(handle); + link_topo.topo_params.node_type_ctx = + FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, + ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED); + + addr = cpu_to_le16((u16)offset); + + return ice_aq_read_i2c(hw, link_topo, 0, addr, 1, data, NULL); +} + +/** * ice_is_pca9575_present * @hw: pointer to the hw struct * diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h index 519e75462e67..1246e4ee4b5d 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h @@ -191,6 +191,7 @@ int ice_phy_exit_bypass_e822(struct ice_hw *hw, u8 port); int ice_ptp_init_phy_e810(struct ice_hw *hw); int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data); int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data); +int ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data); bool ice_is_pca9575_present(struct ice_hw *hw); #define PFTSYN_SEM_BYTES 4 @@ -443,4 +444,10 @@ bool ice_is_pca9575_present(struct ice_hw *hw); #define ICE_SMA_MAX_BIT_E810T 7 #define ICE_PCA9575_P1_OFFSET 8 +/* E810T PCA9575 IO controller registers */ +#define ICE_PCA9575_P0_IN 0x0 + +/* E810T PCA9575 IO controller pin control */ +#define ICE_E810T_P0_GNSS_PRSNT_N BIT(4) + #endif /* _ICE_PTP_HW_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c index dcc310e29300..f8db3ca521da 100644 --- a/drivers/net/ethernet/intel/ice/ice_repr.c +++ b/drivers/net/ethernet/intel/ice/ice_repr.c @@ -142,6 +142,59 @@ ice_repr_get_devlink_port(struct net_device *netdev) return &repr->vf->devlink_port; } +/** + * ice_repr_sp_stats64 - get slow path stats for port representor + * @dev: network interface device structure + * @stats: netlink stats structure + * + * RX/TX stats are being swapped here to be consistent with VF stats. In slow + * path, port representor receives data when the corresponding VF is sending it + * (and vice versa), TX and RX bytes/packets are effectively swapped on port + * representor. 
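+ * For example, bytes counted on the host-side Tx ring are reported below in
+ * stats->rx_bytes, and Rx-ring bytes in stats->tx_bytes.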
+ */ +static int +ice_repr_sp_stats64(const struct net_device *dev, + struct rtnl_link_stats64 *stats) +{ + struct ice_netdev_priv *np = netdev_priv(dev); + int vf_id = np->repr->vf->vf_id; + struct ice_tx_ring *tx_ring; + struct ice_rx_ring *rx_ring; + u64 pkts, bytes; + + tx_ring = np->vsi->tx_rings[vf_id]; + ice_fetch_u64_stats_per_ring(&tx_ring->syncp, tx_ring->stats, + &pkts, &bytes); + stats->rx_packets = pkts; + stats->rx_bytes = bytes; + + rx_ring = np->vsi->rx_rings[vf_id]; + ice_fetch_u64_stats_per_ring(&rx_ring->syncp, rx_ring->stats, + &pkts, &bytes); + stats->tx_packets = pkts; + stats->tx_bytes = bytes; + stats->tx_dropped = rx_ring->rx_stats.alloc_page_failed + + rx_ring->rx_stats.alloc_buf_failed; + + return 0; +} + +static bool +ice_repr_ndo_has_offload_stats(const struct net_device *dev, int attr_id) +{ + return attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT; +} + +static int +ice_repr_ndo_get_offload_stats(int attr_id, const struct net_device *dev, + void *sp) +{ + if (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT) + return ice_repr_sp_stats64(dev, (struct rtnl_link_stats64 *)sp); + + return -EINVAL; +} + static int ice_repr_setup_tc_cls_flower(struct ice_repr *repr, struct flow_cls_offload *flower) @@ -199,6 +252,8 @@ static const struct net_device_ops ice_repr_netdev_ops = { .ndo_start_xmit = ice_eswitch_port_start_xmit, .ndo_get_devlink_port = ice_repr_get_devlink_port, .ndo_setup_tc = ice_repr_setup_tc, + .ndo_has_offload_stats = ice_repr_ndo_has_offload_stats, + .ndo_get_offload_stats = ice_repr_ndo_get_offload_stats, }; /** @@ -284,6 +339,8 @@ static int ice_repr_add(struct ice_vf *vf) devlink_port_type_eth_set(&vf->devlink_port, repr->netdev); + ice_vc_change_ops_to_repr(&vf->vc_ops); + return 0; err_netdev: @@ -311,6 +368,9 @@ err_alloc_rule: */ static void ice_repr_rem(struct ice_vf *vf) { + if (!vf->repr) + return; + ice_devlink_destroy_vf_port(vf); kfree(vf->repr->q_vector); vf->repr->q_vector = NULL; @@ -323,6 +383,23 @@ static void ice_repr_rem(struct ice_vf *vf) #endif kfree(vf->repr); vf->repr = NULL; + + ice_vc_set_dflt_vf_ops(&vf->vc_ops); +} + +/** + * ice_repr_rem_from_all_vfs - remove port representor for all VFs + * @pf: pointer to PF structure + */ +void ice_repr_rem_from_all_vfs(struct ice_pf *pf) +{ + struct ice_vf *vf; + unsigned int bkt; + + lockdep_assert_held(&pf->vfs.table_lock); + + ice_for_each_vf(pf, bkt, vf) + ice_repr_rem(vf); } /** @@ -331,49 +408,27 @@ static void ice_repr_rem(struct ice_vf *vf) */ int ice_repr_add_for_all_vfs(struct ice_pf *pf) { + struct ice_vf *vf; + unsigned int bkt; int err; - int i; - ice_for_each_vf(pf, i) { - struct ice_vf *vf = &pf->vf[i]; + lockdep_assert_held(&pf->vfs.table_lock); + ice_for_each_vf(pf, bkt, vf) { err = ice_repr_add(vf); if (err) goto err; - - ice_vc_change_ops_to_repr(&vf->vc_ops); } return 0; err: - for (i = i - 1; i >= 0; i--) { - struct ice_vf *vf = &pf->vf[i]; - - ice_repr_rem(vf); - ice_vc_set_dflt_vf_ops(&vf->vc_ops); - } + ice_repr_rem_from_all_vfs(pf); return err; } /** - * ice_repr_rem_from_all_vfs - remove port representor for all VFs - * @pf: pointer to PF structure - */ -void ice_repr_rem_from_all_vfs(struct ice_pf *pf) -{ - int i; - - ice_for_each_vf(pf, i) { - struct ice_vf *vf = &pf->vf[i]; - - ice_repr_rem(vf); - ice_vc_set_dflt_vf_ops(&vf->vc_ops); - } -} - -/** * ice_repr_start_tx_queues - start Tx queues of port representor * @repr: pointer to repr structure */ diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index 
4143728a1919..d98aa35c0337 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -41,6 +41,7 @@ static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = { { ICE_IPV4_OFOS, 14 }, { ICE_NVGRE, 34 }, { ICE_MAC_IL, 42 }, + { ICE_ETYPE_IL, 54 }, { ICE_IPV4_IL, 56 }, { ICE_TCP_IL, 76 }, { ICE_PROTOCOL_LAST, 0 }, @@ -65,7 +66,8 @@ static const u8 dummy_gre_tcp_packet[] = { 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x08, 0x00, + + 0x08, 0x00, /* ICE_ETYPE_IL 54 */ 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */ 0x00, 0x00, 0x00, 0x00, @@ -86,6 +88,7 @@ static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = { { ICE_IPV4_OFOS, 14 }, { ICE_NVGRE, 34 }, { ICE_MAC_IL, 42 }, + { ICE_ETYPE_IL, 54 }, { ICE_IPV4_IL, 56 }, { ICE_UDP_ILOS, 76 }, { ICE_PROTOCOL_LAST, 0 }, @@ -110,7 +113,8 @@ static const u8 dummy_gre_udp_packet[] = { 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x08, 0x00, + + 0x08, 0x00, /* ICE_ETYPE_IL 54 */ 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */ 0x00, 0x00, 0x00, 0x00, @@ -131,6 +135,7 @@ static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = { { ICE_GENEVE, 42 }, { ICE_VXLAN_GPE, 42 }, { ICE_MAC_IL, 50 }, + { ICE_ETYPE_IL, 62 }, { ICE_IPV4_IL, 64 }, { ICE_TCP_IL, 84 }, { ICE_PROTOCOL_LAST, 0 }, @@ -158,7 +163,8 @@ static const u8 dummy_udp_tun_tcp_packet[] = { 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x08, 0x00, + + 0x08, 0x00, /* ICE_ETYPE_IL 62 */ 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */ 0x00, 0x01, 0x00, 0x00, @@ -182,6 +188,7 @@ static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = { { ICE_GENEVE, 42 }, { ICE_VXLAN_GPE, 42 }, { ICE_MAC_IL, 50 }, + { ICE_ETYPE_IL, 62 }, { ICE_IPV4_IL, 64 }, { ICE_UDP_ILOS, 84 }, { ICE_PROTOCOL_LAST, 0 }, @@ -209,7 +216,8 @@ static const u8 dummy_udp_tun_udp_packet[] = { 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x08, 0x00, + + 0x08, 0x00, /* ICE_ETYPE_IL 62 */ 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */ 0x00, 0x01, 0x00, 0x00, @@ -221,6 +229,224 @@ static const u8 dummy_udp_tun_udp_packet[] = { 0x00, 0x08, 0x00, 0x00, }; +static const struct ice_dummy_pkt_offsets +dummy_gre_ipv6_tcp_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_ETYPE_OL, 12 }, + { ICE_IPV4_OFOS, 14 }, + { ICE_NVGRE, 34 }, + { ICE_MAC_IL, 42 }, + { ICE_ETYPE_IL, 54 }, + { ICE_IPV6_IL, 56 }, + { ICE_TCP_IL, 96 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +static const u8 dummy_gre_ipv6_tcp_packet[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x08, 0x00, /* ICE_ETYPE_OL 12 */ + + 0x45, 0x00, 0x00, 0x66, /* ICE_IPV4_OFOS 14 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x2F, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */ + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x86, 0xdd, /* ICE_ETYPE_IL 54 */ + + 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 56 */ + 0x00, 0x08, 0x06, 0x40, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 96 */ + 0x00, 0x00, 0x00, 
0x00, + 0x00, 0x00, 0x00, 0x00, + 0x50, 0x02, 0x20, 0x00, + 0x00, 0x00, 0x00, 0x00 +}; + +static const struct ice_dummy_pkt_offsets +dummy_gre_ipv6_udp_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_ETYPE_OL, 12 }, + { ICE_IPV4_OFOS, 14 }, + { ICE_NVGRE, 34 }, + { ICE_MAC_IL, 42 }, + { ICE_ETYPE_IL, 54 }, + { ICE_IPV6_IL, 56 }, + { ICE_UDP_ILOS, 96 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +static const u8 dummy_gre_ipv6_udp_packet[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x08, 0x00, /* ICE_ETYPE_OL 12 */ + + 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x2F, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */ + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x86, 0xdd, /* ICE_ETYPE_IL 54 */ + + 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 56 */ + 0x00, 0x08, 0x11, 0x40, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 96 */ + 0x00, 0x08, 0x00, 0x00, +}; + +static const struct ice_dummy_pkt_offsets +dummy_udp_tun_ipv6_tcp_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_ETYPE_OL, 12 }, + { ICE_IPV4_OFOS, 14 }, + { ICE_UDP_OF, 34 }, + { ICE_VXLAN, 42 }, + { ICE_GENEVE, 42 }, + { ICE_VXLAN_GPE, 42 }, + { ICE_MAC_IL, 50 }, + { ICE_ETYPE_IL, 62 }, + { ICE_IPV6_IL, 64 }, + { ICE_TCP_IL, 104 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +static const u8 dummy_udp_tun_ipv6_tcp_packet[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x08, 0x00, /* ICE_ETYPE_OL 12 */ + + 0x45, 0x00, 0x00, 0x6e, /* ICE_IPV4_OFOS 14 */ + 0x00, 0x01, 0x00, 0x00, + 0x40, 0x11, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */ + 0x00, 0x5a, 0x00, 0x00, + + 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */ + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x86, 0xdd, /* ICE_ETYPE_IL 62 */ + + 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 64 */ + 0x00, 0x08, 0x06, 0x40, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 104 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x50, 0x02, 0x20, 0x00, + 0x00, 0x00, 0x00, 0x00 +}; + +static const struct ice_dummy_pkt_offsets +dummy_udp_tun_ipv6_udp_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_ETYPE_OL, 12 }, + { ICE_IPV4_OFOS, 14 }, + { ICE_UDP_OF, 34 }, + { ICE_VXLAN, 42 }, + { ICE_GENEVE, 42 }, + { ICE_VXLAN_GPE, 42 }, + { ICE_MAC_IL, 50 }, + { ICE_ETYPE_IL, 62 }, + { ICE_IPV6_IL, 64 }, + { ICE_UDP_ILOS, 104 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +static const u8 dummy_udp_tun_ipv6_udp_packet[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x08, 0x00, /* ICE_ETYPE_OL 12 */ + + 0x45, 0x00, 0x00, 0x62, /* ICE_IPV4_OFOS 14 */ + 0x00, 0x01, 0x00, 0x00, + 0x00, 0x11, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */ + 0x00, 0x4e, 0x00, 0x00, 
+ + 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */ + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x86, 0xdd, /* ICE_ETYPE_IL 62 */ + + 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 64 */ + 0x00, 0x08, 0x11, 0x40, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 104 */ + 0x00, 0x08, 0x00, 0x00, +}; + /* offset info for MAC + IPv4 + UDP dummy packet */ static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = { { ICE_MAC_OFOS, 0 }, @@ -3818,6 +4044,7 @@ static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = { { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } }, { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } }, { ICE_ETYPE_OL, { 0 } }, + { ICE_ETYPE_IL, { 0 } }, { ICE_VLAN_OFOS, { 2, 0 } }, { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } }, { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } }, @@ -3837,6 +4064,7 @@ static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = { { ICE_MAC_OFOS, ICE_MAC_OFOS_HW }, { ICE_MAC_IL, ICE_MAC_IL_HW }, { ICE_ETYPE_OL, ICE_ETYPE_OL_HW }, + { ICE_ETYPE_IL, ICE_ETYPE_IL_HW }, { ICE_VLAN_OFOS, ICE_VLAN_OL_HW }, { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW }, { ICE_IPV4_IL, ICE_IPV4_IL_HW }, @@ -4617,6 +4845,7 @@ ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo, case ICE_SW_TUN_NVGRE: prof_type = ICE_PROF_TUN_GRE; break; + case ICE_SW_TUN_AND_NON_TUN: default: prof_type = ICE_PROF_ALL; break; @@ -4817,6 +5046,7 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, const struct ice_dummy_pkt_offsets **offsets) { bool tcp = false, udp = false, ipv6 = false, vlan = false; + bool ipv6_il = false; u16 i; for (i = 0; i < lkups_cnt; i++) { @@ -4832,18 +5062,35 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, lkups[i].h_u.ethertype.ethtype_id == cpu_to_be16(ICE_IPV6_ETHER_ID) && lkups[i].m_u.ethertype.ethtype_id == - cpu_to_be16(0xFFFF)) + cpu_to_be16(0xFFFF)) ipv6 = true; + else if (lkups[i].type == ICE_ETYPE_IL && + lkups[i].h_u.ethertype.ethtype_id == + cpu_to_be16(ICE_IPV6_ETHER_ID) && + lkups[i].m_u.ethertype.ethtype_id == + cpu_to_be16(0xFFFF)) + ipv6_il = true; } if (tun_type == ICE_SW_TUN_NVGRE) { + if (tcp && ipv6_il) { + *pkt = dummy_gre_ipv6_tcp_packet; + *pkt_len = sizeof(dummy_gre_ipv6_tcp_packet); + *offsets = dummy_gre_ipv6_tcp_packet_offsets; + return; + } if (tcp) { *pkt = dummy_gre_tcp_packet; *pkt_len = sizeof(dummy_gre_tcp_packet); *offsets = dummy_gre_tcp_packet_offsets; return; } - + if (ipv6_il) { + *pkt = dummy_gre_ipv6_udp_packet; + *pkt_len = sizeof(dummy_gre_ipv6_udp_packet); + *offsets = dummy_gre_ipv6_udp_packet_offsets; + return; + } *pkt = dummy_gre_udp_packet; *pkt_len = sizeof(dummy_gre_udp_packet); *offsets = dummy_gre_udp_packet_offsets; @@ -4852,13 +5099,24 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE) { + if (tcp && ipv6_il) { + *pkt = dummy_udp_tun_ipv6_tcp_packet; + *pkt_len = sizeof(dummy_udp_tun_ipv6_tcp_packet); + *offsets = dummy_udp_tun_ipv6_tcp_packet_offsets; + return; + } if (tcp) { *pkt = dummy_udp_tun_tcp_packet; *pkt_len = sizeof(dummy_udp_tun_tcp_packet); *offsets = dummy_udp_tun_tcp_packet_offsets; return; } - + if (ipv6_il) { + *pkt = 
dummy_udp_tun_ipv6_udp_packet; + *pkt_len = sizeof(dummy_udp_tun_ipv6_udp_packet); + *offsets = dummy_udp_tun_ipv6_udp_packet_offsets; + return; + } *pkt = dummy_udp_tun_udp_packet; *pkt_len = sizeof(dummy_udp_tun_udp_packet); *offsets = dummy_udp_tun_udp_packet_offsets; @@ -4964,6 +5222,7 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, len = sizeof(struct ice_ether_hdr); break; case ICE_ETYPE_OL: + case ICE_ETYPE_IL: len = sizeof(struct ice_ethtype_hdr); break; case ICE_VLAN_OFOS: @@ -5385,7 +5644,8 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, if (status) goto err_ice_add_adv_rule; - if (rinfo->tun_type != ICE_NON_TUN) { + if (rinfo->tun_type != ICE_NON_TUN && + rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) { status = ice_fill_adv_packet_tun(hw, rinfo->tun_type, s_rule->pdata.lkup_tx_rx.hdr, pkt_offsets); diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c index e8aab664270a..fedc310c376c 100644 --- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c @@ -24,6 +24,9 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers, if (flags & ICE_TC_FLWR_FIELD_TENANT_ID) lkups_cnt++; + if (flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC) + lkups_cnt++; + if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 | ICE_TC_FLWR_FIELD_ENC_DEST_IPV4 | ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 | @@ -33,9 +36,7 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers, if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT) lkups_cnt++; - /* currently inner etype filter isn't supported */ - if ((flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) && - fltr->tunnel_type == TNL_LAST) + if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) lkups_cnt++; /* are MAC fields specified? */ @@ -64,6 +65,11 @@ static enum ice_protocol_type ice_proto_type_from_mac(bool inner) return inner ? ICE_MAC_IL : ICE_MAC_OFOS; } +static enum ice_protocol_type ice_proto_type_from_etype(bool inner) +{ + return inner ? ICE_ETYPE_IL : ICE_ETYPE_OL; +} + static enum ice_protocol_type ice_proto_type_from_ipv4(bool inner) { return inner ? 
ICE_IPV4_IL : ICE_IPV4_OFOS; @@ -145,6 +151,15 @@ ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr, } } + if (flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC) { + list[i].type = ice_proto_type_from_mac(false); + ether_addr_copy(list[i].h_u.eth_hdr.dst_addr, + hdr->l2_key.dst_mac); + ether_addr_copy(list[i].m_u.eth_hdr.dst_addr, + hdr->l2_mask.dst_mac); + i++; + } + if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 | ICE_TC_FLWR_FIELD_ENC_DEST_IPV4)) { list[i].type = ice_proto_type_from_ipv4(false); @@ -224,8 +239,10 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags, headers = &tc_fltr->inner_headers; inner = true; - } else if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) { - list[i].type = ICE_ETYPE_OL; + } + + if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) { + list[i].type = ice_proto_type_from_etype(inner); list[i].h_u.ethertype.ethtype_id = headers->l2_key.n_proto; list[i].m_u.ethertype.ethtype_id = headers->l2_mask.n_proto; i++; @@ -709,7 +726,7 @@ ice_tc_set_port(struct flow_match_ports match, fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT; else fltr->flags |= ICE_TC_FLWR_FIELD_DEST_L4_PORT; - fltr->flags |= ICE_TC_FLWR_FIELD_DEST_L4_PORT; + headers->l4_key.dst_port = match.key->dst; headers->l4_mask.dst_port = match.mask->dst; } @@ -718,7 +735,7 @@ ice_tc_set_port(struct flow_match_ports match, fltr->flags |= ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT; else fltr->flags |= ICE_TC_FLWR_FIELD_SRC_L4_PORT; - fltr->flags |= ICE_TC_FLWR_FIELD_SRC_L4_PORT; + headers->l4_key.src_port = match.key->src; headers->l4_mask.src_port = match.mask->src; } @@ -1059,12 +1076,24 @@ ice_handle_tclass_action(struct ice_vsi *vsi, * this code won't do anything * 2. For non-tunnel, if user didn't specify MAC address, add implicit * dest MAC to be lower netdev's active unicast MAC address + * 3. For tunnel, as of now TC-filter through flower classifier doesn't + * have provision for user to specify outer DMAC, hence driver to + * implicitly add outer dest MAC to be lower netdev's active unicast + * MAC address. 
*/ - if (!(fltr->flags & ICE_TC_FLWR_FIELD_DST_MAC)) { - ether_addr_copy(fltr->outer_headers.l2_key.dst_mac, - main_vsi->netdev->dev_addr); - eth_broadcast_addr(fltr->outer_headers.l2_mask.dst_mac); + if (fltr->tunnel_type != TNL_LAST && + !(fltr->flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC)) + fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DST_MAC; + + if (fltr->tunnel_type == TNL_LAST && + !(fltr->flags & ICE_TC_FLWR_FIELD_DST_MAC)) fltr->flags |= ICE_TC_FLWR_FIELD_DST_MAC; + + if (fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC | + ICE_TC_FLWR_FIELD_ENC_DST_MAC)) { + ether_addr_copy(fltr->outer_headers.l2_key.dst_mac, + vsi->netdev->dev_addr); + memset(fltr->outer_headers.l2_mask.dst_mac, 0xff, ETH_ALEN); } /* validate specified dest MAC address, make sure either it belongs to diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index ff93ec71aed6..f9bf008471c9 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -223,8 +223,7 @@ static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget) struct ice_tx_buf *tx_buf; /* get the bql data ready */ - if (!ice_ring_is_xdp(tx_ring)) - netdev_txq_bql_complete_prefetchw(txring_txq(tx_ring)); + netdev_txq_bql_complete_prefetchw(txring_txq(tx_ring)); tx_buf = &tx_ring->tx_buf[i]; tx_desc = ICE_TX_DESC(tx_ring, i); @@ -313,10 +312,6 @@ static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget) tx_ring->next_to_clean = i; ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes); - - if (ice_ring_is_xdp(tx_ring)) - return !!budget; - netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts, total_bytes); #define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2)) @@ -1165,7 +1160,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget) struct ice_vsi *ctrl_vsi = rx_ring->vsi; if (rx_desc->wb.rxdid == FDIR_DESC_RXDID && - ctrl_vsi->vf_id != ICE_INVAL_VFID) + ctrl_vsi->vf) ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc); ice_put_rx_buf(rx_ring, NULL, 0); cleaned_count++; diff --git a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c index 39f2d36cabba..b16f946185f2 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c +++ b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c @@ -34,9 +34,10 @@ void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi) { struct ice_vsi_vlan_ops *vlan_ops; struct ice_pf *pf = vsi->back; - struct ice_vf *vf; + struct ice_vf *vf = vsi->vf; - vf = &pf->vf[vsi->vf_id]; + if (WARN_ON(!vf)) + return; if (ice_is_dvm_ena(&pf->hw)) { vlan_ops = &vsi->outer_vlan_ops; @@ -126,9 +127,14 @@ void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi) */ void ice_vf_vsi_cfg_dvm_legacy_vlan_mode(struct ice_vsi *vsi) { - struct ice_vf *vf = &vsi->back->vf[vsi->vf_id]; - struct device *dev = ice_pf_to_dev(vf->pf); struct ice_vsi_vlan_ops *vlan_ops; + struct ice_vf *vf = vsi->vf; + struct device *dev; + + if (WARN_ON(!vf)) + return; + + dev = ice_pf_to_dev(vf->pf); if (!ice_is_dvm_ena(&vsi->back->hw) || ice_vf_is_port_vlan_ena(vf)) return; @@ -192,7 +198,10 @@ void ice_vf_vsi_cfg_dvm_legacy_vlan_mode(struct ice_vsi *vsi) */ void ice_vf_vsi_cfg_svm_legacy_vlan_mode(struct ice_vsi *vsi) { - struct ice_vf *vf = &vsi->back->vf[vsi->vf_id]; + struct ice_vf *vf = vsi->vf; + + if (WARN_ON(!vf)) + return; if (ice_is_dvm_ena(&vsi->back->hw) || ice_vf_is_port_vlan_ena(vf)) return; diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c 
b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c index d64df81d4893..07989f1d08ef 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c @@ -1288,15 +1288,16 @@ ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi, union ice_32b_rx_flex_desc *rx_desc) { struct ice_pf *pf = ctrl_vsi->back; + struct ice_vf *vf = ctrl_vsi->vf; struct ice_vf_fdir_ctx *ctx_done; struct ice_vf_fdir_ctx *ctx_irq; struct ice_vf_fdir *fdir; unsigned long flags; struct device *dev; - struct ice_vf *vf; int ret; - vf = &pf->vf[ctrl_vsi->vf_id]; + if (WARN_ON(!vf)) + return; fdir = &vf->fdir; ctx_done = &fdir->ctx_done; @@ -1571,15 +1572,16 @@ err_exit: */ void ice_flush_fdir_ctx(struct ice_pf *pf) { - int i; + struct ice_vf *vf; + unsigned int bkt; if (!test_and_clear_bit(ICE_FD_VF_FLUSH_CTX, pf->state)) return; - ice_for_each_vf(pf, i) { + mutex_lock(&pf->vfs.table_lock); + ice_for_each_vf(pf, bkt, vf) { struct device *dev = ice_pf_to_dev(pf); enum virtchnl_fdir_prgm_status status; - struct ice_vf *vf = &pf->vf[i]; struct ice_vf_fdir_ctx *ctx; unsigned long flags; int ret; @@ -1633,6 +1635,7 @@ err_exit: ctx->flags &= ~ICE_VF_FDIR_CTX_VALID; spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags); } + mutex_unlock(&pf->vfs.table_lock); } /** diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index 02a8c15d2bf3..45fe36db076a 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -175,18 +175,107 @@ struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf) } /** - * ice_validate_vf_id - helper to check if VF ID is valid - * @pf: pointer to the PF structure - * @vf_id: the ID of the VF to check + * ice_get_vf_by_id - Get pointer to VF by ID + * @pf: the PF private structure + * @vf_id: the VF ID to locate + * + * Locate and return a pointer to the VF structure associated with a given ID. + * Returns NULL if the ID does not have a valid VF structure associated with + * it. + * + * This function takes a reference to the VF, which must be released by + * calling ice_put_vf() once the caller is finished accessing the VF structure + * returned. */ -static int ice_validate_vf_id(struct ice_pf *pf, u16 vf_id) +struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id) { - /* vf_id range is only valid for 0-255, and should always be unsigned */ - if (vf_id >= pf->num_alloc_vfs) { - dev_err(ice_pf_to_dev(pf), "Invalid VF ID: %u\n", vf_id); - return -EINVAL; + struct ice_vf *vf; + + rcu_read_lock(); + hash_for_each_possible_rcu(pf->vfs.table, vf, entry, vf_id) { + if (vf->vf_id == vf_id) { + struct ice_vf *found; + + if (kref_get_unless_zero(&vf->refcnt)) + found = vf; + else + found = NULL; + + rcu_read_unlock(); + return found; + } } - return 0; + rcu_read_unlock(); + + return NULL; +} + +/** + * ice_release_vf - Release VF associated with a refcount + * @ref: the kref decremented to zero + * + * Callback function for kref_put to release a VF once its reference count has + * hit zero. + */ +static void ice_release_vf(struct kref *ref) +{ + struct ice_vf *vf = container_of(ref, struct ice_vf, refcnt); + + mutex_destroy(&vf->cfg_lock); + + kfree_rcu(vf, rcu); +} + +/** + * ice_put_vf - Release a reference to a VF + * @vf: the VF structure to decrease reference count on + * + * This must be called after ice_get_vf_by_id() once the reference to the VF + * structure is no longer used. Otherwise, the VF structure will never be + * freed. 
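+ *
+ * Typical usage (illustrative):
+ *
+ *	vf = ice_get_vf_by_id(pf, vf_id);
+ *	if (!vf)
+ *		return;
+ *	... access the VF structure ...
+ *	ice_put_vf(vf);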
+ */ +void ice_put_vf(struct ice_vf *vf) +{ + kref_put(&vf->refcnt, ice_release_vf); +} + +/** + * ice_has_vfs - Return true if the PF has any associated VFs + * @pf: the PF private structure + * + * Return whether or not the PF has any allocated VFs. + * + * Note that this function only guarantees that there are no VFs at the point + * of calling it. It does not guarantee that no more VFs will be added. + */ +bool ice_has_vfs(struct ice_pf *pf) +{ + /* A simple check that the hash table is not empty does not require + * the mutex or rcu_read_lock. + */ + return !hash_empty(pf->vfs.table); +} + +/** + * ice_get_num_vfs - Get number of allocated VFs + * @pf: the PF private structure + * + * Return the total number of allocated VFs. NOTE: VF IDs are not guaranteed + * to be contiguous. Do not assume that a VF ID is guaranteed to be less than + * the output of this function. + */ +u16 ice_get_num_vfs(struct ice_pf *pf) +{ + struct ice_vf *vf; + unsigned int bkt; + u16 num_vfs = 0; + + rcu_read_lock(); + ice_for_each_vf_rcu(pf, bkt, vf) + num_vfs++; + rcu_read_unlock(); + + return num_vfs; } /** @@ -205,6 +294,32 @@ static int ice_check_vf_init(struct ice_pf *pf, struct ice_vf *vf) } /** + * ice_free_vf_entries - Free all VF entries from the hash table + * @pf: pointer to the PF structure + * + * Iterate over the VF hash table, removing and releasing all VF entries. + * Called during VF teardown or as cleanup during failed VF initialization. + */ +static void ice_free_vf_entries(struct ice_pf *pf) +{ + struct ice_vfs *vfs = &pf->vfs; + struct hlist_node *tmp; + struct ice_vf *vf; + unsigned int bkt; + + /* Remove all VFs from the hash table and release their main + * reference. Once all references to the VF are dropped, ice_put_vf() + * will call ice_release_vf which will remove the VF memory. 
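+ *
+ * The caller must hold vfs->table_lock; see the lockdep assertion below.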
+ */ + lockdep_assert_held(&vfs->table_lock); + + hash_for_each_safe(vfs->table, bkt, tmp, vf, entry) { + hash_del_rcu(&vf->entry); + ice_put_vf(vf); + } +} + +/** * ice_vc_vf_broadcast - Broadcast a message to all VFs on PF * @pf: pointer to the PF structure * @v_opcode: operation code @@ -217,11 +332,11 @@ ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode, enum virtchnl_status_code v_retval, u8 *msg, u16 msglen) { struct ice_hw *hw = &pf->hw; - unsigned int i; - - ice_for_each_vf(pf, i) { - struct ice_vf *vf = &pf->vf[i]; + struct ice_vf *vf; + unsigned int bkt; + mutex_lock(&pf->vfs.table_lock); + ice_for_each_vf(pf, bkt, vf) { /* Not all vfs are enabled so skip the ones that are not */ if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) && !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) @@ -233,6 +348,7 @@ ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode, ice_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, msg, msglen, NULL); } + mutex_unlock(&pf->vfs.table_lock); } /** @@ -381,7 +497,7 @@ static void ice_free_vf_res(struct ice_vf *vf) vf->num_mac = 0; } - last_vector_idx = vf->first_vector_idx + pf->num_msix_per_vf - 1; + last_vector_idx = vf->first_vector_idx + pf->vfs.num_msix_per - 1; /* clear VF MDD event information */ memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events)); @@ -417,7 +533,7 @@ static void ice_dis_vf_mappings(struct ice_vf *vf) wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0); first = vf->first_vector_idx; - last = first + pf->num_msix_per_vf - 1; + last = first + pf->vfs.num_msix_per - 1; for (v = first; v <= last; v++) { u32 reg; @@ -499,16 +615,14 @@ static void ice_dis_vf_qs(struct ice_vf *vf) void ice_free_vfs(struct ice_pf *pf) { struct device *dev = ice_pf_to_dev(pf); + struct ice_vfs *vfs = &pf->vfs; struct ice_hw *hw = &pf->hw; - unsigned int tmp, i; - - set_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state); + struct ice_vf *vf; + unsigned int bkt; - if (!pf->vf) + if (!ice_has_vfs(pf)) return; - ice_eswitch_release(pf); - while (test_and_set_bit(ICE_VF_DIS, pf->state)) usleep_range(1000, 2000); @@ -521,58 +635,48 @@ void ice_free_vfs(struct ice_pf *pf) else dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n"); - /* Avoid wait time by stopping all VFs at the same time */ - ice_for_each_vf(pf, i) - ice_dis_vf_qs(&pf->vf[i]); - - tmp = pf->num_alloc_vfs; - pf->num_qps_per_vf = 0; - pf->num_alloc_vfs = 0; - for (i = 0; i < tmp; i++) { - if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) { - /* disable VF qp mappings and set VF disable state */ - ice_dis_vf_mappings(&pf->vf[i]); - set_bit(ICE_VF_STATE_DIS, pf->vf[i].vf_states); - ice_free_vf_res(&pf->vf[i]); - } + mutex_lock(&vfs->table_lock); - mutex_destroy(&pf->vf[i].cfg_lock); - } + ice_eswitch_release(pf); - if (ice_sriov_free_msix_res(pf)) - dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n"); + ice_for_each_vf(pf, bkt, vf) { + mutex_lock(&vf->cfg_lock); - devm_kfree(dev, pf->vf); - pf->vf = NULL; + ice_dis_vf_qs(vf); - /* This check is for when the driver is unloaded while VFs are - * assigned. Setting the number of VFs to 0 through sysfs is caught - * before this function ever gets called. - */ - if (!pci_vfs_assigned(pf->pdev)) { - unsigned int vf_id; + if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) { + /* disable VF qp mappings and set VF disable state */ + ice_dis_vf_mappings(vf); + set_bit(ICE_VF_STATE_DIS, vf->vf_states); + ice_free_vf_res(vf); + } - /* Acknowledge VFLR for all VFs. 
Without this, VFs will fail to - * work correctly when SR-IOV gets re-enabled. - */ - for (vf_id = 0; vf_id < tmp; vf_id++) { + if (!pci_vfs_assigned(pf->pdev)) { u32 reg_idx, bit_idx; - reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32; - bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32; + reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32; + bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32; wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx)); } - } - /* clear malicious info if the VFs are getting released */ - for (i = 0; i < tmp; i++) - if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->malvfs, - ICE_MAX_VF_COUNT, i)) + /* clear malicious info since the VF is getting released */ + if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->vfs.malvfs, + ICE_MAX_VF_COUNT, vf->vf_id)) dev_dbg(dev, "failed to clear malicious VF state for VF %u\n", - i); + vf->vf_id); + + mutex_unlock(&vf->cfg_lock); + } + + if (ice_sriov_free_msix_res(pf)) + dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n"); + + vfs->num_qps_per = 0; + ice_free_vf_entries(pf); + + mutex_unlock(&vfs->table_lock); clear_bit(ICE_VF_DIS, pf->state); - clear_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state); clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags); } @@ -666,7 +770,7 @@ static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf) struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; - vsi = ice_vsi_setup(pf, pi, ICE_VSI_VF, vf->vf_id, NULL); + vsi = ice_vsi_setup(pf, pi, ICE_VSI_VF, vf, NULL); if (!vsi) { dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n"); @@ -693,7 +797,7 @@ struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf) struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; - vsi = ice_vsi_setup(pf, pi, ICE_VSI_CTRL, vf->vf_id, NULL); + vsi = ice_vsi_setup(pf, pi, ICE_VSI_CTRL, vf, NULL); if (!vsi) { dev_err(ice_pf_to_dev(pf), "Failed to create VF control VSI\n"); ice_vf_ctrl_invalidate_vsi(vf); @@ -716,7 +820,7 @@ struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf) */ static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf) { - return pf->sriov_base_vector + vf->vf_id * pf->num_msix_per_vf; + return pf->sriov_base_vector + vf->vf_id * pf->vfs.num_msix_per; } /** @@ -973,12 +1077,12 @@ static void ice_ena_vf_msix_mappings(struct ice_vf *vf) hw = &pf->hw; pf_based_first_msix = vf->first_vector_idx; - pf_based_last_msix = (pf_based_first_msix + pf->num_msix_per_vf) - 1; + pf_based_last_msix = (pf_based_first_msix + pf->vfs.num_msix_per) - 1; device_based_first_msix = pf_based_first_msix + pf->hw.func_caps.common_cap.msix_vector_first_id; device_based_last_msix = - (device_based_first_msix + pf->num_msix_per_vf) - 1; + (device_based_first_msix + pf->vfs.num_msix_per) - 1; device_based_vf_id = vf->vf_id + hw->func_caps.vf_base_id; reg = (((device_based_first_msix << VPINT_ALLOC_FIRST_S) & @@ -1069,45 +1173,6 @@ static void ice_ena_vf_mappings(struct ice_vf *vf) } /** - * ice_determine_res - * @pf: pointer to the PF structure - * @avail_res: available resources in the PF structure - * @max_res: maximum resources that can be given per VF - * @min_res: minimum resources that can be given per VF - * - * Returns non-zero value if resources (queues/vectors) are available or - * returns zero if PF cannot accommodate for all num_alloc_vfs. - */ -static int -ice_determine_res(struct ice_pf *pf, u16 avail_res, u16 max_res, u16 min_res) -{ - bool checked_min_res = false; - int res; - - /* start by checking if PF can assign max number of resources for - * all num_alloc_vfs. 
- * if yes, return number per VF - * If no, divide by 2 and roundup, check again - * repeat the loop till we reach a point where even minimum resources - * are not available, in that case return 0 - */ - res = max_res; - while ((res >= min_res) && !checked_min_res) { - int num_all_res; - - num_all_res = pf->num_alloc_vfs * res; - if (num_all_res <= avail_res) - return res; - - if (res == min_res) - checked_min_res = true; - - res = DIV_ROUND_UP(res, 2); - } - return 0; -} - -/** * ice_calc_vf_reg_idx - Calculate the VF's register index in the PF space * @vf: VF to calculate the register index for * @q_vector: a q_vector associated to the VF @@ -1122,7 +1187,7 @@ int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector) pf = vf->pf; /* always add one to account for the OICR being the first MSIX */ - return pf->sriov_base_vector + pf->num_msix_per_vf * vf->vf_id + + return pf->sriov_base_vector + pf->vfs.num_msix_per * vf->vf_id + q_vector->v_idx + 1; } @@ -1186,6 +1251,7 @@ static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed) /** * ice_set_per_vf_res - check if vectors and queues are available * @pf: pointer to the PF structure + * @num_vfs: the number of SR-IOV VFs being configured * * First, determine HW interrupts from common pool. If we allocate fewer VFs, we * get more vectors and can enable more queues per VF. Note that this does not @@ -1204,20 +1270,22 @@ static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed) * Lastly, set queue and MSI-X VF variables tracked by the PF so it can be used * by each VF during VF initialization and reset. */ -static int ice_set_per_vf_res(struct ice_pf *pf) +static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs) { int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker); + u16 num_msix_per_vf, num_txq, num_rxq, avail_qs; int msix_avail_per_vf, msix_avail_for_sriov; struct device *dev = ice_pf_to_dev(pf); - u16 num_msix_per_vf, num_txq, num_rxq; - if (!pf->num_alloc_vfs || max_valid_res_idx < 0) + lockdep_assert_held(&pf->vfs.table_lock); + + if (!num_vfs || max_valid_res_idx < 0) return -EINVAL; /* determine MSI-X resources per VF */ msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors - pf->irq_tracker->num_entries; - msix_avail_per_vf = msix_avail_for_sriov / pf->num_alloc_vfs; + msix_avail_per_vf = msix_avail_for_sriov / num_vfs; if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) { num_msix_per_vf = ICE_NUM_VF_MSIX_MED; } else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_SMALL) { @@ -1229,40 +1297,43 @@ static int ice_set_per_vf_res(struct ice_pf *pf) } else { dev_err(dev, "Only %d MSI-X interrupts available for SR-IOV. 
Not enough to support minimum of %d MSI-X interrupts per VF for %d VFs\n", msix_avail_for_sriov, ICE_MIN_INTR_PER_VF, - pf->num_alloc_vfs); + num_vfs); return -EIO; } - /* determine queue resources per VF */ - num_txq = ice_determine_res(pf, ice_get_avail_txq_count(pf), - min_t(u16, - num_msix_per_vf - ICE_NONQ_VECS_VF, - ICE_MAX_RSS_QS_PER_VF), - ICE_MIN_QS_PER_VF); + num_txq = min_t(u16, num_msix_per_vf - ICE_NONQ_VECS_VF, + ICE_MAX_RSS_QS_PER_VF); + avail_qs = ice_get_avail_txq_count(pf) / num_vfs; + if (!avail_qs) + num_txq = 0; + else if (num_txq > avail_qs) + num_txq = rounddown_pow_of_two(avail_qs); - num_rxq = ice_determine_res(pf, ice_get_avail_rxq_count(pf), - min_t(u16, - num_msix_per_vf - ICE_NONQ_VECS_VF, - ICE_MAX_RSS_QS_PER_VF), - ICE_MIN_QS_PER_VF); + num_rxq = min_t(u16, num_msix_per_vf - ICE_NONQ_VECS_VF, + ICE_MAX_RSS_QS_PER_VF); + avail_qs = ice_get_avail_rxq_count(pf) / num_vfs; + if (!avail_qs) + num_rxq = 0; + else if (num_rxq > avail_qs) + num_rxq = rounddown_pow_of_two(avail_qs); - if (!num_txq || !num_rxq) { + if (num_txq < ICE_MIN_QS_PER_VF || num_rxq < ICE_MIN_QS_PER_VF) { dev_err(dev, "Not enough queues to support minimum of %d queue pairs per VF for %d VFs\n", - ICE_MIN_QS_PER_VF, pf->num_alloc_vfs); + ICE_MIN_QS_PER_VF, num_vfs); return -EIO; } - if (ice_sriov_set_msix_res(pf, num_msix_per_vf * pf->num_alloc_vfs)) { + if (ice_sriov_set_msix_res(pf, num_msix_per_vf * num_vfs)) { dev_err(dev, "Unable to set MSI-X resources for %d VFs\n", - pf->num_alloc_vfs); + num_vfs); return -EINVAL; } /* only allow equal Tx/Rx queue count (i.e. queue pairs) */ - pf->num_qps_per_vf = min_t(int, num_txq, num_rxq); - pf->num_msix_per_vf = num_msix_per_vf; + pf->vfs.num_qps_per = min_t(int, num_txq, num_rxq); + pf->vfs.num_msix_per = num_msix_per_vf; dev_info(dev, "Enabling %d VFs with %d vectors and %d queues per VF\n", - pf->num_alloc_vfs, pf->num_msix_per_vf, pf->num_qps_per_vf); + num_vfs, pf->vfs.num_msix_per, pf->vfs.num_qps_per); return 0; } @@ -1509,24 +1580,30 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; struct ice_vf *vf; - int v, i; + unsigned int bkt; /* If we don't have any VFs, then there is nothing to reset */ - if (!pf->num_alloc_vfs) + if (!ice_has_vfs(pf)) return false; + mutex_lock(&pf->vfs.table_lock); + /* clear all malicious info if the VFs are getting reset */ - ice_for_each_vf(pf, i) - if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->malvfs, ICE_MAX_VF_COUNT, i)) - dev_dbg(dev, "failed to clear malicious VF state for VF %u\n", i); + ice_for_each_vf(pf, bkt, vf) + if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->vfs.malvfs, + ICE_MAX_VF_COUNT, vf->vf_id)) + dev_dbg(dev, "failed to clear malicious VF state for VF %u\n", + vf->vf_id); /* If VFs have been disabled, there is no need to reset */ - if (test_and_set_bit(ICE_VF_DIS, pf->state)) + if (test_and_set_bit(ICE_VF_DIS, pf->state)) { + mutex_unlock(&pf->vfs.table_lock); return false; + } /* Begin reset on all VFs at once */ - ice_for_each_vf(pf, v) - ice_trigger_vf_reset(&pf->vf[v], is_vflr, true); + ice_for_each_vf(pf, bkt, vf) + ice_trigger_vf_reset(vf, is_vflr, true); /* HW requires some time to make sure it can flush the FIFO for a VF * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in @@ -1534,35 +1611,35 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) * the VFs using a simple iterator that increments once that VF has * finished resetting. 
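 	 *
 	 * Worst case this is ten reads of VPGEN_VFRSTAT per VF, with a
 	 * 10-20 microsecond sleep after each failed read, i.e. on the
 	 * order of 200 microseconds per VF before the timeout warning
 	 * below fires.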
*/ - for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) { - /* Check each VF in sequence */ - while (v < pf->num_alloc_vfs) { - u32 reg; - - vf = &pf->vf[v]; - reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id)); - if (!(reg & VPGEN_VFRSTAT_VFRD_M)) { - /* only delay if the check failed */ - usleep_range(10, 20); + ice_for_each_vf(pf, bkt, vf) { + bool done = false; + unsigned int i; + u32 reg; + + for (i = 0; i < 10; i++) { + reg = rd32(&pf->hw, VPGEN_VFRSTAT(vf->vf_id)); + if (reg & VPGEN_VFRSTAT_VFRD_M) { + done = true; break; } - /* If the current VF has finished resetting, move on - * to the next VF in sequence. + /* only delay if check failed */ + usleep_range(10, 20); + } + + if (!done) { + /* Display a warning if at least one VF didn't manage + * to reset in time, but continue on with the + * operation. */ - v++; + dev_warn(dev, "VF %u reset check timeout\n", vf->vf_id); + break; } } - /* Display a warning if at least one VF didn't manage to reset in - * time, but continue on with the operation. - */ - if (v < pf->num_alloc_vfs) - dev_warn(dev, "VF reset check timeout\n"); - /* free VF resources to begin resetting the VSI state */ - ice_for_each_vf(pf, v) { - vf = &pf->vf[v]; + ice_for_each_vf(pf, bkt, vf) { + mutex_lock(&vf->cfg_lock); vf->driver_caps = 0; ice_vc_set_default_allowlist(vf); @@ -1578,6 +1655,8 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) ice_vf_pre_vsi_rebuild(vf); ice_vf_rebuild_vsi(vf); ice_vf_post_vsi_rebuild(vf); + + mutex_unlock(&vf->cfg_lock); } if (ice_is_eswitch_mode_switchdev(pf)) @@ -1587,6 +1666,8 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) ice_flush(hw); clear_bit(ICE_VF_DIS, pf->state); + mutex_unlock(&pf->vfs.table_lock); + return true; } @@ -1628,6 +1709,8 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) u32 reg; int i; + lockdep_assert_held(&vf->cfg_lock); + dev = ice_pf_to_dev(pf); if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) { @@ -1721,7 +1804,8 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) ice_eswitch_replay_vf_mac_rule(vf); /* if the VF has been reset allow it to come up again */ - if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->malvfs, ICE_MAX_VF_COUNT, vf->vf_id)) + if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->vfs.malvfs, + ICE_MAX_VF_COUNT, vf->vf_id)) dev_dbg(dev, "failed to clear malicious VF state for VF %u\n", i); return true; @@ -1733,10 +1817,13 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) */ void ice_vc_notify_link_state(struct ice_pf *pf) { - int i; + struct ice_vf *vf; + unsigned int bkt; - ice_for_each_vf(pf, i) - ice_vc_notify_vf_link_state(&pf->vf[i]); + mutex_lock(&pf->vfs.table_lock); + ice_for_each_vf(pf, bkt, vf) + ice_vc_notify_vf_link_state(vf); + mutex_unlock(&pf->vfs.table_lock); } /** @@ -1749,7 +1836,7 @@ void ice_vc_notify_reset(struct ice_pf *pf) { struct virtchnl_pf_event pfe; - if (!pf->num_alloc_vfs) + if (!ice_has_vfs(pf)) return; pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING; @@ -1765,14 +1852,7 @@ void ice_vc_notify_reset(struct ice_pf *pf) static void ice_vc_notify_vf_reset(struct ice_vf *vf) { struct virtchnl_pf_event pfe; - struct ice_pf *pf; - - if (!vf) - return; - - pf = vf->pf; - if (ice_validate_vf_id(pf, vf->vf_id)) - return; + struct ice_pf *pf = vf->pf; /* Bail out if VF is in disabled state, neither initialized, nor active * state - otherwise proceed with notifications @@ -1858,11 +1938,14 @@ release_vsi: static int ice_start_vfs(struct ice_pf *pf) { struct ice_hw *hw = &pf->hw; - int retval, i; + unsigned int bkt, it_cnt; + struct ice_vf *vf; + int 
retval; - ice_for_each_vf(pf, i) { - struct ice_vf *vf = &pf->vf[i]; + lockdep_assert_held(&pf->vfs.table_lock); + it_cnt = 0; + ice_for_each_vf(pf, bkt, vf) { ice_clear_vf_reset_trigger(vf); retval = ice_init_vf_vsi_res(vf); @@ -1875,40 +1958,63 @@ static int ice_start_vfs(struct ice_pf *pf) set_bit(ICE_VF_STATE_INIT, vf->vf_states); ice_ena_vf_mappings(vf); wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE); + it_cnt++; } ice_flush(hw); return 0; teardown: - for (i = i - 1; i >= 0; i--) { - struct ice_vf *vf = &pf->vf[i]; + ice_for_each_vf(pf, bkt, vf) { + if (it_cnt == 0) + break; ice_dis_vf_mappings(vf); ice_vf_vsi_release(vf); + it_cnt--; } return retval; } /** - * ice_set_dflt_settings_vfs - set VF defaults during initialization/creation - * @pf: PF holding reference to all VFs for default configuration + * ice_create_vf_entries - Allocate and insert VF entries + * @pf: pointer to the PF structure + * @num_vfs: the number of VFs to allocate + * + * Allocate new VF entries and insert them into the hash table. Set some + * basic default fields for initializing the new VFs. + * + * After this function exits, the hash table will have num_vfs entries + * inserted. + * + * Returns 0 on success or an integer error code on failure. */ -static void ice_set_dflt_settings_vfs(struct ice_pf *pf) +static int ice_create_vf_entries(struct ice_pf *pf, u16 num_vfs) { - int i; + struct ice_vfs *vfs = &pf->vfs; + struct ice_vf *vf; + u16 vf_id; + int err; - ice_for_each_vf(pf, i) { - struct ice_vf *vf = &pf->vf[i]; + lockdep_assert_held(&vfs->table_lock); + + for (vf_id = 0; vf_id < num_vfs; vf_id++) { + vf = kzalloc(sizeof(*vf), GFP_KERNEL); + if (!vf) { + err = -ENOMEM; + goto err_free_entries; + } + kref_init(&vf->refcnt); vf->pf = pf; - vf->vf_id = i; + vf->vf_id = vf_id; + vf->vf_sw_id = pf->first_sw; /* assign default capabilities */ set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vf->vf_caps); vf->spoofchk = true; - vf->num_vf_qs = pf->num_qps_per_vf; + vf->num_vf_qs = pf->vfs.num_qps_per; ice_vc_set_default_allowlist(vf); /* ctrl_vsi_idx will be set to a valid value only when VF @@ -1920,27 +2026,15 @@ static void ice_set_dflt_settings_vfs(struct ice_pf *pf) ice_vc_set_dflt_vf_ops(&vf->vc_ops); mutex_init(&vf->cfg_lock); - } -} -/** - * ice_alloc_vfs - allocate num_vfs in the PF structure - * @pf: PF to store the allocated VFs in - * @num_vfs: number of VFs to allocate - */ -static int ice_alloc_vfs(struct ice_pf *pf, int num_vfs) -{ - struct ice_vf *vfs; - - vfs = devm_kcalloc(ice_pf_to_dev(pf), num_vfs, sizeof(*vfs), - GFP_KERNEL); - if (!vfs) - return -ENOMEM; - - pf->vf = vfs; - pf->num_alloc_vfs = num_vfs; + hash_add_rcu(vfs->table, &vf->entry, vf_id); + } return 0; + +err_free_entries: + ice_free_vf_entries(pf); + return err; } /** @@ -1961,28 +2055,29 @@ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs) ice_flush(hw); ret = pci_enable_sriov(pf->pdev, num_vfs); - if (ret) { - pf->num_alloc_vfs = 0; + if (ret) goto err_unroll_intr; - } - ret = ice_alloc_vfs(pf, num_vfs); - if (ret) - goto err_pci_disable_sriov; + mutex_lock(&pf->vfs.table_lock); - if (ice_set_per_vf_res(pf)) { + if (ice_set_per_vf_res(pf, num_vfs)) { dev_err(dev, "Not enough resources for %d VFs, try with fewer number of VFs\n", num_vfs); ret = -ENOSPC; goto err_unroll_sriov; } - ice_set_dflt_settings_vfs(pf); + ret = ice_create_vf_entries(pf, num_vfs); + if (ret) { + dev_err(dev, "Failed to allocate VF entries for %d VFs\n", + num_vfs); + goto err_unroll_sriov; + } if (ice_start_vfs(pf)) { dev_err(dev, "Failed to start 
VF(s)\n"); ret = -EAGAIN; - goto err_unroll_sriov; + goto err_unroll_vf_entries; } clear_bit(ICE_VF_DIS, pf->state); @@ -1995,13 +2090,14 @@ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs) if (test_and_clear_bit(ICE_OICR_INTR_DIS, pf->state)) ice_irq_dynamic_ena(hw, NULL, NULL); + mutex_unlock(&pf->vfs.table_lock); + return 0; +err_unroll_vf_entries: + ice_free_vf_entries(pf); err_unroll_sriov: - devm_kfree(dev, pf->vf); - pf->vf = NULL; - pf->num_alloc_vfs = 0; -err_pci_disable_sriov: + mutex_unlock(&pf->vfs.table_lock); pci_disable_sriov(pf->pdev); err_unroll_intr: /* rearm interrupts here */ @@ -2028,9 +2124,9 @@ static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs) else if (pre_existing_vfs && pre_existing_vfs == num_vfs) return 0; - if (num_vfs > pf->num_vfs_supported) { + if (num_vfs > pf->vfs.num_supported) { dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n", - num_vfs, pf->num_vfs_supported); + num_vfs, pf->vfs.num_supported); return -EOPNOTSUPP; } @@ -2128,25 +2224,30 @@ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs) void ice_process_vflr_event(struct ice_pf *pf) { struct ice_hw *hw = &pf->hw; - unsigned int vf_id; + struct ice_vf *vf; + unsigned int bkt; u32 reg; if (!test_and_clear_bit(ICE_VFLR_EVENT_PENDING, pf->state) || - !pf->num_alloc_vfs) + !ice_has_vfs(pf)) return; - ice_for_each_vf(pf, vf_id) { - struct ice_vf *vf = &pf->vf[vf_id]; + mutex_lock(&pf->vfs.table_lock); + ice_for_each_vf(pf, bkt, vf) { u32 reg_idx, bit_idx; - reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32; - bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32; + reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32; + bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32; /* read GLGEN_VFLRSTAT register to find out the flr VFs */ reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx)); - if (reg & BIT(bit_idx)) + if (reg & BIT(bit_idx)) { /* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */ + mutex_lock(&vf->cfg_lock); ice_reset_vf(vf, true); + mutex_unlock(&vf->cfg_lock); + } } + mutex_unlock(&pf->vfs.table_lock); } /** @@ -2166,22 +2267,36 @@ static void ice_vc_reset_vf(struct ice_vf *vf) * * If no VF is found who owns the pfq then return NULL, otherwise return a * pointer to the VF who owns the pfq + * + * If this function returns non-NULL, it acquires a reference count of the VF + * structure. The caller is responsible for calling ice_put_vf() to drop this + * reference. 
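+ *
+ * A minimal usage sketch (error handling elided):
+ *
+ *	vf = ice_get_vf_from_pfq(pf, pfq);
+ *	if (vf) {
+ *		... use vf ...
+ *		ice_put_vf(vf);
+ *	}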
*/ static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq) { - unsigned int vf_id; + struct ice_vf *vf; + unsigned int bkt; - ice_for_each_vf(pf, vf_id) { - struct ice_vf *vf = &pf->vf[vf_id]; + rcu_read_lock(); + ice_for_each_vf_rcu(pf, bkt, vf) { struct ice_vsi *vsi; u16 rxq_idx; vsi = ice_get_vf_vsi(vf); ice_for_each_rxq(vsi, rxq_idx) - if (vsi->rxq_map[rxq_idx] == pfq) - return vf; + if (vsi->rxq_map[rxq_idx] == pfq) { + struct ice_vf *found; + + if (kref_get_unless_zero(&vf->refcnt)) + found = vf; + else + found = NULL; + rcu_read_unlock(); + return found; + } } + rcu_read_unlock(); return NULL; } @@ -2222,7 +2337,11 @@ ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event) if (!vf) return; + mutex_lock(&vf->cfg_lock); ice_vc_reset_vf(vf); + mutex_unlock(&vf->cfg_lock); + + ice_put_vf(vf); } /** @@ -2243,33 +2362,9 @@ ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode, struct ice_pf *pf; int aq_ret; - if (!vf) - return -EINVAL; - pf = vf->pf; - if (ice_validate_vf_id(pf, vf->vf_id)) - return -EINVAL; - dev = ice_pf_to_dev(pf); - /* single place to detect unsuccessful return values */ - if (v_retval) { - vf->num_inval_msgs++; - dev_info(dev, "VF %d failed opcode %d, retval: %d\n", vf->vf_id, - v_opcode, v_retval); - if (vf->num_inval_msgs > ICE_DFLT_NUM_INVAL_MSGS_ALLOWED) { - dev_err(dev, "Number of invalid messages exceeded for VF %d\n", - vf->vf_id); - dev_err(dev, "Use PF Control I/F to enable the VF\n"); - set_bit(ICE_VF_STATE_DIS, vf->vf_states); - return -EIO; - } - } else { - vf->num_valid_msgs++; - /* reset the invalid counter, if a valid message is received. */ - vf->num_inval_msgs = 0; - } - aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval, msg, msglen, NULL); if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) { @@ -2440,7 +2535,7 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg) vfres->num_vsis = 1; /* Tx and Rx queue are equal for VF */ vfres->num_queue_pairs = vsi->num_txq; - vfres->max_vectors = pf->num_msix_per_vf; + vfres->max_vectors = pf->vfs.num_msix_per; vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE; vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE; vfres->max_mtu = ice_vc_get_max_frame_size(vf); @@ -2514,7 +2609,7 @@ bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id) vsi = ice_find_vsi_from_id(pf, vsi_id); - return (vsi && (vsi->vf_id == vf->vf_id)); + return (vsi && (vsi->vf == vf)); } /** @@ -3005,30 +3100,34 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena) int ret; dev = ice_pf_to_dev(pf); - if (ice_validate_vf_id(pf, vf_id)) + + vf = ice_get_vf_by_id(pf, vf_id); + if (!vf) return -EINVAL; - vf = &pf->vf[vf_id]; ret = ice_check_vf_ready_for_cfg(vf); if (ret) - return ret; + goto out_put_vf; vf_vsi = ice_get_vf_vsi(vf); if (!vf_vsi) { netdev_err(netdev, "VSI %d for VF %d is null\n", vf->lan_vsi_idx, vf->vf_id); - return -EINVAL; + ret = -EINVAL; + goto out_put_vf; } if (vf_vsi->type != ICE_VSI_VF) { netdev_err(netdev, "Type %d of VSI %d for VF %d is no ICE_VSI_VF\n", vf_vsi->type, vf_vsi->vsi_num, vf->vf_id); - return -ENODEV; + ret = -ENODEV; + goto out_put_vf; } if (ena == vf->spoofchk) { dev_dbg(dev, "VF spoofchk already %s\n", ena ? 
"ON" : "OFF"); - return 0; + ret = 0; + goto out_put_vf; } if (ena) @@ -3041,6 +3140,8 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena) else vf->spoofchk = ena; +out_put_vf: + ice_put_vf(vf); return ret; } @@ -3053,18 +3154,22 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena) */ bool ice_is_any_vf_in_promisc(struct ice_pf *pf) { - int vf_idx; - - ice_for_each_vf(pf, vf_idx) { - struct ice_vf *vf = &pf->vf[vf_idx]; + bool is_vf_promisc = false; + struct ice_vf *vf; + unsigned int bkt; + rcu_read_lock(); + ice_for_each_vf_rcu(pf, bkt, vf) { /* found a VF that has promiscuous mode configured */ if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) || - test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) - return true; + test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) { + is_vf_promisc = true; + break; + } } + rcu_read_unlock(); - return false; + return is_vf_promisc; } /** @@ -3584,7 +3689,7 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg) * there is actually at least a single VF queue vector mapped */ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) || - pf->num_msix_per_vf < num_q_vectors_mapped || + pf->vfs.num_msix_per < num_q_vectors_mapped || !num_q_vectors_mapped) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; @@ -3606,7 +3711,7 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg) /* vector_id is always 0-based for each VF, and can never be * larger than or equal to the max allowed interrupts per VF */ - if (!(vector_id < pf->num_msix_per_vf) || + if (!(vector_id < pf->vfs.num_msix_per) || !ice_vc_isvalid_vsi_id(vf, vsi_id) || (!vector_id && (map->rxq_map || map->txq_map))) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; @@ -4197,8 +4302,6 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos, int ret; dev = ice_pf_to_dev(pf); - if (ice_validate_vf_id(pf, vf_id)) - return -EINVAL; if (vlan_id >= VLAN_N_VID || qos > 7) { dev_err(dev, "Invalid Port VLAN parameters for VF %d, ID %d, QoS %d\n", @@ -4212,10 +4315,13 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos, return -EPROTONOSUPPORT; } - vf = &pf->vf[vf_id]; + vf = ice_get_vf_by_id(pf, vf_id); + if (!vf) + return -EINVAL; + ret = ice_check_vf_ready_for_cfg(vf); if (ret) - return ret; + goto out_put_vf; if (ice_vf_get_port_vlan_prio(vf) == qos && ice_vf_get_port_vlan_tpid(vf) == local_vlan_proto && @@ -4223,7 +4329,8 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos, /* duplicate request, so just return success */ dev_dbg(dev, "Duplicate port VLAN %u, QoS %u, TPID 0x%04x request\n", vlan_id, qos, local_vlan_proto); - return 0; + ret = 0; + goto out_put_vf; } mutex_lock(&vf->cfg_lock); @@ -4238,7 +4345,9 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos, ice_vc_reset_vf(vf); mutex_unlock(&vf->cfg_lock); - return 0; +out_put_vf: + ice_put_vf(vf); + return ret; } /** @@ -5759,17 +5868,14 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event) struct device *dev; int err = 0; - /* if de-init is underway, don't process messages from VF */ - if (test_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state)) - return; - dev = ice_pf_to_dev(pf); - if (ice_validate_vf_id(pf, vf_id)) { - err = -EINVAL; - goto error_handler; - } - vf = &pf->vf[vf_id]; + vf = ice_get_vf_by_id(pf, vf_id); + if (!vf) { + dev_err(dev, "Unable to locate VF for message from VF ID %d, opcode %d, len %d\n", + vf_id, v_opcode, msglen); + return; + } /* Check if VF 
is disabled. */ if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) { @@ -5792,6 +5898,7 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event) ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, NULL, 0); + ice_put_vf(vf); return; } @@ -5801,6 +5908,7 @@ error_handler: NULL, 0); dev_err(dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n", vf_id, v_opcode, msglen, err); + ice_put_vf(vf); return; } @@ -5810,6 +5918,7 @@ error_handler: if (!mutex_trylock(&vf->cfg_lock)) { dev_info(dev, "VF %u is being configured in another context that will trigger a VFR, so there is no need to handle this message\n", vf->vf_id); + ice_put_vf(vf); return; } @@ -5924,6 +6033,7 @@ error_handler: } mutex_unlock(&vf->cfg_lock); + ice_put_vf(vf); } /** @@ -5939,14 +6049,15 @@ ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi) { struct ice_pf *pf = ice_netdev_to_pf(netdev); struct ice_vf *vf; + int ret; - if (ice_validate_vf_id(pf, vf_id)) + vf = ice_get_vf_by_id(pf, vf_id); + if (!vf) return -EINVAL; - vf = &pf->vf[vf_id]; - - if (ice_check_vf_init(pf, vf)) - return -EBUSY; + ret = ice_check_vf_ready_for_cfg(vf); + if (ret) + goto out_put_vf; ivi->vf = vf_id; ether_addr_copy(ivi->mac, vf->hw_lan_addr.addr); @@ -5967,7 +6078,10 @@ ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi) ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE; ivi->max_tx_rate = vf->max_tx_rate; ivi->min_tx_rate = vf->min_tx_rate; - return 0; + +out_put_vf: + ice_put_vf(vf); + return ret; } /** @@ -6017,28 +6131,31 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) struct ice_vf *vf; int ret; - if (ice_validate_vf_id(pf, vf_id)) - return -EINVAL; - if (is_multicast_ether_addr(mac)) { netdev_err(netdev, "%pM not a valid unicast address\n", mac); return -EINVAL; } - vf = &pf->vf[vf_id]; + vf = ice_get_vf_by_id(pf, vf_id); + if (!vf) + return -EINVAL; + /* nothing left to do, unicast MAC already set */ if (ether_addr_equal(vf->dev_lan_addr.addr, mac) && - ether_addr_equal(vf->hw_lan_addr.addr, mac)) - return 0; + ether_addr_equal(vf->hw_lan_addr.addr, mac)) { + ret = 0; + goto out_put_vf; + } ret = ice_check_vf_ready_for_cfg(vf); if (ret) - return ret; + goto out_put_vf; if (ice_unicast_mac_exists(pf, mac)) { netdev_err(netdev, "Unicast MAC %pM already exists on this PF. 
Preventing setting VF %u unicast MAC address to %pM\n", mac, vf_id, mac); - return -EINVAL; + ret = -EINVAL; + goto out_put_vf; } mutex_lock(&vf->cfg_lock); @@ -6062,7 +6179,10 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) ice_vc_reset_vf(vf); mutex_unlock(&vf->cfg_lock); - return 0; + +out_put_vf: + ice_put_vf(vf); + return ret; } /** @@ -6084,17 +6204,19 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted) return -EOPNOTSUPP; } - if (ice_validate_vf_id(pf, vf_id)) + vf = ice_get_vf_by_id(pf, vf_id); + if (!vf) return -EINVAL; - vf = &pf->vf[vf_id]; ret = ice_check_vf_ready_for_cfg(vf); if (ret) - return ret; + goto out_put_vf; /* Check if already trusted */ - if (trusted == vf->trusted) - return 0; + if (trusted == vf->trusted) { + ret = 0; + goto out_put_vf; + } mutex_lock(&vf->cfg_lock); @@ -6105,7 +6227,9 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted) mutex_unlock(&vf->cfg_lock); - return 0; +out_put_vf: + ice_put_vf(vf); + return ret; } /** @@ -6122,13 +6246,13 @@ int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state) struct ice_vf *vf; int ret; - if (ice_validate_vf_id(pf, vf_id)) + vf = ice_get_vf_by_id(pf, vf_id); + if (!vf) return -EINVAL; - vf = &pf->vf[vf_id]; ret = ice_check_vf_ready_for_cfg(vf); if (ret) - return ret; + goto out_put_vf; switch (link_state) { case IFLA_VF_LINK_STATE_AUTO: @@ -6143,12 +6267,15 @@ int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state) vf->link_up = false; break; default: - return -EINVAL; + ret = -EINVAL; + goto out_put_vf; } ice_vc_notify_vf_link_state(vf); - return 0; +out_put_vf: + ice_put_vf(vf); + return ret; } /** @@ -6157,10 +6284,14 @@ int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state) */ static int ice_calc_all_vfs_min_tx_rate(struct ice_pf *pf) { - int rate = 0, i; + struct ice_vf *vf; + unsigned int bkt; + int rate = 0; - ice_for_each_vf(pf, i) - rate += pf->vf[i].min_tx_rate; + rcu_read_lock(); + ice_for_each_vf_rcu(pf, bkt, vf) + rate += vf->min_tx_rate; + rcu_read_unlock(); return rate; } @@ -6215,13 +6346,14 @@ ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, int ret; dev = ice_pf_to_dev(pf); - if (ice_validate_vf_id(pf, vf_id)) + + vf = ice_get_vf_by_id(pf, vf_id); + if (!vf) return -EINVAL; - vf = &pf->vf[vf_id]; ret = ice_check_vf_ready_for_cfg(vf); if (ret) - return ret; + goto out_put_vf; vsi = ice_get_vf_vsi(vf); @@ -6231,23 +6363,27 @@ ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, if (max_tx_rate && min_tx_rate > max_tx_rate) { dev_err(dev, "Cannot set min Tx rate %d Mbps greater than max Tx rate %d Mbps\n", min_tx_rate, max_tx_rate); - return -EINVAL; + ret = -EINVAL; + goto out_put_vf; } if (min_tx_rate && ice_is_dcb_active(pf)) { dev_err(dev, "DCB on PF is currently enabled. 
VF min Tx rate limiting not allowed on this PF.\n"); - return -EOPNOTSUPP; + ret = -EOPNOTSUPP; + goto out_put_vf; } - if (ice_min_tx_rate_oversubscribed(vf, min_tx_rate)) - return -EINVAL; + if (ice_min_tx_rate_oversubscribed(vf, min_tx_rate)) { + ret = -EINVAL; + goto out_put_vf; + } if (vf->min_tx_rate != (unsigned int)min_tx_rate) { ret = ice_set_min_bw_limit(vsi, (u64)min_tx_rate * 1000); if (ret) { dev_err(dev, "Unable to set min-tx-rate for VF %d\n", vf->vf_id); - return ret; + goto out_put_vf; } vf->min_tx_rate = min_tx_rate; @@ -6258,13 +6394,15 @@ ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, if (ret) { dev_err(dev, "Unable to set max-tx-rate for VF %d\n", vf->vf_id); - return ret; + goto out_put_vf; } vf->max_tx_rate = max_tx_rate; } - return 0; +out_put_vf: + ice_put_vf(vf); + return ret; } /** @@ -6282,17 +6420,19 @@ int ice_get_vf_stats(struct net_device *netdev, int vf_id, struct ice_vf *vf; int ret; - if (ice_validate_vf_id(pf, vf_id)) + vf = ice_get_vf_by_id(pf, vf_id); + if (!vf) return -EINVAL; - vf = &pf->vf[vf_id]; ret = ice_check_vf_ready_for_cfg(vf); if (ret) - return ret; + goto out_put_vf; vsi = ice_get_vf_vsi(vf); - if (!vsi) - return -EINVAL; + if (!vsi) { + ret = -EINVAL; + goto out_put_vf; + } ice_update_eth_stats(vsi); stats = &vsi->eth_stats; @@ -6310,7 +6450,9 @@ int ice_get_vf_stats(struct net_device *netdev, int vf_id, vf_stats->rx_dropped = stats->rx_discards; vf_stats->tx_dropped = stats->tx_discards; - return 0; +out_put_vf: + ice_put_vf(vf); + return ret; } /** @@ -6341,21 +6483,21 @@ void ice_print_vfs_mdd_events(struct ice_pf *pf) { struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; - int i; + struct ice_vf *vf; + unsigned int bkt; /* check that there are pending MDD events to print */ if (!test_and_clear_bit(ICE_MDD_VF_PRINT_PENDING, pf->state)) return; /* VF MDD event logs are rate limited to one second intervals */ - if (time_is_after_jiffies(pf->last_printed_mdd_jiffies + HZ * 1)) + if (time_is_after_jiffies(pf->vfs.last_printed_mdd_jiffies + HZ * 1)) return; - pf->last_printed_mdd_jiffies = jiffies; - - ice_for_each_vf(pf, i) { - struct ice_vf *vf = &pf->vf[i]; + pf->vfs.last_printed_mdd_jiffies = jiffies; + mutex_lock(&pf->vfs.table_lock); + ice_for_each_vf(pf, bkt, vf) { /* only print Rx MDD event message if there are new events */ if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) { vf->mdd_rx_events.last_printed = @@ -6369,10 +6511,11 @@ void ice_print_vfs_mdd_events(struct ice_pf *pf) vf->mdd_tx_events.count; dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n", - vf->mdd_tx_events.count, hw->pf_id, i, + vf->mdd_tx_events.count, hw->pf_id, vf->vf_id, vf->dev_lan_addr.addr); } } + mutex_unlock(&pf->vfs.table_lock); } /** @@ -6424,13 +6567,12 @@ ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event, struct ice_vf *vf; int status; - if (ice_validate_vf_id(pf, vf_id)) + vf = ice_get_vf_by_id(pf, vf_id); + if (!vf) return false; - vf = &pf->vf[vf_id]; - /* Check if VF is disabled. 
*/ if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) - return false; + goto out_put_vf; mbxdata.num_msg_proc = num_msg_proc; mbxdata.num_pending_arq = num_msg_pending; @@ -6441,7 +6583,7 @@ ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event, /* check to see if we have a malicious VF */ status = ice_mbx_vf_state_handler(&pf->hw, &mbxdata, vf_id, &malvf); if (status) - return false; + goto out_put_vf; if (malvf) { bool report_vf = false; @@ -6449,7 +6591,7 @@ ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event, /* if the VF is malicious and we haven't let the user * know about it, then let them know now */ - status = ice_mbx_report_malvf(&pf->hw, pf->malvfs, + status = ice_mbx_report_malvf(&pf->hw, pf->vfs.malvfs, ICE_MAX_VF_COUNT, vf_id, &report_vf); if (status) @@ -6463,12 +6605,9 @@ ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event, &vf->dev_lan_addr.addr[0], pf_vsi->netdev->dev_addr); } - - return true; } - /* if there was an error in detection or the VF is not malicious then - * return false - */ - return false; +out_put_vf: + ice_put_vf(vf); + return malvf; } diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h index 4f4961043638..7f16ed9c70d6 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h @@ -15,7 +15,6 @@ #define ICE_MAX_MACADDR_PER_VF 18 /* Malicious Driver Detection */ -#define ICE_DFLT_NUM_INVAL_MSGS_ALLOWED 10 #define ICE_MDD_EVENTS_THRESHOLD 30 /* Static VF transaction/status register def */ @@ -39,8 +38,50 @@ #define ICE_MAX_VF_RESET_TRIES 40 #define ICE_MAX_VF_RESET_SLEEP_MS 20 -#define ice_for_each_vf(pf, i) \ - for ((i) = 0; (i) < (pf)->num_alloc_vfs; (i)++) +/* VF Hash Table access functions + * + * These functions provide abstraction for interacting with the VF hash table. + * In general, direct access to the hash table should be avoided outside of + * these functions where possible. + * + * The VF entries in the hash table are protected by reference counting to + * track lifetime of accesses from the table. The ice_get_vf_by_id() function + * obtains a reference to the VF structure which must be dropped by using + * ice_put_vf(). + */ + +/** + * ice_for_each_vf - Iterate over each VF entry + * @pf: pointer to the PF private structure + * @bkt: bucket index used for iteration + * @vf: pointer to the VF entry currently being processed in the loop. + * + * The bkt variable is an unsigned integer iterator used to traverse the VF + * entries. It is *not* guaranteed to be the VF's vf_id. Do not assume it is. + * Use vf->vf_id to get the id number if needed. + * + * The caller is expected to be under the table_lock mutex for the entire + * loop. Use this iterator if your loop is long or if it might sleep. + */ +#define ice_for_each_vf(pf, bkt, vf) \ + hash_for_each((pf)->vfs.table, (bkt), (vf), entry) + +/** + * ice_for_each_vf_rcu - Iterate over each VF entry protected by RCU + * @pf: pointer to the PF private structure + * @bkt: bucket index used for iteration + * @vf: pointer to the VF entry currently being processed in the loop. + * + * The bkt variable is an unsigned integer iterator used to traverse the VF + * entries. It is *not* guaranteed to be the VF's vf_id. Do not assume it is. + * Use vf->vf_id to get the id number if needed. + * + * The caller is expected to be under rcu_read_lock() for the entire loop. 
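+ *
+ * For example (sketch), counting VFs without taking table_lock:
+ *
+ *	rcu_read_lock();
+ *	ice_for_each_vf_rcu(pf, bkt, vf)
+ *		num_vfs++;
+ *	rcu_read_unlock();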
+ * Only use this iterator if your loop is short and you can guarantee it does + * not sleep. + */ +#define ice_for_each_vf_rcu(pf, bkt, vf) \ + hash_for_each_rcu((pf)->vfs.table, (bkt), (vf), entry) /* Specific VF states */ enum ice_vf_states { @@ -104,8 +145,22 @@ struct ice_vc_vf_ops { int (*dis_vlan_insertion_v2_msg)(struct ice_vf *vf, u8 *msg); }; +/* Virtchnl/SR-IOV config info */ +struct ice_vfs { + DECLARE_HASHTABLE(table, 8); /* table of VF entries */ + struct mutex table_lock; /* Lock for protecting the hash table */ + u16 num_supported; /* max supported VFs on this PF */ + u16 num_qps_per; /* number of queue pairs per VF */ + u16 num_msix_per; /* number of MSI-X vectors per VF */ + unsigned long last_printed_mdd_jiffies; /* MDD message rate limit */ + DECLARE_BITMAP(malvfs, ICE_MAX_VF_COUNT); /* malicious VF indicator */ +}; + /* VF information structure */ struct ice_vf { + struct hlist_node entry; + struct rcu_head rcu; + struct kref refcnt; struct ice_pf *pf; /* Used during virtchnl message handling and NDO ops against the VF @@ -143,8 +198,6 @@ struct ice_vf { unsigned int max_tx_rate; /* Maximum Tx bandwidth limit in Mbps */ DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */ - u64 num_inval_msgs; /* number of continuous invalid msgs */ - u64 num_valid_msgs; /* number of valid msgs detected */ unsigned long vf_caps; /* VF's adv. capabilities */ u8 num_req_qs; /* num of queue pairs requested by VF */ u16 num_mac; @@ -162,6 +215,10 @@ struct ice_vf { }; #ifdef CONFIG_PCI_IOV +struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id); +void ice_put_vf(struct ice_vf *vf); +bool ice_has_vfs(struct ice_pf *pf); +u16 ice_get_num_vfs(struct ice_pf *pf); struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf); void ice_process_vflr_event(struct ice_pf *pf); int ice_sriov_configure(struct pci_dev *pdev, int num_vfs); @@ -221,6 +278,25 @@ ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode, bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id); bool ice_vf_is_port_vlan_ena(struct ice_vf *vf); #else /* CONFIG_PCI_IOV */ +static inline struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id) +{ + return NULL; +} + +static inline void ice_put_vf(struct ice_vf *vf) +{ +} + +static inline bool ice_has_vfs(struct ice_pf *pf) +{ + return false; +} + +static inline u16 ice_get_num_vfs(struct ice_pf *pf) +{ + return 0; +} + static inline void ice_process_vflr_event(struct ice_pf *pf) { } static inline void ice_free_vfs(struct ice_pf *pf) { } static inline diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.h b/drivers/net/ethernet/intel/ice/ice_xsk.h index 0cbb5793b5b8..123bb98ebfbe 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.h +++ b/drivers/net/ethernet/intel/ice/ice_xsk.h @@ -10,7 +10,7 @@ #ifdef __clang__ #define loop_unrolled_for _Pragma("clang loop unroll_count(8)") for -#elif __GNUC__ >= 4 +#elif __GNUC__ >= 8 #define loop_unrolled_for _Pragma("GCC unroll 8") for #else #define loop_unrolled_for for diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c index 5cad31c3c7b0..40dbf4b43234 100644 --- a/drivers/net/ethernet/intel/igc/igc_phy.c +++ b/drivers/net/ethernet/intel/igc/igc_phy.c @@ -746,8 +746,6 @@ s32 igc_write_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 data) if (ret_val) return ret_val; ret_val = igc_write_phy_reg_mdic(hw, offset, data); - if (ret_val) - return ret_val; hw->phy.ops.release(hw); } else { ret_val = igc_write_xmdio_reg(hw, (u16)offset, dev_addr, @@ -779,8 +777,6 @@ s32 
igc_read_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 *data) if (ret_val) return ret_val; ret_val = igc_read_phy_reg_mdic(hw, offset, data); - if (ret_val) - return ret_val; hw->phy.ops.release(hw); } else { ret_val = igc_read_xmdio_reg(hw, (u16)offset, dev_addr, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 4a69823e6abd..921a4d977d65 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -177,11 +177,14 @@ struct vf_data_storage { u16 pf_vlan; /* When set, guest VLAN config not allowed. */ u16 pf_qos; u16 tx_rate; + int link_enable; + int link_state; u8 spoofchk_enabled; bool rss_query_enabled; u8 trusted; int xcast_mode; unsigned int vf_api; + u8 primary_abort_count; }; enum ixgbevf_xcast_modes { @@ -556,6 +559,8 @@ struct ixgbe_mac_addr { #define IXGBE_TRY_LINK_TIMEOUT (4 * HZ) #define IXGBE_SFP_POLL_JIFFIES (2 * HZ) /* SFP poll every 2 seconds */ +#define IXGBE_PRIMARY_ABORT_LIMIT 5 + /* board specific private data structure */ struct ixgbe_adapter { unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; @@ -614,6 +619,7 @@ struct ixgbe_adapter { #define IXGBE_FLAG2_RX_LEGACY BIT(16) #define IXGBE_FLAG2_IPSEC_ENABLED BIT(17) #define IXGBE_FLAG2_VF_IPSEC_ENABLED BIT(18) +#define IXGBE_FLAG2_AUTO_DISABLE_VF BIT(19) /* Tx fast path data */ int num_tx_queues; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index e90b5047e695..4c26c4b92f07 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -30,7 +30,7 @@ static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data); static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw, u16 offset); -static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw); +static s32 ixgbe_disable_pcie_primary(struct ixgbe_hw *hw); /* Base table for registers values that change by MAC */ const u32 ixgbe_mvals_8259X[IXGBE_MVALS_IDX_LIMIT] = { @@ -746,10 +746,10 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw) usleep_range(1000, 2000); /* - * Prevent the PCI-E bus from from hanging by disabling PCI-E master + * Prevent the PCI-E bus from hanging by disabling PCI-E primary * access and verify no pending requests */ - return ixgbe_disable_pcie_master(hw); + return ixgbe_disable_pcie_primary(hw); } /** @@ -2506,15 +2506,15 @@ static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw) } /** - * ixgbe_disable_pcie_master - Disable PCI-express master access + * ixgbe_disable_pcie_primary - Disable PCI-express primary access * @hw: pointer to hardware structure * - * Disables PCI-Express master access and verifies there are no pending - * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable - * bit hasn't caused the master requests to be disabled, else 0 - * is returned signifying master requests disabled. + * Disables PCI-Express primary access and verifies there are no pending + * requests. IXGBE_ERR_PRIMARY_REQUESTS_PENDING is returned if primary disable + * bit hasn't caused the primary requests to be disabled, else 0 + * is returned signifying primary requests disabled. 
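+ *
+ * Callers treat this as a logged, non-recoverable condition; for
+ * example, ixgbe_reset() maps it to an error message (sketch of the
+ * existing handling):
+ *
+ *	case IXGBE_ERR_PRIMARY_REQUESTS_PENDING:
+ *		e_dev_err("primary disable timed out\n");
+ *		break;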
**/ -static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) +static s32 ixgbe_disable_pcie_primary(struct ixgbe_hw *hw) { u32 i, poll; u16 value; @@ -2523,23 +2523,23 @@ static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS); /* Poll for bit to read as set */ - for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { + for (i = 0; i < IXGBE_PCI_PRIMARY_DISABLE_TIMEOUT; i++) { if (IXGBE_READ_REG(hw, IXGBE_CTRL) & IXGBE_CTRL_GIO_DIS) break; usleep_range(100, 120); } - if (i >= IXGBE_PCI_MASTER_DISABLE_TIMEOUT) { + if (i >= IXGBE_PCI_PRIMARY_DISABLE_TIMEOUT) { hw_dbg(hw, "GIO disable did not set - requesting resets\n"); goto gio_disable_fail; } - /* Exit if master requests are blocked */ + /* Exit if primary requests are blocked */ if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) || ixgbe_removed(hw->hw_addr)) return 0; - /* Poll for master request bit to clear */ - for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { + /* Poll for primary request bit to clear */ + for (i = 0; i < IXGBE_PCI_PRIMARY_DISABLE_TIMEOUT; i++) { udelay(100); if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) return 0; @@ -2547,13 +2547,13 @@ static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) /* * Two consecutive resets are required via CTRL.RST per datasheet - * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine - * of this need. The first reset prevents new master requests from + * 5.2.5.3.2 Primary Disable. We set a flag to inform the reset routine + * of this need. The first reset prevents new primary requests from * being issued by our device. We then must wait 1usec or more for any * remaining completions from the PCIe bus to trickle in, and then reset * again to clear out any effects they may have had on our device. 
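 	 *
 	 * The IXGBE_FLAGS_DOUBLE_RESET_REQUIRED flag set below is consumed
 	 * by the MAC-specific reset routine, which resets once, waits, and
 	 * then resets a second time before continuing initialization.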
*/ - hw_dbg(hw, "GIO Master Disable bit didn't clear - requesting resets\n"); + hw_dbg(hw, "GIO Primary Disable bit didn't clear - requesting resets\n"); gio_disable_fail: hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; @@ -2575,7 +2575,7 @@ gio_disable_fail: } hw_dbg(hw, "PCIe transaction pending bit also did not clear.\n"); - return IXGBE_ERR_MASTER_REQUESTS_PENDING; + return IXGBE_ERR_PRIMARY_REQUESTS_PENDING; } /** diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index f70967c32116..628d0eb0599f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -138,6 +138,8 @@ static const char ixgbe_priv_flags_strings[][ETH_GSTRING_LEN] = { "legacy-rx", #define IXGBE_PRIV_FLAGS_VF_IPSEC_EN BIT(1) "vf-ipsec", +#define IXGBE_PRIV_FLAGS_AUTO_DISABLE_VF BIT(2) + "mdd-disable-vf", }; #define IXGBE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbe_priv_flags_strings) @@ -3510,6 +3512,9 @@ static u32 ixgbe_get_priv_flags(struct net_device *netdev) if (adapter->flags2 & IXGBE_FLAG2_VF_IPSEC_ENABLED) priv_flags |= IXGBE_PRIV_FLAGS_VF_IPSEC_EN; + if (adapter->flags2 & IXGBE_FLAG2_AUTO_DISABLE_VF) + priv_flags |= IXGBE_PRIV_FLAGS_AUTO_DISABLE_VF; + return priv_flags; } @@ -3517,6 +3522,7 @@ static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags) { struct ixgbe_adapter *adapter = netdev_priv(netdev); unsigned int flags2 = adapter->flags2; + unsigned int i; flags2 &= ~IXGBE_FLAG2_RX_LEGACY; if (priv_flags & IXGBE_PRIV_FLAGS_LEGACY_RX) @@ -3526,6 +3532,21 @@ static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags) if (priv_flags & IXGBE_PRIV_FLAGS_VF_IPSEC_EN) flags2 |= IXGBE_FLAG2_VF_IPSEC_ENABLED; + flags2 &= ~IXGBE_FLAG2_AUTO_DISABLE_VF; + if (priv_flags & IXGBE_PRIV_FLAGS_AUTO_DISABLE_VF) { + if (adapter->hw.mac.type == ixgbe_mac_82599EB) { + /* Reset primary abort counter */ + for (i = 0; i < adapter->num_vfs; i++) + adapter->vfinfo[i].primary_abort_count = 0; + + flags2 |= IXGBE_FLAG2_AUTO_DISABLE_VF; + } else { + e_info(probe, + "Cannot set private flags: Operation not supported\n"); + return -EOPNOTSUPP; + } + } + if (flags2 != adapter->flags2) { adapter->flags2 = flags2; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 2c8a4a06f56a..c4a4954aa317 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -5687,6 +5687,9 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter) ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); + + /* update setting rx tx for all active vfs */ + ixgbe_set_all_vfs(adapter); } void ixgbe_reinit_locked(struct ixgbe_adapter *adapter) @@ -5948,8 +5951,8 @@ void ixgbe_reset(struct ixgbe_adapter *adapter) case IXGBE_ERR_SFP_NOT_PRESENT: case IXGBE_ERR_SFP_NOT_SUPPORTED: break; - case IXGBE_ERR_MASTER_REQUESTS_PENDING: - e_dev_err("master disable timed out\n"); + case IXGBE_ERR_PRIMARY_REQUESTS_PENDING: + e_dev_err("primary disable timed out\n"); break; case IXGBE_ERR_EEPROM_VERSION: /* We are running on a pre-production device, log a warning */ @@ -6144,11 +6147,8 @@ void ixgbe_down(struct ixgbe_adapter *adapter) for (i = 0 ; i < adapter->num_vfs; i++) adapter->vfinfo[i].clear_to_send = false; - /* ping all the active vfs to let them know we are going down */ - ixgbe_ping_all_vfs(adapter); - - /* Disable all VFTE/VFRE TX/RX 
*/ - ixgbe_disable_tx_rx(adapter); + /* update setting rx tx for all active vfs */ + ixgbe_set_all_vfs(adapter); } /* disable transmits in the hardware now that interrupts are off */ @@ -7613,6 +7613,27 @@ static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter) } #ifdef CONFIG_PCI_IOV +static void ixgbe_bad_vf_abort(struct ixgbe_adapter *adapter, u32 vf) +{ + struct ixgbe_hw *hw = &adapter->hw; + + if (adapter->hw.mac.type == ixgbe_mac_82599EB && + adapter->flags2 & IXGBE_FLAG2_AUTO_DISABLE_VF) { + adapter->vfinfo[vf].primary_abort_count++; + if (adapter->vfinfo[vf].primary_abort_count == + IXGBE_PRIMARY_ABORT_LIMIT) { + ixgbe_set_vf_link_state(adapter, vf, + IFLA_VF_LINK_STATE_DISABLE); + adapter->vfinfo[vf].primary_abort_count = 0; + + e_info(drv, + "Malicious Driver Detection event detected on PF %d VF %d MAC: %pM mdd-disable-vf=on", + hw->bus.func, vf, + adapter->vfinfo[vf].vf_mac_addresses); + } + } +} + static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; @@ -7644,8 +7665,10 @@ static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter) continue; pci_read_config_word(vfdev, PCI_STATUS, &status_reg); if (status_reg != IXGBE_FAILED_READ_CFG_WORD && - status_reg & PCI_STATUS_REC_MASTER_ABORT) + status_reg & PCI_STATUS_REC_MASTER_ABORT) { + ixgbe_bad_vf_abort(adapter, vf); pcie_flr(vfdev); + } } } @@ -10284,6 +10307,7 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan, .ndo_set_vf_rate = ixgbe_ndo_set_vf_bw, .ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk, + .ndo_set_vf_link_state = ixgbe_ndo_set_vf_link_state, .ndo_set_vf_rss_query_en = ixgbe_ndo_set_vf_rss_query_en, .ndo_set_vf_trust = ixgbe_ndo_set_vf_trust, .ndo_get_vf_config = ixgbe_ndo_get_vf_config, @@ -10745,6 +10769,9 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto err_sw_init; + if (adapter->hw.mac.type == ixgbe_mac_82599EB) + adapter->flags2 |= IXGBE_FLAG2_AUTO_DISABLE_VF; + switch (adapter->hw.mac.type) { case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h index a148534d7256..8f4316b19278 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h @@ -85,6 +85,8 @@ enum ixgbe_pfvf_api_rev { #define IXGBE_VF_IPSEC_ADD 0x0d #define IXGBE_VF_IPSEC_DEL 0x0e +#define IXGBE_VF_GET_LINK_STATE 0x10 /* get vf link state */ + /* length of permanent address message returned from PF */ #define IXGBE_VF_PERMADDR_MSG_LEN 4 /* word in permanent address message with the current multicast type */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 214a38de3f41..7f11c0a8e7a9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -96,6 +96,7 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter, for (i = 0; i < num_vfs; i++) { /* enable spoof checking for all VFs */ adapter->vfinfo[i].spoofchk_enabled = true; + adapter->vfinfo[i].link_enable = true; /* We support VF RSS querying only for 82599 and x540 * devices at the moment. 
These devices share RSS @@ -820,6 +821,57 @@ static inline void ixgbe_write_qde(struct ixgbe_adapter *adapter, u32 vf, } } +/** + * ixgbe_set_vf_rx_tx - Set VF rx tx + * @adapter: Pointer to adapter struct + * @vf: VF identifier + * + * Set or reset correct transmit and receive for vf + **/ +static void ixgbe_set_vf_rx_tx(struct ixgbe_adapter *adapter, int vf) +{ + u32 reg_cur_tx, reg_cur_rx, reg_req_tx, reg_req_rx; + struct ixgbe_hw *hw = &adapter->hw; + u32 reg_offset, vf_shift; + + vf_shift = vf % 32; + reg_offset = vf / 32; + + reg_cur_tx = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset)); + reg_cur_rx = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset)); + + if (adapter->vfinfo[vf].link_enable) { + reg_req_tx = reg_cur_tx | 1 << vf_shift; + reg_req_rx = reg_cur_rx | 1 << vf_shift; + } else { + reg_req_tx = reg_cur_tx & ~(1 << vf_shift); + reg_req_rx = reg_cur_rx & ~(1 << vf_shift); + } + + /* The 82599 cannot support a mix of jumbo and non-jumbo PF/VFs. + * For more info take a look at ixgbe_set_vf_lpe + */ + if (adapter->hw.mac.type == ixgbe_mac_82599EB) { + struct net_device *dev = adapter->netdev; + int pf_max_frame = dev->mtu + ETH_HLEN; + +#if IS_ENABLED(CONFIG_FCOE) + if (dev->features & NETIF_F_FCOE_MTU) + pf_max_frame = max_t(int, pf_max_frame, + IXGBE_FCOE_JUMBO_FRAME_SIZE); +#endif /* CONFIG_FCOE */ + + if (pf_max_frame > ETH_FRAME_LEN) + reg_req_rx = reg_cur_rx & ~(1 << vf_shift); + } + + /* Enable/Disable particular VF */ + if (reg_cur_tx != reg_req_tx) + IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg_req_tx); + if (reg_cur_rx != reg_req_rx) + IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg_req_rx); +} + static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) { struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; @@ -845,11 +897,6 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) vf_shift = vf % 32; reg_offset = vf / 32; - /* enable transmit for vf */ - reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset)); - reg |= BIT(vf_shift); - IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg); - /* force drop enable for all VF Rx queues */ reg = IXGBE_QDE_ENABLE; if (adapter->vfinfo[vf].pf_vlan) @@ -857,27 +904,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) ixgbe_write_qde(adapter, vf, reg); - /* enable receive for vf */ - reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset)); - reg |= BIT(vf_shift); - /* - * The 82599 cannot support a mix of jumbo and non-jumbo PF/VFs. 
- * For more info take a look at ixgbe_set_vf_lpe - */ - if (adapter->hw.mac.type == ixgbe_mac_82599EB) { - struct net_device *dev = adapter->netdev; - int pf_max_frame = dev->mtu + ETH_HLEN; - -#ifdef CONFIG_FCOE - if (dev->features & NETIF_F_FCOE_MTU) - pf_max_frame = max_t(int, pf_max_frame, - IXGBE_FCOE_JUMBO_FRAME_SIZE); - -#endif /* CONFIG_FCOE */ - if (pf_max_frame > ETH_FRAME_LEN) - reg &= ~BIT(vf_shift); - } - IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg); + ixgbe_set_vf_rx_tx(adapter, vf); /* enable VF mailbox for further messages */ adapter->vfinfo[vf].clear_to_send = true; @@ -1202,6 +1229,26 @@ out: return 0; } +static int ixgbe_get_vf_link_state(struct ixgbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + u32 *link_state = &msgbuf[1]; + + /* verify the PF is supporting the correct API */ + switch (adapter->vfinfo[vf].vf_api) { + case ixgbe_mbox_api_12: + case ixgbe_mbox_api_13: + case ixgbe_mbox_api_14: + break; + default: + return -EOPNOTSUPP; + } + + *link_state = adapter->vfinfo[vf].link_enable; + + return 0; +} + static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) { u32 mbx_size = IXGBE_VFMAILBOX_SIZE; @@ -1267,6 +1314,9 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) case IXGBE_VF_UPDATE_XCAST_MODE: retval = ixgbe_update_vf_xcast_mode(adapter, msgbuf, vf); break; + case IXGBE_VF_GET_LINK_STATE: + retval = ixgbe_get_vf_link_state(adapter, msgbuf, vf); + break; case IXGBE_VF_IPSEC_ADD: retval = ixgbe_ipsec_vf_add_sa(adapter, msgbuf, vf); break; @@ -1322,18 +1372,6 @@ void ixgbe_msg_task(struct ixgbe_adapter *adapter) } } -void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - - /* disable transmit and receive for all vfs */ - IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0); - IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0); - - IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0); - IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0); -} - static inline void ixgbe_ping_vf(struct ixgbe_adapter *adapter, int vf) { struct ixgbe_hw *hw = &adapter->hw; @@ -1359,6 +1397,21 @@ void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter) } } +/** + * ixgbe_set_all_vfs - update vfs queues + * @adapter: Pointer to adapter struct + * + * Update setting transmit and receive queues for all vfs + **/ +void ixgbe_set_all_vfs(struct ixgbe_adapter *adapter) +{ + int i; + + for (i = 0 ; i < adapter->num_vfs; i++) + ixgbe_set_vf_link_state(adapter, i, + adapter->vfinfo[i].link_state); +} + int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) { struct ixgbe_adapter *adapter = netdev_priv(netdev); @@ -1656,6 +1709,84 @@ int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting) return 0; } +/** + * ixgbe_set_vf_link_state - Set link state + * @adapter: Pointer to adapter struct + * @vf: VF identifier + * @state: required link state + * + * Set a link force state on/off a single vf + **/ +void ixgbe_set_vf_link_state(struct ixgbe_adapter *adapter, int vf, int state) +{ + adapter->vfinfo[vf].link_state = state; + + switch (state) { + case IFLA_VF_LINK_STATE_AUTO: + if (test_bit(__IXGBE_DOWN, &adapter->state)) + adapter->vfinfo[vf].link_enable = false; + else + adapter->vfinfo[vf].link_enable = true; + break; + case IFLA_VF_LINK_STATE_ENABLE: + adapter->vfinfo[vf].link_enable = true; + break; + case IFLA_VF_LINK_STATE_DISABLE: + adapter->vfinfo[vf].link_enable = false; + break; + } + + ixgbe_set_vf_rx_tx(adapter, vf); + + /* restart the VF */ + adapter->vfinfo[vf].clear_to_send = false; + ixgbe_ping_vf(adapter, vf); +} + 
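+/*
+ * Illustrative summary of how the requested state resolves to
+ * vfinfo[vf].link_enable in ixgbe_set_vf_link_state() above:
+ *
+ *	IFLA_VF_LINK_STATE_AUTO    -> !test_bit(__IXGBE_DOWN, &adapter->state)
+ *	IFLA_VF_LINK_STATE_ENABLE  -> true
+ *	IFLA_VF_LINK_STATE_DISABLE -> false
+ *
+ * Userspace reaches this path through the ndo below, e.g.
+ * "ip link set <pf> vf <n> state disable".
+ */
+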
+/** + * ixgbe_ndo_set_vf_link_state - Set link state + * @netdev: network interface device structure + * @vf: VF identifier + * @state: required link state + * + * Set the link state of a specified VF, regardless of physical link state + **/ +int ixgbe_ndo_set_vf_link_state(struct net_device *netdev, int vf, int state) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + int ret = 0; + + if (vf < 0 || vf >= adapter->num_vfs) { + dev_err(&adapter->pdev->dev, + "NDO set VF link - invalid VF identifier %d\n", vf); + return -EINVAL; + } + + switch (state) { + case IFLA_VF_LINK_STATE_ENABLE: + dev_info(&adapter->pdev->dev, + "NDO set VF %d link state %d - not supported\n", + vf, state); + break; + case IFLA_VF_LINK_STATE_DISABLE: + dev_info(&adapter->pdev->dev, + "NDO set VF %d link state disable\n", vf); + ixgbe_set_vf_link_state(adapter, vf, state); + break; + case IFLA_VF_LINK_STATE_AUTO: + dev_info(&adapter->pdev->dev, + "NDO set VF %d link state auto\n", vf); + ixgbe_set_vf_link_state(adapter, vf, state); + break; + default: + dev_err(&adapter->pdev->dev, + "NDO set VF %d - invalid link state %d\n", vf, state); + ret = -EINVAL; + } + + return ret; +} + int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf, bool setting) { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h index 3ec21923c89c..0690ecb8dfa3 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h @@ -17,8 +17,8 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter); #endif void ixgbe_msg_task(struct ixgbe_adapter *adapter); int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask); -void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter); void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter); +void ixgbe_set_all_vfs(struct ixgbe_adapter *adapter); int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac); int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan, u8 qos, __be16 vlan_proto); @@ -31,7 +31,9 @@ int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf, int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting); int ixgbe_ndo_get_vf_config(struct net_device *netdev, int vf, struct ifla_vf_info *ivi); +int ixgbe_ndo_set_vf_link_state(struct net_device *netdev, int vf, int state); void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter); +void ixgbe_set_vf_link_state(struct ixgbe_adapter *adapter, int vf, int state); int ixgbe_disable_sriov(struct ixgbe_adapter *adapter); #ifdef CONFIG_PCI_IOV void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, unsigned int max_vfs); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 2647937f7f4d..6da9880d766a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -1247,7 +1247,7 @@ struct ixgbe_nvm_version { #define IXGBE_PSRTYPE_RQPL_SHIFT 29 /* CTRL Bit Masks */ -#define IXGBE_CTRL_GIO_DIS 0x00000004 /* Global IO Master Disable bit */ +#define IXGBE_CTRL_GIO_DIS 0x00000004 /* Global IO Primary Disable bit */ #define IXGBE_CTRL_LNK_RST 0x00000008 /* Link Reset. Resets everything. 
*/ #define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */ #define IXGBE_CTRL_RST_MASK (IXGBE_CTRL_LNK_RST | IXGBE_CTRL_RST) @@ -1811,7 +1811,7 @@ enum { /* STATUS Bit Masks */ #define IXGBE_STATUS_LAN_ID 0x0000000C /* LAN ID */ #define IXGBE_STATUS_LAN_ID_SHIFT 2 /* LAN ID Shift*/ -#define IXGBE_STATUS_GIO 0x00080000 /* GIO Master Enable Status */ +#define IXGBE_STATUS_GIO 0x00080000 /* GIO Primary Enable Status */ #define IXGBE_STATUS_LAN_ID_0 0x00000000 /* LAN ID 0 */ #define IXGBE_STATUS_LAN_ID_1 0x00000004 /* LAN ID 1 */ @@ -2193,8 +2193,8 @@ enum { #define IXGBE_PCIDEVCTRL2_4_8s 0xd #define IXGBE_PCIDEVCTRL2_17_34s 0xe -/* Number of 100 microseconds we wait for PCI Express master disable */ -#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800 +/* Number of 100 microseconds we wait for PCI Express primary disable */ +#define IXGBE_PCI_PRIMARY_DISABLE_TIMEOUT 800 /* RAH */ #define IXGBE_RAH_VIND_MASK 0x003C0000 @@ -3671,7 +3671,7 @@ struct ixgbe_info { #define IXGBE_ERR_ADAPTER_STOPPED -9 #define IXGBE_ERR_INVALID_MAC_ADDR -10 #define IXGBE_ERR_DEVICE_NOT_SUPPORTED -11 -#define IXGBE_ERR_MASTER_REQUESTS_PENDING -12 +#define IXGBE_ERR_PRIMARY_REQUESTS_PENDING -12 #define IXGBE_ERR_INVALID_LINK_SETTINGS -13 #define IXGBE_ERR_AUTONEG_NOT_COMPLETE -14 #define IXGBE_ERR_RESET_FAILED -15 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c index ee28929b9c5f..dd7ff66d422f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c @@ -395,12 +395,14 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget) u32 cmd_type; while (budget-- > 0) { - if (unlikely(!ixgbe_desc_unused(xdp_ring)) || - !netif_carrier_ok(xdp_ring->netdev)) { + if (unlikely(!ixgbe_desc_unused(xdp_ring))) { work_done = false; break; } + if (!netif_carrier_ok(xdp_ring->netdev)) + break; + if (!xsk_tx_peek_desc(pool, &desc)) break; diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index e257390a4f6a..149c733fcc2b 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -387,6 +387,8 @@ struct ixgbevf_adapter { u32 *rss_key; u8 rss_indir_tbl[IXGBEVF_X550_VFRETA_SIZE]; u32 flags; + bool link_state; + #define IXGBEVF_FLAGS_LEGACY_RX BIT(1) #ifdef CONFIG_XFRM diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 17fbc450da61..55b87bc3a938 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -2298,7 +2298,9 @@ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter) static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter) { struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; struct ixgbe_hw *hw = &adapter->hw; + bool state; ixgbevf_configure_msix(adapter); @@ -2311,6 +2313,11 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter) spin_unlock_bh(&adapter->mbx_lock); + state = adapter->link_state; + hw->mac.ops.get_link_state(hw, &adapter->link_state); + if (state && state != adapter->link_state) + dev_info(&pdev->dev, "VF is administratively disabled\n"); + smp_mb__before_atomic(); clear_bit(__IXGBEVF_DOWN, &adapter->state); ixgbevf_napi_enable_all(adapter); @@ -2753,7 +2760,7 @@ static int ixgbevf_alloc_q_vector(struct ixgbevf_adapter *adapter, int v_idx, ring->reg_idx = reg_idx; /* assign ring to adapter */ - 
adapter->tx_ring[txr_idx] = ring; + adapter->tx_ring[txr_idx] = ring; /* update count and index */ txr_count--; @@ -3081,6 +3088,8 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter) adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD; adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD; + adapter->link_state = true; + set_bit(__IXGBEVF_DOWN, &adapter->state); return 0; @@ -3313,7 +3322,7 @@ static void ixgbevf_watchdog_subtask(struct ixgbevf_adapter *adapter) ixgbevf_watchdog_update_link(adapter); - if (adapter->link_up) + if (adapter->link_up && adapter->link_state) ixgbevf_watchdog_link_is_up(adapter); else ixgbevf_watchdog_link_is_down(adapter); diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h index 7346ccf014a5..835bbcc5cc8e 100644 --- a/drivers/net/ethernet/intel/ixgbevf/mbx.h +++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h @@ -100,6 +100,8 @@ enum ixgbe_pfvf_api_rev { #define IXGBE_VF_IPSEC_ADD 0x0d #define IXGBE_VF_IPSEC_DEL 0x0e +#define IXGBE_VF_GET_LINK_STATE 0x10 /* get vf link state */ + /* length of permanent address message returned from PF */ #define IXGBE_VF_PERMADDR_MSG_LEN 4 /* word in permanent address message with the current multicast type */ diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c index 61d8970c6d1d..68fc32e36e88 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.c +++ b/drivers/net/ethernet/intel/ixgbevf/vf.c @@ -585,6 +585,46 @@ static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode) } /** + * ixgbevf_get_link_state_vf - Get VF link state from PF + * @hw: pointer to the HW structure + * @link_state: link state storage + * + * Returns 0 on success, or an error code on failure. + */ +static s32 ixgbevf_get_link_state_vf(struct ixgbe_hw *hw, bool *link_state) +{ + u32 msgbuf[2]; + s32 ret_val; + s32 err; + + msgbuf[0] = IXGBE_VF_GET_LINK_STATE; + msgbuf[1] = 0x0; + + err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2); + + if (err || (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE)) { + ret_val = IXGBE_ERR_MBX; + } else { + ret_val = 0; + *link_state = msgbuf[1]; + } + + return ret_val; +} + +/** + * ixgbevf_hv_get_link_state_vf - Hyper-V variant; just a stub. + * @hw: unused + * @link_state: unused + * + * Hyper-V variant; there is no mailbox communication.
+ */ +static s32 ixgbevf_hv_get_link_state_vf(struct ixgbe_hw *hw, bool *link_state) +{ + return -EOPNOTSUPP; +} + +/** * ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address * @hw: pointer to the HW structure * @vlan: 12 bit VLAN ID @@ -968,6 +1008,7 @@ static const struct ixgbe_mac_operations ixgbevf_mac_ops = { .set_rar = ixgbevf_set_rar_vf, .update_mc_addr_list = ixgbevf_update_mc_addr_list_vf, .update_xcast_mode = ixgbevf_update_xcast_mode, + .get_link_state = ixgbevf_get_link_state_vf, .set_uc_addr = ixgbevf_set_uc_addr_vf, .set_vfta = ixgbevf_set_vfta_vf, .set_rlpml = ixgbevf_set_rlpml_vf, @@ -985,6 +1026,7 @@ static const struct ixgbe_mac_operations ixgbevf_hv_mac_ops = { .set_rar = ixgbevf_hv_set_rar_vf, .update_mc_addr_list = ixgbevf_hv_update_mc_addr_list_vf, .update_xcast_mode = ixgbevf_hv_update_xcast_mode, + .get_link_state = ixgbevf_hv_get_link_state_vf, .set_uc_addr = ixgbevf_hv_set_uc_addr_vf, .set_vfta = ixgbevf_hv_set_vfta_vf, .set_rlpml = ixgbevf_hv_set_rlpml_vf, diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h index 54158dac8707..b4eef5b6c172 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.h +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h @@ -39,6 +39,7 @@ struct ixgbe_mac_operations { s32 (*init_rx_addrs)(struct ixgbe_hw *); s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *); s32 (*update_xcast_mode)(struct ixgbe_hw *, int); + s32 (*get_link_state)(struct ixgbe_hw *hw, bool *link_state); s32 (*enable_mc)(struct ixgbe_hw *); s32 (*disable_mc)(struct ixgbe_hw *); s32 (*clear_vfta)(struct ixgbe_hw *); diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c index 439674fc9765..b6c5122da995 100644 --- a/drivers/net/ethernet/jme.c +++ b/drivers/net/ethernet/jme.c @@ -28,6 +28,7 @@ #include <linux/udp.h> #include <linux/if_vlan.h> #include <linux/slab.h> +#include <linux/jiffies.h> #include <net/ip6_checksum.h> #include "jme.h" @@ -2179,7 +2180,7 @@ jme_stop_queue_if_full(struct jme_adapter *jme) } if (unlikely(txbi->start_xmit && - (jiffies - txbi->start_xmit) >= TX_TIMEOUT && + time_is_before_eq_jiffies(txbi->start_xmit + TX_TIMEOUT) && txbi->skb)) { netif_stop_queue(jme->dev); netif_info(jme, tx_queued, jme->dev, diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c index 41d11137cde0..5712c3e94be8 100644 --- a/drivers/net/ethernet/lantiq_xrx200.c +++ b/drivers/net/ethernet/lantiq_xrx200.c @@ -260,9 +260,9 @@ static int xrx200_hw_receive(struct xrx200_chan *ch) if (ctl & LTQ_DMA_EOP) { ch->skb_head->protocol = eth_type_trans(ch->skb_head, net_dev); - netif_receive_skb(ch->skb_head); net_dev->stats.rx_packets++; net_dev->stats.rx_bytes += ch->skb_head->len; + netif_receive_skb(ch->skb_head); ch->skb_head = NULL; ch->skb_tail = NULL; ret = XRX200_DMA_PACKET_COMPLETE; diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 105247582684..e6cd4e214d79 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -1661,7 +1661,7 @@ mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er, if (er->rx_mini_pending || er->rx_jumbo_pending) return -EINVAL; - mp->rx_ring_size = er->rx_pending < 4096 ? 
er->rx_pending : 4096; + mp->rx_ring_size = min(er->rx_pending, 4096U); mp->tx_ring_size = clamp_t(unsigned int, er->tx_pending, MV643XX_MAX_SKB_DESCS * 2, 4096); if (mp->tx_ring_size != er->tx_pending) @@ -2704,6 +2704,16 @@ MODULE_DEVICE_TABLE(of, mv643xx_eth_shared_ids); static struct platform_device *port_platdev[3]; +static void mv643xx_eth_shared_of_remove(void) +{ + int n; + + for (n = 0; n < 3; n++) { + platform_device_del(port_platdev[n]); + port_platdev[n] = NULL; + } +} + static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev, struct device_node *pnp) { @@ -2740,7 +2750,9 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev, return -EINVAL; } - of_get_mac_address(pnp, ppd.mac_addr); + ret = of_get_mac_address(pnp, ppd.mac_addr); + if (ret) + return ret; mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size); mv643xx_eth_property(pnp, "tx-sram-addr", ppd.tx_sram_addr); @@ -2804,21 +2816,13 @@ static int mv643xx_eth_shared_of_probe(struct platform_device *pdev) ret = mv643xx_eth_shared_of_add_port(pdev, pnp); if (ret) { of_node_put(pnp); + mv643xx_eth_shared_of_remove(); return ret; } } return 0; } -static void mv643xx_eth_shared_of_remove(void) -{ - int n; - - for (n = 0; n < 3; n++) { - platform_device_del(port_platdev[n]); - port_platdev[n] = NULL; - } -} #else static inline int mv643xx_eth_shared_of_probe(struct platform_device *pdev) { @@ -3088,8 +3092,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev) struct mv643xx_eth_private *mp; struct net_device *dev; struct phy_device *phydev = NULL; - struct resource *res; - int err; + int err, irq; pd = dev_get_platdata(&pdev->dev); if (pd == NULL) { @@ -3185,9 +3188,10 @@ static int mv643xx_eth_probe(struct platform_device *pdev) timer_setup(&mp->rx_oom, oom_timer_wrapper, 0); - res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - BUG_ON(!res); - dev->irq = res->start; + irq = platform_get_irq(pdev, 0); + if (WARN_ON(irq < 0)) + return irq; + dev->irq = irq; dev->netdev_ops = &mv643xx_eth_netdev_ops; diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 7cdbf8b8bbf6..1a835b48791b 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -6870,6 +6870,9 @@ static int mvpp2_port_probe(struct platform_device *pdev, dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE; dev->dev.of_node = port_node; + port->pcs_gmac.ops = &mvpp2_phylink_gmac_pcs_ops; + port->pcs_xlg.ops = &mvpp2_phylink_xlg_pcs_ops; + if (!mvpp2_use_acpi_compat_mode(port_fwnode)) { port->phylink_config.dev = &dev->dev; port->phylink_config.type = PHYLINK_NETDEV; @@ -6940,9 +6943,6 @@ static int mvpp2_port_probe(struct platform_device *pdev, port->phylink_config.supported_interfaces); } - port->pcs_gmac.ops = &mvpp2_phylink_gmac_pcs_ops; - port->pcs_xlg.ops = &mvpp2_phylink_xlg_pcs_ops; - phylink = phylink_create(&port->phylink_config, port_fwnode, phy_mode, &mvpp2_phylink_ops); if (IS_ERR(phylink)) { diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c index e682b7bfde64..67a6821d2dff 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c @@ -25,6 +25,9 @@ #define PCI_SUBSYS_DEVID_OCTX2_95XXO_PTP 0xB600 #define PCI_DEVID_OCTEONTX2_RST 0xA085 #define PCI_DEVID_CN10K_PTP 0xA09E +#define PCI_SUBSYS_DEVID_CN10K_A_PTP 0xB900 +#define PCI_SUBSYS_DEVID_CNF10K_A_PTP 0xBA00 +#define 
PCI_SUBSYS_DEVID_CNF10K_B_PTP 0xBC00 #define PCI_PTP_BAR_NO 0 @@ -46,10 +49,105 @@ #define PTP_CLOCK_HI 0xF10ULL #define PTP_CLOCK_COMP 0xF18ULL #define PTP_TIMESTAMP 0xF20ULL +#define PTP_CLOCK_SEC 0xFD0ULL + +#define CYCLE_MULT 1000 static struct ptp *first_ptp_block; static const struct pci_device_id ptp_id_table[]; +static bool cn10k_ptp_errata(struct ptp *ptp) +{ + if (ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_PTP || + ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_A_PTP) + return true; + return false; +} + +static bool is_ptp_tsfmt_sec_nsec(struct ptp *ptp) +{ + if (ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_PTP || + ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_A_PTP) + return true; + return false; +} + +static u64 read_ptp_tstmp_sec_nsec(struct ptp *ptp) +{ + u64 sec, sec1, nsec; + unsigned long flags; + + spin_lock_irqsave(&ptp->ptp_lock, flags); + sec = readq(ptp->reg_base + PTP_CLOCK_SEC) & 0xFFFFFFFFUL; + nsec = readq(ptp->reg_base + PTP_CLOCK_HI); + sec1 = readq(ptp->reg_base + PTP_CLOCK_SEC) & 0xFFFFFFFFUL; + /* check nsec rollover */ + if (sec1 > sec) { + nsec = readq(ptp->reg_base + PTP_CLOCK_HI); + sec = sec1; + } + spin_unlock_irqrestore(&ptp->ptp_lock, flags); + + return sec * NSEC_PER_SEC + nsec; +} + +static u64 read_ptp_tstmp_nsec(struct ptp *ptp) +{ + return readq(ptp->reg_base + PTP_CLOCK_HI); +} + +static u64 ptp_calc_adjusted_comp(u64 ptp_clock_freq) +{ + u64 comp, adj = 0, cycles_per_sec, ns_drift = 0; + u32 ptp_clock_nsec, cycle_time; + int cycle; + + /* Errata: + * Issue #1: At the time of 1 sec rollover of the nano-second counter, + * the nano-second counter is set to 0. However, it should be set to + * (existing counter_value - 10^9). + * + * Issue #2: The nano-second counter rolls over at 0x3B9A_C9FF. + * It should roll over at 0x3B9A_CA00. + */ + + /* calculate ptp_clock_comp value */ + comp = ((u64)1000000000ULL << 32) / ptp_clock_freq; + /* use CYCLE_MULT to avoid accuracy loss due to integer arithmetic */ + cycle_time = NSEC_PER_SEC * CYCLE_MULT / ptp_clock_freq; + /* cycles per sec */ + cycles_per_sec = ptp_clock_freq; + + /* check whether ptp nanosecond counter rolls over early */ + cycle = cycles_per_sec - 1; + ptp_clock_nsec = (cycle * comp) >> 32; + while (ptp_clock_nsec < NSEC_PER_SEC) { + if (ptp_clock_nsec == 0x3B9AC9FF) + goto calc_adj_comp; + cycle++; + ptp_clock_nsec = (cycle * comp) >> 32; + } + /* compute nanoseconds lost per second when nsec counter rolls over */ + ns_drift = ptp_clock_nsec - NSEC_PER_SEC; + /* calculate ptp_clock_comp adjustment */ + if (ns_drift > 0) { + adj = comp * ns_drift; + adj = adj / 1000000000ULL; + } + /* speed up the ptp clock to account for nanoseconds lost */ + comp += adj; + return comp; + +calc_adj_comp: + /* slow down the ptp clock to not rollover early */ + adj = comp * cycle_time; + adj = adj / 1000000000ULL; + adj = adj / CYCLE_MULT; + comp -= adj; + + return comp; +} + struct ptp *ptp_get(void) { struct ptp *ptp = first_ptp_block; @@ -77,8 +175,8 @@ void ptp_put(struct ptp *ptp) static int ptp_adjfine(struct ptp *ptp, long scaled_ppm) { bool neg_adj = false; - u64 comp; - u64 adj; + u32 freq, freq_adj; + u64 comp, adj; s64 ppb; if (scaled_ppm < 0) { @@ -100,15 +198,22 @@ static int ptp_adjfine(struct ptp *ptp, long scaled_ppm) * where tbase is the basic compensation value calculated * initialy in the probe function. 
*/ - comp = ((u64)1000000000ull << 32) / ptp->clock_rate; /* convert scaled_ppm to ppb */ ppb = 1 + scaled_ppm; ppb *= 125; ppb >>= 13; - adj = comp * ppb; - adj = div_u64(adj, 1000000000ull); - comp = neg_adj ? comp - adj : comp + adj; + if (cn10k_ptp_errata(ptp)) { + /* calculate the new frequency based on ppb */ + freq_adj = (ptp->clock_rate * ppb) / 1000000000ULL; + freq = neg_adj ? ptp->clock_rate + freq_adj : ptp->clock_rate - freq_adj; + comp = ptp_calc_adjusted_comp(freq); + } else { + comp = ((u64)1000000000ull << 32) / ptp->clock_rate; + adj = comp * ppb; + adj = div_u64(adj, 1000000000ull); + comp = neg_adj ? comp - adj : comp + adj; + } writeq(comp, ptp->reg_base + PTP_CLOCK_COMP); return 0; @@ -117,7 +222,7 @@ static int ptp_adjfine(struct ptp *ptp, long scaled_ppm) static int ptp_get_clock(struct ptp *ptp, u64 *clk) { /* Return the current PTP clock */ - *clk = readq(ptp->reg_base + PTP_CLOCK_HI); + *clk = ptp->read_ptp_tstmp(ptp); return 0; } @@ -166,7 +271,11 @@ void ptp_start(struct ptp *ptp, u64 sclk, u32 ext_clk_freq, u32 extts) writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_HI_INCR); writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_LO_INCR); - clock_comp = ((u64)1000000000ull << 32) / ptp->clock_rate; + if (cn10k_ptp_errata(ptp)) + clock_comp = ptp_calc_adjusted_comp(ptp->clock_rate); + else + clock_comp = ((u64)1000000000ull << 32) / ptp->clock_rate; + /* Initial compensation value to start the nanosecs counter */ writeq(clock_comp, ptp->reg_base + PTP_CLOCK_COMP); } @@ -214,6 +323,12 @@ static int ptp_probe(struct pci_dev *pdev, if (!first_ptp_block) first_ptp_block = ptp; + spin_lock_init(&ptp->ptp_lock); + if (is_ptp_tsfmt_sec_nsec(ptp)) + ptp->read_ptp_tstmp = &read_ptp_tstmp_sec_nsec; + else + ptp->read_ptp_tstmp = &read_ptp_tstmp_nsec; + return 0; error_free: diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.h b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h index 1b81a0493cd3..95a955159f40 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h @@ -15,6 +15,8 @@ struct ptp { struct pci_dev *pdev; void __iomem *reg_base; + u64 (*read_ptp_tstmp)(struct ptp *ptp); + spinlock_t ptp_lock; /* lock */ u32 clock_rate; }; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c index d7a8aad46e12..47e83d7a5804 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c @@ -141,14 +141,15 @@ int rpm_lmac_get_pause_frm_status(void *rpmd, int lmac_id, return 0; } -static void rpm_cfg_pfc_quanta_thresh(rpm_t *rpm, int lmac_id, u16 pfc_en, +static void rpm_cfg_pfc_quanta_thresh(rpm_t *rpm, int lmac_id, + unsigned long pfc_en, bool enable) { u64 quanta_offset = 0, quanta_thresh = 0, cfg; int i, shift; /* Set pause time and interval */ - for_each_set_bit(i, (unsigned long *)&pfc_en, 16) { + for_each_set_bit(i, &pfc_en, 16) { switch (i) { case 0: case 1: diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c index 2c9760814bc3..b9d7601138ca 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c @@ -1048,7 +1048,7 @@ int otx2_config_nix(struct otx2_nic *pfvf) struct nix_lf_alloc_rsp *rsp; int err; - pfvf->qset.xqe_size = NIX_XQESZ_W16 ? 
128 : 512; + pfvf->qset.xqe_size = pfvf->hw.xqe_size; /* Get memory to put this msg */ nixlf = otx2_mbox_alloc_msg_nix_lf_alloc(&pfvf->mbox); @@ -1061,7 +1061,7 @@ int otx2_config_nix(struct otx2_nic *pfvf) nixlf->cq_cnt = pfvf->qset.cq_cnt; nixlf->rss_sz = MAX_RSS_INDIR_TBL_SIZE; nixlf->rss_grps = MAX_RSS_GROUPS; - nixlf->xqe_sz = NIX_XQESZ_W16; + nixlf->xqe_sz = pfvf->hw.xqe_size == 128 ? NIX_XQESZ_W16 : NIX_XQESZ_W64; /* We don't know absolute NPA LF idx attached. * AF will replace 'RVU_DEFAULT_PF_FUNC' with * NPA LF attached to this RVU PF/VF. diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h index 7724f17ec31f..c587c14ac2a3 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h @@ -17,6 +17,7 @@ #include <linux/soc/marvell/octeontx2/asm.h> #include <net/pkt_cls.h> #include <net/devlink.h> +#include <linux/time64.h> #include <mbox.h> #include <npc.h> @@ -180,6 +181,7 @@ struct otx2_hw { #define OTX2_DEFAULT_RBUF_LEN 2048 u16 rbuf_len; + u32 xqe_size; /* NPA */ u32 stack_pg_ptrs; /* No of ptrs per stack page */ @@ -275,6 +277,8 @@ struct otx2_ptp { u64 thresh; struct ptp_pin_desc extts_config; + u64 (*convert_rx_ptp_tstmp)(u64 timestamp); + u64 (*convert_tx_ptp_tstmp)(u64 timestamp); }; #define OTX2_HW_TIMESTAMP_LEN 8 diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c index abe5267210ef..fc328de5345e 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c @@ -372,6 +372,7 @@ static void otx2_get_ringparam(struct net_device *netdev, ring->tx_max_pending = Q_COUNT(Q_SIZE_MAX); ring->tx_pending = qs->sqe_cnt ? 
qs->sqe_cnt : Q_COUNT(Q_SIZE_4K); kernel_ring->rx_buf_len = pfvf->hw.rbuf_len; + kernel_ring->cqe_size = pfvf->hw.xqe_size; } static int otx2_set_ringparam(struct net_device *netdev, @@ -382,6 +383,7 @@ static int otx2_set_ringparam(struct net_device *netdev, struct otx2_nic *pfvf = netdev_priv(netdev); u32 rx_buf_len = kernel_ring->rx_buf_len; u32 old_rx_buf_len = pfvf->hw.rbuf_len; + u32 xqe_size = kernel_ring->cqe_size; bool if_up = netif_running(netdev); struct otx2_qset *qs = &pfvf->qset; u32 rx_count, tx_count; @@ -398,6 +400,12 @@ static int otx2_set_ringparam(struct net_device *netdev, return -EINVAL; } + if (xqe_size != 128 && xqe_size != 512) { + netdev_err(netdev, + "Completion event size must be 128 or 512"); + return -EINVAL; + } + /* Permitted lengths are 16 64 256 1K 4K 16K 64K 256K 1M */ rx_count = ring->rx_pending; /* On some silicon variants a skid or reserved CQEs are @@ -416,7 +424,7 @@ static int otx2_set_ringparam(struct net_device *netdev, tx_count = Q_COUNT(Q_SIZE(tx_count, 3)); if (tx_count == qs->sqe_cnt && rx_count == qs->rqe_cnt && - rx_buf_len == old_rx_buf_len) + rx_buf_len == old_rx_buf_len && xqe_size == pfvf->hw.xqe_size) return 0; if (if_up) @@ -427,6 +435,7 @@ static int otx2_set_ringparam(struct net_device *netdev, qs->rqe_cnt = rx_count; pfvf->hw.rbuf_len = rx_buf_len; + pfvf->hw.xqe_size = xqe_size; if (if_up) return netdev->netdev_ops->ndo_open(netdev); @@ -1222,7 +1231,8 @@ end: static const struct ethtool_ops otx2_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_MAX_FRAMES, - .supported_ring_params = ETHTOOL_RING_USE_RX_BUF_LEN, + .supported_ring_params = ETHTOOL_RING_USE_RX_BUF_LEN | + ETHTOOL_RING_USE_CQE_SIZE, .get_link = otx2_get_link, .get_drvinfo = otx2_get_drvinfo, .get_strings = otx2_get_strings, @@ -1342,7 +1352,8 @@ static int otx2vf_get_link_ksettings(struct net_device *netdev, static const struct ethtool_ops otx2vf_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_MAX_FRAMES, - .supported_ring_params = ETHTOOL_RING_USE_RX_BUF_LEN, + .supported_ring_params = ETHTOOL_RING_USE_RX_BUF_LEN | + ETHTOOL_RING_USE_CQE_SIZE, .get_link = otx2_get_link, .get_drvinfo = otx2vf_get_drvinfo, .get_strings = otx2vf_get_strings, diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index a5369167ab54..441aafc26a08 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -2585,6 +2585,8 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) hw->tot_tx_queues = qcount; hw->max_queues = qcount; hw->rbuf_len = OTX2_DEFAULT_RBUF_LEN; + /* Use CQE of 128 byte descriptor size by default */ + hw->xqe_size = 128; num_vec = pci_msix_vec_count(pdev); hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE, diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c index 61c20907315f..fdc2c9315b91 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c @@ -294,6 +294,14 @@ int otx2_ptp_init(struct otx2_nic *pfvf) goto error; } + if (is_dev_otx2(pfvf->pdev)) { + ptp_ptr->convert_rx_ptp_tstmp = &otx2_ptp_convert_rx_timestamp; + ptp_ptr->convert_tx_ptp_tstmp = &otx2_ptp_convert_tx_timestamp; + } else { + ptp_ptr->convert_rx_ptp_tstmp = &cn10k_ptp_convert_timestamp; + 
ptp_ptr->convert_tx_ptp_tstmp = &cn10k_ptp_convert_timestamp; + } + pfvf->ptp = ptp_ptr; error: diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h index 6ff284211d7b..7ff41927ceaf 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h @@ -8,6 +8,21 @@ #ifndef OTX2_PTP_H #define OTX2_PTP_H +static inline u64 otx2_ptp_convert_rx_timestamp(u64 timestamp) +{ + return be64_to_cpu(*(__be64 *)×tamp); +} + +static inline u64 otx2_ptp_convert_tx_timestamp(u64 timestamp) +{ + return timestamp; +} + +static inline u64 cn10k_ptp_convert_timestamp(u64 timestamp) +{ + return ((timestamp >> 32) * NSEC_PER_SEC) + (timestamp & 0xFFFFFFFFUL); +} + int otx2_ptp_init(struct otx2_nic *pfvf); void otx2_ptp_destroy(struct otx2_nic *pfvf); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c index 0593106d7161..28b19945d716 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c @@ -190,6 +190,40 @@ static int otx2_tc_validate_flow(struct otx2_nic *nic, return 0; } +static int otx2_policer_validate(const struct flow_action *action, + const struct flow_action_entry *act, + struct netlink_ext_ack *extack) +{ + if (act->police.exceed.act_id != FLOW_ACTION_DROP) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when exceed action is not drop"); + return -EOPNOTSUPP; + } + + if (act->police.notexceed.act_id != FLOW_ACTION_PIPE && + act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when conform action is not pipe or ok"); + return -EOPNOTSUPP; + } + + if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT && + !flow_action_is_last_entry(action, act)) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when conform action is ok, but action is not last"); + return -EOPNOTSUPP; + } + + if (act->police.peakrate_bytes_ps || + act->police.avrate || act->police.overhead) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when peakrate/avrate/overhead is configured"); + return -EOPNOTSUPP; + } + + return 0; +} + static int otx2_tc_egress_matchall_install(struct otx2_nic *nic, struct tc_cls_matchall_offload *cls) { @@ -212,6 +246,10 @@ static int otx2_tc_egress_matchall_install(struct otx2_nic *nic, entry = &cls->rule->action.entries[0]; switch (entry->id) { case FLOW_ACTION_POLICE: + err = otx2_policer_validate(&cls->rule->action, entry, extack); + if (err) + return err; + if (entry->police.rate_pkt_ps) { NL_SET_ERR_MSG_MOD(extack, "QoS offload not support packets per second"); return -EOPNOTSUPP; @@ -315,6 +353,7 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic, u8 nr_police = 0; bool pps = false; u64 rate; + int err; int i; if (!flow_action_has_entries(flow_action)) { @@ -355,6 +394,10 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic, return -EOPNOTSUPP; } + err = otx2_policer_validate(flow_action, act, extack); + if (err) + return err; + if (act->police.rate_bytes_ps > 0) { rate = act->police.rate_bytes_ps * 8; burst = act->police.burst; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c index 7c4068c5d1ac..c26de15b2ac3 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c @@ -148,6 +148,7 @@ static void 
otx2_snd_pkt_handler(struct otx2_nic *pfvf, if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) { timestamp = ((u64 *)sq->timestamps->base)[snd_comp->sqe_id]; if (timestamp != 1) { + timestamp = pfvf->ptp->convert_tx_ptp_tstmp(timestamp); err = otx2_ptp_tstamp2time(pfvf, timestamp, &tsns); if (!err) { memset(&ts, 0, sizeof(ts)); @@ -167,14 +168,15 @@ static void otx2_snd_pkt_handler(struct otx2_nic *pfvf, static void otx2_set_rxtstamp(struct otx2_nic *pfvf, struct sk_buff *skb, void *data) { - u64 tsns; + u64 timestamp, tsns; int err; if (!(pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED)) return; + timestamp = pfvf->ptp->convert_rx_ptp_tstmp(*(u64 *)data); /* The first 8 bytes is the timestamp */ - err = otx2_ptp_tstamp2time(pfvf, be64_to_cpu(*(__be64 *)data), &tsns); + err = otx2_ptp_tstamp2time(pfvf, timestamp, &tsns); if (err) return; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c index a232e202f6a4..9e87836ed8bf 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c @@ -572,6 +572,8 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id) hw->max_queues = qcount; hw->tot_tx_queues = qcount; hw->rbuf_len = OTX2_DEFAULT_RBUF_LEN; + /* Use CQE of 128 byte descriptor size by default */ + hw->xqe_size = 128; hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE, GFP_KERNEL); diff --git a/drivers/net/ethernet/marvell/prestera/prestera.h b/drivers/net/ethernet/marvell/prestera/prestera.h index 2fd9ef2fe5d6..6f754ae2a584 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera.h +++ b/drivers/net/ethernet/marvell/prestera/prestera.h @@ -281,8 +281,11 @@ struct prestera_router { struct prestera_switch *sw; struct list_head vr_list; struct list_head rif_entry_list; + struct rhashtable fib_ht; + struct rhashtable kern_fib_cache_ht; struct notifier_block inetaddr_nb; struct notifier_block inetaddr_valid_nb; + struct notifier_block fib_nb; }; struct prestera_rxtx_params { @@ -325,6 +328,8 @@ int prestera_port_cfg_mac_write(struct prestera_port *port, struct prestera_port *prestera_port_dev_lower_find(struct net_device *dev); +void prestera_queue_work(struct work_struct *work); + int prestera_port_pvid_set(struct prestera_port *port, u16 vid); bool prestera_netdev_check(const struct net_device *dev); diff --git a/drivers/net/ethernet/marvell/prestera/prestera_acl.c b/drivers/net/ethernet/marvell/prestera/prestera_acl.c index f0d9f592173b..47c899c08951 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_acl.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_acl.c @@ -22,6 +22,7 @@ struct prestera_acl { struct prestera_acl_ruleset_ht_key { struct prestera_flow_block *block; + u32 chain_index; }; struct prestera_acl_rule_entry { @@ -34,6 +35,10 @@ struct prestera_acl_rule_entry { u8 valid:1; } accept, drop, trap; struct { + struct prestera_acl_action_jump i; + u8 valid:1; + } jump; + struct { u32 id; struct prestera_counter_block *block; } counter; @@ -49,6 +54,7 @@ struct prestera_acl_ruleset { refcount_t refcount; void *keymask; u32 vtcam_id; + u32 index; u16 pcl_id; bool offload; }; @@ -83,20 +89,45 @@ static const struct rhashtable_params __prestera_acl_rule_entry_ht_params = { .automatic_shrinking = true, }; +int prestera_acl_chain_to_client(u32 chain_index, u32 *client) +{ + static const u32 client_map[] = { + PRESTERA_HW_COUNTER_CLIENT_LOOKUP_0, + PRESTERA_HW_COUNTER_CLIENT_LOOKUP_1, + 
PRESTERA_HW_COUNTER_CLIENT_LOOKUP_2 + }; + + if (chain_index >= ARRAY_SIZE(client_map)) + return -EINVAL; + + *client = client_map[chain_index]; + return 0; +} + +static bool prestera_acl_chain_is_supported(u32 chain_index) +{ + return (chain_index & ~PRESTERA_ACL_CHAIN_MASK) == 0; +} + static struct prestera_acl_ruleset * prestera_acl_ruleset_create(struct prestera_acl *acl, - struct prestera_flow_block *block) + struct prestera_flow_block *block, + u32 chain_index) { struct prestera_acl_ruleset *ruleset; u32 uid = 0; int err; + if (!prestera_acl_chain_is_supported(chain_index)) + return ERR_PTR(-EINVAL); + ruleset = kzalloc(sizeof(*ruleset), GFP_KERNEL); if (!ruleset) return ERR_PTR(-ENOMEM); ruleset->acl = acl; ruleset->ht_key.block = block; + ruleset->ht_key.chain_index = chain_index; refcount_set(&ruleset->refcount, 1); err = rhashtable_init(&ruleset->rule_ht, &prestera_acl_rule_ht_params); @@ -108,7 +139,9 @@ prestera_acl_ruleset_create(struct prestera_acl *acl, goto err_ruleset_create; /* make pcl-id based on uid */ - ruleset->pcl_id = (u8)uid; + ruleset->pcl_id = PRESTERA_ACL_PCL_ID_MAKE((u8)uid, chain_index); + ruleset->index = uid; + err = rhashtable_insert_fast(&acl->ruleset_ht, &ruleset->ht_node, prestera_acl_ruleset_ht_params); if (err) @@ -133,35 +166,64 @@ void prestera_acl_ruleset_keymask_set(struct prestera_acl_ruleset *ruleset, int prestera_acl_ruleset_offload(struct prestera_acl_ruleset *ruleset) { + struct prestera_acl_iface iface; u32 vtcam_id; int err; if (ruleset->offload) return -EEXIST; - err = prestera_acl_vtcam_id_get(ruleset->acl, 0, + err = prestera_acl_vtcam_id_get(ruleset->acl, + ruleset->ht_key.chain_index, ruleset->keymask, &vtcam_id); if (err) - return err; + goto err_vtcam_create; + + if (ruleset->ht_key.chain_index) { + /* for chain > 0, bind iface index to pcl-id to be able + * to jump from any other ruleset to this one using the index. 
+ */ + iface.index = ruleset->index; + iface.type = PRESTERA_ACL_IFACE_TYPE_INDEX; + err = prestera_hw_vtcam_iface_bind(ruleset->acl->sw, &iface, + vtcam_id, ruleset->pcl_id); + if (err) + goto err_ruleset_bind; + } ruleset->vtcam_id = vtcam_id; ruleset->offload = true; return 0; + +err_ruleset_bind: + prestera_acl_vtcam_id_put(ruleset->acl, ruleset->vtcam_id); +err_vtcam_create: + return err; } static void prestera_acl_ruleset_destroy(struct prestera_acl_ruleset *ruleset) { struct prestera_acl *acl = ruleset->acl; u8 uid = ruleset->pcl_id & PRESTERA_ACL_KEYMASK_PCL_ID_USER; + int err; rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node, prestera_acl_ruleset_ht_params); - if (ruleset->offload) + if (ruleset->offload) { + if (ruleset->ht_key.chain_index) { + struct prestera_acl_iface iface = { + .type = PRESTERA_ACL_IFACE_TYPE_INDEX, + .index = ruleset->index + }; + err = prestera_hw_vtcam_iface_unbind(acl->sw, &iface, + ruleset->vtcam_id); + WARN_ON(err); + } WARN_ON(prestera_acl_vtcam_id_put(acl, ruleset->vtcam_id)); + } idr_remove(&acl->uid, uid); - rhashtable_destroy(&ruleset->rule_ht); kfree(ruleset->keymask); kfree(ruleset); @@ -169,23 +231,26 @@ static void prestera_acl_ruleset_destroy(struct prestera_acl_ruleset *ruleset) static struct prestera_acl_ruleset * __prestera_acl_ruleset_lookup(struct prestera_acl *acl, - struct prestera_flow_block *block) + struct prestera_flow_block *block, + u32 chain_index) { struct prestera_acl_ruleset_ht_key ht_key; memset(&ht_key, 0, sizeof(ht_key)); ht_key.block = block; + ht_key.chain_index = chain_index; return rhashtable_lookup_fast(&acl->ruleset_ht, &ht_key, prestera_acl_ruleset_ht_params); } struct prestera_acl_ruleset * prestera_acl_ruleset_lookup(struct prestera_acl *acl, - struct prestera_flow_block *block) + struct prestera_flow_block *block, + u32 chain_index) { struct prestera_acl_ruleset *ruleset; - ruleset = __prestera_acl_ruleset_lookup(acl, block); + ruleset = __prestera_acl_ruleset_lookup(acl, block, chain_index); if (!ruleset) return ERR_PTR(-ENOENT); @@ -195,17 +260,18 @@ prestera_acl_ruleset_lookup(struct prestera_acl *acl, struct prestera_acl_ruleset * prestera_acl_ruleset_get(struct prestera_acl *acl, - struct prestera_flow_block *block) + struct prestera_flow_block *block, + u32 chain_index) { struct prestera_acl_ruleset *ruleset; - ruleset = __prestera_acl_ruleset_lookup(acl, block); + ruleset = __prestera_acl_ruleset_lookup(acl, block, chain_index); if (ruleset) { refcount_inc(&ruleset->refcount); return ruleset; } - return prestera_acl_ruleset_create(acl, block); + return prestera_acl_ruleset_create(acl, block, chain_index); } void prestera_acl_ruleset_put(struct prestera_acl_ruleset *ruleset) @@ -293,6 +359,11 @@ prestera_acl_rule_lookup(struct prestera_acl_ruleset *ruleset, prestera_acl_rule_ht_params); } +u32 prestera_acl_ruleset_index_get(const struct prestera_acl_ruleset *ruleset) +{ + return ruleset->index; +} + bool prestera_acl_ruleset_is_offload(struct prestera_acl_ruleset *ruleset) { return ruleset->offload; @@ -300,7 +371,7 @@ bool prestera_acl_ruleset_is_offload(struct prestera_acl_ruleset *ruleset) struct prestera_acl_rule * prestera_acl_rule_create(struct prestera_acl_ruleset *ruleset, - unsigned long cookie) + unsigned long cookie, u32 chain_index) { struct prestera_acl_rule *rule; @@ -310,6 +381,7 @@ prestera_acl_rule_create(struct prestera_acl_ruleset *ruleset, rule->ruleset = ruleset; rule->cookie = cookie; + rule->chain_index = chain_index; refcount_inc(&ruleset->refcount); @@ -324,6 +396,10 @@ 
void prestera_acl_rule_priority_set(struct prestera_acl_rule *rule, void prestera_acl_rule_destroy(struct prestera_acl_rule *rule) { + if (rule->jump_ruleset) + /* release ruleset kept by jump action */ + prestera_acl_ruleset_put(rule->jump_ruleset); + prestera_acl_ruleset_put(rule->ruleset); kfree(rule); } @@ -347,7 +423,10 @@ int prestera_acl_rule_add(struct prestera_switch *sw, /* setup counter */ rule->re_arg.count.valid = true; - rule->re_arg.count.client = PRESTERA_HW_COUNTER_CLIENT_LOOKUP_0; + err = prestera_acl_chain_to_client(ruleset->ht_key.chain_index, + &rule->re_arg.count.client); + if (err) + goto err_rule_add; rule->re = prestera_acl_rule_entry_find(sw->acl, &rule->re_key); err = WARN_ON(rule->re) ? -EEXIST : 0; @@ -360,8 +439,10 @@ int prestera_acl_rule_add(struct prestera_switch *sw, if (err) goto err_rule_add; - /* bind the block (all ports) to chain index 0 */ - if (!ruleset->rule_count) { + /* bind the block (all ports) to chain index 0, rest of + * the chains are bound to goto action + */ + if (!ruleset->ht_key.chain_index && !ruleset->rule_count) { err = prestera_acl_ruleset_block_bind(ruleset, block); if (err) goto err_acl_block_bind; @@ -395,7 +476,7 @@ void prestera_acl_rule_del(struct prestera_switch *sw, prestera_acl_rule_entry_destroy(sw->acl, rule->re); /* unbind block (all ports) */ - if (!ruleset->rule_count) + if (!ruleset->ht_key.chain_index && !ruleset->rule_count) prestera_acl_ruleset_block_unbind(ruleset, block); } @@ -459,6 +540,12 @@ static int __prestera_acl_rule_entry2hw_add(struct prestera_switch *sw, act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_TRAP; act_num++; } + /* jump */ + if (e->jump.valid) { + act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_JUMP; + act_hw[act_num].jump = e->jump.i; + act_num++; + } /* counter */ if (e->counter.block) { act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_COUNT; @@ -505,6 +592,9 @@ __prestera_acl_rule_entry_act_construct(struct prestera_switch *sw, e->drop.valid = arg->drop.valid; /* trap */ e->trap.valid = arg->trap.valid; + /* jump */ + e->jump.valid = arg->jump.valid; + e->jump.i = arg->jump.i; /* counter */ if (arg->count.valid) { int err; diff --git a/drivers/net/ethernet/marvell/prestera/prestera_acl.h b/drivers/net/ethernet/marvell/prestera/prestera_acl.h index 40f6c1d961fa..6d2ad27682d1 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_acl.h +++ b/drivers/net/ethernet/marvell/prestera/prestera_acl.h @@ -10,6 +10,14 @@ #define PRESTERA_ACL_KEYMASK_PCL_ID 0x3FF #define PRESTERA_ACL_KEYMASK_PCL_ID_USER \ (PRESTERA_ACL_KEYMASK_PCL_ID & 0x00FF) +#define PRESTERA_ACL_KEYMASK_PCL_ID_CHAIN \ + (PRESTERA_ACL_KEYMASK_PCL_ID & 0xFF00) +#define PRESTERA_ACL_CHAIN_MASK \ + (PRESTERA_ACL_KEYMASK_PCL_ID >> 8) + +#define PRESTERA_ACL_PCL_ID_MAKE(uid, chain_id) \ + (((uid) & PRESTERA_ACL_KEYMASK_PCL_ID_USER) | \ + (((chain_id) << 8) & PRESTERA_ACL_KEYMASK_PCL_ID_CHAIN)) #define rule_match_set_n(match_p, type, val_p, size) \ memcpy(&(match_p)[PRESTERA_ACL_RULE_MATCH_TYPE_##type], \ @@ -46,6 +54,7 @@ enum prestera_acl_rule_action { PRESTERA_ACL_RULE_ACTION_ACCEPT = 0, PRESTERA_ACL_RULE_ACTION_DROP = 1, PRESTERA_ACL_RULE_ACTION_TRAP = 2, + PRESTERA_ACL_RULE_ACTION_JUMP = 5, PRESTERA_ACL_RULE_ACTION_COUNT = 7, PRESTERA_ACL_RULE_ACTION_MAX @@ -61,6 +70,10 @@ struct prestera_acl_match { __be32 mask[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX]; }; +struct prestera_acl_action_jump { + u32 index; +}; + struct prestera_acl_action_count { u32 id; }; @@ -74,6 +87,7 @@ struct prestera_acl_hw_action_info { enum prestera_acl_rule_action 
id; union { struct prestera_acl_action_count count; + struct prestera_acl_action_jump jump; }; }; @@ -88,6 +102,10 @@ struct prestera_acl_rule_entry_arg { u8 valid:1; } accept, drop, trap; struct { + struct prestera_acl_action_jump i; + u8 valid:1; + } jump; + struct { u8 valid:1; u32 client; } count; @@ -98,7 +116,9 @@ struct prestera_acl_rule { struct rhash_head ht_node; /* Member of acl HT */ struct list_head list; struct prestera_acl_ruleset *ruleset; + struct prestera_acl_ruleset *jump_ruleset; unsigned long cookie; + u32 chain_index; u32 priority; struct prestera_acl_rule_entry_key re_key; struct prestera_acl_rule_entry_arg re_arg; @@ -122,7 +142,7 @@ void prestera_acl_fini(struct prestera_switch *sw); struct prestera_acl_rule * prestera_acl_rule_create(struct prestera_acl_ruleset *ruleset, - unsigned long cookie); + unsigned long cookie, u32 chain_index); void prestera_acl_rule_priority_set(struct prestera_acl_rule *rule, u32 priority); void prestera_acl_rule_destroy(struct prestera_acl_rule *rule); @@ -147,10 +167,12 @@ prestera_acl_rule_entry_create(struct prestera_acl *acl, struct prestera_acl_rule_entry_arg *arg); struct prestera_acl_ruleset * prestera_acl_ruleset_get(struct prestera_acl *acl, - struct prestera_flow_block *block); + struct prestera_flow_block *block, + u32 chain_index); struct prestera_acl_ruleset * prestera_acl_ruleset_lookup(struct prestera_acl *acl, - struct prestera_flow_block *block); + struct prestera_flow_block *block, + u32 chain_index); void prestera_acl_ruleset_keymask_set(struct prestera_acl_ruleset *ruleset, void *keymask); bool prestera_acl_ruleset_is_offload(struct prestera_acl_ruleset *ruleset); @@ -160,6 +182,7 @@ int prestera_acl_ruleset_bind(struct prestera_acl_ruleset *ruleset, struct prestera_port *port); int prestera_acl_ruleset_unbind(struct prestera_acl_ruleset *ruleset, struct prestera_port *port); +u32 prestera_acl_ruleset_index_get(const struct prestera_acl_ruleset *ruleset); void prestera_acl_rule_keymask_pcl_id_set(struct prestera_acl_rule *rule, u16 pcl_id); @@ -167,5 +190,6 @@ prestera_acl_rule_keymask_pcl_id_set(struct prestera_acl_rule *rule, int prestera_acl_vtcam_id_get(struct prestera_acl *acl, u8 lookup, void *keymask, u32 *vtcam_id); int prestera_acl_vtcam_id_put(struct prestera_acl *acl, u32 vtcam_id); +int prestera_acl_chain_to_client(u32 chain_index, u32 *client); #endif /* _PRESTERA_ACL_H_ */ diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flow.c b/drivers/net/ethernet/marvell/prestera/prestera_flow.c index d849f046ece7..05c3ad98eba9 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_flow.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_flow.c @@ -29,9 +29,6 @@ static int prestera_flow_block_mall_cb(struct prestera_flow_block *block, static int prestera_flow_block_flower_cb(struct prestera_flow_block *block, struct flow_cls_offload *f) { - if (f->common.chain_index != 0) - return -EOPNOTSUPP; - switch (f->command) { case FLOW_CLS_REPLACE: return prestera_flower_replace(block, f); @@ -71,6 +68,7 @@ static void prestera_flow_block_destroy(void *cb_priv) prestera_flower_template_cleanup(block); + WARN_ON(!list_empty(&block->template_list)); WARN_ON(!list_empty(&block->binding_list)); kfree(block); @@ -86,6 +84,7 @@ prestera_flow_block_create(struct prestera_switch *sw, struct net *net) return NULL; INIT_LIST_HEAD(&block->binding_list); + INIT_LIST_HEAD(&block->template_list); block->net = net; block->sw = sw; diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flow.h 
b/drivers/net/ethernet/marvell/prestera/prestera_flow.h index 1ea5b745bf72..6550278b166a 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_flow.h +++ b/drivers/net/ethernet/marvell/prestera/prestera_flow.h @@ -8,7 +8,6 @@ struct prestera_port; struct prestera_switch; -struct prestera_flower_template; struct prestera_flow_block_binding { struct list_head list; @@ -22,7 +21,7 @@ struct prestera_flow_block { struct net *net; struct prestera_acl_ruleset *ruleset_zero; struct flow_block_cb *block_cb; - struct prestera_flower_template *tmplt; + struct list_head template_list; unsigned int rule_count; }; diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flower.c b/drivers/net/ethernet/marvell/prestera/prestera_flower.c index 19c1417fd05f..921959a980ee 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_flower.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_flower.c @@ -8,26 +8,63 @@ struct prestera_flower_template { struct prestera_acl_ruleset *ruleset; + struct list_head list; + u32 chain_index; }; +static void +prestera_flower_template_free(struct prestera_flower_template *template) +{ + prestera_acl_ruleset_put(template->ruleset); + list_del(&template->list); + kfree(template); +} + void prestera_flower_template_cleanup(struct prestera_flow_block *block) { - if (block->tmplt) { - /* put the reference to the ruleset kept in create */ - prestera_acl_ruleset_put(block->tmplt->ruleset); - kfree(block->tmplt); - block->tmplt = NULL; - return; - } + struct prestera_flower_template *template, *tmp; + + /* put the reference to all rulesets kept in tmpl create */ + list_for_each_entry_safe(template, tmp, &block->template_list, list) + prestera_flower_template_free(template); +} + +static int +prestera_flower_parse_goto_action(struct prestera_flow_block *block, + struct prestera_acl_rule *rule, + u32 chain_index, + const struct flow_action_entry *act) +{ + struct prestera_acl_ruleset *ruleset; + + if (act->chain_index <= chain_index) + /* we can jump only forward */ + return -EINVAL; + + if (rule->re_arg.jump.valid) + return -EEXIST; + + ruleset = prestera_acl_ruleset_get(block->sw->acl, block, + act->chain_index); + if (IS_ERR(ruleset)) + return PTR_ERR(ruleset); + + rule->re_arg.jump.valid = 1; + rule->re_arg.jump.i.index = prestera_acl_ruleset_index_get(ruleset); + + rule->jump_ruleset = ruleset; + + return 0; } static int prestera_flower_parse_actions(struct prestera_flow_block *block, struct prestera_acl_rule *rule, struct flow_action *flow_action, + u32 chain_index, struct netlink_ext_ack *extack) { const struct flow_action_entry *act; - int i; + int err, i; /* whole struct (rule->re_arg) must be initialized with 0 */ if (!flow_action_has_entries(flow_action)) @@ -53,6 +90,13 @@ static int prestera_flower_parse_actions(struct prestera_flow_block *block, rule->re_arg.trap.valid = 1; break; + case FLOW_ACTION_GOTO: + err = prestera_flower_parse_goto_action(block, rule, + chain_index, + act); + if (err) + return err; + break; default: NL_SET_ERR_MSG_MOD(extack, "Unsupported action"); pr_err("Unsupported action\n"); @@ -259,6 +303,7 @@ static int prestera_flower_parse(struct prestera_flow_block *block, } return prestera_flower_parse_actions(block, rule, &f->rule->action, + f->common.chain_index, f->common.extack); } @@ -270,12 +315,13 @@ int prestera_flower_replace(struct prestera_flow_block *block, struct prestera_acl_rule *rule; int err; - ruleset = prestera_acl_ruleset_get(acl, block); + ruleset = prestera_acl_ruleset_get(acl, block, f->common.chain_index); if 
(IS_ERR(ruleset)) return PTR_ERR(ruleset); /* increments the ruleset reference */ - rule = prestera_acl_rule_create(ruleset, f->cookie); + rule = prestera_acl_rule_create(ruleset, f->cookie, + f->common.chain_index); if (IS_ERR(rule)) { err = PTR_ERR(rule); goto err_rule_create; @@ -312,7 +358,8 @@ void prestera_flower_destroy(struct prestera_flow_block *block, struct prestera_acl_ruleset *ruleset; struct prestera_acl_rule *rule; - ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block); + ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block, + f->common.chain_index); if (IS_ERR(ruleset)) return; @@ -345,7 +392,8 @@ int prestera_flower_tmplt_create(struct prestera_flow_block *block, } prestera_acl_rule_keymask_pcl_id_set(&rule, 0); - ruleset = prestera_acl_ruleset_get(block->sw->acl, block); + ruleset = prestera_acl_ruleset_get(block->sw->acl, block, + f->common.chain_index); if (IS_ERR_OR_NULL(ruleset)) { err = -EINVAL; goto err_ruleset_get; @@ -364,7 +412,8 @@ int prestera_flower_tmplt_create(struct prestera_flow_block *block, /* keep the reference to the ruleset */ template->ruleset = ruleset; - block->tmplt = template; + template->chain_index = f->common.chain_index; + list_add_rcu(&template->list, &block->template_list); return 0; err_ruleset_get: @@ -377,7 +426,14 @@ err_malloc: void prestera_flower_tmplt_destroy(struct prestera_flow_block *block, struct flow_cls_offload *f) { - prestera_flower_template_cleanup(block); + struct prestera_flower_template *template, *tmp; + + list_for_each_entry_safe(template, tmp, &block->template_list, list) + if (template->chain_index == f->common.chain_index) { + /* put the reference to the ruleset kept in create */ + prestera_flower_template_free(template); + return; + } } int prestera_flower_stats(struct prestera_flow_block *block, @@ -390,7 +446,8 @@ int prestera_flower_stats(struct prestera_flow_block *block, u64 bytes; int err; - ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block); + ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block, + f->common.chain_index); if (IS_ERR(ruleset)) return PTR_ERR(ruleset); diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flower.h b/drivers/net/ethernet/marvell/prestera/prestera_flower.h index dc3aa4280e9f..495f151e6fa9 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_flower.h +++ b/drivers/net/ethernet/marvell/prestera/prestera_flower.h @@ -6,7 +6,6 @@ #include <net/pkt_cls.h> -struct prestera_switch; struct prestera_flow_block; int prestera_flower_replace(struct prestera_flow_block *block, diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.c b/drivers/net/ethernet/marvell/prestera/prestera_hw.c index e6bfadc874c5..c66cc929c820 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_hw.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.c @@ -55,6 +55,8 @@ enum prestera_cmd_type_t { PRESTERA_CMD_TYPE_ROUTER_RIF_CREATE = 0x600, PRESTERA_CMD_TYPE_ROUTER_RIF_DELETE = 0x601, + PRESTERA_CMD_TYPE_ROUTER_LPM_ADD = 0x610, + PRESTERA_CMD_TYPE_ROUTER_LPM_DELETE = 0x611, PRESTERA_CMD_TYPE_ROUTER_VR_CREATE = 0x630, PRESTERA_CMD_TYPE_ROUTER_VR_DELETE = 0x631, @@ -424,6 +426,9 @@ struct prestera_msg_acl_action { __le32 __reserved; union { struct { + __le32 index; + } jump; + struct { __le32 id; } count; __le32 reserved[6]; @@ -499,6 +504,15 @@ struct prestera_msg_iface { u8 __pad[3]; }; +struct prestera_msg_ip_addr { + union { + __be32 ipv4; + __be32 ipv6[4]; + } u; + u8 v; /* e.g. 
PRESTERA_IPV4 */ + u8 __pad[3]; +}; + struct prestera_msg_rif_req { struct prestera_msg_cmd cmd; struct prestera_msg_iface iif; @@ -515,6 +529,15 @@ struct prestera_msg_rif_resp { u8 __pad[2]; }; +struct prestera_msg_lpm_req { + struct prestera_msg_cmd cmd; + struct prestera_msg_ip_addr dst; + __le32 grp_id; + __le32 dst_len; + __le16 vr_id; + u8 __pad[2]; +}; + struct prestera_msg_vr_req { struct prestera_msg_cmd cmd; __le16 vr_id; @@ -598,9 +621,11 @@ static void prestera_hw_build_tests(void) BUILD_BUG_ON(sizeof(struct prestera_msg_counter_stats) != 16); BUILD_BUG_ON(sizeof(struct prestera_msg_rif_req) != 36); BUILD_BUG_ON(sizeof(struct prestera_msg_vr_req) != 8); + BUILD_BUG_ON(sizeof(struct prestera_msg_lpm_req) != 36); /* structure that are part of req/resp fw messages */ BUILD_BUG_ON(sizeof(struct prestera_msg_iface) != 16); + BUILD_BUG_ON(sizeof(struct prestera_msg_ip_addr) != 20); /* check responses */ BUILD_BUG_ON(sizeof(struct prestera_msg_common_resp) != 8); @@ -1164,6 +1189,9 @@ prestera_acl_rule_add_put_action(struct prestera_msg_acl_action *action, case PRESTERA_ACL_RULE_ACTION_TRAP: /* just rule action id, no specific data */ break; + case PRESTERA_ACL_RULE_ACTION_JUMP: + action->jump.index = __cpu_to_le32(info->jump.index); + break; case PRESTERA_ACL_RULE_ACTION_COUNT: action->count.id = __cpu_to_le32(info->count.id); break; @@ -1891,6 +1919,33 @@ int prestera_hw_vr_delete(struct prestera_switch *sw, u16 vr_id) sizeof(req)); } +int prestera_hw_lpm_add(struct prestera_switch *sw, u16 vr_id, + __be32 dst, u32 dst_len, u32 grp_id) +{ + struct prestera_msg_lpm_req req = { + .dst_len = __cpu_to_le32(dst_len), + .vr_id = __cpu_to_le16(vr_id), + .grp_id = __cpu_to_le32(grp_id), + .dst.u.ipv4 = dst + }; + + return prestera_cmd(sw, PRESTERA_CMD_TYPE_ROUTER_LPM_ADD, &req.cmd, + sizeof(req)); +} + +int prestera_hw_lpm_del(struct prestera_switch *sw, u16 vr_id, + __be32 dst, u32 dst_len) +{ + struct prestera_msg_lpm_req req = { + .dst_len = __cpu_to_le32(dst_len), + .vr_id = __cpu_to_le16(vr_id), + .dst.u.ipv4 = dst + }; + + return prestera_cmd(sw, PRESTERA_CMD_TYPE_ROUTER_LPM_DELETE, &req.cmd, + sizeof(req)); +} + int prestera_hw_rxtx_init(struct prestera_switch *sw, struct prestera_rxtx_params *params) { diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.h b/drivers/net/ethernet/marvell/prestera/prestera_hw.h index 3ff12bae5909..fd896a8838bb 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_hw.h +++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.h @@ -249,6 +249,12 @@ int prestera_hw_rif_delete(struct prestera_switch *sw, u16 rif_id, int prestera_hw_vr_create(struct prestera_switch *sw, u16 *vr_id); int prestera_hw_vr_delete(struct prestera_switch *sw, u16 vr_id); +/* LPM PI */ +int prestera_hw_lpm_add(struct prestera_switch *sw, u16 vr_id, + __be32 dst, u32 dst_len, u32 grp_id); +int prestera_hw_lpm_del(struct prestera_switch *sw, u16 vr_id, + __be32 dst, u32 dst_len); + /* Event handlers */ int prestera_hw_event_handler_register(struct prestera_switch *sw, enum prestera_event_type type, diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c index cad93f747d0c..1402c7889e78 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_main.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c @@ -28,6 +28,12 @@ #define PRESTERA_MAC_ADDR_NUM_MAX 255 static struct workqueue_struct *prestera_wq; +static struct workqueue_struct *prestera_owq; + +void prestera_queue_work(struct 
work_struct *work) +{ + queue_work(prestera_owq, work); +} int prestera_port_pvid_set(struct prestera_port *port, u16 vid) { @@ -554,6 +560,7 @@ static int prestera_switch_set_base_mac_addr(struct prestera_switch *sw) dev_info(prestera_dev(sw), "using random base mac address\n"); } of_node_put(base_mac_np); + of_node_put(np); return prestera_hw_switch_mac_set(sw, sw->base_mac); } @@ -1024,12 +1031,19 @@ static int __init prestera_module_init(void) if (!prestera_wq) return -ENOMEM; + prestera_owq = alloc_ordered_workqueue("prestera_ordered", 0); + if (!prestera_owq) { + destroy_workqueue(prestera_wq); + return -ENOMEM; + } + return 0; } static void __exit prestera_module_exit(void) { destroy_workqueue(prestera_wq); + destroy_workqueue(prestera_owq); } module_init(prestera_module_init); diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router.c b/drivers/net/ethernet/marvell/prestera/prestera_router.c index 6ef4d32b8fdd..6c5618cf4f08 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_router.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_router.c @@ -5,10 +5,39 @@ #include <linux/types.h> #include <linux/inetdevice.h> #include <net/switchdev.h> +#include <linux/rhashtable.h> #include "prestera.h" #include "prestera_router_hw.h" +struct prestera_kern_fib_cache_key { + struct prestera_ip_addr addr; + u32 prefix_len; + u32 kern_tb_id; /* tb_id from kernel (not fixed) */ +}; + +/* Kernel FIB entry cached by the driver, maintained from fib notifier events */ +struct prestera_kern_fib_cache { + struct prestera_kern_fib_cache_key key; + struct { + struct prestera_fib_key fib_key; + enum prestera_fib_type fib_type; + } lpm_info; /* holds prepared lpm info */ + struct rhash_head ht_node; /* node of prestera_router */ + struct fib_info *fi; + u8 kern_tos; + u8 kern_type; + /* Indicates that the route is not overlapped by another table */ + bool reachable; +}; + +static const struct rhashtable_params __prestera_kern_fib_cache_ht_params = { + .key_offset = offsetof(struct prestera_kern_fib_cache, key), + .head_offset = offsetof(struct prestera_kern_fib_cache, ht_node), + .key_len = sizeof(struct prestera_kern_fib_cache_key), + .automatic_shrinking = true, +}; + /* This util to be used, to convert kernel rules for default vr in hw_vr */ static u32 prestera_fix_tb_id(u32 tb_id) { @@ -20,6 +49,290 @@ static u32 prestera_fix_tb_id(u32 tb_id) return tb_id; } +static void +prestera_util_fen_info2fib_cache_key(struct fib_entry_notifier_info *fen_info, + struct prestera_kern_fib_cache_key *key) +{ + memset(key, 0, sizeof(*key)); + key->addr.u.ipv4 = cpu_to_be32(fen_info->dst); + key->prefix_len = fen_info->dst_len; + key->kern_tb_id = fen_info->tb_id; +} + +static struct prestera_kern_fib_cache * +prestera_kern_fib_cache_find(struct prestera_switch *sw, + struct prestera_kern_fib_cache_key *key) +{ + struct prestera_kern_fib_cache *fib_cache; + + fib_cache = + rhashtable_lookup_fast(&sw->router->kern_fib_cache_ht, key, + __prestera_kern_fib_cache_ht_params); + return fib_cache; +} + +static void +prestera_kern_fib_cache_destroy(struct prestera_switch *sw, + struct prestera_kern_fib_cache *fib_cache) +{ + fib_info_put(fib_cache->fi); + rhashtable_remove_fast(&sw->router->kern_fib_cache_ht, + &fib_cache->ht_node, + __prestera_kern_fib_cache_ht_params); + kfree(fib_cache); + } + +/* Operations on fi (offload, etc) must be wrapped in utils. + * This function just creates storage. 
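+ * The entry takes a reference on the kernel's fib_info via fib_info_hold(); + * prestera_kern_fib_cache_destroy() above is the matching teardown and drops + * that reference with fib_info_put() before freeing.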
+ */ +static struct prestera_kern_fib_cache * +prestera_kern_fib_cache_create(struct prestera_switch *sw, + struct prestera_kern_fib_cache_key *key, + struct fib_info *fi, u8 tos, u8 type) +{ + struct prestera_kern_fib_cache *fib_cache; + int err; + + fib_cache = kzalloc(sizeof(*fib_cache), GFP_KERNEL); + if (!fib_cache) + goto err_kzalloc; + + memcpy(&fib_cache->key, key, sizeof(*key)); + fib_info_hold(fi); + fib_cache->fi = fi; + fib_cache->kern_tos = tos; + fib_cache->kern_type = type; + + err = rhashtable_insert_fast(&sw->router->kern_fib_cache_ht, + &fib_cache->ht_node, + __prestera_kern_fib_cache_ht_params); + if (err) + goto err_ht_insert; + + return fib_cache; + +err_ht_insert: + fib_info_put(fi); + kfree(fib_cache); +err_kzalloc: + return NULL; +} + +static void +__prestera_k_arb_fib_lpm_offload_set(struct prestera_switch *sw, + struct prestera_kern_fib_cache *fc, + bool fail, bool offload, bool trap) +{ + struct fib_rt_info fri; + + if (fc->key.addr.v != PRESTERA_IPV4) + return; + + fri.fi = fc->fi; + fri.tb_id = fc->key.kern_tb_id; + fri.dst = fc->key.addr.u.ipv4; + fri.dst_len = fc->key.prefix_len; + fri.tos = fc->kern_tos; + fri.type = fc->kern_type; + /* flags begin */ + fri.offload = offload; + fri.trap = trap; + fri.offload_failed = fail; + /* flags end */ + fib_alias_hw_flags_set(&init_net, &fri); +} + +static int +__prestera_pr_k_arb_fc_lpm_info_calc(struct prestera_switch *sw, + struct prestera_kern_fib_cache *fc) +{ + memset(&fc->lpm_info, 0, sizeof(fc->lpm_info)); + + switch (fc->fi->fib_type) { + case RTN_UNICAST: + fc->lpm_info.fib_type = PRESTERA_FIB_TYPE_TRAP; + break; + /* Unsupported. Leave it for kernel: */ + case RTN_BROADCAST: + case RTN_MULTICAST: + /* Routes we must trap by design: */ + case RTN_LOCAL: + case RTN_UNREACHABLE: + case RTN_PROHIBIT: + fc->lpm_info.fib_type = PRESTERA_FIB_TYPE_TRAP; + break; + case RTN_BLACKHOLE: + fc->lpm_info.fib_type = PRESTERA_FIB_TYPE_DROP; + break; + default: + dev_err(sw->dev->dev, "Unsupported fib_type"); + return -EOPNOTSUPP; + } + + fc->lpm_info.fib_key.addr = fc->key.addr; + fc->lpm_info.fib_key.prefix_len = fc->key.prefix_len; + fc->lpm_info.fib_key.tb_id = prestera_fix_tb_id(fc->key.kern_tb_id); + + return 0; +} + +static int __prestera_k_arb_f_lpm_set(struct prestera_switch *sw, + struct prestera_kern_fib_cache *fc, + bool enabled) +{ + struct prestera_fib_node *fib_node; + + fib_node = prestera_fib_node_find(sw, &fc->lpm_info.fib_key); + if (fib_node) + prestera_fib_node_destroy(sw, fib_node); + + if (!enabled) + return 0; + + fib_node = prestera_fib_node_create(sw, &fc->lpm_info.fib_key, + fc->lpm_info.fib_type); + + if (!fib_node) { + dev_err(sw->dev->dev, "fib_node=NULL %pI4n/%d kern_tb_id = %d", + &fc->key.addr.u.ipv4, fc->key.prefix_len, + fc->key.kern_tb_id); + return -ENOENT; + } + + return 0; +} + +static int __prestera_k_arb_fc_apply(struct prestera_switch *sw, + struct prestera_kern_fib_cache *fc) +{ + int err; + + err = __prestera_pr_k_arb_fc_lpm_info_calc(sw, fc); + if (err) + return err; + + err = __prestera_k_arb_f_lpm_set(sw, fc, fc->reachable); + if (err) { + __prestera_k_arb_fib_lpm_offload_set(sw, fc, + true, false, false); + return err; + } + + switch (fc->lpm_info.fib_type) { + case PRESTERA_FIB_TYPE_TRAP: + __prestera_k_arb_fib_lpm_offload_set(sw, fc, false, + false, fc->reachable); + break; + case PRESTERA_FIB_TYPE_DROP: + __prestera_k_arb_fib_lpm_offload_set(sw, fc, false, true, + fc->reachable); + break; + case PRESTERA_FIB_TYPE_INVALID: + break; + } + + return 0; +} + +static struct 
prestera_kern_fib_cache * +__prestera_k_arb_util_fib_overlaps(struct prestera_switch *sw, + struct prestera_kern_fib_cache *fc) +{ + struct prestera_kern_fib_cache_key fc_key; + struct prestera_kern_fib_cache *rfc; + + /* TODO: parse kernel rules */ + rfc = NULL; + if (fc->key.kern_tb_id == RT_TABLE_LOCAL) { + memcpy(&fc_key, &fc->key, sizeof(fc_key)); + fc_key.kern_tb_id = RT_TABLE_MAIN; + rfc = prestera_kern_fib_cache_find(sw, &fc_key); + } + + return rfc; +} + +static struct prestera_kern_fib_cache * +__prestera_k_arb_util_fib_overlapped(struct prestera_switch *sw, + struct prestera_kern_fib_cache *fc) +{ + struct prestera_kern_fib_cache_key fc_key; + struct prestera_kern_fib_cache *rfc; + + /* TODO: parse kernel rules */ + rfc = NULL; + if (fc->key.kern_tb_id == RT_TABLE_MAIN) { + memcpy(&fc_key, &fc->key, sizeof(fc_key)); + fc_key.kern_tb_id = RT_TABLE_LOCAL; + rfc = prestera_kern_fib_cache_find(sw, &fc_key); + } + + return rfc; +} + +static int +prestera_k_arb_fib_evt(struct prestera_switch *sw, + bool replace, /* replace or del */ + struct fib_entry_notifier_info *fen_info) +{ + struct prestera_kern_fib_cache *tfib_cache, *bfib_cache; /* top/btm */ + struct prestera_kern_fib_cache_key fc_key; + struct prestera_kern_fib_cache *fib_cache; + int err; + + prestera_util_fen_info2fib_cache_key(fen_info, &fc_key); + fib_cache = prestera_kern_fib_cache_find(sw, &fc_key); + if (fib_cache) { + fib_cache->reachable = false; + err = __prestera_k_arb_fc_apply(sw, fib_cache); + if (err) + dev_err(sw->dev->dev, + "Applying destroyed fib_cache failed"); + + bfib_cache = __prestera_k_arb_util_fib_overlaps(sw, fib_cache); + tfib_cache = __prestera_k_arb_util_fib_overlapped(sw, fib_cache); + if (!tfib_cache && bfib_cache) { + bfib_cache->reachable = true; + err = __prestera_k_arb_fc_apply(sw, bfib_cache); + if (err) + dev_err(sw->dev->dev, + "Applying fib_cache btm failed"); + } + + prestera_kern_fib_cache_destroy(sw, fib_cache); + } + + if (replace) { + fib_cache = prestera_kern_fib_cache_create(sw, &fc_key, + fen_info->fi, + fen_info->tos, + fen_info->type); + if (!fib_cache) { + dev_err(sw->dev->dev, "fib_cache == NULL"); + return -ENOENT; + } + + bfib_cache = __prestera_k_arb_util_fib_overlaps(sw, fib_cache); + tfib_cache = __prestera_k_arb_util_fib_overlapped(sw, fib_cache); + if (!tfib_cache) + fib_cache->reachable = true; + + if (bfib_cache) { + bfib_cache->reachable = false; + err = __prestera_k_arb_fc_apply(sw, bfib_cache); + if (err) + dev_err(sw->dev->dev, + "Applying fib_cache btm failed"); + } + + err = __prestera_k_arb_fc_apply(sw, fib_cache); + if (err) + dev_err(sw->dev->dev, "Applying fib_cache failed"); + } + + return 0; +} + static int __prestera_inetaddr_port_event(struct net_device *port_dev, unsigned long event, struct netlink_ext_ack *extack) @@ -137,6 +450,89 @@ out: return notifier_from_errno(err); } +struct prestera_fib_event_work { + struct work_struct work; + struct prestera_switch *sw; + struct fib_entry_notifier_info fen_info; + unsigned long event; +}; + +static void __prestera_router_fib_event_work(struct work_struct *work) +{ + struct prestera_fib_event_work *fib_work = + container_of(work, struct prestera_fib_event_work, work); + struct prestera_switch *sw = fib_work->sw; + int err; + + rtnl_lock(); + + switch (fib_work->event) { + case FIB_EVENT_ENTRY_REPLACE: + err = prestera_k_arb_fib_evt(sw, true, &fib_work->fen_info); + if (err) + goto err_out; + + break; + case FIB_EVENT_ENTRY_DEL: + err = prestera_k_arb_fib_evt(sw, false, &fib_work->fen_info); + if (err) + goto 
err_out; + + break; + } + + goto out; + +err_out: + dev_err(sw->dev->dev, "Error when processing %pI4h/%d", + &fib_work->fen_info.dst, + fib_work->fen_info.dst_len); +out: + fib_info_put(fib_work->fen_info.fi); + rtnl_unlock(); + kfree(fib_work); +} + +/* Called with rcu_read_lock() */ +static int __prestera_router_fib_event(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + struct prestera_fib_event_work *fib_work; + struct fib_entry_notifier_info *fen_info; + struct fib_notifier_info *info = ptr; + struct prestera_router *router; + + if (info->family != AF_INET) + return NOTIFY_DONE; + + router = container_of(nb, struct prestera_router, fib_nb); + + switch (event) { + case FIB_EVENT_ENTRY_REPLACE: + case FIB_EVENT_ENTRY_DEL: + fen_info = container_of(info, struct fib_entry_notifier_info, + info); + if (!fen_info->fi) + return NOTIFY_DONE; + + fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC); + if (WARN_ON(!fib_work)) + return NOTIFY_BAD; + + fib_info_hold(fen_info->fi); + fib_work->fen_info = *fen_info; + fib_work->event = event; + fib_work->sw = router->sw; + INIT_WORK(&fib_work->work, __prestera_router_fib_event_work); + prestera_queue_work(&fib_work->work); + break; + default: + return NOTIFY_DONE; + } + + return NOTIFY_DONE; +} + int prestera_router_init(struct prestera_switch *sw) { struct prestera_router *router; @@ -153,6 +549,11 @@ int prestera_router_init(struct prestera_switch *sw) if (err) goto err_router_lib_init; + err = rhashtable_init(&router->kern_fib_cache_ht, + &__prestera_kern_fib_cache_ht_params); + if (err) + goto err_kern_fib_cache_ht_init; + router->inetaddr_valid_nb.notifier_call = __prestera_inetaddr_valid_cb; err = register_inetaddr_validator_notifier(&router->inetaddr_valid_nb); if (err) @@ -163,11 +564,21 @@ int prestera_router_init(struct prestera_switch *sw) if (err) goto err_register_inetaddr_notifier; + router->fib_nb.notifier_call = __prestera_router_fib_event; + err = register_fib_notifier(&init_net, &router->fib_nb, + /* TODO: flush fib entries */ NULL, NULL); + if (err) + goto err_register_fib_notifier; + return 0; +err_register_fib_notifier: + unregister_inetaddr_notifier(&router->inetaddr_nb); err_register_inetaddr_notifier: unregister_inetaddr_validator_notifier(&router->inetaddr_valid_nb); err_register_inetaddr_validator_notifier: + rhashtable_destroy(&router->kern_fib_cache_ht); +err_kern_fib_cache_ht_init: prestera_router_hw_fini(sw); err_router_lib_init: kfree(sw->router); @@ -178,6 +589,7 @@ void prestera_router_fini(struct prestera_switch *sw) { unregister_inetaddr_notifier(&sw->router->inetaddr_nb); unregister_inetaddr_validator_notifier(&sw->router->inetaddr_valid_nb); + rhashtable_destroy(&sw->router->kern_fib_cache_ht); prestera_router_hw_fini(sw); kfree(sw->router); sw->router = NULL; diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c index e5592b69ad37..5b0cf3be9a9e 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c @@ -9,23 +9,41 @@ #include "prestera_acl.h" /* +--+ - * +------->|vr| - * | +--+ - * | - * +-+-------+ - * |rif_entry| - * +---------+ - * Rif is + * +------->|vr|<-+ + * | +--+ | + * | | + * +-+-------+ +--+---+-+ + * |rif_entry| |fib_node| + * +---------+ +--------+ + * Rif is Fib - is exit point * used as * entry point * for vr in hw */ +#define PRESTERA_NHGR_UNUSED (0) +#define PRESTERA_NHGR_DROP (0xFFFFFFFF) + +static const struct 
rhashtable_params __prestera_fib_ht_params = { + .key_offset = offsetof(struct prestera_fib_node, key), + .head_offset = offsetof(struct prestera_fib_node, ht_node), + .key_len = sizeof(struct prestera_fib_key), + .automatic_shrinking = true, +}; + int prestera_router_hw_init(struct prestera_switch *sw) { + int err; + + err = rhashtable_init(&sw->router->fib_ht, + &__prestera_fib_ht_params); + if (err) + return err; + INIT_LIST_HEAD(&sw->router->vr_list); INIT_LIST_HEAD(&sw->router->rif_entry_list); return 0; } @@ -33,6 +51,7 @@ void prestera_router_hw_fini(struct prestera_switch *sw) { WARN_ON(!list_empty(&sw->router->vr_list)); WARN_ON(!list_empty(&sw->router->rif_entry_list)); + rhashtable_destroy(&sw->router->fib_ht); } static struct prestera_vr *__prestera_vr_find(struct prestera_switch *sw, @@ -212,3 +231,102 @@ err_key_copy: err_kzalloc: return NULL; } + +struct prestera_fib_node * +prestera_fib_node_find(struct prestera_switch *sw, struct prestera_fib_key *key) +{ + struct prestera_fib_node *fib_node; + + fib_node = rhashtable_lookup_fast(&sw->router->fib_ht, key, + __prestera_fib_ht_params); + return fib_node; +} + +static void __prestera_fib_node_destruct(struct prestera_switch *sw, + struct prestera_fib_node *fib_node) +{ + struct prestera_vr *vr; + + vr = fib_node->info.vr; + prestera_hw_lpm_del(sw, vr->hw_vr_id, fib_node->key.addr.u.ipv4, + fib_node->key.prefix_len); + switch (fib_node->info.type) { + case PRESTERA_FIB_TYPE_TRAP: + break; + case PRESTERA_FIB_TYPE_DROP: + break; + default: + pr_err("Unknown fib_node->info.type = %d\n", + fib_node->info.type); + } + + prestera_vr_put(sw, vr); +} + +void prestera_fib_node_destroy(struct prestera_switch *sw, + struct prestera_fib_node *fib_node) +{ + __prestera_fib_node_destruct(sw, fib_node); + rhashtable_remove_fast(&sw->router->fib_ht, &fib_node->ht_node, + __prestera_fib_ht_params); + kfree(fib_node); +} + +struct prestera_fib_node * +prestera_fib_node_create(struct prestera_switch *sw, + struct prestera_fib_key *key, + enum prestera_fib_type fib_type) +{ + struct prestera_fib_node *fib_node; + u32 grp_id; + struct prestera_vr *vr; + int err; + + fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL); + if (!fib_node) + goto err_kzalloc; + + memcpy(&fib_node->key, key, sizeof(*key)); + fib_node->info.type = fib_type; + + vr = prestera_vr_get(sw, key->tb_id, NULL); + if (IS_ERR(vr)) + goto err_vr_get; + + fib_node->info.vr = vr; + + switch (fib_type) { + case PRESTERA_FIB_TYPE_TRAP: + grp_id = PRESTERA_NHGR_UNUSED; + break; + case PRESTERA_FIB_TYPE_DROP: + grp_id = PRESTERA_NHGR_DROP; + break; + default: + pr_err("Unsupported fib_type %d\n", fib_type); + goto err_nh_grp_get; + } + + err = prestera_hw_lpm_add(sw, vr->hw_vr_id, key->addr.u.ipv4, + key->prefix_len, grp_id); + if (err) + goto err_lpm_add; + + err = rhashtable_insert_fast(&sw->router->fib_ht, &fib_node->ht_node, + __prestera_fib_ht_params); + if (err) + goto err_ht_insert; + + return fib_node; + +err_ht_insert: + prestera_hw_lpm_del(sw, vr->hw_vr_id, key->addr.u.ipv4, + key->prefix_len); +err_lpm_add: +err_nh_grp_get: + prestera_vr_put(sw, vr); +err_vr_get: + kfree(fib_node); +err_kzalloc: + return NULL; +} diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router_hw.h b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.h index b6b028551868..67dbb49c8bd4 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_router_hw.h +++ b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.h @@ -22,6 +22,42 @@ struct 
prestera_rif_entry { struct list_head router_node; /* ht */ }; +struct prestera_ip_addr { + union { + __be32 ipv4; + struct in6_addr ipv6; + } u; + enum { + PRESTERA_IPV4 = 0, + PRESTERA_IPV6 + } v; +}; + +struct prestera_fib_key { + struct prestera_ip_addr addr; + u32 prefix_len; + u32 tb_id; +}; + +struct prestera_fib_info { + struct prestera_vr *vr; + struct list_head vr_node; + enum prestera_fib_type { + PRESTERA_FIB_TYPE_INVALID = 0, + /* It can be connected route + * and will be overlapped with neighbours + */ + PRESTERA_FIB_TYPE_TRAP, + PRESTERA_FIB_TYPE_DROP + } type; +}; + +struct prestera_fib_node { + struct rhash_head ht_node; /* node of prestera_vr */ + struct prestera_fib_key key; + struct prestera_fib_info info; /* action related info */ +}; + struct prestera_rif_entry * prestera_rif_entry_find(const struct prestera_switch *sw, const struct prestera_rif_entry_key *k); @@ -31,6 +67,14 @@ struct prestera_rif_entry * prestera_rif_entry_create(struct prestera_switch *sw, struct prestera_rif_entry_key *k, u32 tb_id, const unsigned char *addr); +struct prestera_fib_node *prestera_fib_node_find(struct prestera_switch *sw, + struct prestera_fib_key *key); +void prestera_fib_node_destroy(struct prestera_switch *sw, + struct prestera_fib_node *fib_node); +struct prestera_fib_node * +prestera_fib_node_create(struct prestera_switch *sw, + struct prestera_fib_key *key, + enum prestera_fib_type fib_type); int prestera_router_hw_init(struct prestera_switch *sw); void prestera_router_hw_fini(struct prestera_switch *sw); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 817f4154b86d..f777151d226f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -42,7 +42,6 @@ #include <linux/tcp.h> #include <linux/ip.h> #include <linux/ipv6.h> -#include <linux/moduleparam.h> #include <linux/indirect_call_wrapper.h> #include "mlx4_en.h" diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index fcfd38fa9e6c..4bc666714a35 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -28,7 +28,7 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en/rqt.o en/tir.o en/rss.o en/rx_res.o \ en_selftest.o en/port.o en/monitor_stats.o en/health.o \ en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/pool.o \ en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o en/devlink.o en/ptp.o \ - en/qos.o en/trap.o en/fs_tt_redirect.o + en/qos.o en/trap.o en/fs_tt_redirect.o en/selq.o # # Netdev extra @@ -55,7 +55,11 @@ mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en/tc/act/act.o en/tc/act/drop.o en/tc/a en/tc/act/ct.o en/tc/act/sample.o en/tc/act/ptype.o \ en/tc/act/redirect_ingress.o -mlx5_core-$(CONFIG_MLX5_TC_CT) += en/tc_ct.o +ifneq ($(CONFIG_MLX5_TC_CT),) + mlx5_core-y += en/tc_ct.o en/tc/ct_fs_dmfs.o + mlx5_core-$(CONFIG_MLX5_SW_STEERING) += en/tc/ct_fs_smfs.o +endif + mlx5_core-$(CONFIG_MLX5_TC_SAMPLE) += en/tc/sample.o # @@ -103,9 +107,10 @@ mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o steering/dr_icm_pool.o steering/dr_buddy.o \ steering/dr_ste.o steering/dr_send.o \ steering/dr_ste_v0.o steering/dr_ste_v1.o \ + steering/dr_ste_v2.o \ steering/dr_cmd.o steering/dr_fw.o \ steering/dr_action.o steering/fs_dr.o \ - steering/dr_dbg.o + steering/dr_dbg.o lib/smfs.o # # SF device # diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c 
index 291e427e9e4f..d5408f6ce5a7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c @@ -183,11 +183,11 @@ static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev, u32 db_per_page = PAGE_SIZE / cache_line_size(); struct mlx5_db_pgdir *pgdir; - pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL); + pgdir = kzalloc_node(sizeof(*pgdir), GFP_KERNEL, node); if (!pgdir) return NULL; - pgdir->bitmap = bitmap_zalloc(db_per_page, GFP_KERNEL); + pgdir->bitmap = bitmap_zalloc_node(db_per_page, GFP_KERNEL, node); if (!pgdir->bitmap) { kfree(pgdir); return NULL; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 17fe05809653..c2462d37f1b3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -31,7 +31,6 @@ */ #include <linux/highmem.h> -#include <linux/module.h> #include <linux/errno.h> #include <linux/pci.h> #include <linux/dma-mapping.h> @@ -131,11 +130,8 @@ static int cmd_alloc_index(struct mlx5_cmd *cmd) static void cmd_free_index(struct mlx5_cmd *cmd, int idx) { - unsigned long flags; - - spin_lock_irqsave(&cmd->alloc_lock, flags); + lockdep_assert_held(&cmd->alloc_lock); set_bit(idx, &cmd->bitmask); - spin_unlock_irqrestore(&cmd->alloc_lock, flags); } static void cmd_ent_get(struct mlx5_cmd_work_ent *ent) @@ -145,17 +141,21 @@ static void cmd_ent_get(struct mlx5_cmd_work_ent *ent) static void cmd_ent_put(struct mlx5_cmd_work_ent *ent) { + struct mlx5_cmd *cmd = ent->cmd; + unsigned long flags; + + spin_lock_irqsave(&cmd->alloc_lock, flags); if (!refcount_dec_and_test(&ent->refcnt)) - return; + goto out; if (ent->idx >= 0) { - struct mlx5_cmd *cmd = ent->cmd; - cmd_free_index(cmd, ent->idx); up(ent->page_queue ? 
&cmd->pages_sem : &cmd->sem); } cmd_free_ent(ent); +out: + spin_unlock_irqrestore(&cmd->alloc_lock, flags); } static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx) @@ -190,10 +190,10 @@ static int verify_block_sig(struct mlx5_cmd_prot_block *block) int xor_len = sizeof(*block) - sizeof(block->data) - 1; if (xor8_buf(block, rsvd0_off, xor_len) != 0xff) - return -EINVAL; + return -EHWPOISON; if (xor8_buf(block, 0, sizeof(*block)) != 0xff) - return -EINVAL; + return -EHWPOISON; return 0; } @@ -259,12 +259,12 @@ static int verify_signature(struct mlx5_cmd_work_ent *ent) sig = xor8_buf(ent->lay, 0, sizeof(*ent->lay)); if (sig != 0xff) - return -EINVAL; + return -EHWPOISON; for (i = 0; i < n && next; i++) { err = verify_block_sig(next->buf); if (err) - return err; + return -EHWPOISON; next = next->next; } @@ -479,7 +479,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, case MLX5_CMD_OP_ALLOC_SF: *status = MLX5_DRIVER_STATUS_ABORTED; *synd = MLX5_DRIVER_SYND; - return -EIO; + return -ENOLINK; default: mlx5_core_err(dev, "Unknown FW command (%d)\n", op); return -EINVAL; @@ -760,44 +760,72 @@ struct mlx5_ifc_mbox_in_bits { u8 reserved_at_40[0x40]; }; -void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome) +void mlx5_cmd_out_err(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod, void *out) { - *status = MLX5_GET(mbox_out, out, status); - *syndrome = MLX5_GET(mbox_out, out, syndrome); + u32 syndrome = MLX5_GET(mbox_out, out, syndrome); + u8 status = MLX5_GET(mbox_out, out, status); + + mlx5_core_err_rl(dev, + "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x), err(%d)\n", + mlx5_command_str(opcode), opcode, op_mod, + cmd_status_str(status), status, syndrome, cmd_status_to_err(status)); } +EXPORT_SYMBOL(mlx5_cmd_out_err); -static int mlx5_cmd_check(struct mlx5_core_dev *dev, void *in, void *out) +static void cmd_status_print(struct mlx5_core_dev *dev, void *in, void *out) { + u16 opcode, op_mod; u32 syndrome; u8 status; - u16 opcode; - u16 op_mod; u16 uid; + int err; - mlx5_cmd_mbox_status(out, &status, &syndrome); - if (!status) - return 0; + syndrome = MLX5_GET(mbox_out, out, syndrome); + status = MLX5_GET(mbox_out, out, status); opcode = MLX5_GET(mbox_in, in, opcode); op_mod = MLX5_GET(mbox_in, in, op_mod); uid = MLX5_GET(mbox_in, in, uid); + err = cmd_status_to_err(status); + if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY) - mlx5_core_err_rl(dev, - "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n", - mlx5_command_str(opcode), opcode, op_mod, - cmd_status_str(status), status, syndrome); + mlx5_cmd_out_err(dev, opcode, op_mod, out); else mlx5_core_dbg(dev, - "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n", - mlx5_command_str(opcode), - opcode, op_mod, - cmd_status_str(status), - status, - syndrome); + "%s(0x%x) op_mod(0x%x) uid(%d) failed, status %s(0x%x), syndrome (0x%x), err(%d)\n", + mlx5_command_str(opcode), opcode, op_mod, uid, + cmd_status_str(status), status, syndrome, err); +} - return cmd_status_to_err(status); +int mlx5_cmd_check(struct mlx5_core_dev *dev, int err, void *in, void *out) +{ + /* aborted due to PCI error or via reset flow mlx5_cmd_trigger_completions() */ + if (err == -ENXIO) { + u16 opcode = MLX5_GET(mbox_in, in, opcode); + u32 syndrome; + u8 status; + + /* PCI Error, emulate command return status, for smooth reset */ + err = mlx5_internal_err_ret_value(dev, opcode, &syndrome, &status); + MLX5_SET(mbox_out, out, status, status); + MLX5_SET(mbox_out, out, syndrome, 
syndrome); + if (!err) + return 0; + } + + /* driver or FW delivery error */ + if (err != -EREMOTEIO && err) + return err; + + /* check outbox status */ + err = cmd_status_to_err(MLX5_GET(mbox_out, out, status)); + if (err) + cmd_status_print(dev, in, out); + + return err; } +EXPORT_SYMBOL(mlx5_cmd_check); static void dump_command(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent, int input) @@ -980,13 +1008,7 @@ static void cmd_work_handler(struct work_struct *work) /* Skip sending command to fw if internal error */ if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, ent->op)) { - u8 status = 0; - u32 drv_synd; - - ent->ret = mlx5_internal_err_ret_value(dev, msg_to_opcode(ent->in), &drv_synd, &status); - MLX5_SET(mbox_out, ent->out, status, status); - MLX5_SET(mbox_out, ent->out, syndrome, drv_synd); - + ent->ret = -ENXIO; mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true); return; } @@ -1005,6 +1027,31 @@ static void cmd_work_handler(struct work_struct *work) } } +static int deliv_status_to_err(u8 status) +{ + switch (status) { + case MLX5_CMD_DELIVERY_STAT_OK: + case MLX5_DRIVER_STATUS_ABORTED: + return 0; + case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR: + case MLX5_CMD_DELIVERY_STAT_TOK_ERR: + return -EBADR; + case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR: + case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR: + case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR: + return -EFAULT; /* Bad address */ + case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR: + case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR: + case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR: + case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR: + return -ENOMSG; + case MLX5_CMD_DELIVERY_STAT_FW_ERR: + return -EIO; + default: + return -EINVAL; + } +} + static const char *deliv_status_to_str(u8 status) { switch (status) { @@ -1101,16 +1148,27 @@ out_err: /* Notes: * 1. Callback functions may not sleep * 2. page queue commands do not support asynchrous completion + * + * return value in case (!callback): + * ret < 0 : Command execution couldn't be submitted by driver + * ret > 0 : Command execution couldn't be performed by firmware + * ret == 0: Command was executed by FW, Caller must check FW outbox status. 
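+ * e.g., a blocking caller (cf. cmd_exec() below) consumes this as: + *	err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, NULL, NULL, pages_queue, token, force_polling); + *	if (err > 0) err = deliv_status_to_err(err);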
+ * + * return value in case (callback): + * ret < 0 : Command execution couldn't be submitted by driver + * ret == 0: Command will be submitted to FW for execution + * and the callback will be called for further status updates */ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, struct mlx5_cmd_msg *out, void *uout, int uout_size, mlx5_cmd_cbk_t callback, - void *context, int page_queue, u8 *status, + void *context, int page_queue, u8 token, bool force_polling) { struct mlx5_cmd *cmd = &dev->cmd; struct mlx5_cmd_work_ent *ent; struct mlx5_cmd_stats *stats; + u8 status = 0; int err = 0; s64 ds; u16 op; @@ -1141,12 +1199,12 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, cmd_work_handler(&ent->work); } else if (!queue_work(cmd->wq, &ent->work)) { mlx5_core_warn(dev, "failed to queue work\n"); - err = -ENOMEM; + err = -EALREADY; goto out_free; } if (callback) - goto out; /* mlx5_cmd_comp_handler() will put(ent) */ + return 0; /* mlx5_cmd_comp_handler() will put(ent) */ err = wait_func(dev, ent); if (err == -ETIMEDOUT || err == -ECANCELED) @@ -1164,12 +1222,11 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME, "fw exec time for %s is %lld nsec\n", mlx5_command_str(op), ds); - *status = ent->status; out_free: + status = ent->status; cmd_ent_put(ent); -out: - return err; + return err ? : status; } static ssize_t dbg_write(struct file *filp, const char __user *buf, @@ -1486,7 +1543,7 @@ static void create_debugfs_files(struct mlx5_core_dev *dev) { struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; - dbg->dbg_root = debugfs_create_dir("cmd", dev->priv.dbg_root); + dbg->dbg_root = debugfs_create_dir("cmd", mlx5_debugfs_get_dev_root(dev)); debugfs_create_file("in", 0400, dbg->dbg_root, dev, &dfops); debugfs_create_file("out", 0200, dbg->dbg_root, dev, &dfops); @@ -1612,15 +1669,15 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force ent->ts2 = ktime_get_ns(); memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out)); dump_command(dev, ent, 0); - if (!ent->ret) { + + if (vec & MLX5_TRIGGERED_CMD_COMP) + ent->ret = -ENXIO; + + if (!ent->ret) { /* Command completed by FW */ if (!cmd->checksum_disabled) ent->ret = verify_signature(ent); - else - ent->ret = 0; - if (vec & MLX5_TRIGGERED_CMD_COMP) - ent->status = MLX5_DRIVER_STATUS_ABORTED; - else - ent->status = ent->lay->status_own >> 1; + + ent->status = ent->lay->status_own >> 1; mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n", ent->ret, deliv_status_to_str(ent->status), ent->status); @@ -1638,21 +1695,18 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force callback = ent->callback; context = ent->context; - err = ent->ret; - if (!err) { + err = ent->ret ? : ent->status; + if (err > 0) /* Failed in FW, command didn't execute */ + err = deliv_status_to_err(err); + + if (!err) err = mlx5_copy_from_msg(ent->uout, ent->out, ent->uout_size); - err = err ? err : mlx5_cmd_check(dev, - ent->in->first.data, - ent->uout); - } - mlx5_free_cmd_msg(dev, ent->out); free_msg(dev, ent->in); - err = err ? 
err : ent->status; /* final consumer is done, release ent */ cmd_ent_put(ent); callback(err, context); @@ -1719,31 +1773,6 @@ void mlx5_cmd_flush(struct mlx5_core_dev *dev) up(&cmd->sem); } -static int status_to_err(u8 status) -{ - switch (status) { - case MLX5_CMD_DELIVERY_STAT_OK: - case MLX5_DRIVER_STATUS_ABORTED: - return 0; - case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR: - case MLX5_CMD_DELIVERY_STAT_TOK_ERR: - return -EBADR; - case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR: - case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR: - case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR: - return -EFAULT; /* Bad address */ - case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR: - case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR: - case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR: - case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR: - return -ENOMSG; - case MLX5_CMD_DELIVERY_STAT_FW_ERR: - return -EIO; - default: - return -EINVAL; - } -} - static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size, gfp_t gfp) { @@ -1787,27 +1816,23 @@ static int is_manage_pages(void *in) return MLX5_GET(mbox_in, in, opcode) == MLX5_CMD_OP_MANAGE_PAGES; } +/* Notes: + * 1. Callback functions may not sleep + * 2. Page queue commands do not support asynchronous completion + */ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size, mlx5_cmd_cbk_t callback, void *context, bool force_polling) { - struct mlx5_cmd_msg *inb; - struct mlx5_cmd_msg *outb; + u16 opcode = MLX5_GET(mbox_in, in, opcode); + struct mlx5_cmd_msg *inb, *outb; int pages_queue; gfp_t gfp; - int err; - u8 status = 0; - u32 drv_synd; - u16 opcode; u8 token; + int err; - opcode = MLX5_GET(mbox_in, in, opcode); - if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, opcode)) { - err = mlx5_internal_err_ret_value(dev, opcode, &drv_synd, &status); - MLX5_SET(mbox_out, out, status, status); - MLX5_SET(mbox_out, out, syndrome, drv_synd); - return err; - } + if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, opcode)) + return -ENXIO; pages_queue = is_manage_pages(in); gfp = callback ? 
GFP_ATOMIC : GFP_KERNEL; @@ -1833,39 +1858,133 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, } err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context, - pages_queue, &status, token, force_polling); + pages_queue, token, force_polling); + if (callback) + return err; + + if (err > 0) /* Failed in FW, command didn't execute */ + err = deliv_status_to_err(err); + if (err) goto out_out; - mlx5_core_dbg(dev, "err %d, status %d\n", err, status); - if (status) { - err = status_to_err(status); - goto out_out; + /* command completed by FW */ + err = mlx5_copy_from_msg(out, outb, out_size); +out_out: + mlx5_free_cmd_msg(dev, outb); +out_in: + free_msg(dev, inb); + return err; +} + +static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status, int err) +{ + struct mlx5_cmd_stats *stats; + + if (!err) + return; + + stats = &dev->cmd.stats[opcode]; + spin_lock_irq(&stats->lock); + stats->failed++; + if (err < 0) + stats->last_failed_errno = -err; + if (err == -EREMOTEIO) { + stats->failed_mbox_status++; + stats->last_failed_mbox_status = status; } + spin_unlock_irq(&stats->lock); +} - if (!callback) - err = mlx5_copy_from_msg(out, outb, out_size); +/* preserve -EREMOTEIO for outbox.status != OK, otherwise return err as is */ +static int cmd_status_err(struct mlx5_core_dev *dev, int err, u16 opcode, void *out) +{ + u8 status = MLX5_GET(mbox_out, out, status); -out_out: - if (!callback) - mlx5_free_cmd_msg(dev, outb); + if (err == -EREMOTEIO) /* -EREMOTEIO is preserved */ + err = -EIO; -out_in: - if (!callback) - free_msg(dev, inb); + if (!err && status != MLX5_CMD_STAT_OK) + err = -EREMOTEIO; + + cmd_status_log(dev, opcode, status, err); + return err; +} + +/** + * mlx5_cmd_do - Executes a fw command, wait for completion. + * Unlike mlx5_cmd_exec, this function will not translate or intercept + * outbox.status and will return -EREMOTEIO when + * outbox.status != MLX5_CMD_STAT_OK + * + * @dev: mlx5 core device + * @in: inbox mlx5_ifc command buffer + * @in_size: inbox buffer size + * @out: outbox mlx5_ifc buffer + * @out_size: outbox size + * + * @return: + * -EREMOTEIO : Command executed by FW, outbox.status != MLX5_CMD_STAT_OK. + * Caller must check FW outbox status. + * 0 : Command execution successful, outbox.status == MLX5_CMD_STAT_OK. + * < 0 : Command execution couldn't be performed by firmware or driver + */ +int mlx5_cmd_do(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size) +{ + int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, false); + u16 opcode = MLX5_GET(mbox_in, in, opcode); + + err = cmd_status_err(dev, err, opcode, out); return err; } +EXPORT_SYMBOL(mlx5_cmd_do); +/** + * mlx5_cmd_exec - Executes a fw command, wait for completion + * + * @dev: mlx5 core device + * @in: inbox mlx5_ifc command buffer + * @in_size: inbox buffer size + * @out: outbox mlx5_ifc buffer + * @out_size: outbox size + * + * @return: 0 if no error, FW command execution was successful + * and outbox status is ok. + */ int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size) { - int err; + int err = mlx5_cmd_do(dev, in, in_size, out, out_size); - err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, false); - return err ? 
: mlx5_cmd_check(dev, in, out); + return mlx5_cmd_check(dev, err, in, out); } EXPORT_SYMBOL(mlx5_cmd_exec); +/** + * mlx5_cmd_exec_polling - Executes a fw command, poll for completion + * Needed for driver force teardown, when command completion EQ + * will not be available to complete the command + * + * @dev: mlx5 core device + * @in: inbox mlx5_ifc command buffer + * @in_size: inbox buffer size + * @out: outbox mlx5_ifc buffer + * @out_size: outbox size + * + * @return: 0 if no error, FW command execution was successful + * and outbox status is ok. + */ +int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size, + void *out, int out_size) +{ + int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, true); + u16 opcode = MLX5_GET(mbox_in, in, opcode); + + err = cmd_status_err(dev, err, opcode, out); + return mlx5_cmd_check(dev, err, in, out); +} +EXPORT_SYMBOL(mlx5_cmd_exec_polling); + void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev, struct mlx5_async_ctx *ctx) { @@ -1894,8 +2013,10 @@ EXPORT_SYMBOL(mlx5_cmd_cleanup_async_ctx); static void mlx5_cmd_exec_cb_handler(int status, void *_work) { struct mlx5_async_work *work = _work; - struct mlx5_async_ctx *ctx = work->ctx; + struct mlx5_async_ctx *ctx; + ctx = work->ctx; + status = cmd_status_err(ctx->dev, status, work->opcode, work->out); work->user_callback(status, work); if (atomic_dec_and_test(&ctx->num_inflight)) wake_up(&ctx->wait); @@ -1909,6 +2030,8 @@ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size, work->ctx = ctx; work->user_callback = callback; + work->opcode = MLX5_GET(mbox_in, in, opcode); + work->out = out; if (WARN_ON(!atomic_inc_not_zero(&ctx->num_inflight))) return -EIO; ret = cmd_exec(ctx->dev, in, in_size, out, out_size, @@ -1920,17 +2043,6 @@ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size, } EXPORT_SYMBOL(mlx5_cmd_exec_cb); -int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size, - void *out, int out_size) -{ - int err; - - err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, true); - - return err ? 
: mlx5_cmd_check(dev, in, out); -} -EXPORT_SYMBOL(mlx5_cmd_exec_polling); - static void destroy_msg_cache(struct mlx5_core_dev *dev) { struct cmd_msg_cache *ch; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c index 5371ad0a12eb..4caa1b6f40ba 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c @@ -31,7 +31,6 @@ */ #include <linux/kernel.h> -#include <linux/module.h> #include <linux/hardirq.h> #include <linux/mlx5/driver.h> #include <rdma/ib_verbs.h> @@ -86,8 +85,9 @@ static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq, spin_unlock_irqrestore(&tasklet_ctx->lock, flags); } -int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, - u32 *in, int inlen, u32 *out, int outlen) +/* Callers must verify outbox status in case of err */ +int mlx5_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, + u32 *in, int inlen, u32 *out, int outlen) { int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context), c_eqn_or_apu_element); @@ -101,7 +101,7 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, memset(out, 0, outlen); MLX5_SET(create_cq_in, in, opcode, MLX5_CMD_OP_CREATE_CQ); - err = mlx5_cmd_exec(dev, in, inlen, out, outlen); + err = mlx5_cmd_do(dev, in, inlen, out, outlen); if (err) return err; @@ -148,6 +148,16 @@ err_cmd: mlx5_cmd_exec_in(dev, destroy_cq, din); return err; } +EXPORT_SYMBOL(mlx5_create_cq); + +/* outbox is checked and err val is normalized */ +int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, + u32 *in, int inlen, u32 *out, int outlen) +{ + int err = mlx5_create_cq(dev, cq, in, inlen, out, outlen); + + return mlx5_cmd_check(dev, err, in, out); +} EXPORT_SYMBOL(mlx5_core_create_cq); int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c index 10d195042ab5..3d3e55a5cb11 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c @@ -30,7 +30,6 @@ * SOFTWARE. 
*/ -#include <linux/module.h> #include <linux/debugfs.h> #include <linux/mlx5/qp.h> #include <linux/mlx5/cq.h> @@ -99,26 +98,32 @@ void mlx5_unregister_debugfs(void) debugfs_remove(mlx5_debugfs_root); } +struct dentry *mlx5_debugfs_get_dev_root(struct mlx5_core_dev *dev) +{ + return dev->priv.dbg.dbg_root; +} +EXPORT_SYMBOL(mlx5_debugfs_get_dev_root); + void mlx5_qp_debugfs_init(struct mlx5_core_dev *dev) { - dev->priv.qp_debugfs = debugfs_create_dir("QPs", dev->priv.dbg_root); + dev->priv.dbg.qp_debugfs = debugfs_create_dir("QPs", dev->priv.dbg.dbg_root); } EXPORT_SYMBOL(mlx5_qp_debugfs_init); void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev) { - debugfs_remove_recursive(dev->priv.qp_debugfs); + debugfs_remove_recursive(dev->priv.dbg.qp_debugfs); } EXPORT_SYMBOL(mlx5_qp_debugfs_cleanup); void mlx5_eq_debugfs_init(struct mlx5_core_dev *dev) { - dev->priv.eq_debugfs = debugfs_create_dir("EQs", dev->priv.dbg_root); + dev->priv.dbg.eq_debugfs = debugfs_create_dir("EQs", dev->priv.dbg.dbg_root); } void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev) { - debugfs_remove_recursive(dev->priv.eq_debugfs); + debugfs_remove_recursive(dev->priv.dbg.eq_debugfs); } static ssize_t average_read(struct file *filp, char __user *buf, size_t count, @@ -168,8 +173,8 @@ void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev) const char *namep; int i; - cmd = &dev->priv.cmdif_debugfs; - *cmd = debugfs_create_dir("commands", dev->priv.dbg_root); + cmd = &dev->priv.dbg.cmdif_debugfs; + *cmd = debugfs_create_dir("commands", dev->priv.dbg.dbg_root); for (i = 0; i < MLX5_CMD_OP_MAX; i++) { stats = &dev->cmd.stats[i]; @@ -180,23 +185,51 @@ void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev) debugfs_create_file("average", 0400, stats->root, stats, &stats_fops); debugfs_create_u64("n", 0400, stats->root, &stats->n); + debugfs_create_u64("failed", 0400, stats->root, &stats->failed); + debugfs_create_u64("failed_mbox_status", 0400, stats->root, + &stats->failed_mbox_status); + debugfs_create_u32("last_failed_errno", 0400, stats->root, + &stats->last_failed_errno); + debugfs_create_u8("last_failed_mbox_status", 0400, stats->root, + &stats->last_failed_mbox_status); } } } void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev) { - debugfs_remove_recursive(dev->priv.cmdif_debugfs); + debugfs_remove_recursive(dev->priv.dbg.cmdif_debugfs); } void mlx5_cq_debugfs_init(struct mlx5_core_dev *dev) { - dev->priv.cq_debugfs = debugfs_create_dir("CQs", dev->priv.dbg_root); + dev->priv.dbg.cq_debugfs = debugfs_create_dir("CQs", dev->priv.dbg.dbg_root); } void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev) { - debugfs_remove_recursive(dev->priv.cq_debugfs); + debugfs_remove_recursive(dev->priv.dbg.cq_debugfs); +} + +void mlx5_pages_debugfs_init(struct mlx5_core_dev *dev) +{ + struct dentry *pages; + + dev->priv.dbg.pages_debugfs = debugfs_create_dir("pages", dev->priv.dbg.dbg_root); + pages = dev->priv.dbg.pages_debugfs; + + debugfs_create_u32("fw_pages_total", 0400, pages, &dev->priv.fw_pages); + debugfs_create_u32("fw_pages_vfs", 0400, pages, &dev->priv.vfs_pages); + debugfs_create_u32("fw_pages_host_pf", 0400, pages, &dev->priv.host_pf_pages); + debugfs_create_u32("fw_pages_alloc_failed", 0400, pages, &dev->priv.fw_pages_alloc_failed); + debugfs_create_u32("fw_pages_give_dropped", 0400, pages, &dev->priv.give_pages_dropped); + debugfs_create_u32("fw_pages_reclaim_discard", 0400, pages, + &dev->priv.reclaim_pages_discard); +} + +void mlx5_pages_debugfs_cleanup(struct mlx5_core_dev *dev) +{ + 
debugfs_remove_recursive(dev->priv.dbg.pages_debugfs); } static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, @@ -441,7 +474,7 @@ int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp) if (!mlx5_debugfs_root) return 0; - err = add_res_tree(dev, MLX5_DBG_RSC_QP, dev->priv.qp_debugfs, + err = add_res_tree(dev, MLX5_DBG_RSC_QP, dev->priv.dbg.qp_debugfs, &qp->dbg, qp->qpn, qp_fields, ARRAY_SIZE(qp_fields), qp); if (err) @@ -468,7 +501,7 @@ int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq) if (!mlx5_debugfs_root) return 0; - err = add_res_tree(dev, MLX5_DBG_RSC_EQ, dev->priv.eq_debugfs, + err = add_res_tree(dev, MLX5_DBG_RSC_EQ, dev->priv.dbg.eq_debugfs, &eq->dbg, eq->eqn, eq_fields, ARRAY_SIZE(eq_fields), eq); if (err) @@ -493,7 +526,7 @@ int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) if (!mlx5_debugfs_root) return 0; - err = add_res_tree(dev, MLX5_DBG_RSC_CQ, dev->priv.cq_debugfs, + err = add_res_tree(dev, MLX5_DBG_RSC_CQ, dev->priv.dbg.cq_debugfs, &cq->dbg, cq->cqn, cq_fields, ARRAY_SIZE(cq_fields), cq); if (err) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c index d1093bb2d436..057dde6f4417 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c @@ -100,15 +100,11 @@ static int mlx5_devlink_reload_fw_activate(struct devlink *devlink, struct netli } net_port_alive = !!(reset_type & MLX5_MFRL_REG_RESET_TYPE_NET_PORT_ALIVE); - err = mlx5_fw_reset_set_reset_sync(dev, net_port_alive); + err = mlx5_fw_reset_set_reset_sync(dev, net_port_alive, extack); if (err) - goto out; + return err; - err = mlx5_fw_reset_wait_reset_done(dev); -out: - if (err) - NL_SET_ERR_MSG_MOD(extack, "FW activate command failed"); - return err; + return mlx5_fw_reset_wait_reset_done(dev); } static int mlx5_devlink_trigger_fw_live_patch(struct devlink *devlink, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index c14e06ca64d8..2704c7537481 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -59,6 +59,7 @@ #include "lib/hv_vhca.h" #include "lib/clock.h" #include "en/rx_res.h" +#include "en/selq.h" extern const struct net_device_ops mlx5e_netdev_ops; struct page_pool; @@ -172,8 +173,9 @@ struct page_pool; #define MLX5E_KLM_ENTRIES_PER_WQE(wqe_size)\ ALIGN_DOWN(MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size), MLX5_UMR_KLM_ALIGNMENT) -#define MLX5E_MAX_KLM_PER_WQE \ - MLX5E_KLM_ENTRIES_PER_WQE(MLX5E_TX_MPW_MAX_NUM_DS << MLX5_MKEY_BSF_OCTO_SIZE) +#define MLX5E_MAX_KLM_PER_WQE(mdev) \ + MLX5E_KLM_ENTRIES_PER_WQE(mlx5e_get_sw_max_sq_mpw_wqebbs(mlx5e_get_max_sq_wqebbs(mdev)) \ + << MLX5_MKEY_BSF_OCTO_SIZE) #define MLX5E_MSG_LEVEL NETIF_MSG_LINK @@ -221,6 +223,32 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev) min_t(int, mlx5_comp_vectors_count(mdev), MLX5E_MAX_NUM_CHANNELS); } +/* The maximum WQE size can be retrieved by max_wqe_sz_sq in + * bytes units. Driver hardens the limitation to 1KB (16 + * WQEBBs), unless firmware capability is stricter. + */ +static inline u16 mlx5e_get_max_sq_wqebbs(struct mlx5_core_dev *mdev) +{ + return min_t(u16, MLX5_SEND_WQE_MAX_WQEBBS, + MLX5_CAP_GEN(mdev, max_wqe_sz_sq) / MLX5_SEND_WQE_BB); +} + +static inline u16 mlx5e_get_sw_max_sq_mpw_wqebbs(u16 max_sq_wqebbs) +{ +/* The return value will be multiplied by MLX5_SEND_WQEBB_NUM_DS. 
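+ * (MLX5_SEND_WQEBB_NUM_DS is 4: each 64-byte WQEBB carries four 16-byte data segments.)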
+ * Since max_sq_wqebbs may be up to MLX5_SEND_WQE_MAX_WQEBBS == 16, + * see mlx5e_get_max_sq_wqebbs(), the multiplication (16 * 4 == 64) + * overflows the 6-bit DS field of Ctrl Segment. Use a bound lower + * than MLX5_SEND_WQE_MAX_WQEBBS to let a full-session WQE be + * cache-aligned. + */ +#if L1_CACHE_BYTES < 128 + return min_t(u16, max_sq_wqebbs, MLX5_SEND_WQE_MAX_WQEBBS - 1); +#else + return min_t(u16, max_sq_wqebbs, MLX5_SEND_WQE_MAX_WQEBBS - 2); +#endif +} + struct mlx5e_tx_wqe { struct mlx5_wqe_ctrl_seg ctrl; struct mlx5_wqe_eth_seg eth; @@ -427,12 +455,12 @@ struct mlx5e_txqsq { struct netdev_queue *txq; u32 sqn; u16 stop_room; + u16 max_sq_mpw_wqebbs; u8 min_inline_mode; struct device *pdev; __be32 mkey_be; unsigned long state; unsigned int hw_mtu; - struct hwtstamp_config *tstamp; struct mlx5_clock *clock; struct net_device *netdev; struct mlx5_core_dev *mdev; @@ -446,6 +474,7 @@ struct mlx5e_txqsq { struct work_struct recover_work; struct mlx5e_ptpsq *ptpsq; cqe_ts_to_ns ptp_cyc2time; + u16 max_sq_wqebbs; } ____cacheline_aligned_in_smp; struct mlx5e_dma_info { @@ -540,6 +569,8 @@ struct mlx5e_xdpsq { u32 sqn; struct device *pdev; __be32 mkey_be; + u16 stop_room; + u16 max_sq_mpw_wqebbs; u8 min_inline_mode; unsigned long state; unsigned int hw_mtu; @@ -547,6 +578,7 @@ struct mlx5e_xdpsq { /* control path */ struct mlx5_wq_ctrl wq_ctrl; struct mlx5e_channel *channel; + u16 max_sq_wqebbs; } ____cacheline_aligned_in_smp; struct mlx5e_ktls_resync_resp; @@ -575,6 +607,7 @@ struct mlx5e_icosq { /* control path */ struct mlx5_wq_ctrl wq_ctrl; struct mlx5e_channel *channel; + u16 max_sq_wqebbs; struct work_struct recover_work; } ____cacheline_aligned_in_smp; @@ -681,6 +714,7 @@ struct mlx5e_rq { u8 umr_in_progress; u8 umr_last_bulk; u8 umr_completed; + u8 min_wqe_bulk; struct mlx5e_shampo_hd *shampo; } mpwqe; }; @@ -876,9 +910,8 @@ struct mlx5e_trap; struct mlx5e_priv { /* priv data path fields - start */ + struct mlx5e_selq selq; struct mlx5e_txqsq **txq2sq; - int **channel_tc2realtxq; - int port_ptp_tc2realtxq[MLX5E_MAX_NUM_TC]; #ifdef CONFIG_MLX5_CORE_EN_DCB struct mlx5e_dcbx_dp dcbx_dp; #endif @@ -921,7 +954,6 @@ struct mlx5e_priv { u16 drop_rq_q_counter; struct notifier_block events_nb; struct notifier_block blocking_events_nb; - int num_tc_x_num_ch; struct udp_tunnel_nic_info nic_info; #ifdef CONFIG_MLX5_CORE_EN_DCB diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c index 66180ffb4606..0bd8698f7226 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c @@ -178,6 +178,12 @@ u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev, mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk); } +u8 mlx5e_mpwqe_get_min_wqe_bulk(unsigned int wq_sz) +{ +#define UMR_WQE_BULK (2) + return min_t(unsigned int, UMR_WQE_BULK, wq_sz / 2 - 1); +} + u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev, struct mlx5e_params *params, struct mlx5e_xsk_param *xsk) @@ -196,13 +202,13 @@ u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *par u16 stop_room; stop_room = mlx5e_tls_get_stop_room(mdev, params); - stop_room += mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS); + stop_room += mlx5e_stop_room_for_max_wqe(mdev); if (is_mpwqe) /* A MPWQE can take up to the maximum-sized WQE + all the normal * stop room can be taken if a new packet breaks the active * MPWQE session and allocates its WQEs right away. 
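 * (Worst case this reserves two maximum-size WQEs of stop room on an MPWQE SQ: one for the interrupted MPWQE session and one for the packet that broke it, on top of the TLS stop room above.)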
*/ - stop_room += mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS); + stop_room += mlx5e_stop_room_for_max_wqe(mdev); return stop_room; } @@ -359,12 +365,13 @@ void mlx5e_build_rq_params(struct mlx5_core_dev *mdev, { /* Prefer Striding RQ, unless any of the following holds: * - Striding RQ configuration is not possible/supported. - * - Slow PCI heuristic. + * - CQE compression is ON, and stride_index mini_cqe layout is not supported. * - Legacy RQ would use linear SKB while Striding RQ would use non-linear. * * No XSK params: checking the availability of striding RQ in general. */ - if (!slow_pci_heuristic(mdev) && + if ((!MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS) || + MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index)) && mlx5e_striding_rq_possible(mdev, params) && (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) || !mlx5e_rx_is_linear_skb(params, NULL))) @@ -717,7 +724,7 @@ static u32 mlx5e_shampo_icosq_sz(struct mlx5_core_dev *mdev, int wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz)); u32 wqebbs; - max_klm_per_umr = MLX5E_MAX_KLM_PER_WQE; + max_klm_per_umr = MLX5E_MAX_KLM_PER_WQE(mdev); max_hd_per_wqe = mlx5e_shampo_hd_per_wqe(mdev, params, rq_param); max_num_of_umr_per_wqe = max_hd_per_wqe / max_klm_per_umr; rest = max_hd_per_wqe % max_klm_per_umr; @@ -774,10 +781,10 @@ static void mlx5e_build_async_icosq_param(struct mlx5_core_dev *mdev, void *wq = MLX5_ADDR_OF(sqc, sqc, wq); mlx5e_build_sq_param_common(mdev, param); - param->stop_room = mlx5e_stop_room_for_wqe(1); /* for XSK NOP */ + param->stop_room = mlx5e_stop_room_for_wqe(mdev, 1); /* for XSK NOP */ param->is_tls = mlx5e_accel_is_ktls_rx(mdev); if (param->is_tls) - param->stop_room += mlx5e_stop_room_for_wqe(1); /* for TLS RX resync NOP */ + param->stop_room += mlx5e_stop_room_for_wqe(mdev, 1); /* for TLS RX resync NOP */ MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(mdev, reg_umr_sq)); MLX5_SET(wq, wq, log_wq_sz, log_wq_size); mlx5e_build_ico_cq_param(mdev, log_wq_size, ¶m->cqp); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h index 433e6967692d..47a368112e31 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h @@ -129,6 +129,7 @@ u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev, u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev, struct mlx5e_params *params, struct mlx5e_xsk_param *xsk); +u8 mlx5e_mpwqe_get_min_wqe_bulk(unsigned int wq_sz); u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev, struct mlx5e_params *params, struct mlx5e_xsk_param *xsk); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c index 82baafd3c00c..335b20b6383b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c @@ -195,7 +195,6 @@ static int mlx5e_ptp_alloc_txqsq(struct mlx5e_ptp *c, int txq_ix, int node; sq->pdev = c->pdev; - sq->tstamp = c->tstamp; sq->clock = &mdev->clock; sq->mkey_be = c->mkey_be; sq->netdev = c->netdev; @@ -449,7 +448,7 @@ static void mlx5e_ptp_build_sq_param(struct mlx5_core_dev *mdev, wq = MLX5_ADDR_OF(sqc, sqc, wq); MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size); - param->stop_room = mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS); + param->stop_room = mlx5e_stop_room_for_max_wqe(mdev); mlx5e_build_tx_cq_param(mdev, params, ¶m->cqp); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c 
b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c index c1e07496c89c..9db677e9ca9c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c @@ -50,7 +50,6 @@ static int mlx5e_find_unused_qos_qid(struct mlx5e_priv *priv) struct mlx5e_qos_node { struct hlist_node hnode; - struct rcu_head rcu; struct mlx5e_qos_node *parent; u64 rate; u32 bw_share; @@ -132,7 +131,11 @@ static void mlx5e_sw_node_delete(struct mlx5e_priv *priv, struct mlx5e_qos_node __clear_bit(node->qid, priv->htb.qos_used_qids); mlx5e_update_tx_netdev_queues(priv); } - kfree_rcu(node, rcu); + /* Make sure this qid is no longer selected by mlx5e_select_queue, so + * that mlx5e_reactivate_qos_sq can safely restart the netdev TX queue. + */ + synchronize_net(); + kfree(node); } /* TX datapath API */ @@ -273,10 +276,18 @@ err_free_sq: static void mlx5e_activate_qos_sq(struct mlx5e_priv *priv, struct mlx5e_qos_node *node) { struct mlx5e_txqsq *sq; + u16 qid; sq = mlx5e_get_qos_sq(priv, node->qid); - WRITE_ONCE(priv->txq2sq[mlx5e_qid_from_qos(&priv->channels, node->qid)], sq); + qid = mlx5e_qid_from_qos(&priv->channels, node->qid); + + /* If it's a new queue, it will be marked as started at this point. + * Stop it before updating txq2sq. + */ + mlx5e_tx_disable_queue(netdev_get_tx_queue(priv->netdev, qid)); + + priv->txq2sq[qid] = sq; /* Make the change to txq2sq visible before the queue is started. * As mlx5e_xmit runs under a spinlock, there is an implicit ACQUIRE, @@ -299,8 +310,13 @@ static void mlx5e_deactivate_qos_sq(struct mlx5e_priv *priv, u16 qid) qos_dbg(priv->mdev, "Deactivate QoS SQ qid %u\n", qid); mlx5e_deactivate_txqsq(sq); - /* The queue is disabled, no synchronization with datapath is needed. */ priv->txq2sq[mlx5e_qid_from_qos(&priv->channels, qid)] = NULL; + + /* Make the change to txq2sq visible before the queue is started again. + * As mlx5e_xmit runs under a spinlock, there is an implicit ACQUIRE, + * which pairs with this barrier. + */ + smp_wmb(); } static void mlx5e_close_qos_sq(struct mlx5e_priv *priv, u16 qid) @@ -485,9 +501,11 @@ int mlx5e_htb_root_add(struct mlx5e_priv *priv, u16 htb_maj_id, u16 htb_defcls, opened = test_bit(MLX5E_STATE_OPENED, &priv->state); if (opened) { + mlx5e_selq_prepare(&priv->selq, &priv->channels.params, true); + err = mlx5e_qos_alloc_queues(priv, &priv->channels); if (err) - return err; + goto err_cancel_selq; } root = mlx5e_sw_node_create_root(priv); @@ -508,6 +526,9 @@ int mlx5e_htb_root_add(struct mlx5e_priv *priv, u16 htb_maj_id, u16 htb_defcls, */ smp_store_release(&priv->htb.maj_id, htb_maj_id); + if (opened) + mlx5e_selq_apply(&priv->selq); + return 0; err_sw_node_delete: @@ -516,6 +537,8 @@ err_sw_node_delete: err_free_queues: if (opened) mlx5e_qos_close_all_queues(&priv->channels); +err_cancel_selq: + mlx5e_selq_cancel(&priv->selq); return err; } @@ -526,8 +549,15 @@ int mlx5e_htb_root_del(struct mlx5e_priv *priv) qos_dbg(priv->mdev, "TC_HTB_DESTROY\n"); + /* Wait until real_num_tx_queues is updated for mlx5e_select_queue, + * so that we can safely switch to its non-HTB non-PTP fastpath. + */ + synchronize_net(); + + mlx5e_selq_prepare(&priv->selq, &priv->channels.params, false); + mlx5e_selq_apply(&priv->selq); + WRITE_ONCE(priv->htb.maj_id, 0); - synchronize_rcu(); /* Sync with mlx5e_select_htb_queue and TX data path. 
*/ root = mlx5e_sw_node_find(priv, MLX5E_HTB_CLASSID_ROOT); if (!root) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c index 0991345c4ae5..86fa0bdbee36 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c @@ -263,14 +263,14 @@ int mlx5e_rep_tc_init(struct mlx5e_rep_priv *rpriv) INIT_LIST_HEAD(&uplink_priv->unready_flows); /* init shared tc flow table */ - err = mlx5e_tc_esw_init(&uplink_priv->tc_ht); + err = mlx5e_tc_esw_init(uplink_priv); return err; } void mlx5e_rep_tc_cleanup(struct mlx5e_rep_priv *rpriv) { /* delete shared tc flow table */ - mlx5e_tc_esw_cleanup(&rpriv->uplink_priv.tc_ht); + mlx5e_tc_esw_cleanup(&rpriv->uplink_priv); mutex_destroy(&rpriv->uplink_priv.unready_flows_lock); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c index c1cdd8c2e37a..7f93426b88b3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c @@ -442,7 +442,7 @@ int mlx5e_rss_packet_merge_set_param(struct mlx5e_rss *rss, goto inner_tir; err = mlx5e_tir_modify(tir, builder); if (err) { - mlx5e_rss_warn(rss->mdev, "Failed to update LRO state of indirect TIR %#x for traffic type %d: err = %d\n", + mlx5e_rss_warn(rss->mdev, "Failed to update packet merge state of indirect TIR %#x for traffic type %d: err = %d\n", mlx5e_tir_get_tirn(tir), tt, err); if (!final_err) final_err = err; @@ -457,7 +457,7 @@ inner_tir: continue; err = mlx5e_tir_modify(tir, builder); if (err) { - mlx5e_rss_warn(rss->mdev, "Failed to update LRO state of inner indirect TIR %#x for traffic type %d: err = %d\n", + mlx5e_rss_warn(rss->mdev, "Failed to update packet merge state of inner indirect TIR %#x for traffic type %d: err = %d\n", mlx5e_tir_get_tirn(tir), tt, err); if (!final_err) final_err = err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c b/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c new file mode 100644 index 000000000000..d98a277eb7f8 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c @@ -0,0 +1,231 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */ + +#include "selq.h" +#include <linux/slab.h> +#include <linux/netdevice.h> +#include <linux/rcupdate.h> +#include "en.h" +#include "en/ptp.h" + +struct mlx5e_selq_params { + unsigned int num_regular_queues; + unsigned int num_channels; + unsigned int num_tcs; + union { + u8 is_special_queues; + struct { + bool is_htb : 1; + bool is_ptp : 1; + }; + }; +}; + +int mlx5e_selq_init(struct mlx5e_selq *selq, struct mutex *state_lock) +{ + struct mlx5e_selq_params *init_params; + + selq->state_lock = state_lock; + + selq->standby = kvzalloc(sizeof(*selq->standby), GFP_KERNEL); + if (!selq->standby) + return -ENOMEM; + + init_params = kvzalloc(sizeof(*selq->active), GFP_KERNEL); + if (!init_params) { + kvfree(selq->standby); + selq->standby = NULL; + return -ENOMEM; + } + /* Assign dummy values, so that mlx5e_select_queue won't crash. 
*/ + *init_params = (struct mlx5e_selq_params) { + .num_regular_queues = 1, + .num_channels = 1, + .num_tcs = 1, + .is_htb = false, + .is_ptp = false, + }; + rcu_assign_pointer(selq->active, init_params); + + return 0; +} + +void mlx5e_selq_cleanup(struct mlx5e_selq *selq) +{ + WARN_ON_ONCE(selq->is_prepared); + + kvfree(selq->standby); + selq->standby = NULL; + selq->is_prepared = true; + + mlx5e_selq_apply(selq); + + kvfree(selq->standby); + selq->standby = NULL; +} + +void mlx5e_selq_prepare(struct mlx5e_selq *selq, struct mlx5e_params *params, bool htb) +{ + lockdep_assert_held(selq->state_lock); + WARN_ON_ONCE(selq->is_prepared); + + selq->is_prepared = true; + + selq->standby->num_channels = params->num_channels; + selq->standby->num_tcs = mlx5e_get_dcb_num_tc(params); + selq->standby->num_regular_queues = + selq->standby->num_channels * selq->standby->num_tcs; + selq->standby->is_htb = htb; + selq->standby->is_ptp = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_TX_PORT_TS); +} + +void mlx5e_selq_apply(struct mlx5e_selq *selq) +{ + struct mlx5e_selq_params *old_params; + + WARN_ON_ONCE(!selq->is_prepared); + + selq->is_prepared = false; + + old_params = rcu_replace_pointer(selq->active, selq->standby, + lockdep_is_held(selq->state_lock)); + synchronize_net(); /* Wait until ndo_select_queue starts emitting correct values. */ + selq->standby = old_params; +} + +void mlx5e_selq_cancel(struct mlx5e_selq *selq) +{ + lockdep_assert_held(selq->state_lock); + WARN_ON_ONCE(!selq->is_prepared); + + selq->is_prepared = false; +} + +#ifdef CONFIG_MLX5_CORE_EN_DCB +static int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb) +{ + int dscp_cp = 0; + + if (skb->protocol == htons(ETH_P_IP)) + dscp_cp = ipv4_get_dsfield(ip_hdr(skb)) >> 2; + else if (skb->protocol == htons(ETH_P_IPV6)) + dscp_cp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2; + + return priv->dcbx_dp.dscp2prio[dscp_cp]; +} +#endif + +static int mlx5e_get_up(struct mlx5e_priv *priv, struct sk_buff *skb) +{ +#ifdef CONFIG_MLX5_CORE_EN_DCB + if (READ_ONCE(priv->dcbx_dp.trust_state) == MLX5_QPTS_TRUST_DSCP) + return mlx5e_get_dscp_up(priv, skb); +#endif + if (skb_vlan_tag_present(skb)) + return skb_vlan_tag_get_prio(skb); + return 0; +} + +static u16 mlx5e_select_ptpsq(struct net_device *dev, struct sk_buff *skb, + struct mlx5e_selq_params *selq) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + int up; + + up = selq->num_tcs > 1 ? mlx5e_get_up(priv, skb) : 0; + + return selq->num_regular_queues + up; +} + +static int mlx5e_select_htb_queue(struct mlx5e_priv *priv, struct sk_buff *skb) +{ + u16 classid; + + /* Order maj_id before defcls - pairs with mlx5e_htb_root_add. */ + if ((TC_H_MAJ(skb->priority) >> 16) == smp_load_acquire(&priv->htb.maj_id)) + classid = TC_H_MIN(skb->priority); + else + classid = READ_ONCE(priv->htb.defcls); + + if (!classid) + return 0; + + return mlx5e_get_txq_by_classid(priv, classid); +} + +u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5e_selq_params *selq; + int txq_ix, up; + + selq = rcu_dereference_bh(priv->selq.active); + + /* This is a workaround needed only for the mlx5e_netdev_change_profile + * flow that zeroes out the whole priv without unregistering the netdev + * and without preventing ndo_select_queue from being called. + */ + if (unlikely(!selq)) + return 0; + + if (likely(!selq->is_special_queues)) { + /* No special queues, netdev_pick_tx returns one of the regular ones. 
*/ + + txq_ix = netdev_pick_tx(dev, skb, NULL); + + if (selq->num_tcs <= 1) + return txq_ix; + + up = mlx5e_get_up(priv, skb); + + /* Normalize any picked txq_ix to [0, num_channels), + * So we can return a txq_ix that matches the channel and + * packet UP. + */ + return mlx5e_txq_to_ch_ix(txq_ix, selq->num_channels) + + up * selq->num_channels; + } + + if (unlikely(selq->is_htb)) { + /* num_tcs == 1, shortcut for PTP */ + + txq_ix = mlx5e_select_htb_queue(priv, skb); + if (txq_ix > 0) + return txq_ix; + + if (unlikely(selq->is_ptp && mlx5e_use_ptpsq(skb))) + return selq->num_channels; + + txq_ix = netdev_pick_tx(dev, skb, NULL); + + /* Fix netdev_pick_tx() not to choose ptp_channel and HTB txqs. + * If they are selected, switch to regular queues. + * Driver to select these queues only at mlx5e_select_ptpsq() + * and mlx5e_select_htb_queue(). + */ + return mlx5e_txq_to_ch_ix_htb(txq_ix, selq->num_channels); + } + + /* PTP is enabled */ + + if (mlx5e_use_ptpsq(skb)) + return mlx5e_select_ptpsq(dev, skb, selq); + + txq_ix = netdev_pick_tx(dev, skb, NULL); + + /* Normalize any picked txq_ix to [0, num_channels). Queues in range + * [0, num_regular_queues) will be mapped to the corresponding channel + * index, so that we can apply the packet's UP (if num_tcs > 1). + * If netdev_pick_tx() picks ptp_channel, switch to a regular queue, + * because driver should select the PTP only at mlx5e_select_ptpsq(). + */ + txq_ix = mlx5e_txq_to_ch_ix(txq_ix, selq->num_channels); + + if (selq->num_tcs <= 1) + return txq_ix; + + up = mlx5e_get_up(priv, skb); + + return txq_ix + up * selq->num_channels; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/selq.h b/drivers/net/ethernet/mellanox/mlx5/core/en/selq.h new file mode 100644 index 000000000000..6c070141d8f1 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/selq.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
*/ + +#ifndef __MLX5_EN_SELQ_H__ +#define __MLX5_EN_SELQ_H__ + +#include <linux/kernel.h> + +struct mlx5e_selq_params; + +struct mlx5e_selq { + struct mlx5e_selq_params __rcu *active; + struct mlx5e_selq_params *standby; + struct mutex *state_lock; /* points to priv->state_lock */ + bool is_prepared; +}; + +struct mlx5e_params; +struct net_device; +struct sk_buff; + +int mlx5e_selq_init(struct mlx5e_selq *selq, struct mutex *state_lock); +void mlx5e_selq_cleanup(struct mlx5e_selq *selq); +void mlx5e_selq_prepare(struct mlx5e_selq *selq, struct mlx5e_params *params, bool htb); +void mlx5e_selq_apply(struct mlx5e_selq *selq); +void mlx5e_selq_cancel(struct mlx5e_selq *selq); + +static inline u16 mlx5e_txq_to_ch_ix(u16 txq, u16 num_channels) +{ + while (unlikely(txq >= num_channels)) + txq -= num_channels; + return txq; +} + +static inline u16 mlx5e_txq_to_ch_ix_htb(u16 txq, u16 num_channels) +{ + if (unlikely(txq >= num_channels)) { + if (unlikely(txq >= num_channels << 3)) + txq %= num_channels; + else + do + txq -= num_channels; + while (txq >= num_channels); + } + return txq; +} + +u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev); + +#endif /* __MLX5_EN_SELQ_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/accept.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/accept.c index 2b53738938a9..21aab96357b5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/accept.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/accept.c @@ -19,8 +19,7 @@ tc_act_parse_accept(struct mlx5e_tc_act_parse_state *parse_state, struct mlx5e_priv *priv, struct mlx5_flow_attr *attr) { - attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | - MLX5_FLOW_CONTEXT_ACTION_COUNT; + attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; attr->flags |= MLX5_ATTR_FLAG_ACCEPT; return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c index e600924e30ea..cb8f7593a00c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c @@ -2,6 +2,7 @@ // Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. #include "act.h" +#include "en/tc/post_act.h" #include "en/tc_priv.h" #include "mlx5_core.h" @@ -101,3 +102,75 @@ mlx5e_tc_act_init_parse_state(struct mlx5e_tc_act_parse_state *parse_state, parse_state->num_actions = flow_action->num_entries; parse_state->extack = extack; } + +void +mlx5e_tc_act_reorder_flow_actions(struct flow_action *flow_action, + struct mlx5e_tc_flow_action *flow_action_reorder) +{ + struct flow_action_entry *act; + int i, j = 0; + + flow_action_for_each(i, act, flow_action) { + /* Add CT action to be first. 
*/ + if (act->id == FLOW_ACTION_CT) + flow_action_reorder->entries[j++] = act; + } + + flow_action_for_each(i, act, flow_action) { + if (act->id == FLOW_ACTION_CT) + continue; + flow_action_reorder->entries[j++] = act; + } +} + +int +mlx5e_tc_act_post_parse(struct mlx5e_tc_act_parse_state *parse_state, + struct flow_action *flow_action, + struct mlx5_flow_attr *attr, + enum mlx5_flow_namespace_type ns_type) +{ + struct flow_action_entry *act; + struct mlx5e_tc_act *tc_act; + struct mlx5e_priv *priv; + int err = 0, i; + + priv = parse_state->flow->priv; + + flow_action_for_each(i, act, flow_action) { + tc_act = mlx5e_tc_act_get(act->id, ns_type); + if (!tc_act || !tc_act->post_parse || + !tc_act->can_offload(parse_state, act, i, attr)) + continue; + + err = tc_act->post_parse(parse_state, priv, attr); + if (err) + goto out; + } + +out: + return err; +} + +int +mlx5e_tc_act_set_next_post_act(struct mlx5e_tc_flow *flow, + struct mlx5_flow_attr *attr, + struct mlx5_flow_attr *next_attr) +{ + struct mlx5_core_dev *mdev = flow->priv->mdev; + struct mlx5e_tc_mod_hdr_acts *mod_acts; + int err; + + mod_acts = &attr->parse_attr->mod_hdr_acts; + + /* Set handle on current post act rule to next post act rule. */ + err = mlx5e_tc_post_act_set_handle(mdev, next_attr->post_act_handle, mod_acts); + if (err) { + mlx5_core_warn(mdev, "Failed setting post action handle"); + return err; + } + + attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | + MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; + + return 0; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h index bfbc91c116a5..94a7cf38d6b1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h @@ -16,12 +16,15 @@ struct mlx5e_tc_act_parse_state { unsigned int num_actions; struct mlx5e_tc_flow *flow; struct netlink_ext_ack *extack; + u32 actions; bool ct; + bool ct_clear; bool encap; bool decap; bool mpls_push; bool ptype_host; const struct ip_tunnel_info *tun_info; + struct mlx5e_mpls_info mpls_info; int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS]; int if_count; struct mlx5_tc_ct_priv *ct_priv; @@ -41,6 +44,15 @@ struct mlx5e_tc_act { int (*post_parse)(struct mlx5e_tc_act_parse_state *parse_state, struct mlx5e_priv *priv, struct mlx5_flow_attr *attr); + + bool (*is_multi_table_act)(struct mlx5e_priv *priv, + const struct flow_action_entry *act, + struct mlx5_flow_attr *attr); +}; + +struct mlx5e_tc_flow_action { + unsigned int num_entries; + struct flow_action_entry **entries; }; extern struct mlx5e_tc_act mlx5e_tc_act_drop; @@ -73,4 +85,19 @@ mlx5e_tc_act_init_parse_state(struct mlx5e_tc_act_parse_state *parse_state, struct flow_action *flow_action, struct netlink_ext_ack *extack); +void +mlx5e_tc_act_reorder_flow_actions(struct flow_action *flow_action, + struct mlx5e_tc_flow_action *flow_action_reorder); + +int +mlx5e_tc_act_post_parse(struct mlx5e_tc_act_parse_state *parse_state, + struct flow_action *flow_action, + struct mlx5_flow_attr *attr, + enum mlx5_flow_namespace_type ns_type); + +int +mlx5e_tc_act_set_next_post_act(struct mlx5e_tc_flow *flow, + struct mlx5_flow_attr *attr, + struct mlx5_flow_attr *next_attr); + #endif /* __MLX5_EN_TC_ACT_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c index 85f0cb88127f..b9d38fe807df 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c @@ -14,14 +14,8 @@ tc_act_can_offload_ct(struct mlx5e_tc_act_parse_state *parse_state, bool clear_action = act->ct.action & TCA_CT_ACT_CLEAR; struct netlink_ext_ack *extack = parse_state->extack; - if (flow_flag_test(parse_state->flow, SAMPLE)) { - NL_SET_ERR_MSG_MOD(extack, - "Sample action with connection tracking is not supported"); - return false; - } - if (parse_state->ct && !clear_action) { - NL_SET_ERR_MSG_MOD(extack, "Multiple CT actions are not supoported"); + NL_SET_ERR_MSG_MOD(extack, "Multiple CT actions are not supported"); return false; } @@ -37,6 +31,10 @@ tc_act_parse_ct(struct mlx5e_tc_act_parse_state *parse_state, bool clear_action = act->ct.action & TCA_CT_ACT_CLEAR; int err; + /* It's redundant to do ct clear more than once. */ + if (clear_action && parse_state->ct_clear) + return 0; + err = mlx5_tc_ct_parse_action(parse_state->ct_priv, attr, &attr->parse_attr->mod_hdr_acts, act, parse_state->extack); @@ -52,12 +50,25 @@ tc_act_parse_ct(struct mlx5e_tc_act_parse_state *parse_state, flow_flag_set(parse_state->flow, CT); parse_state->ct = true; } + parse_state->ct_clear = clear_action; return 0; } +static bool +tc_act_is_multi_table_act_ct(struct mlx5e_priv *priv, + const struct flow_action_entry *act, + struct mlx5_flow_attr *attr) +{ + if (act->ct.action & TCA_CT_ACT_CLEAR) + return false; + + return true; +} + struct mlx5e_tc_act mlx5e_tc_act_ct = { .can_offload = tc_act_can_offload_ct, .parse_action = tc_act_parse_ct, + .is_multi_table_act = tc_act_is_multi_table_act_ct, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/drop.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/drop.c index 3d5f23636a02..dd025a95c439 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/drop.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/drop.c @@ -19,8 +19,7 @@ tc_act_parse_drop(struct mlx5e_tc_act_parse_state *parse_state, struct mlx5e_priv *priv, struct mlx5_flow_attr *attr) { - attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP | - MLX5_FLOW_CONTEXT_ACTION_COUNT; + attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP; return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c index fb1be822ad25..4726bcb46eec 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c @@ -76,8 +76,7 @@ tc_act_parse_goto(struct mlx5e_tc_act_parse_state *parse_state, struct mlx5e_priv *priv, struct mlx5_flow_attr *attr) { - attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | - MLX5_FLOW_CONTEXT_ACTION_COUNT; + attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; attr->dest_chain = act->chain_index; return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c index 99fb98b3e71b..05a42fb4ba97 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c @@ -178,6 +178,12 @@ parse_mirred_encap(struct mlx5e_tc_act_parse_state *parse_state, return -ENOMEM; parse_state->encap = false; + + if (parse_state->mpls_push) { + memcpy(&parse_attr->mpls_info[esw_attr->out_count], + &parse_state->mpls_info, sizeof(parse_state->mpls_info)); + parse_state->mpls_push = false; + } esw_attr->dests[esw_attr->out_count].flags |= MLX5_ESW_DEST_ENCAP; esw_attr->out_count++; /* attr->dests[].rep is resolved when we handle encap */ @@ -296,8 
+302,7 @@ tc_act_parse_mirred(struct mlx5e_tc_act_parse_state *parse_state, if (err) return err; - attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | - MLX5_FLOW_CONTEXT_ACTION_COUNT; + attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred_nic.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred_nic.c index 16681cf6e93e..90b4c1b34776 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred_nic.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred_nic.c @@ -40,8 +40,7 @@ tc_act_parse_mirred_nic(struct mlx5e_tc_act_parse_state *parse_state, { attr->parse_attr->mirred_ifindex[0] = act->dev->ifindex; flow_flag_set(parse_state->flow, HAIRPIN); - attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | - MLX5_FLOW_CONTEXT_ACTION_COUNT; + attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mpls.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mpls.c index 40332949509a..96a80e03d129 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mpls.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mpls.c @@ -23,6 +23,16 @@ tc_act_can_offload_mpls_push(struct mlx5e_tc_act_parse_state *parse_state, return true; } +static void +copy_mpls_info(struct mlx5e_mpls_info *mpls_info, + const struct flow_action_entry *act) +{ + mpls_info->label = act->mpls_push.label; + mpls_info->tc = act->mpls_push.tc; + mpls_info->bos = act->mpls_push.bos; + mpls_info->ttl = act->mpls_push.ttl; +} + static int tc_act_parse_mpls_push(struct mlx5e_tc_act_parse_state *parse_state, const struct flow_action_entry *act, @@ -30,6 +40,7 @@ tc_act_parse_mpls_push(struct mlx5e_tc_act_parse_state *parse_state, struct mlx5_flow_attr *attr) { parse_state->mpls_push = true; + copy_mpls_info(&parse_state->mpls_info, act); return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c index 9dd244147385..ad09a8a5f36e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c @@ -58,8 +58,7 @@ tc_act_parse_redirect_ingress(struct mlx5e_tc_act_parse_state *parse_state, struct net_device *out_dev = act->dev; int err; - attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | - MLX5_FLOW_CONTEXT_ACTION_COUNT; + attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; err = mlx5e_set_fwd_to_int_port_actions(priv, attr, out_dev->ifindex, MLX5E_TC_INT_PORT_INGRESS, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.c index 539fea13ce9f..2c0196431302 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.c @@ -4,6 +4,7 @@ #include <net/psample.h> #include "act.h" #include "en/tc_priv.h" +#include "en/tc/act/sample.h" static bool tc_act_can_offload_sample(struct mlx5e_tc_act_parse_state *parse_state, @@ -12,10 +13,12 @@ tc_act_can_offload_sample(struct mlx5e_tc_act_parse_state *parse_state, struct mlx5_flow_attr *attr) { struct netlink_ext_ack *extack = parse_state->extack; + bool ct_nat; - if (flow_flag_test(parse_state->flow, CT)) { - NL_SET_ERR_MSG_MOD(extack, - "Sample action with connection tracking is not supported"); + ct_nat = attr->ct_attr.ct_action & TCA_CT_ACT_NAT; + + if 
(flow_flag_test(parse_state->flow, CT) && ct_nat) { + NL_SET_ERR_MSG_MOD(extack, "Sample action with CT NAT is not supported"); return false; } @@ -42,7 +45,27 @@ tc_act_parse_sample(struct mlx5e_tc_act_parse_state *parse_state, return 0; } +bool +mlx5e_tc_act_sample_is_multi_table(struct mlx5_core_dev *mdev, + struct mlx5_flow_attr *attr) +{ + if (MLX5_CAP_GEN(mdev, reg_c_preserve) || + attr->action & MLX5_FLOW_CONTEXT_ACTION_DECAP) + return true; + + return false; +} + +static bool +tc_act_is_multi_table_act_sample(struct mlx5e_priv *priv, + const struct flow_action_entry *act, + struct mlx5_flow_attr *attr) +{ + return mlx5e_tc_act_sample_is_multi_table(priv->mdev, attr); +} + struct mlx5e_tc_act mlx5e_tc_act_sample = { .can_offload = tc_act_can_offload_sample, .parse_action = tc_act_parse_sample, + .is_multi_table_act = tc_act_is_multi_table_act_sample, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.h new file mode 100644 index 000000000000..3efb3a15c5d2 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */ + +#ifndef __MLX5_EN_TC_ACT_SAMPLE_H__ +#define __MLX5_EN_TC_ACT_SAMPLE_H__ + +#include <net/flow_offload.h> +#include "en/tc_priv.h" + +bool +mlx5e_tc_act_sample_is_multi_table(struct mlx5_core_dev *mdev, + struct mlx5_flow_attr *attr); + +#endif /* __MLX5_EN_TC_ACT_SAMPLE_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c index 9ea293fdc434..a7d9eab19e4a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c @@ -26,8 +26,7 @@ tc_act_parse_trap(struct mlx5e_tc_act_parse_state *parse_state, struct mlx5e_priv *priv, struct mlx5_flow_attr *attr) { - attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | - MLX5_FLOW_CONTEXT_ACTION_COUNT; + attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; attr->flags |= MLX5_ATTR_FLAG_SLOW_PATH; return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h new file mode 100644 index 000000000000..bb6b1a979ba1 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. 
*/ + +#ifndef __MLX5_EN_TC_CT_FS_H__ +#define __MLX5_EN_TC_CT_FS_H__ + +struct mlx5_ct_fs { + const struct net_device *netdev; + struct mlx5_core_dev *dev; + + /* private data */ + void *priv_data[]; +}; + +struct mlx5_ct_fs_rule { +}; + +struct mlx5_ct_fs_ops { + int (*init)(struct mlx5_ct_fs *fs, struct mlx5_flow_table *ct, + struct mlx5_flow_table *ct_nat, struct mlx5_flow_table *post_ct); + void (*destroy)(struct mlx5_ct_fs *fs); + + struct mlx5_ct_fs_rule * (*ct_rule_add)(struct mlx5_ct_fs *fs, + struct mlx5_flow_spec *spec, + struct mlx5_flow_attr *attr, + struct flow_rule *flow_rule); + void (*ct_rule_del)(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_rule); + + size_t priv_size; +}; + +static inline void *mlx5_ct_fs_priv(struct mlx5_ct_fs *fs) +{ + return &fs->priv_data; +} + +struct mlx5_ct_fs_ops *mlx5_ct_fs_dmfs_ops_get(void); + +#if IS_ENABLED(CONFIG_MLX5_SW_STEERING) +struct mlx5_ct_fs_ops *mlx5_ct_fs_smfs_ops_get(void); +#else +static inline struct mlx5_ct_fs_ops * +mlx5_ct_fs_smfs_ops_get(void) +{ + return NULL; +} +#endif /* IS_ENABLED(CONFIG_MLX5_SW_STEERING) */ + +#endif /* __MLX5_EN_TC_CT_FS_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_dmfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_dmfs.c new file mode 100644 index 000000000000..ae4f55be48ce --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_dmfs.c @@ -0,0 +1,79 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. */ + +#include "en_tc.h" +#include "en/tc_ct.h" +#include "en/tc/ct_fs.h" + +#define ct_dbg(fmt, args...)\ + netdev_dbg(fs->netdev, "ct_fs_dmfs debug: " fmt "\n", ##args) + +struct mlx5_ct_fs_dmfs_rule { + struct mlx5_ct_fs_rule fs_rule; + struct mlx5_flow_handle *rule; + struct mlx5_flow_attr *attr; +}; + +static int +mlx5_ct_fs_dmfs_init(struct mlx5_ct_fs *fs, struct mlx5_flow_table *ct, + struct mlx5_flow_table *ct_nat, struct mlx5_flow_table *post_ct) +{ + return 0; +} + +static void +mlx5_ct_fs_dmfs_destroy(struct mlx5_ct_fs *fs) +{ +} + +static struct mlx5_ct_fs_rule * +mlx5_ct_fs_dmfs_ct_rule_add(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec, + struct mlx5_flow_attr *attr, struct flow_rule *flow_rule) +{ + struct mlx5e_priv *priv = netdev_priv(fs->netdev); + struct mlx5_ct_fs_dmfs_rule *dmfs_rule; + int err; + + dmfs_rule = kzalloc(sizeof(*dmfs_rule), GFP_KERNEL); + if (!dmfs_rule) + return ERR_PTR(-ENOMEM); + + dmfs_rule->rule = mlx5_tc_rule_insert(priv, spec, attr); + if (IS_ERR(dmfs_rule->rule)) { + err = PTR_ERR(dmfs_rule->rule); + ct_dbg("Failed to add ct entry fs rule"); + goto err_insert; + } + + dmfs_rule->attr = attr; + + return &dmfs_rule->fs_rule; + +err_insert: + kfree(dmfs_rule); + return ERR_PTR(err); +} + +static void +mlx5_ct_fs_dmfs_ct_rule_del(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_rule) +{ + struct mlx5_ct_fs_dmfs_rule *dmfs_rule = container_of(fs_rule, + struct mlx5_ct_fs_dmfs_rule, + fs_rule); + + mlx5_tc_rule_delete(netdev_priv(fs->netdev), dmfs_rule->rule, dmfs_rule->attr); + kfree(dmfs_rule); +} + +static struct mlx5_ct_fs_ops dmfs_ops = { + .ct_rule_add = mlx5_ct_fs_dmfs_ct_rule_add, + .ct_rule_del = mlx5_ct_fs_dmfs_ct_rule_del, + + .init = mlx5_ct_fs_dmfs_init, + .destroy = mlx5_ct_fs_dmfs_destroy, +}; + +struct mlx5_ct_fs_ops *mlx5_ct_fs_dmfs_ops_get(void) +{ + return &dmfs_ops; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c new file mode 100644 index 
000000000000..59988e24b704 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c @@ -0,0 +1,372 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. */ + +#include <linux/refcount.h> + +#include "en_tc.h" +#include "en/tc_priv.h" +#include "en/tc_ct.h" +#include "en/tc/ct_fs.h" + +#include "lib/smfs.h" + +#define INIT_ERR_PREFIX "ct_fs_smfs init failed" +#define ct_dbg(fmt, args...)\ + netdev_dbg(fs->netdev, "ct_fs_smfs debug: " fmt "\n", ##args) +#define MLX5_CT_TCP_FLAGS_MASK cpu_to_be16(be32_to_cpu(TCP_FLAG_RST | TCP_FLAG_FIN) >> 16) + +struct mlx5_ct_fs_smfs_matcher { + struct mlx5dr_matcher *dr_matcher; + struct list_head list; + int prio; + refcount_t ref; +}; + +struct mlx5_ct_fs_smfs_matchers { + struct mlx5_ct_fs_smfs_matcher smfs_matchers[4]; + struct list_head used; +}; + +struct mlx5_ct_fs_smfs { + struct mlx5dr_table *ct_tbl, *ct_nat_tbl; + struct mlx5_ct_fs_smfs_matchers matchers; + struct mlx5_ct_fs_smfs_matchers matchers_nat; + struct mlx5dr_action *fwd_action; + struct mlx5_flow_table *ct_nat; + struct mutex lock; /* Guards matchers */ +}; + +struct mlx5_ct_fs_smfs_rule { + struct mlx5_ct_fs_rule fs_rule; + struct mlx5dr_rule *rule; + struct mlx5dr_action *count_action; + struct mlx5_ct_fs_smfs_matcher *smfs_matcher; +}; + +static inline void +mlx5_ct_fs_smfs_fill_mask(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec, bool ipv4, bool tcp) +{ + void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers); + + if (likely(MLX5_CAP_FLOWTABLE_NIC_RX(fs->dev, ft_field_support.outer_ip_version))) + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_version); + else + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype); + + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol); + if (likely(ipv4)) { + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, + src_ipv4_src_ipv6.ipv4_layout.ipv4); + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, + dst_ipv4_dst_ipv6.ipv4_layout.ipv4); + } else { + memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, + dst_ipv4_dst_ipv6.ipv6_layout.ipv6), + 0xFF, + MLX5_FLD_SZ_BYTES(fte_match_set_lyr_2_4, + dst_ipv4_dst_ipv6.ipv6_layout.ipv6)); + memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, + src_ipv4_src_ipv6.ipv6_layout.ipv6), + 0xFF, + MLX5_FLD_SZ_BYTES(fte_match_set_lyr_2_4, + src_ipv4_src_ipv6.ipv6_layout.ipv6)); + } + + if (likely(tcp)) { + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, tcp_sport); + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, tcp_dport); + MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags, + ntohs(MLX5_CT_TCP_FLAGS_MASK)); + } else { + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, udp_sport); + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, udp_dport); + } + + mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG, 0, MLX5_CT_ZONE_MASK); +} + +static struct mlx5dr_matcher * +mlx5_ct_fs_smfs_matcher_create(struct mlx5_ct_fs *fs, struct mlx5dr_table *tbl, bool ipv4, + bool tcp, u32 priority) +{ + struct mlx5dr_matcher *dr_matcher; + struct mlx5_flow_spec *spec; + + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) + return ERR_PTR(-ENOMEM); + + mlx5_ct_fs_smfs_fill_mask(fs, spec, ipv4, tcp); + spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2 | MLX5_MATCH_OUTER_HEADERS; + + dr_matcher = mlx5_smfs_matcher_create(tbl, priority, spec); + kvfree(spec); + if (!dr_matcher) + return ERR_PTR(-EINVAL); + + return dr_matcher; +}
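
The MLX5_CT_TCP_FLAGS_MASK definition above folds the 32-bit TCP_FLAG_RST and TCP_FLAG_FIN word constants into the 16-bit tcp_flags match field. Here is a runnable userspace sketch of that arithmetic; the TCP_FLAG_* values are assumed to mirror include/net/tcp.h, and the program only prints the resulting mask.

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

/* The kernel's TCP_FLAG_* constants are big-endian 32-bit words whose flag
 * bits overlay the upper half of the TCP data-offset/flags word, so the
 * combined mask must be shifted down 16 bits before it fits the 16-bit
 * tcp_flags field used by the matchers.
 */
#define TCP_FLAG_FIN htonl(0x00010000)
#define TCP_FLAG_RST htonl(0x00040000)

int main(void)
{
	uint16_t mask = htons((uint16_t)(ntohl(TCP_FLAG_RST | TCP_FLAG_FIN) >> 16));

	printf("tcp_flags mask: 0x%04x\n", ntohs(mask)); /* 0x0005 = RST|FIN */
	return 0;
}

+ +static struct mlx5_ct_fs_smfs_matcher *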
+mlx5_ct_fs_smfs_matcher_get(struct mlx5_ct_fs *fs, bool nat, bool ipv4, bool tcp) +{ + struct mlx5_ct_fs_smfs *fs_smfs = mlx5_ct_fs_priv(fs); + struct mlx5_ct_fs_smfs_matcher *m, *smfs_matcher; + struct mlx5_ct_fs_smfs_matchers *matchers; + struct mlx5dr_matcher *dr_matcher; + struct mlx5dr_table *tbl; + struct list_head *prev; + int prio; + + matchers = nat ? &fs_smfs->matchers_nat : &fs_smfs->matchers; + smfs_matcher = &matchers->smfs_matchers[ipv4 * 2 + tcp]; + + if (refcount_inc_not_zero(&smfs_matcher->ref)) + return smfs_matcher; + + mutex_lock(&fs_smfs->lock); + + /* Retry with the lock held, as another thread might have already created + * the relevant matcher while we were acquiring it. + */ + if (refcount_inc_not_zero(&smfs_matcher->ref)) + goto out_unlock; + + /* Find the next available priority in the sorted used list. */ + prio = 0; + prev = &matchers->used; + list_for_each_entry(m, &matchers->used, list) { + prev = &m->list; + + if (m->prio == prio) + prio = m->prio + 1; + else + break; + } + + tbl = nat ? fs_smfs->ct_nat_tbl : fs_smfs->ct_tbl; + dr_matcher = mlx5_ct_fs_smfs_matcher_create(fs, tbl, ipv4, tcp, prio); + if (IS_ERR(dr_matcher)) { + netdev_warn(fs->netdev, + "ct_fs_smfs: failed to create matcher (nat %d, ipv4 %d, tcp %d), err: %ld\n", + nat, ipv4, tcp, PTR_ERR(dr_matcher)); + + smfs_matcher = ERR_CAST(dr_matcher); + goto out_unlock; + } + + smfs_matcher->dr_matcher = dr_matcher; + smfs_matcher->prio = prio; + list_add(&smfs_matcher->list, prev); + refcount_set(&smfs_matcher->ref, 1); + +out_unlock: + mutex_unlock(&fs_smfs->lock); + return smfs_matcher; +} + +static void +mlx5_ct_fs_smfs_matcher_put(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_smfs_matcher *smfs_matcher) +{ + struct mlx5_ct_fs_smfs *fs_smfs = mlx5_ct_fs_priv(fs); + + if (!refcount_dec_and_mutex_lock(&smfs_matcher->ref, &fs_smfs->lock)) + return; + + mlx5_smfs_matcher_destroy(smfs_matcher->dr_matcher); + list_del(&smfs_matcher->list); + mutex_unlock(&fs_smfs->lock); +} + +static int +mlx5_ct_fs_smfs_init(struct mlx5_ct_fs *fs, struct mlx5_flow_table *ct, + struct mlx5_flow_table *ct_nat, struct mlx5_flow_table *post_ct) +{ + struct mlx5dr_table *ct_tbl, *ct_nat_tbl, *post_ct_tbl; + struct mlx5_ct_fs_smfs *fs_smfs = mlx5_ct_fs_priv(fs); + + post_ct_tbl = mlx5_smfs_table_get_from_fs_ft(post_ct); + ct_nat_tbl = mlx5_smfs_table_get_from_fs_ft(ct_nat); + ct_tbl = mlx5_smfs_table_get_from_fs_ft(ct); + fs_smfs->ct_nat = ct_nat; + + if (!ct_tbl || !ct_nat_tbl || !post_ct_tbl) { + netdev_warn(fs->netdev, "ct_fs_smfs: failed to init, missing backing dr tables"); + return -EOPNOTSUPP; + } + + ct_dbg("using smfs steering"); + + fs_smfs->fwd_action = mlx5_smfs_action_create_dest_table(post_ct_tbl); + if (!fs_smfs->fwd_action) { + return -EINVAL; + } + + fs_smfs->ct_tbl = ct_tbl; + fs_smfs->ct_nat_tbl = ct_nat_tbl; + mutex_init(&fs_smfs->lock); + INIT_LIST_HEAD(&fs_smfs->matchers.used); + INIT_LIST_HEAD(&fs_smfs->matchers_nat.used); + + return 0; +} + +static void +mlx5_ct_fs_smfs_destroy(struct mlx5_ct_fs *fs) +{ + struct mlx5_ct_fs_smfs *fs_smfs = mlx5_ct_fs_priv(fs); + + mlx5_smfs_action_destroy(fs_smfs->fwd_action); +}
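
mlx5_ct_fs_smfs_matcher_get() above uses the classic double-checked pattern around refcount_inc_not_zero(): try to take a reference lock-free, then re-check under the mutex before creating the matcher, since another thread may have created it first. Below is a standalone sketch of that pattern with pthreads and C11 atomics; all names and the create() hook are hypothetical, and unlike the kernel's refcount_t this toy counter does not saturate.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

/* Toy object cache. ref == 0 means "not created yet"; a nonzero ref both
 * keeps the object alive and marks it as created.
 */
struct cached_obj {
	atomic_int ref;
	void *resource;
};

/* Analog of refcount_inc_not_zero(): take a reference only if one is
 * already held somewhere, i.e. the object exists.
 */
static bool ref_get_unless_zero(atomic_int *ref)
{
	int old = atomic_load(ref);

	while (old != 0)
		if (atomic_compare_exchange_weak(ref, &old, old + 1))
			return true;
	return false;
}

static struct cached_obj *obj_get(struct cached_obj *obj,
				  pthread_mutex_t *lock, void *(*create)(void))
{
	if (ref_get_unless_zero(&obj->ref))
		return obj;	/* fast path: no lock taken */

	pthread_mutex_lock(lock);
	/* Re-check: another thread may have created the object while we
	 * were waiting for the lock.
	 */
	if (ref_get_unless_zero(&obj->ref))
		goto out_unlock;

	obj->resource = create();
	atomic_store(&obj->ref, 1);	/* publish with one reference held */
out_unlock:
	pthread_mutex_unlock(lock);
	return obj;
}

+ +static inline bool +mlx5_tc_ct_valid_used_dissector_keys(const u32 used_keys) +{ +#define DISSECTOR_BIT(name) BIT(FLOW_DISSECTOR_KEY_ ## name) + const u32 basic_keys = DISSECTOR_BIT(BASIC) | DISSECTOR_BIT(CONTROL) | + DISSECTOR_BIT(PORTS) | DISSECTOR_BIT(META); + const u32 ipv4_tcp = basic_keys | DISSECTOR_BIT(IPV4_ADDRS) | DISSECTOR_BIT(TCP); + const u32 ipv4_udp = basic_keys | DISSECTOR_BIT(IPV4_ADDRS); + const u32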
ipv6_tcp = basic_keys | DISSECTOR_BIT(IPV6_ADDRS) | DISSECTOR_BIT(TCP); + const u32 ipv6_udp = basic_keys | DISSECTOR_BIT(IPV6_ADDRS); + + return (used_keys == ipv4_tcp || used_keys == ipv4_udp || used_keys == ipv6_tcp || + used_keys == ipv6_udp); +} + +static bool +mlx5_ct_fs_smfs_ct_validate_flow_rule(struct mlx5_ct_fs *fs, struct flow_rule *flow_rule) +{ + struct flow_match_ipv4_addrs ipv4_addrs; + struct flow_match_ipv6_addrs ipv6_addrs; + struct flow_match_control control; + struct flow_match_basic basic; + struct flow_match_ports ports; + struct flow_match_tcp tcp; + + if (!mlx5_tc_ct_valid_used_dissector_keys(flow_rule->match.dissector->used_keys)) { + ct_dbg("rule uses unexpected dissectors (0x%08x)", + flow_rule->match.dissector->used_keys); + return false; + } + + flow_rule_match_basic(flow_rule, &basic); + flow_rule_match_control(flow_rule, &control); + flow_rule_match_ipv4_addrs(flow_rule, &ipv4_addrs); + flow_rule_match_ipv6_addrs(flow_rule, &ipv6_addrs); + flow_rule_match_ports(flow_rule, &ports); + flow_rule_match_tcp(flow_rule, &tcp); + + if (basic.mask->n_proto != htons(0xFFFF) || + (basic.key->n_proto != htons(ETH_P_IP) && basic.key->n_proto != htons(ETH_P_IPV6)) || + basic.mask->ip_proto != 0xFF || + (basic.key->ip_proto != IPPROTO_UDP && basic.key->ip_proto != IPPROTO_TCP)) { + ct_dbg("rule uses unexpected basic match (n_proto 0x%04x/0x%04x, ip_proto 0x%02x/0x%02x)", + ntohs(basic.key->n_proto), ntohs(basic.mask->n_proto), + basic.key->ip_proto, basic.mask->ip_proto); + return false; + } + + if (ports.mask->src != htons(0xFFFF) || ports.mask->dst != htons(0xFFFF)) { + ct_dbg("rule uses ports match (src 0x%04x, dst 0x%04x)", + ports.mask->src, ports.mask->dst); + return false; + } + + if (basic.key->ip_proto == IPPROTO_TCP && tcp.mask->flags != MLX5_CT_TCP_FLAGS_MASK) { + ct_dbg("rule uses unexpected tcp match (flags 0x%02x)", tcp.mask->flags); + return false; + } + + return true; +} + +static struct mlx5_ct_fs_rule * +mlx5_ct_fs_smfs_ct_rule_add(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec, + struct mlx5_flow_attr *attr, struct flow_rule *flow_rule) +{ + struct mlx5_ct_fs_smfs *fs_smfs = mlx5_ct_fs_priv(fs); + struct mlx5_ct_fs_smfs_matcher *smfs_matcher; + struct mlx5_ct_fs_smfs_rule *smfs_rule; + struct mlx5dr_action *actions[5]; + struct mlx5dr_rule *rule; + int num_actions = 0, err; + bool nat, tcp, ipv4; + + if (!mlx5_ct_fs_smfs_ct_validate_flow_rule(fs, flow_rule)) + return ERR_PTR(-EOPNOTSUPP); + + smfs_rule = kzalloc(sizeof(*smfs_rule), GFP_KERNEL); + if (!smfs_rule) + return ERR_PTR(-ENOMEM); + + smfs_rule->count_action = mlx5_smfs_action_create_flow_counter(mlx5_fc_id(attr->counter)); + if (!smfs_rule->count_action) { + err = -EINVAL; + goto err_count; + } + + actions[num_actions++] = smfs_rule->count_action; + actions[num_actions++] = attr->modify_hdr->action.dr_action; + actions[num_actions++] = fs_smfs->fwd_action; + + nat = (attr->ft == fs_smfs->ct_nat); + ipv4 = mlx5e_tc_get_ip_version(spec, true) == 4; + tcp = MLX5_GET(fte_match_param, spec->match_value, + outer_headers.ip_protocol) == IPPROTO_TCP; + + smfs_matcher = mlx5_ct_fs_smfs_matcher_get(fs, nat, ipv4, tcp); + if (IS_ERR(smfs_matcher)) { + err = PTR_ERR(smfs_matcher); + goto err_matcher; + } + + rule = mlx5_smfs_rule_create(smfs_matcher->dr_matcher, spec, num_actions, actions, + MLX5_FLOW_CONTEXT_FLOW_SOURCE_ANY_VPORT); + if (!rule) { + err = -EINVAL; + goto err_create; + } + + smfs_rule->rule = rule; + smfs_rule->smfs_matcher = smfs_matcher; + + return &smfs_rule->fs_rule; + +err_create: + 
mlx5_ct_fs_smfs_matcher_put(fs, smfs_matcher); +err_matcher: + mlx5_smfs_action_destroy(smfs_rule->count_action); +err_count: + kfree(smfs_rule); + return ERR_PTR(err); +} + +static void +mlx5_ct_fs_smfs_ct_rule_del(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_rule) +{ + struct mlx5_ct_fs_smfs_rule *smfs_rule = container_of(fs_rule, + struct mlx5_ct_fs_smfs_rule, + fs_rule); + + mlx5_smfs_rule_destroy(smfs_rule->rule); + mlx5_ct_fs_smfs_matcher_put(fs, smfs_rule->smfs_matcher); + mlx5_smfs_action_destroy(smfs_rule->count_action); + kfree(smfs_rule); +} + +static struct mlx5_ct_fs_ops fs_smfs_ops = { + .ct_rule_add = mlx5_ct_fs_smfs_ct_rule_add, + .ct_rule_del = mlx5_ct_fs_smfs_ct_rule_del, + + .init = mlx5_ct_fs_smfs_init, + .destroy = mlx5_ct_fs_smfs_destroy, + + .priv_size = sizeof(struct mlx5_ct_fs_smfs), +}; + +struct mlx5_ct_fs_ops * +mlx5_ct_fs_smfs_ops_get(void) +{ + return &fs_smfs_ops; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c index 9e0e229cf164..dea137dd744b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB // Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +#include "en/tc_priv.h" #include "en_tc.h" #include "post_act.h" #include "mlx5_core.h" @@ -75,21 +76,47 @@ mlx5e_tc_post_act_destroy(struct mlx5e_post_act *post_act) kfree(post_act); } +int +mlx5e_tc_post_act_offload(struct mlx5e_post_act *post_act, + struct mlx5e_post_act_handle *handle) +{ + struct mlx5_flow_spec *spec; + int err; + + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) + return -ENOMEM; + + /* Post action rule matches on fte_id and executes original rule's tc rule action */ + mlx5e_tc_match_to_reg_match(spec, FTEID_TO_REG, handle->id, MLX5_POST_ACTION_MASK); + + handle->rule = mlx5e_tc_rule_offload(post_act->priv, spec, handle->attr); + if (IS_ERR(handle->rule)) { + err = PTR_ERR(handle->rule); + netdev_warn(post_act->priv->netdev, "Failed to add post action rule"); + goto err_rule; + } + + kvfree(spec); + return 0; + +err_rule: + kvfree(spec); + return err; +} + struct mlx5e_post_act_handle * mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *attr) { u32 attr_sz = ns_to_attr_sz(post_act->ns_type); - struct mlx5e_post_act_handle *handle = NULL; - struct mlx5_flow_attr *post_attr = NULL; - struct mlx5_flow_spec *spec = NULL; + struct mlx5e_post_act_handle *handle; + struct mlx5_flow_attr *post_attr; int err; handle = kzalloc(sizeof(*handle), GFP_KERNEL); - spec = kvzalloc(sizeof(*spec), GFP_KERNEL); post_attr = mlx5_alloc_flow_attr(post_act->ns_type); - if (!handle || !spec || !post_attr) { + if (!handle || !post_attr) { kfree(post_attr); - kvfree(spec); kfree(handle); return ERR_PTR(-ENOMEM); } @@ -100,8 +127,7 @@ mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *at post_attr->ft = post_act->ft; post_attr->inner_match_level = MLX5_MATCH_NONE; post_attr->outer_match_level = MLX5_MATCH_NONE; - post_attr->action &= ~(MLX5_FLOW_CONTEXT_ACTION_DECAP); - post_attr->flags &= ~MLX5_ATTR_FLAG_SAMPLE; + post_attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_DECAP; handle->ns_type = post_act->ns_type; /* Splits were handled before post action */ @@ -113,36 +139,29 @@ mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *at if (err) goto err_xarray; - /* Post action rule 
matches on fte_id and executes original rule's - * tc rule action - */ - mlx5e_tc_match_to_reg_match(spec, FTEID_TO_REG, - handle->id, MLX5_POST_ACTION_MASK); - - handle->rule = mlx5_tc_rule_insert(post_act->priv, spec, post_attr); - if (IS_ERR(handle->rule)) { - err = PTR_ERR(handle->rule); - netdev_warn(post_act->priv->netdev, "Failed to add post action rule"); - goto err_rule; - } handle->attr = post_attr; - kvfree(spec); return handle; -err_rule: - xa_erase(&post_act->ids, handle->id); err_xarray: kfree(post_attr); - kvfree(spec); kfree(handle); return ERR_PTR(err); } void +mlx5e_tc_post_act_unoffload(struct mlx5e_post_act *post_act, + struct mlx5e_post_act_handle *handle) +{ + mlx5e_tc_rule_unoffload(post_act->priv, handle->rule, handle->attr); + handle->rule = NULL; +} + +void mlx5e_tc_post_act_del(struct mlx5e_post_act *post_act, struct mlx5e_post_act_handle *handle) { - mlx5_tc_rule_delete(post_act->priv, handle->rule, handle->attr); + if (!IS_ERR_OR_NULL(handle->rule)) + mlx5e_tc_post_act_unoffload(post_act, handle); xa_erase(&post_act->ids, handle->id); kfree(handle->attr); kfree(handle); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.h index b530ec1981a5..f476774c0b75 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.h @@ -24,6 +24,14 @@ mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *at void mlx5e_tc_post_act_del(struct mlx5e_post_act *post_act, struct mlx5e_post_act_handle *handle); +int +mlx5e_tc_post_act_offload(struct mlx5e_post_act *post_act, + struct mlx5e_post_act_handle *handle); + +void +mlx5e_tc_post_act_unoffload(struct mlx5e_post_act *post_act, + struct mlx5e_post_act_handle *handle); + struct mlx5_flow_table * mlx5e_tc_post_act_get_ft(struct mlx5e_post_act *post_act); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c index 32230e677029..fd4504518578 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c @@ -5,6 +5,7 @@ #include <net/psample.h> #include "en/mapping.h" #include "en/tc/post_act.h" +#include "en/tc/act/sample.h" #include "en/mod_hdr.h" #include "sample.h" #include "eswitch.h" @@ -46,14 +47,12 @@ struct mlx5e_sample_flow { struct mlx5_flow_handle *pre_rule; struct mlx5_flow_attr *post_attr; struct mlx5_flow_handle *post_rule; - struct mlx5e_post_act_handle *post_act_handle; }; struct mlx5e_sample_restore { struct hlist_node hlist; struct mlx5_modify_hdr *modify_hdr; struct mlx5_flow_handle *rule; - struct mlx5e_post_act_handle *post_act_handle; u32 obj_id; int count; }; @@ -231,69 +230,46 @@ sampler_put(struct mlx5e_tc_psample *tc_psample, struct mlx5e_sampler *sampler) */ static struct mlx5_modify_hdr * sample_modify_hdr_get(struct mlx5_core_dev *mdev, u32 obj_id, - struct mlx5e_post_act_handle *handle) + struct mlx5e_tc_mod_hdr_acts *mod_acts) { - struct mlx5e_tc_mod_hdr_acts mod_acts = {}; struct mlx5_modify_hdr *modify_hdr; int err; - err = mlx5e_tc_match_to_reg_set(mdev, &mod_acts, MLX5_FLOW_NAMESPACE_FDB, + err = mlx5e_tc_match_to_reg_set(mdev, mod_acts, MLX5_FLOW_NAMESPACE_FDB, CHAIN_TO_REG, obj_id); if (err) goto err_set_regc0; - if (handle) { - err = mlx5e_tc_post_act_set_handle(mdev, handle, &mod_acts); - if (err) - goto err_post_act; - } - modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_FDB, - 
mod_acts.num_actions, - mod_acts.actions); + mod_acts->num_actions, + mod_acts->actions); if (IS_ERR(modify_hdr)) { err = PTR_ERR(modify_hdr); goto err_modify_hdr; } - mlx5e_mod_hdr_dealloc(&mod_acts); + mlx5e_mod_hdr_dealloc(mod_acts); return modify_hdr; err_modify_hdr: -err_post_act: - mlx5e_mod_hdr_dealloc(&mod_acts); + mlx5e_mod_hdr_dealloc(mod_acts); err_set_regc0: return ERR_PTR(err); } -static u32 -restore_hash(u32 obj_id, struct mlx5e_post_act_handle *post_act_handle) -{ - return jhash_2words(obj_id, hash32_ptr(post_act_handle), 0); -} - -static bool -restore_equal(struct mlx5e_sample_restore *restore, u32 obj_id, - struct mlx5e_post_act_handle *post_act_handle) -{ - return restore->obj_id == obj_id && restore->post_act_handle == post_act_handle; -} - static struct mlx5e_sample_restore * sample_restore_get(struct mlx5e_tc_psample *tc_psample, u32 obj_id, - struct mlx5e_post_act_handle *post_act_handle) + struct mlx5e_tc_mod_hdr_acts *mod_acts) { struct mlx5_eswitch *esw = tc_psample->esw; struct mlx5_core_dev *mdev = esw->dev; struct mlx5e_sample_restore *restore; struct mlx5_modify_hdr *modify_hdr; - u32 hash_key; int err; mutex_lock(&tc_psample->restore_lock); - hash_key = restore_hash(obj_id, post_act_handle); - hash_for_each_possible(tc_psample->restore_hashtbl, restore, hlist, hash_key) - if (restore_equal(restore, obj_id, post_act_handle)) + hash_for_each_possible(tc_psample->restore_hashtbl, restore, hlist, obj_id) + if (restore->obj_id == obj_id) goto add_ref; restore = kzalloc(sizeof(*restore), GFP_KERNEL); @@ -302,9 +278,8 @@ sample_restore_get(struct mlx5e_tc_psample *tc_psample, u32 obj_id, goto err_alloc; } restore->obj_id = obj_id; - restore->post_act_handle = post_act_handle; - modify_hdr = sample_modify_hdr_get(mdev, obj_id, post_act_handle); + modify_hdr = sample_modify_hdr_get(mdev, obj_id, mod_acts); if (IS_ERR(modify_hdr)) { err = PTR_ERR(modify_hdr); goto err_modify_hdr; @@ -317,7 +292,7 @@ sample_restore_get(struct mlx5e_tc_psample *tc_psample, u32 obj_id, goto err_restore; } - hash_add(tc_psample->restore_hashtbl, &restore->hlist, hash_key); + hash_add(tc_psample->restore_hashtbl, &restore->hlist, obj_id); add_ref: restore->count++; mutex_unlock(&tc_psample->restore_lock); @@ -494,10 +469,10 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample, struct mlx5_flow_spec *spec, struct mlx5_flow_attr *attr) { - struct mlx5e_post_act_handle *post_act_handle = NULL; struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; struct mlx5_esw_flow_attr *pre_esw_attr; struct mlx5_mapped_obj restore_obj = {}; + struct mlx5e_tc_mod_hdr_acts *mod_acts; struct mlx5e_sample_flow *sample_flow; struct mlx5e_sample_attr *sample_attr; struct mlx5_flow_attr *pre_attr; @@ -522,18 +497,11 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample, * original flow table. 
*/ esw = tc_psample->esw; - if (MLX5_CAP_GEN(esw->dev, reg_c_preserve) || - attr->action & MLX5_FLOW_CONTEXT_ACTION_DECAP) { + if (mlx5e_tc_act_sample_is_multi_table(esw->dev, attr)) { struct mlx5_flow_table *ft; ft = mlx5e_tc_post_act_get_ft(tc_psample->post_act); default_tbl_id = ft->id; - post_act_handle = mlx5e_tc_post_act_add(tc_psample->post_act, attr); - if (IS_ERR(post_act_handle)) { - err = PTR_ERR(post_act_handle); - goto err_post_act; - } - sample_flow->post_act_handle = post_act_handle; } else { err = add_post_rule(esw, sample_flow, spec, attr, &default_tbl_id); if (err) @@ -560,7 +528,8 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample, sample_attr->restore_obj_id = obj_id; /* Create sample restore context. */ - sample_flow->restore = sample_restore_get(tc_psample, obj_id, post_act_handle); + mod_acts = &attr->parse_attr->mod_hdr_acts; + sample_flow->restore = sample_restore_get(tc_psample, obj_id, mod_acts); if (IS_ERR(sample_flow->restore)) { err = PTR_ERR(sample_flow->restore); goto err_sample_restore; @@ -586,6 +555,7 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample, pre_attr->outer_match_level = attr->outer_match_level; pre_attr->chain = attr->chain; pre_attr->prio = attr->prio; + pre_attr->ft = attr->ft; pre_attr->sample_attr = *sample_attr; pre_esw_attr = pre_attr->esw_attr; pre_esw_attr->in_mdev = esw_attr->in_mdev; @@ -611,9 +581,6 @@ err_sampler: if (sample_flow->post_rule) del_post_rule(esw, sample_flow, attr); err_post_rule: - if (post_act_handle) - mlx5e_tc_post_act_del(tc_psample->post_act, post_act_handle); -err_post_act: kfree(sample_flow); return ERR_PTR(err); } @@ -639,9 +606,7 @@ mlx5e_tc_sample_unoffload(struct mlx5e_tc_psample *tc_psample, sample_restore_put(tc_psample, sample_flow->restore); mapping_remove(esw->offloads.reg_c0_obj_pool, attr->sample_attr.restore_obj_id); sampler_put(tc_psample, sample_flow->sampler); - if (sample_flow->post_act_handle) - mlx5e_tc_post_act_del(tc_psample->post_act, sample_flow->post_act_handle); - else + if (sample_flow->post_rule) del_post_rule(esw, sample_flow, attr); kfree(sample_flow->pre_attr); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c index 0f4d3b9dd979..ca1510399d1e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c @@ -18,15 +18,16 @@ #include "lib/fs_chains.h" #include "en/tc_ct.h" +#include "en/tc/ct_fs.h" +#include "en/tc_priv.h" #include "en/mod_hdr.h" #include "en/mapping.h" #include "en/tc/post_act.h" #include "en.h" #include "en_tc.h" #include "en_rep.h" +#include "fs_core.h" -#define MLX5_CT_ZONE_BITS (mlx5e_tc_attr_to_reg_mappings[ZONE_TO_REG].mlen) -#define MLX5_CT_ZONE_MASK GENMASK(MLX5_CT_ZONE_BITS - 1, 0) #define MLX5_CT_STATE_ESTABLISHED_BIT BIT(1) #define MLX5_CT_STATE_TRK_BIT BIT(2) #define MLX5_CT_STATE_NAT_BIT BIT(3) @@ -62,19 +63,20 @@ struct mlx5_tc_ct_priv { struct mapping_ctx *labels_mapping; enum mlx5_flow_namespace_type ns_type; struct mlx5_fs_chains *chains; + struct mlx5_ct_fs *fs; + struct mlx5_ct_fs_ops *fs_ops; spinlock_t ht_lock; /* protects ft entries */ }; struct mlx5_ct_flow { struct mlx5_flow_attr *pre_ct_attr; struct mlx5_flow_handle *pre_ct_rule; - struct mlx5e_post_act_handle *post_act_handle; struct mlx5_ct_ft *ft; u32 chain_mapping; }; struct mlx5_ct_zone_rule { - struct mlx5_flow_handle *rule; + struct mlx5_ct_fs_rule *rule; struct mlx5e_mod_hdr_handle *mh; struct mlx5_flow_attr *attr; bool nat; @@ -505,7 
+507,7 @@ mlx5_tc_ct_entry_del_rule(struct mlx5_tc_ct_priv *ct_priv, ct_dbg("Deleting ct entry rule in zone %d", entry->tuple.zone); - mlx5_tc_rule_delete(netdev_priv(ct_priv->netdev), zone_rule->rule, attr); + ct_priv->fs_ops->ct_rule_del(ct_priv->fs, zone_rule->rule); mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, zone_rule->attr, zone_rule->mh); mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id); kfree(attr); @@ -816,7 +818,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv, mlx5_tc_ct_set_tuple_match(ct_priv, spec, flow_rule); mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG, entry->tuple.zone, MLX5_CT_ZONE_MASK); - zone_rule->rule = mlx5_tc_rule_insert(priv, spec, attr); + zone_rule->rule = ct_priv->fs_ops->ct_rule_add(ct_priv->fs, spec, attr, flow_rule); if (IS_ERR(zone_rule->rule)) { err = PTR_ERR(zone_rule->rule); ct_dbg("Failed to add ct entry rule, nat: %d", nat); @@ -1756,7 +1758,7 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft) /* We translate the tc filter with CT action to the following HW model: * * +---------------------+ - * + ft prio (tc chain) + + * + ft prio (tc chain) + * + original match + * +---------------------+ * | set chain miss mapping @@ -1766,7 +1768,7 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft) * v * +---------------------+ * + pre_ct/pre_ct_nat + if matches +-------------------------+ - * + zone+nat match +---------------->+ post_act (see below) + + * + zone+nat match +---------------->+ post_act (see below) + * +---------------------+ set zone +-------------------------+ * | set zone * v @@ -1781,7 +1783,7 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft) * | do nat (if needed) * v * +--------------+ - * + post_act + original filter actions + * + post_act + original filter actions * + fte_id match +------------------------> * +--------------+ */ @@ -1792,9 +1794,8 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv, { bool nat = attr->ct_attr.ct_action & TCA_CT_ACT_NAT; struct mlx5e_priv *priv = netdev_priv(ct_priv->netdev); - struct mlx5e_tc_mod_hdr_acts pre_mod_acts = {}; + struct mlx5e_tc_mod_hdr_acts *pre_mod_acts; u32 attr_sz = ns_to_attr_sz(ct_priv->ns_type); - struct mlx5e_post_act_handle *handle; struct mlx5_flow_attr *pre_ct_attr; struct mlx5_modify_hdr *mod_hdr; struct mlx5_ct_flow *ct_flow; @@ -1817,14 +1818,6 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv, } ct_flow->ft = ft; - handle = mlx5e_tc_post_act_add(ct_priv->post_act, attr); - if (IS_ERR(handle)) { - err = PTR_ERR(handle); - ct_dbg("Failed to allocate post action handle"); - goto err_post_act_handle; - } - ct_flow->post_act_handle = handle; - /* Base flow attributes of both rules on original rule attribute */ ct_flow->pre_ct_attr = mlx5_alloc_flow_attr(ct_priv->ns_type); if (!ct_flow->pre_ct_attr) { @@ -1834,6 +1827,7 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv, pre_ct_attr = ct_flow->pre_ct_attr; memcpy(pre_ct_attr, attr, attr_sz); + pre_mod_acts = &pre_ct_attr->parse_attr->mod_hdr_acts; /* Modify the original rule's action to fwd and modify, leave decap */ pre_ct_attr->action = attr->action & MLX5_FLOW_CONTEXT_ACTION_DECAP; @@ -1852,25 +1846,19 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv, } ct_flow->chain_mapping = chain_mapping; - err = mlx5e_tc_match_to_reg_set(priv->mdev, &pre_mod_acts, ct_priv->ns_type, + err = mlx5e_tc_match_to_reg_set(priv->mdev, pre_mod_acts, ct_priv->ns_type, CHAIN_TO_REG, chain_mapping); if 
(err) { ct_dbg("Failed to set chain register mapping"); goto err_mapping; } - err = mlx5e_tc_post_act_set_handle(priv->mdev, handle, &pre_mod_acts); - if (err) { - ct_dbg("Failed to set post action handle"); - goto err_mapping; - } - /* If original flow is decap, we do it before going into ct table * so add a rewrite for the tunnel match_id. */ if ((pre_ct_attr->action & MLX5_FLOW_CONTEXT_ACTION_DECAP) && attr->chain == 0) { - err = mlx5e_tc_match_to_reg_set(priv->mdev, &pre_mod_acts, + err = mlx5e_tc_match_to_reg_set(priv->mdev, pre_mod_acts, ct_priv->ns_type, TUNNEL_TO_REG, attr->tunnel_id); @@ -1881,8 +1869,8 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv, } mod_hdr = mlx5_modify_header_alloc(priv->mdev, ct_priv->ns_type, - pre_mod_acts.num_actions, - pre_mod_acts.actions); + pre_mod_acts->num_actions, + pre_mod_acts->actions); if (IS_ERR(mod_hdr)) { err = PTR_ERR(mod_hdr); ct_dbg("Failed to create pre ct mod hdr"); @@ -1902,20 +1890,18 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv, } attr->ct_attr.ct_flow = ct_flow; - mlx5e_mod_hdr_dealloc(&pre_mod_acts); + mlx5e_mod_hdr_dealloc(pre_mod_acts); return ct_flow->pre_ct_rule; err_insert_orig: mlx5_modify_header_dealloc(priv->mdev, pre_ct_attr->modify_hdr); err_mapping: - mlx5e_mod_hdr_dealloc(&pre_mod_acts); + mlx5e_mod_hdr_dealloc(pre_mod_acts); mlx5_chains_put_chain_mapping(ct_priv->chains, ct_flow->chain_mapping); err_get_chain: kfree(ct_flow->pre_ct_attr); err_alloc_pre: - mlx5e_tc_post_act_del(ct_priv->post_act, handle); -err_post_act_handle: mlx5_tc_ct_del_ft_cb(ct_priv, ft); err_ft: kfree(ct_flow); @@ -1952,11 +1938,8 @@ __mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *ct_priv, mlx5_tc_rule_delete(priv, ct_flow->pre_ct_rule, pre_ct_attr); mlx5_modify_header_dealloc(priv->mdev, pre_ct_attr->modify_hdr); - if (ct_flow->post_act_handle) { - mlx5_chains_put_chain_mapping(ct_priv->chains, ct_flow->chain_mapping); - mlx5e_tc_post_act_del(ct_priv->post_act, ct_flow->post_act_handle); - mlx5_tc_ct_del_ft_cb(ct_priv, ct_flow->ft); - } + mlx5_chains_put_chain_mapping(ct_priv->chains, ct_flow->chain_mapping); + mlx5_tc_ct_del_ft_cb(ct_priv, ct_flow->ft); kfree(ct_flow->pre_ct_attr); kfree(ct_flow); @@ -1980,6 +1963,38 @@ mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *priv, } static int +mlx5_tc_ct_fs_init(struct mlx5_tc_ct_priv *ct_priv) +{ + struct mlx5_flow_table *post_ct = mlx5e_tc_post_act_get_ft(ct_priv->post_act); + struct mlx5_ct_fs_ops *fs_ops = mlx5_ct_fs_dmfs_ops_get(); + int err; + + if (ct_priv->ns_type == MLX5_FLOW_NAMESPACE_FDB && + ct_priv->dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS) { + ct_dbg("Using SMFS ct flow steering provider"); + fs_ops = mlx5_ct_fs_smfs_ops_get(); + } + + ct_priv->fs = kzalloc(sizeof(*ct_priv->fs) + fs_ops->priv_size, GFP_KERNEL); + if (!ct_priv->fs) + return -ENOMEM; + + ct_priv->fs->netdev = ct_priv->netdev; + ct_priv->fs->dev = ct_priv->dev; + ct_priv->fs_ops = fs_ops; + + err = ct_priv->fs_ops->init(ct_priv->fs, ct_priv->ct, ct_priv->ct_nat, post_ct); + if (err) + goto err_init; + + return 0; + +err_init: + kfree(ct_priv->fs); + return err; +} + +static int mlx5_tc_ct_init_check_esw_support(struct mlx5_eswitch *esw, const char **err_msg) { @@ -2117,8 +2132,14 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains, if (rhashtable_init(&ct_priv->ct_tuples_nat_ht, &tuples_nat_ht_params)) goto err_ct_tuples_nat_ht; + err = mlx5_tc_ct_fs_init(ct_priv); + if (err) + goto err_init_fs; + return ct_priv; +err_init_fs: + 
rhashtable_destroy(&ct_priv->ct_tuples_nat_ht); err_ct_tuples_nat_ht: rhashtable_destroy(&ct_priv->ct_tuples_ht); err_ct_tuples_ht: @@ -2149,6 +2170,9 @@ mlx5_tc_ct_clean(struct mlx5_tc_ct_priv *ct_priv) chains = ct_priv->chains; + ct_priv->fs_ops->destroy(ct_priv->fs); + kfree(ct_priv->fs); + mlx5_chains_destroy_global_table(chains, ct_priv->ct_nat); mlx5_chains_destroy_global_table(chains, ct_priv->ct); mapping_destroy(ct_priv->zone_mapping); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h index 2b21c7b97a52..36d3652bf829 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h @@ -86,6 +86,8 @@ struct mlx5_ct_attr { #define REG_MAPPING_MLEN(reg) (mlx5e_tc_attr_to_reg_mappings[reg].mlen) #define REG_MAPPING_MOFFSET(reg) (mlx5e_tc_attr_to_reg_mappings[reg].moffset) +#define MLX5_CT_ZONE_BITS (mlx5e_tc_attr_to_reg_mappings[ZONE_TO_REG].mlen) +#define MLX5_CT_ZONE_MASK GENMASK(MLX5_CT_ZONE_BITS - 1, 0) #if IS_ENABLED(CONFIG_MLX5_TC_CT) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h index 9ffba584b982..03c953dacb09 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h @@ -35,6 +35,7 @@ enum { struct mlx5e_tc_flow_parse_attr { const struct ip_tunnel_info *tun_info[MLX5_MAX_FLOW_FWD_VPORTS]; + struct mlx5e_mpls_info mpls_info[MLX5_MAX_FLOW_FWD_VPORTS]; struct net_device *filter_dev; struct mlx5_flow_spec spec; struct pedit_headers_action hdrs[__PEDIT_CMD_MAX]; @@ -109,6 +110,7 @@ struct mlx5e_tc_flow { struct completion init_done; struct completion del_hw_done; struct mlx5_flow_attr *attr; + struct list_head attrs; }; struct mlx5_flow_handle * @@ -129,6 +131,12 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec, struct mlx5_flow_attr *attr); +struct mlx5_flow_attr * +mlx5e_tc_get_encap_attr(struct mlx5e_tc_flow *flow); + +void mlx5e_tc_unoffload_flow_post_acts(struct mlx5e_tc_flow *flow); +int mlx5e_tc_offload_flow_post_acts(struct mlx5e_tc_flow *flow); + bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow); bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow); bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c index 1f8d339ff0c3..5105c8018d37 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c @@ -173,19 +173,29 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv, list_for_each_entry(flow, flow_list, tmp_list) { if (!mlx5e_is_offloaded_flow(flow) || !flow_flag_test(flow, SLOW)) continue; - attr = flow->attr; - esw_attr = attr->esw_attr; - spec = &attr->parse_attr->spec; + spec = &flow->attr->parse_attr->spec; + + attr = mlx5e_tc_get_encap_attr(flow); + esw_attr = attr->esw_attr; esw_attr->dests[flow->tmp_entry_index].pkt_reformat = e->pkt_reformat; esw_attr->dests[flow->tmp_entry_index].flags |= MLX5_ESW_DEST_ENCAP_VALID; /* Do not offload flows with unresolved neighbors */ if (!mlx5e_tc_flow_all_encaps_valid(esw_attr)) continue; + + err = mlx5e_tc_offload_flow_post_acts(flow); + if (err) { + mlx5_core_warn(priv->mdev, "Failed to update flow post acts, %d\n", + err); + continue; + } + /* update from slow path rule to encap rule */ - rule = 
mlx5e_tc_offload_fdb_rules(esw, flow, spec, attr); + rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, flow->attr); if (IS_ERR(rule)) { + mlx5e_tc_unoffload_flow_post_acts(flow); err = PTR_ERR(rule); mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n", err); @@ -214,12 +224,13 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv, list_for_each_entry(flow, flow_list, tmp_list) { if (!mlx5e_is_offloaded_flow(flow) || flow_flag_test(flow, SLOW)) continue; - attr = flow->attr; - esw_attr = attr->esw_attr; - spec = &attr->parse_attr->spec; + spec = &flow->attr->parse_attr->spec; /* update from encap rule to slow path rule */ rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec); + + attr = mlx5e_tc_get_encap_attr(flow); + esw_attr = attr->esw_attr; /* mark the flow's encap dest as non-valid */ esw_attr->dests[flow->tmp_entry_index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID; @@ -230,7 +241,8 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv, continue; } - mlx5e_tc_unoffload_fdb_rules(esw, flow, attr); + mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->attr); + mlx5e_tc_unoffload_flow_post_acts(flow); flow->rule[0] = rule; /* was unset when fast path rule removed */ flow_flag_set(flow, OFFLOADED); @@ -495,6 +507,9 @@ void mlx5e_detach_encap(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e = flow->encaps[out_index].e; struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + if (!mlx5e_is_eswitch_flow(flow)) + return; + if (attr->esw_attr->dests[out_index].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE) mlx5e_detach_encap_route(priv, flow, out_index); @@ -753,6 +768,7 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv, struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct mlx5e_tc_flow_parse_attr *parse_attr; const struct ip_tunnel_info *tun_info; + const struct mlx5e_mpls_info *mpls_info; unsigned long tbl_time_before = 0; struct mlx5e_encap_entry *e; struct mlx5e_encap_key key; @@ -763,6 +779,7 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv, parse_attr = attr->parse_attr; tun_info = parse_attr->tun_info[out_index]; + mpls_info = &parse_attr->mpls_info[out_index]; family = ip_tunnel_info_af(tun_info); key.ip_tun_key = &tun_info->key; key.tc_tunnel = mlx5e_get_tc_tun(mirred_dev); @@ -813,6 +830,7 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv, goto out_err_init; } e->tun_info = tun_info; + memcpy(&e->mpls_info, mpls_info, sizeof(*mpls_info)); err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack); if (err) goto out_err_init; @@ -1360,17 +1378,19 @@ static void mlx5e_reoffload_encap(struct mlx5e_priv *priv, list_for_each_entry(flow, encap_flows, tmp_list) { struct mlx5e_tc_flow_parse_attr *parse_attr; - struct mlx5_flow_attr *attr = flow->attr; struct mlx5_esw_flow_attr *esw_attr; struct mlx5_flow_handle *rule; + struct mlx5_flow_attr *attr; struct mlx5_flow_spec *spec; if (flow_flag_test(flow, FAILED)) continue; + spec = &flow->attr->parse_attr->spec; + + attr = mlx5e_tc_get_encap_attr(flow); esw_attr = attr->esw_attr; parse_attr = attr->parse_attr; - spec = &parse_attr->spec; err = mlx5e_update_vf_tunnel(esw, esw_attr, &parse_attr->mod_hdr_acts, e->out_dev, e->route_dev_ifindex, @@ -1392,9 +1412,18 @@ static void mlx5e_reoffload_encap(struct mlx5e_priv *priv, esw_attr->dests[flow->tmp_entry_index].flags |= MLX5_ESW_DEST_ENCAP_VALID; if (!mlx5e_tc_flow_all_encaps_valid(esw_attr)) goto offload_to_slow_path; + + err = mlx5e_tc_offload_flow_post_acts(flow); + if (err) { + mlx5_core_warn(priv->mdev, "Failed to update flow post acts, %d\n", + err); + 
goto offload_to_slow_path; + } + /* update from slow path rule to encap rule */ - rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, attr); + rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, flow->attr); if (IS_ERR(rule)) { + mlx5e_tc_unoffload_flow_post_acts(flow); err = PTR_ERR(rule); mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n", err); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c index 60952b33b568..c5b1617d556f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c @@ -30,16 +30,15 @@ static int generate_ip_tun_hdr(char buf[], struct mlx5e_encap_entry *r) { const struct ip_tunnel_key *tun_key = &r->tun_info->key; + const struct mlx5e_mpls_info *mpls_info = &r->mpls_info; struct udphdr *udp = (struct udphdr *)(buf); struct mpls_shim_hdr *mpls; - u32 tun_id; - tun_id = be32_to_cpu(tunnel_id_to_key32(tun_key->tun_id)); mpls = (struct mpls_shim_hdr *)(udp + 1); *ip_proto = IPPROTO_UDP; udp->dest = tun_key->tp_dst; - *mpls = mpls_entry_encode(tun_id, tun_key->ttl, tun_key->tos, true); + *mpls = mpls_entry_encode(mpls_info->label, mpls_info->ttl, mpls_info->tc, mpls_info->bos); return 0; } @@ -60,37 +59,31 @@ static int parse_tunnel(struct mlx5e_priv *priv, void *headers_v) { struct flow_rule *rule = flow_cls_offload_flow_rule(f); - struct flow_match_enc_keyid enc_keyid; struct flow_match_mpls match; void *misc2_c; void *misc2_v; - misc2_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, - misc_parameters_2); - misc2_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, - misc_parameters_2); - - if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) - return 0; - - if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) - return 0; - - flow_rule_match_enc_keyid(rule, &enc_keyid); - - if (!enc_keyid.mask->keyid) - return 0; - if (!MLX5_CAP_ETH(priv->mdev, tunnel_stateless_mpls_over_udp) && !(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) & MLX5_FLEX_PROTO_CW_MPLS_UDP)) return -EOPNOTSUPP; + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) + return -EOPNOTSUPP; + + if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) + return 0; + flow_rule_match_mpls(rule, &match); /* Only support matching the first LSE */ if (match.mask->used_lses != 1) return -EOPNOTSUPP; + misc2_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, + misc_parameters_2); + misc2_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, + misc_parameters_2); + MLX5_SET(fte_match_set_misc2, misc2_c, outer_first_mpls_over_udp.mpls_label, match.mask->ls[0].mpls_label); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c index da169b816665..d4239e3b3c88 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c @@ -88,9 +88,6 @@ void mlx5e_tir_builder_build_packet_merge(struct mlx5e_tir_builder *builder, (MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ - rough_max_l2_l3_hdr_sz) >> 8); MLX5_SET(tirc, tirc, lro_timeout_period_usecs, pkt_merge_param->timeout); break; - case MLX5E_PACKET_MERGE_SHAMPO: - MLX5_SET(tirc, tirc, packet_merge_mask, MLX5_TIRC_PACKET_MERGE_MASK_SHAMPO); - break; default: break; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h index b789af07829c..210d23bf3701 100644 --- 
a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h @@ -9,19 +9,6 @@ #define MLX5E_TX_WQE_EMPTY_DS_COUNT (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) -/* The mult of MLX5_SEND_WQE_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS - * (16 * 4 == 64) does not fit in the 6-bit DS field of Ctrl Segment. - * We use a bound lower that MLX5_SEND_WQE_MAX_WQEBBS to let a - * full-session WQE be cache-aligned. - */ -#if L1_CACHE_BYTES < 128 -#define MLX5E_TX_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 1) -#else -#define MLX5E_TX_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 2) -#endif - -#define MLX5E_TX_MPW_MAX_NUM_DS (MLX5E_TX_MPW_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS) - #define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start)) #define MLX5E_RX_ERR_CQE(cqe) (get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND) @@ -68,8 +55,6 @@ void mlx5e_free_rx_descs(struct mlx5e_rq *rq); void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq); /* TX */ -u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, - struct net_device *sb_dev); netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev); bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget); void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq); @@ -308,9 +293,9 @@ mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma) void mlx5e_sq_xmit_simple(struct mlx5e_txqsq *sq, struct sk_buff *skb, bool xmit_more); void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq); -static inline bool mlx5e_tx_mpwqe_is_full(struct mlx5e_tx_mpwqe *session) +static inline bool mlx5e_tx_mpwqe_is_full(struct mlx5e_tx_mpwqe *session, u8 max_sq_mpw_wqebbs) { - return session->ds_count == MLX5E_TX_MPW_MAX_NUM_DS; + return session->ds_count == max_sq_mpw_wqebbs * MLX5_SEND_WQEBB_NUM_DS; } static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq) @@ -431,10 +416,10 @@ mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg, } } -static inline u16 mlx5e_stop_room_for_wqe(u16 wqe_size) -{ - BUILD_BUG_ON(PAGE_SIZE / MLX5_SEND_WQE_BB < MLX5_SEND_WQE_MAX_WQEBBS); +#define MLX5E_STOP_ROOM(wqebbs) ((wqebbs) * 2 - 1) +static inline u16 mlx5e_stop_room_for_wqe(struct mlx5_core_dev *mdev, u16 wqe_size) +{ /* A WQE must not cross the page boundary, hence two conditions: * 1. Its size must not exceed the page size. * 2. If the WQE size is X, and the space remaining in a page is less @@ -443,18 +428,28 @@ static inline u16 mlx5e_stop_room_for_wqe(u16 wqe_size) * stop room of X-1 + X. * WQE size is also limited by the hardware limit. 
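 *
 * E.g. for a worst-case WQE of X = 16 WQEBBs (an illustrative value,
 * per the 16 * 4 == 64 figure above), the queue is stopped while fewer
 * than X - 1 + X = 31 WQEBBs are free, which is exactly
 * MLX5E_STOP_ROOM(16) = 16 * 2 - 1.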
*/ + WARN_ONCE(wqe_size > mlx5e_get_max_sq_wqebbs(mdev), + "wqe_size %u is greater than max SQ WQEBBs %u", + wqe_size, mlx5e_get_max_sq_wqebbs(mdev)); - if (__builtin_constant_p(wqe_size)) - BUILD_BUG_ON(wqe_size > MLX5_SEND_WQE_MAX_WQEBBS); - else - WARN_ON_ONCE(wqe_size > MLX5_SEND_WQE_MAX_WQEBBS); - return wqe_size * 2 - 1; + return MLX5E_STOP_ROOM(wqe_size); +} + +static inline u16 mlx5e_stop_room_for_max_wqe(struct mlx5_core_dev *mdev) +{ + return MLX5E_STOP_ROOM(mlx5e_get_max_sq_wqebbs(mdev)); } static inline bool mlx5e_icosq_can_post_wqe(struct mlx5e_icosq *sq, u16 wqe_size) { - u16 room = sq->reserved_room + mlx5e_stop_room_for_wqe(wqe_size); + u16 room = sq->reserved_room; + + WARN_ONCE(wqe_size > sq->max_sq_wqebbs, + "wqe_size %u is greater than max SQ WQEBBs %u", + wqe_size, sq->max_sq_wqebbs); + + room += MLX5E_STOP_ROOM(wqe_size); return mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, room); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c index 56e10c84a706..a7f020399370 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c @@ -199,7 +199,7 @@ static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq) struct mlx5e_tx_wqe *wqe; u16 pi; - pi = mlx5e_xdpsq_get_next_pi(sq, MLX5E_TX_MPW_MAX_WQEBBS); + pi = mlx5e_xdpsq_get_next_pi(sq, sq->max_sq_mpw_wqebbs); wqe = MLX5E_TX_FETCH_WQE(sq, pi); net_prefetchw(wqe->data); @@ -245,10 +245,8 @@ enum { INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq) { if (unlikely(!sq->mpwqe.wqe)) { - const u16 stop_room = mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS); - if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, - stop_room))) { + sq->stop_room))) { /* SQ is full, ring doorbell */ mlx5e_xmit_xdp_doorbell(sq); sq->stats->full++; @@ -288,7 +286,7 @@ mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptx mlx5e_xdp_mpwqe_add_dseg(sq, xdptxd, stats); - if (unlikely(mlx5e_xdp_mpqwe_is_full(session))) + if (unlikely(mlx5e_xdp_mpqwe_is_full(session, sq->max_sq_mpw_wqebbs))) mlx5e_xdp_mpwqe_complete(sq); mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, xdpi); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h index 8d991c3b7a50..c62f11d7ef6a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h @@ -123,12 +123,13 @@ static inline bool mlx5e_xdp_get_inline_state(struct mlx5e_xdpsq *sq, bool cur) return cur; } -static inline bool mlx5e_xdp_mpqwe_is_full(struct mlx5e_tx_mpwqe *session) +static inline bool mlx5e_xdp_mpqwe_is_full(struct mlx5e_tx_mpwqe *session, u8 max_sq_mpw_wqebbs) { if (session->inline_on) return session->ds_count + MLX5E_XDP_INLINE_WQE_MAX_DS_CNT > - MLX5E_TX_MPW_MAX_NUM_DS; - return mlx5e_tx_mpwqe_is_full(session); + max_sq_mpw_wqebbs * MLX5_SEND_WQEBB_NUM_DS; + + return mlx5e_tx_mpwqe_is_full(session, max_sq_mpw_wqebbs); } struct mlx5e_xdp_wqe_info { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h index d964665eaa63..62cde3e87c2e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h @@ -139,15 +139,6 @@ static inline bool mlx5e_accel_tx_begin(struct net_device *dev, return true; } -static inline bool mlx5e_accel_tx_is_ipsec_flow(struct 
mlx5e_accel_tx_state *state) -{ -#ifdef CONFIG_MLX5_EN_IPSEC - return mlx5e_ipsec_is_tx_flow(&state->ipsec); -#else - return false; -#endif -} - static inline unsigned int mlx5e_accel_tx_ids_len(struct mlx5e_txqsq *sq, struct mlx5e_accel_tx_state *state) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c index 7cab08a2f715..299e3f0fcb5c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c @@ -35,7 +35,6 @@ #include <crypto/aead.h> #include <linux/inetdevice.h> #include <linux/netdevice.h> -#include <linux/module.h> #include "en.h" #include "en_accel/ipsec.h" diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c index 9ad3459fb63a..aaf11c66bf4c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c @@ -32,9 +32,9 @@ u16 mlx5e_ktls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *pa num_dumps = mlx5e_ktls_dumps_num_wqes(params, MAX_SKB_FRAGS, TLS_MAX_PAYLOAD_SIZE); - stop_room += mlx5e_stop_room_for_wqe(MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS); - stop_room += mlx5e_stop_room_for_wqe(MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS); - stop_room += num_dumps * mlx5e_stop_room_for_wqe(MLX5E_KTLS_DUMP_WQEBBS); + stop_room += mlx5e_stop_room_for_wqe(mdev, MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS); + stop_room += mlx5e_stop_room_for_wqe(mdev, MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS); + stop_room += num_dumps * mlx5e_stop_room_for_wqe(mdev, MLX5E_KTLS_DUMP_WQEBBS); return stop_room; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c index 7a700f913582..a05580cea481 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c @@ -386,5 +386,5 @@ u16 mlx5e_tls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *par /* FPGA */ /* Resync SKB. 
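 * A resync may have to retransmit a whole SKB as a single WQE, hence
 * stop room is reserved for the largest WQE the device supports.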
*/ - return mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS); + return mlx5e_stop_room_for_max_wqe(mdev); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c index a4c8d8d00d5a..d659fe07d464 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c @@ -1142,7 +1142,7 @@ static int mlx5e_update_trust_state_hw(struct mlx5e_priv *priv, void *context) err = mlx5_set_trust_state(priv->mdev, *trust_state); if (err) return err; - priv->dcbx_dp.trust_state = *trust_state; + WRITE_ONCE(priv->dcbx_dp.trust_state, *trust_state); return 0; } @@ -1187,16 +1187,18 @@ static int mlx5e_set_dscp2prio(struct mlx5e_priv *priv, u8 dscp, u8 prio) static int mlx5e_trust_initialize(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; + u8 trust_state; int err; - priv->dcbx_dp.trust_state = MLX5_QPTS_TRUST_PCP; - - if (!MLX5_DSCP_SUPPORTED(mdev)) + if (!MLX5_DSCP_SUPPORTED(mdev)) { + WRITE_ONCE(priv->dcbx_dp.trust_state, MLX5_QPTS_TRUST_PCP); return 0; + } - err = mlx5_query_trust_state(priv->mdev, &priv->dcbx_dp.trust_state); + err = mlx5_query_trust_state(priv->mdev, &trust_state); if (err) return err; + WRITE_ONCE(priv->dcbx_dp.trust_state, trust_state); mlx5e_params_calc_trust_tx_min_inline_mode(priv->mdev, &priv->channels.params, priv->dcbx_dp.trust_state); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 57d755db1cf5..6e80585d731f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -1792,7 +1792,7 @@ static int mlx5e_get_module_eeprom(struct net_device *netdev, if (size_read < 0) { netdev_err(priv->netdev, "%s: mlx5_query_eeprom failed:0x%x\n", __func__, size_read); - return 0; + return size_read; } i += size_read; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index bf80fb612449..91b90bbb2b28 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -72,12 +72,13 @@ bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev) { - bool striding_rq_umr = MLX5_CAP_GEN(mdev, striding_rq) && - MLX5_CAP_GEN(mdev, umr_ptr_rlky) && - MLX5_CAP_ETH(mdev, reg_umr_sq); - u16 max_wqe_sz_cap = MLX5_CAP_GEN(mdev, max_wqe_sz_sq); - bool inline_umr = MLX5E_UMR_WQE_INLINE_SZ <= max_wqe_sz_cap; + bool striding_rq_umr, inline_umr; + u16 max_wqe_sz_cap; + striding_rq_umr = MLX5_CAP_GEN(mdev, striding_rq) && MLX5_CAP_GEN(mdev, umr_ptr_rlky) && + MLX5_CAP_ETH(mdev, reg_umr_sq); + max_wqe_sz_cap = mlx5e_get_max_sq_wqebbs(mdev) * MLX5_SEND_WQE_BB; + inline_umr = max_wqe_sz_cap >= MLX5E_UMR_WQE_INLINE_SZ; if (!striding_rq_umr) return false; if (!inline_umr) { @@ -594,6 +595,7 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params, rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk); rq->mpwqe.num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk)); + rq->mpwqe.min_wqe_bulk = mlx5e_mpwqe_get_min_wqe_bulk(wq_sz); rq->buff.frame0_sz = (1 << rq->mpwqe.log_stride_sz); @@ -1164,6 +1166,9 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c, is_redirect ? 
&c->priv->channel_stats[c->ix]->xdpsq :
 &c->priv->channel_stats[c->ix]->rq_xdpsq;
+ sq->max_sq_wqebbs = mlx5e_get_max_sq_wqebbs(mdev);
+ sq->stop_room = MLX5E_STOP_ROOM(sq->max_sq_wqebbs);
+ sq->max_sq_mpw_wqebbs = mlx5e_get_sw_max_sq_mpw_wqebbs(sq->max_sq_wqebbs);
 param->wq.db_numa_node = cpu_to_node(c->cpu);
 err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
@@ -1238,6 +1243,7 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
 sq->channel = c;
 sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
 sq->reserved_room = param->stop_room;
+ sq->max_sq_wqebbs = mlx5e_get_max_sq_wqebbs(mdev);
 param->wq.db_numa_node = cpu_to_node(c->cpu);
 err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
@@ -1313,7 +1319,6 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
 int err;
 sq->pdev = c->pdev;
- sq->tstamp = c->tstamp;
 sq->clock = &mdev->clock;
 sq->mkey_be = c->mkey_be;
 sq->netdev = c->netdev;
@@ -1324,6 +1329,8 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
 sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
 sq->min_inline_mode = params->tx_min_inline_mode;
 sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
+ sq->max_sq_wqebbs = mlx5e_get_max_sq_wqebbs(mdev);
+ sq->max_sq_mpw_wqebbs = mlx5e_get_sw_max_sq_mpw_wqebbs(sq->max_sq_wqebbs);
 INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
 if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
 set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
@@ -2677,39 +2684,41 @@ static void mlx5e_build_txq_maps(struct mlx5e_priv *priv)
 struct mlx5e_txqsq *sq = &c->sq[tc];
 priv->txq2sq[sq->txq_ix] = sq;
- priv->channel_tc2realtxq[i][tc] = i + tc * ch;
 }
 }
 if (!priv->channels.ptp)
- return;
+ goto out;
 if (!test_bit(MLX5E_PTP_STATE_TX, priv->channels.ptp->state))
- return;
+ goto out;
 for (tc = 0; tc < num_tc; tc++) {
 struct mlx5e_ptp *c = priv->channels.ptp;
 struct mlx5e_txqsq *sq = &c->ptpsq[tc].txqsq;
 priv->txq2sq[sq->txq_ix] = sq;
- priv->port_ptp_tc2realtxq[tc] = priv->num_tc_x_num_ch + tc;
 }
-}
-static void mlx5e_update_num_tc_x_num_ch(struct mlx5e_priv *priv)
-{
- /* Sync with mlx5e_select_queue. */
- WRITE_ONCE(priv->num_tc_x_num_ch,
- mlx5e_get_dcb_num_tc(&priv->channels.params) * priv->channels.num);
+out:
+ /* Make the change to txq2sq visible before the queue is started.
+ * As mlx5e_xmit runs under a spinlock, there is an implicit ACQUIRE,
+ * which pairs with this barrier.
+ */
+ smp_wmb();
 }
 void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
 {
- mlx5e_update_num_tc_x_num_ch(priv);
 mlx5e_build_txq_maps(priv);
 mlx5e_activate_channels(&priv->channels);
 mlx5e_qos_activate_queues(priv);
 mlx5e_xdp_tx_enable(priv);
+
+ /* dev_watchdog() wants all TX queues to be started when the carrier is
+ * OK, including the ones in range real_num_tx_queues..num_tx_queues-1.
+ * Make it happy to avoid TX timeout false alarms.
+ */
 netif_tx_start_all_queues(priv->netdev);
 if (mlx5e_is_vport_rep(priv))
@@ -2729,11 +2738,13 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
 if (mlx5e_is_vport_rep(priv))
 mlx5e_remove_sqs_fwd_rules(priv);
- /* FIXME: This is a W/A only for tx timeout watch dog false alarm when
- * polling for inactive tx queues.
+ /* The results of ndo_select_queue are unreliable, while netdev config
+ * is being changed (real_num_tx_queues, num_tc). Stop all queues to
+ * prevent ndo_start_xmit from being called, so that it can assume that
+ * the selected queue is always valid.
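+ * netif_tx_disable() takes the per-queue TX locks while stopping the
+ * queues, so it also serializes against any ndo_start_xmit call that
+ * is already in flight.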
*/ - netif_tx_stop_all_queues(priv->netdev); netif_tx_disable(priv->netdev); + mlx5e_xdp_tx_disable(priv); mlx5e_deactivate_channels(&priv->channels); } @@ -2793,6 +2804,7 @@ static int mlx5e_switch_priv_channels(struct mlx5e_priv *priv, mlx5e_close_channels(&old_chs); priv->profile->update_rx(priv); + mlx5e_selq_apply(&priv->selq); out: mlx5e_activate_priv_channels(priv); @@ -2816,13 +2828,24 @@ int mlx5e_safe_switch_params(struct mlx5e_priv *priv, return mlx5e_switch_priv_params(priv, params, preactivate, context); new_chs.params = *params; + + mlx5e_selq_prepare(&priv->selq, &new_chs.params, !!priv->htb.maj_id); + err = mlx5e_open_channels(priv, &new_chs); if (err) - return err; + goto err_cancel_selq; + err = mlx5e_switch_priv_channels(priv, &new_chs, preactivate, context); if (err) - mlx5e_close_channels(&new_chs); + goto err_close; + return 0; + +err_close: + mlx5e_close_channels(&new_chs); + +err_cancel_selq: + mlx5e_selq_cancel(&priv->selq); return err; } @@ -2862,6 +2885,8 @@ int mlx5e_open_locked(struct net_device *netdev) struct mlx5e_priv *priv = netdev_priv(netdev); int err; + mlx5e_selq_prepare(&priv->selq, &priv->channels.params, !!priv->htb.maj_id); + set_bit(MLX5E_STATE_OPENED, &priv->state); err = mlx5e_open_channels(priv, &priv->channels); @@ -2869,6 +2894,7 @@ int mlx5e_open_locked(struct net_device *netdev) goto err_clear_state_opened_flag; priv->profile->update_rx(priv); + mlx5e_selq_apply(&priv->selq); mlx5e_activate_priv_channels(priv); mlx5e_apply_traps(priv, true); if (priv->profile->update_carrier) @@ -2879,6 +2905,7 @@ int mlx5e_open_locked(struct net_device *netdev) err_clear_state_opened_flag: clear_bit(MLX5E_STATE_OPENED, &priv->state); + mlx5e_selq_cancel(&priv->selq); return err; } @@ -3616,8 +3643,7 @@ static int set_feature_hw_gro(struct net_device *netdev, bool enable) goto out; } - err = mlx5e_safe_switch_params(priv, &new_params, - mlx5e_modify_tirs_packet_merge_ctx, NULL, reset); + err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, reset); out: mutex_unlock(&priv->state_lock); return err; @@ -4637,11 +4663,6 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 priv->max_nch); mlx5e_params_mqprio_reset(params); - /* Set an initial non-zero value, so that mlx5e_select_queue won't - * divide by zero if called before first activating channels. - */ - priv->num_tc_x_num_ch = params->num_channels * params->mqprio.num_tc; - /* SQ */ params->log_sq_size = is_kdump_kernel() ? 
MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE : @@ -5194,7 +5215,8 @@ int mlx5e_priv_init(struct mlx5e_priv *priv, struct net_device *netdev, struct mlx5_core_dev *mdev) { - int nch, num_txqs, node, i; + int nch, num_txqs, node; + int err; num_txqs = netdev->num_tx_queues; nch = mlx5e_calc_max_nch(mdev, netdev, profile); @@ -5211,6 +5233,11 @@ int mlx5e_priv_init(struct mlx5e_priv *priv, return -ENOMEM; mutex_init(&priv->state_lock); + + err = mlx5e_selq_init(&priv->selq, &priv->state_lock); + if (err) + goto err_free_cpumask; + hash_init(priv->htb.qos_tc2node); INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work); INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work); @@ -5219,7 +5246,7 @@ int mlx5e_priv_init(struct mlx5e_priv *priv, priv->wq = create_singlethread_workqueue("mlx5e"); if (!priv->wq) - goto err_free_cpumask; + goto err_free_selq; priv->txq2sq = kcalloc_node(num_txqs, sizeof(*priv->txq2sq), GFP_KERNEL, node); if (!priv->txq2sq) @@ -5229,36 +5256,21 @@ int mlx5e_priv_init(struct mlx5e_priv *priv, if (!priv->tx_rates) goto err_free_txq2sq; - priv->channel_tc2realtxq = - kcalloc_node(nch, sizeof(*priv->channel_tc2realtxq), GFP_KERNEL, node); - if (!priv->channel_tc2realtxq) - goto err_free_tx_rates; - - for (i = 0; i < nch; i++) { - priv->channel_tc2realtxq[i] = - kcalloc_node(profile->max_tc, sizeof(**priv->channel_tc2realtxq), - GFP_KERNEL, node); - if (!priv->channel_tc2realtxq[i]) - goto err_free_channel_tc2realtxq; - } - priv->channel_stats = kcalloc_node(nch, sizeof(*priv->channel_stats), GFP_KERNEL, node); if (!priv->channel_stats) - goto err_free_channel_tc2realtxq; + goto err_free_tx_rates; return 0; -err_free_channel_tc2realtxq: - while (--i >= 0) - kfree(priv->channel_tc2realtxq[i]); - kfree(priv->channel_tc2realtxq); err_free_tx_rates: kfree(priv->tx_rates); err_free_txq2sq: kfree(priv->txq2sq); err_destroy_workqueue: destroy_workqueue(priv->wq); +err_free_selq: + mlx5e_selq_cleanup(&priv->selq); err_free_cpumask: free_cpumask_var(priv->scratchpad.cpumask); return -ENOMEM; @@ -5275,12 +5287,12 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv) for (i = 0; i < priv->stats_nch; i++) kvfree(priv->channel_stats[i]); kfree(priv->channel_stats); - for (i = 0; i < priv->max_nch; i++) - kfree(priv->channel_tc2realtxq[i]); - kfree(priv->channel_tc2realtxq); kfree(priv->tx_rates); kfree(priv->txq2sq); destroy_workqueue(priv->wq); + mutex_lock(&priv->state_lock); + mlx5e_selq_cleanup(&priv->selq); + mutex_unlock(&priv->state_lock); free_cpumask_var(priv->scratchpad.cpumask); for (i = 0; i < priv->htb.max_qos_sqs; i++) @@ -5346,6 +5358,7 @@ mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *prof } netif_carrier_off(netdev); + netif_tx_disable(netdev); dev_net_set(netdev, mlx5_core_net(mdev)); return netdev; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 06d1f46f1688..6b7e7ea6ded2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -55,6 +55,7 @@ #include "diag/en_rep_tracepoint.h" #include "en_accel/ipsec.h" #include "en/tc/int_port.h" +#include "en/ptp.h" #define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \ max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE) @@ -401,13 +402,18 @@ int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv) struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct mlx5e_rep_priv *rpriv = priv->ppriv; struct mlx5_eswitch_rep *rep = rpriv->rep; + int n, tc, nch, num_sqs = 0; struct mlx5e_channel *c; - 
int n, tc, num_sqs = 0; int err = -ENOMEM; + bool ptp_sq; u32 *sqs; - sqs = kcalloc(priv->channels.num * mlx5e_get_dcb_num_tc(&priv->channels.params), - sizeof(*sqs), GFP_KERNEL); + ptp_sq = !!(priv->channels.ptp && + MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_TX_PORT_TS)); + nch = priv->channels.num + ptp_sq; + + sqs = kcalloc(nch * mlx5e_get_dcb_num_tc(&priv->channels.params), sizeof(*sqs), + GFP_KERNEL); if (!sqs) goto out; @@ -416,6 +422,12 @@ int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv) for (tc = 0; tc < c->num_tc; tc++) sqs[num_sqs++] = c->sq[tc].sqn; } + if (ptp_sq) { + struct mlx5e_ptp *ptp_ch = priv->channels.ptp; + + for (tc = 0; tc < ptp_ch->num_tc; tc++) + sqs[num_sqs++] = ptp_ch->ptpsq[tc].txqsq.sqn; + } err = mlx5e_sqs2vport_start(esw, rep, sqs, num_sqs); kfree(sqs); @@ -632,11 +644,6 @@ static void mlx5e_build_rep_params(struct net_device *netdev) params->mqprio.num_tc = 1; params->tunneled_offload_en = false; - /* Set an initial non-zero value, so that mlx5e_select_queue won't - * divide by zero if called before first activating channels. - */ - priv->num_tc_x_num_ch = params->num_channels * params->mqprio.num_tc; - mlx5_query_min_inline(mdev, ¶ms->tx_min_inline_mode); } @@ -935,15 +942,21 @@ static int mlx5e_init_rep_tx(struct mlx5e_priv *priv) return err; } + err = mlx5e_tc_ht_init(&rpriv->tc_ht); + if (err) + goto err_ht_init; + if (rpriv->rep->vport == MLX5_VPORT_UPLINK) { err = mlx5e_init_uplink_rep_tx(rpriv); if (err) - goto destroy_tises; + goto err_init_tx; } return 0; -destroy_tises: +err_init_tx: + mlx5e_tc_ht_cleanup(&rpriv->tc_ht); +err_ht_init: mlx5e_destroy_tises(priv); return err; } @@ -963,6 +976,8 @@ static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv) if (rpriv->rep->vport == MLX5_VPORT_UPLINK) mlx5e_cleanup_uplink_rep_tx(rpriv); + + mlx5e_tc_ht_cleanup(&rpriv->tc_ht); } static void mlx5e_rep_enable(struct mlx5e_priv *priv) @@ -1099,6 +1114,7 @@ static mlx5e_stats_grp_t mlx5e_ul_rep_stats_grps[] = { &MLX5E_STATS_GRP(ipsec_sw), &MLX5E_STATS_GRP(ipsec_hw), #endif + &MLX5E_STATS_GRP(ptp), }; static unsigned int mlx5e_ul_rep_stats_grps_num(struct mlx5e_priv *priv) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h index b01dacb6f527..adf5cc6a7b8c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h @@ -64,11 +64,6 @@ struct mlx5e_tc_tun_encap; struct mlx5e_post_act; struct mlx5_rep_uplink_priv { - /* Filters DB - instantiated by the uplink representor and shared by - * the uplink's VFs - */ - struct rhashtable tc_ht; - /* indirect block callbacks are invoked on bind/unbind events * on registered higher level devices (e.g. 
tunnel devices) * @@ -113,6 +108,7 @@ struct mlx5e_rep_priv { struct list_head vport_sqs_list; struct mlx5_rep_uplink_priv uplink_priv; /* valid for uplink rep */ struct rtnl_link_stats64 prev_vf_vport_stats; + struct rhashtable tc_ht; }; static inline @@ -183,6 +179,13 @@ struct mlx5e_decap_entry { struct rcu_head rcu; }; +struct mlx5e_mpls_info { + u32 label; + u8 tc; + u8 bos; + u8 ttl; +}; + struct mlx5e_encap_entry { /* attached neigh hash entry */ struct mlx5e_neigh_hash_entry *nhe; @@ -196,6 +199,7 @@ struct mlx5e_encap_entry { struct list_head route_list; struct mlx5_pkt_reformat *pkt_reformat; const struct ip_tunnel_info *tun_info; + struct mlx5e_mpls_info mpls_info; unsigned char h_dest[ETH_ALEN]; /* destination eth addr */ struct net_device *out_dev; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index ee0a8f5206e3..074a44b281b6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -620,7 +620,7 @@ static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq) struct mlx5e_icosq *sq = rq->icosq; int i, err, max_klm_entries, len; - max_klm_entries = MLX5E_MAX_KLM_PER_WQE; + max_klm_entries = MLX5E_MAX_KLM_PER_WQE(rq->mdev); klm_entries = bitmap_find_window(shampo->bitmap, shampo->hd_per_wqe, shampo->hd_per_wq, shampo->pi); @@ -960,8 +960,7 @@ INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq) if (unlikely(rq->mpwqe.umr_in_progress > rq->mpwqe.umr_last_bulk)) rq->stats->congst_umr++; -#define UMR_WQE_BULK (2) - if (likely(missing < UMR_WQE_BULK)) + if (likely(missing < rq->mpwqe.min_wqe_bulk)) return false; if (rq->page_pool) @@ -1349,7 +1348,8 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, } /* True when explicitly set via priv flag, or XDP prog is loaded */ - if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state)) + if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state) || + get_cqe_tls_offload(cqe)) goto csum_unnecessary; /* CQE csum doesn't cover padding octets in short ethernet @@ -1489,7 +1489,7 @@ static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq, static inline struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va, u32 frag_size, u16 headroom, - u32 cqe_bcnt) + u32 cqe_bcnt, u32 metasize) { struct sk_buff *skb = build_skb(va, frag_size); @@ -1501,6 +1501,9 @@ struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va, skb_reserve(skb, headroom); skb_put(skb, cqe_bcnt); + if (metasize) + skb_metadata_set(skb, metasize); + return skb; } @@ -1508,7 +1511,7 @@ static void mlx5e_fill_xdp_buff(struct mlx5e_rq *rq, void *va, u16 headroom, u32 len, struct xdp_buff *xdp) { xdp_init_buff(xdp, rq->buff.frame0_sz, &rq->xdp_rxq); - xdp_prepare_buff(xdp, va, headroom, len, false); + xdp_prepare_buff(xdp, va, headroom, len, true); } static struct sk_buff * @@ -1521,6 +1524,7 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, struct sk_buff *skb; void *va, *data; u32 frag_size; + u32 metasize; va = page_address(di->page) + wi->offset; data = va + rx_headroom; @@ -1537,7 +1541,8 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, rx_headroom = xdp.data - xdp.data_hard_start; frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt); - skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt); + metasize = xdp.data - xdp.data_meta; + skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize); if (unlikely(!skb)) return 
NULL; @@ -1836,6 +1841,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, struct sk_buff *skb; void *va, *data; u32 frag_size; + u32 metasize; /* Check packet size. Note LRO doesn't use linear SKB */ if (unlikely(cqe_bcnt > rq->hw_mtu)) { @@ -1861,7 +1867,8 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, rx_headroom = xdp.data - xdp.data_hard_start; frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32); - skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt32); + metasize = xdp.data - xdp.data_meta; + skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt32, metasize); if (unlikely(!skb)) return NULL; @@ -1892,7 +1899,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, dma_sync_single_range_for_cpu(rq->pdev, head->addr, 0, frag_size, DMA_FROM_DEVICE); prefetchw(hdr); prefetch(data); - skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size); + skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size, 0); if (unlikely(!skb)) return NULL; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c index 8c9163d2c646..08a75654f5f1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c @@ -334,6 +334,7 @@ void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest, netdev_info(ndev, "\t[%d] %s start..\n", i, st.name); buf[count] = st.st_func(priv); netdev_info(ndev, "\t[%d] %s end: result(%lld)\n", i, st.name, buf[count]); + count++; } mutex_unlock(&priv->state_lock); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 26e326fe503c..336e4d04c5f2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -37,6 +37,10 @@ #include "en/ptp.h" #include "en/port.h" +#ifdef CONFIG_PAGE_POOL_STATS +#include <net/page_pool.h> +#endif + static unsigned int stats_grps_num(struct mlx5e_priv *priv) { return !priv->profile->stats_grps_num ? 
0 : @@ -183,6 +187,19 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_recover) }, +#ifdef CONFIG_PAGE_POOL_STATS + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_fast) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_slow) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_slow_high_order) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_empty) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_refill) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_waive) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_cached) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_cache_full) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_ring) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_ring_full) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_released_ref) }, +#endif #ifdef CONFIG_MLX5_EN_TLS { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_packets) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_bytes) }, @@ -349,6 +366,19 @@ static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s, s->rx_congst_umr += rq_stats->congst_umr; s->rx_arfs_err += rq_stats->arfs_err; s->rx_recover += rq_stats->recover; +#ifdef CONFIG_PAGE_POOL_STATS + s->rx_pp_alloc_fast += rq_stats->pp_alloc_fast; + s->rx_pp_alloc_slow += rq_stats->pp_alloc_slow; + s->rx_pp_alloc_empty += rq_stats->pp_alloc_empty; + s->rx_pp_alloc_refill += rq_stats->pp_alloc_refill; + s->rx_pp_alloc_waive += rq_stats->pp_alloc_waive; + s->rx_pp_alloc_slow_high_order += rq_stats->pp_alloc_slow_high_order; + s->rx_pp_recycle_cached += rq_stats->pp_recycle_cached; + s->rx_pp_recycle_cache_full += rq_stats->pp_recycle_cache_full; + s->rx_pp_recycle_ring += rq_stats->pp_recycle_ring; + s->rx_pp_recycle_ring_full += rq_stats->pp_recycle_ring_full; + s->rx_pp_recycle_released_ref += rq_stats->pp_recycle_released_ref; +#endif #ifdef CONFIG_MLX5_EN_TLS s->rx_tls_decrypted_packets += rq_stats->tls_decrypted_packets; s->rx_tls_decrypted_bytes += rq_stats->tls_decrypted_bytes; @@ -455,6 +485,35 @@ static void mlx5e_stats_grp_sw_update_stats_qos(struct mlx5e_priv *priv, } } +#ifdef CONFIG_PAGE_POOL_STATS +static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c) +{ + struct mlx5e_rq_stats *rq_stats = c->rq.stats; + struct page_pool *pool = c->rq.page_pool; + struct page_pool_stats stats = { 0 }; + + if (!page_pool_get_stats(pool, &stats)) + return; + + rq_stats->pp_alloc_fast = stats.alloc_stats.fast; + rq_stats->pp_alloc_slow = stats.alloc_stats.slow; + rq_stats->pp_alloc_slow_high_order = stats.alloc_stats.slow_high_order; + rq_stats->pp_alloc_empty = stats.alloc_stats.empty; + rq_stats->pp_alloc_waive = stats.alloc_stats.waive; + rq_stats->pp_alloc_refill = stats.alloc_stats.refill; + + rq_stats->pp_recycle_cached = stats.recycle_stats.cached; + rq_stats->pp_recycle_cache_full = stats.recycle_stats.cache_full; + rq_stats->pp_recycle_ring = stats.recycle_stats.ring; + rq_stats->pp_recycle_ring_full = stats.recycle_stats.ring_full; + rq_stats->pp_recycle_released_ref = stats.recycle_stats.released_refcnt; +} +#else +static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c) +{ +} +#endif + static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw) { struct mlx5e_sw_stats *s = 
&priv->stats.sw; @@ -465,8 +524,11 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw) for (i = 0; i < priv->stats_nch; i++) { struct mlx5e_channel_stats *channel_stats = priv->channel_stats[i]; + int j; + mlx5e_stats_update_stats_rq_page_pool(priv->channels.c[i]); + mlx5e_stats_grp_sw_update_stats_rq_stats(s, &channel_stats->rq); mlx5e_stats_grp_sw_update_stats_xdpsq(s, &channel_stats->rq_xdpsq); mlx5e_stats_grp_sw_update_stats_ch_stats(s, &channel_stats->ch); @@ -1254,9 +1316,6 @@ static void fec_set_corrected_bits_total(struct mlx5e_priv *priv, u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {}; int sz = MLX5_ST_SZ_BYTES(ppcnt_reg); - if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group)) - return; - MLX5_SET(ppcnt_reg, in, local_port, 1); MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP); if (mlx5_core_access_reg(mdev, in, sz, ppcnt_phy_statistical, @@ -1272,6 +1331,9 @@ static void fec_set_corrected_bits_total(struct mlx5e_priv *priv, void mlx5e_stats_fec_get(struct mlx5e_priv *priv, struct ethtool_fec_stats *fec_stats) { + if (!MLX5_CAP_PCAM_FEATURE(priv->mdev, ppcnt_statistical_group)) + return; + fec_set_corrected_bits_total(priv, fec_stats); fec_set_block_stats(priv, fec_stats); } @@ -1887,6 +1949,19 @@ static const struct counter_desc rq_stats_desc[] = { { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, recover) }, +#ifdef CONFIG_PAGE_POOL_STATS + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_fast) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_slow) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_slow_high_order) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_empty) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_refill) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_waive) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_cached) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_cache_full) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_ring) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_ring_full) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_released_ref) }, +#endif #ifdef CONFIG_MLX5_EN_TLS { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_packets) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_bytes) }, @@ -2348,7 +2423,7 @@ MLX5E_DEFINE_STATS_GRP(channels, 0); MLX5E_DEFINE_STATS_GRP(per_port_buff_congest, 0); MLX5E_DEFINE_STATS_GRP(eth_ext, 0); static MLX5E_DEFINE_STATS_GRP(tls, 0); -static MLX5E_DEFINE_STATS_GRP(ptp, 0); +MLX5E_DEFINE_STATS_GRP(ptp, 0); static MLX5E_DEFINE_STATS_GRP(qos, 0); /* The stats groups order is opposite to the update_stats() order calls */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 2c1ed5b81be6..a7a025d15c14 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -205,7 +205,19 @@ struct mlx5e_sw_stats { u64 ch_aff_change; u64 ch_force_irq; u64 ch_eq_rearm; - +#ifdef CONFIG_PAGE_POOL_STATS + u64 rx_pp_alloc_fast; + u64 rx_pp_alloc_slow; + u64 rx_pp_alloc_slow_high_order; + u64 rx_pp_alloc_empty; + u64 rx_pp_alloc_refill; + u64 rx_pp_alloc_waive; + u64 rx_pp_recycle_cached; + u64 rx_pp_recycle_cache_full; + u64 rx_pp_recycle_ring; + u64 rx_pp_recycle_ring_full; + u64 rx_pp_recycle_released_ref; 
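+ /* The rx_pp_* counters above are snapshots aggregated from each RQ's
+ * page_pool via page_pool_get_stats(). */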
+#endif #ifdef CONFIG_MLX5_EN_TLS u64 tx_tls_encrypted_packets; u64 tx_tls_encrypted_bytes; @@ -352,6 +364,19 @@ struct mlx5e_rq_stats { u64 congst_umr; u64 arfs_err; u64 recover; +#ifdef CONFIG_PAGE_POOL_STATS + u64 pp_alloc_fast; + u64 pp_alloc_slow; + u64 pp_alloc_slow_high_order; + u64 pp_alloc_empty; + u64 pp_alloc_refill; + u64 pp_alloc_waive; + u64 pp_recycle_cached; + u64 pp_recycle_cache_full; + u64 pp_recycle_ring; + u64 pp_recycle_ring_full; + u64 pp_recycle_released_ref; +#endif #ifdef CONFIG_MLX5_EN_TLS u64 tls_decrypted_packets; u64 tls_decrypted_bytes; @@ -459,5 +484,6 @@ extern MLX5E_DECLARE_STATS_GRP(channels); extern MLX5E_DECLARE_STATS_GRP(per_port_buff_congest); extern MLX5E_DECLARE_STATS_GRP(ipsec_hw); extern MLX5E_DECLARE_STATS_GRP(ipsec_sw); +extern MLX5E_DECLARE_STATS_GRP(ptp); #endif /* __MLX5_EN_STATS_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 099d4ce16049..e3fc15ae7bb1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -115,6 +115,7 @@ struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = { static struct lock_class_key tc_ht_lock_key; static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow); +static void free_flow_post_acts(struct mlx5e_tc_flow *flow); void mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec, @@ -273,6 +274,23 @@ get_sample_priv(struct mlx5e_priv *priv) return NULL; } +static struct mlx5e_post_act * +get_post_action(struct mlx5e_priv *priv) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5_rep_uplink_priv *uplink_priv; + struct mlx5e_rep_priv *uplink_rpriv; + + if (is_mdev_switchdev_mode(priv->mdev)) { + uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); + uplink_priv = &uplink_rpriv->uplink_priv; + + return uplink_priv->post_act; + } + + return priv->fs.tc.post_act; +} + struct mlx5_flow_handle * mlx5_tc_rule_insert(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec, @@ -1193,6 +1211,8 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv, if (flow_flag_test(flow, HAIRPIN)) mlx5e_hairpin_flow_del(priv, flow); + free_flow_post_acts(flow); + kvfree(attr->parse_attr); kfree(flow->attr); } @@ -1425,6 +1445,9 @@ set_encap_dests(struct mlx5e_priv *priv, int out_index; int err = 0; + if (!mlx5e_is_eswitch_flow(flow)) + return 0; + parse_attr = attr->parse_attr; esw_attr = attr->esw_attr; *vf_tun = false; @@ -1480,6 +1503,9 @@ clean_encap_dests(struct mlx5e_priv *priv, struct mlx5_esw_flow_attr *esw_attr; int out_index; + if (!mlx5e_is_eswitch_flow(flow)) + return; + esw_attr = attr->esw_attr; *vf_tun = false; @@ -1627,7 +1653,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, * (1) there's no error * (2) there's an encap action and we don't have valid neigh */ - if (!encap_valid) + if (!encap_valid || flow_flag_test(flow, SLOW)) flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec); else flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr); @@ -1712,6 +1738,8 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv, if (flow_flag_test(flow, L3_TO_L2_DECAP)) mlx5e_detach_decap(priv, flow); + free_flow_post_acts(flow); + kvfree(attr->esw_attr->rx_tun_attr); kvfree(attr->parse_attr); kfree(flow->attr); @@ -1719,7 +1747,10 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv, struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow) { - return flow->attr->counter; + struct 
mlx5_flow_attr *attr; + + attr = list_first_entry(&flow->attrs, struct mlx5_flow_attr, list); + return attr->counter; } /* Iterate over tmp_list of flows attached to flow_list head. */ @@ -3257,11 +3288,11 @@ actions_match_supported_fdb(struct mlx5e_priv *priv, static bool actions_match_supported(struct mlx5e_priv *priv, struct flow_action *flow_action, + u32 actions, struct mlx5e_tc_flow_parse_attr *parse_attr, struct mlx5e_tc_flow *flow, struct netlink_ext_ack *extack) { - u32 actions = flow->attr->action; bool ct_flow, ct_clear; ct_clear = flow->attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR; @@ -3285,6 +3316,18 @@ actions_match_supported(struct mlx5e_priv *priv, return false; } + if (!(~actions & + (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) { + NL_SET_ERR_MSG_MOD(extack, "Rule cannot support forward+drop action"); + return false; + } + + if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR && + actions & MLX5_FLOW_CONTEXT_ACTION_DROP) { + NL_SET_ERR_MSG_MOD(extack, "Drop with modify header action is not supported"); + return false; + } + if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR && !modify_header_match_supported(priv, &parse_attr->spec, flow_action, actions, ct_flow, ct_clear, extack)) @@ -3317,50 +3360,6 @@ bool mlx5e_same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv) } static int -parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state, - struct flow_action *flow_action) -{ - struct netlink_ext_ack *extack = parse_state->extack; - struct mlx5e_tc_flow *flow = parse_state->flow; - struct mlx5_flow_attr *attr = flow->attr; - enum mlx5_flow_namespace_type ns_type; - struct mlx5e_priv *priv = flow->priv; - const struct flow_action_entry *act; - struct mlx5e_tc_act *tc_act; - int err, i; - - ns_type = mlx5e_get_flow_namespace(flow); - - flow_action_for_each(i, act, flow_action) { - tc_act = mlx5e_tc_act_get(act->id, ns_type); - if (!tc_act) { - NL_SET_ERR_MSG_MOD(extack, "Not implemented offload action"); - return -EOPNOTSUPP; - } - - if (!tc_act->can_offload(parse_state, act, i, attr)) - return -EOPNOTSUPP; - - err = tc_act->parse_action(parse_state, act, priv, attr); - if (err) - return err; - } - - flow_action_for_each(i, act, flow_action) { - tc_act = mlx5e_tc_act_get(act->id, ns_type); - if (!tc_act || !tc_act->post_parse || - !tc_act->can_offload(parse_state, act, i, attr)) - continue; - - err = tc_act->post_parse(parse_state, priv, attr); - if (err) - return err; - } - - return 0; -} - -static int actions_prepare_mod_hdr_actions(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr, @@ -3398,6 +3397,299 @@ actions_prepare_mod_hdr_actions(struct mlx5e_priv *priv, return 0; } +static struct mlx5_flow_attr* +mlx5e_clone_flow_attr_for_post_act(struct mlx5_flow_attr *attr, + enum mlx5_flow_namespace_type ns_type) +{ + struct mlx5e_tc_flow_parse_attr *parse_attr; + u32 attr_sz = ns_to_attr_sz(ns_type); + struct mlx5_flow_attr *attr2; + + attr2 = mlx5_alloc_flow_attr(ns_type); + parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL); + if (!attr2 || !parse_attr) { + kvfree(parse_attr); + kfree(attr2); + return NULL; + } + + memcpy(attr2, attr, attr_sz); + INIT_LIST_HEAD(&attr2->list); + parse_attr->filter_dev = attr->parse_attr->filter_dev; + attr2->action = 0; + attr2->flags = 0; + attr2->parse_attr = parse_attr; + return attr2; +} + +static struct mlx5_core_dev * +get_flow_counter_dev(struct mlx5e_tc_flow *flow) +{ + return mlx5e_is_eswitch_flow(flow) ? 
flow->attr->esw_attr->counter_dev : flow->priv->mdev; +} + +struct mlx5_flow_attr * +mlx5e_tc_get_encap_attr(struct mlx5e_tc_flow *flow) +{ + struct mlx5_esw_flow_attr *esw_attr; + struct mlx5_flow_attr *attr; + int i; + + list_for_each_entry(attr, &flow->attrs, list) { + esw_attr = attr->esw_attr; + for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) { + if (esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP) + return attr; + } + } + + return NULL; +} + +void +mlx5e_tc_unoffload_flow_post_acts(struct mlx5e_tc_flow *flow) +{ + struct mlx5e_post_act *post_act = get_post_action(flow->priv); + struct mlx5_flow_attr *attr; + + list_for_each_entry(attr, &flow->attrs, list) { + if (list_is_last(&attr->list, &flow->attrs)) + break; + + mlx5e_tc_post_act_unoffload(post_act, attr->post_act_handle); + } +} + +static void +free_flow_post_acts(struct mlx5e_tc_flow *flow) +{ + struct mlx5_core_dev *counter_dev = get_flow_counter_dev(flow); + struct mlx5e_post_act *post_act = get_post_action(flow->priv); + struct mlx5_flow_attr *attr, *tmp; + bool vf_tun; + + list_for_each_entry_safe(attr, tmp, &flow->attrs, list) { + if (list_is_last(&attr->list, &flow->attrs)) + break; + + if (attr->post_act_handle) + mlx5e_tc_post_act_del(post_act, attr->post_act_handle); + + clean_encap_dests(flow->priv, flow, attr, &vf_tun); + + if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) + mlx5_fc_destroy(counter_dev, attr->counter); + + if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) { + mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts); + if (attr->modify_hdr) + mlx5_modify_header_dealloc(flow->priv->mdev, attr->modify_hdr); + } + + list_del(&attr->list); + kvfree(attr->parse_attr); + kfree(attr); + } +} + +int +mlx5e_tc_offload_flow_post_acts(struct mlx5e_tc_flow *flow) +{ + struct mlx5e_post_act *post_act = get_post_action(flow->priv); + struct mlx5_flow_attr *attr; + int err = 0; + + list_for_each_entry(attr, &flow->attrs, list) { + if (list_is_last(&attr->list, &flow->attrs)) + break; + + err = mlx5e_tc_post_act_offload(post_act, attr->post_act_handle); + if (err) + break; + } + + return err; +} + +/* TC filter rule HW translation: + * + * +---------------------+ + * + ft prio (tc chain) + + * + original match + + * +---------------------+ + * | + * | if multi table action + * | + * v + * +---------------------+ + * + post act ft |<----. + * + match fte id | | split on multi table action + * + do actions |-----' + * +---------------------+ + * | + * | + * v + * Do rest of the actions after last multi table action. + */ +static int +alloc_flow_post_acts(struct mlx5e_tc_flow *flow, struct netlink_ext_ack *extack) +{ + struct mlx5e_post_act *post_act = get_post_action(flow->priv); + struct mlx5_flow_attr *attr, *next_attr = NULL; + struct mlx5e_post_act_handle *handle; + bool vf_tun, encap_valid = true; + int err; + + /* This is going in reverse order as needed. + * The first entry is the last attribute. + */ + list_for_each_entry(attr, &flow->attrs, list) { + if (!next_attr) { + /* Set counter action on last post act rule. */ + attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; + } else { + err = mlx5e_tc_act_set_next_post_act(flow, attr, next_attr); + if (err) + goto out_free; + } + + /* Don't add post_act rule for first attr (last in the list). + * It's being handled by the caller. 
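+ * (That first attr is flow->attr itself; it is offloaded through the
+ * regular FDB/NIC rule path, not through the post action table.)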
+ */ + if (list_is_last(&attr->list, &flow->attrs)) + break; + + err = set_encap_dests(flow->priv, flow, attr, extack, &encap_valid, &vf_tun); + if (err) + goto out_free; + + if (!encap_valid) + flow_flag_set(flow, SLOW); + + err = actions_prepare_mod_hdr_actions(flow->priv, flow, attr, extack); + if (err) + goto out_free; + + if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) { + err = mlx5e_tc_add_flow_mod_hdr(flow->priv, flow, attr); + if (err) + goto out_free; + } + + if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { + err = alloc_flow_attr_counter(get_flow_counter_dev(flow), attr); + if (err) + goto out_free; + } + + handle = mlx5e_tc_post_act_add(post_act, attr); + if (IS_ERR(handle)) { + err = PTR_ERR(handle); + goto out_free; + } + + attr->post_act_handle = handle; + next_attr = attr; + } + + if (flow_flag_test(flow, SLOW)) + goto out; + + err = mlx5e_tc_offload_flow_post_acts(flow); + if (err) + goto out_free; + +out: + return 0; + +out_free: + free_flow_post_acts(flow); + return err; +} + +static int +parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state, + struct flow_action *flow_action) +{ + struct netlink_ext_ack *extack = parse_state->extack; + struct mlx5e_tc_flow_action flow_action_reorder; + struct mlx5e_tc_flow *flow = parse_state->flow; + struct mlx5_flow_attr *attr = flow->attr; + enum mlx5_flow_namespace_type ns_type; + struct mlx5e_priv *priv = flow->priv; + struct flow_action_entry *act, **_act; + struct mlx5e_tc_act *tc_act; + int err, i; + + flow_action_reorder.num_entries = flow_action->num_entries; + flow_action_reorder.entries = kcalloc(flow_action->num_entries, + sizeof(flow_action), GFP_KERNEL); + if (!flow_action_reorder.entries) + return -ENOMEM; + + mlx5e_tc_act_reorder_flow_actions(flow_action, &flow_action_reorder); + + ns_type = mlx5e_get_flow_namespace(flow); + list_add(&attr->list, &flow->attrs); + + flow_action_for_each(i, _act, &flow_action_reorder) { + act = *_act; + tc_act = mlx5e_tc_act_get(act->id, ns_type); + if (!tc_act) { + NL_SET_ERR_MSG_MOD(extack, "Not implemented offload action"); + err = -EOPNOTSUPP; + goto out_free; + } + + if (!tc_act->can_offload(parse_state, act, i, attr)) { + err = -EOPNOTSUPP; + goto out_free; + } + + err = tc_act->parse_action(parse_state, act, priv, attr); + if (err) + goto out_free; + + parse_state->actions |= attr->action; + + /* Split attr for multi table act if not the last act. 
*/ + if (tc_act->is_multi_table_act && + tc_act->is_multi_table_act(priv, act, attr) && + i < flow_action_reorder.num_entries - 1) { + err = mlx5e_tc_act_post_parse(parse_state, flow_action, attr, ns_type); + if (err) + goto out_free; + + attr = mlx5e_clone_flow_attr_for_post_act(flow->attr, ns_type); + if (!attr) { + err = -ENOMEM; + goto out_free; + } + + list_add(&attr->list, &flow->attrs); + } + } + + kfree(flow_action_reorder.entries); + + err = mlx5e_tc_act_post_parse(parse_state, flow_action, attr, ns_type); + if (err) + goto out_free_post_acts; + + err = alloc_flow_post_acts(flow, extack); + if (err) + goto out_free_post_acts; + + return 0; + +out_free: + kfree(flow_action_reorder.entries); +out_free_post_acts: + free_flow_post_acts(flow); + + return err; +} + static int flow_action_supported(struct flow_action *flow_action, struct netlink_ext_ack *extack) @@ -3445,7 +3737,8 @@ parse_tc_nic_actions(struct mlx5e_priv *priv, if (err) return err; - if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack)) + if (!actions_match_supported(priv, flow_action, parse_state->actions, + parse_attr, flow, extack)) return -EOPNOTSUPP; return 0; @@ -3574,7 +3867,8 @@ parse_tc_fdb_actions(struct mlx5e_priv *priv, if (err) return err; - if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack)) + if (!actions_match_supported(priv, flow_action, parse_state->actions, + parse_attr, flow, extack)) return -EOPNOTSUPP; return 0; @@ -3609,12 +3903,11 @@ static const struct rhashtable_params tc_ht_params = { static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv, unsigned long flags) { - struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - struct mlx5e_rep_priv *uplink_rpriv; + struct mlx5e_rep_priv *rpriv; if (flags & MLX5_TC_FLAG(ESW_OFFLOAD)) { - uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); - return &uplink_rpriv->uplink_priv.tc_ht; + rpriv = priv->ppriv; + return &rpriv->tc_ht; } else /* NIC offload */ return &priv->fs.tc.ht; } @@ -3649,7 +3942,12 @@ mlx5_alloc_flow_attr(enum mlx5_flow_namespace_type type) sizeof(struct mlx5_nic_flow_attr); struct mlx5_flow_attr *attr; - return kzalloc(sizeof(*attr) + ex_attr_size, GFP_KERNEL); + attr = kzalloc(sizeof(*attr) + ex_attr_size, GFP_KERNEL); + if (!attr) + return attr; + + INIT_LIST_HEAD(&attr->list); + return attr; } static int @@ -3683,6 +3981,7 @@ mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size, INIT_LIST_HEAD(&flow->encaps[out_index].list); INIT_LIST_HEAD(&flow->hairpin); INIT_LIST_HEAD(&flow->l3_to_l2_reformat); + INIT_LIST_HEAD(&flow->attrs); refcount_set(&flow->refcnt, 1); init_completion(&flow->init_done); init_completion(&flow->del_hw_done); @@ -4183,6 +4482,46 @@ static int apply_police_params(struct mlx5e_priv *priv, u64 rate, return err; } +static int mlx5e_policer_validate(const struct flow_action *action, + const struct flow_action_entry *act, + struct netlink_ext_ack *extack) +{ + if (act->police.exceed.act_id != FLOW_ACTION_DROP) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when exceed action is not drop"); + return -EOPNOTSUPP; + } + + if (act->police.notexceed.act_id != FLOW_ACTION_PIPE && + act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when conform action is not pipe or ok"); + return -EOPNOTSUPP; + } + + if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT && + !flow_action_is_last_entry(action, act)) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when conform action is ok, but action is not 
last"); + return -EOPNOTSUPP; + } + + if (act->police.peakrate_bytes_ps || + act->police.avrate || act->police.overhead) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when peakrate/avrate/overhead is configured"); + return -EOPNOTSUPP; + } + + if (act->police.rate_pkt_ps) { + NL_SET_ERR_MSG_MOD(extack, + "QoS offload not support packets per second"); + return -EOPNOTSUPP; + } + + return 0; +} + static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv, struct flow_action *flow_action, struct netlink_ext_ack *extack) @@ -4210,10 +4549,10 @@ static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv, flow_action_for_each(i, act, flow_action) { switch (act->id) { case FLOW_ACTION_POLICE: - if (act->police.rate_pkt_ps) { - NL_SET_ERR_MSG_MOD(extack, "QoS offload not support packets per second"); - return -EOPNOTSUPP; - } + err = mlx5e_policer_validate(flow_action, act, extack); + if (err) + return err; + err = apply_police_params(priv, act->police.rate_bytes_ps, extack); if (err) return err; @@ -4447,10 +4786,27 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) mlx5_chains_destroy(tc->chains); } -int mlx5e_tc_esw_init(struct rhashtable *tc_ht) +int mlx5e_tc_ht_init(struct rhashtable *tc_ht) +{ + int err; + + err = rhashtable_init(tc_ht, &tc_ht_params); + if (err) + return err; + + lockdep_set_class(&tc_ht->mutex, &tc_ht_lock_key); + + return 0; +} + +void mlx5e_tc_ht_cleanup(struct rhashtable *tc_ht) +{ + rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL); +} + +int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv) { const size_t sz_enc_opts = sizeof(struct tunnel_match_enc_opts); - struct mlx5_rep_uplink_priv *uplink_priv; struct mlx5e_rep_priv *rpriv; struct mapping_ctx *mapping; struct mlx5_eswitch *esw; @@ -4458,7 +4814,6 @@ int mlx5e_tc_esw_init(struct rhashtable *tc_ht) u64 mapping_id; int err = 0; - uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht); rpriv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv); priv = netdev_priv(rpriv->netdev); esw = priv->mdev->priv.eswitch; @@ -4498,12 +4853,6 @@ int mlx5e_tc_esw_init(struct rhashtable *tc_ht) } uplink_priv->tunnel_enc_opts_mapping = mapping; - err = rhashtable_init(tc_ht, &tc_ht_params); - if (err) - goto err_ht_init; - - lockdep_set_class(&tc_ht->mutex, &tc_ht_lock_key); - uplink_priv->encap = mlx5e_tc_tun_init(priv); if (IS_ERR(uplink_priv->encap)) { err = PTR_ERR(uplink_priv->encap); @@ -4513,8 +4862,6 @@ int mlx5e_tc_esw_init(struct rhashtable *tc_ht) return 0; err_register_fib_notifier: - rhashtable_destroy(tc_ht); -err_ht_init: mapping_destroy(uplink_priv->tunnel_enc_opts_mapping); err_enc_opts_mapping: mapping_destroy(uplink_priv->tunnel_mapping); @@ -4528,13 +4875,8 @@ err_tun_mapping: return err; } -void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht) +void mlx5e_tc_esw_cleanup(struct mlx5_rep_uplink_priv *uplink_priv) { - struct mlx5_rep_uplink_priv *uplink_priv; - - uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht); - - rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL); mlx5e_tc_tun_cleanup(uplink_priv->encap); mapping_destroy(uplink_priv->tunnel_enc_opts_mapping); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h index c6221728b767..a80b00946f1b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h @@ -53,7 +53,6 @@ ESW_FLOW_ATTR_SZ :\ NIC_FLOW_ATTR_SZ) - int mlx5e_tc_num_filters(struct mlx5e_priv 
*priv, unsigned long flags); struct mlx5e_tc_update_priv { @@ -84,6 +83,8 @@ struct mlx5_flow_attr { u8 tun_ip_version; int tunnel_id; /* mapped tunnel id */ u32 flags; + struct list_head list; + struct mlx5e_post_act_handle *post_act_handle; union { struct mlx5_esw_flow_attr esw_attr[0]; struct mlx5_nic_flow_attr nic_attr[0]; @@ -167,8 +168,11 @@ enum { #define MLX5_TC_FLAG(flag) BIT(MLX5E_TC_FLAG_##flag##_BIT) -int mlx5e_tc_esw_init(struct rhashtable *tc_ht); -void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht); +int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv); +void mlx5e_tc_esw_cleanup(struct mlx5_rep_uplink_priv *uplink_priv); + +int mlx5e_tc_ht_init(struct rhashtable *tc_ht); +void mlx5e_tc_ht_cleanup(struct rhashtable *tc_ht); int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv, struct flow_cls_offload *f, unsigned long flags); @@ -304,6 +308,8 @@ int mlx5e_set_fwd_to_int_port_actions(struct mlx5e_priv *priv, #else /* CONFIG_MLX5_CLS_ACT */ static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; } static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {} +static inline int mlx5e_tc_ht_init(struct rhashtable *tc_ht) { return 0; } +static inline void mlx5e_tc_ht_cleanup(struct rhashtable *tc_ht) {} static inline int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv) { return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index ee7ecb88adc1..2dc48406cd08 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -53,117 +53,6 @@ static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma) } } -#ifdef CONFIG_MLX5_CORE_EN_DCB -static inline int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb) -{ - int dscp_cp = 0; - - if (skb->protocol == htons(ETH_P_IP)) - dscp_cp = ipv4_get_dsfield(ip_hdr(skb)) >> 2; - else if (skb->protocol == htons(ETH_P_IPV6)) - dscp_cp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2; - - return priv->dcbx_dp.dscp2prio[dscp_cp]; -} -#endif - -static u16 mlx5e_select_ptpsq(struct net_device *dev, struct sk_buff *skb) -{ - struct mlx5e_priv *priv = netdev_priv(dev); - int up = 0; - - if (!netdev_get_num_tc(dev)) - goto return_txq; - -#ifdef CONFIG_MLX5_CORE_EN_DCB - if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP) - up = mlx5e_get_dscp_up(priv, skb); - else -#endif - if (skb_vlan_tag_present(skb)) - up = skb_vlan_tag_get_prio(skb); - -return_txq: - return priv->port_ptp_tc2realtxq[up]; -} - -static int mlx5e_select_htb_queue(struct mlx5e_priv *priv, struct sk_buff *skb, - u16 htb_maj_id) -{ - u16 classid; - - if ((TC_H_MAJ(skb->priority) >> 16) == htb_maj_id) - classid = TC_H_MIN(skb->priority); - else - classid = READ_ONCE(priv->htb.defcls); - - if (!classid) - return 0; - - return mlx5e_get_txq_by_classid(priv, classid); -} - -u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, - struct net_device *sb_dev) -{ - struct mlx5e_priv *priv = netdev_priv(dev); - int num_tc_x_num_ch; - int txq_ix; - int up = 0; - int ch_ix; - - /* Sync with mlx5e_update_num_tc_x_num_ch - avoid refetching. */ - num_tc_x_num_ch = READ_ONCE(priv->num_tc_x_num_ch); - if (unlikely(dev->real_num_tx_queues > num_tc_x_num_ch)) { - struct mlx5e_ptp *ptp_channel; - - /* Order maj_id before defcls - pairs with mlx5e_htb_root_add. 
*/ - u16 htb_maj_id = smp_load_acquire(&priv->htb.maj_id); - - if (unlikely(htb_maj_id)) { - txq_ix = mlx5e_select_htb_queue(priv, skb, htb_maj_id); - if (txq_ix > 0) - return txq_ix; - } - - ptp_channel = READ_ONCE(priv->channels.ptp); - if (unlikely(ptp_channel && - test_bit(MLX5E_PTP_STATE_TX, ptp_channel->state) && - mlx5e_use_ptpsq(skb))) - return mlx5e_select_ptpsq(dev, skb); - - txq_ix = netdev_pick_tx(dev, skb, NULL); - /* Fix netdev_pick_tx() not to choose ptp_channel and HTB txqs. - * If they are selected, switch to regular queues. - * Driver to select these queues only at mlx5e_select_ptpsq() - * and mlx5e_select_htb_queue(). - */ - if (unlikely(txq_ix >= num_tc_x_num_ch)) - txq_ix %= num_tc_x_num_ch; - } else { - txq_ix = netdev_pick_tx(dev, skb, NULL); - } - - if (!netdev_get_num_tc(dev)) - return txq_ix; - -#ifdef CONFIG_MLX5_CORE_EN_DCB - if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP) - up = mlx5e_get_dscp_up(priv, skb); - else -#endif - if (skb_vlan_tag_present(skb)) - up = skb_vlan_tag_get_prio(skb); - - /* Normalize any picked txq_ix to [0, num_channels), - * So we can return a txq_ix that matches the channel and - * packet UP. - */ - ch_ix = priv->txq2sq[txq_ix]->ch_ix; - - return priv->channel_tc2realtxq[ch_ix][up]; -} - static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb) { #define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN) @@ -544,7 +433,7 @@ static void mlx5e_tx_mpwqe_session_start(struct mlx5e_txqsq *sq, struct mlx5e_tx_wqe *wqe; u16 pi; - pi = mlx5e_txqsq_get_next_pi(sq, MLX5E_TX_MPW_MAX_WQEBBS); + pi = mlx5e_txqsq_get_next_pi(sq, sq->max_sq_mpw_wqebbs); wqe = MLX5E_TX_FETCH_WQE(sq, pi); net_prefetchw(wqe->data); @@ -645,7 +534,7 @@ mlx5e_sq_xmit_mpwqe(struct mlx5e_txqsq *sq, struct sk_buff *skb, mlx5e_tx_skb_update_hwts_flags(skb); - if (unlikely(mlx5e_tx_mpwqe_is_full(&sq->mpwqe))) { + if (unlikely(mlx5e_tx_mpwqe_is_full(&sq->mpwqe, sq->max_sq_mpw_wqebbs))) { /* Might stop the queue and affect the retval of __netdev_tx_sent_queue. */ cseg = mlx5e_tx_mpwqe_session_complete(sq); @@ -691,8 +580,21 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev) struct mlx5e_txqsq *sq; u16 pi; + /* All changes to txq2sq are performed in sync with mlx5e_xmit, when the + * queue being changed is disabled, and smp_wmb guarantees that the + * changes are visible before mlx5e_xmit tries to read from txq2sq. It + * guarantees that the value of txq2sq[qid] doesn't change while + * mlx5e_xmit is running on queue number qid. smp_wmb is paired with + * HARD_TX_LOCK around ndo_start_xmit, which serves as an ACQUIRE. + */ sq = priv->txq2sq[skb_get_queue_mapping(skb)]; if (unlikely(!sq)) { + /* Two cases when sq can be NULL: + * 1. The HTB node is registered, and mlx5e_select_queue + * selected its queue ID, but the SQ itself is not yet created. + * 2. HTB SQ creation failed. Similar to the previous case, but + * the SQ won't be created.
+ */ dev_kfree_skb_any(skb); return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 48a45aa54a3c..229728c80233 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -5,7 +5,6 @@ #include <linux/interrupt.h> #include <linux/notifier.h> -#include <linux/module.h> #include <linux/mlx5/driver.h> #include <linux/mlx5/vport.h> #include <linux/mlx5/eq.h> @@ -439,7 +438,8 @@ int mlx5_eq_table_init(struct mlx5_core_dev *dev) struct mlx5_eq_table *eq_table; int i; - eq_table = kvzalloc(sizeof(*eq_table), GFP_KERNEL); + eq_table = kvzalloc_node(sizeof(*eq_table), GFP_KERNEL, + dev->priv.numa_node); if (!eq_table) return -ENOMEM; @@ -728,7 +728,8 @@ struct mlx5_eq * mlx5_eq_create_generic(struct mlx5_core_dev *dev, struct mlx5_eq_param *param) { - struct mlx5_eq *eq = kvzalloc(sizeof(*eq), GFP_KERNEL); + struct mlx5_eq *eq = kvzalloc_node(sizeof(*eq), GFP_KERNEL, + dev->priv.numa_node); int err; if (!eq) @@ -888,10 +889,11 @@ static int create_comp_eqs(struct mlx5_core_dev *dev) return ncomp_eqs; INIT_LIST_HEAD(&table->comp_eqs_list); nent = comp_eq_depth_devlink_param_get(dev); + for (i = 0; i < ncomp_eqs; i++) { struct mlx5_eq_param param = {}; - eq = kzalloc(sizeof(*eq), GFP_KERNEL); + eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, dev->priv.numa_node); if (!eq) { err = -ENOMEM; goto clean; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c index 39e948bc1204..a994e71e05c1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c @@ -92,6 +92,7 @@ static int esw_acl_ingress_mod_metadata_create(struct mlx5_eswitch *esw, flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_ALLOW; flow_act.modify_hdr = vport->ingress.offloads.modify_metadata; + flow_act.fg = vport->ingress.offloads.metadata_allmatch_grp; vport->ingress.offloads.modify_metadata_rule = mlx5_add_flow_rules(vport->ingress.acl, NULL, &flow_act, NULL, 0); @@ -117,6 +118,36 @@ static void esw_acl_ingress_mod_metadata_destroy(struct mlx5_eswitch *esw, vport->ingress.offloads.modify_metadata_rule = NULL; } +static int esw_acl_ingress_src_port_drop_create(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) +{ + struct mlx5_flow_act flow_act = {}; + struct mlx5_flow_handle *flow_rule; + int err = 0; + + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP; + flow_act.fg = vport->ingress.offloads.drop_grp; + flow_rule = mlx5_add_flow_rules(vport->ingress.acl, NULL, &flow_act, NULL, 0); + if (IS_ERR(flow_rule)) { + err = PTR_ERR(flow_rule); + goto out; + } + + vport->ingress.offloads.drop_rule = flow_rule; +out: + return err; +} + +static void esw_acl_ingress_src_port_drop_destroy(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) +{ + if (!vport->ingress.offloads.drop_rule) + return; + + mlx5_del_flow_rules(vport->ingress.offloads.drop_rule); + vport->ingress.offloads.drop_rule = NULL; +} + static int esw_acl_ingress_ofld_rules_create(struct mlx5_eswitch *esw, struct mlx5_vport *vport) { @@ -154,6 +185,7 @@ static void esw_acl_ingress_ofld_rules_destroy(struct mlx5_eswitch *esw, { esw_acl_ingress_allow_rule_destroy(vport); esw_acl_ingress_mod_metadata_destroy(esw, vport); + esw_acl_ingress_src_port_drop_destroy(esw, vport); } static int esw_acl_ingress_ofld_groups_create(struct mlx5_eswitch *esw, @@ -170,10 +202,29 @@ 
static int esw_acl_ingress_ofld_groups_create(struct mlx5_eswitch *esw, if (!flow_group_in) return -ENOMEM; + if (vport->vport == MLX5_VPORT_UPLINK) { + /* This group can hold an FTE to drop all traffic. + * Need in case LAG is enabled. + */ + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index); + + g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in); + if (IS_ERR(g)) { + ret = PTR_ERR(g); + esw_warn(esw->dev, "vport[%d] ingress create drop flow group, err(%d)\n", + vport->vport, ret); + goto drop_err; + } + vport->ingress.offloads.drop_grp = g; + flow_index++; + } + if (esw_acl_ingress_prio_tag_enabled(esw, vport)) { /* This group is to hold FTE to match untagged packets when prio_tag * is enabled. */ + memset(flow_group_in, 0, inlen); match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria); MLX5_SET(create_flow_group_in, flow_group_in, @@ -221,6 +272,11 @@ metadata_err: vport->ingress.offloads.metadata_prio_tag_grp = NULL; } prio_tag_err: + if (!IS_ERR_OR_NULL(vport->ingress.offloads.drop_grp)) { + mlx5_destroy_flow_group(vport->ingress.offloads.drop_grp); + vport->ingress.offloads.drop_grp = NULL; + } +drop_err: kvfree(flow_group_in); return ret; } @@ -236,6 +292,11 @@ static void esw_acl_ingress_ofld_groups_destroy(struct mlx5_vport *vport) mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp); vport->ingress.offloads.metadata_prio_tag_grp = NULL; } + + if (vport->ingress.offloads.drop_grp) { + mlx5_destroy_flow_group(vport->ingress.offloads.drop_grp); + vport->ingress.offloads.drop_grp = NULL; + } } int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw, @@ -252,6 +313,8 @@ int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw, if (mlx5_eswitch_vport_match_metadata_enabled(esw)) num_ftes++; + if (vport->vport == MLX5_VPORT_UPLINK) + num_ftes++; if (esw_acl_ingress_prio_tag_enabled(esw, vport)) num_ftes++; @@ -320,3 +383,27 @@ out: vport->metadata = vport->default_metadata; return err; } + +int mlx5_esw_acl_ingress_vport_drop_rule_create(struct mlx5_eswitch *esw, u16 vport_num) +{ + struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num); + + if (IS_ERR(vport)) { + esw_warn(esw->dev, "vport(%d) invalid!\n", vport_num); + return PTR_ERR(vport); + } + + return esw_acl_ingress_src_port_drop_create(esw, vport); +} + +void mlx5_esw_acl_ingress_vport_drop_rule_destroy(struct mlx5_eswitch *esw, u16 vport_num) +{ + struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num); + + if (WARN_ON_ONCE(IS_ERR(vport))) { + esw_warn(esw->dev, "vport(%d) invalid!\n", vport_num); + return; + } + + esw_acl_ingress_src_port_drop_destroy(esw, vport); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ofld.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ofld.h index c57869b93d60..11d3d3978848 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ofld.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ofld.h @@ -6,6 +6,7 @@ #include "eswitch.h" +#ifdef CONFIG_MLX5_ESWITCH /* Eswitch acl egress external APIs */ int esw_acl_egress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport); void esw_acl_egress_ofld_cleanup(struct mlx5_vport *vport); @@ -25,5 +26,19 @@ int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vpor void esw_acl_ingress_ofld_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport); int mlx5_esw_acl_ingress_vport_bond_update(struct mlx5_eswitch *esw, u16 
vport_num, u32 metadata); +void mlx5_esw_acl_ingress_vport_drop_rule_destroy(struct mlx5_eswitch *esw, u16 vport_num); +int mlx5_esw_acl_ingress_vport_drop_rule_create(struct mlx5_eswitch *esw, u16 vport_num); +#else /* CONFIG_MLX5_ESWITCH */ +static inline void +mlx5_esw_acl_ingress_vport_drop_rule_destroy(struct mlx5_eswitch *esw, + u16 vport_num) +{} + +static inline int mlx5_esw_acl_ingress_vport_drop_rule_create(struct mlx5_eswitch *esw, + u16 vport_num) +{ + return 0; +} +#endif /* CONFIG_MLX5_ESWITCH */ #endif /* __MLX5_ESWITCH_ACL_OFLD_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c index 11bbcd5f5b8b..694c54066955 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c @@ -697,7 +697,7 @@ void mlx5_esw_qos_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vpo } int mlx5_esw_qos_set_vport_rate(struct mlx5_eswitch *esw, struct mlx5_vport *vport, - u32 min_rate, u32 max_rate) + u32 max_rate, u32 min_rate) { int err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 44321cdfe928..973281bdb4a2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -113,8 +113,11 @@ struct vport_ingress { * packet with metadata. */ struct mlx5_flow_group *metadata_allmatch_grp; + /* Optional group to add a drop all rule */ + struct mlx5_flow_group *drop_grp; struct mlx5_modify_hdr *modify_metadata; struct mlx5_flow_handle *modify_metadata_rule; + struct mlx5_flow_handle *drop_rule; } offloads; }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 2b31d8bbd1b8..35cf4cb3098e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -2379,60 +2379,6 @@ void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num) mlx5_esw_offloads_devlink_port_unregister(esw, vport_num); } -static int esw_set_uplink_slave_ingress_root(struct mlx5_core_dev *master, - struct mlx5_core_dev *slave) -{ - u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {}; - u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {}; - struct mlx5_eswitch *esw; - struct mlx5_flow_root_namespace *root; - struct mlx5_flow_namespace *ns; - struct mlx5_vport *vport; - int err; - - MLX5_SET(set_flow_table_root_in, in, opcode, - MLX5_CMD_OP_SET_FLOW_TABLE_ROOT); - MLX5_SET(set_flow_table_root_in, in, table_type, FS_FT_ESW_INGRESS_ACL); - MLX5_SET(set_flow_table_root_in, in, other_vport, 1); - MLX5_SET(set_flow_table_root_in, in, vport_number, MLX5_VPORT_UPLINK); - - if (master) { - esw = master->priv.eswitch; - vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK); - MLX5_SET(set_flow_table_root_in, in, table_of_other_vport, 1); - MLX5_SET(set_flow_table_root_in, in, table_vport_number, - MLX5_VPORT_UPLINK); - - ns = mlx5_get_flow_vport_acl_namespace(master, - MLX5_FLOW_NAMESPACE_ESW_INGRESS, - vport->index); - root = find_root(&ns->node); - mutex_lock(&root->chain_lock); - - MLX5_SET(set_flow_table_root_in, in, - table_eswitch_owner_vhca_id_valid, 1); - MLX5_SET(set_flow_table_root_in, in, - table_eswitch_owner_vhca_id, - MLX5_CAP_GEN(master, vhca_id)); - MLX5_SET(set_flow_table_root_in, in, table_id, - root->root_ft->id); - } else { - esw = slave->priv.eswitch; - vport = mlx5_eswitch_get_vport(esw,
MLX5_VPORT_UPLINK); - ns = mlx5_get_flow_vport_acl_namespace(slave, - MLX5_FLOW_NAMESPACE_ESW_INGRESS, - vport->index); - root = find_root(&ns->node); - mutex_lock(&root->chain_lock); - MLX5_SET(set_flow_table_root_in, in, table_id, root->root_ft->id); - } - - err = mlx5_cmd_exec(slave, in, sizeof(in), out, sizeof(out)); - mutex_unlock(&root->chain_lock); - - return err; -} - static int esw_set_slave_root_fdb(struct mlx5_core_dev *master, struct mlx5_core_dev *slave) { @@ -2614,15 +2560,10 @@ int mlx5_eswitch_offloads_config_single_fdb(struct mlx5_eswitch *master_esw, { int err; - err = esw_set_uplink_slave_ingress_root(master_esw->dev, - slave_esw->dev); - if (err) - return -EINVAL; - err = esw_set_slave_root_fdb(master_esw->dev, slave_esw->dev); if (err) - goto err_fdb; + return err; err = esw_set_master_egress_rule(master_esw->dev, slave_esw->dev); @@ -2634,9 +2575,6 @@ int mlx5_eswitch_offloads_config_single_fdb(struct mlx5_eswitch *master_esw, err_acl: esw_set_slave_root_fdb(NULL, slave_esw->dev); -err_fdb: - esw_set_uplink_slave_ingress_root(NULL, slave_esw->dev); - return err; } @@ -2645,7 +2583,6 @@ void mlx5_eswitch_offloads_destroy_single_fdb(struct mlx5_eswitch *master_esw, { esw_unset_master_egress_rule(master_esw->dev); esw_set_slave_root_fdb(NULL, slave_esw->dev); - esw_set_uplink_slave_ingress_root(NULL, slave_esw->dev); } #define ESW_OFFLOADS_DEVCOM_PAIR (0) @@ -2839,13 +2776,22 @@ bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw) if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source)) return false; - if (mlx5_core_is_ecpf_esw_manager(esw->dev) || - mlx5_ecpf_vport_exists(esw->dev)) - return false; - return true; } +#define MLX5_ESW_METADATA_RSVD_UPLINK 1 + +/* Share the same metadata for uplink's. This is fine because: + * (a) In shared FDB mode (LAG) both uplink's are treated the + * same and tagged with the same metadata. + * (b) In non shared FDB mode, packets from physical port0 + * cannot hit eswitch of PF1 and vice versa. + */ +static u32 mlx5_esw_match_metadata_reserved(struct mlx5_eswitch *esw) +{ + return MLX5_ESW_METADATA_RSVD_UPLINK; +} + u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw) { u32 vport_end_ida = (1 << ESW_VPORT_BITS) - 1; @@ -2860,8 +2806,10 @@ u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw) return 0; /* Metadata is 4 bits of PFNUM and 12 bits of unique id */ - /* Use only non-zero vport_id (1-4095) for all PF's */ - id = ida_alloc_range(&esw->offloads.vport_metadata_ida, 1, vport_end_ida, GFP_KERNEL); + /* Use only non-zero vport_id (2-4095) for all PF's */ + id = ida_alloc_range(&esw->offloads.vport_metadata_ida, + MLX5_ESW_METADATA_RSVD_UPLINK + 1, + vport_end_ida, GFP_KERNEL); if (id < 0) return 0; id = (pf_num << ESW_VPORT_BITS) | id; @@ -2879,7 +2827,11 @@ void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata) static int esw_offloads_vport_metadata_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport) { - vport->default_metadata = mlx5_esw_match_metadata_alloc(esw); + if (vport->vport == MLX5_VPORT_UPLINK) + vport->default_metadata = mlx5_esw_match_metadata_reserved(esw); + else + vport->default_metadata = mlx5_esw_match_metadata_alloc(esw); + vport->metadata = vport->default_metadata; return vport->metadata ? 
0 : -ENOSPC; } @@ -2890,6 +2842,9 @@ static void esw_offloads_vport_metadata_cleanup(struct mlx5_eswitch *esw, if (!vport->default_metadata) return; + if (vport->vport == MLX5_VPORT_UPLINK) + return; + WARN_ON(vport->metadata != vport->default_metadata); mlx5_esw_match_metadata_free(esw, vport->default_metadata); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c index 2ce4241459ce..39c03dcbd196 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c @@ -30,7 +30,6 @@ * SOFTWARE. */ -#include <linux/module.h> #include <linux/etherdevice.h> #include <linux/mlx5/driver.h> diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 42f878e21fea..816d991f7621 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -1696,6 +1696,7 @@ static void free_match_list(struct match_list *head, bool ft_locked) static int build_match_list(struct match_list *match_head, struct mlx5_flow_table *ft, const struct mlx5_flow_spec *spec, + struct mlx5_flow_group *fg, bool ft_locked) { struct rhlist_head *tmp, *list; @@ -1710,6 +1711,9 @@ static int build_match_list(struct match_list *match_head, rhl_for_each_entry_rcu(g, tmp, list, hash) { struct match_list *curr_match; + if (fg && fg != g) + continue; + if (unlikely(!tree_get_node(&g->node))) continue; @@ -1889,6 +1893,9 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft, if (!check_valid_spec(spec)) return ERR_PTR(-EINVAL); + if (flow_act->fg && ft->autogroup.active) + return ERR_PTR(-EINVAL); + for (i = 0; i < dest_num; i++) { if (!dest_is_valid(&dest[i], flow_act, ft)) return ERR_PTR(-EINVAL); @@ -1898,7 +1905,7 @@ search_again_locked: version = atomic_read(&ft->node.version); /* Collect all fgs which has a matching match_criteria */ - err = build_match_list(&match_head, ft, spec, take_write); + err = build_match_list(&match_head, ft, spec, flow_act->fg, take_write); if (err) { if (take_write) up_write_ref_node(&ft->node, false); @@ -2074,6 +2081,8 @@ void mlx5_del_flow_rules(struct mlx5_flow_handle *handle) fte->node.del_hw_func = NULL; up_write_ref_node(&fte->node, false); tree_put_node(&fte->node, false); + } else { + up_write_ref_node(&fte->node, false); } kfree(handle); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index 2d8406fab844..614687e0e3d9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -32,7 +32,6 @@ #include <linux/mlx5/driver.h> #include <linux/mlx5/eswitch.h> -#include <linux/module.h> #include "mlx5_core.h" #include "../../mlxfw/mlxfw.h" #include "lib/tout.h" diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c index 84dbe46d5ede..4aa22dce9b77 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c @@ -57,7 +57,8 @@ static int mlx5_reg_mfrl_set(struct mlx5_core_dev *dev, u8 reset_level, return mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_MFRL, 0, 1); } -static int mlx5_reg_mfrl_query(struct mlx5_core_dev *dev, u8 *reset_level, u8 *reset_type) +static int mlx5_reg_mfrl_query(struct mlx5_core_dev *dev, u8 *reset_level, + u8 *reset_type, u8 *reset_state) { u32 out[MLX5_ST_SZ_DW(mfrl_reg)] = {}; u32 
in[MLX5_ST_SZ_DW(mfrl_reg)] = {}; @@ -71,25 +72,67 @@ static int mlx5_reg_mfrl_query(struct mlx5_core_dev *dev, u8 *reset_level, u8 *r *reset_level = MLX5_GET(mfrl_reg, out, reset_level); if (reset_type) *reset_type = MLX5_GET(mfrl_reg, out, reset_type); + if (reset_state) + *reset_state = MLX5_GET(mfrl_reg, out, reset_state); return 0; } int mlx5_fw_reset_query(struct mlx5_core_dev *dev, u8 *reset_level, u8 *reset_type) { - return mlx5_reg_mfrl_query(dev, reset_level, reset_type); + return mlx5_reg_mfrl_query(dev, reset_level, reset_type, NULL); } -int mlx5_fw_reset_set_reset_sync(struct mlx5_core_dev *dev, u8 reset_type_sel) +static int mlx5_fw_reset_get_reset_state_err(struct mlx5_core_dev *dev, + struct netlink_ext_ack *extack) +{ + u8 reset_state; + + if (mlx5_reg_mfrl_query(dev, NULL, NULL, &reset_state)) + goto out; + + switch (reset_state) { + case MLX5_MFRL_REG_RESET_STATE_IN_NEGOTIATION: + case MLX5_MFRL_REG_RESET_STATE_RESET_IN_PROGRESS: + NL_SET_ERR_MSG_MOD(extack, "Sync reset was already triggered"); + return -EBUSY; + case MLX5_MFRL_REG_RESET_STATE_TIMEOUT: + NL_SET_ERR_MSG_MOD(extack, "Sync reset got timeout"); + return -ETIMEDOUT; + case MLX5_MFRL_REG_RESET_STATE_NACK: + NL_SET_ERR_MSG_MOD(extack, "One of the hosts disabled reset"); + return -EPERM; + } + +out: + NL_SET_ERR_MSG_MOD(extack, "Sync reset failed"); + return -EIO; +} + +int mlx5_fw_reset_set_reset_sync(struct mlx5_core_dev *dev, u8 reset_type_sel, + struct netlink_ext_ack *extack) { struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset; + u32 out[MLX5_ST_SZ_DW(mfrl_reg)] = {}; + u32 in[MLX5_ST_SZ_DW(mfrl_reg)] = {}; int err; set_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags); - err = mlx5_reg_mfrl_set(dev, MLX5_MFRL_REG_RESET_LEVEL3, reset_type_sel, 0, true); - if (err) - clear_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags); - return err; + + MLX5_SET(mfrl_reg, in, reset_level, MLX5_MFRL_REG_RESET_LEVEL3); + MLX5_SET(mfrl_reg, in, rst_type_sel, reset_type_sel); + MLX5_SET(mfrl_reg, in, pci_sync_for_fw_update_start, 1); + err = mlx5_access_reg(dev, in, sizeof(in), out, sizeof(out), + MLX5_REG_MFRL, 0, 1, false); + if (!err) + return 0; + + clear_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags); + if (err == -EREMOTEIO && MLX5_CAP_MCAM_FEATURE(dev, reset_state)) + return mlx5_fw_reset_get_reset_state_err(dev, extack); + + NL_SET_ERR_MSG_MOD(extack, "Sync reset command failed"); + return mlx5_cmd_check(dev, err, in, out); } int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h index 7761ee5fc7d0..694fc7cb2684 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h @@ -9,7 +9,8 @@ void mlx5_fw_reset_enable_remote_dev_reset_set(struct mlx5_core_dev *dev, bool enable); bool mlx5_fw_reset_enable_remote_dev_reset_get(struct mlx5_core_dev *dev); int mlx5_fw_reset_query(struct mlx5_core_dev *dev, u8 *reset_level, u8 *reset_type); -int mlx5_fw_reset_set_reset_sync(struct mlx5_core_dev *dev, u8 reset_type_sel); +int mlx5_fw_reset_set_reset_sync(struct mlx5_core_dev *dev, u8 reset_type_sel, + struct netlink_ext_ack *extack); int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev); int mlx5_fw_reset_wait_reset_done(struct mlx5_core_dev *dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 737df402c927..659021c31cbd 100644 --- 
a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -31,7 +31,6 @@ */ #include <linux/kernel.h> -#include <linux/module.h> #include <linux/random.h> #include <linux/vmalloc.h> #include <linux/hardirq.h> diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c index 4ddf6b330a44..6cad3b72c133 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c @@ -31,15 +31,22 @@ */ #include <linux/netdevice.h> +#include <net/bonding.h> #include <linux/mlx5/driver.h> #include <linux/mlx5/eswitch.h> #include <linux/mlx5/vport.h> #include "lib/devcom.h" #include "mlx5_core.h" #include "eswitch.h" +#include "esw/acl/ofld.h" #include "lag.h" #include "mp.h" +enum { + MLX5_LAG_EGRESS_PORT_1 = 1, + MLX5_LAG_EGRESS_PORT_2, +}; + /* General purpose, use for short periods of time. * Beware of lock dependencies (preferably, no locks should be acquired * under it). @@ -193,15 +200,71 @@ static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker, p2en = tracker->netdev_state[MLX5_LAG_P2].tx_enabled && tracker->netdev_state[MLX5_LAG_P2].link_up; - *port1 = 1; - *port2 = 2; + *port1 = MLX5_LAG_EGRESS_PORT_1; + *port2 = MLX5_LAG_EGRESS_PORT_2; if ((!p1en && !p2en) || (p1en && p2en)) return; if (p1en) - *port2 = 1; + *port2 = MLX5_LAG_EGRESS_PORT_1; + else + *port1 = MLX5_LAG_EGRESS_PORT_2; +} + +static bool mlx5_lag_has_drop_rule(struct mlx5_lag *ldev) +{ + return ldev->pf[MLX5_LAG_P1].has_drop || ldev->pf[MLX5_LAG_P2].has_drop; +} + +static void mlx5_lag_drop_rule_cleanup(struct mlx5_lag *ldev) +{ + int i; + + for (i = 0; i < MLX5_MAX_PORTS; i++) { + if (!ldev->pf[i].has_drop) + continue; + + mlx5_esw_acl_ingress_vport_drop_rule_destroy(ldev->pf[i].dev->priv.eswitch, + MLX5_VPORT_UPLINK); + ldev->pf[i].has_drop = false; + } +} + +static void mlx5_lag_drop_rule_setup(struct mlx5_lag *ldev, + struct lag_tracker *tracker) +{ + struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; + struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev; + struct mlx5_core_dev *inactive; + u8 v2p_port1, v2p_port2; + int inactive_idx; + int err; + + /* First delete the current drop rule so there won't be any dropped + * packets + */ + mlx5_lag_drop_rule_cleanup(ldev); + + if (!ldev->tracker.has_inactive) + return; + + mlx5_infer_tx_affinity_mapping(tracker, &v2p_port1, &v2p_port2); + + if (v2p_port1 == MLX5_LAG_EGRESS_PORT_1) { + inactive = dev1; + inactive_idx = MLX5_LAG_P2; + } else { + inactive = dev0; + inactive_idx = MLX5_LAG_P1; + } + + err = mlx5_esw_acl_ingress_vport_drop_rule_create(inactive->priv.eswitch, + MLX5_VPORT_UPLINK); + if (!err) + ldev->pf[inactive_idx].has_drop = true; else - *port1 = 2; + mlx5_core_err(inactive, + "Failed to create lag drop rule, error: %d", err); } static int _mlx5_modify_lag(struct mlx5_lag *ldev, u8 v2p_port1, u8 v2p_port2) @@ -238,6 +301,10 @@ void mlx5_modify_lag(struct mlx5_lag *ldev, ldev->v2p_map[MLX5_LAG_P1], ldev->v2p_map[MLX5_LAG_P2]); } + + if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP && + !(ldev->flags & MLX5_LAG_FLAG_ROCE)) + mlx5_lag_drop_rule_setup(ldev, tracker); } static void mlx5_lag_set_port_sel_mode(struct mlx5_lag *ldev, @@ -339,6 +406,10 @@ int mlx5_activate_lag(struct mlx5_lag *ldev, return err; } + if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP && + !roce_lag) + mlx5_lag_drop_rule_setup(ldev, tracker); + ldev->flags |= flags; ldev->shared_fdb = shared_fdb; return 0; 
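For illustration, the placement decision made by mlx5_lag_drop_rule_setup() in the hunks above can be restated as a tiny helper. This is a sketch, not part of the patch; lag_inactive_idx() is a hypothetical name, and only the two-port constants introduced in this file are assumed.

/* In active-backup mode, mlx5_infer_tx_affinity_mapping() points both
 * vports at the single active physical port. So if vport 1 egresses on
 * port 1, port 1 is active and the ingress drop rule belongs on the
 * other port, and vice versa.
 */
static int lag_inactive_idx(u8 v2p_port1)
{
	return v2p_port1 == MLX5_LAG_EGRESS_PORT_1 ? MLX5_LAG_P2 : MLX5_LAG_P1;
}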
@@ -347,6 +418,7 @@ int mlx5_activate_lag(struct mlx5_lag *ldev, static int mlx5_deactivate_lag(struct mlx5_lag *ldev) { struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; + struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev; u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {}; bool roce_lag = __mlx5_lag_is_roce(ldev); u8 flags = ldev->flags; @@ -356,8 +428,8 @@ static int mlx5_deactivate_lag(struct mlx5_lag *ldev) mlx5_lag_mp_reset(ldev); if (ldev->shared_fdb) { - mlx5_eswitch_offloads_destroy_single_fdb(ldev->pf[MLX5_LAG_P1].dev->priv.eswitch, - ldev->pf[MLX5_LAG_P2].dev->priv.eswitch); + mlx5_eswitch_offloads_destroy_single_fdb(dev0->priv.eswitch, + dev1->priv.eswitch); ldev->shared_fdb = false; } @@ -372,11 +444,15 @@ static int mlx5_deactivate_lag(struct mlx5_lag *ldev) "Failed to deactivate VF LAG; driver restart required\n" "Make sure all VFs are unbound prior to VF LAG activation or deactivation\n"); } - } else if (flags & MLX5_LAG_FLAG_HASH_BASED) { - mlx5_lag_port_sel_destroy(ldev); + return err; } - return err; + if (flags & MLX5_LAG_FLAG_HASH_BASED) + mlx5_lag_port_sel_destroy(ldev); + if (mlx5_lag_has_drop_rule(ldev)) + mlx5_lag_drop_rule_cleanup(ldev); + + return 0; } static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev) @@ -613,6 +689,8 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev, struct net_device *upper = info->upper_dev, *ndev_tmp; struct netdev_lag_upper_info *lag_upper_info = NULL; bool is_bonded, is_in_lag, mode_supported; + bool has_inactive = 0; + struct slave *slave; int bond_status = 0; int num_slaves = 0; int changed = 0; @@ -632,8 +710,12 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev, rcu_read_lock(); for_each_netdev_in_bond_rcu(upper, ndev_tmp) { idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev_tmp); - if (idx >= 0) + if (idx >= 0) { + slave = bond_slave_get_rcu(ndev_tmp); + if (slave) + has_inactive |= bond_is_slave_inactive(slave); bond_status |= (1 << idx); + } num_slaves++; } @@ -648,6 +730,7 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev, tracker->hash_type = lag_upper_info->hash_type; } + tracker->has_inactive = has_inactive; /* Determine bonding status: * A device is considered bonded if both its physical ports are slaves * of the same lag master, and only them. 
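Both the CHANGEUPPER scan above and the CHANGEINFODATA handler added below reduce per-slave bond state to the single tracker bit has_inactive. A condensed sketch of that reduction, assuming <net/bonding.h> is included and using the hypothetical wrapper name bond_has_inactive_slave():

/* Walk the bond's lower devices under RCU and OR together the inactive
 * state of the slaves that belong to this lag device.
 */
static bool bond_has_inactive_slave(struct mlx5_lag *ldev,
				    struct net_device *bond)
{
	struct net_device *ndev_tmp;
	struct slave *slave;
	bool has_inactive = false;

	rcu_read_lock();
	for_each_netdev_in_bond_rcu(bond, ndev_tmp) {
		if (mlx5_lag_dev_get_netdev_idx(ldev, ndev_tmp) < 0)
			continue; /* not one of our ports */

		slave = bond_slave_get_rcu(ndev_tmp);
		if (slave)
			has_inactive |= bond_is_slave_inactive(slave);
	}
	rcu_read_unlock();

	return has_inactive;
}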
@@ -704,6 +787,38 @@ static int mlx5_handle_changelowerstate_event(struct mlx5_lag *ldev, return 1; } +static int mlx5_handle_changeinfodata_event(struct mlx5_lag *ldev, + struct lag_tracker *tracker, + struct net_device *ndev) +{ + struct net_device *ndev_tmp; + struct slave *slave; + bool has_inactive = 0; + int idx; + + if (!netif_is_lag_master(ndev)) + return 0; + + rcu_read_lock(); + for_each_netdev_in_bond_rcu(ndev, ndev_tmp) { + idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev_tmp); + if (idx < 0) + continue; + + slave = bond_slave_get_rcu(ndev_tmp); + if (slave) + has_inactive |= bond_is_slave_inactive(slave); + } + rcu_read_unlock(); + + if (tracker->has_inactive == has_inactive) + return 0; + + tracker->has_inactive = has_inactive; + + return 1; +} + static int mlx5_lag_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { @@ -712,7 +827,9 @@ static int mlx5_lag_netdev_event(struct notifier_block *this, struct mlx5_lag *ldev; int changed = 0; - if ((event != NETDEV_CHANGEUPPER) && (event != NETDEV_CHANGELOWERSTATE)) + if (event != NETDEV_CHANGEUPPER && + event != NETDEV_CHANGELOWERSTATE && + event != NETDEV_CHANGEINFODATA) return NOTIFY_DONE; ldev = container_of(this, struct mlx5_lag, nb); @@ -728,6 +845,9 @@ static int mlx5_lag_netdev_event(struct notifier_block *this, changed = mlx5_handle_changelowerstate_event(ldev, &tracker, ndev, ptr); break; + case NETDEV_CHANGEINFODATA: + changed = mlx5_handle_changeinfodata_event(ldev, &tracker, ndev); + break; } ldev->tracker = tracker; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h index e5d231c31b54..cbf9a9003e55 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h @@ -28,6 +28,7 @@ enum { struct lag_func { struct mlx5_core_dev *dev; struct net_device *netdev; + bool has_drop; }; /* Used for collection of netdev event info. 
*/ @@ -35,6 +36,7 @@ struct lag_tracker { enum netdev_lag_tx_type tx_type; struct netdev_lag_lower_state_info netdev_state[MLX5_MAX_PORTS]; unsigned int is_bonded:1; + unsigned int has_inactive:1; enum netdev_lag_hash hash_type; }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c index 1ca01a5b6cdd..4a6ec15ef046 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c @@ -50,7 +50,7 @@ bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev) static void mlx5_lag_set_port_affinity(struct mlx5_lag *ldev, enum mlx5_lag_port_affinity port) { - struct lag_tracker tracker; + struct lag_tracker tracker = {}; if (!__mlx5_lag_is_multipath(ldev)) return; @@ -126,6 +126,10 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, return; } + /* Handle multipath entry with lower priority value */ + if (mp->mfi && mp->mfi != fi && fi->fib_priority >= mp->mfi->fib_priority) + return; + /* Handle add/replace event */ nhs = fib_info_num_path(fi); if (nhs == 1) { @@ -135,12 +139,13 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, int i = mlx5_lag_dev_get_netdev_idx(ldev, nh_dev); if (i < 0) - i = MLX5_LAG_NORMAL_AFFINITY; - else - ++i; + return; + i++; mlx5_lag_set_port_affinity(ldev, i); } + + mp->mfi = fi; return; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.h index 4bad6a5fde56..f240ffe5116c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.h @@ -92,13 +92,6 @@ mlx5_hv_vhca_agent_create(struct mlx5_hv_vhca *hv_vhca, static inline void mlx5_hv_vhca_agent_destroy(struct mlx5_hv_vhca_agent *agent) { } - -static inline int -mlx5_hv_vhca_write_agent(struct mlx5_hv_vhca_agent *agent, - void *buf, int len) -{ - return 0; -} #endif #endif /* __LIB_HV_VHCA_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c index e042e0924079..4571c56ec3c9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c @@ -1,7 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ /* Copyright (c) 2019 Mellanox Technologies. */ -#include <linux/module.h> #include <linux/mlx5/driver.h> #include <linux/mlx5/port.h> #include "mlx5_core.h" diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.c new file mode 100644 index 000000000000..9b8c051ccf65 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.c @@ -0,0 +1,68 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. 
*/ + +#include <linux/kernel.h> +#include <linux/mlx5/driver.h> + +#include "smfs.h" + +struct mlx5dr_matcher * +mlx5_smfs_matcher_create(struct mlx5dr_table *table, u32 priority, struct mlx5_flow_spec *spec) +{ + struct mlx5dr_match_parameters matcher_mask = {}; + + matcher_mask.match_buf = (u64 *)&spec->match_criteria; + matcher_mask.match_sz = DR_SZ_MATCH_PARAM; + + return mlx5dr_matcher_create(table, priority, spec->match_criteria_enable, &matcher_mask); +} + +void +mlx5_smfs_matcher_destroy(struct mlx5dr_matcher *matcher) +{ + mlx5dr_matcher_destroy(matcher); +} + +struct mlx5dr_table * +mlx5_smfs_table_get_from_fs_ft(struct mlx5_flow_table *ft) +{ + return mlx5dr_table_get_from_fs_ft(ft); +} + +struct mlx5dr_action * +mlx5_smfs_action_create_dest_table(struct mlx5dr_table *table) +{ + return mlx5dr_action_create_dest_table(table); +} + +struct mlx5dr_action * +mlx5_smfs_action_create_flow_counter(u32 counter_id) +{ + return mlx5dr_action_create_flow_counter(counter_id); +} + +void +mlx5_smfs_action_destroy(struct mlx5dr_action *action) +{ + mlx5dr_action_destroy(action); +} + +struct mlx5dr_rule * +mlx5_smfs_rule_create(struct mlx5dr_matcher *matcher, struct mlx5_flow_spec *spec, + size_t num_actions, struct mlx5dr_action *actions[], + u32 flow_source) +{ + struct mlx5dr_match_parameters value = {}; + + value.match_buf = (u64 *)spec->match_value; + value.match_sz = DR_SZ_MATCH_PARAM; + + return mlx5dr_rule_create(matcher, &value, num_actions, actions, flow_source); +} + +void +mlx5_smfs_rule_destroy(struct mlx5dr_rule *rule) +{ + mlx5dr_rule_destroy(rule); +} + diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.h new file mode 100644 index 000000000000..452d0df339ac --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. 
*/ + +#ifndef __MLX5_LIB_SMFS_H__ +#define __MLX5_LIB_SMFS_H__ + +#include "steering/mlx5dr.h" +#include "steering/dr_types.h" + +struct mlx5dr_matcher * +mlx5_smfs_matcher_create(struct mlx5dr_table *table, u32 priority, struct mlx5_flow_spec *spec); + +void +mlx5_smfs_matcher_destroy(struct mlx5dr_matcher *matcher); + +struct mlx5dr_table * +mlx5_smfs_table_get_from_fs_ft(struct mlx5_flow_table *ft); + +struct mlx5dr_action * +mlx5_smfs_action_create_dest_table(struct mlx5dr_table *table); + +struct mlx5dr_action * +mlx5_smfs_action_create_flow_counter(u32 counter_id); + +void +mlx5_smfs_action_destroy(struct mlx5dr_action *action); + +struct mlx5dr_rule * +mlx5_smfs_rule_create(struct mlx5dr_matcher *matcher, struct mlx5_flow_spec *spec, + size_t num_actions, struct mlx5dr_action *actions[], + u32 flow_source); + +void +mlx5_smfs_rule_destroy(struct mlx5dr_rule *rule); + +#endif /* __MLX5_LIB_SMFS_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c index e3b0a131c3e1..d55e15c1f380 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c @@ -31,7 +31,6 @@ */ #include <linux/kernel.h> -#include <linux/module.h> #include <linux/refcount.h> #include <linux/mlx5/driver.h> #include <net/vxlan.h> diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 2c774f367199..d8d36477b97f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -526,7 +526,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx) /* Check log_max_qp from HCA caps to set in current profile */ if (prof->log_max_qp == LOG_MAX_SUPPORTED_QPS) { - prof->log_max_qp = MLX5_CAP_GEN_MAX(dev, log_max_qp); + prof->log_max_qp = min_t(u8, 17, MLX5_CAP_GEN_MAX(dev, log_max_qp)); } else if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < prof->log_max_qp) { mlx5_core_warn(dev, "log_max_qp value in current profile is %d, changing it to HCA capability limit (%d)\n", prof->log_max_qp, @@ -736,10 +736,9 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev) MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI); err = mlx5_cmd_exec_inout(dev, query_issi, query_in, query_out); if (err) { - u32 syndrome; - u8 status; + u32 syndrome = MLX5_GET(query_issi_out, query_out, syndrome); + u8 status = MLX5_GET(query_issi_out, query_out, status); - mlx5_cmd_mbox_status(query_out, &status, &syndrome); if (!status || syndrome == MLX5_DRIVER_SYND) { mlx5_core_err(dev, "Failed to query ISSI err(%d) status(%d) synd(%d)\n", err, status, syndrome); @@ -1488,8 +1487,8 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx) INIT_LIST_HEAD(&priv->pgdir_list); priv->numa_node = dev_to_node(mlx5_core_dma_dev(dev)); - priv->dbg_root = debugfs_create_dir(dev_name(dev->device), - mlx5_debugfs_root); + priv->dbg.dbg_root = debugfs_create_dir(dev_name(dev->device), + mlx5_debugfs_root); INIT_LIST_HEAD(&priv->traps); err = mlx5_tout_init(dev); @@ -1525,7 +1524,7 @@ err_pagealloc_init: err_health_init: mlx5_tout_cleanup(dev); err_timeout_init: - debugfs_remove(dev->priv.dbg_root); + debugfs_remove(dev->priv.dbg.dbg_root); mutex_destroy(&priv->pgdir_mutex); mutex_destroy(&priv->alloc_mutex); mutex_destroy(&priv->bfregs.wc_head.lock); @@ -1543,7 +1542,7 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev) mlx5_pagealloc_cleanup(dev); mlx5_health_cleanup(dev); mlx5_tout_cleanup(dev); - 
debugfs_remove_recursive(dev->priv.dbg_root); + debugfs_remove_recursive(dev->priv.dbg.dbg_root); mutex_destroy(&priv->pgdir_mutex); mutex_destroy(&priv->alloc_mutex); mutex_destroy(&priv->bfregs.wc_head.lock); @@ -1840,10 +1839,12 @@ static const struct pci_device_id mlx5_core_pci_table[] = { { PCI_VDEVICE(MELLANOX, 0x101e), MLX5_PCI_DEV_IS_VF}, /* ConnectX Family mlx5Gen Virtual Function */ { PCI_VDEVICE(MELLANOX, 0x101f) }, /* ConnectX-6 LX */ { PCI_VDEVICE(MELLANOX, 0x1021) }, /* ConnectX-7 */ + { PCI_VDEVICE(MELLANOX, 0x1023) }, /* ConnectX-8 */ { PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */ { PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */ { PCI_VDEVICE(MELLANOX, 0xa2d6) }, /* BlueField-2 integrated ConnectX-6 Dx network controller */ { PCI_VDEVICE(MELLANOX, 0xa2dc) }, /* BlueField-3 integrated ConnectX-7 network controller */ + { PCI_VDEVICE(MELLANOX, 0xa2df) }, /* BlueField-4 integrated ConnectX-8 network controller */ { 0, } }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mcg.c b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c index e019d68062d8..495cca58dccc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mcg.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c @@ -31,7 +31,6 @@ */ #include <linux/kernel.h> -#include <linux/module.h> #include <linux/mlx5/driver.h> #include <rdma/ib_verbs.h> #include "mlx5_core.h" diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c index f099a087400e..9d735c343a3b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c @@ -31,7 +31,6 @@ */ #include <linux/kernel.h> -#include <linux/module.h> #include <linux/mlx5/driver.h> #include "mlx5_core.h" diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c index f6b5451328fc..ec76a8b1acc1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c @@ -32,7 +32,6 @@ #include <linux/highmem.h> #include <linux/kernel.h> -#include <linux/module.h> #include <linux/delay.h> #include <linux/mlx5/driver.h> #include <linux/xarray.h> @@ -327,11 +326,12 @@ static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id, } static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, - int notify_fail, bool ec_function) + int event, bool ec_function) { u32 function = get_function(func_id, ec_function); u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0}; int inlen = MLX5_ST_SZ_BYTES(manage_pages_in); + int notify_fail = event; u64 addr; int err; u32 *in; @@ -351,8 +351,10 @@ retry: if (err) { if (err == -ENOMEM) err = alloc_system_page(dev, function); - if (err) + if (err) { + dev->priv.fw_pages_alloc_failed += (npages - i); goto out_4k; + } goto retry; } @@ -365,11 +367,20 @@ retry: MLX5_SET(manage_pages_in, in, input_num_entries, npages); MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function); - err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); + err = mlx5_cmd_do(dev, in, inlen, out, sizeof(out)); + if (err == -EREMOTEIO) { + notify_fail = 0; + /* if triggered by FW and failed by FW ignore */ + if (event) { + err = 0; + goto out_dropped; + } + } if (err) { + err = mlx5_cmd_check(dev, err, in, out); mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n", func_id, npages, err); - goto out_4k; + goto out_dropped; } dev->priv.fw_pages += 
npages; @@ -384,6 +395,8 @@ retry: kvfree(in); return 0; +out_dropped: + dev->priv.give_pages_dropped += npages; out_4k: for (i--; i >= 0; i--) free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]), function); @@ -455,7 +468,7 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev, u32 i = 0; if (!mlx5_cmd_is_down(dev)) - return mlx5_cmd_exec(dev, in, in_size, out, out_size); + return mlx5_cmd_do(dev, in, in_size, out, out_size); /* No hard feelings, we want our pages back! */ npages = MLX5_GET(manage_pages_in, in, input_num_entries); @@ -479,7 +492,7 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev, } static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, - int *nclaimed, bool ec_function) + int *nclaimed, bool event, bool ec_function) { u32 function = get_function(func_id, ec_function); int outlen = MLX5_ST_SZ_BYTES(manage_pages_out); @@ -507,6 +520,14 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, func_id, npages, outlen); err = reclaim_pages_cmd(dev, in, sizeof(in), out, outlen); if (err) { + npages = MLX5_GET(manage_pages_in, in, input_num_entries); + dev->priv.reclaim_pages_discard += npages; + } + /* if triggered by FW event and failed by FW then ignore */ + if (event && err == -EREMOTEIO) + err = 0; + if (err) { + err = mlx5_cmd_check(dev, err, in, out); mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err); goto out_free; } @@ -546,7 +567,7 @@ static void pages_work_handler(struct work_struct *work) release_all_pages(dev, req->func_id, req->ec_function); else if (req->npages < 0) err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL, - req->ec_function); + true, req->ec_function); else if (req->npages > 0) err = give_pages(dev, req->func_id, req->npages, 1, req->ec_function); @@ -645,7 +666,7 @@ static int mlx5_reclaim_root_pages(struct mlx5_core_dev *dev, int err; err = reclaim_pages(dev, func_id, optimal_reclaimed_pages(), - &nclaimed, mlx5_core_is_ecpf(dev)); + &nclaimed, false, mlx5_core_is_ecpf(dev)); if (err) { mlx5_core_warn(dev, "failed reclaiming pages (%d) for func id 0x%x\n", err, func_id); @@ -700,12 +721,14 @@ int mlx5_pagealloc_init(struct mlx5_core_dev *dev) return -ENOMEM; xa_init(&dev->priv.page_root_xa); + mlx5_pages_debugfs_init(dev); return 0; } void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev) { + mlx5_pages_debugfs_cleanup(dev); xa_destroy(&dev->priv.page_root_xa); destroy_workqueue(dev->priv.pg_wq); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c index 41807ef55201..db77f1d2eeb4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c @@ -3,7 +3,6 @@ #include <linux/interrupt.h> #include <linux/notifier.h> -#include <linux/module.h> #include <linux/mlx5/driver.h> #include "mlx5_core.h" #include "mlx5_irq.h" @@ -601,7 +600,8 @@ int mlx5_irq_table_init(struct mlx5_core_dev *dev) if (mlx5_core_is_sf(dev)) return 0; - irq_table = kvzalloc(sizeof(*irq_table), GFP_KERNEL); + irq_table = kvzalloc_node(sizeof(*irq_table), GFP_KERNEL, + dev->priv.numa_node); if (!irq_table) return -ENOMEM; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pd.c b/drivers/net/ethernet/mellanox/mlx5/core/pd.c index aabc53ad8bdd..ee5ffdeb9015 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pd.c @@ -31,7 +31,6 @@ */ #include <linux/kernel.h> -#include <linux/module.h> #include <linux/mlx5/driver.h> #include 
"mlx5_core.h" diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index 7b16a1188aab..e1bd54574ea5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -33,9 +33,10 @@ #include <linux/mlx5/port.h> #include "mlx5_core.h" -int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in, - int size_in, void *data_out, int size_out, - u16 reg_id, int arg, int write) +/* calling with verbose false will not print error to log */ +int mlx5_access_reg(struct mlx5_core_dev *dev, void *data_in, int size_in, + void *data_out, int size_out, u16 reg_id, int arg, + int write, bool verbose) { int outlen = MLX5_ST_SZ_BYTES(access_register_out) + size_out; int inlen = MLX5_ST_SZ_BYTES(access_register_in) + size_in; @@ -57,7 +58,9 @@ int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in, MLX5_SET(access_register_in, in, argument, arg); MLX5_SET(access_register_in, in, register_id, reg_id); - err = mlx5_cmd_exec(dev, in, inlen, out, outlen); + err = mlx5_cmd_do(dev, in, inlen, out, outlen); + if (verbose) + err = mlx5_cmd_check(dev, err, in, out); if (err) goto out; @@ -69,6 +72,15 @@ out: kvfree(in); return err; } +EXPORT_SYMBOL_GPL(mlx5_access_reg); + +int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in, + int size_in, void *data_out, int size_out, + u16 reg_id, int arg, int write) +{ + return mlx5_access_reg(dev, data_in, size_in, data_out, size_out, + reg_id, arg, write, true); +} EXPORT_SYMBOL_GPL(mlx5_core_access_reg); int mlx5_query_pcam_reg(struct mlx5_core_dev *dev, u32 *pcam, u8 feature_group, @@ -263,7 +275,6 @@ static int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num) { u32 in[MLX5_ST_SZ_DW(pmlp_reg)] = {0}; u32 out[MLX5_ST_SZ_DW(pmlp_reg)]; - int module_mapping; int err; MLX5_SET(pmlp_reg, in, local_port, 1); @@ -272,8 +283,9 @@ static int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num) if (err) return err; - module_mapping = MLX5_GET(pmlp_reg, out, lane0_module_mapping); - *module_num = module_mapping & MLX5_EEPROM_IDENTIFIER_BYTE_MASK; + *module_num = MLX5_GET(lane_2_module_mapping, + MLX5_ADDR_OF(pmlp_reg, out, lane0_module_mapping), + module); return 0; } @@ -353,6 +365,12 @@ static void mlx5_sfp_eeprom_params_set(u16 *i2c_addr, int *page_num, u16 *offset *offset -= MLX5_EEPROM_PAGE_LENGTH; } +static int mlx5_mcia_max_bytes(struct mlx5_core_dev *dev) +{ + /* mcia supports either 12 dwords or 32 dwords */ + return (MLX5_CAP_MCAM_FEATURE(dev, mcia_32dwords) ? 
32 : 12) * sizeof(u32); +} + static int mlx5_query_mcia(struct mlx5_core_dev *dev, struct mlx5_module_eeprom_query_params *params, u8 *data) { @@ -362,7 +380,7 @@ static int mlx5_query_mcia(struct mlx5_core_dev *dev, void *ptr; u16 size; - size = min_t(int, params->size, MLX5_EEPROM_MAX_BYTES); + size = min_t(int, params->size, mlx5_mcia_max_bytes(dev)); MLX5_SET(mcia_reg, in, l, 0); MLX5_SET(mcia_reg, in, size, size); @@ -433,35 +451,12 @@ int mlx5_query_module_eeprom_by_page(struct mlx5_core_dev *dev, struct mlx5_module_eeprom_query_params *params, u8 *data) { - u8 module_id; int err; err = mlx5_query_module_num(dev, &params->module_number); if (err) return err; - err = mlx5_query_module_id(dev, params->module_number, &module_id); - if (err) - return err; - - switch (module_id) { - case MLX5_MODULE_ID_SFP: - if (params->page > 0) - return -EINVAL; - break; - case MLX5_MODULE_ID_QSFP: - case MLX5_MODULE_ID_QSFP28: - case MLX5_MODULE_ID_QSFP_PLUS: - if (params->page > 3) - return -EINVAL; - break; - case MLX5_MODULE_ID_DSFP: - break; - default: - mlx5_core_err(dev, "Module ID not recognized: 0x%x\n", module_id); - return -EINVAL; - } - if (params->i2c_address != MLX5_I2C_ADDR_HIGH && params->i2c_address != MLX5_I2C_ADDR_LOW) { mlx5_core_err(dev, "I2C address not recognized: 0x%x\n", params->i2c_address); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rl.c b/drivers/net/ethernet/mellanox/mlx5/core/rl.c index 7161220afe30..9f8b4005f4bd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/rl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/rl.c @@ -31,7 +31,6 @@ */ #include <linux/kernel.h> -#include <linux/module.h> #include <linux/mlx5/driver.h> #include "mlx5_core.h" diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c index c61a5e83c78c..743422acc3d8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c @@ -669,15 +669,9 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher, case DR_ACTION_TYP_VPORT: attr.hit_gvmi = action->vport->caps->vhca_gvmi; dest_action = action; - if (rx_rule) { - if (action->vport->caps->num == MLX5_VPORT_UPLINK) { - mlx5dr_dbg(dmn, "Device doesn't support Loopback on WIRE vport\n"); - return -EOPNOTSUPP; - } - attr.final_icm_addr = action->vport->caps->icm_address_rx; - } else { - attr.final_icm_addr = action->vport->caps->icm_address_tx; - } + attr.final_icm_addr = rx_rule ?
+ action->vport->caps->icm_address_rx : + action->vport->caps->icm_address_tx; break; case DR_ACTION_TYP_POP_VLAN: if (!rx_rule && !(dmn->ste_ctx->actions_caps & diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c index 2784cd59fefe..d232f1ea34a2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c @@ -3,7 +3,6 @@ #include <linux/debugfs.h> #include <linux/kernel.h> -#include <linux/module.h> #include <linux/seq_file.h> #include "dr_types.h" @@ -630,7 +629,7 @@ void mlx5dr_dbg_init_dump(struct mlx5dr_domain *dmn) } dmn->dump_info.steering_debugfs = - debugfs_create_dir("steering", dev->priv.dbg_root); + debugfs_create_dir("steering", mlx5_debugfs_get_dev_root(dev)); dmn->dump_info.fdb_debugfs = debugfs_create_dir("fdb", dmn->dump_info.steering_debugfs); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c index 5fa7f9d6d8b9..fc6ae49b5ecc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c @@ -8,7 +8,7 @@ #define DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, dmn_type) \ ((dmn)->info.caps.dmn_type##_sw_owner || \ ((dmn)->info.caps.dmn_type##_sw_owner_v2 && \ - (dmn)->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_6DX)) + (dmn)->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_7)) static void dr_domain_init_csum_recalc_fts(struct mlx5dr_domain *dmn) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c index 7f6fd9c5e371..e289cfdbce07 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c @@ -4,7 +4,6 @@ #include "dr_types.h" #define DR_ICM_MODIFY_HDR_ALIGN_BASE 64 -#define DR_ICM_SYNC_THRESHOLD_POOL (64 * 1024 * 1024) struct mlx5dr_icm_pool { enum mlx5dr_icm_type icm_type; @@ -136,37 +135,35 @@ static void dr_icm_pool_mr_destroy(struct mlx5dr_icm_mr *icm_mr) kvfree(icm_mr); } -static int dr_icm_chunk_ste_init(struct mlx5dr_icm_chunk *chunk) +static int dr_icm_buddy_get_ste_size(struct mlx5dr_icm_buddy_mem *buddy) { - chunk->ste_arr = kvzalloc(chunk->num_of_entries * - sizeof(chunk->ste_arr[0]), GFP_KERNEL); - if (!chunk->ste_arr) - return -ENOMEM; - - chunk->hw_ste_arr = kvzalloc(chunk->num_of_entries * - DR_STE_SIZE_REDUCED, GFP_KERNEL); - if (!chunk->hw_ste_arr) - goto out_free_ste_arr; - - chunk->miss_list = kvmalloc(chunk->num_of_entries * - sizeof(chunk->miss_list[0]), GFP_KERNEL); - if (!chunk->miss_list) - goto out_free_hw_ste_arr; + /* We support only one type of STE size, both for ConnectX-5 and later + * devices. Once the support for match STE which has a larger tag is + * added (32B instead of 16B), the STE size for devices later than + * ConnectX-5 needs to account for that. 
+ */ + return DR_STE_SIZE_REDUCED; +} - return 0; +static void dr_icm_chunk_ste_init(struct mlx5dr_icm_chunk *chunk, int offset) +{ + struct mlx5dr_icm_buddy_mem *buddy = chunk->buddy_mem; + int index = offset / DR_STE_SIZE; -out_free_hw_ste_arr: - kvfree(chunk->hw_ste_arr); -out_free_ste_arr: - kvfree(chunk->ste_arr); - return -ENOMEM; + chunk->ste_arr = &buddy->ste_arr[index]; + chunk->miss_list = &buddy->miss_list[index]; + chunk->hw_ste_arr = buddy->hw_ste_arr + + index * dr_icm_buddy_get_ste_size(buddy); } static void dr_icm_chunk_ste_cleanup(struct mlx5dr_icm_chunk *chunk) { - kvfree(chunk->miss_list); - kvfree(chunk->hw_ste_arr); - kvfree(chunk->ste_arr); + struct mlx5dr_icm_buddy_mem *buddy = chunk->buddy_mem; + + memset(chunk->hw_ste_arr, 0, + chunk->num_of_entries * dr_icm_buddy_get_ste_size(buddy)); + memset(chunk->ste_arr, 0, + chunk->num_of_entries * sizeof(chunk->ste_arr[0])); } static enum mlx5dr_icm_type @@ -189,6 +186,44 @@ static void dr_icm_chunk_destroy(struct mlx5dr_icm_chunk *chunk, kvfree(chunk); } +static int dr_icm_buddy_init_ste_cache(struct mlx5dr_icm_buddy_mem *buddy) +{ + int num_of_entries = + mlx5dr_icm_pool_chunk_size_to_entries(buddy->pool->max_log_chunk_sz); + + buddy->ste_arr = kvcalloc(num_of_entries, + sizeof(struct mlx5dr_ste), GFP_KERNEL); + if (!buddy->ste_arr) + return -ENOMEM; + + /* Preallocate full STE size on non-ConnectX-5 devices since + * we need to support both full and reduced with the same cache. + */ + buddy->hw_ste_arr = kvcalloc(num_of_entries, + dr_icm_buddy_get_ste_size(buddy), GFP_KERNEL); + if (!buddy->hw_ste_arr) + goto free_ste_arr; + + buddy->miss_list = kvmalloc(num_of_entries * sizeof(struct list_head), GFP_KERNEL); + if (!buddy->miss_list) + goto free_hw_ste_arr; + + return 0; + +free_hw_ste_arr: + kvfree(buddy->hw_ste_arr); +free_ste_arr: + kvfree(buddy->ste_arr); + return -ENOMEM; +} + +static void dr_icm_buddy_cleanup_ste_cache(struct mlx5dr_icm_buddy_mem *buddy) +{ + kvfree(buddy->ste_arr); + kvfree(buddy->hw_ste_arr); + kvfree(buddy->miss_list); +} + static int dr_icm_buddy_create(struct mlx5dr_icm_pool *pool) { struct mlx5dr_icm_buddy_mem *buddy; @@ -208,11 +243,19 @@ static int dr_icm_buddy_create(struct mlx5dr_icm_pool *pool) buddy->icm_mr = icm_mr; buddy->pool = pool; + if (pool->icm_type == DR_ICM_TYPE_STE) { + /* Reduce allocations by preallocating and reusing the STE structures */ + if (dr_icm_buddy_init_ste_cache(buddy)) + goto err_cleanup_buddy; + } + /* add it to the -start- of the list in order to search in it first */ list_add(&buddy->list_node, &pool->buddy_mem_list); return 0; +err_cleanup_buddy: + mlx5dr_buddy_cleanup(buddy); err_free_buddy: kvfree(buddy); free_mr: @@ -234,6 +277,9 @@ static void dr_icm_buddy_destroy(struct mlx5dr_icm_buddy_mem *buddy) mlx5dr_buddy_cleanup(buddy); + if (buddy->pool->icm_type == DR_ICM_TYPE_STE) + dr_icm_buddy_cleanup_ste_cache(buddy); + kvfree(buddy); } @@ -261,34 +307,30 @@ dr_icm_chunk_create(struct mlx5dr_icm_pool *pool, chunk->byte_size = mlx5dr_icm_pool_chunk_size_to_byte(chunk_size, pool->icm_type); chunk->seg = seg; + chunk->buddy_mem = buddy_mem_pool; - if (pool->icm_type == DR_ICM_TYPE_STE && dr_icm_chunk_ste_init(chunk)) { - mlx5dr_err(pool->dmn, - "Failed to init ste arrays (order: %d)\n", - chunk_size); - goto out_free_chunk; - } + if (pool->icm_type == DR_ICM_TYPE_STE) + dr_icm_chunk_ste_init(chunk, offset); buddy_mem_pool->used_memory += chunk->byte_size; - chunk->buddy_mem = buddy_mem_pool; INIT_LIST_HEAD(&chunk->chunk_list); /* chunk now is part of the 
used_list */ list_add_tail(&chunk->chunk_list, &buddy_mem_pool->used_list); return chunk; - -out_free_chunk: - kvfree(chunk); - return NULL; } static bool dr_icm_pool_is_sync_required(struct mlx5dr_icm_pool *pool) { - if (pool->hot_memory_size > DR_ICM_SYNC_THRESHOLD_POOL) - return true; + int allow_hot_size; + + /* sync when hot memory reaches half of the pool size */ + allow_hot_size = + mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz, + pool->icm_type) / 2; - return false; + return pool->hot_memory_size > allow_hot_size; } static int dr_icm_pool_sync_all_buddy_pools(struct mlx5dr_icm_pool *pool) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c index e87cf498c77b..a4b5b415df90 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c @@ -13,18 +13,6 @@ static bool dr_mask_is_dmac_set(struct mlx5dr_match_spec *spec) return (spec->dmac_47_16 || spec->dmac_15_0); } -static bool dr_mask_is_src_addr_set(struct mlx5dr_match_spec *spec) -{ - return (spec->src_ip_127_96 || spec->src_ip_95_64 || - spec->src_ip_63_32 || spec->src_ip_31_0); -} - -static bool dr_mask_is_dst_addr_set(struct mlx5dr_match_spec *spec) -{ - return (spec->dst_ip_127_96 || spec->dst_ip_95_64 || - spec->dst_ip_63_32 || spec->dst_ip_31_0); -} - static bool dr_mask_is_l3_base_set(struct mlx5dr_match_spec *spec) { return (spec->ip_protocol || spec->frag || spec->tcp_flags || @@ -59,6 +47,11 @@ static bool dr_mask_is_ttl_set(struct mlx5dr_match_spec *spec) return spec->ttl_hoplimit; } +static bool dr_mask_is_ipv4_ihl_set(struct mlx5dr_match_spec *spec) +{ + return spec->ipv4_ihl; +} + #define DR_MASK_IS_L2_DST(_spec, _misc, _inner_outer) (_spec.first_vid || \ (_spec).first_cfi || (_spec).first_prio || (_spec).cvlan_tag || \ (_spec).svlan_tag || (_spec).dmac_47_16 || (_spec).dmac_15_0 || \ @@ -115,7 +108,7 @@ dr_mask_is_vxlan_gpe_set(struct mlx5dr_match_misc3 *misc3) static bool dr_matcher_supp_vxlan_gpe(struct mlx5dr_cmd_caps *caps) { - return (caps->sw_format_ver == MLX5_STEERING_FORMAT_CONNECTX_6DX) || + return (caps->sw_format_ver >= MLX5_STEERING_FORMAT_CONNECTX_6DX) || (caps->flex_protocols & MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED); } @@ -156,7 +149,7 @@ static bool dr_mask_is_tnl_geneve_tlv_opt_exist_set(struct mlx5dr_match_misc *mi static bool dr_matcher_supp_tnl_geneve(struct mlx5dr_cmd_caps *caps) { - return (caps->sw_format_ver == MLX5_STEERING_FORMAT_CONNECTX_6DX) || + return (caps->sw_format_ver >= MLX5_STEERING_FORMAT_CONNECTX_6DX) || (caps->flex_protocols & MLX5_FLEX_PARSER_GENEVE_ENABLED); } @@ -273,13 +266,13 @@ static bool dr_mask_is_tnl_gtpu_any(struct mlx5dr_match_param *mask, static int dr_matcher_supp_icmp_v4(struct mlx5dr_cmd_caps *caps) { - return (caps->sw_format_ver == MLX5_STEERING_FORMAT_CONNECTX_6DX) || + return (caps->sw_format_ver >= MLX5_STEERING_FORMAT_CONNECTX_6DX) || (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED); } static int dr_matcher_supp_icmp_v6(struct mlx5dr_cmd_caps *caps) { - return (caps->sw_format_ver == MLX5_STEERING_FORMAT_CONNECTX_6DX) || + return (caps->sw_format_ver >= MLX5_STEERING_FORMAT_CONNECTX_6DX) || (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V6_ENABLED); } @@ -503,11 +496,11 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher, &mask, inner, rx); if (outer_ipv == DR_RULE_IPV6) { - if (dr_mask_is_dst_addr_set(&mask.outer)) + if 
(DR_MASK_IS_DST_IP_SET(&mask.outer)) mlx5dr_ste_build_eth_l3_ipv6_dst(ste_ctx, &sb[idx++], &mask, inner, rx); - if (dr_mask_is_src_addr_set(&mask.outer)) + if (DR_MASK_IS_SRC_IP_SET(&mask.outer)) mlx5dr_ste_build_eth_l3_ipv6_src(ste_ctx, &sb[idx++], &mask, inner, rx); @@ -519,7 +512,8 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher, mlx5dr_ste_build_eth_l3_ipv4_5_tuple(ste_ctx, &sb[idx++], &mask, inner, rx); - if (dr_mask_is_ttl_set(&mask.outer)) + if (dr_mask_is_ttl_set(&mask.outer) || + dr_mask_is_ipv4_ihl_set(&mask.outer)) mlx5dr_ste_build_eth_l3_ipv4_misc(ste_ctx, &sb[idx++], &mask, inner, rx); } @@ -610,11 +604,11 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher, &mask, inner, rx); if (inner_ipv == DR_RULE_IPV6) { - if (dr_mask_is_dst_addr_set(&mask.inner)) + if (DR_MASK_IS_DST_IP_SET(&mask.inner)) mlx5dr_ste_build_eth_l3_ipv6_dst(ste_ctx, &sb[idx++], &mask, inner, rx); - if (dr_mask_is_src_addr_set(&mask.inner)) + if (DR_MASK_IS_SRC_IP_SET(&mask.inner)) mlx5dr_ste_build_eth_l3_ipv6_src(ste_ctx, &sb[idx++], &mask, inner, rx); @@ -626,7 +620,8 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher, mlx5dr_ste_build_eth_l3_ipv4_5_tuple(ste_ctx, &sb[idx++], &mask, inner, rx); - if (dr_mask_is_ttl_set(&mask.inner)) + if (dr_mask_is_ttl_set(&mask.inner) || + dr_mask_is_ipv4_ihl_set(&mask.inner)) mlx5dr_ste_build_eth_l3_ipv4_misc(ste_ctx, &sb[idx++], &mask, inner, rx); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c index 7e61742e58a0..518e949847a3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c @@ -523,8 +523,8 @@ void mlx5dr_ste_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx, struct mlx5dr_ste_actions_attr *attr, u32 *added_stes) { - ste_ctx->set_actions_tx(dmn, action_type_set, hw_ste_arr, - attr, added_stes); + ste_ctx->set_actions_tx(dmn, action_type_set, ste_ctx->actions_caps, + hw_ste_arr, attr, added_stes); } void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx, @@ -534,8 +534,8 @@ void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx, struct mlx5dr_ste_actions_attr *attr, u32 *added_stes) { - ste_ctx->set_actions_rx(dmn, action_type_set, hw_ste_arr, - attr, added_stes); + ste_ctx->set_actions_rx(dmn, action_type_set, ste_ctx->actions_caps, + hw_ste_arr, attr, added_stes); } const struct mlx5dr_ste_action_modify_field * @@ -602,12 +602,34 @@ int mlx5dr_ste_set_action_decap_l3_list(struct mlx5dr_ste_ctx *ste_ctx, used_hw_action_num); } +static int dr_ste_build_pre_check_spec(struct mlx5dr_domain *dmn, + struct mlx5dr_match_spec *spec) +{ + if (spec->ip_version) { + if (spec->ip_version != 0xf) { + mlx5dr_err(dmn, + "Partial ip_version mask with src/dst IP is not supported\n"); + return -EINVAL; + } + } else if (spec->ethertype != 0xffff && + (DR_MASK_IS_SRC_IP_SET(spec) || DR_MASK_IS_DST_IP_SET(spec))) { + mlx5dr_err(dmn, + "Partial/no ethertype mask with src/dst IP is not supported\n"); + return -EINVAL; + } + + return 0; +} + int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn, u8 match_criteria, struct mlx5dr_match_param *mask, struct mlx5dr_match_param *value) { - if (!value && (match_criteria & DR_MATCHER_CRITERIA_MISC)) { + if (value) + return 0; + + if (match_criteria & DR_MATCHER_CRITERIA_MISC) { if (mask->misc.source_port && mask->misc.source_port != 0xffff) { mlx5dr_err(dmn, "Partial mask source_port is not 
supported\n"); @@ -621,6 +643,14 @@ int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn, } } + if ((match_criteria & DR_MATCHER_CRITERIA_OUTER) && + dr_ste_build_pre_check_spec(dmn, &mask->outer)) + return -EINVAL; + + if ((match_criteria & DR_MATCHER_CRITERIA_INNER) && + dr_ste_build_pre_check_spec(dmn, &mask->inner)) + return -EINVAL; + return 0; } @@ -763,6 +793,7 @@ static void dr_ste_copy_mask_spec(char *mask, struct mlx5dr_match_spec *spec, bo spec->tcp_sport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, tcp_sport, clr); spec->tcp_dport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, tcp_dport, clr); + spec->ipv4_ihl = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ipv4_ihl, clr); spec->ttl_hoplimit = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ttl_hoplimit, clr); spec->udp_sport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, udp_sport, clr); @@ -1330,15 +1361,14 @@ void mlx5dr_ste_build_tnl_header_0_1(struct mlx5dr_ste_ctx *ste_ctx, ste_ctx->build_tnl_header_0_1_init(sb, mask); } -static struct mlx5dr_ste_ctx *mlx5dr_ste_ctx_arr[] = { - [MLX5_STEERING_FORMAT_CONNECTX_5] = &ste_ctx_v0, - [MLX5_STEERING_FORMAT_CONNECTX_6DX] = &ste_ctx_v1, -}; - struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx(u8 version) { - if (version > MLX5_STEERING_FORMAT_CONNECTX_6DX) - return NULL; + if (version == MLX5_STEERING_FORMAT_CONNECTX_5) + return mlx5dr_ste_get_ctx_v0(); + else if (version == MLX5_STEERING_FORMAT_CONNECTX_6DX) + return mlx5dr_ste_get_ctx_v1(); + else if (version == MLX5_STEERING_FORMAT_CONNECTX_7) + return mlx5dr_ste_get_ctx_v2(); - return mlx5dr_ste_ctx_arr[version]; + return NULL; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h index ca8fa32b8680..17513baff9b0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h @@ -161,11 +161,13 @@ struct mlx5dr_ste_ctx { u32 actions_caps; void (*set_actions_rx)(struct mlx5dr_domain *dmn, u8 *action_type_set, + u32 actions_caps, u8 *hw_ste_arr, struct mlx5dr_ste_actions_attr *attr, u32 *added_stes); void (*set_actions_tx)(struct mlx5dr_domain *dmn, u8 *action_type_set, + u32 actions_caps, u8 *hw_ste_arr, struct mlx5dr_ste_actions_attr *attr, u32 *added_stes); @@ -197,7 +199,8 @@ struct mlx5dr_ste_ctx { void (*prepare_for_postsend)(u8 *hw_ste_p, u32 ste_size); }; -extern struct mlx5dr_ste_ctx ste_ctx_v0; -extern struct mlx5dr_ste_ctx ste_ctx_v1; +struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v0(void); +struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v1(void); +struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v2(void); #endif /* _DR_STE_ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c index 2d62950f7a29..5a322335f204 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c @@ -408,6 +408,7 @@ static void dr_ste_v0_arr_init_next(u8 **last_ste, static void dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn, u8 *action_type_set, + u32 actions_caps, u8 *last_ste, struct mlx5dr_ste_actions_attr *attr, u32 *added_stes) @@ -477,6 +478,7 @@ dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn, static void dr_ste_v0_set_actions_rx(struct mlx5dr_domain *dmn, u8 *action_type_set, + u32 actions_caps, u8 *last_ste, struct mlx5dr_ste_actions_attr *attr, u32 *added_stes) @@ -1152,6 +1154,7 @@ dr_ste_v0_build_eth_l3_ipv4_misc_tag(struct mlx5dr_match_param *value, struct 
mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer; DR_STE_SET_TAG(eth_l3_ipv4_misc, tag, time_to_live, spec, ttl_hoplimit); + DR_STE_SET_TAG(eth_l3_ipv4_misc, tag, ihl, spec, ipv4_ihl); return 0; } @@ -1897,7 +1900,7 @@ static void dr_ste_v0_build_tnl_header_0_1_init(struct mlx5dr_ste_build *sb, sb->ste_build_tag_func = &dr_ste_v0_build_tnl_header_0_1_tag; } -struct mlx5dr_ste_ctx ste_ctx_v0 = { +static struct mlx5dr_ste_ctx ste_ctx_v0 = { /* Builders */ .build_eth_l2_src_dst_init = &dr_ste_v0_build_eth_l2_src_dst_init, .build_eth_l3_ipv6_src_init = &dr_ste_v0_build_eth_l3_ipv6_src_init, @@ -1950,3 +1953,8 @@ struct mlx5dr_ste_ctx ste_ctx_v0 = { .set_action_copy = &dr_ste_v0_set_action_copy, .set_action_decap_l3_list = &dr_ste_v0_set_action_decap_l3_list, }; + +struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v0(void) +{ + return &ste_ctx_v0; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c index 6ca06800f1d9..fcb962c6db2e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c @@ -3,7 +3,7 @@ #include <linux/types.h> #include "mlx5_ifc_dr_ste_v1.h" -#include "dr_ste.h" +#include "dr_ste_v1.h" #define DR_STE_CALC_DFNR_TYPE(lookup_type, inner) \ ((inner) ? DR_STE_V1_LU_TYPE_##lookup_type##_I : \ @@ -121,12 +121,12 @@ enum { DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_1 = 0x70, DR_STE_V1_ACTION_MDFY_FLD_METADATA_2_CQE = 0x7b, DR_STE_V1_ACTION_MDFY_FLD_GNRL_PURPOSE = 0x7c, - DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2 = 0x8c, - DR_STE_V1_ACTION_MDFY_FLD_REGISTER_3 = 0x8d, - DR_STE_V1_ACTION_MDFY_FLD_REGISTER_4 = 0x8e, - DR_STE_V1_ACTION_MDFY_FLD_REGISTER_5 = 0x8f, - DR_STE_V1_ACTION_MDFY_FLD_REGISTER_6 = 0x90, - DR_STE_V1_ACTION_MDFY_FLD_REGISTER_7 = 0x91, + DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_0 = 0x8c, + DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_1 = 0x8d, + DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_0 = 0x8e, + DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_1 = 0x8f, + DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_0 = 0x90, + DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_1 = 0x91, }; static const struct mlx5dr_ste_action_modify_field dr_ste_v1_action_modify_field_arr[] = { @@ -223,22 +223,22 @@ static const struct mlx5dr_ste_action_modify_field dr_ste_v1_action_modify_field .hw_field = DR_STE_V1_ACTION_MDFY_FLD_METADATA_2_CQE, .start = 0, .end = 31, }, [MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = { - .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_6, .start = 0, .end = 31, + .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_0, .start = 0, .end = 31, }, [MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = { - .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_7, .start = 0, .end = 31, + .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_1, .start = 0, .end = 31, }, [MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = { - .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_4, .start = 0, .end = 31, + .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_0, .start = 0, .end = 31, }, [MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = { - .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_5, .start = 0, .end = 31, + .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_1, .start = 0, .end = 31, }, [MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = { - .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2, .start = 0, .end = 31, + .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_0, .start = 0, .end = 31, }, [MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = { - .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_3, .start = 0, .end = 31, + .hw_field = 
DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_1, .start = 0, .end = 31, }, [MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = { .hw_field = DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_0, .start = 0, .end = 31, @@ -262,7 +262,7 @@ static void dr_ste_v1_set_entry_type(u8 *hw_ste_p, u8 entry_type) MLX5_SET(ste_match_bwc_v1, hw_ste_p, entry_format, entry_type); } -static void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr) +void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr) { u64 index = miss_addr >> 6; @@ -270,7 +270,7 @@ static void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr) MLX5_SET(ste_match_bwc_v1, hw_ste_p, miss_address_31_6, index); } -static u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p) +u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p) { u64 index = ((u64)MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_31_6) | @@ -279,12 +279,12 @@ static u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p) return index << 6; } -static void dr_ste_v1_set_byte_mask(u8 *hw_ste_p, u16 byte_mask) +void dr_ste_v1_set_byte_mask(u8 *hw_ste_p, u16 byte_mask) { MLX5_SET(ste_match_bwc_v1, hw_ste_p, byte_mask, byte_mask); } -static u16 dr_ste_v1_get_byte_mask(u8 *hw_ste_p) +u16 dr_ste_v1_get_byte_mask(u8 *hw_ste_p) { return MLX5_GET(ste_match_bwc_v1, hw_ste_p, byte_mask); } @@ -295,13 +295,13 @@ static void dr_ste_v1_set_lu_type(u8 *hw_ste_p, u16 lu_type) MLX5_SET(ste_match_bwc_v1, hw_ste_p, match_definer_ctx_idx, lu_type & 0xFF); } -static void dr_ste_v1_set_next_lu_type(u8 *hw_ste_p, u16 lu_type) +void dr_ste_v1_set_next_lu_type(u8 *hw_ste_p, u16 lu_type) { MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_entry_format, lu_type >> 8); MLX5_SET(ste_match_bwc_v1, hw_ste_p, hash_definer_ctx_idx, lu_type & 0xFF); } -static u16 dr_ste_v1_get_next_lu_type(u8 *hw_ste_p) +u16 dr_ste_v1_get_next_lu_type(u8 *hw_ste_p) { u8 mode = MLX5_GET(ste_match_bwc_v1, hw_ste_p, next_entry_format); u8 index = MLX5_GET(ste_match_bwc_v1, hw_ste_p, hash_definer_ctx_idx); @@ -314,7 +314,7 @@ static void dr_ste_v1_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi) MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_63_48, gvmi); } -static void dr_ste_v1_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size) +void dr_ste_v1_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size) { u64 index = (icm_addr >> 5) | ht_size; @@ -322,8 +322,7 @@ static void dr_ste_v1_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size) MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_31_5_size, index); } -static void dr_ste_v1_init(u8 *hw_ste_p, u16 lu_type, - bool is_rx, u16 gvmi) +void dr_ste_v1_init(u8 *hw_ste_p, u16 lu_type, bool is_rx, u16 gvmi) { dr_ste_v1_set_lu_type(hw_ste_p, lu_type); dr_ste_v1_set_next_lu_type(hw_ste_p, MLX5DR_STE_LU_TYPE_DONT_CARE); @@ -333,8 +332,7 @@ static void dr_ste_v1_init(u8 *hw_ste_p, u16 lu_type, MLX5_SET(ste_match_bwc_v1, hw_ste_p, miss_address_63_48, gvmi); } -static void dr_ste_v1_prepare_for_postsend(u8 *hw_ste_p, - u32 ste_size) +void dr_ste_v1_prepare_for_postsend(u8 *hw_ste_p, u32 ste_size) { u8 *tag = hw_ste_p + DR_STE_SIZE_CTRL; u8 *mask = tag + DR_STE_SIZE_TAG; @@ -511,11 +509,12 @@ static void dr_ste_v1_arr_init_next_match(u8 **last_ste, memset(action, 0, MLX5_FLD_SZ_BYTES(ste_mask_and_match_v1, action)); } -static void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn, - u8 *action_type_set, - u8 *last_ste, - struct mlx5dr_ste_actions_attr *attr, - u32 *added_stes) +void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn, + u8 *action_type_set, + u32 actions_caps, + u8 *last_ste, + struct mlx5dr_ste_actions_attr *attr, + u32 *added_stes) { u8 *action = 
MLX5_ADDR_OF(ste_match_bwc_v1, last_ste, action); u8 action_sz = DR_STE_ACTION_DOUBLE_SZ; @@ -533,7 +532,10 @@ static void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn, dr_ste_v1_set_pop_vlan(last_ste, action, attr->vlans.count); action_sz -= DR_STE_ACTION_SINGLE_SZ; action += DR_STE_ACTION_SINGLE_SZ; - allow_modify_hdr = false; + + /* Check if vlan_pop and modify_hdr on same STE is supported */ + if (!(actions_caps & DR_STE_CTX_ACTION_CAP_POP_MDFY)) + allow_modify_hdr = false; } if (action_type_set[DR_ACTION_TYP_CTR]) @@ -631,11 +633,12 @@ static void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn, dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1); } -static void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn, - u8 *action_type_set, - u8 *last_ste, - struct mlx5dr_ste_actions_attr *attr, - u32 *added_stes) +void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn, + u8 *action_type_set, + u32 actions_caps, + u8 *last_ste, + struct mlx5dr_ste_actions_attr *attr, + u32 *added_stes) { u8 *action = MLX5_ADDR_OF(ste_match_bwc_v1, last_ste, action); u8 action_sz = DR_STE_ACTION_DOUBLE_SZ; @@ -677,13 +680,16 @@ static void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn, dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi); action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action); action_sz = DR_STE_ACTION_TRIPLE_SZ; - allow_modify_hdr = false; - allow_ctr = false; } dr_ste_v1_set_pop_vlan(last_ste, action, attr->vlans.count); action_sz -= DR_STE_ACTION_SINGLE_SZ; action += DR_STE_ACTION_SINGLE_SZ; + allow_ctr = false; + + /* Check if vlan_pop and modify_hdr on same STE is supported */ + if (!(actions_caps & DR_STE_CTX_ACTION_CAP_POP_MDFY)) + allow_modify_hdr = false; } if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) { @@ -731,9 +737,9 @@ static void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn, action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action); action_sz = DR_STE_ACTION_TRIPLE_SZ; allow_modify_hdr = true; - allow_ctr = false; } dr_ste_v1_set_counter_id(last_ste, attr->ctr_id); + allow_ctr = false; } if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L2]) { @@ -800,11 +806,11 @@ static void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn, dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1); } -static void dr_ste_v1_set_action_set(u8 *d_action, - u8 hw_field, - u8 shifter, - u8 length, - u32 data) +void dr_ste_v1_set_action_set(u8 *d_action, + u8 hw_field, + u8 shifter, + u8 length, + u32 data) { shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET; MLX5_SET(ste_double_action_set_v1, d_action, action_id, DR_STE_V1_ACTION_ID_SET); @@ -814,11 +820,11 @@ static void dr_ste_v1_set_action_set(u8 *d_action, MLX5_SET(ste_double_action_set_v1, d_action, inline_data, data); } -static void dr_ste_v1_set_action_add(u8 *d_action, - u8 hw_field, - u8 shifter, - u8 length, - u32 data) +void dr_ste_v1_set_action_add(u8 *d_action, + u8 hw_field, + u8 shifter, + u8 length, + u32 data) { shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET; MLX5_SET(ste_double_action_add_v1, d_action, action_id, DR_STE_V1_ACTION_ID_ADD); @@ -828,12 +834,12 @@ static void dr_ste_v1_set_action_add(u8 *d_action, MLX5_SET(ste_double_action_add_v1, d_action, add_value, data); } -static void dr_ste_v1_set_action_copy(u8 *d_action, - u8 dst_hw_field, - u8 dst_shifter, - u8 dst_len, - u8 src_hw_field, - u8 src_shifter) +void dr_ste_v1_set_action_copy(u8 *d_action, + u8 dst_hw_field, + u8 dst_shifter, + u8 dst_len, + u8 src_hw_field, + u8 src_shifter) { dst_shifter += 
MLX5_MODIFY_HEADER_V1_QW_OFFSET; src_shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET; @@ -848,11 +854,11 @@ static void dr_ste_v1_set_action_copy(u8 *d_action, #define DR_STE_DECAP_L3_ACTION_NUM 8 #define DR_STE_L2_HDR_MAX_SZ 20 -static int dr_ste_v1_set_action_decap_l3_list(void *data, - u32 data_sz, - u8 *hw_action, - u32 hw_action_sz, - u16 *used_hw_action_num) +int dr_ste_v1_set_action_decap_l3_list(void *data, + u32 data_sz, + u8 *hw_action, + u32 hw_action_sz, + u16 *used_hw_action_num) { u8 padded_data[DR_STE_L2_HDR_MAX_SZ] = {}; void *data_ptr = padded_data; @@ -977,8 +983,8 @@ static int dr_ste_v1_build_eth_l2_src_dst_tag(struct mlx5dr_match_param *value, return 0; } -static void dr_ste_v1_build_eth_l2_src_dst_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_eth_l2_src_dst_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_eth_l2_src_dst_bit_mask(mask, sb->inner, sb->bit_mask); @@ -1001,8 +1007,8 @@ static int dr_ste_v1_build_eth_l3_ipv6_dst_tag(struct mlx5dr_match_param *value, return 0; } -static void dr_ste_v1_build_eth_l3_ipv6_dst_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_eth_l3_ipv6_dst_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_eth_l3_ipv6_dst_tag(mask, sb, sb->bit_mask); @@ -1025,8 +1031,8 @@ static int dr_ste_v1_build_eth_l3_ipv6_src_tag(struct mlx5dr_match_param *value, return 0; } -static void dr_ste_v1_build_eth_l3_ipv6_src_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_eth_l3_ipv6_src_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_eth_l3_ipv6_src_tag(mask, sb, sb->bit_mask); @@ -1060,8 +1066,8 @@ static int dr_ste_v1_build_eth_l3_ipv4_5_tuple_tag(struct mlx5dr_match_param *va return 0; } -static void dr_ste_v1_build_eth_l3_ipv4_5_tuple_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_eth_l3_ipv4_5_tuple_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_eth_l3_ipv4_5_tuple_tag(mask, sb, sb->bit_mask); @@ -1079,8 +1085,8 @@ static void dr_ste_v1_build_eth_l2_src_or_dst_bit_mask(struct mlx5dr_match_param DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, first_vlan_id, mask, first_vid); DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, first_cfi, mask, first_cfi); DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, first_priority, mask, first_prio); - DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, ip_fragmented, mask, frag); // ? - DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, l3_ethertype, mask, ethertype); // ? 
+ DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, ip_fragmented, mask, frag); + DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, l3_ethertype, mask, ethertype); DR_STE_SET_ONES(eth_l2_src_v1, bit_mask, l3_type, mask, ip_version); if (mask->svlan_tag || mask->cvlan_tag) { @@ -1201,8 +1207,8 @@ static int dr_ste_v1_build_eth_l2_src_tag(struct mlx5dr_match_param *value, return dr_ste_v1_build_eth_l2_src_or_dst_tag(value, sb->inner, tag); } -static void dr_ste_v1_build_eth_l2_src_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_eth_l2_src_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_eth_l2_src_bit_mask(mask, sb->inner, sb->bit_mask); @@ -1234,8 +1240,8 @@ static int dr_ste_v1_build_eth_l2_dst_tag(struct mlx5dr_match_param *value, return dr_ste_v1_build_eth_l2_src_or_dst_tag(value, sb->inner, tag); } -static void dr_ste_v1_build_eth_l2_dst_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_eth_l2_dst_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_eth_l2_dst_bit_mask(mask, sb->inner, sb->bit_mask); @@ -1314,8 +1320,8 @@ static int dr_ste_v1_build_eth_l2_tnl_tag(struct mlx5dr_match_param *value, return 0; } -static void dr_ste_v1_build_eth_l2_tnl_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_eth_l2_tnl_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_eth_l2_tnl_bit_mask(mask, sb->inner, sb->bit_mask); @@ -1331,12 +1337,13 @@ static int dr_ste_v1_build_eth_l3_ipv4_misc_tag(struct mlx5dr_match_param *value struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer; DR_STE_SET_TAG(eth_l3_ipv4_misc_v1, tag, time_to_live, spec, ttl_hoplimit); + DR_STE_SET_TAG(eth_l3_ipv4_misc_v1, tag, ihl, spec, ipv4_ihl); return 0; } -static void dr_ste_v1_build_eth_l3_ipv4_misc_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_eth_l3_ipv4_misc_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_eth_l3_ipv4_misc_tag(mask, sb, sb->bit_mask); @@ -1375,8 +1382,8 @@ static int dr_ste_v1_build_eth_ipv6_l3_l4_tag(struct mlx5dr_match_param *value, return 0; } -static void dr_ste_v1_build_eth_ipv6_l3_l4_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_eth_ipv6_l3_l4_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_eth_ipv6_l3_l4_tag(mask, sb, sb->bit_mask); @@ -1399,8 +1406,8 @@ static int dr_ste_v1_build_mpls_tag(struct mlx5dr_match_param *value, return 0; } -static void dr_ste_v1_build_mpls_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_mpls_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_mpls_tag(mask, sb, sb->bit_mask); @@ -1426,8 +1433,8 @@ static int dr_ste_v1_build_tnl_gre_tag(struct mlx5dr_match_param *value, return 0; } -static void dr_ste_v1_build_tnl_gre_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_tnl_gre_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_tnl_gre_tag(mask, sb, sb->bit_mask); @@ -1471,8 +1478,8 @@ static int dr_ste_v1_build_tnl_mpls_tag(struct mlx5dr_match_param *value, return 0; } -static void dr_ste_v1_build_tnl_mpls_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_tnl_mpls_init(struct 
mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_tnl_mpls_tag(mask, sb, sb->bit_mask); @@ -1506,8 +1513,8 @@ static int dr_ste_v1_build_tnl_mpls_over_udp_tag(struct mlx5dr_match_param *valu return 0; } -static void dr_ste_v1_build_tnl_mpls_over_udp_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_tnl_mpls_over_udp_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_tnl_mpls_over_udp_tag(mask, sb, sb->bit_mask); @@ -1547,8 +1554,8 @@ static int dr_ste_v1_build_tnl_mpls_over_gre_tag(struct mlx5dr_match_param *valu return 0; } -static void dr_ste_v1_build_tnl_mpls_over_gre_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_tnl_mpls_over_gre_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_tnl_mpls_over_gre_tag(mask, sb, sb->bit_mask); @@ -1594,8 +1601,8 @@ static int dr_ste_v1_build_icmp_tag(struct mlx5dr_match_param *value, return 0; } -static void dr_ste_v1_build_icmp_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_icmp_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_icmp_tag(mask, sb, sb->bit_mask); @@ -1616,8 +1623,8 @@ static int dr_ste_v1_build_general_purpose_tag(struct mlx5dr_match_param *value, return 0; } -static void dr_ste_v1_build_general_purpose_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_general_purpose_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_general_purpose_tag(mask, sb, sb->bit_mask); @@ -1643,8 +1650,8 @@ static int dr_ste_v1_build_eth_l4_misc_tag(struct mlx5dr_match_param *value, return 0; } -static void dr_ste_v1_build_eth_l4_misc_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_eth_l4_misc_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_eth_l4_misc_tag(mask, sb, sb->bit_mask); @@ -1673,9 +1680,8 @@ dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_tag(struct mlx5dr_match_param *value, return 0; } -static void -dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_tag(mask, sb, sb->bit_mask); @@ -1703,9 +1709,8 @@ dr_ste_v1_build_flex_parser_tnl_geneve_tag(struct mlx5dr_match_param *value, return 0; } -static void -dr_ste_v1_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_flex_parser_tnl_geneve_tag(mask, sb, sb->bit_mask); @@ -1726,8 +1731,8 @@ static int dr_ste_v1_build_tnl_header_0_1_tag(struct mlx5dr_match_param *value, return 0; } -static void dr_ste_v1_build_tnl_header_0_1_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_tnl_header_0_1_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER; dr_ste_v1_build_tnl_header_0_1_tag(mask, sb, sb->bit_mask); @@ -1749,8 +1754,8 @@ static int dr_ste_v1_build_register_0_tag(struct mlx5dr_match_param *value, return 0; } -static void dr_ste_v1_build_register_0_init(struct mlx5dr_ste_build *sb, - struct 
mlx5dr_match_param *mask) +void dr_ste_v1_build_register_0_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_register_0_tag(mask, sb, sb->bit_mask); @@ -1773,8 +1778,8 @@ static int dr_ste_v1_build_register_1_tag(struct mlx5dr_match_param *value, return 0; } -static void dr_ste_v1_build_register_1_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_register_1_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_register_1_tag(mask, sb, sb->bit_mask); @@ -1837,8 +1842,8 @@ static int dr_ste_v1_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value, return 0; } -static void dr_ste_v1_build_src_gvmi_qpn_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_src_gvmi_qpn_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_src_gvmi_qpn_bit_mask(mask, sb->bit_mask); @@ -1892,8 +1897,8 @@ static int dr_ste_v1_build_felx_parser_tag(struct mlx5dr_match_param *value, return 0; } -static void dr_ste_v1_build_flex_parser_0_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_flex_parser_0_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_0; dr_ste_v1_build_felx_parser_tag(mask, sb, sb->bit_mask); @@ -1901,8 +1906,8 @@ static void dr_ste_v1_build_flex_parser_0_init(struct mlx5dr_ste_build *sb, sb->ste_build_tag_func = &dr_ste_v1_build_felx_parser_tag; } -static void dr_ste_v1_build_flex_parser_1_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_flex_parser_1_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_1; dr_ste_v1_build_felx_parser_tag(mask, sb, sb->bit_mask); @@ -1926,7 +1931,7 @@ dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_tag(struct mlx5dr_match_param *va return 0; } -static void +void dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask) { @@ -1959,7 +1964,7 @@ dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_tag(struct mlx5dr_match_par return 0; } -static void +void dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init(struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask) { @@ -1982,8 +1987,8 @@ static int dr_ste_v1_build_flex_parser_tnl_gtpu_tag(struct mlx5dr_match_param *v return 0; } -static void dr_ste_v1_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_flex_parser_tnl_gtpu_tag(mask, sb, sb->bit_mask); @@ -2008,7 +2013,7 @@ dr_ste_v1_build_tnl_gtpu_flex_parser_0_tag(struct mlx5dr_match_param *value, return 0; } -static void +void dr_ste_v1_build_tnl_gtpu_flex_parser_0_init(struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask) { @@ -2035,7 +2040,7 @@ dr_ste_v1_build_tnl_gtpu_flex_parser_1_tag(struct mlx5dr_match_param *value, return 0; } -static void +void dr_ste_v1_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask) { @@ -2046,7 +2051,7 @@ dr_ste_v1_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb, sb->ste_build_tag_func = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_tag; } -struct mlx5dr_ste_ctx ste_ctx_v1 = { +static struct mlx5dr_ste_ctx ste_ctx_v1 = { /* Builders */ 
.build_eth_l2_src_dst_init = &dr_ste_v1_build_eth_l2_src_dst_init, .build_eth_l3_ipv6_src_init = &dr_ste_v1_build_eth_l3_ipv6_src_init, @@ -2091,7 +2096,8 @@ struct mlx5dr_ste_ctx ste_ctx_v1 = { /* Actions */ .actions_caps = DR_STE_CTX_ACTION_CAP_TX_POP | DR_STE_CTX_ACTION_CAP_RX_PUSH | - DR_STE_CTX_ACTION_CAP_RX_ENCAP, + DR_STE_CTX_ACTION_CAP_RX_ENCAP | + DR_STE_CTX_ACTION_CAP_POP_MDFY, .set_actions_rx = &dr_ste_v1_set_actions_rx, .set_actions_tx = &dr_ste_v1_set_actions_tx, .modify_field_arr_sz = ARRAY_SIZE(dr_ste_v1_action_modify_field_arr), @@ -2103,3 +2109,8 @@ struct mlx5dr_ste_ctx ste_ctx_v1 = { /* Send */ .prepare_for_postsend = &dr_ste_v1_prepare_for_postsend, }; + +struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v1(void) +{ + return &ste_ctx_v1; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h new file mode 100644 index 000000000000..8a1d49790c6e --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h @@ -0,0 +1,94 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */ + +#ifndef _DR_STE_V1_ +#define _DR_STE_V1_ + +#include "dr_types.h" +#include "dr_ste.h" + +void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr); +u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p); +void dr_ste_v1_set_byte_mask(u8 *hw_ste_p, u16 byte_mask); +u16 dr_ste_v1_get_byte_mask(u8 *hw_ste_p); +void dr_ste_v1_set_next_lu_type(u8 *hw_ste_p, u16 lu_type); +u16 dr_ste_v1_get_next_lu_type(u8 *hw_ste_p); +void dr_ste_v1_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size); +void dr_ste_v1_init(u8 *hw_ste_p, u16 lu_type, bool is_rx, u16 gvmi); +void dr_ste_v1_prepare_for_postsend(u8 *hw_ste_p, u32 ste_size); +void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn, u8 *action_type_set, + u32 actions_caps, u8 *last_ste, + struct mlx5dr_ste_actions_attr *attr, u32 *added_stes); +void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn, u8 *action_type_set, + u32 actions_caps, u8 *last_ste, + struct mlx5dr_ste_actions_attr *attr, u32 *added_stes); +void dr_ste_v1_set_action_set(u8 *d_action, u8 hw_field, u8 shifter, + u8 length, u32 data); +void dr_ste_v1_set_action_add(u8 *d_action, u8 hw_field, u8 shifter, + u8 length, u32 data); +void dr_ste_v1_set_action_copy(u8 *d_action, u8 dst_hw_field, u8 dst_shifter, + u8 dst_len, u8 src_hw_field, u8 src_shifter); +int dr_ste_v1_set_action_decap_l3_list(void *data, u32 data_sz, u8 *hw_action, + u32 hw_action_sz, u16 *used_hw_action_num); +void dr_ste_v1_build_eth_l2_src_dst_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_eth_l3_ipv6_dst_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_eth_l3_ipv6_src_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_eth_l3_ipv4_5_tuple_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_eth_l2_src_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_eth_l2_dst_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_eth_l2_tnl_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_eth_l3_ipv4_misc_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_eth_ipv6_l3_l4_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void 
dr_ste_v1_build_mpls_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_tnl_gre_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_tnl_mpls_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_tnl_mpls_over_udp_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_tnl_mpls_over_gre_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_icmp_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_general_purpose_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_eth_l4_misc_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_tnl_header_0_1_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_register_0_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_register_1_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_src_gvmi_qpn_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_flex_parser_0_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_flex_parser_1_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_tnl_gtpu_flex_parser_0_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); + +#endif /* _DR_STE_V1_ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c new file mode 100644 index 000000000000..c60fddd125d2 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c @@ -0,0 +1,231 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
*/ + +#include "dr_ste_v1.h" + +enum { + DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_0 = 0x00, + DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1 = 0x01, + DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_2 = 0x02, + DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_0 = 0x08, + DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_1 = 0x09, + DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0 = 0x0e, + DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0 = 0x18, + DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_1 = 0x19, + DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_0 = 0x40, + DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_1 = 0x41, + DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_0 = 0x44, + DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_1 = 0x45, + DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_2 = 0x46, + DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_3 = 0x47, + DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_0 = 0x4c, + DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_1 = 0x4d, + DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_2 = 0x4e, + DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_3 = 0x4f, + DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_0 = 0x5e, + DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_1 = 0x5f, + DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_0 = 0x6f, + DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_1 = 0x70, + DR_STE_V2_ACTION_MDFY_FLD_METADATA_2_CQE = 0x7b, + DR_STE_V2_ACTION_MDFY_FLD_GNRL_PURPOSE = 0x7c, + DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_0 = 0x90, + DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_1 = 0x91, + DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_0 = 0x92, + DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_1 = 0x93, + DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_0 = 0x94, + DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_1 = 0x95, +}; + +static const struct mlx5dr_ste_action_modify_field dr_ste_v2_action_modify_field_arr[] = { + [MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_0, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_1, .start = 16, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1, .start = 0, .end = 15, + }, + [MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_0, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1, .start = 16, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_OUT_IP_DSCP] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 18, .end = 23, + }, + [MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_1, .start = 16, .end = 24, + .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP, + }, + [MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31, + .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP, + }, + [MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15, + .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP, + }, + [MLX5_ACTION_IN_FIELD_OUT_IP_TTL] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4, + }, + [MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31, + .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP, + }, + [MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15, + .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP, + }, + [MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96] = { + .hw_field 
= DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_0, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_1, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_2, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_3, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_0, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_1, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_2, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_3, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_SIPV4] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_0, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4, + }, + [MLX5_ACTION_IN_FIELD_OUT_DIPV4] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_1, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4, + }, + [MLX5_ACTION_IN_FIELD_METADATA_REG_A] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_GNRL_PURPOSE, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_METADATA_REG_B] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_METADATA_2_CQE, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_0, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_1, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_0, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_1, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_0, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_1, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_0, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_1, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_2, .start = 0, .end = 15, + }, + [MLX5_ACTION_IN_FIELD_OUT_EMD_31_0] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_1, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_OUT_EMD_47_32] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_0, .start = 0, .end = 15, + }, +}; + +static struct mlx5dr_ste_ctx ste_ctx_v2 = { + /* Builders */ + .build_eth_l2_src_dst_init = &dr_ste_v1_build_eth_l2_src_dst_init, + .build_eth_l3_ipv6_src_init = &dr_ste_v1_build_eth_l3_ipv6_src_init, + .build_eth_l3_ipv6_dst_init = 
&dr_ste_v1_build_eth_l3_ipv6_dst_init, + .build_eth_l3_ipv4_5_tuple_init = &dr_ste_v1_build_eth_l3_ipv4_5_tuple_init, + .build_eth_l2_src_init = &dr_ste_v1_build_eth_l2_src_init, + .build_eth_l2_dst_init = &dr_ste_v1_build_eth_l2_dst_init, + .build_eth_l2_tnl_init = &dr_ste_v1_build_eth_l2_tnl_init, + .build_eth_l3_ipv4_misc_init = &dr_ste_v1_build_eth_l3_ipv4_misc_init, + .build_eth_ipv6_l3_l4_init = &dr_ste_v1_build_eth_ipv6_l3_l4_init, + .build_mpls_init = &dr_ste_v1_build_mpls_init, + .build_tnl_gre_init = &dr_ste_v1_build_tnl_gre_init, + .build_tnl_mpls_init = &dr_ste_v1_build_tnl_mpls_init, + .build_tnl_mpls_over_udp_init = &dr_ste_v1_build_tnl_mpls_over_udp_init, + .build_tnl_mpls_over_gre_init = &dr_ste_v1_build_tnl_mpls_over_gre_init, + .build_icmp_init = &dr_ste_v1_build_icmp_init, + .build_general_purpose_init = &dr_ste_v1_build_general_purpose_init, + .build_eth_l4_misc_init = &dr_ste_v1_build_eth_l4_misc_init, + .build_tnl_vxlan_gpe_init = &dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init, + .build_tnl_geneve_init = &dr_ste_v1_build_flex_parser_tnl_geneve_init, + .build_tnl_geneve_tlv_opt_init = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init, + .build_tnl_geneve_tlv_opt_exist_init = + &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init, + .build_register_0_init = &dr_ste_v1_build_register_0_init, + .build_register_1_init = &dr_ste_v1_build_register_1_init, + .build_src_gvmi_qpn_init = &dr_ste_v1_build_src_gvmi_qpn_init, + .build_flex_parser_0_init = &dr_ste_v1_build_flex_parser_0_init, + .build_flex_parser_1_init = &dr_ste_v1_build_flex_parser_1_init, + .build_tnl_gtpu_init = &dr_ste_v1_build_flex_parser_tnl_gtpu_init, + .build_tnl_header_0_1_init = &dr_ste_v1_build_tnl_header_0_1_init, + .build_tnl_gtpu_flex_parser_0_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_0_init, + .build_tnl_gtpu_flex_parser_1_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_init, + + /* Getters and Setters */ + .ste_init = &dr_ste_v1_init, + .set_next_lu_type = &dr_ste_v1_set_next_lu_type, + .get_next_lu_type = &dr_ste_v1_get_next_lu_type, + .set_miss_addr = &dr_ste_v1_set_miss_addr, + .get_miss_addr = &dr_ste_v1_get_miss_addr, + .set_hit_addr = &dr_ste_v1_set_hit_addr, + .set_byte_mask = &dr_ste_v1_set_byte_mask, + .get_byte_mask = &dr_ste_v1_get_byte_mask, + + /* Actions */ + .actions_caps = DR_STE_CTX_ACTION_CAP_TX_POP | + DR_STE_CTX_ACTION_CAP_RX_PUSH | + DR_STE_CTX_ACTION_CAP_RX_ENCAP, + .set_actions_rx = &dr_ste_v1_set_actions_rx, + .set_actions_tx = &dr_ste_v1_set_actions_tx, + .modify_field_arr_sz = ARRAY_SIZE(dr_ste_v2_action_modify_field_arr), + .modify_field_arr = dr_ste_v2_action_modify_field_arr, + .set_action_set = &dr_ste_v1_set_action_set, + .set_action_add = &dr_ste_v1_set_action_add, + .set_action_copy = &dr_ste_v1_set_action_copy, + .set_action_decap_l3_list = &dr_ste_v1_set_action_decap_l3_list, + + /* Send */ + .prepare_for_postsend = &dr_ste_v1_prepare_for_postsend, +}; + +struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v2(void) +{ + return &ste_ctx_v2; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c index 8ca110643cc0..f5f2d356e75f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c @@ -305,3 +305,8 @@ u32 mlx5dr_table_get_id(struct mlx5dr_table *tbl) { return tbl->table_id; } + +struct mlx5dr_table *mlx5dr_table_get_from_fs_ft(struct mlx5_flow_table *ft) +{ + return ft->fs_dr_table.dr_table; +} diff 
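The v2 STE context above is almost entirely a re-export of the v1 callbacks; only the action modify-field table is new. For orientation, a sketch of how the steering layer would pick a context per device steering format version — the dispatcher below is an assumption based on the mlx5dr_ste_get_ctx_v2() getter added here and its presumed v0/v1 counterparts, not part of this hunk:

/* Sketch, not part of this patch: per-version STE context selection. */
struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx(u8 version)
{
	if (version == MLX5_STEERING_FORMAT_CONNECTX_5)
		return mlx5dr_ste_get_ctx_v0();		/* assumed getter */
	else if (version == MLX5_STEERING_FORMAT_CONNECTX_6DX)
		return mlx5dr_ste_get_ctx_v1();		/* assumed getter */
	else if (version == MLX5_STEERING_FORMAT_CONNECTX_7)
		return mlx5dr_ste_get_ctx_v2();		/* added by this patch */

	return NULL;
}

This keeps ConnectX-7 support additive: v2 inherits every v1 behavior except the modify-header field layout.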
--git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h index 1b3d484b99be..88092fabf55b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h @@ -91,6 +91,7 @@ enum mlx5dr_ste_ctx_action_cap { DR_STE_CTX_ACTION_CAP_TX_POP = 1 << 0, DR_STE_CTX_ACTION_CAP_RX_PUSH = 1 << 1, DR_STE_CTX_ACTION_CAP_RX_ENCAP = 1 << 2, + DR_STE_CTX_ACTION_CAP_POP_MDFY = 1 << 3, }; enum { @@ -555,7 +556,9 @@ struct mlx5dr_match_spec { */ u32 tcp_dport:16; - u32 reserved_auto1:24; + u32 reserved_auto1:16; + u32 ipv4_ihl:4; + u32 reserved_auto2:4; u32 ttl_hoplimit:8; /* UDP source port.;tcp and udp sport/dport are mutually exclusive */ @@ -798,6 +801,16 @@ struct mlx5dr_match_param { (_misc3)->icmpv4_code || \ (_misc3)->icmpv4_header_data) +#define DR_MASK_IS_SRC_IP_SET(_spec) ((_spec)->src_ip_127_96 || \ + (_spec)->src_ip_95_64 || \ + (_spec)->src_ip_63_32 || \ + (_spec)->src_ip_31_0) + +#define DR_MASK_IS_DST_IP_SET(_spec) ((_spec)->dst_ip_127_96 || \ + (_spec)->dst_ip_95_64 || \ + (_spec)->dst_ip_63_32 || \ + (_spec)->dst_ip_31_0) + struct mlx5dr_esw_caps { u64 drop_icm_address_rx; u64 drop_icm_address_tx; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c index 033757bfdf64..045b0cf90063 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c @@ -233,7 +233,11 @@ static bool contain_vport_reformat_action(struct mlx5_flow_rule *dst) dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID; } -#define MLX5_FLOW_CONTEXT_ACTION_MAX 32 +/* We want to support a rule with 32 destinations, which means we need to + * account for 32 destinations plus usually a counter plus one more action + * for a multi-destination flow table. 
+ */ +#define MLX5_FLOW_CONTEXT_ACTION_MAX 34 static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *ft, struct mlx5_flow_group *group, @@ -403,9 +407,9 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, enum mlx5_flow_destination_type type = dst->dest_attr.type; u32 id; - if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX || - num_term_actions >= MLX5_FLOW_CONTEXT_ACTION_MAX) { - err = -ENOSPC; + if (fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX || + num_term_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) { + err = -EOPNOTSUPP; goto free_actions; } @@ -478,8 +482,9 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, MLX5_FLOW_DESTINATION_TYPE_COUNTER) continue; - if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) { - err = -ENOSPC; + if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX || + fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) { + err = -EOPNOTSUPP; goto free_actions; } @@ -499,14 +504,28 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, params.match_sz = match_sz; params.match_buf = (u64 *)fte->val; if (num_term_actions == 1) { - if (term_actions->reformat) + if (term_actions->reformat) { + if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) { + err = -EOPNOTSUPP; + goto free_actions; + } actions[num_actions++] = term_actions->reformat; + } + if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) { + err = -EOPNOTSUPP; + goto free_actions; + } actions[num_actions++] = term_actions->dest; } else if (num_term_actions > 1) { bool ignore_flow_level = !!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL); + if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX || + fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) { + err = -EOPNOTSUPP; + goto free_actions; + } tmp_action = mlx5dr_action_create_mult_dest_tbl(domain, term_actions, num_term_actions, @@ -739,7 +758,7 @@ static u32 mlx5_cmd_dr_get_capabilities(struct mlx5_flow_root_namespace *ns, enum fs_flow_table_type ft_type) { if (ft_type != FS_FT_FDB || - MLX5_CAP_GEN(ns->dev, steering_format_version) != MLX5_STEERING_FORMAT_CONNECTX_6DX) + MLX5_CAP_GEN(ns->dev, steering_format_version) == MLX5_STEERING_FORMAT_CONNECTX_5) return 0; return MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX | MLX5_FLOW_STEERING_CAP_VLAN_POP_ON_TX; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h index c7c93131b762..ec5cbec0d455 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h @@ -53,6 +53,9 @@ void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn, struct mlx5dr_table * mlx5dr_table_create(struct mlx5dr_domain *domain, u32 level, u32 flags); +struct mlx5dr_table * +mlx5dr_table_get_from_fs_ft(struct mlx5_flow_table *ft); + int mlx5dr_table_destroy(struct mlx5dr_table *table); u32 mlx5dr_table_get_id(struct mlx5dr_table *table); @@ -136,7 +139,7 @@ mlx5dr_is_supported(struct mlx5_core_dev *dev) (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner) || (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner_v2) && (MLX5_CAP_GEN(dev, steering_format_version) <= - MLX5_STEERING_FORMAT_CONNECTX_6DX))); + MLX5_STEERING_FORMAT_CONNECTX_7))); } /* buddy functions & structure */ @@ -160,6 +163,11 @@ struct mlx5dr_icm_buddy_mem { * sync_ste command sets them free. 
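Every new check above enforces one invariant: actions[] is a fixed array of MLX5_FLOW_CONTEXT_ACTION_MAX entries, and a rule that would exceed it is rejected with -EOPNOTSUPP (the rule is not offloadable at all, rather than hitting a transient out-of-space condition, hence the change away from -ENOSPC). The patch open-codes the bound check at each append site; as a hypothetical helper it would look like:

/* Hypothetical helper: the guard the patch open-codes before each append. */
static int dr_actions_push(struct mlx5dr_action *actions[],
			   int *num_actions,
			   struct mlx5dr_action *action)
{
	if (*num_actions >= MLX5_FLOW_CONTEXT_ACTION_MAX)
		return -EOPNOTSUPP;	/* rule cannot be offloaded */

	actions[(*num_actions)++] = action;
	return 0;
}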
*/ struct list_head hot_list; + + /* Memory optimisation */ + struct mlx5dr_ste *ste_arr; + struct list_head *miss_list; + u8 *hw_ste_arr; }; int mlx5dr_buddy_init(struct mlx5dr_icm_buddy_mem *buddy, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c index 01e9c412977c..8455e79bc44a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c @@ -31,7 +31,6 @@ */ #include <linux/kernel.h> -#include <linux/module.h> #include <linux/io-mapping.h> #include <linux/mlx5/driver.h> #include "mlx5_core.h" @@ -100,19 +99,21 @@ static struct mlx5_uars_page *alloc_uars_page(struct mlx5_core_dev *mdev, int err = -ENOMEM; phys_addr_t pfn; int bfregs; + int node; int i; bfregs = uars_per_sys_page(mdev) * MLX5_BFREGS_PER_UAR; - up = kzalloc(sizeof(*up), GFP_KERNEL); + node = mdev->priv.numa_node; + up = kzalloc_node(sizeof(*up), GFP_KERNEL, node); if (!up) return ERR_PTR(err); up->mdev = mdev; - up->reg_bitmap = bitmap_zalloc(bfregs, GFP_KERNEL); + up->reg_bitmap = bitmap_zalloc_node(bfregs, GFP_KERNEL, node); if (!up->reg_bitmap) goto error1; - up->fp_bitmap = bitmap_zalloc(bfregs, GFP_KERNEL); + up->fp_bitmap = bitmap_zalloc_node(bfregs, GFP_KERNEL, node); if (!up->fp_bitmap) goto error1; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index f45df5fbdcc0..0bf1d64644ba 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -177,17 +177,6 @@ void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core) } EXPORT_SYMBOL(mlxsw_core_driver_priv); -bool mlxsw_core_res_query_enabled(const struct mlxsw_core *mlxsw_core) -{ - return mlxsw_core->driver->res_query_enabled; -} -EXPORT_SYMBOL(mlxsw_core_res_query_enabled); - -bool mlxsw_core_temp_warn_enabled(const struct mlxsw_core *mlxsw_core) -{ - return mlxsw_core->driver->temp_warn_enabled; -} - bool mlxsw_core_fw_rev_minor_subminor_validate(const struct mlxsw_fw_rev *rev, const struct mlxsw_fw_rev *req_rev) @@ -223,6 +212,9 @@ static int mlxsw_core_trap_groups_set(struct mlxsw_core *mlxsw_core) int err; int i; + if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX)) + return 0; + for (i = 0; i < ARRAY_SIZE(mlxsw_core_trap_groups); i++) { mlxsw_reg_htgt_pack(htgt_pl, mlxsw_core_trap_groups[i], MLXSW_REG_HTGT_INVALID_POLICER, @@ -2036,7 +2028,7 @@ static int mlxsw_core_health_init(struct mlxsw_core *mlxsw_core) struct devlink_health_reporter *fw_fatal; int err; - if (!mlxsw_core->driver->fw_fatal_enabled) + if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX)) return 0; fw_fatal = devlink_health_reporter_create(devlink, &mlxsw_core_health_fw_fatal_ops, @@ -2066,7 +2058,7 @@ err_trap_register: static void mlxsw_core_health_fini(struct mlxsw_core *mlxsw_core) { - if (!mlxsw_core->driver->fw_fatal_enabled) + if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX)) return; mlxsw_core_health_fw_fatal_config(mlxsw_core, false); @@ -2086,7 +2078,6 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, const char *device_kind = mlxsw_bus_info->device_kind; struct mlxsw_core *mlxsw_core; struct mlxsw_driver *mlxsw_driver; - struct mlxsw_res *res; size_t alloc_size; int err; @@ -2112,8 +2103,8 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, mlxsw_core->bus_priv = bus_priv; mlxsw_core->bus_info = mlxsw_bus_info; - res = mlxsw_driver->res_query_enabled ? 
&mlxsw_core->res : NULL; - err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile, res); + err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile, + &mlxsw_core->res); if (err) goto err_bus_init; @@ -2522,6 +2513,9 @@ int mlxsw_core_trap_register(struct mlxsw_core *mlxsw_core, char hpkt_pl[MLXSW_REG_HPKT_LEN]; int err; + if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX)) + return 0; + err = mlxsw_core_listener_register(mlxsw_core, listener, priv, listener->enabled_on_register); if (err) @@ -2551,6 +2545,9 @@ void mlxsw_core_trap_unregister(struct mlxsw_core *mlxsw_core, { char hpkt_pl[MLXSW_REG_HPKT_LEN]; + if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX)) + return; + if (!listener->is_event) { mlxsw_reg_hpkt_pack(hpkt_pl, listener->dis_action, listener->trap_id, listener->dis_trap_group, @@ -3242,9 +3239,6 @@ int mlxsw_core_resources_query(struct mlxsw_core *mlxsw_core, char *mbox, u16 id; int err; - if (!res) - return 0; - mlxsw_cmd_mbox_zero(mbox); for (index = 0; index < MLXSW_CMD_QUERY_RESOURCES_MAX_QUERIES; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index 6d304092f4e7..16ee5e90973d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -35,10 +35,6 @@ unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core); void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core); -bool mlxsw_core_res_query_enabled(const struct mlxsw_core *mlxsw_core); - -bool mlxsw_core_temp_warn_enabled(const struct mlxsw_core *mlxsw_core); - bool mlxsw_core_fw_rev_minor_subminor_validate(const struct mlxsw_fw_rev *rev, const struct mlxsw_fw_rev *req_rev); @@ -406,9 +402,6 @@ struct mlxsw_driver { u8 txhdr_len; const struct mlxsw_config_profile *profile; - bool res_query_enabled; - bool fw_fatal_enabled; - bool temp_warn_enabled; }; int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core, diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c index 6ea4bf87be0b..29a74b8bd5b5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c @@ -87,6 +87,7 @@ mlxsw_env_validate_cable_ident(struct mlxsw_core *core, int id, bool *qsfp, *qsfp = true; break; case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_DD: + case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_OSFP: *qsfp = true; *cmis = true; break; @@ -303,6 +304,7 @@ int mlxsw_env_get_module_info(struct net_device *netdev, modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN / 2; break; case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_DD: + case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_OSFP: /* Use SFF_8636 as base type. ethtool should recognize specific * type through the identifier value. 
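Both core_env.c hunks above extend existing switch statements so OSFP modules take the QSFP-DD path: both identifiers designate CMIS-based modules, so SFF-8636 serves as the base EEPROM type and ethtool derives the exact type from the identifier byte. In outline (a paraphrase of the switch being extended, with the surrounding field assignments abbreviated):

switch (module_id) {
case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_DD:
case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_OSFP:	/* new in this patch */
	/* CMIS-based module: report SFF-8636 as the base type; ethtool
	 * recognizes the specific type through the identifier value.
	 */
	modinfo->type = ETH_MODULE_SFF_8636;
	break;
}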
*/ @@ -462,9 +464,6 @@ int mlxsw_env_reset_module(struct net_device *netdev, !(req & (ETH_RESET_PHY << ETH_RESET_SHARED_SHIFT))) return 0; - if (WARN_ON_ONCE(module >= mlxsw_env->module_count)) - return -EINVAL; - mutex_lock(&mlxsw_env->module_info_lock); err = __mlxsw_env_validate_module_type(mlxsw_core, module); @@ -510,9 +509,6 @@ mlxsw_env_get_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module, u32 status_bits; int err; - if (WARN_ON_ONCE(module >= mlxsw_env->module_count)) - return -EINVAL; - mutex_lock(&mlxsw_env->module_info_lock); err = __mlxsw_env_validate_module_type(mlxsw_core, module); @@ -620,9 +616,6 @@ mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module, bool low_power; int err = 0; - if (WARN_ON_ONCE(module >= mlxsw_env->module_count)) - return -EINVAL; - if (policy != ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH && policy != ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO) { NL_SET_ERR_MSG_MOD(extack, "Unsupported power mode policy"); @@ -831,9 +824,6 @@ static int mlxsw_env_temp_warn_event_register(struct mlxsw_core *mlxsw_core) { struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); - if (!mlxsw_core_temp_warn_enabled(mlxsw_core)) - return 0; - return mlxsw_core_trap_register(mlxsw_core, &mlxsw_env_temp_warn_listener, mlxsw_env); @@ -841,9 +831,6 @@ static int mlxsw_env_temp_warn_event_register(struct mlxsw_core *mlxsw_core) static void mlxsw_env_temp_warn_event_unregister(struct mlxsw_env *mlxsw_env) { - if (!mlxsw_core_temp_warn_enabled(mlxsw_env->core)) - return; - mlxsw_core_trap_unregister(mlxsw_env->core, &mlxsw_env_temp_warn_listener, mlxsw_env); } @@ -922,9 +909,6 @@ mlxsw_env_module_plug_event_register(struct mlxsw_core *mlxsw_core) { struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); - if (!mlxsw_core_temp_warn_enabled(mlxsw_core)) - return 0; - return mlxsw_core_trap_register(mlxsw_core, &mlxsw_env_module_plug_listener, mlxsw_env); @@ -933,9 +917,6 @@ mlxsw_env_module_plug_event_register(struct mlxsw_core *mlxsw_core) static void mlxsw_env_module_plug_event_unregister(struct mlxsw_env *mlxsw_env) { - if (!mlxsw_core_temp_warn_enabled(mlxsw_env->core)) - return; - mlxsw_core_trap_unregister(mlxsw_env->core, &mlxsw_env_module_plug_listener, mlxsw_env); @@ -966,9 +947,6 @@ mlxsw_env_module_overheat_counter_get(struct mlxsw_core *mlxsw_core, u8 module, { struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); - if (WARN_ON_ONCE(module >= mlxsw_env->module_count)) - return -EINVAL; - mutex_lock(&mlxsw_env->module_info_lock); *p_counter = mlxsw_env->module_info[module].module_overheat_counter; mutex_unlock(&mlxsw_env->module_info_lock); @@ -981,9 +959,6 @@ void mlxsw_env_module_port_map(struct mlxsw_core *mlxsw_core, u8 module) { struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); - if (WARN_ON_ONCE(module >= mlxsw_env->module_count)) - return; - mutex_lock(&mlxsw_env->module_info_lock); mlxsw_env->module_info[module].num_ports_mapped++; mutex_unlock(&mlxsw_env->module_info_lock); @@ -994,9 +969,6 @@ void mlxsw_env_module_port_unmap(struct mlxsw_core *mlxsw_core, u8 module) { struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); - if (WARN_ON_ONCE(module >= mlxsw_env->module_count)) - return; - mutex_lock(&mlxsw_env->module_info_lock); mlxsw_env->module_info[module].num_ports_mapped--; mutex_unlock(&mlxsw_env->module_info_lock); @@ -1008,9 +980,6 @@ int mlxsw_env_module_port_up(struct mlxsw_core *mlxsw_core, u8 module) struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); int err = 0; - if (WARN_ON_ONCE(module >= 
mlxsw_env->module_count)) - return -EINVAL; - mutex_lock(&mlxsw_env->module_info_lock); if (mlxsw_env->module_info[module].power_mode_policy != @@ -1040,9 +1009,6 @@ void mlxsw_env_module_port_down(struct mlxsw_core *mlxsw_core, u8 module) { struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); - if (WARN_ON_ONCE(module >= mlxsw_env->module_count)) - return; - mutex_lock(&mlxsw_env->module_info_lock); mlxsw_env->module_info[module].num_ports_up--; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c index d41afdfbd085..8b170ad92302 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c @@ -57,14 +57,14 @@ static ssize_t mlxsw_hwmon_temp_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct mlxsw_hwmon_attr *mlwsw_hwmon_attr = + struct mlxsw_hwmon_attr *mlxsw_hwmon_attr = container_of(attr, struct mlxsw_hwmon_attr, dev_attr); - struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon; + struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon; char mtmp_pl[MLXSW_REG_MTMP_LEN]; int temp, index; int err; - index = mlxsw_hwmon_get_attr_index(mlwsw_hwmon_attr->type_index, + index = mlxsw_hwmon_get_attr_index(mlxsw_hwmon_attr->type_index, mlxsw_hwmon->module_sensor_max); mlxsw_reg_mtmp_pack(mtmp_pl, index, false, false); err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl); @@ -80,14 +80,14 @@ static ssize_t mlxsw_hwmon_temp_max_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct mlxsw_hwmon_attr *mlwsw_hwmon_attr = + struct mlxsw_hwmon_attr *mlxsw_hwmon_attr = container_of(attr, struct mlxsw_hwmon_attr, dev_attr); - struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon; + struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon; char mtmp_pl[MLXSW_REG_MTMP_LEN]; int temp_max, index; int err; - index = mlxsw_hwmon_get_attr_index(mlwsw_hwmon_attr->type_index, + index = mlxsw_hwmon_get_attr_index(mlxsw_hwmon_attr->type_index, mlxsw_hwmon->module_sensor_max); mlxsw_reg_mtmp_pack(mtmp_pl, index, false, false); err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl); @@ -103,9 +103,9 @@ static ssize_t mlxsw_hwmon_temp_rst_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { - struct mlxsw_hwmon_attr *mlwsw_hwmon_attr = + struct mlxsw_hwmon_attr *mlxsw_hwmon_attr = container_of(attr, struct mlxsw_hwmon_attr, dev_attr); - struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon; + struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon; char mtmp_pl[MLXSW_REG_MTMP_LEN] = {0}; unsigned long val; int index; @@ -117,7 +117,7 @@ static ssize_t mlxsw_hwmon_temp_rst_store(struct device *dev, if (val != 1) return -EINVAL; - index = mlxsw_hwmon_get_attr_index(mlwsw_hwmon_attr->type_index, + index = mlxsw_hwmon_get_attr_index(mlxsw_hwmon_attr->type_index, mlxsw_hwmon->module_sensor_max); mlxsw_reg_mtmp_sensor_index_set(mtmp_pl, index); @@ -138,13 +138,13 @@ static ssize_t mlxsw_hwmon_fan_rpm_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct mlxsw_hwmon_attr *mlwsw_hwmon_attr = + struct mlxsw_hwmon_attr *mlxsw_hwmon_attr = container_of(attr, struct mlxsw_hwmon_attr, dev_attr); - struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon; + struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon; char mfsm_pl[MLXSW_REG_MFSM_LEN]; int err; - mlxsw_reg_mfsm_pack(mfsm_pl, mlwsw_hwmon_attr->type_index); + mlxsw_reg_mfsm_pack(mfsm_pl, 
mlxsw_hwmon_attr->type_index); err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mfsm), mfsm_pl); if (err) { dev_err(mlxsw_hwmon->bus_info->dev, "Failed to query fan\n"); @@ -157,9 +157,9 @@ static ssize_t mlxsw_hwmon_fan_fault_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct mlxsw_hwmon_attr *mlwsw_hwmon_attr = + struct mlxsw_hwmon_attr *mlxsw_hwmon_attr = container_of(attr, struct mlxsw_hwmon_attr, dev_attr); - struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon; + struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon; char fore_pl[MLXSW_REG_FORE_LEN]; bool fault; int err; @@ -169,7 +169,7 @@ static ssize_t mlxsw_hwmon_fan_fault_show(struct device *dev, dev_err(mlxsw_hwmon->bus_info->dev, "Failed to query fan\n"); return err; } - mlxsw_reg_fore_unpack(fore_pl, mlwsw_hwmon_attr->type_index, &fault); + mlxsw_reg_fore_unpack(fore_pl, mlxsw_hwmon_attr->type_index, &fault); return sprintf(buf, "%u\n", fault); } @@ -178,13 +178,13 @@ static ssize_t mlxsw_hwmon_pwm_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct mlxsw_hwmon_attr *mlwsw_hwmon_attr = + struct mlxsw_hwmon_attr *mlxsw_hwmon_attr = container_of(attr, struct mlxsw_hwmon_attr, dev_attr); - struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon; + struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon; char mfsc_pl[MLXSW_REG_MFSC_LEN]; int err; - mlxsw_reg_mfsc_pack(mfsc_pl, mlwsw_hwmon_attr->type_index, 0); + mlxsw_reg_mfsc_pack(mfsc_pl, mlxsw_hwmon_attr->type_index, 0); err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mfsc), mfsc_pl); if (err) { dev_err(mlxsw_hwmon->bus_info->dev, "Failed to query PWM\n"); @@ -198,9 +198,9 @@ static ssize_t mlxsw_hwmon_pwm_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { - struct mlxsw_hwmon_attr *mlwsw_hwmon_attr = + struct mlxsw_hwmon_attr *mlxsw_hwmon_attr = container_of(attr, struct mlxsw_hwmon_attr, dev_attr); - struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon; + struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon; char mfsc_pl[MLXSW_REG_MFSC_LEN]; unsigned long val; int err; @@ -211,7 +211,7 @@ static ssize_t mlxsw_hwmon_pwm_store(struct device *dev, if (val > 255) return -EINVAL; - mlxsw_reg_mfsc_pack(mfsc_pl, mlwsw_hwmon_attr->type_index, val); + mlxsw_reg_mfsc_pack(mfsc_pl, mlxsw_hwmon_attr->type_index, val); err = mlxsw_reg_write(mlxsw_hwmon->core, MLXSW_REG(mfsc), mfsc_pl); if (err) { dev_err(mlxsw_hwmon->bus_info->dev, "Failed to write PWM\n"); @@ -224,14 +224,14 @@ static int mlxsw_hwmon_module_temp_get(struct device *dev, struct device_attribute *attr, int *p_temp) { - struct mlxsw_hwmon_attr *mlwsw_hwmon_attr = + struct mlxsw_hwmon_attr *mlxsw_hwmon_attr = container_of(attr, struct mlxsw_hwmon_attr, dev_attr); - struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon; + struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon; char mtmp_pl[MLXSW_REG_MTMP_LEN]; u8 module; int err; - module = mlwsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count; + module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count; mlxsw_reg_mtmp_pack(mtmp_pl, MLXSW_REG_MTMP_MODULE_INDEX_MIN + module, false, false); err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl); @@ -261,15 +261,15 @@ static ssize_t mlxsw_hwmon_module_temp_fault_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct mlxsw_hwmon_attr *mlwsw_hwmon_attr = + struct mlxsw_hwmon_attr *mlxsw_hwmon_attr = container_of(attr, struct mlxsw_hwmon_attr, dev_attr); - struct 
mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon; + struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon; char mtbr_pl[MLXSW_REG_MTBR_LEN] = {0}; u8 module, fault; u16 temp; int err; - module = mlwsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count; + module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count; mlxsw_reg_mtbr_pack(mtbr_pl, MLXSW_REG_MTBR_BASE_MODULE_INDEX + module, 1); err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtbr), mtbr_pl); @@ -303,13 +303,13 @@ static int mlxsw_hwmon_module_temp_critical_get(struct device *dev, struct device_attribute *attr, int *p_temp) { - struct mlxsw_hwmon_attr *mlwsw_hwmon_attr = + struct mlxsw_hwmon_attr *mlxsw_hwmon_attr = container_of(attr, struct mlxsw_hwmon_attr, dev_attr); - struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon; + struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon; u8 module; int err; - module = mlwsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count; + module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count; err = mlxsw_env_module_temp_thresholds_get(mlxsw_hwmon->core, module, SFP_TEMP_HIGH_WARN, p_temp); if (err) { @@ -337,13 +337,13 @@ static int mlxsw_hwmon_module_temp_emergency_get(struct device *dev, struct device_attribute *attr, int *p_temp) { - struct mlxsw_hwmon_attr *mlwsw_hwmon_attr = + struct mlxsw_hwmon_attr *mlxsw_hwmon_attr = container_of(attr, struct mlxsw_hwmon_attr, dev_attr); - struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon; + struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon; u8 module; int err; - module = mlwsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count; + module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count; err = mlxsw_env_module_temp_thresholds_get(mlxsw_hwmon->core, module, SFP_TEMP_HIGH_ALARM, p_temp); if (err) { @@ -373,11 +373,11 @@ mlxsw_hwmon_module_temp_label_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct mlxsw_hwmon_attr *mlwsw_hwmon_attr = + struct mlxsw_hwmon_attr *mlxsw_hwmon_attr = container_of(attr, struct mlxsw_hwmon_attr, dev_attr); return sprintf(buf, "front panel %03u\n", - mlwsw_hwmon_attr->type_index); + mlxsw_hwmon_attr->type_index); } static ssize_t @@ -385,10 +385,10 @@ mlxsw_hwmon_gbox_temp_label_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct mlxsw_hwmon_attr *mlwsw_hwmon_attr = + struct mlxsw_hwmon_attr *mlxsw_hwmon_attr = container_of(attr, struct mlxsw_hwmon_attr, dev_attr); - struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon; - int index = mlwsw_hwmon_attr->type_index - + struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon; + int index = mlxsw_hwmon_attr->type_index - mlxsw_hwmon->module_sensor_max + 1; return sprintf(buf, "gearbox %03u\n", index); @@ -655,9 +655,6 @@ static int mlxsw_hwmon_module_init(struct mlxsw_hwmon *mlxsw_hwmon) u8 module_sensor_max; int i, err; - if (!mlxsw_core_res_query_enabled(mlxsw_hwmon->core)) - return 0; - mlxsw_reg_mgpir_pack(mgpir_pl); err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mgpir), mgpir_pl); if (err) diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c index b29824448aa8..05f54bd982c0 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c @@ -357,6 +357,10 @@ static int mlxsw_thermal_trend_get(struct thermal_zone_device *tzdev, return 0; } +static struct thermal_zone_params mlxsw_thermal_params = { + .no_hwmon = true, +}; + static struct 
thermal_zone_device_ops mlxsw_thermal_ops = { .bind = mlxsw_thermal_bind, .unbind = mlxsw_thermal_unbind, @@ -388,11 +392,11 @@ static int mlxsw_thermal_module_bind(struct thermal_zone_device *tzdev, trip->min_state, THERMAL_WEIGHT_DEFAULT); if (err < 0) - goto err_bind_cooling_device; + goto err_thermal_zone_bind_cooling_device; } return 0; -err_bind_cooling_device: +err_thermal_zone_bind_cooling_device: for (j = i - 1; j >= 0; j--) thermal_zone_unbind_cooling_device(tzdev, j, cdev); return err; @@ -678,7 +682,8 @@ mlxsw_thermal_module_tz_init(struct mlxsw_thermal_module *module_tz) MLXSW_THERMAL_TRIP_MASK, module_tz, &mlxsw_thermal_module_ops, - NULL, 0, + &mlxsw_thermal_params, + 0, module_tz->parent->polling_delay); if (IS_ERR(module_tz->tzdev)) { err = PTR_ERR(module_tz->tzdev); @@ -741,9 +746,6 @@ mlxsw_thermal_modules_init(struct device *dev, struct mlxsw_core *core, char mgpir_pl[MLXSW_REG_MGPIR_LEN]; int i, err; - if (!mlxsw_core_res_query_enabled(core)) - return 0; - mlxsw_reg_mgpir_pack(mgpir_pl); err = mlxsw_reg_query(core, MLXSW_REG(mgpir), mgpir_pl); if (err) @@ -761,7 +763,7 @@ mlxsw_thermal_modules_init(struct device *dev, struct mlxsw_core *core, for (i = 0; i < thermal->tz_module_num; i++) { err = mlxsw_thermal_module_init(dev, core, thermal, i); if (err) - goto err_unreg_tz_module_arr; + goto err_thermal_module_init; } for (i = 0; i < thermal->tz_module_num; i++) { @@ -770,12 +772,13 @@ mlxsw_thermal_modules_init(struct device *dev, struct mlxsw_core *core, continue; err = mlxsw_thermal_module_tz_init(module_tz); if (err) - goto err_unreg_tz_module_arr; + goto err_thermal_module_tz_init; } return 0; -err_unreg_tz_module_arr: +err_thermal_module_tz_init: +err_thermal_module_init: for (i = thermal->tz_module_num - 1; i >= 0; i--) mlxsw_thermal_module_fini(&thermal->tz_module_arr[i]); kfree(thermal->tz_module_arr); @@ -787,9 +790,6 @@ mlxsw_thermal_modules_fini(struct mlxsw_thermal *thermal) { int i; - if (!mlxsw_core_res_query_enabled(thermal->core)) - return; - for (i = thermal->tz_module_num - 1; i >= 0; i--) mlxsw_thermal_module_fini(&thermal->tz_module_arr[i]); kfree(thermal->tz_module_arr); @@ -808,7 +808,7 @@ mlxsw_thermal_gearbox_tz_init(struct mlxsw_thermal_module *gearbox_tz) MLXSW_THERMAL_TRIP_MASK, gearbox_tz, &mlxsw_thermal_gearbox_ops, - NULL, 0, + &mlxsw_thermal_params, 0, gearbox_tz->parent->polling_delay); if (IS_ERR(gearbox_tz->tzdev)) return PTR_ERR(gearbox_tz->tzdev); @@ -837,9 +837,6 @@ mlxsw_thermal_gearboxes_init(struct device *dev, struct mlxsw_core *core, int i; int err; - if (!mlxsw_core_res_query_enabled(core)) - return 0; - mlxsw_reg_mgpir_pack(mgpir_pl); err = mlxsw_reg_query(core, MLXSW_REG(mgpir), mgpir_pl); if (err) @@ -866,12 +863,12 @@ mlxsw_thermal_gearboxes_init(struct device *dev, struct mlxsw_core *core, gearbox_tz->parent = thermal; err = mlxsw_thermal_gearbox_tz_init(gearbox_tz); if (err) - goto err_unreg_tz_gearbox; + goto err_thermal_gearbox_tz_init; } return 0; -err_unreg_tz_gearbox: +err_thermal_gearbox_tz_init: for (i--; i >= 0; i--) mlxsw_thermal_gearbox_tz_fini(&thermal->tz_gearbox_arr[i]); kfree(thermal->tz_gearbox_arr); @@ -883,9 +880,6 @@ mlxsw_thermal_gearboxes_fini(struct mlxsw_thermal *thermal) { int i; - if (!mlxsw_core_res_query_enabled(thermal->core)) - return; - for (i = thermal->tz_gearbox_num - 1; i >= 0; i--) mlxsw_thermal_gearbox_tz_fini(&thermal->tz_gearbox_arr[i]); kfree(thermal->tz_gearbox_arr); @@ -915,7 +909,7 @@ int mlxsw_thermal_init(struct mlxsw_core *core, err = mlxsw_reg_query(thermal->core, 
MLXSW_REG(mfcr), mfcr_pl); if (err) { dev_err(dev, "Failed to probe PWMs\n"); - goto err_free_thermal; + goto err_reg_query; } mlxsw_reg_mfcr_unpack(mfcr_pl, &freq, &tacho_active, &pwm_active); @@ -929,14 +923,14 @@ int mlxsw_thermal_init(struct mlxsw_core *core, err = mlxsw_reg_query(thermal->core, MLXSW_REG(mfsl), mfsl_pl); if (err) - goto err_free_thermal; + goto err_reg_query; /* set the minimal RPMs to 0 */ mlxsw_reg_mfsl_tach_min_set(mfsl_pl, 0); err = mlxsw_reg_write(thermal->core, MLXSW_REG(mfsl), mfsl_pl); if (err) - goto err_free_thermal; + goto err_reg_write; } } for (i = 0; i < MLXSW_MFCR_PWMS_MAX; i++) { @@ -949,7 +943,7 @@ int mlxsw_thermal_init(struct mlxsw_core *core, if (IS_ERR(cdev)) { err = PTR_ERR(cdev); dev_err(dev, "Failed to register cooling device\n"); - goto err_unreg_cdevs; + goto err_thermal_cooling_device_register; } thermal->cdevs[i] = cdev; } @@ -968,43 +962,45 @@ int mlxsw_thermal_init(struct mlxsw_core *core, MLXSW_THERMAL_TRIP_MASK, thermal, &mlxsw_thermal_ops, - NULL, 0, + &mlxsw_thermal_params, 0, thermal->polling_delay); if (IS_ERR(thermal->tzdev)) { err = PTR_ERR(thermal->tzdev); dev_err(dev, "Failed to register thermal zone\n"); - goto err_unreg_cdevs; + goto err_thermal_zone_device_register; } err = mlxsw_thermal_modules_init(dev, core, thermal); if (err) - goto err_unreg_tzdev; + goto err_thermal_modules_init; err = mlxsw_thermal_gearboxes_init(dev, core, thermal); if (err) - goto err_unreg_modules_tzdev; + goto err_thermal_gearboxes_init; err = thermal_zone_device_enable(thermal->tzdev); if (err) - goto err_unreg_gearboxes; + goto err_thermal_zone_device_enable; *p_thermal = thermal; return 0; -err_unreg_gearboxes: +err_thermal_zone_device_enable: mlxsw_thermal_gearboxes_fini(thermal); -err_unreg_modules_tzdev: +err_thermal_gearboxes_init: mlxsw_thermal_modules_fini(thermal); -err_unreg_tzdev: +err_thermal_modules_init: if (thermal->tzdev) { thermal_zone_device_unregister(thermal->tzdev); thermal->tzdev = NULL; } -err_unreg_cdevs: +err_thermal_zone_device_register: +err_thermal_cooling_device_register: for (i = 0; i < MLXSW_MFCR_PWMS_MAX; i++) if (thermal->cdevs[i]) thermal_cooling_device_unregister(thermal->cdevs[i]); -err_free_thermal: +err_reg_write: +err_reg_query: devm_kfree(dev, thermal); return err; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c index 9ac8ce01c061..060209983438 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c +++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c @@ -461,7 +461,6 @@ static struct mlxsw_driver mlxsw_m_driver = { .init = mlxsw_m_init, .fini = mlxsw_m_fini, .profile = &mlxsw_m_config_profile, - .res_query_enabled = true, }; static const struct i2c_device_id mlxsw_m_i2c_id[] = { diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index eebd0479b2bc..67b1a2f8397f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -6784,12 +6784,14 @@ static inline void mlxsw_reg_ritr_counter_pack(char *payload, u32 index, set_type = MLXSW_REG_RITR_COUNTER_SET_TYPE_BASIC; else set_type = MLXSW_REG_RITR_COUNTER_SET_TYPE_NO_COUNT; - mlxsw_reg_ritr_egress_counter_set_type_set(payload, set_type); - if (egress) + if (egress) { + mlxsw_reg_ritr_egress_counter_set_type_set(payload, set_type); mlxsw_reg_ritr_egress_counter_index_set(payload, index); - else + } else { + mlxsw_reg_ritr_ingress_counter_set_type_set(payload, set_type); 
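The relabeling in core_thermal.c above follows the kernel convention of naming each error label after the call whose failure jumps to it, so the unwind sequence can be audited step by step. Generic shape (all names hypothetical):

	err = setup_a(dev);
	if (err)
		goto err_setup_a;

	err = setup_b(dev);
	if (err)
		goto err_setup_b;

	return 0;

err_setup_b:
	teardown_a(dev);	/* undo only what succeeded before the failure */
err_setup_a:
	return err;

Stacked labels with no code between them (as in err_reg_write/err_reg_query above) are fine: they simply record which failing calls have nothing of their own to unwind.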
mlxsw_reg_ritr_ingress_counter_index_set(payload, index); + } } static inline void mlxsw_reg_ritr_rif_pack(char *payload, u16 rif) @@ -10037,6 +10039,7 @@ enum mlxsw_reg_mcia_eeprom_module_info_id { MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_PLUS = 0x0D, MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP28 = 0x11, MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_DD = 0x18, + MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_OSFP = 0x19, }; enum mlxsw_reg_mcia_eeprom_module_info { @@ -11323,24 +11326,24 @@ enum mlxsw_reg_mgpir_device_type { MLXSW_REG_MGPIR_DEVICE_TYPE_GEARBOX_DIE, }; -/* device_type +/* mgpir_device_type * Access: RO */ MLXSW_ITEM32(reg, mgpir, device_type, 0x00, 24, 4); -/* devices_per_flash +/* mgpir_devices_per_flash * Number of devices of device_type per flash (can be shared by few devices). * Access: RO */ MLXSW_ITEM32(reg, mgpir, devices_per_flash, 0x00, 16, 8); -/* num_of_devices +/* mgpir_num_of_devices * Number of devices of device_type. * Access: RO */ MLXSW_ITEM32(reg, mgpir, num_of_devices, 0x00, 0, 8); -/* num_of_modules +/* mgpir_num_of_modules * Number of modules. * Access: RO */ diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index a4b94eecea98..7b7b17183d10 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -45,52 +45,49 @@ #include "spectrum_ptp.h" #include "spectrum_trap.h" +#define MLXSW_SP_FWREV_MINOR 2010 +#define MLXSW_SP_FWREV_SUBMINOR 1006 + #define MLXSW_SP1_FWREV_MAJOR 13 -#define MLXSW_SP1_FWREV_MINOR 2010 -#define MLXSW_SP1_FWREV_SUBMINOR 1006 #define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702 static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = { .major = MLXSW_SP1_FWREV_MAJOR, - .minor = MLXSW_SP1_FWREV_MINOR, - .subminor = MLXSW_SP1_FWREV_SUBMINOR, + .minor = MLXSW_SP_FWREV_MINOR, + .subminor = MLXSW_SP_FWREV_SUBMINOR, .can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR, }; #define MLXSW_SP1_FW_FILENAME \ "mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \ - "." __stringify(MLXSW_SP1_FWREV_MINOR) \ - "." __stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2" + "." __stringify(MLXSW_SP_FWREV_MINOR) \ + "." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2" #define MLXSW_SP2_FWREV_MAJOR 29 -#define MLXSW_SP2_FWREV_MINOR 2010 -#define MLXSW_SP2_FWREV_SUBMINOR 1006 static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = { .major = MLXSW_SP2_FWREV_MAJOR, - .minor = MLXSW_SP2_FWREV_MINOR, - .subminor = MLXSW_SP2_FWREV_SUBMINOR, + .minor = MLXSW_SP_FWREV_MINOR, + .subminor = MLXSW_SP_FWREV_SUBMINOR, }; #define MLXSW_SP2_FW_FILENAME \ "mellanox/mlxsw_spectrum2-" __stringify(MLXSW_SP2_FWREV_MAJOR) \ - "." __stringify(MLXSW_SP2_FWREV_MINOR) \ - "." __stringify(MLXSW_SP2_FWREV_SUBMINOR) ".mfa2" + "." __stringify(MLXSW_SP_FWREV_MINOR) \ + "." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2" #define MLXSW_SP3_FWREV_MAJOR 30 -#define MLXSW_SP3_FWREV_MINOR 2010 -#define MLXSW_SP3_FWREV_SUBMINOR 1006 static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = { .major = MLXSW_SP3_FWREV_MAJOR, - .minor = MLXSW_SP3_FWREV_MINOR, - .subminor = MLXSW_SP3_FWREV_SUBMINOR, + .minor = MLXSW_SP_FWREV_MINOR, + .subminor = MLXSW_SP_FWREV_SUBMINOR, }; #define MLXSW_SP3_FW_FILENAME \ "mellanox/mlxsw_spectrum3-" __stringify(MLXSW_SP3_FWREV_MAJOR) \ - "." __stringify(MLXSW_SP3_FWREV_MINOR) \ - "." __stringify(MLXSW_SP3_FWREV_SUBMINOR) ".mfa2" + "." __stringify(MLXSW_SP_FWREV_MINOR) \ + "." 
__stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2" static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum"; static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2"; @@ -3630,9 +3627,6 @@ static struct mlxsw_driver mlxsw_sp1_driver = { .ptp_transmitted = mlxsw_sp_ptp_transmitted, .txhdr_len = MLXSW_TXHDR_LEN, .profile = &mlxsw_sp1_config_profile, - .res_query_enabled = true, - .fw_fatal_enabled = true, - .temp_warn_enabled = true, }; static struct mlxsw_driver mlxsw_sp2_driver = { @@ -3670,9 +3664,6 @@ static struct mlxsw_driver mlxsw_sp2_driver = { .ptp_transmitted = mlxsw_sp_ptp_transmitted, .txhdr_len = MLXSW_TXHDR_LEN, .profile = &mlxsw_sp2_config_profile, - .res_query_enabled = true, - .fw_fatal_enabled = true, - .temp_warn_enabled = true, }; static struct mlxsw_driver mlxsw_sp3_driver = { @@ -3710,9 +3701,6 @@ static struct mlxsw_driver mlxsw_sp3_driver = { .ptp_transmitted = mlxsw_sp_ptp_transmitted, .txhdr_len = MLXSW_TXHDR_LEN, .profile = &mlxsw_sp2_config_profile, - .res_query_enabled = true, - .fw_fatal_enabled = true, - .temp_warn_enabled = true, }; static struct mlxsw_driver mlxsw_sp4_driver = { @@ -3748,9 +3736,6 @@ static struct mlxsw_driver mlxsw_sp4_driver = { .ptp_transmitted = mlxsw_sp_ptp_transmitted, .txhdr_len = MLXSW_TXHDR_LEN, .profile = &mlxsw_sp2_config_profile, - .res_query_enabled = true, - .fw_fatal_enabled = true, - .temp_warn_enabled = true, }; bool mlxsw_sp_port_dev_check(const struct net_device *dev) @@ -4838,6 +4823,22 @@ static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp, return 0; } +static bool mlxsw_sp_netdevice_event_is_router(unsigned long event) +{ + switch (event) { + case NETDEV_PRE_CHANGEADDR: + case NETDEV_CHANGEADDR: + case NETDEV_CHANGEMTU: + case NETDEV_OFFLOAD_XSTATS_ENABLE: + case NETDEV_OFFLOAD_XSTATS_DISABLE: + case NETDEV_OFFLOAD_XSTATS_REPORT_USED: + case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA: + return true; + default: + return false; + } +} + static int mlxsw_sp_netdevice_event(struct notifier_block *nb, unsigned long event, void *ptr) { @@ -4862,9 +4863,7 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *nb, else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev)) err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev, event, ptr); - else if (event == NETDEV_PRE_CHANGEADDR || - event == NETDEV_CHANGEADDR || - event == NETDEV_CHANGEMTU) + else if (mlxsw_sp_netdevice_event_is_router(event)) err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr); else if (mlxsw_sp_is_vrf_event(event, ptr)) err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c index 1a2fef2a5379..5d494fabf93d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c @@ -266,10 +266,10 @@ static int mlxsw_sp_dpipe_table_erif_counters_update(void *priv, bool enable) if (!rif) continue; if (enable) - mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif, + mlxsw_sp_rif_counter_alloc(rif, MLXSW_SP_RIF_COUNTER_EGRESS); else - mlxsw_sp_rif_counter_free(mlxsw_sp, rif, + mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_EGRESS); } mutex_unlock(&mlxsw_sp->router->lock); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c index f54af3d9a03b..e91fb205e0b4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c @@ -15,6 +15,46 @@ #include 
"spectrum.h" #include "core_acl_flex_keys.h" +static int mlxsw_sp_policer_validate(const struct flow_action *action, + const struct flow_action_entry *act, + struct netlink_ext_ack *extack) +{ + if (act->police.exceed.act_id != FLOW_ACTION_DROP) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when exceed action is not drop"); + return -EOPNOTSUPP; + } + + if (act->police.notexceed.act_id != FLOW_ACTION_PIPE && + act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when conform action is not pipe or ok"); + return -EOPNOTSUPP; + } + + if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT && + !flow_action_is_last_entry(action, act)) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when conform action is ok, but action is not last"); + return -EOPNOTSUPP; + } + + if (act->police.peakrate_bytes_ps || + act->police.avrate || act->police.overhead) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when peakrate/avrate/overhead is configured"); + return -EOPNOTSUPP; + } + + if (act->police.rate_pkt_ps) { + NL_SET_ERR_MSG_MOD(extack, + "QoS offload not support packets per second"); + return -EOPNOTSUPP; + } + + return 0; +} + static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_flow_block *block, struct mlxsw_sp_acl_rule_info *rulei, @@ -191,10 +231,9 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, return -EOPNOTSUPP; } - if (act->police.rate_pkt_ps) { - NL_SET_ERR_MSG_MOD(extack, "QoS offload not support packets per second"); - return -EOPNOTSUPP; - } + err = mlxsw_sp_policer_validate(flow_action, act, extack); + if (err) + return err; /* The kernel might adjust the requested burst size so * that it is not exactly a power of two. 
Re-adjust it diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index d40762cfc453..79deb19e3a19 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -225,6 +225,64 @@ int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp, return 0; } +struct mlxsw_sp_rif_counter_set_basic { + u64 good_unicast_packets; + u64 good_multicast_packets; + u64 good_broadcast_packets; + u64 good_unicast_bytes; + u64 good_multicast_bytes; + u64 good_broadcast_bytes; + u64 error_packets; + u64 discard_packets; + u64 error_bytes; + u64 discard_bytes; +}; + +static int +mlxsw_sp_rif_counter_fetch_clear(struct mlxsw_sp_rif *rif, + enum mlxsw_sp_rif_counter_dir dir, + struct mlxsw_sp_rif_counter_set_basic *set) +{ + struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp; + char ricnt_pl[MLXSW_REG_RICNT_LEN]; + unsigned int *p_counter_index; + int err; + + if (!mlxsw_sp_rif_counter_valid_get(rif, dir)) + return -EINVAL; + + p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir); + if (!p_counter_index) + return -EINVAL; + + mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index, + MLXSW_REG_RICNT_OPCODE_CLEAR); + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl); + if (err) + return err; + + if (!set) + return 0; + +#define MLXSW_SP_RIF_COUNTER_EXTRACT(NAME) \ + (set->NAME = mlxsw_reg_ricnt_ ## NAME ## _get(ricnt_pl)) + + MLXSW_SP_RIF_COUNTER_EXTRACT(good_unicast_packets); + MLXSW_SP_RIF_COUNTER_EXTRACT(good_multicast_packets); + MLXSW_SP_RIF_COUNTER_EXTRACT(good_broadcast_packets); + MLXSW_SP_RIF_COUNTER_EXTRACT(good_unicast_bytes); + MLXSW_SP_RIF_COUNTER_EXTRACT(good_multicast_bytes); + MLXSW_SP_RIF_COUNTER_EXTRACT(good_broadcast_bytes); + MLXSW_SP_RIF_COUNTER_EXTRACT(error_packets); + MLXSW_SP_RIF_COUNTER_EXTRACT(discard_packets); + MLXSW_SP_RIF_COUNTER_EXTRACT(error_bytes); + MLXSW_SP_RIF_COUNTER_EXTRACT(discard_bytes); + +#undef MLXSW_SP_RIF_COUNTER_EXTRACT + + return 0; +} + static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp, unsigned int counter_index) { @@ -235,16 +293,20 @@ static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp, return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl); } -int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_rif *rif, +int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp_rif *rif, enum mlxsw_sp_rif_counter_dir dir) { + struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp; unsigned int *p_counter_index; int err; + if (mlxsw_sp_rif_counter_valid_get(rif, dir)) + return 0; + p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir); if (!p_counter_index) return -EINVAL; + err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF, p_counter_index); if (err) @@ -268,10 +330,10 @@ err_counter_clear: return err; } -void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_rif *rif, +void mlxsw_sp_rif_counter_free(struct mlxsw_sp_rif *rif, enum mlxsw_sp_rif_counter_dir dir) { + struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp; unsigned int *p_counter_index; if (!mlxsw_sp_rif_counter_valid_get(rif, dir)) @@ -296,14 +358,12 @@ static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif) if (!devlink_dpipe_table_counter_enabled(devlink, MLXSW_SP_DPIPE_TABLE_NAME_ERIF)) return; - mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS); + mlxsw_sp_rif_counter_alloc(rif, MLXSW_SP_RIF_COUNTER_EGRESS); } static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif) { 
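mlxsw_sp_rif_counter_fetch_clear() above queries RICNT with MLXSW_REG_RICNT_OPCODE_CLEAR, so each successful read returns the delta since the previous read and zeroes the hardware counter. Any caller that wants running totals must therefore accumulate across reads. A sketch of that accumulation for the RX side, using the counter-set struct introduced by the patch (the helper itself is hypothetical):

/* Hypothetical accumulator: RICNT reads with the CLEAR opcode are
 * destructive, so totals are built up from successive deltas.
 */
static void rif_rx_stats_accumulate(struct rtnl_hw_stats64 *total,
				    const struct mlxsw_sp_rif_counter_set_basic *d)
{
	total->rx_packets += d->good_unicast_packets +
			     d->good_multicast_packets +
			     d->good_broadcast_packets;
	total->rx_bytes   += d->good_unicast_bytes +
			     d->good_multicast_bytes +
			     d->good_broadcast_bytes;
	total->rx_errors  += d->error_packets;
	total->rx_dropped += d->discard_packets;
}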
- struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp; - - mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS); + mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_EGRESS); } #define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1) @@ -8148,6 +8208,166 @@ u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif) return lb_rif->ul_rif_id; } +static bool +mlxsw_sp_router_port_l3_stats_enabled(struct mlxsw_sp_rif *rif) +{ + return mlxsw_sp_rif_counter_valid_get(rif, + MLXSW_SP_RIF_COUNTER_EGRESS) && + mlxsw_sp_rif_counter_valid_get(rif, + MLXSW_SP_RIF_COUNTER_INGRESS); +} + +static int +mlxsw_sp_router_port_l3_stats_enable(struct mlxsw_sp_rif *rif) +{ + int err; + + err = mlxsw_sp_rif_counter_alloc(rif, MLXSW_SP_RIF_COUNTER_INGRESS); + if (err) + return err; + + /* Clear stale data. */ + err = mlxsw_sp_rif_counter_fetch_clear(rif, + MLXSW_SP_RIF_COUNTER_INGRESS, + NULL); + if (err) + goto err_clear_ingress; + + err = mlxsw_sp_rif_counter_alloc(rif, MLXSW_SP_RIF_COUNTER_EGRESS); + if (err) + goto err_alloc_egress; + + /* Clear stale data. */ + err = mlxsw_sp_rif_counter_fetch_clear(rif, + MLXSW_SP_RIF_COUNTER_EGRESS, + NULL); + if (err) + goto err_clear_egress; + + return 0; + +err_clear_egress: + mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_EGRESS); +err_alloc_egress: +err_clear_ingress: + mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_INGRESS); + return err; +} + +static void +mlxsw_sp_router_port_l3_stats_disable(struct mlxsw_sp_rif *rif) +{ + mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_EGRESS); + mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_INGRESS); +} + +static void +mlxsw_sp_router_port_l3_stats_report_used(struct mlxsw_sp_rif *rif, + struct netdev_notifier_offload_xstats_info *info) +{ + if (!mlxsw_sp_router_port_l3_stats_enabled(rif)) + return; + netdev_offload_xstats_report_used(info->report_used); +} + +static int +mlxsw_sp_router_port_l3_stats_fetch(struct mlxsw_sp_rif *rif, + struct rtnl_hw_stats64 *p_stats) +{ + struct mlxsw_sp_rif_counter_set_basic ingress; + struct mlxsw_sp_rif_counter_set_basic egress; + int err; + + err = mlxsw_sp_rif_counter_fetch_clear(rif, + MLXSW_SP_RIF_COUNTER_INGRESS, + &ingress); + if (err) + return err; + + err = mlxsw_sp_rif_counter_fetch_clear(rif, + MLXSW_SP_RIF_COUNTER_EGRESS, + &egress); + if (err) + return err; + +#define MLXSW_SP_ROUTER_ALL_GOOD(SET, SFX) \ + ((SET.good_unicast_ ## SFX) + \ + (SET.good_multicast_ ## SFX) + \ + (SET.good_broadcast_ ## SFX)) + + p_stats->rx_packets = MLXSW_SP_ROUTER_ALL_GOOD(ingress, packets); + p_stats->tx_packets = MLXSW_SP_ROUTER_ALL_GOOD(egress, packets); + p_stats->rx_bytes = MLXSW_SP_ROUTER_ALL_GOOD(ingress, bytes); + p_stats->tx_bytes = MLXSW_SP_ROUTER_ALL_GOOD(egress, bytes); + p_stats->rx_errors = ingress.error_packets; + p_stats->tx_errors = egress.error_packets; + p_stats->rx_dropped = ingress.discard_packets; + p_stats->tx_dropped = egress.discard_packets; + p_stats->multicast = ingress.good_multicast_packets + + ingress.good_broadcast_packets; + +#undef MLXSW_SP_ROUTER_ALL_GOOD + + return 0; +} + +static int +mlxsw_sp_router_port_l3_stats_report_delta(struct mlxsw_sp_rif *rif, + struct netdev_notifier_offload_xstats_info *info) +{ + struct rtnl_hw_stats64 stats = {}; + int err; + + if (!mlxsw_sp_router_port_l3_stats_enabled(rif)) + return 0; + + err = mlxsw_sp_router_port_l3_stats_fetch(rif, &stats); + if (err) + return err; + + netdev_offload_xstats_report_delta(info->report_delta, &stats); + return 0; +} + +struct 
mlxsw_sp_router_hwstats_notify_work { + struct work_struct work; + struct net_device *dev; +}; + +static void mlxsw_sp_router_hwstats_notify_work(struct work_struct *work) +{ + struct mlxsw_sp_router_hwstats_notify_work *hws_work = + container_of(work, struct mlxsw_sp_router_hwstats_notify_work, + work); + + rtnl_lock(); + rtnl_offload_xstats_notify(hws_work->dev); + rtnl_unlock(); + dev_put(hws_work->dev); + kfree(hws_work); +} + +static void +mlxsw_sp_router_hwstats_notify_schedule(struct net_device *dev) +{ + struct mlxsw_sp_router_hwstats_notify_work *hws_work; + + /* To collect notification payload, the core ends up sending another + * notifier block message, which would deadlock on the attempt to + * acquire the router lock again. Just postpone the notification until + * later. + */ + + hws_work = kzalloc(sizeof(*hws_work), GFP_KERNEL); + if (!hws_work) + return; + + INIT_WORK(&hws_work->work, mlxsw_sp_router_hwstats_notify_work); + dev_hold(dev); + hws_work->dev = dev; + mlxsw_core_schedule_work(&hws_work->work); +} + int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif) { return rif->dev->ifindex; @@ -8158,6 +8378,16 @@ const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif) return rif->dev; } +static void mlxsw_sp_rif_push_l3_stats(struct mlxsw_sp_rif *rif) +{ + struct rtnl_hw_stats64 stats = {}; + + if (!mlxsw_sp_router_port_l3_stats_fetch(rif, &stats)) + netdev_offload_xstats_push_delta(rif->dev, + NETDEV_OFFLOAD_XSTATS_TYPE_L3, + &stats); +} + static struct mlxsw_sp_rif * mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp, const struct mlxsw_sp_rif_params *params, @@ -8218,10 +8448,19 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp, goto err_mr_rif_add; } - mlxsw_sp_rif_counters_alloc(rif); + if (netdev_offload_xstats_enabled(rif->dev, + NETDEV_OFFLOAD_XSTATS_TYPE_L3)) { + err = mlxsw_sp_router_port_l3_stats_enable(rif); + if (err) + goto err_stats_enable; + mlxsw_sp_router_hwstats_notify_schedule(rif->dev); + } else { + mlxsw_sp_rif_counters_alloc(rif); + } return rif; +err_stats_enable: err_mr_rif_add: for (i--; i >= 0; i--) mlxsw_sp_mr_rif_del(vr->mr_table[i], rif); @@ -8251,7 +8490,15 @@ static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif) mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif); vr = &mlxsw_sp->router->vrs[rif->vr_id]; - mlxsw_sp_rif_counters_free(rif); + if (netdev_offload_xstats_enabled(rif->dev, + NETDEV_OFFLOAD_XSTATS_TYPE_L3)) { + mlxsw_sp_rif_push_l3_stats(rif); + mlxsw_sp_router_port_l3_stats_disable(rif); + mlxsw_sp_router_hwstats_notify_schedule(rif->dev); + } else { + mlxsw_sp_rif_counters_free(rif); + } + for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) mlxsw_sp_mr_rif_del(vr->mr_table[i], rif); ops->deconfigure(rif); @@ -9128,6 +9375,35 @@ static int mlxsw_sp_router_port_pre_changeaddr_event(struct mlxsw_sp_rif *rif, return -ENOBUFS; } +static int +mlxsw_sp_router_port_offload_xstats_cmd(struct mlxsw_sp_rif *rif, + unsigned long event, + struct netdev_notifier_offload_xstats_info *info) +{ + switch (info->type) { + case NETDEV_OFFLOAD_XSTATS_TYPE_L3: + break; + default: + return 0; + } + + switch (event) { + case NETDEV_OFFLOAD_XSTATS_ENABLE: + return mlxsw_sp_router_port_l3_stats_enable(rif); + case NETDEV_OFFLOAD_XSTATS_DISABLE: + mlxsw_sp_router_port_l3_stats_disable(rif); + return 0; + case NETDEV_OFFLOAD_XSTATS_REPORT_USED: + mlxsw_sp_router_port_l3_stats_report_used(rif, info); + return 0; + case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA: + return mlxsw_sp_router_port_l3_stats_report_delta(rif, info); + } + + WARN_ON_ONCE(1); + return 
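The deferral in mlxsw_sp_router_hwstats_notify_schedule() above is the standard escape from a notifier-within-notifier deadlock: instead of emitting the netdev notification while the router lock is held, a work item re-acquires RTNL later, outside any driver lock. Reduced to its essentials (names hypothetical, mirroring the code above):

struct deferred_notify {
	struct work_struct work;
	struct net_device *dev;
};

static void deferred_notify_fn(struct work_struct *work)
{
	struct deferred_notify *dn =
		container_of(work, struct deferred_notify, work);

	rtnl_lock();
	rtnl_offload_xstats_notify(dn->dev);	/* safe: no driver lock held */
	rtnl_unlock();
	dev_put(dn->dev);	/* drop the reference taken when scheduling */
	kfree(dn);
}

The dev_hold()/dev_put() pair guarantees the net_device cannot disappear between scheduling and execution of the work item.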
0; +} + int mlxsw_sp_netdevice_router_port_event(struct net_device *dev, unsigned long event, void *ptr) { @@ -9153,6 +9429,15 @@ int mlxsw_sp_netdevice_router_port_event(struct net_device *dev, case NETDEV_PRE_CHANGEADDR: err = mlxsw_sp_router_port_pre_changeaddr_event(rif, ptr); break; + case NETDEV_OFFLOAD_XSTATS_ENABLE: + case NETDEV_OFFLOAD_XSTATS_DISABLE: + case NETDEV_OFFLOAD_XSTATS_REPORT_USED: + case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA: + err = mlxsw_sp_router_port_offload_xstats_cmd(rif, event, ptr); + break; + default: + WARN_ON_ONCE(1); + break; } out: diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h index 99e8371a82a5..fa829658a11b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h @@ -159,11 +159,9 @@ int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_rif *rif, enum mlxsw_sp_rif_counter_dir dir, u64 *cnt); -void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_rif *rif, +void mlxsw_sp_rif_counter_free(struct mlxsw_sp_rif *rif, enum mlxsw_sp_rif_counter_dir dir); -int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_rif *rif, +int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp_rif *rif, enum mlxsw_sp_rif_counter_dir dir); struct mlxsw_sp_neigh_entry * mlxsw_sp_rif_neigh_next(struct mlxsw_sp_rif *rif, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c index f9671cc53002..b73466470f75 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c @@ -269,8 +269,7 @@ mlxsw_sp_span_entry_bridge_8021q(const struct net_device *br_dev, if (!vid && WARN_ON(br_vlan_get_pvid(br_dev, &vid))) return NULL; - if (!vid || - br_vlan_get_info(br_dev, vid, &vinfo) || + if (!vid || br_vlan_get_info(br_dev, vid, &vinfo) || !(vinfo.flags & BRIDGE_VLAN_INFO_BRENTRY)) return NULL; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index bffdb41fc4ed..3bf12092a8a2 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -1234,8 +1234,7 @@ static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port, if (netif_is_bridge_master(orig_dev)) { int err = 0; - if ((vlan->flags & BRIDGE_VLAN_INFO_BRENTRY) && - br_vlan_enabled(orig_dev)) + if (br_vlan_enabled(orig_dev)) err = mlxsw_sp_br_ban_rif_pvid_change(mlxsw_sp, orig_dev, vlan); if (!err) diff --git a/drivers/net/ethernet/micrel/ks8851_spi.c b/drivers/net/ethernet/micrel/ks8851_spi.c index 0303e727e99f..82d55fc27edc 100644 --- a/drivers/net/ethernet/micrel/ks8851_spi.c +++ b/drivers/net/ethernet/micrel/ks8851_spi.c @@ -293,7 +293,7 @@ static void ks8851_wrfifo_spi(struct ks8851_net *ks, struct sk_buff *txp, */ static void ks8851_rx_skb_spi(struct ks8851_net *ks, struct sk_buff *skb) { - netif_rx_ni(skb); + netif_rx(skb); } /** @@ -452,11 +452,9 @@ static int ks8851_probe_spi(struct spi_device *spi) return ks8851_probe_common(netdev, dev, msg_enable); } -static int ks8851_remove_spi(struct spi_device *spi) +static void ks8851_remove_spi(struct spi_device *spi) { ks8851_remove_common(&spi->dev); - - return 0; } static const struct of_device_id ks8851_match_table[] = { diff --git a/drivers/net/ethernet/microchip/enc28j60.c 
b/drivers/net/ethernet/microchip/enc28j60.c index 634ac7649c43..559ad94a44d0 100644 --- a/drivers/net/ethernet/microchip/enc28j60.c +++ b/drivers/net/ethernet/microchip/enc28j60.c @@ -975,7 +975,7 @@ static void enc28j60_hw_rx(struct net_device *ndev) /* update statistics */ ndev->stats.rx_packets++; ndev->stats.rx_bytes += len; - netif_rx_ni(skb); + netif_rx(skb); } } /* @@ -1612,15 +1612,13 @@ error_alloc: return ret; } -static int enc28j60_remove(struct spi_device *spi) +static void enc28j60_remove(struct spi_device *spi) { struct enc28j60_net *priv = spi_get_drvdata(spi); unregister_netdev(priv->netdev); free_irq(spi->irq, priv); free_netdev(priv->netdev); - - return 0; } static const struct of_device_id enc28j60_dt_ids[] = { diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c index b90efc80fb59..dc1840cb5b10 100644 --- a/drivers/net/ethernet/microchip/encx24j600.c +++ b/drivers/net/ethernet/microchip/encx24j600.c @@ -1093,7 +1093,7 @@ error_out: return ret; } -static int encx24j600_spi_remove(struct spi_device *spi) +static void encx24j600_spi_remove(struct spi_device *spi) { struct encx24j600_priv *priv = dev_get_drvdata(&spi->dev); @@ -1101,8 +1101,6 @@ static int encx24j600_spi_remove(struct spi_device *spi) kthread_stop(priv->kworker_task); free_netdev(priv->ndev); - - return 0; } static const struct spi_device_id encx24j600_spi_id_table[] = { diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index 8c6390d95158..5282d25a6f9b 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -18,6 +18,51 @@ #include "lan743x_main.h" #include "lan743x_ethtool.h" +#define MMD_ACCESS_ADDRESS 0 +#define MMD_ACCESS_WRITE 1 +#define MMD_ACCESS_READ 2 +#define MMD_ACCESS_READ_INC 3 + +static void pci11x1x_strap_get_status(struct lan743x_adapter *adapter) +{ + u32 chip_rev; + u32 strap; + + strap = lan743x_csr_read(adapter, STRAP_READ); + if (strap & STRAP_READ_USE_SGMII_EN_) { + if (strap & STRAP_READ_SGMII_EN_) + adapter->is_sgmii_en = true; + else + adapter->is_sgmii_en = false; + netif_dbg(adapter, drv, adapter->netdev, + "STRAP_READ: 0x%08X\n", strap); + } else { + chip_rev = lan743x_csr_read(adapter, FPGA_REV); + if (chip_rev) { + if (chip_rev & FPGA_SGMII_OP) + adapter->is_sgmii_en = true; + else + adapter->is_sgmii_en = false; + netif_dbg(adapter, drv, adapter->netdev, + "FPGA_REV: 0x%08X\n", chip_rev); + } else { + adapter->is_sgmii_en = false; + } + } +} + +static bool is_pci11x1x_chip(struct lan743x_adapter *adapter) +{ + struct lan743x_csr *csr = &adapter->csr; + u32 id_rev = csr->id_rev; + + if (((id_rev & 0xFFFF0000) == ID_REV_ID_A011_) || + ((id_rev & 0xFFFF0000) == ID_REV_ID_A041_)) { + return true; + } + return false; +} + static void lan743x_pci_cleanup(struct lan743x_adapter *adapter) { pci_release_selected_regions(adapter->pdev, @@ -250,7 +295,7 @@ static void lan743x_intr_shared_isr(void *context, u32 int_sts, u32 flags) } } if (int_sts & INT_BIT_ALL_TX_) { - for (channel = 0; channel < LAN743X_USED_TX_CHANNELS; + for (channel = 0; channel < adapter->used_tx_channels; channel++) { u32 int_bit = INT_BIT_DMA_TX_(channel); @@ -410,7 +455,7 @@ static u32 lan743x_intr_get_vector_flags(struct lan743x_adapter *adapter, { int index; - for (index = 0; index < LAN743X_MAX_VECTOR_COUNT; index++) { + for (index = 0; index < adapter->max_vector_count; index++) { if (adapter->intr.vector_list[index].int_mask & int_mask) return 
adapter->intr.vector_list[index].flags; } @@ -423,9 +468,12 @@ static void lan743x_intr_close(struct lan743x_adapter *adapter) int index = 0; lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_MAS_); - lan743x_csr_write(adapter, INT_VEC_EN_CLR, 0x000000FF); + if (adapter->is_pci11x1x) + lan743x_csr_write(adapter, INT_VEC_EN_CLR, 0x0000FFFF); + else + lan743x_csr_write(adapter, INT_VEC_EN_CLR, 0x000000FF); - for (index = 0; index < LAN743X_MAX_VECTOR_COUNT; index++) { + for (index = 0; index < intr->number_of_vectors; index++) { if (intr->flags & INTR_FLAG_IRQ_REQUESTED(index)) { lan743x_intr_unregister_isr(adapter, index); intr->flags &= ~INTR_FLAG_IRQ_REQUESTED(index); @@ -445,9 +493,11 @@ static void lan743x_intr_close(struct lan743x_adapter *adapter) static int lan743x_intr_open(struct lan743x_adapter *adapter) { - struct msix_entry msix_entries[LAN743X_MAX_VECTOR_COUNT]; + struct msix_entry msix_entries[PCI11X1X_MAX_VECTOR_COUNT]; struct lan743x_intr *intr = &adapter->intr; + unsigned int used_tx_channels; u32 int_vec_en_auto_clr = 0; + u8 max_vector_count; u32 int_vec_map0 = 0; u32 int_vec_map1 = 0; int ret = -ENODEV; @@ -457,13 +507,15 @@ static int lan743x_intr_open(struct lan743x_adapter *adapter) intr->number_of_vectors = 0; /* Try to set up MSIX interrupts */ + max_vector_count = adapter->max_vector_count; memset(&msix_entries[0], 0, - sizeof(struct msix_entry) * LAN743X_MAX_VECTOR_COUNT); - for (index = 0; index < LAN743X_MAX_VECTOR_COUNT; index++) + sizeof(struct msix_entry) * max_vector_count); + for (index = 0; index < max_vector_count; index++) msix_entries[index].entry = index; + used_tx_channels = adapter->used_tx_channels; ret = pci_enable_msix_range(adapter->pdev, msix_entries, 1, - 1 + LAN743X_USED_TX_CHANNELS + + 1 + used_tx_channels + LAN743X_USED_RX_CHANNELS); if (ret > 0) { @@ -556,8 +608,15 @@ static int lan743x_intr_open(struct lan743x_adapter *adapter) lan743x_csr_write(adapter, INT_MOD_CFG5, LAN743X_INT_MOD); lan743x_csr_write(adapter, INT_MOD_CFG6, LAN743X_INT_MOD); lan743x_csr_write(adapter, INT_MOD_CFG7, LAN743X_INT_MOD); - lan743x_csr_write(adapter, INT_MOD_MAP0, 0x00005432); - lan743x_csr_write(adapter, INT_MOD_MAP1, 0x00000001); + if (adapter->is_pci11x1x) { + lan743x_csr_write(adapter, INT_MOD_CFG8, LAN743X_INT_MOD); + lan743x_csr_write(adapter, INT_MOD_CFG9, LAN743X_INT_MOD); + lan743x_csr_write(adapter, INT_MOD_MAP0, 0x00007654); + lan743x_csr_write(adapter, INT_MOD_MAP1, 0x00003210); + } else { + lan743x_csr_write(adapter, INT_MOD_MAP0, 0x00005432); + lan743x_csr_write(adapter, INT_MOD_MAP1, 0x00000001); + } lan743x_csr_write(adapter, INT_MOD_MAP2, 0x00FFFFFF); } @@ -570,8 +629,8 @@ static int lan743x_intr_open(struct lan743x_adapter *adapter) if (intr->number_of_vectors > 1) { int number_of_tx_vectors = intr->number_of_vectors - 1; - if (number_of_tx_vectors > LAN743X_USED_TX_CHANNELS) - number_of_tx_vectors = LAN743X_USED_TX_CHANNELS; + if (number_of_tx_vectors > used_tx_channels) + number_of_tx_vectors = used_tx_channels; flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ | LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C | LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK | @@ -609,9 +668,9 @@ static int lan743x_intr_open(struct lan743x_adapter *adapter) INT_VEC_EN_(vector)); } } - if ((intr->number_of_vectors - LAN743X_USED_TX_CHANNELS) > 1) { + if ((intr->number_of_vectors - used_tx_channels) > 1) { int number_of_rx_vectors = intr->number_of_vectors - - LAN743X_USED_TX_CHANNELS - 1; + used_tx_channels - 1; if (number_of_rx_vectors > LAN743X_USED_RX_CHANNELS) 
number_of_rx_vectors = LAN743X_USED_RX_CHANNELS; @@ -632,7 +691,7 @@ static int lan743x_intr_open(struct lan743x_adapter *adapter) LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR; } for (index = 0; index < number_of_rx_vectors; index++) { - int vector = index + 1 + LAN743X_USED_TX_CHANNELS; + int vector = index + 1 + used_tx_channels; u32 int_bit = INT_BIT_DMA_RX_(index); /* map RX interrupt to vector */ @@ -760,6 +819,96 @@ static int lan743x_mdiobus_write(struct mii_bus *bus, return ret; } +static u32 lan743x_mac_mmd_access(int id, int index, int op) +{ + u16 dev_addr; + u32 ret; + + dev_addr = (index >> 16) & 0x1f; + ret = (id << MAC_MII_ACC_PHY_ADDR_SHIFT_) & + MAC_MII_ACC_PHY_ADDR_MASK_; + ret |= (dev_addr << MAC_MII_ACC_MIIMMD_SHIFT_) & + MAC_MII_ACC_MIIMMD_MASK_; + if (op == MMD_ACCESS_WRITE) + ret |= MAC_MII_ACC_MIICMD_WRITE_; + else if (op == MMD_ACCESS_READ) + ret |= MAC_MII_ACC_MIICMD_READ_; + else if (op == MMD_ACCESS_READ_INC) + ret |= MAC_MII_ACC_MIICMD_READ_INC_; + else + ret |= MAC_MII_ACC_MIICMD_ADDR_; + ret |= (MAC_MII_ACC_MII_BUSY_ | MAC_MII_ACC_MIICL45_); + + return ret; +} + +static int lan743x_mdiobus_c45_read(struct mii_bus *bus, int phy_id, int index) +{ + struct lan743x_adapter *adapter = bus->priv; + u32 mmd_access; + int ret; + + /* confirm MII not busy */ + ret = lan743x_mac_mii_wait_till_not_busy(adapter); + if (ret < 0) + return ret; + if (index & MII_ADDR_C45) { + /* Load Register Address */ + lan743x_csr_write(adapter, MAC_MII_DATA, (u32)(index & 0xffff)); + mmd_access = lan743x_mac_mmd_access(phy_id, index, + MMD_ACCESS_ADDRESS); + lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access); + ret = lan743x_mac_mii_wait_till_not_busy(adapter); + if (ret < 0) + return ret; + /* Read Data */ + mmd_access = lan743x_mac_mmd_access(phy_id, index, + MMD_ACCESS_READ); + lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access); + ret = lan743x_mac_mii_wait_till_not_busy(adapter); + if (ret < 0) + return ret; + ret = lan743x_csr_read(adapter, MAC_MII_DATA); + return (int)(ret & 0xFFFF); + } + + ret = lan743x_mdiobus_read(bus, phy_id, index); + return ret; +} + +static int lan743x_mdiobus_c45_write(struct mii_bus *bus, + int phy_id, int index, u16 regval) +{ + struct lan743x_adapter *adapter = bus->priv; + u32 mmd_access; + int ret; + + /* confirm MII not busy */ + ret = lan743x_mac_mii_wait_till_not_busy(adapter); + if (ret < 0) + return ret; + if (index & MII_ADDR_C45) { + /* Load Register Address */ + lan743x_csr_write(adapter, MAC_MII_DATA, (u32)(index & 0xffff)); + mmd_access = lan743x_mac_mmd_access(phy_id, index, + MMD_ACCESS_ADDRESS); + lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access); + ret = lan743x_mac_mii_wait_till_not_busy(adapter); + if (ret < 0) + return ret; + /* Write Data */ + lan743x_csr_write(adapter, MAC_MII_DATA, (u32)regval); + mmd_access = lan743x_mac_mmd_access(phy_id, index, + MMD_ACCESS_WRITE); + lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access); + ret = lan743x_mac_mii_wait_till_not_busy(adapter); + } else { + ret = lan743x_mdiobus_write(bus, phy_id, index, regval); + } + + return ret; +} +
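The `index & MII_ADDR_C45` test and the `(index >> 16) & 0x1f` extraction in lan743x_mac_mmd_access() follow the MDIO core's clause-45 convention: MII_ADDR_C45 is bit 30 of the register index, the MMD device address sits in bits 20:16, and the 16-bit register number occupies the low bits. A minimal caller-side sketch of how such an index is formed (illustrative only; example_c45_read and the PHY address/register operands are made up):

#include <linux/mdio.h>
#include <linux/phy.h>

/* Read register 0x20 of MMD 7 (auto-negotiation) on PHY address 1. */
static int example_c45_read(struct mii_bus *bus)
{
	/* mdiobus_c45_addr() packs MII_ADDR_C45 | (7 << 16) | 0x20 */
	u32 regnum = mdiobus_c45_addr(7, 0x20);

	/* The core takes the mdio lock and dispatches to bus->read(),
	 * i.e. lan743x_mdiobus_c45_read() on this hardware.
	 */
	return mdiobus_read(bus, 1, regnum);
}
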
static void lan743x_mac_set_address(struct lan743x_adapter *adapter, u8 *addr) { @@ -2491,7 +2640,8 @@ static int lan743x_netdev_close(struct net_device *netdev) struct lan743x_adapter *adapter = netdev_priv(netdev); int index; - lan743x_tx_close(&adapter->tx[0]); + for (index = 0; index < adapter->used_tx_channels; index++) + lan743x_tx_close(&adapter->tx[index]); for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) lan743x_rx_close(&adapter->rx[index]); @@ -2537,12 +2687,19 @@ static int lan743x_netdev_open(struct net_device *netdev) goto close_rx; } - ret = lan743x_tx_open(&adapter->tx[0]); - if (ret) - goto close_rx; - + for (index = 0; index < adapter->used_tx_channels; index++) { + ret = lan743x_tx_open(&adapter->tx[index]); + if (ret) + goto close_tx; + } return 0; +close_tx: + for (index = 0; index < adapter->used_tx_channels; index++) { + if (adapter->tx[index].ring_cpu_ptr) + lan743x_tx_close(&adapter->tx[index]); + } + close_rx: for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) { if (adapter->rx[index].ring_cpu_ptr @@ -2569,8 +2726,12 @@ static netdev_tx_t lan743x_netdev_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { struct lan743x_adapter *adapter = netdev_priv(netdev); + u8 ch = 0; - return lan743x_tx_xmit_frame(&adapter->tx[0], skb); + if (adapter->is_pci11x1x) + ch = skb->queue_mapping % PCI11X1X_USED_TX_CHANNELS; + + return lan743x_tx_xmit_frame(&adapter->tx[ch], skb); } static int lan743x_netdev_ioctl(struct net_device *netdev, @@ -2701,6 +2862,18 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter, int index; int ret; + adapter->is_pci11x1x = is_pci11x1x_chip(adapter); + if (adapter->is_pci11x1x) { + adapter->max_tx_channels = PCI11X1X_MAX_TX_CHANNELS; + adapter->used_tx_channels = PCI11X1X_USED_TX_CHANNELS; + adapter->max_vector_count = PCI11X1X_MAX_VECTOR_COUNT; + pci11x1x_strap_get_status(adapter); + } else { + adapter->max_tx_channels = LAN743X_MAX_TX_CHANNELS; + adapter->used_tx_channels = LAN743X_USED_TX_CHANNELS; + adapter->max_vector_count = LAN743X_MAX_VECTOR_COUNT; + } + adapter->intr.irq = adapter->pdev->irq; lan743x_csr_write(adapter, INT_EN_CLR, 0xFFFFFFFF); @@ -2731,15 +2904,19 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter, adapter->rx[index].channel_number = index; } - tx = &adapter->tx[0]; - tx->adapter = adapter; - tx->channel_number = 0; - spin_lock_init(&tx->ring_lock); + for (index = 0; index < adapter->used_tx_channels; index++) { + tx = &adapter->tx[index]; + tx->adapter = adapter; + tx->channel_number = index; + spin_lock_init(&tx->ring_lock); + } + return 0; } static int lan743x_mdiobus_init(struct lan743x_adapter *adapter) { + u32 sgmii_ctl; int ret; adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev); @@ -2749,9 +2926,35 @@ static int lan743x_mdiobus_init(struct lan743x_adapter *adapter) } adapter->mdiobus->priv = (void *)adapter; - adapter->mdiobus->read = lan743x_mdiobus_read; - adapter->mdiobus->write = lan743x_mdiobus_write; - adapter->mdiobus->name = "lan743x-mdiobus"; + if (adapter->is_pci11x1x) { + if (adapter->is_sgmii_en) { + sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL); + sgmii_ctl |= SGMII_CTL_SGMII_ENABLE_; + sgmii_ctl &= ~SGMII_CTL_SGMII_POWER_DN_; + lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl); + netif_dbg(adapter, drv, adapter->netdev, + "SGMII operation\n"); + } else { + sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL); + sgmii_ctl &= ~SGMII_CTL_SGMII_ENABLE_; + sgmii_ctl |= SGMII_CTL_SGMII_POWER_DN_; + lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl); + netif_dbg(adapter, drv, adapter->netdev, + "(R)GMII operation\n"); + } + + adapter->mdiobus->probe_capabilities = MDIOBUS_C22_C45; + adapter->mdiobus->read = lan743x_mdiobus_c45_read; + adapter->mdiobus->write = lan743x_mdiobus_c45_write; + adapter->mdiobus->name = "lan743x-mdiobus-c45"; + netif_dbg(adapter, drv, adapter->netdev, "lan743x-mdiobus-c45\n"); + } else { + adapter->mdiobus->read = lan743x_mdiobus_read; + adapter->mdiobus->write = 
lan743x_mdiobus_write; + adapter->mdiobus->name = "lan743x-mdiobus"; + netif_dbg(adapter, drv, adapter->netdev, "lan743x-mdiobus\n"); + } + snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE, "pci-%s", pci_name(adapter->pdev)); @@ -2786,8 +2989,17 @@ static int lan743x_pcidev_probe(struct pci_dev *pdev, struct net_device *netdev = NULL; int ret = -ENODEV; - netdev = devm_alloc_etherdev(&pdev->dev, - sizeof(struct lan743x_adapter)); + if (id->device == PCI_DEVICE_ID_SMSC_A011 || + id->device == PCI_DEVICE_ID_SMSC_A041) { + netdev = devm_alloc_etherdev_mqs(&pdev->dev, + sizeof(struct lan743x_adapter), + PCI11X1X_USED_TX_CHANNELS, + LAN743X_USED_RX_CHANNELS); + } else { + netdev = devm_alloc_etherdev(&pdev->dev, + sizeof(struct lan743x_adapter)); + } + if (!netdev) goto return_error; @@ -3056,6 +3268,8 @@ static const struct dev_pm_ops lan743x_pm_ops = { static const struct pci_device_id lan743x_pcidev_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7430) }, { PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7431) }, + { PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_A011) }, + { PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_A041) }, { 0, } }; diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h index aaf7aaeaba0c..2c8e76b4e1f7 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.h +++ b/drivers/net/ethernet/microchip/lan743x_main.h @@ -16,8 +16,13 @@ #define ID_REV_ID_MASK_ (0xFFFF0000) #define ID_REV_ID_LAN7430_ (0x74300000) #define ID_REV_ID_LAN7431_ (0x74310000) -#define ID_REV_IS_VALID_CHIP_ID_(id_rev) \ - (((id_rev) & 0xFFF00000) == 0x74300000) +#define ID_REV_ID_LAN743X_ (0x74300000) +#define ID_REV_ID_A011_ (0xA0110000) // PCI11010 +#define ID_REV_ID_A041_ (0xA0410000) // PCI11414 +#define ID_REV_ID_A0X1_ (0xA0010000) +#define ID_REV_IS_VALID_CHIP_ID_(id_rev) \ + ((((id_rev) & 0xFFF00000) == ID_REV_ID_LAN743X_) || \ + (((id_rev) & 0xFF0F0000) == ID_REV_ID_A0X1_)) #define ID_REV_CHIP_REV_MASK_ (0x0000FFFF) #define ID_REV_CHIP_REV_A0_ (0x00000000) #define ID_REV_CHIP_REV_B0_ (0x00000010) @@ -25,6 +30,17 @@ #define FPGA_REV (0x04) #define FPGA_REV_GET_MINOR_(fpga_rev) (((fpga_rev) >> 8) & 0x000000FF) #define FPGA_REV_GET_MAJOR_(fpga_rev) ((fpga_rev) & 0x000000FF) +#define FPGA_SGMII_OP BIT(24) + +#define STRAP_READ (0x0C) +#define STRAP_READ_USE_SGMII_EN_ BIT(22) +#define STRAP_READ_SGMII_EN_ BIT(6) +#define STRAP_READ_SGMII_REFCLK_ BIT(5) +#define STRAP_READ_SGMII_2_5G_ BIT(4) +#define STRAP_READ_BASE_X_ BIT(3) +#define STRAP_READ_RGMII_TXC_DELAY_EN_ BIT(2) +#define STRAP_READ_RGMII_RXC_DELAY_EN_ BIT(1) +#define STRAP_READ_ADV_PM_DISABLE_ BIT(0) #define HW_CFG (0x010) #define HW_CFG_RELOAD_TYPE_ALL_ (0x00000FC0) @@ -135,6 +151,13 @@ #define MAC_RX_ADDRL (0x11C) #define MAC_MII_ACC (0x120) +#define MAC_MII_ACC_MDC_CYCLE_SHIFT_ (16) +#define MAC_MII_ACC_MDC_CYCLE_MASK_ (0x00070000) +#define MAC_MII_ACC_MDC_CYCLE_2_5MHZ_ (0) +#define MAC_MII_ACC_MDC_CYCLE_5MHZ_ (1) +#define MAC_MII_ACC_MDC_CYCLE_12_5MHZ_ (2) +#define MAC_MII_ACC_MDC_CYCLE_25MHZ_ (3) +#define MAC_MII_ACC_MDC_CYCLE_1_25MHZ_ (4) #define MAC_MII_ACC_PHY_ADDR_SHIFT_ (11) #define MAC_MII_ACC_PHY_ADDR_MASK_ (0x0000F800) #define MAC_MII_ACC_MIIRINDA_SHIFT_ (6) @@ -143,6 +166,15 @@ #define MAC_MII_ACC_MII_WRITE_ (0x00000002) #define MAC_MII_ACC_MII_BUSY_ BIT(0) +#define MAC_MII_ACC_MIIMMD_SHIFT_ (6) +#define MAC_MII_ACC_MIIMMD_MASK_ (0x000007C0) +#define MAC_MII_ACC_MIICL45_ BIT(3) +#define MAC_MII_ACC_MIICMD_MASK_ (0x00000006) +#define 
MAC_MII_ACC_MIICMD_ADDR_ (0x00000000) +#define MAC_MII_ACC_MIICMD_WRITE_ (0x00000002) +#define MAC_MII_ACC_MIICMD_READ_ (0x00000004) +#define MAC_MII_ACC_MIICMD_READ_INC_ (0x00000006) + #define MAC_MII_DATA (0x124) #define MAC_EEE_TX_LPI_REQ_DLY_CNT (0x130) @@ -214,6 +246,11 @@ #define MAC_WUCSR2 (0x600) +#define SGMII_CTL (0x728) +#define SGMII_CTL_SGMII_ENABLE_ BIT(31) +#define SGMII_CTL_LINK_STATUS_SOURCE_ BIT(8) +#define SGMII_CTL_SGMII_POWER_DN_ BIT(1) + #define INT_STS (0x780) #define INT_BIT_DMA_RX_(channel) BIT(24 + (channel)) #define INT_BIT_ALL_RX_ (0x0F000000) @@ -261,6 +298,8 @@ #define INT_MOD_CFG5 (0x7D4) #define INT_MOD_CFG6 (0x7D8) #define INT_MOD_CFG7 (0x7DC) +#define INT_MOD_CFG8 (0x7E0) +#define INT_MOD_CFG9 (0x7E4) #define PTP_CMD_CTL (0x0A00) #define PTP_CMD_CTL_PTP_CLK_STP_NSEC_ BIT(6) @@ -541,10 +580,12 @@ #define LAN743X_MAX_RX_CHANNELS (4) #define LAN743X_MAX_TX_CHANNELS (1) +#define PCI11X1X_MAX_TX_CHANNELS (4) struct lan743x_adapter; #define LAN743X_USED_RX_CHANNELS (4) #define LAN743X_USED_TX_CHANNELS (1) +#define PCI11X1X_USED_TX_CHANNELS (4) #define LAN743X_INT_MOD (400) #if (LAN743X_USED_RX_CHANNELS > LAN743X_MAX_RX_CHANNELS) @@ -553,12 +594,17 @@ struct lan743x_adapter; #if (LAN743X_USED_TX_CHANNELS > LAN743X_MAX_TX_CHANNELS) #error Invalid LAN743X_USED_TX_CHANNELS #endif +#if (PCI11X1X_USED_TX_CHANNELS > PCI11X1X_MAX_TX_CHANNELS) +#error Invalid PCI11X1X_USED_TX_CHANNELS +#endif /* PCI */ /* SMSC acquired EFAR late 1990's, MCHP acquired SMSC 2012 */ #define PCI_VENDOR_ID_SMSC PCI_VENDOR_ID_EFAR #define PCI_DEVICE_ID_SMSC_LAN7430 (0x7430) #define PCI_DEVICE_ID_SMSC_LAN7431 (0x7431) +#define PCI_DEVICE_ID_SMSC_A011 (0xA011) +#define PCI_DEVICE_ID_SMSC_A041 (0xA041) #define PCI_CONFIG_LENGTH (0x1000) @@ -607,13 +653,14 @@ struct lan743x_vector { }; #define LAN743X_MAX_VECTOR_COUNT (8) +#define PCI11X1X_MAX_VECTOR_COUNT (16) struct lan743x_intr { int flags; unsigned int irq; - struct lan743x_vector vector_list[LAN743X_MAX_VECTOR_COUNT]; + struct lan743x_vector vector_list[PCI11X1X_MAX_VECTOR_COUNT]; int number_of_vectors; bool using_vectors; @@ -721,8 +768,13 @@ struct lan743x_adapter { u8 mac_address[ETH_ALEN]; struct lan743x_phy phy; - struct lan743x_tx tx[LAN743X_MAX_TX_CHANNELS]; - struct lan743x_rx rx[LAN743X_MAX_RX_CHANNELS]; + struct lan743x_tx tx[PCI11X1X_USED_TX_CHANNELS]; + struct lan743x_rx rx[LAN743X_USED_RX_CHANNELS]; + bool is_pci11x1x; + bool is_sgmii_en; + u8 max_tx_channels; + u8 used_tx_channels; + u8 max_vector_count; #define LAN743X_ADAPTER_FLAG_OTP BIT(0) u32 flags; diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c index 8b7a8d879083..ec082594bbbd 100644 --- a/drivers/net/ethernet/microchip/lan743x_ptp.c +++ b/drivers/net/ethernet/microchip/lan743x_ptp.c @@ -1307,21 +1307,21 @@ int lan743x_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) switch (config.tx_type) { case HWTSTAMP_TX_OFF: - for (index = 0; index < LAN743X_MAX_TX_CHANNELS; - index++) + for (index = 0; index < adapter->used_tx_channels; + index++) lan743x_tx_set_timestamping_mode(&adapter->tx[index], false, false); lan743x_ptp_set_sync_ts_insert(adapter, false); break; case HWTSTAMP_TX_ON: - for (index = 0; index < LAN743X_MAX_TX_CHANNELS; + for (index = 0; index < adapter->used_tx_channels; index++) lan743x_tx_set_timestamping_mode(&adapter->tx[index], true, false); lan743x_ptp_set_sync_ts_insert(adapter, false); break; case HWTSTAMP_TX_ONESTEP_SYNC: - for (index = 0; index < LAN743X_MAX_TX_CHANNELS; + for 
(index = 0; index < adapter->used_tx_channels; index++) lan743x_tx_set_timestamping_mode(&adapter->tx[index], true, true); diff --git a/drivers/net/ethernet/microchip/lan966x/Kconfig b/drivers/net/ethernet/microchip/lan966x/Kconfig index ac273f84b69e..4241ff0e5098 100644 --- a/drivers/net/ethernet/microchip/lan966x/Kconfig +++ b/drivers/net/ethernet/microchip/lan966x/Kconfig @@ -1,5 +1,6 @@ config LAN966X_SWITCH tristate "Lan966x switch driver" + depends on PTP_1588_CLOCK_OPTIONAL depends on HAS_IOMEM depends on OF depends on NET_SWITCHDEV diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c index d62484f14564..e1bcb28039dc 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c @@ -185,6 +185,9 @@ static int lan966x_port_inj_ready(struct lan966x *lan966x, u8 grp) { u32 val; + if (lan_rd(lan966x, QS_INJ_STATUS) & QS_INJ_STATUS_FIFO_RDY_SET(BIT(grp))) + return 0; + return readx_poll_timeout_atomic(lan966x_port_inj_status, lan966x, val, QS_INJ_STATUS_FIFO_RDY_GET(val) & BIT(grp), READL_SLEEP_US, READL_TIMEOUT_US); @@ -318,6 +321,7 @@ static void lan966x_ifh_set_timestamp(void *ifh, u64 timestamp) static int lan966x_port_xmit(struct sk_buff *skb, struct net_device *dev) { struct lan966x_port *port = netdev_priv(dev); + struct lan966x *lan966x = port->lan966x; __be32 ifh[IFH_LEN]; int err; @@ -338,7 +342,11 @@ static int lan966x_port_xmit(struct sk_buff *skb, struct net_device *dev) lan966x_ifh_set_timestamp(ifh, LAN966X_SKB_CB(skb)->ts_id); } - return lan966x_port_ifh_xmit(skb, ifh, dev); + spin_lock(&lan966x->tx_lock); + err = lan966x_port_ifh_xmit(skb, ifh, dev); + spin_unlock(&lan966x->tx_lock); + + return err; } static int lan966x_port_change_mtu(struct net_device *dev, int new_mtu) @@ -439,7 +447,8 @@ static bool lan966x_hw_offload(struct lan966x *lan966x, u32 port, ip_hdr(skb)->protocol == IPPROTO_IGMP) return false; - if (skb->protocol == htons(ETH_P_IPV6) && + if (IS_ENABLED(CONFIG_IPV6) && + skb->protocol == htons(ETH_P_IPV6) && ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) && !ipv6_mc_check_mld(skb)) return false; @@ -599,7 +608,9 @@ static irqreturn_t lan966x_xtr_irq_handler(int irq, void *args) skb->offload_fwd_mark = 0; } - netif_rx_ni(skb); + if (!skb_defer_rx_timestamp(skb)) + netif_rx(skb); + dev->stats.rx_bytes += len; dev->stats.rx_packets++; @@ -882,6 +893,8 @@ static void lan966x_init(struct lan966x *lan966x) lan_rmw(ANA_ANAINTR_INTR_ENA_SET(1), ANA_ANAINTR_INTR_ENA, lan966x, ANA_ANAINTR); + + spin_lock_init(&lan966x->tx_lock); } static int lan966x_ram_init(struct lan966x *lan966x) diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h index 058e43531818..ae282da1da74 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h @@ -108,6 +108,8 @@ struct lan966x { u8 base_mac[ETH_ALEN]; + spinlock_t tx_lock; /* lock for frame transmission */ + struct net_device *bridge; u16 bridge_mask; u16 bridge_fwd_mask; diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c b/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c index 9fce865287e7..e3555c94294d 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c @@ -419,6 +419,9 @@ static int lan966x_netdevice_event(struct notifier_block *nb, return 
notifier_from_errno(ret); } +/* We don't offload uppers such as LAG as bridge ports, so every device except + * the bridge itself is foreign. + */ static bool lan966x_foreign_dev_check(const struct net_device *dev, const struct net_device *foreign_dev) { @@ -426,10 +429,10 @@ static bool lan966x_foreign_dev_check(const struct net_device *dev, struct lan966x *lan966x = port->lan966x; if (netif_is_bridge_master(foreign_dev)) - if (lan966x->bridge != foreign_dev) - return true; + if (lan966x->bridge == foreign_dev) + return false; - return false; + return true; } static int lan966x_switchdev_event(struct notifier_block *nb, @@ -449,8 +452,7 @@ static int lan966x_switchdev_event(struct notifier_block *nb, err = switchdev_handle_fdb_event_to_device(dev, event, ptr, lan966x_netdevice_check, lan966x_foreign_dev_check, - lan966x_handle_fdb, - NULL); + lan966x_handle_fdb); return notifier_from_errno(err); } @@ -463,18 +465,6 @@ static int lan966x_handle_port_vlan_add(struct lan966x_port *port, const struct switchdev_obj_port_vlan *v = SWITCHDEV_OBJ_PORT_VLAN(obj); struct lan966x *lan966x = port->lan966x; - /* When adding a port to a vlan, we get a callback for the port but - * also for the bridge. When get the callback for the bridge just bail - * out. Then when the bridge is added to the vlan, then we get a - * callback here but in this case the flags has set: - * BRIDGE_VLAN_INFO_BRENTRY. In this case it means that the CPU - * port is added to the vlan, so the broadcast frames and unicast frames - * with dmac of the bridge should be foward to CPU. - */ - if (netif_is_bridge_master(obj->orig_dev) && - !(v->flags & BRIDGE_VLAN_INFO_BRENTRY)) - return 0; - if (!netif_is_bridge_master(obj->orig_dev)) lan966x_vlan_port_add_vlan(port, v->vid, v->flags & BRIDGE_VLAN_INFO_PVID, diff --git a/drivers/net/ethernet/microchip/sparx5/Makefile b/drivers/net/ethernet/microchip/sparx5/Makefile index c271e86ee292..e9dd348a6ebb 100644 --- a/drivers/net/ethernet/microchip/sparx5/Makefile +++ b/drivers/net/ethernet/microchip/sparx5/Makefile @@ -7,4 +7,5 @@ obj-$(CONFIG_SPARX5_SWITCH) += sparx5-switch.o sparx5-switch-objs := sparx5_main.o sparx5_packet.o \ sparx5_netdev.o sparx5_phylink.o sparx5_port.o sparx5_mactable.o sparx5_vlan.o \ - sparx5_switchdev.o sparx5_calendar.o sparx5_ethtool.o sparx5_fdma.o + sparx5_switchdev.o sparx5_calendar.o sparx5_ethtool.o sparx5_fdma.o \ + sparx5_ptp.o diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c index 10b866e9f726..6b0febcb7fa9 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c @@ -1183,6 +1183,39 @@ static void sparx5_config_port_stats(struct sparx5 *sparx5, int portno) sparx5, ANA_AC_PORT_STAT_CFG(portno, SPX5_PORT_POLICER_DROPS)); } +static int sparx5_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct sparx5_port *port = netdev_priv(dev); + struct sparx5 *sparx5 = port->sparx5; + struct sparx5_phc *phc; + + if (!sparx5->ptp) + return ethtool_op_get_ts_info(dev, info); + + phc = &sparx5->phc[SPARX5_PHC_PORT]; + + info->phc_index = phc->clock ? 
ptp_clock_index(phc->clock) : -1; + if (info->phc_index == -1) { + info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE; + return 0; + } + info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON) | + BIT(HWTSTAMP_TX_ONESTEP_SYNC); + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | + BIT(HWTSTAMP_FILTER_ALL); + + return 0; +} + const struct ethtool_ops sparx5_ethtool_ops = { .get_sset_count = sparx5_get_sset_count, .get_strings = sparx5_get_sset_strings, @@ -1194,6 +1227,7 @@ const struct ethtool_ops sparx5_ethtool_ops = { .get_eth_mac_stats = sparx5_get_eth_mac_stats, .get_eth_ctrl_stats = sparx5_get_eth_mac_ctrl_stats, .get_rmon_stats = sparx5_get_eth_rmon_stats, + .get_ts_info = sparx5_get_ts_info, }; int sparx_stats_init(struct sparx5 *sparx5) diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c b/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c index 7436f62fa152..2dc87584023a 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c @@ -240,6 +240,8 @@ static bool sparx5_fdma_rx_get_frame(struct sparx5 *sparx5, struct sparx5_rx *rx skb_pull(skb, IFH_LEN * sizeof(u32)); if (likely(!(skb->dev->features & NETIF_F_RXFCS))) skb_trim(skb, skb->len - ETH_FCS_LEN); + + sparx5_ptp_rxtstamp(sparx5, skb, fi.timestamp); skb->protocol = eth_type_trans(skb, skb->dev); /* Everything we see on an interface that is in the HW bridge * has already been forwarded diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c index 394de85d360d..5f7c7030ce03 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c @@ -190,6 +190,7 @@ static const struct sparx5_main_io_resource sparx5_main_iomap[] = { { TARGET_ASM, 0x10600000, 1 }, /* 0x610600000 */ { TARGET_GCB, 0x11010000, 2 }, /* 0x611010000 */ { TARGET_QS, 0x11030000, 2 }, /* 0x611030000 */ + { TARGET_PTP, 0x11040000, 2 }, /* 0x611040000 */ { TARGET_ANA_ACL, 0x11050000, 2 }, /* 0x611050000 */ { TARGET_LRN, 0x11060000, 2 }, /* 0x611060000 */ { TARGET_VCAP_SUPER, 0x11080000, 2 }, /* 0x611080000 */ @@ -692,6 +693,18 @@ static int sparx5_start(struct sparx5 *sparx5) } else { sparx5->xtr_irq = -ENXIO; } + + if (sparx5->ptp_irq >= 0) { + err = devm_request_threaded_irq(sparx5->dev, sparx5->ptp_irq, + NULL, sparx5_ptp_irq_handler, + IRQF_ONESHOT, "sparx5-ptp", + sparx5); + if (err) + sparx5->ptp_irq = -ENXIO; + + sparx5->ptp = 1; + } + return err; } @@ -808,6 +821,7 @@ static int mchp_sparx5_probe(struct platform_device *pdev) sparx5->fdma_irq = platform_get_irq_byname(sparx5->pdev, "fdma"); sparx5->xtr_irq = platform_get_irq_byname(sparx5->pdev, "xtr"); + sparx5->ptp_irq = platform_get_irq_byname(sparx5->pdev, "ptp"); /* Read chip ID to check CPU interface */ sparx5->chip_id = spx5_rd(sparx5, GCB_CHIP_ID); @@ -846,6 +860,12 @@ static int mchp_sparx5_probe(struct platform_device *pdev) dev_err(sparx5->dev, "Start failed\n"); goto cleanup_ports; } + + err = sparx5_ptp_init(sparx5); + if (err) { + dev_err(sparx5->dev, "PTP failed\n"); + goto cleanup_ports; + } goto cleanup_config; cleanup_ports: @@ -869,6 +889,7 @@ static int mchp_sparx5_remove(struct platform_device *pdev) 
disable_irq(sparx5->fdma_irq); sparx5->fdma_irq = -ENXIO; } + sparx5_ptp_deinit(sparx5); sparx5_fdma_stop(sparx5); sparx5_cleanup_ports(sparx5); /* Unregister netdevs */ diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h index a1acc9b461f2..33892dfc3b2f 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h @@ -14,8 +14,12 @@ #include <linux/if_vlan.h> #include <linux/bitmap.h> #include <linux/phylink.h> +#include <linux/net_tstamp.h> +#include <linux/ptp_clock_kernel.h> #include <linux/hrtimer.h> +#include "sparx5_main_regs.h" + /* Target chip type */ enum spx5_target_chiptype { SPX5_TARGET_CT_7546 = 0x7546, /* SparX-5-64 Enterprise */ @@ -77,6 +81,18 @@ enum sparx5_vlan_port_type { #define FDMA_RX_DCB_MAX_DBS 15 #define FDMA_TX_DCB_MAX_DBS 1 +#define SPARX5_PHC_COUNT 3 +#define SPARX5_PHC_PORT 0 + +#define IFH_REW_OP_NOOP 0x0 +#define IFH_REW_OP_ONE_STEP_PTP 0x3 +#define IFH_REW_OP_TWO_STEP_PTP 0x4 + +#define IFH_PDU_TYPE_NONE 0x0 +#define IFH_PDU_TYPE_PTP 0x5 +#define IFH_PDU_TYPE_IPV4_UDP_PTP 0x6 +#define IFH_PDU_TYPE_IPV6_UDP_PTP 0x7 + struct sparx5; struct sparx5_db_hw { @@ -165,9 +181,12 @@ struct sparx5_port { enum sparx5_port_max_tags max_vlan_tags; enum sparx5_vlan_port_type vlan_type; u32 custom_etype; - u32 ifh[IFH_LEN]; bool vlan_aware; struct hrtimer inj_timer; + /* ptp */ + u8 ptp_cmd; + u16 ts_id; + struct sk_buff_head tx_skbs; }; enum sparx5_core_clockfreq { @@ -177,6 +196,26 @@ enum sparx5_core_clockfreq { SPX5_CORE_CLOCK_625MHZ, /* 625MHZ core clock frequency */ }; +struct sparx5_phc { + struct ptp_clock *clock; + struct ptp_clock_info info; + struct hwtstamp_config hwtstamp_config; + struct sparx5 *sparx5; + u8 index; +}; + +struct sparx5_skb_cb { + u8 rew_op; + u8 pdu_type; + u8 pdu_w16_offset; + u16 ts_id; + unsigned long jiffies; +}; + +#define SPARX5_PTP_TIMEOUT msecs_to_jiffies(10) +#define SPARX5_SKB_CB(skb) \ + ((struct sparx5_skb_cb *)((skb)->cb)) + struct sparx5 { struct platform_device *pdev; struct device *dev; @@ -224,6 +263,14 @@ struct sparx5 { int fdma_irq; struct sparx5_rx rx; struct sparx5_tx tx; + /* PTP */ + bool ptp; + struct sparx5_phc phc[SPARX5_PHC_COUNT]; + spinlock_t ptp_clock_lock; /* lock for phc */ + spinlock_t ptp_ts_id_lock; /* lock for ts_id */ + struct mutex ptp_lock; /* lock for ptp interface state */ + u16 ptp_skbs; + int ptp_irq; }; /* sparx5_switchdev.c */ @@ -233,6 +280,7 @@ void sparx5_unregister_notifier_blocks(struct sparx5 *sparx5); /* sparx5_packet.c */ struct frame_info { int src_port; + u32 timestamp; }; void sparx5_xtr_flush(struct sparx5 *sparx5, u8 grp); @@ -286,12 +334,30 @@ void sparx5_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *stats int sparx_stats_init(struct sparx5 *sparx5); /* sparx5_netdev.c */ +void sparx5_set_port_ifh_timestamp(void *ifh_hdr, u64 timestamp); +void sparx5_set_port_ifh_rew_op(void *ifh_hdr, u32 rew_op); +void sparx5_set_port_ifh_pdu_type(void *ifh_hdr, u32 pdu_type); +void sparx5_set_port_ifh_pdu_w16_offset(void *ifh_hdr, u32 pdu_w16_offset); +void sparx5_set_port_ifh(void *ifh_hdr, u16 portno); bool sparx5_netdevice_check(const struct net_device *dev); struct net_device *sparx5_create_netdev(struct sparx5 *sparx5, u32 portno); int sparx5_register_netdevs(struct sparx5 *sparx5); void sparx5_destroy_netdevs(struct sparx5 *sparx5); void sparx5_unregister_netdevs(struct sparx5 *sparx5); +/* sparx5_ptp.c */ +int sparx5_ptp_init(struct sparx5 *sparx5); 
+void sparx5_ptp_deinit(struct sparx5 *sparx5); +int sparx5_ptp_hwtstamp_set(struct sparx5_port *port, struct ifreq *ifr); +int sparx5_ptp_hwtstamp_get(struct sparx5_port *port, struct ifreq *ifr); +void sparx5_ptp_rxtstamp(struct sparx5 *sparx5, struct sk_buff *skb, + u64 timestamp); +int sparx5_ptp_txtstamp_request(struct sparx5_port *port, + struct sk_buff *skb); +void sparx5_ptp_txtstamp_release(struct sparx5_port *port, + struct sk_buff *skb); +irqreturn_t sparx5_ptp_irq_handler(int irq, void *args); + /* Clock period in picoseconds */ static inline u32 sparx5_clk_period(enum sparx5_core_clockfreq cclock) { diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h index 5ab2373a7178..c94de436b281 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h @@ -4,8 +4,8 @@ * Copyright (c) 2021 Microchip Technology Inc. */ -/* This file is autogenerated by cml-utils 2021-05-06 13:06:37 +0200. - * Commit ID: 9ae4ec441e25e4b9003f4e514df5cb12a36b84d3 +/* This file is autogenerated by cml-utils 2022-02-26 14:15:01 +0100. + * Commit ID: 98bdd3d171cc2a1afd30d241d41a4281d471a48c (dirty) */ #ifndef _SPARX5_MAIN_REGS_H_ @@ -40,6 +40,7 @@ enum sparx5_target { TARGET_PCS25G_BR = 144, TARGET_PCS5G_BR = 160, TARGET_PORT_CONF = 173, + TARGET_PTP = 174, TARGET_QFWD = 175, TARGET_QRES = 176, TARGET_QS = 177, @@ -4156,6 +4157,249 @@ enum sparx5_target { #define PORT_CONF_USGMII_CFG_QUAD_MODE_GET(x)\ FIELD_GET(PORT_CONF_USGMII_CFG_QUAD_MODE, x) +/* DEVCPU_PTP:PTP_CFG:PTP_PIN_INTR */ +#define PTP_PTP_PIN_INTR __REG(TARGET_PTP, 0, 1, 320, 0, 1, 16, 0, 0, 1, 4) + +#define PTP_PTP_PIN_INTR_INTR_PTP GENMASK(4, 0) +#define PTP_PTP_PIN_INTR_INTR_PTP_SET(x)\ + FIELD_PREP(PTP_PTP_PIN_INTR_INTR_PTP, x) +#define PTP_PTP_PIN_INTR_INTR_PTP_GET(x)\ + FIELD_GET(PTP_PTP_PIN_INTR_INTR_PTP, x) + +/* DEVCPU_PTP:PTP_CFG:PTP_PIN_INTR_ENA */ +#define PTP_PTP_PIN_INTR_ENA __REG(TARGET_PTP, 0, 1, 320, 0, 1, 16, 4, 0, 1, 4) + +#define PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA GENMASK(4, 0) +#define PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA_SET(x)\ + FIELD_PREP(PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA, x) +#define PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA_GET(x)\ + FIELD_GET(PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA, x) + +/* DEVCPU_PTP:PTP_CFG:PTP_INTR_IDENT */ +#define PTP_PTP_INTR_IDENT __REG(TARGET_PTP, 0, 1, 320, 0, 1, 16, 8, 0, 1, 4) + +#define PTP_PTP_INTR_IDENT_INTR_PTP_IDENT GENMASK(4, 0) +#define PTP_PTP_INTR_IDENT_INTR_PTP_IDENT_SET(x)\ + FIELD_PREP(PTP_PTP_INTR_IDENT_INTR_PTP_IDENT, x) +#define PTP_PTP_INTR_IDENT_INTR_PTP_IDENT_GET(x)\ + FIELD_GET(PTP_PTP_INTR_IDENT_INTR_PTP_IDENT, x) + +/* DEVCPU_PTP:PTP_CFG:PTP_DOM_CFG */ +#define PTP_PTP_DOM_CFG __REG(TARGET_PTP, 0, 1, 320, 0, 1, 16, 12, 0, 1, 4) + +#define PTP_PTP_DOM_CFG_PTP_ENA GENMASK(11, 9) +#define PTP_PTP_DOM_CFG_PTP_ENA_SET(x)\ + FIELD_PREP(PTP_PTP_DOM_CFG_PTP_ENA, x) +#define PTP_PTP_DOM_CFG_PTP_ENA_GET(x)\ + FIELD_GET(PTP_PTP_DOM_CFG_PTP_ENA, x) + +#define PTP_PTP_DOM_CFG_PTP_HOLD GENMASK(8, 6) +#define PTP_PTP_DOM_CFG_PTP_HOLD_SET(x)\ + FIELD_PREP(PTP_PTP_DOM_CFG_PTP_HOLD, x) +#define PTP_PTP_DOM_CFG_PTP_HOLD_GET(x)\ + FIELD_GET(PTP_PTP_DOM_CFG_PTP_HOLD, x) + +#define PTP_PTP_DOM_CFG_PTP_TOD_FREEZE GENMASK(5, 3) +#define PTP_PTP_DOM_CFG_PTP_TOD_FREEZE_SET(x)\ + FIELD_PREP(PTP_PTP_DOM_CFG_PTP_TOD_FREEZE, x) +#define PTP_PTP_DOM_CFG_PTP_TOD_FREEZE_GET(x)\ + FIELD_GET(PTP_PTP_DOM_CFG_PTP_TOD_FREEZE, x) + +#define PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS GENMASK(2, 
0) +#define PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS_SET(x)\ + FIELD_PREP(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS, x) +#define PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS_GET(x)\ + FIELD_GET(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS, x) + +/* DEVCPU_PTP:PTP_TOD_DOMAINS:CLK_PER_CFG */ +#define PTP_CLK_PER_CFG(g, r) __REG(TARGET_PTP, 0, 1, 336, g, 3, 28, 0, r, 2, 4) + +/* DEVCPU_PTP:PTP_TOD_DOMAINS:PTP_CUR_NSEC */ +#define PTP_PTP_CUR_NSEC(g) __REG(TARGET_PTP, 0, 1, 336, g, 3, 28, 8, 0, 1, 4) + +#define PTP_PTP_CUR_NSEC_PTP_CUR_NSEC GENMASK(29, 0) +#define PTP_PTP_CUR_NSEC_PTP_CUR_NSEC_SET(x)\ + FIELD_PREP(PTP_PTP_CUR_NSEC_PTP_CUR_NSEC, x) +#define PTP_PTP_CUR_NSEC_PTP_CUR_NSEC_GET(x)\ + FIELD_GET(PTP_PTP_CUR_NSEC_PTP_CUR_NSEC, x) + +/* DEVCPU_PTP:PTP_TOD_DOMAINS:PTP_CUR_NSEC_FRAC */ +#define PTP_PTP_CUR_NSEC_FRAC(g) __REG(TARGET_PTP, 0, 1, 336, g, 3, 28, 12, 0, 1, 4) + +#define PTP_PTP_CUR_NSEC_FRAC_PTP_CUR_NSEC_FRAC GENMASK(7, 0) +#define PTP_PTP_CUR_NSEC_FRAC_PTP_CUR_NSEC_FRAC_SET(x)\ + FIELD_PREP(PTP_PTP_CUR_NSEC_FRAC_PTP_CUR_NSEC_FRAC, x) +#define PTP_PTP_CUR_NSEC_FRAC_PTP_CUR_NSEC_FRAC_GET(x)\ + FIELD_GET(PTP_PTP_CUR_NSEC_FRAC_PTP_CUR_NSEC_FRAC, x) + +/* DEVCPU_PTP:PTP_TOD_DOMAINS:PTP_CUR_SEC_LSB */ +#define PTP_PTP_CUR_SEC_LSB(g) __REG(TARGET_PTP, 0, 1, 336, g, 3, 28, 16, 0, 1, 4) + +/* DEVCPU_PTP:PTP_TOD_DOMAINS:PTP_CUR_SEC_MSB */ +#define PTP_PTP_CUR_SEC_MSB(g) __REG(TARGET_PTP, 0, 1, 336, g, 3, 28, 20, 0, 1, 4) + +#define PTP_PTP_CUR_SEC_MSB_PTP_CUR_SEC_MSB GENMASK(15, 0) +#define PTP_PTP_CUR_SEC_MSB_PTP_CUR_SEC_MSB_SET(x)\ + FIELD_PREP(PTP_PTP_CUR_SEC_MSB_PTP_CUR_SEC_MSB, x) +#define PTP_PTP_CUR_SEC_MSB_PTP_CUR_SEC_MSB_GET(x)\ + FIELD_GET(PTP_PTP_CUR_SEC_MSB_PTP_CUR_SEC_MSB, x) + +/* DEVCPU_PTP:PTP_TOD_DOMAINS:NTP_CUR_NSEC */ +#define PTP_NTP_CUR_NSEC(g) __REG(TARGET_PTP, 0, 1, 336, g, 3, 28, 24, 0, 1, 4) + +/* DEVCPU_PTP:PTP_PINS:PTP_PIN_CFG */ +#define PTP_PTP_PIN_CFG(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 0, 0, 1, 4) + +#define PTP_PTP_PIN_CFG_PTP_PIN_ACTION GENMASK(28, 26) +#define PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(x)\ + FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_ACTION, x) +#define PTP_PTP_PIN_CFG_PTP_PIN_ACTION_GET(x)\ + FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_ACTION, x) + +#define PTP_PTP_PIN_CFG_PTP_PIN_SYNC GENMASK(25, 24) +#define PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(x)\ + FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_SYNC, x) +#define PTP_PTP_PIN_CFG_PTP_PIN_SYNC_GET(x)\ + FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_SYNC, x) + +#define PTP_PTP_PIN_CFG_PTP_PIN_INV_POL BIT(23) +#define PTP_PTP_PIN_CFG_PTP_PIN_INV_POL_SET(x)\ + FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_INV_POL, x) +#define PTP_PTP_PIN_CFG_PTP_PIN_INV_POL_GET(x)\ + FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_INV_POL, x) + +#define PTP_PTP_PIN_CFG_PTP_PIN_SELECT GENMASK(22, 21) +#define PTP_PTP_PIN_CFG_PTP_PIN_SELECT_SET(x)\ + FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_SELECT, x) +#define PTP_PTP_PIN_CFG_PTP_PIN_SELECT_GET(x)\ + FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_SELECT, x) + +#define PTP_PTP_PIN_CFG_PTP_CLK_SELECT GENMASK(20, 18) +#define PTP_PTP_PIN_CFG_PTP_CLK_SELECT_SET(x)\ + FIELD_PREP(PTP_PTP_PIN_CFG_PTP_CLK_SELECT, x) +#define PTP_PTP_PIN_CFG_PTP_CLK_SELECT_GET(x)\ + FIELD_GET(PTP_PTP_PIN_CFG_PTP_CLK_SELECT, x) + +#define PTP_PTP_PIN_CFG_PTP_PIN_DOM GENMASK(17, 16) +#define PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(x)\ + FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_DOM, x) +#define PTP_PTP_PIN_CFG_PTP_PIN_DOM_GET(x)\ + FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_DOM, x) + +#define PTP_PTP_PIN_CFG_PTP_PIN_OPT GENMASK(15, 14) +#define PTP_PTP_PIN_CFG_PTP_PIN_OPT_SET(x)\ + FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_OPT, x) +#define 
PTP_PTP_PIN_CFG_PTP_PIN_OPT_GET(x)\ + FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_OPT, x) + +#define PTP_PTP_PIN_CFG_PTP_PIN_EMBEDDED_CLK BIT(13) +#define PTP_PTP_PIN_CFG_PTP_PIN_EMBEDDED_CLK_SET(x)\ + FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_EMBEDDED_CLK, x) +#define PTP_PTP_PIN_CFG_PTP_PIN_EMBEDDED_CLK_GET(x)\ + FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_EMBEDDED_CLK, x) + +#define PTP_PTP_PIN_CFG_PTP_PIN_OUTP_OFS GENMASK(12, 0) +#define PTP_PTP_PIN_CFG_PTP_PIN_OUTP_OFS_SET(x)\ + FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_OUTP_OFS, x) +#define PTP_PTP_PIN_CFG_PTP_PIN_OUTP_OFS_GET(x)\ + FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_OUTP_OFS, x) + +/* DEVCPU_PTP:PTP_PINS:PTP_TOD_SEC_MSB */ +#define PTP_PTP_TOD_SEC_MSB(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 4, 0, 1, 4) + +#define PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB GENMASK(15, 0) +#define PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB_SET(x)\ + FIELD_PREP(PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB, x) +#define PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB_GET(x)\ + FIELD_GET(PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB, x) + +/* DEVCPU_PTP:PTP_PINS:PTP_TOD_SEC_LSB */ +#define PTP_PTP_TOD_SEC_LSB(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 8, 0, 1, 4) + +/* DEVCPU_PTP:PTP_PINS:PTP_TOD_NSEC */ +#define PTP_PTP_TOD_NSEC(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 12, 0, 1, 4) + +#define PTP_PTP_TOD_NSEC_PTP_TOD_NSEC GENMASK(29, 0) +#define PTP_PTP_TOD_NSEC_PTP_TOD_NSEC_SET(x)\ + FIELD_PREP(PTP_PTP_TOD_NSEC_PTP_TOD_NSEC, x) +#define PTP_PTP_TOD_NSEC_PTP_TOD_NSEC_GET(x)\ + FIELD_GET(PTP_PTP_TOD_NSEC_PTP_TOD_NSEC, x) + +/* DEVCPU_PTP:PTP_PINS:PTP_TOD_NSEC_FRAC */ +#define PTP_PTP_TOD_NSEC_FRAC(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 16, 0, 1, 4) + +#define PTP_PTP_TOD_NSEC_FRAC_PTP_TOD_NSEC_FRAC GENMASK(7, 0) +#define PTP_PTP_TOD_NSEC_FRAC_PTP_TOD_NSEC_FRAC_SET(x)\ + FIELD_PREP(PTP_PTP_TOD_NSEC_FRAC_PTP_TOD_NSEC_FRAC, x) +#define PTP_PTP_TOD_NSEC_FRAC_PTP_TOD_NSEC_FRAC_GET(x)\ + FIELD_GET(PTP_PTP_TOD_NSEC_FRAC_PTP_TOD_NSEC_FRAC, x) + +/* DEVCPU_PTP:PTP_PINS:NTP_NSEC */ +#define PTP_NTP_NSEC(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 20, 0, 1, 4) + +/* DEVCPU_PTP:PTP_PINS:PIN_WF_HIGH_PERIOD */ +#define PTP_PIN_WF_HIGH_PERIOD(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 24, 0, 1, 4) + +#define PTP_PIN_WF_HIGH_PERIOD_PIN_WFH GENMASK(29, 0) +#define PTP_PIN_WF_HIGH_PERIOD_PIN_WFH_SET(x)\ + FIELD_PREP(PTP_PIN_WF_HIGH_PERIOD_PIN_WFH, x) +#define PTP_PIN_WF_HIGH_PERIOD_PIN_WFH_GET(x)\ + FIELD_GET(PTP_PIN_WF_HIGH_PERIOD_PIN_WFH, x) + +/* DEVCPU_PTP:PTP_PINS:PIN_WF_LOW_PERIOD */ +#define PTP_PIN_WF_LOW_PERIOD(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 28, 0, 1, 4) + +#define PTP_PIN_WF_LOW_PERIOD_PIN_WFL GENMASK(29, 0) +#define PTP_PIN_WF_LOW_PERIOD_PIN_WFL_SET(x)\ + FIELD_PREP(PTP_PIN_WF_LOW_PERIOD_PIN_WFL, x) +#define PTP_PIN_WF_LOW_PERIOD_PIN_WFL_GET(x)\ + FIELD_GET(PTP_PIN_WF_LOW_PERIOD_PIN_WFL, x) + +/* DEVCPU_PTP:PTP_PINS:PIN_IOBOUNCH_DELAY */ +#define PTP_PIN_IOBOUNCH_DELAY(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 32, 0, 1, 4) + +#define PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_VAL GENMASK(18, 3) +#define PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_VAL_SET(x)\ + FIELD_PREP(PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_VAL, x) +#define PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_VAL_GET(x)\ + FIELD_GET(PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_VAL, x) + +#define PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_CFG GENMASK(2, 0) +#define PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_CFG_SET(x)\ + FIELD_PREP(PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_CFG, x) +#define PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_CFG_GET(x)\ + FIELD_GET(PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_CFG, x) + +/* DEVCPU_PTP:PHASE_DETECTOR_CTRL:PHAD_CTRL 
*/ +#define PTP_PHAD_CTRL(g) __REG(TARGET_PTP, 0, 1, 420, g, 5, 8, 0, 0, 1, 4) + +#define PTP_PHAD_CTRL_PHAD_ENA BIT(7) +#define PTP_PHAD_CTRL_PHAD_ENA_SET(x)\ + FIELD_PREP(PTP_PHAD_CTRL_PHAD_ENA, x) +#define PTP_PHAD_CTRL_PHAD_ENA_GET(x)\ + FIELD_GET(PTP_PHAD_CTRL_PHAD_ENA, x) + +#define PTP_PHAD_CTRL_PHAD_FAILED BIT(6) +#define PTP_PHAD_CTRL_PHAD_FAILED_SET(x)\ + FIELD_PREP(PTP_PHAD_CTRL_PHAD_FAILED, x) +#define PTP_PHAD_CTRL_PHAD_FAILED_GET(x)\ + FIELD_GET(PTP_PHAD_CTRL_PHAD_FAILED, x) + +#define PTP_PHAD_CTRL_REDUCED_RES GENMASK(5, 3) +#define PTP_PHAD_CTRL_REDUCED_RES_SET(x)\ + FIELD_PREP(PTP_PHAD_CTRL_REDUCED_RES, x) +#define PTP_PHAD_CTRL_REDUCED_RES_GET(x)\ + FIELD_GET(PTP_PHAD_CTRL_REDUCED_RES, x) + +#define PTP_PHAD_CTRL_LOCK_ACC GENMASK(2, 0) +#define PTP_PHAD_CTRL_LOCK_ACC_SET(x)\ + FIELD_PREP(PTP_PHAD_CTRL_LOCK_ACC, x) +#define PTP_PHAD_CTRL_LOCK_ACC_GET(x)\ + FIELD_GET(PTP_PHAD_CTRL_LOCK_ACC, x) + +/* DEVCPU_PTP:PHASE_DETECTOR_CTRL:PHAD_CYC_STAT */ +#define PTP_PHAD_CYC_STAT(g) __REG(TARGET_PTP, 0, 1, 420, g, 5, 8, 4, 0, 1, 4) + /* QFWD:SYSTEM:SWITCH_PORT_MODE */ #define QFWD_SWITCH_PORT_MODE(r) __REG(TARGET_QFWD, 0, 1, 0, 0, 1, 340, 0, r, 70, 4) @@ -4528,6 +4772,93 @@ enum sparx5_target { #define REW_TAG_CTRL_TAG_DEI_CFG_GET(x)\ FIELD_GET(REW_TAG_CTRL_TAG_DEI_CFG, x) +/* REW:PTP_CTRL:PTP_TWOSTEP_CTRL */ +#define REW_PTP_TWOSTEP_CTRL __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 0, 0, 1, 4) + +#define REW_PTP_TWOSTEP_CTRL_PTP_OVWR_ENA BIT(12) +#define REW_PTP_TWOSTEP_CTRL_PTP_OVWR_ENA_SET(x)\ + FIELD_PREP(REW_PTP_TWOSTEP_CTRL_PTP_OVWR_ENA, x) +#define REW_PTP_TWOSTEP_CTRL_PTP_OVWR_ENA_GET(x)\ + FIELD_GET(REW_PTP_TWOSTEP_CTRL_PTP_OVWR_ENA, x) + +#define REW_PTP_TWOSTEP_CTRL_PTP_NXT BIT(11) +#define REW_PTP_TWOSTEP_CTRL_PTP_NXT_SET(x)\ + FIELD_PREP(REW_PTP_TWOSTEP_CTRL_PTP_NXT, x) +#define REW_PTP_TWOSTEP_CTRL_PTP_NXT_GET(x)\ + FIELD_GET(REW_PTP_TWOSTEP_CTRL_PTP_NXT, x) + +#define REW_PTP_TWOSTEP_CTRL_PTP_VLD BIT(10) +#define REW_PTP_TWOSTEP_CTRL_PTP_VLD_SET(x)\ + FIELD_PREP(REW_PTP_TWOSTEP_CTRL_PTP_VLD, x) +#define REW_PTP_TWOSTEP_CTRL_PTP_VLD_GET(x)\ + FIELD_GET(REW_PTP_TWOSTEP_CTRL_PTP_VLD, x) + +#define REW_PTP_TWOSTEP_CTRL_STAMP_TX BIT(9) +#define REW_PTP_TWOSTEP_CTRL_STAMP_TX_SET(x)\ + FIELD_PREP(REW_PTP_TWOSTEP_CTRL_STAMP_TX, x) +#define REW_PTP_TWOSTEP_CTRL_STAMP_TX_GET(x)\ + FIELD_GET(REW_PTP_TWOSTEP_CTRL_STAMP_TX, x) + +#define REW_PTP_TWOSTEP_CTRL_STAMP_PORT GENMASK(8, 1) +#define REW_PTP_TWOSTEP_CTRL_STAMP_PORT_SET(x)\ + FIELD_PREP(REW_PTP_TWOSTEP_CTRL_STAMP_PORT, x) +#define REW_PTP_TWOSTEP_CTRL_STAMP_PORT_GET(x)\ + FIELD_GET(REW_PTP_TWOSTEP_CTRL_STAMP_PORT, x) + +#define REW_PTP_TWOSTEP_CTRL_PTP_OVFL BIT(0) +#define REW_PTP_TWOSTEP_CTRL_PTP_OVFL_SET(x)\ + FIELD_PREP(REW_PTP_TWOSTEP_CTRL_PTP_OVFL, x) +#define REW_PTP_TWOSTEP_CTRL_PTP_OVFL_GET(x)\ + FIELD_GET(REW_PTP_TWOSTEP_CTRL_PTP_OVFL, x) + +/* REW:PTP_CTRL:PTP_TWOSTEP_STAMP */ +#define REW_PTP_TWOSTEP_STAMP __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 4, 0, 1, 4) + +#define REW_PTP_TWOSTEP_STAMP_STAMP_NSEC GENMASK(29, 0) +#define REW_PTP_TWOSTEP_STAMP_STAMP_NSEC_SET(x)\ + FIELD_PREP(REW_PTP_TWOSTEP_STAMP_STAMP_NSEC, x) +#define REW_PTP_TWOSTEP_STAMP_STAMP_NSEC_GET(x)\ + FIELD_GET(REW_PTP_TWOSTEP_STAMP_STAMP_NSEC, x) + +/* REW:PTP_CTRL:PTP_TWOSTEP_STAMP_SUBNS */ +#define REW_PTP_TWOSTEP_STAMP_SUBNS __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 8, 0, 1, 4) + +#define REW_PTP_TWOSTEP_STAMP_SUBNS_STAMP_SUB_NSEC GENMASK(7, 0) +#define REW_PTP_TWOSTEP_STAMP_SUBNS_STAMP_SUB_NSEC_SET(x)\ + 
FIELD_PREP(REW_PTP_TWOSTEP_STAMP_SUBNS_STAMP_SUB_NSEC, x) +#define REW_PTP_TWOSTEP_STAMP_SUBNS_STAMP_SUB_NSEC_GET(x)\ + FIELD_GET(REW_PTP_TWOSTEP_STAMP_SUBNS_STAMP_SUB_NSEC, x) + +/* REW:PTP_CTRL:PTP_RSRV_NOT_ZERO */ +#define REW_PTP_RSRV_NOT_ZERO __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 12, 0, 1, 4) + +/* REW:PTP_CTRL:PTP_RSRV_NOT_ZERO1 */ +#define REW_PTP_RSRV_NOT_ZERO1 __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 16, 0, 1, 4) + +/* REW:PTP_CTRL:PTP_RSRV_NOT_ZERO2 */ +#define REW_PTP_RSRV_NOT_ZERO2 __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 20, 0, 1, 4) + +#define REW_PTP_RSRV_NOT_ZERO2_PTP_RSRV_NOT_ZERO2 GENMASK(5, 0) +#define REW_PTP_RSRV_NOT_ZERO2_PTP_RSRV_NOT_ZERO2_SET(x)\ + FIELD_PREP(REW_PTP_RSRV_NOT_ZERO2_PTP_RSRV_NOT_ZERO2, x) +#define REW_PTP_RSRV_NOT_ZERO2_PTP_RSRV_NOT_ZERO2_GET(x)\ + FIELD_GET(REW_PTP_RSRV_NOT_ZERO2_PTP_RSRV_NOT_ZERO2, x) + +/* REW:PTP_CTRL:PTP_GEN_STAMP_FMT */ +#define REW_PTP_GEN_STAMP_FMT(r) __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 24, r, 4, 4) + +#define REW_PTP_GEN_STAMP_FMT_RT_OFS GENMASK(6, 2) +#define REW_PTP_GEN_STAMP_FMT_RT_OFS_SET(x)\ + FIELD_PREP(REW_PTP_GEN_STAMP_FMT_RT_OFS, x) +#define REW_PTP_GEN_STAMP_FMT_RT_OFS_GET(x)\ + FIELD_GET(REW_PTP_GEN_STAMP_FMT_RT_OFS, x) + +#define REW_PTP_GEN_STAMP_FMT_RT_FMT GENMASK(1, 0) +#define REW_PTP_GEN_STAMP_FMT_RT_FMT_SET(x)\ + FIELD_PREP(REW_PTP_GEN_STAMP_FMT_RT_FMT, x) +#define REW_PTP_GEN_STAMP_FMT_RT_FMT_GET(x)\ + FIELD_GET(REW_PTP_GEN_STAMP_FMT_RT_FMT, x) + /* REW:RAM_CTRL:RAM_INIT */ #define REW_RAM_INIT __REG(TARGET_REW, 0, 1, 378696, 0, 1, 4, 0, 0, 1, 4) diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c index e042f117dc7a..af4d3e1f1a6d 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c @@ -54,7 +54,7 @@ static void __ifh_encode_bitfield(void *ifh, u64 value, u32 pos, u32 width) ifh_hdr[byte - 5] |= (u8)((encode & 0xFF0000000000) >> 40); } -static void sparx5_set_port_ifh(void *ifh_hdr, u16 portno) +void sparx5_set_port_ifh(void *ifh_hdr, u16 portno) { /* VSTAX.RSV = 1. 
MSBit must be 1 */ ifh_encode_bitfield(ifh_hdr, 1, VSTAX + 79, 1); @@ -74,6 +74,26 @@ static void sparx5_set_port_ifh(void *ifh_hdr, u16 portno) ifh_encode_bitfield(ifh_hdr, 1, 67, 1); } +void sparx5_set_port_ifh_rew_op(void *ifh_hdr, u32 rew_op) +{ + ifh_encode_bitfield(ifh_hdr, rew_op, VSTAX + 32, 10); +} + +void sparx5_set_port_ifh_pdu_type(void *ifh_hdr, u32 pdu_type) +{ + ifh_encode_bitfield(ifh_hdr, pdu_type, 191, 4); +} + +void sparx5_set_port_ifh_pdu_w16_offset(void *ifh_hdr, u32 pdu_w16_offset) +{ + ifh_encode_bitfield(ifh_hdr, pdu_w16_offset, 195, 6); +} + +void sparx5_set_port_ifh_timestamp(void *ifh_hdr, u64 timestamp) +{ + ifh_encode_bitfield(ifh_hdr, timestamp, 232, 40); +} + static int sparx5_port_open(struct net_device *ndev) { struct sparx5_port *port = netdev_priv(ndev); @@ -179,6 +199,24 @@ static int sparx5_get_port_parent_id(struct net_device *dev, return 0; } +static int sparx5_port_ioctl(struct net_device *dev, struct ifreq *ifr, + int cmd) +{ + struct sparx5_port *sparx5_port = netdev_priv(dev); + struct sparx5 *sparx5 = sparx5_port->sparx5; + + if (!phy_has_hwtstamp(dev->phydev) && sparx5->ptp) { + switch (cmd) { + case SIOCSHWTSTAMP: + return sparx5_ptp_hwtstamp_set(sparx5_port, ifr); + case SIOCGHWTSTAMP: + return sparx5_ptp_hwtstamp_get(sparx5_port, ifr); + } + } + + return phy_mii_ioctl(dev->phydev, ifr, cmd); +} + static const struct net_device_ops sparx5_port_netdev_ops = { .ndo_open = sparx5_port_open, .ndo_stop = sparx5_port_stop, @@ -189,6 +227,7 @@ static const struct net_device_ops sparx5_port_netdev_ops = { .ndo_validate_addr = eth_validate_addr, .ndo_get_stats64 = sparx5_get_stats64, .ndo_get_port_parent_id = sparx5_get_port_parent_id, + .ndo_eth_ioctl = sparx5_port_ioctl, }; bool sparx5_netdevice_check(const struct net_device *dev) @@ -210,7 +249,6 @@ struct net_device *sparx5_create_netdev(struct sparx5 *sparx5, u32 portno) spx5_port->ndev = ndev; spx5_port->sparx5 = sparx5; spx5_port->portno = portno; - sparx5_set_port_ifh(spx5_port->ifh, portno); ndev->netdev_ops = &sparx5_port_netdev_ops; ndev->ethtool_ops = &sparx5_ethtool_ops; diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c index 148d431fcde4..304f84aadc36 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c @@ -44,6 +44,12 @@ void sparx5_ifh_parse(u32 *ifh, struct frame_info *info) ((u32)xtr_hdr[30] << 0); fwd = (fwd >> 5); info->src_port = FIELD_GET(GENMASK(7, 1), fwd); + + info->timestamp = + ((u64)xtr_hdr[2] << 24) | + ((u64)xtr_hdr[3] << 16) | + ((u64)xtr_hdr[4] << 8) | + ((u64)xtr_hdr[5] << 0); } static void sparx5_xtr_grp(struct sparx5 *sparx5, u8 grp, bool byte_swap) @@ -144,6 +150,7 @@ static void sparx5_xtr_grp(struct sparx5 *sparx5, u8 grp, bool byte_swap) /* Finish up skb */ skb_put(skb, byte_cnt - ETH_FCS_LEN); eth_skb_pad(skb); + sparx5_ptp_rxtstamp(sparx5, skb, fi.timestamp); skb->protocol = eth_type_trans(skb, netdev); netdev->stats.rx_bytes += skb->len; netdev->stats.rx_packets++; @@ -218,20 +225,44 @@ int sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev) struct net_device_stats *stats = &dev->stats; struct sparx5_port *port = netdev_priv(dev); struct sparx5 *sparx5 = port->sparx5; + u32 ifh[IFH_LEN]; int ret; + memset(ifh, 0, IFH_LEN * 4); + sparx5_set_port_ifh(ifh, port->portno); + + if (sparx5->ptp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { + ret = sparx5_ptp_txtstamp_request(port, skb); + if (ret) + 
return ret; + + sparx5_set_port_ifh_rew_op(ifh, SPARX5_SKB_CB(skb)->rew_op); + sparx5_set_port_ifh_pdu_type(ifh, SPARX5_SKB_CB(skb)->pdu_type); + sparx5_set_port_ifh_pdu_w16_offset(ifh, SPARX5_SKB_CB(skb)->pdu_w16_offset); + sparx5_set_port_ifh_timestamp(ifh, SPARX5_SKB_CB(skb)->ts_id); + } + + skb_tx_timestamp(skb); if (sparx5->fdma_irq > 0) - ret = sparx5_fdma_xmit(sparx5, port->ifh, skb); + ret = sparx5_fdma_xmit(sparx5, ifh, skb); else - ret = sparx5_inject(sparx5, port->ifh, skb, dev); + ret = sparx5_inject(sparx5, ifh, skb, dev); if (ret == NETDEV_TX_OK) { stats->tx_bytes += skb->len; stats->tx_packets++; - skb_tx_timestamp(skb); + + if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && + SPARX5_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP) + return ret; + dev_kfree_skb_any(skb); } else { stats->tx_dropped++; + + if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && + SPARX5_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP) + sparx5_ptp_txtstamp_release(port, skb); } return ret; } diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c b/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c new file mode 100644 index 000000000000..cd110c31e5a4 --- /dev/null +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c @@ -0,0 +1,685 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Microchip Sparx5 Switch driver + * + * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries. + * + * The Sparx5 Chip Register Model can be browsed at this location: + * https://github.com/microchip-ung/sparx-5_reginfo + */ +#include <linux/ptp_classify.h> + +#include "sparx5_main_regs.h" +#include "sparx5_main.h" + +#define SPARX5_MAX_PTP_ID 512 + +#define TOD_ACC_PIN 0x4 + +enum { + PTP_PIN_ACTION_IDLE = 0, + PTP_PIN_ACTION_LOAD, + PTP_PIN_ACTION_SAVE, + PTP_PIN_ACTION_CLOCK, + PTP_PIN_ACTION_DELTA, + PTP_PIN_ACTION_TOD +}; + +static u64 sparx5_ptp_get_1ppm(struct sparx5 *sparx5) +{ + /* Represents a 1ppm adjustment in 2^59 format, with the clock periods + * 1.59687500000(625), 1.99609375000(500) and 3.99218750000(250) ns + * as reference. The value is calculated as follows: + * (1/1000000)/((2^-59)/X) + */ + + u64 res = 0; + + switch (sparx5->coreclock) { + case SPX5_CORE_CLOCK_250MHZ: + res = 2301339409586; + break; + case SPX5_CORE_CLOCK_500MHZ: + res = 1150669704793; + break; + case SPX5_CORE_CLOCK_625MHZ: + res = 920535763834; + break; + default: + WARN(1, "Invalid core clock"); + break; + } + + return res; +} + +static u64 sparx5_ptp_get_nominal_value(struct sparx5 *sparx5) +{ + u64 res = 0; + + switch (sparx5->coreclock) { + case SPX5_CORE_CLOCK_250MHZ: + res = 0x1FF0000000000000; + break; + case SPX5_CORE_CLOCK_500MHZ: + res = 0x0FF8000000000000; + break; + case SPX5_CORE_CLOCK_625MHZ: + res = 0x0CC6666666666666; + break; + default: + WARN(1, "Invalid core clock"); + break; + } + + return res; +} +
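The constants in the two helpers above check out against the formula in the sparx5_ptp_get_1ppm() comment; a worked verification of the 250 MHz case (editorial arithmetic, not part of the patch):

/*
 * The nominal increment for 250 MHz, 0x1FF0000000000000, equals
 * 511/128 = 3.9921875 ns in units of 2^-59 ns, matching the
 * 3.99218750000(250) reference above. One ppm of that period is
 *
 *	3.9921875 * 2^59 / 10^6 = 2301339409586323456 / 10^6
 *			       ~= 2301339409586
 *
 * which is exactly the value returned for SPX5_CORE_CLOCK_250MHZ.
 * The 500 MHz and 625 MHz constants follow the same way from
 * 1.99609375000 ns and 1.59687500000 ns.
 */
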
port->ptp_cmd = IFH_REW_OP_NOOP; + break; + default: + return -ERANGE; + } + + switch (cfg.rx_filter) { + case HWTSTAMP_FILTER_NONE: + break; + case HWTSTAMP_FILTER_ALL: + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + case HWTSTAMP_FILTER_NTP_ALL: + cfg.rx_filter = HWTSTAMP_FILTER_ALL; + break; + default: + return -ERANGE; + } + + /* Commit back the result & save it */ + mutex_lock(&sparx5->ptp_lock); + phc = &sparx5->phc[SPARX5_PHC_PORT]; + memcpy(&phc->hwtstamp_config, &cfg, sizeof(cfg)); + mutex_unlock(&sparx5->ptp_lock); + + return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; +} + +int sparx5_ptp_hwtstamp_get(struct sparx5_port *port, struct ifreq *ifr) +{ + struct sparx5 *sparx5 = port->sparx5; + struct sparx5_phc *phc; + + phc = &sparx5->phc[SPARX5_PHC_PORT]; + return copy_to_user(ifr->ifr_data, &phc->hwtstamp_config, + sizeof(phc->hwtstamp_config)) ? -EFAULT : 0; +} + +static void sparx5_ptp_classify(struct sparx5_port *port, struct sk_buff *skb, + u8 *rew_op, u8 *pdu_type, u8 *pdu_w16_offset) +{ + struct ptp_header *header; + u8 msgtype; + int type; + + if (port->ptp_cmd == IFH_REW_OP_NOOP) { + *rew_op = IFH_REW_OP_NOOP; + *pdu_type = IFH_PDU_TYPE_NONE; + *pdu_w16_offset = 0; + return; + } + + type = ptp_classify_raw(skb); + if (type == PTP_CLASS_NONE) { + *rew_op = IFH_REW_OP_NOOP; + *pdu_type = IFH_PDU_TYPE_NONE; + *pdu_w16_offset = 0; + return; + } + + header = ptp_parse_header(skb, type); + if (!header) { + *rew_op = IFH_REW_OP_NOOP; + *pdu_type = IFH_PDU_TYPE_NONE; + *pdu_w16_offset = 0; + return; + } + + *pdu_w16_offset = 7; + if (type & PTP_CLASS_L2) + *pdu_type = IFH_PDU_TYPE_PTP; + if (type & PTP_CLASS_IPV4) + *pdu_type = IFH_PDU_TYPE_IPV4_UDP_PTP; + if (type & PTP_CLASS_IPV6) + *pdu_type = IFH_PDU_TYPE_IPV6_UDP_PTP; + + if (port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) { + *rew_op = IFH_REW_OP_TWO_STEP_PTP; + return; + } + + /* If it is a sync frame and the port runs in one-step mode, then set + * the one-step operation, otherwise run it as two-step + */ + msgtype = ptp_get_msgtype(header, type); + if ((msgtype & 0xf) == 0) { + *rew_op = IFH_REW_OP_ONE_STEP_PTP; + return; + } + + *rew_op = IFH_REW_OP_TWO_STEP_PTP; +} + +static void sparx5_ptp_txtstamp_old_release(struct sparx5_port *port) +{ + struct sk_buff *skb, *skb_tmp; + unsigned long flags; + + spin_lock_irqsave(&port->tx_skbs.lock, flags); + skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) { + if (time_after(SPARX5_SKB_CB(skb)->jiffies + SPARX5_PTP_TIMEOUT, + jiffies)) + break; + + __skb_unlink(skb, &port->tx_skbs); + dev_kfree_skb_any(skb); + } + spin_unlock_irqrestore(&port->tx_skbs.lock, flags); +} + +int sparx5_ptp_txtstamp_request(struct sparx5_port *port, + struct sk_buff *skb) +{ + struct sparx5 *sparx5 = port->sparx5; + u8 rew_op, pdu_type, pdu_w16_offset; + unsigned long flags; + + sparx5_ptp_classify(port, skb, &rew_op, &pdu_type, &pdu_w16_offset); + SPARX5_SKB_CB(skb)->rew_op = rew_op; + SPARX5_SKB_CB(skb)->pdu_type = pdu_type; + SPARX5_SKB_CB(skb)->pdu_w16_offset = pdu_w16_offset; + + if (rew_op != IFH_REW_OP_TWO_STEP_PTP) + return 0; + + sparx5_ptp_txtstamp_old_release(port); + +
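+ /* Reserve a timestamp ID for this skb under the ts_id lock; the ID is written into the IFH timestamp field on xmit and matched against the two-step FIFO entry in the IRQ handler. */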
spin_lock_irqsave(&sparx5->ptp_ts_id_lock, flags); + if (sparx5->ptp_skbs == SPARX5_MAX_PTP_ID) { + spin_unlock_irqrestore(&sparx5->ptp_ts_id_lock, flags); + return -EBUSY; + } + + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + + skb_queue_tail(&port->tx_skbs, skb); + SPARX5_SKB_CB(skb)->ts_id = port->ts_id; + SPARX5_SKB_CB(skb)->jiffies = jiffies; + + sparx5->ptp_skbs++; + port->ts_id++; + if (port->ts_id == SPARX5_MAX_PTP_ID) + port->ts_id = 0; + + spin_unlock_irqrestore(&sparx5->ptp_ts_id_lock, flags); + + return 0; +} + +void sparx5_ptp_txtstamp_release(struct sparx5_port *port, + struct sk_buff *skb) +{ + struct sparx5 *sparx5 = port->sparx5; + unsigned long flags; + + spin_lock_irqsave(&sparx5->ptp_ts_id_lock, flags); + port->ts_id--; + sparx5->ptp_skbs--; + skb_unlink(skb, &port->tx_skbs); + spin_unlock_irqrestore(&sparx5->ptp_ts_id_lock, flags); +} + +static void sparx5_get_hwtimestamp(struct sparx5 *sparx5, + struct timespec64 *ts, + u32 nsec) +{ + /* Read current PTP time to get seconds */ + unsigned long flags; + u32 curr_nsec; + + spin_lock_irqsave(&sparx5->ptp_clock_lock, flags); + + spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_SAVE) | + PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(SPARX5_PHC_PORT) | + PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0), + PTP_PTP_PIN_CFG_PTP_PIN_ACTION | + PTP_PTP_PIN_CFG_PTP_PIN_DOM | + PTP_PTP_PIN_CFG_PTP_PIN_SYNC, + sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN)); + + ts->tv_sec = spx5_rd(sparx5, PTP_PTP_TOD_SEC_LSB(TOD_ACC_PIN)); + curr_nsec = spx5_rd(sparx5, PTP_PTP_TOD_NSEC(TOD_ACC_PIN)); + + ts->tv_nsec = nsec; + + /* Sec has incremented since the ts was registered */ + if (curr_nsec < nsec) + ts->tv_sec--; + + spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags); +} + +irqreturn_t sparx5_ptp_irq_handler(int irq, void *args) +{ + int budget = SPARX5_MAX_PTP_ID; + struct sparx5 *sparx5 = args; + + while (budget--) { + struct sk_buff *skb, *skb_tmp, *skb_match = NULL; + struct skb_shared_hwtstamps shhwtstamps; + struct sparx5_port *port; + struct timespec64 ts; + unsigned long flags; + u32 val, id, txport; + u32 delay; + + val = spx5_rd(sparx5, REW_PTP_TWOSTEP_CTRL); + + /* Check if a timestamp can be retrieved */ + if (!(val & REW_PTP_TWOSTEP_CTRL_PTP_VLD)) + break; + + WARN_ON(val & REW_PTP_TWOSTEP_CTRL_PTP_OVFL); + + if (!(val & REW_PTP_TWOSTEP_CTRL_STAMP_TX)) + continue; + + /* Retrieve the ts Tx port */ + txport = REW_PTP_TWOSTEP_CTRL_STAMP_PORT_GET(val); + + /* Retrieve the port, to find its associated skb */ + port = sparx5->ports[txport]; + + /* Retrieve the delay */ + delay = spx5_rd(sparx5, REW_PTP_TWOSTEP_STAMP); + delay = REW_PTP_TWOSTEP_STAMP_STAMP_NSEC_GET(delay); + + /* Get the next timestamp from the FIFO; it needs to be the rx + * timestamp, which carries the id of the frame + */ + spx5_rmw(REW_PTP_TWOSTEP_CTRL_PTP_NXT_SET(1), + REW_PTP_TWOSTEP_CTRL_PTP_NXT, + sparx5, REW_PTP_TWOSTEP_CTRL); + + val = spx5_rd(sparx5, REW_PTP_TWOSTEP_CTRL); + + /* Check if a timestamp can be retrieved */ + if (!(val & REW_PTP_TWOSTEP_CTRL_PTP_VLD)) + break; + + /* Read RX timestamping to get the ID */ + id = spx5_rd(sparx5, REW_PTP_TWOSTEP_STAMP); + id <<= 8; + id |= spx5_rd(sparx5, REW_PTP_TWOSTEP_STAMP_SUBNS); + + spin_lock_irqsave(&port->tx_skbs.lock, flags); + skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) { + if (SPARX5_SKB_CB(skb)->ts_id != id) + continue; + + __skb_unlink(skb, &port->tx_skbs); + skb_match = skb; + break; + } + spin_unlock_irqrestore(&port->tx_skbs.lock, flags); + + /* Next ts */ + spx5_rmw(REW_PTP_TWOSTEP_CTRL_PTP_NXT_SET(1), +
REW_PTP_TWOSTEP_CTRL_PTP_NXT, + sparx5, REW_PTP_TWOSTEP_CTRL); + + if (WARN_ON(!skb_match)) + continue; + + spin_lock(&sparx5->ptp_ts_id_lock); + sparx5->ptp_skbs--; + spin_unlock(&sparx5->ptp_ts_id_lock); + + /* Get the h/w timestamp */ + sparx5_get_hwtimestamp(sparx5, &ts, delay); + + /* Set the timestamp into the skb */ + shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec); + skb_tstamp_tx(skb_match, &shhwtstamps); + + dev_kfree_skb_any(skb_match); + } + + return IRQ_HANDLED; +} + +static int sparx5_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) +{ + struct sparx5_phc *phc = container_of(ptp, struct sparx5_phc, info); + struct sparx5 *sparx5 = phc->sparx5; + unsigned long flags; + bool neg_adj = false; + u64 tod_inc; + u64 ref; + + if (!scaled_ppm) + return 0; + + if (scaled_ppm < 0) { + neg_adj = true; + scaled_ppm = -scaled_ppm; + } + + tod_inc = sparx5_ptp_get_nominal_value(sparx5); + + /* The multiplication is split into 2 separate additions because of + * overflow issues. If scaled_ppm, with its 16-bit fractional part, + * were bigger than 20ppm, a single multiplication would overflow. + */ + ref = sparx5_ptp_get_1ppm(sparx5) * (scaled_ppm >> 16); + ref += (sparx5_ptp_get_1ppm(sparx5) * (0xffff & scaled_ppm)) >> 16; + tod_inc = neg_adj ? tod_inc - ref : tod_inc + ref; + + spin_lock_irqsave(&sparx5->ptp_clock_lock, flags); + + spx5_rmw(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS_SET(1 << BIT(phc->index)), + PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS, + sparx5, PTP_PTP_DOM_CFG); + + spx5_wr((u32)tod_inc & 0xFFFFFFFF, sparx5, + PTP_CLK_PER_CFG(phc->index, 0)); + spx5_wr((u32)(tod_inc >> 32), sparx5, + PTP_CLK_PER_CFG(phc->index, 1)); + + spx5_rmw(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS_SET(0), + PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS, sparx5, + PTP_PTP_DOM_CFG); + + spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags); + + return 0; +} + +static int sparx5_ptp_settime64(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct sparx5_phc *phc = container_of(ptp, struct sparx5_phc, info); + struct sparx5 *sparx5 = phc->sparx5; + unsigned long flags; + + spin_lock_irqsave(&sparx5->ptp_clock_lock, flags); + + /* Must be in IDLE mode before the time can be loaded */ + spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_IDLE) | + PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(phc->index) | + PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0), + PTP_PTP_PIN_CFG_PTP_PIN_ACTION | + PTP_PTP_PIN_CFG_PTP_PIN_DOM | + PTP_PTP_PIN_CFG_PTP_PIN_SYNC, + sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN)); + + /* Set new value */ + spx5_wr(PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB_SET(upper_32_bits(ts->tv_sec)), + sparx5, PTP_PTP_TOD_SEC_MSB(TOD_ACC_PIN)); + spx5_wr(lower_32_bits(ts->tv_sec), + sparx5, PTP_PTP_TOD_SEC_LSB(TOD_ACC_PIN)); + spx5_wr(ts->tv_nsec, sparx5, PTP_PTP_TOD_NSEC(TOD_ACC_PIN)); + + /* Apply new values */ + spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_LOAD) | + PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(phc->index) | + PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0), + PTP_PTP_PIN_CFG_PTP_PIN_ACTION | + PTP_PTP_PIN_CFG_PTP_PIN_DOM | + PTP_PTP_PIN_CFG_PTP_PIN_SYNC, + sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN)); + + spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags); + + return 0; +} + +static int sparx5_ptp_gettime64(struct ptp_clock_info *ptp, + struct timespec64 *ts) +{ + struct sparx5_phc *phc = container_of(ptp, struct sparx5_phc, info); + struct sparx5 *sparx5 = phc->sparx5; + unsigned long flags; + time64_t s; + s64 ns; + + spin_lock_irqsave(&sparx5->ptp_clock_lock, flags); + + spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_SAVE) | +
PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(phc->index) | + PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0), + PTP_PTP_PIN_CFG_PTP_PIN_ACTION | + PTP_PTP_PIN_CFG_PTP_PIN_DOM | + PTP_PTP_PIN_CFG_PTP_PIN_SYNC, + sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN)); + + s = spx5_rd(sparx5, PTP_PTP_TOD_SEC_MSB(TOD_ACC_PIN)); + s <<= 32; + s |= spx5_rd(sparx5, PTP_PTP_TOD_SEC_LSB(TOD_ACC_PIN)); + ns = spx5_rd(sparx5, PTP_PTP_TOD_NSEC(TOD_ACC_PIN)); + ns &= PTP_PTP_TOD_NSEC_PTP_TOD_NSEC; + + spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags); + + /* Deal with negative values */ + if ((ns & 0xFFFFFFF0) == 0x3FFFFFF0) { + s--; + ns &= 0xf; + ns += 999999984; + } + + set_normalized_timespec64(ts, s, ns); + return 0; +} + +static int sparx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct sparx5_phc *phc = container_of(ptp, struct sparx5_phc, info); + struct sparx5 *sparx5 = phc->sparx5; + + if (delta > -(NSEC_PER_SEC / 2) && delta < (NSEC_PER_SEC / 2)) { + unsigned long flags; + + spin_lock_irqsave(&sparx5->ptp_clock_lock, flags); + + /* Must be in IDLE mode before the time can be loaded */ + spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_IDLE) | + PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(phc->index) | + PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0), + PTP_PTP_PIN_CFG_PTP_PIN_ACTION | + PTP_PTP_PIN_CFG_PTP_PIN_DOM | + PTP_PTP_PIN_CFG_PTP_PIN_SYNC, + sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN)); + + spx5_wr(PTP_PTP_TOD_NSEC_PTP_TOD_NSEC_SET(delta), + sparx5, PTP_PTP_TOD_NSEC(TOD_ACC_PIN)); + + /* Adjust time with the value of PTP_TOD_NSEC */ + spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_DELTA) | + PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(phc->index) | + PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0), + PTP_PTP_PIN_CFG_PTP_PIN_ACTION | + PTP_PTP_PIN_CFG_PTP_PIN_DOM | + PTP_PTP_PIN_CFG_PTP_PIN_SYNC, + sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN)); + + spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags); + } else { + /* Fall back using sparx5_ptp_settime64 which is not exact */ + struct timespec64 ts; + u64 now; + + sparx5_ptp_gettime64(ptp, &ts); + + now = ktime_to_ns(timespec64_to_ktime(ts)); + ts = ns_to_timespec64(now + delta); + + sparx5_ptp_settime64(ptp, &ts); + } + + return 0; +} + +static struct ptp_clock_info sparx5_ptp_clock_info = { + .owner = THIS_MODULE, + .name = "sparx5 ptp", + .max_adj = 200000, + .gettime64 = sparx5_ptp_gettime64, + .settime64 = sparx5_ptp_settime64, + .adjtime = sparx5_ptp_adjtime, + .adjfine = sparx5_ptp_adjfine, +}; + +static int sparx5_ptp_phc_init(struct sparx5 *sparx5, + int index, + struct ptp_clock_info *clock_info) +{ + struct sparx5_phc *phc = &sparx5->phc[index]; + + phc->info = *clock_info; + phc->clock = ptp_clock_register(&phc->info, sparx5->dev); + if (IS_ERR(phc->clock)) + return PTR_ERR(phc->clock); + + phc->index = index; + phc->sparx5 = sparx5; + + /* PTP Rx stamping is always enabled. 
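+ * Only the Tx direction is configurable per port through SIOCSHWTSTAMP, so initialize the reported Rx filter to PTP v2 events here.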
*/ + phc->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + + return 0; +} + +int sparx5_ptp_init(struct sparx5 *sparx5) +{ + u64 tod_adj = sparx5_ptp_get_nominal_value(sparx5); + struct sparx5_port *port; + int err, i; + + if (!sparx5->ptp) + return 0; + + for (i = 0; i < SPARX5_PHC_COUNT; ++i) { + err = sparx5_ptp_phc_init(sparx5, i, &sparx5_ptp_clock_info); + if (err) + return err; + } + + spin_lock_init(&sparx5->ptp_clock_lock); + spin_lock_init(&sparx5->ptp_ts_id_lock); + mutex_init(&sparx5->ptp_lock); + + /* Disable master counters */ + spx5_wr(PTP_PTP_DOM_CFG_PTP_ENA_SET(0), sparx5, PTP_PTP_DOM_CFG); + + /* Configure the nominal TOD increment per clock cycle */ + spx5_rmw(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS_SET(0x7), + PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS, + sparx5, PTP_PTP_DOM_CFG); + + for (i = 0; i < SPARX5_PHC_COUNT; ++i) { + spx5_wr((u32)tod_adj & 0xFFFFFFFF, sparx5, + PTP_CLK_PER_CFG(i, 0)); + spx5_wr((u32)(tod_adj >> 32), sparx5, + PTP_CLK_PER_CFG(i, 1)); + } + + spx5_rmw(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS_SET(0), + PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS, + sparx5, PTP_PTP_DOM_CFG); + + /* Enable master counters */ + spx5_wr(PTP_PTP_DOM_CFG_PTP_ENA_SET(0x7), sparx5, PTP_PTP_DOM_CFG); + + for (i = 0; i < sparx5->port_count; i++) { + port = sparx5->ports[i]; + if (!port) + continue; + + skb_queue_head_init(&port->tx_skbs); + } + + return 0; +} + +void sparx5_ptp_deinit(struct sparx5 *sparx5) +{ + struct sparx5_port *port; + int i; + + for (i = 0; i < sparx5->port_count; i++) { + port = sparx5->ports[i]; + if (!port) + continue; + + skb_queue_purge(&port->tx_skbs); + } + + for (i = 0; i < SPARX5_PHC_COUNT; ++i) + ptp_clock_unregister(sparx5->phc[i].clock); +} + +void sparx5_ptp_rxtstamp(struct sparx5 *sparx5, struct sk_buff *skb, + u64 timestamp) +{ + struct skb_shared_hwtstamps *shhwtstamps; + struct sparx5_phc *phc; + struct timespec64 ts; + u64 full_ts_in_ns; + + if (!sparx5->ptp) + return; + + phc = &sparx5->phc[SPARX5_PHC_PORT]; + sparx5_ptp_gettime64(&phc->info, &ts); + + if (ts.tv_nsec < timestamp) + ts.tv_sec--; + ts.tv_nsec = timestamp; + full_ts_in_ns = ktime_set(ts.tv_sec, ts.tv_nsec); + + shhwtstamps = skb_hwtstamps(skb); + shhwtstamps->hwtstamp = full_ts_in_ns; +} diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c index 649ca609884a..dacb87f49552 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c @@ -19,11 +19,27 @@ struct sparx5_switchdev_event_work { unsigned long event; }; +static int sparx5_port_attr_pre_bridge_flags(struct sparx5_port *port, + struct switchdev_brport_flags flags) +{ + if (flags.mask & ~(BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD)) + return -EINVAL; + + return 0; +} + static void sparx5_port_attr_bridge_flags(struct sparx5_port *port, struct switchdev_brport_flags flags) { + int pgid; + if (flags.mask & BR_MCAST_FLOOD) - sparx5_pgid_update_mask(port, PGID_MC_FLOOD, true); + for (pgid = PGID_MC_FLOOD; pgid <= PGID_IPV6_MC_CTRL; pgid++) + sparx5_pgid_update_mask(port, pgid, !!(flags.val & BR_MCAST_FLOOD)); + if (flags.mask & BR_FLOOD) + sparx5_pgid_update_mask(port, PGID_UC_FLOOD, !!(flags.val & BR_FLOOD)); + if (flags.mask & BR_BCAST_FLOOD) + sparx5_pgid_update_mask(port, PGID_BCAST, !!(flags.val & BR_BCAST_FLOOD)); } static void sparx5_attr_stp_state_set(struct sparx5_port *port, @@ -72,6 +88,9 @@ static int sparx5_port_attr_set(struct net_device *dev, const void *ctx, struct sparx5_port *port = 
netdev_priv(dev); switch (attr->id) { + case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS: + return sparx5_port_attr_pre_bridge_flags(port, + attr->u.brport_flags); case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: sparx5_port_attr_bridge_flags(port, attr->u.brport_flags); break; @@ -369,13 +388,11 @@ static int sparx5_handle_port_vlan_add(struct net_device *dev, struct sparx5_port *port = netdev_priv(dev); if (netif_is_bridge_master(dev)) { - if (v->flags & BRIDGE_VLAN_INFO_BRENTRY) { - struct sparx5 *sparx5 = - container_of(nb, struct sparx5, - switchdev_blocking_nb); + struct sparx5 *sparx5 = + container_of(nb, struct sparx5, + switchdev_blocking_nb); - sparx5_sync_bridge_dev_addr(dev, sparx5, v->vid, true); - } + sparx5_sync_bridge_dev_addr(dev, sparx5, v->vid, true); return 0; } diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c b/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c index 4ce490a25f33..8e56ffa1c4f7 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c @@ -58,16 +58,6 @@ int sparx5_vlan_vid_add(struct sparx5_port *port, u16 vid, bool pvid, struct sparx5 *sparx5 = port->sparx5; int ret; - /* Make the port a member of the VLAN */ - set_bit(port->portno, sparx5->vlan_mask[vid]); - ret = sparx5_vlant_set_mask(sparx5, vid); - if (ret) - return ret; - - /* Default ingress vlan classification */ - if (pvid) - port->pvid = vid; - /* Untagged egress vlan classification */ if (untagged && port->vid != vid) { if (port->vid) { @@ -79,6 +69,16 @@ int sparx5_vlan_vid_add(struct sparx5_port *port, u16 vid, bool pvid, port->vid = vid; } + /* Make the port a member of the VLAN */ + set_bit(port->portno, sparx5->vlan_mask[vid]); + ret = sparx5_vlant_set_mask(sparx5, vid); + if (ret) + return ret; + + /* Default ingress vlan classification */ + if (pvid) + port->pvid = vid; + sparx5_vlan_port_apply(sparx5, port); return 0; diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c index 15179b9529e1..afb7dcadb8d2 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.c +++ b/drivers/net/ethernet/moxa/moxart_ether.c @@ -510,14 +510,14 @@ static int moxart_mac_probe(struct platform_device *pdev) } priv->tx_buf_base = kmalloc_array(priv->tx_buf_size, TX_DESC_NUM, - GFP_ATOMIC); + GFP_KERNEL); if (!priv->tx_buf_base) { ret = -ENOMEM; goto init_fail; } priv->rx_buf_base = kmalloc_array(priv->rx_buf_size, RX_DESC_NUM, - GFP_ATOMIC); + GFP_KERNEL); if (!priv->rx_buf_base) { ret = -ENOMEM; goto init_fail; diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index e6de86552df0..21134125a6e4 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -13,6 +13,7 @@ #define TABLE_UPDATE_SLEEP_US 10 #define TABLE_UPDATE_TIMEOUT_US 100000 +#define OCELOT_RSV_VLAN_RANGE_START 4000 struct ocelot_mact_entry { u8 mac[ETH_ALEN]; @@ -221,6 +222,35 @@ static void ocelot_vcap_enable(struct ocelot *ocelot, int port) REW_PORT_CFG, port); } +static int ocelot_single_vlan_aware_bridge(struct ocelot *ocelot, + struct netlink_ext_ack *extack) +{ + struct net_device *bridge = NULL; + int port; + + for (port = 0; port < ocelot->num_phys_ports; port++) { + struct ocelot_port *ocelot_port = ocelot->ports[port]; + + if (!ocelot_port || !ocelot_port->bridge || + !br_vlan_enabled(ocelot_port->bridge)) + continue; + + if (!bridge) { + bridge = ocelot_port->bridge; + continue; + } + + if (bridge == ocelot_port->bridge) + continue; + + 
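+ /* Reaching here means a second, different VLAN-aware bridge spans this switch. */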
NL_SET_ERR_MSG_MOD(extack, + "Only one VLAN-aware bridge is supported"); + return -EBUSY; + } + + return 0; +} + static inline u32 ocelot_vlant_read_vlanaccess(struct ocelot *ocelot) { return ocelot_read(ocelot, ANA_TABLES_VLANACCESS); @@ -347,12 +377,45 @@ static void ocelot_port_manage_port_tag(struct ocelot *ocelot, int port) } } +int ocelot_bridge_num_find(struct ocelot *ocelot, + const struct net_device *bridge) +{ + int port; + + for (port = 0; port < ocelot->num_phys_ports; port++) { + struct ocelot_port *ocelot_port = ocelot->ports[port]; + + if (ocelot_port && ocelot_port->bridge == bridge) + return ocelot_port->bridge_num; + } + + return -1; +} +EXPORT_SYMBOL_GPL(ocelot_bridge_num_find); + +static u16 ocelot_vlan_unaware_pvid(struct ocelot *ocelot, + const struct net_device *bridge) +{ + int bridge_num; + + /* Standalone ports use VID 0 */ + if (!bridge) + return 0; + + bridge_num = ocelot_bridge_num_find(ocelot, bridge); + if (WARN_ON(bridge_num < 0)) + return 0; + + /* VLAN-unaware bridges use a reserved VID going from 4095 downwards */ + return VLAN_N_VID - bridge_num - 1; +} + /* Default VLAN to classify untagged frames to (may be zero) */ static void ocelot_port_set_pvid(struct ocelot *ocelot, int port, const struct ocelot_bridge_vlan *pvid_vlan) { struct ocelot_port *ocelot_port = ocelot->ports[port]; - u16 pvid = OCELOT_VLAN_UNAWARE_PVID; + u16 pvid = ocelot_vlan_unaware_pvid(ocelot, ocelot_port->bridge); u32 val = 0; ocelot_port->pvid_vlan = pvid_vlan; @@ -466,12 +529,29 @@ static int ocelot_vlan_member_del(struct ocelot *ocelot, int port, u16 vid) return 0; } +static int ocelot_add_vlan_unaware_pvid(struct ocelot *ocelot, int port, + const struct net_device *bridge) +{ + u16 vid = ocelot_vlan_unaware_pvid(ocelot, bridge); + + return ocelot_vlan_member_add(ocelot, port, vid, true); +} + +static int ocelot_del_vlan_unaware_pvid(struct ocelot *ocelot, int port, + const struct net_device *bridge) +{ + u16 vid = ocelot_vlan_unaware_pvid(ocelot, bridge); + + return ocelot_vlan_member_del(ocelot, port, vid); +} + int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port, bool vlan_aware, struct netlink_ext_ack *extack) { struct ocelot_vcap_block *block = &ocelot->block[VCAP_IS1]; struct ocelot_port *ocelot_port = ocelot->ports[port]; struct ocelot_vcap_filter *filter; + int err; u32 val; list_for_each_entry(filter, &block->rules, list) { @@ -483,6 +563,19 @@ int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port, } } + err = ocelot_single_vlan_aware_bridge(ocelot, extack); + if (err) + return err; + + if (vlan_aware) + err = ocelot_del_vlan_unaware_pvid(ocelot, port, + ocelot_port->bridge); + else + err = ocelot_add_vlan_unaware_pvid(ocelot, port, + ocelot_port->bridge); + if (err) + return err; + ocelot_port->vlan_aware = vlan_aware; if (vlan_aware) @@ -521,6 +614,12 @@ int ocelot_vlan_prepare(struct ocelot *ocelot, int port, u16 vid, bool pvid, } } + if (vid > OCELOT_RSV_VLAN_RANGE_START) { + NL_SET_ERR_MSG_MOD(extack, + "VLAN range 4000-4095 reserved for VLAN-unaware bridging"); + return -EBUSY; + } + return 0; } EXPORT_SYMBOL(ocelot_vlan_prepare); @@ -549,14 +648,18 @@ EXPORT_SYMBOL(ocelot_vlan_add); int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid) { struct ocelot_port *ocelot_port = ocelot->ports[port]; + bool del_pvid = false; int err; + if (ocelot_port->pvid_vlan && ocelot_port->pvid_vlan->vid == vid) + del_pvid = true; + err = ocelot_vlan_member_del(ocelot, port, vid); if (err) return err; /* Ingress */ - if (ocelot_port->pvid_vlan &&
ocelot_port->pvid_vlan->vid == vid) + if (del_pvid) ocelot_port_set_pvid(ocelot, port, NULL); /* Egress */ @@ -580,11 +683,11 @@ static void ocelot_vlan_init(struct ocelot *ocelot) for (vid = 1; vid < VLAN_N_VID; vid++) ocelot_vlant_set_mask(ocelot, vid, 0); - /* Because VLAN filtering is enabled, we need VID 0 to get untagged - * traffic. It is added automatically if 8021q module is loaded, but - * we can't rely on it since module may be not loaded. + /* We need VID 0 to get traffic on standalone ports. + * It is added automatically if the 8021q module is loaded, but we + * can't rely on that since it might not be. */ - ocelot_vlant_set_mask(ocelot, OCELOT_VLAN_UNAWARE_PVID, all_ports); + ocelot_vlant_set_mask(ocelot, OCELOT_STANDALONE_PVID, all_ports); /* Set vlan ingress filter mask to all ports but the CPU port by * default. @@ -1233,21 +1336,27 @@ void ocelot_drain_cpu_queue(struct ocelot *ocelot, int grp) } EXPORT_SYMBOL(ocelot_drain_cpu_queue); -int ocelot_fdb_add(struct ocelot *ocelot, int port, - const unsigned char *addr, u16 vid) +int ocelot_fdb_add(struct ocelot *ocelot, int port, const unsigned char *addr, + u16 vid, const struct net_device *bridge) { int pgid = port; if (port == ocelot->npi) pgid = PGID_CPU; + if (!vid) + vid = ocelot_vlan_unaware_pvid(ocelot, bridge); + return ocelot_mact_learn(ocelot, pgid, addr, vid, ENTRYTYPE_LOCKED); } EXPORT_SYMBOL(ocelot_fdb_add); -int ocelot_fdb_del(struct ocelot *ocelot, int port, - const unsigned char *addr, u16 vid) +int ocelot_fdb_del(struct ocelot *ocelot, int port, const unsigned char *addr, + u16 vid, const struct net_device *bridge) { + if (!vid) + vid = ocelot_vlan_unaware_pvid(ocelot, bridge); + return ocelot_mact_forget(ocelot, addr, vid); } EXPORT_SYMBOL(ocelot_fdb_del); @@ -1409,6 +1518,12 @@ int ocelot_fdb_dump(struct ocelot *ocelot, int port, is_static = (entry.type == ENTRYTYPE_LOCKED); + /* Hide the reserved VLANs used for + * VLAN-unaware bridging. 
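+ * User space sees these entries with VID 0, the VID it used when installing them.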
+ */ + if (entry.vid > OCELOT_RSV_VLAN_RANGE_START) + entry.vid = 0; + err = cb(entry.mac, entry.vid, is_static, data); if (err) break; @@ -1468,9 +1583,9 @@ ocelot_populate_ipv6_ptp_general_trap_key(struct ocelot_vcap_filter *trap) trap->key.ipv6.dport.mask = 0xffff; } -static int ocelot_trap_add(struct ocelot *ocelot, int port, - unsigned long cookie, - void (*populate)(struct ocelot_vcap_filter *f)) +int ocelot_trap_add(struct ocelot *ocelot, int port, + unsigned long cookie, bool take_ts, + void (*populate)(struct ocelot_vcap_filter *f)) { struct ocelot_vcap_block *block_vcap_is2; struct ocelot_vcap_filter *trap; @@ -1496,6 +1611,8 @@ static int ocelot_trap_add(struct ocelot *ocelot, int port, trap->action.cpu_copy_ena = true; trap->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY; trap->action.port_mask = 0; + trap->take_ts = take_ts; + list_add_tail(&trap->trap_list, &ocelot->traps); new = true; } @@ -1507,16 +1624,17 @@ static int ocelot_trap_add(struct ocelot *ocelot, int port, err = ocelot_vcap_filter_replace(ocelot, trap); if (err) { trap->ingress_port_mask &= ~BIT(port); - if (!trap->ingress_port_mask) + if (!trap->ingress_port_mask) { + list_del(&trap->trap_list); kfree(trap); + } return err; } return 0; } -static int ocelot_trap_del(struct ocelot *ocelot, int port, - unsigned long cookie) +int ocelot_trap_del(struct ocelot *ocelot, int port, unsigned long cookie) { struct ocelot_vcap_block *block_vcap_is2; struct ocelot_vcap_filter *trap; @@ -1529,39 +1647,42 @@ static int ocelot_trap_del(struct ocelot *ocelot, int port, return 0; trap->ingress_port_mask &= ~BIT(port); - if (!trap->ingress_port_mask) + if (!trap->ingress_port_mask) { + list_del(&trap->trap_list); + return ocelot_vcap_filter_del(ocelot, trap); + } return ocelot_vcap_filter_replace(ocelot, trap); } static int ocelot_l2_ptp_trap_add(struct ocelot *ocelot, int port) { - unsigned long l2_cookie = ocelot->num_phys_ports + 1; + unsigned long l2_cookie = OCELOT_VCAP_IS2_L2_PTP_TRAP(ocelot); - return ocelot_trap_add(ocelot, port, l2_cookie, + return ocelot_trap_add(ocelot, port, l2_cookie, true, ocelot_populate_l2_ptp_trap_key); } static int ocelot_l2_ptp_trap_del(struct ocelot *ocelot, int port) { - unsigned long l2_cookie = ocelot->num_phys_ports + 1; + unsigned long l2_cookie = OCELOT_VCAP_IS2_L2_PTP_TRAP(ocelot); return ocelot_trap_del(ocelot, port, l2_cookie); } static int ocelot_ipv4_ptp_trap_add(struct ocelot *ocelot, int port) { - unsigned long ipv4_gen_cookie = ocelot->num_phys_ports + 2; - unsigned long ipv4_ev_cookie = ocelot->num_phys_ports + 3; + unsigned long ipv4_gen_cookie = OCELOT_VCAP_IS2_IPV4_GEN_PTP_TRAP(ocelot); + unsigned long ipv4_ev_cookie = OCELOT_VCAP_IS2_IPV4_EV_PTP_TRAP(ocelot); int err; - err = ocelot_trap_add(ocelot, port, ipv4_ev_cookie, + err = ocelot_trap_add(ocelot, port, ipv4_ev_cookie, true, ocelot_populate_ipv4_ptp_event_trap_key); if (err) return err; - err = ocelot_trap_add(ocelot, port, ipv4_gen_cookie, + err = ocelot_trap_add(ocelot, port, ipv4_gen_cookie, false, ocelot_populate_ipv4_ptp_general_trap_key); if (err) ocelot_trap_del(ocelot, port, ipv4_ev_cookie); @@ -1571,8 +1692,8 @@ static int ocelot_ipv4_ptp_trap_add(struct ocelot *ocelot, int port) static int ocelot_ipv4_ptp_trap_del(struct ocelot *ocelot, int port) { - unsigned long ipv4_gen_cookie = ocelot->num_phys_ports + 2; - unsigned long ipv4_ev_cookie = ocelot->num_phys_ports + 3; + unsigned long ipv4_gen_cookie = OCELOT_VCAP_IS2_IPV4_GEN_PTP_TRAP(ocelot); + unsigned long ipv4_ev_cookie = 
OCELOT_VCAP_IS2_IPV4_EV_PTP_TRAP(ocelot); int err; err = ocelot_trap_del(ocelot, port, ipv4_ev_cookie); @@ -1582,16 +1703,16 @@ static int ocelot_ipv4_ptp_trap_del(struct ocelot *ocelot, int port) static int ocelot_ipv6_ptp_trap_add(struct ocelot *ocelot, int port) { - unsigned long ipv6_gen_cookie = ocelot->num_phys_ports + 4; - unsigned long ipv6_ev_cookie = ocelot->num_phys_ports + 5; + unsigned long ipv6_gen_cookie = OCELOT_VCAP_IS2_IPV6_GEN_PTP_TRAP(ocelot); + unsigned long ipv6_ev_cookie = OCELOT_VCAP_IS2_IPV6_EV_PTP_TRAP(ocelot); int err; - err = ocelot_trap_add(ocelot, port, ipv6_ev_cookie, + err = ocelot_trap_add(ocelot, port, ipv6_ev_cookie, true, ocelot_populate_ipv6_ptp_event_trap_key); if (err) return err; - err = ocelot_trap_add(ocelot, port, ipv6_gen_cookie, + err = ocelot_trap_add(ocelot, port, ipv6_gen_cookie, false, ocelot_populate_ipv6_ptp_general_trap_key); if (err) ocelot_trap_del(ocelot, port, ipv6_ev_cookie); @@ -1601,8 +1722,8 @@ static int ocelot_ipv6_ptp_trap_add(struct ocelot *ocelot, int port) static int ocelot_ipv6_ptp_trap_del(struct ocelot *ocelot, int port) { - unsigned long ipv6_gen_cookie = ocelot->num_phys_ports + 4; - unsigned long ipv6_ev_cookie = ocelot->num_phys_ports + 5; + unsigned long ipv6_gen_cookie = OCELOT_VCAP_IS2_IPV6_GEN_PTP_TRAP(ocelot); + unsigned long ipv6_ev_cookie = OCELOT_VCAP_IS2_IPV6_EV_PTP_TRAP(ocelot); int err; err = ocelot_trap_del(ocelot, port, ipv6_ev_cookie); @@ -1746,28 +1867,36 @@ void ocelot_get_strings(struct ocelot *ocelot, int port, u32 sset, u8 *data) EXPORT_SYMBOL(ocelot_get_strings); /* Caller must hold &ocelot->stats_lock */ -static void ocelot_update_stats(struct ocelot *ocelot) +static int ocelot_port_update_stats(struct ocelot *ocelot, int port) { - int i, j; + unsigned int idx = port * ocelot->num_stats; + struct ocelot_stats_region *region; + int err, j; - for (i = 0; i < ocelot->num_phys_ports; i++) { - /* Configure the port to read the stats from */ - ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(i), SYS_STAT_CFG); + /* Configure the port to read the stats from */ + ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port), SYS_STAT_CFG); - for (j = 0; j < ocelot->num_stats; j++) { - u32 val; - unsigned int idx = i * ocelot->num_stats + j; + list_for_each_entry(region, &ocelot->stats_regions, node) { + err = ocelot_bulk_read_rix(ocelot, SYS_COUNT_RX_OCTETS, + region->offset, region->buf, + region->count); + if (err) + return err; - val = ocelot_read_rix(ocelot, SYS_COUNT_RX_OCTETS, - ocelot->stats_layout[j].offset); + for (j = 0; j < region->count; j++) { + u64 *stat = &ocelot->stats[idx + j]; + u64 val = region->buf[j]; - if (val < (ocelot->stats[idx] & U32_MAX)) - ocelot->stats[idx] += (u64)1 << 32; + if (val < (*stat & U32_MAX)) + *stat += (u64)1 << 32; - ocelot->stats[idx] = (ocelot->stats[idx] & - ~(u64)U32_MAX) + val; + *stat = (*stat & ~(u64)U32_MAX) + val; } + + idx += region->count; } + + return err; } static void ocelot_check_stats_work(struct work_struct *work) @@ -1775,29 +1904,40 @@ static void ocelot_check_stats_work(struct work_struct *work) struct delayed_work *del_work = to_delayed_work(work); struct ocelot *ocelot = container_of(del_work, struct ocelot, stats_work); + int i, err; mutex_lock(&ocelot->stats_lock); - ocelot_update_stats(ocelot); + for (i = 0; i < ocelot->num_phys_ports; i++) { + err = ocelot_port_update_stats(ocelot, i); + if (err) + break; + } mutex_unlock(&ocelot->stats_lock); + if (err) + dev_err(ocelot->dev, "Error %d updating ethtool stats\n", err); + 
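+ /* Re-arm the refresh even after a read error: the periodic read is what extends the 32-bit hardware counters to 64 bits before they can wrap. */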
queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work, OCELOT_STATS_CHECK_DELAY); } void ocelot_get_ethtool_stats(struct ocelot *ocelot, int port, u64 *data) { - int i; + int i, err; mutex_lock(&ocelot->stats_lock); /* check and update now */ - ocelot_update_stats(ocelot); + err = ocelot_port_update_stats(ocelot, port); /* Copy all counters */ for (i = 0; i < ocelot->num_stats; i++) *data++ = ocelot->stats[port * ocelot->num_stats + i]; mutex_unlock(&ocelot->stats_lock); + + if (err) + dev_err(ocelot->dev, "Error %d updating ethtool stats\n", err); } EXPORT_SYMBOL(ocelot_get_ethtool_stats); @@ -1810,6 +1950,41 @@ int ocelot_get_sset_count(struct ocelot *ocelot, int port, int sset) } EXPORT_SYMBOL(ocelot_get_sset_count); +static int ocelot_prepare_stats_regions(struct ocelot *ocelot) +{ + struct ocelot_stats_region *region = NULL; + unsigned int last; + int i; + + INIT_LIST_HEAD(&ocelot->stats_regions); + + for (i = 0; i < ocelot->num_stats; i++) { + if (region && ocelot->stats_layout[i].offset == last + 1) { + region->count++; + } else { + region = devm_kzalloc(ocelot->dev, sizeof(*region), + GFP_KERNEL); + if (!region) + return -ENOMEM; + + region->offset = ocelot->stats_layout[i].offset; + region->count = 1; + list_add_tail(®ion->node, &ocelot->stats_regions); + } + + last = ocelot->stats_layout[i].offset; + } + + list_for_each_entry(region, &ocelot->stats_regions, node) { + region->buf = devm_kcalloc(ocelot->dev, region->count, + sizeof(*region->buf), GFP_KERNEL); + if (!region->buf) + return -ENOMEM; + } + + return 0; +} + int ocelot_get_ts_info(struct ocelot *ocelot, int port, struct ethtool_ts_info *info) { @@ -1843,6 +2018,8 @@ static u32 ocelot_get_bond_mask(struct ocelot *ocelot, struct net_device *bond) u32 mask = 0; int port; + lockdep_assert_held(&ocelot->fwd_domain_lock); + for (port = 0; port < ocelot->num_phys_ports; port++) { struct ocelot_port *ocelot_port = ocelot->ports[port]; @@ -1856,6 +2033,19 @@ static u32 ocelot_get_bond_mask(struct ocelot *ocelot, struct net_device *bond) return mask; } +/* The logical port number of a LAG is equal to the lowest numbered physical + * port ID present in that LAG. It may change if that port ever leaves the LAG. 
+ */ +static int ocelot_bond_get_id(struct ocelot *ocelot, struct net_device *bond) +{ + int bond_mask = ocelot_get_bond_mask(ocelot, bond); + + if (!bond_mask) + return -ENOENT; + + return __ffs(bond_mask); +} + u32 ocelot_get_bridge_fwd_mask(struct ocelot *ocelot, int src_port) { struct ocelot_port *ocelot_port = ocelot->ports[src_port]; @@ -1975,6 +2165,28 @@ void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot, bool joining) } EXPORT_SYMBOL(ocelot_apply_bridge_fwd_mask); +void ocelot_port_set_dsa_8021q_cpu(struct ocelot *ocelot, int port) +{ + u16 vid; + + ocelot->ports[port]->is_dsa_8021q_cpu = true; + + for (vid = OCELOT_RSV_VLAN_RANGE_START; vid < VLAN_N_VID; vid++) + ocelot_vlan_member_add(ocelot, port, vid, true); +} +EXPORT_SYMBOL_GPL(ocelot_port_set_dsa_8021q_cpu); + +void ocelot_port_unset_dsa_8021q_cpu(struct ocelot *ocelot, int port) +{ + u16 vid; + + ocelot->ports[port]->is_dsa_8021q_cpu = false; + + for (vid = OCELOT_RSV_VLAN_RANGE_START; vid < VLAN_N_VID; vid++) + ocelot_vlan_member_del(ocelot, port, vid); +} +EXPORT_SYMBOL_GPL(ocelot_port_unset_dsa_8021q_cpu); + void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state) { struct ocelot_port *ocelot_port = ocelot->ports[port]; @@ -2119,7 +2331,8 @@ static void ocelot_encode_ports_to_mdb(unsigned char *addr, } int ocelot_port_mdb_add(struct ocelot *ocelot, int port, - const struct switchdev_obj_port_mdb *mdb) + const struct switchdev_obj_port_mdb *mdb, + const struct net_device *bridge) { unsigned char addr[ETH_ALEN]; struct ocelot_multicast *mc; @@ -2129,6 +2342,9 @@ int ocelot_port_mdb_add(struct ocelot *ocelot, int port, if (port == ocelot->npi) port = ocelot->num_phys_ports; + if (!vid) + vid = ocelot_vlan_unaware_pvid(ocelot, bridge); + mc = ocelot_multicast_get(ocelot, mdb->addr, vid); if (!mc) { /* New entry */ @@ -2175,7 +2391,8 @@ int ocelot_port_mdb_add(struct ocelot *ocelot, int port, EXPORT_SYMBOL(ocelot_port_mdb_add); int ocelot_port_mdb_del(struct ocelot *ocelot, int port, - const struct switchdev_obj_port_mdb *mdb) + const struct switchdev_obj_port_mdb *mdb, + const struct net_device *bridge) { unsigned char addr[ETH_ALEN]; struct ocelot_multicast *mc; @@ -2185,6 +2402,9 @@ int ocelot_port_mdb_del(struct ocelot *ocelot, int port, if (port == ocelot->npi) port = ocelot->num_phys_ports; + if (!vid) + vid = ocelot_vlan_unaware_pvid(ocelot, bridge); + mc = ocelot_multicast_get(ocelot, mdb->addr, vid); if (!mc) return -ENOENT; @@ -2218,18 +2438,30 @@ int ocelot_port_mdb_del(struct ocelot *ocelot, int port, } EXPORT_SYMBOL(ocelot_port_mdb_del); -void ocelot_port_bridge_join(struct ocelot *ocelot, int port, - struct net_device *bridge) +int ocelot_port_bridge_join(struct ocelot *ocelot, int port, + struct net_device *bridge, int bridge_num, + struct netlink_ext_ack *extack) { struct ocelot_port *ocelot_port = ocelot->ports[port]; + int err; + + err = ocelot_single_vlan_aware_bridge(ocelot, extack); + if (err) + return err; mutex_lock(&ocelot->fwd_domain_lock); ocelot_port->bridge = bridge; + ocelot_port->bridge_num = bridge_num; ocelot_apply_bridge_fwd_mask(ocelot, true); mutex_unlock(&ocelot->fwd_domain_lock); + + if (br_vlan_enabled(bridge)) + return 0; + + return ocelot_add_vlan_unaware_pvid(ocelot, port, bridge); } EXPORT_SYMBOL(ocelot_port_bridge_join); @@ -2240,7 +2472,11 @@ void ocelot_port_bridge_leave(struct ocelot *ocelot, int port, mutex_lock(&ocelot->fwd_domain_lock); + if (!br_vlan_enabled(bridge)) + ocelot_del_vlan_unaware_pvid(ocelot, port, bridge); + ocelot_port->bridge = NULL; + 
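+ /* The port is standalone again, so the bridge number is no longer meaningful. */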
ocelot_port->bridge_num = -1; ocelot_port_set_pvid(ocelot, port, NULL); ocelot_port_manage_port_tag(ocelot, port); @@ -2349,7 +2585,7 @@ static void ocelot_setup_logical_port_ids(struct ocelot *ocelot) bond = ocelot_port->bond; if (bond) { - int lag = __ffs(ocelot_get_bond_mask(ocelot, bond)); + int lag = ocelot_bond_get_id(ocelot, bond); ocelot_rmw_gix(ocelot, ANA_PORT_PORT_CFG_PORTID_VAL(lag), @@ -2364,6 +2600,46 @@ static void ocelot_setup_logical_port_ids(struct ocelot *ocelot) } } +/* Documentation for PORTID_VAL says: + * Logical port number for front port. If port is not a member of a LLAG, + * then PORTID must be set to the physical port number. + * If port is a member of a LLAG, then PORTID must be set to the common + * PORTID_VAL used for all member ports of the LLAG. + * The value must not exceed the number of physical ports on the device. + * + * This means we have little choice but to migrate FDB entries pointing towards + * a logical port when that changes. + */ +static void ocelot_migrate_lag_fdbs(struct ocelot *ocelot, + struct net_device *bond, + int lag) +{ + struct ocelot_lag_fdb *fdb; + int err; + + lockdep_assert_held(&ocelot->fwd_domain_lock); + + list_for_each_entry(fdb, &ocelot->lag_fdbs, list) { + if (fdb->bond != bond) + continue; + + err = ocelot_mact_forget(ocelot, fdb->addr, fdb->vid); + if (err) { + dev_err(ocelot->dev, + "failed to delete LAG %s FDB %pM vid %d: %pe\n", + bond->name, fdb->addr, fdb->vid, ERR_PTR(err)); + } + + err = ocelot_mact_learn(ocelot, lag, fdb->addr, fdb->vid, + ENTRYTYPE_LOCKED); + if (err) { + dev_err(ocelot->dev, + "failed to migrate LAG %s FDB %pM vid %d: %pe\n", + bond->name, fdb->addr, fdb->vid, ERR_PTR(err)); + } + } +} + int ocelot_port_lag_join(struct ocelot *ocelot, int port, struct net_device *bond, struct netdev_lag_upper_info *info) @@ -2388,14 +2664,23 @@ EXPORT_SYMBOL(ocelot_port_lag_join); void ocelot_port_lag_leave(struct ocelot *ocelot, int port, struct net_device *bond) { + int old_lag_id, new_lag_id; + mutex_lock(&ocelot->fwd_domain_lock); + old_lag_id = ocelot_bond_get_id(ocelot, bond); + ocelot->ports[port]->bond = NULL; ocelot_setup_logical_port_ids(ocelot); ocelot_apply_bridge_fwd_mask(ocelot, false); ocelot_set_aggr_pgids(ocelot); + new_lag_id = ocelot_bond_get_id(ocelot, bond); + + if (new_lag_id >= 0 && old_lag_id != new_lag_id) + ocelot_migrate_lag_fdbs(ocelot, bond, new_lag_id); + mutex_unlock(&ocelot->fwd_domain_lock); } EXPORT_SYMBOL(ocelot_port_lag_leave); @@ -2404,13 +2689,83 @@ void ocelot_port_lag_change(struct ocelot *ocelot, int port, bool lag_tx_active) { struct ocelot_port *ocelot_port = ocelot->ports[port]; + mutex_lock(&ocelot->fwd_domain_lock); + ocelot_port->lag_tx_active = lag_tx_active; /* Rebalance the LAGs */ ocelot_set_aggr_pgids(ocelot); + + mutex_unlock(&ocelot->fwd_domain_lock); } EXPORT_SYMBOL(ocelot_port_lag_change); +int ocelot_lag_fdb_add(struct ocelot *ocelot, struct net_device *bond, + const unsigned char *addr, u16 vid, + const struct net_device *bridge) +{ + struct ocelot_lag_fdb *fdb; + int lag, err; + + fdb = kzalloc(sizeof(*fdb), GFP_KERNEL); + if (!fdb) + return -ENOMEM; + + mutex_lock(&ocelot->fwd_domain_lock); + + if (!vid) + vid = ocelot_vlan_unaware_pvid(ocelot, bridge); + + ether_addr_copy(fdb->addr, addr); + fdb->vid = vid; + fdb->bond = bond; + + lag = ocelot_bond_get_id(ocelot, bond); + + err = ocelot_mact_learn(ocelot, lag, addr, vid, ENTRYTYPE_LOCKED); + if (err) { + mutex_unlock(&ocelot->fwd_domain_lock); + kfree(fdb); + return err; + } + + 
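+ /* Keep a software copy of the entry so that it can be re-learned on the new logical port if the LAG's lowest-numbered member changes (see ocelot_migrate_lag_fdbs()). */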
list_add_tail(&fdb->list, &ocelot->lag_fdbs); + mutex_unlock(&ocelot->fwd_domain_lock); + + return 0; +} +EXPORT_SYMBOL_GPL(ocelot_lag_fdb_add); + +int ocelot_lag_fdb_del(struct ocelot *ocelot, struct net_device *bond, + const unsigned char *addr, u16 vid, + const struct net_device *bridge) +{ + struct ocelot_lag_fdb *fdb, *tmp; + + mutex_lock(&ocelot->fwd_domain_lock); + + if (!vid) + vid = ocelot_vlan_unaware_pvid(ocelot, bridge); + + list_for_each_entry_safe(fdb, tmp, &ocelot->lag_fdbs, list) { + if (!ether_addr_equal(fdb->addr, addr) || fdb->vid != vid || + fdb->bond != bond) + continue; + + ocelot_mact_forget(ocelot, addr, vid); + list_del(&fdb->list); + mutex_unlock(&ocelot->fwd_domain_lock); + kfree(fdb); + + return 0; + } + + mutex_unlock(&ocelot->fwd_domain_lock); + + return -ENOENT; +} +EXPORT_SYMBOL_GPL(ocelot_lag_fdb_del); + /* Configure the maximum SDU (L2 payload) on RX to the value specified in @sdu. * The length of VLAN tags is accounted for automatically via DEV_MAC_TAGS_CFG. * In the special case that it's the NPI port that we're configuring, the @@ -2531,6 +2886,9 @@ EXPORT_SYMBOL(ocelot_port_pre_bridge_flags); void ocelot_port_bridge_flags(struct ocelot *ocelot, int port, struct switchdev_brport_flags flags) { + if (port == ocelot->npi) + port = ocelot->num_phys_ports; + if (flags.mask & BR_LEARNING) ocelot_port_set_learning(ocelot, port, !!(flags.val & BR_LEARNING)); @@ -2643,7 +3001,7 @@ static void ocelot_cpu_port_init(struct ocelot *ocelot) /* Configure the CPU port to be VLAN aware */ ocelot_write_gix(ocelot, - ANA_PORT_VLAN_CFG_VLAN_VID(OCELOT_VLAN_UNAWARE_PVID) | + ANA_PORT_VLAN_CFG_VLAN_VID(OCELOT_STANDALONE_PVID) | ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA | ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1), ANA_PORT_VLAN_CFG, cpu); @@ -2705,6 +3063,7 @@ int ocelot_init(struct ocelot *ocelot) INIT_LIST_HEAD(&ocelot->multicast); INIT_LIST_HEAD(&ocelot->pgids); INIT_LIST_HEAD(&ocelot->vlans); + INIT_LIST_HEAD(&ocelot->lag_fdbs); ocelot_detect_features(ocelot); ocelot_mact_init(ocelot); ocelot_vlan_init(ocelot); @@ -2810,6 +3169,13 @@ int ocelot_init(struct ocelot *ocelot) ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL(6), ANA_CPUQ_8021_CFG, i); + ret = ocelot_prepare_stats_regions(ocelot); + if (ret) { + destroy_workqueue(ocelot->stats_queue); + destroy_workqueue(ocelot->owq); + return ret; + } + INIT_DELAYED_WORK(&ocelot->stats_work, ocelot_check_stats_work); queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work, OCELOT_STATS_CHECK_DELAY); diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h index bf4eff6d7086..f8dc0d75eb5d 100644 --- a/drivers/net/ethernet/mscc/ocelot.h +++ b/drivers/net/ethernet/mscc/ocelot.h @@ -21,11 +21,12 @@ #include <soc/mscc/ocelot_dev.h> #include <soc/mscc/ocelot_ana.h> #include <soc/mscc/ocelot_ptp.h> +#include <soc/mscc/ocelot_vcap.h> #include <soc/mscc/ocelot.h> #include "ocelot_rew.h" #include "ocelot_qs.h" -#define OCELOT_VLAN_UNAWARE_PVID 0 +#define OCELOT_STANDALONE_PVID 0 #define OCELOT_BUFFER_CELL_SZ 60 #define OCELOT_STATS_CHECK_DELAY (2 * HZ) @@ -80,6 +81,9 @@ struct ocelot_multicast { struct ocelot_pgid *pgid; }; +int ocelot_bridge_num_find(struct ocelot *ocelot, + const struct net_device *bridge); + int ocelot_port_fdb_do_dump(const unsigned char *addr, u16 vid, bool is_static, void *data); int ocelot_mact_learn(struct ocelot *ocelot, int port, @@ -102,6 +106,11 @@ int ocelot_port_devlink_init(struct ocelot *ocelot, int port, enum devlink_port_flavour flavour); void ocelot_port_devlink_teardown(struct ocelot *ocelot, int 
port); +int ocelot_trap_add(struct ocelot *ocelot, int port, + unsigned long cookie, bool take_ts, + void (*populate)(struct ocelot_vcap_filter *f)); +int ocelot_trap_del(struct ocelot *ocelot, int port, unsigned long cookie); + extern struct notifier_block ocelot_netdevice_nb; extern struct notifier_block ocelot_switchdev_nb; extern struct notifier_block ocelot_switchdev_blocking_nb; diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c index 949858891973..b3f5418dc622 100644 --- a/drivers/net/ethernet/mscc/ocelot_flower.c +++ b/drivers/net/ethernet/mscc/ocelot_flower.c @@ -6,6 +6,7 @@ #include <net/pkt_cls.h> #include <net/tc_act/tc_gact.h> #include <soc/mscc/ocelot_vcap.h> +#include "ocelot_police.h" #include "ocelot_vcap.h" /* Arbitrarily chosen constants for encoding the VCAP block and lookup number @@ -217,6 +218,7 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port, bool ingress, struct flow_cls_offload *f, struct ocelot_vcap_filter *filter) { + const struct flow_action *action = &f->rule->action; struct netlink_ext_ack *extack = f->common.extack; bool allow_missing_goto_target = false; const struct flow_action_entry *a; @@ -244,7 +246,7 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port, filter->goto_target = -1; filter->type = OCELOT_VCAP_FILTER_DUMMY; - flow_action_for_each(i, a, &f->rule->action) { + flow_action_for_each(i, a, action) { switch (a->id) { case FLOW_ACTION_DROP: if (filter->block_id != VCAP_IS2) { @@ -279,6 +281,7 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port, filter->action.cpu_copy_ena = true; filter->action.cpu_qu_num = 0; filter->type = OCELOT_VCAP_FILTER_OFFLOAD; + list_add_tail(&filter->trap_list, &ocelot->traps); break; case FLOW_ACTION_POLICE: if (filter->block_id == PSFP_BLOCK_ID) { @@ -296,11 +299,11 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port, "Last action must be GOTO"); return -EOPNOTSUPP; } - if (a->police.rate_pkt_ps) { - NL_SET_ERR_MSG_MOD(extack, - "QoS offload not support packets per second"); - return -EOPNOTSUPP; - } + + err = ocelot_policer_validate(action, a, extack); + if (err) + return err; + filter->action.police_ena = true; pol_ix = a->hw_index + ocelot->vcap_pol.base; @@ -840,6 +843,8 @@ int ocelot_cls_flower_replace(struct ocelot *ocelot, int port, ret = ocelot_flower_parse(ocelot, port, ingress, f, filter); if (ret) { + if (!list_empty(&filter->trap_list)) + list_del(&filter->trap_list); kfree(filter); return ret; } diff --git a/drivers/net/ethernet/mscc/ocelot_io.c b/drivers/net/ethernet/mscc/ocelot_io.c index 7390fa3980ec..2067382d0ee1 100644 --- a/drivers/net/ethernet/mscc/ocelot_io.c +++ b/drivers/net/ethernet/mscc/ocelot_io.c @@ -10,6 +10,19 @@ #include "ocelot.h" +int __ocelot_bulk_read_ix(struct ocelot *ocelot, u32 reg, u32 offset, void *buf, + int count) +{ + u16 target = reg >> TARGET_OFFSET; + + WARN_ON(!target); + + return regmap_bulk_read(ocelot->targets[target], + ocelot->map[target][reg & REG_MASK] + offset, + buf, count); +} +EXPORT_SYMBOL_GPL(__ocelot_bulk_read_ix); + u32 __ocelot_read_ix(struct ocelot *ocelot, u32 reg, u32 offset) { u16 target = reg >> TARGET_OFFSET; diff --git a/drivers/net/ethernet/mscc/ocelot_mrp.c b/drivers/net/ethernet/mscc/ocelot_mrp.c index 1fa58546abdc..3ccec488a304 100644 --- a/drivers/net/ethernet/mscc/ocelot_mrp.c +++ b/drivers/net/ethernet/mscc/ocelot_mrp.c @@ -60,7 +60,7 @@ static int ocelot_mrp_redirect_add_vcap(struct ocelot *ocelot, int src_port, 
filter->key_type = OCELOT_VCAP_KEY_ETYPE; filter->prio = 1; - filter->id.cookie = src_port; + filter->id.cookie = OCELOT_VCAP_IS2_MRP_REDIRECT(ocelot, src_port); filter->id.tc_offload = false; filter->block_id = VCAP_IS2; filter->type = OCELOT_VCAP_FILTER_OFFLOAD; @@ -77,55 +77,46 @@ static int ocelot_mrp_redirect_add_vcap(struct ocelot *ocelot, int src_port, return err; } -static int ocelot_mrp_copy_add_vcap(struct ocelot *ocelot, int port, - int prio, unsigned long cookie) +static void ocelot_populate_mrp_trap_key(struct ocelot_vcap_filter *filter) { const u8 mrp_mask[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 }; - struct ocelot_vcap_filter *filter; - int err; - - filter = kzalloc(sizeof(*filter), GFP_KERNEL); - if (!filter) - return -ENOMEM; - filter->key_type = OCELOT_VCAP_KEY_ETYPE; - filter->prio = prio; - filter->id.cookie = cookie; - filter->id.tc_offload = false; - filter->block_id = VCAP_IS2; - filter->type = OCELOT_VCAP_FILTER_OFFLOAD; - filter->ingress_port_mask = BIT(port); /* Here it is possible to use either the control or the test dmac, because * the mask doesn't cover the LSB */ ether_addr_copy(filter->key.etype.dmac.value, mrp_test_dmac); ether_addr_copy(filter->key.etype.dmac.mask, mrp_mask); - filter->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY; - filter->action.port_mask = 0x0; - filter->action.cpu_copy_ena = true; - filter->action.cpu_qu_num = OCELOT_MRP_CPUQ; +} - err = ocelot_vcap_filter_add(ocelot, filter, NULL); - if (err) - kfree(filter); +static int ocelot_mrp_trap_add(struct ocelot *ocelot, int port) +{ + unsigned long cookie = OCELOT_VCAP_IS2_MRP_TRAP(ocelot); - return err; + return ocelot_trap_add(ocelot, port, cookie, false, + ocelot_populate_mrp_trap_key); +} + +static int ocelot_mrp_trap_del(struct ocelot *ocelot, int port) +{ + unsigned long cookie = OCELOT_VCAP_IS2_MRP_TRAP(ocelot); + + return ocelot_trap_del(ocelot, port, cookie); } static void ocelot_mrp_save_mac(struct ocelot *ocelot, struct ocelot_port *port) { ocelot_mact_learn(ocelot, PGID_BLACKHOLE, mrp_test_dmac, - OCELOT_VLAN_UNAWARE_PVID, ENTRYTYPE_LOCKED); + OCELOT_STANDALONE_PVID, ENTRYTYPE_LOCKED); ocelot_mact_learn(ocelot, PGID_BLACKHOLE, mrp_control_dmac, - OCELOT_VLAN_UNAWARE_PVID, ENTRYTYPE_LOCKED); + OCELOT_STANDALONE_PVID, ENTRYTYPE_LOCKED); } static void ocelot_mrp_del_mac(struct ocelot *ocelot, struct ocelot_port *port) { - ocelot_mact_forget(ocelot, mrp_test_dmac, OCELOT_VLAN_UNAWARE_PVID); - ocelot_mact_forget(ocelot, mrp_control_dmac, OCELOT_VLAN_UNAWARE_PVID); + ocelot_mact_forget(ocelot, mrp_test_dmac, OCELOT_STANDALONE_PVID); + ocelot_mact_forget(ocelot, mrp_control_dmac, OCELOT_STANDALONE_PVID); } int ocelot_mrp_add(struct ocelot *ocelot, int port, @@ -186,7 +177,7 @@ int ocelot_mrp_add_ring_role(struct ocelot *ocelot, int port, ocelot_mrp_save_mac(ocelot, ocelot_port); if (mrp->ring_role != BR_MRP_RING_ROLE_MRC) - return ocelot_mrp_copy_add_vcap(ocelot, port, 1, port); + return ocelot_mrp_trap_add(ocelot, port); dst_port = ocelot_mrp_find_partner_port(ocelot, ocelot_port); if (dst_port == -1) @@ -196,10 +187,10 @@ int ocelot_mrp_add_ring_role(struct ocelot *ocelot, int port, if (err) return err; - err = ocelot_mrp_copy_add_vcap(ocelot, port, 2, - port + ocelot->num_phys_ports); + err = ocelot_mrp_trap_add(ocelot, port); if (err) { - ocelot_mrp_del_vcap(ocelot, port); + ocelot_mrp_del_vcap(ocelot, + OCELOT_VCAP_IS2_MRP_REDIRECT(ocelot, port)); return err; } @@ -211,7 +202,7 @@ int ocelot_mrp_del_ring_role(struct ocelot *ocelot, int port, const struct switchdev_obj_ring_role_mrp *mrp) { struct
ocelot_port *ocelot_port = ocelot->ports[port]; - int i; + int err, i; if (!ocelot_port) return -EOPNOTSUPP; @@ -222,8 +213,11 @@ int ocelot_mrp_del_ring_role(struct ocelot *ocelot, int port, if (ocelot_port->mrp_ring_id != mrp->ring_id) return 0; - ocelot_mrp_del_vcap(ocelot, port); - ocelot_mrp_del_vcap(ocelot, port + ocelot->num_phys_ports); + err = ocelot_mrp_trap_del(ocelot, port); + if (err) + return err; + + ocelot_mrp_del_vcap(ocelot, OCELOT_VCAP_IS2_MRP_REDIRECT(ocelot, port)); for (i = 0; i < ocelot->num_phys_ports; ++i) { ocelot_port = ocelot->ports[i]; diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c index e271b6225b72..5767e38c0c5a 100644 --- a/drivers/net/ethernet/mscc/ocelot_net.c +++ b/drivers/net/ethernet/mscc/ocelot_net.c @@ -14,6 +14,7 @@ #include <linux/phy/phy.h> #include <net/pkt_cls.h> #include "ocelot.h" +#include "ocelot_police.h" #include "ocelot_vcap.h" #include "ocelot_fdma.h" @@ -258,11 +259,10 @@ static int ocelot_setup_tc_cls_matchall(struct ocelot_port_private *priv, return -EEXIST; } - if (action->police.rate_pkt_ps) { - NL_SET_ERR_MSG_MOD(extack, - "QoS offload not support packets per second"); - return -EOPNOTSUPP; - } + err = ocelot_policer_validate(&f->rule->action, action, + extack); + if (err) + return err; pol.rate = (u32)div_u64(action->police.rate_bytes_ps, 1000) * 8; pol.burst = action->police.burst; @@ -419,7 +419,7 @@ static int ocelot_vlan_vid_del(struct net_device *dev, u16 vid) * with VLAN filtering feature. We need to keep it to receive * untagged traffic. */ - if (vid == OCELOT_VLAN_UNAWARE_PVID) + if (vid == OCELOT_STANDALONE_PVID) return 0; ret = ocelot_vlan_del(ocelot, port, vid); @@ -559,7 +559,7 @@ static int ocelot_mc_unsync(struct net_device *dev, const unsigned char *addr) struct ocelot_mact_work_ctx w; ether_addr_copy(w.forget.addr, addr); - w.forget.vid = OCELOT_VLAN_UNAWARE_PVID; + w.forget.vid = OCELOT_STANDALONE_PVID; w.type = OCELOT_MACT_FORGET; return ocelot_enqueue_mact_action(ocelot, &w); @@ -573,7 +573,7 @@ static int ocelot_mc_sync(struct net_device *dev, const unsigned char *addr) struct ocelot_mact_work_ctx w; ether_addr_copy(w.learn.addr, addr); - w.learn.vid = OCELOT_VLAN_UNAWARE_PVID; + w.learn.vid = OCELOT_STANDALONE_PVID; w.learn.pgid = PGID_CPU; w.learn.entry_type = ENTRYTYPE_LOCKED; w.type = OCELOT_MACT_LEARN; @@ -608,9 +608,9 @@ static int ocelot_port_set_mac_address(struct net_device *dev, void *p) /* Learn the new net device MAC address in the mac table. */ ocelot_mact_learn(ocelot, PGID_CPU, addr->sa_data, - OCELOT_VLAN_UNAWARE_PVID, ENTRYTYPE_LOCKED); + OCELOT_STANDALONE_PVID, ENTRYTYPE_LOCKED); /* Then forget the previous one. 
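* Learning the new address first avoids a window in which the port's MAC address has no MACT entry at all.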
*/ - ocelot_mact_forget(ocelot, dev->dev_addr, OCELOT_VLAN_UNAWARE_PVID); + ocelot_mact_forget(ocelot, dev->dev_addr, OCELOT_STANDALONE_PVID); eth_hw_addr_set(dev, addr->sa_data); return 0; @@ -662,10 +662,11 @@ static int ocelot_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct netlink_ext_ack *extack) { struct ocelot_port_private *priv = netdev_priv(dev); - struct ocelot *ocelot = priv->port.ocelot; + struct ocelot_port *ocelot_port = &priv->port; + struct ocelot *ocelot = ocelot_port->ocelot; int port = priv->chip_port; - return ocelot_fdb_add(ocelot, port, addr, vid); + return ocelot_fdb_add(ocelot, port, addr, vid, ocelot_port->bridge); } static int ocelot_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], @@ -673,10 +674,11 @@ static int ocelot_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], const unsigned char *addr, u16 vid) { struct ocelot_port_private *priv = netdev_priv(dev); - struct ocelot *ocelot = priv->port.ocelot; + struct ocelot_port *ocelot_port = &priv->port; + struct ocelot *ocelot = ocelot_port->ocelot; int port = priv->chip_port; - return ocelot_fdb_del(ocelot, port, addr, vid); + return ocelot_fdb_del(ocelot, port, addr, vid, ocelot_port->bridge); } static int ocelot_port_fdb_dump(struct sk_buff *skb, @@ -988,7 +990,7 @@ static int ocelot_port_obj_add_mdb(struct net_device *dev, struct ocelot *ocelot = ocelot_port->ocelot; int port = priv->chip_port; - return ocelot_port_mdb_add(ocelot, port, mdb); + return ocelot_port_mdb_add(ocelot, port, mdb, ocelot_port->bridge); } static int ocelot_port_obj_del_mdb(struct net_device *dev, @@ -999,7 +1001,7 @@ static int ocelot_port_obj_del_mdb(struct net_device *dev, struct ocelot *ocelot = ocelot_port->ocelot; int port = priv->chip_port; - return ocelot_port_mdb_del(ocelot, port, mdb); + return ocelot_port_mdb_del(ocelot, port, mdb, ocelot_port->bridge); } static int ocelot_port_obj_mrp_add(struct net_device *dev, @@ -1173,6 +1175,33 @@ static int ocelot_switchdev_unsync(struct ocelot *ocelot, int port) return 0; } +static int ocelot_bridge_num_get(struct ocelot *ocelot, + const struct net_device *bridge_dev) +{ + int bridge_num = ocelot_bridge_num_find(ocelot, bridge_dev); + + if (bridge_num < 0) { + /* First port that offloads this bridge */ + bridge_num = find_first_zero_bit(&ocelot->bridges, + ocelot->num_phys_ports); + + set_bit(bridge_num, &ocelot->bridges); + } + + return bridge_num; +} + +static void ocelot_bridge_num_put(struct ocelot *ocelot, + const struct net_device *bridge_dev, + int bridge_num) +{ + /* Check if the bridge is still in use, otherwise it is time + * to clean it up so we can reuse this bridge_num later. 
+ */ + if (!ocelot_bridge_num_find(ocelot, bridge_dev)) + clear_bit(bridge_num, &ocelot->bridges); +} + static int ocelot_netdevice_bridge_join(struct net_device *dev, struct net_device *brport_dev, struct net_device *bridge, @@ -1182,9 +1211,14 @@ static int ocelot_netdevice_bridge_join(struct net_device *dev, struct ocelot_port *ocelot_port = &priv->port; struct ocelot *ocelot = ocelot_port->ocelot; int port = priv->chip_port; - int err; + int bridge_num, err; + + bridge_num = ocelot_bridge_num_get(ocelot, bridge); - ocelot_port_bridge_join(ocelot, port, bridge); + err = ocelot_port_bridge_join(ocelot, port, bridge, bridge_num, + extack); + if (err) + goto err_join; err = switchdev_bridge_port_offload(brport_dev, dev, priv, &ocelot_switchdev_nb, @@ -1205,6 +1239,8 @@ err_switchdev_sync: &ocelot_switchdev_blocking_nb); err_switchdev_offload: ocelot_port_bridge_leave(ocelot, port, bridge); +err_join: + ocelot_bridge_num_put(ocelot, bridge, bridge_num); return err; } @@ -1225,6 +1261,7 @@ static int ocelot_netdevice_bridge_leave(struct net_device *dev, struct ocelot_port_private *priv = netdev_priv(dev); struct ocelot_port *ocelot_port = &priv->port; struct ocelot *ocelot = ocelot_port->ocelot; + int bridge_num = ocelot_port->bridge_num; int port = priv->chip_port; int err; @@ -1233,6 +1270,7 @@ static int ocelot_netdevice_bridge_leave(struct net_device *dev, return err; ocelot_port_bridge_leave(ocelot, port, bridge); + ocelot_bridge_num_put(ocelot, bridge, bridge_num); return 0; } @@ -1700,7 +1738,7 @@ int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target, eth_hw_addr_gen(dev, ocelot->base_mac, port); ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr, - OCELOT_VLAN_UNAWARE_PVID, ENTRYTYPE_LOCKED); + OCELOT_STANDALONE_PVID, ENTRYTYPE_LOCKED); ocelot_init_port(ocelot, port); diff --git a/drivers/net/ethernet/mscc/ocelot_police.c b/drivers/net/ethernet/mscc/ocelot_police.c index 6f5068c1041a..a65606bb84a0 100644 --- a/drivers/net/ethernet/mscc/ocelot_police.c +++ b/drivers/net/ethernet/mscc/ocelot_police.c @@ -154,6 +154,47 @@ int qos_policer_conf_set(struct ocelot *ocelot, int port, u32 pol_ix, return 0; } +int ocelot_policer_validate(const struct flow_action *action, + const struct flow_action_entry *a, + struct netlink_ext_ack *extack) +{ + if (a->police.exceed.act_id != FLOW_ACTION_DROP) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when exceed action is not drop"); + return -EOPNOTSUPP; + } + + if (a->police.notexceed.act_id != FLOW_ACTION_PIPE && + a->police.notexceed.act_id != FLOW_ACTION_ACCEPT) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when conform action is not pipe or ok"); + return -EOPNOTSUPP; + } + + if (a->police.notexceed.act_id == FLOW_ACTION_ACCEPT && + !flow_action_is_last_entry(action, a)) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when conform action is ok, but police action is not last"); + return -EOPNOTSUPP; + } + + if (a->police.peakrate_bytes_ps || + a->police.avrate || a->police.overhead) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when peakrate/avrate/overhead is configured"); + return -EOPNOTSUPP; + } + + if (a->police.rate_pkt_ps) { + NL_SET_ERR_MSG_MOD(extack, + "Offload does not support packets per second"); + return -EOPNOTSUPP; + } + + return 0; +} +EXPORT_SYMBOL(ocelot_policer_validate); + int ocelot_port_policer_add(struct ocelot *ocelot, int port, struct ocelot_policer *pol) { diff --git a/drivers/net/ethernet/mscc/ocelot_police.h b/drivers/net/ethernet/mscc/ocelot_police.h index 
7adb05f71999..7552995f8b17 100644 --- a/drivers/net/ethernet/mscc/ocelot_police.h +++ b/drivers/net/ethernet/mscc/ocelot_police.h @@ -8,6 +8,7 @@ #define _MSCC_OCELOT_POLICE_H_ #include "ocelot.h" +#include <net/flow_offload.h> enum mscc_qos_rate_mode { MSCC_QOS_RATE_MODE_DISABLED, /* Policer/shaper disabled */ @@ -33,4 +34,8 @@ struct qos_policer_conf { int qos_policer_conf_set(struct ocelot *ocelot, int port, u32 pol_ix, struct qos_policer_conf *conf); +int ocelot_policer_validate(const struct flow_action *action, + const struct flow_action_entry *a, + struct netlink_ext_ack *extack); + #endif /* _MSCC_OCELOT_POLICE_H_ */ diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.c b/drivers/net/ethernet/mscc/ocelot_vcap.c index d3544413a8a4..b976d480aeb3 100644 --- a/drivers/net/ethernet/mscc/ocelot_vcap.c +++ b/drivers/net/ethernet/mscc/ocelot_vcap.c @@ -564,9 +564,9 @@ static void is2_entry_set(struct ocelot *ocelot, int ix, val = proto.value[0]; msk = proto.mask[0]; type = IS2_TYPE_IP_UDP_TCP; - if (msk == 0xff && (val == 6 || val == 17)) { + if (msk == 0xff && (val == IPPROTO_TCP || val == IPPROTO_UDP)) { /* UDP/TCP protocol match */ - tcp = (val == 6 ? + tcp = (val == IPPROTO_TCP ? OCELOT_VCAP_BIT_1 : OCELOT_VCAP_BIT_0); vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_TCP, tcp); vcap_key_l4_port_set(vcap, &data, @@ -1195,18 +1195,16 @@ static void ocelot_vcap_block_remove_filter(struct ocelot *ocelot, struct ocelot_vcap_block *block, struct ocelot_vcap_filter *filter) { - struct ocelot_vcap_filter *tmp; - struct list_head *pos, *q; + struct ocelot_vcap_filter *tmp, *n; - list_for_each_safe(pos, q, &block->rules) { - tmp = list_entry(pos, struct ocelot_vcap_filter, list); + list_for_each_entry_safe(tmp, n, &block->rules, list) { if (ocelot_vcap_filter_equal(filter, tmp)) { if (tmp->block_id == VCAP_IS2 && tmp->action.police_ena) ocelot_vcap_policer_del(ocelot, tmp->action.pol_ix); - list_del(pos); + list_del(&tmp->list); kfree(tmp); } } @@ -1401,6 +1399,7 @@ int ocelot_vcap_init(struct ocelot *ocelot) } INIT_LIST_HEAD(&ocelot->dummy_rules); + INIT_LIST_HEAD(&ocelot->traps); INIT_LIST_HEAD(&ocelot->vcap_pol.pol_list); return 0; diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile index 9cff3d48acbc..9c72b43c1581 100644 --- a/drivers/net/ethernet/netronome/nfp/Makefile +++ b/drivers/net/ethernet/netronome/nfp/Makefile @@ -31,6 +31,7 @@ nfp-objs := \ nfp_net_main.o \ nfp_net_repr.o \ nfp_net_sriov.o \ + nfp_net_xsk.o \ nfp_netvf_main.o \ nfp_port.o \ nfp_shared_buf.o \ diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c index a3242b36e216..2c40a3959f94 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/action.c +++ b/drivers/net/ethernet/netronome/nfp/flower/action.c @@ -922,6 +922,51 @@ nfp_fl_pedit(const struct flow_action_entry *act, } } +static struct nfp_fl_meter *nfp_fl_meter(char *act_data) +{ + size_t act_size = sizeof(struct nfp_fl_meter); + struct nfp_fl_meter *meter_act; + + meter_act = (struct nfp_fl_meter *)act_data; + + memset(meter_act, 0, act_size); + + meter_act->head.jump_id = NFP_FL_ACTION_OPCODE_METER; + meter_act->head.len_lw = act_size >> NFP_FL_LW_SIZ; + + return meter_act; +} + +static int +nfp_flower_meter_action(struct nfp_app *app, + const struct flow_action_entry *action, + struct nfp_fl_payload *nfp_fl, int *a_len, + struct net_device *netdev, + struct netlink_ext_ack *extack) +{ + struct nfp_fl_meter *fl_meter; + u32 meter_id; + + if (*a_len + 
sizeof(struct nfp_fl_meter) > NFP_FL_MAX_A_SIZ) { + NL_SET_ERR_MSG_MOD(extack, + "unsupported offload: meter action size beyond the allowed maximum"); + return -EOPNOTSUPP; + } + + meter_id = action->hw_index; + if (!nfp_flower_search_meter_entry(app, meter_id)) { + NL_SET_ERR_MSG_MOD(extack, + "cannot offload flow table with unsupported police action"); + return -EOPNOTSUPP; + } + + fl_meter = nfp_fl_meter(&nfp_fl->action_data[*a_len]); + *a_len += sizeof(struct nfp_fl_meter); + fl_meter->meter_id = cpu_to_be32(meter_id); + + return 0; +} + static int nfp_flower_output_action(struct nfp_app *app, const struct flow_action_entry *act, @@ -985,6 +1030,7 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act, struct nfp_flower_pedit_acts *set_act, bool *pkt_host, struct netlink_ext_ack *extack, int act_idx) { + struct nfp_flower_priv *fl_priv = app->priv; struct nfp_fl_pre_tunnel *pre_tun; struct nfp_fl_set_tun *set_tun; struct nfp_fl_push_vlan *psh_v; @@ -1149,6 +1195,18 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act, *pkt_host = true; break; + case FLOW_ACTION_POLICE: + if (!(fl_priv->flower_ext_feats & NFP_FL_FEATS_QOS_METER)) { + NL_SET_ERR_MSG_MOD(extack, + "unsupported offload: unsupported police action in action list"); + return -EOPNOTSUPP; + } + + err = nfp_flower_meter_action(app, act, nfp_fl, a_len, netdev, + extack); + if (err) + return err; + break; default: /* Currently we do not handle any other actions. */ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported action in action list"); diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h index 784292b16290..68e8a2fb1a29 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h @@ -85,6 +85,7 @@ #define NFP_FL_ACTION_OPCODE_SET_TCP 15 #define NFP_FL_ACTION_OPCODE_PRE_LAG 16 #define NFP_FL_ACTION_OPCODE_PRE_TUNNEL 17 +#define NFP_FL_ACTION_OPCODE_METER 24 #define NFP_FL_ACTION_OPCODE_PUSH_GENEVE 26 #define NFP_FL_ACTION_OPCODE_NUM 32 @@ -260,6 +261,12 @@ struct nfp_fl_set_mpls { __be32 lse; }; +struct nfp_fl_meter { + struct nfp_fl_act_head head; + __be16 reserved; + __be32 meter_id; +}; + /* Metadata with L2 (1W/4B) * ---------------------------------------------------------------- * 3 2 1 @@ -723,6 +730,8 @@ static inline bool nfp_fl_is_netdev_to_offload(struct net_device *netdev) return true; if (netif_is_gretap(netdev)) return true; + if (netif_is_ip6gretap(netdev)) + return true; return false; } diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index 917c450a7aad..fa902ce2dd82 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -12,7 +12,9 @@ #include <linux/rhashtable.h> #include <linux/time64.h> #include <linux/types.h> +#include <net/flow_offload.h> #include <net/pkt_cls.h> +#include <net/pkt_sched.h> #include <net/tcp.h> #include <linux/workqueue.h> #include <linux/idr.h> @@ -48,6 +50,7 @@ struct nfp_app; #define NFP_FL_FEATS_IPV6_TUN BIT(7) #define NFP_FL_FEATS_VLAN_QINQ BIT(8) #define NFP_FL_FEATS_QOS_PPS BIT(9) +#define NFP_FL_FEATS_QOS_METER BIT(10) #define NFP_FL_FEATS_HOST_ACK BIT(31) #define NFP_FL_ENABLE_FLOW_MERGE BIT(0) @@ -63,7 +66,8 @@ struct nfp_app; NFP_FL_FEATS_PRE_TUN_RULES | \ NFP_FL_FEATS_IPV6_TUN | \ NFP_FL_FEATS_VLAN_QINQ | \ - NFP_FL_FEATS_QOS_PPS) + NFP_FL_FEATS_QOS_PPS | \ +
NFP_FL_FEATS_QOS_METER) struct nfp_fl_mask_id { struct circ_buf mask_id_free_list; @@ -191,6 +195,8 @@ struct nfp_fl_internal_ports { * @qos_stats_work: Workqueue for qos stats processing * @qos_rate_limiters: Current active qos rate limiters * @qos_stats_lock: Lock on qos stats updates + * @meter_stats_lock: Lock on meter stats updates + * @meter_table: Hash table used to store the meter table * @pre_tun_rule_cnt: Number of pre-tunnel rules offloaded * @merge_table: Hash table to store merged flows * @ct_zone_table: Hash table used to store the different zones @@ -228,6 +234,8 @@ struct nfp_flower_priv { struct delayed_work qos_stats_work; unsigned int qos_rate_limiters; spinlock_t qos_stats_lock; /* Protect the qos stats */ + struct mutex meter_stats_lock; /* Protect the meter stats */ + struct rhashtable meter_table; int pre_tun_rule_cnt; struct rhashtable merge_table; struct rhashtable ct_zone_table; @@ -374,6 +382,31 @@ struct nfp_fl_stats_frame { __be64 stats_cookie; }; +struct nfp_meter_stats_entry { + u64 pkts; + u64 bytes; + u64 drops; +}; + +struct nfp_meter_entry { + struct rhash_head ht_node; + u32 meter_id; + bool bps; + u32 rate; + u32 burst; + u64 used; + struct nfp_meter_stats { + u64 update; + struct nfp_meter_stats_entry curr; + struct nfp_meter_stats_entry prev; + } stats; +}; + +enum nfp_meter_op { + NFP_METER_ADD, + NFP_METER_DEL, +}; + static inline bool nfp_flower_internal_port_can_offload(struct nfp_app *app, struct net_device *netdev) @@ -569,4 +602,18 @@ nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow, void nfp_flower_update_merge_stats(struct nfp_app *app, struct nfp_fl_payload *sub_flow); + +int nfp_setup_tc_act_offload(struct nfp_app *app, + struct flow_offload_action *fl_act); +int nfp_init_meter_table(struct nfp_app *app); +void nfp_flower_stats_meter_request_all(struct nfp_flower_priv *fl_priv); +void nfp_act_stats_reply(struct nfp_app *app, void *pmsg); +int nfp_flower_offload_one_police(struct nfp_app *app, bool ingress, + bool pps, u32 id, u32 rate, u32 burst); +int nfp_flower_setup_meter_entry(struct nfp_app *app, + const struct flow_action_entry *action, + enum nfp_meter_op op, + u32 meter_id); +struct nfp_meter_entry * +nfp_flower_search_meter_entry(struct nfp_app *app, u32 meter_id); #endif diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index f97eff5afd12..92e8ade4854e 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -1861,6 +1861,20 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct Qdisc *sch, str return 0; } +static int +nfp_setup_tc_no_dev(struct nfp_app *app, enum tc_setup_type type, void *data) +{ + if (!data) + return -EOPNOTSUPP; + + switch (type) { + case TC_SETUP_ACT: + return nfp_setup_tc_act_offload(app, data); + default: + return -EOPNOTSUPP; + } +} + int nfp_flower_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv, enum tc_setup_type type, void *type_data, @@ -1868,7 +1882,7 @@ nfp_flower_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch, void * void (*cleanup)(struct flow_block_cb *block_cb)) { if (!netdev) - return -EOPNOTSUPP; + return nfp_setup_tc_no_dev(cb_priv, type, data); if (!nfp_fl_is_netdev_to_offload(netdev)) return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c index 784c6dbf8bc4..3206ba83b1aa 100644 --- 
a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c @@ -1,7 +1,11 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) /* Copyright (C) 2019 Netronome Systems, Inc. */ +#include <linux/hash.h> +#include <linux/hashtable.h> +#include <linux/jhash.h> #include <linux/math64.h> +#include <linux/vmalloc.h> #include <net/pkt_cls.h> #include <net/pkt_sched.h> @@ -11,10 +15,14 @@ #define NFP_FL_QOS_UPDATE msecs_to_jiffies(1000) #define NFP_FL_QOS_PPS BIT(15) +#define NFP_FL_QOS_METER BIT(10) struct nfp_police_cfg_head { __be32 flags_opts; - __be32 port; + union { + __be32 meter_id; + __be32 port; + }; }; enum NFP_FL_QOS_TYPES { @@ -46,7 +54,15 @@ enum NFP_FL_QOS_TYPES { * | Committed Information Rate | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * Word[0](FLag options): - * [15] p(pps) 1 for pps ,0 for bps + * [15] p(pps) 1 for pps, 0 for bps + * + * Meter control message + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * +-------------------------------+-+---+-----+-+---------+-+---+-+ + * | Reserved |p| Y |TYPE |E|TSHFV |P| PC|R| + * +-------------------------------+-+---+-----+-+---------+-+---+-+ + * | meter ID | + * +-------------------------------+-------------------------------+ * */ struct nfp_police_config { @@ -67,6 +83,74 @@ struct nfp_police_stats_reply { __be64 drop_pkts; }; +int nfp_flower_offload_one_police(struct nfp_app *app, bool ingress, + bool pps, u32 id, u32 rate, u32 burst) +{ + struct nfp_police_config *config; + struct sk_buff *skb; + + skb = nfp_flower_cmsg_alloc(app, sizeof(struct nfp_police_config), + NFP_FLOWER_CMSG_TYPE_QOS_MOD, GFP_KERNEL); + if (!skb) + return -ENOMEM; + + config = nfp_flower_cmsg_get_data(skb); + memset(config, 0, sizeof(struct nfp_police_config)); + if (pps) + config->head.flags_opts |= cpu_to_be32(NFP_FL_QOS_PPS); + if (!ingress) + config->head.flags_opts |= cpu_to_be32(NFP_FL_QOS_METER); + + if (ingress) + config->head.port = cpu_to_be32(id); + else + config->head.meter_id = cpu_to_be32(id); + + config->bkt_tkn_p = cpu_to_be32(burst); + config->bkt_tkn_c = cpu_to_be32(burst); + config->pbs = cpu_to_be32(burst); + config->cbs = cpu_to_be32(burst); + config->pir = cpu_to_be32(rate); + config->cir = cpu_to_be32(rate); + nfp_ctrl_tx(app->ctrl, skb); + + return 0; +} + +static int nfp_policer_validate(const struct flow_action *action, + const struct flow_action_entry *act, + struct netlink_ext_ack *extack) +{ + if (act->police.exceed.act_id != FLOW_ACTION_DROP) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when exceed action is not drop"); + return -EOPNOTSUPP; + } + + if (act->police.notexceed.act_id != FLOW_ACTION_PIPE && + act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when conform action is not pipe or ok"); + return -EOPNOTSUPP; + } + + if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT && + !flow_action_is_last_entry(action, act)) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when conform action is ok, but action is not last"); + return -EOPNOTSUPP; + } + + if (act->police.peakrate_bytes_ps || + act->police.avrate || act->police.overhead) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when peakrate/avrate/overhead is configured"); + return -EOPNOTSUPP; + } + + return 0; +} + static int nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev, struct tc_cls_matchall_offload *flow, @@ -77,15 +161,15 @@ 
nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev, struct nfp_flower_priv *fl_priv = app->priv; struct flow_action_entry *action = NULL; struct nfp_flower_repr_priv *repr_priv; - struct nfp_police_config *config; u32 netdev_port_id, i; struct nfp_repr *repr; - struct sk_buff *skb; bool pps_support; u32 bps_num = 0; u32 pps_num = 0; u32 burst; + bool pps; u64 rate; + int err; if (!nfp_netdev_is_nfp_repr(netdev)) { NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on higher level port"); @@ -132,6 +216,11 @@ nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev, "unsupported offload: qos rate limit offload requires police action"); return -EOPNOTSUPP; } + + err = nfp_policer_validate(&flow->rule->action, action, extack); + if (err) + return err; + if (action->police.rate_bytes_ps > 0) { if (bps_num++) { NL_SET_ERR_MSG_MOD(extack, @@ -169,23 +258,12 @@ nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev, } if (rate != 0) { - skb = nfp_flower_cmsg_alloc(repr->app, sizeof(struct nfp_police_config), - NFP_FLOWER_CMSG_TYPE_QOS_MOD, GFP_KERNEL); - if (!skb) - return -ENOMEM; - - config = nfp_flower_cmsg_get_data(skb); - memset(config, 0, sizeof(struct nfp_police_config)); + pps = false; if (action->police.rate_pkt_ps > 0) - config->head.flags_opts = cpu_to_be32(NFP_FL_QOS_PPS); - config->head.port = cpu_to_be32(netdev_port_id); - config->bkt_tkn_p = cpu_to_be32(burst); - config->bkt_tkn_c = cpu_to_be32(burst); - config->pbs = cpu_to_be32(burst); - config->cbs = cpu_to_be32(burst); - config->pir = cpu_to_be32(rate); - config->cir = cpu_to_be32(rate); - nfp_ctrl_tx(repr->app->ctrl, skb); + pps = true; + nfp_flower_offload_one_police(repr->app, true, + pps, netdev_port_id, + rate, burst); } } repr_priv->qos_table.netdev_port_id = netdev_port_id; @@ -266,6 +344,9 @@ void nfp_flower_stats_rlim_reply(struct nfp_app *app, struct sk_buff *skb) u32 netdev_port_id; msg = nfp_flower_cmsg_get_data(skb); + if (be32_to_cpu(msg->head.flags_opts) & NFP_FL_QOS_METER) + return nfp_act_stats_reply(app, msg); + netdev_port_id = be32_to_cpu(msg->head.port); rcu_read_lock(); netdev = nfp_app_dev_get(app, netdev_port_id, NULL); @@ -297,7 +378,7 @@ exit_unlock_rcu: static void nfp_flower_stats_rlim_request(struct nfp_flower_priv *fl_priv, - u32 netdev_port_id) + u32 id, bool ingress) { struct nfp_police_cfg_head *head; struct sk_buff *skb; @@ -308,10 +389,15 @@ nfp_flower_stats_rlim_request(struct nfp_flower_priv *fl_priv, GFP_ATOMIC); if (!skb) return; - head = nfp_flower_cmsg_get_data(skb); + memset(head, 0, sizeof(struct nfp_police_cfg_head)); - head->port = cpu_to_be32(netdev_port_id); + if (ingress) { + head->port = cpu_to_be32(id); + } else { + head->flags_opts = cpu_to_be32(NFP_FL_QOS_METER); + head->meter_id = cpu_to_be32(id); + } nfp_ctrl_tx(fl_priv->app->ctrl, skb); } @@ -341,7 +427,8 @@ nfp_flower_stats_rlim_request_all(struct nfp_flower_priv *fl_priv) if (!netdev_port_id) continue; - nfp_flower_stats_rlim_request(fl_priv, netdev_port_id); + nfp_flower_stats_rlim_request(fl_priv, + netdev_port_id, true); } } @@ -359,6 +446,8 @@ static void update_stats_cache(struct work_struct *work) qos_stats_work); nfp_flower_stats_rlim_request_all(fl_priv); + nfp_flower_stats_meter_request_all(fl_priv); + schedule_delayed_work(&fl_priv->qos_stats_work, NFP_FL_QOS_UPDATE); } @@ -406,6 +495,9 @@ void nfp_flower_qos_init(struct nfp_app *app) struct nfp_flower_priv *fl_priv = app->priv; 
spin_lock_init(&fl_priv->qos_stats_lock); + mutex_init(&fl_priv->meter_stats_lock); + nfp_init_meter_table(app); + INIT_DELAYED_WORK(&fl_priv->qos_stats_work, &update_stats_cache); } @@ -441,3 +533,333 @@ int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev, return -EOPNOTSUPP; } } + +/* offload tc action, currently only for tc police */ + +static const struct rhashtable_params stats_meter_table_params = { + .key_offset = offsetof(struct nfp_meter_entry, meter_id), + .head_offset = offsetof(struct nfp_meter_entry, ht_node), + .key_len = sizeof(u32), +}; + +struct nfp_meter_entry * +nfp_flower_search_meter_entry(struct nfp_app *app, u32 meter_id) +{ + struct nfp_flower_priv *priv = app->priv; + + return rhashtable_lookup_fast(&priv->meter_table, &meter_id, + stats_meter_table_params); +} + +static struct nfp_meter_entry * +nfp_flower_add_meter_entry(struct nfp_app *app, u32 meter_id) +{ + struct nfp_meter_entry *meter_entry = NULL; + struct nfp_flower_priv *priv = app->priv; + + meter_entry = rhashtable_lookup_fast(&priv->meter_table, + &meter_id, + stats_meter_table_params); + if (meter_entry) + return meter_entry; + + meter_entry = kzalloc(sizeof(*meter_entry), GFP_KERNEL); + if (!meter_entry) + return NULL; + + meter_entry->meter_id = meter_id; + meter_entry->used = jiffies; + if (rhashtable_insert_fast(&priv->meter_table, &meter_entry->ht_node, + stats_meter_table_params)) { + kfree(meter_entry); + return NULL; + } + + priv->qos_rate_limiters++; + if (priv->qos_rate_limiters == 1) + schedule_delayed_work(&priv->qos_stats_work, + NFP_FL_QOS_UPDATE); + + return meter_entry; +} + +static void nfp_flower_del_meter_entry(struct nfp_app *app, u32 meter_id) +{ + struct nfp_meter_entry *meter_entry = NULL; + struct nfp_flower_priv *priv = app->priv; + + meter_entry = rhashtable_lookup_fast(&priv->meter_table, &meter_id, + stats_meter_table_params); + if (!meter_entry) + return; + + rhashtable_remove_fast(&priv->meter_table, + &meter_entry->ht_node, + stats_meter_table_params); + kfree(meter_entry); + priv->qos_rate_limiters--; + if (!priv->qos_rate_limiters) + cancel_delayed_work_sync(&priv->qos_stats_work); +} + +int nfp_flower_setup_meter_entry(struct nfp_app *app, + const struct flow_action_entry *action, + enum nfp_meter_op op, + u32 meter_id) +{ + struct nfp_flower_priv *fl_priv = app->priv; + struct nfp_meter_entry *meter_entry = NULL; + int err = 0; + + mutex_lock(&fl_priv->meter_stats_lock); + + switch (op) { + case NFP_METER_DEL: + nfp_flower_del_meter_entry(app, meter_id); + goto exit_unlock; + case NFP_METER_ADD: + meter_entry = nfp_flower_add_meter_entry(app, meter_id); + break; + default: + err = -EOPNOTSUPP; + goto exit_unlock; + } + + if (!meter_entry) { + err = -ENOMEM; + goto exit_unlock; + } + + if (action->police.rate_bytes_ps > 0) { + meter_entry->bps = true; + meter_entry->rate = action->police.rate_bytes_ps; + meter_entry->burst = action->police.burst; + } else { + meter_entry->bps = false; + meter_entry->rate = action->police.rate_pkt_ps; + meter_entry->burst = action->police.burst_pkt; + } + +exit_unlock: + mutex_unlock(&fl_priv->meter_stats_lock); + return err; +} + +int nfp_init_meter_table(struct nfp_app *app) +{ + struct nfp_flower_priv *priv = app->priv; + + return rhashtable_init(&priv->meter_table, &stats_meter_table_params); +} + +void +nfp_flower_stats_meter_request_all(struct nfp_flower_priv *fl_priv) +{ + struct nfp_meter_entry *meter_entry = NULL; + struct rhashtable_iter iter; + + mutex_lock(&fl_priv->meter_stats_lock); + 
rhashtable_walk_enter(&fl_priv->meter_table, &iter); + rhashtable_walk_start(&iter); + + while ((meter_entry = rhashtable_walk_next(&iter)) != NULL) { + if (IS_ERR(meter_entry)) + continue; + nfp_flower_stats_rlim_request(fl_priv, + meter_entry->meter_id, false); + } + + rhashtable_walk_stop(&iter); + rhashtable_walk_exit(&iter); + mutex_unlock(&fl_priv->meter_stats_lock); +} + +static int +nfp_act_install_actions(struct nfp_app *app, struct flow_offload_action *fl_act, + struct netlink_ext_ack *extack) +{ + struct flow_action_entry *paction = &fl_act->action.entries[0]; + u32 action_num = fl_act->action.num_entries; + struct nfp_flower_priv *fl_priv = app->priv; + struct flow_action_entry *action = NULL; + u32 burst, i, meter_id; + bool pps_support, pps; + bool add = false; + u64 rate; + + pps_support = !!(fl_priv->flower_ext_feats & NFP_FL_FEATS_QOS_PPS); + + for (i = 0; i < action_num; i++) { + /* set qos associated data for this interface */ + action = paction + i; + if (action->id != FLOW_ACTION_POLICE) { + NL_SET_ERR_MSG_MOD(extack, + "unsupported offload: qos rate limit offload requires police action"); + continue; + } + if (action->police.rate_bytes_ps > 0) { + rate = action->police.rate_bytes_ps; + burst = action->police.burst; + } else if (action->police.rate_pkt_ps > 0 && pps_support) { + rate = action->police.rate_pkt_ps; + burst = action->police.burst_pkt; + } else { + NL_SET_ERR_MSG_MOD(extack, + "unsupported offload: unsupported qos rate limit"); + continue; + } + + if (rate != 0) { + meter_id = action->hw_index; + if (nfp_flower_setup_meter_entry(app, action, NFP_METER_ADD, meter_id)) + continue; + + pps = false; + if (action->police.rate_pkt_ps > 0) + pps = true; + nfp_flower_offload_one_police(app, false, pps, meter_id, + rate, burst); + add = true; + } + } + + return add ?
0 : -EOPNOTSUPP; +} + +static int +nfp_act_remove_actions(struct nfp_app *app, struct flow_offload_action *fl_act, + struct netlink_ext_ack *extack) +{ + struct nfp_meter_entry *meter_entry = NULL; + struct nfp_police_config *config; + struct sk_buff *skb; + u32 meter_id; + bool pps; + + /* delete qos associated data for this interface */ + if (fl_act->id != FLOW_ACTION_POLICE) { + NL_SET_ERR_MSG_MOD(extack, + "unsupported offload: qos rate limit offload requires police action"); + return -EOPNOTSUPP; + } + + meter_id = fl_act->index; + meter_entry = nfp_flower_search_meter_entry(app, meter_id); + if (!meter_entry) { + NL_SET_ERR_MSG_MOD(extack, + "no meter entry found when deleting the action index"); + return -ENOENT; + } + pps = !meter_entry->bps; + + skb = nfp_flower_cmsg_alloc(app, sizeof(struct nfp_police_config), + NFP_FLOWER_CMSG_TYPE_QOS_DEL, GFP_KERNEL); + if (!skb) + return -ENOMEM; + + config = nfp_flower_cmsg_get_data(skb); + memset(config, 0, sizeof(struct nfp_police_config)); + config->head.flags_opts = cpu_to_be32(NFP_FL_QOS_METER); + config->head.meter_id = cpu_to_be32(meter_id); + if (pps) + config->head.flags_opts |= cpu_to_be32(NFP_FL_QOS_PPS); + + nfp_ctrl_tx(app->ctrl, skb); + nfp_flower_setup_meter_entry(app, NULL, NFP_METER_DEL, meter_id); + + return 0; +} + +void +nfp_act_stats_reply(struct nfp_app *app, void *pmsg) +{ + struct nfp_flower_priv *fl_priv = app->priv; + struct nfp_meter_entry *meter_entry = NULL; + struct nfp_police_stats_reply *msg = pmsg; + u32 meter_id; + + meter_id = be32_to_cpu(msg->head.meter_id); + mutex_lock(&fl_priv->meter_stats_lock); + + meter_entry = nfp_flower_search_meter_entry(app, meter_id); + if (!meter_entry) + goto exit_unlock; + + meter_entry->stats.curr.pkts = be64_to_cpu(msg->pass_pkts) + + be64_to_cpu(msg->drop_pkts); + meter_entry->stats.curr.bytes = be64_to_cpu(msg->pass_bytes) + + be64_to_cpu(msg->drop_bytes); + meter_entry->stats.curr.drops = be64_to_cpu(msg->drop_pkts); + if (!meter_entry->stats.update) { + meter_entry->stats.prev.pkts = meter_entry->stats.curr.pkts; + meter_entry->stats.prev.bytes = meter_entry->stats.curr.bytes; + meter_entry->stats.prev.drops = meter_entry->stats.curr.drops; + } + + meter_entry->stats.update = jiffies; + +exit_unlock: + mutex_unlock(&fl_priv->meter_stats_lock); +} + +static int +nfp_act_stats_actions(struct nfp_app *app, struct flow_offload_action *fl_act, + struct netlink_ext_ack *extack) +{ + struct nfp_flower_priv *fl_priv = app->priv; + struct nfp_meter_entry *meter_entry = NULL; + u64 diff_bytes, diff_pkts, diff_drops; + int err = 0; + + if (fl_act->id != FLOW_ACTION_POLICE) { + NL_SET_ERR_MSG_MOD(extack, + "unsupported offload: qos rate limit offload requires police action"); + return -EOPNOTSUPP; + } + + mutex_lock(&fl_priv->meter_stats_lock); + meter_entry = nfp_flower_search_meter_entry(app, fl_act->index); + if (!meter_entry) { + err = -ENOENT; + goto exit_unlock; + } + diff_pkts = meter_entry->stats.curr.pkts > meter_entry->stats.prev.pkts ? + meter_entry->stats.curr.pkts - meter_entry->stats.prev.pkts : 0; + diff_bytes = meter_entry->stats.curr.bytes > meter_entry->stats.prev.bytes ? + meter_entry->stats.curr.bytes - meter_entry->stats.prev.bytes : 0; + diff_drops = meter_entry->stats.curr.drops > meter_entry->stats.prev.drops ?
+ meter_entry->stats.curr.drops - meter_entry->stats.prev.drops : 0; + + flow_stats_update(&fl_act->stats, diff_bytes, diff_pkts, diff_drops, + meter_entry->stats.update, + FLOW_ACTION_HW_STATS_DELAYED); + + meter_entry->stats.prev.pkts = meter_entry->stats.curr.pkts; + meter_entry->stats.prev.bytes = meter_entry->stats.curr.bytes; + meter_entry->stats.prev.drops = meter_entry->stats.curr.drops; + +exit_unlock: + mutex_unlock(&fl_priv->meter_stats_lock); + return err; +} + +int nfp_setup_tc_act_offload(struct nfp_app *app, + struct flow_offload_action *fl_act) +{ + struct netlink_ext_ack *extack = fl_act->extack; + struct nfp_flower_priv *fl_priv = app->priv; + + if (!(fl_priv->flower_ext_feats & NFP_FL_FEATS_QOS_METER)) + return -EOPNOTSUPP; + + switch (fl_act->command) { + case FLOW_ACT_REPLACE: + return nfp_act_install_actions(app, fl_act, extack); + case FLOW_ACT_DESTROY: + return nfp_act_remove_actions(app, fl_act, extack); + case FLOW_ACT_STATS: + return nfp_act_stats_actions(app, fl_act, extack); + default: + return -EOPNOTSUPP; + } +} diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c index cd50db779dda..c71bd555f482 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c @@ -922,8 +922,8 @@ nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev, int port, bool mod) { struct nfp_flower_priv *priv = app->priv; - int ida_idx = NFP_MAX_MAC_INDEX, err; struct nfp_tun_offloaded_mac *entry; + int ida_idx = -1, err; u16 nfp_mac_idx = 0; entry = nfp_tunnel_lookup_offloaded_macs(app, netdev->dev_addr); @@ -942,8 +942,8 @@ nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev, if (!nfp_mac_idx) { /* Assign a global index if non-repr or MAC is now shared. */ if (entry || !port) { - ida_idx = ida_simple_get(&priv->tun.mac_off_ids, 0, - NFP_MAX_MAC_INDEX, GFP_KERNEL); + ida_idx = ida_alloc_max(&priv->tun.mac_off_ids, + NFP_MAX_MAC_INDEX, GFP_KERNEL); if (ida_idx < 0) return ida_idx; @@ -997,8 +997,8 @@ err_remove_hash: err_free_entry: kfree(entry); err_free_ida: - if (ida_idx != NFP_MAX_MAC_INDEX) - ida_simple_remove(&priv->tun.mac_off_ids, ida_idx); + if (ida_idx != -1) + ida_free(&priv->tun.mac_off_ids, ida_idx); return err; } @@ -1061,7 +1061,7 @@ nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev, } ida_idx = nfp_tunnel_get_ida_from_global_mac_idx(entry->index); - ida_simple_remove(&priv->tun.mac_off_ids, ida_idx); + ida_free(&priv->tun.mac_off_ids, ida_idx); entry->index = nfp_mac_idx; return 0; } @@ -1081,7 +1081,7 @@ nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev, /* If MAC has global ID then extract and free the ida entry. 
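The tunnel_conf.c hunks above move from the deprecated ida_simple_get()/ida_simple_remove() helpers to ida_alloc_max()/ida_free(), and change the "nothing allocated" sentinel from NFP_MAX_MAC_INDEX to -1, a value an IDA can never hand out. A minimal sketch of the new calling convention (kernel context assumed; the limit and names are illustrative):

#include <linux/gfp.h>
#include <linux/idr.h>

static DEFINE_IDA(example_ida);

/* ida_alloc_max() returns an id in [0, max] or a negative errno;
 * note the max argument is inclusive, unlike ida_simple_get()'s
 * exclusive 'end' argument.
 */
static int example_get_id(void)
{
	return ida_alloc_max(&example_ida, 1023, GFP_KERNEL);
}

static void example_put_id(int id)
{
	if (id >= 0)	/* -1 doubles as the "never allocated" marker */
		ida_free(&example_ida, id);
}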
*/ if (nfp_tunnel_is_mac_idx_global(nfp_mac_idx)) { ida_idx = nfp_tunnel_get_ida_from_global_mac_idx(entry->index); - ida_simple_remove(&priv->tun.mac_off_ids, ida_idx); + ida_free(&priv->tun.mac_off_ids, ida_idx); } kfree(entry); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h index 0b1865e9f0b5..437a19722fcf 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h @@ -109,6 +109,7 @@ struct nfp_eth_table_port; struct nfp_net; struct nfp_net_r_vector; struct nfp_port; +struct xsk_buff_pool; /* Convenience macro for wrapping descriptor index on ring size */ #define D_IDX(ring, idx) ((idx) & ((ring)->cnt - 1)) @@ -170,11 +171,14 @@ struct nfp_net_tx_desc { * struct nfp_net_tx_buf - software TX buffer descriptor * @skb: normal ring, sk_buff associated with this buffer * @frag: XDP ring, page frag associated with this buffer + * @xdp: XSK buffer pool handle (for AF_XDP) * @dma_addr: DMA mapping address of the buffer * @fidx: Fragment index (-1 for the head and [0..nr_frags-1] for frags) * @pkt_cnt: Number of packets to be produced out of the skb associated * with this buffer (valid only on the head's buffer). * Will be 1 for all non-TSO packets. + * @is_xsk_tx: Flag if buffer is a RX buffer after a XDP_TX action and not a + * buffer from the TX queue (for AF_XDP). * @real_len: Number of bytes which to be produced out of the skb (valid only * on the head's buffer). Equal to skb->len for non-TSO packets. */ @@ -182,10 +186,18 @@ struct nfp_net_tx_buf { union { struct sk_buff *skb; void *frag; + struct xdp_buff *xdp; }; dma_addr_t dma_addr; - short int fidx; - u16 pkt_cnt; + union { + struct { + short int fidx; + u16 pkt_cnt; + }; + struct { + bool is_xsk_tx; + }; + }; u32 real_len; }; @@ -315,6 +327,16 @@ struct nfp_net_rx_buf { }; /** + * struct nfp_net_xsk_rx_buf - software RX XSK buffer descriptor + * @dma_addr: DMA mapping address of the buffer + * @xdp: XSK buffer pool handle (for AF_XDP) + */ +struct nfp_net_xsk_rx_buf { + dma_addr_t dma_addr; + struct xdp_buff *xdp; +}; + +/** * struct nfp_net_rx_ring - RX ring structure * @r_vec: Back pointer to ring vector structure * @cnt: Size of the queue in number of descriptors @@ -324,6 +346,7 @@ struct nfp_net_rx_buf { * @fl_qcidx: Queue Controller Peripheral (QCP) queue index for the freelist * @qcp_fl: Pointer to base of the QCP freelist queue * @rxbufs: Array of transmitted FL/RX buffers + * @xsk_rxbufs: Array of transmitted FL/RX buffers (for AF_XDP) * @rxds: Virtual address of FL/RX ring in host memory * @xdp_rxq: RX-ring info avail for XDP * @dma: DMA address of the FL/RX ring @@ -342,6 +365,7 @@ struct nfp_net_rx_ring { u8 __iomem *qcp_fl; struct nfp_net_rx_buf *rxbufs; + struct nfp_net_xsk_rx_buf *xsk_rxbufs; struct nfp_net_rx_desc *rxds; struct xdp_rxq_info xdp_rxq; @@ -360,6 +384,7 @@ struct nfp_net_rx_ring { * @tx_ring: Pointer to TX ring * @rx_ring: Pointer to RX ring * @xdp_ring: Pointer to an extra TX ring for XDP + * @xsk_pool: XSK buffer pool active on vector queue pair (for AF_XDP) * @irq_entry: MSI-X table entry (use for talking to the device) * @event_ctr: Number of interrupt * @rx_dim: Dynamic interrupt moderation structure for RX @@ -431,6 +456,7 @@ struct nfp_net_r_vector { u64 rx_replace_buf_alloc_fail; struct nfp_net_tx_ring *xdp_ring; + struct xsk_buff_pool *xsk_pool; struct u64_stats_sync tx_sync; u64 tx_pkts; @@ -501,6 +527,7 @@ struct nfp_stat_pair { * @num_stack_tx_rings: Number of TX rings used by the stack 
(not XDP) * @num_rx_rings: Currently configured number of RX rings * @mtu: Device MTU + * @xsk_pools: XSK buffer pools, @max_r_vecs in size (for AF_XDP). */ struct nfp_net_dp { struct device *dev; @@ -537,6 +564,8 @@ struct nfp_net_dp { unsigned int num_rx_rings; unsigned int mtu; + + struct xsk_buff_pool **xsk_pools; }; /** @@ -965,6 +994,7 @@ int nfp_net_mbox_reconfig_and_unlock(struct nfp_net *nn, u32 mbox_cmd); void nfp_net_mbox_reconfig_post(struct nfp_net *nn, u32 update); int nfp_net_mbox_reconfig_wait_posted(struct nfp_net *nn); +void nfp_net_irq_unmask(struct nfp_net *nn, unsigned int entry_nr); unsigned int nfp_net_irqs_alloc(struct pci_dev *pdev, struct msix_entry *irq_entries, unsigned int min_irqs, unsigned int want_irqs); @@ -973,6 +1003,19 @@ void nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries, unsigned int n); +void nfp_net_tx_xmit_more_flush(struct nfp_net_tx_ring *tx_ring); +void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget); + +bool +nfp_net_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta, + void *data, void *pkt, unsigned int pkt_len, int meta_len); + +void nfp_net_rx_csum(const struct nfp_net_dp *dp, + struct nfp_net_r_vector *r_vec, + const struct nfp_net_rx_desc *rxd, + const struct nfp_meta_parsed *meta, + struct sk_buff *skb); + struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn); int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *new, struct netlink_ext_ack *extack); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 79257ec41987..67a87fdf7564 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -38,6 +38,7 @@ #include <net/tls.h> #include <net/vxlan.h> +#include <net/xdp_sock_drv.h> #include "nfpcore/nfp_nsp.h" #include "ccm.h" @@ -45,6 +46,7 @@ #include "nfp_net_ctrl.h" #include "nfp_net.h" #include "nfp_net_sriov.h" +#include "nfp_net_xsk.h" #include "nfp_port.h" #include "crypto/crypto.h" #include "crypto/fw.h" @@ -381,7 +383,7 @@ int nfp_net_mbox_reconfig_and_unlock(struct nfp_net *nn, u32 mbox_cmd) * * Clear the ICR for the IRQ entry. 
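The nfp_net_tx_buf changes above overlay the skb, frag and xdp pointers in one union, and {fidx, pkt_cnt} with is_xsk_tx in another: a TX buffer is only ever owned by one datapath at a time, so the AF_XDP fields cost no extra memory. A compact illustration of why the overlay is safe (anonymous unions, C11; field types simplified relative to the driver):

#include <stdbool.h>
#include <stdio.h>

struct tx_buf {
	union {			/* exactly one owner at a time */
		void *skb;	/* normal ring */
		void *frag;	/* XDP ring */
		void *xdp;	/* AF_XDP ring */
	};
	unsigned long dma_addr;
	union {
		struct {	/* skb/frag datapaths */
			short fidx;
			unsigned short pkt_cnt;
		};
		bool is_xsk_tx;	/* AF_XDP datapath */
	};
	unsigned int real_len;
};

int main(void)
{
	/* same footprint regardless of which datapath owns the buffer */
	printf("sizeof(struct tx_buf) = %zu\n", sizeof(struct tx_buf));
	return 0;
}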
*/ -static void nfp_net_irq_unmask(struct nfp_net *nn, unsigned int entry_nr) +void nfp_net_irq_unmask(struct nfp_net *nn, unsigned int entry_nr) { nn_writeb(nn, NFP_NET_CFG_ICR(entry_nr), NFP_NET_CFG_ICR_UNMASKED); nn_pci_flush(nn); @@ -923,7 +925,7 @@ static void nfp_net_tls_tx_undo(struct sk_buff *skb, u64 tls_handle) #endif } -static void nfp_net_tx_xmit_more_flush(struct nfp_net_tx_ring *tx_ring) +void nfp_net_tx_xmit_more_flush(struct nfp_net_tx_ring *tx_ring) { wmb(); nfp_qcp_wr_ptr_add(tx_ring->qcp_q, tx_ring->wr_ptr_add); @@ -1142,7 +1144,7 @@ err_flush: * @tx_ring: TX ring structure * @budget: NAPI budget (only used as bool to determine if in NAPI context) */ -static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget) +void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget) { struct nfp_net_r_vector *r_vec = tx_ring->r_vec; struct nfp_net_dp *dp = &r_vec->nfp_net->dp; @@ -1315,6 +1317,9 @@ nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring) tx_ring->rd_p++; } + if (tx_ring->is_xdp) + nfp_net_xsk_tx_bufs_free(tx_ring); + memset(tx_ring->txds, 0, tx_ring->size); tx_ring->wr_p = 0; tx_ring->rd_p = 0; @@ -1338,24 +1343,43 @@ static void nfp_net_tx_timeout(struct net_device *netdev, unsigned int txqueue) /* Receive processing */ static unsigned int -nfp_net_calc_fl_bufsz(struct nfp_net_dp *dp) +nfp_net_calc_fl_bufsz_data(struct nfp_net_dp *dp) { - unsigned int fl_bufsz; + unsigned int fl_bufsz = 0; - fl_bufsz = NFP_NET_RX_BUF_HEADROOM; - fl_bufsz += dp->rx_dma_off; if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC) fl_bufsz += NFP_NET_MAX_PREPEND; else fl_bufsz += dp->rx_offset; fl_bufsz += ETH_HLEN + VLAN_HLEN * 2 + dp->mtu; + return fl_bufsz; +} + +static unsigned int nfp_net_calc_fl_bufsz(struct nfp_net_dp *dp) +{ + unsigned int fl_bufsz; + + fl_bufsz = NFP_NET_RX_BUF_HEADROOM; + fl_bufsz += dp->rx_dma_off; + fl_bufsz += nfp_net_calc_fl_bufsz_data(dp); + fl_bufsz = SKB_DATA_ALIGN(fl_bufsz); fl_bufsz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); return fl_bufsz; } +static unsigned int nfp_net_calc_fl_bufsz_xsk(struct nfp_net_dp *dp) +{ + unsigned int fl_bufsz; + + fl_bufsz = XDP_PACKET_HEADROOM; + fl_bufsz += nfp_net_calc_fl_bufsz_data(dp); + + return fl_bufsz; +} + static void nfp_net_free_frag(void *frag, bool xdp) { @@ -1484,10 +1508,14 @@ static void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring) /* Move the empty entry to the end of the list */ wr_idx = D_IDX(rx_ring, rx_ring->wr_p); last_idx = rx_ring->cnt - 1; - rx_ring->rxbufs[wr_idx].dma_addr = rx_ring->rxbufs[last_idx].dma_addr; - rx_ring->rxbufs[wr_idx].frag = rx_ring->rxbufs[last_idx].frag; - rx_ring->rxbufs[last_idx].dma_addr = 0; - rx_ring->rxbufs[last_idx].frag = NULL; + if (rx_ring->r_vec->xsk_pool) { + rx_ring->xsk_rxbufs[wr_idx] = rx_ring->xsk_rxbufs[last_idx]; + memset(&rx_ring->xsk_rxbufs[last_idx], 0, + sizeof(*rx_ring->xsk_rxbufs)); + } else { + rx_ring->rxbufs[wr_idx] = rx_ring->rxbufs[last_idx]; + memset(&rx_ring->rxbufs[last_idx], 0, sizeof(*rx_ring->rxbufs)); + } memset(rx_ring->rxds, 0, rx_ring->size); rx_ring->wr_p = 0; @@ -1509,6 +1537,9 @@ nfp_net_rx_ring_bufs_free(struct nfp_net_dp *dp, { unsigned int i; + if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx)) + return; + for (i = 0; i < rx_ring->cnt - 1; i++) { /* NULL skb can only happen when initial filling of the ring * fails to allocate enough buffers and calls here to free @@ -1536,6 +1567,9 @@ nfp_net_rx_ring_bufs_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_buf *rxbufs; 
unsigned int i; + if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx)) + return 0; + rxbufs = rx_ring->rxbufs; for (i = 0; i < rx_ring->cnt - 1; i++) { @@ -1560,6 +1594,9 @@ nfp_net_rx_ring_fill_freelist(struct nfp_net_dp *dp, { unsigned int i; + if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx)) + return nfp_net_xsk_rx_ring_fill_freelist(rx_ring); + for (i = 0; i < rx_ring->cnt - 1; i++) nfp_net_rx_give_one(dp, rx_ring, rx_ring->rxbufs[i].frag, rx_ring->rxbufs[i].dma_addr); @@ -1587,10 +1624,10 @@ static int nfp_net_rx_csum_has_errors(u16 flags) * @meta: Parsed metadata prepend * @skb: Pointer to SKB */ -static void nfp_net_rx_csum(struct nfp_net_dp *dp, - struct nfp_net_r_vector *r_vec, - struct nfp_net_rx_desc *rxd, - struct nfp_meta_parsed *meta, struct sk_buff *skb) +void nfp_net_rx_csum(const struct nfp_net_dp *dp, + struct nfp_net_r_vector *r_vec, + const struct nfp_net_rx_desc *rxd, + const struct nfp_meta_parsed *meta, struct sk_buff *skb) { skb_checksum_none_assert(skb); @@ -1668,7 +1705,7 @@ nfp_net_set_hash_desc(struct net_device *netdev, struct nfp_meta_parsed *meta, &rx_hash->hash); } -static bool +bool nfp_net_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta, void *data, void *pkt, unsigned int pkt_len, int meta_len) { @@ -2540,7 +2577,11 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring) if (dp->netdev) xdp_rxq_info_unreg(&rx_ring->xdp_rxq); - kvfree(rx_ring->rxbufs); + + if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx)) + kvfree(rx_ring->xsk_rxbufs); + else + kvfree(rx_ring->rxbufs); if (rx_ring->rxds) dma_free_coherent(dp->dev, rx_ring->size, @@ -2548,6 +2589,7 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring) rx_ring->cnt = 0; rx_ring->rxbufs = NULL; + rx_ring->xsk_rxbufs = NULL; rx_ring->rxds = NULL; rx_ring->dma = 0; rx_ring->size = 0; @@ -2563,13 +2605,28 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring) static int nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring) { + enum xdp_mem_type mem_type; + size_t rxbuf_sw_desc_sz; int err; + if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx)) { + mem_type = MEM_TYPE_XSK_BUFF_POOL; + rxbuf_sw_desc_sz = sizeof(*rx_ring->xsk_rxbufs); + } else { + mem_type = MEM_TYPE_PAGE_ORDER0; + rxbuf_sw_desc_sz = sizeof(*rx_ring->rxbufs); + } + if (dp->netdev) { err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, dp->netdev, rx_ring->idx, rx_ring->r_vec->napi.napi_id); if (err < 0) return err; + + err = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, + mem_type, NULL); + if (err) + goto err_alloc; } rx_ring->cnt = dp->rxd_cnt; @@ -2583,10 +2640,17 @@ nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring) goto err_alloc; } - rx_ring->rxbufs = kvcalloc(rx_ring->cnt, sizeof(*rx_ring->rxbufs), - GFP_KERNEL); - if (!rx_ring->rxbufs) - goto err_alloc; + if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx)) { + rx_ring->xsk_rxbufs = kvcalloc(rx_ring->cnt, rxbuf_sw_desc_sz, + GFP_KERNEL); + if (!rx_ring->xsk_rxbufs) + goto err_alloc; + } else { + rx_ring->rxbufs = kvcalloc(rx_ring->cnt, rxbuf_sw_desc_sz, + GFP_KERNEL); + if (!rx_ring->rxbufs) + goto err_alloc; + } return 0; @@ -2639,6 +2703,27 @@ static void nfp_net_rx_rings_free(struct nfp_net_dp *dp) } static void +nfp_net_napi_add(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec, int idx) +{ + if (dp->netdev) + netif_napi_add(dp->netdev, &r_vec->napi, + nfp_net_has_xsk_pool_slow(dp, idx) ? 
+ nfp_net_xsk_poll : nfp_net_poll, + NAPI_POLL_WEIGHT); + else + tasklet_enable(&r_vec->tasklet); +} + +static void +nfp_net_napi_del(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec) +{ + if (dp->netdev) + netif_napi_del(&r_vec->napi); + else + tasklet_disable(&r_vec->tasklet); +} + +static void nfp_net_vector_assign_rings(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec, int idx) { @@ -2648,6 +2733,17 @@ nfp_net_vector_assign_rings(struct nfp_net_dp *dp, r_vec->xdp_ring = idx < dp->num_tx_rings - dp->num_stack_tx_rings ? &dp->tx_rings[dp->num_stack_tx_rings + idx] : NULL; + + if (nfp_net_has_xsk_pool_slow(dp, idx) || r_vec->xsk_pool) { + r_vec->xsk_pool = dp->xdp_prog ? dp->xsk_pools[idx] : NULL; + + if (r_vec->xsk_pool) + xsk_pool_set_rxq_info(r_vec->xsk_pool, + &r_vec->rx_ring->xdp_rxq); + + nfp_net_napi_del(dp, r_vec); + nfp_net_napi_add(dp, r_vec, idx); + } } static int @@ -2656,23 +2752,14 @@ nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, { int err; - /* Setup NAPI */ - if (nn->dp.netdev) - netif_napi_add(nn->dp.netdev, &r_vec->napi, - nfp_net_poll, NAPI_POLL_WEIGHT); - else - tasklet_enable(&r_vec->tasklet); + nfp_net_napi_add(&nn->dp, r_vec, idx); snprintf(r_vec->name, sizeof(r_vec->name), "%s-rxtx-%d", nfp_net_name(nn), idx); err = request_irq(r_vec->irq_vector, r_vec->handler, 0, r_vec->name, r_vec); if (err) { - if (nn->dp.netdev) - netif_napi_del(&r_vec->napi); - else - tasklet_disable(&r_vec->tasklet); - + nfp_net_napi_del(&nn->dp, r_vec); nn_err(nn, "Error requesting IRQ %d\n", r_vec->irq_vector); return err; } @@ -2690,11 +2777,7 @@ static void nfp_net_cleanup_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec) { irq_set_affinity_hint(r_vec->irq_vector, NULL); - if (nn->dp.netdev) - netif_napi_del(&r_vec->napi); - else - tasklet_disable(&r_vec->tasklet); - + nfp_net_napi_del(&nn->dp, r_vec); free_irq(r_vec->irq_vector, r_vec); } @@ -2808,8 +2891,11 @@ static void nfp_net_clear_config_and_disable(struct nfp_net *nn) if (err) nn_err(nn, "Could not disable device: %d\n", err); - for (r = 0; r < nn->dp.num_rx_rings; r++) + for (r = 0; r < nn->dp.num_rx_rings; r++) { nfp_net_rx_ring_reset(&nn->dp.rx_rings[r]); + if (nfp_net_has_xsk_pool_slow(&nn->dp, nn->dp.rx_rings[r].idx)) + nfp_net_xsk_rx_bufs_free(&nn->dp.rx_rings[r]); + } for (r = 0; r < nn->dp.num_tx_rings; r++) nfp_net_tx_ring_reset(&nn->dp, &nn->dp.tx_rings[r]); for (r = 0; r < nn->dp.num_r_vecs; r++) @@ -3296,6 +3382,15 @@ struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn) *new = nn->dp; + new->xsk_pools = kmemdup(new->xsk_pools, + array_size(nn->max_r_vecs, + sizeof(new->xsk_pools)), + GFP_KERNEL); + if (!new->xsk_pools) { + kfree(new); + return NULL; + } + /* Clear things which need to be recomputed */ new->fl_bufsz = 0; new->tx_rings = NULL; @@ -3306,10 +3401,18 @@ struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn) return new; } +static void nfp_net_free_dp(struct nfp_net_dp *dp) +{ + kfree(dp->xsk_pools); + kfree(dp); +} + static int nfp_net_check_config(struct nfp_net *nn, struct nfp_net_dp *dp, struct netlink_ext_ack *extack) { + unsigned int r, xsk_min_fl_bufsz; + /* XDP-enabled tests */ if (!dp->xdp_prog) return 0; @@ -3322,6 +3425,18 @@ nfp_net_check_config(struct nfp_net *nn, struct nfp_net_dp *dp, return -EINVAL; } + xsk_min_fl_bufsz = nfp_net_calc_fl_bufsz_xsk(dp); + for (r = 0; r < nn->max_r_vecs; r++) { + if (!dp->xsk_pools[r]) + continue; + + if (xsk_pool_get_rx_frame_size(dp->xsk_pools[r]) < xsk_min_fl_bufsz) { + NL_SET_ERR_MSG_MOD(extack, + "XSK 
buffer pool chunk size too small"); + return -EINVAL; + } + } + return 0; } @@ -3389,7 +3504,7 @@ int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *dp, nfp_net_open_stack(nn); exit_free_dp: - kfree(dp); + nfp_net_free_dp(dp); return err; @@ -3398,7 +3513,7 @@ err_free_rx: err_cleanup_vecs: for (r = dp->num_r_vecs - 1; r >= nn->dp.num_r_vecs; r--) nfp_net_cleanup_vector(nn, &nn->r_vecs[r]); - kfree(dp); + nfp_net_free_dp(dp); return err; } @@ -3716,6 +3831,9 @@ static int nfp_net_xdp(struct net_device *netdev, struct netdev_bpf *xdp) return nfp_net_xdp_setup_drv(nn, xdp); case XDP_SETUP_PROG_HW: return nfp_net_xdp_setup_hw(nn, xdp); + case XDP_SETUP_XSK_POOL: + return nfp_net_xsk_setup_pool(netdev, xdp->xsk.pool, + xdp->xsk.queue_id); default: return nfp_app_bpf(nn->app, nn, xdp); } @@ -3766,6 +3884,7 @@ const struct net_device_ops nfp_net_netdev_ops = { .ndo_features_check = nfp_net_features_check, .ndo_get_phys_port_name = nfp_net_get_phys_port_name, .ndo_bpf = nfp_net_xdp, + .ndo_xsk_wakeup = nfp_net_xsk_wakeup, .ndo_get_devlink_port = nfp_devlink_get_devlink_port, }; @@ -3893,6 +4012,14 @@ nfp_net_alloc(struct pci_dev *pdev, void __iomem *ctrl_bar, bool needs_netdev, nn->dp.num_r_vecs = max(nn->dp.num_tx_rings, nn->dp.num_rx_rings); nn->dp.num_r_vecs = min_t(unsigned int, nn->dp.num_r_vecs, num_online_cpus()); + nn->max_r_vecs = nn->dp.num_r_vecs; + + nn->dp.xsk_pools = kcalloc(nn->max_r_vecs, sizeof(nn->dp.xsk_pools), + GFP_KERNEL); + if (!nn->dp.xsk_pools) { + err = -ENOMEM; + goto err_free_nn; + } nn->dp.txd_cnt = NFP_NET_TX_DESCS_DEFAULT; nn->dp.rxd_cnt = NFP_NET_RX_DESCS_DEFAULT; @@ -3932,6 +4059,7 @@ void nfp_net_free(struct nfp_net *nn) WARN_ON(timer_pending(&nn->reconfig_timer) || nn->reconfig_posted); nfp_ccm_mbox_free(nn); + kfree(nn->dp.xsk_pools); if (nn->dp.netdev) free_netdev(nn->dp.netdev); else diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c index 553c708694e8..2c74b3c5aef9 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c @@ -42,13 +42,19 @@ static int nfp_rx_q_show(struct seq_file *file, void *data) seq_printf(file, "%04d: 0x%08x 0x%08x", i, rxd->vals[0], rxd->vals[1]); - frag = READ_ONCE(rx_ring->rxbufs[i].frag); - if (frag) - seq_printf(file, " frag=%p", frag); - - if (rx_ring->rxbufs[i].dma_addr) - seq_printf(file, " dma_addr=%pad", - &rx_ring->rxbufs[i].dma_addr); + if (!r_vec->xsk_pool) { + frag = READ_ONCE(rx_ring->rxbufs[i].frag); + if (frag) + seq_printf(file, " frag=%p", frag); + + if (rx_ring->rxbufs[i].dma_addr) + seq_printf(file, " dma_addr=%pad", + &rx_ring->rxbufs[i].dma_addr); + } else { + if (rx_ring->xsk_rxbufs[i].dma_addr) + seq_printf(file, " dma_addr=%pad", + &rx_ring->xsk_rxbufs[i].dma_addr); + } if (i == rx_ring->rd_p % rxd_cnt) seq_puts(file, " H_RD "); @@ -103,20 +109,23 @@ static int nfp_tx_q_show(struct seq_file *file, void *data) tx_ring->rd_p, tx_ring->wr_p, d_rd_p, d_wr_p); for (i = 0; i < txd_cnt; i++) { + struct xdp_buff *xdp; + struct sk_buff *skb; + txd = &tx_ring->txds[i]; seq_printf(file, "%04d: 0x%08x 0x%08x 0x%08x 0x%08x", i, txd->vals[0], txd->vals[1], txd->vals[2], txd->vals[3]); - if (tx_ring == r_vec->tx_ring) { - struct sk_buff *skb = READ_ONCE(tx_ring->txbufs[i].skb); - + if (!tx_ring->is_xdp) { + skb = READ_ONCE(tx_ring->txbufs[i].skb); if (skb) seq_printf(file, " skb->head=%p skb->data=%p", skb->head, skb->data); } else { - seq_printf(file, " frag=%p", - 
READ_ONCE(tx_ring->txbufs[i].frag)); + xdp = READ_ONCE(tx_ring->txbufs[i].xdp); + if (xdp) + seq_printf(file, " xdp->data=%p", xdp->data); } if (tx_ring->txbufs[i].dma_addr) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_xsk.c b/drivers/net/ethernet/netronome/nfp/nfp_net_xsk.c new file mode 100644 index 000000000000..ab7243277efa --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_xsk.c @@ -0,0 +1,592 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2018 Netronome Systems, Inc */ +/* Copyright (C) 2021 Corigine, Inc */ + +#include <linux/dma-direction.h> +#include <linux/dma-mapping.h> +#include <linux/slab.h> +#include <net/xdp_sock_drv.h> +#include <trace/events/xdp.h> + +#include "nfp_app.h" +#include "nfp_net.h" +#include "nfp_net_xsk.h" + +static int nfp_net_tx_space(struct nfp_net_tx_ring *tx_ring) +{ + return tx_ring->cnt - tx_ring->wr_p + tx_ring->rd_p - 1; +} + +static void nfp_net_xsk_tx_free(struct nfp_net_tx_buf *txbuf) +{ + xsk_buff_free(txbuf->xdp); + + txbuf->dma_addr = 0; + txbuf->xdp = NULL; +} + +void nfp_net_xsk_tx_bufs_free(struct nfp_net_tx_ring *tx_ring) +{ + struct nfp_net_tx_buf *txbuf; + unsigned int idx; + + while (tx_ring->rd_p != tx_ring->wr_p) { + idx = D_IDX(tx_ring, tx_ring->rd_p); + txbuf = &tx_ring->txbufs[idx]; + + txbuf->real_len = 0; + + tx_ring->qcp_rd_p++; + tx_ring->rd_p++; + + if (tx_ring->r_vec->xsk_pool) { + if (txbuf->is_xsk_tx) + nfp_net_xsk_tx_free(txbuf); + + xsk_tx_completed(tx_ring->r_vec->xsk_pool, 1); + } + } +} + +static bool nfp_net_xsk_complete(struct nfp_net_tx_ring *tx_ring) +{ + struct nfp_net_r_vector *r_vec = tx_ring->r_vec; + u32 done_pkts = 0, done_bytes = 0, reused = 0; + bool done_all; + int idx, todo; + u32 qcp_rd_p; + + if (tx_ring->wr_p == tx_ring->rd_p) + return true; + + /* Work out how many descriptors have been transmitted. 
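nfp_net_xsk_complete() below derives the number of finished descriptors by subtracting free-running queue pointers and masking with D_IDX(); because the ring size is a power of two, the masked difference stays correct even when the 32-bit counters wrap. A standalone sketch of the idiom (ring size illustrative):

#include <stdint.h>
#include <stdio.h>

#define RING_CNT 512u			/* must be a power of two */
#define D_IDX(idx) ((idx) & (RING_CNT - 1))

int main(void)
{
	/* the counters increment forever; only the masked distance
	 * matters, so wrap-around is harmless while the distance
	 * stays below RING_CNT
	 */
	uint32_t hw_rd = 5, sw_rd = 0xfffffffbu;

	printf("completed descs: %u\n", D_IDX(hw_rd - sw_rd));	/* 10 */
	return 0;
}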
*/ + qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q); + + if (qcp_rd_p == tx_ring->qcp_rd_p) + return true; + + todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p); + + done_all = todo <= NFP_NET_XDP_MAX_COMPLETE; + todo = min(todo, NFP_NET_XDP_MAX_COMPLETE); + + tx_ring->qcp_rd_p = D_IDX(tx_ring, tx_ring->qcp_rd_p + todo); + + done_pkts = todo; + while (todo--) { + struct nfp_net_tx_buf *txbuf; + + idx = D_IDX(tx_ring, tx_ring->rd_p); + tx_ring->rd_p++; + + txbuf = &tx_ring->txbufs[idx]; + if (unlikely(!txbuf->real_len)) + continue; + + done_bytes += txbuf->real_len; + txbuf->real_len = 0; + + if (txbuf->is_xsk_tx) { + nfp_net_xsk_tx_free(txbuf); + reused++; + } + } + + u64_stats_update_begin(&r_vec->tx_sync); + r_vec->tx_bytes += done_bytes; + r_vec->tx_pkts += done_pkts; + u64_stats_update_end(&r_vec->tx_sync); + + xsk_tx_completed(r_vec->xsk_pool, done_pkts - reused); + + WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt, + "XDP TX ring corruption rd_p=%u wr_p=%u cnt=%u\n", + tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt); + + return done_all; +} + +static void nfp_net_xsk_tx(struct nfp_net_tx_ring *tx_ring) +{ + struct nfp_net_r_vector *r_vec = tx_ring->r_vec; + struct xdp_desc desc[NFP_NET_XSK_TX_BATCH]; + struct xsk_buff_pool *xsk_pool; + struct nfp_net_tx_desc *txd; + u32 pkts = 0, wr_idx; + u32 i, got; + + xsk_pool = r_vec->xsk_pool; + + while (nfp_net_tx_space(tx_ring) >= NFP_NET_XSK_TX_BATCH) { + for (i = 0; i < NFP_NET_XSK_TX_BATCH; i++) + if (!xsk_tx_peek_desc(xsk_pool, &desc[i])) + break; + got = i; + if (!got) + break; + + wr_idx = D_IDX(tx_ring, tx_ring->wr_p + i); + prefetchw(&tx_ring->txds[wr_idx]); + + for (i = 0; i < got; i++) + xsk_buff_raw_dma_sync_for_device(xsk_pool, desc[i].addr, + desc[i].len); + + for (i = 0; i < got; i++) { + wr_idx = D_IDX(tx_ring, tx_ring->wr_p + i); + + tx_ring->txbufs[wr_idx].real_len = desc[i].len; + tx_ring->txbufs[wr_idx].is_xsk_tx = false; + + /* Build TX descriptor. */ + txd = &tx_ring->txds[wr_idx]; + nfp_desc_set_dma_addr(txd, + xsk_buff_raw_get_dma(xsk_pool, + desc[i].addr + )); + txd->offset_eop = PCIE_DESC_TX_EOP; + txd->dma_len = cpu_to_le16(desc[i].len); + txd->data_len = cpu_to_le16(desc[i].len); + } + + tx_ring->wr_p += got; + pkts += got; + } + + if (!pkts) + return; + + xsk_tx_release(xsk_pool); + /* Ensure all records are visible before incrementing write counter. 
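The wmb() immediately below (and its twin in the freelist filler further down) orders the descriptor stores ahead of the queue-pointer doorbell, so the device can never observe an advanced write pointer before the descriptor contents are visible in memory. A hedged sketch of the producer-side pattern (kernel context assumed; the ring type and helper are stand-ins, not driver API):

#include <asm/barrier.h>
#include <linux/io.h>
#include <linux/types.h>

struct ring {
	void __iomem *doorbell;
};

/* descriptor payloads are written by the caller before this point */
static void ring_doorbell(struct ring *r, u32 added)
{
	wmb();				/* payload stores first... */
	writel(added, r->doorbell);	/* ...then the pointer update */
}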
*/ + wmb(); + nfp_qcp_wr_ptr_add(tx_ring->qcp_q, pkts); +} + +static bool +nfp_net_xsk_tx_xdp(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec, + struct nfp_net_rx_ring *rx_ring, + struct nfp_net_tx_ring *tx_ring, + struct nfp_net_xsk_rx_buf *xrxbuf, unsigned int pkt_len, + int pkt_off) +{ + struct xsk_buff_pool *pool = r_vec->xsk_pool; + struct nfp_net_tx_buf *txbuf; + struct nfp_net_tx_desc *txd; + unsigned int wr_idx; + + if (nfp_net_tx_space(tx_ring) < 1) + return false; + + xsk_buff_raw_dma_sync_for_device(pool, xrxbuf->dma_addr + pkt_off, pkt_len); + + wr_idx = D_IDX(tx_ring, tx_ring->wr_p); + + txbuf = &tx_ring->txbufs[wr_idx]; + txbuf->xdp = xrxbuf->xdp; + txbuf->real_len = pkt_len; + txbuf->is_xsk_tx = true; + + /* Build TX descriptor */ + txd = &tx_ring->txds[wr_idx]; + txd->offset_eop = PCIE_DESC_TX_EOP; + txd->dma_len = cpu_to_le16(pkt_len); + nfp_desc_set_dma_addr(txd, xrxbuf->dma_addr + pkt_off); + txd->data_len = cpu_to_le16(pkt_len); + + txd->flags = 0; + txd->mss = 0; + txd->lso_hdrlen = 0; + + tx_ring->wr_ptr_add++; + tx_ring->wr_p++; + + return true; +} + +static int nfp_net_rx_space(struct nfp_net_rx_ring *rx_ring) +{ + return rx_ring->cnt - rx_ring->wr_p + rx_ring->rd_p - 1; +} + +static void +nfp_net_xsk_rx_bufs_stash(struct nfp_net_rx_ring *rx_ring, unsigned int idx, + struct xdp_buff *xdp) +{ + unsigned int headroom; + + headroom = xsk_pool_get_headroom(rx_ring->r_vec->xsk_pool); + + rx_ring->rxds[idx].fld.reserved = 0; + rx_ring->rxds[idx].fld.meta_len_dd = 0; + + rx_ring->xsk_rxbufs[idx].xdp = xdp; + rx_ring->xsk_rxbufs[idx].dma_addr = + xsk_buff_xdp_get_frame_dma(xdp) + headroom; +} + +static void nfp_net_xsk_rx_unstash(struct nfp_net_xsk_rx_buf *rxbuf) +{ + rxbuf->dma_addr = 0; + rxbuf->xdp = NULL; +} + +static void nfp_net_xsk_rx_free(struct nfp_net_xsk_rx_buf *rxbuf) +{ + if (rxbuf->xdp) + xsk_buff_free(rxbuf->xdp); + + nfp_net_xsk_rx_unstash(rxbuf); +} + +void nfp_net_xsk_rx_bufs_free(struct nfp_net_rx_ring *rx_ring) +{ + unsigned int i; + + if (!rx_ring->cnt) + return; + + for (i = 0; i < rx_ring->cnt - 1; i++) + nfp_net_xsk_rx_free(&rx_ring->xsk_rxbufs[i]); +} + +void nfp_net_xsk_rx_ring_fill_freelist(struct nfp_net_rx_ring *rx_ring) +{ + struct nfp_net_r_vector *r_vec = rx_ring->r_vec; + struct xsk_buff_pool *pool = r_vec->xsk_pool; + unsigned int wr_idx, wr_ptr_add = 0; + struct xdp_buff *xdp; + + while (nfp_net_rx_space(rx_ring)) { + wr_idx = D_IDX(rx_ring, rx_ring->wr_p); + + xdp = xsk_buff_alloc(pool); + if (!xdp) + break; + + nfp_net_xsk_rx_bufs_stash(rx_ring, wr_idx, xdp); + + nfp_desc_set_dma_addr(&rx_ring->rxds[wr_idx].fld, + rx_ring->xsk_rxbufs[wr_idx].dma_addr); + + rx_ring->wr_p++; + wr_ptr_add++; + } + + /* Ensure all records are visible before incrementing write counter. 
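The freelist filler above draws buffers straight from the AF_XDP pool with xsk_buff_alloc() and publishes one RX descriptor per buffer, stopping early when the pool runs dry. A hedged sketch of that loop's shape against the in-kernel xsk API (the stash step is a placeholder for the driver's own descriptor write):

#include <net/xdp_sock_drv.h>

static unsigned int fill_from_pool(struct xsk_buff_pool *pool,
				   unsigned int space)
{
	unsigned int filled = 0;

	while (filled < space) {
		struct xdp_buff *xdp = xsk_buff_alloc(pool);

		if (!xdp)	/* pool exhausted: retry on next poll */
			break;
		/* stash xdp and xsk_buff_xdp_get_frame_dma(xdp) into
		 * the hardware descriptor here
		 */
		filled++;
	}
	return filled;	/* caller issues wmb(), then rings the doorbell */
}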
*/ + wmb(); + nfp_qcp_wr_ptr_add(rx_ring->qcp_fl, wr_ptr_add); +} + +static void nfp_net_xsk_rx_drop(struct nfp_net_r_vector *r_vec, + struct nfp_net_xsk_rx_buf *xrxbuf) +{ + u64_stats_update_begin(&r_vec->rx_sync); + r_vec->rx_drops++; + u64_stats_update_end(&r_vec->rx_sync); + + nfp_net_xsk_rx_free(xrxbuf); +} + +static void nfp_net_xsk_rx_skb(struct nfp_net_rx_ring *rx_ring, + const struct nfp_net_rx_desc *rxd, + struct nfp_net_xsk_rx_buf *xrxbuf, + const struct nfp_meta_parsed *meta, + unsigned int pkt_len, + bool meta_xdp, + unsigned int *skbs_polled) +{ + struct nfp_net_r_vector *r_vec = rx_ring->r_vec; + struct nfp_net_dp *dp = &r_vec->nfp_net->dp; + struct net_device *netdev; + struct sk_buff *skb; + + if (likely(!meta->portid)) { + netdev = dp->netdev; + } else { + struct nfp_net *nn = netdev_priv(dp->netdev); + + netdev = nfp_app_dev_get(nn->app, meta->portid, NULL); + if (unlikely(!netdev)) { + nfp_net_xsk_rx_drop(r_vec, xrxbuf); + return; + } + nfp_repr_inc_rx_stats(netdev, pkt_len); + } + + skb = napi_alloc_skb(&r_vec->napi, pkt_len); + if (!skb) { + nfp_net_xsk_rx_drop(r_vec, xrxbuf); + return; + } + memcpy(skb_put(skb, pkt_len), xrxbuf->xdp->data, pkt_len); + + skb->mark = meta->mark; + skb_set_hash(skb, meta->hash, meta->hash_type); + + skb_record_rx_queue(skb, rx_ring->idx); + skb->protocol = eth_type_trans(skb, netdev); + + nfp_net_rx_csum(dp, r_vec, rxd, meta, skb); + + if (rxd->rxd.flags & PCIE_DESC_RX_VLAN) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + le16_to_cpu(rxd->rxd.vlan)); + if (meta_xdp) + skb_metadata_set(skb, + xrxbuf->xdp->data - xrxbuf->xdp->data_meta); + + napi_gro_receive(&rx_ring->r_vec->napi, skb); + + nfp_net_xsk_rx_free(xrxbuf); + + (*skbs_polled)++; +} + +static unsigned int +nfp_net_xsk_rx(struct nfp_net_rx_ring *rx_ring, int budget, + unsigned int *skbs_polled) +{ + struct nfp_net_r_vector *r_vec = rx_ring->r_vec; + struct nfp_net_dp *dp = &r_vec->nfp_net->dp; + struct nfp_net_tx_ring *tx_ring; + struct bpf_prog *xdp_prog; + bool xdp_redir = false; + int pkts_polled = 0; + + xdp_prog = READ_ONCE(dp->xdp_prog); + tx_ring = r_vec->xdp_ring; + + while (pkts_polled < budget) { + unsigned int meta_len, data_len, pkt_len, pkt_off; + struct nfp_net_xsk_rx_buf *xrxbuf; + struct nfp_net_rx_desc *rxd; + struct nfp_meta_parsed meta; + int idx, act; + + idx = D_IDX(rx_ring, rx_ring->rd_p); + + rxd = &rx_ring->rxds[idx]; + if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD)) + break; + + rx_ring->rd_p++; + pkts_polled++; + + xrxbuf = &rx_ring->xsk_rxbufs[idx]; + + /* If starved of buffers "drop" it and scream. */ + if (rx_ring->rd_p >= rx_ring->wr_p) { + nn_dp_warn(dp, "Starved of RX buffers\n"); + nfp_net_xsk_rx_drop(r_vec, xrxbuf); + break; + } + + /* Memory barrier to ensure that we won't do other reads + * before the DD bit. + */ + dma_rmb(); + + memset(&meta, 0, sizeof(meta)); + + /* Only supporting AF_XDP with dynamic metadata so buffer layout + * is always: + * + * --------------------------------------------------------- + * | off | metadata | packet | XXXX | + * --------------------------------------------------------- + */ + meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK; + data_len = le16_to_cpu(rxd->rxd.data_len); + pkt_len = data_len - meta_len; + + if (unlikely(meta_len > NFP_NET_MAX_PREPEND)) { + nn_dp_warn(dp, "Oversized RX packet metadata %u\n", + meta_len); + nfp_net_xsk_rx_drop(r_vec, xrxbuf); + continue; + } + + /* Stats update. 
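 *
 * rx_sync is the usual u64_stats seqcount protecting 64-bit counters
 * from torn reads on 32-bit systems. The matching consumer side is not
 * part of this patch; it would follow the canonical retry loop:
 *
 *	unsigned int start;
 *	u64 pkts, bytes;
 *
 *	do {
 *		start = u64_stats_fetch_begin(&r_vec->rx_sync);
 *		pkts = r_vec->rx_pkts;
 *		bytes = r_vec->rx_bytes;
 *	} while (u64_stats_fetch_retry(&r_vec->rx_sync, start));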
*/ + u64_stats_update_begin(&r_vec->rx_sync); + r_vec->rx_pkts++; + r_vec->rx_bytes += pkt_len; + u64_stats_update_end(&r_vec->rx_sync); + + xrxbuf->xdp->data += meta_len; + xrxbuf->xdp->data_end = xrxbuf->xdp->data + pkt_len; + xdp_set_data_meta_invalid(xrxbuf->xdp); + xsk_buff_dma_sync_for_cpu(xrxbuf->xdp, r_vec->xsk_pool); + net_prefetch(xrxbuf->xdp->data); + + if (meta_len) { + if (unlikely(nfp_net_parse_meta(dp->netdev, &meta, + xrxbuf->xdp->data - + meta_len, + xrxbuf->xdp->data, + pkt_len, meta_len))) { + nn_dp_warn(dp, "Invalid RX packet metadata\n"); + nfp_net_xsk_rx_drop(r_vec, xrxbuf); + continue; + } + + if (unlikely(meta.portid)) { + struct nfp_net *nn = netdev_priv(dp->netdev); + + if (meta.portid != NFP_META_PORT_ID_CTRL) { + nfp_net_xsk_rx_skb(rx_ring, rxd, xrxbuf, + &meta, pkt_len, + false, skbs_polled); + continue; + } + + nfp_app_ctrl_rx_raw(nn->app, xrxbuf->xdp->data, + pkt_len); + nfp_net_xsk_rx_free(xrxbuf); + continue; + } + } + + act = bpf_prog_run_xdp(xdp_prog, xrxbuf->xdp); + + pkt_len = xrxbuf->xdp->data_end - xrxbuf->xdp->data; + pkt_off = xrxbuf->xdp->data - xrxbuf->xdp->data_hard_start; + + switch (act) { + case XDP_PASS: + nfp_net_xsk_rx_skb(rx_ring, rxd, xrxbuf, &meta, pkt_len, + true, skbs_polled); + break; + case XDP_TX: + if (!nfp_net_xsk_tx_xdp(dp, r_vec, rx_ring, tx_ring, + xrxbuf, pkt_len, pkt_off)) + nfp_net_xsk_rx_drop(r_vec, xrxbuf); + else + nfp_net_xsk_rx_unstash(xrxbuf); + break; + case XDP_REDIRECT: + if (xdp_do_redirect(dp->netdev, xrxbuf->xdp, xdp_prog)) { + nfp_net_xsk_rx_drop(r_vec, xrxbuf); + } else { + nfp_net_xsk_rx_unstash(xrxbuf); + xdp_redir = true; + } + break; + default: + bpf_warn_invalid_xdp_action(dp->netdev, xdp_prog, act); + fallthrough; + case XDP_ABORTED: + trace_xdp_exception(dp->netdev, xdp_prog, act); + fallthrough; + case XDP_DROP: + nfp_net_xsk_rx_drop(r_vec, xrxbuf); + break; + } + } + + nfp_net_xsk_rx_ring_fill_freelist(r_vec->rx_ring); + + if (xdp_redir) + xdp_do_flush_map(); + + if (tx_ring->wr_ptr_add) + nfp_net_tx_xmit_more_flush(tx_ring); + + return pkts_polled; +} + +static void nfp_net_xsk_pool_unmap(struct device *dev, + struct xsk_buff_pool *pool) +{ + return xsk_pool_dma_unmap(pool, 0); +} + +static int nfp_net_xsk_pool_map(struct device *dev, struct xsk_buff_pool *pool) +{ + return xsk_pool_dma_map(pool, dev, 0); +} + +int nfp_net_xsk_setup_pool(struct net_device *netdev, + struct xsk_buff_pool *pool, u16 queue_id) +{ + struct nfp_net *nn = netdev_priv(netdev); + + struct xsk_buff_pool *prev_pool; + struct nfp_net_dp *dp; + int err; + + /* Reject on old FWs so we can drop some checks on datapath. 
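 *
 * This entry point is reached via the core XDP_SETUP_XSK_POOL command.
 * Assuming the usual ndo_bpf dispatch (the hook-up is not visible in
 * this hunk), the wiring would look like:
 *
 *	case XDP_SETUP_XSK_POOL:
 *		return nfp_net_xsk_setup_pool(netdev, xdp->xsk.pool,
 *					      xdp->xsk.queue_id);
 *
 * Note that a NULL pool uninstalls: the install branch is skipped, the
 * cloned datapath swaps NULL in, and the previous pool is unmapped once
 * the ring reconfig has succeeded.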
*/ + if (nn->dp.rx_offset != NFP_NET_CFG_RX_OFFSET_DYNAMIC) + return -EOPNOTSUPP; + if (!nn->dp.chained_metadata_format) + return -EOPNOTSUPP; + + /* Install */ + if (pool) { + err = nfp_net_xsk_pool_map(nn->dp.dev, pool); + if (err) + return err; + } + + /* Reconfig/swap */ + dp = nfp_net_clone_dp(nn); + if (!dp) { + err = -ENOMEM; + goto err_unmap; + } + + prev_pool = dp->xsk_pools[queue_id]; + dp->xsk_pools[queue_id] = pool; + + err = nfp_net_ring_reconfig(nn, dp, NULL); + if (err) + goto err_unmap; + + /* Uninstall */ + if (prev_pool) + nfp_net_xsk_pool_unmap(nn->dp.dev, prev_pool); + + return 0; +err_unmap: + if (pool) + nfp_net_xsk_pool_unmap(nn->dp.dev, pool); + + return err; +} + +int nfp_net_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags) +{ + struct nfp_net *nn = netdev_priv(netdev); + + /* queue_id comes from a zero-copy socket, installed with XDP_SETUP_XSK_POOL, + * so it must be within our vector range. Moreover, our napi structs + * are statically allocated, so we can always kick them without worrying + * if reconfig is in progress or interface down. + */ + napi_schedule(&nn->r_vecs[queue_id].napi); + + return 0; +} + +int nfp_net_xsk_poll(struct napi_struct *napi, int budget) +{ + struct nfp_net_r_vector *r_vec = + container_of(napi, struct nfp_net_r_vector, napi); + unsigned int pkts_polled, skbs = 0; + + pkts_polled = nfp_net_xsk_rx(r_vec->rx_ring, budget, &skbs); + + if (pkts_polled < budget) { + if (r_vec->tx_ring) + nfp_net_tx_complete(r_vec->tx_ring, budget); + + if (!nfp_net_xsk_complete(r_vec->xdp_ring)) + pkts_polled = budget; + + nfp_net_xsk_tx(r_vec->xdp_ring); + + if (pkts_polled < budget && napi_complete_done(napi, skbs)) + nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry); + } + + return pkts_polled; +} diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_xsk.h b/drivers/net/ethernet/netronome/nfp/nfp_net_xsk.h new file mode 100644 index 000000000000..5c8549cb3543 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_xsk.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2018 Netronome Systems, Inc */ +/* Copyright (C) 2021 Corigine, Inc */ + +#ifndef _NFP_XSK_H_ +#define _NFP_XSK_H_ + +#include <net/xdp_sock_drv.h> + +#define NFP_NET_XSK_TX_BATCH 16 /* XSK TX transmission batch size. 
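 *
 * Descriptors drained here in batches of this size by
 * xsk_tx_peek_desc() are produced from user space. For illustration, a
 * minimal producer using the libbpf/libxdp xsk helpers (the xsk struct,
 * frame_addr and frame_len are assumed context, not driver code):
 *
 *	__u32 idx;
 *
 *	if (xsk_ring_prod__reserve(&xsk->tx, 1, &idx) == 1) {
 *		struct xdp_desc *d = xsk_ring_prod__tx_desc(&xsk->tx, idx);
 *
 *		d->addr = frame_addr;	// offset into the UMEM
 *		d->len = frame_len;
 *		xsk_ring_prod__submit(&xsk->tx, 1);
 *		// kick TX; this lands in the driver's ndo_xsk_wakeup
 *		sendto(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT,
 *		       NULL, 0);
 *	}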
*/ + +static inline bool nfp_net_has_xsk_pool_slow(struct nfp_net_dp *dp, + unsigned int qid) +{ + return dp->xdp_prog && dp->xsk_pools[qid]; +} + +int nfp_net_xsk_setup_pool(struct net_device *netdev, struct xsk_buff_pool *pool, + u16 queue_id); + +void nfp_net_xsk_tx_bufs_free(struct nfp_net_tx_ring *tx_ring); +void nfp_net_xsk_rx_bufs_free(struct nfp_net_rx_ring *rx_ring); + +void nfp_net_xsk_rx_ring_fill_freelist(struct nfp_net_rx_ring *rx_ring); + +int nfp_net_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags); +int nfp_net_xsk_poll(struct napi_struct *napi, int budget); + +#endif /* _NFP_XSK_H_ */ diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c index 07a00dd9cfe0..4b3482ce90a1 100644 --- a/drivers/net/ethernet/ni/nixge.c +++ b/drivers/net/ethernet/ni/nixge.c @@ -324,8 +324,9 @@ static int nixge_hw_dma_bd_init(struct net_device *ndev) + sizeof(*priv->rx_bd_v) * ((i + 1) % RX_BD_NUM)); - skb = netdev_alloc_skb_ip_align(ndev, - NIXGE_MAX_JUMBO_FRAME_SIZE); + skb = __netdev_alloc_skb_ip_align(ndev, + NIXGE_MAX_JUMBO_FRAME_SIZE, + GFP_KERNEL); if (!skb) goto out; diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index bc39558fe82b..756f97dce85b 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -1471,6 +1471,7 @@ static int lpc_eth_drv_resume(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct netdata_local *pldat; + int ret; if (device_may_wakeup(&pdev->dev)) disable_irq_wake(ndev->irq); @@ -1480,7 +1481,9 @@ static int lpc_eth_drv_resume(struct platform_device *pdev) pldat = netdev_priv(ndev); /* Enable interface clock */ - clk_enable(pldat->clk); + ret = clk_enable(pldat->clk); + if (ret) + return ret; /* Reset and initialize */ __lpc_eth_reset(pldat); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c index 52a1b5cfd8e7..9d0514cfeb5c 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c @@ -202,19 +202,25 @@ do_check_time: } } + dev_dbg(ionic->dev, "fw_status 0x%02x ready %d idev->ready %d last_hb 0x%x state 0x%02lx\n", + fw_status, fw_status_ready, idev->fw_status_ready, + idev->last_fw_hb, lif->state[0]); + /* is this a transition? 
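 *
 * The rework below is classic edge detection with a latch: compare
 * against the cached ready state, update the cache as soon as a change
 * is seen, and let the FW_STOPPING bit guarantee that a down edge is
 * acted on exactly once even if the heartbeat check runs again before
 * the reset work completes. Stripped to its shape (names shortened):
 *
 *	if (ready != cached_ready && !test_bit(STOPPING, state)) {
 *		cached_ready = ready;
 *		if (!ready && !test_and_set_bit(STOPPING, state))
 *			trigger = true;	// one reset per down edge
 *	}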
*/ - if (fw_status_ready != idev->fw_status_ready) { + if (fw_status_ready != idev->fw_status_ready && + !test_bit(IONIC_LIF_F_FW_STOPPING, lif->state)) { bool trigger = false; - if (!fw_status_ready && lif && + idev->fw_status_ready = fw_status_ready; + + if (!fw_status_ready && !test_bit(IONIC_LIF_F_FW_RESET, lif->state) && !test_and_set_bit(IONIC_LIF_F_FW_STOPPING, lif->state)) { dev_info(ionic->dev, "FW stopped 0x%02x\n", fw_status); trigger = true; - } else if (fw_status_ready && lif && - test_bit(IONIC_LIF_F_FW_RESET, lif->state) && - !test_bit(IONIC_LIF_F_FW_STOPPING, lif->state)) { + } else if (fw_status_ready && + test_bit(IONIC_LIF_F_FW_RESET, lif->state)) { dev_info(ionic->dev, "FW running 0x%02x\n", fw_status); trigger = true; } @@ -222,8 +228,6 @@ do_check_time: if (trigger) { struct ionic_deferred_work *work; - idev->fw_status_ready = fw_status_ready; - work = kzalloc(sizeof(*work), GFP_ATOMIC); if (work) { work->type = IONIC_DW_TYPE_LIF_RESET; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c index 386a5cf1e224..01c22701482d 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c @@ -74,10 +74,10 @@ static void ionic_get_drvinfo(struct net_device *netdev, struct ionic_lif *lif = netdev_priv(netdev); struct ionic *ionic = lif->ionic; - strlcpy(drvinfo->driver, IONIC_DRV_NAME, sizeof(drvinfo->driver)); - strlcpy(drvinfo->fw_version, ionic->idev.dev_info.fw_version, + strscpy(drvinfo->driver, IONIC_DRV_NAME, sizeof(drvinfo->driver)); + strscpy(drvinfo->fw_version, ionic->idev.dev_info.fw_version, sizeof(drvinfo->fw_version)); - strlcpy(drvinfo->bus_info, ionic_bus_info(ionic), + strscpy(drvinfo->bus_info, ionic_bus_info(ionic), sizeof(drvinfo->bus_info)); } diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h index 278610ed7227..4a90f611c611 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_if.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h @@ -759,7 +759,7 @@ enum ionic_txq_desc_opcode { * IONIC_TXQ_DESC_OPCODE_CSUM_HW: * Offload 16-bit checksum computation to hardware. * If @csum_l3 is set then the packet's L3 checksum is - * updated. Similarly, if @csum_l4 is set the the L4 + * updated. Similarly, if @csum_l4 is set the L4 * checksum is updated. If @encap is set then encap header * checksums are also updated. 
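 *
 * To make the flag semantics concrete, a hypothetical TX-path opcode
 * selection (illustration only; the capability check and descriptor
 * field names are assumed, not taken from the driver):
 *
 *	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 *		if (hw_can_csum)	// device computes the checksum
 *			desc->opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
 *		else			// hand over the partial checksum
 *			desc->opcode = IONIC_TXQ_DESC_OPCODE_CSUM_PARTIAL;
 *	}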
* @@ -1368,9 +1368,9 @@ union ionic_port_config { * @status: link status (enum ionic_port_oper_status) * @id: port id * @speed: link speed (in Mbps) - * @link_down_count: number of times link went from from up to down + * @link_down_count: number of times link went from up to down * @fec_type: fec type (enum ionic_port_fec_type) - * @xcvr: tranceiver status + * @xcvr: transceiver status */ struct ionic_port_status { __le32 id; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index 542e395fb037..f3568901eb91 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -12,6 +12,7 @@ #include <linux/pci.h> #include <linux/cpumask.h> #include <linux/crash_dump.h> +#include <linux/vmalloc.h> #include "ionic.h" #include "ionic_bus.h" @@ -393,11 +394,11 @@ static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq) ionic_qcq_intr_free(lif, qcq); if (qcq->cq.info) { - devm_kfree(dev, qcq->cq.info); + vfree(qcq->cq.info); qcq->cq.info = NULL; } if (qcq->q.info) { - devm_kfree(dev, qcq->q.info); + vfree(qcq->q.info); qcq->q.info = NULL; } } @@ -528,8 +529,7 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type, new->q.dev = dev; new->flags = flags; - new->q.info = devm_kcalloc(dev, num_descs, sizeof(*new->q.info), - GFP_KERNEL); + new->q.info = vzalloc(num_descs * sizeof(*new->q.info)); if (!new->q.info) { netdev_err(lif->netdev, "Cannot allocate queue info\n"); err = -ENOMEM; @@ -550,8 +550,7 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type, if (err) goto err_out; - new->cq.info = devm_kcalloc(dev, num_descs, sizeof(*new->cq.info), - GFP_KERNEL); + new->cq.info = vzalloc(num_descs * sizeof(*new->cq.info)); if (!new->cq.info) { netdev_err(lif->netdev, "Cannot allocate completion queue info\n"); err = -ENOMEM; @@ -640,14 +639,14 @@ err_out_free_cq: err_out_free_q: dma_free_coherent(dev, new->q_size, new->q_base, new->q_base_pa); err_out_free_cq_info: - devm_kfree(dev, new->cq.info); + vfree(new->cq.info); err_out_free_irq: if (flags & IONIC_QCQ_F_INTR) { devm_free_irq(dev, new->intr.vector, &new->napi); ionic_intr_free(lif->ionic, new->intr.index); } err_out_free_q_info: - devm_kfree(dev, new->q.info); + vfree(new->q.info); err_out_free_qcq: devm_kfree(dev, new); err_out: @@ -3303,7 +3302,7 @@ static void ionic_lif_set_netdev_info(struct ionic_lif *lif) }, }; - strlcpy(ctx.cmd.lif_setattr.name, lif->netdev->name, + strscpy(ctx.cmd.lif_setattr.name, lif->netdev->name, sizeof(ctx.cmd.lif_setattr.name)); ionic_adminq_post_wait(lif, &ctx); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_stats.c b/drivers/net/ethernet/pensando/ionic/ionic_stats.c index fd6806b4a1b9..9859a4432985 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_stats.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_stats.c @@ -151,7 +151,6 @@ static const struct ionic_stat_desc ionic_rx_stats_desc[] = { IONIC_RX_STAT_DESC(vlan_stripped), }; - #define IONIC_NUM_LIF_STATS ARRAY_SIZE(ionic_lif_stats_desc) #define IONIC_NUM_PORT_STATS ARRAY_SIZE(ionic_port_stats_desc) #define IONIC_NUM_TX_STATS ARRAY_SIZE(ionic_tx_stats_desc) diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c index d197a70a49c9..f54035455ad6 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c @@ -10,7 +10,6 @@ #include "ionic_lif.h" #include "ionic_txrx.h" - static 
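/*
 * A note on the allocator switch in ionic_lif.c above: the q.info and
 * cq.info arrays scale with the descriptor count, and a large
 * physically-contiguous devm_kcalloc() can fail on a fragmented
 * system. vzalloc() needs only virtually-contiguous pages; the
 * trade-off is that freeing becomes manual, as the error paths now
 * show:
 *
 *	new->q.info = vzalloc(num_descs * sizeof(*new->q.info));
 *	if (!new->q.info)
 *		return -ENOMEM;	// sketch; the driver uses goto unwinding
 *	...
 *	vfree(new->q.info);	// no devm lifetime tracking anymore
 */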
inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell, ionic_desc_cb cb_func, void *cb_arg) { diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 8ac38828ba45..0848b5529d48 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -2984,12 +2984,16 @@ static int qed_iov_pre_update_vport(struct qed_hwfn *hwfn, u8 mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED; struct qed_filter_accept_flags *flags = ¶ms->accept_flags; struct qed_public_vf_info *vf_info; + u16 tlv_mask; + + tlv_mask = BIT(QED_IOV_VP_UPDATE_ACCEPT_PARAM) | + BIT(QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN); /* Untrusted VFs can't even be trusted to know that fact. * Simply indicate everything is configured fine, and trace * configuration 'behind their back'. */ - if (!(*tlvs & BIT(QED_IOV_VP_UPDATE_ACCEPT_PARAM))) + if (!(*tlvs & tlv_mask)) return 0; vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true); @@ -3006,6 +3010,13 @@ static int qed_iov_pre_update_vport(struct qed_hwfn *hwfn, flags->tx_accept_filter &= ~mask; } + if (params->update_accept_any_vlan_flg) { + vf_info->accept_any_vlan = params->accept_any_vlan; + + if (vf_info->forced_vlan && !vf_info->is_trusted_configured) + params->accept_any_vlan = false; + } + return 0; } @@ -3806,11 +3817,11 @@ bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs) return found; } -static void qed_iov_get_link(struct qed_hwfn *p_hwfn, - u16 vfid, - struct qed_mcp_link_params *p_params, - struct qed_mcp_link_state *p_link, - struct qed_mcp_link_capabilities *p_caps) +static int qed_iov_get_link(struct qed_hwfn *p_hwfn, + u16 vfid, + struct qed_mcp_link_params *p_params, + struct qed_mcp_link_state *p_link, + struct qed_mcp_link_capabilities *p_caps) { struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn, vfid, @@ -3818,7 +3829,7 @@ static void qed_iov_get_link(struct qed_hwfn *p_hwfn, struct qed_bulletin_content *p_bulletin; if (!p_vf) - return; + return -EINVAL; p_bulletin = p_vf->bulletin.p_virt; @@ -3828,6 +3839,7 @@ static void qed_iov_get_link(struct qed_hwfn *p_hwfn, __qed_vf_get_link_state(p_hwfn, p_link, p_bulletin); if (p_caps) __qed_vf_get_link_caps(p_hwfn, p_caps, p_bulletin); + return 0; } static int @@ -4686,6 +4698,7 @@ static int qed_get_vf_config(struct qed_dev *cdev, struct qed_public_vf_info *vf_info; struct qed_mcp_link_state link; u32 tx_rate; + int ret; /* Sanitize request */ if (IS_VF(cdev)) @@ -4699,7 +4712,9 @@ static int qed_get_vf_config(struct qed_dev *cdev, vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true); - qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL); + ret = qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL); + if (ret) + return ret; /* Fill information about VF */ ivi->vf = vf_id; @@ -4715,6 +4730,7 @@ static int qed_get_vf_config(struct qed_dev *cdev, tx_rate = vf_info->tx_rate; ivi->max_tx_rate = tx_rate ? 
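/*
 * Having qed_iov_get_link() return int (above) lets this caller bail
 * out instead of reporting link state from a struct that was never
 * filled in for a bogus VF id. Condensed:
 *
 *	ret = qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL);
 *	if (ret)
 *		return ret;	// no VF info, do not report stale state
 */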
tx_rate : link.speed; ivi->min_tx_rate = qed_iov_get_vf_min_rate(hwfn, vf_id); + ivi->trusted = vf_info->is_trusted_request; return 0; } @@ -5145,6 +5161,12 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn) params.update_ctl_frame_check = 1; params.mac_chk_en = !vf_info->is_trusted_configured; + params.update_accept_any_vlan_flg = 0; + + if (vf_info->accept_any_vlan && vf_info->forced_vlan) { + params.update_accept_any_vlan_flg = 1; + params.accept_any_vlan = vf_info->accept_any_vlan; + } if (vf_info->rx_accept_mode & mask) { flags->update_rx_mode_config = 1; @@ -5160,13 +5182,20 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn) if (!vf_info->is_trusted_configured) { flags->rx_accept_filter &= ~mask; flags->tx_accept_filter &= ~mask; + params.accept_any_vlan = false; } if (flags->update_rx_mode_config || flags->update_tx_mode_config || - params.update_ctl_frame_check) + params.update_ctl_frame_check || + params.update_accept_any_vlan_flg) { + DP_VERBOSE(hwfn, QED_MSG_IOV, + "vport update config for %s VF[abs 0x%x rel 0x%x]\n", + vf_info->is_trusted_configured ? "trusted" : "untrusted", + vf->abs_vf_id, vf->relative_vf_id); qed_sp_vport_update(hwfn, ¶ms, QED_SPQ_MODE_EBLOCK, NULL); + } } } diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h index f448e3dd6c8b..6ee2493de164 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h @@ -62,6 +62,7 @@ struct qed_public_vf_info { bool is_trusted_request; u8 rx_accept_mode; u8 tx_accept_mode; + bool accept_any_vlan; }; struct qed_iov_vf_init_params { diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index 597cd9cd57b5..7b0e390c0b07 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -513,6 +513,9 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn) p_iov->bulletin.size, &p_iov->bulletin.phys, GFP_KERNEL); + if (!p_iov->bulletin.p_virt) + goto free_pf2vf_reply; + DP_VERBOSE(p_hwfn, QED_MSG_IOV, "VF's bulletin Board [%p virt 0x%llx phys 0x%08x bytes]\n", p_iov->bulletin.p_virt, @@ -552,6 +555,10 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn) return rc; +free_pf2vf_reply: + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(union pfvf_tlvs), + p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys); free_vf2pf_request: dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(union vfpf_tlvs), diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index 29cdcb2285b1..bcf3746220df 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c @@ -10,6 +10,7 @@ #include <linux/ipv6.h> #include <net/checksum.h> #include <linux/printk.h> +#include <linux/jiffies.h> #include "qlcnic.h" @@ -332,7 +333,7 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter, hlist_for_each_entry_safe(tmp_fil, n, head, fnode) { if (ether_addr_equal(tmp_fil->faddr, (u8 *)&src_addr) && tmp_fil->vlan_id == vlan_id) { - if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime)) + if (time_is_before_jiffies(QLCNIC_READD_AGE * HZ + tmp_fil->ftime)) qlcnic_change_filter(adapter, &src_addr, vlan_id, tx_ring); tmp_fil->ftime = jiffies; diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c index 955cce644392..c865a4be05ee 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.c +++ b/drivers/net/ethernet/qualcomm/qca_spi.c @@ 
-435,7 +435,7 @@ qcaspi_receive(struct qcaspi *qca) qca->rx_skb->protocol = eth_type_trans( qca->rx_skb, qca->rx_skb->dev); skb_checksum_none_assert(qca->rx_skb); - netif_rx_ni(qca->rx_skb); + netif_rx(qca->rx_skb); qca->rx_skb = netdev_alloc_skb_ip_align(net_dev, net_dev->mtu + VLAN_ETH_HLEN); if (!qca->rx_skb) { @@ -1001,7 +1001,7 @@ qca_spi_probe(struct spi_device *spi) return 0; } -static int +static void qca_spi_remove(struct spi_device *spi) { struct net_device *qcaspi_devs = spi_get_drvdata(spi); @@ -1011,8 +1011,6 @@ qca_spi_remove(struct spi_device *spi) unregister_netdev(qcaspi_devs); free_netdev(qcaspi_devs); - - return 0; } static const struct spi_device_id qca_spi_id[] = { diff --git a/drivers/net/ethernet/qualcomm/qca_uart.c b/drivers/net/ethernet/qualcomm/qca_uart.c index 27c4f43176aa..26646cb6a20a 100644 --- a/drivers/net/ethernet/qualcomm/qca_uart.c +++ b/drivers/net/ethernet/qualcomm/qca_uart.c @@ -108,7 +108,7 @@ qca_tty_receive(struct serdev_device *serdev, const unsigned char *data, qca->rx_skb->protocol = eth_type_trans( qca->rx_skb, qca->rx_skb->dev); skb_checksum_none_assert(qca->rx_skb); - netif_rx_ni(qca->rx_skb); + netif_rx(qca->rx_skb); qca->rx_skb = netdev_alloc_skb_ip_align(netdev, netdev->mtu + VLAN_ETH_HLEN); diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c index 3676976c875b..ba194698cc14 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c @@ -298,7 +298,6 @@ struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb, { struct rmnet_map_header *map_header; u32 padding, map_datalen; - u8 *padbytes; map_datalen = skb->len - hdrlen; map_header = (struct rmnet_map_header *) @@ -323,8 +322,7 @@ struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb, if (skb_tailroom(skb) < padding) return NULL; - padbytes = (u8 *)skb_put(skb, padding); - memset(padbytes, 0, padding); + skb_put_zero(skb, padding); done: map_header->pkt_len = htons(map_datalen + padding); diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 24e2635c4c80..525d66f71f02 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -475,7 +475,7 @@ static int ravb_ring_init(struct net_device *ndev, int q) goto error; for (i = 0; i < priv->num_rx_ring[q]; i++) { - skb = netdev_alloc_skb(ndev, info->max_rx_len); + skb = __netdev_alloc_skb(ndev, info->max_rx_len, GFP_KERNEL); if (!skb) goto error; ravb_set_buffer_align(skb); diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c index 77a0d9d7e65a..407a1f8e3059 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c @@ -2285,18 +2285,18 @@ static int __init sxgbe_cmdline_opt(char *str) char *opt; if (!str || !*str) - return -EINVAL; + return 1; while ((opt = strsep(&str, ",")) != NULL) { if (!strncmp(opt, "eee_timer:", 10)) { if (kstrtoint(opt + 10, 0, &eee_timer)) goto err; } } - return 0; + return 1; err: pr_err("%s: ERROR broken module parameter conversion\n", __func__); - return -EINVAL; + return 1; } __setup("sxgbeeth=", sxgbe_cmdline_opt); diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c index ead550ae2709..d6fdcdc530ca 100644 --- a/drivers/net/ethernet/sfc/efx_channels.c +++ b/drivers/net/ethernet/sfc/efx_channels.c @@ -78,31 
+78,48 @@ static const struct efx_channel_type efx_default_channel_type = { * INTERRUPTS *************/ -static unsigned int efx_wanted_parallelism(struct efx_nic *efx) +static unsigned int count_online_cores(struct efx_nic *efx, bool local_node) { - cpumask_var_t thread_mask; + cpumask_var_t filter_mask; unsigned int count; int cpu; + if (unlikely(!zalloc_cpumask_var(&filter_mask, GFP_KERNEL))) { + netif_warn(efx, probe, efx->net_dev, + "RSS disabled due to allocation failure\n"); + return 1; + } + + cpumask_copy(filter_mask, cpu_online_mask); + if (local_node) { + int numa_node = pcibus_to_node(efx->pci_dev->bus); + + cpumask_and(filter_mask, filter_mask, cpumask_of_node(numa_node)); + } + + count = 0; + for_each_cpu(cpu, filter_mask) { + ++count; + cpumask_andnot(filter_mask, filter_mask, topology_sibling_cpumask(cpu)); + } + + free_cpumask_var(filter_mask); + + return count; +} + +static unsigned int efx_wanted_parallelism(struct efx_nic *efx) +{ + unsigned int count; + if (rss_cpus) { count = rss_cpus; } else { - if (unlikely(!zalloc_cpumask_var(&thread_mask, GFP_KERNEL))) { - netif_warn(efx, probe, efx->net_dev, - "RSS disabled due to allocation failure\n"); - return 1; - } + count = count_online_cores(efx, true); - count = 0; - for_each_online_cpu(cpu) { - if (!cpumask_test_cpu(cpu, thread_mask)) { - ++count; - cpumask_or(thread_mask, thread_mask, - topology_sibling_cpumask(cpu)); - } - } - - free_cpumask_var(thread_mask); + /* If no online CPUs in local node, fallback to any online CPUs */ + if (count == 0) + count = count_online_cores(efx, false); } if (count > EFX_MAX_RX_QUEUES) { @@ -369,12 +386,20 @@ int efx_probe_interrupts(struct efx_nic *efx) #if defined(CONFIG_SMP) void efx_set_interrupt_affinity(struct efx_nic *efx) { + int numa_node = pcibus_to_node(efx->pci_dev->bus); + const struct cpumask *numa_mask = cpumask_of_node(numa_node); struct efx_channel *channel; unsigned int cpu; + /* If no online CPUs in local node, fallback to any online CPU */ + if (cpumask_first_and(cpu_online_mask, numa_mask) >= nr_cpu_ids) + numa_mask = cpu_online_mask; + + cpu = -1; efx_for_each_channel(channel, efx) { - cpu = cpumask_local_spread(channel->channel, - pcibus_to_node(efx->pci_dev->bus)); + cpu = cpumask_next_and(cpu, cpu_online_mask, numa_mask); + if (cpu >= nr_cpu_ids) + cpu = cpumask_first_and(cpu_online_mask, numa_mask); irq_set_affinity_hint(channel->irq, cpumask_of(cpu)); } } diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index be6bfd6b7ec7..50baf62b2cbc 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -163,9 +163,9 @@ static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd, /* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */ spin_lock_bh(&mcdi->iface_lock); ++mcdi->seqno; + seqno = mcdi->seqno & SEQ_MASK; spin_unlock_bh(&mcdi->iface_lock); - seqno = mcdi->seqno & SEQ_MASK; xflags = 0; if (mcdi->mode == MCDI_MODE_EVENTS) xflags |= MCDI_HEADER_XFLAGS_EVREQ; diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c index 556bd353dd42..b0c5a44785fa 100644 --- a/drivers/net/ethernet/socionext/netsec.c +++ b/drivers/net/ethernet/socionext/netsec.c @@ -1044,7 +1044,7 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget) "rx failed to build skb\n"); break; } - page_pool_release_page(dring->page_pool, page); + skb_mark_for_recycle(skb); skb_reserve(skb, xdp.data - xdp.data_hard_start); skb_put(skb, xdp.data_end - xdp.data); diff --git 
a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c index 5943ff9f21c2..63754a9c4ba7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c @@ -721,6 +721,7 @@ static int tgl_common_data(struct pci_dev *pdev, plat->rx_queues_to_use = 6; plat->tx_queues_to_use = 4; plat->clk_ptp_rate = 200000000; + plat->speed_mode_2500 = intel_speed_mode_2500; plat->safety_feat_cfg->tsoee = 1; plat->safety_feat_cfg->mrxpee = 0; @@ -740,7 +741,6 @@ static int tgl_sgmii_phy0_data(struct pci_dev *pdev, { plat->bus_id = 1; plat->phy_interface = PHY_INTERFACE_MODE_SGMII; - plat->speed_mode_2500 = intel_speed_mode_2500; plat->serdes_powerup = intel_serdes_powerup; plat->serdes_powerdown = intel_serdes_powerdown; return tgl_common_data(pdev, plat); @@ -755,7 +755,6 @@ static int tgl_sgmii_phy1_data(struct pci_dev *pdev, { plat->bus_id = 2; plat->phy_interface = PHY_INTERFACE_MODE_SGMII; - plat->speed_mode_2500 = intel_speed_mode_2500; plat->serdes_powerup = intel_serdes_powerup; plat->serdes_powerdown = intel_serdes_powerdown; return tgl_common_data(pdev, plat); @@ -1160,6 +1159,7 @@ static SIMPLE_DEV_PM_OPS(intel_eth_pm_ops, intel_eth_pci_suspend, #define PCI_DEVICE_ID_INTEL_TGL_SGMII1G 0xa0ac #define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_0 0x7aac #define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_1 0x7aad +#define PCI_DEVICE_ID_INTEL_ADLN_SGMII1G 0x54ac static const struct pci_device_id intel_eth_pci_id_table[] = { { PCI_DEVICE_DATA(INTEL, QUARK, &quark_info) }, @@ -1177,6 +1177,7 @@ static const struct pci_device_id intel_eth_pci_id_table[] = { { PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_1, &tgl_sgmii1g_phy1_info) }, { PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_0, &adls_sgmii1g_phy0_info) }, { PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_1, &adls_sgmii1g_phy1_info) }, + { PCI_DEVICE_DATA(INTEL, ADLN_SGMII1G, &tgl_sgmii1g_phy0_info) }, {} }; MODULE_DEVICE_TABLE(pci, intel_eth_pci_id_table); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c index 2ffa0a11eea5..0cc28c79cc61 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c @@ -78,6 +78,7 @@ struct ethqos_emac_por { struct ethqos_emac_driver_data { const struct ethqos_emac_por *por; unsigned int num_por; + bool rgmii_config_looback_en; }; struct qcom_ethqos { @@ -90,6 +91,7 @@ struct qcom_ethqos { const struct ethqos_emac_por *por; unsigned int num_por; + bool rgmii_config_looback_en; }; static int rgmii_readl(struct qcom_ethqos *ethqos, unsigned int offset) @@ -181,6 +183,22 @@ static const struct ethqos_emac_por emac_v2_3_0_por[] = { static const struct ethqos_emac_driver_data emac_v2_3_0_data = { .por = emac_v2_3_0_por, .num_por = ARRAY_SIZE(emac_v2_3_0_por), + .rgmii_config_looback_en = true, +}; + +static const struct ethqos_emac_por emac_v2_1_0_por[] = { + { .offset = RGMII_IO_MACRO_CONFIG, .value = 0x40C01343 }, + { .offset = SDCC_HC_REG_DLL_CONFIG, .value = 0x2004642C }, + { .offset = SDCC_HC_REG_DDR_CONFIG, .value = 0x00000000 }, + { .offset = SDCC_HC_REG_DLL_CONFIG2, .value = 0x00200000 }, + { .offset = SDCC_USR_CTL, .value = 0x00010800 }, + { .offset = RGMII_IO_MACRO_CONFIG2, .value = 0x00002060 }, +}; + +static const struct ethqos_emac_driver_data emac_v2_1_0_data = { + .por = emac_v2_1_0_por, + .num_por = ARRAY_SIZE(emac_v2_1_0_por), + .rgmii_config_looback_en = false, }; static int ethqos_dll_configure(struct 
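/*
 * The sm8150 support added above is mostly data: a power-on-reset
 * register table plus a loopback flag, selected by compatible string.
 * The probe path (in a later hunk of this patch) copies it out of the
 * match data:
 *
 *	data = of_device_get_match_data(&pdev->dev);
 *	ethqos->por = data->por;
 *	ethqos->num_por = data->num_por;
 *	ethqos->rgmii_config_looback_en = data->rgmii_config_looback_en;
 *
 * (The "looback" spelling follows the identifier as merged.)
 */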
qcom_ethqos *ethqos) @@ -297,8 +315,12 @@ static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos) rgmii_updatel(ethqos, SDCC_DDR_CONFIG_PRG_DLY_EN, SDCC_DDR_CONFIG_PRG_DLY_EN, SDCC_HC_REG_DDR_CONFIG); - rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN, - RGMII_CONFIG_LOOPBACK_EN, RGMII_IO_MACRO_CONFIG); + if (ethqos->rgmii_config_looback_en) + rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN, + RGMII_CONFIG_LOOPBACK_EN, RGMII_IO_MACRO_CONFIG); + else + rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN, + 0, RGMII_IO_MACRO_CONFIG); break; case SPEED_100: @@ -331,8 +353,13 @@ static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos) rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN, SDCC_HC_REG_DDR_CONFIG); - rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN, - RGMII_CONFIG_LOOPBACK_EN, RGMII_IO_MACRO_CONFIG); + if (ethqos->rgmii_config_looback_en) + rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN, + RGMII_CONFIG_LOOPBACK_EN, RGMII_IO_MACRO_CONFIG); + else + rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN, + 0, RGMII_IO_MACRO_CONFIG); + break; case SPEED_10: @@ -504,6 +531,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev) data = of_device_get_match_data(&pdev->dev); ethqos->por = data->por; ethqos->num_por = data->num_por; + ethqos->rgmii_config_looback_en = data->rgmii_config_looback_en; ethqos->rgmii_clk = devm_clk_get(&pdev->dev, "rgmii"); if (IS_ERR(ethqos->rgmii_clk)) { @@ -558,6 +586,7 @@ static int qcom_ethqos_remove(struct platform_device *pdev) static const struct of_device_id qcom_ethqos_match[] = { { .compatible = "qcom,qcs404-ethqos", .data = &emac_v2_3_0_data}, + { .compatible = "qcom,sm8150-ethqos", .data = &emac_v2_1_0_data}, { } }; MODULE_DEVICE_TABLE(of, qcom_ethqos_match); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index b745d624b2cb..c1bfd89a5a1f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -2209,6 +2209,23 @@ static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan) stmmac_stop_tx(priv, priv->ioaddr, chan); } +static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv) +{ + u32 rx_channels_count = priv->plat->rx_queues_to_use; + u32 tx_channels_count = priv->plat->tx_queues_to_use; + u32 dma_csr_ch = max(rx_channels_count, tx_channels_count); + u32 chan; + + for (chan = 0; chan < dma_csr_ch; chan++) { + struct stmmac_channel *ch = &priv->channel[chan]; + unsigned long flags; + + spin_lock_irqsave(&ch->lock, flags); + stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1); + spin_unlock_irqrestore(&ch->lock, flags); + } +} + /** * stmmac_start_all_dma - start all RX and TX DMA channels * @priv: driver private structure @@ -2851,8 +2868,10 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) stmmac_axi(priv, priv->ioaddr, priv->plat->axi); /* DMA CSR Channel configuration */ - for (chan = 0; chan < dma_csr_ch; chan++) + for (chan = 0; chan < dma_csr_ch; chan++) { stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan); + stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1); + } /* DMA RX Channel Configuration */ for (chan = 0; chan < rx_channels_count; chan++) { @@ -3256,7 +3275,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register) ret = stmmac_init_ptp(priv); if (ret == -EOPNOTSUPP) - netdev_warn(priv->dev, "PTP not supported by HW\n"); + netdev_info(priv->dev, "PTP not supported by HW\n"); else if (ret) 
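/*
 * The new stmmac_enable_all_dma_irq() helper above exists to enforce
 * ordering: stmmac_init_dma_engine() now leaves every DMA channel
 * interrupt masked, and the later hunks unmask them only after NAPI
 * and the TX queues are running, so an early interrupt cannot kick a
 * NAPI context that is not yet ready. The open() sequence, condensed:
 *
 *	stmmac_init_dma_engine(priv);	// init_chan + disable_dma_irq
 *	stmmac_enable_all_queues(priv);	// napi_enable
 *	netif_tx_start_all_queues(priv->dev);
 *	stmmac_enable_all_dma_irq(priv);	// unmask last
 */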
netdev_warn(priv->dev, "PTP init failed\n"); else if (ptp_register) @@ -3708,6 +3727,7 @@ static int stmmac_open(struct net_device *dev) stmmac_enable_all_queues(priv); netif_tx_start_all_queues(priv->dev); + stmmac_enable_all_dma_irq(priv); return 0; @@ -6457,8 +6477,10 @@ int stmmac_xdp_open(struct net_device *dev) } /* DMA CSR Channel configuration */ - for (chan = 0; chan < dma_csr_ch; chan++) + for (chan = 0; chan < dma_csr_ch; chan++) { stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan); + stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1); + } /* Adjust Split header */ sph_en = (priv->hw->rx_csum > 0) && priv->sph; @@ -6519,6 +6541,7 @@ int stmmac_xdp_open(struct net_device *dev) stmmac_enable_all_queues(priv); netif_carrier_on(dev); netif_tx_start_all_queues(dev); + stmmac_enable_all_dma_irq(priv); return 0; @@ -7398,6 +7421,7 @@ int stmmac_resume(struct device *dev) stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw); stmmac_enable_all_queues(priv); + stmmac_enable_all_dma_irq(priv); mutex_unlock(&priv->lock); rtnl_unlock(); @@ -7414,7 +7438,7 @@ static int __init stmmac_cmdline_opt(char *str) char *opt; if (!str || !*str) - return -EINVAL; + return 1; while ((opt = strsep(&str, ",")) != NULL) { if (!strncmp(opt, "debug:", 6)) { if (kstrtoint(opt + 6, 0, &debug)) @@ -7445,11 +7469,11 @@ static int __init stmmac_cmdline_opt(char *str) goto err; } } - return 0; + return 1; err: pr_err("%s: ERROR broken module parameter conversion", __func__); - return -EINVAL; + return 1; } __setup("stmmaceth=", stmmac_cmdline_opt); diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c index dba9f12efa1c..b04a6a7bf566 100644 --- a/drivers/net/ethernet/sun/cassini.c +++ b/drivers/net/ethernet/sun/cassini.c @@ -88,6 +88,7 @@ #include <asm/io.h> #include <asm/byteorder.h> #include <linux/uaccess.h> +#include <linux/jiffies.h> #define cas_page_map(x) kmap_atomic((x)) #define cas_page_unmap(x) kunmap_atomic((x)) @@ -1234,19 +1235,6 @@ static void cas_init_rx_dma(struct cas *cp) */ readl(cp->regs + REG_INTR_STATUS_ALIAS); writel(INTR_RX_DONE | INTR_RX_BUF_UNAVAIL, cp->regs + REG_ALIAS_CLEAR); - if (cp->cas_flags & CAS_FLAG_REG_PLUS) { - for (i = 1; i < N_RX_COMP_RINGS; i++) - readl(cp->regs + REG_PLUS_INTRN_STATUS_ALIAS(i)); - - /* 2 is different from 3 and 4 */ - if (N_RX_COMP_RINGS > 1) - writel(INTR_RX_DONE_ALT | INTR_RX_BUF_UNAVAIL_1, - cp->regs + REG_PLUS_ALIASN_CLEAR(1)); - - for (i = 2; i < N_RX_COMP_RINGS; i++) - writel(INTR_RX_DONE_ALT, - cp->regs + REG_PLUS_ALIASN_CLEAR(i)); - } /* set up pause thresholds */ val = CAS_BASE(RX_PAUSE_THRESH_OFF, @@ -3508,9 +3496,6 @@ enable_rx_done: if (N_RX_DESC_RINGS > 1) writel(RX_DESC_RINGN_SIZE(1) - 4, cp->regs + REG_PLUS_RX_KICK1); - - for (i = 1; i < N_RX_COMP_RINGS; i++) - writel(0, cp->regs + REG_PLUS_RX_COMPN_TAIL(i)); } } @@ -4063,8 +4048,8 @@ static void cas_link_timer(struct timer_list *t) if (link_transition_timeout != 0 && cp->link_transition_jiffies_valid && - ((jiffies - cp->link_transition_jiffies) > - (link_transition_timeout))) { + time_is_before_jiffies(cp->link_transition_jiffies + + link_transition_timeout)) { /* One-second counter so link-down workaround doesn't * cause resets to occur so fast as to fool the switch * into thinking the link is down. @@ -4679,7 +4664,7 @@ static void cas_set_msglevel(struct net_device *dev, u32 value) static int cas_get_regs_len(struct net_device *dev) { struct cas *cp = netdev_priv(dev); - return cp->casreg_len < CAS_MAX_REGS ? 
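/*
 * time_is_before_jiffies(a) is defined as time_after(jiffies, a), the
 * canonical wrap-safe way to test whether a deadline has passed. The
 * cassini link-timer change above, in isolation (handle_transition()
 * stands in for the workaround logic):
 *
 *	unsigned long deadline = cp->link_transition_jiffies +
 *				 link_transition_timeout;
 *
 *	if (time_is_before_jiffies(deadline))
 *		handle_transition();	// timeout elapsed, wrap-safe
 */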
cp->casreg_len: CAS_MAX_REGS; + return min_t(int, cp->casreg_len, CAS_MAX_REGS); } static void cas_get_regs(struct net_device *dev, struct ethtool_regs *regs, diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c index ad9029ae6848..77e5dffb558f 100644 --- a/drivers/net/ethernet/sun/sunhme.c +++ b/drivers/net/ethernet/sun/sunhme.c @@ -3146,7 +3146,7 @@ static int happy_meal_pci_probe(struct pci_dev *pdev, if (err) { printk(KERN_ERR "happymeal(PCI): Cannot register net device, " "aborting.\n"); - goto err_out_iounmap; + goto err_out_free_coherent; } pci_set_drvdata(pdev, hp); @@ -3179,6 +3179,10 @@ static int happy_meal_pci_probe(struct pci_dev *pdev, return 0; +err_out_free_coherent: + dma_free_coherent(hp->dma_dev, PAGE_SIZE, + hp->happy_block, hp->hblock_dvma); + err_out_iounmap: iounmap(hp->gregs); diff --git a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c index d45b6bb86f0b..72acdf802258 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c +++ b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c @@ -6,7 +6,7 @@ */ #include <linux/net_tstamp.h> -#include <linux/phy.h> +#include <linux/phylink.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> @@ -471,9 +471,7 @@ static void am65_cpsw_get_pauseparam(struct net_device *ndev, { struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev); - pause->autoneg = AUTONEG_DISABLE; - pause->rx_pause = salve->rx_pause ? true : false; - pause->tx_pause = salve->tx_pause ? true : false; + phylink_ethtool_get_pauseparam(salve->phylink, pause); } static int am65_cpsw_set_pauseparam(struct net_device *ndev, @@ -481,18 +479,7 @@ static int am65_cpsw_set_pauseparam(struct net_device *ndev, { struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev); - if (!salve->phy) - return -EINVAL; - - if (!phy_validate_pause(salve->phy, pause)) - return -EINVAL; - - salve->rx_pause = pause->rx_pause ? true : false; - salve->tx_pause = pause->tx_pause ? 
true : false; - - phy_set_asym_pause(salve->phy, salve->rx_pause, salve->tx_pause); - - return 0; + return phylink_ethtool_set_pauseparam(salve->phylink, pause); } static void am65_cpsw_get_wol(struct net_device *ndev, @@ -500,11 +487,7 @@ static void am65_cpsw_get_wol(struct net_device *ndev, { struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev); - wol->supported = 0; - wol->wolopts = 0; - - if (salve->phy) - phy_ethtool_get_wol(salve->phy, wol); + phylink_ethtool_get_wol(salve->phylink, wol); } static int am65_cpsw_set_wol(struct net_device *ndev, @@ -512,10 +495,7 @@ static int am65_cpsw_set_wol(struct net_device *ndev, { struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev); - if (!salve->phy) - return -EOPNOTSUPP; - - return phy_ethtool_set_wol(salve->phy, wol); + return phylink_ethtool_set_wol(salve->phylink, wol); } static int am65_cpsw_get_link_ksettings(struct net_device *ndev, @@ -523,11 +503,7 @@ static int am65_cpsw_get_link_ksettings(struct net_device *ndev, { struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev); - if (!salve->phy) - return -EOPNOTSUPP; - - phy_ethtool_ksettings_get(salve->phy, ecmd); - return 0; + return phylink_ethtool_ksettings_get(salve->phylink, ecmd); } static int @@ -536,40 +512,28 @@ am65_cpsw_set_link_ksettings(struct net_device *ndev, { struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev); - if (!salve->phy || phy_is_pseudo_fixed_link(salve->phy)) - return -EOPNOTSUPP; - - return phy_ethtool_ksettings_set(salve->phy, ecmd); + return phylink_ethtool_ksettings_set(salve->phylink, ecmd); } static int am65_cpsw_get_eee(struct net_device *ndev, struct ethtool_eee *edata) { struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev); - if (!salve->phy || phy_is_pseudo_fixed_link(salve->phy)) - return -EOPNOTSUPP; - - return phy_ethtool_get_eee(salve->phy, edata); + return phylink_ethtool_get_eee(salve->phylink, edata); } static int am65_cpsw_set_eee(struct net_device *ndev, struct ethtool_eee *edata) { struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev); - if (!salve->phy || phy_is_pseudo_fixed_link(salve->phy)) - return -EOPNOTSUPP; - - return phy_ethtool_set_eee(salve->phy, edata); + return phylink_ethtool_set_eee(salve->phylink, edata); } static int am65_cpsw_nway_reset(struct net_device *ndev) { struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev); - if (!salve->phy || phy_is_pseudo_fixed_link(salve->phy)) - return -EOPNOTSUPP; - - return phy_restart_aneg(salve->phy); + return phylink_ethtool_nway_reset(salve->phylink); } static int am65_cpsw_get_regs_len(struct net_device *ndev) diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index 8251d7eb001b..d2747e9db286 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -18,7 +18,7 @@ #include <linux/of_mdio.h> #include <linux/of_net.h> #include <linux/of_device.h> -#include <linux/phy.h> +#include <linux/phylink.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> @@ -159,69 +159,6 @@ static void am65_cpsw_nuss_get_ver(struct am65_cpsw_common *common) common->pdata.quirks); } -void am65_cpsw_nuss_adjust_link(struct net_device *ndev) -{ - struct am65_cpsw_common *common = am65_ndev_to_common(ndev); - struct am65_cpsw_port *port = am65_ndev_to_port(ndev); - struct phy_device *phy = port->slave.phy; - u32 mac_control = 0; - - if (!phy) - return; - - if (phy->link) { - mac_control = CPSW_SL_CTL_GMII_EN; - - if 
(phy->speed == 1000) - mac_control |= CPSW_SL_CTL_GIG; - if (phy->speed == 10 && phy_interface_is_rgmii(phy)) - /* Can be used with in band mode only */ - mac_control |= CPSW_SL_CTL_EXT_EN; - if (phy->speed == 100 && phy->interface == PHY_INTERFACE_MODE_RMII) - mac_control |= CPSW_SL_CTL_IFCTL_A; - if (phy->duplex) - mac_control |= CPSW_SL_CTL_FULLDUPLEX; - - /* RGMII speed is 100M if !CPSW_SL_CTL_GIG*/ - - /* rx_pause/tx_pause */ - if (port->slave.rx_pause) - mac_control |= CPSW_SL_CTL_RX_FLOW_EN; - - if (port->slave.tx_pause) - mac_control |= CPSW_SL_CTL_TX_FLOW_EN; - - cpsw_sl_ctl_set(port->slave.mac_sl, mac_control); - - /* enable forwarding */ - cpsw_ale_control_set(common->ale, port->port_id, - ALE_PORT_STATE, ALE_PORT_STATE_FORWARD); - - am65_cpsw_qos_link_up(ndev, phy->speed); - netif_tx_wake_all_queues(ndev); - } else { - int tmo; - - /* disable forwarding */ - cpsw_ale_control_set(common->ale, port->port_id, - ALE_PORT_STATE, ALE_PORT_STATE_DISABLE); - - cpsw_sl_ctl_set(port->slave.mac_sl, CPSW_SL_CTL_CMD_IDLE); - - tmo = cpsw_sl_wait_for_idle(port->slave.mac_sl, 100); - dev_dbg(common->dev, "donw msc_sl %08x tmo %d\n", - cpsw_sl_reg_read(port->slave.mac_sl, CPSW_SL_MACSTATUS), - tmo); - - cpsw_sl_ctl_reset(port->slave.mac_sl); - - am65_cpsw_qos_link_down(ndev); - netif_tx_stop_all_queues(ndev); - } - - phy_print_status(phy); -} - static int am65_cpsw_nuss_ndo_slave_add_vid(struct net_device *ndev, __be16 proto, u16 vid) { @@ -589,15 +526,11 @@ static int am65_cpsw_nuss_ndo_slave_stop(struct net_device *ndev) struct am65_cpsw_port *port = am65_ndev_to_port(ndev); int ret; - if (port->slave.phy) - phy_stop(port->slave.phy); + phylink_stop(port->slave.phylink); netif_tx_stop_all_queues(ndev); - if (port->slave.phy) { - phy_disconnect(port->slave.phy); - port->slave.phy = NULL; - } + phylink_disconnect_phy(port->slave.phylink); ret = am65_cpsw_nuss_common_stop(common); if (ret) @@ -667,25 +600,14 @@ static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev) if (ret) goto error_cleanup; - if (port->slave.phy_node) { - port->slave.phy = of_phy_connect(ndev, - port->slave.phy_node, - &am65_cpsw_nuss_adjust_link, - 0, port->slave.phy_if); - if (!port->slave.phy) { - dev_err(common->dev, "phy %pOF not found on slave %d\n", - port->slave.phy_node, - port->port_id); - ret = -ENODEV; - goto error_cleanup; - } - } + ret = phylink_of_phy_connect(port->slave.phylink, port->slave.phy_node, 0); + if (ret) + goto error_cleanup; /* restore vlan configurations */ vlan_for_each(ndev, cpsw_restore_vlans, port); - phy_attached_info(port->slave.phy); - phy_start(port->slave.phy); + phylink_start(port->slave.phylink); return 0; @@ -1431,10 +1353,7 @@ static int am65_cpsw_nuss_ndo_slave_ioctl(struct net_device *ndev, return am65_cpsw_nuss_hwtstamp_get(ndev, req); } - if (!port->slave.phy) - return -EOPNOTSUPP; - - return phy_mii_ioctl(port->slave.phy, req, cmd); + return phylink_mii_ioctl(port->slave.phylink, req, cmd); } static void am65_cpsw_nuss_ndo_get_stats(struct net_device *dev, @@ -1494,6 +1413,81 @@ static const struct net_device_ops am65_cpsw_nuss_netdev_ops = { .ndo_get_devlink_port = am65_cpsw_ndo_get_devlink_port, }; +static void am65_cpsw_nuss_mac_config(struct phylink_config *config, unsigned int mode, + const struct phylink_link_state *state) +{ + /* Currently not used */ +} + +static void am65_cpsw_nuss_mac_link_down(struct phylink_config *config, unsigned int mode, + phy_interface_t interface) +{ + struct am65_cpsw_slave_data *slave = container_of(config, struct 
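/*
 * phylink invokes the MAC ops with only the phylink_config pointer, so
 * per-port state is recovered by walking back through the embedding
 * structs, as the surrounding code does:
 *
 *	slave = container_of(config, struct am65_cpsw_slave_data,
 *			     phylink_config);
 *	port = container_of(slave, struct am65_cpsw_port, slave);
 *
 * This works only because phylink_config is embedded by value in
 * am65_cpsw_slave_data, which is itself embedded in am65_cpsw_port.
 */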
am65_cpsw_slave_data, + phylink_config); + struct am65_cpsw_port *port = container_of(slave, struct am65_cpsw_port, slave); + struct am65_cpsw_common *common = port->common; + struct net_device *ndev = port->ndev; + int tmo; + + /* disable forwarding */ + cpsw_ale_control_set(common->ale, port->port_id, ALE_PORT_STATE, ALE_PORT_STATE_DISABLE); + + cpsw_sl_ctl_set(port->slave.mac_sl, CPSW_SL_CTL_CMD_IDLE); + + tmo = cpsw_sl_wait_for_idle(port->slave.mac_sl, 100); + dev_dbg(common->dev, "down msc_sl %08x tmo %d\n", + cpsw_sl_reg_read(port->slave.mac_sl, CPSW_SL_MACSTATUS), tmo); + + cpsw_sl_ctl_reset(port->slave.mac_sl); + + am65_cpsw_qos_link_down(ndev); + netif_tx_stop_all_queues(ndev); +} + +static void am65_cpsw_nuss_mac_link_up(struct phylink_config *config, struct phy_device *phy, + unsigned int mode, phy_interface_t interface, int speed, + int duplex, bool tx_pause, bool rx_pause) +{ + struct am65_cpsw_slave_data *slave = container_of(config, struct am65_cpsw_slave_data, + phylink_config); + struct am65_cpsw_port *port = container_of(slave, struct am65_cpsw_port, slave); + struct am65_cpsw_common *common = port->common; + u32 mac_control = CPSW_SL_CTL_GMII_EN; + struct net_device *ndev = port->ndev; + + if (speed == SPEED_1000) + mac_control |= CPSW_SL_CTL_GIG; + if (speed == SPEED_10 && interface == PHY_INTERFACE_MODE_RGMII) + /* Can be used with in band mode only */ + mac_control |= CPSW_SL_CTL_EXT_EN; + if (speed == SPEED_100 && interface == PHY_INTERFACE_MODE_RMII) + mac_control |= CPSW_SL_CTL_IFCTL_A; + if (duplex) + mac_control |= CPSW_SL_CTL_FULLDUPLEX; + + /* rx_pause/tx_pause */ + if (rx_pause) + mac_control |= CPSW_SL_CTL_RX_FLOW_EN; + + if (tx_pause) + mac_control |= CPSW_SL_CTL_TX_FLOW_EN; + + cpsw_sl_ctl_set(port->slave.mac_sl, mac_control); + + /* enable forwarding */ + cpsw_ale_control_set(common->ale, port->port_id, ALE_PORT_STATE, ALE_PORT_STATE_FORWARD); + + am65_cpsw_qos_link_up(ndev, speed); + netif_tx_wake_all_queues(ndev); +} + +static const struct phylink_mac_ops am65_cpsw_phylink_mac_ops = { + .validate = phylink_generic_validate, + .mac_config = am65_cpsw_nuss_mac_config, + .mac_link_down = am65_cpsw_nuss_mac_link_down, + .mac_link_up = am65_cpsw_nuss_mac_link_up, +}; + static void am65_cpsw_nuss_slave_disable_unused(struct am65_cpsw_port *port) { struct am65_cpsw_common *common = port->common; @@ -1890,27 +1884,7 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common) of_property_read_bool(port_np, "ti,mac-only"); /* get phy/link info */ - if (of_phy_is_fixed_link(port_np)) { - ret = of_phy_register_fixed_link(port_np); - if (ret) { - ret = dev_err_probe(dev, ret, - "failed to register fixed-link phy %pOF\n", - port_np); - goto of_node_put; - } - port->slave.phy_node = of_node_get(port_np); - } else { - port->slave.phy_node = - of_parse_phandle(port_np, "phy-handle", 0); - } - - if (!port->slave.phy_node) { - dev_err(dev, - "slave[%d] no phy found\n", port_id); - ret = -ENODEV; - goto of_node_put; - } - + port->slave.phy_node = port_np; ret = of_get_phy_mode(port_np, &port->slave.phy_if); if (ret) { dev_err(dev, "%pOF read phy-mode err %d\n", @@ -1952,12 +1926,25 @@ static void am65_cpsw_pcpu_stats_free(void *data) free_percpu(stats); } +static void am65_cpsw_nuss_phylink_cleanup(struct am65_cpsw_common *common) +{ + struct am65_cpsw_port *port; + int i; + + for (i = 0; i < common->port_num; i++) { + port = &common->ports[i]; + if (port->slave.phylink) + phylink_destroy(port->slave.phylink); + } +} + static int 
am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx) { struct am65_cpsw_ndev_priv *ndev_priv; struct device *dev = common->dev; struct am65_cpsw_port *port; + struct phylink *phylink; int ret; port = &common->ports[port_idx]; @@ -1995,6 +1982,20 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx) port->ndev->netdev_ops = &am65_cpsw_nuss_netdev_ops; port->ndev->ethtool_ops = &am65_cpsw_ethtool_ops_slave; + /* Configuring Phylink */ + port->slave.phylink_config.dev = &port->ndev->dev; + port->slave.phylink_config.type = PHYLINK_NETDEV; + port->slave.phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD; + + phy_interface_set_rgmii(port->slave.phylink_config.supported_interfaces); + + phylink = phylink_create(&port->slave.phylink_config, dev->fwnode, port->slave.phy_if, + &am65_cpsw_phylink_mac_ops); + if (IS_ERR(phylink)) + return PTR_ERR(phylink); + + port->slave.phylink = phylink; + /* Disable TX checksum offload by default due to HW bug */ if (common->pdata.quirks & AM65_CPSW_QUIRK_I2027_NO_TX_CSUM) port->ndev->features &= ~NETIF_F_HW_CSUM; @@ -2761,15 +2762,17 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev) ret = am65_cpsw_nuss_init_ndevs(common); if (ret) - goto err_of_clear; + goto err_free_phylink; ret = am65_cpsw_nuss_register_ndevs(common); if (ret) - goto err_of_clear; + goto err_free_phylink; pm_runtime_put(dev); return 0; +err_free_phylink: + am65_cpsw_nuss_phylink_cleanup(common); err_of_clear: of_platform_device_destroy(common->mdio_dev, NULL); err_pm_clear: @@ -2792,6 +2795,7 @@ static int am65_cpsw_nuss_remove(struct platform_device *pdev) return ret; } + am65_cpsw_nuss_phylink_cleanup(common); am65_cpsw_unregister_devlink(common); am65_cpsw_unregister_notifiers(common); diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h index 048ed10143c1..ac945631bf2f 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.h +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h @@ -10,7 +10,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/netdevice.h> -#include <linux/phy.h> +#include <linux/phylink.h> #include <linux/platform_device.h> #include <linux/soc/ti/k3-ringacc.h> #include <net/devlink.h> @@ -30,13 +30,14 @@ struct am65_cpsw_slave_data { bool mac_only; struct cpsw_sl *mac_sl; struct device_node *phy_node; - struct phy_device *phy; phy_interface_t phy_if; struct phy *ifphy; bool rx_pause; bool tx_pause; u8 mac_addr[ETH_ALEN]; int port_vlan; + struct phylink *phylink; + struct phylink_config phylink_config; }; struct am65_cpsw_port { diff --git a/drivers/net/ethernet/ti/am65-cpsw-switchdev.c b/drivers/net/ethernet/ti/am65-cpsw-switchdev.c index 599708a3e81d..d4c56da98a6a 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-switchdev.c +++ b/drivers/net/ethernet/ti/am65-cpsw-switchdev.c @@ -237,15 +237,11 @@ static int am65_cpsw_port_vlans_add(struct am65_cpsw_port *port, { bool untag = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; struct net_device *orig_dev = vlan->obj.orig_dev; - bool cpu_port = netif_is_bridge_master(orig_dev); bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; netdev_dbg(port->ndev, "VID add: %s: vid:%u flags:%X\n", port->ndev->name, vlan->vid, vlan->flags); - if (cpu_port && !(vlan->flags & BRIDGE_VLAN_INFO_BRENTRY)) - return 0; - return am65_cpsw_port_vlan_add(port, untag, pvid, vlan->vid, orig_dev); } diff --git a/drivers/net/ethernet/ti/cpsw_switchdev.c b/drivers/net/ethernet/ti/cpsw_switchdev.c index 
a7d97d429e06..ce85f7610273 100644 --- a/drivers/net/ethernet/ti/cpsw_switchdev.c +++ b/drivers/net/ethernet/ti/cpsw_switchdev.c @@ -252,15 +252,11 @@ static int cpsw_port_vlans_add(struct cpsw_priv *priv, { bool untag = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; struct net_device *orig_dev = vlan->obj.orig_dev; - bool cpu_port = netif_is_bridge_master(orig_dev); bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; dev_dbg(priv->dev, "VID add: %s: vid:%u flags:%X\n", priv->ndev->name, vlan->vid, vlan->flags); - if (cpu_port && !(vlan->flags & BRIDGE_VLAN_INFO_BRENTRY)) - return 0; - return cpsw_port_vlan_add(priv, untag, pvid, vlan->vid, orig_dev); } diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c index dc70a6bfaa6a..92ca739fac01 100644 --- a/drivers/net/ethernet/ti/cpts.c +++ b/drivers/net/ethernet/ti/cpts.c @@ -568,7 +568,9 @@ int cpts_register(struct cpts *cpts) for (i = 0; i < CPTS_MAX_EVENTS; i++) list_add(&cpts->pool_data[i].list, &cpts->pool); - clk_enable(cpts->refclk); + err = clk_enable(cpts->refclk); + if (err) + return err; cpts_write32(cpts, CPTS_EN, control); cpts_write32(cpts, TS_PEND_EN, int_enable); diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index 31df3267a01a..4b6aed78d392 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -1604,6 +1604,7 @@ static int emac_dev_stop(struct net_device *ndev) int irq_num; struct emac_priv *priv = netdev_priv(ndev); struct device *emac_dev = &ndev->dev; + int ret = 0; /* inform the upper layers. */ netif_stop_queue(ndev); @@ -1618,17 +1619,31 @@ static int emac_dev_stop(struct net_device *ndev) phy_disconnect(ndev->phydev); /* Free IRQ */ - while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, i))) { - for (irq_num = res->start; irq_num <= res->end; irq_num++) - free_irq(irq_num, priv->ndev); - i++; + if (dev_of_node(&priv->pdev->dev)) { + do { + ret = platform_get_irq_optional(priv->pdev, i); + if (ret < 0 && ret != -ENXIO) + break; + if (ret > 0) { + free_irq(ret, priv->ndev); + } else { + ret = 0; + break; + } + } while (++i); + } else { + while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, i))) { + for (irq_num = res->start; irq_num <= res->end; irq_num++) + free_irq(irq_num, priv->ndev); + i++; + } } if (netif_msg_drv(priv)) dev_notice(emac_dev, "DaVinci EMAC: %s stopped\n", ndev->name); pm_runtime_put(&priv->pdev->dev); - return 0; + return ret; } /** diff --git a/drivers/net/ethernet/vertexcom/mse102x.c b/drivers/net/ethernet/vertexcom/mse102x.c index 89a31783fbb4..eb39a45de012 100644 --- a/drivers/net/ethernet/vertexcom/mse102x.c +++ b/drivers/net/ethernet/vertexcom/mse102x.c @@ -362,7 +362,7 @@ static void mse102x_rx_pkt_spi(struct mse102x_net *mse) mse102x_dump_packet(__func__, skb->len, skb->data); skb->protocol = eth_type_trans(skb, mse->ndev); - netif_rx_ni(skb); + netif_rx(skb); mse->ndev->stats.rx_packets++; mse->ndev->stats.rx_bytes += rxlen; @@ -731,7 +731,7 @@ static int mse102x_probe_spi(struct spi_device *spi) return 0; } -static int mse102x_remove_spi(struct spi_device *spi) +static void mse102x_remove_spi(struct spi_device *spi) { struct mse102x_net *mse = dev_get_drvdata(&spi->dev); struct mse102x_net_spi *mses = to_mse102x_spi(mse); @@ -741,8 +741,6 @@ static int mse102x_remove_spi(struct spi_device *spi) mse102x_remove_device_debugfs(mses); unregister_netdev(mse->ndev); - - return 0; } static const struct of_device_id mse102x_match_table[] = { diff --git 
a/drivers/net/ethernet/wiznet/w5100-spi.c b/drivers/net/ethernet/wiznet/w5100-spi.c index 7779a36da3c8..7c52796273a4 100644 --- a/drivers/net/ethernet/wiznet/w5100-spi.c +++ b/drivers/net/ethernet/wiznet/w5100-spi.c @@ -461,11 +461,9 @@ static int w5100_spi_probe(struct spi_device *spi) return w5100_probe(&spi->dev, ops, priv_size, mac, spi->irq, -EINVAL); } -static int w5100_spi_remove(struct spi_device *spi) +static void w5100_spi_remove(struct spi_device *spi) { w5100_remove(&spi->dev); - - return 0; } static const struct spi_device_id w5100_spi_ids[] = { diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c index ae24d6b86803..4fd7c39e1123 100644 --- a/drivers/net/ethernet/wiznet/w5100.c +++ b/drivers/net/ethernet/wiznet/w5100.c @@ -883,7 +883,7 @@ static void w5100_rx_work(struct work_struct *work) struct sk_buff *skb; while ((skb = w5100_rx_skb(priv->ndev))) - netif_rx_ni(skb); + netif_rx(skb); w5100_enable_intr(priv); } diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig index 911b5ef9e680..0014729b8865 100644 --- a/drivers/net/ethernet/xilinx/Kconfig +++ b/drivers/net/ethernet/xilinx/Kconfig @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only # -# Xilink device configuration +# Xilinx device configuration # config NET_VENDOR_XILINX diff --git a/drivers/net/ethernet/xilinx/ll_temac.h b/drivers/net/ethernet/xilinx/ll_temac.h index 4a73127e10a6..c6395c406418 100644 --- a/drivers/net/ethernet/xilinx/ll_temac.h +++ b/drivers/net/ethernet/xilinx/ll_temac.h @@ -271,7 +271,7 @@ This option defaults to enabled (set) */ #define XTE_TIE_OFFSET 0x000003A4 /* Interrupt enable */ -/** MII Mamagement Control register (MGTCR) */ +/* MII Management Control register (MGTCR) */ #define XTE_MGTDR_OFFSET 0x000003B0 /* MII data */ #define XTE_MIIMAI_OFFSET 0x000003B4 /* MII control */ @@ -283,7 +283,7 @@ This option defaults to enabled (set) */ #define STS_CTRL_APP0_ERR (1 << 31) #define STS_CTRL_APP0_IRQONEND (1 << 30) -/* undoccumented */ +/* undocumented */ #define STS_CTRL_APP0_STOPONEND (1 << 29) #define STS_CTRL_APP0_CMPLT (1 << 28) #define STS_CTRL_APP0_SOP (1 << 27) diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index b900ab5aef2a..869e362e09c1 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -361,8 +361,9 @@ static int temac_dma_bd_init(struct net_device *ndev) lp->rx_bd_v[i].next = cpu_to_be32(lp->rx_bd_p + sizeof(*lp->rx_bd_v) * ((i + 1) % lp->rx_bd_num)); - skb = netdev_alloc_skb_ip_align(ndev, - XTE_MAX_JUMBO_FRAME_SIZE); + skb = __netdev_alloc_skb_ip_align(ndev, + XTE_MAX_JUMBO_FRAME_SIZE, + GFP_KERNEL); if (!skb) goto out; @@ -1008,7 +1009,7 @@ static void ll_temac_recv(struct net_device *ndev) (skb->len > 64)) { /* Convert from device endianness (be32) to cpu - * endiannes, and if necessary swap the bytes + * endianness, and if necessary swap the bytes * (back) for proper IP checksum byte order * (be16). 
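An aside on the ll_temac allocation change above: netdev_alloc_skb_ip_align() hard-codes GFP_ATOMIC, which is needlessly strict when the rx ring is filled from a context that may sleep; the double-underscore variant takes an explicit gfp_t. A minimal sketch of the same pattern follows — the ring-fill helper and buffer size are hypothetical names, not code from this patch.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define MY_RX_BUF_SIZE 2048 /* hypothetical buffer size */

/* Fill an rx ring from process context (probe/open), where GFP_KERNEL
 * allocations may sleep and are far less likely to fail than GFP_ATOMIC.
 */
static int my_fill_rx_ring(struct net_device *ndev, struct sk_buff **ring,
			   unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		ring[i] = __netdev_alloc_skb_ip_align(ndev, MY_RX_BUF_SIZE,
						      GFP_KERNEL);
		if (!ring[i])
			return -ENOMEM; /* caller unwinds earlier entries */
	}
	return 0;
}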
*/ @@ -1433,6 +1434,8 @@ static int temac_probe(struct platform_device *pdev) lp->indirect_lock = devm_kmalloc(&pdev->dev, sizeof(*lp->indirect_lock), GFP_KERNEL); + if (!lp->indirect_lock) + return -ENOMEM; spin_lock_init(lp->indirect_lock); } diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h index 40108968b350..0f9c88dd1a4a 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet.h +++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h @@ -119,11 +119,11 @@ #define XAXIDMA_IRQ_ERROR_MASK 0x00004000 /* Error interrupt */ #define XAXIDMA_IRQ_ALL_MASK 0x00007000 /* All interrupts */ -/* Default TX/RX Threshold and waitbound values for SGDMA mode */ +/* Default TX/RX Threshold and delay timer values for SGDMA mode */ #define XAXIDMA_DFT_TX_THRESHOLD 24 -#define XAXIDMA_DFT_TX_WAITBOUND 254 -#define XAXIDMA_DFT_RX_THRESHOLD 24 -#define XAXIDMA_DFT_RX_WAITBOUND 254 +#define XAXIDMA_DFT_TX_USEC 50 +#define XAXIDMA_DFT_RX_THRESHOLD 1 +#define XAXIDMA_DFT_RX_USEC 50 #define XAXIDMA_BD_CTRL_TXSOF_MASK 0x08000000 /* First tx packet */ #define XAXIDMA_BD_CTRL_TXEOF_MASK 0x04000000 /* Last tx packet */ @@ -385,6 +385,7 @@ struct axidma_bd { * @phy_node: Pointer to device node structure * @phylink: Pointer to phylink instance * @phylink_config: phylink configuration settings + * @napi: NAPI control structure * @pcs_phy: Reference to PCS/PMA PHY if used * @pcs: phylink pcs structure for PCS PHY * @switch_x_sgmii: Whether switchable 1000BaseX/SGMII mode is enabled in the core @@ -395,6 +396,7 @@ struct axidma_bd { * @regs_start: Resource start for axienet device addresses * @regs: Base address for the axienet_local device address space * @dma_regs: Base address for the axidma device address space + * @rx_dma_cr: Nominal content of RX DMA control register * @dma_err_task: Work structure to process Axi DMA errors * @tx_irq: Axidma TX IRQ number * @rx_irq: Axidma RX IRQ number @@ -423,7 +425,9 @@ struct axidma_bd { * @csum_offload_on_tx_path: Stores the checksum selection on TX side. * @csum_offload_on_rx_path: Stores the checksum selection on RX side. * @coalesce_count_rx: Store the irq coalesce on RX side. + * @coalesce_usec_rx: IRQ coalesce delay for RX * @coalesce_count_tx: Store the irq coalesce on TX side. + * @coalesce_usec_tx: IRQ coalesce delay for TX */ struct axienet_local { struct net_device *ndev; @@ -434,6 +438,8 @@ struct axienet_local { struct phylink *phylink; struct phylink_config phylink_config; + struct napi_struct napi; + struct mdio_device *pcs_phy; struct phylink_pcs pcs; @@ -449,6 +455,8 @@ struct axienet_local { void __iomem *regs; void __iomem *dma_regs; + u32 rx_dma_cr; + struct work_struct dma_err_task; int tx_irq; @@ -476,7 +484,9 @@ struct axienet_local { int csum_offload_on_rx_path; u32 coalesce_count_rx; + u32 coalesce_usec_rx; u32 coalesce_count_tx; + u32 coalesce_usec_tx; }; /** diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index de0a6372ae0e..c7eb05e4a6bf 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -7,7 +7,7 @@ * Copyright (c) 2008-2009 Secret Lab Technologies Ltd. * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu> * Copyright (c) 2010 - 2011 PetaLogix - * Copyright (c) 2019 SED Systems, a division of Calian Ltd. + * Copyright (c) 2019 - 2022 Calian Advanced Technologies * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved. 
* * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6 @@ -33,7 +33,7 @@ #include <linux/of_irq.h> #include <linux/of_address.h> #include <linux/skbuff.h> -#include <linux/spinlock.h> +#include <linux/math64.h> #include <linux/phy.h> #include <linux/mii.h> #include <linux/ethtool.h> @@ -190,7 +190,7 @@ static void axienet_dma_bd_release(struct net_device *ndev) struct axienet_local *lp = netdev_priv(ndev); /* If we end up here, tx_bd_v must have been DMA allocated. */ - dma_free_coherent(ndev->dev.parent, + dma_free_coherent(lp->dev, sizeof(*lp->tx_bd_v) * lp->tx_bd_num, lp->tx_bd_v, lp->tx_bd_p); @@ -215,18 +215,90 @@ static void axienet_dma_bd_release(struct net_device *ndev) */ if (lp->rx_bd_v[i].cntrl) { phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]); - dma_unmap_single(ndev->dev.parent, phys, + dma_unmap_single(lp->dev, phys, lp->max_frm_size, DMA_FROM_DEVICE); } } - dma_free_coherent(ndev->dev.parent, + dma_free_coherent(lp->dev, sizeof(*lp->rx_bd_v) * lp->rx_bd_num, lp->rx_bd_v, lp->rx_bd_p); } /** + * axienet_usec_to_timer - Calculate IRQ delay timer value + * @lp: Pointer to the axienet_local structure + * @coalesce_usec: Microseconds to convert into timer value + */ +static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec) +{ + u32 result; + u64 clk_rate = 125000000; /* arbitrary guess if no clock rate set */ + + if (lp->axi_clk) + clk_rate = clk_get_rate(lp->axi_clk); + + /* 1 Timeout Interval = 125 * (clock period of SG clock) */ + result = DIV64_U64_ROUND_CLOSEST((u64)coalesce_usec * clk_rate, + (u64)125000000); + if (result > 255) + result = 255; + + return result; +} + +/** + * axienet_dma_start - Set up DMA registers and start DMA operation + * @lp: Pointer to the axienet_local structure + */ +static void axienet_dma_start(struct axienet_local *lp) +{ + u32 tx_cr; + + /* Start updating the Rx channel control register */ + lp->rx_dma_cr = (lp->coalesce_count_rx << XAXIDMA_COALESCE_SHIFT) | + XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK; + /* Only set interrupt delay timer if not generating an interrupt on + * the first RX packet. Otherwise leave at 0 to disable delay interrupt. + */ + if (lp->coalesce_count_rx > 1) + lp->rx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_rx) + << XAXIDMA_DELAY_SHIFT) | + XAXIDMA_IRQ_DELAY_MASK; + axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr); + + /* Start updating the Tx channel control register */ + tx_cr = (lp->coalesce_count_tx << XAXIDMA_COALESCE_SHIFT) | + XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK; + /* Only set interrupt delay timer if not generating an interrupt on + * the first TX packet. Otherwise leave at 0 to disable delay interrupt. + */ + if (lp->coalesce_count_tx > 1) + tx_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_tx) + << XAXIDMA_DELAY_SHIFT) | + XAXIDMA_IRQ_DELAY_MASK; + axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, tx_cr); + + /* Populate the tail pointer and bring the Rx Axi DMA engine out of + * halted state. This will make the Rx side ready for reception. + */ + axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p); + lp->rx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK; + axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr); + axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p + + (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1))); + + /* Write to the RS (Run-stop) bit in the Tx channel control register. + * Tx channel is now ready to run. 
But only after we write to the + * tail pointer register that the Tx channel will start transmitting. + */ + axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p); + tx_cr |= XAXIDMA_CR_RUNSTOP_MASK; + axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, tx_cr); +} + +/** * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA * @ndev: Pointer to the net_device structure * @@ -238,7 +310,6 @@ static void axienet_dma_bd_release(struct net_device *ndev) */ static int axienet_dma_bd_init(struct net_device *ndev) { - u32 cr; int i; struct sk_buff *skb; struct axienet_local *lp = netdev_priv(ndev); @@ -249,13 +320,13 @@ static int axienet_dma_bd_init(struct net_device *ndev) lp->rx_bd_ci = 0; /* Allocate the Tx and Rx buffer descriptors. */ - lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent, + lp->tx_bd_v = dma_alloc_coherent(lp->dev, sizeof(*lp->tx_bd_v) * lp->tx_bd_num, &lp->tx_bd_p, GFP_KERNEL); if (!lp->tx_bd_v) return -ENOMEM; - lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent, + lp->rx_bd_v = dma_alloc_coherent(lp->dev, sizeof(*lp->rx_bd_v) * lp->rx_bd_num, &lp->rx_bd_p, GFP_KERNEL); if (!lp->rx_bd_v) @@ -285,9 +356,9 @@ static int axienet_dma_bd_init(struct net_device *ndev) goto out; lp->rx_bd_v[i].skb = skb; - addr = dma_map_single(ndev->dev.parent, skb->data, + addr = dma_map_single(lp->dev, skb->data, lp->max_frm_size, DMA_FROM_DEVICE); - if (dma_mapping_error(ndev->dev.parent, addr)) { + if (dma_mapping_error(lp->dev, addr)) { netdev_err(ndev, "DMA mapping error\n"); goto out; } @@ -296,50 +367,7 @@ static int axienet_dma_bd_init(struct net_device *ndev) lp->rx_bd_v[i].cntrl = lp->max_frm_size; } - /* Start updating the Rx channel control register */ - cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); - /* Update the interrupt coalesce count */ - cr = ((cr & ~XAXIDMA_COALESCE_MASK) | - ((lp->coalesce_count_rx) << XAXIDMA_COALESCE_SHIFT)); - /* Update the delay timer count */ - cr = ((cr & ~XAXIDMA_DELAY_MASK) | - (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT)); - /* Enable coalesce, delay timer and error interrupts */ - cr |= XAXIDMA_IRQ_ALL_MASK; - /* Write to the Rx channel control register */ - axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr); - - /* Start updating the Tx channel control register */ - cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); - /* Update the interrupt coalesce count */ - cr = (((cr & ~XAXIDMA_COALESCE_MASK)) | - ((lp->coalesce_count_tx) << XAXIDMA_COALESCE_SHIFT)); - /* Update the delay timer count */ - cr = (((cr & ~XAXIDMA_DELAY_MASK)) | - (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT)); - /* Enable coalesce, delay timer and error interrupts */ - cr |= XAXIDMA_IRQ_ALL_MASK; - /* Write to the Tx channel control register */ - axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr); - - /* Populate the tail pointer and bring the Rx Axi DMA engine out of - * halted state. This will make the Rx side ready for reception. - */ - axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p); - cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); - axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, - cr | XAXIDMA_CR_RUNSTOP_MASK); - axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p + - (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1))); - - /* Write to the RS (Run-stop) bit in the Tx channel control register. - * Tx channel is now ready to run. But only after we write to the - * tail pointer register that the Tx channel will start transmitting. 
- */ - axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p); - cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); - axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, - cr | XAXIDMA_CR_RUNSTOP_MASK); + axienet_dma_start(lp); return 0; out: @@ -531,13 +559,51 @@ static int __axienet_device_reset(struct axienet_local *lp) } /** + * axienet_dma_stop - Stop DMA operation + * @lp: Pointer to the axienet_local structure + */ +static void axienet_dma_stop(struct axienet_local *lp) +{ + int count; + u32 cr, sr; + + cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); + cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK); + axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr); + synchronize_irq(lp->rx_irq); + + cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); + cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK); + axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr); + synchronize_irq(lp->tx_irq); + + /* Give DMAs a chance to halt gracefully */ + sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET); + for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) { + msleep(20); + sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET); + } + + sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET); + for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) { + msleep(20); + sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET); + } + + /* Do a reset to ensure DMA is really stopped */ + axienet_lock_mii(lp); + __axienet_device_reset(lp); + axienet_unlock_mii(lp); +} + +/** * axienet_device_reset - Reset and initialize the Axi Ethernet hardware. * @ndev: Pointer to the net_device structure * * This function is called to reset and initialize the Axi Ethernet core. This * is typically called during initialization. It does a reset of the Axi DMA * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines - * areconnected to Axi Ethernet reset lines, this in turn resets the Axi + * are connected to Axi Ethernet reset lines, this in turn resets the Axi * Ethernet core. No separate hardware reset is done for the Axi Ethernet * core. * Returns 0 on success or a negative error number otherwise. 
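A quick sanity check on the conversion helper added above: per its own comment, one delay-timer tick is 125 SG-clock periods, so at the 125 MHz fallback rate a tick is exactly 1 us and the new 50 us defaults program a timer value of 50, clamped to the 8-bit maximum of 255. The same arithmetic as a standalone userspace sketch, assuming nothing beyond what axienet_usec_to_timer() itself states:

#include <stdint.h>
#include <stdio.h>

/* Re-statement of axienet_usec_to_timer(): one tick is 125 SG-clock
 * periods, rounded to the closest tick, saturating the 8-bit field.
 */
static uint32_t usec_to_timer(uint64_t clk_rate_hz, uint32_t usec)
{
	uint64_t ticks = (usec * clk_rate_hz + 125000000 / 2) / 125000000;

	return ticks > 255 ? 255 : (uint32_t)ticks;
}

int main(void)
{
	/* At the 125 MHz fallback clock, one tick is exactly 1 us... */
	printf("%u\n", usec_to_timer(125000000, 50)); /* -> 50 */
	/* ...while at 100 MHz the same 50 us rounds to 40 ticks. */
	printf("%u\n", usec_to_timer(100000000, 50)); /* -> 40 */
	return 0;
}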
@@ -636,7 +702,7 @@ static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd, /* Ensure we see complete descriptor update */ dma_rmb(); phys = desc_get_phys_addr(lp, cur_p); - dma_unmap_single(ndev->dev.parent, phys, + dma_unmap_single(lp->dev, phys, (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK), DMA_TO_DEVICE); @@ -774,9 +840,9 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev) cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */ } - phys = dma_map_single(ndev->dev.parent, skb->data, + phys = dma_map_single(lp->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) { + if (unlikely(dma_mapping_error(lp->dev, phys))) { if (net_ratelimit()) netdev_err(ndev, "TX DMA mapping error\n"); ndev->stats.tx_dropped++; @@ -790,11 +856,11 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev) lp->tx_bd_tail = 0; cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; frag = &skb_shinfo(skb)->frags[ii]; - phys = dma_map_single(ndev->dev.parent, + phys = dma_map_single(lp->dev, skb_frag_address(frag), skb_frag_size(frag), DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) { + if (unlikely(dma_mapping_error(lp->dev, phys))) { if (net_ratelimit()) netdev_err(ndev, "TX DMA mapping error\n"); ndev->stats.tx_dropped++; @@ -833,79 +899,84 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev) } /** - * axienet_recv - Is called from Axi DMA Rx Isr to complete the received - * BD processing. - * @ndev: Pointer to net_device structure. + * axienet_poll - Triggered by RX ISR to complete the received BD processing. + * @napi: Pointer to NAPI structure. + * @budget: Max number of packets to process. * - * This function is invoked from the Axi DMA Rx isr to process the Rx BDs. It - * does minimal processing and invokes "netif_rx" to complete further - * processing. + * Return: Number of RX packets processed. */ -static void axienet_recv(struct net_device *ndev) +static int axienet_poll(struct napi_struct *napi, int budget) { u32 length; u32 csumstatus; u32 size = 0; - u32 packets = 0; + int packets = 0; dma_addr_t tail_p = 0; - struct axienet_local *lp = netdev_priv(ndev); - struct sk_buff *skb, *new_skb; struct axidma_bd *cur_p; + struct sk_buff *skb, *new_skb; + struct axienet_local *lp = container_of(napi, struct axienet_local, napi); cur_p = &lp->rx_bd_v[lp->rx_bd_ci]; - while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) { + while (packets < budget && (cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) { dma_addr_t phys; - tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci; - /* Ensure we see complete descriptor update */ dma_rmb(); - phys = desc_get_phys_addr(lp, cur_p); - dma_unmap_single(ndev->dev.parent, phys, lp->max_frm_size, - DMA_FROM_DEVICE); skb = cur_p->skb; cur_p->skb = NULL; - length = cur_p->app4 & 0x0000FFFF; - - skb_put(skb, length); - skb->protocol = eth_type_trans(skb, ndev); - /*skb_checksum_none_assert(skb);*/ - skb->ip_summed = CHECKSUM_NONE; - - /* if we're doing Rx csum offload, set it up */ - if (lp->features & XAE_FEATURE_FULL_RX_CSUM) { - csumstatus = (cur_p->app2 & - XAE_FULL_CSUM_STATUS_MASK) >> 3; - if ((csumstatus == XAE_IP_TCP_CSUM_VALIDATED) || - (csumstatus == XAE_IP_UDP_CSUM_VALIDATED)) { - skb->ip_summed = CHECKSUM_UNNECESSARY; + + /* skb could be NULL if a previous pass already received the + * packet for this slot in the ring, but failed to refill it + * with a newly allocated buffer. In this case, don't try to + * receive it again. 
+ */ + if (likely(skb)) { + length = cur_p->app4 & 0x0000FFFF; + + phys = desc_get_phys_addr(lp, cur_p); + dma_unmap_single(lp->dev, phys, lp->max_frm_size, + DMA_FROM_DEVICE); + + skb_put(skb, length); + skb->protocol = eth_type_trans(skb, lp->ndev); + /*skb_checksum_none_assert(skb);*/ + skb->ip_summed = CHECKSUM_NONE; + + /* if we're doing Rx csum offload, set it up */ + if (lp->features & XAE_FEATURE_FULL_RX_CSUM) { + csumstatus = (cur_p->app2 & + XAE_FULL_CSUM_STATUS_MASK) >> 3; + if (csumstatus == XAE_IP_TCP_CSUM_VALIDATED || + csumstatus == XAE_IP_UDP_CSUM_VALIDATED) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + } + } else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 && + skb->protocol == htons(ETH_P_IP) && + skb->len > 64) { + skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF); + skb->ip_summed = CHECKSUM_COMPLETE; } - } else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 && - skb->protocol == htons(ETH_P_IP) && - skb->len > 64) { - skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF); - skb->ip_summed = CHECKSUM_COMPLETE; - } - netif_rx(skb); + napi_gro_receive(napi, skb); - size += length; - packets++; + size += length; + packets++; + } - new_skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size); + new_skb = napi_alloc_skb(napi, lp->max_frm_size); if (!new_skb) - return; + break; - phys = dma_map_single(ndev->dev.parent, new_skb->data, + phys = dma_map_single(lp->dev, new_skb->data, lp->max_frm_size, DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) { + if (unlikely(dma_mapping_error(lp->dev, phys))) { if (net_ratelimit()) - netdev_err(ndev, "RX DMA mapping error\n"); + netdev_err(lp->ndev, "RX DMA mapping error\n"); dev_kfree_skb(new_skb); - return; + break; } desc_set_phys_addr(lp, phys, cur_p); @@ -913,16 +984,30 @@ static void axienet_recv(struct net_device *ndev) cur_p->status = 0; cur_p->skb = new_skb; + /* Only update tail_p to mark this slot as usable after it has + * been successfully refilled. + */ + tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci; + if (++lp->rx_bd_ci >= lp->rx_bd_num) lp->rx_bd_ci = 0; cur_p = &lp->rx_bd_v[lp->rx_bd_ci]; } - ndev->stats.rx_packets += packets; - ndev->stats.rx_bytes += size; + lp->ndev->stats.rx_packets += packets; + lp->ndev->stats.rx_bytes += size; if (tail_p) axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p); + + if (packets < budget && napi_complete_done(napi, packets)) { + /* Re-enable RX completion interrupts. This should + * cause an immediate interrupt if any RX packets are + * already pending. 
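The hunks above and below follow the standard NAPI contract, which is easier to see distilled: the hard IRQ handler masks further completion interrupts and schedules the poller, and the poller re-arms them only when it finishes under budget and napi_complete_done() accepts the completion. A generic skeleton of that pairing — the my_* names are hypothetical helpers, not this driver's code:

#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct my_priv {
	struct napi_struct napi;
	/* ... device state ... */
};

/* Hypothetical helpers: mask/unmask RX completion IRQs in hardware,
 * and reap up to 'budget' packets, returning how many were processed.
 */
static void my_mask_rx_irq(struct my_priv *p);
static void my_unmask_rx_irq(struct my_priv *p);
static int my_process_rx(struct my_priv *p, int budget);

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *p = container_of(napi, struct my_priv, napi);
	int done = my_process_rx(p, budget);

	/* Re-arm only when under budget; napi_complete_done() may refuse
	 * (e.g. busy polling), in which case we will be polled again.
	 */
	if (done < budget && napi_complete_done(napi, done))
		my_unmask_rx_irq(p);
	return done;
}

static irqreturn_t my_rx_irq(int irq, void *data)
{
	struct my_priv *p = data;

	my_mask_rx_irq(p);
	napi_schedule(&p->napi);
	return IRQ_HANDLED;
}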
+ */ + axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr); + } + return packets; } /** @@ -937,41 +1022,27 @@ static void axienet_recv(struct net_device *ndev) */ static irqreturn_t axienet_tx_irq(int irq, void *_ndev) { - u32 cr; unsigned int status; struct net_device *ndev = _ndev; struct axienet_local *lp = netdev_priv(ndev); status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET); - if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) { - axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status); - axienet_start_xmit_done(lp->ndev); - goto out; - } + if (!(status & XAXIDMA_IRQ_ALL_MASK)) return IRQ_NONE; - if (status & XAXIDMA_IRQ_ERROR_MASK) { - dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status); - dev_err(&ndev->dev, "Current BD is at: 0x%x%08x\n", - (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb, - (lp->tx_bd_v[lp->tx_bd_ci]).phys); - - cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); - /* Disable coalesce, delay timer and error interrupts */ - cr &= (~XAXIDMA_IRQ_ALL_MASK); - /* Write to the Tx channel control register */ - axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr); - - cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); - /* Disable coalesce, delay timer and error interrupts */ - cr &= (~XAXIDMA_IRQ_ALL_MASK); - /* Write to the Rx channel control register */ - axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr); + axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status); + + if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) { + netdev_err(ndev, "DMA Tx error 0x%x\n", status); + netdev_err(ndev, "Current BD is at: 0x%x%08x\n", + (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb, + (lp->tx_bd_v[lp->tx_bd_ci]).phys); schedule_work(&lp->dma_err_task); - axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status); + } else { + axienet_start_xmit_done(lp->ndev); } -out: + return IRQ_HANDLED; } @@ -982,46 +1053,40 @@ out: * * Return: IRQ_HANDLED if device generated a RX interrupt, IRQ_NONE otherwise. * - * This is the Axi DMA Rx Isr. It invokes "axienet_recv" to complete the BD + * This is the Axi DMA Rx Isr. It invokes NAPI polling to complete the RX BD * processing. 
*/ static irqreturn_t axienet_rx_irq(int irq, void *_ndev) { - u32 cr; unsigned int status; struct net_device *ndev = _ndev; struct axienet_local *lp = netdev_priv(ndev); status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET); - if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) { - axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status); - axienet_recv(lp->ndev); - goto out; - } + if (!(status & XAXIDMA_IRQ_ALL_MASK)) return IRQ_NONE; - if (status & XAXIDMA_IRQ_ERROR_MASK) { - dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status); - dev_err(&ndev->dev, "Current BD is at: 0x%x%08x\n", - (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb, - (lp->rx_bd_v[lp->rx_bd_ci]).phys); - - cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); - /* Disable coalesce, delay timer and error interrupts */ - cr &= (~XAXIDMA_IRQ_ALL_MASK); - /* Finally write to the Tx channel control register */ - axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr); - - cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); - /* Disable coalesce, delay timer and error interrupts */ - cr &= (~XAXIDMA_IRQ_ALL_MASK); - /* write to the Rx channel control register */ - axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr); + axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status); + + if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) { + netdev_err(ndev, "DMA Rx error 0x%x\n", status); + netdev_err(ndev, "Current BD is at: 0x%x%08x\n", + (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb, + (lp->rx_bd_v[lp->rx_bd_ci]).phys); schedule_work(&lp->dma_err_task); - axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status); + } else { + /* Disable further RX completion interrupts and schedule + * NAPI receive. + */ + u32 cr = lp->rx_dma_cr; + + cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK); + axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr); + + napi_schedule(&lp->napi); } -out: + return IRQ_HANDLED; } @@ -1095,6 +1160,8 @@ static int axienet_open(struct net_device *ndev) /* Enable worker thread for Axi DMA error handling */ INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler); + napi_enable(&lp->napi); + /* Enable interrupts for Axi DMA Tx */ ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED, ndev->name, ndev); @@ -1120,6 +1187,7 @@ err_eth_irq: err_rx_irq: free_irq(lp->tx_irq, ndev); err_tx_irq: + napi_disable(&lp->napi); phylink_stop(lp->phylink); phylink_disconnect_phy(lp->phylink); cancel_work_sync(&lp->dma_err_task); @@ -1139,46 +1207,22 @@ err_tx_irq: */ static int axienet_stop(struct net_device *ndev) { - u32 cr, sr; - int count; struct axienet_local *lp = netdev_priv(ndev); dev_dbg(&ndev->dev, "axienet_close()\n"); + napi_disable(&lp->napi); + phylink_stop(lp->phylink); phylink_disconnect_phy(lp->phylink); axienet_setoptions(ndev, lp->options & ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN)); - cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); - cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK); - axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr); - - cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); - cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK); - axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr); + axienet_dma_stop(lp); axienet_iow(lp, XAE_IE_OFFSET, 0); - /* Give DMAs a chance to halt gracefully */ - sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET); - for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) { - msleep(20); - sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET); - } - - sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET); - for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) { - msleep(20); - sr = axienet_dma_in32(lp, 
XAXIDMA_TX_SR_OFFSET); - } - - /* Do a reset to ensure DMA is really stopped */ - axienet_lock_mii(lp); - __axienet_device_reset(lp); - axienet_unlock_mii(lp); - cancel_work_sync(&lp->dma_err_task); if (lp->eth_irq > 0) @@ -1449,14 +1493,12 @@ axienet_ethtools_get_coalesce(struct net_device *ndev, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { - u32 regval = 0; struct axienet_local *lp = netdev_priv(ndev); - regval = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); - ecoalesce->rx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK) - >> XAXIDMA_COALESCE_SHIFT; - regval = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); - ecoalesce->tx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK) - >> XAXIDMA_COALESCE_SHIFT; + + ecoalesce->rx_max_coalesced_frames = lp->coalesce_count_rx; + ecoalesce->rx_coalesce_usecs = lp->coalesce_usec_rx; + ecoalesce->tx_max_coalesced_frames = lp->coalesce_count_tx; + ecoalesce->tx_coalesce_usecs = lp->coalesce_usec_tx; return 0; } @@ -1489,8 +1531,12 @@ axienet_ethtools_set_coalesce(struct net_device *ndev, if (ecoalesce->rx_max_coalesced_frames) lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames; + if (ecoalesce->rx_coalesce_usecs) + lp->coalesce_usec_rx = ecoalesce->rx_coalesce_usecs; if (ecoalesce->tx_max_coalesced_frames) lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames; + if (ecoalesce->tx_coalesce_usecs) + lp->coalesce_usec_tx = ecoalesce->tx_coalesce_usecs; return 0; } @@ -1521,7 +1567,8 @@ static int axienet_ethtools_nway_reset(struct net_device *dev) } static const struct ethtool_ops axienet_ethtool_ops = { - .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES, + .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES | + ETHTOOL_COALESCE_USECS, .get_drvinfo = axienet_ethtools_get_drvinfo, .get_regs_len = axienet_ethtools_get_regs_len, .get_regs = axienet_ethtools_get_regs, @@ -1678,29 +1725,26 @@ static const struct phylink_mac_ops axienet_phylink_ops = { */ static void axienet_dma_err_handler(struct work_struct *work) { + u32 i; u32 axienet_status; - u32 cr, i; + struct axidma_bd *cur_p; struct axienet_local *lp = container_of(work, struct axienet_local, dma_err_task); struct net_device *ndev = lp->ndev; - struct axidma_bd *cur_p; + + napi_disable(&lp->napi); axienet_setoptions(ndev, lp->options & ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN)); - /* When we do an Axi Ethernet reset, it resets the complete core - * including the MDIO. MDIO must be disabled before resetting. - * Hold MDIO bus lock to avoid MDIO accesses during the reset. 
- */ - axienet_lock_mii(lp); - __axienet_device_reset(lp); - axienet_unlock_mii(lp); + + axienet_dma_stop(lp); for (i = 0; i < lp->tx_bd_num; i++) { cur_p = &lp->tx_bd_v[i]; if (cur_p->cntrl) { dma_addr_t addr = desc_get_phys_addr(lp, cur_p); - dma_unmap_single(ndev->dev.parent, addr, + dma_unmap_single(lp->dev, addr, (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK), DMA_TO_DEVICE); @@ -1733,50 +1777,7 @@ static void axienet_dma_err_handler(struct work_struct *work) lp->tx_bd_tail = 0; lp->rx_bd_ci = 0; - /* Start updating the Rx channel control register */ - cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); - /* Update the interrupt coalesce count */ - cr = ((cr & ~XAXIDMA_COALESCE_MASK) | - (XAXIDMA_DFT_RX_THRESHOLD << XAXIDMA_COALESCE_SHIFT)); - /* Update the delay timer count */ - cr = ((cr & ~XAXIDMA_DELAY_MASK) | - (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT)); - /* Enable coalesce, delay timer and error interrupts */ - cr |= XAXIDMA_IRQ_ALL_MASK; - /* Finally write to the Rx channel control register */ - axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr); - - /* Start updating the Tx channel control register */ - cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); - /* Update the interrupt coalesce count */ - cr = (((cr & ~XAXIDMA_COALESCE_MASK)) | - (XAXIDMA_DFT_TX_THRESHOLD << XAXIDMA_COALESCE_SHIFT)); - /* Update the delay timer count */ - cr = (((cr & ~XAXIDMA_DELAY_MASK)) | - (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT)); - /* Enable coalesce, delay timer and error interrupts */ - cr |= XAXIDMA_IRQ_ALL_MASK; - /* Finally write to the Tx channel control register */ - axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr); - - /* Populate the tail pointer and bring the Rx Axi DMA engine out of - * halted state. This will make the Rx side ready for reception. - */ - axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p); - cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); - axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, - cr | XAXIDMA_CR_RUNSTOP_MASK); - axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p + - (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1))); - - /* Write to the RS (Run-stop) bit in the Tx channel control register. - * Tx channel is now ready to run. 
But only after we write to the - * tail pointer register that the Tx channel will start transmitting - */ - axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p); - cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); - axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, - cr | XAXIDMA_CR_RUNSTOP_MASK); + axienet_dma_start(lp); axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET); axienet_status &= ~XAE_RCW1_RX_MASK; @@ -1797,6 +1798,7 @@ static void axienet_dma_err_handler(struct work_struct *work) axienet_set_mac_address(ndev, NULL); axienet_set_multicast_list(ndev); axienet_setoptions(ndev, lp->options); + napi_enable(&lp->napi); } /** @@ -1845,6 +1847,8 @@ static int axienet_probe(struct platform_device *pdev) lp->rx_bd_num = RX_BD_NUM_DEFAULT; lp->tx_bd_num = TX_BD_NUM_DEFAULT; + netif_napi_add(ndev, &lp->napi, axienet_poll, NAPI_POLL_WEIGHT); + lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk"); if (!lp->axi_clk) { /* For backward compatibility, if named AXI clock is not present, @@ -2051,7 +2055,9 @@ static int axienet_probe(struct platform_device *pdev) } lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD; + lp->coalesce_usec_rx = XAXIDMA_DFT_RX_USEC; lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD; + lp->coalesce_usec_tx = XAXIDMA_DFT_TX_USEC; /* Reset core now that clocks are enabled, prior to accessing MDIO */ ret = __axienet_device_reset(lp); diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 519599480b15..57a24f62e353 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -498,7 +498,7 @@ static void xemaclite_update_address(struct net_local *drvdata, * @dev: Pointer to the network device instance * @address: Void pointer to the sockaddr structure * - * This function copies the HW address from the sockaddr strucutre to the + * This function copies the HW address from the sockaddr structure to the * net_device structure and updates the address in HW. 
* * Return: Error if the net device is busy or 0 if the addr is set @@ -1183,7 +1183,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev) if (rc) { dev_err(dev, "Cannot register network device, aborting\n"); - goto error; + goto put_node; } dev_info(dev, @@ -1191,6 +1191,8 @@ static int xemaclite_of_probe(struct platform_device *ofdev) (unsigned long __force)ndev->mem_start, lp->base_addr, ndev->irq); return 0; +put_node: + of_node_put(lp->phy_node); error: free_netdev(ndev); return rc; diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index c1fdd721a730..a895ff756093 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -925,7 +925,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, } skb->protocol = eth_type_trans(skb, geneve->dev); - netif_rx(skb); + __netif_rx(skb); dst_release(&rt->dst); return -EMSGSIZE; } @@ -1021,7 +1021,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, } skb->protocol = eth_type_trans(skb, geneve->dev); - netif_rx(skb); + __netif_rx(skb); dst_release(dst); return -EMSGSIZE; } diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index 24e5c54d06c1..bf087171bcf0 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -207,7 +207,7 @@ static int gtp_rx(struct pdp_ctx *pctx, struct sk_buff *skb, dev_sw_netstats_rx_add(pctx->dev, skb->len); - netif_rx(skb); + __netif_rx(skb); return 0; err: diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c index b1fc153125d9..45c3c4a1101b 100644 --- a/drivers/net/hamradio/6pack.c +++ b/drivers/net/hamradio/6pack.c @@ -668,11 +668,11 @@ static void sixpack_close(struct tty_struct *tty) */ netif_stop_queue(sp->dev); + unregister_netdev(sp->dev); + del_timer_sync(&sp->tx_t); del_timer_sync(&sp->resync_t); - unregister_netdev(sp->dev); - /* Free all 6pack frame buffers after unreg. 
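On the netif_rx() conversions running through this section (mse102x and w5100 above drop netif_rx_ni(); geneve, gtp, loopback, macsec and macvlan switch to __netif_rx()): netif_rx() can now be called from any context because it disables bottom halves itself, which is why the old _ni callers become plain netif_rx(). __netif_rx() is the leaner variant that skips that handling and is only valid where BHs are already disabled, such as inside an ndo_start_xmit handler. A hedged illustration of the two cases, with my_* names being hypothetical:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Loopback-style xmit: ndo_start_xmit runs with bottom halves
 * disabled, so the cheaper __netif_rx() is safe here.
 */
static netdev_tx_t my_loop_xmit(struct sk_buff *skb, struct net_device *dev)
{
	skb->protocol = eth_type_trans(skb, dev);
	__netif_rx(skb);
	return NETDEV_TX_OK;
}

/* From process context (e.g. a workqueue), use plain netif_rx(),
 * which handles the bh disable/enable itself.
 */
static void my_deliver_from_task(struct sk_buff *skb)
{
	netif_rx(skb);
}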
*/ kfree(sp->rbuff); kfree(sp->xbuff); diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c index 7e527499d3ad..a2a12208e3ad 100644 --- a/drivers/net/hamradio/dmascc.c +++ b/drivers/net/hamradio/dmascc.c @@ -28,6 +28,7 @@ #include <asm/io.h> #include <asm/irq.h> #include <linux/uaccess.h> +#include <linux/jiffies.h> #include <net/ax25.h> #include "z8530.h" @@ -377,7 +378,7 @@ static int __init dmascc_init(void) udelay(2000000 / TMR_0_HZ); /* Timing loop */ - while (jiffies - time < 13) { + while (time_is_after_jiffies(time + 13)) { for (i = 0; i < hw[h].num_devs; i++) if (base[i] && counting[i]) { /* Read back Timer 1: latch; read LSB; read MSB */ @@ -525,7 +526,7 @@ static int __init setup_adapter(int card_base, int type, int n) /* Wait and detect IRQ */ time = jiffies; - while (jiffies - time < 2 + HZ / TMR_0_HZ); + while (time_is_after_jiffies(time + 2 + HZ / TMR_0_HZ)); irq = probe_irq_off(irqs); /* Clear pending interrupt, disable interrupts */ @@ -1353,7 +1354,7 @@ static void es_isr(struct scc_priv *priv) /* Switch state */ write_scc(priv, R15, 0); if (priv->tx_count && - (jiffies - priv->tx_start) < priv->param.txtimeout) { + time_is_after_jiffies(priv->tx_start + priv->param.txtimeout)) { priv->state = TX_PAUSE; start_timer(priv, priv->param.txpause, 0); } else { diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c index 7db9cbd0f5de..6afdf1622944 100644 --- a/drivers/net/ieee802154/adf7242.c +++ b/drivers/net/ieee802154/adf7242.c @@ -1304,7 +1304,7 @@ err_alloc_wq: return ret; } -static int adf7242_remove(struct spi_device *spi) +static void adf7242_remove(struct spi_device *spi) { struct adf7242_local *lp = spi_get_drvdata(spi); @@ -1316,8 +1316,6 @@ static int adf7242_remove(struct spi_device *spi) ieee802154_unregister_hw(lp->hw); mutex_destroy(&lp->bmux); ieee802154_free_hw(lp->hw); - - return 0; } static const struct of_device_id adf7242_of_match[] = { diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c index 4f5ef8a9a9a8..549d04b5f3d4 100644 --- a/drivers/net/ieee802154/at86rf230.c +++ b/drivers/net/ieee802154/at86rf230.c @@ -1768,7 +1768,7 @@ free_dev: return rc; } -static int at86rf230_remove(struct spi_device *spi) +static void at86rf230_remove(struct spi_device *spi) { struct at86rf230_local *lp = spi_get_drvdata(spi); @@ -1778,8 +1778,6 @@ static int at86rf230_remove(struct spi_device *spi) ieee802154_free_hw(lp->hw); at86rf230_debugfs_remove(); dev_dbg(&spi->dev, "unregistered at86rf230\n"); - - return 0; } static const struct of_device_id at86rf230_of_match[] = { diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c index f3438d3e104a..187cbc634ce8 100644 --- a/drivers/net/ieee802154/ca8210.c +++ b/drivers/net/ieee802154/ca8210.c @@ -831,7 +831,7 @@ static void ca8210_rx_done(struct cas_control *cas_ctl) finish:; } -static int ca8210_remove(struct spi_device *spi_device); +static void ca8210_remove(struct spi_device *spi_device); /** * ca8210_spi_transfer_complete() - Called when a single spi transfer has @@ -2975,8 +2975,8 @@ static void ca8210_hw_setup(struct ieee802154_hw *ca8210_hw) ca8210_hw->phy->cca.opt = NL802154_CCA_OPT_ENERGY_CARRIER_AND; ca8210_hw->phy->cca_ed_level = -9800; ca8210_hw->phy->symbol_duration = 16; - ca8210_hw->phy->lifs_period = 40; - ca8210_hw->phy->sifs_period = 12; + ca8210_hw->phy->lifs_period = 40 * ca8210_hw->phy->symbol_duration; + ca8210_hw->phy->sifs_period = 12 * ca8210_hw->phy->symbol_duration; ca8210_hw->flags = 
IEEE802154_HW_AFILT | IEEE802154_HW_OMIT_CKSUM | @@ -3049,7 +3049,7 @@ static void ca8210_test_interface_clear(struct ca8210_priv *priv) * * Return: 0 or linux error code */ -static int ca8210_remove(struct spi_device *spi_device) +static void ca8210_remove(struct spi_device *spi_device) { struct ca8210_priv *priv; struct ca8210_platform_data *pdata; @@ -3089,8 +3089,6 @@ static int ca8210_remove(struct spi_device *spi_device) if (IS_ENABLED(CONFIG_IEEE802154_CA8210_DEBUGFS)) ca8210_test_interface_clear(priv); } - - return 0; } /** diff --git a/drivers/net/ieee802154/cc2520.c b/drivers/net/ieee802154/cc2520.c index 89c046b204e0..1e1f40f628a0 100644 --- a/drivers/net/ieee802154/cc2520.c +++ b/drivers/net/ieee802154/cc2520.c @@ -1213,7 +1213,7 @@ err_hw_init: return ret; } -static int cc2520_remove(struct spi_device *spi) +static void cc2520_remove(struct spi_device *spi) { struct cc2520_private *priv = spi_get_drvdata(spi); @@ -1222,8 +1222,6 @@ static int cc2520_remove(struct spi_device *spi) ieee802154_unregister_hw(priv->hw); ieee802154_free_hw(priv->hw); - - return 0; } static const struct spi_device_id cc2520_ids[] = { diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c index 383231b85464..c927a5ae0d05 100644 --- a/drivers/net/ieee802154/mcr20a.c +++ b/drivers/net/ieee802154/mcr20a.c @@ -1335,7 +1335,7 @@ free_dev: return ret; } -static int mcr20a_remove(struct spi_device *spi) +static void mcr20a_remove(struct spi_device *spi) { struct mcr20a_local *lp = spi_get_drvdata(spi); @@ -1343,8 +1343,6 @@ static int mcr20a_remove(struct spi_device *spi) ieee802154_unregister_hw(lp->hw); ieee802154_free_hw(lp->hw); - - return 0; } static const struct of_device_id mcr20a_of_match[] = { diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c index ff83e00b77af..ee4cfbf2c5cc 100644 --- a/drivers/net/ieee802154/mrf24j40.c +++ b/drivers/net/ieee802154/mrf24j40.c @@ -1356,7 +1356,7 @@ err_ret: return ret; } -static int mrf24j40_remove(struct spi_device *spi) +static void mrf24j40_remove(struct spi_device *spi) { struct mrf24j40 *devrec = spi_get_drvdata(spi); @@ -1366,8 +1366,6 @@ static int mrf24j40_remove(struct spi_device *spi) ieee802154_free_hw(devrec->hw); /* TODO: Will ieee802154_free_device() wait until ->xmit() is * complete? 
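All of the ieee802154 SPI drivers in this run (adf7242, at86rf230, ca8210, cc2520, mcr20a, mrf24j40, like the mse102x and w5100 conversions earlier) are adapting to the SPI core change that makes remove() return void: the returned int was ignored by the core apart from an error log, so failures must be handled, or at worst logged, inside the callback itself. The converted shape, sketched for a hypothetical driver (.probe elided):

#include <linux/module.h>
#include <linux/spi/spi.h>

struct mydrv_priv { /* ... */ };

static void mydrv_teardown(struct mydrv_priv *priv); /* hypothetical */

static void mydrv_remove(struct spi_device *spi)
{
	struct mydrv_priv *priv = spi_get_drvdata(spi);

	/* Unwind in reverse probe order; nothing is returned anymore,
	 * so any failure can only be logged, not propagated.
	 */
	mydrv_teardown(priv);
}

static struct spi_driver mydrv_driver = {
	.driver = { .name = "mydrv" },
	.remove = mydrv_remove,
};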
*/ - - return 0; } static const struct of_device_id mrf24j40_of_match[] = { diff --git a/drivers/net/ipa/Kconfig b/drivers/net/ipa/Kconfig index d037682fb7ad..6782c2cbf542 100644 --- a/drivers/net/ipa/Kconfig +++ b/drivers/net/ipa/Kconfig @@ -2,7 +2,9 @@ config QCOM_IPA tristate "Qualcomm IPA support" depends on NET && QCOM_SMEM depends on ARCH_QCOM || COMPILE_TEST + depends on INTERCONNECT depends on QCOM_RPROC_COMMON || (QCOM_RPROC_COMMON=n && COMPILE_TEST) + depends on QCOM_AOSS_QMP || QCOM_AOSS_QMP=n select QCOM_MDT_LOADER if ARCH_QCOM select QCOM_SCM select QCOM_QMI_HELPERS diff --git a/drivers/net/ipa/ipa_power.c b/drivers/net/ipa/ipa_power.c index f2989aac47a6..16ece27d14d7 100644 --- a/drivers/net/ipa/ipa_power.c +++ b/drivers/net/ipa/ipa_power.c @@ -35,18 +35,6 @@ #define IPA_AUTOSUSPEND_DELAY 500 /* milliseconds */ /** - * struct ipa_interconnect - IPA interconnect information - * @path: Interconnect path - * @average_bandwidth: Average interconnect bandwidth (KB/second) - * @peak_bandwidth: Peak interconnect bandwidth (KB/second) - */ -struct ipa_interconnect { - struct icc_path *path; - u32 average_bandwidth; - u32 peak_bandwidth; -}; - -/** * enum ipa_power_flag - IPA power flags * @IPA_POWER_FLAG_RESUMED: Whether resume from suspend has been signaled * @IPA_POWER_FLAG_SYSTEM: Hardware is system (not runtime) suspended @@ -79,164 +67,78 @@ struct ipa_power { spinlock_t spinlock; /* used with STOPPED/STARTED power flags */ DECLARE_BITMAP(flags, IPA_POWER_FLAG_COUNT); u32 interconnect_count; - struct ipa_interconnect *interconnect; + struct icc_bulk_data interconnect[]; }; -static int ipa_interconnect_init_one(struct device *dev, - struct ipa_interconnect *interconnect, - const struct ipa_interconnect_data *data) -{ - struct icc_path *path; - - path = of_icc_get(dev, data->name); - if (IS_ERR(path)) { - int ret = PTR_ERR(path); - - dev_err_probe(dev, ret, "error getting %s interconnect\n", - data->name); - - return ret; - } - - interconnect->path = path; - interconnect->average_bandwidth = data->average_bandwidth; - interconnect->peak_bandwidth = data->peak_bandwidth; - - return 0; -} - -static void ipa_interconnect_exit_one(struct ipa_interconnect *interconnect) -{ - icc_put(interconnect->path); - memset(interconnect, 0, sizeof(*interconnect)); -} - /* Initialize interconnects required for IPA operation */ -static int ipa_interconnect_init(struct ipa_power *power, struct device *dev, +static int ipa_interconnect_init(struct ipa_power *power, const struct ipa_interconnect_data *data) { - struct ipa_interconnect *interconnect; - u32 count; - int ret; - - count = power->interconnect_count; - interconnect = kcalloc(count, sizeof(*interconnect), GFP_KERNEL); - if (!interconnect) - return -ENOMEM; - power->interconnect = interconnect; - - while (count--) { - ret = ipa_interconnect_init_one(dev, interconnect, data++); - if (ret) - goto out_unwind; - interconnect++; - } - - return 0; - -out_unwind: - while (interconnect-- > power->interconnect) - ipa_interconnect_exit_one(interconnect); - kfree(power->interconnect); - power->interconnect = NULL; - - return ret; -} - -/* Inverse of ipa_interconnect_init() */ -static void ipa_interconnect_exit(struct ipa_power *power) -{ - struct ipa_interconnect *interconnect; - - interconnect = power->interconnect + power->interconnect_count; - while (interconnect-- > power->interconnect) - ipa_interconnect_exit_one(interconnect); - kfree(power->interconnect); - power->interconnect = NULL; -} - -/* Currently we only use one bandwidth level, so just 
"enable" interconnects */ -static int ipa_interconnect_enable(struct ipa *ipa) -{ - struct ipa_interconnect *interconnect; - struct ipa_power *power = ipa->power; + struct icc_bulk_data *interconnect; int ret; u32 i; - interconnect = power->interconnect; + /* Initialize our interconnect data array for bulk operations */ + interconnect = &power->interconnect[0]; for (i = 0; i < power->interconnect_count; i++) { - ret = icc_set_bw(interconnect->path, - interconnect->average_bandwidth, - interconnect->peak_bandwidth); - if (ret) { - dev_err(&ipa->pdev->dev, - "error %d enabling %s interconnect\n", - ret, icc_get_name(interconnect->path)); - goto out_unwind; - } + /* interconnect->path is filled in by of_icc_bulk_get() */ + interconnect->name = data->name; + interconnect->avg_bw = data->average_bandwidth; + interconnect->peak_bw = data->peak_bandwidth; + data++; interconnect++; } - return 0; + ret = of_icc_bulk_get(power->dev, power->interconnect_count, + power->interconnect); + if (ret) + return ret; -out_unwind: - while (interconnect-- > power->interconnect) - (void)icc_set_bw(interconnect->path, 0, 0); + /* All interconnects are initially disabled */ + icc_bulk_disable(power->interconnect_count, power->interconnect); + + /* Set the bandwidth values to be used when enabled */ + ret = icc_bulk_set_bw(power->interconnect_count, power->interconnect); + if (ret) + icc_bulk_put(power->interconnect_count, power->interconnect); return ret; } -/* To disable an interconnect, we just its bandwidth to 0 */ -static int ipa_interconnect_disable(struct ipa *ipa) +/* Inverse of ipa_interconnect_init() */ +static void ipa_interconnect_exit(struct ipa_power *power) { - struct ipa_interconnect *interconnect; - struct ipa_power *power = ipa->power; - struct device *dev = &ipa->pdev->dev; - int result = 0; - u32 count; - int ret; - - count = power->interconnect_count; - interconnect = power->interconnect + count; - while (count--) { - interconnect--; - ret = icc_set_bw(interconnect->path, 0, 0); - if (ret) { - dev_err(dev, "error %d disabling %s interconnect\n", - ret, icc_get_name(interconnect->path)); - /* Try to disable all; record only the first error */ - if (!result) - result = ret; - } - } - - return result; + icc_bulk_put(power->interconnect_count, power->interconnect); } /* Enable IPA power, enabling interconnects and the core clock */ static int ipa_power_enable(struct ipa *ipa) { + struct ipa_power *power = ipa->power; int ret; - ret = ipa_interconnect_enable(ipa); + ret = icc_bulk_enable(power->interconnect_count, power->interconnect); if (ret) return ret; - ret = clk_prepare_enable(ipa->power->core); + ret = clk_prepare_enable(power->core); if (ret) { - dev_err(&ipa->pdev->dev, "error %d enabling core clock\n", ret); - (void)ipa_interconnect_disable(ipa); + dev_err(power->dev, "error %d enabling core clock\n", ret); + icc_bulk_disable(power->interconnect_count, + power->interconnect); } return ret; } /* Inverse of ipa_power_enable() */ -static int ipa_power_disable(struct ipa *ipa) +static void ipa_power_disable(struct ipa *ipa) { - clk_disable_unprepare(ipa->power->core); + struct ipa_power *power = ipa->power; - return ipa_interconnect_disable(ipa); + clk_disable_unprepare(power->core); + + icc_bulk_disable(power->interconnect_count, power->interconnect); } static int ipa_runtime_suspend(struct device *dev) @@ -250,7 +152,9 @@ static int ipa_runtime_suspend(struct device *dev) gsi_suspend(&ipa->gsi); } - return ipa_power_disable(ipa); + ipa_power_disable(ipa); + + return 0; } static int 
ipa_runtime_resume(struct device *dev) @@ -453,6 +357,7 @@ ipa_power_init(struct device *dev, const struct ipa_power_data *data) { struct ipa_power *power; struct clk *clk; + size_t size; int ret; clk = clk_get(dev, "core"); @@ -469,7 +374,8 @@ ipa_power_init(struct device *dev, const struct ipa_power_data *data) goto err_clk_put; } - power = kzalloc(sizeof(*power), GFP_KERNEL); + size = data->interconnect_count * sizeof(power->interconnect[0]); + power = kzalloc(sizeof(*power) + size, GFP_KERNEL); if (!power) { ret = -ENOMEM; goto err_clk_put; @@ -479,7 +385,7 @@ ipa_power_init(struct device *dev, const struct ipa_power_data *data) spin_lock_init(&power->spinlock); power->interconnect_count = data->interconnect_count; - ret = ipa_interconnect_init(power, dev, data->interconnect_data); + ret = ipa_interconnect_init(power, data->interconnect_data); if (ret) goto err_kfree; diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index ed0edf5884ef..720394c0639b 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c @@ -74,11 +74,11 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb, skb_tx_timestamp(skb); /* do not fool net_timestamp_check() with various clock bases */ - skb->tstamp = 0; + skb_clear_tstamp(skb); skb_orphan(skb); - /* Before queueing this packet to netif_rx(), + /* Before queueing this packet to __netif_rx(), * make sure dst is refcounted. */ skb_dst_force(skb); @@ -86,7 +86,7 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb, skb->protocol = eth_type_trans(skb, dev); len = skb->len; - if (likely(netif_rx(skb) == NET_RX_SUCCESS)) + if (likely(__netif_rx(skb) == NET_RX_SUCCESS)) dev_lstats_add(dev, len); return NETDEV_TX_OK; diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 3d0874331763..832f09ac075e 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -1033,7 +1033,7 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb) else nskb->pkt_type = PACKET_MULTICAST; - netif_rx(nskb); + __netif_rx(nskb); } continue; } @@ -1056,7 +1056,7 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb) nskb->dev = ndev; - if (netif_rx(nskb) == NET_RX_SUCCESS) { + if (__netif_rx(nskb) == NET_RX_SUCCESS) { u64_stats_update_begin(&secy_stats->syncp); secy_stats->stats.InPktsUntagged++; u64_stats_update_end(&secy_stats->syncp); @@ -1288,7 +1288,7 @@ nosci: macsec_reset_skb(nskb, macsec->secy.netdev); - ret = netif_rx(nskb); + ret = __netif_rx(nskb); if (ret == NET_RX_SUCCESS) { u64_stats_update_begin(&secy_stats->syncp); secy_stats->stats.InPktsUnknownSCI++; diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 6ef5f77be4d0..33753a2fde29 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -285,7 +285,7 @@ static void macvlan_broadcast(struct sk_buff *skb, if (likely(nskb)) err = macvlan_broadcast_one(nskb, vlan, eth, mode == MACVLAN_MODE_BRIDGE) ?: - netif_rx_ni(nskb); + netif_rx(nskb); macvlan_count_rx(vlan, skb->len + ETH_HLEN, err == NET_RX_SUCCESS, true); } @@ -410,7 +410,7 @@ static void macvlan_forward_source_one(struct sk_buff *skb, if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, dev->dev_addr)) nskb->pkt_type = PACKET_HOST; - ret = netif_rx(nskb); + ret = __netif_rx(nskb); macvlan_count_rx(vlan, len, ret == NET_RX_SUCCESS, false); } @@ -468,7 +468,7 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb) /* forward to original port. 
*/ vlan = src; ret = macvlan_broadcast_one(skb, vlan, eth, 0) ?: - netif_rx(skb); + __netif_rx(skb); handle_res = RX_HANDLER_CONSUMED; goto out; } diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 6b12902a803f..cecf8c63096c 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -133,11 +133,17 @@ static void macvtap_setup(struct net_device *dev) dev->tx_queue_len = TUN_READQ_SIZE; } +static struct net *macvtap_link_net(const struct net_device *dev) +{ + return dev_net(macvlan_dev_real_dev(dev)); +} + static struct rtnl_link_ops macvtap_link_ops __read_mostly = { .kind = "macvtap", .setup = macvtap_setup, .newlink = macvtap_newlink, .dellink = macvtap_dellink, + .get_link_net = macvtap_link_net, .priv_size = sizeof(struct macvtap_dev), }; diff --git a/drivers/net/mctp/Kconfig b/drivers/net/mctp/Kconfig index 2929471395ae..dc71657d9184 100644 --- a/drivers/net/mctp/Kconfig +++ b/drivers/net/mctp/Kconfig @@ -21,6 +21,18 @@ config MCTP_SERIAL Say y here if you need to connect to MCTP endpoints over serial. To compile as a module, use m; the module will be called mctp-serial. +config MCTP_TRANSPORT_I2C + tristate "MCTP SMBus/I2C transport" + # i2c-mux is optional, but we must build as a module if i2c-mux is a module + depends on I2C_MUX || !I2C_MUX + depends on I2C + depends on I2C_SLAVE + select MCTP_FLOWS + help + Provides a driver to access MCTP devices over SMBus/I2C transport, + from DMTF specification DSP0237. An MCTP protocol network device is + created for each I2C bus that has been assigned a mctp-i2c device. + endmenu endif diff --git a/drivers/net/mctp/Makefile b/drivers/net/mctp/Makefile index d32622613ce4..1ca3e6028f77 100644 --- a/drivers/net/mctp/Makefile +++ b/drivers/net/mctp/Makefile @@ -1 +1,2 @@ obj-$(CONFIG_MCTP_SERIAL) += mctp-serial.o +obj-$(CONFIG_MCTP_TRANSPORT_I2C) += mctp-i2c.o diff --git a/drivers/net/mctp/mctp-i2c.c b/drivers/net/mctp/mctp-i2c.c new file mode 100644 index 000000000000..baf7afac7857 --- /dev/null +++ b/drivers/net/mctp/mctp-i2c.c @@ -0,0 +1,1082 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Management Controller Transport Protocol (MCTP) + * Implements DMTF specification + * "DSP0237 Management Component Transport Protocol (MCTP) SMBus/I2C + * Transport Binding" + * https://www.dmtf.org/sites/default/files/standards/documents/DSP0237_1.2.0.pdf + * + * A netdev is created for each I2C bus that handles MCTP. In the case of an I2C + * mux topology, a single I2C client is attached to the root of the mux topology, + * shared between all mux I2C busses underneath. For non-mux cases, an I2C client + * is attached per netdev. + * + * mctp-i2c-controller.yml devicetree binding has further details.
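The sizing constants defined just below in mctp-i2c.c fall straight out of the DSP0237 framing: a packet on the wire is dest_slave, command (0x0f) and byte_count (three bytes), then byte_count payload bytes of which the first is source_slave, then one PEC byte. Hence the buffer size is 3 + 255 + 1 and the MTU is 255 - 1. A compile-time restatement of that arithmetic, not part of the patch (MY_-prefixed names so as not to shadow the driver's own):

#include <linux/build_bug.h>

#define MY_MCTP_I2C_MAXBLOCK	255	/* byte_count is a u8 */
#define MY_MCTP_I2C_MAXMTU	(MY_MCTP_I2C_MAXBLOCK - 1)	/* one counted byte is source_slave */
#define MY_MCTP_I2C_BUFSZ	(3 + MY_MCTP_I2C_MAXBLOCK + 1)	/* hdr + data + PEC */

static_assert(MY_MCTP_I2C_BUFSZ == 259);
static_assert(MY_MCTP_I2C_MAXMTU == 254);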
+ * + * Copyright (c) 2022 Code Construct + * Copyright (c) 2022 Google + */ + +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/i2c.h> +#include <linux/i2c-mux.h> +#include <linux/if_arp.h> +#include <net/mctp.h> +#include <net/mctpdevice.h> + +/* byte_count is limited to u8 */ +#define MCTP_I2C_MAXBLOCK 255 +/* One byte is taken by source_slave */ +#define MCTP_I2C_MAXMTU (MCTP_I2C_MAXBLOCK - 1) +#define MCTP_I2C_MINMTU (64 + 4) +/* Allow space for dest_address, command, byte_count, data, PEC */ +#define MCTP_I2C_BUFSZ (3 + MCTP_I2C_MAXBLOCK + 1) +#define MCTP_I2C_MINLEN 8 +#define MCTP_I2C_COMMANDCODE 0x0f +#define MCTP_I2C_TX_WORK_LEN 100 +/* Sufficient for 64kB at min mtu */ +#define MCTP_I2C_TX_QUEUE_LEN 1100 + +#define MCTP_I2C_OF_PROP "mctp-controller" + +enum { + MCTP_I2C_FLOW_STATE_NEW = 0, + MCTP_I2C_FLOW_STATE_ACTIVE, +}; + +/* List of all struct mctp_i2c_client + * Lock protects driver_clients and also prevents adding/removing adapters + * during mctp_i2c_client probe/remove. + */ +static DEFINE_MUTEX(driver_clients_lock); +static LIST_HEAD(driver_clients); + +struct mctp_i2c_client; + +/* The netdev structure. One of these per I2C adapter. */ +struct mctp_i2c_dev { + struct net_device *ndev; + struct i2c_adapter *adapter; + struct mctp_i2c_client *client; + struct list_head list; /* For mctp_i2c_client.devs */ + + size_t rx_pos; + u8 rx_buffer[MCTP_I2C_BUFSZ]; + struct completion rx_done; + + struct task_struct *tx_thread; + wait_queue_head_t tx_wq; + struct sk_buff_head tx_queue; + u8 tx_scratch[MCTP_I2C_BUFSZ]; + + /* A fake entry in our tx queue to perform an unlock operation */ + struct sk_buff unlock_marker; + + /* Spinlock protects i2c_lock_count, release_count, allow_rx */ + spinlock_t lock; + int i2c_lock_count; + int release_count; + /* Indicates that the netif is ready to receive incoming packets */ + bool allow_rx; + +}; + +/* The i2c client structure. One per hardware i2c bus at the top of the + * mux tree, shared by multiple netdevs + */ +struct mctp_i2c_client { + struct i2c_client *client; + u8 lladdr; + + struct mctp_i2c_dev *sel; + struct list_head devs; + spinlock_t sel_lock; /* Protects sel and devs */ + + struct list_head list; /* For driver_clients */ +}; + +/* Header on the wire. */ +struct mctp_i2c_hdr { + u8 dest_slave; + u8 command; + /* Count of bytes following byte_count, excluding PEC */ + u8 byte_count; + u8 source_slave; +}; + +static int mctp_i2c_recv(struct mctp_i2c_dev *midev); +static int mctp_i2c_slave_cb(struct i2c_client *client, + enum i2c_slave_event event, u8 *val); +static void mctp_i2c_ndo_uninit(struct net_device *dev); +static int mctp_i2c_ndo_open(struct net_device *dev); + +static struct i2c_adapter *mux_root_adapter(struct i2c_adapter *adap) +{ +#if IS_ENABLED(CONFIG_I2C_MUX) + return i2c_root_adapter(&adap->dev); +#else + /* In non-mux config all i2c adapters are root adapters */ + return adap; +#endif +} + +/* Creates a new i2c slave device attached to the root adapter. + * Sets up the slave callback. + * Must be called with a client on a root adapter. 
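mctp_i2c_new_client() below reports failure through the returned pointer
itself rather than through a separate status argument. For readers less
used to the idiom, a userspace sketch of the kernel's ERR_PTR()/IS_ERR()
convention (simplified; make_client() is a placeholder, not a driver
function):

	#include <stdio.h>
	#include <errno.h>

	#define MAX_ERRNO 4095

	static inline void *ERR_PTR(long err) { return (void *)err; }
	static inline long PTR_ERR(const void *p) { return (long)p; }
	static inline int IS_ERR(const void *p)
	{
		return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
	}

	static void *make_client(int fail)
	{
		return fail ? ERR_PTR(-EINVAL) : (void *)"client";
	}

	int main(void)
	{
		void *c = make_client(1);

		if (IS_ERR(c))
			printf("probe failed: %ld\n", PTR_ERR(c));
		return 0;
	}

This is also why mctp_i2c_probe() further down converts with PTR_ERR()
before storing an integer return code.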
+ */ +static struct mctp_i2c_client *mctp_i2c_new_client(struct i2c_client *client) +{ + struct mctp_i2c_client *mcli = NULL; + struct i2c_adapter *root = NULL; + int rc; + + if (client->flags & I2C_CLIENT_TEN) { + dev_err(&client->dev, "failed, MCTP requires a 7-bit I2C address, addr=0x%x\n", + client->addr); + rc = -EINVAL; + goto err; + } + + root = mux_root_adapter(client->adapter); + if (!root) { + dev_err(&client->dev, "failed to find root adapter\n"); + rc = -ENOENT; + goto err; + } + if (root != client->adapter) { + dev_err(&client->dev, + "A mctp-i2c-controller client cannot be placed on an I2C mux adapter.\n" + " It should be placed on the mux tree root adapter\n" + " then set mctp-controller property on adapters to attach\n"); + rc = -EINVAL; + goto err; + } + + mcli = kzalloc(sizeof(*mcli), GFP_KERNEL); + if (!mcli) { + rc = -ENOMEM; + goto err; + } + spin_lock_init(&mcli->sel_lock); + INIT_LIST_HEAD(&mcli->devs); + INIT_LIST_HEAD(&mcli->list); + mcli->lladdr = client->addr & 0xff; + mcli->client = client; + i2c_set_clientdata(client, mcli); + + rc = i2c_slave_register(mcli->client, mctp_i2c_slave_cb); + if (rc < 0) { + dev_err(&client->dev, "i2c register failed %d\n", rc); + mcli->client = NULL; + i2c_set_clientdata(client, NULL); + goto err; + } + + return mcli; +err: + if (mcli) { + if (mcli->client) + i2c_unregister_device(mcli->client); + kfree(mcli); + } + return ERR_PTR(rc); +} + +static void mctp_i2c_free_client(struct mctp_i2c_client *mcli) +{ + int rc; + + WARN_ON(!mutex_is_locked(&driver_clients_lock)); + WARN_ON(!list_empty(&mcli->devs)); + WARN_ON(mcli->sel); /* sanity check, no locking */ + + rc = i2c_slave_unregister(mcli->client); + /* Leak if it fails, we can't propagate errors upwards */ + if (rc < 0) + dev_err(&mcli->client->dev, "i2c unregister failed %d\n", rc); + else + kfree(mcli); +} + +/* Switch the mctp i2c device to receive responses. 
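Both directions of the data path below check or append an SMBus packet
error code: mctp_i2c_recv() recomputes the PEC over everything before
the final byte, and mctp_i2c_xmit() appends one computed from the header
onward. A self-contained sketch of that checksum, assuming the standard
SMBus PEC (CRC-8, polynomial 0x07, initial value 0, as implemented by
i2c_smbus_pec()); the frame bytes are made up for illustration:

	#include <stdint.h>
	#include <stddef.h>
	#include <stdio.h>

	static uint8_t smbus_pec(uint8_t crc, const uint8_t *buf, size_t len)
	{
		while (len--) {
			crc ^= *buf++;
			for (int i = 0; i < 8; i++)
				crc = (crc & 0x80) ?
				      (uint8_t)((crc << 1) ^ 0x07) :
				      (uint8_t)(crc << 1);
		}
		return crc;
	}

	int main(void)
	{
		/* dest_slave, command, byte_count, source_slave, 1 data byte */
		uint8_t frame[] = { 0x20, 0x0f, 0x02, 0x21, 0xaa };

		printf("PEC over %zu bytes: 0x%02x\n",
		       sizeof(frame), smbus_pec(0, frame, sizeof(frame)));
		return 0;
	}

Note the PEC covers the destination address byte as well, which is why
the slave callback synthesizes dest_slave into rx_buffer[0] before any
received data.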
+ * Call with sel_lock held + */ +static void __mctp_i2c_device_select(struct mctp_i2c_client *mcli, + struct mctp_i2c_dev *midev) +{ + assert_spin_locked(&mcli->sel_lock); + if (midev) + dev_hold(midev->ndev); + if (mcli->sel) + dev_put(mcli->sel->ndev); + mcli->sel = midev; +} + +/* Switch the mctp i2c device to receive responses */ +static void mctp_i2c_device_select(struct mctp_i2c_client *mcli, + struct mctp_i2c_dev *midev) +{ + unsigned long flags; + + spin_lock_irqsave(&mcli->sel_lock, flags); + __mctp_i2c_device_select(mcli, midev); + spin_unlock_irqrestore(&mcli->sel_lock, flags); +} + +static int mctp_i2c_slave_cb(struct i2c_client *client, + enum i2c_slave_event event, u8 *val) +{ + struct mctp_i2c_client *mcli = i2c_get_clientdata(client); + struct mctp_i2c_dev *midev = NULL; + unsigned long flags; + int rc = 0; + + spin_lock_irqsave(&mcli->sel_lock, flags); + midev = mcli->sel; + if (midev) + dev_hold(midev->ndev); + spin_unlock_irqrestore(&mcli->sel_lock, flags); + + if (!midev) + return 0; + + switch (event) { + case I2C_SLAVE_WRITE_RECEIVED: + if (midev->rx_pos < MCTP_I2C_BUFSZ) { + midev->rx_buffer[midev->rx_pos] = *val; + midev->rx_pos++; + } else { + midev->ndev->stats.rx_over_errors++; + } + + break; + case I2C_SLAVE_WRITE_REQUESTED: + /* dest_slave as first byte */ + midev->rx_buffer[0] = mcli->lladdr << 1; + midev->rx_pos = 1; + break; + case I2C_SLAVE_STOP: + rc = mctp_i2c_recv(midev); + break; + default: + break; + } + + dev_put(midev->ndev); + return rc; +} + +/* Processes incoming data that has been accumulated by the slave cb */ +static int mctp_i2c_recv(struct mctp_i2c_dev *midev) +{ + struct net_device *ndev = midev->ndev; + struct mctp_i2c_hdr *hdr; + struct mctp_skb_cb *cb; + struct sk_buff *skb; + unsigned long flags; + u8 pec, calc_pec; + size_t recvlen; + int status; + + /* + 1 for the PEC */ + if (midev->rx_pos < MCTP_I2C_MINLEN + 1) { + ndev->stats.rx_length_errors++; + return -EINVAL; + } + /* recvlen excludes PEC */ + recvlen = midev->rx_pos - 1; + + hdr = (void *)midev->rx_buffer; + if (hdr->command != MCTP_I2C_COMMANDCODE) { + ndev->stats.rx_dropped++; + return -EINVAL; + } + + if (hdr->byte_count + offsetof(struct mctp_i2c_hdr, source_slave) != recvlen) { + ndev->stats.rx_length_errors++; + return -EINVAL; + } + + pec = midev->rx_buffer[midev->rx_pos - 1]; + calc_pec = i2c_smbus_pec(0, midev->rx_buffer, recvlen); + if (pec != calc_pec) { + ndev->stats.rx_crc_errors++; + return -EINVAL; + } + + skb = netdev_alloc_skb(ndev, recvlen); + if (!skb) { + ndev->stats.rx_dropped++; + return -ENOMEM; + } + + skb->protocol = htons(ETH_P_MCTP); + skb_put_data(skb, midev->rx_buffer, recvlen); + skb_reset_mac_header(skb); + skb_pull(skb, sizeof(struct mctp_i2c_hdr)); + skb_reset_network_header(skb); + + cb = __mctp_cb(skb); + cb->halen = 1; + cb->haddr[0] = hdr->source_slave >> 1; + + /* We need to ensure that the netif is not used once netdev + * unregister occurs + */ + spin_lock_irqsave(&midev->lock, flags); + if (midev->allow_rx) { + reinit_completion(&midev->rx_done); + spin_unlock_irqrestore(&midev->lock, flags); + + status = netif_rx(skb); + complete(&midev->rx_done); + } else { + status = NET_RX_DROP; + spin_unlock_irqrestore(&midev->lock, flags); + } + + if (status == NET_RX_SUCCESS) { + ndev->stats.rx_packets++; + ndev->stats.rx_bytes += recvlen; + } else { + ndev->stats.rx_dropped++; + } + return 0; +} + +enum mctp_i2c_flow_state { + MCTP_I2C_TX_FLOW_INVALID, + MCTP_I2C_TX_FLOW_NONE, + MCTP_I2C_TX_FLOW_NEW, + MCTP_I2C_TX_FLOW_EXISTING, +}; + +static 
enum mctp_i2c_flow_state +mctp_i2c_get_tx_flow_state(struct mctp_i2c_dev *midev, struct sk_buff *skb) +{ + enum mctp_i2c_flow_state state; + struct mctp_sk_key *key; + struct mctp_flow *flow; + unsigned long flags; + + flow = skb_ext_find(skb, SKB_EXT_MCTP); + if (!flow) + return MCTP_I2C_TX_FLOW_NONE; + + key = flow->key; + if (!key) + return MCTP_I2C_TX_FLOW_NONE; + + spin_lock_irqsave(&key->lock, flags); + /* If the key is present but invalid, we're unlikely to be able + * to handle the flow at all; just drop now + */ + if (!key->valid) { + state = MCTP_I2C_TX_FLOW_INVALID; + + } else if (key->dev_flow_state == MCTP_I2C_FLOW_STATE_NEW) { + key->dev_flow_state = MCTP_I2C_FLOW_STATE_ACTIVE; + state = MCTP_I2C_TX_FLOW_NEW; + } else { + state = MCTP_I2C_TX_FLOW_EXISTING; + } + + spin_unlock_irqrestore(&key->lock, flags); + + return state; +} + +/* We're not contending with ourselves here; we only need to exclude other + * i2c clients from using the bus. refcounts are simply to prevent + * recursive locking. + */ +static void mctp_i2c_lock_nest(struct mctp_i2c_dev *midev) +{ + unsigned long flags; + bool lock; + + spin_lock_irqsave(&midev->lock, flags); + lock = midev->i2c_lock_count == 0; + midev->i2c_lock_count++; + spin_unlock_irqrestore(&midev->lock, flags); + + if (lock) + i2c_lock_bus(midev->adapter, I2C_LOCK_SEGMENT); +} + +static void mctp_i2c_unlock_nest(struct mctp_i2c_dev *midev) +{ + unsigned long flags; + bool unlock; + + spin_lock_irqsave(&midev->lock, flags); + if (!WARN_ONCE(midev->i2c_lock_count == 0, "lock count underflow!")) + midev->i2c_lock_count--; + unlock = midev->i2c_lock_count == 0; + spin_unlock_irqrestore(&midev->lock, flags); + + if (unlock) + i2c_unlock_bus(midev->adapter, I2C_LOCK_SEGMENT); +} + +/* Unlocks the bus if was previously locked, used for cleanup */ +static void mctp_i2c_unlock_reset(struct mctp_i2c_dev *midev) +{ + unsigned long flags; + bool unlock; + + spin_lock_irqsave(&midev->lock, flags); + unlock = midev->i2c_lock_count > 0; + midev->i2c_lock_count = 0; + spin_unlock_irqrestore(&midev->lock, flags); + + if (unlock) + i2c_unlock_bus(midev->adapter, I2C_LOCK_SEGMENT); +} + +static void mctp_i2c_xmit(struct mctp_i2c_dev *midev, struct sk_buff *skb) +{ + struct net_device_stats *stats = &midev->ndev->stats; + enum mctp_i2c_flow_state fs; + struct mctp_i2c_hdr *hdr; + struct i2c_msg msg = {0}; + u8 *pecp; + int rc; + + fs = mctp_i2c_get_tx_flow_state(midev, skb); + + hdr = (void *)skb_mac_header(skb); + /* Sanity check that packet contents matches skb length, + * and can't exceed MCTP_I2C_BUFSZ + */ + if (skb->len != hdr->byte_count + 3) { + dev_warn_ratelimited(&midev->adapter->dev, + "Bad tx length %d vs skb %u\n", + hdr->byte_count + 3, skb->len); + return; + } + + if (skb_tailroom(skb) >= 1) { + /* Linear case with space, we can just append the PEC */ + skb_put(skb, 1); + } else { + /* Otherwise need to copy the buffer */ + skb_copy_bits(skb, 0, midev->tx_scratch, skb->len); + hdr = (void *)midev->tx_scratch; + } + + pecp = (void *)&hdr->source_slave + hdr->byte_count; + *pecp = i2c_smbus_pec(0, (u8 *)hdr, hdr->byte_count + 3); + msg.buf = (void *)&hdr->command; + /* command, bytecount, data, pec */ + msg.len = 2 + hdr->byte_count + 1; + msg.addr = hdr->dest_slave >> 1; + + switch (fs) { + case MCTP_I2C_TX_FLOW_NONE: + /* no flow: full lock & unlock */ + mctp_i2c_lock_nest(midev); + mctp_i2c_device_select(midev->client, midev); + rc = __i2c_transfer(midev->adapter, &msg, 1); + mctp_i2c_unlock_nest(midev); + break; + + case MCTP_I2C_TX_FLOW_NEW: 
+ /* new flow: lock, tx, but don't unlock; that will happen + * on flow release + */ + mctp_i2c_lock_nest(midev); + mctp_i2c_device_select(midev->client, midev); + fallthrough; + + case MCTP_I2C_TX_FLOW_EXISTING: + /* existing flow: we already have the lock; just tx */ + rc = __i2c_transfer(midev->adapter, &msg, 1); + break; + + case MCTP_I2C_TX_FLOW_INVALID: + return; + } + + if (rc < 0) { + dev_warn_ratelimited(&midev->adapter->dev, + "__i2c_transfer failed %d\n", rc); + stats->tx_errors++; + } else { + stats->tx_bytes += skb->len; + stats->tx_packets++; + } +} + +static void mctp_i2c_flow_release(struct mctp_i2c_dev *midev) +{ + unsigned long flags; + bool unlock; + + spin_lock_irqsave(&midev->lock, flags); + if (midev->release_count > midev->i2c_lock_count) { + WARN_ONCE(1, "release count overflow"); + midev->release_count = midev->i2c_lock_count; + } + + midev->i2c_lock_count -= midev->release_count; + unlock = midev->i2c_lock_count == 0 && midev->release_count > 0; + midev->release_count = 0; + spin_unlock_irqrestore(&midev->lock, flags); + + if (unlock) + i2c_unlock_bus(midev->adapter, I2C_LOCK_SEGMENT); +} + +static int mctp_i2c_header_create(struct sk_buff *skb, struct net_device *dev, + unsigned short type, const void *daddr, + const void *saddr, unsigned int len) +{ + struct mctp_i2c_hdr *hdr; + struct mctp_hdr *mhdr; + u8 lldst, llsrc; + + if (len > MCTP_I2C_MAXMTU) + return -EMSGSIZE; + + lldst = *((u8 *)daddr); + llsrc = *((u8 *)saddr); + + skb_push(skb, sizeof(struct mctp_i2c_hdr)); + skb_reset_mac_header(skb); + hdr = (void *)skb_mac_header(skb); + mhdr = mctp_hdr(skb); + hdr->dest_slave = (lldst << 1) & 0xff; + hdr->command = MCTP_I2C_COMMANDCODE; + hdr->byte_count = len + 1; + hdr->source_slave = ((llsrc << 1) & 0xff) | 0x01; + mhdr->ver = 0x01; + + return 0; +} + +static int mctp_i2c_tx_thread(void *data) +{ + struct mctp_i2c_dev *midev = data; + struct sk_buff *skb; + unsigned long flags; + + for (;;) { + if (kthread_should_stop()) + break; + + spin_lock_irqsave(&midev->tx_queue.lock, flags); + skb = __skb_dequeue(&midev->tx_queue); + if (netif_queue_stopped(midev->ndev)) + netif_wake_queue(midev->ndev); + spin_unlock_irqrestore(&midev->tx_queue.lock, flags); + + if (skb == &midev->unlock_marker) { + mctp_i2c_flow_release(midev); + + } else if (skb) { + mctp_i2c_xmit(midev, skb); + kfree_skb(skb); + + } else { + wait_event_idle(midev->tx_wq, + !skb_queue_empty(&midev->tx_queue) || + kthread_should_stop()); + } + } + + return 0; +} + +static netdev_tx_t mctp_i2c_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct mctp_i2c_dev *midev = netdev_priv(dev); + unsigned long flags; + + spin_lock_irqsave(&midev->tx_queue.lock, flags); + if (skb_queue_len(&midev->tx_queue) >= MCTP_I2C_TX_WORK_LEN) { + netif_stop_queue(dev); + spin_unlock_irqrestore(&midev->tx_queue.lock, flags); + netdev_err(dev, "BUG! 
Tx Ring full when queue awake!\n"); + return NETDEV_TX_BUSY; + } + + __skb_queue_tail(&midev->tx_queue, skb); + if (skb_queue_len(&midev->tx_queue) == MCTP_I2C_TX_WORK_LEN) + netif_stop_queue(dev); + spin_unlock_irqrestore(&midev->tx_queue.lock, flags); + + wake_up(&midev->tx_wq); + return NETDEV_TX_OK; +} + +static void mctp_i2c_release_flow(struct mctp_dev *mdev, + struct mctp_sk_key *key) + +{ + struct mctp_i2c_dev *midev = netdev_priv(mdev->dev); + unsigned long flags; + + spin_lock_irqsave(&midev->lock, flags); + midev->release_count++; + spin_unlock_irqrestore(&midev->lock, flags); + + /* Ensure we have a release operation queued, through the fake + * marker skb + */ + spin_lock(&midev->tx_queue.lock); + if (!midev->unlock_marker.next) + __skb_queue_tail(&midev->tx_queue, &midev->unlock_marker); + spin_unlock(&midev->tx_queue.lock); + + wake_up(&midev->tx_wq); +} + +static const struct net_device_ops mctp_i2c_ops = { + .ndo_start_xmit = mctp_i2c_start_xmit, + .ndo_uninit = mctp_i2c_ndo_uninit, + .ndo_open = mctp_i2c_ndo_open, +}; + +static const struct header_ops mctp_i2c_headops = { + .create = mctp_i2c_header_create, +}; + +static const struct mctp_netdev_ops mctp_i2c_mctp_ops = { + .release_flow = mctp_i2c_release_flow, +}; + +static void mctp_i2c_net_setup(struct net_device *dev) +{ + dev->type = ARPHRD_MCTP; + + dev->mtu = MCTP_I2C_MAXMTU; + dev->min_mtu = MCTP_I2C_MINMTU; + dev->max_mtu = MCTP_I2C_MAXMTU; + dev->tx_queue_len = MCTP_I2C_TX_QUEUE_LEN; + + dev->hard_header_len = sizeof(struct mctp_i2c_hdr); + dev->addr_len = 1; + + dev->netdev_ops = &mctp_i2c_ops; + dev->header_ops = &mctp_i2c_headops; +} + +/* Populates the mctp_i2c_dev priv struct for a netdev. + * Returns an error pointer on failure. + */ +static struct mctp_i2c_dev *mctp_i2c_midev_init(struct net_device *dev, + struct mctp_i2c_client *mcli, + struct i2c_adapter *adap) +{ + struct mctp_i2c_dev *midev = netdev_priv(dev); + unsigned long flags; + + midev->tx_thread = kthread_create(mctp_i2c_tx_thread, midev, + "%s/tx", dev->name); + if (IS_ERR(midev->tx_thread)) + return ERR_CAST(midev->tx_thread); + + midev->ndev = dev; + get_device(&adap->dev); + midev->adapter = adap; + get_device(&mcli->client->dev); + midev->client = mcli; + INIT_LIST_HEAD(&midev->list); + spin_lock_init(&midev->lock); + midev->i2c_lock_count = 0; + midev->release_count = 0; + init_completion(&midev->rx_done); + complete(&midev->rx_done); + init_waitqueue_head(&midev->tx_wq); + skb_queue_head_init(&midev->tx_queue); + + /* Add to the parent mcli */ + spin_lock_irqsave(&mcli->sel_lock, flags); + list_add(&midev->list, &mcli->devs); + /* Select a device by default */ + if (!mcli->sel) + __mctp_i2c_device_select(mcli, midev); + spin_unlock_irqrestore(&mcli->sel_lock, flags); + + /* Start the worker thread */ + wake_up_process(midev->tx_thread); + + return midev; +} + +/* Counterpart of mctp_i2c_midev_init */ +static void mctp_i2c_midev_free(struct mctp_i2c_dev *midev) +{ + struct mctp_i2c_client *mcli = midev->client; + unsigned long flags; + + if (midev->tx_thread) { + kthread_stop(midev->tx_thread); + midev->tx_thread = NULL; + } + + /* Unconditionally unlock on close */ + mctp_i2c_unlock_reset(midev); + + /* Remove the netdev from the parent i2c client. 
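The mctp_i2c_unlock_reset() call just above unconditionally drops any
bus lock still held, closing out the i2c_lock_count scheme from
mctp_i2c_lock_nest()/mctp_i2c_unlock_nest(): the first user takes the
underlying bus lock, later users only bump a count, and the bus is
released when the count returns to zero. A minimal pthread sketch of
the same shape (build with -lpthread; bus_lock_nest()/bus_unlock_nest()
are illustrative names only):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_mutex_t bus_lock = PTHREAD_MUTEX_INITIALIZER;
	static int lock_count;

	static void bus_lock_nest(void)
	{
		int take;

		pthread_mutex_lock(&state_lock);
		take = (lock_count++ == 0);
		pthread_mutex_unlock(&state_lock);
		if (take)
			pthread_mutex_lock(&bus_lock);
	}

	static void bus_unlock_nest(void)
	{
		int release;

		pthread_mutex_lock(&state_lock);
		release = (--lock_count == 0);
		pthread_mutex_unlock(&state_lock);
		if (release)
			pthread_mutex_unlock(&bus_lock);
	}

	int main(void)
	{
		bus_lock_nest();
		bus_lock_nest();	/* nested: bus already held */
		bus_unlock_nest();
		bus_unlock_nest();	/* last user releases the bus */
		puts("balanced");
		return 0;
	}

Splitting lock from unlock this way is what lets a NEW tx flow leave
the bus held across packets and hand the release to
mctp_i2c_flow_release() later.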
*/ + spin_lock_irqsave(&mcli->sel_lock, flags); + list_del(&midev->list); + if (mcli->sel == midev) { + struct mctp_i2c_dev *first; + + first = list_first_entry_or_null(&mcli->devs, struct mctp_i2c_dev, list); + __mctp_i2c_device_select(mcli, first); + } + spin_unlock_irqrestore(&mcli->sel_lock, flags); + + skb_queue_purge(&midev->tx_queue); + put_device(&midev->adapter->dev); + put_device(&mcli->client->dev); +} + +/* Stops, unregisters, and frees midev */ +static void mctp_i2c_unregister(struct mctp_i2c_dev *midev) +{ + unsigned long flags; + + /* Stop tx thread prior to unregister, it uses netif_() functions */ + kthread_stop(midev->tx_thread); + midev->tx_thread = NULL; + + /* Prevent any new rx in mctp_i2c_recv(), let any pending work finish */ + spin_lock_irqsave(&midev->lock, flags); + midev->allow_rx = false; + spin_unlock_irqrestore(&midev->lock, flags); + wait_for_completion(&midev->rx_done); + + mctp_unregister_netdev(midev->ndev); + /* midev has been freed now by mctp_i2c_ndo_uninit callback */ + + free_netdev(midev->ndev); +} + +static void mctp_i2c_ndo_uninit(struct net_device *dev) +{ + struct mctp_i2c_dev *midev = netdev_priv(dev); + + /* Perform cleanup here to ensure that mcli->sel isn't holding + * a reference that would prevent unregister_netdevice() + * from completing. + */ + mctp_i2c_midev_free(midev); +} + +static int mctp_i2c_ndo_open(struct net_device *dev) +{ + struct mctp_i2c_dev *midev = netdev_priv(dev); + unsigned long flags; + + /* i2c rx handler can only pass packets once the netdev is registered */ + spin_lock_irqsave(&midev->lock, flags); + midev->allow_rx = true; + spin_unlock_irqrestore(&midev->lock, flags); + + return 0; +} + +static int mctp_i2c_add_netdev(struct mctp_i2c_client *mcli, + struct i2c_adapter *adap) +{ + struct mctp_i2c_dev *midev = NULL; + struct net_device *ndev = NULL; + struct i2c_adapter *root; + unsigned long flags; + char namebuf[30]; + int rc; + + root = mux_root_adapter(adap); + if (root != mcli->client->adapter) { + dev_err(&mcli->client->dev, + "I2C adapter %s is not a child bus of %s\n", + mcli->client->adapter->name, root->name); + return -EINVAL; + } + + WARN_ON(!mutex_is_locked(&driver_clients_lock)); + snprintf(namebuf, sizeof(namebuf), "mctpi2c%d", adap->nr); + ndev = alloc_netdev(sizeof(*midev), namebuf, NET_NAME_ENUM, mctp_i2c_net_setup); + if (!ndev) { + dev_err(&mcli->client->dev, "alloc netdev failed\n"); + rc = -ENOMEM; + goto err; + } + dev_net_set(ndev, current->nsproxy->net_ns); + SET_NETDEV_DEV(ndev, &adap->dev); + dev_addr_set(ndev, &mcli->lladdr); + + midev = mctp_i2c_midev_init(ndev, mcli, adap); + if (IS_ERR(midev)) { + rc = PTR_ERR(midev); + midev = NULL; + goto err; + } + + rc = mctp_register_netdev(ndev, &mctp_i2c_mctp_ops); + if (rc < 0) { + dev_err(&mcli->client->dev, + "register netdev \"%s\" failed %d\n", + ndev->name, rc); + goto err; + } + + spin_lock_irqsave(&midev->lock, flags); + midev->allow_rx = false; + spin_unlock_irqrestore(&midev->lock, flags); + + return 0; +err: + if (midev) + mctp_i2c_midev_free(midev); + if (ndev) + free_netdev(ndev); + return rc; +} + +/* Removes any netdev for adap. 
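mctp_i2c_unregister() above quiesces receive in two steps: clear
allow_rx under the lock so no new packets enter, then wait on rx_done
for any receive already past that check. The same pattern in a small
pthread form, with a condition variable standing in for the completion
(all names here are illustrative, not the driver's; build with
-lpthread):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t rx_done = PTHREAD_COND_INITIALIZER;
	static int allow_rx = 1, rx_in_flight;

	static void *rx_handler(void *arg)
	{
		(void)arg;
		pthread_mutex_lock(&lock);
		if (allow_rx) {
			rx_in_flight = 1;
			pthread_mutex_unlock(&lock);
			/* ... deliver the packet up the stack ... */
			pthread_mutex_lock(&lock);
			rx_in_flight = 0;
			pthread_cond_signal(&rx_done);
		}
		pthread_mutex_unlock(&lock);
		return NULL;
	}

	static void quiesce(void)
	{
		pthread_mutex_lock(&lock);
		allow_rx = 0;			/* refuse new receives */
		while (rx_in_flight)		/* wait_for_completion() */
			pthread_cond_wait(&rx_done, &lock);
		pthread_mutex_unlock(&lock);
	}

	int main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, rx_handler, NULL);
		pthread_join(t, NULL);
		quiesce();
		puts("safe to unregister");
		return 0;
	}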
mcli is the parent root i2c client */ +static void mctp_i2c_remove_netdev(struct mctp_i2c_client *mcli, + struct i2c_adapter *adap) +{ + struct mctp_i2c_dev *midev = NULL, *m = NULL; + unsigned long flags; + + WARN_ON(!mutex_is_locked(&driver_clients_lock)); + spin_lock_irqsave(&mcli->sel_lock, flags); + /* List size is limited by number of MCTP netdevs on a single hardware bus */ + list_for_each_entry(m, &mcli->devs, list) + if (m->adapter == adap) { + midev = m; + break; + } + spin_unlock_irqrestore(&mcli->sel_lock, flags); + + if (midev) + mctp_i2c_unregister(midev); +} + +/* Determines whether a device is an i2c adapter. + * Optionally returns the root i2c_adapter + */ +static struct i2c_adapter *mctp_i2c_get_adapter(struct device *dev, + struct i2c_adapter **ret_root) +{ + struct i2c_adapter *root, *adap; + + if (dev->type != &i2c_adapter_type) + return NULL; + adap = to_i2c_adapter(dev); + root = mux_root_adapter(adap); + WARN_ONCE(!root, "MCTP I2C failed to find root adapter for %s\n", + dev_name(dev)); + if (!root) + return NULL; + if (ret_root) + *ret_root = root; + return adap; +} + +/* Determines whether a device is an i2c adapter with the "mctp-controller" + * devicetree property set. If adap is not an OF node, returns match_no_of + */ +static bool mctp_i2c_adapter_match(struct i2c_adapter *adap, bool match_no_of) +{ + if (!adap->dev.of_node) + return match_no_of; + return of_property_read_bool(adap->dev.of_node, MCTP_I2C_OF_PROP); +} + +/* Called for each existing i2c device (adapter or client) when a + * new mctp-i2c client is probed. + */ +static int mctp_i2c_client_try_attach(struct device *dev, void *data) +{ + struct i2c_adapter *adap = NULL, *root = NULL; + struct mctp_i2c_client *mcli = data; + + adap = mctp_i2c_get_adapter(dev, &root); + if (!adap) + return 0; + if (mcli->client->adapter != root) + return 0; + /* Must either have mctp-controller property on the adapter, or + * be a root adapter if it's non-devicetree + */ + if (!mctp_i2c_adapter_match(adap, adap == root)) + return 0; + + return mctp_i2c_add_netdev(mcli, adap); +} + +static void mctp_i2c_notify_add(struct device *dev) +{ + struct mctp_i2c_client *mcli = NULL, *m = NULL; + struct i2c_adapter *root = NULL, *adap = NULL; + int rc; + + adap = mctp_i2c_get_adapter(dev, &root); + if (!adap) + return; + /* Check for mctp-controller property on the adapter */ + if (!mctp_i2c_adapter_match(adap, false)) + return; + + /* Find an existing mcli for adap's root */ + mutex_lock(&driver_clients_lock); + list_for_each_entry(m, &driver_clients, list) { + if (m->client->adapter == root) { + mcli = m; + break; + } + } + + if (mcli) { + rc = mctp_i2c_add_netdev(mcli, adap); + if (rc < 0) + dev_warn(dev, "Failed adding mctp-i2c net device\n"); + } + mutex_unlock(&driver_clients_lock); +} + +static void mctp_i2c_notify_del(struct device *dev) +{ + struct i2c_adapter *root = NULL, *adap = NULL; + struct mctp_i2c_client *mcli = NULL; + + adap = mctp_i2c_get_adapter(dev, &root); + if (!adap) + return; + + mutex_lock(&driver_clients_lock); + list_for_each_entry(mcli, &driver_clients, list) { + if (mcli->client->adapter == root) { + mctp_i2c_remove_netdev(mcli, adap); + break; + } + } + mutex_unlock(&driver_clients_lock); +} + +static int mctp_i2c_probe(struct i2c_client *client) +{ + struct mctp_i2c_client *mcli = NULL; + int rc; + + mutex_lock(&driver_clients_lock); + mcli = mctp_i2c_new_client(client); + if (IS_ERR(mcli)) { + rc = PTR_ERR(mcli); + mcli = NULL; + goto out; + } else { + list_add(&mcli->list, &driver_clients); + 
} + + /* Add a netdev for adapters that have a 'mctp-controller' property */ + i2c_for_each_dev(mcli, mctp_i2c_client_try_attach); + rc = 0; +out: + mutex_unlock(&driver_clients_lock); + return rc; +} + +static int mctp_i2c_remove(struct i2c_client *client) +{ + struct mctp_i2c_client *mcli = i2c_get_clientdata(client); + struct mctp_i2c_dev *midev = NULL, *tmp = NULL; + + mutex_lock(&driver_clients_lock); + list_del(&mcli->list); + /* Remove all child adapter netdevs */ + list_for_each_entry_safe(midev, tmp, &mcli->devs, list) + mctp_i2c_unregister(midev); + + mctp_i2c_free_client(mcli); + mutex_unlock(&driver_clients_lock); + /* Callers ignore return code */ + return 0; +} + +/* We look for a 'mctp-controller' property on I2C busses as they are + * added/deleted, creating/removing netdevs as required. + */ +static int mctp_i2c_notifier_call(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct device *dev = data; + + switch (action) { + case BUS_NOTIFY_ADD_DEVICE: + mctp_i2c_notify_add(dev); + break; + case BUS_NOTIFY_DEL_DEVICE: + mctp_i2c_notify_del(dev); + break; + } + return NOTIFY_DONE; +} + +static struct notifier_block mctp_i2c_notifier = { + .notifier_call = mctp_i2c_notifier_call, +}; + +static const struct i2c_device_id mctp_i2c_id[] = { + { "mctp-i2c-interface", 0 }, + {}, +}; +MODULE_DEVICE_TABLE(i2c, mctp_i2c_id); + +static const struct of_device_id mctp_i2c_of_match[] = { + { .compatible = "mctp-i2c-controller" }, + {}, +}; +MODULE_DEVICE_TABLE(of, mctp_i2c_of_match); + +static struct i2c_driver mctp_i2c_driver = { + .driver = { + .name = "mctp-i2c-interface", + .of_match_table = mctp_i2c_of_match, + }, + .probe_new = mctp_i2c_probe, + .remove = mctp_i2c_remove, + .id_table = mctp_i2c_id, +}; + +static __init int mctp_i2c_mod_init(void) +{ + int rc; + + pr_info("MCTP I2C interface driver\n"); + rc = i2c_add_driver(&mctp_i2c_driver); + if (rc < 0) + return rc; + rc = bus_register_notifier(&i2c_bus_type, &mctp_i2c_notifier); + if (rc < 0) { + i2c_del_driver(&mctp_i2c_driver); + return rc; + } + return 0; +} + +static __exit void mctp_i2c_mod_exit(void) +{ + int rc; + + rc = bus_unregister_notifier(&i2c_bus_type, &mctp_i2c_notifier); + if (rc < 0) + pr_warn("MCTP I2C could not unregister notifier, %d\n", rc); + i2c_del_driver(&mctp_i2c_driver); +} + +module_init(mctp_i2c_mod_init); +module_exit(mctp_i2c_mod_exit); + +MODULE_DESCRIPTION("MCTP I2C device"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Matt Johnston <matt@codeconstruct.com.au>"); diff --git a/drivers/net/mctp/mctp-serial.c b/drivers/net/mctp/mctp-serial.c index eaa6fb3224bc..7cd103fd34ef 100644 --- a/drivers/net/mctp/mctp-serial.c +++ b/drivers/net/mctp/mctp-serial.c @@ -286,7 +286,7 @@ static void mctp_serial_rx(struct mctp_serial *dev) cb = __mctp_cb(skb); cb->halen = 0; - netif_rx_ni(skb); + netif_rx(skb); dev->netdev->stats.rx_packets++; dev->netdev->stats.rx_bytes += dev->rxlen; } @@ -403,8 +403,16 @@ static void mctp_serial_tty_receive_buf(struct tty_struct *tty, mctp_serial_push(dev, c[i]); } +static void mctp_serial_uninit(struct net_device *ndev) +{ + struct mctp_serial *dev = netdev_priv(ndev); + + cancel_work_sync(&dev->tx_work); +} + static const struct net_device_ops mctp_serial_netdev_ops = { .ndo_start_xmit = mctp_serial_tx, + .ndo_uninit = mctp_serial_uninit, }; static void mctp_serial_setup(struct net_device *ndev) @@ -483,7 +491,6 @@ static void mctp_serial_close(struct tty_struct *tty) int idx = dev->idx; unregister_netdev(dev->netdev); - cancel_work_sync(&dev->tx_work); 
ida_free(&mctp_serial_ida, idx); } diff --git a/drivers/net/mdio/mdio-ipq4019.c b/drivers/net/mdio/mdio-ipq4019.c index 5f4cd24a0241..4eba5a91075c 100644 --- a/drivers/net/mdio/mdio-ipq4019.c +++ b/drivers/net/mdio/mdio-ipq4019.c @@ -200,7 +200,11 @@ static int ipq_mdio_reset(struct mii_bus *bus) if (ret) return ret; - return clk_prepare_enable(priv->mdio_clk); + ret = clk_prepare_enable(priv->mdio_clk); + if (ret == 0) + mdelay(10); + + return ret; } static int ipq4019_mdio_probe(struct platform_device *pdev) diff --git a/drivers/net/mdio/mdio-mux.c b/drivers/net/mdio/mdio-mux.c index ebd001f0eece..a881e3523328 100644 --- a/drivers/net/mdio/mdio-mux.c +++ b/drivers/net/mdio/mdio-mux.c @@ -168,8 +168,8 @@ int mdio_mux_init(struct device *dev, cb->mii_bus->priv = cb; cb->mii_bus->name = "mdio_mux"; - snprintf(cb->mii_bus->id, MII_BUS_ID_SIZE, "%x.%x", - pb->parent_id, v); + snprintf(cb->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x.%x", + cb->mii_bus->name, pb->parent_id, v); cb->mii_bus->parent = dev; cb->mii_bus->read = mdio_mux_read; cb->mii_bus->write = mdio_mux_write; diff --git a/drivers/net/mhi_net.c b/drivers/net/mhi_net.c index aaa628f859fd..0b1b6f650104 100644 --- a/drivers/net/mhi_net.c +++ b/drivers/net/mhi_net.c @@ -225,7 +225,7 @@ static void mhi_net_dl_callback(struct mhi_device *mhi_dev, u64_stats_inc(&mhi_netdev->stats.rx_packets); u64_stats_add(&mhi_netdev->stats.rx_bytes, skb->len); u64_stats_update_end(&mhi_netdev->stats.rx_syncp); - netif_rx(skb); + __netif_rx(skb); } /* Refill if RX buffers queue becomes low */ diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c index 4300261e2f9e..378ee779061c 100644 --- a/drivers/net/netdevsim/fib.c +++ b/drivers/net/netdevsim/fib.c @@ -623,14 +623,14 @@ static int nsim_fib6_rt_append(struct nsim_fib_data *data, if (err) goto err_fib6_rt_nh_del; - fib6_event->rt_arr[i]->trap = true; + WRITE_ONCE(fib6_event->rt_arr[i]->trap, true); } return 0; err_fib6_rt_nh_del: for (i--; i >= 0; i--) { - fib6_event->rt_arr[i]->trap = false; + WRITE_ONCE(fib6_event->rt_arr[i]->trap, false); nsim_fib6_rt_nh_del(fib6_rt, fib6_event->rt_arr[i]); } return err; diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c index 98ca6b18415e..80bdc07f2cd3 100644 --- a/drivers/net/ntb_netdev.c +++ b/drivers/net/ntb_netdev.c @@ -119,7 +119,7 @@ static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data, skb->protocol = eth_type_trans(skb, ndev); skb->ip_summed = CHECKSUM_NONE; - if (netif_rx(skb) == NET_RX_DROP) { + if (__netif_rx(skb) == NET_RX_DROP) { ndev->stats.rx_errors++; ndev->stats.rx_dropped++; } else { diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index c2d1a85ec559..ef8b14135133 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c @@ -886,7 +886,7 @@ out: spin_unlock_irqrestore(&dp83640->rx_lock, flags); if (shhwtstamps) - netif_rx_ni(skb); + netif_rx(skb); } static void decode_txts(struct dp83640_private *dp83640, @@ -970,17 +970,6 @@ static void decode_status_frame(struct dp83640_private *dp83640, } } -static int is_sync(struct sk_buff *skb, int type) -{ - struct ptp_header *hdr; - - hdr = ptp_parse_header(skb, type); - if (!hdr) - return 0; - - return ptp_get_msgtype(hdr, type) == PTP_MSGTYPE_SYNC; -} - static void dp83640_free_clocks(void) { struct dp83640_clock *clock; @@ -1329,7 +1318,7 @@ static void rx_timestamp_work(struct work_struct *work) break; } - netif_rx_ni(skb); + netif_rx(skb); } if (!skb_queue_empty(&dp83640->rx_queue)) @@ -1380,7 +1369,7 @@ static bool 
dp83640_rxtstamp(struct mii_timestamper *mii_ts, skb_queue_tail(&dp83640->rx_queue, skb); schedule_delayed_work(&dp83640->ts_work, SKB_TIMESTAMP_TIMEOUT); } else { - netif_rx_ni(skb); + netif_rx(skb); } return true; @@ -1396,7 +1385,7 @@ static void dp83640_txtstamp(struct mii_timestamper *mii_ts, switch (dp83640->hwts_tx_en) { case HWTSTAMP_TX_ONESTEP_SYNC: - if (is_sync(skb, type)) { + if (ptp_msg_is_sync(skb, type)) { kfree_skb(skb); return; } diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c index 211b5476a6f5..ce17b2af3218 100644 --- a/drivers/net/phy/dp83822.c +++ b/drivers/net/phy/dp83822.c @@ -274,7 +274,7 @@ static int dp83822_config_intr(struct phy_device *phydev) if (err < 0) return err; - err = phy_write(phydev, MII_DP83822_MISR1, 0); + err = phy_write(phydev, MII_DP83822_MISR2, 0); if (err < 0) return err; diff --git a/drivers/net/phy/mediatek-ge.c b/drivers/net/phy/mediatek-ge.c index b7a5ae20edd5..68ee434f9dea 100644 --- a/drivers/net/phy/mediatek-ge.c +++ b/drivers/net/phy/mediatek-ge.c @@ -55,9 +55,6 @@ static int mt7530_phy_config_init(struct phy_device *phydev) static int mt7531_phy_config_init(struct phy_device *phydev) { - if (phydev->interface != PHY_INTERFACE_MODE_INTERNAL) - return -EINVAL; - mtk_gephy_config_init(phydev); /* PHY link down power saving enable */ diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c index 7e7904fee1d9..73f7962a37d3 100644 --- a/drivers/net/phy/meson-gxl.c +++ b/drivers/net/phy/meson-gxl.c @@ -30,8 +30,12 @@ #define INTSRC_LINK_DOWN BIT(4) #define INTSRC_REMOTE_FAULT BIT(5) #define INTSRC_ANEG_COMPLETE BIT(6) +#define INTSRC_ENERGY_DETECT BIT(7) #define INTSRC_MASK 30 +#define INT_SOURCES (INTSRC_LINK_DOWN | INTSRC_ANEG_COMPLETE | \ + INTSRC_ENERGY_DETECT) + #define BANK_ANALOG_DSP 0 #define BANK_WOL 1 #define BANK_BIST 3 @@ -200,7 +204,6 @@ static int meson_gxl_ack_interrupt(struct phy_device *phydev) static int meson_gxl_config_intr(struct phy_device *phydev) { - u16 val; int ret; if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { @@ -209,16 +212,9 @@ static int meson_gxl_config_intr(struct phy_device *phydev) if (ret) return ret; - val = INTSRC_ANEG_PR - | INTSRC_PARALLEL_FAULT - | INTSRC_ANEG_LP_ACK - | INTSRC_LINK_DOWN - | INTSRC_REMOTE_FAULT - | INTSRC_ANEG_COMPLETE; - ret = phy_write(phydev, INTSRC_MASK, val); + ret = phy_write(phydev, INTSRC_MASK, INT_SOURCES); } else { - val = 0; - ret = phy_write(phydev, INTSRC_MASK, val); + ret = phy_write(phydev, INTSRC_MASK, 0); /* Ack any pending IRQ */ ret = meson_gxl_ack_interrupt(phydev); @@ -237,10 +233,23 @@ static irqreturn_t meson_gxl_handle_interrupt(struct phy_device *phydev) return IRQ_NONE; } + irq_status &= INT_SOURCES; + if (irq_status == 0) return IRQ_NONE; - phy_trigger_machine(phydev); + /* Aneg-complete interrupt is used for link-up detection */ + if (phydev->autoneg == AUTONEG_ENABLE && + irq_status == INTSRC_ENERGY_DETECT) + return IRQ_HANDLED; + + /* Give PHY some time before MAC starts sending data. This works + * around an issue where network doesn't come up properly. 
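The deferral just below goes through msecs_to_jiffies(), so the actual
granularity depends on CONFIG_HZ. A simplified model of the round-up
conversion (the real kernel helper has extra fast paths and overflow
handling; HZ=250 is just an example build setting):

	#include <stdio.h>

	#define HZ 250

	static unsigned long msecs_to_jiffies(unsigned int m)
	{
		/* round up so short delays never truncate to 0 jiffies */
		return ((unsigned long)m * HZ + 999) / 1000;
	}

	int main(void)
	{
		printf("100 ms -> %lu jiffies at HZ=%d\n",
		       msecs_to_jiffies(100), HZ);
		return 0;
	}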
+ */ + if (!(irq_status & INTSRC_LINK_DOWN)) + phy_queue_state_machine(phydev, msecs_to_jiffies(100)); + else + phy_trigger_machine(phydev); return IRQ_HANDLED; } diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index a7ebcdab415b..19b11e896460 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -28,6 +28,10 @@ #include <linux/of.h> #include <linux/clk.h> #include <linux/delay.h> +#include <linux/ptp_clock_kernel.h> +#include <linux/ptp_clock.h> +#include <linux/ptp_classify.h> +#include <linux/net_tstamp.h> /* Operation Mode Strap Override */ #define MII_KSZPHY_OMSO 0x16 @@ -79,6 +83,119 @@ #define LAN8814_INTR_CTRL_REG_POLARITY BIT(1) #define LAN8814_INTR_CTRL_REG_INTR_ENABLE BIT(0) +/* Represents 1ppm adjustment in 2^32 format with + * each nsec contains 4 clock cycles. + * The value is calculated as following: (1/1000000)/((2^-32)/4) + */ +#define LAN8814_1PPM_FORMAT 17179 + +#define PTP_RX_MOD 0x024F +#define PTP_RX_MOD_BAD_UDPV4_CHKSUM_FORCE_FCS_DIS_ BIT(3) +#define PTP_RX_TIMESTAMP_EN 0x024D +#define PTP_TX_TIMESTAMP_EN 0x028D + +#define PTP_TIMESTAMP_EN_SYNC_ BIT(0) +#define PTP_TIMESTAMP_EN_DREQ_ BIT(1) +#define PTP_TIMESTAMP_EN_PDREQ_ BIT(2) +#define PTP_TIMESTAMP_EN_PDRES_ BIT(3) + +#define PTP_RX_LATENCY_1000 0x0224 +#define PTP_TX_LATENCY_1000 0x0225 + +#define PTP_RX_LATENCY_100 0x0222 +#define PTP_TX_LATENCY_100 0x0223 + +#define PTP_RX_LATENCY_10 0x0220 +#define PTP_TX_LATENCY_10 0x0221 + +#define PTP_TX_PARSE_L2_ADDR_EN 0x0284 +#define PTP_RX_PARSE_L2_ADDR_EN 0x0244 + +#define PTP_TX_PARSE_IP_ADDR_EN 0x0285 +#define PTP_RX_PARSE_IP_ADDR_EN 0x0245 +#define LTC_HARD_RESET 0x023F +#define LTC_HARD_RESET_ BIT(0) + +#define TSU_HARD_RESET 0x02C1 +#define TSU_HARD_RESET_ BIT(0) + +#define PTP_CMD_CTL 0x0200 +#define PTP_CMD_CTL_PTP_DISABLE_ BIT(0) +#define PTP_CMD_CTL_PTP_ENABLE_ BIT(1) +#define PTP_CMD_CTL_PTP_CLOCK_READ_ BIT(3) +#define PTP_CMD_CTL_PTP_CLOCK_LOAD_ BIT(4) +#define PTP_CMD_CTL_PTP_LTC_STEP_SEC_ BIT(5) +#define PTP_CMD_CTL_PTP_LTC_STEP_NSEC_ BIT(6) + +#define PTP_CLOCK_SET_SEC_MID 0x0206 +#define PTP_CLOCK_SET_SEC_LO 0x0207 +#define PTP_CLOCK_SET_NS_HI 0x0208 +#define PTP_CLOCK_SET_NS_LO 0x0209 + +#define PTP_CLOCK_READ_SEC_MID 0x022A +#define PTP_CLOCK_READ_SEC_LO 0x022B +#define PTP_CLOCK_READ_NS_HI 0x022C +#define PTP_CLOCK_READ_NS_LO 0x022D + +#define PTP_OPERATING_MODE 0x0241 +#define PTP_OPERATING_MODE_STANDALONE_ BIT(0) + +#define PTP_TX_MOD 0x028F +#define PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_ BIT(12) +#define PTP_TX_MOD_BAD_UDPV4_CHKSUM_FORCE_FCS_DIS_ BIT(3) + +#define PTP_RX_PARSE_CONFIG 0x0242 +#define PTP_RX_PARSE_CONFIG_LAYER2_EN_ BIT(0) +#define PTP_RX_PARSE_CONFIG_IPV4_EN_ BIT(1) +#define PTP_RX_PARSE_CONFIG_IPV6_EN_ BIT(2) + +#define PTP_TX_PARSE_CONFIG 0x0282 +#define PTP_TX_PARSE_CONFIG_LAYER2_EN_ BIT(0) +#define PTP_TX_PARSE_CONFIG_IPV4_EN_ BIT(1) +#define PTP_TX_PARSE_CONFIG_IPV6_EN_ BIT(2) + +#define PTP_CLOCK_RATE_ADJ_HI 0x020C +#define PTP_CLOCK_RATE_ADJ_LO 0x020D +#define PTP_CLOCK_RATE_ADJ_DIR_ BIT(15) + +#define PTP_LTC_STEP_ADJ_HI 0x0212 +#define PTP_LTC_STEP_ADJ_LO 0x0213 +#define PTP_LTC_STEP_ADJ_DIR_ BIT(15) + +#define LAN8814_INTR_STS_REG 0x0033 +#define LAN8814_INTR_STS_REG_1588_TSU0_ BIT(0) +#define LAN8814_INTR_STS_REG_1588_TSU1_ BIT(1) +#define LAN8814_INTR_STS_REG_1588_TSU2_ BIT(2) +#define LAN8814_INTR_STS_REG_1588_TSU3_ BIT(3) + +#define PTP_CAP_INFO 0x022A +#define PTP_CAP_INFO_TX_TS_CNT_GET_(reg_val) (((reg_val) & 0x0f00) >> 8) +#define PTP_CAP_INFO_RX_TS_CNT_GET_(reg_val) ((reg_val) & 
0x000f) + +#define PTP_TX_EGRESS_SEC_HI 0x0296 +#define PTP_TX_EGRESS_SEC_LO 0x0297 +#define PTP_TX_EGRESS_NS_HI 0x0294 +#define PTP_TX_EGRESS_NS_LO 0x0295 +#define PTP_TX_MSG_HEADER2 0x0299 + +#define PTP_RX_INGRESS_SEC_HI 0x0256 +#define PTP_RX_INGRESS_SEC_LO 0x0257 +#define PTP_RX_INGRESS_NS_HI 0x0254 +#define PTP_RX_INGRESS_NS_LO 0x0255 +#define PTP_RX_MSG_HEADER2 0x0259 + +#define PTP_TSU_INT_EN 0x0200 +#define PTP_TSU_INT_EN_PTP_TX_TS_OVRFL_EN_ BIT(3) +#define PTP_TSU_INT_EN_PTP_TX_TS_EN_ BIT(2) +#define PTP_TSU_INT_EN_PTP_RX_TS_OVRFL_EN_ BIT(1) +#define PTP_TSU_INT_EN_PTP_RX_TS_EN_ BIT(0) + +#define PTP_TSU_INT_STS 0x0201 +#define PTP_TSU_INT_STS_PTP_TX_TS_OVRFL_INT_ BIT(3) +#define PTP_TSU_INT_STS_PTP_TX_TS_EN_ BIT(2) +#define PTP_TSU_INT_STS_PTP_RX_TS_OVRFL_INT_ BIT(1) +#define PTP_TSU_INT_STS_PTP_RX_TS_EN_ BIT(0) + /* PHY Control 1 */ #define MII_KSZPHY_CTRL_1 0x1e #define KSZ8081_CTRL1_MDIX_STAT BIT(4) @@ -108,6 +225,7 @@ #define MII_KSZPHY_TX_DATA_PAD_SKEW 0x106 #define PS_TO_REG 200 +#define FIFO_SIZE 8 struct kszphy_hw_stat { const char *string; @@ -128,7 +246,57 @@ struct kszphy_type { bool has_rmii_ref_clk_sel; }; +/* Shared structure between the PHYs of the same package. */ +struct lan8814_shared_priv { + struct phy_device *phydev; + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_clock_info; + + /* Reference counter to how many ports in the package are enabling the + * timestamping + */ + u8 ref; + + /* Lock for ptp_clock and ref */ + struct mutex shared_lock; +}; + +struct lan8814_ptp_rx_ts { + struct list_head list; + u32 seconds; + u32 nsec; + u16 seq_id; +}; + +struct kszphy_latencies { + u16 rx_10; + u16 tx_10; + u16 rx_100; + u16 tx_100; + u16 rx_1000; + u16 tx_1000; +}; + +struct kszphy_ptp_priv { + struct mii_timestamper mii_ts; + struct phy_device *phydev; + + struct sk_buff_head tx_queue; + struct sk_buff_head rx_queue; + + struct list_head rx_ts_list; + /* Lock for Rx ts fifo */ + spinlock_t rx_ts_lock; + + int hwts_tx_type; + enum hwtstamp_rx_filters rx_filter; + int layer; + int version; +}; + struct kszphy_priv { + struct kszphy_ptp_priv ptp_priv; + struct kszphy_latencies latencies; const struct kszphy_type *type; int led_mode; bool rmii_ref_clk_sel; @@ -136,6 +304,14 @@ struct kszphy_priv { u64 stats[ARRAY_SIZE(kszphy_hw_stats)]; }; +static struct kszphy_latencies lan8814_latencies = { + .rx_10 = 0x22AA, + .tx_10 = 0x2E4A, + .rx_100 = 0x092A, + .tx_100 = 0x02C1, + .rx_1000 = 0x01AD, + .tx_1000 = 0x00C9, +}; static const struct kszphy_type ksz8021_type = { .led_mode_reg = MII_KSZPHY_CTRL_2, .has_broadcast_disable = true, @@ -1596,11 +1772,13 @@ static int lanphy_read_page_reg(struct phy_device *phydev, int page, u32 addr) { u32 data; - phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL, page); - phy_write(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA, addr); - phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL, - (page | LAN_EXT_PAGE_ACCESS_CTRL_EP_FUNC)); - data = phy_read(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA); + phy_lock_mdio_bus(phydev); + __phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL, page); + __phy_write(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA, addr); + __phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL, + (page | LAN_EXT_PAGE_ACCESS_CTRL_EP_FUNC)); + data = __phy_read(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA); + phy_unlock_mdio_bus(phydev); return data; } @@ -1608,43 +1786,670 @@ static int lanphy_read_page_reg(struct phy_device *phydev, int page, u32 addr) static int lanphy_write_page_reg(struct phy_device *phydev, int page, u16 addr, u16 val) { - 
phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL, page); - phy_write(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA, addr); - phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL, - (page | LAN_EXT_PAGE_ACCESS_CTRL_EP_FUNC)); + phy_lock_mdio_bus(phydev); + __phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL, page); + __phy_write(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA, addr); + __phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL, + page | LAN_EXT_PAGE_ACCESS_CTRL_EP_FUNC); - val = phy_write(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA, val); - if (val) { + val = __phy_write(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA, val); + if (val != 0) phydev_err(phydev, "Error: phy_write has returned error %d\n", val); - return val; + phy_unlock_mdio_bus(phydev); + return val; +} + +static int lan8814_config_ts_intr(struct phy_device *phydev, bool enable) +{ + u16 val = 0; + + if (enable) + val = PTP_TSU_INT_EN_PTP_TX_TS_EN_ | + PTP_TSU_INT_EN_PTP_TX_TS_OVRFL_EN_ | + PTP_TSU_INT_EN_PTP_RX_TS_EN_ | + PTP_TSU_INT_EN_PTP_RX_TS_OVRFL_EN_; + + return lanphy_write_page_reg(phydev, 5, PTP_TSU_INT_EN, val); +} + +static void lan8814_ptp_rx_ts_get(struct phy_device *phydev, + u32 *seconds, u32 *nano_seconds, u16 *seq_id) +{ + *seconds = lanphy_read_page_reg(phydev, 5, PTP_RX_INGRESS_SEC_HI); + *seconds = (*seconds << 16) | + lanphy_read_page_reg(phydev, 5, PTP_RX_INGRESS_SEC_LO); + + *nano_seconds = lanphy_read_page_reg(phydev, 5, PTP_RX_INGRESS_NS_HI); + *nano_seconds = ((*nano_seconds & 0x3fff) << 16) | + lanphy_read_page_reg(phydev, 5, PTP_RX_INGRESS_NS_LO); + + *seq_id = lanphy_read_page_reg(phydev, 5, PTP_RX_MSG_HEADER2); +} + +static void lan8814_ptp_tx_ts_get(struct phy_device *phydev, + u32 *seconds, u32 *nano_seconds, u16 *seq_id) +{ + *seconds = lanphy_read_page_reg(phydev, 5, PTP_TX_EGRESS_SEC_HI); + *seconds = *seconds << 16 | + lanphy_read_page_reg(phydev, 5, PTP_TX_EGRESS_SEC_LO); + + *nano_seconds = lanphy_read_page_reg(phydev, 5, PTP_TX_EGRESS_NS_HI); + *nano_seconds = ((*nano_seconds & 0x3fff) << 16) | + lanphy_read_page_reg(phydev, 5, PTP_TX_EGRESS_NS_LO); + + *seq_id = lanphy_read_page_reg(phydev, 5, PTP_TX_MSG_HEADER2); +} + +static int lan8814_ts_info(struct mii_timestamper *mii_ts, struct ethtool_ts_info *info) +{ + struct kszphy_ptp_priv *ptp_priv = container_of(mii_ts, struct kszphy_ptp_priv, mii_ts); + struct phy_device *phydev = ptp_priv->phydev; + struct lan8814_shared_priv *shared = phydev->shared->priv; + + info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + info->phc_index = ptp_clock_index(shared->ptp_clock); + + info->tx_types = + (1 << HWTSTAMP_TX_OFF) | + (1 << HWTSTAMP_TX_ON) | + (1 << HWTSTAMP_TX_ONESTEP_SYNC); + + info->rx_filters = + (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_EVENT); + + return 0; +} + +static void lan8814_flush_fifo(struct phy_device *phydev, bool egress) +{ + int i; + + for (i = 0; i < FIFO_SIZE; ++i) + lanphy_read_page_reg(phydev, 5, + egress ? 
PTP_TX_MSG_HEADER2 : PTP_RX_MSG_HEADER2); + + /* Read to clear overflow status bit */ + lanphy_read_page_reg(phydev, 5, PTP_TSU_INT_STS); +} + +static int lan8814_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr) +{ + struct kszphy_ptp_priv *ptp_priv = + container_of(mii_ts, struct kszphy_ptp_priv, mii_ts); + struct phy_device *phydev = ptp_priv->phydev; + struct lan8814_shared_priv *shared = phydev->shared->priv; + struct lan8814_ptp_rx_ts *rx_ts, *tmp; + struct hwtstamp_config config; + int txcfg = 0, rxcfg = 0; + int pkt_ts_enable; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + ptp_priv->hwts_tx_type = config.tx_type; + ptp_priv->rx_filter = config.rx_filter; + + switch (config.rx_filter) { + case HWTSTAMP_FILTER_NONE: + ptp_priv->layer = 0; + ptp_priv->version = 0; + break; + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + ptp_priv->layer = PTP_CLASS_L4; + ptp_priv->version = PTP_CLASS_V2; + break; + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + ptp_priv->layer = PTP_CLASS_L2; + ptp_priv->version = PTP_CLASS_V2; + break; + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + ptp_priv->layer = PTP_CLASS_L4 | PTP_CLASS_L2; + ptp_priv->version = PTP_CLASS_V2; + break; + default: + return -ERANGE; + } + + if (ptp_priv->layer & PTP_CLASS_L2) { + rxcfg = PTP_RX_PARSE_CONFIG_LAYER2_EN_; + txcfg = PTP_TX_PARSE_CONFIG_LAYER2_EN_; + } else if (ptp_priv->layer & PTP_CLASS_L4) { + rxcfg |= PTP_RX_PARSE_CONFIG_IPV4_EN_ | PTP_RX_PARSE_CONFIG_IPV6_EN_; + txcfg |= PTP_TX_PARSE_CONFIG_IPV4_EN_ | PTP_TX_PARSE_CONFIG_IPV6_EN_; } + lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_RX_PARSE_CONFIG, rxcfg); + lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_TX_PARSE_CONFIG, txcfg); + + pkt_ts_enable = PTP_TIMESTAMP_EN_SYNC_ | PTP_TIMESTAMP_EN_DREQ_ | + PTP_TIMESTAMP_EN_PDREQ_ | PTP_TIMESTAMP_EN_PDRES_; + lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_RX_TIMESTAMP_EN, pkt_ts_enable); + lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_TX_TIMESTAMP_EN, pkt_ts_enable); + + if (ptp_priv->hwts_tx_type == HWTSTAMP_TX_ONESTEP_SYNC) + lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_TX_MOD, + PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_); + + if (config.rx_filter != HWTSTAMP_FILTER_NONE) + lan8814_config_ts_intr(ptp_priv->phydev, true); + else + lan8814_config_ts_intr(ptp_priv->phydev, false); + + mutex_lock(&shared->shared_lock); + if (config.rx_filter != HWTSTAMP_FILTER_NONE) + shared->ref++; + else + shared->ref--; + + if (shared->ref) + lanphy_write_page_reg(ptp_priv->phydev, 4, PTP_CMD_CTL, + PTP_CMD_CTL_PTP_ENABLE_); + else + lanphy_write_page_reg(ptp_priv->phydev, 4, PTP_CMD_CTL, + PTP_CMD_CTL_PTP_DISABLE_); + mutex_unlock(&shared->shared_lock); + + /* In case of multiple starts and stops, these needs to be cleared */ + list_for_each_entry_safe(rx_ts, tmp, &ptp_priv->rx_ts_list, list) { + list_del(&rx_ts->list); + kfree(rx_ts); + } + skb_queue_purge(&ptp_priv->rx_queue); + skb_queue_purge(&ptp_priv->tx_queue); + + lan8814_flush_fifo(ptp_priv->phydev, false); + lan8814_flush_fifo(ptp_priv->phydev, true); + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 
-EFAULT : 0; +} + +static void lan8814_txtstamp(struct mii_timestamper *mii_ts, + struct sk_buff *skb, int type) +{ + struct kszphy_ptp_priv *ptp_priv = container_of(mii_ts, struct kszphy_ptp_priv, mii_ts); + + switch (ptp_priv->hwts_tx_type) { + case HWTSTAMP_TX_ONESTEP_SYNC: + if (ptp_msg_is_sync(skb, type)) { + kfree_skb(skb); + return; + } + fallthrough; + case HWTSTAMP_TX_ON: + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + skb_queue_tail(&ptp_priv->tx_queue, skb); + break; + case HWTSTAMP_TX_OFF: + default: + kfree_skb(skb); + break; + } +} + +static void lan8814_get_sig_rx(struct sk_buff *skb, u16 *sig) +{ + struct ptp_header *ptp_header; + u32 type; + + skb_push(skb, ETH_HLEN); + type = ptp_classify_raw(skb); + ptp_header = ptp_parse_header(skb, type); + skb_pull_inline(skb, ETH_HLEN); + + *sig = (__force u16)(ntohs(ptp_header->sequence_id)); +} + +static bool lan8814_match_rx_ts(struct kszphy_ptp_priv *ptp_priv, + struct sk_buff *skb) +{ + struct skb_shared_hwtstamps *shhwtstamps; + struct lan8814_ptp_rx_ts *rx_ts, *tmp; + unsigned long flags; + bool ret = false; + u16 skb_sig; + + lan8814_get_sig_rx(skb, &skb_sig); + + /* Iterate over all RX timestamps and match it with the received skbs */ + spin_lock_irqsave(&ptp_priv->rx_ts_lock, flags); + list_for_each_entry_safe(rx_ts, tmp, &ptp_priv->rx_ts_list, list) { + /* Check if we found the signature we were looking for. */ + if (memcmp(&skb_sig, &rx_ts->seq_id, sizeof(rx_ts->seq_id))) + continue; + + shhwtstamps = skb_hwtstamps(skb); + memset(shhwtstamps, 0, sizeof(*shhwtstamps)); + shhwtstamps->hwtstamp = ktime_set(rx_ts->seconds, + rx_ts->nsec); + list_del(&rx_ts->list); + kfree(rx_ts); + + ret = true; + break; + } + spin_unlock_irqrestore(&ptp_priv->rx_ts_lock, flags); + + if (ret) + netif_rx(skb); + return ret; +} + +static bool lan8814_rxtstamp(struct mii_timestamper *mii_ts, struct sk_buff *skb, int type) +{ + struct kszphy_ptp_priv *ptp_priv = + container_of(mii_ts, struct kszphy_ptp_priv, mii_ts); + + if (ptp_priv->rx_filter == HWTSTAMP_FILTER_NONE || + type == PTP_CLASS_NONE) + return false; + + if ((type & ptp_priv->version) == 0 || (type & ptp_priv->layer) == 0) + return false; + + /* If we failed to match then add it to the queue for when the timestamp + * will come + */ + if (!lan8814_match_rx_ts(ptp_priv, skb)) + skb_queue_tail(&ptp_priv->rx_queue, skb); + + return true; +} + +static void lan8814_ptp_clock_set(struct phy_device *phydev, + u32 seconds, u32 nano_seconds) +{ + u32 sec_low, sec_high, nsec_low, nsec_high; + + sec_low = seconds & 0xffff; + sec_high = (seconds >> 16) & 0xffff; + nsec_low = nano_seconds & 0xffff; + nsec_high = (nano_seconds >> 16) & 0x3fff; + + lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_SEC_LO, sec_low); + lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_SEC_MID, sec_high); + lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_NS_LO, nsec_low); + lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_NS_HI, nsec_high); + + lanphy_write_page_reg(phydev, 4, PTP_CMD_CTL, PTP_CMD_CTL_PTP_CLOCK_LOAD_); +} + +static void lan8814_ptp_clock_get(struct phy_device *phydev, + u32 *seconds, u32 *nano_seconds) +{ + lanphy_write_page_reg(phydev, 4, PTP_CMD_CTL, PTP_CMD_CTL_PTP_CLOCK_READ_); + + *seconds = lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_SEC_MID); + *seconds = (*seconds << 16) | + lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_SEC_LO); + + *nano_seconds = lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_NS_HI); + *nano_seconds = ((*nano_seconds & 0x3fff) << 16) | + lanphy_read_page_reg(phydev, 4, 
PTP_CLOCK_READ_NS_LO); +} + +static int lan8814_ptpci_gettime64(struct ptp_clock_info *ptpci, + struct timespec64 *ts) +{ + struct lan8814_shared_priv *shared = container_of(ptpci, struct lan8814_shared_priv, + ptp_clock_info); + struct phy_device *phydev = shared->phydev; + u32 nano_seconds; + u32 seconds; + + mutex_lock(&shared->shared_lock); + lan8814_ptp_clock_get(phydev, &seconds, &nano_seconds); + mutex_unlock(&shared->shared_lock); + ts->tv_sec = seconds; + ts->tv_nsec = nano_seconds; + return 0; } -static int lan8814_config_init(struct phy_device *phydev) +static int lan8814_ptpci_settime64(struct ptp_clock_info *ptpci, + const struct timespec64 *ts) { - int val; + struct lan8814_shared_priv *shared = container_of(ptpci, struct lan8814_shared_priv, + ptp_clock_info); + struct phy_device *phydev = shared->phydev; - /* Reset the PHY */ - val = lanphy_read_page_reg(phydev, 4, LAN8814_QSGMII_SOFT_RESET); - val |= LAN8814_QSGMII_SOFT_RESET_BIT; - lanphy_write_page_reg(phydev, 4, LAN8814_QSGMII_SOFT_RESET, val); + mutex_lock(&shared->shared_lock); + lan8814_ptp_clock_set(phydev, ts->tv_sec, ts->tv_nsec); + mutex_unlock(&shared->shared_lock); - /* Disable ANEG with QSGMII PCS Host side */ - val = lanphy_read_page_reg(phydev, 5, LAN8814_QSGMII_PCS1G_ANEG_CONFIG); - val &= ~LAN8814_QSGMII_PCS1G_ANEG_CONFIG_ANEG_ENA; - lanphy_write_page_reg(phydev, 5, LAN8814_QSGMII_PCS1G_ANEG_CONFIG, val); + return 0; +} - /* MDI-X setting for swap A,B transmit */ - val = lanphy_read_page_reg(phydev, 2, LAN8814_ALIGN_SWAP); - val &= ~LAN8814_ALIGN_TX_A_B_SWAP_MASK; - val |= LAN8814_ALIGN_TX_A_B_SWAP; - lanphy_write_page_reg(phydev, 2, LAN8814_ALIGN_SWAP, val); +static void lan8814_ptp_clock_step(struct phy_device *phydev, + s64 time_step_ns) +{ + u32 nano_seconds_step; + u64 abs_time_step_ns; + u32 unsigned_seconds; + u32 nano_seconds; + u32 remainder; + s32 seconds; + + if (time_step_ns > 15000000000LL) { + /* convert to clock set */ + lan8814_ptp_clock_get(phydev, &unsigned_seconds, &nano_seconds); + unsigned_seconds += div_u64_rem(time_step_ns, 1000000000LL, + &remainder); + nano_seconds += remainder; + if (nano_seconds >= 1000000000) { + unsigned_seconds++; + nano_seconds -= 1000000000; + } + lan8814_ptp_clock_set(phydev, unsigned_seconds, nano_seconds); + return; + } else if (time_step_ns < -15000000000LL) { + /* convert to clock set */ + time_step_ns = -time_step_ns; + + lan8814_ptp_clock_get(phydev, &unsigned_seconds, &nano_seconds); + unsigned_seconds -= div_u64_rem(time_step_ns, 1000000000LL, + &remainder); + nano_seconds_step = remainder; + if (nano_seconds < nano_seconds_step) { + unsigned_seconds--; + nano_seconds += 1000000000; + } + nano_seconds -= nano_seconds_step; + lan8814_ptp_clock_set(phydev, unsigned_seconds, + nano_seconds); + return; + } + + /* do clock step */ + if (time_step_ns >= 0) { + abs_time_step_ns = (u64)time_step_ns; + seconds = (s32)div_u64_rem(abs_time_step_ns, 1000000000, + &remainder); + nano_seconds = remainder; + } else { + abs_time_step_ns = (u64)(-time_step_ns); + seconds = -((s32)div_u64_rem(abs_time_step_ns, 1000000000, + &remainder)); + nano_seconds = remainder; + if (nano_seconds > 0) { + /* subtracting nano seconds is not allowed + * convert to subtracting from seconds, + * and adding to nanoseconds + */ + seconds--; + nano_seconds = (1000000000 - nano_seconds); + } + } + + if (nano_seconds > 0) { + /* add 8 ns to cover the likely normal increment */ + nano_seconds += 8; + } + + if (nano_seconds >= 1000000000) { + /* carry into seconds */ + seconds++; + 
nano_seconds -= 1000000000; + } + + while (seconds) { + if (seconds > 0) { + u32 adjustment_value = (u32)seconds; + u16 adjustment_value_lo, adjustment_value_hi; + + if (adjustment_value > 0xF) + adjustment_value = 0xF; + + adjustment_value_lo = adjustment_value & 0xffff; + adjustment_value_hi = (adjustment_value >> 16) & 0x3fff; + + lanphy_write_page_reg(phydev, 4, PTP_LTC_STEP_ADJ_LO, + adjustment_value_lo); + lanphy_write_page_reg(phydev, 4, PTP_LTC_STEP_ADJ_HI, + PTP_LTC_STEP_ADJ_DIR_ | + adjustment_value_hi); + seconds -= ((s32)adjustment_value); + } else { + u32 adjustment_value = (u32)(-seconds); + u16 adjustment_value_lo, adjustment_value_hi; + + if (adjustment_value > 0xF) + adjustment_value = 0xF; + + adjustment_value_lo = adjustment_value & 0xffff; + adjustment_value_hi = (adjustment_value >> 16) & 0x3fff; + + lanphy_write_page_reg(phydev, 4, PTP_LTC_STEP_ADJ_LO, + adjustment_value_lo); + lanphy_write_page_reg(phydev, 4, PTP_LTC_STEP_ADJ_HI, + adjustment_value_hi); + seconds += ((s32)adjustment_value); + } + lanphy_write_page_reg(phydev, 4, PTP_CMD_CTL, + PTP_CMD_CTL_PTP_LTC_STEP_SEC_); + } + if (nano_seconds) { + u16 nano_seconds_lo; + u16 nano_seconds_hi; + + nano_seconds_lo = nano_seconds & 0xffff; + nano_seconds_hi = (nano_seconds >> 16) & 0x3fff; + + lanphy_write_page_reg(phydev, 4, PTP_LTC_STEP_ADJ_LO, + nano_seconds_lo); + lanphy_write_page_reg(phydev, 4, PTP_LTC_STEP_ADJ_HI, + PTP_LTC_STEP_ADJ_DIR_ | + nano_seconds_hi); + lanphy_write_page_reg(phydev, 4, PTP_CMD_CTL, + PTP_CMD_CTL_PTP_LTC_STEP_NSEC_); + } +} + +static int lan8814_ptpci_adjtime(struct ptp_clock_info *ptpci, s64 delta) +{ + struct lan8814_shared_priv *shared = container_of(ptpci, struct lan8814_shared_priv, + ptp_clock_info); + struct phy_device *phydev = shared->phydev; + + mutex_lock(&shared->shared_lock); + lan8814_ptp_clock_step(phydev, delta); + mutex_unlock(&shared->shared_lock); + + return 0; +} + +static int lan8814_ptpci_adjfine(struct ptp_clock_info *ptpci, long scaled_ppm) +{ + struct lan8814_shared_priv *shared = container_of(ptpci, struct lan8814_shared_priv, + ptp_clock_info); + struct phy_device *phydev = shared->phydev; + u16 kszphy_rate_adj_lo, kszphy_rate_adj_hi; + bool positive = true; + u32 kszphy_rate_adj; + + if (scaled_ppm < 0) { + scaled_ppm = -scaled_ppm; + positive = false; + } + + kszphy_rate_adj = LAN8814_1PPM_FORMAT * (scaled_ppm >> 16); + kszphy_rate_adj += (LAN8814_1PPM_FORMAT * (0xffff & scaled_ppm)) >> 16; + + kszphy_rate_adj_lo = kszphy_rate_adj & 0xffff; + kszphy_rate_adj_hi = (kszphy_rate_adj >> 16) & 0x3fff; + + if (positive) + kszphy_rate_adj_hi |= PTP_CLOCK_RATE_ADJ_DIR_; + + mutex_lock(&shared->shared_lock); + lanphy_write_page_reg(phydev, 4, PTP_CLOCK_RATE_ADJ_HI, kszphy_rate_adj_hi); + lanphy_write_page_reg(phydev, 4, PTP_CLOCK_RATE_ADJ_LO, kszphy_rate_adj_lo); + mutex_unlock(&shared->shared_lock); return 0; } +static void lan8814_get_sig_tx(struct sk_buff *skb, u16 *sig) +{ + struct ptp_header *ptp_header; + u32 type; + + type = ptp_classify_raw(skb); + ptp_header = ptp_parse_header(skb, type); + + *sig = (__force u16)(ntohs(ptp_header->sequence_id)); +} + +static void lan8814_dequeue_tx_skb(struct kszphy_ptp_priv *ptp_priv) +{ + struct phy_device *phydev = ptp_priv->phydev; + struct skb_shared_hwtstamps shhwtstamps; + struct sk_buff *skb, *skb_tmp; + unsigned long flags; + u32 seconds, nsec; + bool ret = false; + u16 skb_sig; + u16 seq_id; + + lan8814_ptp_tx_ts_get(phydev, &seconds, &nsec, &seq_id); + + spin_lock_irqsave(&ptp_priv->tx_queue.lock, flags); + 
skb_queue_walk_safe(&ptp_priv->tx_queue, skb, skb_tmp) { + lan8814_get_sig_tx(skb, &skb_sig); + + if (memcmp(&skb_sig, &seq_id, sizeof(seq_id))) + continue; + + __skb_unlink(skb, &ptp_priv->tx_queue); + ret = true; + break; + } + spin_unlock_irqrestore(&ptp_priv->tx_queue.lock, flags); + + if (ret) { + memset(&shhwtstamps, 0, sizeof(shhwtstamps)); + shhwtstamps.hwtstamp = ktime_set(seconds, nsec); + skb_complete_tx_timestamp(skb, &shhwtstamps); + } +} + +static void lan8814_get_tx_ts(struct kszphy_ptp_priv *ptp_priv) +{ + struct phy_device *phydev = ptp_priv->phydev; + u32 reg; + + do { + lan8814_dequeue_tx_skb(ptp_priv); + + /* If other timestamps are available in the FIFO, + * process them. + */ + reg = lanphy_read_page_reg(phydev, 5, PTP_CAP_INFO); + } while (PTP_CAP_INFO_TX_TS_CNT_GET_(reg) > 0); +} + +static bool lan8814_match_skb(struct kszphy_ptp_priv *ptp_priv, + struct lan8814_ptp_rx_ts *rx_ts) +{ + struct skb_shared_hwtstamps *shhwtstamps; + struct sk_buff *skb, *skb_tmp; + unsigned long flags; + bool ret = false; + u16 skb_sig; + + spin_lock_irqsave(&ptp_priv->rx_queue.lock, flags); + skb_queue_walk_safe(&ptp_priv->rx_queue, skb, skb_tmp) { + lan8814_get_sig_rx(skb, &skb_sig); + + if (memcmp(&skb_sig, &rx_ts->seq_id, sizeof(rx_ts->seq_id))) + continue; + + __skb_unlink(skb, &ptp_priv->rx_queue); + + ret = true; + break; + } + spin_unlock_irqrestore(&ptp_priv->rx_queue.lock, flags); + + if (ret) { + shhwtstamps = skb_hwtstamps(skb); + memset(shhwtstamps, 0, sizeof(*shhwtstamps)); + shhwtstamps->hwtstamp = ktime_set(rx_ts->seconds, rx_ts->nsec); + netif_rx(skb); + } + + return ret; +} + +static void lan8814_get_rx_ts(struct kszphy_ptp_priv *ptp_priv) +{ + struct phy_device *phydev = ptp_priv->phydev; + struct lan8814_ptp_rx_ts *rx_ts; + unsigned long flags; + u32 reg; + + do { + rx_ts = kzalloc(sizeof(*rx_ts), GFP_KERNEL); + if (!rx_ts) + return; + + lan8814_ptp_rx_ts_get(phydev, &rx_ts->seconds, &rx_ts->nsec, + &rx_ts->seq_id); + + /* If we failed to match the skb add it to the queue for when + * the frame will come + */ + if (!lan8814_match_skb(ptp_priv, rx_ts)) { + spin_lock_irqsave(&ptp_priv->rx_ts_lock, flags); + list_add(&rx_ts->list, &ptp_priv->rx_ts_list); + spin_unlock_irqrestore(&ptp_priv->rx_ts_lock, flags); + } else { + kfree(rx_ts); + } + + /* If other timestamps are available in the FIFO, + * process them. 
+ */ + reg = lanphy_read_page_reg(phydev, 5, PTP_CAP_INFO); + } while (PTP_CAP_INFO_RX_TS_CNT_GET_(reg) > 0); +} + +static void lan8814_handle_ptp_interrupt(struct phy_device *phydev) +{ + struct kszphy_priv *priv = phydev->priv; + struct kszphy_ptp_priv *ptp_priv = &priv->ptp_priv; + u16 status; + + status = lanphy_read_page_reg(phydev, 5, PTP_TSU_INT_STS); + if (status & PTP_TSU_INT_STS_PTP_TX_TS_EN_) + lan8814_get_tx_ts(ptp_priv); + + if (status & PTP_TSU_INT_STS_PTP_RX_TS_EN_) + lan8814_get_rx_ts(ptp_priv); + + if (status & PTP_TSU_INT_STS_PTP_TX_TS_OVRFL_INT_) { + lan8814_flush_fifo(phydev, true); + skb_queue_purge(&ptp_priv->tx_queue); + } + + if (status & PTP_TSU_INT_STS_PTP_RX_TS_OVRFL_INT_) { + lan8814_flush_fifo(phydev, false); + skb_queue_purge(&ptp_priv->rx_queue); + } +} + static int lan8804_config_init(struct phy_device *phydev) { int val; @@ -1666,17 +2471,31 @@ static int lan8804_config_init(struct phy_device *phydev) static irqreturn_t lan8814_handle_interrupt(struct phy_device *phydev) { + u16 tsu_irq_status; int irq_status; irq_status = phy_read(phydev, LAN8814_INTS); - if (irq_status < 0) - return IRQ_NONE; + if (irq_status > 0 && (irq_status & LAN8814_INT_LINK)) + phy_trigger_machine(phydev); - if (!(irq_status & LAN8814_INT_LINK)) + if (irq_status < 0) { + phy_error(phydev); return IRQ_NONE; + } - phy_trigger_machine(phydev); + while (1) { + tsu_irq_status = lanphy_read_page_reg(phydev, 4, + LAN8814_INTR_STS_REG); + if (tsu_irq_status > 0 && + (tsu_irq_status & (LAN8814_INTR_STS_REG_1588_TSU0_ | + LAN8814_INTR_STS_REG_1588_TSU1_ | + LAN8814_INTR_STS_REG_1588_TSU2_ | + LAN8814_INTR_STS_REG_1588_TSU3_))) + lan8814_handle_ptp_interrupt(phydev); + else + break; + } return IRQ_HANDLED; } @@ -1716,6 +2535,223 @@ static int lan8814_config_intr(struct phy_device *phydev) return err; } +static void lan8814_ptp_init(struct phy_device *phydev) +{ + struct kszphy_priv *priv = phydev->priv; + struct kszphy_ptp_priv *ptp_priv = &priv->ptp_priv; + u32 temp; + + lanphy_write_page_reg(phydev, 5, TSU_HARD_RESET, TSU_HARD_RESET_); + + temp = lanphy_read_page_reg(phydev, 5, PTP_TX_MOD); + temp |= PTP_TX_MOD_BAD_UDPV4_CHKSUM_FORCE_FCS_DIS_; + lanphy_write_page_reg(phydev, 5, PTP_TX_MOD, temp); + + temp = lanphy_read_page_reg(phydev, 5, PTP_RX_MOD); + temp |= PTP_RX_MOD_BAD_UDPV4_CHKSUM_FORCE_FCS_DIS_; + lanphy_write_page_reg(phydev, 5, PTP_RX_MOD, temp); + + lanphy_write_page_reg(phydev, 5, PTP_RX_PARSE_CONFIG, 0); + lanphy_write_page_reg(phydev, 5, PTP_TX_PARSE_CONFIG, 0); + + /* Removing default registers configs related to L2 and IP */ + lanphy_write_page_reg(phydev, 5, PTP_TX_PARSE_L2_ADDR_EN, 0); + lanphy_write_page_reg(phydev, 5, PTP_RX_PARSE_L2_ADDR_EN, 0); + lanphy_write_page_reg(phydev, 5, PTP_TX_PARSE_IP_ADDR_EN, 0); + lanphy_write_page_reg(phydev, 5, PTP_RX_PARSE_IP_ADDR_EN, 0); + + skb_queue_head_init(&ptp_priv->tx_queue); + skb_queue_head_init(&ptp_priv->rx_queue); + INIT_LIST_HEAD(&ptp_priv->rx_ts_list); + spin_lock_init(&ptp_priv->rx_ts_lock); + + ptp_priv->phydev = phydev; + + ptp_priv->mii_ts.rxtstamp = lan8814_rxtstamp; + ptp_priv->mii_ts.txtstamp = lan8814_txtstamp; + ptp_priv->mii_ts.hwtstamp = lan8814_hwtstamp; + ptp_priv->mii_ts.ts_info = lan8814_ts_info; + + phydev->mii_ts = &ptp_priv->mii_ts; +} + +static int lan8814_ptp_probe_once(struct phy_device *phydev) +{ + struct lan8814_shared_priv *shared = phydev->shared->priv; + + /* Initialise shared lock for clock*/ + mutex_init(&shared->shared_lock); + + shared->ptp_clock_info.owner = THIS_MODULE; + 
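/*
 * Editor's note (illustration, not part of the patch): the tx/rx timestamp
 * handling above pairs each hardware FIFO entry with a queued skb purely by
 * PTP sequenceId (see lan8814_get_sig_tx() and lan8814_match_skb()). A
 * self-contained sketch of that matching, with illustrative names:
 */
#include <stddef.h>
#include <stdint.h>

struct pending_pkt {
    uint16_t seq_id;
    struct pending_pkt *next;
};

/* Unlink and return the queued packet whose sequenceId matches, if any;
 * NULL means the timestamp arrived first and must be stashed for later,
 * as lan8814_get_rx_ts() does with its rx_ts_list. */
static struct pending_pkt *take_match(struct pending_pkt **head, uint16_t seq)
{
    struct pending_pkt **p;

    for (p = head; *p; p = &(*p)->next) {
        if ((*p)->seq_id == seq) {
            struct pending_pkt *hit = *p;

            *p = hit->next;
            return hit;
        }
    }
    return NULL;
}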
snprintf(shared->ptp_clock_info.name, 30, "%s", phydev->drv->name); + shared->ptp_clock_info.max_adj = 31249999; + shared->ptp_clock_info.n_alarm = 0; + shared->ptp_clock_info.n_ext_ts = 0; + shared->ptp_clock_info.n_pins = 0; + shared->ptp_clock_info.pps = 0; + shared->ptp_clock_info.pin_config = NULL; + shared->ptp_clock_info.adjfine = lan8814_ptpci_adjfine; + shared->ptp_clock_info.adjtime = lan8814_ptpci_adjtime; + shared->ptp_clock_info.gettime64 = lan8814_ptpci_gettime64; + shared->ptp_clock_info.settime64 = lan8814_ptpci_settime64; + shared->ptp_clock_info.getcrosststamp = NULL; + + shared->ptp_clock = ptp_clock_register(&shared->ptp_clock_info, + &phydev->mdio.dev); + if (IS_ERR_OR_NULL(shared->ptp_clock)) { + phydev_err(phydev, "ptp_clock_register failed %lu\n", + PTR_ERR(shared->ptp_clock)); + return -EINVAL; + } + + phydev_dbg(phydev, "successfully registered ptp clock\n"); + + shared->phydev = phydev; + + /* The EP.4 is shared between all the PHYs in the package and also it + * can be accessed by any of the PHYs + */ + lanphy_write_page_reg(phydev, 4, LTC_HARD_RESET, LTC_HARD_RESET_); + lanphy_write_page_reg(phydev, 4, PTP_OPERATING_MODE, + PTP_OPERATING_MODE_STANDALONE_); + + return 0; +} + +static int lan8814_read_status(struct phy_device *phydev) +{ + struct kszphy_priv *priv = phydev->priv; + struct kszphy_latencies *latencies = &priv->latencies; + int err; + int regval; + + err = genphy_read_status(phydev); + if (err) + return err; + + switch (phydev->speed) { + case SPEED_1000: + lanphy_write_page_reg(phydev, 5, PTP_RX_LATENCY_1000, + latencies->rx_1000); + lanphy_write_page_reg(phydev, 5, PTP_TX_LATENCY_1000, + latencies->tx_1000); + break; + case SPEED_100: + lanphy_write_page_reg(phydev, 5, PTP_RX_LATENCY_100, + latencies->rx_100); + lanphy_write_page_reg(phydev, 5, PTP_TX_LATENCY_100, + latencies->tx_100); + break; + case SPEED_10: + lanphy_write_page_reg(phydev, 5, PTP_RX_LATENCY_10, + latencies->rx_10); + lanphy_write_page_reg(phydev, 5, PTP_TX_LATENCY_10, + latencies->tx_10); + break; + default: + break; + } + + /* Make sure the PHY is not broken. Read idle error count, + * and reset the PHY if it is maxed out. 
+ */ + regval = phy_read(phydev, MII_STAT1000); + if ((regval & 0xFF) == 0xFF) { + phy_init_hw(phydev); + phydev->link = 0; + if (phydev->drv->config_intr && phy_interrupt_is_valid(phydev)) + phydev->drv->config_intr(phydev); + return genphy_config_aneg(phydev); + } + + return 0; +} + +static int lan8814_config_init(struct phy_device *phydev) +{ + int val; + + /* Reset the PHY */ + val = lanphy_read_page_reg(phydev, 4, LAN8814_QSGMII_SOFT_RESET); + val |= LAN8814_QSGMII_SOFT_RESET_BIT; + lanphy_write_page_reg(phydev, 4, LAN8814_QSGMII_SOFT_RESET, val); + + /* Disable ANEG with QSGMII PCS Host side */ + val = lanphy_read_page_reg(phydev, 5, LAN8814_QSGMII_PCS1G_ANEG_CONFIG); + val &= ~LAN8814_QSGMII_PCS1G_ANEG_CONFIG_ANEG_ENA; + lanphy_write_page_reg(phydev, 5, LAN8814_QSGMII_PCS1G_ANEG_CONFIG, val); + + /* MDI-X setting for swap A,B transmit */ + val = lanphy_read_page_reg(phydev, 2, LAN8814_ALIGN_SWAP); + val &= ~LAN8814_ALIGN_TX_A_B_SWAP_MASK; + val |= LAN8814_ALIGN_TX_A_B_SWAP; + lanphy_write_page_reg(phydev, 2, LAN8814_ALIGN_SWAP, val); + + return 0; +} + +static void lan8814_parse_latency(struct phy_device *phydev) +{ + const struct device_node *np = phydev->mdio.dev.of_node; + struct kszphy_priv *priv = phydev->priv; + struct kszphy_latencies *latency = &priv->latencies; + u32 val; + + if (!of_property_read_u32(np, "lan8814,latency_rx_10", &val)) + latency->rx_10 = val; + if (!of_property_read_u32(np, "lan8814,latency_tx_10", &val)) + latency->tx_10 = val; + if (!of_property_read_u32(np, "lan8814,latency_rx_100", &val)) + latency->rx_100 = val; + if (!of_property_read_u32(np, "lan8814,latency_tx_100", &val)) + latency->tx_100 = val; + if (!of_property_read_u32(np, "lan8814,latency_rx_1000", &val)) + latency->rx_1000 = val; + if (!of_property_read_u32(np, "lan8814,latency_tx_1000", &val)) + latency->tx_1000 = val; +} + +static int lan8814_probe(struct phy_device *phydev) +{ + const struct device_node *np = phydev->mdio.dev.of_node; + struct kszphy_priv *priv; + u16 addr; + int err; + + priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->led_mode = -1; + + priv->latencies = lan8814_latencies; + + phydev->priv = priv; + + if (!IS_ENABLED(CONFIG_PTP_1588_CLOCK) || + !IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING) || + of_property_read_bool(np, "lan8814,ignore-ts")) + return 0; + + /* Strap-in value for PHY address, below register read gives starting + * phy address value + */ + addr = lanphy_read_page_reg(phydev, 4, 0) & 0x1F; + devm_phy_package_join(&phydev->mdio.dev, phydev, + addr, sizeof(struct lan8814_shared_priv)); + + if (phy_package_init_once(phydev)) { + err = lan8814_ptp_probe_once(phydev); + if (err) + return err; + } + + lan8814_parse_latency(phydev); + lan8814_ptp_init(phydev); + + return 0; +} + static struct phy_driver ksphy_driver[] = { { .phy_id = PHY_ID_KS8737, @@ -1890,10 +2926,9 @@ static struct phy_driver ksphy_driver[] = { .phy_id_mask = MICREL_PHY_ID_MASK, .name = "Microchip INDY Gigabit Quad PHY", .config_init = lan8814_config_init, - .driver_data = &ksz9021_type, - .probe = kszphy_probe, + .probe = lan8814_probe, .soft_reset = genphy_soft_reset, - .read_status = ksz9031_read_status, + .read_status = lan8814_read_status, .get_sset_count = kszphy_get_sset_count, .get_strings = kszphy_get_strings, .get_stats = kszphy_get_stats, diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c index bc50224d43dd..389df3f4293c 100644 --- a/drivers/net/phy/microchip_t1.c +++ 
b/drivers/net/phy/microchip_t1.c @@ -8,11 +8,17 @@ #include <linux/phy.h> #include <linux/ethtool.h> #include <linux/ethtool_netlink.h> +#include <linux/bitfield.h> + +#define PHY_ID_LAN87XX 0x0007c150 +#define PHY_ID_LAN937X 0x0007c180 /* External Register Control Register */ #define LAN87XX_EXT_REG_CTL (0x14) #define LAN87XX_EXT_REG_CTL_RD_CTL (0x1000) #define LAN87XX_EXT_REG_CTL_WR_CTL (0x0800) +#define LAN87XX_REG_BANK_SEL_MASK GENMASK(10, 8) +#define LAN87XX_REG_ADDR_MASK GENMASK(7, 0) /* External Register Read Data Register */ #define LAN87XX_EXT_REG_RD_DATA (0x15) @@ -37,6 +43,7 @@ #define PHYACC_ATTR_MODE_READ 0 #define PHYACC_ATTR_MODE_WRITE 1 #define PHYACC_ATTR_MODE_MODIFY 2 +#define PHYACC_ATTR_MODE_POLL 3 #define PHYACC_ATTR_BANK_SMI 0 #define PHYACC_ATTR_BANK_MISC 1 @@ -50,8 +57,33 @@ #define LAN87XX_CABLE_TEST_OPEN 1 #define LAN87XX_CABLE_TEST_SAME_SHORT 2 +/* T1 Registers */ +#define T1_AFE_PORT_CFG1_REG 0x0B +#define T1_POWER_DOWN_CONTROL_REG 0x1A +#define T1_SLV_FD_MULT_CFG_REG 0x18 +#define T1_CDR_CFG_PRE_LOCK_REG 0x05 +#define T1_CDR_CFG_POST_LOCK_REG 0x06 +#define T1_LCK_STG2_MUFACT_CFG_REG 0x1A +#define T1_LCK_STG3_MUFACT_CFG_REG 0x1B +#define T1_POST_LCK_MUFACT_CFG_REG 0x1C +#define T1_TX_RX_FIFO_CFG_REG 0x02 +#define T1_TX_LPF_FIR_CFG_REG 0x55 +#define T1_SQI_CONFIG_REG 0x2E +#define T1_MDIO_CONTROL2_REG 0x10 +#define T1_INTERRUPT_SOURCE_REG 0x18 +#define T1_INTERRUPT2_SOURCE_REG 0x08 +#define T1_EQ_FD_STG1_FRZ_CFG 0x69 +#define T1_EQ_FD_STG2_FRZ_CFG 0x6A +#define T1_EQ_FD_STG3_FRZ_CFG 0x6B +#define T1_EQ_FD_STG4_FRZ_CFG 0x6C +#define T1_EQ_WT_FD_LCK_FRZ_CFG 0x6D +#define T1_PST_EQ_LCK_STG1_FRZ_CFG 0x6E + +#define T1_MODE_STAT_REG 0x11 +#define T1_LINK_UP_MSK BIT(0) + #define DRIVER_AUTHOR "Nisar Sayed <nisar.sayed@microchip.com>" -#define DRIVER_DESC "Microchip LAN87XX T1 PHY driver" +#define DRIVER_DESC "Microchip LAN87XX/LAN937x T1 PHY driver" struct access_ereg_val { u8 mode; @@ -61,6 +93,37 @@ struct access_ereg_val { u16 mask; }; +static int lan937x_dsp_workaround(struct phy_device *phydev, u16 ereg, u8 bank) +{ + u8 prev_bank; + int rc = 0; + u16 val; + + mutex_lock(&phydev->lock); + /* Read previous selected bank */ + rc = phy_read(phydev, LAN87XX_EXT_REG_CTL); + if (rc < 0) + goto out_unlock; + + /* store the prev_bank */ + prev_bank = FIELD_GET(LAN87XX_REG_BANK_SEL_MASK, rc); + + if (bank != prev_bank && bank == PHYACC_ATTR_BANK_DSP) { + val = ereg & ~LAN87XX_REG_ADDR_MASK; + + val &= ~LAN87XX_EXT_REG_CTL_WR_CTL; + val |= LAN87XX_EXT_REG_CTL_RD_CTL; + + /* access twice for DSP bank change,dummy access */ + rc = phy_write(phydev, LAN87XX_EXT_REG_CTL, val); + } + +out_unlock: + mutex_unlock(&phydev->lock); + + return rc; +} + static int access_ereg(struct phy_device *phydev, u8 mode, u8 bank, u8 offset, u16 val) { @@ -89,6 +152,13 @@ static int access_ereg(struct phy_device *phydev, u8 mode, u8 bank, ereg |= (bank << 8) | offset; + /* DSP bank access workaround for lan937x */ + if (phydev->phy_id == PHY_ID_LAN937X) { + rc = lan937x_dsp_workaround(phydev, ereg, bank); + if (rc < 0) + return rc; + } + rc = phy_write(phydev, LAN87XX_EXT_REG_CTL, ereg); if (rc < 0) return rc; @@ -117,6 +187,15 @@ static int access_ereg_modify_changed(struct phy_device *phydev, return rc; } +static int access_smi_poll_timeout(struct phy_device *phydev, + u8 offset, u16 mask, u16 clr) +{ + int val; + + return phy_read_poll_timeout(phydev, offset, val, (val & mask) == clr, + 150, 30000, true); +} + static int lan87xx_config_rgmii_delay(struct phy_device *phydev) { int rc; @@ 
-157,68 +236,159 @@ static int lan87xx_config_rgmii_delay(struct phy_device *phydev) static int lan87xx_phy_init(struct phy_device *phydev) { static const struct access_ereg_val init[] = { - /* TX Amplitude = 5 */ - {PHYACC_ATTR_MODE_MODIFY, PHYACC_ATTR_BANK_AFE, 0x0B, - 0x000A, 0x001E}, - /* Clear SMI interrupts */ - {PHYACC_ATTR_MODE_READ, PHYACC_ATTR_BANK_SMI, 0x18, - 0, 0}, - /* Clear MISC interrupts */ - {PHYACC_ATTR_MODE_READ, PHYACC_ATTR_BANK_MISC, 0x08, - 0, 0}, - /* Turn on TC10 Ring Oscillator (ROSC) */ - {PHYACC_ATTR_MODE_MODIFY, PHYACC_ATTR_BANK_MISC, 0x20, - 0x0020, 0x0020}, - /* WUR Detect Length to 1.2uS, LPC Detect Length to 1.09uS */ - {PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_PCS, 0x20, - 0x283C, 0}, - /* Wake_In Debounce Length to 39uS, Wake_Out Length to 79uS */ - {PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_MISC, 0x21, - 0x274F, 0}, - /* Enable Auto Wake Forward to Wake_Out, ROSC on, Sleep, - * and Wake_In to wake PHY - */ - {PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_MISC, 0x20, - 0x80A7, 0}, - /* Enable WUP Auto Fwd, Enable Wake on MDI, Wakeup Debouncer - * to 128 uS - */ - {PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_MISC, 0x24, - 0xF110, 0}, - /* Enable HW Init */ - {PHYACC_ATTR_MODE_MODIFY, PHYACC_ATTR_BANK_SMI, 0x1A, - 0x0100, 0x0100}, + /* TXPD/TXAMP6 Configs */ + { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_AFE, + T1_AFE_PORT_CFG1_REG, 0x002D, 0 }, + /* HW_Init Hi and Force_ED */ + { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_SMI, + T1_POWER_DOWN_CONTROL_REG, 0x0308, 0 }, + /* Equalizer Full Duplex Freeze - T1 Slave */ + { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, + T1_EQ_FD_STG1_FRZ_CFG, 0x0002, 0 }, + { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, + T1_EQ_FD_STG2_FRZ_CFG, 0x0002, 0 }, + { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, + T1_EQ_FD_STG3_FRZ_CFG, 0x0002, 0 }, + { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, + T1_EQ_FD_STG4_FRZ_CFG, 0x0002, 0 }, + { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, + T1_EQ_WT_FD_LCK_FRZ_CFG, 0x0002, 0 }, + { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, + T1_PST_EQ_LCK_STG1_FRZ_CFG, 0x0002, 0 }, + /* Slave Full Duplex Multi Configs */ + { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, + T1_SLV_FD_MULT_CFG_REG, 0x0D53, 0 }, + /* CDR Pre and Post Lock Configs */ + { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, + T1_CDR_CFG_PRE_LOCK_REG, 0x0AB2, 0 }, + { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, + T1_CDR_CFG_POST_LOCK_REG, 0x0AB3, 0 }, + /* Lock Stage 2-3 Multi Factor Config */ + { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, + T1_LCK_STG2_MUFACT_CFG_REG, 0x0AEA, 0 }, + { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, + T1_LCK_STG3_MUFACT_CFG_REG, 0x0AEB, 0 }, + { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, + T1_POST_LCK_MUFACT_CFG_REG, 0x0AEB, 0 }, + /* Pointer delay */ + { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, + T1_TX_RX_FIFO_CFG_REG, 0x1C00, 0 }, + /* Tx iir edits */ + { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, + T1_TX_LPF_FIR_CFG_REG, 0x1000, 0 }, + { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, + T1_TX_LPF_FIR_CFG_REG, 0x1861, 0 }, + { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, + T1_TX_LPF_FIR_CFG_REG, 0x1061, 0 }, + { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, + T1_TX_LPF_FIR_CFG_REG, 0x1922, 0 }, + { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, + T1_TX_LPF_FIR_CFG_REG, 0x1122, 0 }, + { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, + T1_TX_LPF_FIR_CFG_REG, 0x1983, 0 }, + { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, + T1_TX_LPF_FIR_CFG_REG, 0x1183, 0 }, + { PHYACC_ATTR_MODE_WRITE, 
PHYACC_ATTR_BANK_DSP, + T1_TX_LPF_FIR_CFG_REG, 0x1944, 0 }, + { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, + T1_TX_LPF_FIR_CFG_REG, 0x1144, 0 }, + { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, + T1_TX_LPF_FIR_CFG_REG, 0x18c5, 0 }, + { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, + T1_TX_LPF_FIR_CFG_REG, 0x10c5, 0 }, + { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, + T1_TX_LPF_FIR_CFG_REG, 0x1846, 0 }, + { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, + T1_TX_LPF_FIR_CFG_REG, 0x1046, 0 }, + { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, + T1_TX_LPF_FIR_CFG_REG, 0x1807, 0 }, + { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, + T1_TX_LPF_FIR_CFG_REG, 0x1007, 0 }, + { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, + T1_TX_LPF_FIR_CFG_REG, 0x1808, 0 }, + { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, + T1_TX_LPF_FIR_CFG_REG, 0x1008, 0 }, + { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, + T1_TX_LPF_FIR_CFG_REG, 0x1809, 0 }, + { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, + T1_TX_LPF_FIR_CFG_REG, 0x1009, 0 }, + { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, + T1_TX_LPF_FIR_CFG_REG, 0x180A, 0 }, + { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, + T1_TX_LPF_FIR_CFG_REG, 0x100A, 0 }, + { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, + T1_TX_LPF_FIR_CFG_REG, 0x180B, 0 }, + { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, + T1_TX_LPF_FIR_CFG_REG, 0x100B, 0 }, + { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, + T1_TX_LPF_FIR_CFG_REG, 0x180C, 0 }, + { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, + T1_TX_LPF_FIR_CFG_REG, 0x100C, 0 }, + { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, + T1_TX_LPF_FIR_CFG_REG, 0x180D, 0 }, + { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, + T1_TX_LPF_FIR_CFG_REG, 0x100D, 0 }, + { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, + T1_TX_LPF_FIR_CFG_REG, 0x180E, 0 }, + { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, + T1_TX_LPF_FIR_CFG_REG, 0x100E, 0 }, + { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, + T1_TX_LPF_FIR_CFG_REG, 0x180F, 0 }, + { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, + T1_TX_LPF_FIR_CFG_REG, 0x100F, 0 }, + { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, + T1_TX_LPF_FIR_CFG_REG, 0x1810, 0 }, + { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, + T1_TX_LPF_FIR_CFG_REG, 0x1010, 0 }, + { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, + T1_TX_LPF_FIR_CFG_REG, 0x1811, 0 }, + { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, + T1_TX_LPF_FIR_CFG_REG, 0x1011, 0 }, + { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, + T1_TX_LPF_FIR_CFG_REG, 0x1000, 0 }, + /* SQI enable */ + { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, + T1_SQI_CONFIG_REG, 0x9572, 0 }, + /* Flag LPS and WUR as idle errors */ + { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_SMI, + T1_MDIO_CONTROL2_REG, 0x0014, 0 }, + /* HW_Init toggle, undo force ED, TXPD off */ + { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_SMI, + T1_POWER_DOWN_CONTROL_REG, 0x0200, 0 }, + /* Reset PCS to trigger hardware initialization */ + { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_SMI, + T1_MDIO_CONTROL2_REG, 0x0094, 0 }, + /* Poll till Hardware is initialized */ + { PHYACC_ATTR_MODE_POLL, PHYACC_ATTR_BANK_SMI, + T1_MDIO_CONTROL2_REG, 0x0080, 0 }, + /* Tx AMP - 0x06 */ + { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_AFE, + T1_AFE_PORT_CFG1_REG, 0x000C, 0 }, + /* Read INTERRUPT_SOURCE Register */ + { PHYACC_ATTR_MODE_READ, PHYACC_ATTR_BANK_SMI, + T1_INTERRUPT_SOURCE_REG, 0, 0 }, + /* Read INTERRUPT_SOURCE Register */ + { PHYACC_ATTR_MODE_READ, PHYACC_ATTR_BANK_MISC, + T1_INTERRUPT2_SOURCE_REG, 0, 0 }, + /* HW_Init Hi */ + { 
PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_SMI, + T1_POWER_DOWN_CONTROL_REG, 0x0300, 0 }, }; int rc, i; - /* Start manual initialization procedures in Managed Mode */ - rc = access_ereg_modify_changed(phydev, PHYACC_ATTR_BANK_SMI, - 0x1a, 0x0000, 0x0100); + /* phy Soft reset */ + rc = genphy_soft_reset(phydev); if (rc < 0) return rc; - /* Soft Reset the SMI block */ - rc = access_ereg_modify_changed(phydev, PHYACC_ATTR_BANK_SMI, - 0x00, 0x8000, 0x8000); - if (rc < 0) - return rc; - - /* Check to see if the self-clearing bit is cleared */ - usleep_range(1000, 2000); - rc = access_ereg(phydev, PHYACC_ATTR_MODE_READ, - PHYACC_ATTR_BANK_SMI, 0x00, 0); - if (rc < 0) - return rc; - if ((rc & 0x8000) != 0) - return -ETIMEDOUT; - /* PHY Initialization */ for (i = 0; i < ARRAY_SIZE(init); i++) { - if (init[i].mode == PHYACC_ATTR_MODE_MODIFY) { - rc = access_ereg_modify_changed(phydev, init[i].bank, - init[i].offset, - init[i].val, - init[i].mask); + if (init[i].mode == PHYACC_ATTR_MODE_POLL && + init[i].bank == PHYACC_ATTR_BANK_SMI) { + rc = access_smi_poll_timeout(phydev, + init[i].offset, + init[i].val, + init[i].mask); } else { rc = access_ereg(phydev, init[i].mode, init[i].bank, init[i].offset, init[i].val); @@ -504,22 +674,86 @@ static int lan87xx_cable_test_get_status(struct phy_device *phydev, return 0; } +static int lan87xx_read_status(struct phy_device *phydev) +{ + int rc = 0; + + rc = phy_read(phydev, T1_MODE_STAT_REG); + if (rc < 0) + return rc; + + if (rc & T1_LINK_UP_MSK) + phydev->link = 1; + else + phydev->link = 0; + + phydev->speed = SPEED_UNKNOWN; + phydev->duplex = DUPLEX_UNKNOWN; + phydev->pause = 0; + phydev->asym_pause = 0; + + rc = genphy_read_master_slave(phydev); + if (rc < 0) + return rc; + + rc = genphy_read_status_fixed(phydev); + if (rc < 0) + return rc; + + return rc; +} + +static int lan87xx_config_aneg(struct phy_device *phydev) +{ + u16 ctl = 0; + int rc; + + switch (phydev->master_slave_set) { + case MASTER_SLAVE_CFG_MASTER_FORCE: + ctl |= CTL1000_AS_MASTER; + break; + case MASTER_SLAVE_CFG_SLAVE_FORCE: + break; + case MASTER_SLAVE_CFG_UNKNOWN: + case MASTER_SLAVE_CFG_UNSUPPORTED: + return 0; + default: + phydev_warn(phydev, "Unsupported Master/Slave mode\n"); + return -EOPNOTSUPP; + } + + rc = phy_modify_changed(phydev, MII_CTRL1000, CTL1000_AS_MASTER, ctl); + if (rc == 1) + rc = genphy_soft_reset(phydev); + + return rc; +} + static struct phy_driver microchip_t1_phy_driver[] = { { - .phy_id = 0x0007c150, - .phy_id_mask = 0xfffffff0, + PHY_ID_MATCH_MODEL(PHY_ID_LAN87XX), .name = "Microchip LAN87xx T1", .flags = PHY_POLL_CABLE_TEST, - .features = PHY_BASIC_T1_FEATURES, - .config_init = lan87xx_config_init, - .config_intr = lan87xx_phy_config_intr, .handle_interrupt = lan87xx_handle_interrupt, - .suspend = genphy_suspend, .resume = genphy_resume, + .config_aneg = lan87xx_config_aneg, + .read_status = lan87xx_read_status, + .cable_test_start = lan87xx_cable_test_start, + .cable_test_get_status = lan87xx_cable_test_get_status, + }, + { + PHY_ID_MATCH_MODEL(PHY_ID_LAN937X), + .name = "Microchip LAN937x T1", + .features = PHY_BASIC_T1_FEATURES, + .config_init = lan87xx_config_init, + .suspend = genphy_suspend, + .resume = genphy_resume, + .config_aneg = lan87xx_config_aneg, + .read_status = lan87xx_read_status, .cable_test_start = lan87xx_cable_test_start, .cable_test_get_status = lan87xx_cable_test_get_status, } @@ -528,7 +762,8 @@ static struct phy_driver microchip_t1_phy_driver[] = { module_phy_driver(microchip_t1_phy_driver); static struct mdio_device_id 
__maybe_unused microchip_t1_tbl[] = { - { 0x0007c150, 0xfffffff0 }, + { PHY_ID_MATCH_MODEL(PHY_ID_LAN87XX) }, + { PHY_ID_MATCH_MODEL(PHY_ID_LAN937X) }, { } }; diff --git a/drivers/net/phy/mscc/mscc_ptp.c b/drivers/net/phy/mscc/mscc_ptp.c index 34f829845d06..cf728bfd83e2 100644 --- a/drivers/net/phy/mscc/mscc_ptp.c +++ b/drivers/net/phy/mscc/mscc_ptp.c @@ -1212,7 +1212,7 @@ static bool vsc85xx_rxtstamp(struct mii_timestamper *mii_ts, ts.tv_sec--; shhwtstamps->hwtstamp = ktime_set(ts.tv_sec, ns); - netif_rx_ni(skb); + netif_rx(skb); return true; } diff --git a/drivers/net/phy/nxp-c45-tja11xx.c b/drivers/net/phy/nxp-c45-tja11xx.c index 06fdbae509a7..047c581457e3 100644 --- a/drivers/net/phy/nxp-c45-tja11xx.c +++ b/drivers/net/phy/nxp-c45-tja11xx.c @@ -478,7 +478,7 @@ static long nxp_c45_do_aux_work(struct ptp_clock_info *ptp) shhwtstamps_rx = skb_hwtstamps(skb); shhwtstamps_rx->hwtstamp = ns_to_ktime(timespec64_to_ns(&ts)); NXP_C45_SKB_CB(skb)->header->reserved2 = 0; - netif_rx_ni(skb); + netif_rx(skb); } if (priv->extts) { diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index ce0bb5951b81..8406ac739def 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -2051,17 +2051,11 @@ static int genphy_setup_master_slave(struct phy_device *phydev) CTL1000_PREFER_MASTER), ctl); } -static int genphy_read_master_slave(struct phy_device *phydev) +int genphy_read_master_slave(struct phy_device *phydev) { int cfg, state; int val; - if (!phydev->is_gigabit_capable) { - phydev->master_slave_get = MASTER_SLAVE_CFG_UNSUPPORTED; - phydev->master_slave_state = MASTER_SLAVE_STATE_UNSUPPORTED; - return 0; - } - phydev->master_slave_get = MASTER_SLAVE_CFG_UNKNOWN; phydev->master_slave_state = MASTER_SLAVE_STATE_UNKNOWN; @@ -2102,6 +2096,7 @@ static int genphy_read_master_slave(struct phy_device *phydev) return 0; } +EXPORT_SYMBOL(genphy_read_master_slave); /** * genphy_restart_aneg - Enable and Restart Autonegotiation @@ -2396,14 +2391,18 @@ int genphy_read_status(struct phy_device *phydev) if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link) return 0; + phydev->master_slave_get = MASTER_SLAVE_CFG_UNSUPPORTED; + phydev->master_slave_state = MASTER_SLAVE_STATE_UNSUPPORTED; phydev->speed = SPEED_UNKNOWN; phydev->duplex = DUPLEX_UNKNOWN; phydev->pause = 0; phydev->asym_pause = 0; - err = genphy_read_master_slave(phydev); - if (err < 0) - return err; + if (phydev->is_gigabit_capable) { + err = genphy_read_master_slave(phydev); + if (err < 0) + return err; + } err = genphy_read_lpa(phydev); if (err < 0) diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 5b53a3e23c89..06943889d747 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -74,6 +74,7 @@ struct phylink { struct work_struct resolve; bool mac_link_dropped; + bool using_mac_select_pcs; struct sfp_bus *sfp_bus; bool sfp_may_have_phy; @@ -416,7 +417,7 @@ static int phylink_validate_mac_and_pcs(struct phylink *pl, int ret; /* Get the PCS for this interface mode */ - if (pl->mac_ops->mac_select_pcs) { + if (pl->using_mac_select_pcs) { pcs = pl->mac_ops->mac_select_pcs(pl->config, state->interface); if (IS_ERR(pcs)) return PTR_ERR(pcs); @@ -791,7 +792,7 @@ static void phylink_major_config(struct phylink *pl, bool restart, phylink_dbg(pl, "major config %s\n", phy_modes(state->interface)); - if (pl->mac_ops->mac_select_pcs) { + if (pl->using_mac_select_pcs) { pcs = pl->mac_ops->mac_select_pcs(pl->config, state->interface); if (IS_ERR(pcs)) { phylink_err(pl, @@ 
-814,8 +815,18 @@ static void phylink_major_config(struct phylink *pl, bool restart, /* If we have a new PCS, switch to the new PCS after preparing the MAC * for the change. */ - if (pcs) - phylink_set_pcs(pl, pcs); + if (pcs) { + pl->pcs = pcs; + pl->pcs_ops = pcs->ops; + + if (!pl->phylink_disable_state && + pl->cfg_link_an_mode == MLO_AN_INBAND) { + if (pcs->poll) + mod_timer(&pl->link_poll, jiffies + HZ); + else + del_timer(&pl->link_poll); + } + } phylink_mac_config(pl, state); @@ -1171,9 +1182,8 @@ static int phylink_register_sfp(struct phylink *pl, bus = sfp_bus_find_fwnode(fwnode); if (IS_ERR(bus)) { - ret = PTR_ERR(bus); - phylink_err(pl, "unable to attach SFP bus: %d\n", ret); - return ret; + phylink_err(pl, "unable to attach SFP bus: %pe\n", bus); + return PTR_ERR(bus); } pl->sfp_bus = bus; @@ -1205,11 +1215,17 @@ struct phylink *phylink_create(struct phylink_config *config, phy_interface_t iface, const struct phylink_mac_ops *mac_ops) { + bool using_mac_select_pcs = false; struct phylink *pl; int ret; - /* Validate the supplied configuration */ if (mac_ops->mac_select_pcs && + mac_ops->mac_select_pcs(config, PHY_INTERFACE_MODE_NA) != + ERR_PTR(-EOPNOTSUPP)) + using_mac_select_pcs = true; + + /* Validate the supplied configuration */ + if (using_mac_select_pcs && phy_interface_empty(config->supported_interfaces)) { dev_err(config->dev, "phylink: error: empty supported_interfaces but mac_select_pcs() method present\n"); @@ -1233,6 +1249,7 @@ struct phylink *phylink_create(struct phylink_config *config, return ERR_PTR(-EINVAL); } + pl->using_mac_select_pcs = using_mac_select_pcs; pl->phy_state.interface = iface; pl->link_interface = iface; if (iface == PHY_INTERFACE_MODE_MOCA) @@ -1279,36 +1296,6 @@ struct phylink *phylink_create(struct phylink_config *config, EXPORT_SYMBOL_GPL(phylink_create); /** - * phylink_set_pcs() - set the current PCS for phylink to use - * @pl: a pointer to a &struct phylink returned from phylink_create() - * @pcs: a pointer to the &struct phylink_pcs - * - * Bind the MAC PCS to phylink. This may be called after phylink_create(). - * If it is desired to dynamically change the PCS, then the preferred method - * is to use mac_select_pcs(), but it may also be called in mac_prepare() - * or mac_config(). - * - * Please note that there are behavioural changes with the mac_config() - * callback if a PCS is present (denoting a newer setup) so removing a PCS - * is not supported, and if a PCS is going to be used, it must be registered - * by calling phylink_set_pcs() at the latest in the first mac_config() call. 
- */ -void phylink_set_pcs(struct phylink *pl, struct phylink_pcs *pcs) -{ - pl->pcs = pcs; - pl->pcs_ops = pcs->ops; - - if (!pl->phylink_disable_state && - pl->cfg_link_an_mode == MLO_AN_INBAND) { - if (pl->config->pcs_poll || pcs->poll) - mod_timer(&pl->link_poll, jiffies + HZ); - else - del_timer(&pl->link_poll); - } -} -EXPORT_SYMBOL_GPL(phylink_set_pcs); - -/** * phylink_destroy() - cleanup and destroy the phylink instance * @pl: a pointer to a &struct phylink returned from phylink_create() * @@ -1392,11 +1379,11 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy, ret = phylink_validate(pl, supported, &config); if (ret) { - phylink_warn(pl, "validation of %s with support %*pb and advertisement %*pb failed: %d\n", + phylink_warn(pl, "validation of %s with support %*pb and advertisement %*pb failed: %pe\n", phy_modes(config.interface), __ETHTOOL_LINK_MODE_MASK_NBITS, phy->supported, __ETHTOOL_LINK_MODE_MASK_NBITS, config.advertising, - ret); + ERR_PTR(ret)); return ret; } @@ -1673,7 +1660,6 @@ void phylink_start(struct phylink *pl) poll |= pl->config->poll_fixed_state; break; case MLO_AN_INBAND: - poll |= pl->config->pcs_poll; if (pl->pcs) poll |= pl->pcs->poll; break; @@ -2596,8 +2582,9 @@ static int phylink_sfp_config(struct phylink *pl, u8 mode, /* Ignore errors if we're expecting a PHY to attach later */ ret = phylink_validate(pl, support, &config); if (ret) { - phylink_err(pl, "validation with support %*pb failed: %d\n", - __ETHTOOL_LINK_MODE_MASK_NBITS, support, ret); + phylink_err(pl, "validation with support %*pb failed: %pe\n", + __ETHTOOL_LINK_MODE_MASK_NBITS, support, + ERR_PTR(ret)); return ret; } @@ -2613,10 +2600,12 @@ static int phylink_sfp_config(struct phylink *pl, u8 mode, linkmode_copy(support1, support); ret = phylink_validate(pl, support1, &config); if (ret) { - phylink_err(pl, "validation of %s/%s with support %*pb failed: %d\n", + phylink_err(pl, + "validation of %s/%s with support %*pb failed: %pe\n", phylink_an_mode_str(mode), phy_modes(config.interface), - __ETHTOOL_LINK_MODE_MASK_NBITS, support, ret); + __ETHTOOL_LINK_MODE_MASK_NBITS, support, + ERR_PTR(ret)); return ret; } diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index 4720b24ca51b..4dfb79807823 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c @@ -471,8 +471,8 @@ static unsigned int sfp_soft_get_state(struct sfp *sfp) state |= SFP_F_TX_FAULT; } else { dev_err_ratelimited(sfp->dev, - "failed to read SFP soft status: %d\n", - ret); + "failed to read SFP soft status: %pe\n", + ERR_PTR(ret)); /* Preserve the current state */ state = sfp->state; } @@ -1311,7 +1311,8 @@ static void sfp_hwmon_probe(struct work_struct *work) mod_delayed_work(system_wq, &sfp->hwmon_probe, T_PROBE_RETRY_SLOW); } else { - dev_warn(sfp->dev, "hwmon probe failed: %d\n", err); + dev_warn(sfp->dev, "hwmon probe failed: %pe\n", + ERR_PTR(err)); } return; } @@ -1516,14 +1517,15 @@ static int sfp_sm_probe_phy(struct sfp *sfp, bool is_c45) if (phy == ERR_PTR(-ENODEV)) return PTR_ERR(phy); if (IS_ERR(phy)) { - dev_err(sfp->dev, "mdiobus scan returned %ld\n", PTR_ERR(phy)); + dev_err(sfp->dev, "mdiobus scan returned %pe\n", phy); return PTR_ERR(phy); } err = phy_device_register(phy); if (err) { phy_device_free(phy); - dev_err(sfp->dev, "phy_device_register failed: %d\n", err); + dev_err(sfp->dev, "phy_device_register failed: %pe\n", + ERR_PTR(err)); return err; } @@ -1531,7 +1533,7 @@ static int sfp_sm_probe_phy(struct sfp *sfp, bool is_c45) if (err) { phy_device_remove(phy); 
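/*
 * Editor's note on the %pe conversions in this file and in phylink.c above:
 * vsprintf's %pe specifier prints an ERR_PTR-encoded pointer as a symbolic
 * error name (e.g. "-ENODEV") when CONFIG_SYMBOLIC_ERRNAME is enabled,
 * which is why plain integer error codes are wrapped with ERR_PTR() at the
 * call sites. Illustration only:
 *
 *	err = -ENODEV;
 *	dev_err(dev, "probe failed: %pe\n", ERR_PTR(err));
 *	// logs "probe failed: -ENODEV" instead of "probe failed: -19"
 */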
phy_device_free(phy); - dev_err(sfp->dev, "sfp_add_phy failed: %d\n", err); + dev_err(sfp->dev, "sfp_add_phy failed: %pe\n", ERR_PTR(err)); return err; } @@ -1708,7 +1710,7 @@ static int sfp_sm_mod_hpower(struct sfp *sfp, bool enable) err = sfp_read(sfp, true, SFP_EXT_STATUS, &val, sizeof(val)); if (err != sizeof(val)) { - dev_err(sfp->dev, "Failed to read EEPROM: %d\n", err); + dev_err(sfp->dev, "Failed to read EEPROM: %pe\n", ERR_PTR(err)); return -EAGAIN; } @@ -1726,7 +1728,8 @@ static int sfp_sm_mod_hpower(struct sfp *sfp, bool enable) err = sfp_write(sfp, true, SFP_EXT_STATUS, &val, sizeof(val)); if (err != sizeof(val)) { - dev_err(sfp->dev, "Failed to write EEPROM: %d\n", err); + dev_err(sfp->dev, "Failed to write EEPROM: %pe\n", + ERR_PTR(err)); return -EAGAIN; } @@ -1778,7 +1781,9 @@ static int sfp_cotsworks_fixup_check(struct sfp *sfp, struct sfp_eeprom_id *id) id->base.connector = SFF8024_CONNECTOR_LC; err = sfp_write(sfp, false, SFP_PHYS_ID, &id->base, 3); if (err != 3) { - dev_err(sfp->dev, "Failed to rewrite module EEPROM: %d\n", err); + dev_err(sfp->dev, + "Failed to rewrite module EEPROM: %pe\n", + ERR_PTR(err)); return err; } @@ -1789,7 +1794,9 @@ static int sfp_cotsworks_fixup_check(struct sfp *sfp, struct sfp_eeprom_id *id) check = sfp_check(&id->base, sizeof(id->base) - 1); err = sfp_write(sfp, false, SFP_CC_BASE, &check, 1); if (err != 1) { - dev_err(sfp->dev, "Failed to update base structure checksum in fiber module EEPROM: %d\n", err); + dev_err(sfp->dev, + "Failed to update base structure checksum in fiber module EEPROM: %pe\n", + ERR_PTR(err)); return err; } } @@ -1814,12 +1821,13 @@ static int sfp_sm_mod_probe(struct sfp *sfp, bool report) ret = sfp_read(sfp, false, 0, &id.base, sizeof(id.base)); if (ret < 0) { if (report) - dev_err(sfp->dev, "failed to read EEPROM: %d\n", ret); + dev_err(sfp->dev, "failed to read EEPROM: %pe\n", + ERR_PTR(ret)); return -EAGAIN; } if (ret != sizeof(id.base)) { - dev_err(sfp->dev, "EEPROM short read: %d\n", ret); + dev_err(sfp->dev, "EEPROM short read: %pe\n", ERR_PTR(ret)); return -EAGAIN; } @@ -1839,13 +1847,15 @@ static int sfp_sm_mod_probe(struct sfp *sfp, bool report) ret = sfp_read(sfp, false, 0, &id.base, sizeof(id.base)); if (ret < 0) { if (report) - dev_err(sfp->dev, "failed to read EEPROM: %d\n", - ret); + dev_err(sfp->dev, + "failed to read EEPROM: %pe\n", + ERR_PTR(ret)); return -EAGAIN; } if (ret != sizeof(id.base)) { - dev_err(sfp->dev, "EEPROM short read: %d\n", ret); + dev_err(sfp->dev, "EEPROM short read: %pe\n", + ERR_PTR(ret)); return -EAGAIN; } } @@ -1887,12 +1897,13 @@ static int sfp_sm_mod_probe(struct sfp *sfp, bool report) ret = sfp_read(sfp, false, SFP_CC_BASE + 1, &id.ext, sizeof(id.ext)); if (ret < 0) { if (report) - dev_err(sfp->dev, "failed to read EEPROM: %d\n", ret); + dev_err(sfp->dev, "failed to read EEPROM: %pe\n", + ERR_PTR(ret)); return -EAGAIN; } if (ret != sizeof(id.ext)) { - dev_err(sfp->dev, "EEPROM short read: %d\n", ret); + dev_err(sfp->dev, "EEPROM short read: %pe\n", ERR_PTR(ret)); return -EAGAIN; } @@ -2046,7 +2057,8 @@ static void sfp_sm_module(struct sfp *sfp, unsigned int event) err = sfp_hwmon_insert(sfp); if (err) - dev_warn(sfp->dev, "hwmon probe failed: %d\n", err); + dev_warn(sfp->dev, "hwmon probe failed: %pe\n", + ERR_PTR(err)); sfp_sm_mod_next(sfp, SFP_MOD_WAITDEV, 0); fallthrough; diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c index 8b5445a724ce..ff37f8ba6758 100644 --- a/drivers/net/phy/spi_ks8995.c +++ b/drivers/net/phy/spi_ks8995.c @@ -517,7 +517,7 
@@ static int ks8995_probe(struct spi_device *spi) return 0; } -static int ks8995_remove(struct spi_device *spi) +static void ks8995_remove(struct spi_device *spi) { struct ks8995_switch *ks = spi_get_drvdata(spi); @@ -526,8 +526,6 @@ static int ks8995_remove(struct spi_device *spi) /* assert reset */ if (ks->pdata && gpio_is_valid(ks->pdata->reset_gpio)) gpiod_set_value(gpio_to_desc(ks->pdata->reset_gpio), 1); - - return 0; } /* ------------------------------------------------------------------------ */ diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c index 0d491b4d6667..dafd3e9ebbf8 100644 --- a/drivers/net/plip/plip.c +++ b/drivers/net/plip/plip.c @@ -676,7 +676,7 @@ plip_receive_packet(struct net_device *dev, struct net_local *nl, case PLIP_PK_DONE: /* Inform the upper layer for the arrival of a packet. */ rcv->skb->protocol=plip_type_trans(rcv->skb, dev); - netif_rx_ni(rcv->skb); + netif_rx(rcv->skb); dev->stats.rx_bytes += rcv->length.h; dev->stats.rx_packets++; rcv->skb = NULL; diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c index 1a95f3beb784..39e61e07e489 100644 --- a/drivers/net/rionet.c +++ b/drivers/net/rionet.c @@ -109,7 +109,7 @@ static int rionet_rx_clean(struct net_device *ndev) skb_put(rnet->rx_skb[i], RIO_MAX_MSG_SIZE); rnet->rx_skb[i]->protocol = eth_type_trans(rnet->rx_skb[i], ndev); - error = netif_rx(rnet->rx_skb[i]); + error = __netif_rx(rnet->rx_skb[i]); if (error == NET_RX_DROP) { ndev->stats.rx_dropped++; diff --git a/drivers/net/sb1000.c b/drivers/net/sb1000.c index 57a6d598467b..c3f8020571ad 100644 --- a/drivers/net/sb1000.c +++ b/drivers/net/sb1000.c @@ -872,7 +872,7 @@ printk("cm0: IP identification: %02x%02x fragment offset: %02x%02x\n", buffer[3 /* datagram completed: send to upper level */ skb_trim(skb, dlen); - netif_rx(skb); + __netif_rx(skb); stats->rx_bytes+=dlen; stats->rx_packets++; lp->rx_skb[ns] = NULL; diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c index 98f586f910fb..88396ff99f03 100644 --- a/drivers/net/slip/slip.c +++ b/drivers/net/slip/slip.c @@ -368,7 +368,7 @@ static void sl_bump(struct slip *sl) skb_put_data(skb, sl->rbuff, count); skb_reset_mac_header(skb); skb->protocol = htons(ETH_P_IP); - netif_rx_ni(skb); + netif_rx(skb); dev->stats.rx_packets++; } diff --git a/drivers/net/tap.c b/drivers/net/tap.c index 8e3a28ba6b28..c3d42062559d 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c @@ -322,6 +322,7 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb) struct tap_dev *tap; struct tap_queue *q; netdev_features_t features = TAP_FEATURES; + enum skb_drop_reason drop_reason; tap = tap_dev_get_rcu(dev); if (!tap) @@ -343,12 +344,16 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb) struct sk_buff *segs = __skb_gso_segment(skb, features, false); struct sk_buff *next; - if (IS_ERR(segs)) + if (IS_ERR(segs)) { + drop_reason = SKB_DROP_REASON_SKB_GSO_SEG; goto drop; + } if (!segs) { - if (ptr_ring_produce(&q->ring, skb)) + if (ptr_ring_produce(&q->ring, skb)) { + drop_reason = SKB_DROP_REASON_FULL_RING; goto drop; + } goto wake_up; } @@ -356,8 +361,9 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb) skb_list_walk_safe(segs, skb, next) { skb_mark_not_on_list(skb); if (ptr_ring_produce(&q->ring, skb)) { - kfree_skb(skb); - kfree_skb_list(next); + drop_reason = SKB_DROP_REASON_FULL_RING; + kfree_skb_reason(skb, drop_reason); + kfree_skb_list_reason(next, drop_reason); break; } } @@ -369,10 +375,14 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb) */ if 
(skb->ip_summed == CHECKSUM_PARTIAL && !(features & NETIF_F_CSUM_MASK) && - skb_checksum_help(skb)) + skb_checksum_help(skb)) { + drop_reason = SKB_DROP_REASON_SKB_CSUM; goto drop; - if (ptr_ring_produce(&q->ring, skb)) + } + if (ptr_ring_produce(&q->ring, skb)) { + drop_reason = SKB_DROP_REASON_FULL_RING; goto drop; + } } wake_up: @@ -383,7 +393,7 @@ drop: /* Count errors/drops only here, thus don't care about args. */ if (tap->count_rx_dropped) tap->count_rx_dropped(tap); - kfree_skb(skb); + kfree_skb_reason(skb, drop_reason); return RX_HANDLER_CONSUMED; } EXPORT_SYMBOL_GPL(tap_handle_frame); @@ -632,6 +642,7 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control, int depth; bool zerocopy = false; size_t linear; + enum skb_drop_reason drop_reason; if (q->flags & IFF_VNET_HDR) { vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz); @@ -696,8 +707,10 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control, else err = skb_copy_datagram_from_iter(skb, 0, from, len); - if (err) + if (err) { + drop_reason = SKB_DROP_REASON_SKB_UCOPY_FAULT; goto err_kfree; + } skb_set_network_header(skb, ETH_HLEN); skb_reset_mac_header(skb); @@ -706,8 +719,10 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control, if (vnet_hdr_len) { err = virtio_net_hdr_to_skb(skb, &vnet_hdr, tap_is_little_endian(q)); - if (err) + if (err) { + drop_reason = SKB_DROP_REASON_DEV_HDR; goto err_kfree; + } } skb_probe_transport_header(skb); @@ -738,7 +753,7 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control, return total_len; err_kfree: - kfree_skb(skb); + kfree_skb_reason(skb, drop_reason); err: rcu_read_lock(); @@ -1198,7 +1213,8 @@ static int tap_sendmsg(struct socket *sock, struct msghdr *m, struct xdp_buff *xdp; int i; - if (ctl && (ctl->type == TUN_MSG_PTR)) { + if (m->msg_controllen == sizeof(struct tun_msg_ctl) && + ctl && ctl->type == TUN_MSG_PTR) { for (i = 0; i < ctl->num; i++) { xdp = &((struct xdp_buff *)ctl->ptr)[i]; tap_get_user_xdp(q, xdp); diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 8b2adc56b92a..b07dde6f0abf 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -734,6 +734,11 @@ static rx_handler_result_t team_handle_frame(struct sk_buff **pskb) port = team_port_get_rcu(skb->dev); team = port->team; if (!team_port_enabled(port)) { + if (is_link_local_ether_addr(eth_hdr(skb)->h_dest)) + /* link-local packets are mostly useful when stack receives them + * with the link they arrive on. 
+ */ + return RX_HANDLER_PASS; /* allow exact match delivery for disabled ports */ res = RX_HANDLER_EXACT; } else { diff --git a/drivers/net/tun.c b/drivers/net/tun.c index fed85447701a..2b9a22669a12 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1058,6 +1058,7 @@ static unsigned int run_ebpf_filter(struct tun_struct *tun, static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) { struct tun_struct *tun = netdev_priv(dev); + enum skb_drop_reason drop_reason; int txq = skb->queue_mapping; struct netdev_queue *queue; struct tun_file *tfile; @@ -1067,8 +1068,10 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) tfile = rcu_dereference(tun->tfiles[txq]); /* Drop packet if interface is not attached */ - if (!tfile) + if (!tfile) { + drop_reason = SKB_DROP_REASON_DEV_READY; goto drop; + } if (!rcu_dereference(tun->steering_prog)) tun_automq_xmit(tun, skb); @@ -1078,19 +1081,32 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) /* Drop if the filter does not like it. * This is a noop if the filter is disabled. * Filter can be enabled only for the TAP devices. */ - if (!check_filter(&tun->txflt, skb)) + if (!check_filter(&tun->txflt, skb)) { + drop_reason = SKB_DROP_REASON_TAP_TXFILTER; goto drop; + } if (tfile->socket.sk->sk_filter && - sk_filter(tfile->socket.sk, skb)) + sk_filter(tfile->socket.sk, skb)) { + drop_reason = SKB_DROP_REASON_SOCKET_FILTER; goto drop; + } len = run_ebpf_filter(tun, skb, len); - if (len == 0 || pskb_trim(skb, len)) + if (len == 0) { + drop_reason = SKB_DROP_REASON_TAP_FILTER; goto drop; + } + + if (pskb_trim(skb, len)) { + drop_reason = SKB_DROP_REASON_NOMEM; + goto drop; + } - if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) + if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) { + drop_reason = SKB_DROP_REASON_SKB_UCOPY_FAULT; goto drop; + } skb_tx_timestamp(skb); @@ -1101,8 +1117,10 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) nf_reset_ct(skb); - if (ptr_ring_produce(&tfile->tx_ring, skb)) + if (ptr_ring_produce(&tfile->tx_ring, skb)) { + drop_reason = SKB_DROP_REASON_FULL_RING; goto drop; + } /* NETIF_F_LLTX requires to do our own update of trans_start */ queue = netdev_get_tx_queue(dev, txq); @@ -1119,7 +1137,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) drop: atomic_long_inc(&dev->tx_dropped); skb_tx_error(skb); - kfree_skb(skb); + kfree_skb_reason(skb, drop_reason); rcu_read_unlock(); return NET_XMIT_DROP; } @@ -1717,6 +1735,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, u32 rxhash = 0; int skb_xdp = 1; bool frags = tun_napi_frags_enabled(tfile); + enum skb_drop_reason drop_reason; if (!(tun->flags & IFF_NO_PI)) { if (len < sizeof(pi)) @@ -1820,9 +1839,10 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, if (err) { err = -EFAULT; + drop_reason = SKB_DROP_REASON_SKB_UCOPY_FAULT; drop: atomic_long_inc(&tun->dev->rx_dropped); - kfree_skb(skb); + kfree_skb_reason(skb, drop_reason); if (frags) { tfile->napi.skb = NULL; mutex_unlock(&tfile->napi_mutex); @@ -1869,6 +1889,7 @@ drop: case IFF_TAP: if (frags && !pskb_may_pull(skb, ETH_HLEN)) { err = -ENOMEM; + drop_reason = SKB_DROP_REASON_HDR_TRUNC; goto drop; } skb->protocol = eth_type_trans(skb, tun->dev); @@ -1922,6 +1943,7 @@ drop: if (unlikely(!(tun->dev->flags & IFF_UP))) { err = -EIO; rcu_read_unlock(); + drop_reason = SKB_DROP_REASON_DEV_READY; goto drop; } @@ -1962,7 +1984,7 @@ drop: } 
else if (!IS_ENABLED(CONFIG_4KSTACKS)) { tun_rx_batched(tun, tfile, skb, more); } else { - netif_rx_ni(skb); + netif_rx(skb); } rcu_read_unlock(); @@ -2388,9 +2410,10 @@ static int tun_xdp_one(struct tun_struct *tun, struct virtio_net_hdr *gso = &hdr->gso; struct bpf_prog *xdp_prog; struct sk_buff *skb = NULL; + struct sk_buff_head *queue; u32 rxhash = 0, act; int buflen = hdr->buflen; - int err = 0; + int ret = 0; bool skb_xdp = false; struct page *page; @@ -2405,13 +2428,13 @@ static int tun_xdp_one(struct tun_struct *tun, xdp_set_data_meta_invalid(xdp); act = bpf_prog_run_xdp(xdp_prog, xdp); - err = tun_xdp_act(tun, xdp_prog, xdp, act); - if (err < 0) { + ret = tun_xdp_act(tun, xdp_prog, xdp, act); + if (ret < 0) { put_page(virt_to_head_page(xdp->data)); - return err; + return ret; } - switch (err) { + switch (ret) { case XDP_REDIRECT: *flush = true; fallthrough; @@ -2435,7 +2458,7 @@ static int tun_xdp_one(struct tun_struct *tun, build: skb = build_skb(xdp->data_hard_start, buflen); if (!skb) { - err = -ENOMEM; + ret = -ENOMEM; goto out; } @@ -2445,7 +2468,7 @@ build: if (virtio_net_hdr_to_skb(skb, gso, tun_is_little_endian(tun))) { atomic_long_inc(&tun->rx_frame_errors); kfree_skb(skb); - err = -EINVAL; + ret = -EINVAL; goto out; } @@ -2455,16 +2478,27 @@ build: skb_record_rx_queue(skb, tfile->queue_index); if (skb_xdp) { - err = do_xdp_generic(xdp_prog, skb); - if (err != XDP_PASS) + ret = do_xdp_generic(xdp_prog, skb); + if (ret != XDP_PASS) { + ret = 0; goto out; + } } if (!rcu_dereference(tun->steering_prog) && tun->numqueues > 1 && !tfile->detached) rxhash = __skb_get_hash_symmetric(skb); - netif_receive_skb(skb); + if (tfile->napi_enabled) { + queue = &tfile->sk.sk_write_queue; + spin_lock(&queue->lock); + __skb_queue_tail(queue, skb); + spin_unlock(&queue->lock); + ret = 1; + } else { + netif_receive_skb(skb); + ret = 0; + } /* No need to disable preemption here since this function is * always called with bh disabled @@ -2475,7 +2509,7 @@ build: tun_flow_update(tun, rxhash, tfile); out: - return err; + return ret; } static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len) @@ -2489,10 +2523,11 @@ static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len) if (!tun) return -EBADFD; - if (ctl && (ctl->type == TUN_MSG_PTR)) { + if (m->msg_controllen == sizeof(struct tun_msg_ctl) && + ctl && ctl->type == TUN_MSG_PTR) { struct tun_page tpage; int n = ctl->num; - int flush = 0; + int flush = 0, queued = 0; memset(&tpage, 0, sizeof(tpage)); @@ -2501,12 +2536,17 @@ static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len) for (i = 0; i < n; i++) { xdp = &((struct xdp_buff *)ctl->ptr)[i]; - tun_xdp_one(tun, tfile, xdp, &flush, &tpage); + ret = tun_xdp_one(tun, tfile, xdp, &flush, &tpage); + if (ret > 0) + queued += ret; } if (flush) xdp_do_flush(); + if (tfile->napi_enabled && queued > 0) + napi_schedule(&tfile->napi); + rcu_read_unlock(); local_bh_enable(); diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c index 524805285019..632fa6c1d5e3 100644 --- a/drivers/net/usb/asix_common.c +++ b/drivers/net/usb/asix_common.c @@ -491,7 +491,8 @@ void asix_set_multicast(struct net_device *net) asix_write_cmd_async(dev, AX_CMD_WRITE_RX_CTL, rx_ctl, 0, 0, NULL); } -int asix_mdio_read(struct net_device *netdev, int phy_id, int loc) +static int __asix_mdio_read(struct net_device *netdev, int phy_id, int loc, + bool in_pm) { struct usbnet *dev = netdev_priv(netdev); __le16 res; @@ -499,18 +500,18 @@ int 
asix_mdio_read(struct net_device *netdev, int phy_id, int loc) mutex_lock(&dev->phy_mutex); - ret = asix_check_host_enable(dev, 0); + ret = asix_check_host_enable(dev, in_pm); if (ret == -ENODEV || ret == -ETIMEDOUT) { mutex_unlock(&dev->phy_mutex); return ret; } ret = asix_read_cmd(dev, AX_CMD_READ_MII_REG, phy_id, (__u16)loc, 2, - &res, 0); + &res, in_pm); if (ret < 0) goto out; - ret = asix_set_hw_mii(dev, 0); + ret = asix_set_hw_mii(dev, in_pm); out: mutex_unlock(&dev->phy_mutex); @@ -520,8 +521,13 @@ out: return ret < 0 ? ret : le16_to_cpu(res); } +int asix_mdio_read(struct net_device *netdev, int phy_id, int loc) +{ + return __asix_mdio_read(netdev, phy_id, loc, false); +} + static int __asix_mdio_write(struct net_device *netdev, int phy_id, int loc, - int val) + int val, bool in_pm) { struct usbnet *dev = netdev_priv(netdev); __le16 res = cpu_to_le16(val); @@ -532,16 +538,16 @@ static int __asix_mdio_write(struct net_device *netdev, int phy_id, int loc, mutex_lock(&dev->phy_mutex); - ret = asix_check_host_enable(dev, 0); + ret = asix_check_host_enable(dev, in_pm); if (ret == -ENODEV) goto out; ret = asix_write_cmd(dev, AX_CMD_WRITE_MII_REG, phy_id, (__u16)loc, 2, - &res, 0); + &res, in_pm); if (ret < 0) goto out; - ret = asix_set_hw_mii(dev, 0); + ret = asix_set_hw_mii(dev, in_pm); out: mutex_unlock(&dev->phy_mutex); @@ -550,7 +556,7 @@ out: void asix_mdio_write(struct net_device *netdev, int phy_id, int loc, int val) { - __asix_mdio_write(netdev, phy_id, loc, val); + __asix_mdio_write(netdev, phy_id, loc, val, false); } /* MDIO read and write wrappers for phylib */ @@ -558,67 +564,25 @@ int asix_mdio_bus_read(struct mii_bus *bus, int phy_id, int regnum) { struct usbnet *priv = bus->priv; - return asix_mdio_read(priv->net, phy_id, regnum); + return __asix_mdio_read(priv->net, phy_id, regnum, false); } int asix_mdio_bus_write(struct mii_bus *bus, int phy_id, int regnum, u16 val) { struct usbnet *priv = bus->priv; - return __asix_mdio_write(priv->net, phy_id, regnum, val); + return __asix_mdio_write(priv->net, phy_id, regnum, val, false); } int asix_mdio_read_nopm(struct net_device *netdev, int phy_id, int loc) { - struct usbnet *dev = netdev_priv(netdev); - __le16 res; - int ret; - - mutex_lock(&dev->phy_mutex); - - ret = asix_check_host_enable(dev, 1); - if (ret == -ENODEV || ret == -ETIMEDOUT) { - mutex_unlock(&dev->phy_mutex); - return ret; - } - - ret = asix_read_cmd(dev, AX_CMD_READ_MII_REG, phy_id, - (__u16)loc, 2, &res, 1); - if (ret < 0) { - mutex_unlock(&dev->phy_mutex); - return ret; - } - asix_set_hw_mii(dev, 1); - mutex_unlock(&dev->phy_mutex); - - netdev_dbg(dev->net, "asix_mdio_read_nopm() phy_id=0x%02x, loc=0x%02x, returns=0x%04x\n", - phy_id, loc, le16_to_cpu(res)); - - return le16_to_cpu(res); + return __asix_mdio_read(netdev, phy_id, loc, true); } void asix_mdio_write_nopm(struct net_device *netdev, int phy_id, int loc, int val) { - struct usbnet *dev = netdev_priv(netdev); - __le16 res = cpu_to_le16(val); - int ret; - - netdev_dbg(dev->net, "asix_mdio_write() phy_id=0x%02x, loc=0x%02x, val=0x%04x\n", - phy_id, loc, val); - - mutex_lock(&dev->phy_mutex); - - ret = asix_check_host_enable(dev, 1); - if (ret == -ENODEV) { - mutex_unlock(&dev->phy_mutex); - return; - } - - asix_write_cmd(dev, AX_CMD_WRITE_MII_REG, phy_id, - (__u16)loc, 2, &res, 1); - asix_set_hw_mii(dev, 1); - mutex_unlock(&dev->phy_mutex); + __asix_mdio_write(netdev, phy_id, loc, val, true); } void asix_get_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo) diff --git 
a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index 1a627ba4b850..a31098981a65 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c @@ -1468,58 +1468,68 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb) u16 hdr_off; u32 *pkt_hdr; - /* This check is no longer done by usbnet */ - if (skb->len < dev->net->hard_header_len) + /* At the end of the SKB, there's a header telling us how many packets + * are bundled into this buffer and where we can find an array of + * per-packet metadata (which contains elements encoded into u16). + */ + if (skb->len < 4) return 0; - skb_trim(skb, skb->len - 4); rx_hdr = get_unaligned_le32(skb_tail_pointer(skb)); - pkt_cnt = (u16)rx_hdr; hdr_off = (u16)(rx_hdr >> 16); + + if (pkt_cnt == 0) + return 0; + + /* Make sure that the bounds of the metadata array are inside the SKB + * (and in front of the counter at the end). + */ + if (pkt_cnt * 2 + hdr_off > skb->len) + return 0; pkt_hdr = (u32 *)(skb->data + hdr_off); - while (pkt_cnt--) { + /* Packets must not overlap the metadata array */ + skb_trim(skb, hdr_off); + + for (; ; pkt_cnt--, pkt_hdr++) { u16 pkt_len; le32_to_cpus(pkt_hdr); pkt_len = (*pkt_hdr >> 16) & 0x1fff; - /* Check CRC or runt packet */ - if ((*pkt_hdr & AX_RXHDR_CRC_ERR) || - (*pkt_hdr & AX_RXHDR_DROP_ERR)) { - skb_pull(skb, (pkt_len + 7) & 0xFFF8); - pkt_hdr++; - continue; - } - - if (pkt_cnt == 0) { - skb->len = pkt_len; - /* Skip IP alignment pseudo header */ - skb_pull(skb, 2); - skb_set_tail_pointer(skb, skb->len); - skb->truesize = pkt_len + sizeof(struct sk_buff); - ax88179_rx_checksum(skb, pkt_hdr); - return 1; - } + if (pkt_len > skb->len) + return 0; - ax_skb = skb_clone(skb, GFP_ATOMIC); - if (ax_skb) { + /* Check CRC or runt packet */ + if (((*pkt_hdr & (AX_RXHDR_CRC_ERR | AX_RXHDR_DROP_ERR)) == 0) && + pkt_len >= 2 + ETH_HLEN) { + bool last = (pkt_cnt == 0); + + if (last) { + ax_skb = skb; + } else { + ax_skb = skb_clone(skb, GFP_ATOMIC); + if (!ax_skb) + return 0; + } ax_skb->len = pkt_len; /* Skip IP alignment pseudo header */ skb_pull(ax_skb, 2); skb_set_tail_pointer(ax_skb, ax_skb->len); ax_skb->truesize = pkt_len + sizeof(struct sk_buff); ax88179_rx_checksum(ax_skb, pkt_hdr); + + if (last) + return 1; + usbnet_skb_return(dev, ax_skb); - } else { - return 0; } - skb_pull(skb, (pkt_len + 7) & 0xFFF8); - pkt_hdr++; + /* Trim this packet away from the SKB */ + if (!skb_pull(skb, (pkt_len + 7) & 0xFFF8)) + return 0; } - return 1; } static struct sk_buff * diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index eb3817d70f2b..9b4dfa3001d6 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -583,6 +583,11 @@ static const struct usb_device_id products[] = { .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, \ .bInterfaceProtocol = USB_CDC_PROTO_NONE +#define ZAURUS_FAKE_INTERFACE \ + .bInterfaceClass = USB_CLASS_COMM, \ + .bInterfaceSubClass = USB_CDC_SUBCLASS_MDLM, \ + .bInterfaceProtocol = USB_CDC_PROTO_NONE + /* SA-1100 based Sharp Zaurus ("collie"), or compatible; * wire-incompatible with true CDC Ethernet implementations. * (And, it seems, needlessly so...) 
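The ax88179_178a rx_fixup rewrite earlier in this hunk is essentially a bounds-checking exercise: the device appends a 4-byte trailer encoding a packet count and the offset of a per-packet metadata array, and every wire-supplied value must be range-checked before use. A standalone sketch of that validation, assuming a little-endian host and the 2-byte element size used by the patch's own check; all names are illustrative:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define META_ENTRY_SIZE 2   /* bytes per metadata element, per the patch */

/* Validate the RX trailer: the last 4 bytes hold pkt_cnt (low 16 bits)
 * and hdr_off (high 16 bits); the metadata array starting at hdr_off
 * must lie entirely inside the buffer. */
static bool rx_trailer_ok(const uint8_t *buf, size_t len)
{
    uint32_t rx_hdr;
    uint32_t pkt_cnt, hdr_off;

    if (len < 4)
        return false;

    memcpy(&rx_hdr, buf + len - 4, sizeof(rx_hdr)); /* LE host assumed */
    pkt_cnt = rx_hdr & 0xffff;
    hdr_off = rx_hdr >> 16;

    if (pkt_cnt == 0)
        return false;

    /* both fields come from the wire; reject anything out of range */
    return (size_t)pkt_cnt * META_ENTRY_SIZE + hdr_off <= len;
}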
@@ -640,6 +645,13 @@ static const struct usb_device_id products[] = { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x04DD, + .idProduct = 0x9032, /* SL-6000 */ + ZAURUS_FAKE_INTERFACE, + .driver_info = 0, +}, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x04DD, /* reported with some C860 units */ .idProduct = 0x9050, /* C-860 */ ZAURUS_MASTER_INTERFACE, diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c index a7c1434fe2da..c89639381eca 100644 --- a/drivers/net/usb/cdc_mbim.c +++ b/drivers/net/usb/cdc_mbim.c @@ -660,6 +660,11 @@ static const struct usb_device_id mbim_devs[] = { .driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle, }, + /* Telit FN990 */ + { USB_DEVICE_AND_INTERFACE_INFO(0x1bc7, 0x1071, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), + .driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle, + }, + /* default entry */ { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), .driver_info = (unsigned long)&cdc_mbim_info_zlp, diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index e303b522efb5..15f91d691bba 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -1715,10 +1715,10 @@ int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in) { struct sk_buff *skb; struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; - int len; + unsigned int len; int nframes; int x; - int offset; + unsigned int offset; union { struct usb_cdc_ncm_ndp16 *ndp16; struct usb_cdc_ncm_ndp32 *ndp32; @@ -1790,8 +1790,8 @@ next_ndp: break; } - /* sanity checking */ - if (((offset + len) > skb_in->len) || + /* sanity checking - watch out for integer wrap */ + if ((offset > skb_in->len) || (len > skb_in->len - offset) || (len > ctx->rx_max) || (len < ETH_HLEN)) { netif_dbg(dev, rx_err, dev->net, "invalid frame detected (ignored) offset[%u]=%u, length=%u, skb=%p\n", diff --git a/drivers/net/usb/gl620a.c b/drivers/net/usb/gl620a.c index 13a9a83b8538..46af78caf457 100644 --- a/drivers/net/usb/gl620a.c +++ b/drivers/net/usb/gl620a.c @@ -56,7 +56,7 @@ struct gl_packet { __le32 packet_length; - char packet_data [1]; + char packet_data[]; }; struct gl_header { diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index f97813a4e8d1..f8221a7acf62 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -2319,7 +2319,7 @@ static struct hso_device *hso_create_device(struct usb_interface *intf, { struct hso_device *hso_dev; - hso_dev = kzalloc(sizeof(*hso_dev), GFP_ATOMIC); + hso_dev = kzalloc(sizeof(*hso_dev), GFP_KERNEL); if (!hso_dev) return NULL; diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index 5567220e9d16..4ef61f6b85df 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -86,9 +86,10 @@ static int __must_check __smsc95xx_read_reg(struct usbnet *dev, u32 index, ret = fn(dev, USB_VENDOR_REQUEST_READ_REGISTER, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, index, &buf, 4); - if (unlikely(ret < 0)) { - netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n", - index, ret); + if (ret < 0) { + if (ret != -ENODEV) + netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n", + index, ret); return ret; } @@ -118,7 +119,7 @@ static int __must_check __smsc95xx_write_reg(struct usbnet *dev, u32 index, ret = fn(dev, USB_VENDOR_REQUEST_WRITE_REGISTER, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, index, &buf, 4); - if 
(unlikely(ret < 0)) + if (ret < 0 && ret != -ENODEV) netdev_warn(dev->net, "Failed to write reg index 0x%08x: %d\n", index, ret); @@ -161,6 +162,9 @@ static int __must_check __smsc95xx_phy_wait_not_busy(struct usbnet *dev, do { ret = __smsc95xx_read_reg(dev, MII_ADDR, &val, in_pm); if (ret < 0) { + /* Ignore -ENODEV error during disconnect() */ + if (ret == -ENODEV) + return 0; netdev_warn(dev->net, "Error reading MII_ACCESS\n"); return ret; } @@ -196,7 +200,8 @@ static int __smsc95xx_mdio_read(struct usbnet *dev, int phy_id, int idx, addr = mii_address_cmd(phy_id, idx, MII_READ_ | MII_BUSY_); ret = __smsc95xx_write_reg(dev, MII_ADDR, addr, in_pm); if (ret < 0) { - netdev_warn(dev->net, "Error writing MII_ADDR\n"); + if (ret != -ENODEV) + netdev_warn(dev->net, "Error writing MII_ADDR\n"); goto done; } @@ -208,7 +213,8 @@ static int __smsc95xx_mdio_read(struct usbnet *dev, int phy_id, int idx, ret = __smsc95xx_read_reg(dev, MII_DATA, &val, in_pm); if (ret < 0) { - netdev_warn(dev->net, "Error reading MII_DATA\n"); + if (ret != -ENODEV) + netdev_warn(dev->net, "Error reading MII_DATA\n"); goto done; } @@ -216,6 +222,10 @@ static int __smsc95xx_mdio_read(struct usbnet *dev, int phy_id, int idx, done: mutex_unlock(&dev->phy_mutex); + + /* Ignore -ENODEV error during disconnect() */ + if (ret == -ENODEV) + return 0; return ret; } @@ -237,7 +247,8 @@ static void __smsc95xx_mdio_write(struct usbnet *dev, int phy_id, val = regval; ret = __smsc95xx_write_reg(dev, MII_DATA, val, in_pm); if (ret < 0) { - netdev_warn(dev->net, "Error writing MII_DATA\n"); + if (ret != -ENODEV) + netdev_warn(dev->net, "Error writing MII_DATA\n"); goto done; } @@ -245,7 +256,8 @@ static void __smsc95xx_mdio_write(struct usbnet *dev, int phy_id, addr = mii_address_cmd(phy_id, idx, MII_WRITE_ | MII_BUSY_); ret = __smsc95xx_write_reg(dev, MII_ADDR, addr, in_pm); if (ret < 0) { - netdev_warn(dev->net, "Error writing MII_ADDR\n"); + if (ret != -ENODEV) + netdev_warn(dev->net, "Error writing MII_ADDR\n"); goto done; } diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c index b658510cc9a4..5a53e63d33a6 100644 --- a/drivers/net/usb/sr9700.c +++ b/drivers/net/usb/sr9700.c @@ -413,7 +413,7 @@ static int sr9700_rx_fixup(struct usbnet *dev, struct sk_buff *skb) /* ignore the CRC length */ len = (skb->data[1] | (skb->data[2] << 8)) - 4; - if (len > ETH_FRAME_LEN) + if (len > ETH_FRAME_LEN || len > skb->len) return 0; /* the last packet of current skb */ diff --git a/drivers/net/usb/zaurus.c b/drivers/net/usb/zaurus.c index 8e717a0b559b..7984f2157d22 100644 --- a/drivers/net/usb/zaurus.c +++ b/drivers/net/usb/zaurus.c @@ -256,6 +256,11 @@ static const struct usb_device_id products [] = { .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, \ .bInterfaceProtocol = USB_CDC_PROTO_NONE +#define ZAURUS_FAKE_INTERFACE \ + .bInterfaceClass = USB_CLASS_COMM, \ + .bInterfaceSubClass = USB_CDC_SUBCLASS_MDLM, \ + .bInterfaceProtocol = USB_CDC_PROTO_NONE + /* SA-1100 based Sharp Zaurus ("collie"), or compatible. 
*/ { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO @@ -315,6 +320,13 @@ static const struct usb_device_id products [] = { .driver_info = ZAURUS_PXA_INFO, }, { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x04DD, + .idProduct = 0x9032, /* SL-6000 */ + ZAURUS_FAKE_INTERFACE, + .driver_info = (unsigned long)&bogus_mdlm_info, +}, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x04DD, /* reported with some C860 units */ diff --git a/drivers/net/veth.c b/drivers/net/veth.c index d29fb9759cc9..58b20ea171dd 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -287,7 +287,7 @@ static int veth_forward_skb(struct net_device *dev, struct sk_buff *skb, { return __dev_forward_skb(dev, skb) ?: xdp ? veth_xdp_rx(rq, skb) : - netif_rx(skb); + __netif_rx(skb); } /* return true if the specified skb has chances of GRO aggregation diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index a801ea40908f..11f26b00a226 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -3449,8 +3449,7 @@ static __init int virtio_net_driver_init(void) NULL, virtnet_cpu_dead); if (ret) goto err_dead; - - ret = register_virtio_driver(&virtio_net_driver); + ret = register_virtio_driver(&virtio_net_driver); if (ret) goto err_virtio; return 0; diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index e0b1ab99a359..714cafcf6c6c 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -418,7 +418,7 @@ static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev, skb->protocol = eth_type_trans(skb, dev); - if (likely(netif_rx(skb) == NET_RX_SUCCESS)) + if (likely(__netif_rx(skb) == NET_RX_SUCCESS)) vrf_rx_stats(dev, len); else this_cpu_inc(dev->dstats->rx_drps); diff --git a/drivers/net/vxlan/Makefile b/drivers/net/vxlan/Makefile new file mode 100644 index 000000000000..d4c255499b72 --- /dev/null +++ b/drivers/net/vxlan/Makefile @@ -0,0 +1,7 @@ +# +# Makefile for the vxlan driver +# + +obj-$(CONFIG_VXLAN) += vxlan.o + +vxlan-objs := vxlan_core.o vxlan_multicast.o vxlan_vnifilter.o diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan/vxlan_core.c index 359d16780dbb..3872f76ea1d3 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan/vxlan_core.c @@ -34,10 +34,10 @@ #include <net/ip6_checksum.h> #endif +#include "vxlan_private.h" + #define VXLAN_VERSION "0.1" -#define PORT_HASH_BITS 8 -#define PORT_HASH_SIZE (1<<PORT_HASH_BITS) #define FDB_AGE_DEFAULT 300 /* 5 min */ #define FDB_AGE_INTERVAL (10 * HZ) /* rescan interval */ @@ -53,41 +53,15 @@ static bool log_ecn_error = true; module_param(log_ecn_error, bool, 0644); MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN"); -static unsigned int vxlan_net_id; -static struct rtnl_link_ops vxlan_link_ops; +unsigned int vxlan_net_id; -static const u8 all_zeros_mac[ETH_ALEN + 2]; +const u8 all_zeros_mac[ETH_ALEN + 2]; +static struct rtnl_link_ops vxlan_link_ops; static int vxlan_sock_add(struct vxlan_dev *vxlan); static void vxlan_vs_del_dev(struct vxlan_dev *vxlan); -/* per-network namespace private data for this module */ -struct vxlan_net { - struct list_head vxlan_list; - struct hlist_head sock_list[PORT_HASH_SIZE]; - spinlock_t sock_lock; - struct notifier_block nexthop_notifier_block; -}; - -/* Forwarding table entry */ -struct vxlan_fdb { - struct hlist_node hlist; /* linked list of entries */ - struct rcu_head rcu; - unsigned long updated; /* jiffies */ - unsigned long used; - struct list_head remotes; - u8 eth_addr[ETH_ALEN]; - u16 
state; /* see ndm_state */ - __be32 vni; - u16 flags; /* see ndm_flags and below */ - struct list_head nh_list; - struct nexthop __rcu *nh; - struct vxlan_dev __rcu *vdev; -}; - -#define NTF_VXLAN_ADDED_BY_USER 0x100 - /* salt for hash table */ static u32 vxlan_salt __read_mostly; @@ -98,17 +72,6 @@ static inline bool vxlan_collect_metadata(struct vxlan_sock *vs) } #if IS_ENABLED(CONFIG_IPV6) -static inline -bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b) -{ - if (a->sa.sa_family != b->sa.sa_family) - return false; - if (a->sa.sa_family == AF_INET6) - return ipv6_addr_equal(&a->sin6.sin6_addr, &b->sin6.sin6_addr); - else - return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr; -} - static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla) { if (nla_len(nla) >= sizeof(struct in6_addr)) { @@ -135,12 +98,6 @@ static int vxlan_nla_put_addr(struct sk_buff *skb, int attr, #else /* !CONFIG_IPV6 */ -static inline -bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b) -{ - return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr; -} - static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla) { if (nla_len(nla) >= sizeof(struct in6_addr)) { @@ -161,37 +118,6 @@ static int vxlan_nla_put_addr(struct sk_buff *skb, int attr, } #endif -/* Virtual Network hash table head */ -static inline struct hlist_head *vni_head(struct vxlan_sock *vs, __be32 vni) -{ - return &vs->vni_list[hash_32((__force u32)vni, VNI_HASH_BITS)]; -} - -/* Socket hash table head */ -static inline struct hlist_head *vs_head(struct net *net, __be16 port) -{ - struct vxlan_net *vn = net_generic(net, vxlan_net_id); - - return &vn->sock_list[hash_32(ntohs(port), PORT_HASH_BITS)]; -} - -/* First remote destination for a forwarding entry. - * Guaranteed to be non-NULL because remotes are never deleted. - */ -static inline struct vxlan_rdst *first_remote_rcu(struct vxlan_fdb *fdb) -{ - if (rcu_access_pointer(fdb->nh)) - return NULL; - return list_entry_rcu(fdb->remotes.next, struct vxlan_rdst, list); -} - -static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb) -{ - if (rcu_access_pointer(fdb->nh)) - return NULL; - return list_first_entry(&fdb->remotes, struct vxlan_rdst, list); -} - /* Find VXLAN socket based on network namespace, address family, UDP port, * enabled unshareable flags and socket device binding (see l3mdev with * non-default VRF). 
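/*
 * Illustrative userspace model (not kernel code) of the lookup rule the
 * vxlan_vs_find_vni() hunk just below introduces: a device running in
 * VNI filter mode (VXLAN_F_VNIFILTER) accepts any VNI present in its
 * per-device VNI set (an rhashtable in the driver), while a classic
 * device still matches only its single configured VNI. The names here
 * (toy_vxlan_dev, toy_dev_matches_vni) are hypothetical stand-ins, and
 * a flat array replaces the rhashtable for brevity.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct toy_vxlan_dev {
	uint32_t default_vni;		/* stands in for default_dst.remote_vni */
	bool vnifilter;			/* stands in for VXLAN_F_VNIFILTER */
	const uint32_t *vni_set;	/* stands in for the vnigrp rhashtable */
	size_t vni_set_len;
};

static bool toy_dev_matches_vni(const struct toy_vxlan_dev *dev, uint32_t vni)
{
	size_t i;

	if (!dev->vnifilter)
		return dev->default_vni == vni;

	for (i = 0; i < dev->vni_set_len; i++)
		if (dev->vni_set[i] == vni)
			return true;
	return false;
}

int main(void)
{
	static const uint32_t vnis[] = { 100, 200, 300 };
	struct toy_vxlan_dev filt = { 0, true, vnis, 3 };
	struct toy_vxlan_dev plain = { 42, false, NULL, 0 };

	printf("filter dev, vni 200: %d\n", toy_dev_matches_vni(&filt, 200));	/* 1 */
	printf("plain dev, vni 200: %d\n", toy_dev_matches_vni(&plain, 200));	/* 0 */
	printf("plain dev, vni 42:  %d\n", toy_dev_matches_vni(&plain, 42));	/* 1 */
	return 0;
}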
@@ -213,18 +139,29 @@ static struct vxlan_sock *vxlan_find_sock(struct net *net, sa_family_t family, return NULL; } -static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, int ifindex, - __be32 vni) +static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, + int ifindex, __be32 vni, + struct vxlan_vni_node **vninode) { + struct vxlan_vni_node *vnode; struct vxlan_dev_node *node; /* For flow based devices, map all packets to VNI 0 */ - if (vs->flags & VXLAN_F_COLLECT_METADATA) + if (vs->flags & VXLAN_F_COLLECT_METADATA && + !(vs->flags & VXLAN_F_VNIFILTER)) vni = 0; hlist_for_each_entry_rcu(node, vni_head(vs, vni), hlist) { - if (node->vxlan->default_dst.remote_vni != vni) + if (!node->vxlan) + continue; + vnode = NULL; + if (node->vxlan->cfg.flags & VXLAN_F_VNIFILTER) { + vnode = vxlan_vnifilter_lookup(node->vxlan, vni); + if (!vnode) + continue; + } else if (node->vxlan->default_dst.remote_vni != vni) { continue; + } if (IS_ENABLED(CONFIG_IPV6)) { const struct vxlan_config *cfg = &node->vxlan->cfg; @@ -234,6 +171,8 @@ static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, int ifindex, continue; } + if (vninode) + *vninode = vnode; return node->vxlan; } @@ -251,7 +190,7 @@ static struct vxlan_dev *vxlan_find_vni(struct net *net, int ifindex, if (!vs) return NULL; - return vxlan_vs_find_vni(vs, ifindex, vni); + return vxlan_vs_find_vni(vs, ifindex, vni, NULL); } /* Fill in neighbour message in skbuff. */ @@ -493,7 +432,7 @@ static u32 eth_hash(const unsigned char *addr) return hash_64(value, FDB_HASH_BITS); } -static u32 eth_vni_hash(const unsigned char *addr, __be32 vni) +u32 eth_vni_hash(const unsigned char *addr, __be32 vni) { /* use 1 byte of OUI and 3 bytes of NIC */ u32 key = get_unaligned((u32 *)(addr + 2)); @@ -501,7 +440,7 @@ static u32 eth_vni_hash(const unsigned char *addr, __be32 vni) return jhash_2words(key, vni, vxlan_salt) & (FDB_HASH_SIZE - 1); } -static u32 fdb_head_index(struct vxlan_dev *vxlan, const u8 *mac, __be32 vni) +u32 fdb_head_index(struct vxlan_dev *vxlan, const u8 *mac, __be32 vni) { if (vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) return eth_vni_hash(mac, vni); @@ -872,37 +811,35 @@ static int vxlan_fdb_nh_update(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb, goto err_inval; } - if (nh) { - if (!nexthop_get(nh)) { - NL_SET_ERR_MSG(extack, "Nexthop has been deleted"); - nh = NULL; - goto err_inval; - } - if (!nexthop_is_fdb(nh)) { - NL_SET_ERR_MSG(extack, "Nexthop is not a fdb nexthop"); - goto err_inval; - } + if (!nexthop_get(nh)) { + NL_SET_ERR_MSG(extack, "Nexthop has been deleted"); + nh = NULL; + goto err_inval; + } + if (!nexthop_is_fdb(nh)) { + NL_SET_ERR_MSG(extack, "Nexthop is not a fdb nexthop"); + goto err_inval; + } + + if (!nexthop_is_multipath(nh)) { + NL_SET_ERR_MSG(extack, "Nexthop is not a multipath group"); + goto err_inval; + } - if (!nexthop_is_multipath(nh)) { - NL_SET_ERR_MSG(extack, "Nexthop is not a multipath group"); + /* check nexthop group family */ + switch (vxlan->default_dst.remote_ip.sa.sa_family) { + case AF_INET: + if (!nexthop_has_v4(nh)) { + err = -EAFNOSUPPORT; + NL_SET_ERR_MSG(extack, "Nexthop group family not supported"); goto err_inval; } - - /* check nexthop group family */ - switch (vxlan->default_dst.remote_ip.sa.sa_family) { - case AF_INET: - if (!nexthop_has_v4(nh)) { - err = -EAFNOSUPPORT; - NL_SET_ERR_MSG(extack, "Nexthop group family not supported"); - goto err_inval; - } - break; - case AF_INET6: - if (nexthop_has_v4(nh)) { - err = -EAFNOSUPPORT; - NL_SET_ERR_MSG(extack, "Nexthop 
group family not supported"); - goto err_inval; - } + break; + case AF_INET6: + if (nexthop_has_v4(nh)) { + err = -EAFNOSUPPORT; + NL_SET_ERR_MSG(extack, "Nexthop group family not supported"); + goto err_inval; } } @@ -920,12 +857,12 @@ err_inval: return err; } -static int vxlan_fdb_create(struct vxlan_dev *vxlan, - const u8 *mac, union vxlan_addr *ip, - __u16 state, __be16 port, __be32 src_vni, - __be32 vni, __u32 ifindex, __u16 ndm_flags, - u32 nhid, struct vxlan_fdb **fdb, - struct netlink_ext_ack *extack) +int vxlan_fdb_create(struct vxlan_dev *vxlan, + const u8 *mac, union vxlan_addr *ip, + __u16 state, __be16 port, __be32 src_vni, + __be32 vni, __u32 ifindex, __u16 ndm_flags, + u32 nhid, struct vxlan_fdb **fdb, + struct netlink_ext_ack *extack) { struct vxlan_rdst *rd = NULL; struct vxlan_fdb *f; @@ -1150,13 +1087,13 @@ err_notify: } /* Add new entry to forwarding table -- assumes lock held */ -static int vxlan_fdb_update(struct vxlan_dev *vxlan, - const u8 *mac, union vxlan_addr *ip, - __u16 state, __u16 flags, - __be16 port, __be32 src_vni, __be32 vni, - __u32 ifindex, __u16 ndm_flags, u32 nhid, - bool swdev_notify, - struct netlink_ext_ack *extack) +int vxlan_fdb_update(struct vxlan_dev *vxlan, + const u8 *mac, union vxlan_addr *ip, + __u16 state, __u16 flags, + __be16 port, __be32 src_vni, __be32 vni, + __u32 ifindex, __u16 ndm_flags, u32 nhid, + bool swdev_notify, + struct netlink_ext_ack *extack) { struct vxlan_fdb *f; @@ -1307,10 +1244,10 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], return err; } -static int __vxlan_fdb_delete(struct vxlan_dev *vxlan, - const unsigned char *addr, union vxlan_addr ip, - __be16 port, __be32 src_vni, __be32 vni, - u32 ifindex, bool swdev_notify) +int __vxlan_fdb_delete(struct vxlan_dev *vxlan, + const unsigned char *addr, union vxlan_addr ip, + __be16 port, __be32 src_vni, __be32 vni, + u32 ifindex, bool swdev_notify) { struct vxlan_rdst *rd = NULL; struct vxlan_fdb *f; @@ -1519,56 +1456,6 @@ static bool vxlan_snoop(struct net_device *dev, return false; } -/* See if multicast group is already in use by other ID */ -static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev) -{ - struct vxlan_dev *vxlan; - struct vxlan_sock *sock4; -#if IS_ENABLED(CONFIG_IPV6) - struct vxlan_sock *sock6; -#endif - unsigned short family = dev->default_dst.remote_ip.sa.sa_family; - - sock4 = rtnl_dereference(dev->vn4_sock); - - /* The vxlan_sock is only used by dev, leaving group has - * no effect on other vxlan devices. 
- */ - if (family == AF_INET && sock4 && refcount_read(&sock4->refcnt) == 1) - return false; -#if IS_ENABLED(CONFIG_IPV6) - sock6 = rtnl_dereference(dev->vn6_sock); - if (family == AF_INET6 && sock6 && refcount_read(&sock6->refcnt) == 1) - return false; -#endif - - list_for_each_entry(vxlan, &vn->vxlan_list, next) { - if (!netif_running(vxlan->dev) || vxlan == dev) - continue; - - if (family == AF_INET && - rtnl_dereference(vxlan->vn4_sock) != sock4) - continue; -#if IS_ENABLED(CONFIG_IPV6) - if (family == AF_INET6 && - rtnl_dereference(vxlan->vn6_sock) != sock6) - continue; -#endif - - if (!vxlan_addr_equal(&vxlan->default_dst.remote_ip, - &dev->default_dst.remote_ip)) - continue; - - if (vxlan->default_dst.remote_ifindex != - dev->default_dst.remote_ifindex) - continue; - - return true; - } - - return false; -} - static bool __vxlan_sock_release_prep(struct vxlan_sock *vs) { struct vxlan_net *vn; @@ -1602,7 +1489,10 @@ static void vxlan_sock_release(struct vxlan_dev *vxlan) RCU_INIT_POINTER(vxlan->vn4_sock, NULL); synchronize_net(); - vxlan_vs_del_dev(vxlan); + if (vxlan->cfg.flags & VXLAN_F_VNIFILTER) + vxlan_vs_del_vnigrp(vxlan); + else + vxlan_vs_del_dev(vxlan); if (__vxlan_sock_release_prep(sock4)) { udp_tunnel_sock_release(sock4->sock); @@ -1617,76 +1507,6 @@ static void vxlan_sock_release(struct vxlan_dev *vxlan) #endif } -/* Update multicast group membership when first VNI on - * multicast address is brought up - */ -static int vxlan_igmp_join(struct vxlan_dev *vxlan) -{ - struct sock *sk; - union vxlan_addr *ip = &vxlan->default_dst.remote_ip; - int ifindex = vxlan->default_dst.remote_ifindex; - int ret = -EINVAL; - - if (ip->sa.sa_family == AF_INET) { - struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock); - struct ip_mreqn mreq = { - .imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr, - .imr_ifindex = ifindex, - }; - - sk = sock4->sock->sk; - lock_sock(sk); - ret = ip_mc_join_group(sk, &mreq); - release_sock(sk); -#if IS_ENABLED(CONFIG_IPV6) - } else { - struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock); - - sk = sock6->sock->sk; - lock_sock(sk); - ret = ipv6_stub->ipv6_sock_mc_join(sk, ifindex, - &ip->sin6.sin6_addr); - release_sock(sk); -#endif - } - - return ret; -} - -/* Inverse of vxlan_igmp_join when last VNI is brought down */ -static int vxlan_igmp_leave(struct vxlan_dev *vxlan) -{ - struct sock *sk; - union vxlan_addr *ip = &vxlan->default_dst.remote_ip; - int ifindex = vxlan->default_dst.remote_ifindex; - int ret = -EINVAL; - - if (ip->sa.sa_family == AF_INET) { - struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock); - struct ip_mreqn mreq = { - .imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr, - .imr_ifindex = ifindex, - }; - - sk = sock4->sock->sk; - lock_sock(sk); - ret = ip_mc_leave_group(sk, &mreq); - release_sock(sk); -#if IS_ENABLED(CONFIG_IPV6) - } else { - struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock); - - sk = sock6->sock->sk; - lock_sock(sk); - ret = ipv6_stub->ipv6_sock_mc_drop(sk, ifindex, - &ip->sin6.sin6_addr); - release_sock(sk); -#endif - } - - return ret; -} - static bool vxlan_remcsum(struct vxlanhdr *unparsed, struct sk_buff *skb, u32 vxflags) { @@ -1828,6 +1648,7 @@ static bool vxlan_ecn_decapsulate(struct vxlan_sock *vs, void *oiph, /* Callback from net/ipv4/udp.c to receive packets */ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb) { + struct vxlan_vni_node *vninode = NULL; struct vxlan_dev *vxlan; struct vxlan_sock *vs; struct vxlanhdr unparsed; @@ -1860,7 +1681,7 @@ static int vxlan_rcv(struct 
sock *sk, struct sk_buff *skb) vni = vxlan_vni(vxlan_hdr(skb)->vx_vni); - vxlan = vxlan_vs_find_vni(vs, skb->dev->ifindex, vni); + vxlan = vxlan_vs_find_vni(vs, skb->dev->ifindex, vni, &vninode); if (!vxlan) goto drop; @@ -1930,6 +1751,8 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb) if (!vxlan_ecn_decapsulate(vs, oiph, skb)) { ++vxlan->dev->stats.rx_frame_errors; ++vxlan->dev->stats.rx_errors; + vxlan_vnifilter_count(vxlan, vni, vninode, + VXLAN_VNI_STATS_RX_ERRORS, 0); goto drop; } @@ -1938,10 +1761,13 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb) if (unlikely(!(vxlan->dev->flags & IFF_UP))) { rcu_read_unlock(); atomic_long_inc(&vxlan->dev->rx_dropped); + vxlan_vnifilter_count(vxlan, vni, vninode, + VXLAN_VNI_STATS_RX_DROPS, 0); goto drop; } dev_sw_netstats_rx_add(vxlan->dev, skb->len); + vxlan_vnifilter_count(vxlan, vni, vninode, VXLAN_VNI_STATS_RX, skb->len); gro_cells_receive(&vxlan->gro_cells, skb); rcu_read_unlock(); @@ -1975,7 +1801,7 @@ static int vxlan_err_lookup(struct sock *sk, struct sk_buff *skb) return -ENOENT; vni = vxlan_vni(hdr->vx_vni); - vxlan = vxlan_vs_find_vni(vs, skb->dev->ifindex, vni); + vxlan = vxlan_vs_find_vni(vs, skb->dev->ifindex, vni, NULL); if (!vxlan) return -ENOENT; @@ -2049,8 +1875,12 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni) reply->ip_summed = CHECKSUM_UNNECESSARY; reply->pkt_type = PACKET_HOST; - if (netif_rx_ni(reply) == NET_RX_DROP) + if (netif_rx(reply) == NET_RX_DROP) { dev->stats.rx_dropped++; + vxlan_vnifilter_count(vxlan, vni, NULL, + VXLAN_VNI_STATS_RX_DROPS, 0); + } + } else if (vxlan->cfg.flags & VXLAN_F_L3MISS) { union vxlan_addr ipa = { .sin.sin_addr.s_addr = tip, @@ -2204,9 +2034,11 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni) if (reply == NULL) goto out; - if (netif_rx_ni(reply) == NET_RX_DROP) + if (netif_rx(reply) == NET_RX_DROP) { dev->stats.rx_dropped++; - + vxlan_vnifilter_count(vxlan, vni, NULL, + VXLAN_VNI_STATS_RX_DROPS, 0); + } } else if (vxlan->cfg.flags & VXLAN_F_L3MISS) { union vxlan_addr ipa = { .sin6.sin6_addr = msg->target, @@ -2540,15 +2372,20 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, tx_stats->tx_packets++; tx_stats->tx_bytes += len; u64_stats_update_end(&tx_stats->syncp); + vxlan_vnifilter_count(src_vxlan, vni, NULL, VXLAN_VNI_STATS_TX, len); - if (netif_rx(skb) == NET_RX_SUCCESS) { + if (__netif_rx(skb) == NET_RX_SUCCESS) { u64_stats_update_begin(&rx_stats->syncp); rx_stats->rx_packets++; rx_stats->rx_bytes += len; u64_stats_update_end(&rx_stats->syncp); + vxlan_vnifilter_count(dst_vxlan, vni, NULL, VXLAN_VNI_STATS_RX, + len); } else { drop: dev->stats.rx_dropped++; + vxlan_vnifilter_count(dst_vxlan, vni, NULL, + VXLAN_VNI_STATS_RX_DROPS, 0); } rcu_read_unlock(); } @@ -2578,6 +2415,8 @@ static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev, vxlan->cfg.flags); if (!dst_vxlan) { dev->stats.tx_errors++; + vxlan_vnifilter_count(vxlan, vni, NULL, + VXLAN_VNI_STATS_TX_ERRORS, 0); kfree_skb(skb); return -ENOENT; @@ -2601,15 +2440,19 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, union vxlan_addr remote_ip, local_ip; struct vxlan_metadata _md; struct vxlan_metadata *md = &_md; + unsigned int pkt_len = skb->len; __be16 src_port = 0, dst_port; struct dst_entry *ndst = NULL; - __be32 vni, label; __u8 tos, ttl; int ifindex; int err; u32 flags = vxlan->cfg.flags; bool udp_sum = false; bool xnet = !net_eq(vxlan->net, 
dev_net(vxlan->dev)); + __be32 vni = 0; +#if IS_ENABLED(CONFIG_IPV6) + __be32 label; +#endif info = skb_tunnel_info(skb); @@ -2647,7 +2490,9 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM_TX); else udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM6_TX); +#if IS_ENABLED(CONFIG_IPV6) label = vxlan->cfg.label; +#endif } else { if (!info) { WARN_ONCE(1, "%s: Missing encapsulation instructions\n", @@ -2674,7 +2519,9 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, } ttl = info->key.ttl; tos = info->key.tos; +#if IS_ENABLED(CONFIG_IPV6) label = info->key.label; +#endif udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM); } src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min, @@ -2821,12 +2668,14 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, label, src_port, dst_port, !udp_sum); #endif } + vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX, pkt_len); out_unlock: rcu_read_unlock(); return; drop: dev->stats.tx_dropped++; + vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX_DROPS, 0); dev_kfree_skb(skb); return; @@ -2838,6 +2687,7 @@ tx_error: dev->stats.tx_carrier_errors++; dst_release(ndst); dev->stats.tx_errors++; + vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX_ERRORS, 0); kfree_skb(skb); } @@ -2870,6 +2720,8 @@ static void vxlan_xmit_nh(struct sk_buff *skb, struct net_device *dev, drop: dev->stats.tx_dropped++; + vxlan_vnifilter_count(netdev_priv(dev), vni, NULL, + VXLAN_VNI_STATS_TX_DROPS, 0); dev_kfree_skb(skb); } @@ -2944,6 +2796,8 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) vxlan_fdb_miss(vxlan, eth->h_dest); dev->stats.tx_dropped++; + vxlan_vnifilter_count(vxlan, vni, NULL, + VXLAN_VNI_STATS_TX_DROPS, 0); kfree_skb(skb); return NETDEV_TX_OK; } @@ -3044,6 +2898,9 @@ static int vxlan_init(struct net_device *dev) struct vxlan_dev *vxlan = netdev_priv(dev); int err; + if (vxlan->cfg.flags & VXLAN_F_VNIFILTER) + vxlan_vnigroup_init(vxlan); + dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); if (!dev->tstats) return -ENOMEM; @@ -3073,6 +2930,9 @@ static void vxlan_uninit(struct net_device *dev) { struct vxlan_dev *vxlan = netdev_priv(dev); + if (vxlan->cfg.flags & VXLAN_F_VNIFILTER) + vxlan_vnigroup_uninit(vxlan); + gro_cells_destroy(&vxlan->gro_cells); vxlan_fdb_delete_default(vxlan, vxlan->cfg.vni); @@ -3090,14 +2950,10 @@ static int vxlan_open(struct net_device *dev) if (ret < 0) return ret; - if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) { - ret = vxlan_igmp_join(vxlan); - if (ret == -EADDRINUSE) - ret = 0; - if (ret) { - vxlan_sock_release(vxlan); - return ret; - } + ret = vxlan_multicast_join(vxlan); + if (ret) { + vxlan_sock_release(vxlan); + return ret; } if (vxlan->cfg.age_interval) @@ -3134,19 +2990,15 @@ static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all) static int vxlan_stop(struct net_device *dev) { struct vxlan_dev *vxlan = netdev_priv(dev); - struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); - int ret = 0; - if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip) && - !vxlan_group_used(vn, vxlan)) - ret = vxlan_igmp_leave(vxlan); + vxlan_multicast_leave(vxlan); del_timer_sync(&vxlan->age_timer); vxlan_flush(vxlan, false); vxlan_sock_release(vxlan); - return ret; + return 0; } /* Stub, nothing needs to be done. 
*/ @@ -3369,6 +3221,7 @@ static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = { [IFLA_VXLAN_REMCSUM_NOPARTIAL] = { .type = NLA_FLAG }, [IFLA_VXLAN_TTL_INHERIT] = { .type = NLA_FLAG }, [IFLA_VXLAN_DF] = { .type = NLA_U8 }, + [IFLA_VXLAN_VNIFILTER] = { .type = NLA_U8 }, }; static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[], @@ -3554,6 +3407,7 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6, static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6) { struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); + bool metadata = vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA; struct vxlan_sock *vs = NULL; struct vxlan_dev_node *node; int l3mdev_index = 0; @@ -3589,7 +3443,12 @@ static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6) rcu_assign_pointer(vxlan->vn4_sock, vs); node = &vxlan->hlist4; } - vxlan_vs_add_dev(vs, vxlan, node); + + if (metadata && (vxlan->cfg.flags & VXLAN_F_VNIFILTER)) + vxlan_vs_add_vnigrp(vxlan, vs, ipv6); + else + vxlan_vs_add_dev(vs, vxlan, node); + return 0; } @@ -3616,13 +3475,42 @@ static int vxlan_sock_add(struct vxlan_dev *vxlan) return ret; } +int vxlan_vni_in_use(struct net *src_net, struct vxlan_dev *vxlan, + struct vxlan_config *conf, __be32 vni) +{ + struct vxlan_net *vn = net_generic(src_net, vxlan_net_id); + struct vxlan_dev *tmp; + + list_for_each_entry(tmp, &vn->vxlan_list, next) { + if (tmp == vxlan) + continue; + if (tmp->cfg.flags & VXLAN_F_VNIFILTER) { + if (!vxlan_vnifilter_lookup(tmp, vni)) + continue; + } else if (tmp->cfg.vni != vni) { + continue; + } + if (tmp->cfg.dst_port != conf->dst_port) + continue; + if ((tmp->cfg.flags & (VXLAN_F_RCV_FLAGS | VXLAN_F_IPV6)) != + (conf->flags & (VXLAN_F_RCV_FLAGS | VXLAN_F_IPV6))) + continue; + + if ((conf->flags & VXLAN_F_IPV6_LINKLOCAL) && + tmp->cfg.remote_ifindex != conf->remote_ifindex) + continue; + + return -EEXIST; + } + + return 0; +} + static int vxlan_config_validate(struct net *src_net, struct vxlan_config *conf, struct net_device **lower, struct vxlan_dev *old, struct netlink_ext_ack *extack) { - struct vxlan_net *vn = net_generic(src_net, vxlan_net_id); - struct vxlan_dev *tmp; bool use_ipv6 = false; if (conf->flags & VXLAN_F_GPE) { @@ -3755,22 +3643,7 @@ static int vxlan_config_validate(struct net *src_net, struct vxlan_config *conf, if (!conf->age_interval) conf->age_interval = FDB_AGE_DEFAULT; - list_for_each_entry(tmp, &vn->vxlan_list, next) { - if (tmp == old) - continue; - - if (tmp->cfg.vni != conf->vni) - continue; - if (tmp->cfg.dst_port != conf->dst_port) - continue; - if ((tmp->cfg.flags & (VXLAN_F_RCV_FLAGS | VXLAN_F_IPV6)) != - (conf->flags & (VXLAN_F_RCV_FLAGS | VXLAN_F_IPV6))) - continue; - - if ((conf->flags & VXLAN_F_IPV6_LINKLOCAL) && - tmp->cfg.remote_ifindex != conf->remote_ifindex) - continue; - + if (vxlan_vni_in_use(src_net, old, conf, conf->vni)) { NL_SET_ERR_MSG(extack, "A VXLAN device with the specified VNI already exists"); return -EEXIST; @@ -4226,6 +4099,21 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[], if (data[IFLA_VXLAN_DF]) conf->df = nla_get_u8(data[IFLA_VXLAN_DF]); + if (data[IFLA_VXLAN_VNIFILTER]) { + err = vxlan_nl2flag(conf, data, IFLA_VXLAN_VNIFILTER, + VXLAN_F_VNIFILTER, changelink, false, + extack); + if (err) + return err; + + if ((conf->flags & VXLAN_F_VNIFILTER) && + !(conf->flags & VXLAN_F_COLLECT_METADATA)) { + NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VXLAN_VNIFILTER], + "vxlan vnifilter only valid in collect metadata mode"); + return -EINVAL; + } + } + 
return 0; } @@ -4301,6 +4189,19 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[], dst->remote_ifindex, true); spin_unlock_bh(&vxlan->hash_lock[hash_index]); + + /* If vni filtering device, also update fdb entries of + * all vnis that were using default remote ip + */ + if (vxlan->cfg.flags & VXLAN_F_VNIFILTER) { + err = vxlan_vnilist_update_group(vxlan, &dst->remote_ip, + &conf.remote_ip, extack); + if (err) { + netdev_adjacent_change_abort(dst->remote_dev, + lowerdev, dev); + return err; + } + } } if (conf.age_interval != vxlan->cfg.age_interval) @@ -4446,6 +4347,11 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev) nla_put_flag(skb, IFLA_VXLAN_REMCSUM_NOPARTIAL)) goto nla_put_failure; + if (vxlan->cfg.flags & VXLAN_F_VNIFILTER && + nla_put_u8(skb, IFLA_VXLAN_VNIFILTER, + !!(vxlan->cfg.flags & VXLAN_F_VNIFILTER))) + goto nla_put_failure; + return 0; nla_put_failure: @@ -4805,6 +4711,8 @@ static int __init vxlan_init_module(void) if (rc) goto out4; + vxlan_vnifilter_init(); + return 0; out4: unregister_switchdev_notifier(&vxlan_switchdev_notifier_block); @@ -4819,6 +4727,7 @@ late_initcall(vxlan_init_module); static void __exit vxlan_cleanup_module(void) { + vxlan_vnifilter_uninit(); rtnl_link_unregister(&vxlan_link_ops); unregister_switchdev_notifier(&vxlan_switchdev_notifier_block); unregister_netdevice_notifier(&vxlan_notifier_block); diff --git a/drivers/net/vxlan/vxlan_multicast.c b/drivers/net/vxlan/vxlan_multicast.c new file mode 100644 index 000000000000..a7f2d67dc61b --- /dev/null +++ b/drivers/net/vxlan/vxlan_multicast.c @@ -0,0 +1,272 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Vxlan multicast group handling + * + */ +#include <linux/kernel.h> +#include <net/net_namespace.h> +#include <net/sock.h> +#include <linux/igmp.h> +#include <net/vxlan.h> + +#include "vxlan_private.h" + +/* Update multicast group membership when first VNI on + * multicast address is brought up + */ +int vxlan_igmp_join(struct vxlan_dev *vxlan, union vxlan_addr *rip, + int rifindex) +{ + union vxlan_addr *ip = (rip ? : &vxlan->default_dst.remote_ip); + int ifindex = (rifindex ? : vxlan->default_dst.remote_ifindex); + int ret = -EINVAL; + struct sock *sk; + + if (ip->sa.sa_family == AF_INET) { + struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock); + struct ip_mreqn mreq = { + .imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr, + .imr_ifindex = ifindex, + }; + + sk = sock4->sock->sk; + lock_sock(sk); + ret = ip_mc_join_group(sk, &mreq); + release_sock(sk); +#if IS_ENABLED(CONFIG_IPV6) + } else { + struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock); + + sk = sock6->sock->sk; + lock_sock(sk); + ret = ipv6_stub->ipv6_sock_mc_join(sk, ifindex, + &ip->sin6.sin6_addr); + release_sock(sk); +#endif + } + + return ret; +} + +int vxlan_igmp_leave(struct vxlan_dev *vxlan, union vxlan_addr *rip, + int rifindex) +{ + union vxlan_addr *ip = (rip ? : &vxlan->default_dst.remote_ip); + int ifindex = (rifindex ? 
: vxlan->default_dst.remote_ifindex); + int ret = -EINVAL; + struct sock *sk; + + if (ip->sa.sa_family == AF_INET) { + struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock); + struct ip_mreqn mreq = { + .imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr, + .imr_ifindex = ifindex, + }; + + sk = sock4->sock->sk; + lock_sock(sk); + ret = ip_mc_leave_group(sk, &mreq); + release_sock(sk); +#if IS_ENABLED(CONFIG_IPV6) + } else { + struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock); + + sk = sock6->sock->sk; + lock_sock(sk); + ret = ipv6_stub->ipv6_sock_mc_drop(sk, ifindex, + &ip->sin6.sin6_addr); + release_sock(sk); +#endif + } + + return ret; +} + +static bool vxlan_group_used_match(union vxlan_addr *ip, int ifindex, + union vxlan_addr *rip, int rifindex) +{ + if (!vxlan_addr_multicast(rip)) + return false; + + if (!vxlan_addr_equal(rip, ip)) + return false; + + if (rifindex != ifindex) + return false; + + return true; +} + +static bool vxlan_group_used_by_vnifilter(struct vxlan_dev *vxlan, + union vxlan_addr *ip, int ifindex) +{ + struct vxlan_vni_group *vg = rtnl_dereference(vxlan->vnigrp); + struct vxlan_vni_node *v, *tmp; + + if (vxlan_group_used_match(ip, ifindex, + &vxlan->default_dst.remote_ip, + vxlan->default_dst.remote_ifindex)) + return true; + + list_for_each_entry_safe(v, tmp, &vg->vni_list, vlist) { + if (!vxlan_addr_multicast(&v->remote_ip)) + continue; + + if (vxlan_group_used_match(ip, ifindex, + &v->remote_ip, + vxlan->default_dst.remote_ifindex)) + return true; + } + + return false; +} + +/* See if multicast group is already in use by other ID */ +bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev, + __be32 vni, union vxlan_addr *rip, int rifindex) +{ + union vxlan_addr *ip = (rip ? : &dev->default_dst.remote_ip); + int ifindex = (rifindex ? : dev->default_dst.remote_ifindex); + struct vxlan_dev *vxlan; + struct vxlan_sock *sock4; +#if IS_ENABLED(CONFIG_IPV6) + struct vxlan_sock *sock6; +#endif + unsigned short family = dev->default_dst.remote_ip.sa.sa_family; + + sock4 = rtnl_dereference(dev->vn4_sock); + + /* The vxlan_sock is only used by dev, leaving group has + * no effect on other vxlan devices. 
+ */ + if (family == AF_INET && sock4 && refcount_read(&sock4->refcnt) == 1) + return false; + +#if IS_ENABLED(CONFIG_IPV6) + sock6 = rtnl_dereference(dev->vn6_sock); + if (family == AF_INET6 && sock6 && refcount_read(&sock6->refcnt) == 1) + return false; +#endif + + list_for_each_entry(vxlan, &vn->vxlan_list, next) { + if (!netif_running(vxlan->dev) || vxlan == dev) + continue; + + if (family == AF_INET && + rtnl_dereference(vxlan->vn4_sock) != sock4) + continue; +#if IS_ENABLED(CONFIG_IPV6) + if (family == AF_INET6 && + rtnl_dereference(vxlan->vn6_sock) != sock6) + continue; +#endif + if (vxlan->cfg.flags & VXLAN_F_VNIFILTER) { + if (!vxlan_group_used_by_vnifilter(vxlan, ip, ifindex)) + continue; + } else { + if (!vxlan_group_used_match(ip, ifindex, + &vxlan->default_dst.remote_ip, + vxlan->default_dst.remote_ifindex)) + continue; + } + + return true; + } + + return false; +} + +static int vxlan_multicast_join_vnigrp(struct vxlan_dev *vxlan) +{ + struct vxlan_vni_group *vg = rtnl_dereference(vxlan->vnigrp); + struct vxlan_vni_node *v, *tmp, *vgood = NULL; + int ret = 0; + + list_for_each_entry_safe(v, tmp, &vg->vni_list, vlist) { + if (!vxlan_addr_multicast(&v->remote_ip)) + continue; + /* skip if address is same as default address */ + if (vxlan_addr_equal(&v->remote_ip, + &vxlan->default_dst.remote_ip)) + continue; + ret = vxlan_igmp_join(vxlan, &v->remote_ip, 0); + if (ret == -EADDRINUSE) + ret = 0; + if (ret) + goto out; + vgood = v; + } +out: + if (ret) { + list_for_each_entry_safe(v, tmp, &vg->vni_list, vlist) { + if (!vxlan_addr_multicast(&v->remote_ip)) + continue; + if (vxlan_addr_equal(&v->remote_ip, + &vxlan->default_dst.remote_ip)) + continue; + vxlan_igmp_leave(vxlan, &v->remote_ip, 0); + if (v == vgood) + break; + } + } + + return ret; +} + +static int vxlan_multicast_leave_vnigrp(struct vxlan_dev *vxlan) +{ + struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); + struct vxlan_vni_group *vg = rtnl_dereference(vxlan->vnigrp); + struct vxlan_vni_node *v, *tmp; + int last_err = 0, ret; + + list_for_each_entry_safe(v, tmp, &vg->vni_list, vlist) { + if (vxlan_addr_multicast(&v->remote_ip) && + !vxlan_group_used(vn, vxlan, v->vni, &v->remote_ip, + 0)) { + ret = vxlan_igmp_leave(vxlan, &v->remote_ip, 0); + if (ret) + last_err = ret; + } + } + + return last_err; +} + +int vxlan_multicast_join(struct vxlan_dev *vxlan) +{ + int ret = 0; + + if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) { + ret = vxlan_igmp_join(vxlan, &vxlan->default_dst.remote_ip, + vxlan->default_dst.remote_ifindex); + if (ret == -EADDRINUSE) + ret = 0; + if (ret) + return ret; + } + + if (vxlan->cfg.flags & VXLAN_F_VNIFILTER) + return vxlan_multicast_join_vnigrp(vxlan); + + return 0; +} + +int vxlan_multicast_leave(struct vxlan_dev *vxlan) +{ + struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); + int ret = 0; + + if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip) && + !vxlan_group_used(vn, vxlan, 0, NULL, 0)) { + ret = vxlan_igmp_leave(vxlan, &vxlan->default_dst.remote_ip, + vxlan->default_dst.remote_ifindex); + if (ret) + return ret; + } + + if (vxlan->cfg.flags & VXLAN_F_VNIFILTER) + return vxlan_multicast_leave_vnigrp(vxlan); + + return 0; +} diff --git a/drivers/net/vxlan/vxlan_private.h b/drivers/net/vxlan/vxlan_private.h new file mode 100644 index 000000000000..599c3b4fdd5e --- /dev/null +++ b/drivers/net/vxlan/vxlan_private.h @@ -0,0 +1,162 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Vxlan private header file + * + */ + +#ifndef _VXLAN_PRIVATE_H +#define 
_VXLAN_PRIVATE_H + +#include <linux/rhashtable.h> + +extern unsigned int vxlan_net_id; +extern const u8 all_zeros_mac[ETH_ALEN + 2]; +extern const struct rhashtable_params vxlan_vni_rht_params; + +#define PORT_HASH_BITS 8 +#define PORT_HASH_SIZE (1 << PORT_HASH_BITS) + +/* per-network namespace private data for this module */ +struct vxlan_net { + struct list_head vxlan_list; + struct hlist_head sock_list[PORT_HASH_SIZE]; + spinlock_t sock_lock; + struct notifier_block nexthop_notifier_block; +}; + +/* Forwarding table entry */ +struct vxlan_fdb { + struct hlist_node hlist; /* linked list of entries */ + struct rcu_head rcu; + unsigned long updated; /* jiffies */ + unsigned long used; + struct list_head remotes; + u8 eth_addr[ETH_ALEN]; + u16 state; /* see ndm_state */ + __be32 vni; + u16 flags; /* see ndm_flags and below */ + struct list_head nh_list; + struct nexthop __rcu *nh; + struct vxlan_dev __rcu *vdev; +}; + +#define NTF_VXLAN_ADDED_BY_USER 0x100 + +/* Virtual Network hash table head */ +static inline struct hlist_head *vni_head(struct vxlan_sock *vs, __be32 vni) +{ + return &vs->vni_list[hash_32((__force u32)vni, VNI_HASH_BITS)]; +} + +/* Socket hash table head */ +static inline struct hlist_head *vs_head(struct net *net, __be16 port) +{ + struct vxlan_net *vn = net_generic(net, vxlan_net_id); + + return &vn->sock_list[hash_32(ntohs(port), PORT_HASH_BITS)]; +} + +/* First remote destination for a forwarding entry. + * Guaranteed to be non-NULL because remotes are never deleted. + */ +static inline struct vxlan_rdst *first_remote_rcu(struct vxlan_fdb *fdb) +{ + if (rcu_access_pointer(fdb->nh)) + return NULL; + return list_entry_rcu(fdb->remotes.next, struct vxlan_rdst, list); +} + +static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb) +{ + if (rcu_access_pointer(fdb->nh)) + return NULL; + return list_first_entry(&fdb->remotes, struct vxlan_rdst, list); +} + +#if IS_ENABLED(CONFIG_IPV6) +static inline +bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b) +{ + if (a->sa.sa_family != b->sa.sa_family) + return false; + if (a->sa.sa_family == AF_INET6) + return ipv6_addr_equal(&a->sin6.sin6_addr, &b->sin6.sin6_addr); + else + return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr; +} + +#else /* !CONFIG_IPV6 */ + +static inline +bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b) +{ + return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr; +} + +#endif + +static inline struct vxlan_vni_node * +vxlan_vnifilter_lookup(struct vxlan_dev *vxlan, __be32 vni) +{ + struct vxlan_vni_group *vg; + + vg = rcu_dereference_rtnl(vxlan->vnigrp); + if (!vg) + return NULL; + + return rhashtable_lookup_fast(&vg->vni_hash, &vni, + vxlan_vni_rht_params); +} + +/* vxlan_core.c */ +int vxlan_fdb_create(struct vxlan_dev *vxlan, + const u8 *mac, union vxlan_addr *ip, + __u16 state, __be16 port, __be32 src_vni, + __be32 vni, __u32 ifindex, __u16 ndm_flags, + u32 nhid, struct vxlan_fdb **fdb, + struct netlink_ext_ack *extack); +int __vxlan_fdb_delete(struct vxlan_dev *vxlan, + const unsigned char *addr, union vxlan_addr ip, + __be16 port, __be32 src_vni, __be32 vni, + u32 ifindex, bool swdev_notify); +u32 eth_vni_hash(const unsigned char *addr, __be32 vni); +u32 fdb_head_index(struct vxlan_dev *vxlan, const u8 *mac, __be32 vni); +int vxlan_fdb_update(struct vxlan_dev *vxlan, + const u8 *mac, union vxlan_addr *ip, + __u16 state, __u16 flags, + __be16 port, __be32 src_vni, __be32 vni, + __u32 ifindex, __u16 ndm_flags, u32 nhid, + bool 
swdev_notify, struct netlink_ext_ack *extack); +int vxlan_vni_in_use(struct net *src_net, struct vxlan_dev *vxlan, + struct vxlan_config *conf, __be32 vni); + +/* vxlan_vnifilter.c */ +int vxlan_vnigroup_init(struct vxlan_dev *vxlan); +void vxlan_vnigroup_uninit(struct vxlan_dev *vxlan); + +void vxlan_vnifilter_init(void); +void vxlan_vnifilter_uninit(void); +void vxlan_vnifilter_count(struct vxlan_dev *vxlan, __be32 vni, + struct vxlan_vni_node *vninode, + int type, unsigned int len); + +void vxlan_vs_add_vnigrp(struct vxlan_dev *vxlan, + struct vxlan_sock *vs, + bool ipv6); +void vxlan_vs_del_vnigrp(struct vxlan_dev *vxlan); +int vxlan_vnilist_update_group(struct vxlan_dev *vxlan, + union vxlan_addr *old_remote_ip, + union vxlan_addr *new_remote_ip, + struct netlink_ext_ack *extack); + + +/* vxlan_multicast.c */ +int vxlan_multicast_join(struct vxlan_dev *vxlan); +int vxlan_multicast_leave(struct vxlan_dev *vxlan); +bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev, + __be32 vni, union vxlan_addr *rip, int rifindex); +int vxlan_igmp_join(struct vxlan_dev *vxlan, union vxlan_addr *rip, + int rifindex); +int vxlan_igmp_leave(struct vxlan_dev *vxlan, union vxlan_addr *rip, + int rifindex); +#endif diff --git a/drivers/net/vxlan/vxlan_vnifilter.c b/drivers/net/vxlan/vxlan_vnifilter.c new file mode 100644 index 000000000000..9f28d0b6a6b2 --- /dev/null +++ b/drivers/net/vxlan/vxlan_vnifilter.c @@ -0,0 +1,999 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Vxlan vni filter for collect metadata mode + * + * Authors: Roopa Prabhu <roopa@nvidia.com> + * + */ + +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/etherdevice.h> +#include <linux/rhashtable.h> +#include <net/rtnetlink.h> +#include <net/net_namespace.h> +#include <net/sock.h> +#include <net/vxlan.h> + +#include "vxlan_private.h" + +static inline int vxlan_vni_cmp(struct rhashtable_compare_arg *arg, + const void *ptr) +{ + const struct vxlan_vni_node *vnode = ptr; + __be32 vni = *(__be32 *)arg->key; + + return vnode->vni != vni; +} + +const struct rhashtable_params vxlan_vni_rht_params = { + .head_offset = offsetof(struct vxlan_vni_node, vnode), + .key_offset = offsetof(struct vxlan_vni_node, vni), + .key_len = sizeof(__be32), + .nelem_hint = 3, + .max_size = VXLAN_N_VID, + .obj_cmpfn = vxlan_vni_cmp, + .automatic_shrinking = true, +}; + +static void vxlan_vs_add_del_vninode(struct vxlan_dev *vxlan, + struct vxlan_vni_node *v, + bool del) +{ + struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); + struct vxlan_dev_node *node; + struct vxlan_sock *vs; + + spin_lock(&vn->sock_lock); + if (del) { + if (!hlist_unhashed(&v->hlist4.hlist)) + hlist_del_init_rcu(&v->hlist4.hlist); +#if IS_ENABLED(CONFIG_IPV6) + if (!hlist_unhashed(&v->hlist6.hlist)) + hlist_del_init_rcu(&v->hlist6.hlist); +#endif + goto out; + } + +#if IS_ENABLED(CONFIG_IPV6) + vs = rtnl_dereference(vxlan->vn6_sock); + if (vs && v) { + node = &v->hlist6; + hlist_add_head_rcu(&node->hlist, vni_head(vs, v->vni)); + } +#endif + vs = rtnl_dereference(vxlan->vn4_sock); + if (vs && v) { + node = &v->hlist4; + hlist_add_head_rcu(&node->hlist, vni_head(vs, v->vni)); + } +out: + spin_unlock(&vn->sock_lock); +} + +void vxlan_vs_add_vnigrp(struct vxlan_dev *vxlan, + struct vxlan_sock *vs, + bool ipv6) +{ + struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); + struct vxlan_vni_group *vg = rtnl_dereference(vxlan->vnigrp); + struct vxlan_vni_node *v, *tmp; + struct vxlan_dev_node *node; + + if (!vg) + return; + + 
spin_lock(&vn->sock_lock); + list_for_each_entry_safe(v, tmp, &vg->vni_list, vlist) { +#if IS_ENABLED(CONFIG_IPV6) + if (ipv6) + node = &v->hlist6; + else +#endif + node = &v->hlist4; + node->vxlan = vxlan; + hlist_add_head_rcu(&node->hlist, vni_head(vs, v->vni)); + } + spin_unlock(&vn->sock_lock); +} + +void vxlan_vs_del_vnigrp(struct vxlan_dev *vxlan) +{ + struct vxlan_vni_group *vg = rtnl_dereference(vxlan->vnigrp); + struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); + struct vxlan_vni_node *v, *tmp; + + if (!vg) + return; + + spin_lock(&vn->sock_lock); + list_for_each_entry_safe(v, tmp, &vg->vni_list, vlist) { + hlist_del_init_rcu(&v->hlist4.hlist); +#if IS_ENABLED(CONFIG_IPV6) + hlist_del_init_rcu(&v->hlist6.hlist); +#endif + } + spin_unlock(&vn->sock_lock); +} + +static void vxlan_vnifilter_stats_get(const struct vxlan_vni_node *vninode, + struct vxlan_vni_stats *dest) +{ + int i; + + memset(dest, 0, sizeof(*dest)); + for_each_possible_cpu(i) { + struct vxlan_vni_stats_pcpu *pstats; + struct vxlan_vni_stats temp; + unsigned int start; + + pstats = per_cpu_ptr(vninode->stats, i); + do { + start = u64_stats_fetch_begin_irq(&pstats->syncp); + memcpy(&temp, &pstats->stats, sizeof(temp)); + } while (u64_stats_fetch_retry_irq(&pstats->syncp, start)); + + dest->rx_packets += temp.rx_packets; + dest->rx_bytes += temp.rx_bytes; + dest->rx_drops += temp.rx_drops; + dest->rx_errors += temp.rx_errors; + dest->tx_packets += temp.tx_packets; + dest->tx_bytes += temp.tx_bytes; + dest->tx_drops += temp.tx_drops; + dest->tx_errors += temp.tx_errors; + } +} + +static void vxlan_vnifilter_stats_add(struct vxlan_vni_node *vninode, + int type, unsigned int len) +{ + struct vxlan_vni_stats_pcpu *pstats = this_cpu_ptr(vninode->stats); + + u64_stats_update_begin(&pstats->syncp); + switch (type) { + case VXLAN_VNI_STATS_RX: + pstats->stats.rx_bytes += len; + pstats->stats.rx_packets++; + break; + case VXLAN_VNI_STATS_RX_DROPS: + pstats->stats.rx_drops++; + break; + case VXLAN_VNI_STATS_RX_ERRORS: + pstats->stats.rx_errors++; + break; + case VXLAN_VNI_STATS_TX: + pstats->stats.tx_bytes += len; + pstats->stats.tx_packets++; + break; + case VXLAN_VNI_STATS_TX_DROPS: + pstats->stats.tx_drops++; + break; + case VXLAN_VNI_STATS_TX_ERRORS: + pstats->stats.tx_errors++; + break; + } + u64_stats_update_end(&pstats->syncp); +} + +void vxlan_vnifilter_count(struct vxlan_dev *vxlan, __be32 vni, + struct vxlan_vni_node *vninode, + int type, unsigned int len) +{ + struct vxlan_vni_node *vnode; + + if (!(vxlan->cfg.flags & VXLAN_F_VNIFILTER)) + return; + + if (vninode) { + vnode = vninode; + } else { + vnode = vxlan_vnifilter_lookup(vxlan, vni); + if (!vnode) + return; + } + + vxlan_vnifilter_stats_add(vnode, type, len); +} + +static u32 vnirange(struct vxlan_vni_node *vbegin, + struct vxlan_vni_node *vend) +{ + return (be32_to_cpu(vend->vni) - be32_to_cpu(vbegin->vni)); +} + +static size_t vxlan_vnifilter_entry_nlmsg_size(void) +{ + return NLMSG_ALIGN(sizeof(struct tunnel_msg)) + + nla_total_size(0) /* VXLAN_VNIFILTER_ENTRY */ + + nla_total_size(sizeof(u32)) /* VXLAN_VNIFILTER_ENTRY_START */ + + nla_total_size(sizeof(u32)) /* VXLAN_VNIFILTER_ENTRY_END */ + + nla_total_size(sizeof(struct in6_addr));/* VXLAN_VNIFILTER_ENTRY_GROUP{6} */ +} + +static int __vnifilter_entry_fill_stats(struct sk_buff *skb, + const struct vxlan_vni_node *vbegin) +{ + struct vxlan_vni_stats vstats; + struct nlattr *vstats_attr; + + vstats_attr = nla_nest_start(skb, VXLAN_VNIFILTER_ENTRY_STATS); + if (!vstats_attr) + goto out_stats_err; 
+ + vxlan_vnifilter_stats_get(vbegin, &vstats); + if (nla_put_u64_64bit(skb, VNIFILTER_ENTRY_STATS_RX_BYTES, + vstats.rx_bytes, VNIFILTER_ENTRY_STATS_PAD) || + nla_put_u64_64bit(skb, VNIFILTER_ENTRY_STATS_RX_PKTS, + vstats.rx_packets, VNIFILTER_ENTRY_STATS_PAD) || + nla_put_u64_64bit(skb, VNIFILTER_ENTRY_STATS_RX_DROPS, + vstats.rx_drops, VNIFILTER_ENTRY_STATS_PAD) || + nla_put_u64_64bit(skb, VNIFILTER_ENTRY_STATS_RX_ERRORS, + vstats.rx_errors, VNIFILTER_ENTRY_STATS_PAD) || + nla_put_u64_64bit(skb, VNIFILTER_ENTRY_STATS_TX_BYTES, + vstats.tx_bytes, VNIFILTER_ENTRY_STATS_PAD) || + nla_put_u64_64bit(skb, VNIFILTER_ENTRY_STATS_TX_PKTS, + vstats.tx_packets, VNIFILTER_ENTRY_STATS_PAD) || + nla_put_u64_64bit(skb, VNIFILTER_ENTRY_STATS_TX_DROPS, + vstats.tx_drops, VNIFILTER_ENTRY_STATS_PAD) || + nla_put_u64_64bit(skb, VNIFILTER_ENTRY_STATS_TX_ERRORS, + vstats.tx_errors, VNIFILTER_ENTRY_STATS_PAD)) + goto out_stats_err; + + nla_nest_end(skb, vstats_attr); + + return 0; + +out_stats_err: + nla_nest_cancel(skb, vstats_attr); + return -EMSGSIZE; +} + +static bool vxlan_fill_vni_filter_entry(struct sk_buff *skb, + struct vxlan_vni_node *vbegin, + struct vxlan_vni_node *vend, + bool fill_stats) +{ + struct nlattr *ventry; + u32 vs = be32_to_cpu(vbegin->vni); + u32 ve = 0; + + if (vbegin != vend) + ve = be32_to_cpu(vend->vni); + + ventry = nla_nest_start(skb, VXLAN_VNIFILTER_ENTRY); + if (!ventry) + return false; + + if (nla_put_u32(skb, VXLAN_VNIFILTER_ENTRY_START, vs)) + goto out_err; + + if (ve && nla_put_u32(skb, VXLAN_VNIFILTER_ENTRY_END, ve)) + goto out_err; + + if (!vxlan_addr_any(&vbegin->remote_ip)) { + if (vbegin->remote_ip.sa.sa_family == AF_INET) { + if (nla_put_in_addr(skb, VXLAN_VNIFILTER_ENTRY_GROUP, + vbegin->remote_ip.sin.sin_addr.s_addr)) + goto out_err; +#if IS_ENABLED(CONFIG_IPV6) + } else { + if (nla_put_in6_addr(skb, VXLAN_VNIFILTER_ENTRY_GROUP6, + &vbegin->remote_ip.sin6.sin6_addr)) + goto out_err; +#endif + } + } + + if (fill_stats && __vnifilter_entry_fill_stats(skb, vbegin)) + goto out_err; + + nla_nest_end(skb, ventry); + + return true; + +out_err: + nla_nest_cancel(skb, ventry); + + return false; +} + +static void vxlan_vnifilter_notify(const struct vxlan_dev *vxlan, + struct vxlan_vni_node *vninode, int cmd) +{ + struct tunnel_msg *tmsg; + struct sk_buff *skb; + struct nlmsghdr *nlh; + struct net *net = dev_net(vxlan->dev); + int err = -ENOBUFS; + + skb = nlmsg_new(vxlan_vnifilter_entry_nlmsg_size(), GFP_KERNEL); + if (!skb) + goto out_err; + + err = -EMSGSIZE; + nlh = nlmsg_put(skb, 0, 0, cmd, sizeof(*tmsg), 0); + if (!nlh) + goto out_err; + tmsg = nlmsg_data(nlh); + memset(tmsg, 0, sizeof(*tmsg)); + tmsg->family = AF_BRIDGE; + tmsg->ifindex = vxlan->dev->ifindex; + + if (!vxlan_fill_vni_filter_entry(skb, vninode, vninode, false)) + goto out_err; + + nlmsg_end(skb, nlh); + rtnl_notify(skb, net, 0, RTNLGRP_TUNNEL, NULL, GFP_KERNEL); + + return; + +out_err: + rtnl_set_sk_err(net, RTNLGRP_TUNNEL, err); + + kfree_skb(skb); +} + +static int vxlan_vnifilter_dump_dev(const struct net_device *dev, + struct sk_buff *skb, + struct netlink_callback *cb) +{ + struct vxlan_vni_node *tmp, *v, *vbegin = NULL, *vend = NULL; + struct vxlan_dev *vxlan = netdev_priv(dev); + struct tunnel_msg *new_tmsg, *tmsg; + int idx = 0, s_idx = cb->args[1]; + struct vxlan_vni_group *vg; + struct nlmsghdr *nlh; + bool dump_stats; + int err = 0; + + if (!(vxlan->cfg.flags & VXLAN_F_VNIFILTER)) + return -EINVAL; + + /* RCU needed because of the vni locking rules (rcu || rtnl) */ + vg = 
rcu_dereference(vxlan->vnigrp); + if (!vg || !vg->num_vnis) + return 0; + + tmsg = nlmsg_data(cb->nlh); + dump_stats = !!(tmsg->flags & TUNNEL_MSG_FLAG_STATS); + + nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, + RTM_NEWTUNNEL, sizeof(*new_tmsg), NLM_F_MULTI); + if (!nlh) + return -EMSGSIZE; + new_tmsg = nlmsg_data(nlh); + memset(new_tmsg, 0, sizeof(*new_tmsg)); + new_tmsg->family = PF_BRIDGE; + new_tmsg->ifindex = dev->ifindex; + + list_for_each_entry_safe(v, tmp, &vg->vni_list, vlist) { + if (idx < s_idx) { + idx++; + continue; + } + if (!vbegin) { + vbegin = v; + vend = v; + continue; + } + if (!dump_stats && vnirange(vend, v) == 1 && + vxlan_addr_equal(&v->remote_ip, &vend->remote_ip)) { + goto update_end; + } else { + if (!vxlan_fill_vni_filter_entry(skb, vbegin, vend, + dump_stats)) { + err = -EMSGSIZE; + break; + } + idx += vnirange(vbegin, vend) + 1; + vbegin = v; + } +update_end: + vend = v; + } + + if (!err && vbegin) { + if (!vxlan_fill_vni_filter_entry(skb, vbegin, vend, dump_stats)) + err = -EMSGSIZE; + } + + cb->args[1] = err ? idx : 0; + + nlmsg_end(skb, nlh); + + return err; +} + +static int vxlan_vnifilter_dump(struct sk_buff *skb, struct netlink_callback *cb) +{ + int idx = 0, err = 0, s_idx = cb->args[0]; + struct net *net = sock_net(skb->sk); + struct tunnel_msg *tmsg; + struct net_device *dev; + + tmsg = nlmsg_data(cb->nlh); + + if (tmsg->flags & ~TUNNEL_MSG_VALID_USER_FLAGS) { + NL_SET_ERR_MSG(cb->extack, "Invalid tunnelmsg flags in ancillary header"); + return -EINVAL; + } + + rcu_read_lock(); + if (tmsg->ifindex) { + dev = dev_get_by_index_rcu(net, tmsg->ifindex); + if (!dev) { + err = -ENODEV; + goto out_err; + } + err = vxlan_vnifilter_dump_dev(dev, skb, cb); + /* if the dump completed without an error we return 0 here */ + if (err != -EMSGSIZE) + goto out_err; + } else { + for_each_netdev_rcu(net, dev) { + if (!netif_is_vxlan(dev)) + continue; + if (idx < s_idx) + goto skip; + err = vxlan_vnifilter_dump_dev(dev, skb, cb); + if (err == -EMSGSIZE) + break; +skip: + idx++; + } + } + cb->args[0] = idx; + rcu_read_unlock(); + + return skb->len; + +out_err: + rcu_read_unlock(); + + return err; +} + +static const struct nla_policy vni_filter_entry_policy[VXLAN_VNIFILTER_ENTRY_MAX + 1] = { + [VXLAN_VNIFILTER_ENTRY_START] = { .type = NLA_U32 }, + [VXLAN_VNIFILTER_ENTRY_END] = { .type = NLA_U32 }, + [VXLAN_VNIFILTER_ENTRY_GROUP] = { .type = NLA_BINARY, + .len = sizeof_field(struct iphdr, daddr) }, + [VXLAN_VNIFILTER_ENTRY_GROUP6] = { .type = NLA_BINARY, + .len = sizeof(struct in6_addr) }, +}; + +static const struct nla_policy vni_filter_policy[VXLAN_VNIFILTER_MAX + 1] = { + [VXLAN_VNIFILTER_ENTRY] = { .type = NLA_NESTED }, +}; + +static int vxlan_update_default_fdb_entry(struct vxlan_dev *vxlan, __be32 vni, + union vxlan_addr *old_remote_ip, + union vxlan_addr *remote_ip, + struct netlink_ext_ack *extack) +{ + struct vxlan_rdst *dst = &vxlan->default_dst; + u32 hash_index; + int err = 0; + + hash_index = fdb_head_index(vxlan, all_zeros_mac, vni); + spin_lock_bh(&vxlan->hash_lock[hash_index]); + if (remote_ip && !vxlan_addr_any(remote_ip)) { + err = vxlan_fdb_update(vxlan, all_zeros_mac, + remote_ip, + NUD_REACHABLE | NUD_PERMANENT, + NLM_F_APPEND | NLM_F_CREATE, + vxlan->cfg.dst_port, + vni, + vni, + dst->remote_ifindex, + NTF_SELF, 0, true, extack); + if (err) { + spin_unlock_bh(&vxlan->hash_lock[hash_index]); + return err; + } + } + + if (old_remote_ip && !vxlan_addr_any(old_remote_ip)) { + __vxlan_fdb_delete(vxlan, all_zeros_mac, + *old_remote_ip, 
+ vxlan->cfg.dst_port, + vni, vni, + dst->remote_ifindex, + true); + } + spin_unlock_bh(&vxlan->hash_lock[hash_index]); + + return err; +} + +static int vxlan_vni_update_group(struct vxlan_dev *vxlan, + struct vxlan_vni_node *vninode, + union vxlan_addr *group, + bool create, bool *changed, + struct netlink_ext_ack *extack) +{ + struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); + struct vxlan_rdst *dst = &vxlan->default_dst; + union vxlan_addr *newrip = NULL, *oldrip = NULL; + union vxlan_addr old_remote_ip; + int ret = 0; + + memcpy(&old_remote_ip, &vninode->remote_ip, sizeof(old_remote_ip)); + + /* if per vni remote ip is not present use vxlan dev + * default dst remote ip for fdb entry + */ + if (group && !vxlan_addr_any(group)) { + newrip = group; + } else { + if (!vxlan_addr_any(&dst->remote_ip)) + newrip = &dst->remote_ip; + } + + /* if old rip exists, and no newrip, + * explicitly delete old rip + */ + if (!newrip && !vxlan_addr_any(&old_remote_ip)) + oldrip = &old_remote_ip; + + if (!newrip && !oldrip) + return 0; + + if (!create && oldrip && newrip && vxlan_addr_equal(oldrip, newrip)) + return 0; + + ret = vxlan_update_default_fdb_entry(vxlan, vninode->vni, + oldrip, newrip, + extack); + if (ret) + goto out; + + if (group) + memcpy(&vninode->remote_ip, group, sizeof(vninode->remote_ip)); + + if (vxlan->dev->flags & IFF_UP) { + if (vxlan_addr_multicast(&old_remote_ip) && + !vxlan_group_used(vn, vxlan, vninode->vni, + &old_remote_ip, + vxlan->default_dst.remote_ifindex)) { + ret = vxlan_igmp_leave(vxlan, &old_remote_ip, + 0); + if (ret) + goto out; + } + + if (vxlan_addr_multicast(&vninode->remote_ip)) { + ret = vxlan_igmp_join(vxlan, &vninode->remote_ip, 0); + if (ret == -EADDRINUSE) + ret = 0; + if (ret) + goto out; + } + } + + *changed = true; + + return 0; +out: + return ret; +} + +int vxlan_vnilist_update_group(struct vxlan_dev *vxlan, + union vxlan_addr *old_remote_ip, + union vxlan_addr *new_remote_ip, + struct netlink_ext_ack *extack) +{ + struct list_head *headp, *hpos; + struct vxlan_vni_group *vg; + struct vxlan_vni_node *vent; + int ret; + + vg = rtnl_dereference(vxlan->vnigrp); + + headp = &vg->vni_list; + list_for_each_prev(hpos, headp) { + vent = list_entry(hpos, struct vxlan_vni_node, vlist); + if (vxlan_addr_any(&vent->remote_ip)) { + ret = vxlan_update_default_fdb_entry(vxlan, vent->vni, + old_remote_ip, + new_remote_ip, + extack); + if (ret) + return ret; + } + } + + return 0; +} + +static void vxlan_vni_delete_group(struct vxlan_dev *vxlan, + struct vxlan_vni_node *vninode) +{ + struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); + struct vxlan_rdst *dst = &vxlan->default_dst; + + /* if per vni remote_ip not present, delete the + * default dst remote_ip previously added for this vni + */ + if (!vxlan_addr_any(&vninode->remote_ip) || + !vxlan_addr_any(&dst->remote_ip)) + __vxlan_fdb_delete(vxlan, all_zeros_mac, + (vxlan_addr_any(&vninode->remote_ip) ? 
+ dst->remote_ip : vninode->remote_ip), + vxlan->cfg.dst_port, + vninode->vni, vninode->vni, + dst->remote_ifindex, + true); + + if (vxlan->dev->flags & IFF_UP) { + if (vxlan_addr_multicast(&vninode->remote_ip) && + !vxlan_group_used(vn, vxlan, vninode->vni, + &vninode->remote_ip, + dst->remote_ifindex)) { + vxlan_igmp_leave(vxlan, &vninode->remote_ip, 0); + } + } +} + +static int vxlan_vni_update(struct vxlan_dev *vxlan, + struct vxlan_vni_group *vg, + __be32 vni, union vxlan_addr *group, + bool *changed, + struct netlink_ext_ack *extack) +{ + struct vxlan_vni_node *vninode; + int ret; + + vninode = rhashtable_lookup_fast(&vg->vni_hash, &vni, + vxlan_vni_rht_params); + if (!vninode) + return 0; + + ret = vxlan_vni_update_group(vxlan, vninode, group, false, changed, + extack); + if (ret) + return ret; + + if (changed) + vxlan_vnifilter_notify(vxlan, vninode, RTM_NEWTUNNEL); + + return 0; +} + +static void __vxlan_vni_add_list(struct vxlan_vni_group *vg, + struct vxlan_vni_node *v) +{ + struct list_head *headp, *hpos; + struct vxlan_vni_node *vent; + + headp = &vg->vni_list; + list_for_each_prev(hpos, headp) { + vent = list_entry(hpos, struct vxlan_vni_node, vlist); + if (be32_to_cpu(v->vni) < be32_to_cpu(vent->vni)) + continue; + else + break; + } + list_add_rcu(&v->vlist, hpos); + vg->num_vnis++; +} + +static void __vxlan_vni_del_list(struct vxlan_vni_group *vg, + struct vxlan_vni_node *v) +{ + list_del_rcu(&v->vlist); + vg->num_vnis--; +} + +static struct vxlan_vni_node *vxlan_vni_alloc(struct vxlan_dev *vxlan, + __be32 vni) +{ + struct vxlan_vni_node *vninode; + + vninode = kzalloc(sizeof(*vninode), GFP_ATOMIC); + if (!vninode) + return NULL; + vninode->stats = netdev_alloc_pcpu_stats(struct vxlan_vni_stats_pcpu); + if (!vninode->stats) { + kfree(vninode); + return NULL; + } + vninode->vni = vni; + vninode->hlist4.vxlan = vxlan; +#if IS_ENABLED(CONFIG_IPV6) + vninode->hlist6.vxlan = vxlan; +#endif + + return vninode; +} + +static int vxlan_vni_add(struct vxlan_dev *vxlan, + struct vxlan_vni_group *vg, + u32 vni, union vxlan_addr *group, + struct netlink_ext_ack *extack) +{ + struct vxlan_vni_node *vninode; + __be32 v = cpu_to_be32(vni); + bool changed = false; + int err = 0; + + if (vxlan_vnifilter_lookup(vxlan, v)) + return vxlan_vni_update(vxlan, vg, v, group, &changed, extack); + + err = vxlan_vni_in_use(vxlan->net, vxlan, &vxlan->cfg, v); + if (err) { + NL_SET_ERR_MSG(extack, "VNI in use"); + return err; + } + + vninode = vxlan_vni_alloc(vxlan, v); + if (!vninode) + return -ENOMEM; + + err = rhashtable_lookup_insert_fast(&vg->vni_hash, + &vninode->vnode, + vxlan_vni_rht_params); + if (err) { + kfree(vninode); + return err; + } + + __vxlan_vni_add_list(vg, vninode); + + if (vxlan->dev->flags & IFF_UP) + vxlan_vs_add_del_vninode(vxlan, vninode, false); + + err = vxlan_vni_update_group(vxlan, vninode, group, true, &changed, + extack); + + if (changed) + vxlan_vnifilter_notify(vxlan, vninode, RTM_NEWTUNNEL); + + return err; +} + +static void vxlan_vni_node_rcu_free(struct rcu_head *rcu) +{ + struct vxlan_vni_node *v; + + v = container_of(rcu, struct vxlan_vni_node, rcu); + free_percpu(v->stats); + kfree(v); +} + +static int vxlan_vni_del(struct vxlan_dev *vxlan, + struct vxlan_vni_group *vg, + u32 vni, struct netlink_ext_ack *extack) +{ + struct vxlan_vni_node *vninode; + __be32 v = cpu_to_be32(vni); + int err = 0; + + vg = rtnl_dereference(vxlan->vnigrp); + + vninode = rhashtable_lookup_fast(&vg->vni_hash, &v, + vxlan_vni_rht_params); + if (!vninode) { + err = -ENOENT; + goto out; + } 
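+
+	/*
+	 * Note the RCU-safe teardown order below: the node is unlinked from
+	 * both the rhashtable and the VNI list first, and only freed via
+	 * call_rcu(), so readers still walking vni_list never see freed
+	 * memory.
+	 */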
+ + vxlan_vni_delete_group(vxlan, vninode); + + err = rhashtable_remove_fast(&vg->vni_hash, + &vninode->vnode, + vxlan_vni_rht_params); + if (err) + goto out; + + __vxlan_vni_del_list(vg, vninode); + + vxlan_vnifilter_notify(vxlan, vninode, RTM_DELTUNNEL); + + if (vxlan->dev->flags & IFF_UP) + vxlan_vs_add_del_vninode(vxlan, vninode, true); + + call_rcu(&vninode->rcu, vxlan_vni_node_rcu_free); + + return 0; +out: + return err; +} + +static int vxlan_vni_add_del(struct vxlan_dev *vxlan, __u32 start_vni, + __u32 end_vni, union vxlan_addr *group, + int cmd, struct netlink_ext_ack *extack) +{ + struct vxlan_vni_group *vg; + int v, err = 0; + + vg = rtnl_dereference(vxlan->vnigrp); + + for (v = start_vni; v <= end_vni; v++) { + switch (cmd) { + case RTM_NEWTUNNEL: + err = vxlan_vni_add(vxlan, vg, v, group, extack); + break; + case RTM_DELTUNNEL: + err = vxlan_vni_del(vxlan, vg, v, extack); + break; + default: + err = -EOPNOTSUPP; + break; + } + if (err) + goto out; + } + + return 0; +out: + return err; +} + +static int vxlan_process_vni_filter(struct vxlan_dev *vxlan, + struct nlattr *nlvnifilter, + int cmd, struct netlink_ext_ack *extack) +{ + struct nlattr *vattrs[VXLAN_VNIFILTER_ENTRY_MAX + 1]; + u32 vni_start = 0, vni_end = 0; + union vxlan_addr group; + int err; + + err = nla_parse_nested(vattrs, + VXLAN_VNIFILTER_ENTRY_MAX, + nlvnifilter, vni_filter_entry_policy, + extack); + if (err) + return err; + + if (vattrs[VXLAN_VNIFILTER_ENTRY_START]) { + vni_start = nla_get_u32(vattrs[VXLAN_VNIFILTER_ENTRY_START]); + vni_end = vni_start; + } + + if (vattrs[VXLAN_VNIFILTER_ENTRY_END]) + vni_end = nla_get_u32(vattrs[VXLAN_VNIFILTER_ENTRY_END]); + + if (!vni_start && !vni_end) { + NL_SET_ERR_MSG_ATTR(extack, nlvnifilter, + "vni start nor end found in vni entry"); + return -EINVAL; + } + + if (vattrs[VXLAN_VNIFILTER_ENTRY_GROUP]) { + group.sin.sin_addr.s_addr = + nla_get_in_addr(vattrs[VXLAN_VNIFILTER_ENTRY_GROUP]); + group.sa.sa_family = AF_INET; + } else if (vattrs[VXLAN_VNIFILTER_ENTRY_GROUP6]) { + group.sin6.sin6_addr = + nla_get_in6_addr(vattrs[VXLAN_VNIFILTER_ENTRY_GROUP6]); + group.sa.sa_family = AF_INET6; + } else { + memset(&group, 0, sizeof(group)); + } + + if (vxlan_addr_multicast(&group) && !vxlan->default_dst.remote_ifindex) { + NL_SET_ERR_MSG(extack, + "Local interface required for multicast remote group"); + + return -EINVAL; + } + + err = vxlan_vni_add_del(vxlan, vni_start, vni_end, &group, cmd, + extack); + if (err) + return err; + + return 0; +} + +void vxlan_vnigroup_uninit(struct vxlan_dev *vxlan) +{ + struct vxlan_vni_node *v, *tmp; + struct vxlan_vni_group *vg; + + vg = rtnl_dereference(vxlan->vnigrp); + list_for_each_entry_safe(v, tmp, &vg->vni_list, vlist) { + rhashtable_remove_fast(&vg->vni_hash, &v->vnode, + vxlan_vni_rht_params); + hlist_del_init_rcu(&v->hlist4.hlist); +#if IS_ENABLED(CONFIG_IPV6) + hlist_del_init_rcu(&v->hlist6.hlist); +#endif + __vxlan_vni_del_list(vg, v); + vxlan_vnifilter_notify(vxlan, v, RTM_DELTUNNEL); + call_rcu(&v->rcu, vxlan_vni_node_rcu_free); + } + rhashtable_destroy(&vg->vni_hash); + kfree(vg); +} + +int vxlan_vnigroup_init(struct vxlan_dev *vxlan) +{ + struct vxlan_vni_group *vg; + int ret; + + vg = kzalloc(sizeof(*vg), GFP_KERNEL); + if (!vg) + return -ENOMEM; + ret = rhashtable_init(&vg->vni_hash, &vxlan_vni_rht_params); + if (ret) { + kfree(vg); + return ret; + } + INIT_LIST_HEAD(&vg->vni_list); + rcu_assign_pointer(vxlan->vnigrp, vg); + + return 0; +} + +static int vxlan_vnifilter_process(struct sk_buff *skb, struct nlmsghdr *nlh, + struct 
netlink_ext_ack *extack) +{ + struct net *net = sock_net(skb->sk); + struct tunnel_msg *tmsg; + struct vxlan_dev *vxlan; + struct net_device *dev; + struct nlattr *attr; + int err, vnis = 0; + int rem; + + /* this should validate the header and check for remaining bytes */ + err = nlmsg_parse(nlh, sizeof(*tmsg), NULL, VXLAN_VNIFILTER_MAX, + vni_filter_policy, extack); + if (err < 0) + return err; + + tmsg = nlmsg_data(nlh); + dev = __dev_get_by_index(net, tmsg->ifindex); + if (!dev) + return -ENODEV; + + if (!netif_is_vxlan(dev)) { + NL_SET_ERR_MSG_MOD(extack, "The device is not a vxlan device"); + return -EINVAL; + } + + vxlan = netdev_priv(dev); + + if (!(vxlan->cfg.flags & VXLAN_F_VNIFILTER)) + return -EOPNOTSUPP; + + nlmsg_for_each_attr(attr, nlh, sizeof(*tmsg), rem) { + switch (nla_type(attr)) { + case VXLAN_VNIFILTER_ENTRY: + err = vxlan_process_vni_filter(vxlan, attr, + nlh->nlmsg_type, extack); + break; + default: + continue; + } + vnis++; + if (err) + break; + } + + if (!vnis) { + NL_SET_ERR_MSG_MOD(extack, "No vnis found to process"); + err = -EINVAL; + } + + return err; +} + +void vxlan_vnifilter_init(void) +{ + rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETTUNNEL, NULL, + vxlan_vnifilter_dump, 0); + rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_NEWTUNNEL, + vxlan_vnifilter_process, NULL, 0); + rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_DELTUNNEL, + vxlan_vnifilter_process, NULL, 0); +} + +void vxlan_vnifilter_uninit(void) +{ + rtnl_unregister(PF_BRIDGE, RTM_GETTUNNEL); + rtnl_unregister(PF_BRIDGE, RTM_NEWTUNNEL); + rtnl_unregister(PF_BRIDGE, RTM_DELTUNNEL); +} diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c index 6a142dc85c37..76c6b4f89890 100644 --- a/drivers/net/wan/lmc/lmc_main.c +++ b/drivers/net/wan/lmc/lmc_main.c @@ -57,6 +57,7 @@ #include <asm/io.h> #include <asm/dma.h> #include <linux/uaccess.h> +#include <linux/jiffies.h> //#include <asm/spinlock.h> #define DRIVER_MAJOR_VERSION 1 @@ -1968,7 +1969,7 @@ static void lmc_driver_timeout(struct net_device *dev, unsigned int txqueue) printk("%s: Xmitter busy|\n", dev->name); sc->extra_stats.tx_tbusy_calls++; - if (jiffies - dev_trans_start(dev) < TX_TIMEOUT) + if (time_is_before_jiffies(dev_trans_start(dev) + TX_TIMEOUT)) goto bug_out; /* diff --git a/drivers/net/wan/slic_ds26522.c b/drivers/net/wan/slic_ds26522.c index 8e3b1c717c10..6063552cea9b 100644 --- a/drivers/net/wan/slic_ds26522.c +++ b/drivers/net/wan/slic_ds26522.c @@ -194,10 +194,9 @@ static int slic_ds26522_init_configure(struct spi_device *spi) return 0; } -static int slic_ds26522_remove(struct spi_device *spi) +static void slic_ds26522_remove(struct spi_device *spi) { pr_info("DS26522 module uninstalled\n"); - return 0; } static int slic_ds26522_probe(struct spi_device *spi) diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c index b22ed499f7ba..a56fab6232a9 100644 --- a/drivers/net/wireless/ath/ath6kl/txrx.c +++ b/drivers/net/wireless/ath/ath6kl/txrx.c @@ -839,7 +839,7 @@ static void ath6kl_deliver_frames_to_nw_stack(struct net_device *dev, skb->protocol = eth_type_trans(skb, skb->dev); - netif_rx_ni(skb); + netif_rx(skb); } static void ath6kl_alloc_netbufs(struct sk_buff_head *q, u16 num) diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index cc830c795b33..5704defd7be1 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -958,7 +958,7 @@ void wil_netif_rx(struct sk_buff *skb, struct 
net_device *ndev, int cid, if (gro) napi_gro_receive(&wil->napi_rx, skb); else - netif_rx_ni(skb); + netif_rx(skb); } ndev->stats.rx_packets++; stats->rx_packets++; diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index dd8abbb28849..98b4c189eecc 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -1199,7 +1199,7 @@ static void wmi_evt_eapol_rx(struct wil6210_vif *vif, int id, void *d, int len) eth->h_proto = cpu_to_be16(ETH_P_PAE); skb_put_data(skb, evt->eapol, eapol_len); skb->protocol = eth_type_trans(skb, ndev); - if (likely(netif_rx_ni(skb) == NET_RX_SUCCESS)) { + if (likely(netif_rx(skb) == NET_RX_SUCCESS)) { ndev->stats.rx_packets++; ndev->stats.rx_bytes += sz; if (stats) { diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c index 3984fd7d918e..2c95a08a5871 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c @@ -397,9 +397,9 @@ brcmf_proto_bcdc_add_tdls_peer(struct brcmf_pub *drvr, int ifidx, } static void brcmf_proto_bcdc_rxreorder(struct brcmf_if *ifp, - struct sk_buff *skb, bool inirq) + struct sk_buff *skb) { - brcmf_fws_rxreorder(ifp, skb, inirq); + brcmf_fws_rxreorder(ifp, skb); } static void diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c index fed9cd5f29a2..26fab4bee22c 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c @@ -400,7 +400,7 @@ void brcmf_txflowblock_if(struct brcmf_if *ifp, spin_unlock_irqrestore(&ifp->netif_stop_lock, flags); } -void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb, bool inirq) +void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb) { /* Most of Broadcom's firmwares send 802.11f ADD frame every time a new * STA connects to the AP interface. This is an obsoleted standard most @@ -423,15 +423,7 @@ void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb, bool inirq) ifp->ndev->stats.rx_packets++; brcmf_dbg(DATA, "rx proto=0x%X\n", ntohs(skb->protocol)); - if (inirq) { - netif_rx(skb); - } else { - /* If the receive is not processed inside an ISR, - * the softirqd must be woken explicitly to service - * the NET_RX_SOFTIRQ. This is handled by netif_rx_ni(). 
- */ - netif_rx_ni(skb); - } + netif_rx(skb); } void brcmf_netif_mon_rx(struct brcmf_if *ifp, struct sk_buff *skb) @@ -480,7 +472,7 @@ void brcmf_netif_mon_rx(struct brcmf_if *ifp, struct sk_buff *skb) skb->pkt_type = PACKET_OTHERHOST; skb->protocol = htons(ETH_P_802_2); - brcmf_netif_rx(ifp, skb, false); + brcmf_netif_rx(ifp, skb); } static int brcmf_rx_hdrpull(struct brcmf_pub *drvr, struct sk_buff *skb, @@ -515,7 +507,7 @@ void brcmf_rx_frame(struct device *dev, struct sk_buff *skb, bool handle_event, return; if (brcmf_proto_is_reorder_skb(skb)) { - brcmf_proto_rxreorder(ifp, skb, inirq); + brcmf_proto_rxreorder(ifp, skb); } else { /* Process special event packets */ if (handle_event) { @@ -524,7 +516,7 @@ void brcmf_rx_frame(struct device *dev, struct sk_buff *skb, bool handle_event, brcmf_fweh_process_skb(ifp->drvr, skb, BCMILCP_SUBTYPE_VENDOR_LONG, gfp); } - brcmf_netif_rx(ifp, skb, inirq); + brcmf_netif_rx(ifp, skb); } } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h index 8212c9de14f1..340346c122d3 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h @@ -208,7 +208,7 @@ void brcmf_remove_interface(struct brcmf_if *ifp, bool locked); void brcmf_txflowblock_if(struct brcmf_if *ifp, enum brcmf_netif_stop_reason reason, bool state); void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success); -void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb, bool inirq); +void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb); void brcmf_netif_mon_rx(struct brcmf_if *ifp, struct sk_buff *skb); void brcmf_net_detach(struct net_device *ndev, bool locked); int brcmf_net_mon_attach(struct brcmf_if *ifp); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c index 1001c8888bfe..dcbe55b56e43 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c @@ -695,7 +695,7 @@ int brcmf_fw_get_firmwares(struct device *dev, struct brcmf_fw_request *req, { struct brcmf_fw_item *first = &req->items[0]; struct brcmf_fw *fwctx; - char *alt_path; + char *alt_path = NULL; int ret; brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(dev)); @@ -714,7 +714,9 @@ int brcmf_fw_get_firmwares(struct device *dev, struct brcmf_fw_request *req, fwctx->done = fw_cb; /* First try alternative board-specific path if any */ - alt_path = brcm_alt_fw_path(first->path, fwctx->req->board_type); + if (fwctx->req->board_type) + alt_path = brcm_alt_fw_path(first->path, + fwctx->req->board_type); if (alt_path) { ret = request_firmware_nowait(THIS_MODULE, true, alt_path, fwctx->dev, GFP_KERNEL, fwctx, diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c index 19b0f318f93e..d58525ebe618 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c @@ -1664,7 +1664,7 @@ static void brcmf_rxreorder_get_skb_list(struct brcmf_ampdu_rx_reorder *rfi, rfi->pend_pkts -= skb_queue_len(skb_list); } -void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt, bool inirq) +void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt) { struct brcmf_pub *drvr = ifp->drvr; u8 *reorder_data; @@ -1682,7 +1682,7 @@ void brcmf_fws_rxreorder(struct 
brcmf_if *ifp, struct sk_buff *pkt, bool inirq) /* validate flags and flow id */ if (flags == 0xFF) { bphy_err(drvr, "invalid flags...so ignore this packet\n"); - brcmf_netif_rx(ifp, pkt, inirq); + brcmf_netif_rx(ifp, pkt); return; } @@ -1694,7 +1694,7 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt, bool inirq) if (rfi == NULL) { brcmf_dbg(INFO, "received flags to cleanup, but no flow (%d) yet\n", flow_id); - brcmf_netif_rx(ifp, pkt, inirq); + brcmf_netif_rx(ifp, pkt); return; } @@ -1719,7 +1719,7 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt, bool inirq) rfi = kzalloc(buf_size, GFP_ATOMIC); if (rfi == NULL) { bphy_err(drvr, "failed to alloc buffer\n"); - brcmf_netif_rx(ifp, pkt, inirq); + brcmf_netif_rx(ifp, pkt); return; } @@ -1833,7 +1833,7 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt, bool inirq) netif_rx: skb_queue_walk_safe(&reorder_list, pkt, pnext) { __skb_unlink(pkt, &reorder_list); - brcmf_netif_rx(ifp, pkt, inirq); + brcmf_netif_rx(ifp, pkt); } } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h index 50e424b5880d..b16a9d1c0508 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h @@ -42,6 +42,6 @@ void brcmf_fws_add_interface(struct brcmf_if *ifp); void brcmf_fws_del_interface(struct brcmf_if *ifp); void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb); void brcmf_fws_bus_blocked(struct brcmf_pub *drvr, bool flow_blocked); -void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb, bool inirq); +void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb); #endif /* FWSIGNAL_H_ */ diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c index 7c8e08ee8f0f..b2d0f7570aa9 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c @@ -536,8 +536,7 @@ static int brcmf_msgbuf_hdrpull(struct brcmf_pub *drvr, bool do_fws, return -ENODEV; } -static void brcmf_msgbuf_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb, - bool inirq) +static void brcmf_msgbuf_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb) { } @@ -1191,7 +1190,7 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf) } skb->protocol = eth_type_trans(skb, ifp->ndev); - brcmf_netif_rx(ifp, skb, false); + brcmf_netif_rx(ifp, skb); } static void brcmf_msgbuf_process_gen_status(struct brcmf_msgbuf *msgbuf, diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h index f4a79e217da5..bd08d3aaa8f4 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h @@ -32,7 +32,7 @@ struct brcmf_proto { u8 peer[ETH_ALEN]); void (*add_tdls_peer)(struct brcmf_pub *drvr, int ifidx, u8 peer[ETH_ALEN]); - void (*rxreorder)(struct brcmf_if *ifp, struct sk_buff *skb, bool inirq); + void (*rxreorder)(struct brcmf_if *ifp, struct sk_buff *skb); void (*add_if)(struct brcmf_if *ifp); void (*del_if)(struct brcmf_if *ifp); void (*reset_if)(struct brcmf_if *ifp); @@ -109,9 +109,9 @@ static inline bool brcmf_proto_is_reorder_skb(struct sk_buff *skb) } static inline void -brcmf_proto_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb, bool inirq) 
+brcmf_proto_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb) { - ifp->drvr->proto->rxreorder(ifp, skb, inirq); + ifp->drvr->proto->rxreorder(ifp, skb); } static inline void diff --git a/drivers/net/wireless/intel/Makefile b/drivers/net/wireless/intel/Makefile index 1364b0014488..208e73a16051 100644 --- a/drivers/net/wireless/intel/Makefile +++ b/drivers/net/wireless/intel/Makefile @@ -5,3 +5,4 @@ obj-$(CONFIG_IPW2200) += ipw2x00/ obj-$(CONFIG_IWLEGACY) += iwlegacy/ obj-$(CONFIG_IWLWIFI) += iwlwifi/ +obj-$(CONFIG_IWLMEI) += iwlwifi/ diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig index c21c0c68849a..85e704283755 100644 --- a/drivers/net/wireless/intel/iwlwifi/Kconfig +++ b/drivers/net/wireless/intel/iwlwifi/Kconfig @@ -80,19 +80,6 @@ config IWLWIFI_OPMODE_MODULAR comment "WARNING: iwlwifi is useless without IWLDVM or IWLMVM" depends on IWLDVM=n && IWLMVM=n -config IWLWIFI_BCAST_FILTERING - bool "Enable broadcast filtering" - depends on IWLMVM - help - Say Y here to enable default bcast filtering configuration. - - Enabling broadcast filtering will drop any incoming wireless - broadcast frames, except some very specific predefined - patterns (e.g. incoming arp requests). - - If unsure, don't enable this option, as some programs might - expect incoming broadcasts for their normal operations. - menu "Debugging Options" config IWLWIFI_DEBUG diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c index dd220e1d7996..33aae639ad37 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2017 Intel Deutschland GmbH - * Copyright (C) 2019-2021 Intel Corporation + * Copyright (C) 2019-2022 Intel Corporation */ #include <linux/uuid.h> #include <linux/dmi.h> @@ -923,10 +923,11 @@ bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt) * only one using version 36, so skip this version entirely. 
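+	 * Of the devices running version 17, the 3160 does not support this
+	 * either, hence the explicit hw_rev check below.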
*/ return IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) >= 38 || - IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 17 || - (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 29 && - ((fwrt->trans->hw_rev & CSR_HW_REV_TYPE_MSK) == - CSR_HW_REV_TYPE_7265D)); + (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 17 && + fwrt->trans->hw_rev != CSR_HW_REV_TYPE_3160) || + (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 29 && + ((fwrt->trans->hw_rev & CSR_HW_REV_TYPE_MSK) == + CSR_HW_REV_TYPE_7265D)); } IWL_EXPORT_SYMBOL(iwl_sar_geo_support); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h index a91bd66ecb30..c78d2f1c722c 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h @@ -494,11 +494,6 @@ enum iwl_legacy_cmds { DEBUG_LOG_MSG = 0xf7, /** - * @BCAST_FILTER_CMD: &struct iwl_bcast_filter_cmd - */ - BCAST_FILTER_CMD = 0xcf, - - /** * @MCAST_FILTER_CMD: &struct iwl_mcast_filter_cmd */ MCAST_FILTER_CMD = 0xd0, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/filter.h b/drivers/net/wireless/intel/iwlwifi/fw/api/filter.h index dd62a63956b3..e44c70b7c790 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/filter.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/filter.h @@ -36,92 +36,4 @@ struct iwl_mcast_filter_cmd { u8 addr_list[0]; } __packed; /* MCAST_FILTERING_CMD_API_S_VER_1 */ -#define MAX_BCAST_FILTERS 8 -#define MAX_BCAST_FILTER_ATTRS 2 - -/** - * enum iwl_mvm_bcast_filter_attr_offset - written by fw for each Rx packet - * @BCAST_FILTER_OFFSET_PAYLOAD_START: offset is from payload start. - * @BCAST_FILTER_OFFSET_IP_END: offset is from ip header end (i.e. - * start of ip payload). - */ -enum iwl_mvm_bcast_filter_attr_offset { - BCAST_FILTER_OFFSET_PAYLOAD_START = 0, - BCAST_FILTER_OFFSET_IP_END = 1, -}; - -/** - * struct iwl_fw_bcast_filter_attr - broadcast filter attribute - * @offset_type: &enum iwl_mvm_bcast_filter_attr_offset. - * @offset: starting offset of this pattern. - * @reserved1: reserved - * @val: value to match - big endian (MSB is the first - * byte to match from offset pos). - * @mask: mask to match (big endian). - */ -struct iwl_fw_bcast_filter_attr { - u8 offset_type; - u8 offset; - __le16 reserved1; - __be32 val; - __be32 mask; -} __packed; /* BCAST_FILTER_ATT_S_VER_1 */ - -/** - * enum iwl_mvm_bcast_filter_frame_type - filter frame type - * @BCAST_FILTER_FRAME_TYPE_ALL: consider all frames. - * @BCAST_FILTER_FRAME_TYPE_IPV4: consider only ipv4 frames - */ -enum iwl_mvm_bcast_filter_frame_type { - BCAST_FILTER_FRAME_TYPE_ALL = 0, - BCAST_FILTER_FRAME_TYPE_IPV4 = 1, -}; - -/** - * struct iwl_fw_bcast_filter - broadcast filter - * @discard: discard frame (1) or let it pass (0). - * @frame_type: &enum iwl_mvm_bcast_filter_frame_type. - * @reserved1: reserved - * @num_attrs: number of valid attributes in this filter. - * @attrs: attributes of this filter. a filter is considered matched - * only when all its attributes are matched (i.e. AND relationship) - */ -struct iwl_fw_bcast_filter { - u8 discard; - u8 frame_type; - u8 num_attrs; - u8 reserved1; - struct iwl_fw_bcast_filter_attr attrs[MAX_BCAST_FILTER_ATTRS]; -} __packed; /* BCAST_FILTER_S_VER_1 */ - -/** - * struct iwl_fw_bcast_mac - per-mac broadcast filtering configuration. - * @default_discard: default action for this mac (discard (1) / pass (0)). - * @reserved1: reserved - * @attached_filters: bitmap of relevant filters for this mac. 
- */ -struct iwl_fw_bcast_mac { - u8 default_discard; - u8 reserved1; - __le16 attached_filters; -} __packed; /* BCAST_MAC_CONTEXT_S_VER_1 */ - -/** - * struct iwl_bcast_filter_cmd - broadcast filtering configuration - * @disable: enable (0) / disable (1) - * @max_bcast_filters: max number of filters (MAX_BCAST_FILTERS) - * @max_macs: max number of macs (NUM_MAC_INDEX_DRIVER) - * @reserved1: reserved - * @filters: broadcast filters - * @macs: broadcast filtering configuration per-mac - */ -struct iwl_bcast_filter_cmd { - u8 disable; - u8 max_bcast_filters; - u8 max_macs; - u8 reserved1; - struct iwl_fw_bcast_filter filters[MAX_BCAST_FILTERS]; - struct iwl_fw_bcast_mac macs[NUM_MAC_INDEX_DRIVER]; -} __packed; /* BCAST_FILTERING_HCMD_API_S_VER_1 */ - #endif /* __iwl_fw_api_filter_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h index 2198ca5269e1..687f804c46b7 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h @@ -752,7 +752,6 @@ struct iwl_lq_cmd { u8 iwl_fw_rate_idx_to_plcp(int idx); u32 iwl_new_rate_from_v1(u32 rate_v1); -u32 iwl_legacy_rate_to_fw_idx(u32 rate_n_flags); const struct iwl_rate_mcs_info *iwl_rate_mcs(int idx); const char *iwl_rs_pretty_ant(u8 ant); const char *iwl_rs_pretty_bw(int bw); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h index 35e4adb7006f..5679a78758be 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/file.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h @@ -181,7 +181,6 @@ struct iwl_ucode_capa { * @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE: new NS offload (large version) * @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: General support for uAPSD * @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save - * @IWL_UCODE_TLV_FLAGS_BCAST_FILTERING: uCode supports broadcast filtering. * @IWL_UCODE_TLV_FLAGS_EBS_SUPPORT: this uCode image supports EBS. */ enum iwl_ucode_tlv_flag { @@ -196,7 +195,6 @@ enum iwl_ucode_tlv_flag { IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT = BIT(24), IWL_UCODE_TLV_FLAGS_EBS_SUPPORT = BIT(25), IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD = BIT(26), - IWL_UCODE_TLV_FLAGS_BCAST_FILTERING = BIT(29), }; typedef unsigned int __bitwise iwl_ucode_tlv_api_t; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/rs.c b/drivers/net/wireless/intel/iwlwifi/fw/rs.c index a21c3befd93b..a835214611ce 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/rs.c @@ -91,6 +91,20 @@ const char *iwl_rs_pretty_bw(int bw) } IWL_EXPORT_SYMBOL(iwl_rs_pretty_bw); +static u32 iwl_legacy_rate_to_fw_idx(u32 rate_n_flags) +{ + int rate = rate_n_flags & RATE_LEGACY_RATE_MSK_V1; + int idx; + bool ofdm = !(rate_n_flags & RATE_MCS_CCK_MSK_V1); + int offset = ofdm ? IWL_FIRST_OFDM_RATE : 0; + int last = ofdm ? IWL_RATE_COUNT_LEGACY : IWL_FIRST_OFDM_RATE; + + for (idx = offset; idx < last; idx++) + if (iwl_fw_rate_idx_to_plcp(idx) == rate) + return idx - offset; + return IWL_RATE_INVALID; +} + u32 iwl_new_rate_from_v1(u32 rate_v1) { u32 rate_v2 = 0; @@ -144,7 +158,10 @@ u32 iwl_new_rate_from_v1(u32 rate_v1) } else { u32 legacy_rate = iwl_legacy_rate_to_fw_idx(rate_v1); - WARN_ON(legacy_rate < 0); + if (WARN_ON_ONCE(legacy_rate == IWL_RATE_INVALID)) + legacy_rate = (rate_v1 & RATE_MCS_CCK_MSK_V1) ? 
+ IWL_FIRST_CCK_RATE : IWL_FIRST_OFDM_RATE; + rate_v2 |= legacy_rate; if (!(rate_v1 & RATE_MCS_CCK_MSK_V1)) rate_v2 |= RATE_MCS_LEGACY_OFDM_MSK; @@ -172,20 +189,6 @@ u32 iwl_new_rate_from_v1(u32 rate_v1) } IWL_EXPORT_SYMBOL(iwl_new_rate_from_v1); -u32 iwl_legacy_rate_to_fw_idx(u32 rate_n_flags) -{ - int rate = rate_n_flags & RATE_LEGACY_RATE_MSK_V1; - int idx; - bool ofdm = !(rate_n_flags & RATE_MCS_CCK_MSK_V1); - int offset = ofdm ? IWL_FIRST_OFDM_RATE : 0; - int last = ofdm ? IWL_RATE_COUNT_LEGACY : IWL_FIRST_OFDM_RATE; - - for (idx = offset; idx < last; idx++) - if (iwl_fw_rate_idx_to_plcp(idx) == rate) - return idx - offset; - return -1; -} - int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate) { char *type; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h index c0a18e820b51..3e1f011e93aa 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2018-2021 Intel Corporation + * Copyright (C) 2005-2014, 2018-2022 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2016 Intel Deutschland GmbH */ @@ -329,6 +329,7 @@ enum { #define CSR_HW_REV_TYPE_2x00 (0x0000100) #define CSR_HW_REV_TYPE_105 (0x0000110) #define CSR_HW_REV_TYPE_135 (0x0000120) +#define CSR_HW_REV_TYPE_3160 (0x0000164) #define CSR_HW_REV_TYPE_7265D (0x0000210) #define CSR_HW_REV_TYPE_NONE (0x00001F0) #define CSR_HW_REV_TYPE_QNJ (0x0000360) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index 8bb4f54d9b38..a2203f661321 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -1681,6 +1681,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) out_unbind: complete(&drv->request_firmware_complete); device_release_driver(drv->trans->dev); + /* drv has just been freed by the release */ + failure = false; free: if (failure) iwl_dealloc_ucode(drv); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index 0dfd69fcd5d7..9040da3dcce3 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -553,8 +553,7 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = { .has_he = true, .he_cap_elem = { .mac_cap_info[0] = - IEEE80211_HE_MAC_CAP0_HTC_HE | - IEEE80211_HE_MAC_CAP0_TWT_REQ, + IEEE80211_HE_MAC_CAP0_HTC_HE, .mac_cap_info[1] = IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US | IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8, diff --git a/drivers/net/wireless/intel/iwlwifi/mei/main.c b/drivers/net/wireless/intel/iwlwifi/mei/main.c index 25975d02da6a..b4f45234cfc8 100644 --- a/drivers/net/wireless/intel/iwlwifi/mei/main.c +++ b/drivers/net/wireless/intel/iwlwifi/mei/main.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (C) 2021 Intel Corporation + * Copyright (C) 2021-2022 Intel Corporation */ #include <linux/etherdevice.h> @@ -146,6 +146,7 @@ struct iwl_mei_filters { * @csme_taking_ownership: true when CSME is taking ownership. Used to remember * to send CSME_OWNERSHIP_CONFIRMED when the driver completes its down * flow. 
+ * @link_prot_state: true when we are in link protection PASSIVE * @csa_throttle_end_wk: used when &csa_throttled is true * @data_q_lock: protects the access to the data queues which are * accessed without the mutex. @@ -165,6 +166,7 @@ struct iwl_mei { bool amt_enabled; bool csa_throttled; bool csme_taking_ownership; + bool link_prot_state; struct delayed_work csa_throttle_end_wk; spinlock_t data_q_lock; @@ -229,8 +231,6 @@ static int iwl_mei_alloc_shared_mem(struct mei_cl_device *cldev) if (IS_ERR(mem->ctrl)) { int ret = PTR_ERR(mem->ctrl); - dev_err(&cldev->dev, "Couldn't allocate the shared memory: %d\n", - ret); mem->ctrl = NULL; return ret; @@ -669,6 +669,8 @@ iwl_mei_handle_conn_status(struct mei_cl_device *cldev, iwl_mei_cache.ops->me_conn_status(iwl_mei_cache.priv, &conn_info); + mei->link_prot_state = status->link_prot_state; + /* * Update the Rfkill state in case the host does not own the device: * if we are in Link Protection, ask to not touch the device, else, @@ -1663,9 +1665,11 @@ int iwl_mei_register(void *priv, const struct iwl_mei_ops *ops) mei_cldev_get_drvdata(iwl_mei_global_cldev); /* we have already a SAP connection */ - if (iwl_mei_is_connected()) + if (iwl_mei_is_connected()) { iwl_mei_send_sap_msg(mei->cldev, SAP_MSG_NOTIF_WIFIDR_UP); + ops->rfkill(priv, mei->link_prot_state); + } } ret = 0; @@ -1784,6 +1788,8 @@ static void iwl_mei_dbgfs_unregister(struct iwl_mei *mei) {} #endif /* CONFIG_DEBUG_FS */ +#define ALLOC_SHARED_MEM_RETRY_MAX_NUM 3 + /* * iwl_mei_probe - the probe function called by the mei bus enumeration * @@ -1795,6 +1801,7 @@ static void iwl_mei_dbgfs_unregister(struct iwl_mei *mei) {} static int iwl_mei_probe(struct mei_cl_device *cldev, const struct mei_cl_device_id *id) { + int alloc_retry = ALLOC_SHARED_MEM_RETRY_MAX_NUM; struct iwl_mei *mei; int ret; @@ -1812,15 +1819,31 @@ static int iwl_mei_probe(struct mei_cl_device *cldev, mei_cldev_set_drvdata(cldev, mei); mei->cldev = cldev; - /* - * The CSME firmware needs to boot the internal WLAN client. Wait here - * so that the DMA map request will succeed. - */ - msleep(20); + do { + ret = iwl_mei_alloc_shared_mem(cldev); + if (!ret) + break; + /* + * The CSME firmware needs to boot the internal WLAN client. + * This can take time in certain configurations (usually + * upon resume and when the whole CSME firmware is shut down + * during suspend). + * + * Wait a bit before retrying and hope we'll succeed next time. 
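+		 *
+		 * Each failed attempt is followed by a 100ms sleep and we
+		 * retry up to ALLOC_SHARED_MEM_RETRY_MAX_NUM (3) times, so
+		 * this waits for at most ~300ms in total.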
+ */ - ret = iwl_mei_alloc_shared_mem(cldev); - if (ret) + dev_dbg(&cldev->dev, + "Couldn't allocate the shared memory: %d, attempt %d / %d\n", + ret, alloc_retry, ALLOC_SHARED_MEM_RETRY_MAX_NUM); + msleep(100); + alloc_retry--; + } while (alloc_retry); + + if (ret) { + dev_err(&cldev->dev, "Couldn't allocate the shared memory: %d\n", + ret); goto free; + } iwl_mei_init_shared_mem(mei); diff --git a/drivers/net/wireless/intel/iwlwifi/mei/net.c b/drivers/net/wireless/intel/iwlwifi/mei/net.c index fa2dbdca5592..3472167c8370 100644 --- a/drivers/net/wireless/intel/iwlwifi/mei/net.c +++ b/drivers/net/wireless/intel/iwlwifi/mei/net.c @@ -195,8 +195,7 @@ static bool iwl_mei_rx_filter_ipv4(struct sk_buff *skb, bool match; if (!pskb_may_pull(skb, skb_network_offset(skb) + sizeof(*iphdr)) || - !pskb_may_pull(skb, skb_network_offset(skb) + - sizeof(ip_hdrlen(skb) - sizeof(*iphdr)))) + !pskb_may_pull(skb, skb_network_offset(skb) + ip_hdrlen(skb))) return false; iphdrlen = ip_hdrlen(skb); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index fecd7d4a7bdc..49898fd99594 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -5,6 +5,7 @@ * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #include <linux/vmalloc.h> +#include <linux/err.h> #include <linux/ieee80211.h> #include <linux/netdevice.h> @@ -1368,189 +1369,6 @@ static ssize_t iwl_dbgfs_dbg_time_point_write(struct iwl_mvm *mvm, return count; } -#define ADD_TEXT(...) pos += scnprintf(buf + pos, bufsz - pos, __VA_ARGS__) -#ifdef CONFIG_IWLWIFI_BCAST_FILTERING -static ssize_t iwl_dbgfs_bcast_filters_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_mvm *mvm = file->private_data; - struct iwl_bcast_filter_cmd cmd; - const struct iwl_fw_bcast_filter *filter; - char *buf; - int bufsz = 1024; - int i, j, pos = 0; - ssize_t ret; - - buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - mutex_lock(&mvm->mutex); - if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) { - ADD_TEXT("None\n"); - mutex_unlock(&mvm->mutex); - goto out; - } - mutex_unlock(&mvm->mutex); - - for (i = 0; cmd.filters[i].attrs[0].mask; i++) { - filter = &cmd.filters[i]; - - ADD_TEXT("Filter [%d]:\n", i); - ADD_TEXT("\tDiscard=%d\n", filter->discard); - ADD_TEXT("\tFrame Type: %s\n", - filter->frame_type ? "IPv4" : "Generic"); - - for (j = 0; j < ARRAY_SIZE(filter->attrs); j++) { - const struct iwl_fw_bcast_filter_attr *attr; - - attr = &filter->attrs[j]; - if (!attr->mask) - break; - - ADD_TEXT("\tAttr [%d]: offset=%d (from %s), mask=0x%x, value=0x%x reserved=0x%x\n", - j, attr->offset, - attr->offset_type ? 
"IP End" : - "Payload Start", - be32_to_cpu(attr->mask), - be32_to_cpu(attr->val), - le16_to_cpu(attr->reserved1)); - } - } -out: - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - return ret; -} - -static ssize_t iwl_dbgfs_bcast_filters_write(struct iwl_mvm *mvm, char *buf, - size_t count, loff_t *ppos) -{ - int pos, next_pos; - struct iwl_fw_bcast_filter filter = {}; - struct iwl_bcast_filter_cmd cmd; - u32 filter_id, attr_id, mask, value; - int err = 0; - - if (sscanf(buf, "%d %hhi %hhi %n", &filter_id, &filter.discard, - &filter.frame_type, &pos) != 3) - return -EINVAL; - - if (filter_id >= ARRAY_SIZE(mvm->dbgfs_bcast_filtering.cmd.filters) || - filter.frame_type > BCAST_FILTER_FRAME_TYPE_IPV4) - return -EINVAL; - - for (attr_id = 0; attr_id < ARRAY_SIZE(filter.attrs); - attr_id++) { - struct iwl_fw_bcast_filter_attr *attr = - &filter.attrs[attr_id]; - - if (pos >= count) - break; - - if (sscanf(&buf[pos], "%hhi %hhi %i %i %n", - &attr->offset, &attr->offset_type, - &mask, &value, &next_pos) != 4) - return -EINVAL; - - attr->mask = cpu_to_be32(mask); - attr->val = cpu_to_be32(value); - if (mask) - filter.num_attrs++; - - pos += next_pos; - } - - mutex_lock(&mvm->mutex); - memcpy(&mvm->dbgfs_bcast_filtering.cmd.filters[filter_id], - &filter, sizeof(filter)); - - /* send updated bcast filtering configuration */ - if (iwl_mvm_firmware_running(mvm) && - mvm->dbgfs_bcast_filtering.override && - iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) - err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0, - sizeof(cmd), &cmd); - mutex_unlock(&mvm->mutex); - - return err ?: count; -} - -static ssize_t iwl_dbgfs_bcast_filters_macs_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_mvm *mvm = file->private_data; - struct iwl_bcast_filter_cmd cmd; - char *buf; - int bufsz = 1024; - int i, pos = 0; - ssize_t ret; - - buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - mutex_lock(&mvm->mutex); - if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) { - ADD_TEXT("None\n"); - mutex_unlock(&mvm->mutex); - goto out; - } - mutex_unlock(&mvm->mutex); - - for (i = 0; i < ARRAY_SIZE(cmd.macs); i++) { - const struct iwl_fw_bcast_mac *mac = &cmd.macs[i]; - - ADD_TEXT("Mac [%d]: discard=%d attached_filters=0x%x\n", - i, mac->default_discard, mac->attached_filters); - } -out: - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - return ret; -} - -static ssize_t iwl_dbgfs_bcast_filters_macs_write(struct iwl_mvm *mvm, - char *buf, size_t count, - loff_t *ppos) -{ - struct iwl_bcast_filter_cmd cmd; - struct iwl_fw_bcast_mac mac = {}; - u32 mac_id, attached_filters; - int err = 0; - - if (!mvm->bcast_filters) - return -ENOENT; - - if (sscanf(buf, "%d %hhi %i", &mac_id, &mac.default_discard, - &attached_filters) != 3) - return -EINVAL; - - if (mac_id >= ARRAY_SIZE(cmd.macs) || - mac.default_discard > 1 || - attached_filters >= BIT(ARRAY_SIZE(cmd.filters))) - return -EINVAL; - - mac.attached_filters = cpu_to_le16(attached_filters); - - mutex_lock(&mvm->mutex); - memcpy(&mvm->dbgfs_bcast_filtering.cmd.macs[mac_id], - &mac, sizeof(mac)); - - /* send updated bcast filtering configuration */ - if (iwl_mvm_firmware_running(mvm) && - mvm->dbgfs_bcast_filtering.override && - iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) - err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0, - sizeof(cmd), &cmd); - mutex_unlock(&mvm->mutex); - - return err ?: count; -} -#endif - #define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \ 
_MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm) #define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \ @@ -1881,11 +1699,6 @@ MVM_DEBUGFS_WRITE_FILE_OPS(inject_beacon_ie_restore, 512); MVM_DEBUGFS_READ_FILE_OPS(uapsd_noagg_bssids); -#ifdef CONFIG_IWLWIFI_BCAST_FILTERING -MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters, 256); -MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters_macs, 256); -#endif - #ifdef CONFIG_ACPI MVM_DEBUGFS_READ_FILE_OPS(sar_geo_profile); #endif @@ -2043,7 +1856,6 @@ void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw, void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm) { struct dentry *bcast_dir __maybe_unused; - char buf[100]; spin_lock_init(&mvm->drv_stats_lock); @@ -2095,21 +1907,6 @@ void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm) MVM_DEBUGFS_ADD_FILE(uapsd_noagg_bssids, mvm->debugfs_dir, S_IRUSR); -#ifdef CONFIG_IWLWIFI_BCAST_FILTERING - if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING) { - bcast_dir = debugfs_create_dir("bcast_filtering", - mvm->debugfs_dir); - - debugfs_create_bool("override", 0600, bcast_dir, - &mvm->dbgfs_bcast_filtering.override); - - MVM_DEBUGFS_ADD_FILE_ALIAS("filters", bcast_filters, - bcast_dir, 0600); - MVM_DEBUGFS_ADD_FILE_ALIAS("macs", bcast_filters_macs, - bcast_dir, 0600); - } -#endif - #ifdef CONFIG_PM_SLEEP MVM_DEBUGFS_ADD_FILE(d3_test, mvm->debugfs_dir, 0400); debugfs_create_bool("d3_wake_sysassert", 0600, mvm->debugfs_dir, @@ -2140,6 +1937,11 @@ void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm) * Create a symlink with mac80211. It will be removed when mac80211 * exists (before the opmode exists which removes the target.) */ - snprintf(buf, 100, "../../%pd2", mvm->debugfs_dir->d_parent); - debugfs_create_symlink("iwlwifi", mvm->hw->wiphy->debugfsdir, buf); + if (!IS_ERR(mvm->debugfs_dir)) { + char buf[100]; + + snprintf(buf, 100, "../../%pd2", mvm->debugfs_dir->d_parent); + debugfs_create_symlink("iwlwifi", mvm->hw->wiphy->debugfsdir, + buf); + } } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 94d291a56e4a..4632d3ad1a2b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -1643,7 +1643,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm) ret = iwl_mvm_sar_init(mvm); if (ret == 0) ret = iwl_mvm_sar_geo_init(mvm); - else if (ret < 0) + if (ret < 0) goto error; ret = iwl_mvm_sgom_init(mvm); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 81d02a17dfe4..784d91281c02 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -55,79 +55,6 @@ static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = { }, }; -#ifdef CONFIG_IWLWIFI_BCAST_FILTERING -/* - * Use the reserved field to indicate magic values. - * these values will only be used internally by the driver, - * and won't make it to the fw (reserved will be 0). - * BC_FILTER_MAGIC_IP - configure the val of this attribute to - * be the vif's ip address. in case there is not a single - * ip address (0, or more than 1), this attribute will - * be skipped. 
- * BC_FILTER_MAGIC_MAC - set the val of this attribute to - * the LSB bytes of the vif's mac address - */ -enum { - BC_FILTER_MAGIC_NONE = 0, - BC_FILTER_MAGIC_IP, - BC_FILTER_MAGIC_MAC, -}; - -static const struct iwl_fw_bcast_filter iwl_mvm_default_bcast_filters[] = { - { - /* arp */ - .discard = 0, - .frame_type = BCAST_FILTER_FRAME_TYPE_ALL, - .attrs = { - { - /* frame type - arp, hw type - ethernet */ - .offset_type = - BCAST_FILTER_OFFSET_PAYLOAD_START, - .offset = sizeof(rfc1042_header), - .val = cpu_to_be32(0x08060001), - .mask = cpu_to_be32(0xffffffff), - }, - { - /* arp dest ip */ - .offset_type = - BCAST_FILTER_OFFSET_PAYLOAD_START, - .offset = sizeof(rfc1042_header) + 2 + - sizeof(struct arphdr) + - ETH_ALEN + sizeof(__be32) + - ETH_ALEN, - .mask = cpu_to_be32(0xffffffff), - /* mark it as special field */ - .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_IP), - }, - }, - }, - { - /* dhcp offer bcast */ - .discard = 0, - .frame_type = BCAST_FILTER_FRAME_TYPE_IPV4, - .attrs = { - { - /* udp dest port - 68 (bootp client)*/ - .offset_type = BCAST_FILTER_OFFSET_IP_END, - .offset = offsetof(struct udphdr, dest), - .val = cpu_to_be32(0x00440000), - .mask = cpu_to_be32(0xffff0000), - }, - { - /* dhcp - lsb bytes of client hw address */ - .offset_type = BCAST_FILTER_OFFSET_IP_END, - .offset = 38, - .mask = cpu_to_be32(0xffffffff), - /* mark it as special field */ - .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_MAC), - }, - }, - }, - /* last filter must be empty */ - {}, -}; -#endif - static const struct cfg80211_pmsr_capabilities iwl_mvm_pmsr_capa = { .max_peers = IWL_MVM_TOF_MAX_APS, .report_ap_tsf = 1, @@ -299,7 +226,6 @@ static const u8 he_if_types_ext_capa_sta[] = { [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING, [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT, [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF, - [9] = WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT, }; static const struct wiphy_iftype_ext_capab he_iftypes_ext_capa[] = { @@ -668,11 +594,6 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) } #endif -#ifdef CONFIG_IWLWIFI_BCAST_FILTERING - /* assign default bcast filtering configuration */ - mvm->bcast_filters = iwl_mvm_default_bcast_filters; -#endif - ret = iwl_mvm_leds_init(mvm); if (ret) return ret; @@ -1839,162 +1760,6 @@ static void iwl_mvm_config_iface_filter(struct ieee80211_hw *hw, mutex_unlock(&mvm->mutex); } -#ifdef CONFIG_IWLWIFI_BCAST_FILTERING -struct iwl_bcast_iter_data { - struct iwl_mvm *mvm; - struct iwl_bcast_filter_cmd *cmd; - u8 current_filter; -}; - -static void -iwl_mvm_set_bcast_filter(struct ieee80211_vif *vif, - const struct iwl_fw_bcast_filter *in_filter, - struct iwl_fw_bcast_filter *out_filter) -{ - struct iwl_fw_bcast_filter_attr *attr; - int i; - - memcpy(out_filter, in_filter, sizeof(*out_filter)); - - for (i = 0; i < ARRAY_SIZE(out_filter->attrs); i++) { - attr = &out_filter->attrs[i]; - - if (!attr->mask) - break; - - switch (attr->reserved1) { - case cpu_to_le16(BC_FILTER_MAGIC_IP): - if (vif->bss_conf.arp_addr_cnt != 1) { - attr->mask = 0; - continue; - } - - attr->val = vif->bss_conf.arp_addr_list[0]; - break; - case cpu_to_le16(BC_FILTER_MAGIC_MAC): - attr->val = *(__be32 *)&vif->addr[2]; - break; - default: - break; - } - attr->reserved1 = 0; - out_filter->num_attrs++; - } -} - -static void iwl_mvm_bcast_filter_iterator(void *_data, u8 *mac, - struct ieee80211_vif *vif) -{ - struct iwl_bcast_iter_data *data = _data; - struct iwl_mvm *mvm = data->mvm; - struct iwl_bcast_filter_cmd *cmd = data->cmd; - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct 
iwl_fw_bcast_mac *bcast_mac; - int i; - - if (WARN_ON(mvmvif->id >= ARRAY_SIZE(cmd->macs))) - return; - - bcast_mac = &cmd->macs[mvmvif->id]; - - /* - * enable filtering only for associated stations, but not for P2P - * Clients - */ - if (vif->type != NL80211_IFTYPE_STATION || vif->p2p || - !vif->bss_conf.assoc) - return; - - bcast_mac->default_discard = 1; - - /* copy all configured filters */ - for (i = 0; mvm->bcast_filters[i].attrs[0].mask; i++) { - /* - * Make sure we don't exceed our filters limit. - * if there is still a valid filter to be configured, - * be on the safe side and just allow bcast for this mac. - */ - if (WARN_ON_ONCE(data->current_filter >= - ARRAY_SIZE(cmd->filters))) { - bcast_mac->default_discard = 0; - bcast_mac->attached_filters = 0; - break; - } - - iwl_mvm_set_bcast_filter(vif, - &mvm->bcast_filters[i], - &cmd->filters[data->current_filter]); - - /* skip current filter if it contains no attributes */ - if (!cmd->filters[data->current_filter].num_attrs) - continue; - - /* attach the filter to current mac */ - bcast_mac->attached_filters |= - cpu_to_le16(BIT(data->current_filter)); - - data->current_filter++; - } -} - -bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm, - struct iwl_bcast_filter_cmd *cmd) -{ - struct iwl_bcast_iter_data iter_data = { - .mvm = mvm, - .cmd = cmd, - }; - - if (IWL_MVM_FW_BCAST_FILTER_PASS_ALL) - return false; - - memset(cmd, 0, sizeof(*cmd)); - cmd->max_bcast_filters = ARRAY_SIZE(cmd->filters); - cmd->max_macs = ARRAY_SIZE(cmd->macs); - -#ifdef CONFIG_IWLWIFI_DEBUGFS - /* use debugfs filters/macs if override is configured */ - if (mvm->dbgfs_bcast_filtering.override) { - memcpy(cmd->filters, &mvm->dbgfs_bcast_filtering.cmd.filters, - sizeof(cmd->filters)); - memcpy(cmd->macs, &mvm->dbgfs_bcast_filtering.cmd.macs, - sizeof(cmd->macs)); - return true; - } -#endif - - /* if no filters are configured, do nothing */ - if (!mvm->bcast_filters) - return false; - - /* configure and attach these filters for each associated sta vif */ - ieee80211_iterate_active_interfaces( - mvm->hw, IEEE80211_IFACE_ITER_NORMAL, - iwl_mvm_bcast_filter_iterator, &iter_data); - - return true; -} - -static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm) -{ - struct iwl_bcast_filter_cmd cmd; - - if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING)) - return 0; - - if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) - return 0; - - return iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0, - sizeof(cmd), &cmd); -} -#else -static inline int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm) -{ - return 0; -} -#endif - static int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { @@ -2591,7 +2356,6 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, } iwl_mvm_recalc_multicast(mvm); - iwl_mvm_configure_bcast_filter(mvm); /* reset rssi values */ mvmvif->bf_data.ave_beacon_signal = 0; @@ -2638,11 +2402,6 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, } } - if (changes & BSS_CHANGED_ARP_FILTER) { - IWL_DEBUG_MAC80211(mvm, "arp filter changed\n"); - iwl_mvm_configure_bcast_filter(mvm); - } - if (changes & BSS_CHANGED_BANDWIDTH) iwl_mvm_apply_fw_smps_request(vif); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 7c6802f92ef7..c6bc85d4600a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -884,17 +884,6 @@ struct iwl_mvm { /* rx chain antennas set through debugfs 
for the scan command */ u8 scan_rx_ant; -#ifdef CONFIG_IWLWIFI_BCAST_FILTERING - /* broadcast filters to configure for each associated station */ - const struct iwl_fw_bcast_filter *bcast_filters; -#ifdef CONFIG_IWLWIFI_DEBUGFS - struct { - bool override; - struct iwl_bcast_filter_cmd cmd; - } dbgfs_bcast_filtering; -#endif -#endif - /* Internal station */ struct iwl_mvm_int_sta aux_sta; struct iwl_mvm_int_sta snif_sta; @@ -1593,8 +1582,6 @@ int iwl_mvm_up(struct iwl_mvm *mvm); int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm); int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm); -bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm, - struct iwl_bcast_filter_cmd *cmd); /* * FW notifications / CMD responses handlers @@ -2224,7 +2211,7 @@ static inline void iwl_mvm_mei_device_down(struct iwl_mvm *mvm) static inline void iwl_mvm_mei_set_sw_rfkill_state(struct iwl_mvm *mvm) { bool sw_rfkill = - mvm->hw_registered ? rfkill_blocked(mvm->hw->wiphy->rfkill) : false; + mvm->hw_registered ? rfkill_soft_blocked(mvm->hw->wiphy->rfkill) : false; if (mvm->mei_registered) iwl_mei_set_rfkill_state(iwl_mvm_is_radio_killed(mvm), diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 36ac870e3bae..b2f33ebdf485 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -479,7 +479,6 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = { HCMD_NAME(MCC_CHUB_UPDATE_CMD), HCMD_NAME(MARKER_CMD), HCMD_NAME(BT_PROFILE_NOTIFICATION), - HCMD_NAME(BCAST_FILTER_CMD), HCMD_NAME(MCAST_FILTER_CMD), HCMD_NAME(REPLY_SF_CFG_CMD), HCMD_NAME(REPLY_BEACON_FILTERING_CMD), diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index e2ecede55bff..7763037b93ed 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -1427,7 +1427,7 @@ static void iwl_mvm_hwrate_to_tx_status(const struct iwl_fw *fw, struct ieee80211_tx_rate *r = &info->status.rates[0]; if (iwl_fw_lookup_notif_ver(fw, LONG_GROUP, - TX_CMD, 0) > 6) + TX_CMD, 0) <= 6) rate_n_flags = iwl_new_rate_from_v1(rate_n_flags); info->status.antenna = diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c b/drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c index 78450366312b..080a1587caa5 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c @@ -71,12 +71,13 @@ static int iwl_mvm_vendor_host_get_ownership(struct wiphy *wiphy, { struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + int ret; mutex_lock(&mvm->mutex); - iwl_mvm_mei_get_ownership(mvm); + ret = iwl_mvm_mei_get_ownership(mvm); mutex_unlock(&mvm->mutex); - return 0; + return ret; } static const struct wiphy_vendor_command iwl_mvm_vendor_commands[] = { diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c index 0febdcacbd42..94f40c4d2421 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c @@ -385,8 +385,7 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans, /* This may fail if AMT took ownership of the device */ if (iwl_pcie_prepare_card_hw(trans)) { IWL_WARN(trans, "Exit HW not ready\n"); - ret = -EIO; - goto out; + return -EIO; } iwl_enable_rfkill_int(trans); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c 
b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 6ad5df611370..8be3c3c8c68b 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -1329,8 +1329,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans, /* This may fail if AMT took ownership of the device */ if (iwl_pcie_prepare_card_hw(trans)) { IWL_WARN(trans, "Exit HW not ready\n"); - ret = -EIO; - goto out; + return -EIO; } iwl_enable_rfkill_int(trans); diff --git a/drivers/net/wireless/intersil/p54/p54spi.c b/drivers/net/wireless/intersil/p54/p54spi.c index ab0fe8565851..f99b7ba69fc3 100644 --- a/drivers/net/wireless/intersil/p54/p54spi.c +++ b/drivers/net/wireless/intersil/p54/p54spi.c @@ -669,7 +669,7 @@ err_free: return ret; } -static int p54spi_remove(struct spi_device *spi) +static void p54spi_remove(struct spi_device *spi) { struct p54s_priv *priv = spi_get_drvdata(spi); @@ -684,8 +684,6 @@ static int p54spi_remove(struct spi_device *spi) mutex_destroy(&priv->mutex); p54_free_common(priv->hw); - - return 0; } diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 10b08212d1ff..28bfa7b7b73c 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -2475,6 +2475,15 @@ static void hw_scan_work(struct work_struct *work) if (req->ie_len) skb_put_data(probe, req->ie, req->ie_len); + if (!ieee80211_tx_prepare_skb(hwsim->hw, + hwsim->hw_scan_vif, + probe, + hwsim->tmp_chan->band, + NULL)) { + kfree_skb(probe); + continue; + } + local_bh_disable(); mac80211_hwsim_tx_frame(hwsim->hw, probe, hwsim->tmp_chan); @@ -4136,6 +4145,10 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2, } txi->flags |= IEEE80211_TX_STAT_ACK; } + + if (hwsim_flags & HWSIM_TX_CTL_NO_ACK) + txi->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED; + ieee80211_tx_status_irqsafe(data2->hw, skb); return 0; out: diff --git a/drivers/net/wireless/marvell/libertas/if_spi.c b/drivers/net/wireless/marvell/libertas/if_spi.c index cd9f8ecf171f..ff1c7ec8c450 100644 --- a/drivers/net/wireless/marvell/libertas/if_spi.c +++ b/drivers/net/wireless/marvell/libertas/if_spi.c @@ -1195,7 +1195,7 @@ out: return err; } -static int libertas_spi_remove(struct spi_device *spi) +static void libertas_spi_remove(struct spi_device *spi) { struct if_spi_card *card = spi_get_drvdata(spi); struct lbs_private *priv = card->priv; @@ -1212,8 +1212,6 @@ static int libertas_spi_remove(struct spi_device *spi) if (card->pdata->teardown) card->pdata->teardown(spi); free_if_spi_card(card); - - return 0; } static int if_spi_suspend(struct device *dev) diff --git a/drivers/net/wireless/marvell/libertas/rx.c b/drivers/net/wireless/marvell/libertas/rx.c index 9f24b0760e1f..c34d30f7cbe0 100644 --- a/drivers/net/wireless/marvell/libertas/rx.c +++ b/drivers/net/wireless/marvell/libertas/rx.c @@ -147,7 +147,7 @@ int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb) dev->stats.rx_packets++; skb->protocol = eth_type_trans(skb, dev); - netif_rx_any_context(skb); + netif_rx(skb); ret = 0; done: @@ -262,7 +262,7 @@ static int process_rxed_802_11_packet(struct lbs_private *priv, dev->stats.rx_packets++; skb->protocol = eth_type_trans(skb, priv->dev); - netif_rx_any_context(skb); + netif_rx(skb); ret = 0; diff --git a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c index 245ff644f81e..4e49ed21c5ce 100644 --- a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c +++ 
b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c @@ -350,7 +350,7 @@ int mwifiex_uap_recv_packet(struct mwifiex_private *priv, skb->truesize += (skb->len - MWIFIEX_RX_DATA_BUF_SIZE); /* Forward multicast/broadcast packet to upper layer*/ - netif_rx_any_context(skb); + netif_rx(skb); return 0; } diff --git a/drivers/net/wireless/marvell/mwifiex/util.c b/drivers/net/wireless/marvell/mwifiex/util.c index d583fa600a29..d5edb1e89f5b 100644 --- a/drivers/net/wireless/marvell/mwifiex/util.c +++ b/drivers/net/wireless/marvell/mwifiex/util.c @@ -488,7 +488,7 @@ int mwifiex_recv_packet(struct mwifiex_private *priv, struct sk_buff *skb) (skb->truesize > MWIFIEX_RX_DATA_BUF_SIZE)) skb->truesize += (skb->len - MWIFIEX_RX_DATA_BUF_SIZE); - netif_rx_any_context(skb); + netif_rx(skb); return 0; } diff --git a/drivers/net/wireless/microchip/wilc1000/spi.c b/drivers/net/wireless/microchip/wilc1000/spi.c index 217477f34c21..18420e954402 100644 --- a/drivers/net/wireless/microchip/wilc1000/spi.c +++ b/drivers/net/wireless/microchip/wilc1000/spi.c @@ -240,7 +240,7 @@ free: return ret; } -static int wilc_bus_remove(struct spi_device *spi) +static void wilc_bus_remove(struct spi_device *spi) { struct wilc *wilc = spi_get_drvdata(spi); struct wilc_spi *spi_priv = wilc->bus_data; @@ -248,8 +248,6 @@ static int wilc_bus_remove(struct spi_device *spi) clk_disable_unprepare(wilc->rtc_clk); wilc_netdev_cleanup(wilc); kfree(spi_priv); - - return 0; } static const struct of_device_id wilc_of_match[] = { diff --git a/drivers/net/wireless/st/cw1200/cw1200_spi.c b/drivers/net/wireless/st/cw1200/cw1200_spi.c index 271ed2ce2d7f..fe0d220da44d 100644 --- a/drivers/net/wireless/st/cw1200/cw1200_spi.c +++ b/drivers/net/wireless/st/cw1200/cw1200_spi.c @@ -423,7 +423,7 @@ static int cw1200_spi_probe(struct spi_device *func) } /* Disconnect Function to be called by SPI stack when device is disconnected */ -static int cw1200_spi_disconnect(struct spi_device *func) +static void cw1200_spi_disconnect(struct spi_device *func) { struct hwbus_priv *self = spi_get_drvdata(func); @@ -435,8 +435,6 @@ static int cw1200_spi_disconnect(struct spi_device *func) } } cw1200_spi_off(dev_get_platdata(&func->dev)); - - return 0; } static int __maybe_unused cw1200_spi_suspend(struct device *dev) diff --git a/drivers/net/wireless/ti/wl1251/spi.c b/drivers/net/wireless/ti/wl1251/spi.c index 5b894bd6237e..9df38726e8b0 100644 --- a/drivers/net/wireless/ti/wl1251/spi.c +++ b/drivers/net/wireless/ti/wl1251/spi.c @@ -327,14 +327,12 @@ out_free: return ret; } -static int wl1251_spi_remove(struct spi_device *spi) +static void wl1251_spi_remove(struct spi_device *spi) { struct wl1251 *wl = spi_get_drvdata(spi); wl1251_free_hw(wl); regulator_disable(wl->vio); - - return 0; } static struct spi_driver wl1251_spi_driver = { diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c index 354a7e1c3315..7eae1ec2eb2b 100644 --- a/drivers/net/wireless/ti/wlcore/spi.c +++ b/drivers/net/wireless/ti/wlcore/spi.c @@ -546,13 +546,11 @@ out_dev_put: return ret; } -static int wl1271_remove(struct spi_device *spi) +static void wl1271_remove(struct spi_device *spi) { struct wl12xx_spi_glue *glue = spi_get_drvdata(spi); platform_device_unregister(glue->core); - - return 0; } static struct spi_driver wl1271_spi_driver = { diff --git a/drivers/net/wwan/iosm/iosm_ipc_debugfs.c b/drivers/net/wwan/iosm/iosm_ipc_debugfs.c index f2f57751a7d2..e916139b8cd4 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_debugfs.c +++ b/drivers/net/wwan/iosm/iosm_ipc_debugfs.c 
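Several of the SPI driver hunks above (p54spi, libertas, wilc1000, cw1200, wl1251, wlcore, and nfcmrvl further down) follow the same tree-wide conversion: spi_driver remove callbacks now return void, because the driver core never acted on the old int return value, so returning an error from remove could only leak resources. A schematic of the converted shape, with the driver name and teardown helper hypothetical:

static void foo_spi_remove(struct spi_device *spi)
{
        struct foo_priv *priv = spi_get_drvdata(spi);

        foo_stop_hw(priv);              /* hypothetical teardown helper */
        /* nothing to return: the core ignored the old int anyway */
}

static struct spi_driver foo_spi_driver = {
        .driver = { .name = "foo" },
        .probe  = foo_spi_probe,
        .remove = foo_spi_remove,       /* void since this conversion */
};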
@@ -12,10 +12,10 @@ void ipc_debugfs_init(struct iosm_imem *ipc_imem) { - struct dentry *debugfs_pdev = wwan_get_debugfs_dir(ipc_imem->dev); + ipc_imem->debugfs_wwan_dir = wwan_get_debugfs_dir(ipc_imem->dev); ipc_imem->debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, - debugfs_pdev); + ipc_imem->debugfs_wwan_dir); ipc_imem->trace = ipc_trace_init(ipc_imem); if (!ipc_imem->trace) @@ -26,4 +26,5 @@ void ipc_debugfs_deinit(struct iosm_imem *ipc_imem) { ipc_trace_deinit(ipc_imem->trace); debugfs_remove_recursive(ipc_imem->debugfs_dir); + wwan_put_debugfs_dir(ipc_imem->debugfs_wwan_dir); } diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem.c b/drivers/net/wwan/iosm/iosm_ipc_imem.c index f9e8e0ee4de3..1e6a47976642 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_imem.c +++ b/drivers/net/wwan/iosm/iosm_ipc_imem.c @@ -114,17 +114,35 @@ ipc_imem_fast_update_timer_cb(struct hrtimer *hr_timer) return HRTIMER_NORESTART; } +static int ipc_imem_tq_adb_timer_cb(struct iosm_imem *ipc_imem, int arg, + void *msg, size_t size) +{ + ipc_mux_ul_adb_finish(ipc_imem->mux); + return 0; +} + +static enum hrtimer_restart +ipc_imem_adb_timer_cb(struct hrtimer *hr_timer) +{ + struct iosm_imem *ipc_imem = + container_of(hr_timer, struct iosm_imem, adb_timer); + + ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_adb_timer_cb, 0, + NULL, 0, false); + return HRTIMER_NORESTART; +} + static int ipc_imem_setup_cp_mux_cap_init(struct iosm_imem *ipc_imem, struct ipc_mux_config *cfg) { ipc_mmio_update_cp_capability(ipc_imem->mmio); - if (!ipc_imem->mmio->has_mux_lite) { + if (ipc_imem->mmio->mux_protocol == MUX_UNKNOWN) { dev_err(ipc_imem->dev, "Failed to get Mux capability."); return -EINVAL; } - cfg->protocol = MUX_LITE; + cfg->protocol = ipc_imem->mmio->mux_protocol; cfg->ul_flow = (ipc_imem->mmio->has_ul_flow_credit == 1) ? MUX_UL_ON_CREDITS : @@ -153,6 +171,10 @@ void ipc_imem_msg_send_feature_set(struct iosm_imem *ipc_imem, IPC_MSG_PREP_FEATURE_SET, &prep_args); } +/** + * ipc_imem_td_update_timer_start - Starts the TD Update Timer if not started. + * @ipc_imem: Pointer to imem data-struct + */ void ipc_imem_td_update_timer_start(struct iosm_imem *ipc_imem) { /* Use the TD update timer only in the runtime phase */ @@ -179,6 +201,21 @@ void ipc_imem_hrtimer_stop(struct hrtimer *hr_timer) hrtimer_cancel(hr_timer); } +/** + * ipc_imem_adb_timer_start - Starts the adb Timer if not starting. + * @ipc_imem: Pointer to imem data-struct + */ +void ipc_imem_adb_timer_start(struct iosm_imem *ipc_imem) +{ + if (!hrtimer_active(&ipc_imem->adb_timer)) { + ipc_imem->hrtimer_period = + ktime_set(0, IOSM_AGGR_MUX_ADB_FINISH_TIMEOUT_NSEC); + hrtimer_start(&ipc_imem->adb_timer, + ipc_imem->hrtimer_period, + HRTIMER_MODE_REL); + } +} + bool ipc_imem_ul_write_td(struct iosm_imem *ipc_imem) { struct ipc_mem_channel *channel; @@ -550,6 +587,11 @@ static void ipc_imem_run_state_worker(struct work_struct *instance) while (ctrl_chl_idx < IPC_MEM_MAX_CHANNELS) { if (!ipc_chnl_cfg_get(&chnl_cfg_port, ctrl_chl_idx)) { ipc_imem->ipc_port[ctrl_chl_idx] = NULL; + if (ipc_imem->pcie->pci->device == INTEL_CP_DEVICE_7360_ID && + chnl_cfg_port.wwan_port_type == WWAN_PORT_MBIM) { + ctrl_chl_idx++; + continue; + } if (chnl_cfg_port.wwan_port_type != WWAN_PORT_UNKNOWN) { ipc_imem_channel_init(ipc_imem, IPC_CTYPE_CTRL, chnl_cfg_port, @@ -680,8 +722,11 @@ static void ipc_imem_handle_irq(struct iosm_imem *ipc_imem, int irq) } /* Try to generate new ADB or ADGH. 
*/ - if (ipc_mux_ul_data_encode(ipc_imem->mux)) + if (ipc_mux_ul_data_encode(ipc_imem->mux)) { ipc_imem_td_update_timer_start(ipc_imem); + if (ipc_imem->mux->protocol == MUX_AGGREGATION) + ipc_imem_adb_timer_start(ipc_imem); + } /* Continue the send procedure with accumulated SIO or NETIF packets. * Reset the debounce flags. @@ -1330,6 +1375,9 @@ struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id, HRTIMER_MODE_REL); ipc_imem->td_alloc_timer.function = ipc_imem_td_alloc_timer_cb; + hrtimer_init(&ipc_imem->adb_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + ipc_imem->adb_timer.function = ipc_imem_adb_timer_cb; + if (ipc_imem_config(ipc_imem)) { dev_err(ipc_imem->dev, "failed to initialize the imem"); goto imem_config_fail; diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem.h b/drivers/net/wwan/iosm/iosm_ipc_imem.h index 98554e9beb01..e700dc8bfe0a 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_imem.h +++ b/drivers/net/wwan/iosm/iosm_ipc_imem.h @@ -317,6 +317,7 @@ enum ipc_phase { * @tdupdate_timer: Delay the TD update doorbell. * @fast_update_timer: forced head pointer update delay timer. * @td_alloc_timer: Timer for DL pipe TD allocation retry + * @adb_timer: Timer for finishing the ADB. * @rom_exit_code: Mapped boot rom exit code. * @enter_runtime: 1 means the transition to runtime phase was * executed. @@ -340,6 +341,7 @@ enum ipc_phase { * @ev_mux_net_transmit_pending:0 means inform the IPC tasklet to pass * @reset_det_n: Reset detect flag * @pcie_wake_n: Pcie wake flag + * @debugfs_wwan_dir: WWAN Debug FS directory entry * @debugfs_dir: Debug FS directory for driver-specific entries */ struct iosm_imem { @@ -364,6 +366,7 @@ struct iosm_imem { struct hrtimer tdupdate_timer; struct hrtimer fast_update_timer; struct hrtimer td_alloc_timer; + struct hrtimer adb_timer; enum rom_exit_code rom_exit_code; u32 enter_runtime; struct completion ul_pend_sem; @@ -382,6 +385,7 @@ struct iosm_imem { reset_det_n:1, pcie_wake_n:1; #ifdef CONFIG_WWAN_DEBUGFS + struct dentry *debugfs_wwan_dir; struct dentry *debugfs_dir; #endif }; @@ -593,4 +597,7 @@ void ipc_imem_channel_init(struct iosm_imem *ipc_imem, enum ipc_ctype ctype, * Returns: 0 on success, -1 on failure */ int ipc_imem_devlink_trigger_chip_info(struct iosm_imem *ipc_imem); + +void ipc_imem_adb_timer_start(struct iosm_imem *ipc_imem); + #endif diff --git a/drivers/net/wwan/iosm/iosm_ipc_mmio.c b/drivers/net/wwan/iosm/iosm_ipc_mmio.c index f09e5e77a2a5..63eb08c43c05 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_mmio.c +++ b/drivers/net/wwan/iosm/iosm_ipc_mmio.c @@ -10,6 +10,7 @@ #include <linux/slab.h> #include "iosm_ipc_mmio.h" +#include "iosm_ipc_mux.h" /* Definition of MMIO offsets * note that MMIO_CI offsets are relative to end of chip info structure @@ -71,8 +72,9 @@ void ipc_mmio_update_cp_capability(struct iosm_mmio *ipc_mmio) ver = ipc_mmio_get_cp_version(ipc_mmio); cp_cap = ioread32(ipc_mmio->base + ipc_mmio->offset.cp_capability); - ipc_mmio->has_mux_lite = (ver >= IOSM_CP_VERSION) && - !(cp_cap & DL_AGGR) && !(cp_cap & UL_AGGR); + ipc_mmio->mux_protocol = ((ver >= IOSM_CP_VERSION) && (cp_cap & + (UL_AGGR | DL_AGGR))) ? 
MUX_AGGREGATION + : MUX_LITE; ipc_mmio->has_ul_flow_credit = (ver >= IOSM_CP_VERSION) && (cp_cap & UL_FLOW_CREDIT); diff --git a/drivers/net/wwan/iosm/iosm_ipc_mmio.h b/drivers/net/wwan/iosm/iosm_ipc_mmio.h index f861994a6d90..193d7ba2478a 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_mmio.h +++ b/drivers/net/wwan/iosm/iosm_ipc_mmio.h @@ -72,7 +72,7 @@ struct mmio_offset { * @context_info_addr: Physical base address of context info structure * @chip_info_version: Version of chip info structure * @chip_info_size: Size of chip info structure - * @has_mux_lite: It doesn't support mux aggergation + * @mux_protocol: mux protocol * @has_ul_flow_credit: Ul flow credit support * @has_slp_no_prot: Device sleep no protocol support * @has_mcr_support: Usage of mcr support @@ -84,8 +84,8 @@ struct iosm_mmio { phys_addr_t context_info_addr; unsigned int chip_info_version; unsigned int chip_info_size; - u8 has_mux_lite:1, - has_ul_flow_credit:1, + u32 mux_protocol; + u8 has_ul_flow_credit:1, has_slp_no_prot:1, has_mcr_support:1; }; diff --git a/drivers/net/wwan/iosm/iosm_ipc_mux.c b/drivers/net/wwan/iosm/iosm_ipc_mux.c index 8e66ffe92055..9c7a9a2a1f25 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_mux.c +++ b/drivers/net/wwan/iosm/iosm_ipc_mux.c @@ -279,9 +279,10 @@ struct iosm_mux *ipc_mux_init(struct ipc_mux_config *mux_cfg, struct iosm_imem *imem) { struct iosm_mux *ipc_mux = kzalloc(sizeof(*ipc_mux), GFP_KERNEL); - int i, ul_tds, ul_td_size; + int i, j, ul_tds, ul_td_size; struct sk_buff_head *free_list; struct sk_buff *skb; + int qlt_size; if (!ipc_mux) return NULL; @@ -321,6 +322,24 @@ struct iosm_mux *ipc_mux_init(struct ipc_mux_config *mux_cfg, ipc_mux->channel_id = -1; ipc_mux->channel = NULL; + if (ipc_mux->protocol != MUX_LITE) { + qlt_size = offsetof(struct mux_qlth, ql) + + MUX_QUEUE_LEVEL * sizeof(struct mux_qlth_ql); + + for (i = 0; i < IPC_MEM_MUX_IP_SESSION_ENTRIES; i++) { + ipc_mux->ul_adb.pp_qlt[i] = kzalloc(qlt_size, + GFP_ATOMIC); + if (!ipc_mux->ul_adb.pp_qlt[i]) { + for (j = i - 1; j >= 0; j--) + kfree(ipc_mux->ul_adb.pp_qlt[j]); + return NULL; + } + } + + ul_td_size = IPC_MEM_MAX_UL_ADB_BUF_SIZE; + ul_tds = IPC_MEM_MAX_TDS_MUX_AGGR_UL; + } + /* Allocate the list of UL ADB. */ for (i = 0; i < ul_tds; i++) { dma_addr_t mapping; diff --git a/drivers/net/wwan/iosm/iosm_ipc_mux.h b/drivers/net/wwan/iosm/iosm_ipc_mux.h index 88debaa1ed31..cd9d74cc097f 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_mux.h +++ b/drivers/net/wwan/iosm/iosm_ipc_mux.h @@ -8,9 +8,12 @@ #include "iosm_ipc_protocol.h" -/* Size of the buffer for the IP MUX data buffer. */ -#define IPC_MEM_MAX_DL_MUX_BUF_SIZE (16 * 1024) -#define IPC_MEM_MAX_UL_ADB_BUF_SIZE IPC_MEM_MAX_DL_MUX_BUF_SIZE +#define IPC_MEM_MAX_UL_DG_ENTRIES 100 +#define IPC_MEM_MAX_TDS_MUX_AGGR_UL 60 + +#define IPC_MEM_MAX_ADB_BUF_SIZE (16 * 1024) +#define IPC_MEM_MAX_UL_ADB_BUF_SIZE IPC_MEM_MAX_ADB_BUF_SIZE +#define IPC_MEM_MAX_DL_ADB_BUF_SIZE IPC_MEM_MAX_ADB_BUF_SIZE /* Size of the buffer for the IP MUX Lite data buffer. */ #define IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE (2 * 1024) @@ -167,6 +170,7 @@ enum mux_state { enum ipc_mux_protocol { MUX_UNKNOWN, MUX_LITE, + MUX_AGGREGATION, }; /* Supported UL data transfer methods. */ @@ -192,24 +196,111 @@ struct mux_session { flush:1; /* flush net interface ? */ }; -/* State of a single UL data block. */ +/** + * struct mux_adth_dg - Structure of the datagram in the Aggregated Datagram + * Table Header. + * @datagram_index : Index (in bytes) to the k-th datagram in the table. 
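The mux_adth_dg entries described here are fixed 8-byte records, and nothing in the block stores their count; both the DL decoder and the payload accounting later in this patch derive it from the ADTH's table_length, exploiting the fact that struct mux_adth already embeds one entry. A minimal userspace model of that arithmetic, assuming the struct packs to the wire layout as the kernel's __le types do:

#include <stdint.h>
#include <stdio.h>

struct mux_adth_dg {            /* 8 bytes, matching the layout above */
        uint32_t datagram_index;
        uint16_t datagram_length;
        uint8_t  service_class;
        uint8_t  reserved;
};

/* sizeof(struct mux_adth): 16 header bytes plus one embedded dg entry */
#define ADTH_SIZE (16 + sizeof(struct mux_adth_dg))

static unsigned int nr_of_dg(uint16_t table_length)
{
        /* same arithmetic as the decoder: the embedded entry is added
         * back before dividing by the per-datagram record size */
        return (table_length - ADTH_SIZE + sizeof(struct mux_adth_dg)) /
               sizeof(struct mux_adth_dg);
}

int main(void)
{
        printf("%u\n", nr_of_dg(24));   /* 1 datagram */
        printf("%u\n", nr_of_dg(40));   /* 3 datagrams */
        return 0;
}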
+ * Index shall count from the start of the block including + * the 16-byte header. This value shall be non-zero. + * @datagram_length: Length of the k-th datagram including the head padding. + * This value shall be non-zero. + * @service_class: Service class identifier for the datagram. + * @reserved: Reserved bytes. Set to zero + */ +struct mux_adth_dg { + __le32 datagram_index; + __le16 datagram_length; + u8 service_class; + u8 reserved; +}; + +/** + * struct mux_qlth_ql - Structure of the queue level in the Aggregated + * Datagram Queue Level Table Header. + * @nr_of_bytes: Number of bytes available to transmit in the queue. + */ +struct mux_qlth_ql { + __le32 nr_of_bytes; +}; + +/** + * struct mux_qlth - Structure of Aggregated Datagram Queue Level Table + * Header. + * @signature: Signature of the Queue Level Table Header + * Value: 0x48544C51 (ASCII characters: 'Q' 'L' 'T' 'H') + * @table_length: Length (in bytes) of the datagram table. This length + * shall include the queue level table header size. + * Minimum value:0x10 + * @if_id: ID of the interface the queue levels in the table + * belong to. + * @reserved: Reserved byte. Set to zero. + * @next_table_index: Index (in bytes) to the next table in the buffer. Index + * shall count from the start of the block including the + * 16-byte header. Value of zero indicates end of the list. + * @reserved2: Reserved bytes. Set to zero + * @ql: Queue level table with variable length + */ +struct mux_qlth { + __le32 signature; + __le16 table_length; + u8 if_id; + u8 reserved; + __le32 next_table_index; + __le32 reserved2; + struct mux_qlth_ql ql; +}; + +/** + * struct mux_adb - Structure of State of a single UL data block. + * @dest_skb: Current UL skb for the data block. + * @buf: ADB memory + * @adgh: ADGH pointer + * @qlth_skb: QLTH pointer + * @next_table_index: Pointer to next table index. + * @free_list: List of alloc. ADB for the UL sess. + * @size: Size of the ADB memory. + * @if_cnt: Statistic counter + * @dg_cnt_total: Datagram count total + * @payload_size: Payload Size + * @dg: Datagram table. + * @pp_qlt: Pointers to hold Queue Level Tables of session + * @adbh: ADBH pointer + * @qlt_updated: Queue level table updated + * @dg_count: Datagram count + */ struct mux_adb { - struct sk_buff *dest_skb; /* Current UL skb for the data block. */ - u8 *buf; /* ADB memory. */ - struct mux_adgh *adgh; /* ADGH pointer */ - struct sk_buff *qlth_skb; /* QLTH pointer */ - u32 *next_table_index; /* Pointer to next table index. */ - struct sk_buff_head free_list; /* List of alloc. ADB for the UL sess.*/ - int size; /* Size of the ADB memory. */ - u32 if_cnt; /* Statistic counter */ + struct sk_buff *dest_skb; + u8 *buf; + struct mux_adgh *adgh; + struct sk_buff *qlth_skb; + u32 *next_table_index; + struct sk_buff_head free_list; + int size; + u32 if_cnt; u32 dg_cnt_total; u32 payload_size; + struct mux_adth_dg + dg[IPC_MEM_MUX_IP_SESSION_ENTRIES][IPC_MEM_MAX_UL_DG_ENTRIES]; + struct mux_qlth *pp_qlt[IPC_MEM_MUX_IP_SESSION_ENTRIES]; + struct mux_adbh *adbh; + u32 qlt_updated[IPC_MEM_MUX_IP_SESSION_ENTRIES]; + u32 dg_count[IPC_MEM_MUX_IP_SESSION_ENTRIES]; }; -/* Temporary ACB state. */ +/** + * struct mux_acb - Structure of Temporary ACB state. + * @skb: Used UL skb. + * @if_id: Session id. + * @buf_p: Command buffer. + * @wanted_response: Wanted Response + * @got_response: Got response + * @cmd: command + * @got_param: Received command/response parameter + */ struct mux_acb { struct sk_buff *skb; /* Used UL skb. */ int if_id; /* Session id. 
*/ + u8 *buf_p; u32 wanted_response; u32 got_response; u32 cmd; @@ -241,6 +332,12 @@ struct mux_acb { * @wwan_q_offset: This will hold the offset of the given instance * Useful while passing or receiving packets from * wwan/imem layer. + * @adb_finish_timer: Timer for forcefully finishing the ADB + * @acb_tx_sequence_nr: Sequence number for the ACB header. + * @params: user configurable parameters + * @adb_tx_sequence_nr: Sequence number for ADB header + * @acc_adb_size: Statistic data for logging + * @acc_payload_size: Statistic data for logging * @initialized: MUX object is initialized * @ev_mux_net_transmit_pending: * 0 means inform the IPC tasklet to pass the @@ -269,10 +366,16 @@ struct iosm_mux { long long ul_data_pend_bytes; struct mux_acb acb; int wwan_q_offset; + struct hrtimer adb_finish_timer; + u16 acb_tx_sequence_nr; + struct ipc_params *params; + u16 adb_tx_sequence_nr; + unsigned long long acc_adb_size; + unsigned long long acc_payload_size; u8 initialized:1, ev_mux_net_transmit_pending:1, - adb_prep_ongoing:1; -}; + adb_prep_ongoing; +} __packed; /* MUX configuration structure */ struct ipc_mux_config { diff --git a/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c index 40fb54a0513e..d41e373f9c0a 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c +++ b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c @@ -54,6 +54,49 @@ static int ipc_mux_acb_send(struct iosm_mux *ipc_mux, bool blocking) return 0; } +/* Initialize the command header. */ +static void ipc_mux_acb_init(struct iosm_mux *ipc_mux) +{ + struct mux_acb *acb = &ipc_mux->acb; + struct mux_acbh *header; + + header = (struct mux_acbh *)(acb->skb)->data; + header->block_length = cpu_to_le32(sizeof(struct mux_acbh)); + header->first_cmd_index = header->block_length; + header->signature = cpu_to_le32(IOSM_AGGR_MUX_SIG_ACBH); + header->sequence_nr = cpu_to_le16(ipc_mux->acb_tx_sequence_nr++); +} + +/* Add a command to the ACB. 
*/ +static struct mux_cmdh *ipc_mux_acb_add_cmd(struct iosm_mux *ipc_mux, u32 cmd, + void *param, u32 param_size) +{ + struct mux_acbh *header; + struct mux_cmdh *cmdh; + struct mux_acb *acb; + + acb = &ipc_mux->acb; + header = (struct mux_acbh *)(acb->skb)->data; + cmdh = (struct mux_cmdh *) + ((acb->skb)->data + le32_to_cpu(header->block_length)); + + cmdh->signature = cpu_to_le32(MUX_SIG_CMDH); + cmdh->command_type = cpu_to_le32(cmd); + cmdh->if_id = acb->if_id; + + acb->cmd = cmd; + cmdh->cmd_len = cpu_to_le16(offsetof(struct mux_cmdh, param) + + param_size); + cmdh->transaction_id = cpu_to_le32(ipc_mux->tx_transaction_id++); + if (param) + memcpy(&cmdh->param, param, param_size); + + skb_put(acb->skb, le32_to_cpu(header->block_length) + + le16_to_cpu(cmdh->cmd_len)); + + return cmdh; +} + /* Prepare mux Command */ static struct mux_lite_cmdh *ipc_mux_lite_add_cmd(struct iosm_mux *ipc_mux, u32 cmd, struct mux_acb *acb, @@ -104,7 +147,7 @@ int ipc_mux_dl_acb_send_cmds(struct iosm_mux *ipc_mux, u32 cmd_type, u8 if_id, size_t res_size, bool blocking, bool respond) { struct mux_acb *acb = &ipc_mux->acb; - struct mux_lite_cmdh *ack_lite; + union mux_type_cmdh cmdh; int ret = 0; acb->if_id = if_id; @@ -112,11 +155,23 @@ int ipc_mux_dl_acb_send_cmds(struct iosm_mux *ipc_mux, u32 cmd_type, u8 if_id, if (ret) return ret; - ack_lite = ipc_mux_lite_add_cmd(ipc_mux, cmd_type, acb, param, - res_size); - if (respond) - ack_lite->transaction_id = cpu_to_le32(transaction_id); + if (ipc_mux->protocol == MUX_LITE) { + cmdh.ack_lite = ipc_mux_lite_add_cmd(ipc_mux, cmd_type, acb, + param, res_size); + if (respond) + cmdh.ack_lite->transaction_id = + cpu_to_le32(transaction_id); + } else { + /* Initialize the ACB header. */ + ipc_mux_acb_init(ipc_mux); + cmdh.ack_aggr = ipc_mux_acb_add_cmd(ipc_mux, cmd_type, param, + res_size); + + if (respond) + cmdh.ack_aggr->transaction_id = + cpu_to_le32(transaction_id); + } ret = ipc_mux_acb_send(ipc_mux, blocking); return ret; @@ -129,15 +184,17 @@ void ipc_mux_netif_tx_flowctrl(struct mux_session *session, int idx, bool on) } static int ipc_mux_dl_cmdresps_decode_process(struct iosm_mux *ipc_mux, - struct mux_lite_cmdh *cmdh) + union mux_cmd_param param, + __le32 command_type, u8 if_id, + __le32 transaction_id) { struct mux_acb *acb = &ipc_mux->acb; - switch (le32_to_cpu(cmdh->command_type)) { + switch (le32_to_cpu(command_type)) { case MUX_CMD_OPEN_SESSION_RESP: case MUX_CMD_CLOSE_SESSION_RESP: /* Resume the control application. */ - acb->got_param = cmdh->param; + acb->got_param = param; break; case MUX_LITE_CMD_FLOW_CTL_ACK: @@ -147,8 +204,16 @@ static int ipc_mux_dl_cmdresps_decode_process(struct iosm_mux *ipc_mux, if (ipc_mux->protocol != MUX_LITE) return -EINVAL; - dev_dbg(ipc_mux->dev, "if %u FLOW_CTL_ACK %u received", - cmdh->if_id, le32_to_cpu(cmdh->transaction_id)); + dev_dbg(ipc_mux->dev, "if_id %u FLOW_CTL_ACK %u received", + if_id, le32_to_cpu(transaction_id)); + break; + + case IOSM_AGGR_MUX_CMD_FLOW_CTL_ACK: + /* This command type is not expected as response for + * Lite version of the protocol. So return non-zero. 
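ipc_mux_acb_init() and ipc_mux_acb_add_cmd() above build the aggregated command block in two steps: the 16-byte ACBH is written with first_cmd_index pointing just past itself, then the command is appended at that offset and the skb grows by header length plus cmd_len. A minimal userspace model with a simplified command layout (fields widened to host-endian u32 for brevity; the CMDH signature value is assumed from the reversed-ASCII pattern of the other signatures):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct toy_cmdh {
        uint32_t signature;
        uint32_t cmd_len;
        uint32_t command_type;
        uint32_t transaction_id;
        uint32_t next_cmd_index;        /* zero terminates the chain */
};

int main(void)
{
        uint8_t buf[128] = { 0 };
        uint32_t first_cmd_index = 16;  /* == sizeof(struct mux_acbh) */
        struct toy_cmdh cmd = {
                .signature = 0x48444d43, /* "CMDH" little-endian, assumed */
                .cmd_len = sizeof(cmd),
                .command_type = 5,       /* e.g. FLOW_CTL_ENABLE */
        };

        memcpy(buf + first_cmd_index, &cmd, sizeof(cmd));
        printf("block_length = %u\n",
               (unsigned)(first_cmd_index + cmd.cmd_len));      /* 36 */
        return 0;
}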
+ */ + if (ipc_mux->protocol == MUX_LITE) + return -EINVAL; break; default: @@ -156,38 +221,39 @@ static int ipc_mux_dl_cmdresps_decode_process(struct iosm_mux *ipc_mux, } acb->wanted_response = MUX_CMD_INVALID; - acb->got_response = le32_to_cpu(cmdh->command_type); + acb->got_response = le32_to_cpu(command_type); complete(&ipc_mux->channel->ul_sem); return 0; } -static int ipc_mux_dl_dlcmds_decode_process(struct iosm_mux *ipc_mux, - struct mux_lite_cmdh *cmdh) +static int ipc_mux_dl_cmds_decode_process(struct iosm_mux *ipc_mux, + union mux_cmd_param *param, + __le32 command_type, u8 if_id, + __le16 cmd_len, int size) { - union mux_cmd_param *param = &cmdh->param; struct mux_session *session; - int new_size; + struct hrtimer *adb_timer; dev_dbg(ipc_mux->dev, "if_id[%d]: dlcmds decode process %d", - cmdh->if_id, le32_to_cpu(cmdh->command_type)); + if_id, le32_to_cpu(command_type)); - switch (le32_to_cpu(cmdh->command_type)) { + switch (le32_to_cpu(command_type)) { case MUX_LITE_CMD_FLOW_CTL: + case IOSM_AGGR_MUX_CMD_FLOW_CTL_DISABLE: - if (cmdh->if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) { + if (if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) { dev_err(ipc_mux->dev, "if_id [%d] not valid", - cmdh->if_id); + if_id); return -EINVAL; /* No session interface id. */ } - session = &ipc_mux->session[cmdh->if_id]; + session = &ipc_mux->session[if_id]; + adb_timer = &ipc_mux->imem->adb_timer; - new_size = offsetof(struct mux_lite_cmdh, param) + - sizeof(param->flow_ctl); if (param->flow_ctl.mask == cpu_to_le32(0xFFFFFFFF)) { /* Backward Compatibility */ - if (cmdh->cmd_len == cpu_to_le16(new_size)) + if (cmd_len == cpu_to_le16(size)) session->flow_ctl_mask = le32_to_cpu(param->flow_ctl.mask); else @@ -197,6 +263,16 @@ static int ipc_mux_dl_dlcmds_decode_process(struct iosm_mux *ipc_mux, * to limit uplink session queueing */ session->net_tx_stop = true; + + /* We have to call Finish ADB here. + * Otherwise any already queued data + * will be sent to CP when ADB is full + * for some other sessions. + */ + if (ipc_mux->protocol == MUX_AGGREGATION) { + ipc_mux_ul_adb_finish(ipc_mux); + ipc_imem_hrtimer_stop(adb_timer); + } /* Update the stats */ session->flow_ctl_en_cnt++; } else if (param->flow_ctl.mask == 0) { @@ -205,8 +281,10 @@ static int ipc_mux_dl_dlcmds_decode_process(struct iosm_mux *ipc_mux, * our internal Tx flag and enabling kernel * flow control */ + dev_dbg(ipc_mux->dev, "if_id[%u] flow_ctl mask 0x%08X", + if_id, le32_to_cpu(param->flow_ctl.mask)); /* Backward Compatibility */ - if (cmdh->cmd_len == cpu_to_le16(new_size)) + if (cmd_len == cpu_to_le16(size)) session->flow_ctl_mask = le32_to_cpu(param->flow_ctl.mask); else @@ -217,7 +295,10 @@ static int ipc_mux_dl_dlcmds_decode_process(struct iosm_mux *ipc_mux, break; } - dev_dbg(ipc_mux->dev, "if[%u] FLOW CTRL 0x%08X", cmdh->if_id, + ipc_mux->acc_adb_size = 0; + ipc_mux->acc_payload_size = 0; + + dev_dbg(ipc_mux->dev, "if_id[%u] FLOW CTRL 0x%08X", if_id, le32_to_cpu(param->flow_ctl.mask)); break; @@ -235,12 +316,20 @@ static void ipc_mux_dl_cmd_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb) { struct mux_lite_cmdh *cmdh = (struct mux_lite_cmdh *)skb->data; __le32 trans_id = cmdh->transaction_id; + int size; - if (ipc_mux_dl_cmdresps_decode_process(ipc_mux, cmdh)) { + if (ipc_mux_dl_cmdresps_decode_process(ipc_mux, cmdh->param, + cmdh->command_type, cmdh->if_id, + cmdh->transaction_id)) { /* Unable to decode command response indicates the cmd_type * may be a command instead of response. So try to decoding it. 
*/ - if (!ipc_mux_dl_dlcmds_decode_process(ipc_mux, cmdh)) { + size = offsetof(struct mux_lite_cmdh, param) + + sizeof(cmdh->param.flow_ctl); + if (!ipc_mux_dl_cmds_decode_process(ipc_mux, &cmdh->param, + cmdh->command_type, + cmdh->if_id, + cmdh->cmd_len, size)) { /* Decoded command may need a response. Give the * response according to the command type. */ @@ -349,7 +438,7 @@ static void ipc_mux_dl_adgh_decode(struct iosm_mux *ipc_mux, adgh = (struct mux_adgh *)block; - if (adgh->signature != cpu_to_le32(MUX_SIG_ADGH)) { + if (adgh->signature != cpu_to_le32(IOSM_AGGR_MUX_SIG_ADGH)) { dev_err(ipc_mux->dev, "invalid ADGH signature received"); return; } @@ -392,6 +481,192 @@ static void ipc_mux_dl_adgh_decode(struct iosm_mux *ipc_mux, ipc_mux->session[if_id].flush = 1; } +static void ipc_mux_dl_acbcmd_decode(struct iosm_mux *ipc_mux, + struct mux_cmdh *cmdh, int size) +{ + u32 link_st = IOSM_AGGR_MUX_CMD_LINK_STATUS_REPORT_RESP; + u32 fctl_dis = IOSM_AGGR_MUX_CMD_FLOW_CTL_DISABLE; + u32 fctl_ena = IOSM_AGGR_MUX_CMD_FLOW_CTL_ENABLE; + u32 fctl_ack = IOSM_AGGR_MUX_CMD_FLOW_CTL_ACK; + union mux_cmd_param *cmd_p = NULL; + u32 cmd = link_st; + u32 trans_id; + + if (!ipc_mux_dl_cmds_decode_process(ipc_mux, &cmdh->param, + cmdh->command_type, cmdh->if_id, + cmdh->cmd_len, size)) { + size = 0; + if (cmdh->command_type == cpu_to_le32(link_st)) { + cmd_p = &cmdh->param; + cmd_p->link_status_resp.response = MUX_CMD_RESP_SUCCESS; + } else if ((cmdh->command_type == cpu_to_le32(fctl_ena)) || + (cmdh->command_type == cpu_to_le32(fctl_dis))) { + cmd = fctl_ack; + } else { + return; + } + trans_id = le32_to_cpu(cmdh->transaction_id); + ipc_mux_dl_acb_send_cmds(ipc_mux, cmd, cmdh->if_id, + trans_id, cmd_p, size, false, true); + } +} + +/* Decode an aggregated command block. */ +static void ipc_mux_dl_acb_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb) +{ + struct mux_acbh *acbh; + struct mux_cmdh *cmdh; + u32 next_cmd_index; + u8 *block; + int size; + + acbh = (struct mux_acbh *)(skb->data); + block = (u8 *)(skb->data); + + next_cmd_index = le32_to_cpu(acbh->first_cmd_index); + next_cmd_index = array_index_nospec(next_cmd_index, + sizeof(struct mux_cmdh)); + + while (next_cmd_index != 0) { + cmdh = (struct mux_cmdh *)&block[next_cmd_index]; + next_cmd_index = le32_to_cpu(cmdh->next_cmd_index); + if (ipc_mux_dl_cmdresps_decode_process(ipc_mux, cmdh->param, + cmdh->command_type, + cmdh->if_id, + cmdh->transaction_id)) { + size = offsetof(struct mux_cmdh, param) + + sizeof(cmdh->param.flow_ctl); + ipc_mux_dl_acbcmd_decode(ipc_mux, cmdh, size); + } + } +} + +/* process datagram */ +static int mux_dl_process_dg(struct iosm_mux *ipc_mux, struct mux_adbh *adbh, + struct mux_adth_dg *dg, struct sk_buff *skb, + int if_id, int nr_of_dg) +{ + u32 dl_head_pad_len = ipc_mux->session[if_id].dl_head_pad_len; + u32 packet_offset, i, rc; + + for (i = 0; i < nr_of_dg; i++, dg++) { + if (le32_to_cpu(dg->datagram_index) + < sizeof(struct mux_adbh)) + goto dg_error; + + /* Is the packet inside of the ADB */ + if (le32_to_cpu(dg->datagram_index) >= + le32_to_cpu(adbh->block_length)) { + goto dg_error; + } else { + packet_offset = + le32_to_cpu(dg->datagram_index) + + dl_head_pad_len; + /* Pass the packet to the netif layer. */ + rc = ipc_mux_net_receive(ipc_mux, if_id, ipc_mux->wwan, + packet_offset, + dg->service_class, + skb); + if (rc) + goto dg_error; + } + } + return 0; +dg_error: + return -1; +} + +/* Decode an aggregated data block. 
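Both ipc_mux_dl_acb_decode() above and mux_dl_adb_decode() below pair an architectural bounds check with array_index_nospec(), so a device-controlled index cannot be used to read out of bounds even under branch misprediction. The pattern, reduced to its core as used on the session id:

        if (if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES)
                return;         /* reject out-of-range ids architecturally */
        if_id = array_index_nospec(if_id, IPC_MEM_MUX_IP_SESSION_ENTRIES);
        wwan = ipc_mux->session[if_id].wwan;    /* safe under speculation too */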
*/ +static void mux_dl_adb_decode(struct iosm_mux *ipc_mux, + struct sk_buff *skb) +{ + struct mux_adth_dg *dg; + struct iosm_wwan *wwan; + struct mux_adbh *adbh; + struct mux_adth *adth; + int nr_of_dg, if_id; + u32 adth_index; + u8 *block; + + block = skb->data; + adbh = (struct mux_adbh *)block; + + /* Process the aggregated datagram tables. */ + adth_index = le32_to_cpu(adbh->first_table_index); + + /* Has CP sent an empty ADB ? */ + if (adth_index < 1) { + dev_err(ipc_mux->dev, "unexpected empty ADB"); + goto adb_decode_err; + } + + /* Loop through mixed session tables. */ + while (adth_index) { + /* Get the reference to the table header. */ + adth = (struct mux_adth *)(block + adth_index); + + /* Get the interface id and map it to the netif id. */ + if_id = adth->if_id; + if (if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) + goto adb_decode_err; + + if_id = array_index_nospec(if_id, + IPC_MEM_MUX_IP_SESSION_ENTRIES); + + /* Is the session active ? */ + wwan = ipc_mux->session[if_id].wwan; + if (!wwan) + goto adb_decode_err; + + /* Consistency checks for aggregated datagram table. */ + if (adth->signature != cpu_to_le32(IOSM_AGGR_MUX_SIG_ADTH)) + goto adb_decode_err; + + if (le16_to_cpu(adth->table_length) < (sizeof(struct mux_adth) - + sizeof(struct mux_adth_dg))) + goto adb_decode_err; + + /* Calculate the number of datagrams. */ + nr_of_dg = (le16_to_cpu(adth->table_length) - + sizeof(struct mux_adth) + + sizeof(struct mux_adth_dg)) / + sizeof(struct mux_adth_dg); + + /* Is the datagram table empty ? */ + if (nr_of_dg < 1) { + dev_err(ipc_mux->dev, + "adthidx=%u,nr_of_dg=%d,next_tblidx=%u", + adth_index, nr_of_dg, + le32_to_cpu(adth->next_table_index)); + + /* Move to the next aggregated datagram table. */ + adth_index = le32_to_cpu(adth->next_table_index); + continue; + } + + /* New aggregated datagram table. */ + dg = &adth->dg; + if (mux_dl_process_dg(ipc_mux, adbh, dg, skb, if_id, + nr_of_dg) < 0) + goto adb_decode_err; + + /* mark session for final flush */ + ipc_mux->session[if_id].flush = 1; + + /* Move to the next aggregated datagram table. */ + adth_index = le32_to_cpu(adth->next_table_index); + } + +adb_decode_err: + return; +} + +/** + * ipc_mux_dl_decode - Route the DL packet through the IP MUX layer + * depending on Header. + * @ipc_mux: Pointer to MUX data-struct + * @skb: Pointer to ipc_skb. + */ void ipc_mux_dl_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb) { u32 signature; @@ -403,14 +678,18 @@ void ipc_mux_dl_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb) signature = le32_to_cpup((__le32 *)skb->data); switch (signature) { - case MUX_SIG_ADGH: + case IOSM_AGGR_MUX_SIG_ADBH: /* Aggregated Data Block Header */ + mux_dl_adb_decode(ipc_mux, skb); + break; + case IOSM_AGGR_MUX_SIG_ADGH: ipc_mux_dl_adgh_decode(ipc_mux, skb); break; - case MUX_SIG_FCTH: ipc_mux_dl_fcth_decode(ipc_mux, skb->data); break; - + case IOSM_AGGR_MUX_SIG_ACBH: /* Aggregated Command Block Header */ + ipc_mux_dl_acb_decode(ipc_mux, skb); + break; case MUX_SIG_CMDH: ipc_mux_dl_cmd_decode(ipc_mux, skb); break; @@ -427,7 +706,10 @@ static int ipc_mux_ul_skb_alloc(struct iosm_mux *ipc_mux, { /* Take the first element of the free list. */ struct sk_buff *skb = skb_dequeue(&ul_adb->free_list); + u32 no_if = IPC_MEM_MUX_IP_SESSION_ENTRIES; + u32 *next_tb_id; int qlt_size; + u32 if_id; if (!skb) return -EBUSY; /* Wait for a free ADB skb. 
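ipc_mux_dl_decode() just below demultiplexes purely on the first 32-bit word of the downlink skb; the signatures are the headers' four-character ASCII names stored little-endian, which is why they all read 0x48..41 ('H' high byte, 'A' low byte). A standalone check, with the constants copied from iosm_ipc_mux_codec.h further down:

#include <stdint.h>
#include <stdio.h>

#define SIG_ACBH 0x48424341u    /* "ACBH" */
#define SIG_ADBH 0x48424441u    /* "ADBH" */
#define SIG_ADGH 0x48474441u    /* "ADGH" */

static const char *classify(const uint8_t *p)
{
        uint32_t sig = p[0] | p[1] << 8 |
                       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;

        switch (sig) {
        case SIG_ACBH: return "aggregated command block";
        case SIG_ADBH: return "aggregated data block";
        case SIG_ADGH: return "mux lite datagram";
        default:       return "unknown";
        }
}

int main(void)
{
        const uint8_t frame[4] = { 'A', 'D', 'B', 'H' };
        puts(classify(frame));  /* aggregated data block */
        return 0;
}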
*/ @@ -436,7 +718,37 @@ static int ipc_mux_ul_skb_alloc(struct iosm_mux *ipc_mux, IPC_CB(skb)->op_type = (u8)UL_MUX_OP_ADB; switch (type) { - case MUX_SIG_ADGH: + case IOSM_AGGR_MUX_SIG_ADBH: + /* Save the ADB memory settings. */ + ul_adb->dest_skb = skb; + ul_adb->buf = skb->data; + ul_adb->size = IPC_MEM_MAX_ADB_BUF_SIZE; + + /* reset statistic counter */ + ul_adb->if_cnt = 0; + ul_adb->payload_size = 0; + ul_adb->dg_cnt_total = 0; + + /* Initialize the ADBH. */ + ul_adb->adbh = (struct mux_adbh *)ul_adb->buf; + memset(ul_adb->adbh, 0, sizeof(struct mux_adbh)); + ul_adb->adbh->signature = cpu_to_le32(IOSM_AGGR_MUX_SIG_ADBH); + ul_adb->adbh->block_length = + cpu_to_le32(sizeof(struct mux_adbh)); + next_tb_id = (unsigned int *)&ul_adb->adbh->first_table_index; + ul_adb->next_table_index = next_tb_id; + + /* Clear the local copy of DGs for new ADB */ + memset(ul_adb->dg, 0, sizeof(ul_adb->dg)); + + /* Clear the DG count and QLT updated status for new ADB */ + for (if_id = 0; if_id < no_if; if_id++) { + ul_adb->dg_count[if_id] = 0; + ul_adb->qlt_updated[if_id] = 0; + } + break; + + case IOSM_AGGR_MUX_SIG_ADGH: /* Save the ADB memory settings. */ ul_adb->dest_skb = skb; ul_adb->buf = skb->data; @@ -506,6 +818,94 @@ static void ipc_mux_ul_adgh_finish(struct iosm_mux *ipc_mux) str, bytes); } +static void ipc_mux_ul_encode_adth(struct iosm_mux *ipc_mux, + struct mux_adb *ul_adb, int *out_offset) +{ + int i, qlt_size, offset = *out_offset; + struct mux_qlth *p_adb_qlt; + struct mux_adth_dg *dg; + struct mux_adth *adth; + u16 adth_dg_size; + u32 *next_tb_id; + + qlt_size = offsetof(struct mux_qlth, ql) + + MUX_QUEUE_LEVEL * sizeof(struct mux_qlth_ql); + + for (i = 0; i < ipc_mux->nr_sessions; i++) { + if (ul_adb->dg_count[i] > 0) { + adth_dg_size = offsetof(struct mux_adth, dg) + + ul_adb->dg_count[i] * sizeof(*dg); + + *ul_adb->next_table_index = offset; + adth = (struct mux_adth *)&ul_adb->buf[offset]; + next_tb_id = (unsigned int *)&adth->next_table_index; + ul_adb->next_table_index = next_tb_id; + offset += adth_dg_size; + adth->signature = cpu_to_le32(IOSM_AGGR_MUX_SIG_ADTH); + adth->if_id = i; + adth->table_length = cpu_to_le16(adth_dg_size); + adth_dg_size -= offsetof(struct mux_adth, dg); + memcpy(&adth->dg, ul_adb->dg[i], adth_dg_size); + ul_adb->if_cnt++; + } + + if (ul_adb->qlt_updated[i]) { + *ul_adb->next_table_index = offset; + p_adb_qlt = (struct mux_qlth *)&ul_adb->buf[offset]; + ul_adb->next_table_index = + (u32 *)&p_adb_qlt->next_table_index; + memcpy(p_adb_qlt, ul_adb->pp_qlt[i], qlt_size); + offset += qlt_size; + } + } + *out_offset = offset; +} + +/** + * ipc_mux_ul_adb_finish - Add the TD of the aggregated session packets to TDR. + * @ipc_mux: Pointer to MUX data-struct. 
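ipc_mux_ul_encode_adth() above relies on a small trick: ul_adb->next_table_index is not an offset but a pointer into the ADB buffer, aimed at whichever link field was written last (initially the ADBH's first_table_index). Each new ADTH or QLT patches its own offset through that pointer and then re-aims it at its own link field, so the chain is built in one pass. A self-contained model of the idea:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t buf[16] = { 0 };
        uint32_t first = 0;             /* stands in for first_table_index */
        uint32_t *link = &first;        /* like ul_adb->next_table_index */
        const uint32_t table_at[] = { 4, 10 };  /* word offsets, arbitrary */
        int i;

        for (i = 0; i < 2; i++) {
                *link = table_at[i];    /* patch the previous link field */
                link = &buf[table_at[i]];       /* next patch target */
        }
        *link = 0;                      /* zero ends the chain */

        printf("first=%u next=%u last=%u\n", first, buf[4], buf[10]);
        /* first=4 next=10 last=0 */
        return 0;
}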
+ */ +void ipc_mux_ul_adb_finish(struct iosm_mux *ipc_mux) +{ + bool ul_data_pend = false; + struct mux_adb *ul_adb; + unsigned long flags; + int offset; + + ul_adb = &ipc_mux->ul_adb; + if (!ul_adb->dest_skb) + return; + + offset = *ul_adb->next_table_index; + ipc_mux_ul_encode_adth(ipc_mux, ul_adb, &offset); + ul_adb->adbh->block_length = cpu_to_le32(offset); + + if (le32_to_cpu(ul_adb->adbh->block_length) > ul_adb->size) { + ul_adb->dest_skb = NULL; + return; + } + + *ul_adb->next_table_index = 0; + ul_adb->adbh->sequence_nr = cpu_to_le16(ipc_mux->adb_tx_sequence_nr++); + skb_put(ul_adb->dest_skb, le32_to_cpu(ul_adb->adbh->block_length)); + + spin_lock_irqsave(&(&ipc_mux->channel->ul_list)->lock, flags); + __skb_queue_tail(&ipc_mux->channel->ul_list, ul_adb->dest_skb); + spin_unlock_irqrestore(&(&ipc_mux->channel->ul_list)->lock, flags); + + ul_adb->dest_skb = NULL; + /* Updates the TDs with ul_list */ + ul_data_pend = ipc_imem_ul_write_td(ipc_mux->imem); + + /* Delay the doorbell irq */ + if (ul_data_pend) + ipc_imem_td_update_timer_start(ipc_mux->imem); + + ipc_mux->acc_adb_size += le32_to_cpu(ul_adb->adbh->block_length); + ipc_mux->acc_payload_size += ul_adb->payload_size; + ipc_mux->ul_data_pend_bytes += ul_adb->payload_size; +} + /* Allocates an ADB from the free list and initializes it with ADBH */ static bool ipc_mux_ul_adb_allocate(struct iosm_mux *ipc_mux, struct mux_adb *adb, int *size_needed, @@ -688,7 +1088,7 @@ static int ipc_mux_ul_adgh_encode(struct iosm_mux *ipc_mux, int session_id, while (nr_of_pkts > 0) { /* get destination skb allocated */ if (ipc_mux_ul_adb_allocate(ipc_mux, adb, &ipc_mux->size_needed, - MUX_SIG_ADGH)) { + IOSM_AGGR_MUX_SIG_ADGH)) { dev_err(ipc_mux->dev, "no reserved memory for ADGH"); return -ENOMEM; } @@ -720,7 +1120,7 @@ static int ipc_mux_ul_adgh_encode(struct iosm_mux *ipc_mux, int session_id, memcpy(adb->buf + offset + pad_len, src_skb->data, src_skb->len); - adb->adgh->signature = cpu_to_le32(MUX_SIG_ADGH); + adb->adgh->signature = cpu_to_le32(IOSM_AGGR_MUX_SIG_ADGH); adb->adgh->if_id = session_id; adb->adgh->length = cpu_to_le16(sizeof(struct mux_adgh) + pad_len + @@ -762,6 +1162,187 @@ static int ipc_mux_ul_adgh_encode(struct iosm_mux *ipc_mux, int session_id, return adb_updated; } +/** + * ipc_mux_ul_adb_update_ql - Adds Queue Level Table and Queue Level to ADB + * @ipc_mux: pointer to MUX instance data + * @p_adb: pointer to UL aggegated data block + * @session_id: session id + * @qlth_n_ql_size: Length (in bytes) of the datagram table + * @ul_list: pointer to skb buffer head + */ +void ipc_mux_ul_adb_update_ql(struct iosm_mux *ipc_mux, struct mux_adb *p_adb, + int session_id, int qlth_n_ql_size, + struct sk_buff_head *ul_list) +{ + int qlevel = ul_list->qlen; + struct mux_qlth *p_qlt; + + p_qlt = (struct mux_qlth *)p_adb->pp_qlt[session_id]; + + /* Initialize QLTH if not been done */ + if (p_adb->qlt_updated[session_id] == 0) { + p_qlt->signature = cpu_to_le32(MUX_SIG_QLTH); + p_qlt->if_id = session_id; + p_qlt->table_length = cpu_to_le16(qlth_n_ql_size); + p_qlt->reserved = 0; + p_qlt->reserved2 = 0; + } + + /* Update Queue Level information always */ + p_qlt->ql.nr_of_bytes = cpu_to_le32(qlevel); + p_adb->qlt_updated[session_id] = 1; +} + +/* Update the next table index. 
*/ +static int mux_ul_dg_update_tbl_index(struct iosm_mux *ipc_mux, + int session_id, + struct sk_buff_head *ul_list, + struct mux_adth_dg *dg, + int aligned_size, + u32 qlth_n_ql_size, + struct mux_adb *adb, + struct sk_buff *src_skb) +{ + ipc_mux_ul_adb_update_ql(ipc_mux, adb, session_id, + qlth_n_ql_size, ul_list); + ipc_mux_ul_adb_finish(ipc_mux); + if (ipc_mux_ul_adb_allocate(ipc_mux, adb, &ipc_mux->size_needed, + IOSM_AGGR_MUX_SIG_ADBH)) { + dev_kfree_skb(src_skb); + return -ENOMEM; + } + ipc_mux->size_needed = le32_to_cpu(adb->adbh->block_length); + + ipc_mux->size_needed += offsetof(struct mux_adth, dg); + ipc_mux->size_needed += qlth_n_ql_size; + ipc_mux->size_needed += sizeof(*dg) + aligned_size; + return 0; +} + +/* Process encode session UL data. */ +static int mux_ul_dg_encode(struct iosm_mux *ipc_mux, struct mux_adb *adb, + struct mux_adth_dg *dg, + struct sk_buff_head *ul_list, + struct sk_buff *src_skb, int session_id, + int pkt_to_send, u32 qlth_n_ql_size, + int *out_offset, int head_pad_len) +{ + int aligned_size; + int offset = *out_offset; + unsigned long flags; + int nr_of_skb = 0; + + while (pkt_to_send > 0) { + /* Peek at the head of the list. */ + src_skb = skb_peek(ul_list); + if (!src_skb) { + dev_err(ipc_mux->dev, + "skb peek return NULL with count : %d", + pkt_to_send); + return -1; + } + aligned_size = ALIGN((head_pad_len + src_skb->len), 4); + ipc_mux->size_needed += sizeof(*dg) + aligned_size; + + if (ipc_mux->size_needed > adb->size || + ((ipc_mux->size_needed + ipc_mux->ul_data_pend_bytes) >= + IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B)) { + *adb->next_table_index = offset; + if (mux_ul_dg_update_tbl_index(ipc_mux, session_id, + ul_list, dg, + aligned_size, + qlth_n_ql_size, adb, + src_skb) < 0) + return -ENOMEM; + nr_of_skb = 0; + offset = le32_to_cpu(adb->adbh->block_length); + /* Load pointer to next available datagram entry */ + dg = adb->dg[session_id] + adb->dg_count[session_id]; + } + /* Add buffer without head padding to next pending transfer. */ + memcpy(adb->buf + offset + head_pad_len, + src_skb->data, src_skb->len); + /* Setup datagram entry. */ + dg->datagram_index = cpu_to_le32(offset); + dg->datagram_length = cpu_to_le16(src_skb->len + head_pad_len); + dg->service_class = (((struct sk_buff *)src_skb)->priority); + dg->reserved = 0; + adb->dg_cnt_total++; + adb->payload_size += le16_to_cpu(dg->datagram_length); + dg++; + adb->dg_count[session_id]++; + offset += aligned_size; + /* Remove the processed elements and free it. */ + spin_lock_irqsave(&ul_list->lock, flags); + src_skb = __skb_dequeue(ul_list); + spin_unlock_irqrestore(&ul_list->lock, flags); + + dev_kfree_skb(src_skb); + nr_of_skb++; + pkt_to_send--; + } + *out_offset = offset; + return nr_of_skb; +} + +/* Process encode session UL data to ADB. 
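mux_ul_dg_encode() above packs datagrams back to back on 4-byte boundaries: the payload is copied in after its head padding, datagram_length records payload plus padding, and the write offset advances by the aligned size. Worked through with arbitrary numbers:

#include <stdint.h>
#include <stdio.h>

#define ALIGN4(x) (((x) + 3u) & ~3u)

int main(void)
{
        uint32_t offset = 16;   /* as if the ADBH had just been written */
        int head_pad = 2, skb_len = 61;
        uint32_t aligned = ALIGN4(head_pad + skb_len);  /* 64 */

        printf("payload at %u, datagram_length %d, next offset %u\n",
               offset + head_pad, skb_len + head_pad, offset + aligned);
        /* payload at 18, datagram_length 63, next offset 80 */
        return 0;
}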
*/ +static int mux_ul_adb_encode(struct iosm_mux *ipc_mux, int session_id, + struct mux_session *session, + struct sk_buff_head *ul_list, struct mux_adb *adb, + int pkt_to_send) +{ + int adb_updated = -EINVAL; + int head_pad_len, offset; + struct sk_buff *src_skb = NULL; + struct mux_adth_dg *dg; + u32 qlth_n_ql_size; + + /* If any of the opened session has set Flow Control ON then limit the + * UL data to mux_flow_ctrl_high_thresh_b bytes + */ + if (ipc_mux->ul_data_pend_bytes >= + IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B) { + ipc_mux_stop_tx_for_all_sessions(ipc_mux); + return adb_updated; + } + + qlth_n_ql_size = offsetof(struct mux_qlth, ql) + + MUX_QUEUE_LEVEL * sizeof(struct mux_qlth_ql); + head_pad_len = session->ul_head_pad_len; + + if (session->ul_head_pad_len > IPC_MEM_DL_ETH_OFFSET) + head_pad_len = session->ul_head_pad_len - IPC_MEM_DL_ETH_OFFSET; + + if (ipc_mux_ul_adb_allocate(ipc_mux, adb, &ipc_mux->size_needed, + IOSM_AGGR_MUX_SIG_ADBH)) + return -ENOMEM; + + offset = le32_to_cpu(adb->adbh->block_length); + + if (ipc_mux->size_needed == 0) + ipc_mux->size_needed = offset; + + /* Calculate the size needed for ADTH, QLTH and QL*/ + if (adb->dg_count[session_id] == 0) { + ipc_mux->size_needed += offsetof(struct mux_adth, dg); + ipc_mux->size_needed += qlth_n_ql_size; + } + + dg = adb->dg[session_id] + adb->dg_count[session_id]; + + if (mux_ul_dg_encode(ipc_mux, adb, dg, ul_list, src_skb, + session_id, pkt_to_send, qlth_n_ql_size, &offset, + head_pad_len) > 0) { + adb_updated = 1; + *adb->next_table_index = offset; + ipc_mux_ul_adb_update_ql(ipc_mux, adb, session_id, + qlth_n_ql_size, ul_list); + adb->adbh->block_length = cpu_to_le32(offset); + } + + return adb_updated; +} + bool ipc_mux_ul_data_encode(struct iosm_mux *ipc_mux) { struct sk_buff_head *ul_list; @@ -802,28 +1383,88 @@ bool ipc_mux_ul_data_encode(struct iosm_mux *ipc_mux) * -> try next session id. */ continue; - - updated = ipc_mux_ul_adgh_encode(ipc_mux, session_id, session, - ul_list, &ipc_mux->ul_adb, - dg_n); + if (ipc_mux->protocol == MUX_LITE) + updated = ipc_mux_ul_adgh_encode(ipc_mux, session_id, + session, ul_list, + &ipc_mux->ul_adb, + dg_n); + else + updated = mux_ul_adb_encode(ipc_mux, session_id, + session, ul_list, + &ipc_mux->ul_adb, + dg_n); } ipc_mux->adb_prep_ongoing = false; return updated == 1; } -void ipc_mux_ul_encoded_process(struct iosm_mux *ipc_mux, struct sk_buff *skb) +/* Calculates the Payload from any given ADB. */ +static int ipc_mux_get_payload_from_adb(struct iosm_mux *ipc_mux, + struct mux_adbh *p_adbh) { - struct mux_adgh *adgh; - u16 adgh_len; + struct mux_adth_dg *dg; + struct mux_adth *adth; + u32 payload_size = 0; + u32 next_table_idx; + int nr_of_dg, i; + + /* Process the aggregated datagram tables. */ + next_table_idx = le32_to_cpu(p_adbh->first_table_index); + + if (next_table_idx < sizeof(struct mux_adbh)) { + dev_err(ipc_mux->dev, "unexpected empty ADB"); + return payload_size; + } - adgh = (struct mux_adgh *)skb->data; - adgh_len = le16_to_cpu(adgh->length); + while (next_table_idx != 0) { + /* Get the reference to the table header. 
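mux_ul_adb_encode() above refuses to start encoding once un-acknowledged UL bytes reach IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B, mux_ul_dg_encode() finishes the current ADB early for the same reason, and ipc_mux_ul_encoded_process() below credits the bytes back once the block has gone out. A toy model of the watermark test, threshold taken from the define:

#include <stdbool.h>
#include <stdio.h>

#define FLOWCTRL_HIGH_B (110 * 1024)    /* IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B */

/* keep encoding only while pending + new stays under the high watermark */
static bool may_encode(long long ul_pend_bytes, int size_needed)
{
        return ul_pend_bytes + size_needed < FLOWCTRL_HIGH_B;
}

int main(void)
{
        printf("%d\n", may_encode(100 * 1024, 8 * 1024));       /* 1 */
        printf("%d\n", may_encode(100 * 1024, 16 * 1024));      /* 0 */
        return 0;
}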
*/ + adth = (struct mux_adth *)((u8 *)p_adbh + next_table_idx); - if (adgh->signature == cpu_to_le32(MUX_SIG_ADGH) && - ipc_mux->ul_flow == MUX_UL) - ipc_mux->ul_data_pend_bytes = ipc_mux->ul_data_pend_bytes - - adgh_len; + if (adth->signature == cpu_to_le32(IOSM_AGGR_MUX_SIG_ADTH)) { + nr_of_dg = (le16_to_cpu(adth->table_length) - + sizeof(struct mux_adth) + + sizeof(struct mux_adth_dg)) / + sizeof(struct mux_adth_dg); + + if (nr_of_dg <= 0) + return payload_size; + + dg = &adth->dg; + + for (i = 0; i < nr_of_dg; i++, dg++) { + if (le32_to_cpu(dg->datagram_index) < + sizeof(struct mux_adbh)) { + return payload_size; + } + payload_size += + le16_to_cpu(dg->datagram_length); + } + } + next_table_idx = le32_to_cpu(adth->next_table_index); + } + + return payload_size; +} + +void ipc_mux_ul_encoded_process(struct iosm_mux *ipc_mux, struct sk_buff *skb) +{ + union mux_type_header hr; + u16 adgh_len; + int payload; + + if (ipc_mux->protocol == MUX_LITE) { + hr.adgh = (struct mux_adgh *)skb->data; + adgh_len = le16_to_cpu(hr.adgh->length); + if (hr.adgh->signature == cpu_to_le32(IOSM_AGGR_MUX_SIG_ADGH) && + ipc_mux->ul_flow == MUX_UL) + ipc_mux->ul_data_pend_bytes = + ipc_mux->ul_data_pend_bytes - adgh_len; + } else { + hr.adbh = (struct mux_adbh *)(skb->data); + payload = ipc_mux_get_payload_from_adb(ipc_mux, hr.adbh); + ipc_mux->ul_data_pend_bytes -= payload; + } if (ipc_mux->ul_flow == MUX_UL) dev_dbg(ipc_mux->dev, "ul_data_pend_bytes: %lld", @@ -846,10 +1487,13 @@ static int ipc_mux_tq_ul_trigger_encode(struct iosm_imem *ipc_imem, int arg, /* Add session UL data to a ADB and ADGH */ ul_data_pend = ipc_mux_ul_data_encode(ipc_mux); - if (ul_data_pend) + if (ul_data_pend) { + if (ipc_mux->protocol == MUX_AGGREGATION) + ipc_imem_adb_timer_start(ipc_mux->imem); + /* Delay the doorbell irq */ ipc_imem_td_update_timer_start(ipc_mux->imem); - + } /* reset the debounce flag */ ipc_mux->ev_mux_net_transmit_pending = false; diff --git a/drivers/net/wwan/iosm/iosm_ipc_mux_codec.h b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.h index aae83db5cbb8..5d4e3b89542c 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_mux_codec.h +++ b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.h @@ -13,6 +13,39 @@ */ #define MUX_QUEUE_LEVEL 1 +/* ADB finish timer value */ +#define IOSM_AGGR_MUX_ADB_FINISH_TIMEOUT_NSEC (500 * 1000) + +/* Enables the flow control (Flow is not allowed) */ +#define IOSM_AGGR_MUX_CMD_FLOW_CTL_ENABLE 5 + +/* Disables the flow control (Flow is allowed) */ +#define IOSM_AGGR_MUX_CMD_FLOW_CTL_DISABLE 6 + +/* ACK the flow control command. Shall have the same Transaction ID as the + * matching FLOW_CTL command + */ +#define IOSM_AGGR_MUX_CMD_FLOW_CTL_ACK 7 + +/* Aggregation Protocol Command for report packet indicating link quality + */ +#define IOSM_AGGR_MUX_CMD_LINK_STATUS_REPORT 8 + +/* Response to a report packet */ +#define IOSM_AGGR_MUX_CMD_LINK_STATUS_REPORT_RESP 9 + +/* ACBH: Signature of the Aggregated Command Block Header. */ +#define IOSM_AGGR_MUX_SIG_ACBH 0x48424341 + +/* ADTH: Signature of the Aggregated Datagram Table Header. */ +#define IOSM_AGGR_MUX_SIG_ADTH 0x48544441 + +/* ADBH: Signature of the Aggregated Data Block Header. */ +#define IOSM_AGGR_MUX_SIG_ADBH 0x48424441 + +/* ADGH: Signature of the Datagram Header. */ +#define IOSM_AGGR_MUX_SIG_ADGH 0x48474441 + /* Size of the buffer for the IP MUX commands. */ #define MUX_MAX_UL_ACB_BUF_SIZE 256 @@ -53,6 +86,85 @@ #define IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B (110 * 1024) /** + * struct mux_cmdh - Structure of Command Header. 
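The 500 us IOSM_AGGR_MUX_ADB_FINISH_TIMEOUT_NSEC defined above bounds the latency cost of aggregation: an ADB is finished either when the next datagram will not fit (the buffer is IPC_MEM_MAX_ADB_BUF_SIZE, 16 KiB) or, failing that, when the adb_timer armed in iosm_ipc_imem.c fires. A toy of the combined flush policy, with the capacity taken from the define and the timer modeled as a flag:

#include <stdbool.h>
#include <stdio.h>

#define ADB_CAP (16 * 1024)     /* IPC_MEM_MAX_ADB_BUF_SIZE */

static bool must_finish(int used, int next_dg, bool timer_fired)
{
        return timer_fired || used + next_dg > ADB_CAP;
}

int main(void)
{
        printf("%d\n", must_finish(15000, 2000, false));  /* 1: no room */
        printf("%d\n", must_finish(1000, 2000, false));   /* 0: keep batching */
        printf("%d\n", must_finish(1000, 0, true));       /* 1: timer flush */
        return 0;
}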
+ * @signature: Signature of the Command Header. + * @cmd_len: Length (in bytes) of the Aggregated Command Block. + * @if_id: ID of the interface the commands in the table belong to. + * @reserved: Reserved. Set to zero. + * @next_cmd_index: Index (in bytes) to the next command in the buffer. + * @command_type: Command Enum. See table Session Management chapter for + * details. + * @transaction_id: The Transaction ID shall be unique to the command + * @param: Optional parameters used with the command. + */ +struct mux_cmdh { + __le32 signature; + __le16 cmd_len; + u8 if_id; + u8 reserved; + __le32 next_cmd_index; + __le32 command_type; + __le32 transaction_id; + union mux_cmd_param param; +}; + +/** + * struct mux_acbh - Structure of the Aggregated Command Block Header. + * @signature: Signature of the Aggregated Command Block Header. + * @reserved: Reserved bytes. Set to zero. + * @sequence_nr: Block sequence number. + * @block_length: Length (in bytes) of the Aggregated Command Block. + * @first_cmd_index: Index (in bytes) to the first command in the buffer. + */ +struct mux_acbh { + __le32 signature; + __le16 reserved; + __le16 sequence_nr; + __le32 block_length; + __le32 first_cmd_index; +}; + +/** + * struct mux_adbh - Structure of the Aggregated Data Block Header. + * @signature: Signature of the Aggregated Data Block Header. + * @reserved: Reserved bytes. Set to zero. + * @sequence_nr: Block sequence number. + * @block_length: Length (in bytes) of the Aggregated Data Block. + * @first_table_index: Index (in bytes) to the first Datagram Table in + * the buffer. + */ +struct mux_adbh { + __le32 signature; + __le16 reserved; + __le16 sequence_nr; + __le32 block_length; + __le32 first_table_index; +}; + +/** + * struct mux_adth - Structure of the Aggregated Datagram Table Header. + * @signature: Signature of the Aggregated Datagram Table Header. + * @table_length: Length (in bytes) of the datagram table. + * @if_id: ID of the interface the datagrams in the table + * belong to. + * @opt_ipv4v6: Indicates IPv4(=0)/IPv6(=1) hint. + * @reserved: Reserved bits. Set to zero. + * @next_table_index: Index (in bytes) to the next Datagram Table in + * the buffer. + * @reserved2: Reserved bytes. Set to zero + * @dg: datagramm table with variable length + */ +struct mux_adth { + __le32 signature; + __le16 table_length; + u8 if_id; + u8 opt_ipv4v6; + __le32 next_table_index; + __le32 reserved2; + struct mux_adth_dg dg; +}; + +/** * struct mux_adgh - Aggregated Datagram Header. * @signature: Signature of the Aggregated Datagram Header(0x48474441) * @length: Length (in bytes) of the datagram header. This length @@ -129,11 +241,25 @@ struct ipc_mem_lite_gen_tbl { }; /** - * ipc_mux_dl_decode -Route the DL packet through the IP MUX layer - * depending on Header. - * @ipc_mux: Pointer to MUX data-struct - * @skb: Pointer to ipc_skb. 
+ * struct mux_type_cmdh - Structure of command header for mux lite and aggr + * @ack_lite: MUX Lite Command Header pointer + * @ack_aggr: Command Header pointer */ +union mux_type_cmdh { + struct mux_lite_cmdh *ack_lite; + struct mux_cmdh *ack_aggr; +}; + +/** + * struct mux_type_header - Structure of mux header type + * @adgh: Aggregated Datagram Header pointer + * @adbh: Aggregated Data Block Header pointer + */ +union mux_type_header { + struct mux_adgh *adgh; + struct mux_adbh *adbh; +}; + void ipc_mux_dl_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb); /** @@ -147,7 +273,7 @@ void ipc_mux_dl_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb); * @blocking: True for blocking send * @respond: If true return transaction ID * - * Returns: 0 in success and failure value on error + * Returns: 0 in success and failure value on error */ int ipc_mux_dl_acb_send_cmds(struct iosm_mux *ipc_mux, u32 cmd_type, u8 if_id, u32 transaction_id, union mux_cmd_param *param, @@ -190,4 +316,10 @@ bool ipc_mux_ul_data_encode(struct iosm_mux *ipc_mux); */ void ipc_mux_ul_encoded_process(struct iosm_mux *ipc_mux, struct sk_buff *skb); +void ipc_mux_ul_adb_finish(struct iosm_mux *ipc_mux); + +void ipc_mux_ul_adb_update_ql(struct iosm_mux *ipc_mux, struct mux_adb *p_adb, + int session_id, int qlth_n_ql_size, + struct sk_buff_head *ul_list); + #endif diff --git a/drivers/net/wwan/iosm/iosm_ipc_pcie.c b/drivers/net/wwan/iosm/iosm_ipc_pcie.c index d73894e2a84e..31f57b986df2 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_pcie.c +++ b/drivers/net/wwan/iosm/iosm_ipc_pcie.c @@ -320,6 +320,7 @@ ret_fail: static const struct pci_device_id iosm_ipc_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, INTEL_CP_DEVICE_7560_ID) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, INTEL_CP_DEVICE_7360_ID) }, {} }; MODULE_DEVICE_TABLE(pci, iosm_ipc_ids); diff --git a/drivers/net/wwan/iosm/iosm_ipc_pcie.h b/drivers/net/wwan/iosm/iosm_ipc_pcie.h index 7d1f0cd7364c..844cf1fed994 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_pcie.h +++ b/drivers/net/wwan/iosm/iosm_ipc_pcie.h @@ -14,6 +14,7 @@ /* Device ID */ #define INTEL_CP_DEVICE_7560_ID 0x7560 +#define INTEL_CP_DEVICE_7360_ID 0x7360 /* Define for BAR area usage */ #define IPC_DOORBELL_BAR0 0 diff --git a/drivers/net/wwan/wwan_core.c b/drivers/net/wwan/wwan_core.c index 1508dc2a497b..b8c7843730ed 100644 --- a/drivers/net/wwan/wwan_core.c +++ b/drivers/net/wwan/wwan_core.c @@ -160,6 +160,42 @@ struct dentry *wwan_get_debugfs_dir(struct device *parent) return wwandev->debugfs_dir; } EXPORT_SYMBOL_GPL(wwan_get_debugfs_dir); + +static int wwan_dev_debugfs_match(struct device *dev, const void *dir) +{ + struct wwan_device *wwandev; + + if (dev->type != &wwan_dev_type) + return 0; + + wwandev = to_wwan_dev(dev); + + return wwandev->debugfs_dir == dir; +} + +static struct wwan_device *wwan_dev_get_by_debugfs(struct dentry *dir) +{ + struct device *dev; + + dev = class_find_device(wwan_class, NULL, dir, wwan_dev_debugfs_match); + if (!dev) + return ERR_PTR(-ENODEV); + + return to_wwan_dev(dev); +} + +void wwan_put_debugfs_dir(struct dentry *dir) +{ + struct wwan_device *wwandev = wwan_dev_get_by_debugfs(dir); + + if (WARN_ON(IS_ERR(wwandev))) + return; + + /* wwan_dev_get_by_debugfs() also got a reference */ + put_device(&wwandev->dev); + put_device(&wwandev->dev); +} +EXPORT_SYMBOL_GPL(wwan_put_debugfs_dir); #endif /* This function allocates and registers a new WWAN device OR if a WWAN device diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c index d24b7a7993aa..990360d75cb6 
100644 --- a/drivers/net/xen-netback/xenbus.c +++ b/drivers/net/xen-netback/xenbus.c @@ -256,6 +256,7 @@ static void backend_disconnect(struct backend_info *be) unsigned int queue_index; xen_unregister_watchers(vif); + xenbus_rm(XBT_NIL, be->dev->nodename, "hotplug-status"); #ifdef CONFIG_DEBUG_FS xenvif_debugfs_delif(vif); #endif /* CONFIG_DEBUG_FS */ @@ -675,7 +676,6 @@ static void hotplug_status_changed(struct xenbus_watch *watch, /* Not interested in this watch anymore. */ unregister_hotplug_status_watch(be); - xenbus_rm(XBT_NIL, be->dev->nodename, "hotplug-status"); } kfree(str); } @@ -824,15 +824,11 @@ static void connect(struct backend_info *be) xenvif_carrier_on(be->vif); unregister_hotplug_status_watch(be); - if (xenbus_exists(XBT_NIL, dev->nodename, "hotplug-status")) { - err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, - NULL, hotplug_status_changed, - "%s/%s", dev->nodename, - "hotplug-status"); - if (err) - goto err; + err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, NULL, + hotplug_status_changed, + "%s/%s", dev->nodename, "hotplug-status"); + if (!err) be->have_hotplug_status_watch = 1; - } netif_tx_wake_all_queues(be->vif->dev); diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 8b18246ad999..daa4e6106aac 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -424,14 +424,12 @@ static bool xennet_tx_buf_gc(struct netfront_queue *queue) queue->tx_link[id] = TX_LINK_NONE; skb = queue->tx_skbs[id]; queue->tx_skbs[id] = NULL; - if (unlikely(gnttab_query_foreign_access( - queue->grant_tx_ref[id]) != 0)) { + if (unlikely(!gnttab_end_foreign_access_ref( + queue->grant_tx_ref[id], GNTMAP_readonly))) { dev_alert(dev, "Grant still in use by backend domain\n"); goto err; } - gnttab_end_foreign_access_ref( - queue->grant_tx_ref[id], GNTMAP_readonly); gnttab_release_grant_reference( &queue->gref_tx_head, queue->grant_tx_ref[id]); queue->grant_tx_ref[id] = GRANT_INVALID_REF; @@ -842,6 +840,28 @@ static int xennet_close(struct net_device *dev) return 0; } +static void xennet_destroy_queues(struct netfront_info *info) +{ + unsigned int i; + + for (i = 0; i < info->netdev->real_num_tx_queues; i++) { + struct netfront_queue *queue = &info->queues[i]; + + if (netif_running(info->netdev)) + napi_disable(&queue->napi); + netif_napi_del(&queue->napi); + } + + kfree(info->queues); + info->queues = NULL; +} + +static void xennet_uninit(struct net_device *dev) +{ + struct netfront_info *np = netdev_priv(dev); + xennet_destroy_queues(np); +} + static void xennet_set_rx_rsp_cons(struct netfront_queue *queue, RING_IDX val) { unsigned long flags; @@ -968,7 +988,6 @@ static int xennet_get_responses(struct netfront_queue *queue, struct device *dev = &queue->info->netdev->dev; struct bpf_prog *xdp_prog; struct xdp_buff xdp; - unsigned long ret; int slots = 1; int err = 0; u32 verdict; @@ -1010,8 +1029,13 @@ static int xennet_get_responses(struct netfront_queue *queue, goto next; } - ret = gnttab_end_foreign_access_ref(ref, 0); - BUG_ON(!ret); + if (!gnttab_end_foreign_access_ref(ref, 0)) { + dev_alert(dev, + "Grant still in use by backend domain\n"); + queue->info->broken = true; + dev_alert(dev, "Disabled for further use\n"); + return -EINVAL; + } gnttab_release_grant_reference(&queue->gref_rx_head, ref); @@ -1232,6 +1256,10 @@ static int xennet_poll(struct napi_struct *napi, int budget) &need_xdp_flush); if (unlikely(err)) { + if (queue->info->broken) { + spin_unlock(&queue->rx_lock); + return 0; + } err: while ((skb = 
__skb_dequeue(&tmpq))) __skb_queue_tail(&errq, skb); @@ -1611,6 +1639,7 @@ static int xennet_xdp(struct net_device *dev, struct netdev_bpf *xdp) } static const struct net_device_ops xennet_netdev_ops = { + .ndo_uninit = xennet_uninit, .ndo_open = xennet_open, .ndo_stop = xennet_close, .ndo_start_xmit = xennet_start_xmit, @@ -1895,7 +1924,7 @@ static int setup_netfront(struct xenbus_device *dev, struct netfront_queue *queue, unsigned int feature_split_evtchn) { struct xen_netif_tx_sring *txs; - struct xen_netif_rx_sring *rxs; + struct xen_netif_rx_sring *rxs = NULL; grant_ref_t gref; int err; @@ -1915,21 +1944,21 @@ static int setup_netfront(struct xenbus_device *dev, err = xenbus_grant_ring(dev, txs, 1, &gref); if (err < 0) - goto grant_tx_ring_fail; + goto fail; queue->tx_ring_ref = gref; rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH); if (!rxs) { err = -ENOMEM; xenbus_dev_fatal(dev, err, "allocating rx ring page"); - goto alloc_rx_ring_fail; + goto fail; } SHARED_RING_INIT(rxs); FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE); err = xenbus_grant_ring(dev, rxs, 1, &gref); if (err < 0) - goto grant_rx_ring_fail; + goto fail; queue->rx_ring_ref = gref; if (feature_split_evtchn) @@ -1942,22 +1971,28 @@ static int setup_netfront(struct xenbus_device *dev, err = setup_netfront_single(queue); if (err) - goto alloc_evtchn_fail; + goto fail; return 0; /* If we fail to setup netfront, it is safe to just revoke access to * granted pages because backend is not accessing it at this point. */ -alloc_evtchn_fail: - gnttab_end_foreign_access_ref(queue->rx_ring_ref, 0); -grant_rx_ring_fail: - free_page((unsigned long)rxs); -alloc_rx_ring_fail: - gnttab_end_foreign_access_ref(queue->tx_ring_ref, 0); -grant_tx_ring_fail: - free_page((unsigned long)txs); -fail: + fail: + if (queue->rx_ring_ref != GRANT_INVALID_REF) { + gnttab_end_foreign_access(queue->rx_ring_ref, 0, + (unsigned long)rxs); + queue->rx_ring_ref = GRANT_INVALID_REF; + } else { + free_page((unsigned long)rxs); + } + if (queue->tx_ring_ref != GRANT_INVALID_REF) { + gnttab_end_foreign_access(queue->tx_ring_ref, 0, + (unsigned long)txs); + queue->tx_ring_ref = GRANT_INVALID_REF; + } else { + free_page((unsigned long)txs); + } return err; } @@ -2103,22 +2138,6 @@ error: return err; } -static void xennet_destroy_queues(struct netfront_info *info) -{ - unsigned int i; - - for (i = 0; i < info->netdev->real_num_tx_queues; i++) { - struct netfront_queue *queue = &info->queues[i]; - - if (netif_running(info->netdev)) - napi_disable(&queue->napi); - netif_napi_del(&queue->napi); - } - - kfree(info->queues); - info->queues = NULL; -} - static int xennet_create_page_pool(struct netfront_queue *queue) diff --git a/drivers/nfc/nfcmrvl/spi.c b/drivers/nfc/nfcmrvl/spi.c index 5b833a9a83f8..a38e2fcdfd39 100644 --- a/drivers/nfc/nfcmrvl/spi.c +++ b/drivers/nfc/nfcmrvl/spi.c @@ -174,12 +174,11 @@ static int nfcmrvl_spi_probe(struct spi_device *spi) return 0; } -static int nfcmrvl_spi_remove(struct spi_device *spi) +static void nfcmrvl_spi_remove(struct spi_device *spi) { struct nfcmrvl_spi_drv_data *drv_data = spi_get_drvdata(spi); nfcmrvl_nci_unregister_dev(drv_data->priv); - return 0; } static const struct of_device_id of_nfcmrvl_spi_match[] __maybe_unused = { diff --git a/drivers/nfc/port100.c b/drivers/nfc/port100.c index d7db1a0e6be1..00d8ea6dcb5d 100644 --- a/drivers/nfc/port100.c +++ b/drivers/nfc/port100.c @@ -1612,7 +1612,9 @@ free_nfc_dev: nfc_digital_free_device(dev->nfc_digital_dev); error: + usb_kill_urb(dev->in_urb); 
usb_free_urb(dev->in_urb); + usb_kill_urb(dev->out_urb); usb_free_urb(dev->out_urb); usb_put_dev(dev->udev); diff --git a/drivers/nfc/st-nci/spi.c b/drivers/nfc/st-nci/spi.c index 4e723992e74c..169eacc0a32a 100644 --- a/drivers/nfc/st-nci/spi.c +++ b/drivers/nfc/st-nci/spi.c @@ -263,13 +263,11 @@ static int st_nci_spi_probe(struct spi_device *dev) return r; } -static int st_nci_spi_remove(struct spi_device *dev) +static void st_nci_spi_remove(struct spi_device *dev) { struct st_nci_spi_phy *phy = spi_get_drvdata(dev); ndlc_remove(phy->ndlc); - - return 0; } static struct spi_device_id st_nci_spi_id_table[] = { diff --git a/drivers/nfc/st95hf/core.c b/drivers/nfc/st95hf/core.c index b23f47936473..ed704bb77226 100644 --- a/drivers/nfc/st95hf/core.c +++ b/drivers/nfc/st95hf/core.c @@ -1198,7 +1198,7 @@ err_disable_regulator: return ret; } -static int st95hf_remove(struct spi_device *nfc_spi_dev) +static void st95hf_remove(struct spi_device *nfc_spi_dev) { int result = 0; unsigned char reset_cmd = ST95HF_COMMAND_RESET; @@ -1236,8 +1236,6 @@ static int st95hf_remove(struct spi_device *nfc_spi_dev) /* disable regulator */ if (stcontext->st95hf_supply) regulator_disable(stcontext->st95hf_supply); - - return 0; } /* Register as SPI protocol driver */ diff --git a/drivers/nfc/trf7970a.c b/drivers/nfc/trf7970a.c index 29ca9c328df2..21d68664fe08 100644 --- a/drivers/nfc/trf7970a.c +++ b/drivers/nfc/trf7970a.c @@ -2144,7 +2144,7 @@ err_destroy_lock: return ret; } -static int trf7970a_remove(struct spi_device *spi) +static void trf7970a_remove(struct spi_device *spi) { struct trf7970a *trf = spi_get_drvdata(spi); @@ -2160,8 +2160,6 @@ static int trf7970a_remove(struct spi_device *spi) regulator_disable(trf->regulator); mutex_destroy(&trf->lock); - - return 0; } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/ntb/hw/intel/ntb_hw_gen4.c b/drivers/ntb/hw/intel/ntb_hw_gen4.c index fede05151f69..4081fc538ff4 100644 --- a/drivers/ntb/hw/intel/ntb_hw_gen4.c +++ b/drivers/ntb/hw/intel/ntb_hw_gen4.c @@ -168,6 +168,18 @@ static enum ntb_topo gen4_ppd_topo(struct intel_ntb_dev *ndev, u32 ppd) return NTB_TOPO_NONE; } +static enum ntb_topo spr_ppd_topo(struct intel_ntb_dev *ndev, u32 ppd) +{ + switch (ppd & SPR_PPD_TOPO_MASK) { + case SPR_PPD_TOPO_B2B_USD: + return NTB_TOPO_B2B_USD; + case SPR_PPD_TOPO_B2B_DSD: + return NTB_TOPO_B2B_DSD; + } + + return NTB_TOPO_NONE; +} + int gen4_init_dev(struct intel_ntb_dev *ndev) { struct pci_dev *pdev = ndev->ntb.pdev; @@ -183,7 +195,10 @@ int gen4_init_dev(struct intel_ntb_dev *ndev) } ppd1 = ioread32(ndev->self_mmio + GEN4_PPD1_OFFSET); - ndev->ntb.topo = gen4_ppd_topo(ndev, ppd1); + if (pdev_is_ICX(pdev)) + ndev->ntb.topo = gen4_ppd_topo(ndev, ppd1); + else if (pdev_is_SPR(pdev)) + ndev->ntb.topo = spr_ppd_topo(ndev, ppd1); dev_dbg(&pdev->dev, "ppd %#x topo %s\n", ppd1, ntb_topo_string(ndev->ntb.topo)); if (ndev->ntb.topo == NTB_TOPO_NONE) diff --git a/drivers/ntb/hw/intel/ntb_hw_gen4.h b/drivers/ntb/hw/intel/ntb_hw_gen4.h index 3fcd3fdce9ed..f91323eaf5ce 100644 --- a/drivers/ntb/hw/intel/ntb_hw_gen4.h +++ b/drivers/ntb/hw/intel/ntb_hw_gen4.h @@ -49,10 +49,14 @@ #define GEN4_PPD_CLEAR_TRN 0x0001 #define GEN4_PPD_LINKTRN 0x0008 #define GEN4_PPD_CONN_MASK 0x0300 +#define SPR_PPD_CONN_MASK 0x0700 #define GEN4_PPD_CONN_B2B 0x0200 #define GEN4_PPD_DEV_MASK 0x1000 #define GEN4_PPD_DEV_DSD 0x1000 #define GEN4_PPD_DEV_USD 0x0000 +#define SPR_PPD_DEV_MASK 0x4000 +#define SPR_PPD_DEV_DSD 0x4000 +#define SPR_PPD_DEV_USD 0x0000 #define GEN4_LINK_CTRL_LINK_DISABLE 0x0010 #define 
GEN4_SLOTSTS 0xb05a @@ -62,6 +66,10 @@ #define GEN4_PPD_TOPO_B2B_USD (GEN4_PPD_CONN_B2B | GEN4_PPD_DEV_USD) #define GEN4_PPD_TOPO_B2B_DSD (GEN4_PPD_CONN_B2B | GEN4_PPD_DEV_DSD) +#define SPR_PPD_TOPO_MASK (SPR_PPD_CONN_MASK | SPR_PPD_DEV_MASK) +#define SPR_PPD_TOPO_B2B_USD (GEN4_PPD_CONN_B2B | SPR_PPD_DEV_USD) +#define SPR_PPD_TOPO_B2B_DSD (GEN4_PPD_CONN_B2B | SPR_PPD_DEV_DSD) + #define GEN4_DB_COUNT 32 #define GEN4_DB_LINK 32 #define GEN4_DB_LINK_BIT BIT_ULL(GEN4_DB_LINK) @@ -112,4 +120,12 @@ static inline int pdev_is_ICX(struct pci_dev *pdev) return 0; } +static inline int pdev_is_SPR(struct pci_dev *pdev) +{ + if (pdev_is_gen4(pdev) && + pdev->revision > PCI_DEVICE_REVISION_ICX_MAX) + return 1; + return 0; +} + #endif diff --git a/drivers/ntb/msi.c b/drivers/ntb/msi.c index dd683cb58d09..6295e55ef85e 100644 --- a/drivers/ntb/msi.c +++ b/drivers/ntb/msi.c @@ -33,7 +33,6 @@ int ntb_msi_init(struct ntb_dev *ntb, { phys_addr_t mw_phys_addr; resource_size_t mw_size; - size_t struct_size; int peer_widx; int peers; int ret; @@ -43,9 +42,8 @@ int ntb_msi_init(struct ntb_dev *ntb, if (peers <= 0) return -EINVAL; - struct_size = sizeof(*ntb->msi) + sizeof(*ntb->msi->peer_mws) * peers; - - ntb->msi = devm_kzalloc(&ntb->dev, struct_size, GFP_KERNEL); + ntb->msi = devm_kzalloc(&ntb->dev, struct_size(ntb->msi, peer_mws, peers), + GFP_KERNEL); if (!ntb->msi) return -ENOMEM; diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 961a5f8a44d2..fd4720d37cc0 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -368,6 +368,7 @@ EXPORT_SYMBOL_GPL(nvme_complete_rq); void nvme_complete_batch_req(struct request *req) { + trace_nvme_complete_rq(req); nvme_cleanup_cmd(req); nvme_end_req_zoned(req); } @@ -1722,7 +1723,7 @@ static int nvme_setup_streams_ns(struct nvme_ctrl *ctrl, struct nvme_ns *ns, return 0; } -static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id) +static void nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id) { struct nvme_ctrl *ctrl = ns->ctrl; @@ -1738,7 +1739,8 @@ static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id) ns->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS); if (!ns->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED)) - return 0; + return; + if (ctrl->ops->flags & NVME_F_FABRICS) { /* * The NVMe over Fabrics specification only supports metadata as * part of the extended data LBA. We rely on HCA/HBA support to * remap the separate metadata buffer from the block layer.
*/ if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT))) - return -EINVAL; + return; ns->features |= NVME_NS_EXT_LBAS; @@ -1773,8 +1775,6 @@ static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id) else ns->features |= NVME_NS_METADATA_SUPPORTED; } - - return 0; } static void nvme_set_queue_limits(struct nvme_ctrl *ctrl, @@ -1915,9 +1915,7 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id) ns->lba_shift = id->lbaf[lbaf].ds; nvme_set_queue_limits(ns->ctrl, ns->queue); - ret = nvme_configure_metadata(ns, id); - if (ret) - goto out_unfreeze; + nvme_configure_metadata(ns, id); nvme_set_chunk_sectors(ns, id); nvme_update_disk_info(ns->disk, ns, id); @@ -1933,7 +1931,7 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id) if (blk_queue_is_zoned(ns->queue)) { ret = nvme_revalidate_zones(ns); if (ret && !nvme_first_scan(ns->disk)) - goto out; + return ret; } if (nvme_ns_head_multipath(ns->head)) { @@ -1948,16 +1946,16 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id) return 0; out_unfreeze: - blk_mq_unfreeze_queue(ns->disk->queue); -out: /* * If probing fails due an unsupported feature, hide the block device, * but still allow other access. */ if (ret == -ENODEV) { ns->disk->flags |= GENHD_FL_HIDDEN; + set_bit(NVME_NS_READY, &ns->flags); ret = 0; } + blk_mq_unfreeze_queue(ns->disk->queue); return ret; } @@ -4573,7 +4571,7 @@ static void nvme_set_queue_dying(struct nvme_ns *ns) if (test_and_set_bit(NVME_NS_DEAD, &ns->flags)) return; - blk_set_queue_dying(ns->queue); + blk_mark_disk_dead(ns->disk); nvme_start_ns_queue(ns); set_capacity_and_notify(ns->disk, 0); diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index f8bf6606eb2f..ff775235534c 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -848,7 +848,7 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head) { if (!head->disk) return; - blk_set_queue_dying(head->disk->queue); + blk_mark_disk_dead(head->disk); /* make sure all pending bios are cleaned up */ kblockd_schedule_work(&head->requeue_work); flush_work(&head->requeue_work); diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 01e24b5703db..65e00c64a588 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -44,6 +44,8 @@ struct nvme_tcp_request { u32 data_len; u32 pdu_len; u32 pdu_sent; + u32 h2cdata_left; + u32 h2cdata_offset; u16 ttag; __le16 status; struct list_head entry; @@ -95,6 +97,7 @@ struct nvme_tcp_queue { struct nvme_tcp_request *request; int queue_size; + u32 maxh2cdata; size_t cmnd_capsule_len; struct nvme_tcp_ctrl *ctrl; unsigned long flags; @@ -572,23 +575,26 @@ static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue, return ret; } -static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req, - struct nvme_tcp_r2t_pdu *pdu) +static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req) { struct nvme_tcp_data_pdu *data = req->pdu; struct nvme_tcp_queue *queue = req->queue; struct request *rq = blk_mq_rq_from_pdu(req); + u32 h2cdata_sent = req->pdu_len; u8 hdgst = nvme_tcp_hdgst_len(queue); u8 ddgst = nvme_tcp_ddgst_len(queue); req->state = NVME_TCP_SEND_H2C_PDU; req->offset = 0; - req->pdu_len = le32_to_cpu(pdu->r2t_length); + req->pdu_len = min(req->h2cdata_left, queue->maxh2cdata); req->pdu_sent = 0; + req->h2cdata_left -= req->pdu_len; + req->h2cdata_offset += h2cdata_sent; memset(data, 0, sizeof(*data)); data->hdr.type = nvme_tcp_h2c_data; - data->hdr.flags = 
NVME_TCP_F_DATA_LAST; + if (!req->h2cdata_left) + data->hdr.flags = NVME_TCP_F_DATA_LAST; if (queue->hdr_digest) data->hdr.flags |= NVME_TCP_F_HDGST; if (queue->data_digest) @@ -597,9 +603,9 @@ static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req, data->hdr.pdo = data->hdr.hlen + hdgst; data->hdr.plen = cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst); - data->ttag = pdu->ttag; + data->ttag = req->ttag; data->command_id = nvme_cid(rq); - data->data_offset = pdu->r2t_offset; + data->data_offset = cpu_to_le32(req->h2cdata_offset); data->data_length = cpu_to_le32(req->pdu_len); } @@ -609,6 +615,7 @@ static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue, struct nvme_tcp_request *req; struct request *rq; u32 r2t_length = le32_to_cpu(pdu->r2t_length); + u32 r2t_offset = le32_to_cpu(pdu->r2t_offset); rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id); if (!rq) { @@ -633,14 +640,19 @@ static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue, return -EPROTO; } - if (unlikely(le32_to_cpu(pdu->r2t_offset) < req->data_sent)) { + if (unlikely(r2t_offset < req->data_sent)) { dev_err(queue->ctrl->ctrl.device, "req %d unexpected r2t offset %u (expected %zu)\n", - rq->tag, le32_to_cpu(pdu->r2t_offset), req->data_sent); + rq->tag, r2t_offset, req->data_sent); return -EPROTO; } - nvme_tcp_setup_h2c_data_pdu(req, pdu); + req->pdu_len = 0; + req->h2cdata_left = r2t_length; + req->h2cdata_offset = r2t_offset; + req->ttag = pdu->ttag; + + nvme_tcp_setup_h2c_data_pdu(req); nvme_tcp_queue_request(req, false, true); return 0; @@ -913,13 +925,22 @@ static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue) static void nvme_tcp_fail_request(struct nvme_tcp_request *req) { - nvme_tcp_end_request(blk_mq_rq_from_pdu(req), NVME_SC_HOST_PATH_ERROR); + if (nvme_tcp_async_req(req)) { + union nvme_result res = {}; + + nvme_complete_async_event(&req->queue->ctrl->ctrl, + cpu_to_le16(NVME_SC_HOST_PATH_ERROR), &res); + } else { + nvme_tcp_end_request(blk_mq_rq_from_pdu(req), + NVME_SC_HOST_PATH_ERROR); + } } static int nvme_tcp_try_send_data(struct nvme_tcp_request *req) { struct nvme_tcp_queue *queue = req->queue; int req_data_len = req->data_len; + u32 h2cdata_left = req->h2cdata_left; while (true) { struct page *page = nvme_tcp_req_cur_page(req); @@ -964,7 +985,10 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req) req->state = NVME_TCP_SEND_DDGST; req->offset = 0; } else { - nvme_tcp_done_send_req(queue); + if (h2cdata_left) + nvme_tcp_setup_h2c_data_pdu(req); + else + nvme_tcp_done_send_req(queue); } return 1; } @@ -1022,9 +1046,14 @@ static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req) if (queue->hdr_digest && !req->offset) nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); - ret = kernel_sendpage(queue->sock, virt_to_page(pdu), - offset_in_page(pdu) + req->offset, len, - MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST); + if (!req->h2cdata_left) + ret = kernel_sendpage(queue->sock, virt_to_page(pdu), + offset_in_page(pdu) + req->offset, len, + MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST); + else + ret = sock_no_sendpage(queue->sock, virt_to_page(pdu), + offset_in_page(pdu) + req->offset, len, + MSG_DONTWAIT | MSG_MORE); if (unlikely(ret <= 0)) return ret; @@ -1044,6 +1073,7 @@ static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req) { struct nvme_tcp_queue *queue = req->queue; size_t offset = req->offset; + u32 h2cdata_left = req->h2cdata_left; int ret; struct msghdr msg = { .msg_flags = MSG_DONTWAIT }; struct kvec iov = { @@ 
-1061,7 +1091,10 @@ static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req) return ret; if (offset + ret == NVME_TCP_DIGEST_LENGTH) { - nvme_tcp_done_send_req(queue); + if (h2cdata_left) + nvme_tcp_setup_h2c_data_pdu(req); + else + nvme_tcp_done_send_req(queue); return 1; } @@ -1253,6 +1286,7 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue) struct msghdr msg = {}; struct kvec iov; bool ctrl_hdgst, ctrl_ddgst; + u32 maxh2cdata; int ret; icreq = kzalloc(sizeof(*icreq), GFP_KERNEL); @@ -1336,6 +1370,14 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue) goto free_icresp; } + maxh2cdata = le32_to_cpu(icresp->maxdata); + if ((maxh2cdata % 4) || (maxh2cdata < NVME_TCP_MIN_MAXH2CDATA)) { + pr_err("queue %d: invalid maxh2cdata returned %u\n", + nvme_tcp_queue_id(queue), maxh2cdata); + goto free_icresp; + } + queue->maxh2cdata = maxh2cdata; + ret = 0; free_icresp: kfree(icresp); @@ -2321,6 +2363,7 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns, req->data_sent = 0; req->pdu_len = 0; req->pdu_sent = 0; + req->h2cdata_left = 0; req->data_len = blk_rq_nr_phys_segments(rq) ? blk_rq_payload_bytes(rq) : 0; req->curr_bio = rq->bio; diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index 23a38dcf0fc4..9fd1602b539d 100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c @@ -771,7 +771,7 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config) if (config->wp_gpio) nvmem->wp_gpio = config->wp_gpio; - else + else if (!config->ignore_wp) nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp", GPIOD_OUT_HIGH); if (IS_ERR(nvmem->wp_gpio)) { diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index ad85ff6474ff..ec315b060cd5 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c @@ -648,8 +648,8 @@ void __init early_init_fdt_scan_reserved_mem(void) } fdt_scan_reserved_mem(); - fdt_init_reserved_mem(); fdt_reserve_elfcorehdr(); + fdt_init_reserved_mem(); } /** diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c index 70992103c07d..2c2fb161b572 100644 --- a/drivers/of/unittest.c +++ b/drivers/of/unittest.c @@ -513,24 +513,24 @@ static void __init of_unittest_parse_phandle_with_args(void) memset(&args, 0, sizeof(args)); EXPECT_BEGIN(KERN_INFO, - "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found -1"); + "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found 1"); rc = of_parse_phandle_with_args(np, "phandle-list-bad-args", "#phandle-cells", 1, &args); EXPECT_END(KERN_INFO, - "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found -1"); + "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found 1"); unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc); EXPECT_BEGIN(KERN_INFO, - "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found -1"); + "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found 1"); rc = of_count_phandle_with_args(np, "phandle-list-bad-args", "#phandle-cells"); EXPECT_END(KERN_INFO, - "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found -1"); + "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found 1"); unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc); } @@ -670,12 +670,12 @@ static void __init of_unittest_parse_phandle_with_args_map(void) memset(&args, 0, sizeof(args)); EXPECT_BEGIN(KERN_INFO, - "OF: /testcase-data/phandle-tests/consumer-b: #phandle-cells = 2 found -1"); + "OF: /testcase-data/phandle-tests/consumer-b: #phandle-cells = 2 found 1"); rc = 
of_parse_phandle_with_args_map(np, "phandle-list-bad-args", "phandle", 1, &args); EXPECT_END(KERN_INFO, - "OF: /testcase-data/phandle-tests/consumer-b: #phandle-cells = 2 found -1"); + "OF: /testcase-data/phandle-tests/consumer-b: #phandle-cells = 2 found 1"); unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc); } @@ -1257,12 +1257,12 @@ static void __init of_unittest_platform_populate(void) unittest(pdev, "device 2 creation failed\n"); EXPECT_BEGIN(KERN_INFO, - "platform testcase-data:testcase-device2: IRQ index 0 not found"); + "platform testcase-data:testcase-device2: error -ENXIO: IRQ index 0 not found"); irq = platform_get_irq(pdev, 0); EXPECT_END(KERN_INFO, - "platform testcase-data:testcase-device2: IRQ index 0 not found"); + "platform testcase-data:testcase-device2: error -ENXIO: IRQ index 0 not found"); unittest(irq < 0 && irq != -EPROBE_DEFER, "device parsing error failed - %d\n", irq); diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c index 059566f54429..9be007c9420f 100644 --- a/drivers/parisc/ccio-dma.c +++ b/drivers/parisc/ccio-dma.c @@ -1003,7 +1003,7 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, ioc->usg_calls++; #endif - while(sg_dma_len(sglist) && nents--) { + while (nents && sg_dma_len(sglist)) { #ifdef CCIO_COLLECT_STATS ioc->usg_pages += sg_dma_len(sglist) >> PAGE_SHIFT; @@ -1011,6 +1011,7 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, ccio_unmap_page(dev, sg_dma_address(sglist), sg_dma_len(sglist), direction, 0); ++sglist; + nents--; } DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents); diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c index e60690d38d67..374b9199878d 100644 --- a/drivers/parisc/sba_iommu.c +++ b/drivers/parisc/sba_iommu.c @@ -1047,7 +1047,7 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, spin_unlock_irqrestore(&ioc->res_lock, flags); #endif - while (sg_dma_len(sglist) && nents--) { + while (nents && sg_dma_len(sglist)) { sba_unmap_page(dev, sg_dma_address(sglist), sg_dma_len(sglist), direction, 0); @@ -1056,6 +1056,7 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, ioc->usingle_calls--; /* kluge since call is unmap_sg() */ #endif ++sglist; + nents--; } DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents); diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c index 20ea2ee330b8..ae0bc2fee4ca 100644 --- a/drivers/pci/controller/pci-hyperv.c +++ b/drivers/pci/controller/pci-hyperv.c @@ -2155,8 +2155,17 @@ static void hv_pci_assign_numa_node(struct hv_pcibus_device *hbus) if (!hv_dev) continue; - if (hv_dev->desc.flags & HV_PCI_DEVICE_FLAG_NUMA_AFFINITY) - set_dev_node(&dev->dev, hv_dev->desc.virtual_numa_node); + if (hv_dev->desc.flags & HV_PCI_DEVICE_FLAG_NUMA_AFFINITY && + hv_dev->desc.virtual_numa_node < num_possible_nodes()) + /* + * The kernel may boot with some NUMA nodes offline + * (e.g. in a KDUMP kernel) or with NUMA disabled via + * "numa=off". In those cases, adjust the host provided + * NUMA node to a valid NUMA node used by the kernel. 
+ */ + set_dev_node(&dev->dev, + numa_map_to_online_node( + hv_dev->desc.virtual_numa_node)); put_pcichild(hv_dev); } diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c index 71258ea3d35f..f8e82c5e2d87 100644 --- a/drivers/pci/controller/pci-mvebu.c +++ b/drivers/pci/controller/pci-mvebu.c @@ -1329,7 +1329,8 @@ static int mvebu_pcie_probe(struct platform_device *pdev) * indirectly via kernel emulated PCI bridge driver. */ mvebu_pcie_setup_hw(port); - mvebu_pcie_set_local_dev_nr(port, 0); + mvebu_pcie_set_local_dev_nr(port, 1); + mvebu_pcie_set_local_bus_nr(port, 0); } pcie->nports = i; diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c index cc166c683638..eb05cceab964 100644 --- a/drivers/pci/controller/vmd.c +++ b/drivers/pci/controller/vmd.c @@ -99,11 +99,13 @@ struct vmd_irq { * @srcu: SRCU struct for local synchronization. * @count: number of child IRQs assigned to this vector; used to track * sharing. + * @virq: The underlying VMD Linux interrupt number */ struct vmd_irq_list { struct list_head irq_list; struct srcu_struct srcu; unsigned int count; + unsigned int virq; }; struct vmd_dev { @@ -253,7 +255,6 @@ static int vmd_msi_init(struct irq_domain *domain, struct msi_domain_info *info, struct msi_desc *desc = arg->desc; struct vmd_dev *vmd = vmd_from_bus(msi_desc_to_pci_dev(desc)->bus); struct vmd_irq *vmdirq = kzalloc(sizeof(*vmdirq), GFP_KERNEL); - unsigned int index, vector; if (!vmdirq) return -ENOMEM; @@ -261,10 +262,8 @@ static int vmd_msi_init(struct irq_domain *domain, struct msi_domain_info *info, INIT_LIST_HEAD(&vmdirq->node); vmdirq->irq = vmd_next_irq(vmd, desc); vmdirq->virq = virq; - index = index_from_irqs(vmd, vmdirq->irq); - vector = pci_irq_vector(vmd->dev, index); - irq_domain_set_info(domain, virq, vector, info->chip, vmdirq, + irq_domain_set_info(domain, virq, vmdirq->irq->virq, info->chip, vmdirq, handle_untracked_irq, vmd, NULL); return 0; } @@ -685,7 +684,8 @@ static int vmd_alloc_irqs(struct vmd_dev *vmd) return err; INIT_LIST_HEAD(&vmd->irqs[i].irq_list); - err = devm_request_irq(&dev->dev, pci_irq_vector(dev, i), + vmd->irqs[i].virq = pci_irq_vector(dev, i); + err = devm_request_irq(&dev->dev, vmd->irqs[i].virq, vmd_irq, IRQF_NO_THREAD, vmd->name, &vmd->irqs[i]); if (err) @@ -969,7 +969,7 @@ static int vmd_suspend(struct device *dev) int i; for (i = 0; i < vmd->msix_count; i++) - devm_free_irq(dev, pci_irq_vector(pdev, i), &vmd->irqs[i]); + devm_free_irq(dev, vmd->irqs[i].virq, &vmd->irqs[i]); return 0; } @@ -981,7 +981,7 @@ static int vmd_resume(struct device *dev) int err, i; for (i = 0; i < vmd->msix_count; i++) { - err = devm_request_irq(dev, pci_irq_vector(pdev, i), + err = devm_request_irq(dev, vmd->irqs[i].virq, vmd_irq, IRQF_NO_THREAD, vmd->name, &vmd->irqs[i]); if (err) diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c index bda630889f95..604feeb84ee4 100644 --- a/drivers/pci/pcie/portdrv_core.c +++ b/drivers/pci/pcie/portdrv_core.c @@ -166,6 +166,9 @@ static int pcie_init_service_irqs(struct pci_dev *dev, int *irqs, int mask) { int ret, i; + for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) + irqs[i] = -1; + /* * If we support PME but can't use MSI/MSI-X for it, we have to * fall back to INTx or other interrupts, e.g., a system shared @@ -314,10 +317,8 @@ static int pcie_device_init(struct pci_dev *pdev, int service, int irq) */ int pcie_port_device_register(struct pci_dev *dev) { - int status, capabilities, irq_services, i, nr_service; - int 
irqs[PCIE_PORT_DEVICE_MAXSERVICES] = { - [0 ... PCIE_PORT_DEVICE_MAXSERVICES-1] = -1 - }; + int status, capabilities, i, nr_service; + int irqs[PCIE_PORT_DEVICE_MAXSERVICES]; /* Enable PCI Express port device */ status = pci_enable_device(dev); @@ -330,32 +331,18 @@ int pcie_port_device_register(struct pci_dev *dev) return 0; pci_set_master(dev); - - irq_services = 0; - if (IS_ENABLED(CONFIG_PCIE_PME)) - irq_services |= PCIE_PORT_SERVICE_PME; - if (IS_ENABLED(CONFIG_PCIEAER)) - irq_services |= PCIE_PORT_SERVICE_AER; - if (IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)) - irq_services |= PCIE_PORT_SERVICE_HP; - if (IS_ENABLED(CONFIG_PCIE_DPC)) - irq_services |= PCIE_PORT_SERVICE_DPC; - irq_services &= capabilities; - - if (irq_services) { - /* - * Initialize service IRQs. Don't use service devices that - * require interrupts if there is no way to generate them. - * However, some drivers may have a polling mode (e.g. - * pciehp_poll_mode) that can be used in the absence of IRQs. - * Allow them to determine if that is to be used. - */ - status = pcie_init_service_irqs(dev, irqs, irq_services); - if (status) { - irq_services &= PCIE_PORT_SERVICE_HP; - if (!irq_services) - goto error_disable; - } + /* + * Initialize service irqs. Don't use service devices that + * require interrupts if there is no way to generate them. + * However, some drivers may have a polling mode (e.g. pciehp_poll_mode) + * that can be used in the absence of irqs. Allow them to determine + * if that is to be used. + */ + status = pcie_init_service_irqs(dev, irqs, capabilities); + if (status) { + capabilities &= PCIE_PORT_SERVICE_HP; + if (!capabilities) + goto error_disable; } /* Allocate child services if any */ diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index d2dd6a6cda60..65f7f6b0576c 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -5344,11 +5344,6 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags); */ static void quirk_amd_harvest_no_ats(struct pci_dev *pdev) { - if ((pdev->device == 0x7312 && pdev->revision != 0x00) || - (pdev->device == 0x7340 && pdev->revision != 0xc5) || - (pdev->device == 0x7341 && pdev->revision != 0x00)) - return; - if (pdev->device == 0x15d8) { if (pdev->revision == 0xcf && pdev->subsystem_vendor == 0xea50 && @@ -5370,10 +5365,19 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_amd_harvest_no_ats); /* AMD Iceland dGPU */ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_amd_harvest_no_ats); /* AMD Navi10 dGPU */ +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7310, quirk_amd_harvest_no_ats); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7312, quirk_amd_harvest_no_ats); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7318, quirk_amd_harvest_no_ats); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7319, quirk_amd_harvest_no_ats); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x731a, quirk_amd_harvest_no_ats); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x731b, quirk_amd_harvest_no_ats); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x731e, quirk_amd_harvest_no_ats); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x731f, quirk_amd_harvest_no_ats); /* AMD Navi14 dGPU */ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7340, quirk_amd_harvest_no_ats); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7341, quirk_amd_harvest_no_ats); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7347, quirk_amd_harvest_no_ats); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x734f, quirk_amd_harvest_no_ats); /* AMD Raven platform iGPU */ 
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x15d8, quirk_amd_harvest_no_ats); #endif /* CONFIG_PCI_ATS */ diff --git a/drivers/phy/amlogic/phy-meson-axg-mipi-dphy.c b/drivers/phy/amlogic/phy-meson-axg-mipi-dphy.c index cd2332bf0e31..fdbd64c03e12 100644 --- a/drivers/phy/amlogic/phy-meson-axg-mipi-dphy.c +++ b/drivers/phy/amlogic/phy-meson-axg-mipi-dphy.c @@ -9,6 +9,7 @@ #include <linux/bitfield.h> #include <linux/bitops.h> +#include <linux/bits.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/io.h> @@ -250,7 +251,7 @@ static int phy_meson_axg_mipi_dphy_power_on(struct phy *phy) (DIV_ROUND_UP(priv->config.clk_zero, temp) << 16) | (DIV_ROUND_UP(priv->config.clk_prepare, temp) << 24)); regmap_write(priv->regmap, MIPI_DSI_CLK_TIM1, - DIV_ROUND_UP(priv->config.clk_pre, temp)); + DIV_ROUND_UP(priv->config.clk_pre, BITS_PER_BYTE)); regmap_write(priv->regmap, MIPI_DSI_HS_TIM, DIV_ROUND_UP(priv->config.hs_exit, temp) | diff --git a/drivers/phy/broadcom/Kconfig b/drivers/phy/broadcom/Kconfig index f81e23742079..849c4204f550 100644 --- a/drivers/phy/broadcom/Kconfig +++ b/drivers/phy/broadcom/Kconfig @@ -97,8 +97,7 @@ config PHY_BRCM_USB depends on OF select GENERIC_PHY select SOC_BRCMSTB if ARCH_BRCMSTB - default ARCH_BCM4908 - default ARCH_BRCMSTB + default ARCH_BCM4908 || ARCH_BRCMSTB help Enable this to support the Broadcom STB USB PHY. This driver is required by the USB XHCI, EHCI and OHCI diff --git a/drivers/phy/broadcom/phy-brcm-usb.c b/drivers/phy/broadcom/phy-brcm-usb.c index 116fb23aebd9..0f1deb6e0eab 100644 --- a/drivers/phy/broadcom/phy-brcm-usb.c +++ b/drivers/phy/broadcom/phy-brcm-usb.c @@ -18,6 +18,7 @@ #include <linux/soc/brcmstb/brcmstb.h> #include <dt-bindings/phy/phy.h> #include <linux/mfd/syscon.h> +#include <linux/suspend.h> #include "phy-brcm-usb-init.h" @@ -70,12 +71,35 @@ struct brcm_usb_phy_data { int init_count; int wake_irq; struct brcm_usb_phy phys[BRCM_USB_PHY_ID_MAX]; + struct notifier_block pm_notifier; + bool pm_active; }; static s8 *node_reg_names[BRCM_REGS_MAX] = { "crtl", "xhci_ec", "xhci_gbl", "usb_phy", "usb_mdio", "bdc_ec" }; +static int brcm_pm_notifier(struct notifier_block *notifier, + unsigned long pm_event, + void *unused) +{ + struct brcm_usb_phy_data *priv = + container_of(notifier, struct brcm_usb_phy_data, pm_notifier); + + switch (pm_event) { + case PM_HIBERNATION_PREPARE: + case PM_SUSPEND_PREPARE: + priv->pm_active = true; + break; + case PM_POST_RESTORE: + case PM_POST_HIBERNATION: + case PM_POST_SUSPEND: + priv->pm_active = false; + break; + } + return NOTIFY_DONE; +} + static irqreturn_t brcm_usb_phy_wake_isr(int irq, void *dev_id) { struct phy *gphy = dev_id; @@ -91,6 +115,9 @@ static int brcm_usb_phy_init(struct phy *gphy) struct brcm_usb_phy_data *priv = container_of(phy, struct brcm_usb_phy_data, phys[phy->id]); + if (priv->pm_active) + return 0; + /* * Use a lock to make sure a second caller waits until * the base phy is inited before using it. 
@@ -120,6 +147,9 @@ static int brcm_usb_phy_exit(struct phy *gphy) struct brcm_usb_phy_data *priv = container_of(phy, struct brcm_usb_phy_data, phys[phy->id]); + if (priv->pm_active) + return 0; + dev_dbg(&gphy->dev, "EXIT\n"); if (phy->id == BRCM_USB_PHY_2_0) brcm_usb_uninit_eohci(&priv->ini); @@ -488,6 +518,9 @@ static int brcm_usb_phy_probe(struct platform_device *pdev) if (err) return err; + priv->pm_notifier.notifier_call = brcm_pm_notifier; + register_pm_notifier(&priv->pm_notifier); + mutex_init(&priv->mutex); /* make sure invert settings are correct */ @@ -528,7 +561,10 @@ static int brcm_usb_phy_probe(struct platform_device *pdev) static int brcm_usb_phy_remove(struct platform_device *pdev) { + struct brcm_usb_phy_data *priv = dev_get_drvdata(&pdev->dev); + sysfs_remove_group(&pdev->dev.kobj, &brcm_usb_phy_group); + unregister_pm_notifier(&priv->pm_notifier); return 0; } @@ -539,6 +575,7 @@ static int brcm_usb_phy_suspend(struct device *dev) struct brcm_usb_phy_data *priv = dev_get_drvdata(dev); if (priv->init_count) { + dev_dbg(dev, "SUSPEND\n"); priv->ini.wake_enabled = device_may_wakeup(dev); if (priv->phys[BRCM_USB_PHY_3_0].inited) brcm_usb_uninit_xhci(&priv->ini); @@ -578,6 +615,7 @@ static int brcm_usb_phy_resume(struct device *dev) * Uninitialize anything that wasn't previously initialized. */ if (priv->init_count) { + dev_dbg(dev, "RESUME\n"); if (priv->wake_irq >= 0) disable_irq_wake(priv->wake_irq); brcm_usb_init_common(&priv->ini); diff --git a/drivers/phy/cadence/phy-cadence-sierra.c b/drivers/phy/cadence/phy-cadence-sierra.c index da24acd26666..e265647e29a2 100644 --- a/drivers/phy/cadence/phy-cadence-sierra.c +++ b/drivers/phy/cadence/phy-cadence-sierra.c @@ -1338,7 +1338,7 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; const struct cdns_sierra_data *data; unsigned int id_value; - int i, ret, node = 0; + int ret, node = 0; void __iomem *base; struct device_node *dn = dev->of_node, *child; @@ -1416,7 +1416,8 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev) dev_err(dev, "failed to get reset %s\n", child->full_name); ret = PTR_ERR(sp->phys[node].lnk_rst); - goto put_child2; + of_node_put(child); + goto put_control; } if (!sp->autoconf) { @@ -1424,7 +1425,9 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev) if (ret) { dev_err(dev, "missing property in node %s\n", child->name); - goto put_child; + of_node_put(child); + reset_control_put(sp->phys[node].lnk_rst); + goto put_control; } } @@ -1434,7 +1437,9 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev) if (IS_ERR(gphy)) { ret = PTR_ERR(gphy); - goto put_child; + of_node_put(child); + reset_control_put(sp->phys[node].lnk_rst); + goto put_control; } sp->phys[node].phy = gphy; phy_set_drvdata(gphy, &sp->phys[node]); @@ -1446,26 +1451,28 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev) if (sp->num_lanes > SIERRA_MAX_LANES) { ret = -EINVAL; dev_err(dev, "Invalid lane configuration\n"); - goto put_child2; + goto put_control; } /* If more than one subnode, configure the PHY as multilink */ if (!sp->autoconf && sp->nsubnodes > 1) { ret = cdns_sierra_phy_configure_multilink(sp); if (ret) - goto put_child2; + goto put_control; } pm_runtime_enable(dev); phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); - return PTR_ERR_OR_ZERO(phy_provider); - -put_child: - node++; -put_child2: - for (i = 0; i < node; i++) - reset_control_put(sp->phys[i].lnk_rst); - of_node_put(child); + if (IS_ERR(phy_provider)) 
{ + ret = PTR_ERR(phy_provider); + goto put_control; + } + + return 0; + +put_control: + while (--node >= 0) + reset_control_put(sp->phys[node].lnk_rst); clk_disable: cdns_sierra_phy_disable_clocks(sp); reset_control_assert(sp->apb_rst); diff --git a/drivers/phy/mediatek/phy-mtk-tphy.c b/drivers/phy/mediatek/phy-mtk-tphy.c index 6d307102f4f6..8ee7682b8e93 100644 --- a/drivers/phy/mediatek/phy-mtk-tphy.c +++ b/drivers/phy/mediatek/phy-mtk-tphy.c @@ -992,7 +992,7 @@ static int phy_efuse_get(struct mtk_tphy *tphy, struct mtk_phy_instance *instanc /* no efuse, ignore it */ if (!instance->efuse_intr && !instance->efuse_rx_imp && - !instance->efuse_rx_imp) { + !instance->efuse_tx_imp) { dev_warn(dev, "no u3 intr efuse, but dts enable it\n"); instance->efuse_sw_en = 0; break; diff --git a/drivers/phy/phy-core-mipi-dphy.c b/drivers/phy/phy-core-mipi-dphy.c index 288c9c67aa74..ccb4045685cd 100644 --- a/drivers/phy/phy-core-mipi-dphy.c +++ b/drivers/phy/phy-core-mipi-dphy.c @@ -36,7 +36,7 @@ int phy_mipi_dphy_get_default_config(unsigned long pixel_clock, cfg->clk_miss = 0; cfg->clk_post = 60000 + 52 * ui; - cfg->clk_pre = 8000; + cfg->clk_pre = 8; cfg->clk_prepare = 38000; cfg->clk_settle = 95000; cfg->clk_term_en = 0; @@ -97,7 +97,7 @@ int phy_mipi_dphy_config_validate(struct phy_configure_opts_mipi_dphy *cfg) if (cfg->clk_post < (60000 + 52 * ui)) return -EINVAL; - if (cfg->clk_pre < 8000) + if (cfg->clk_pre < 8) return -EINVAL; if (cfg->clk_prepare < 38000 || cfg->clk_prepare > 95000) diff --git a/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c b/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c index 347dc79a18c1..630e01b5c19b 100644 --- a/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c +++ b/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c @@ -5,6 +5,7 @@ * Author: Wyon Bi <bivvy.bi@rock-chips.com> */ +#include <linux/bits.h> #include <linux/kernel.h> #include <linux/clk.h> #include <linux/iopoll.h> @@ -364,7 +365,7 @@ static void inno_dsidphy_mipi_mode_enable(struct inno_dsidphy *inno) * The value of counter for HS Tclk-pre * Tclk-pre = Tpin_txbyteclkhs * value */ - clk_pre = DIV_ROUND_UP(cfg->clk_pre, t_txbyteclkhs); + clk_pre = DIV_ROUND_UP(cfg->clk_pre, BITS_PER_BYTE); /* * The value of counter for HS Tlpx Time diff --git a/drivers/phy/st/phy-stm32-usbphyc.c b/drivers/phy/st/phy-stm32-usbphyc.c index 2ce9bfd783d4..007a23c78d56 100644 --- a/drivers/phy/st/phy-stm32-usbphyc.c +++ b/drivers/phy/st/phy-stm32-usbphyc.c @@ -304,7 +304,7 @@ static int stm32_usbphyc_pll_enable(struct stm32_usbphyc *usbphyc) ret = __stm32_usbphyc_pll_disable(usbphyc); if (ret) - return ret; + goto dec_n_pll_cons; } ret = stm32_usbphyc_regulators_enable(usbphyc); diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c index b3384c31637a..da546c35d1d5 100644 --- a/drivers/phy/ti/phy-j721e-wiz.c +++ b/drivers/phy/ti/phy-j721e-wiz.c @@ -233,6 +233,7 @@ static const struct clk_div_table clk_div_table[] = { { .val = 1, .div = 2, }, { .val = 2, .div = 4, }, { .val = 3, .div = 8, }, + { /* sentinel */ }, }; static const struct wiz_clk_div_sel clk_div_sel[] = { diff --git a/drivers/phy/xilinx/phy-zynqmp.c b/drivers/phy/xilinx/phy-zynqmp.c index f478d8a17115..9be9535ad7ab 100644 --- a/drivers/phy/xilinx/phy-zynqmp.c +++ b/drivers/phy/xilinx/phy-zynqmp.c @@ -134,7 +134,8 @@ #define PROT_BUS_WIDTH_10 0x0 #define PROT_BUS_WIDTH_20 0x1 #define PROT_BUS_WIDTH_40 0x2 -#define PROT_BUS_WIDTH_SHIFT 2 +#define PROT_BUS_WIDTH_SHIFT(n) ((n) * 2) +#define PROT_BUS_WIDTH_MASK(n) GENMASK((n) * 2 + 1, (n) * 2) /* 
Number of GT lanes */ #define NUM_LANES 4 @@ -445,12 +446,12 @@ static void xpsgtr_phy_init_sata(struct xpsgtr_phy *gtr_phy) static void xpsgtr_phy_init_sgmii(struct xpsgtr_phy *gtr_phy) { struct xpsgtr_dev *gtr_dev = gtr_phy->dev; + u32 mask = PROT_BUS_WIDTH_MASK(gtr_phy->lane); + u32 val = PROT_BUS_WIDTH_10 << PROT_BUS_WIDTH_SHIFT(gtr_phy->lane); /* Set SGMII protocol TX and RX bus width to 10 bits. */ - xpsgtr_write(gtr_dev, TX_PROT_BUS_WIDTH, - PROT_BUS_WIDTH_10 << (gtr_phy->lane * PROT_BUS_WIDTH_SHIFT)); - xpsgtr_write(gtr_dev, RX_PROT_BUS_WIDTH, - PROT_BUS_WIDTH_10 << (gtr_phy->lane * PROT_BUS_WIDTH_SHIFT)); + xpsgtr_clr_set(gtr_dev, TX_PROT_BUS_WIDTH, mask, val); + xpsgtr_clr_set(gtr_dev, RX_PROT_BUS_WIDTH, mask, val); xpsgtr_bypass_scrambler_8b10b(gtr_phy); } diff --git a/drivers/pinctrl/intel/pinctrl-tigerlake.c b/drivers/pinctrl/intel/pinctrl-tigerlake.c index 0bcd19597e4a..3ddaeffc0415 100644 --- a/drivers/pinctrl/intel/pinctrl-tigerlake.c +++ b/drivers/pinctrl/intel/pinctrl-tigerlake.c @@ -749,7 +749,6 @@ static const struct acpi_device_id tgl_pinctrl_acpi_match[] = { { "INT34C5", (kernel_ulong_t)&tgllp_soc_data }, { "INT34C6", (kernel_ulong_t)&tglh_soc_data }, { "INTC1055", (kernel_ulong_t)&tgllp_soc_data }, - { "INTC1057", (kernel_ulong_t)&tgllp_soc_data }, { } }; MODULE_DEVICE_TABLE(acpi, tgl_pinctrl_acpi_match); diff --git a/drivers/pinctrl/pinctrl-k210.c b/drivers/pinctrl/pinctrl-k210.c index 49e32684dbb2..ecab6bf63dc6 100644 --- a/drivers/pinctrl/pinctrl-k210.c +++ b/drivers/pinctrl/pinctrl-k210.c @@ -482,7 +482,7 @@ static int k210_pinconf_get_drive(unsigned int max_strength_ua) { int i; - for (i = K210_PC_DRIVE_MAX; i; i--) { + for (i = K210_PC_DRIVE_MAX; i >= 0; i--) { if (k210_pinconf_drive_strength[i] <= max_strength_ua) return i; } @@ -527,7 +527,7 @@ static int k210_pinconf_set_param(struct pinctrl_dev *pctldev, case PIN_CONFIG_BIAS_PULL_UP: if (!arg) return -EINVAL; - val |= K210_PC_PD; + val |= K210_PC_PU; break; case PIN_CONFIG_DRIVE_STRENGTH: arg *= 1000; diff --git a/drivers/pinctrl/pinctrl-starfive.c b/drivers/pinctrl/pinctrl-starfive.c index 0b912152a405..266da41a6162 100644 --- a/drivers/pinctrl/pinctrl-starfive.c +++ b/drivers/pinctrl/pinctrl-starfive.c @@ -1164,6 +1164,7 @@ static int starfive_irq_set_type(struct irq_data *d, unsigned int trigger) } static struct irq_chip starfive_irq_chip = { + .name = "StarFive GPIO", .irq_ack = starfive_irq_ack, .irq_mask = starfive_irq_mask, .irq_mask_ack = starfive_irq_mask_ack, @@ -1308,7 +1309,6 @@ static int starfive_probe(struct platform_device *pdev) sfp->gc.ngpio = NR_GPIOS; starfive_irq_chip.parent_device = dev; - starfive_irq_chip.name = sfp->gc.label; sfp->gc.irq.chip = &starfive_irq_chip; sfp->gc.irq.parent_handler = starfive_gpio_irq_handler; diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c index 80d6750c74a6..1f401377ff60 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c +++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c @@ -36,6 +36,13 @@ #include "../core.h" #include "pinctrl-sunxi.h" +/* + * These lock classes tell lockdep that GPIO IRQs are in a different + * category than their parents, so it won't report false recursion. 
+ */ +static struct lock_class_key sunxi_pinctrl_irq_lock_class; +static struct lock_class_key sunxi_pinctrl_irq_request_class; + static struct irq_chip sunxi_pinctrl_edge_irq_chip; static struct irq_chip sunxi_pinctrl_level_irq_chip; @@ -837,7 +844,8 @@ static int sunxi_pinctrl_gpio_direction_input(struct gpio_chip *chip, { struct sunxi_pinctrl *pctl = gpiochip_get_data(chip); - return sunxi_pmx_gpio_set_direction(pctl->pctl_dev, NULL, offset, true); + return sunxi_pmx_gpio_set_direction(pctl->pctl_dev, NULL, + chip->base + offset, true); } static int sunxi_pinctrl_gpio_get(struct gpio_chip *chip, unsigned offset) @@ -890,7 +898,8 @@ static int sunxi_pinctrl_gpio_direction_output(struct gpio_chip *chip, struct sunxi_pinctrl *pctl = gpiochip_get_data(chip); sunxi_pinctrl_gpio_set(chip, offset, value); - return sunxi_pmx_gpio_set_direction(pctl->pctl_dev, NULL, offset, false); + return sunxi_pmx_gpio_set_direction(pctl->pctl_dev, NULL, + chip->base + offset, false); } static int sunxi_pinctrl_gpio_of_xlate(struct gpio_chip *gc, @@ -1555,6 +1564,8 @@ int sunxi_pinctrl_init_with_variant(struct platform_device *pdev, for (i = 0; i < (pctl->desc->irq_banks * IRQ_PER_BANK); i++) { int irqno = irq_create_mapping(pctl->domain, i); + irq_set_lockdep_class(irqno, &sunxi_pinctrl_irq_lock_class, + &sunxi_pinctrl_irq_request_class); irq_set_chip_and_handler(irqno, &sunxi_pinctrl_edge_irq_chip, handle_edge_irq); irq_set_chip_data(irqno, pctl); diff --git a/drivers/platform/chrome/cros_ec.c b/drivers/platform/chrome/cros_ec.c index fc5aa1525d13..d49a4efe46c8 100644 --- a/drivers/platform/chrome/cros_ec.c +++ b/drivers/platform/chrome/cros_ec.c @@ -302,13 +302,11 @@ EXPORT_SYMBOL(cros_ec_register); * * Return: 0 on success or negative error code. */ -int cros_ec_unregister(struct cros_ec_device *ec_dev) +void cros_ec_unregister(struct cros_ec_device *ec_dev) { if (ec_dev->pd) platform_device_unregister(ec_dev->pd); platform_device_unregister(ec_dev->ec); - - return 0; } EXPORT_SYMBOL(cros_ec_unregister); diff --git a/drivers/platform/chrome/cros_ec.h b/drivers/platform/chrome/cros_ec.h index 78363dcfdf23..bbca0096868a 100644 --- a/drivers/platform/chrome/cros_ec.h +++ b/drivers/platform/chrome/cros_ec.h @@ -11,7 +11,7 @@ #include <linux/interrupt.h> int cros_ec_register(struct cros_ec_device *ec_dev); -int cros_ec_unregister(struct cros_ec_device *ec_dev); +void cros_ec_unregister(struct cros_ec_device *ec_dev); int cros_ec_suspend(struct cros_ec_device *ec_dev); int cros_ec_resume(struct cros_ec_device *ec_dev); diff --git a/drivers/platform/chrome/cros_ec_i2c.c b/drivers/platform/chrome/cros_ec_i2c.c index 30c8938c27d5..22feb0fd4ce7 100644 --- a/drivers/platform/chrome/cros_ec_i2c.c +++ b/drivers/platform/chrome/cros_ec_i2c.c @@ -313,7 +313,9 @@ static int cros_ec_i2c_remove(struct i2c_client *client) { struct cros_ec_device *ec_dev = i2c_get_clientdata(client); - return cros_ec_unregister(ec_dev); + cros_ec_unregister(ec_dev); + + return 0; } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c index d6306d2a096f..7651417b4a25 100644 --- a/drivers/platform/chrome/cros_ec_lpc.c +++ b/drivers/platform/chrome/cros_ec_lpc.c @@ -439,7 +439,9 @@ static int cros_ec_lpc_remove(struct platform_device *pdev) acpi_remove_notify_handler(adev->handle, ACPI_ALL_NOTIFY, cros_ec_lpc_acpi_notify); - return cros_ec_unregister(ec_dev); + cros_ec_unregister(ec_dev); + + return 0; } static const struct acpi_device_id cros_ec_lpc_acpi_device_ids[] = { diff --git 
a/drivers/platform/chrome/cros_ec_spi.c b/drivers/platform/chrome/cros_ec_spi.c index 14c4046fa04d..8493af0f680e 100644 --- a/drivers/platform/chrome/cros_ec_spi.c +++ b/drivers/platform/chrome/cros_ec_spi.c @@ -786,11 +786,11 @@ static int cros_ec_spi_probe(struct spi_device *spi) return 0; } -static int cros_ec_spi_remove(struct spi_device *spi) +static void cros_ec_spi_remove(struct spi_device *spi) { struct cros_ec_device *ec_dev = spi_get_drvdata(spi); - return cros_ec_unregister(ec_dev); + cros_ec_unregister(ec_dev); } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/platform/olpc/olpc-xo175-ec.c b/drivers/platform/olpc/olpc-xo175-ec.c index 0d46706afd2d..4823bd2819f6 100644 --- a/drivers/platform/olpc/olpc-xo175-ec.c +++ b/drivers/platform/olpc/olpc-xo175-ec.c @@ -648,7 +648,7 @@ static struct olpc_ec_driver olpc_xo175_ec_driver = { .ec_cmd = olpc_xo175_ec_cmd, }; -static int olpc_xo175_ec_remove(struct spi_device *spi) +static void olpc_xo175_ec_remove(struct spi_device *spi) { if (pm_power_off == olpc_xo175_ec_power_off) pm_power_off = NULL; @@ -657,8 +657,6 @@ static int olpc_xo175_ec_remove(struct spi_device *spi) platform_device_unregister(olpc_ec); olpc_ec = NULL; - - return 0; } static int olpc_xo175_ec_probe(struct spi_device *spi) diff --git a/drivers/platform/surface/surface3_power.c b/drivers/platform/surface/surface3_power.c index abac3eec565e..444ec81ba02d 100644 --- a/drivers/platform/surface/surface3_power.c +++ b/drivers/platform/surface/surface3_power.c @@ -232,14 +232,21 @@ static int mshw0011_bix(struct mshw0011_data *cdata, struct bix *bix) } bix->last_full_charg_capacity = ret; - /* get serial number */ + /* + * Get serial number. On some devices (with unofficial replacement + * battery?) reading any of the serial number range addresses gets + * nacked; in this case, just leave the serial number empty.
+ */ ret = i2c_smbus_read_i2c_block_data(client, MSHW0011_BAT0_REG_SERIAL_NO, sizeof(buf), buf); - if (ret != sizeof(buf)) { + if (ret == -EREMOTEIO) { + /* no serial number available */ + } else if (ret != sizeof(buf)) { dev_err(&client->dev, "Error reading serial no: %d\n", ret); return ret; + } else { + snprintf(bix->serial, ARRAY_SIZE(bix->serial), "%3pE%6pE", buf + 7, buf); } - snprintf(bix->serial, ARRAY_SIZE(bix->serial), "%3pE%6pE", buf + 7, buf); /* get cycle count */ ret = i2c_smbus_read_word_data(client, MSHW0011_BAT0_REG_CYCLE_CNT); diff --git a/drivers/platform/x86/amd-pmc.c b/drivers/platform/x86/amd-pmc.c index 4c72ba68b315..b1103f85a85a 100644 --- a/drivers/platform/x86/amd-pmc.c +++ b/drivers/platform/x86/amd-pmc.c @@ -21,6 +21,7 @@ #include <linux/module.h> #include <linux/pci.h> #include <linux/platform_device.h> +#include <linux/pm_qos.h> #include <linux/rtc.h> #include <linux/suspend.h> #include <linux/seq_file.h> @@ -85,6 +86,9 @@ #define PMC_MSG_DELAY_MIN_US 50 #define RESPONSE_REGISTER_LOOP_MAX 20000 +/* QoS request for letting CPUs enter idle states, but not the deepest one */ +#define AMD_PMC_MAX_IDLE_STATE_LATENCY 3 + #define SOC_SUBSYSTEM_IP_MAX 12 #define DELAY_MIN_US 2000 #define DELAY_MAX_US 3000 @@ -131,6 +135,7 @@ struct amd_pmc_dev { struct device *dev; struct pci_dev *rdev; struct mutex lock; /* generic mutex lock */ + struct pm_qos_request amd_pmc_pm_qos_req; #if IS_ENABLED(CONFIG_DEBUG_FS) struct dentry *dbgfs_dir; #endif /* CONFIG_DEBUG_FS */ @@ -521,6 +526,14 @@ static int amd_pmc_verify_czn_rtc(struct amd_pmc_dev *pdev, u32 *arg) rc = rtc_alarm_irq_enable(rtc_device, 0); dev_dbg(pdev->dev, "wakeup timer programmed for %lld seconds\n", duration); + /* + * Prevent CPUs from getting into deep idle states while sending OS_HINT. + * Sending the hint is generally safe as long as at least one of the CPUs + * is not in a deep idle state.
+ */ + cpu_latency_qos_update_request(&pdev->amd_pmc_pm_qos_req, AMD_PMC_MAX_IDLE_STATE_LATENCY); + wake_up_all_idle_cpus(); + return rc; } @@ -538,24 +551,31 @@ static int __maybe_unused amd_pmc_suspend(struct device *dev) /* Activate CZN specific RTC functionality */ if (pdev->cpu_id == AMD_CPU_ID_CZN) { rc = amd_pmc_verify_czn_rtc(pdev, &arg); - if (rc < 0) - return rc; + if (rc) + goto fail; } /* Dump the IdleMask before we send hint to SMU */ amd_pmc_idlemask_read(pdev, dev, NULL); msg = amd_pmc_get_os_hint(pdev); rc = amd_pmc_send_cmd(pdev, arg, NULL, msg, 0); - if (rc) + if (rc) { dev_err(pdev->dev, "suspend failed\n"); + goto fail; + } if (enable_stb) rc = amd_pmc_write_stb(pdev, AMD_PMC_STB_PREDEF); - if (rc) { + if (rc) { dev_err(pdev->dev, "error writing to STB\n"); - return rc; + goto fail; } + return 0; +fail: + if (pdev->cpu_id == AMD_CPU_ID_CZN) + cpu_latency_qos_update_request(&pdev->amd_pmc_pm_qos_req, + PM_QOS_DEFAULT_VALUE); return rc; } @@ -579,12 +599,15 @@ static int __maybe_unused amd_pmc_resume(struct device *dev) /* Write data incremented by 1 to distinguish in stb_read */ if (enable_stb) rc = amd_pmc_write_stb(pdev, AMD_PMC_STB_PREDEF + 1); - if (rc) { + if (rc) dev_err(pdev->dev, "error writing to STB\n"); - return rc; - } - return 0; + /* Restore the QoS request back to defaults if it was set */ + if (pdev->cpu_id == AMD_CPU_ID_CZN) + cpu_latency_qos_update_request(&pdev->amd_pmc_pm_qos_req, + PM_QOS_DEFAULT_VALUE); + + return rc; } static const struct dev_pm_ops amd_pmc_pm_ops = { @@ -722,6 +745,7 @@ static int amd_pmc_probe(struct platform_device *pdev) amd_pmc_get_smu_version(dev); platform_set_drvdata(pdev, dev); amd_pmc_dbgfs_register(dev); + cpu_latency_qos_add_request(&dev->amd_pmc_pm_qos_req, PM_QOS_DEFAULT_VALUE); return 0; err_pci_dev_put: diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c index a3b83b22a3b1..2104a2621e50 100644 --- a/drivers/platform/x86/asus-wmi.c +++ b/drivers/platform/x86/asus-wmi.c @@ -2223,7 +2223,7 @@ static int fan_curve_check_present(struct asus_wmi *asus, bool *available, err = fan_curve_get_factory_default(asus, fan_dev); if (err) { - if (err == -ENODEV) + if (err == -ENODEV || err == -ENODATA) return 0; return err; } diff --git a/drivers/platform/x86/intel/int3472/tps68470_board_data.c b/drivers/platform/x86/intel/int3472/tps68470_board_data.c index f93d437fd192..525f09a3b5ff 100644 --- a/drivers/platform/x86/intel/int3472/tps68470_board_data.c +++ b/drivers/platform/x86/intel/int3472/tps68470_board_data.c @@ -100,7 +100,8 @@ static struct gpiod_lookup_table surface_go_tps68470_gpios = { .dev_id = "i2c-INT347A:00", .table = { GPIO_LOOKUP("tps68470-gpio", 9, "reset", GPIO_ACTIVE_LOW), - GPIO_LOOKUP("tps68470-gpio", 7, "powerdown", GPIO_ACTIVE_LOW) + GPIO_LOOKUP("tps68470-gpio", 7, "powerdown", GPIO_ACTIVE_LOW), + { } } }; diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index bd045486b933..3424b080db77 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -8703,6 +8703,7 @@ static const struct tpacpi_quirk fan_quirk_table[] __initconst = { TPACPI_Q_LNV3('N', '4', '0', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (4nd gen) */ TPACPI_Q_LNV3('N', '3', '0', TPACPI_FAN_2CTL), /* P15 (1st gen) / P15v (1st gen) */ TPACPI_Q_LNV3('N', '3', '2', TPACPI_FAN_2CTL), /* X1 Carbon (9th gen) */ + TPACPI_Q_LNV3('N', '3', '7', TPACPI_FAN_2CTL), /* T15g (2nd gen) */ TPACPI_Q_LNV3('N', '1', 'O', TPACPI_FAN_NOFAN), /* X1 Tablet (2nd gen) */ }; 
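The amd-pmc hunks above follow the usual CPU latency QoS pattern: the driver registers a request once at probe time with PM_QOS_DEFAULT_VALUE (no constraint), tightens it to a few microseconds before sending OS_HINT so that no CPU can remain in a deep idle state, and relaxes it again on resume or in the suspend failure path. Below is a minimal sketch of that pattern; my_qos_req, my_probe(), my_send_hint() and the 3 us bound are illustrative assumptions, not the driver's actual code:

#include <linux/pm_qos.h>
#include <linux/smp.h>

static struct pm_qos_request my_qos_req;	/* hypothetical driver state */

static int my_probe(void)
{
	/* No constraint yet: every idle state, however deep, stays allowed. */
	cpu_latency_qos_add_request(&my_qos_req, PM_QOS_DEFAULT_VALUE);
	return 0;
}

static void my_send_hint(void)
{
	/* Cap CPU wakeup latency at 3 us, ruling out the deep idle states. */
	cpu_latency_qos_update_request(&my_qos_req, 3);
	/* Nudge CPUs already sitting in a deeper state to re-evaluate. */
	wake_up_all_idle_cpus();

	/* ... send the firmware command here ... */

	/* Drop the constraint so deep idle states are allowed again. */
	cpu_latency_qos_update_request(&my_qos_req, PM_QOS_DEFAULT_VALUE);
}

In the patch itself the request lives in struct amd_pmc_dev, the tighten-and-wake step sits in amd_pmc_verify_czn_rtc(), and the request is restored to PM_QOS_DEFAULT_VALUE in amd_pmc_resume() and in the new suspend failure path, so the constraint never outlives a suspend attempt.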
diff --git a/drivers/power/supply/bq256xx_charger.c b/drivers/power/supply/bq256xx_charger.c index b274942dc46a..01ad84fd147c 100644 --- a/drivers/power/supply/bq256xx_charger.c +++ b/drivers/power/supply/bq256xx_charger.c @@ -1523,6 +1523,9 @@ static int bq256xx_hw_init(struct bq256xx_device *bq) BQ256XX_WDT_BIT_SHIFT); ret = power_supply_get_battery_info(bq->charger, &bat_info); + if (ret == -ENOMEM) + return ret; + if (ret) { dev_warn(bq->dev, "battery info missing, default values will be applied\n"); diff --git a/drivers/power/supply/cw2015_battery.c b/drivers/power/supply/cw2015_battery.c index 0c87ad0dbf71..728e2a6cc9c3 100644 --- a/drivers/power/supply/cw2015_battery.c +++ b/drivers/power/supply/cw2015_battery.c @@ -689,7 +689,7 @@ static int cw_bat_probe(struct i2c_client *client) if (ret) { /* Allocate an empty battery */ cw_bat->battery = devm_kzalloc(&client->dev, - sizeof(cw_bat->battery), + sizeof(*cw_bat->battery), GFP_KERNEL); if (!cw_bat->battery) return -ENOMEM; diff --git a/drivers/ptp/ptp_idt82p33.c b/drivers/ptp/ptp_idt82p33.c index c1c959f7e52b..97c1be44e323 100644 --- a/drivers/ptp/ptp_idt82p33.c +++ b/drivers/ptp/ptp_idt82p33.c @@ -6,13 +6,17 @@ #define pr_fmt(fmt) "IDT_82p33xxx: " fmt #include <linux/firmware.h> -#include <linux/i2c.h> +#include <linux/platform_device.h> #include <linux/module.h> #include <linux/ptp_clock_kernel.h> #include <linux/delay.h> +#include <linux/jiffies.h> #include <linux/kernel.h> #include <linux/timekeeping.h> #include <linux/bitops.h> +#include <linux/of.h> +#include <linux/mfd/rsmu.h> +#include <linux/mfd/idt82p33_reg.h> #include "ptp_private.h" #include "ptp_idt82p33.h" @@ -24,15 +28,25 @@ MODULE_LICENSE("GPL"); MODULE_FIRMWARE(FW_FILENAME); /* Module Parameters */ -static u32 sync_tod_timeout = SYNC_TOD_TIMEOUT_SEC; -module_param(sync_tod_timeout, uint, 0); -MODULE_PARM_DESC(sync_tod_timeout, -"duration in second to keep SYNC_TOD on (set to 0 to keep it always on)"); - static u32 phase_snap_threshold = SNAP_THRESHOLD_NS; module_param(phase_snap_threshold, uint, 0); MODULE_PARM_DESC(phase_snap_threshold, -"threshold (150000ns by default) below which adjtime would ignore"); +"threshold (10000ns by default) below which adjtime would use double dco"); + +static char *firmware; +module_param(firmware, charp, 0); + +static inline int idt82p33_read(struct idt82p33 *idt82p33, u16 regaddr, + u8 *buf, u16 count) +{ + return regmap_bulk_read(idt82p33->regmap, regaddr, buf, count); +} + +static inline int idt82p33_write(struct idt82p33 *idt82p33, u16 regaddr, + u8 *buf, u16 count) +{ + return regmap_bulk_write(idt82p33->regmap, regaddr, buf, count); +} static void idt82p33_byte_array_to_timespec(struct timespec64 *ts, u8 buf[TOD_BYTE_COUNT]) @@ -78,110 +92,6 @@ static void idt82p33_timespec_to_byte_array(struct timespec64 const *ts, } } -static int idt82p33_xfer_read(struct idt82p33 *idt82p33, - unsigned char regaddr, - unsigned char *buf, - unsigned int count) -{ - struct i2c_client *client = idt82p33->client; - struct i2c_msg msg[2]; - int cnt; - - msg[0].addr = client->addr; - msg[0].flags = 0; - msg[0].len = 1; - msg[0].buf = ®addr; - - msg[1].addr = client->addr; - msg[1].flags = I2C_M_RD; - msg[1].len = count; - msg[1].buf = buf; - - cnt = i2c_transfer(client->adapter, msg, 2); - if (cnt < 0) { - dev_err(&client->dev, "i2c_transfer returned %d\n", cnt); - return cnt; - } else if (cnt != 2) { - dev_err(&client->dev, - "i2c_transfer sent only %d of %d messages\n", cnt, 2); - return -EIO; - } - return 0; -} - -static int 
idt82p33_xfer_write(struct idt82p33 *idt82p33, - u8 regaddr, - u8 *buf, - u16 count) -{ - struct i2c_client *client = idt82p33->client; - /* we add 1 byte for device register */ - u8 msg[IDT82P33_MAX_WRITE_COUNT + 1]; - int err; - - if (count > IDT82P33_MAX_WRITE_COUNT) - return -EINVAL; - - msg[0] = regaddr; - memcpy(&msg[1], buf, count); - - err = i2c_master_send(client, msg, count + 1); - if (err < 0) { - dev_err(&client->dev, "i2c_master_send returned %d\n", err); - return err; - } - - return 0; -} - -static int idt82p33_page_offset(struct idt82p33 *idt82p33, unsigned char val) -{ - int err; - - if (idt82p33->page_offset == val) - return 0; - - err = idt82p33_xfer_write(idt82p33, PAGE_ADDR, &val, sizeof(val)); - if (err) - dev_err(&idt82p33->client->dev, - "failed to set page offset %d\n", val); - else - idt82p33->page_offset = val; - - return err; -} - -static int idt82p33_rdwr(struct idt82p33 *idt82p33, unsigned int regaddr, - unsigned char *buf, unsigned int count, bool write) -{ - u8 offset, page; - int err; - - page = _PAGE(regaddr); - offset = _OFFSET(regaddr); - - err = idt82p33_page_offset(idt82p33, page); - if (err) - return err; - - if (write) - return idt82p33_xfer_write(idt82p33, offset, buf, count); - - return idt82p33_xfer_read(idt82p33, offset, buf, count); -} - -static int idt82p33_read(struct idt82p33 *idt82p33, unsigned int regaddr, - unsigned char *buf, unsigned int count) -{ - return idt82p33_rdwr(idt82p33, regaddr, buf, count, false); -} - -static int idt82p33_write(struct idt82p33 *idt82p33, unsigned int regaddr, - unsigned char *buf, unsigned int count) -{ - return idt82p33_rdwr(idt82p33, regaddr, buf, count, true); -} - static int idt82p33_dpll_set_mode(struct idt82p33_channel *channel, enum pll_mode mode) { @@ -206,7 +116,7 @@ static int idt82p33_dpll_set_mode(struct idt82p33_channel *channel, if (err) return err; - channel->pll_mode = dpll_mode; + channel->pll_mode = mode; return 0; } @@ -467,7 +377,7 @@ static int idt82p33_measure_tod_write_overhead(struct idt82p33_channel *channel) err = idt82p33_measure_settime_gettime_gap_overhead(channel, &gap_ns); if (err) { - dev_err(&idt82p33->client->dev, + dev_err(idt82p33->dev, "Failed in %s with err %d!\n", __func__, err); return err; } @@ -499,8 +409,8 @@ static int idt82p33_check_and_set_masks(struct idt82p33 *idt82p33, if (page == PLLMASK_ADDR_HI && offset == PLLMASK_ADDR_LO) { if ((val & 0xfc) || !(val & 0x3)) { - dev_err(&idt82p33->client->dev, - "Invalid PLL mask 0x%hhx\n", val); + dev_err(idt82p33->dev, + "Invalid PLL mask 0x%x\n", val); err = -EINVAL; } else { idt82p33->pll_mask = val; @@ -520,14 +430,14 @@ static void idt82p33_display_masks(struct idt82p33 *idt82p33) { u8 mask, i; - dev_info(&idt82p33->client->dev, + dev_info(idt82p33->dev, "pllmask = 0x%02x\n", idt82p33->pll_mask); for (i = 0; i < MAX_PHC_PLL; i++) { mask = 1 << i; if (mask & idt82p33->pll_mask) - dev_info(&idt82p33->client->dev, + dev_info(idt82p33->dev, "PLL%d output_mask = 0x%04x\n", i, idt82p33->channel[i].output_mask); } @@ -539,11 +449,6 @@ static int idt82p33_sync_tod(struct idt82p33_channel *channel, bool enable) u8 sync_cnfg; int err; - /* Turn it off after sync_tod_timeout seconds */ - if (enable && sync_tod_timeout) - ptp_schedule_worker(channel->ptp_clock, - sync_tod_timeout * HZ); - err = idt82p33_read(idt82p33, channel->dpll_sync_cnfg, &sync_cnfg, sizeof(sync_cnfg)); if (err) @@ -557,22 +462,6 @@ static int idt82p33_sync_tod(struct idt82p33_channel *channel, bool enable) &sync_cnfg, sizeof(sync_cnfg)); } -static long 
idt82p33_sync_tod_work_handler(struct ptp_clock_info *ptp) -{ - struct idt82p33_channel *channel = - container_of(ptp, struct idt82p33_channel, caps); - struct idt82p33 *idt82p33 = channel->idt82p33; - - mutex_lock(&idt82p33->reg_lock); - - (void)idt82p33_sync_tod(channel, false); - - mutex_unlock(&idt82p33->reg_lock); - - /* Return a negative value here to not reschedule */ - return -1; -} - static int idt82p33_output_enable(struct idt82p33_channel *channel, bool enable, unsigned int outn) { @@ -634,18 +523,11 @@ static int idt82p33_enable_tod(struct idt82p33_channel *channel) struct idt82p33 *idt82p33 = channel->idt82p33; struct timespec64 ts = {0, 0}; int err; - u8 val; - - val = 0; - err = idt82p33_write(idt82p33, channel->dpll_input_mode_cnfg, - &val, sizeof(val)); - if (err) - return err; err = idt82p33_measure_tod_write_overhead(channel); if (err) { - dev_err(&idt82p33->client->dev, + dev_err(idt82p33->dev, "Failed in %s with err %d!\n", __func__, err); return err; } @@ -673,16 +555,14 @@ static void idt82p33_ptp_clock_unregister_all(struct idt82p33 *idt82p33) } static int idt82p33_enable(struct ptp_clock_info *ptp, - struct ptp_clock_request *rq, int on) + struct ptp_clock_request *rq, int on) { struct idt82p33_channel *channel = container_of(ptp, struct idt82p33_channel, caps); struct idt82p33 *idt82p33 = channel->idt82p33; - int err; - - err = -EOPNOTSUPP; + int err = -EOPNOTSUPP; - mutex_lock(&idt82p33->reg_lock); + mutex_lock(idt82p33->lock); if (rq->type == PTP_CLK_REQ_PEROUT) { if (!on) @@ -690,15 +570,18 @@ static int idt82p33_enable(struct ptp_clock_info *ptp, &rq->perout); /* Only accept a 1-PPS aligned to the second. */ else if (rq->perout.start.nsec || rq->perout.period.sec != 1 || - rq->perout.period.nsec) { + rq->perout.period.nsec) err = -ERANGE; - } else + else err = idt82p33_perout_enable(channel, true, &rq->perout); } - mutex_unlock(&idt82p33->reg_lock); + mutex_unlock(idt82p33->lock); + if (err) + dev_err(idt82p33->dev, + "Failed in %s with err %d!\n", __func__, err); return err; } @@ -727,11 +610,11 @@ static int idt82p33_adjwritephase(struct ptp_clock_info *ptp, s32 offset_ns) val[3] = (offset_regval >> 24) & 0x1F; val[3] |= PH_OFFSET_EN; - mutex_lock(&idt82p33->reg_lock); + mutex_lock(idt82p33->lock); err = idt82p33_dpll_set_mode(channel, PLL_MODE_WPH); if (err) { - dev_err(&idt82p33->client->dev, + dev_err(idt82p33->dev, "Failed in %s with err %d!\n", __func__, err); goto out; } @@ -740,7 +623,7 @@ static int idt82p33_adjwritephase(struct ptp_clock_info *ptp, s32 offset_ns) sizeof(val)); out: - mutex_unlock(&idt82p33->reg_lock); + mutex_unlock(idt82p33->lock); return err; } @@ -751,12 +634,12 @@ static int idt82p33_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) struct idt82p33 *idt82p33 = channel->idt82p33; int err; - mutex_lock(&idt82p33->reg_lock); + mutex_lock(idt82p33->lock); err = _idt82p33_adjfine(channel, scaled_ppm); + mutex_unlock(idt82p33->lock); if (err) - dev_err(&idt82p33->client->dev, + dev_err(idt82p33->dev, "Failed in %s with err %d!\n", __func__, err); - mutex_unlock(&idt82p33->reg_lock); return err; } @@ -768,29 +651,20 @@ static int idt82p33_adjtime(struct ptp_clock_info *ptp, s64 delta_ns) struct idt82p33 *idt82p33 = channel->idt82p33; int err; - mutex_lock(&idt82p33->reg_lock); + mutex_lock(idt82p33->lock); if (abs(delta_ns) < phase_snap_threshold) { - mutex_unlock(&idt82p33->reg_lock); + mutex_unlock(idt82p33->lock); return 0; } err = _idt82p33_adjtime(channel, delta_ns); - if (err) { - mutex_unlock(&idt82p33->reg_lock); - 
dev_err(&idt82p33->client->dev, - "Adjtime failed in %s with err %d!\n", __func__, err); - return err; - } + mutex_unlock(idt82p33->lock); - err = idt82p33_sync_tod(channel, true); if (err) - dev_err(&idt82p33->client->dev, - "Sync_tod failed in %s with err %d!\n", __func__, err); - - mutex_unlock(&idt82p33->reg_lock); - + dev_err(idt82p33->dev, + "Failed in %s with err %d!\n", __func__, err); return err; } @@ -801,31 +675,31 @@ static int idt82p33_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) struct idt82p33 *idt82p33 = channel->idt82p33; int err; - mutex_lock(&idt82p33->reg_lock); + mutex_lock(idt82p33->lock); err = _idt82p33_gettime(channel, ts); + mutex_unlock(idt82p33->lock); + if (err) - dev_err(&idt82p33->client->dev, + dev_err(idt82p33->dev, "Failed in %s with err %d!\n", __func__, err); - mutex_unlock(&idt82p33->reg_lock); - return err; } static int idt82p33_settime(struct ptp_clock_info *ptp, - const struct timespec64 *ts) + const struct timespec64 *ts) { struct idt82p33_channel *channel = container_of(ptp, struct idt82p33_channel, caps); struct idt82p33 *idt82p33 = channel->idt82p33; int err; - mutex_lock(&idt82p33->reg_lock); + mutex_lock(idt82p33->lock); err = _idt82p33_settime(channel, ts); + mutex_unlock(idt82p33->lock); + if (err) - dev_err(&idt82p33->client->dev, + dev_err(idt82p33->dev, "Failed in %s with err %d!\n", __func__, err); - mutex_unlock(&idt82p33->reg_lock); - return err; } @@ -864,7 +738,7 @@ static int idt82p33_channel_init(struct idt82p33_channel *channel, int index) static void idt82p33_caps_init(struct ptp_clock_info *caps) { caps->owner = THIS_MODULE; - caps->max_adj = 92000; + caps->max_adj = DCO_MAX_PPB; caps->n_per_out = 11; caps->adjphase = idt82p33_adjwritephase; caps->adjfine = idt82p33_adjfine; @@ -872,7 +746,6 @@ static void idt82p33_caps_init(struct ptp_clock_info *caps) caps->gettime64 = idt82p33_gettime; caps->settime64 = idt82p33_settime; caps->enable = idt82p33_enable; - caps->do_aux_work = idt82p33_sync_tod_work_handler; } static int idt82p33_enable_channel(struct idt82p33 *idt82p33, u32 index) @@ -887,7 +760,7 @@ static int idt82p33_enable_channel(struct idt82p33 *idt82p33, u32 index) err = idt82p33_channel_init(channel, index); if (err) { - dev_err(&idt82p33->client->dev, + dev_err(idt82p33->dev, "Channel_init failed in %s with err %d!\n", __func__, err); return err; @@ -912,7 +785,7 @@ static int idt82p33_enable_channel(struct idt82p33 *idt82p33, u32 index) err = idt82p33_dpll_set_mode(channel, PLL_MODE_DCO); if (err) { - dev_err(&idt82p33->client->dev, + dev_err(idt82p33->dev, "Dpll_set_mode failed in %s with err %d!\n", __func__, err); return err; @@ -920,13 +793,13 @@ static int idt82p33_enable_channel(struct idt82p33 *idt82p33, u32 index) err = idt82p33_enable_tod(channel); if (err) { - dev_err(&idt82p33->client->dev, + dev_err(idt82p33->dev, "Enable_tod failed in %s with err %d!\n", __func__, err); return err; } - dev_info(&idt82p33->client->dev, "PLL%d registered as ptp%d\n", + dev_info(idt82p33->dev, "PLL%d registered as ptp%d\n", index, channel->ptp_clock->index); return 0; @@ -940,25 +813,24 @@ static int idt82p33_load_firmware(struct idt82p33 *idt82p33) int err; s32 len; - dev_dbg(&idt82p33->client->dev, - "requesting firmware '%s'\n", FW_FILENAME); + dev_dbg(idt82p33->dev, "requesting firmware '%s'\n", FW_FILENAME); - err = request_firmware(&fw, FW_FILENAME, &idt82p33->client->dev); + err = request_firmware(&fw, FW_FILENAME, idt82p33->dev); if (err) { - dev_err(&idt82p33->client->dev, + dev_err(idt82p33->dev, 
"Failed in %s with err %d!\n", __func__, err); return err; } - dev_dbg(&idt82p33->client->dev, "firmware size %zu bytes\n", fw->size); + dev_dbg(idt82p33->dev, "firmware size %zu bytes\n", fw->size); rec = (struct idt82p33_fwrc *) fw->data; for (len = fw->size; len > 0; len -= sizeof(*rec)) { if (rec->reserved) { - dev_err(&idt82p33->client->dev, + dev_err(idt82p33->dev, "bad firmware, reserved field non-zero\n"); err = -EINVAL; } else { @@ -973,16 +845,11 @@ static int idt82p33_load_firmware(struct idt82p33 *idt82p33) } if (err == 0) { - /* maximum 8 pages */ - if (page >= PAGE_NUM) - continue; - /* Page size 128, last 4 bytes of page skipped */ - if (((loaddr > 0x7b) && (loaddr <= 0x7f)) - || loaddr > 0xfb) + if (loaddr > 0x7b) continue; - err = idt82p33_write(idt82p33, _ADDR(page, loaddr), + err = idt82p33_write(idt82p33, REG_ADDR(page, loaddr), &val, sizeof(val)); } @@ -997,36 +864,34 @@ out: } -static int idt82p33_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int idt82p33_probe(struct platform_device *pdev) { + struct rsmu_ddata *ddata = dev_get_drvdata(pdev->dev.parent); struct idt82p33 *idt82p33; int err; u8 i; - (void)id; - - idt82p33 = devm_kzalloc(&client->dev, + idt82p33 = devm_kzalloc(&pdev->dev, sizeof(struct idt82p33), GFP_KERNEL); if (!idt82p33) return -ENOMEM; - mutex_init(&idt82p33->reg_lock); - - idt82p33->client = client; - idt82p33->page_offset = 0xff; + idt82p33->dev = &pdev->dev; + idt82p33->mfd = pdev->dev.parent; + idt82p33->lock = &ddata->lock; + idt82p33->regmap = ddata->regmap; idt82p33->tod_write_overhead_ns = 0; idt82p33->calculate_overhead_flag = 0; idt82p33->pll_mask = DEFAULT_PLL_MASK; idt82p33->channel[0].output_mask = DEFAULT_OUTPUT_MASK_PLL0; idt82p33->channel[1].output_mask = DEFAULT_OUTPUT_MASK_PLL1; - mutex_lock(&idt82p33->reg_lock); + mutex_lock(idt82p33->lock); err = idt82p33_load_firmware(idt82p33); if (err) - dev_warn(&idt82p33->client->dev, + dev_warn(idt82p33->dev, "loading firmware failed with %d\n", err); if (idt82p33->pll_mask) { @@ -1034,7 +899,7 @@ static int idt82p33_probe(struct i2c_client *client, if (idt82p33->pll_mask & (1 << i)) { err = idt82p33_enable_channel(idt82p33, i); if (err) { - dev_err(&idt82p33->client->dev, + dev_err(idt82p33->dev, "Failed in %s with err %d!\n", __func__, err); break; @@ -1042,69 +907,38 @@ static int idt82p33_probe(struct i2c_client *client, } } } else { - dev_err(&idt82p33->client->dev, + dev_err(idt82p33->dev, "no PLLs flagged as PHCs, nothing to do\n"); err = -ENODEV; } - mutex_unlock(&idt82p33->reg_lock); + mutex_unlock(idt82p33->lock); if (err) { idt82p33_ptp_clock_unregister_all(idt82p33); return err; } - i2c_set_clientdata(client, idt82p33); + platform_set_drvdata(pdev, idt82p33); return 0; } -static int idt82p33_remove(struct i2c_client *client) +static int idt82p33_remove(struct platform_device *pdev) { - struct idt82p33 *idt82p33 = i2c_get_clientdata(client); + struct idt82p33 *idt82p33 = platform_get_drvdata(pdev); idt82p33_ptp_clock_unregister_all(idt82p33); - mutex_destroy(&idt82p33->reg_lock); return 0; } -#ifdef CONFIG_OF -static const struct of_device_id idt82p33_dt_id[] = { - { .compatible = "idt,82p33810" }, - { .compatible = "idt,82p33813" }, - { .compatible = "idt,82p33814" }, - { .compatible = "idt,82p33831" }, - { .compatible = "idt,82p33910" }, - { .compatible = "idt,82p33913" }, - { .compatible = "idt,82p33914" }, - { .compatible = "idt,82p33931" }, - {}, -}; -MODULE_DEVICE_TABLE(of, idt82p33_dt_id); -#endif - -static const struct i2c_device_id 
idt82p33_i2c_id[] = { - { "idt82p33810", }, - { "idt82p33813", }, - { "idt82p33814", }, - { "idt82p33831", }, - { "idt82p33910", }, - { "idt82p33913", }, - { "idt82p33914", }, - { "idt82p33931", }, - {}, -}; -MODULE_DEVICE_TABLE(i2c, idt82p33_i2c_id); - -static struct i2c_driver idt82p33_driver = { +static struct platform_driver idt82p33_driver = { .driver = { - .of_match_table = of_match_ptr(idt82p33_dt_id), - .name = "idt82p33", + .name = "82p33x1x-phc", }, - .probe = idt82p33_probe, - .remove = idt82p33_remove, - .id_table = idt82p33_i2c_id, + .probe = idt82p33_probe, + .remove = idt82p33_remove, }; -module_i2c_driver(idt82p33_driver); +module_platform_driver(idt82p33_driver); diff --git a/drivers/ptp/ptp_idt82p33.h b/drivers/ptp/ptp_idt82p33.h index 1c7a0f0872e8..0ea1c35c0f9f 100644 --- a/drivers/ptp/ptp_idt82p33.h +++ b/drivers/ptp/ptp_idt82p33.h @@ -8,94 +8,19 @@ #define PTP_IDT82P33_H #include <linux/ktime.h> -#include <linux/workqueue.h> +#include <linux/mfd/idt82p33_reg.h> +#include <linux/regmap.h> - -/* Register Map - AN888_SMUforIEEE_SynchEther_82P33xxx_RevH.pdf */ -#define PAGE_NUM (8) -#define _ADDR(page, offset) (((page) << 0x7) | ((offset) & 0x7f)) -#define _PAGE(addr) (((addr) >> 0x7) & 0x7) -#define _OFFSET(addr) ((addr) & 0x7f) - -#define DPLL1_TOD_CNFG 0x134 -#define DPLL2_TOD_CNFG 0x1B4 - -#define DPLL1_TOD_STS 0x10B -#define DPLL2_TOD_STS 0x18B - -#define DPLL1_TOD_TRIGGER 0x115 -#define DPLL2_TOD_TRIGGER 0x195 - -#define DPLL1_OPERATING_MODE_CNFG 0x120 -#define DPLL2_OPERATING_MODE_CNFG 0x1A0 - -#define DPLL1_HOLDOVER_FREQ_CNFG 0x12C -#define DPLL2_HOLDOVER_FREQ_CNFG 0x1AC - -#define DPLL1_PHASE_OFFSET_CNFG 0x143 -#define DPLL2_PHASE_OFFSET_CNFG 0x1C3 - -#define DPLL1_SYNC_EDGE_CNFG 0X140 -#define DPLL2_SYNC_EDGE_CNFG 0X1C0 - -#define DPLL1_INPUT_MODE_CNFG 0X116 -#define DPLL2_INPUT_MODE_CNFG 0X196 - -#define OUT_MUX_CNFG(outn) _ADDR(0x6, (0xC * (outn))) - -#define PAGE_ADDR 0x7F -/* Register Map end */ - -/* Register definitions - AN888_SMUforIEEE_SynchEther_82P33xxx_RevH.pdf*/ -#define TOD_TRIGGER(wr_trig, rd_trig) ((wr_trig & 0xf) << 4 | (rd_trig & 0xf)) -#define SYNC_TOD BIT(1) -#define PH_OFFSET_EN BIT(7) -#define SQUELCH_ENABLE BIT(5) - -/* Bit definitions for the DPLL_MODE register */ -#define PLL_MODE_SHIFT (0) -#define PLL_MODE_MASK (0x1F) - -#define PEROUT_ENABLE_OUTPUT_MASK (0xdeadbeef) - -enum pll_mode { - PLL_MODE_MIN = 0, - PLL_MODE_AUTOMATIC = PLL_MODE_MIN, - PLL_MODE_FORCE_FREERUN = 1, - PLL_MODE_FORCE_HOLDOVER = 2, - PLL_MODE_FORCE_LOCKED = 4, - PLL_MODE_FORCE_PRE_LOCKED2 = 5, - PLL_MODE_FORCE_PRE_LOCKED = 6, - PLL_MODE_FORCE_LOST_PHASE = 7, - PLL_MODE_DCO = 10, - PLL_MODE_WPH = 18, - PLL_MODE_MAX = PLL_MODE_WPH, -}; - -enum hw_tod_trig_sel { - HW_TOD_TRIG_SEL_MIN = 0, - HW_TOD_TRIG_SEL_NO_WRITE = HW_TOD_TRIG_SEL_MIN, - HW_TOD_TRIG_SEL_SYNC_SEL = 1, - HW_TOD_TRIG_SEL_IN12 = 2, - HW_TOD_TRIG_SEL_IN13 = 3, - HW_TOD_TRIG_SEL_IN14 = 4, - HW_TOD_TRIG_SEL_TOD_PPS = 5, - HW_TOD_TRIG_SEL_TIMER_INTERVAL = 6, - HW_TOD_TRIG_SEL_MSB_PHASE_OFFSET_CNFG = 7, - HW_TOD_TRIG_SEL_MSB_HOLDOVER_FREQ_CNFG = 8, - HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG = 9, - HW_TOD_RD_TRIG_SEL_LSB_TOD_STS = HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG, - WR_TRIG_SEL_MAX = HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG, -}; - -/* Register bit definitions end */ #define FW_FILENAME "idt82p33xxx.bin" -#define MAX_PHC_PLL (2) -#define TOD_BYTE_COUNT (10) -#define MAX_MEASURMENT_COUNT (5) -#define SNAP_THRESHOLD_NS (150000) -#define SYNC_TOD_TIMEOUT_SEC (5) -#define IDT82P33_MAX_WRITE_COUNT (512) +#define MAX_PHC_PLL (2) 
+#define TOD_BYTE_COUNT (10)
+#define DCO_MAX_PPB (92000)
+#define MAX_MEASURMENT_COUNT (5)
+#define SNAP_THRESHOLD_NS (10000)
+#define IMMEDIATE_SNAP_THRESHOLD_NS (50000)
+#define DDCO_THRESHOLD_NS (5)
+#define IDT82P33_MAX_WRITE_COUNT (512)
+#define PEROUT_ENABLE_OUTPUT_MASK (0xdeadbeef)

 #define PLLMASK_ADDR_HI 0xFF
 #define PLLMASK_ADDR_LO 0xA5
@@ -116,15 +41,25 @@ enum hw_tod_trig_sel {
 #define DEFAULT_OUTPUT_MASK_PLL0 (0xc0)
 #define DEFAULT_OUTPUT_MASK_PLL1 DEFAULT_OUTPUT_MASK_PLL0

+/**
+ * @brief Maximum absolute value for write phase offset in femtoseconds
+ */
+#define WRITE_PHASE_OFFSET_LIMIT (20000052084ll)
+
+/** @brief Phase offset resolution
+ *
+ * DPLL phase offset = 10^15 fs / ( System Clock * 2^13)
+ *                   = 10^15 fs / ( 1638400000 * 2^13)
+ *                   = 74.5058059692382 fs
+ */
+#define IDT_T0DPLL_PHASE_RESOL 74506
+
 /* PTP Hardware Clock interface */
 struct idt82p33_channel {
 struct ptp_clock_info caps;
 struct ptp_clock *ptp_clock;
- struct idt82p33 *idt82p33;
- enum pll_mode pll_mode;
- /* task to turn off SYNC_TOD bit after pps sync */
- struct delayed_work sync_tod_work;
- bool sync_tod_on;
+ struct idt82p33 *idt82p33;
+ enum pll_mode pll_mode;
 s32 current_freq_ppb;
 u8 output_mask;
 u16 dpll_tod_cnfg;
@@ -138,15 +73,17 @@ struct idt82p33_channel {
 };

 struct idt82p33 {
- struct idt82p33_channel channel[MAX_PHC_PLL];
- struct i2c_client *client;
- u8 page_offset;
- u8 pll_mask;
- ktime_t start_time;
- int calculate_overhead_flag;
- s64 tod_write_overhead_ns;
- /* Protects I2C read/modify/write registers from concurrent access */
- struct mutex reg_lock;
+ struct idt82p33_channel channel[MAX_PHC_PLL];
+ struct device *dev;
+ u8 pll_mask;
+ /* Mutex to protect operations from being interrupted */
+ struct mutex *lock;
+ struct regmap *regmap;
+ struct device *mfd;
+ /* Overhead calculation for adjtime */
+ ktime_t start_time;
+ int calculate_overhead_flag;
+ s64 tod_write_overhead_ns;
 };

 /* firmware interface */
@@ -157,18 +94,4 @@ struct idt82p33_fwrc {
 u8 reserved;
 } __packed;

-/**
- * @brief Maximum absolute value for write phase offset in femtoseconds
- */
-#define WRITE_PHASE_OFFSET_LIMIT (20000052084ll)
-
-/** @brief Phase offset resolution
- *
- * DPLL phase offset = 10^15 fs / ( System Clock * 2^13)
- *                   = 10^15 fs / ( 1638400000 * 2^23)
- *                   = 74.5058059692382 fs
- */
-#define IDT_T0DPLL_PHASE_RESOL 74506
-
-
 #endif /* PTP_IDT82P33_H */
diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
index 0f1b5a7d2a89..56b04a7bba3a 100644
--- a/drivers/ptp/ptp_ocp.c
+++ b/drivers/ptp/ptp_ocp.c
@@ -11,12 +11,14 @@
 #include <linux/clkdev.h>
 #include <linux/clk-provider.h>
 #include <linux/platform_device.h>
+#include <linux/platform_data/i2c-xiic.h>
 #include <linux/ptp_clock_kernel.h>
 #include <linux/spi/spi.h>
 #include <linux/spi/xilinx_spi.h>
 #include <net/devlink.h>
 #include <linux/i2c.h>
 #include <linux/mtd/mtd.h>
+#include <linux/nvmem-consumer.h>

 #ifndef PCI_VENDOR_ID_FACEBOOK
 #define PCI_VENDOR_ID_FACEBOOK 0x1d9b
@@ -52,6 +54,8 @@ struct ocp_reg {
 u32 servo_offset_i;
 u32 servo_drift_p;
 u32 servo_drift_i;
+ u32 status_offset;
+ u32 status_drift;
 };

 #define OCP_CTRL_ENABLE BIT(0)
@@ -88,9 +92,10 @@ struct tod_reg {
 #define TOD_CTRL_GNSS_MASK ((1U << 4) - 1)
 #define TOD_CTRL_GNSS_SHIFT 24

-#define TOD_STATUS_UTC_MASK 0xff
-#define TOD_STATUS_UTC_VALID BIT(8)
-#define TOD_STATUS_LEAP_VALID BIT(16)
+#define TOD_STATUS_UTC_MASK 0xff
+#define TOD_STATUS_UTC_VALID BIT(8)
+#define TOD_STATUS_LEAP_ANNOUNCE BIT(12)
+#define TOD_STATUS_LEAP_VALID BIT(16)

 struct ts_reg {
 u32 enable;
@@ -174,6 
+179,35 @@ struct dcf_slave_reg { #define DCF_S_CTRL_ENABLE BIT(0) +struct signal_reg { + u32 enable; + u32 status; + u32 polarity; + u32 version; + u32 __pad0[4]; + u32 cable_delay; + u32 __pad1[3]; + u32 intr; + u32 intr_mask; + u32 __pad2[2]; + u32 start_ns; + u32 start_sec; + u32 pulse_ns; + u32 pulse_sec; + u32 period_ns; + u32 period_sec; + u32 repeat_count; +}; + +struct frequency_reg { + u32 ctrl; + u32 status; +}; +#define FREQ_STATUS_VALID BIT(31) +#define FREQ_STATUS_ERROR BIT(30) +#define FREQ_STATUS_OVERRUN BIT(29) +#define FREQ_STATUS_MASK (BIT(24) - 1) + struct ptp_ocp_flash_info { const char *name; int pci_offset; @@ -201,6 +235,40 @@ struct ptp_ocp_ext_src { int irq_vec; }; +enum ptp_ocp_sma_mode { + SMA_MODE_IN, + SMA_MODE_OUT, +}; + +struct ptp_ocp_sma_connector { + enum ptp_ocp_sma_mode mode; + bool fixed_fcn; + bool fixed_dir; + bool disabled; +}; + +struct ocp_attr_group { + u64 cap; + const struct attribute_group *group; +}; + +#define OCP_CAP_BASIC BIT(0) +#define OCP_CAP_SIGNAL BIT(1) +#define OCP_CAP_FREQ BIT(2) + +struct ptp_ocp_signal { + ktime_t period; + ktime_t pulse; + ktime_t phase; + ktime_t start; + int duty; + bool polarity; + bool running; +}; + +#define OCP_BOARD_ID_LEN 13 +#define OCP_SERIAL_LEN 6 + struct ptp_ocp { struct pci_dev *pdev; struct device dev; @@ -210,16 +278,21 @@ struct ptp_ocp { struct pps_reg __iomem *pps_to_ext; struct pps_reg __iomem *pps_to_clk; struct gpio_reg __iomem *pps_select; - struct gpio_reg __iomem *sma; + struct gpio_reg __iomem *sma_map1; + struct gpio_reg __iomem *sma_map2; struct irig_master_reg __iomem *irig_out; struct irig_slave_reg __iomem *irig_in; struct dcf_master_reg __iomem *dcf_out; struct dcf_slave_reg __iomem *dcf_in; struct tod_reg __iomem *nmea_out; + struct frequency_reg __iomem *freq_in[4]; + struct ptp_ocp_ext_src *signal_out[4]; struct ptp_ocp_ext_src *pps; struct ptp_ocp_ext_src *ts0; struct ptp_ocp_ext_src *ts1; struct ptp_ocp_ext_src *ts2; + struct ptp_ocp_ext_src *ts3; + struct ptp_ocp_ext_src *ts4; struct img_reg __iomem *image; struct ptp_clock *ptp; struct ptp_clock_info ptp_info; @@ -227,6 +300,8 @@ struct ptp_ocp { struct platform_device *spi_flash; struct clk_hw *i2c_clk; struct timer_list watchdog; + const struct ocp_attr_group *attr_tbl; + const struct ptp_ocp_eeprom_map *eeprom_map; struct dentry *debug_root; time64_t gnss_lost; int id; @@ -235,12 +310,17 @@ struct ptp_ocp { int gnss2_port; int mac_port; /* miniature atomic clock */ int nmea_port; - u8 serial[6]; - bool has_serial; + u32 fw_version; + u8 board_id[OCP_BOARD_ID_LEN]; + u8 serial[OCP_SERIAL_LEN]; + bool has_eeprom_data; u32 pps_req_map; int flash_start; u32 utc_tai_offset; u32 ts_window_adjust; + u64 fw_cap; + struct ptp_ocp_signal signal[4]; + struct ptp_ocp_sma_connector sma[4]; }; #define OCP_REQ_TIMESTAMP BIT(0) @@ -263,7 +343,36 @@ static int ptp_ocp_register_serial(struct ptp_ocp *bp, struct ocp_resource *r); static int ptp_ocp_register_ext(struct ptp_ocp *bp, struct ocp_resource *r); static int ptp_ocp_fb_board_init(struct ptp_ocp *bp, struct ocp_resource *r); static irqreturn_t ptp_ocp_ts_irq(int irq, void *priv); +static irqreturn_t ptp_ocp_signal_irq(int irq, void *priv); static int ptp_ocp_ts_enable(void *priv, u32 req, bool enable); +static int ptp_ocp_signal_from_perout(struct ptp_ocp *bp, int gen, + struct ptp_perout_request *req); +static int ptp_ocp_signal_enable(void *priv, u32 req, bool enable); +static int ptp_ocp_sma_store(struct ptp_ocp *bp, const char *buf, int sma_nr); + +static const struct 
ocp_attr_group fb_timecard_groups[]; + +struct ptp_ocp_eeprom_map { + u16 off; + u16 len; + u32 bp_offset; + const void * const tag; +}; + +#define EEPROM_ENTRY(addr, member) \ + .off = addr, \ + .len = sizeof_field(struct ptp_ocp, member), \ + .bp_offset = offsetof(struct ptp_ocp, member) + +#define BP_MAP_ENTRY_ADDR(bp, map) ({ \ + (void *)((uintptr_t)(bp) + (map)->bp_offset); \ +}) + +static struct ptp_ocp_eeprom_map fb_eeprom_map[] = { + { EEPROM_ENTRY(0x43, board_id) }, + { EEPROM_ENTRY(0x00, serial), .tag = "mac" }, + { } +}; #define bp_assign_entry(bp, res, val) ({ \ uintptr_t addr = (uintptr_t)(bp) + (res)->bp_offset; \ @@ -289,10 +398,10 @@ static int ptp_ocp_ts_enable(void *priv, u32 req, bool enable); OCP_RES_LOCATION(member), .setup = ptp_ocp_register_ext /* This is the MSI vector mapping used. - * 0: TS3 (and PPS) + * 0: PPS (TS5) * 1: TS0 * 2: TS1 - * 3: GNSS + * 3: GNSS1 * 4: GNSS2 * 5: MAC * 6: TS2 @@ -300,6 +409,12 @@ static int ptp_ocp_ts_enable(void *priv, u32 req, bool enable); * 8: HWICAP (notused) * 9: SPI Flash * 10: NMEA + * 11: Signal Generator 1 + * 12: Signal Generator 2 + * 13: Signal Generator 3 + * 14: Signal Generator 4 + * 15: TS3 + * 16: TS4 */ static struct ocp_resource ocp_fb_resource[] = { @@ -335,15 +450,70 @@ static struct ocp_resource ocp_fb_resource[] = { }, }, { + OCP_EXT_RESOURCE(ts3), + .offset = 0x01110000, .size = 0x10000, .irq_vec = 15, + .extra = &(struct ptp_ocp_ext_info) { + .index = 3, + .irq_fcn = ptp_ocp_ts_irq, + .enable = ptp_ocp_ts_enable, + }, + }, + { + OCP_EXT_RESOURCE(ts4), + .offset = 0x01120000, .size = 0x10000, .irq_vec = 16, + .extra = &(struct ptp_ocp_ext_info) { + .index = 4, + .irq_fcn = ptp_ocp_ts_irq, + .enable = ptp_ocp_ts_enable, + }, + }, + /* Timestamp for PHC and/or PPS generator */ + { OCP_EXT_RESOURCE(pps), .offset = 0x010C0000, .size = 0x10000, .irq_vec = 0, .extra = &(struct ptp_ocp_ext_info) { - .index = 3, + .index = 5, .irq_fcn = ptp_ocp_ts_irq, .enable = ptp_ocp_ts_enable, }, }, { + OCP_EXT_RESOURCE(signal_out[0]), + .offset = 0x010D0000, .size = 0x10000, .irq_vec = 11, + .extra = &(struct ptp_ocp_ext_info) { + .index = 1, + .irq_fcn = ptp_ocp_signal_irq, + .enable = ptp_ocp_signal_enable, + }, + }, + { + OCP_EXT_RESOURCE(signal_out[1]), + .offset = 0x010E0000, .size = 0x10000, .irq_vec = 12, + .extra = &(struct ptp_ocp_ext_info) { + .index = 2, + .irq_fcn = ptp_ocp_signal_irq, + .enable = ptp_ocp_signal_enable, + }, + }, + { + OCP_EXT_RESOURCE(signal_out[2]), + .offset = 0x010F0000, .size = 0x10000, .irq_vec = 13, + .extra = &(struct ptp_ocp_ext_info) { + .index = 3, + .irq_fcn = ptp_ocp_signal_irq, + .enable = ptp_ocp_signal_enable, + }, + }, + { + OCP_EXT_RESOURCE(signal_out[3]), + .offset = 0x01100000, .size = 0x10000, .irq_vec = 14, + .extra = &(struct ptp_ocp_ext_info) { + .index = 4, + .irq_fcn = ptp_ocp_signal_irq, + .enable = ptp_ocp_signal_enable, + }, + }, + { OCP_MEM_RESOURCE(pps_to_ext), .offset = 0x01030000, .size = 0x10000, }, @@ -384,15 +554,28 @@ static struct ocp_resource ocp_fb_resource[] = { .offset = 0x00130000, .size = 0x1000, }, { - OCP_MEM_RESOURCE(sma), + OCP_MEM_RESOURCE(sma_map1), .offset = 0x00140000, .size = 0x1000, }, { + OCP_MEM_RESOURCE(sma_map2), + .offset = 0x00220000, .size = 0x1000, + }, + { OCP_I2C_RESOURCE(i2c_ctrl), .offset = 0x00150000, .size = 0x10000, .irq_vec = 7, .extra = &(struct ptp_ocp_i2c_info) { .name = "xiic-i2c", .fixed_rate = 50000000, + .data_size = sizeof(struct xiic_i2c_platform_data), + .data = &(struct xiic_i2c_platform_data) { + .num_devices = 2, + 
.devices = (struct i2c_board_info[]) { + { I2C_BOARD_INFO("24c02", 0x50) }, + { I2C_BOARD_INFO("24mac402", 0x58), + .platform_data = "mac" }, + }, + }, }, }, { @@ -428,6 +611,22 @@ static struct ocp_resource ocp_fb_resource[] = { }, }, { + OCP_MEM_RESOURCE(freq_in[0]), + .offset = 0x01200000, .size = 0x10000, + }, + { + OCP_MEM_RESOURCE(freq_in[1]), + .offset = 0x01210000, .size = 0x10000, + }, + { + OCP_MEM_RESOURCE(freq_in[2]), + .offset = 0x01220000, .size = 0x10000, + }, + { + OCP_MEM_RESOURCE(freq_in[3]), + .offset = 0x01230000, .size = 0x10000, + }, + { .setup = ptp_ocp_fb_board_init, }, { } @@ -460,25 +659,42 @@ static struct ocp_selector ptp_ocp_clock[] = { { } }; +#define SMA_ENABLE BIT(15) +#define SMA_SELECT_MASK ((1U << 15) - 1) +#define SMA_DISABLE 0x10000 + static struct ocp_selector ptp_ocp_sma_in[] = { - { .name = "10Mhz", .value = 0x00 }, - { .name = "PPS1", .value = 0x01 }, - { .name = "PPS2", .value = 0x02 }, - { .name = "TS1", .value = 0x04 }, - { .name = "TS2", .value = 0x08 }, - { .name = "IRIG", .value = 0x10 }, - { .name = "DCF", .value = 0x20 }, + { .name = "10Mhz", .value = 0x0000 }, + { .name = "PPS1", .value = 0x0001 }, + { .name = "PPS2", .value = 0x0002 }, + { .name = "TS1", .value = 0x0004 }, + { .name = "TS2", .value = 0x0008 }, + { .name = "IRIG", .value = 0x0010 }, + { .name = "DCF", .value = 0x0020 }, + { .name = "TS3", .value = 0x0040 }, + { .name = "TS4", .value = 0x0080 }, + { .name = "FREQ1", .value = 0x0100 }, + { .name = "FREQ2", .value = 0x0200 }, + { .name = "FREQ3", .value = 0x0400 }, + { .name = "FREQ4", .value = 0x0800 }, + { .name = "None", .value = SMA_DISABLE }, { } }; static struct ocp_selector ptp_ocp_sma_out[] = { - { .name = "10Mhz", .value = 0x00 }, - { .name = "PHC", .value = 0x01 }, - { .name = "MAC", .value = 0x02 }, - { .name = "GNSS", .value = 0x04 }, - { .name = "GNSS2", .value = 0x08 }, - { .name = "IRIG", .value = 0x10 }, - { .name = "DCF", .value = 0x20 }, + { .name = "10Mhz", .value = 0x0000 }, + { .name = "PHC", .value = 0x0001 }, + { .name = "MAC", .value = 0x0002 }, + { .name = "GNSS1", .value = 0x0004 }, + { .name = "GNSS2", .value = 0x0008 }, + { .name = "IRIG", .value = 0x0010 }, + { .name = "DCF", .value = 0x0020 }, + { .name = "GEN1", .value = 0x0040 }, + { .name = "GEN2", .value = 0x0080 }, + { .name = "GEN3", .value = 0x0100 }, + { .name = "GEN4", .value = 0x0200 }, + { .name = "GND", .value = 0x2000 }, + { .name = "VCC", .value = 0x4000 }, { } }; @@ -607,7 +823,7 @@ ptp_ocp_settime(struct ptp_clock_info *ptp_info, const struct timespec64 *ts) } static void -__ptp_ocp_adjtime_locked(struct ptp_ocp *bp, u64 adj_val) +__ptp_ocp_adjtime_locked(struct ptp_ocp *bp, u32 adj_val) { u32 select, ctrl; @@ -615,7 +831,7 @@ __ptp_ocp_adjtime_locked(struct ptp_ocp *bp, u64 adj_val) iowrite32(OCP_SELECT_CLK_REG, &bp->reg->select); iowrite32(adj_val, &bp->reg->offset_ns); - iowrite32(adj_val & 0x7f, &bp->reg->offset_window_ns); + iowrite32(NSEC_PER_SEC, &bp->reg->offset_window_ns); ctrl = OCP_CTRL_ADJUST_OFFSET | OCP_CTRL_ENABLE; iowrite32(ctrl, &bp->reg->ctrl); @@ -624,6 +840,22 @@ __ptp_ocp_adjtime_locked(struct ptp_ocp *bp, u64 adj_val) iowrite32(select >> 16, &bp->reg->select); } +static void +ptp_ocp_adjtime_coarse(struct ptp_ocp *bp, u64 delta_ns) +{ + struct timespec64 ts; + unsigned long flags; + int err; + + spin_lock_irqsave(&bp->lock, flags); + err = __ptp_ocp_gettime_locked(bp, &ts, NULL); + if (likely(!err)) { + timespec64_add_ns(&ts, delta_ns); + __ptp_ocp_settime_locked(bp, &ts); + } + 
spin_unlock_irqrestore(&bp->lock, flags); +} + static int ptp_ocp_adjtime(struct ptp_clock_info *ptp_info, s64 delta_ns) { @@ -631,6 +863,11 @@ ptp_ocp_adjtime(struct ptp_clock_info *ptp_info, s64 delta_ns) unsigned long flags; u32 adj_ns, sign; + if (delta_ns > NSEC_PER_SEC || -delta_ns > NSEC_PER_SEC) { + ptp_ocp_adjtime_coarse(bp, delta_ns); + return 0; + } + sign = delta_ns < 0 ? BIT(31) : 0; adj_ns = sign ? -delta_ns : delta_ns; @@ -679,6 +916,12 @@ ptp_ocp_enable(struct ptp_clock_info *ptp_info, struct ptp_clock_request *rq, ext = bp->ts2; break; case 3: + ext = bp->ts3; + break; + case 4: + ext = bp->ts4; + break; + case 5: ext = bp->pps; break; } @@ -688,13 +931,27 @@ ptp_ocp_enable(struct ptp_clock_info *ptp_info, struct ptp_clock_request *rq, ext = bp->pps; break; case PTP_CLK_REQ_PEROUT: - if (on && - (rq->perout.period.sec != 1 || rq->perout.period.nsec != 0)) - return -EINVAL; - /* This is a request for 1PPS on an output SMA. - * Allow, but assume manual configuration. - */ - return 0; + switch (rq->perout.index) { + case 0: + /* This is a request for 1PPS on an output SMA. + * Allow, but assume manual configuration. + */ + if (on && (rq->perout.period.sec != 1 || + rq->perout.period.nsec != 0)) + return -EINVAL; + return 0; + case 1: + case 2: + case 3: + case 4: + req = rq->perout.index - 1; + ext = bp->signal_out[req]; + err = ptp_ocp_signal_from_perout(bp, req, &rq->perout); + if (err) + return err; + break; + } + break; default: return -EOPNOTSUPP; } @@ -706,6 +963,24 @@ ptp_ocp_enable(struct ptp_clock_info *ptp_info, struct ptp_clock_request *rq, return err; } +static int +ptp_ocp_verify(struct ptp_clock_info *ptp_info, unsigned pin, + enum ptp_pin_function func, unsigned chan) +{ + struct ptp_ocp *bp = container_of(ptp_info, struct ptp_ocp, ptp_info); + char buf[16]; + + if (func != PTP_PF_PEROUT) + return -EOPNOTSUPP; + + if (chan) + sprintf(buf, "OUT: GEN%d", chan); + else + sprintf(buf, "OUT: PHC"); + + return ptp_ocp_sma_store(bp, buf, pin + 1); +} + static const struct ptp_clock_info ptp_ocp_clock_info = { .owner = THIS_MODULE, .name = KBUILD_MODNAME, @@ -716,9 +991,10 @@ static const struct ptp_clock_info ptp_ocp_clock_info = { .adjfine = ptp_ocp_null_adjfine, .adjphase = ptp_ocp_null_adjphase, .enable = ptp_ocp_enable, + .verify = ptp_ocp_verify, .pps = true, - .n_ext_ts = 4, - .n_per_out = 1, + .n_ext_ts = 6, + .n_per_out = 5, }; static void @@ -739,11 +1015,30 @@ __ptp_ocp_clear_drift_locked(struct ptp_ocp *bp) } static void +ptp_ocp_utc_distribute(struct ptp_ocp *bp, u32 val) +{ + unsigned long flags; + + spin_lock_irqsave(&bp->lock, flags); + + bp->utc_tai_offset = val; + + if (bp->irig_out) + iowrite32(val, &bp->irig_out->adj_sec); + if (bp->dcf_out) + iowrite32(val, &bp->dcf_out->adj_sec); + if (bp->nmea_out) + iowrite32(val, &bp->nmea_out->adj_sec); + + spin_unlock_irqrestore(&bp->lock, flags); +} + +static void ptp_ocp_watchdog(struct timer_list *t) { struct ptp_ocp *bp = from_timer(bp, t, watchdog); unsigned long flags; - u32 status; + u32 status, utc_offset; status = ioread32(&bp->pps_to_clk->status); @@ -760,6 +1055,17 @@ ptp_ocp_watchdog(struct timer_list *t) bp->gnss_lost = 0; } + /* if GNSS provides correct data we can rely on + * it to get leap second information + */ + if (bp->tod) { + status = ioread32(&bp->tod->utc_status); + utc_offset = status & TOD_STATUS_UTC_MASK; + if (status & TOD_STATUS_UTC_VALID && + utc_offset != bp->utc_tai_offset) + ptp_ocp_utc_distribute(bp, utc_offset); + } + mod_timer(&bp->watchdog, jiffies + HZ); } @@ -829,25 
+1135,6 @@ ptp_ocp_init_clock(struct ptp_ocp *bp) } static void -ptp_ocp_utc_distribute(struct ptp_ocp *bp, u32 val) -{ - unsigned long flags; - - spin_lock_irqsave(&bp->lock, flags); - - bp->utc_tai_offset = val; - - if (bp->irig_out) - iowrite32(val, &bp->irig_out->adj_sec); - if (bp->dcf_out) - iowrite32(val, &bp->dcf_out->adj_sec); - if (bp->nmea_out) - iowrite32(val, &bp->nmea_out->adj_sec); - - spin_unlock_irqrestore(&bp->lock, flags); -} - -static void ptp_ocp_tod_init(struct ptp_ocp *bp) { u32 ctrl, reg; @@ -862,119 +1149,110 @@ ptp_ocp_tod_init(struct ptp_ocp *bp) ptp_ocp_utc_distribute(bp, reg & TOD_STATUS_UTC_MASK); } -static void -ptp_ocp_tod_info(struct ptp_ocp *bp) +static const char * +ptp_ocp_tod_proto_name(const int idx) { static const char * const proto_name[] = { "NMEA", "NMEA_ZDA", "NMEA_RMC", "NMEA_none", "UBX", "UBX_UTC", "UBX_LS", "UBX_none" }; + return proto_name[idx]; +} + +static const char * +ptp_ocp_tod_gnss_name(int idx) +{ static const char * const gnss_name[] = { "ALL", "COMBINED", "GPS", "GLONASS", "GALILEO", "BEIDOU", + "Unknown" }; - u32 version, ctrl, reg; - int idx; - - version = ioread32(&bp->tod->version); - dev_info(&bp->pdev->dev, "TOD Version %d.%d.%d\n", - version >> 24, (version >> 16) & 0xff, version & 0xffff); - - ctrl = ioread32(&bp->tod->ctrl); - idx = ctrl & TOD_CTRL_PROTOCOL ? 4 : 0; - idx += (ctrl >> 16) & 3; - dev_info(&bp->pdev->dev, "control: %x\n", ctrl); - dev_info(&bp->pdev->dev, "TOD Protocol %s %s\n", proto_name[idx], - ctrl & TOD_CTRL_ENABLE ? "enabled" : ""); + if (idx >= ARRAY_SIZE(gnss_name)) + idx = ARRAY_SIZE(gnss_name) - 1; + return gnss_name[idx]; +} - idx = (ctrl >> TOD_CTRL_GNSS_SHIFT) & TOD_CTRL_GNSS_MASK; - if (idx < ARRAY_SIZE(gnss_name)) - dev_info(&bp->pdev->dev, "GNSS %s\n", gnss_name[idx]); +struct ptp_ocp_nvmem_match_info { + struct ptp_ocp *bp; + const void * const tag; +}; - reg = ioread32(&bp->tod->status); - dev_info(&bp->pdev->dev, "status: %x\n", reg); +static int +ptp_ocp_nvmem_match(struct device *dev, const void *data) +{ + const struct ptp_ocp_nvmem_match_info *info = data; - reg = ioread32(&bp->tod->adj_sec); - dev_info(&bp->pdev->dev, "correction: %d\n", reg); + dev = dev->parent; + if (!i2c_verify_client(dev) || info->tag != dev->platform_data) + return 0; - reg = ioread32(&bp->tod->utc_status); - dev_info(&bp->pdev->dev, "utc_status: %x\n", reg); - dev_info(&bp->pdev->dev, "utc_offset: %d valid:%d leap_valid:%d\n", - reg & TOD_STATUS_UTC_MASK, reg & TOD_STATUS_UTC_VALID ? 1 : 0, - reg & TOD_STATUS_LEAP_VALID ? 1 : 0); + while ((dev = dev->parent)) + if (dev->driver && !strcmp(dev->driver->name, KBUILD_MODNAME)) + return info->bp == dev_get_drvdata(dev); + return 0; } -static int -ptp_ocp_firstchild(struct device *dev, void *data) +static inline struct nvmem_device * +ptp_ocp_nvmem_device_get(struct ptp_ocp *bp, const void * const tag) { - return 1; + struct ptp_ocp_nvmem_match_info info = { .bp = bp, .tag = tag }; + + return nvmem_device_find(&info, ptp_ocp_nvmem_match); } -static int -ptp_ocp_read_i2c(struct i2c_adapter *adap, u8 addr, u8 reg, u8 sz, u8 *data) +static inline void +ptp_ocp_nvmem_device_put(struct nvmem_device **nvmemp) { - struct i2c_msg msgs[2] = { - { - .addr = addr, - .len = 1, - .buf = ®, - }, - { - .addr = addr, - .flags = I2C_M_RD, - .len = 2, - .buf = data, - }, - }; - int err; - u8 len; - - /* xiic-i2c for some stupid reason only does 2 byte reads. 
*/ - while (sz) { - len = min_t(u8, sz, 2); - msgs[1].len = len; - err = i2c_transfer(adap, msgs, 2); - if (err != msgs[1].len) - return err; - msgs[1].buf += len; - reg += len; - sz -= len; + if (*nvmemp != NULL) { + nvmem_device_put(*nvmemp); + *nvmemp = NULL; } - return 0; } static void -ptp_ocp_get_serial_number(struct ptp_ocp *bp) +ptp_ocp_read_eeprom(struct ptp_ocp *bp) { - struct i2c_adapter *adap; - struct device *dev; - int err; + const struct ptp_ocp_eeprom_map *map; + struct nvmem_device *nvmem; + const void *tag; + int ret; if (!bp->i2c_ctrl) return; - dev = device_find_child(&bp->i2c_ctrl->dev, NULL, ptp_ocp_firstchild); - if (!dev) { - dev_err(&bp->pdev->dev, "Can't find I2C adapter\n"); - return; - } - - adap = i2c_verify_adapter(dev); - if (!adap) { - dev_err(&bp->pdev->dev, "device '%s' isn't an I2C adapter\n", - dev_name(dev)); - goto out; - } + tag = NULL; + nvmem = NULL; - err = ptp_ocp_read_i2c(adap, 0x58, 0x9A, 6, bp->serial); - if (err) { - dev_err(&bp->pdev->dev, "could not read eeprom: %d\n", err); - goto out; + for (map = bp->eeprom_map; map->len; map++) { + if (map->tag != tag) { + tag = map->tag; + ptp_ocp_nvmem_device_put(&nvmem); + } + if (!nvmem) { + nvmem = ptp_ocp_nvmem_device_get(bp, tag); + if (!nvmem) + goto out; + } + ret = nvmem_device_read(nvmem, map->off, map->len, + BP_MAP_ENTRY_ADDR(bp, map)); + if (ret != map->len) + goto read_fail; } - bp->has_serial = true; + bp->has_eeprom_data = true; out: - put_device(dev); + ptp_ocp_nvmem_device_put(&nvmem); + return; + +read_fail: + dev_err(&bp->pdev->dev, "could not read eeprom: %d\n", ret); + goto out; +} + +static int +ptp_ocp_firstchild(struct device *dev, void *data) +{ + return 1; } static struct device * @@ -1075,34 +1353,33 @@ ptp_ocp_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req, if (err) return err; - if (bp->image) { - u32 ver = ioread32(&bp->image->version); - - if (ver & 0xffff) { - sprintf(buf, "%d", ver); - err = devlink_info_version_running_put(req, - "fw", - buf); - } else { - sprintf(buf, "%d", ver >> 16); - err = devlink_info_version_running_put(req, - "loader", - buf); - } - if (err) - return err; + if (bp->fw_version & 0xffff) { + sprintf(buf, "%d", bp->fw_version); + err = devlink_info_version_running_put(req, "fw", buf); + } else { + sprintf(buf, "%d", bp->fw_version >> 16); + err = devlink_info_version_running_put(req, "loader", buf); } + if (err) + return err; - if (!bp->has_serial) - ptp_ocp_get_serial_number(bp); - - if (bp->has_serial) { - sprintf(buf, "%pM", bp->serial); - err = devlink_info_serial_number_put(req, buf); - if (err) - return err; + if (!bp->has_eeprom_data) { + ptp_ocp_read_eeprom(bp); + if (!bp->has_eeprom_data) + return 0; } + sprintf(buf, "%pM", bp->serial); + err = devlink_info_serial_number_put(req, buf); + if (err) + return err; + + err = devlink_info_version_fixed_put(req, + DEVLINK_INFO_VERSION_GENERIC_BOARD_ID, + bp->board_id); + if (err) + return err; + return 0; } @@ -1219,6 +1496,137 @@ ptp_ocp_register_i2c(struct ptp_ocp *bp, struct ocp_resource *r) return 0; } +/* The expectation is that this is triggered only on error. 
*/ +static irqreturn_t +ptp_ocp_signal_irq(int irq, void *priv) +{ + struct ptp_ocp_ext_src *ext = priv; + struct signal_reg __iomem *reg = ext->mem; + struct ptp_ocp *bp = ext->bp; + u32 enable, status; + int gen; + + gen = ext->info->index - 1; + + enable = ioread32(®->enable); + status = ioread32(®->status); + + /* disable generator on error */ + if (status || !enable) { + iowrite32(0, ®->intr_mask); + iowrite32(0, ®->enable); + bp->signal[gen].running = false; + } + + iowrite32(0, ®->intr); /* ack interrupt */ + + return IRQ_HANDLED; +} + +static int +ptp_ocp_signal_set(struct ptp_ocp *bp, int gen, struct ptp_ocp_signal *s) +{ + struct ptp_system_timestamp sts; + struct timespec64 ts; + ktime_t start_ns; + int err; + + if (!s->period) + return 0; + + if (!s->pulse) + s->pulse = ktime_divns(s->period * s->duty, 100); + + err = ptp_ocp_gettimex(&bp->ptp_info, &ts, &sts); + if (err) + return err; + + start_ns = ktime_set(ts.tv_sec, ts.tv_nsec) + NSEC_PER_MSEC; + if (!s->start) { + /* roundup() does not work on 32-bit systems */ + s->start = DIV_ROUND_UP_ULL(start_ns, s->period); + s->start = ktime_add(s->start, s->phase); + } + + if (s->duty < 1 || s->duty > 99) + return -EINVAL; + + if (s->pulse < 1 || s->pulse > s->period) + return -EINVAL; + + if (s->start < start_ns) + return -EINVAL; + + bp->signal[gen] = *s; + + return 0; +} + +static int +ptp_ocp_signal_from_perout(struct ptp_ocp *bp, int gen, + struct ptp_perout_request *req) +{ + struct ptp_ocp_signal s = { }; + + s.polarity = bp->signal[gen].polarity; + s.period = ktime_set(req->period.sec, req->period.nsec); + if (!s.period) + return 0; + + if (req->flags & PTP_PEROUT_DUTY_CYCLE) { + s.pulse = ktime_set(req->on.sec, req->on.nsec); + s.duty = ktime_divns(s.pulse * 100, s.period); + } + + if (req->flags & PTP_PEROUT_PHASE) + s.phase = ktime_set(req->phase.sec, req->phase.nsec); + else + s.start = ktime_set(req->start.sec, req->start.nsec); + + return ptp_ocp_signal_set(bp, gen, &s); +} + +static int +ptp_ocp_signal_enable(void *priv, u32 req, bool enable) +{ + struct ptp_ocp_ext_src *ext = priv; + struct signal_reg __iomem *reg = ext->mem; + struct ptp_ocp *bp = ext->bp; + struct timespec64 ts; + int gen; + + gen = ext->info->index - 1; + + iowrite32(0, ®->intr_mask); + iowrite32(0, ®->enable); + bp->signal[gen].running = false; + if (!enable) + return 0; + + ts = ktime_to_timespec64(bp->signal[gen].start); + iowrite32(ts.tv_sec, ®->start_sec); + iowrite32(ts.tv_nsec, ®->start_ns); + + ts = ktime_to_timespec64(bp->signal[gen].period); + iowrite32(ts.tv_sec, ®->period_sec); + iowrite32(ts.tv_nsec, ®->period_ns); + + ts = ktime_to_timespec64(bp->signal[gen].pulse); + iowrite32(ts.tv_sec, ®->pulse_sec); + iowrite32(ts.tv_nsec, ®->pulse_ns); + + iowrite32(bp->signal[gen].polarity, ®->polarity); + iowrite32(0, ®->repeat_count); + + iowrite32(0, ®->intr); /* clear interrupt state */ + iowrite32(1, ®->intr_mask); /* enable interrupt */ + iowrite32(3, ®->enable); /* valid & enable */ + + bp->signal[gen].running = true; + + return 0; +} + static irqreturn_t ptp_ocp_ts_irq(int irq, void *priv) { @@ -1346,7 +1754,7 @@ ptp_ocp_serial_line(struct ptp_ocp *bp, struct ocp_resource *r) uart.port.mapbase = pci_resource_start(pdev, 0) + r->offset; uart.port.irq = pci_irq_vector(pdev, r->irq_vec); uart.port.uartclk = 50000000; - uart.port.flags = UPF_FIXED_TYPE | UPF_IOREMAP; + uart.port.flags = UPF_FIXED_TYPE | UPF_IOREMAP | UPF_NO_THRE_TEST; uart.port.type = PORT_16550A; return serial8250_register_8250_port(&uart); @@ -1391,14 +1799,115 @@ 
ptp_ocp_nmea_out_init(struct ptp_ocp *bp) iowrite32(1, &bp->nmea_out->ctrl); /* enable */ } +static void +_ptp_ocp_signal_init(struct ptp_ocp_signal *s, struct signal_reg __iomem *reg) +{ + u32 val; + + iowrite32(0, ®->enable); /* disable */ + + val = ioread32(®->polarity); + s->polarity = val ? true : false; + s->duty = 50; +} + +static void +ptp_ocp_signal_init(struct ptp_ocp *bp) +{ + int i; + + for (i = 0; i < 4; i++) + if (bp->signal_out[i]) + _ptp_ocp_signal_init(&bp->signal[i], + bp->signal_out[i]->mem); +} + +static void +ptp_ocp_sma_init(struct ptp_ocp *bp) +{ + u32 reg; + int i; + + /* defaults */ + bp->sma[0].mode = SMA_MODE_IN; + bp->sma[1].mode = SMA_MODE_IN; + bp->sma[2].mode = SMA_MODE_OUT; + bp->sma[3].mode = SMA_MODE_OUT; + + /* If no SMA1 map, the pin functions and directions are fixed. */ + if (!bp->sma_map1) { + for (i = 0; i < 4; i++) { + bp->sma[i].fixed_fcn = true; + bp->sma[i].fixed_dir = true; + } + return; + } + + /* If SMA2 GPIO output map is all 1, it is not present. + * This indicates the firmware has fixed direction SMA pins. + */ + reg = ioread32(&bp->sma_map2->gpio2); + if (reg == 0xffffffff) { + for (i = 0; i < 4; i++) + bp->sma[i].fixed_dir = true; + } else { + reg = ioread32(&bp->sma_map1->gpio1); + bp->sma[0].mode = reg & BIT(15) ? SMA_MODE_IN : SMA_MODE_OUT; + bp->sma[1].mode = reg & BIT(31) ? SMA_MODE_IN : SMA_MODE_OUT; + + reg = ioread32(&bp->sma_map1->gpio2); + bp->sma[2].mode = reg & BIT(15) ? SMA_MODE_OUT : SMA_MODE_IN; + bp->sma[3].mode = reg & BIT(31) ? SMA_MODE_OUT : SMA_MODE_IN; + } +} + +static int +ptp_ocp_fb_set_pins(struct ptp_ocp *bp) +{ + struct ptp_pin_desc *config; + int i; + + config = kzalloc(sizeof(*config) * 4, GFP_KERNEL); + if (!config) + return -ENOMEM; + + for (i = 0; i < 4; i++) { + sprintf(config[i].name, "sma%d", i + 1); + config[i].index = i; + } + + bp->ptp_info.n_pins = 4; + bp->ptp_info.pin_config = config; + + return 0; +} + /* FB specific board initializers; last "resource" registered. 
*/ static int ptp_ocp_fb_board_init(struct ptp_ocp *bp, struct ocp_resource *r) { + int ver, err; + bp->flash_start = 1024 * 4096; + bp->eeprom_map = fb_eeprom_map; + bp->fw_version = ioread32(&bp->image->version); + bp->attr_tbl = fb_timecard_groups; + bp->fw_cap = OCP_CAP_BASIC; + + ver = bp->fw_version & 0xffff; + if (ver >= 19) + bp->fw_cap |= OCP_CAP_SIGNAL; + if (ver >= 20) + bp->fw_cap |= OCP_CAP_FREQ; ptp_ocp_tod_init(bp); ptp_ocp_nmea_out_init(bp); + ptp_ocp_sma_init(bp); + ptp_ocp_signal_init(bp); + + err = ptp_ocp_fb_set_pins(bp); + if (err) + return err; return ptp_ocp_init_clock(bp); } @@ -1500,38 +2009,8 @@ __handle_signal_inputs(struct ptp_ocp *bp, u32 val) * ANT4 == sma4 (out) */ -enum ptp_ocp_sma_mode { - SMA_MODE_IN, - SMA_MODE_OUT, -}; - -static struct ptp_ocp_sma_connector { - enum ptp_ocp_sma_mode mode; - bool fixed_mode; - u16 default_out_idx; -} ptp_ocp_sma_map[4] = { - { - .mode = SMA_MODE_IN, - .fixed_mode = true, - }, - { - .mode = SMA_MODE_IN, - .fixed_mode = true, - }, - { - .mode = SMA_MODE_OUT, - .fixed_mode = true, - .default_out_idx = 0, /* 10Mhz */ - }, - { - .mode = SMA_MODE_OUT, - .fixed_mode = true, - .default_out_idx = 1, /* PHC */ - }, -}; - static ssize_t -ptp_ocp_show_output(u32 val, char *buf, int default_idx) +ptp_ocp_show_output(u32 val, char *buf, int def_val) { const char *name; ssize_t count; @@ -1539,13 +2018,13 @@ ptp_ocp_show_output(u32 val, char *buf, int default_idx) count = sysfs_emit(buf, "OUT: "); name = ptp_ocp_select_name_from_val(ptp_ocp_sma_out, val); if (!name) - name = ptp_ocp_sma_out[default_idx].name; + name = ptp_ocp_select_name_from_val(ptp_ocp_sma_out, def_val); count += sysfs_emit_at(buf, count, "%s\n", name); return count; } static ssize_t -ptp_ocp_show_inputs(u32 val, char *buf, const char *zero_in) +ptp_ocp_show_inputs(u32 val, char *buf, int def_val) { const char *name; ssize_t count; @@ -1558,8 +2037,10 @@ ptp_ocp_show_inputs(u32 val, char *buf, const char *zero_in) count += sysfs_emit_at(buf, count, "%s ", name); } } - if (!val && zero_in) - count += sysfs_emit_at(buf, count, "%s ", zero_in); + if (!val && def_val >= 0) { + name = ptp_ocp_select_name_from_val(ptp_ocp_sma_in, def_val); + count += sysfs_emit_at(buf, count, "%s ", name); + } if (count) count--; count += sysfs_emit_at(buf, count, "\n"); @@ -1584,7 +2065,7 @@ sma_parse_inputs(const char *buf, enum ptp_ocp_sma_mode *mode) idx = 0; dir = *mode == SMA_MODE_IN ? 0 : 1; - if (!strcasecmp("IN:", argv[idx])) { + if (!strcasecmp("IN:", argv[0])) { dir = 0; idx++; } @@ -1605,102 +2086,126 @@ out: return ret; } +static u32 +ptp_ocp_sma_get(struct ptp_ocp *bp, int sma_nr, enum ptp_ocp_sma_mode mode) +{ + u32 __iomem *gpio; + u32 shift; + + if (bp->sma[sma_nr - 1].fixed_fcn) + return (sma_nr - 1) & 1; + + if (mode == SMA_MODE_IN) + gpio = sma_nr > 2 ? &bp->sma_map2->gpio1 : &bp->sma_map1->gpio1; + else + gpio = sma_nr > 2 ? &bp->sma_map1->gpio2 : &bp->sma_map2->gpio2; + shift = sma_nr & 1 ? 
0 : 16; + + return (ioread32(gpio) >> shift) & 0xffff; +} + static ssize_t -ptp_ocp_sma_show(struct ptp_ocp *bp, int sma_nr, u32 val, char *buf, - const char *zero_in) +ptp_ocp_sma_show(struct ptp_ocp *bp, int sma_nr, char *buf, + int default_in_val, int default_out_val) { - struct ptp_ocp_sma_connector *sma = &ptp_ocp_sma_map[sma_nr - 1]; + struct ptp_ocp_sma_connector *sma = &bp->sma[sma_nr - 1]; + u32 val; - if (sma->mode == SMA_MODE_IN) - return ptp_ocp_show_inputs(val, buf, zero_in); + val = ptp_ocp_sma_get(bp, sma_nr, sma->mode) & SMA_SELECT_MASK; - return ptp_ocp_show_output(val, buf, sma->default_out_idx); + if (sma->mode == SMA_MODE_IN) { + if (sma->disabled) + val = SMA_DISABLE; + return ptp_ocp_show_inputs(val, buf, default_in_val); + } + + return ptp_ocp_show_output(val, buf, default_out_val); } static ssize_t sma1_show(struct device *dev, struct device_attribute *attr, char *buf) { struct ptp_ocp *bp = dev_get_drvdata(dev); - u32 val; - val = ioread32(&bp->sma->gpio1) & 0x3f; - return ptp_ocp_sma_show(bp, 1, val, buf, ptp_ocp_sma_in[0].name); + return ptp_ocp_sma_show(bp, 1, buf, 0, 1); } static ssize_t sma2_show(struct device *dev, struct device_attribute *attr, char *buf) { struct ptp_ocp *bp = dev_get_drvdata(dev); - u32 val; - val = (ioread32(&bp->sma->gpio1) >> 16) & 0x3f; - return ptp_ocp_sma_show(bp, 2, val, buf, NULL); + return ptp_ocp_sma_show(bp, 2, buf, -1, 1); } static ssize_t sma3_show(struct device *dev, struct device_attribute *attr, char *buf) { struct ptp_ocp *bp = dev_get_drvdata(dev); - u32 val; - val = ioread32(&bp->sma->gpio2) & 0x3f; - return ptp_ocp_sma_show(bp, 3, val, buf, NULL); + return ptp_ocp_sma_show(bp, 3, buf, -1, 0); } static ssize_t sma4_show(struct device *dev, struct device_attribute *attr, char *buf) { struct ptp_ocp *bp = dev_get_drvdata(dev); - u32 val; - val = (ioread32(&bp->sma->gpio2) >> 16) & 0x3f; - return ptp_ocp_sma_show(bp, 4, val, buf, NULL); + return ptp_ocp_sma_show(bp, 4, buf, -1, 1); } static void -ptp_ocp_sma_store_output(struct ptp_ocp *bp, u32 val, u32 shift) +ptp_ocp_sma_store_output(struct ptp_ocp *bp, int sma_nr, u32 val) { + u32 reg, mask, shift; unsigned long flags; - u32 gpio, mask; + u32 __iomem *gpio; + + gpio = sma_nr > 2 ? &bp->sma_map1->gpio2 : &bp->sma_map2->gpio2; + shift = sma_nr & 1 ? 0 : 16; mask = 0xffff << (16 - shift); spin_lock_irqsave(&bp->lock, flags); - gpio = ioread32(&bp->sma->gpio2); - gpio = (gpio & mask) | (val << shift); + reg = ioread32(gpio); + reg = (reg & mask) | (val << shift); - __handle_signal_outputs(bp, gpio); + __handle_signal_outputs(bp, reg); - iowrite32(gpio, &bp->sma->gpio2); + iowrite32(reg, gpio); spin_unlock_irqrestore(&bp->lock, flags); } static void -ptp_ocp_sma_store_inputs(struct ptp_ocp *bp, u32 val, u32 shift) +ptp_ocp_sma_store_inputs(struct ptp_ocp *bp, int sma_nr, u32 val) { + u32 reg, mask, shift; unsigned long flags; - u32 gpio, mask; + u32 __iomem *gpio; + + gpio = sma_nr > 2 ? &bp->sma_map2->gpio1 : &bp->sma_map1->gpio1; + shift = sma_nr & 1 ? 
0 : 16; mask = 0xffff << (16 - shift); spin_lock_irqsave(&bp->lock, flags); - gpio = ioread32(&bp->sma->gpio1); - gpio = (gpio & mask) | (val << shift); + reg = ioread32(gpio); + reg = (reg & mask) | (val << shift); - __handle_signal_inputs(bp, gpio); + __handle_signal_inputs(bp, reg); - iowrite32(gpio, &bp->sma->gpio1); + iowrite32(reg, gpio); spin_unlock_irqrestore(&bp->lock, flags); } -static ssize_t -ptp_ocp_sma_store(struct ptp_ocp *bp, const char *buf, int sma_nr, u32 shift) +static int +ptp_ocp_sma_store(struct ptp_ocp *bp, const char *buf, int sma_nr) { - struct ptp_ocp_sma_connector *sma = &ptp_ocp_sma_map[sma_nr - 1]; + struct ptp_ocp_sma_connector *sma = &bp->sma[sma_nr - 1]; enum ptp_ocp_sma_mode mode; int val; @@ -1709,18 +2214,35 @@ ptp_ocp_sma_store(struct ptp_ocp *bp, const char *buf, int sma_nr, u32 shift) if (val < 0) return val; - if (mode != sma->mode && sma->fixed_mode) + if (sma->fixed_dir && (mode != sma->mode || val & SMA_DISABLE)) return -EOPNOTSUPP; + if (sma->fixed_fcn) { + if (val != ((sma_nr - 1) & 1)) + return -EOPNOTSUPP; + return 0; + } + + sma->disabled = !!(val & SMA_DISABLE); + if (mode != sma->mode) { - pr_err("Mode changes not supported yet.\n"); - return -EOPNOTSUPP; + if (mode == SMA_MODE_IN) + ptp_ocp_sma_store_output(bp, sma_nr, 0); + else + ptp_ocp_sma_store_inputs(bp, sma_nr, 0); + sma->mode = mode; } - if (sma->mode == SMA_MODE_IN) - ptp_ocp_sma_store_inputs(bp, val, shift); + if (!sma->fixed_dir) + val |= SMA_ENABLE; /* add enable bit */ + + if (sma->disabled) + val = 0; + + if (mode == SMA_MODE_IN) + ptp_ocp_sma_store_inputs(bp, sma_nr, val); else - ptp_ocp_sma_store_output(bp, val, shift); + ptp_ocp_sma_store_output(bp, sma_nr, val); return 0; } @@ -1732,7 +2254,7 @@ sma1_store(struct device *dev, struct device_attribute *attr, struct ptp_ocp *bp = dev_get_drvdata(dev); int err; - err = ptp_ocp_sma_store(bp, buf, 1, 0); + err = ptp_ocp_sma_store(bp, buf, 1); return err ? err : count; } @@ -1743,7 +2265,7 @@ sma2_store(struct device *dev, struct device_attribute *attr, struct ptp_ocp *bp = dev_get_drvdata(dev); int err; - err = ptp_ocp_sma_store(bp, buf, 2, 16); + err = ptp_ocp_sma_store(bp, buf, 2); return err ? err : count; } @@ -1754,7 +2276,7 @@ sma3_store(struct device *dev, struct device_attribute *attr, struct ptp_ocp *bp = dev_get_drvdata(dev); int err; - err = ptp_ocp_sma_store(bp, buf, 3, 0); + err = ptp_ocp_sma_store(bp, buf, 3); return err ? err : count; } @@ -1765,7 +2287,7 @@ sma4_store(struct device *dev, struct device_attribute *attr, struct ptp_ocp *bp = dev_get_drvdata(dev); int err; - err = ptp_ocp_sma_store(bp, buf, 4, 16); + err = ptp_ocp_sma_store(bp, buf, 4); return err ? 
err : count; } static DEVICE_ATTR_RW(sma1); @@ -1789,13 +2311,263 @@ available_sma_outputs_show(struct device *dev, } static DEVICE_ATTR_RO(available_sma_outputs); +#define EXT_ATTR_RO(_group, _name, _val) \ + struct dev_ext_attribute dev_attr_##_group##_val##_##_name = \ + { __ATTR_RO(_name), (void *)_val } +#define EXT_ATTR_RW(_group, _name, _val) \ + struct dev_ext_attribute dev_attr_##_group##_val##_##_name = \ + { __ATTR_RW(_name), (void *)_val } +#define to_ext_attr(x) container_of(x, struct dev_ext_attribute, attr) + +/* period [duty [phase [polarity]]] */ +static ssize_t +signal_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct dev_ext_attribute *ea = to_ext_attr(attr); + struct ptp_ocp *bp = dev_get_drvdata(dev); + struct ptp_ocp_signal s = { }; + int gen = (uintptr_t)ea->var; + int argc, err; + char **argv; + + argv = argv_split(GFP_KERNEL, buf, &argc); + if (!argv) + return -ENOMEM; + + err = -EINVAL; + s.duty = bp->signal[gen].duty; + s.phase = bp->signal[gen].phase; + s.period = bp->signal[gen].period; + s.polarity = bp->signal[gen].polarity; + + switch (argc) { + case 4: + argc--; + err = kstrtobool(argv[argc], &s.polarity); + if (err) + goto out; + fallthrough; + case 3: + argc--; + err = kstrtou64(argv[argc], 0, &s.phase); + if (err) + goto out; + fallthrough; + case 2: + argc--; + err = kstrtoint(argv[argc], 0, &s.duty); + if (err) + goto out; + fallthrough; + case 1: + argc--; + err = kstrtou64(argv[argc], 0, &s.period); + if (err) + goto out; + break; + default: + goto out; + } + + err = ptp_ocp_signal_set(bp, gen, &s); + if (err) + goto out; + + err = ptp_ocp_signal_enable(bp->signal_out[gen], gen, s.period != 0); + +out: + argv_free(argv); + return err ? err : count; +} + +static ssize_t +signal_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct dev_ext_attribute *ea = to_ext_attr(attr); + struct ptp_ocp *bp = dev_get_drvdata(dev); + struct ptp_ocp_signal *signal; + struct timespec64 ts; + ssize_t count; + int i; + + i = (uintptr_t)ea->var; + signal = &bp->signal[i]; + + count = sysfs_emit(buf, "%llu %d %llu %d", signal->period, + signal->duty, signal->phase, signal->polarity); + + ts = ktime_to_timespec64(signal->start); + count += sysfs_emit_at(buf, count, " %ptT TAI\n", &ts); + + return count; +} +static EXT_ATTR_RW(signal, signal, 0); +static EXT_ATTR_RW(signal, signal, 1); +static EXT_ATTR_RW(signal, signal, 2); +static EXT_ATTR_RW(signal, signal, 3); + +static ssize_t +duty_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct dev_ext_attribute *ea = to_ext_attr(attr); + struct ptp_ocp *bp = dev_get_drvdata(dev); + int i = (uintptr_t)ea->var; + + return sysfs_emit(buf, "%d\n", bp->signal[i].duty); +} +static EXT_ATTR_RO(signal, duty, 0); +static EXT_ATTR_RO(signal, duty, 1); +static EXT_ATTR_RO(signal, duty, 2); +static EXT_ATTR_RO(signal, duty, 3); + +static ssize_t +period_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct dev_ext_attribute *ea = to_ext_attr(attr); + struct ptp_ocp *bp = dev_get_drvdata(dev); + int i = (uintptr_t)ea->var; + + return sysfs_emit(buf, "%llu\n", bp->signal[i].period); +} +static EXT_ATTR_RO(signal, period, 0); +static EXT_ATTR_RO(signal, period, 1); +static EXT_ATTR_RO(signal, period, 2); +static EXT_ATTR_RO(signal, period, 3); + +static ssize_t +phase_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct dev_ext_attribute *ea = to_ext_attr(attr); + struct ptp_ocp *bp = 
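The per-instance show/store handlers in this region all use the same indexing trick: the EXT_ATTR_* macros above stash a small integer in dev_ext_attribute.var at build time, and each callback recovers it with container_of() on the embedded device_attribute. Stripped to its essentials (example_show and the attribute name are placeholders):

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct dev_ext_attribute *ea =
		container_of(attr, struct dev_ext_attribute, attr);
	int idx = (uintptr_t)ea->var;	/* instance number, 0..3 */

	return sysfs_emit(buf, "%d\n", idx);
}

static struct dev_ext_attribute dev_attr_example_0 =
	{ __ATTR_RO(example), (void *)0 };
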
dev_get_drvdata(dev); + int i = (uintptr_t)ea->var; + + return sysfs_emit(buf, "%llu\n", bp->signal[i].phase); +} +static EXT_ATTR_RO(signal, phase, 0); +static EXT_ATTR_RO(signal, phase, 1); +static EXT_ATTR_RO(signal, phase, 2); +static EXT_ATTR_RO(signal, phase, 3); + +static ssize_t +polarity_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct dev_ext_attribute *ea = to_ext_attr(attr); + struct ptp_ocp *bp = dev_get_drvdata(dev); + int i = (uintptr_t)ea->var; + + return sysfs_emit(buf, "%d\n", bp->signal[i].polarity); +} +static EXT_ATTR_RO(signal, polarity, 0); +static EXT_ATTR_RO(signal, polarity, 1); +static EXT_ATTR_RO(signal, polarity, 2); +static EXT_ATTR_RO(signal, polarity, 3); + +static ssize_t +running_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct dev_ext_attribute *ea = to_ext_attr(attr); + struct ptp_ocp *bp = dev_get_drvdata(dev); + int i = (uintptr_t)ea->var; + + return sysfs_emit(buf, "%d\n", bp->signal[i].running); +} +static EXT_ATTR_RO(signal, running, 0); +static EXT_ATTR_RO(signal, running, 1); +static EXT_ATTR_RO(signal, running, 2); +static EXT_ATTR_RO(signal, running, 3); + +static ssize_t +start_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct dev_ext_attribute *ea = to_ext_attr(attr); + struct ptp_ocp *bp = dev_get_drvdata(dev); + int i = (uintptr_t)ea->var; + struct timespec64 ts; + + ts = ktime_to_timespec64(bp->signal[i].start); + return sysfs_emit(buf, "%llu.%lu\n", ts.tv_sec, ts.tv_nsec); +} +static EXT_ATTR_RO(signal, start, 0); +static EXT_ATTR_RO(signal, start, 1); +static EXT_ATTR_RO(signal, start, 2); +static EXT_ATTR_RO(signal, start, 3); + +static ssize_t +seconds_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct dev_ext_attribute *ea = to_ext_attr(attr); + struct ptp_ocp *bp = dev_get_drvdata(dev); + int idx = (uintptr_t)ea->var; + u32 val; + int err; + + err = kstrtou32(buf, 0, &val); + if (err) + return err; + if (val > 0xff) + return -EINVAL; + + if (val) + val = (val << 8) | 0x1; + + iowrite32(val, &bp->freq_in[idx]->ctrl); + + return count; +} + +static ssize_t +seconds_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct dev_ext_attribute *ea = to_ext_attr(attr); + struct ptp_ocp *bp = dev_get_drvdata(dev); + int idx = (uintptr_t)ea->var; + u32 val; + + val = ioread32(&bp->freq_in[idx]->ctrl); + if (val & 1) + val = (val >> 8) & 0xff; + else + val = 0; + + return sysfs_emit(buf, "%u\n", val); +} +static EXT_ATTR_RW(freq, seconds, 0); +static EXT_ATTR_RW(freq, seconds, 1); +static EXT_ATTR_RW(freq, seconds, 2); +static EXT_ATTR_RW(freq, seconds, 3); + +static ssize_t +frequency_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct dev_ext_attribute *ea = to_ext_attr(attr); + struct ptp_ocp *bp = dev_get_drvdata(dev); + int idx = (uintptr_t)ea->var; + u32 val; + + val = ioread32(&bp->freq_in[idx]->status); + if (val & FREQ_STATUS_ERROR) + return sysfs_emit(buf, "error\n"); + if (val & FREQ_STATUS_OVERRUN) + return sysfs_emit(buf, "overrun\n"); + if (val & FREQ_STATUS_VALID) + return sysfs_emit(buf, "%lu\n", val & FREQ_STATUS_MASK); + return 0; +} +static EXT_ATTR_RO(freq, frequency, 0); +static EXT_ATTR_RO(freq, frequency, 1); +static EXT_ATTR_RO(freq, frequency, 2); +static EXT_ATTR_RO(freq, frequency, 3); + static ssize_t serialnum_show(struct device *dev, struct device_attribute *attr, char *buf) { struct ptp_ocp *bp = dev_get_drvdata(dev); - if 
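Reading seconds_store() and seconds_show() above together gives the implied layout of the frequency counter's ctrl word: bit 0 enables the counter and bits 15:8 hold the measurement window in seconds, with a write of 0 disabling it outright. A hedged decode helper, derived only from that shift arithmetic rather than from any register documentation:

struct freq_ctrl {
	bool enabled;	/* ctrl bit 0 */
	u8 window_s;	/* ctrl bits 15:8, window in seconds */
};

static struct freq_ctrl freq_ctrl_decode(u32 ctrl)
{
	struct freq_ctrl fc = {
		.enabled = ctrl & 1,
		.window_s = (ctrl >> 8) & 0xff,
	};

	return fc;
}
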
(!bp->has_serial) - ptp_ocp_get_serial_number(bp); + if (!bp->has_eeprom_data) + ptp_ocp_read_eeprom(bp); return sysfs_emit(buf, "%pM\n", bp->serial); } @@ -1953,7 +2725,122 @@ available_clock_sources_show(struct device *dev, } static DEVICE_ATTR_RO(available_clock_sources); -static struct attribute *timecard_attrs[] = { +static ssize_t +clock_status_drift_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ptp_ocp *bp = dev_get_drvdata(dev); + u32 val; + int res; + + val = ioread32(&bp->reg->status_drift); + res = (val & ~INT_MAX) ? -1 : 1; + res *= (val & INT_MAX); + return sysfs_emit(buf, "%d\n", res); +} +static DEVICE_ATTR_RO(clock_status_drift); + +static ssize_t +clock_status_offset_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ptp_ocp *bp = dev_get_drvdata(dev); + u32 val; + int res; + + val = ioread32(&bp->reg->status_offset); + res = (val & ~INT_MAX) ? -1 : 1; + res *= (val & INT_MAX); + return sysfs_emit(buf, "%d\n", res); +} +static DEVICE_ATTR_RO(clock_status_offset); + +static ssize_t +tod_correction_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ptp_ocp *bp = dev_get_drvdata(dev); + u32 val; + int res; + + val = ioread32(&bp->tod->adj_sec); + res = (val & ~INT_MAX) ? -1 : 1; + res *= (val & INT_MAX); + return sysfs_emit(buf, "%d\n", res); +} + +static ssize_t +tod_correction_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ptp_ocp *bp = dev_get_drvdata(dev); + unsigned long flags; + int err, res; + u32 val = 0; + + err = kstrtos32(buf, 0, &res); + if (err) + return err; + if (res < 0) { + res *= -1; + val |= BIT(31); + } + val |= res; + + spin_lock_irqsave(&bp->lock, flags); + iowrite32(val, &bp->tod->adj_sec); + spin_unlock_irqrestore(&bp->lock, flags); + + return count; +} +static DEVICE_ATTR_RW(tod_correction); + +#define _DEVICE_SIGNAL_GROUP_ATTRS(_nr) \ + static struct attribute *fb_timecard_signal##_nr##_attrs[] = { \ + &dev_attr_signal##_nr##_signal.attr.attr, \ + &dev_attr_signal##_nr##_duty.attr.attr, \ + &dev_attr_signal##_nr##_phase.attr.attr, \ + &dev_attr_signal##_nr##_period.attr.attr, \ + &dev_attr_signal##_nr##_polarity.attr.attr, \ + &dev_attr_signal##_nr##_running.attr.attr, \ + &dev_attr_signal##_nr##_start.attr.attr, \ + NULL, \ + } + +#define DEVICE_SIGNAL_GROUP(_name, _nr) \ + _DEVICE_SIGNAL_GROUP_ATTRS(_nr); \ + static const struct attribute_group \ + fb_timecard_signal##_nr##_group = { \ + .name = #_name, \ + .attrs = fb_timecard_signal##_nr##_attrs, \ +} + +DEVICE_SIGNAL_GROUP(gen1, 0); +DEVICE_SIGNAL_GROUP(gen2, 1); +DEVICE_SIGNAL_GROUP(gen3, 2); +DEVICE_SIGNAL_GROUP(gen4, 3); + +#define _DEVICE_FREQ_GROUP_ATTRS(_nr) \ + static struct attribute *fb_timecard_freq##_nr##_attrs[] = { \ + &dev_attr_freq##_nr##_seconds.attr.attr, \ + &dev_attr_freq##_nr##_frequency.attr.attr, \ + NULL, \ + } + +#define DEVICE_FREQ_GROUP(_name, _nr) \ + _DEVICE_FREQ_GROUP_ATTRS(_nr); \ + static const struct attribute_group \ + fb_timecard_freq##_nr##_group = { \ + .name = #_name, \ + .attrs = fb_timecard_freq##_nr##_attrs, \ +} + +DEVICE_FREQ_GROUP(freq1, 0); +DEVICE_FREQ_GROUP(freq2, 1); +DEVICE_FREQ_GROUP(freq3, 2); +DEVICE_FREQ_GROUP(freq4, 3); + +static struct attribute *fb_timecard_attrs[] = { &dev_attr_serialnum.attr, &dev_attr_gnss_sync.attr, &dev_attr_clock_source.attr, @@ -1964,38 +2851,119 @@ static struct attribute *timecard_attrs[] = { &dev_attr_sma4.attr, &dev_attr_available_sma_inputs.attr, 
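Three registers handled above (status_drift, status_offset, and the TOD adj_sec) share a sign-magnitude encoding: bit 31 carries the sign and bits 30:0 the magnitude, which is why the handlers multiply by (val & ~INT_MAX) ? -1 : 1 instead of casting, since a plain two's-complement (s32) cast would decode negative values wrongly. Round-trip helpers for that convention, as a sketch that assumes the magnitude fits in 31 bits:

static s32 sm31_to_s32(u32 val)
{
	s32 mag = val & INT_MAX;

	return (val & BIT(31)) ? -mag : mag;
}

static u32 s32_to_sm31(s32 val)
{
	if (val < 0)
		return BIT(31) | (u32)-val;
	return val;
}
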
&dev_attr_available_sma_outputs.attr, + &dev_attr_clock_status_drift.attr, + &dev_attr_clock_status_offset.attr, &dev_attr_irig_b_mode.attr, &dev_attr_utc_tai_offset.attr, &dev_attr_ts_window_adjust.attr, + &dev_attr_tod_correction.attr, NULL, }; -ATTRIBUTE_GROUPS(timecard); +static const struct attribute_group fb_timecard_group = { + .attrs = fb_timecard_attrs, +}; +static const struct ocp_attr_group fb_timecard_groups[] = { + { .cap = OCP_CAP_BASIC, .group = &fb_timecard_group }, + { .cap = OCP_CAP_SIGNAL, .group = &fb_timecard_signal0_group }, + { .cap = OCP_CAP_SIGNAL, .group = &fb_timecard_signal1_group }, + { .cap = OCP_CAP_SIGNAL, .group = &fb_timecard_signal2_group }, + { .cap = OCP_CAP_SIGNAL, .group = &fb_timecard_signal3_group }, + { .cap = OCP_CAP_FREQ, .group = &fb_timecard_freq0_group }, + { .cap = OCP_CAP_FREQ, .group = &fb_timecard_freq1_group }, + { .cap = OCP_CAP_FREQ, .group = &fb_timecard_freq2_group }, + { .cap = OCP_CAP_FREQ, .group = &fb_timecard_freq3_group }, + { }, +}; -static const char * -gpio_map(u32 gpio, u32 bit, const char *pri, const char *sec, const char *def) +static void +gpio_input_map(char *buf, struct ptp_ocp *bp, u16 map[][2], u16 bit, + const char *def) { - const char *ans; + int i; - if (gpio & (1 << bit)) - ans = pri; - else if (gpio & (1 << (bit + 16))) - ans = sec; - else - ans = def; - return ans; + for (i = 0; i < 4; i++) { + if (bp->sma[i].mode != SMA_MODE_IN) + continue; + if (map[i][0] & (1 << bit)) { + sprintf(buf, "sma%d", i + 1); + return; + } + } + if (!def) + def = "----"; + strcpy(buf, def); } static void -gpio_multi_map(char *buf, u32 gpio, u32 bit, - const char *pri, const char *sec, const char *def) +gpio_output_map(char *buf, struct ptp_ocp *bp, u16 map[][2], u16 bit) { char *ans = buf; + int i; - strcpy(ans, def); - if (gpio & (1 << bit)) - ans += sprintf(ans, "%s ", pri); - if (gpio & (1 << (bit + 16))) - ans += sprintf(ans, "%s ", sec); + strcpy(ans, "----"); + for (i = 0; i < 4; i++) { + if (bp->sma[i].mode != SMA_MODE_OUT) + continue; + if (map[i][1] & (1 << bit)) + ans += sprintf(ans, "sma%d ", i + 1); + } +} + +static void +_signal_summary_show(struct seq_file *s, struct ptp_ocp *bp, int nr) +{ + struct signal_reg __iomem *reg = bp->signal_out[nr]->mem; + struct ptp_ocp_signal *signal = &bp->signal[nr]; + char label[8]; + bool on; + u32 val; + + if (!signal) + return; + + on = signal->running; + sprintf(label, "GEN%d", nr); + seq_printf(s, "%7s: %s, period:%llu duty:%d%% phase:%llu pol:%d", + label, on ? " ON" : "OFF", + signal->period, signal->duty, signal->phase, + signal->polarity); + + val = ioread32(®->enable); + seq_printf(s, " [%x", val); + val = ioread32(®->status); + seq_printf(s, " %x]", val); + + seq_printf(s, " start:%llu\n", signal->start); +} + +static void +_frequency_summary_show(struct seq_file *s, int nr, + struct frequency_reg __iomem *reg) +{ + char label[8]; + bool on; + u32 val; + + if (!reg) + return; + + sprintf(label, "FREQ%d", nr); + val = ioread32(®->ctrl); + on = val & 1; + val = (val >> 8) & 0xff; + seq_printf(s, "%7s: %s, sec:%u", + label, + on ? 
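The fb_timecard_groups table above pairs each attribute group with the firmware capability bit that must be present; ptp_ocp_complete(), later in this patch, walks the table and creates only the groups whose cap bit is set in bp->fw_cap, so boards with older firmware simply never expose the signal or frequency files. The consuming side reduced to a sketch (the struct layout matches the initializers used here, but the field widths are assumptions):

struct ocp_attr_group {
	u32 cap;			/* OCP_CAP_* bit required */
	const struct attribute_group *group;
};

static int register_capable_groups(struct kobject *kobj,
				   const struct ocp_attr_group *tbl,
				   u32 fw_cap)
{
	int i, err;

	for (i = 0; tbl[i].cap; i++) {	/* empty sentinel ends the table */
		if (!(tbl[i].cap & fw_cap))
			continue;
		err = sysfs_create_group(kobj, tbl[i].group);
		if (err)
			return err;
	}
	return 0;
}
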
" ON" : "OFF", + val); + + val = ioread32(®->status); + if (val & FREQ_STATUS_ERROR) + seq_printf(s, ", error"); + if (val & FREQ_STATUS_OVERRUN) + seq_printf(s, ", overrun"); + if (val & FREQ_STATUS_VALID) + seq_printf(s, ", freq %lu Hz", val & FREQ_STATUS_MASK); + seq_printf(s, " reg:%x\n", val); } static int @@ -2003,40 +2971,71 @@ ptp_ocp_summary_show(struct seq_file *s, void *data) { struct device *dev = s->private; struct ptp_system_timestamp sts; - u32 sma_in, sma_out, ctrl, val; + u16 sma_val[4][2], ctrl, val; struct ts_reg __iomem *ts_reg; struct timespec64 ts; struct ptp_ocp *bp; - const char *src; + char *src, *buf; bool on, map; - char *buf; + int i; buf = (char *)__get_free_page(GFP_KERNEL); if (!buf) return -ENOMEM; bp = dev_get_drvdata(dev); - sma_in = ioread32(&bp->sma->gpio1); - sma_out = ioread32(&bp->sma->gpio2); seq_printf(s, "%7s: /dev/ptp%d\n", "PTP", ptp_clock_index(bp->ptp)); + if (bp->gnss_port != -1) + seq_printf(s, "%7s: /dev/ttyS%d\n", "GNSS1", bp->gnss_port); + if (bp->gnss2_port != -1) + seq_printf(s, "%7s: /dev/ttyS%d\n", "GNSS2", bp->gnss2_port); + if (bp->mac_port != -1) + seq_printf(s, "%7s: /dev/ttyS%d\n", "MAC", bp->mac_port); + if (bp->nmea_port != -1) + seq_printf(s, "%7s: /dev/ttyS%d\n", "NMEA", bp->nmea_port); + + memset(sma_val, 0xff, sizeof(sma_val)); + if (bp->sma_map1) { + u32 reg; + + reg = ioread32(&bp->sma_map1->gpio1); + sma_val[0][0] = reg & 0xffff; + sma_val[1][0] = reg >> 16; + + reg = ioread32(&bp->sma_map1->gpio2); + sma_val[2][1] = reg & 0xffff; + sma_val[3][1] = reg >> 16; + + reg = ioread32(&bp->sma_map2->gpio1); + sma_val[2][0] = reg & 0xffff; + sma_val[3][0] = reg >> 16; + + reg = ioread32(&bp->sma_map2->gpio2); + sma_val[0][1] = reg & 0xffff; + sma_val[1][1] = reg >> 16; + } sma1_show(dev, NULL, buf); - seq_printf(s, " sma1: %s", buf); + seq_printf(s, " sma1: %04x,%04x %s", + sma_val[0][0], sma_val[0][1], buf); sma2_show(dev, NULL, buf); - seq_printf(s, " sma2: %s", buf); + seq_printf(s, " sma2: %04x,%04x %s", + sma_val[1][0], sma_val[1][1], buf); sma3_show(dev, NULL, buf); - seq_printf(s, " sma3: %s", buf); + seq_printf(s, " sma3: %04x,%04x %s", + sma_val[2][0], sma_val[2][1], buf); sma4_show(dev, NULL, buf); - seq_printf(s, " sma4: %s", buf); + seq_printf(s, " sma4: %04x,%04x %s", + sma_val[3][0], sma_val[3][1], buf); if (bp->ts0) { ts_reg = bp->ts0->mem; on = ioread32(&ts_reg->enable); - src = "GNSS"; + src = "GNSS1"; seq_printf(s, "%7s: %s, src: %s\n", "TS0", on ? " ON" : "OFF", src); } @@ -2044,17 +3043,33 @@ ptp_ocp_summary_show(struct seq_file *s, void *data) if (bp->ts1) { ts_reg = bp->ts1->mem; on = ioread32(&ts_reg->enable); - src = gpio_map(sma_in, 2, "sma1", "sma2", "----"); + gpio_input_map(buf, bp, sma_val, 2, NULL); seq_printf(s, "%7s: %s, src: %s\n", "TS1", - on ? " ON" : "OFF", src); + on ? " ON" : "OFF", buf); } if (bp->ts2) { ts_reg = bp->ts2->mem; on = ioread32(&ts_reg->enable); - src = gpio_map(sma_in, 3, "sma1", "sma2", "----"); + gpio_input_map(buf, bp, sma_val, 3, NULL); seq_printf(s, "%7s: %s, src: %s\n", "TS2", - on ? " ON" : "OFF", src); + on ? " ON" : "OFF", buf); + } + + if (bp->ts3) { + ts_reg = bp->ts3->mem; + on = ioread32(&ts_reg->enable); + gpio_input_map(buf, bp, sma_val, 6, NULL); + seq_printf(s, "%7s: %s, src: %s\n", "TS3", + on ? " ON" : "OFF", buf); + } + + if (bp->ts4) { + ts_reg = bp->ts4->mem; + on = ioread32(&ts_reg->enable); + gpio_input_map(buf, bp, sma_val, 7, NULL); + seq_printf(s, "%7s: %s, src: %s\n", "TS4", + on ? 
" ON" : "OFF", buf); } if (bp->pps) { @@ -2062,7 +3077,7 @@ ptp_ocp_summary_show(struct seq_file *s, void *data) src = "PHC"; on = ioread32(&ts_reg->enable); map = !!(bp->pps_req_map & OCP_REQ_TIMESTAMP); - seq_printf(s, "%7s: %s, src: %s\n", "TS3", + seq_printf(s, "%7s: %s, src: %s\n", "TS5", on && map ? " ON" : "OFF", src); map = !!(bp->pps_req_map & OCP_REQ_PPS); @@ -2070,11 +3085,19 @@ ptp_ocp_summary_show(struct seq_file *s, void *data) on && map ? " ON" : "OFF", src); } + if (bp->fw_cap & OCP_CAP_SIGNAL) + for (i = 0; i < 4; i++) + _signal_summary_show(s, bp, i); + + if (bp->fw_cap & OCP_CAP_FREQ) + for (i = 0; i < 4; i++) + _frequency_summary_show(s, i, bp->freq_in[i]); + if (bp->irig_out) { ctrl = ioread32(&bp->irig_out->ctrl); on = ctrl & IRIG_M_CTRL_ENABLE; val = ioread32(&bp->irig_out->status); - gpio_multi_map(buf, sma_out, 4, "sma3", "sma4", "----"); + gpio_output_map(buf, bp, sma_val, 4); seq_printf(s, "%7s: %s, error: %d, mode %d, out: %s\n", "IRIG", on ? " ON" : "OFF", val, (ctrl >> 16), buf); } @@ -2082,15 +3105,15 @@ ptp_ocp_summary_show(struct seq_file *s, void *data) if (bp->irig_in) { on = ioread32(&bp->irig_in->ctrl) & IRIG_S_CTRL_ENABLE; val = ioread32(&bp->irig_in->status); - src = gpio_map(sma_in, 4, "sma1", "sma2", "----"); + gpio_input_map(buf, bp, sma_val, 4, NULL); seq_printf(s, "%7s: %s, error: %d, src: %s\n", "IRIG in", - on ? " ON" : "OFF", val, src); + on ? " ON" : "OFF", val, buf); } if (bp->dcf_out) { on = ioread32(&bp->dcf_out->ctrl) & DCF_M_CTRL_ENABLE; val = ioread32(&bp->dcf_out->status); - gpio_multi_map(buf, sma_out, 5, "sma3", "sma4", "----"); + gpio_output_map(buf, bp, sma_val, 5); seq_printf(s, "%7s: %s, error: %d, out: %s\n", "DCF", on ? " ON" : "OFF", val, buf); } @@ -2098,9 +3121,9 @@ ptp_ocp_summary_show(struct seq_file *s, void *data) if (bp->dcf_in) { on = ioread32(&bp->dcf_in->ctrl) & DCF_S_CTRL_ENABLE; val = ioread32(&bp->dcf_in->status); - src = gpio_map(sma_in, 5, "sma1", "sma2", "----"); + gpio_input_map(buf, bp, sma_val, 5, NULL); seq_printf(s, "%7s: %s, error: %d, src: %s\n", "DCF in", - on ? " ON" : "OFF", val, src); + on ? " ON" : "OFF", val, buf); } if (bp->nmea_out) { @@ -2113,12 +3136,13 @@ ptp_ocp_summary_show(struct seq_file *s, void *data) /* compute src for PPS1, used below. */ if (bp->pps_select) { val = ioread32(&bp->pps_select->gpio1); + src = &buf[80]; if (val & 0x01) - src = gpio_map(sma_in, 0, "sma1", "sma2", "----"); + gpio_input_map(src, bp, sma_val, 0, NULL); else if (val & 0x02) src = "MAC"; else if (val & 0x04) - src = "GNSS"; + src = "GNSS1"; else src = "----"; } else { @@ -2151,8 +3175,8 @@ ptp_ocp_summary_show(struct seq_file *s, void *data) /* reuses PPS1 src from earlier */ seq_printf(s, "MAC PPS1 src: %s\n", src); - src = gpio_map(sma_in, 1, "sma1", "sma2", "GNSS2"); - seq_printf(s, "MAC PPS2 src: %s\n", src); + gpio_input_map(buf, bp, sma_val, 1, "GNSS2"); + seq_printf(s, "MAC PPS2 src: %s\n", buf); if (!ptp_ocp_gettimex(&bp->ptp_info, &ts, &sts)) { struct timespec64 sys_ts; @@ -2179,6 +3203,57 @@ ptp_ocp_summary_show(struct seq_file *s, void *data) } DEFINE_SHOW_ATTRIBUTE(ptp_ocp_summary); +static int +ptp_ocp_tod_status_show(struct seq_file *s, void *data) +{ + struct device *dev = s->private; + struct ptp_ocp *bp; + u32 val; + int idx; + + bp = dev_get_drvdata(dev); + + val = ioread32(&bp->tod->ctrl); + if (!(val & TOD_CTRL_ENABLE)) { + seq_printf(s, "TOD Slave disabled\n"); + return 0; + } + seq_printf(s, "TOD Slave enabled, Control Register 0x%08X\n", val); + + idx = val & TOD_CTRL_PROTOCOL ? 
4 : 0; + idx += (val >> 16) & 3; + seq_printf(s, "Protocol %s\n", ptp_ocp_tod_proto_name(idx)); + + idx = (val >> TOD_CTRL_GNSS_SHIFT) & TOD_CTRL_GNSS_MASK; + seq_printf(s, "GNSS %s\n", ptp_ocp_tod_gnss_name(idx)); + + val = ioread32(&bp->tod->version); + seq_printf(s, "TOD Version %d.%d.%d\n", + val >> 24, (val >> 16) & 0xff, val & 0xffff); + + val = ioread32(&bp->tod->status); + seq_printf(s, "Status register: 0x%08X\n", val); + + val = ioread32(&bp->tod->adj_sec); + idx = (val & ~INT_MAX) ? -1 : 1; + idx *= (val & INT_MAX); + seq_printf(s, "Correction seconds: %d\n", idx); + + val = ioread32(&bp->tod->utc_status); + seq_printf(s, "UTC status register: 0x%08X\n", val); + seq_printf(s, "UTC offset: %d valid:%d\n", + val & TOD_STATUS_UTC_MASK, val & TOD_STATUS_UTC_VALID ? 1 : 0); + seq_printf(s, "Leap second info valid:%d, Leap second announce %d\n", + val & TOD_STATUS_LEAP_VALID ? 1 : 0, + val & TOD_STATUS_LEAP_ANNOUNCE ? 1 : 0); + + val = ioread32(&bp->tod->leap); + seq_printf(s, "Time to next leap second (in sec): %d\n", (s32) val); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(ptp_ocp_tod_status); + static struct dentry *ptp_ocp_debugfs_root; static void @@ -2190,6 +3265,9 @@ ptp_ocp_debugfs_add_device(struct ptp_ocp *bp) bp->debug_root = d; debugfs_create_file("summary", 0444, bp->debug_root, &bp->dev, &ptp_ocp_summary_fops); + if (bp->tod) + debugfs_create_file("tod_status", 0444, bp->debug_root, + &bp->dev, &ptp_ocp_tod_status_fops); } static void @@ -2296,6 +3374,7 @@ ptp_ocp_complete(struct ptp_ocp *bp) { struct pps_device *pps; char buf[32]; + int i, err; if (bp->gnss_port != -1) { sprintf(buf, "ttyS%d", bp->gnss_port); @@ -2320,8 +3399,13 @@ ptp_ocp_complete(struct ptp_ocp *bp) if (pps) ptp_ocp_symlink(bp, pps->dev, "pps"); - if (device_add_groups(&bp->dev, timecard_groups)) - pr_err("device add groups failed\n"); + for (i = 0; bp->attr_tbl[i].cap; i++) { + if (!(bp->attr_tbl[i].cap & bp->fw_cap)) + continue; + err = sysfs_create_group(&bp->dev.kobj, bp->attr_tbl[i].group); + if (err) + return err; + } ptp_ocp_debugfs_add_device(bp); @@ -2368,20 +3452,15 @@ ptp_ocp_info(struct ptp_ocp *bp) u32 reg; ptp_ocp_phc_info(bp); - if (bp->tod) - ptp_ocp_tod_info(bp); - if (bp->image) { - u32 ver = ioread32(&bp->image->version); + dev_info(dev, "version %x\n", bp->fw_version); + if (bp->fw_version & 0xffff) + dev_info(dev, "regular image, version %d\n", + bp->fw_version & 0xffff); + else + dev_info(dev, "golden image, version %d\n", + bp->fw_version >> 16); - dev_info(dev, "version %x\n", ver); - if (ver & 0xffff) - dev_info(dev, "regular image, version %d\n", - ver & 0xffff); - else - dev_info(dev, "golden image, version %d\n", - ver >> 16); - } ptp_ocp_serial_info(dev, "GNSS", bp->gnss_port, 115200); ptp_ocp_serial_info(dev, "GNSS2", bp->gnss2_port, 115200); ptp_ocp_serial_info(dev, "MAC", bp->mac_port, 57600); @@ -2399,17 +3478,22 @@ static void ptp_ocp_detach_sysfs(struct ptp_ocp *bp) { struct device *dev = &bp->dev; + int i; sysfs_remove_link(&dev->kobj, "ttyGNSS"); sysfs_remove_link(&dev->kobj, "ttyMAC"); sysfs_remove_link(&dev->kobj, "ptp"); sysfs_remove_link(&dev->kobj, "pps"); - device_remove_groups(dev, timecard_groups); + if (bp->attr_tbl) + for (i = 0; bp->attr_tbl[i].cap; i++) + sysfs_remove_group(&dev->kobj, bp->attr_tbl[i].group); } static void ptp_ocp_detach(struct ptp_ocp *bp) { + int i; + ptp_ocp_debugfs_remove_device(bp); ptp_ocp_detach_sysfs(bp); if (timer_pending(&bp->watchdog)) @@ -2420,8 +3504,15 @@ ptp_ocp_detach(struct ptp_ocp *bp) ptp_ocp_unregister_ext(bp->ts1); if 
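The image-version convention used by ptp_ocp_info() above (and by the capability gating in ptp_ocp_fb_board_init() earlier) is: a non-zero low half-word identifies a regular application image with its version there, while a zero low half-word means the card booted the fallback "golden" image, whose version lives in the high half-word. As two small helpers:

static bool fw_is_golden(u32 fw_version)
{
	return !(fw_version & 0xffff);
}

static u16 fw_image_version(u32 fw_version)
{
	return fw_is_golden(fw_version) ? fw_version >> 16
					: fw_version & 0xffff;
}
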
(bp->ts2) ptp_ocp_unregister_ext(bp->ts2); + if (bp->ts3) + ptp_ocp_unregister_ext(bp->ts3); + if (bp->ts4) + ptp_ocp_unregister_ext(bp->ts4); if (bp->pps) ptp_ocp_unregister_ext(bp->pps); + for (i = 0; i < 4; i++) + if (bp->signal_out[i]) + ptp_ocp_unregister_ext(bp->signal_out[i]); if (bp->gnss_port != -1) serial8250_unregister_port(bp->gnss_port); if (bp->gnss2_port != -1) @@ -2440,6 +3531,7 @@ ptp_ocp_detach(struct ptp_ocp *bp) pci_free_irq_vectors(bp->pdev); if (bp->ptp) ptp_clock_unregister(bp->ptp); + kfree(bp->ptp_info.pin_config); device_unregister(&bp->dev); } @@ -2459,7 +3551,7 @@ ptp_ocp_probe(struct pci_dev *pdev, const struct pci_device_id *id) err = pci_enable_device(pdev); if (err) { dev_err(&pdev->dev, "pci_enable_device\n"); - goto out_unregister; + goto out_free; } bp = devlink_priv(devlink); @@ -2472,7 +3564,7 @@ ptp_ocp_probe(struct pci_dev *pdev, const struct pci_device_id *id) * allow this - if not all of the IRQ's are returned, skip the * extra devices and just register the clock. */ - err = pci_alloc_irq_vectors(pdev, 1, 11, PCI_IRQ_MSI | PCI_IRQ_MSIX); + err = pci_alloc_irq_vectors(pdev, 1, 17, PCI_IRQ_MSI | PCI_IRQ_MSIX); if (err < 0) { dev_err(&pdev->dev, "alloc_irq_vectors err: %d\n", err); goto out; @@ -2505,7 +3597,7 @@ out: pci_set_drvdata(pdev, NULL); out_disable: pci_disable_device(pdev); -out_unregister: +out_free: devlink_free(devlink); return err; } diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 86aa4141efa9..d2553970a67b 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -6014,9 +6014,8 @@ core_initcall(regulator_init); static int regulator_late_cleanup(struct device *dev, void *data) { struct regulator_dev *rdev = dev_to_rdev(dev); - const struct regulator_ops *ops = rdev->desc->ops; struct regulation_constraints *c = rdev->constraints; - int enabled, ret; + int ret; if (c && c->always_on) return 0; @@ -6029,14 +6028,8 @@ static int regulator_late_cleanup(struct device *dev, void *data) if (rdev->use_count) goto unlock; - /* If we can't read the status assume it's always on. */ - if (ops->is_enabled) - enabled = ops->is_enabled(rdev); - else - enabled = 1; - - /* But if reading the status failed, assume that it's off. */ - if (enabled <= 0) + /* If reading the status failed, assume that it's off. 
*/ + if (_regulator_is_enabled(rdev) <= 0) goto unlock; if (have_full_constraints()) { diff --git a/drivers/regulator/da9121-regulator.c b/drivers/regulator/da9121-regulator.c index 6f21223a488e..eb9df485bd8a 100644 --- a/drivers/regulator/da9121-regulator.c +++ b/drivers/regulator/da9121-regulator.c @@ -87,16 +87,16 @@ static struct da9121_range da9121_3A_1phase_current = { }; static struct da9121_range da914x_40A_4phase_current = { - .val_min = 14000000, - .val_max = 80000000, - .val_stp = 2000000, + .val_min = 26000000, + .val_max = 78000000, + .val_stp = 4000000, .reg_min = 1, .reg_max = 14, }; static struct da9121_range da914x_20A_2phase_current = { - .val_min = 7000000, - .val_max = 40000000, + .val_min = 13000000, + .val_max = 39000000, .val_stp = 2000000, .reg_min = 1, .reg_max = 14, @@ -561,7 +561,7 @@ static const struct regulator_desc da9217_reg = { }; #define DA914X_MIN_MV 500 -#define DA914X_MAX_MV 1000 +#define DA914X_MAX_MV 1300 #define DA914X_STEP_MV 10 #define DA914X_MIN_SEL (DA914X_MIN_MV / DA914X_STEP_MV) #define DA914X_N_VOLTAGES (((DA914X_MAX_MV - DA914X_MIN_MV) / DA914X_STEP_MV) \ @@ -585,10 +585,6 @@ static const struct regulator_desc da9141_reg = { .vsel_mask = DA9121_MASK_BUCK_BUCKx_5_CHx_A_VOUT, .enable_reg = DA9121_REG_BUCK_BUCK1_0, .enable_mask = DA9121_MASK_BUCK_BUCKx_0_CHx_EN, - /* Default value of BUCK_BUCK1_0.CH1_SRC_DVC_UP */ - .ramp_delay = 20000, - /* tBUCK_EN */ - .enable_time = 20, }; static const struct regulator_desc da9142_reg = { diff --git a/drivers/rtc/rtc-ds1302.c b/drivers/rtc/rtc-ds1302.c index 2f83adef966e..6d66ab5a8b17 100644 --- a/drivers/rtc/rtc-ds1302.c +++ b/drivers/rtc/rtc-ds1302.c @@ -185,10 +185,9 @@ static int ds1302_probe(struct spi_device *spi) return 0; } -static int ds1302_remove(struct spi_device *spi) +static void ds1302_remove(struct spi_device *spi) { spi_set_drvdata(spi, NULL); - return 0; } #ifdef CONFIG_OF diff --git a/drivers/rtc/rtc-ds1305.c b/drivers/rtc/rtc-ds1305.c index 9ef107b99b65..ed9360486953 100644 --- a/drivers/rtc/rtc-ds1305.c +++ b/drivers/rtc/rtc-ds1305.c @@ -720,7 +720,7 @@ static int ds1305_probe(struct spi_device *spi) return 0; } -static int ds1305_remove(struct spi_device *spi) +static void ds1305_remove(struct spi_device *spi) { struct ds1305 *ds1305 = spi_get_drvdata(spi); @@ -730,8 +730,6 @@ static int ds1305_remove(struct spi_device *spi) devm_free_irq(&spi->dev, spi->irq, ds1305); cancel_work_sync(&ds1305->work); } - - return 0; } static struct spi_driver ds1305_driver = { diff --git a/drivers/rtc/rtc-ds1343.c b/drivers/rtc/rtc-ds1343.c index f14ed6c96437..ed5a6ba89a3e 100644 --- a/drivers/rtc/rtc-ds1343.c +++ b/drivers/rtc/rtc-ds1343.c @@ -434,11 +434,9 @@ static int ds1343_probe(struct spi_device *spi) return 0; } -static int ds1343_remove(struct spi_device *spi) +static void ds1343_remove(struct spi_device *spi) { dev_pm_clear_wake_irq(&spi->dev); - - return 0; } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index cd938a26b76c..3b1cd0c96a74 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -1180,7 +1180,7 @@ static int io_subchannel_chp_event(struct subchannel *sch, else path_event[chpid] = PE_NONE; } - if (cdev) + if (cdev && cdev->drv && cdev->drv->path_event) cdev->drv->path_event(cdev, path_event); break; } diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c index 5ea7eeb07002..e0fdd54bfeb7 100644 --- a/drivers/s390/net/ctcm_main.c +++ b/drivers/s390/net/ctcm_main.c @@ -166,7 +166,7 @@ void 
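The ds1302/ds1305/ds1343 hunks above are instances of a tree-wide conversion repeated throughout this section (spi-mem, the spi-slave helpers, spidev, tle62x0, fbtft): spi_driver.remove now returns void, since the core could only warn about and then discard a non-zero return, as the spi_remove() hunk in drivers/spi/spi.c further down shows. The new callback shape, with placeholder names:

static void example_remove(struct spi_device *spi)
{
	/* tear down; errors can only be logged, not returned */
}

static struct spi_driver example_driver = {
	.driver = { .name = "example" },
	.remove = example_remove,	/* void, not int */
};
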
ctcm_unpack_skb(struct channel *ch, struct sk_buff *pskb) ch->logflags = 0; priv->stats.rx_packets++; priv->stats.rx_bytes += skblen; - netif_rx_ni(skb); + netif_rx(skb); if (len > 0) { skb_pull(pskb, header->length); if (skb_tailroom(pskb) < LL_HEADER_LENGTH) { diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c index a61d38a1b4ed..bab9b34926c6 100644 --- a/drivers/s390/net/lcs.c +++ b/drivers/s390/net/lcs.c @@ -223,7 +223,7 @@ lcs_setup_read_ccws(struct lcs_card *card) * we do not need to do set_normalized_cda. */ card->read.ccws[cnt].cda = - (__u32) __pa(card->read.iob[cnt].data); + (__u32)virt_to_phys(card->read.iob[cnt].data); ((struct lcs_header *) card->read.iob[cnt].data)->offset = LCS_ILLEGAL_OFFSET; card->read.iob[cnt].callback = lcs_get_frames_cb; @@ -236,7 +236,7 @@ lcs_setup_read_ccws(struct lcs_card *card) /* Last ccw is a tic (transfer in channel). */ card->read.ccws[LCS_NUM_BUFFS].cmd_code = LCS_CCW_TRANSFER; card->read.ccws[LCS_NUM_BUFFS].cda = - (__u32) __pa(card->read.ccws); + (__u32)virt_to_phys(card->read.ccws); /* Setg initial state of the read channel. */ card->read.state = LCS_CH_STATE_INIT; @@ -278,12 +278,12 @@ lcs_setup_write_ccws(struct lcs_card *card) * we do not need to do set_normalized_cda. */ card->write.ccws[cnt].cda = - (__u32) __pa(card->write.iob[cnt].data); + (__u32)virt_to_phys(card->write.iob[cnt].data); } /* Last ccw is a tic (transfer in channel). */ card->write.ccws[LCS_NUM_BUFFS].cmd_code = LCS_CCW_TRANSFER; card->write.ccws[LCS_NUM_BUFFS].cda = - (__u32) __pa(card->write.ccws); + (__u32)virt_to_phys(card->write.ccws); /* Set initial state of the write channel. */ card->read.state = LCS_CH_STATE_INIT; diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c index 981e7b1c6b96..65aa0a96c21d 100644 --- a/drivers/s390/net/netiucv.c +++ b/drivers/s390/net/netiucv.c @@ -620,11 +620,7 @@ static void netiucv_unpack_skb(struct iucv_connection *conn, pskb->ip_summed = CHECKSUM_UNNECESSARY; privptr->stats.rx_packets++; privptr->stats.rx_bytes += skb->len; - /* - * Since receiving is always initiated from a tasklet (in iucv.c), - * we must use netif_rx_ni() instead of netif_rx() - */ - netif_rx_ni(skb); + netif_rx(skb); skb_pull(pskb, header->next); skb_put(pskb, NETIUCV_HDRLEN); } diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 29f0111f8e11..d99c5b773e22 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -426,7 +426,7 @@ static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len, ccw->cmd_code = cmd_code; ccw->flags = flags | CCW_FLAG_SLI; ccw->count = len; - ccw->cda = (__u32) __pa(data); + ccw->cda = (__u32)virt_to_phys(data); } static int __qeth_issue_next_read(struct qeth_card *card) diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 9251ad276ee8..d2f422a9a4f7 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -1961,7 +1961,6 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev) if (card->dev->reg_state == NETREG_REGISTERED) unregister_netdev(card->dev); - flush_workqueue(card->cmd_wq); destroy_workqueue(card->cmd_wq); qeth_l3_clear_ip_htable(card, 0); qeth_l3_clear_ipato_list(card); diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 4878c94761f9..98cabe09c040 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -592,6 +592,7 @@ struct lpfc_vport { #define FC_VPORT_LOGO_RCVD 0x200 /* LOGO received on vport */ #define 
FC_RSCN_DISCOVERY 0x400 /* Auth all devices after RSCN */ #define FC_LOGO_RCVD_DID_CHNG 0x800 /* FDISC on phys port detect DID chng*/ +#define FC_PT2PT_NO_NVME 0x1000 /* Don't send NVME PRLI */ #define FC_SCSI_SCAN_TMO 0x4000 /* scsi scan timer running */ #define FC_ABORT_DISCOVERY 0x8000 /* we want to abort discovery */ #define FC_NDISC_ACTIVE 0x10000 /* NPort discovery active */ @@ -1161,6 +1162,16 @@ struct lpfc_hba { uint32_t cfg_hostmem_hgp; uint32_t cfg_log_verbose; uint32_t cfg_enable_fc4_type; +#define LPFC_ENABLE_FCP 1 +#define LPFC_ENABLE_NVME 2 +#define LPFC_ENABLE_BOTH 3 +#if (IS_ENABLED(CONFIG_NVME_FC)) +#define LPFC_MAX_ENBL_FC4_TYPE LPFC_ENABLE_BOTH +#define LPFC_DEF_ENBL_FC4_TYPE LPFC_ENABLE_BOTH +#else +#define LPFC_MAX_ENBL_FC4_TYPE LPFC_ENABLE_FCP +#define LPFC_DEF_ENBL_FC4_TYPE LPFC_ENABLE_FCP +#endif uint32_t cfg_aer_support; uint32_t cfg_sriov_nr_virtfn; uint32_t cfg_request_firmware_upgrade; @@ -1182,9 +1193,6 @@ struct lpfc_hba { uint32_t cfg_ras_fwlog_func; uint32_t cfg_enable_bbcr; /* Enable BB Credit Recovery */ uint32_t cfg_enable_dpp; /* Enable Direct Packet Push */ -#define LPFC_ENABLE_FCP 1 -#define LPFC_ENABLE_NVME 2 -#define LPFC_ENABLE_BOTH 3 uint32_t cfg_enable_pbde; uint32_t cfg_enable_mi; struct nvmet_fc_target_port *targetport; diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 7a7f17d71811..fa8415259cb8 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -1315,6 +1315,9 @@ lpfc_issue_lip(struct Scsi_Host *shost) pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK; pmboxq->u.mb.mbxOwner = OWN_HOST; + if ((vport->fc_flag & FC_PT2PT) && (vport->fc_flag & FC_PT2PT_NO_NVME)) + vport->fc_flag &= ~FC_PT2PT_NO_NVME; + mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2); if ((mbxstatus == MBX_SUCCESS) && @@ -3978,8 +3981,8 @@ LPFC_ATTR_R(nvmet_mrq_post, * 3 - register both FCP and NVME * Supported values are [1,3]. 
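One observation on the lpfc.h block above: with CONFIG_NVME_FC disabled, both LPFC_DEF_ENBL_FC4_TYPE and LPFC_MAX_ENBL_FC4_TYPE collapse to LPFC_ENABLE_FCP, so the enable_fc4_type module parameter can no longer even request NVME on kernels that cannot provide it; the "[1,3]" range quoted in the surrounding comment therefore only holds when NVME_FC is built in. The compile-time clamp pattern in isolation, with illustrative macro names:

#if IS_ENABLED(CONFIG_NVME_FC)
#define EXAMPLE_FC4_DEFAULT	3	/* FCP + NVME */
#define EXAMPLE_FC4_MAX		3
#else
#define EXAMPLE_FC4_DEFAULT	1	/* FCP only */
#define EXAMPLE_FC4_MAX		1
#endif
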
Default value is 3 */ -LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_BOTH, - LPFC_ENABLE_FCP, LPFC_ENABLE_BOTH, +LPFC_ATTR_R(enable_fc4_type, LPFC_DEF_ENBL_FC4_TYPE, + LPFC_ENABLE_FCP, LPFC_MAX_ENBL_FC4_TYPE, "Enable FC4 Protocol support - FCP / NVME"); /* diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index db5ccae1b63d..f936833c9909 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -1072,7 +1072,8 @@ stop_rr_fcf_flogi: /* FLOGI failed, so there is no fabric */ spin_lock_irq(shost->host_lock); - vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); + vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP | + FC_PT2PT_NO_NVME); spin_unlock_irq(shost->host_lock); /* If private loop, then allow max outstanding els to be @@ -4607,6 +4608,23 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* Added for Vendor specifc support * Just keep retrying for these Rsn / Exp codes */ + if ((vport->fc_flag & FC_PT2PT) && + cmd == ELS_CMD_NVMEPRLI) { + switch (stat.un.b.lsRjtRsnCode) { + case LSRJT_UNABLE_TPC: + case LSRJT_INVALID_CMD: + case LSRJT_LOGICAL_ERR: + case LSRJT_CMD_UNSUPPORTED: + lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, + "0168 NVME PRLI LS_RJT " + "reason %x port doesn't " + "support NVME, disabling NVME\n", + stat.un.b.lsRjtRsnCode); + retry = 0; + vport->fc_flag |= FC_PT2PT_NO_NVME; + goto out_retry; + } + } switch (stat.un.b.lsRjtRsnCode) { case LSRJT_UNABLE_TPC: /* The driver has a VALID PLOGI but the rport has diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index a56f01f659f8..558f7d2559c4 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -2104,7 +2104,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba) } if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) { - lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "3143 Port Down: Firmware Update " "Detected\n"); en_rn_msg = false; diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 7d717a4ac14d..fdf5e777bf11 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -1961,8 +1961,9 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport, * is configured try it. 
*/ ndlp->nlp_fc4_type |= NLP_FC4_FCP; - if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || - (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) { + if ((!(vport->fc_flag & FC_PT2PT_NO_NVME)) && + (vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH || + vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) { ndlp->nlp_fc4_type |= NLP_FC4_NVME; /* We need to update the localport also */ lpfc_nvme_update_localport(vport); diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 1bc0db572d9e..430abebf99f1 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -13363,6 +13363,7 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba) uint32_t uerr_sta_hi, uerr_sta_lo; uint32_t if_type, portsmphr; struct lpfc_register portstat_reg; + u32 logmask; /* * For now, use the SLI4 device internal unrecoverable error @@ -13413,7 +13414,12 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba) readl(phba->sli4_hba.u.if_type2.ERR1regaddr); phba->work_status[1] = readl(phba->sli4_hba.u.if_type2.ERR2regaddr); - lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + logmask = LOG_TRACE_EVENT; + if (phba->work_status[0] == + SLIPORT_ERR1_REG_ERR_CODE_2 && + phba->work_status[1] == SLIPORT_ERR2_REG_FW_RESTART) + logmask = LOG_SLI; + lpfc_printf_log(phba, KERN_ERR, logmask, "2885 Port Status Event: " "port status reg 0x%x, " "port smphr reg 0x%x, " diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c index 5916ed7662d5..4eb89aa4a39d 100644 --- a/drivers/scsi/qedi/qedi_fw.c +++ b/drivers/scsi/qedi/qedi_fw.c @@ -771,11 +771,10 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi, qedi_cmd->list_tmf_work = NULL; } } + spin_unlock_bh(&qedi_conn->tmf_work_lock); - if (!found) { - spin_unlock_bh(&qedi_conn->tmf_work_lock); + if (!found) goto check_cleanup_reqs; - } QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, "TMF work, cqe->tid=0x%x, tmf flags=0x%x, cid=0x%x\n", @@ -806,7 +805,6 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi, qedi_cmd->state = CLEANUP_RECV; unlock: spin_unlock_bh(&conn->session->back_lock); - spin_unlock_bh(&qedi_conn->tmf_work_lock); wake_up_interruptible(&qedi_conn->wait_queue); return; diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 50b12d60dc1b..9349557b8a01 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -2681,7 +2681,7 @@ static int ufshcd_map_queues(struct Scsi_Host *shost) break; case HCTX_TYPE_READ: map->nr_queues = 0; - break; + continue; default: WARN_ON_ONCE(true); } diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c index 12c10a5e3d93..7f421600cb66 100644 --- a/drivers/scsi/xen-scsifront.c +++ b/drivers/scsi/xen-scsifront.c @@ -233,12 +233,11 @@ static void scsifront_gnttab_done(struct vscsifrnt_info *info, return; for (i = 0; i < shadow->nr_grants; i++) { - if (unlikely(gnttab_query_foreign_access(shadow->gref[i]))) { + if (unlikely(!gnttab_try_end_foreign_access(shadow->gref[i]))) { shost_printk(KERN_ALERT, info->host, KBUILD_MODNAME "grant still in use by backend\n"); BUG(); } - gnttab_end_foreign_access(shadow->gref[i], 0, 0UL); } kfree(shadow->sg); diff --git a/drivers/soc/aspeed/aspeed-lpc-ctrl.c b/drivers/soc/aspeed/aspeed-lpc-ctrl.c index 72771e018c42..258894ed234b 100644 --- a/drivers/soc/aspeed/aspeed-lpc-ctrl.c +++ b/drivers/soc/aspeed/aspeed-lpc-ctrl.c @@ -306,10 +306,9 @@ static int aspeed_lpc_ctrl_probe(struct platform_device *pdev) } lpc_ctrl->clk = devm_clk_get(dev, NULL); - if (IS_ERR(lpc_ctrl->clk)) { - dev_err(dev, "couldn't 
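This aspeed-lpc-ctrl hunk is the standard dev_err_probe() conversion: the helper returns the error passed to it, and when that error is -EPROBE_DEFER (which devm_clk_get() can legitimately return) it logs at debug level and records a deferral reason instead of spamming the console. The shape of the conversion as a self-contained sketch, with an illustrative helper name:

static int example_get_clk(struct device *dev, struct clk **clk)
{
	*clk = devm_clk_get(dev, NULL);
	if (IS_ERR(*clk))
		return dev_err_probe(dev, PTR_ERR(*clk),
				     "couldn't get clock\n");
	return 0;
}

The net effect is one line per failure path and quieter deferred probes.
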
get clock\n"); - return PTR_ERR(lpc_ctrl->clk); - } + if (IS_ERR(lpc_ctrl->clk)) + return dev_err_probe(dev, PTR_ERR(lpc_ctrl->clk), + "couldn't get clock\n"); rc = clk_prepare_enable(lpc_ctrl->clk); if (rc) { dev_err(dev, "couldn't enable clock\n"); diff --git a/drivers/soc/fsl/guts.c b/drivers/soc/fsl/guts.c index 072473a16f4d..5ed2fc1c53a0 100644 --- a/drivers/soc/fsl/guts.c +++ b/drivers/soc/fsl/guts.c @@ -28,7 +28,6 @@ struct fsl_soc_die_attr { static struct guts *guts; static struct soc_device_attribute soc_dev_attr; static struct soc_device *soc_dev; -static struct device_node *root; /* SoC die attribute definition for QorIQ platform */ @@ -138,7 +137,7 @@ static u32 fsl_guts_get_svr(void) static int fsl_guts_probe(struct platform_device *pdev) { - struct device_node *np = pdev->dev.of_node; + struct device_node *root, *np = pdev->dev.of_node; struct device *dev = &pdev->dev; const struct fsl_soc_die_attr *soc_die; const char *machine; @@ -159,8 +158,14 @@ static int fsl_guts_probe(struct platform_device *pdev) root = of_find_node_by_path("/"); if (of_property_read_string(root, "model", &machine)) of_property_read_string_index(root, "compatible", 0, &machine); - if (machine) - soc_dev_attr.machine = machine; + if (machine) { + soc_dev_attr.machine = devm_kstrdup(dev, machine, GFP_KERNEL); + if (!soc_dev_attr.machine) { + of_node_put(root); + return -ENOMEM; + } + } + of_node_put(root); svr = fsl_guts_get_svr(); soc_die = fsl_soc_die_match(svr, fsl_soc_die); @@ -195,7 +200,6 @@ static int fsl_guts_probe(struct platform_device *pdev) static int fsl_guts_remove(struct platform_device *dev) { soc_device_unregister(soc_dev); - of_node_put(root); return 0; } diff --git a/drivers/soc/fsl/qe/qe.c b/drivers/soc/fsl/qe/qe.c index 4d38c80f8be8..b3c226eb5292 100644 --- a/drivers/soc/fsl/qe/qe.c +++ b/drivers/soc/fsl/qe/qe.c @@ -147,7 +147,7 @@ EXPORT_SYMBOL(qe_issue_cmd); * memory mapped space. * The BRG clock is the QE clock divided by 2. * It was set up long ago during the initial boot phase and is - * is given to us. + * given to us. * Baud rate clocks are zero-based in the driver code (as that maps * to port numbers). Documentation uses 1-based numbering. 
*/ @@ -421,7 +421,7 @@ static void qe_upload_microcode(const void *base, for (i = 0; i < be32_to_cpu(ucode->count); i++) iowrite32be(be32_to_cpu(code[i]), &qe_immr->iram.idata); - + /* Set I-RAM Ready Register */ iowrite32be(QE_IRAM_READY, &qe_immr->iram.iready); } diff --git a/drivers/soc/fsl/qe/qe_io.c b/drivers/soc/fsl/qe/qe_io.c index e277c827bdf3..a5e2d0e5ab51 100644 --- a/drivers/soc/fsl/qe/qe_io.c +++ b/drivers/soc/fsl/qe/qe_io.c @@ -35,6 +35,8 @@ int par_io_init(struct device_node *np) if (ret) return ret; par_io = ioremap(res.start, resource_size(&res)); + if (!par_io) + return -ENOMEM; if (!of_property_read_u32(np, "num-ports", &num_ports)) num_par_io_ports = num_ports; diff --git a/drivers/soc/imx/gpcv2.c b/drivers/soc/imx/gpcv2.c index 3e59d479d001..3cb123016b3e 100644 --- a/drivers/soc/imx/gpcv2.c +++ b/drivers/soc/imx/gpcv2.c @@ -382,7 +382,8 @@ static int imx_pgc_power_down(struct generic_pm_domain *genpd) return 0; out_clk_disable: - clk_bulk_disable_unprepare(domain->num_clks, domain->clks); + if (!domain->keep_clocks) + clk_bulk_disable_unprepare(domain->num_clks, domain->clks); return ret; } diff --git a/drivers/soc/mediatek/mt8192-mmsys.h b/drivers/soc/mediatek/mt8192-mmsys.h index 6f0a57044a7b..6aae0b12b6ff 100644 --- a/drivers/soc/mediatek/mt8192-mmsys.h +++ b/drivers/soc/mediatek/mt8192-mmsys.h @@ -53,7 +53,8 @@ static const struct mtk_mmsys_routes mmsys_mt8192_routing_table[] = { MT8192_AAL0_SEL_IN_CCORR0 }, { DDP_COMPONENT_DITHER, DDP_COMPONENT_DSI0, - MT8192_DISP_DSI0_SEL_IN, MT8192_DSI0_SEL_IN_DITHER0 + MT8192_DISP_DSI0_SEL_IN, MT8192_DSI0_SEL_IN_DITHER0, + MT8192_DSI0_SEL_IN_DITHER0 }, { DDP_COMPONENT_RDMA0, DDP_COMPONENT_COLOR0, MT8192_DISP_RDMA0_SOUT_SEL, MT8192_RDMA0_SOUT_COLOR0, diff --git a/drivers/soc/mediatek/mtk-scpsys.c b/drivers/soc/mediatek/mtk-scpsys.c index 670cc82d17dc..ca75b14931ec 100644 --- a/drivers/soc/mediatek/mtk-scpsys.c +++ b/drivers/soc/mediatek/mtk-scpsys.c @@ -411,17 +411,12 @@ out: return ret; } -static int init_clks(struct platform_device *pdev, struct clk **clk) +static void init_clks(struct platform_device *pdev, struct clk **clk) { int i; - for (i = CLK_NONE + 1; i < CLK_MAX; i++) { + for (i = CLK_NONE + 1; i < CLK_MAX; i++) clk[i] = devm_clk_get(&pdev->dev, clk_names[i]); - if (IS_ERR(clk[i])) - return PTR_ERR(clk[i]); - } - - return 0; } static struct scp *init_scp(struct platform_device *pdev, @@ -431,7 +426,7 @@ static struct scp *init_scp(struct platform_device *pdev, { struct genpd_onecell_data *pd_data; struct resource *res; - int i, j, ret; + int i, j; struct scp *scp; struct clk *clk[CLK_MAX]; @@ -486,9 +481,7 @@ static struct scp *init_scp(struct platform_device *pdev, pd_data->num_domains = num; - ret = init_clks(pdev, clk); - if (ret) - return ERR_PTR(ret); + init_clks(pdev, clk); for (i = 0; i < num; i++) { struct scp_domain *scpd = &scp->domains[i]; diff --git a/drivers/soc/samsung/Kconfig b/drivers/soc/samsung/Kconfig index a9f8b224322e..02e319508cc6 100644 --- a/drivers/soc/samsung/Kconfig +++ b/drivers/soc/samsung/Kconfig @@ -31,7 +31,7 @@ config EXYNOS_USI help Enable support for USI block. USI (Universal Serial Interface) is an IP-core found in modern Samsung Exynos SoCs, like Exynos850 and - ExynosAutoV0. USI block can be configured to provide one of the + ExynosAutoV9. USI block can be configured to provide one of the following serial protocols: UART, SPI or High Speed I2C. 
This driver allows one to configure USI for desired protocol, which diff --git a/drivers/soc/samsung/exynos-chipid.c b/drivers/soc/samsung/exynos-chipid.c index 2746d05936d3..0fb3631e7346 100644 --- a/drivers/soc/samsung/exynos-chipid.c +++ b/drivers/soc/samsung/exynos-chipid.c @@ -204,7 +204,7 @@ module_platform_driver(exynos_chipid_driver); MODULE_DESCRIPTION("Samsung Exynos ChipID controller and ASV driver"); MODULE_AUTHOR("Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>"); -MODULE_AUTHOR("Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>"); +MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>"); MODULE_AUTHOR("Pankaj Dubey <pankaj.dubey@samsung.com>"); MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>"); MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c index 37f4443ce9a0..e9d83d65873b 100644 --- a/drivers/spi/spi-mem.c +++ b/drivers/spi/spi-mem.c @@ -854,15 +854,13 @@ static int spi_mem_probe(struct spi_device *spi) return memdrv->probe(mem); } -static int spi_mem_remove(struct spi_device *spi) +static void spi_mem_remove(struct spi_device *spi) { struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver); struct spi_mem *mem = spi_get_drvdata(spi); if (memdrv->remove) - return memdrv->remove(mem); - - return 0; + memdrv->remove(mem); } static void spi_mem_shutdown(struct spi_device *spi) diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c index 553b6b9d0222..c6a1bb09be05 100644 --- a/drivers/spi/spi-rockchip.c +++ b/drivers/spi/spi-rockchip.c @@ -585,6 +585,12 @@ static int rockchip_spi_slave_abort(struct spi_controller *ctlr) { struct rockchip_spi *rs = spi_controller_get_devdata(ctlr); + if (atomic_read(&rs->state) & RXDMA) + dmaengine_terminate_sync(ctlr->dma_rx); + if (atomic_read(&rs->state) & TXDMA) + dmaengine_terminate_sync(ctlr->dma_tx); + atomic_set(&rs->state, 0); + spi_enable_chip(rs, false); rs->slave_abort = true; spi_finalize_current_transfer(ctlr); @@ -654,7 +660,7 @@ static int rockchip_spi_probe(struct platform_device *pdev) struct spi_controller *ctlr; struct resource *mem; struct device_node *np = pdev->dev.of_node; - u32 rsd_nsecs; + u32 rsd_nsecs, num_cs; bool slave_mode; slave_mode = of_property_read_bool(np, "spi-slave"); @@ -764,8 +770,9 @@ static int rockchip_spi_probe(struct platform_device *pdev) * rk spi0 has two native cs, spi1..5 one cs only * if num-cs is missing in the dts, default to 1 */ - if (of_property_read_u16(np, "num-cs", &ctlr->num_chipselect)) - ctlr->num_chipselect = 1; + if (of_property_read_u32(np, "num-cs", &num_cs)) + num_cs = 1; + ctlr->num_chipselect = num_cs; ctlr->use_gpio_descriptors = true; } ctlr->dev.of_node = pdev->dev.of_node; diff --git a/drivers/spi/spi-slave-system-control.c b/drivers/spi/spi-slave-system-control.c index 169f3d595f60..d37cfe995a63 100644 --- a/drivers/spi/spi-slave-system-control.c +++ b/drivers/spi/spi-slave-system-control.c @@ -132,13 +132,12 @@ static int spi_slave_system_control_probe(struct spi_device *spi) return 0; } -static int spi_slave_system_control_remove(struct spi_device *spi) +static void spi_slave_system_control_remove(struct spi_device *spi) { struct spi_slave_system_control_priv *priv = spi_get_drvdata(spi); spi_slave_abort(spi); wait_for_completion(&priv->finished); - return 0; } static struct spi_driver spi_slave_system_control_driver = { diff --git a/drivers/spi/spi-slave-time.c b/drivers/spi/spi-slave-time.c index f2e07a392d68..f56c1afb8534 100644 --- a/drivers/spi/spi-slave-time.c +++ 
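The spi-rockchip num-cs hunk above is a subtle device-tree type fix: DT integer properties are 32-bit cells, so the old of_property_read_u16() call failed its size check whenever "num-cs" was present and the controller silently fell back to a single chipselect. Reading at the property's native width and then narrowing is the cure (illustrative helper name):

static void example_read_num_cs(struct device_node *np,
				struct spi_controller *ctlr)
{
	u32 num_cs;

	if (of_property_read_u32(np, "num-cs", &num_cs))
		num_cs = 1;	/* property absent: single chipselect */
	ctlr->num_chipselect = num_cs;
}
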
b/drivers/spi/spi-slave-time.c @@ -106,13 +106,12 @@ static int spi_slave_time_probe(struct spi_device *spi) return 0; } -static int spi_slave_time_remove(struct spi_device *spi) +static void spi_slave_time_remove(struct spi_device *spi) { struct spi_slave_time_priv *priv = spi_get_drvdata(spi); spi_slave_abort(spi); wait_for_completion(&priv->finished); - return 0; } static struct spi_driver spi_slave_time_driver = { diff --git a/drivers/spi/spi-tle62x0.c b/drivers/spi/spi-tle62x0.c index f8ad0709d015..a565352f6381 100644 --- a/drivers/spi/spi-tle62x0.c +++ b/drivers/spi/spi-tle62x0.c @@ -288,7 +288,7 @@ static int tle62x0_probe(struct spi_device *spi) return ret; } -static int tle62x0_remove(struct spi_device *spi) +static void tle62x0_remove(struct spi_device *spi) { struct tle62x0_state *st = spi_get_drvdata(spi); int ptr; @@ -298,7 +298,6 @@ static int tle62x0_remove(struct spi_device *spi) device_remove_file(&spi->dev, &dev_attr_status_show); kfree(st); - return 0; } static struct spi_driver tle62x0_driver = { diff --git a/drivers/spi/spi-zynq-qspi.c b/drivers/spi/spi-zynq-qspi.c index cfa222c9bd5e..78f31b61a2aa 100644 --- a/drivers/spi/spi-zynq-qspi.c +++ b/drivers/spi/spi-zynq-qspi.c @@ -570,6 +570,9 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem, if (op->dummy.nbytes) { tmpbuf = kzalloc(op->dummy.nbytes, GFP_KERNEL); + if (!tmpbuf) + return -ENOMEM; + memset(tmpbuf, 0xff, op->dummy.nbytes); reinit_completion(&xqspi->data_completion); xqspi->txbuf = tmpbuf; diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 4599b121d744..646cff67e06d 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -404,15 +404,8 @@ static void spi_remove(struct device *dev) { const struct spi_driver *sdrv = to_spi_driver(dev->driver); - if (sdrv->remove) { - int ret; - - ret = sdrv->remove(to_spi_device(dev)); - if (ret) - dev_warn(dev, - "Failed to unbind driver (%pe), ignoring\n", - ERR_PTR(ret)); - } + if (sdrv->remove) + sdrv->remove(to_spi_device(dev)); dev_pm_domain_detach(dev, true); } @@ -1019,10 +1012,10 @@ int spi_map_buf(struct spi_controller *ctlr, struct device *dev, int i, ret; if (vmalloced_buf || kmap_buf) { - desc_len = min_t(int, max_seg_size, PAGE_SIZE); + desc_len = min_t(unsigned int, max_seg_size, PAGE_SIZE); sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len); } else if (virt_addr_valid(buf)) { - desc_len = min_t(int, max_seg_size, ctlr->max_dma_len); + desc_len = min_t(unsigned int, max_seg_size, ctlr->max_dma_len); sgs = DIV_ROUND_UP(len, desc_len); } else { return -EINVAL; diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c index a5cceca8b82b..9468f74308bd 100644 --- a/drivers/spi/spidev.c +++ b/drivers/spi/spidev.c @@ -803,7 +803,7 @@ static int spidev_probe(struct spi_device *spi) return status; } -static int spidev_remove(struct spi_device *spi) +static void spidev_remove(struct spi_device *spi) { struct spidev_data *spidev = spi_get_drvdata(spi); @@ -820,8 +820,6 @@ static int spidev_remove(struct spi_device *spi) if (spidev->users == 0) kfree(spidev); mutex_unlock(&device_list_lock); - - return 0; } static struct spi_driver spidev_spi_driver = { diff --git a/drivers/staging/fbtft/fb_st7789v.c b/drivers/staging/fbtft/fb_st7789v.c index abe9395a0aef..861a154144e6 100644 --- a/drivers/staging/fbtft/fb_st7789v.c +++ b/drivers/staging/fbtft/fb_st7789v.c @@ -144,6 +144,8 @@ static int init_display(struct fbtft_par *par) { int rc; + par->fbtftops.reset(par); + rc = init_tearing_effect_line(par); if (rc) return rc; diff --git a/drivers/staging/fbtft/fbtft.h 
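The zynq-qspi hunk above adds the allocation-failure check that was missing for the dummy-byte buffer; as a side note, the buffer is filled with 0xff immediately after allocation (presumably so the dummy cycles clock out all-ones), so a plain kmalloc() would serve as well as kzalloc() there. The checked pattern, reduced to a sketch with an illustrative name:

static u8 *alloc_dummy_buf(size_t nbytes)
{
	u8 *buf = kmalloc(nbytes, GFP_KERNEL);

	if (buf)
		memset(buf, 0xff, nbytes);
	return buf;	/* caller must check for NULL and kfree() */
}
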
b/drivers/staging/fbtft/fbtft.h index 4cdec34e23d2..b68f5f9b7c78 100644 --- a/drivers/staging/fbtft/fbtft.h +++ b/drivers/staging/fbtft/fbtft.h @@ -272,21 +272,39 @@ void fbtft_write_reg8_bus9(struct fbtft_par *par, int len, ...); void fbtft_write_reg16_bus8(struct fbtft_par *par, int len, ...); void fbtft_write_reg16_bus16(struct fbtft_par *par, int len, ...); +#define FBTFT_DT_TABLE(_compatible) \ +static const struct of_device_id dt_ids[] = { \ + { .compatible = _compatible }, \ + {}, \ +}; \ +MODULE_DEVICE_TABLE(of, dt_ids); + +#define FBTFT_SPI_DRIVER(_name, _compatible, _display, _spi_ids) \ + \ +static int fbtft_driver_probe_spi(struct spi_device *spi) \ +{ \ + return fbtft_probe_common(_display, spi, NULL); \ +} \ + \ +static void fbtft_driver_remove_spi(struct spi_device *spi) \ +{ \ + struct fb_info *info = spi_get_drvdata(spi); \ + \ + fbtft_remove_common(&spi->dev, info); \ +} \ + \ +static struct spi_driver fbtft_driver_spi_driver = { \ + .driver = { \ + .name = _name, \ + .of_match_table = dt_ids, \ + }, \ + .id_table = _spi_ids, \ + .probe = fbtft_driver_probe_spi, \ + .remove = fbtft_driver_remove_spi, \ +}; + #define FBTFT_REGISTER_DRIVER(_name, _compatible, _display) \ \ -static int fbtft_driver_probe_spi(struct spi_device *spi) \ -{ \ - return fbtft_probe_common(_display, spi, NULL); \ -} \ - \ -static int fbtft_driver_remove_spi(struct spi_device *spi) \ -{ \ - struct fb_info *info = spi_get_drvdata(spi); \ - \ - fbtft_remove_common(&spi->dev, info); \ - return 0; \ -} \ - \ static int fbtft_driver_probe_pdev(struct platform_device *pdev) \ { \ return fbtft_probe_common(_display, NULL, pdev); \ @@ -300,22 +318,9 @@ static int fbtft_driver_remove_pdev(struct platform_device *pdev) \ return 0; \ } \ \ -static const struct of_device_id dt_ids[] = { \ - { .compatible = _compatible }, \ - {}, \ -}; \ - \ -MODULE_DEVICE_TABLE(of, dt_ids); \ +FBTFT_DT_TABLE(_compatible) \ \ - \ -static struct spi_driver fbtft_driver_spi_driver = { \ - .driver = { \ - .name = _name, \ - .of_match_table = dt_ids, \ - }, \ - .probe = fbtft_driver_probe_spi, \ - .remove = fbtft_driver_remove_spi, \ -}; \ +FBTFT_SPI_DRIVER(_name, _compatible, _display, NULL) \ \ static struct platform_driver fbtft_driver_platform_driver = { \ .driver = { \ @@ -334,7 +339,10 @@ static int __init fbtft_driver_module_init(void) \ ret = spi_register_driver(&fbtft_driver_spi_driver); \ if (ret < 0) \ return ret; \ - return platform_driver_register(&fbtft_driver_platform_driver); \ + ret = platform_driver_register(&fbtft_driver_platform_driver); \ + if (ret < 0) \ + spi_unregister_driver(&fbtft_driver_spi_driver); \ + return ret; \ } \ \ static void __exit fbtft_driver_module_exit(void) \ @@ -348,42 +356,15 @@ module_exit(fbtft_driver_module_exit); #define FBTFT_REGISTER_SPI_DRIVER(_name, _comp_vend, _comp_dev, _display) \ \ -static int fbtft_driver_probe_spi(struct spi_device *spi) \ -{ \ - return fbtft_probe_common(_display, spi, NULL); \ -} \ - \ -static int fbtft_driver_remove_spi(struct spi_device *spi) \ -{ \ - struct fb_info *info = spi_get_drvdata(spi); \ - \ - fbtft_remove_common(&spi->dev, info); \ - return 0; \ -} \ - \ -static const struct of_device_id dt_ids[] = { \ - { .compatible = _comp_vend "," _comp_dev }, \ - {}, \ -}; \ - \ -MODULE_DEVICE_TABLE(of, dt_ids); \ +FBTFT_DT_TABLE(_comp_vend "," _comp_dev) \ \ static const struct spi_device_id spi_ids[] = { \ { .name = _comp_dev }, \ {}, \ }; \ - \ MODULE_DEVICE_TABLE(spi, spi_ids); \ \ -static struct spi_driver fbtft_driver_spi_driver = { \ - .driver = { \ 
- .name = _name, \ - .of_match_table = dt_ids, \ - }, \ - .id_table = spi_ids, \ - .probe = fbtft_driver_probe_spi, \ - .remove = fbtft_driver_remove_spi, \ -}; \ +FBTFT_SPI_DRIVER(_name, _comp_vend "," _comp_dev, _display, spi_ids) \ \ module_spi_driver(fbtft_driver_spi_driver); diff --git a/drivers/staging/gdm724x/gdm_lte.c b/drivers/staging/gdm724x/gdm_lte.c index 493ed4821515..8ebb21d4b24b 100644 --- a/drivers/staging/gdm724x/gdm_lte.c +++ b/drivers/staging/gdm724x/gdm_lte.c @@ -76,14 +76,15 @@ static void tx_complete(void *arg) static int gdm_lte_rx(struct sk_buff *skb, struct nic *nic, int nic_type) { - int ret; + int ret, len; - ret = netif_rx_ni(skb); + len = skb->len + ETH_HLEN; + ret = netif_rx(skb); if (ret == NET_RX_DROP) { nic->stats.rx_dropped++; } else { nic->stats.rx_packets++; - nic->stats.rx_bytes += skb->len + ETH_HLEN; + nic->stats.rx_bytes += len; } return 0; diff --git a/drivers/staging/pi433/pi433_if.c b/drivers/staging/pi433/pi433_if.c index 68c09fa016ed..1d31c35875e3 100644 --- a/drivers/staging/pi433/pi433_if.c +++ b/drivers/staging/pi433/pi433_if.c @@ -1264,7 +1264,7 @@ RX_failed: return retval; } -static int pi433_remove(struct spi_device *spi) +static void pi433_remove(struct spi_device *spi) { struct pi433_device *device = spi_get_drvdata(spi); @@ -1284,8 +1284,6 @@ static int pi433_remove(struct spi_device *spi) kfree(device->rx_buffer); kfree(device); - - return 0; } static const struct of_device_id pi433_dt_ids[] = { diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c index 0f82f5031c43..49a3f45cb771 100644 --- a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c +++ b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c @@ -5907,6 +5907,7 @@ u8 chk_bmc_sleepq_hdl(struct adapter *padapter, unsigned char *pbuf) struct sta_info *psta_bmc; struct list_head *xmitframe_plist, *xmitframe_phead, *tmp; struct xmit_frame *pxmitframe = NULL; + struct xmit_priv *pxmitpriv = &padapter->xmitpriv; struct sta_priv *pstapriv = &padapter->stapriv; /* for BC/MC Frames */ @@ -5917,7 +5918,8 @@ u8 chk_bmc_sleepq_hdl(struct adapter *padapter, unsigned char *pbuf) if ((pstapriv->tim_bitmap&BIT(0)) && (psta_bmc->sleepq_len > 0)) { msleep(10);/* 10ms, ATIM(HIQ) Windows */ - spin_lock_bh(&psta_bmc->sleep_q.lock); + /* spin_lock_bh(&psta_bmc->sleep_q.lock); */ + spin_lock_bh(&pxmitpriv->lock); xmitframe_phead = get_list_head(&psta_bmc->sleep_q); list_for_each_safe(xmitframe_plist, tmp, xmitframe_phead) { @@ -5940,7 +5942,8 @@ u8 chk_bmc_sleepq_hdl(struct adapter *padapter, unsigned char *pbuf) rtw_hal_xmitframe_enqueue(padapter, pxmitframe); } - spin_unlock_bh(&psta_bmc->sleep_q.lock); + /* spin_unlock_bh(&psta_bmc->sleep_q.lock); */ + spin_unlock_bh(&pxmitpriv->lock); /* check hi queue and bmc_sleepq */ rtw_chk_hi_queue_cmd(padapter); diff --git a/drivers/staging/rtl8723bs/core/rtw_recv.c b/drivers/staging/rtl8723bs/core/rtw_recv.c index 41bfca549c64..105fe0e3482a 100644 --- a/drivers/staging/rtl8723bs/core/rtw_recv.c +++ b/drivers/staging/rtl8723bs/core/rtw_recv.c @@ -957,8 +957,10 @@ static signed int validate_recv_ctrl_frame(struct adapter *padapter, union recv_ if ((psta->state&WIFI_SLEEP_STATE) && (pstapriv->sta_dz_bitmap&BIT(psta->aid))) { struct list_head *xmitframe_plist, *xmitframe_phead; struct xmit_frame *pxmitframe = NULL; + struct xmit_priv *pxmitpriv = &padapter->xmitpriv; - spin_lock_bh(&psta->sleep_q.lock); + /* spin_lock_bh(&psta->sleep_q.lock); */ + spin_lock_bh(&pxmitpriv->lock); xmitframe_phead = 
get_list_head(&psta->sleep_q); xmitframe_plist = get_next(xmitframe_phead); @@ -989,10 +991,12 @@ static signed int validate_recv_ctrl_frame(struct adapter *padapter, union recv_ update_beacon(padapter, WLAN_EID_TIM, NULL, true); } - spin_unlock_bh(&psta->sleep_q.lock); + /* spin_unlock_bh(&psta->sleep_q.lock); */ + spin_unlock_bh(&pxmitpriv->lock); } else { - spin_unlock_bh(&psta->sleep_q.lock); + /* spin_unlock_bh(&psta->sleep_q.lock); */ + spin_unlock_bh(&pxmitpriv->lock); if (pstapriv->tim_bitmap&BIT(psta->aid)) { if (psta->sleepq_len == 0) { diff --git a/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c b/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c index 0c9ea1520fd0..beb11d89db18 100644 --- a/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c +++ b/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c @@ -293,48 +293,46 @@ u32 rtw_free_stainfo(struct adapter *padapter, struct sta_info *psta) /* list_del_init(&psta->wakeup_list); */ - spin_lock_bh(&psta->sleep_q.lock); + spin_lock_bh(&pxmitpriv->lock); + rtw_free_xmitframe_queue(pxmitpriv, &psta->sleep_q); psta->sleepq_len = 0; - spin_unlock_bh(&psta->sleep_q.lock); - - spin_lock_bh(&pxmitpriv->lock); /* vo */ - spin_lock_bh(&pstaxmitpriv->vo_q.sta_pending.lock); + /* spin_lock_bh(&(pxmitpriv->vo_pending.lock)); */ rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->vo_q.sta_pending); list_del_init(&(pstaxmitpriv->vo_q.tx_pending)); phwxmit = pxmitpriv->hwxmits; phwxmit->accnt -= pstaxmitpriv->vo_q.qcnt; pstaxmitpriv->vo_q.qcnt = 0; - spin_unlock_bh(&pstaxmitpriv->vo_q.sta_pending.lock); + /* spin_unlock_bh(&(pxmitpriv->vo_pending.lock)); */ /* vi */ - spin_lock_bh(&pstaxmitpriv->vi_q.sta_pending.lock); + /* spin_lock_bh(&(pxmitpriv->vi_pending.lock)); */ rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->vi_q.sta_pending); list_del_init(&(pstaxmitpriv->vi_q.tx_pending)); phwxmit = pxmitpriv->hwxmits+1; phwxmit->accnt -= pstaxmitpriv->vi_q.qcnt; pstaxmitpriv->vi_q.qcnt = 0; - spin_unlock_bh(&pstaxmitpriv->vi_q.sta_pending.lock); + /* spin_unlock_bh(&(pxmitpriv->vi_pending.lock)); */ /* be */ - spin_lock_bh(&pstaxmitpriv->be_q.sta_pending.lock); + /* spin_lock_bh(&(pxmitpriv->be_pending.lock)); */ rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->be_q.sta_pending); list_del_init(&(pstaxmitpriv->be_q.tx_pending)); phwxmit = pxmitpriv->hwxmits+2; phwxmit->accnt -= pstaxmitpriv->be_q.qcnt; pstaxmitpriv->be_q.qcnt = 0; - spin_unlock_bh(&pstaxmitpriv->be_q.sta_pending.lock); + /* spin_unlock_bh(&(pxmitpriv->be_pending.lock)); */ /* bk */ - spin_lock_bh(&pstaxmitpriv->bk_q.sta_pending.lock); + /* spin_lock_bh(&(pxmitpriv->bk_pending.lock)); */ rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->bk_q.sta_pending); list_del_init(&(pstaxmitpriv->bk_q.tx_pending)); phwxmit = pxmitpriv->hwxmits+3; phwxmit->accnt -= pstaxmitpriv->bk_q.qcnt; pstaxmitpriv->bk_q.qcnt = 0; - spin_unlock_bh(&pstaxmitpriv->bk_q.sta_pending.lock); + /* spin_unlock_bh(&(pxmitpriv->bk_pending.lock)); */ spin_unlock_bh(&pxmitpriv->lock); diff --git a/drivers/staging/rtl8723bs/core/rtw_xmit.c b/drivers/staging/rtl8723bs/core/rtw_xmit.c index 13b8bd5ffabc..f466bfd248fb 100644 --- a/drivers/staging/rtl8723bs/core/rtw_xmit.c +++ b/drivers/staging/rtl8723bs/core/rtw_xmit.c @@ -1734,12 +1734,15 @@ void rtw_free_xmitframe_queue(struct xmit_priv *pxmitpriv, struct __queue *pfram struct list_head *plist, *phead, *tmp; struct xmit_frame *pxmitframe; + spin_lock_bh(&pframequeue->lock); + phead = get_list_head(pframequeue); list_for_each_safe(plist, tmp, phead) { pxmitframe = list_entry(plist, struct 
xmit_frame, list); rtw_free_xmitframe(pxmitpriv, pxmitframe); } + spin_unlock_bh(&pframequeue->lock); } s32 rtw_xmitframe_enqueue(struct adapter *padapter, struct xmit_frame *pxmitframe) @@ -1794,7 +1797,6 @@ s32 rtw_xmit_classifier(struct adapter *padapter, struct xmit_frame *pxmitframe) struct sta_info *psta; struct tx_servq *ptxservq; struct pkt_attrib *pattrib = &pxmitframe->attrib; - struct xmit_priv *xmit_priv = &padapter->xmitpriv; struct hw_xmit *phwxmits = padapter->xmitpriv.hwxmits; signed int res = _SUCCESS; @@ -1812,14 +1814,12 @@ s32 rtw_xmit_classifier(struct adapter *padapter, struct xmit_frame *pxmitframe) ptxservq = rtw_get_sta_pending(padapter, psta, pattrib->priority, (u8 *)(&ac_index)); - spin_lock_bh(&xmit_priv->lock); if (list_empty(&ptxservq->tx_pending)) list_add_tail(&ptxservq->tx_pending, get_list_head(phwxmits[ac_index].sta_queue)); list_add_tail(&pxmitframe->list, get_list_head(&ptxservq->sta_pending)); ptxservq->qcnt++; phwxmits[ac_index].accnt++; - spin_unlock_bh(&xmit_priv->lock); exit: @@ -2202,10 +2202,11 @@ void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta) struct list_head *xmitframe_plist, *xmitframe_phead, *tmp; struct xmit_frame *pxmitframe = NULL; struct sta_priv *pstapriv = &padapter->stapriv; + struct xmit_priv *pxmitpriv = &padapter->xmitpriv; psta_bmc = rtw_get_bcmc_stainfo(padapter); - spin_lock_bh(&psta->sleep_q.lock); + spin_lock_bh(&pxmitpriv->lock); xmitframe_phead = get_list_head(&psta->sleep_q); list_for_each_safe(xmitframe_plist, tmp, xmitframe_phead) { @@ -2306,7 +2307,7 @@ void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta) _exit: - spin_unlock_bh(&psta->sleep_q.lock); + spin_unlock_bh(&pxmitpriv->lock); if (update_mask) update_beacon(padapter, WLAN_EID_TIM, NULL, true); @@ -2318,8 +2319,9 @@ void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *pst struct list_head *xmitframe_plist, *xmitframe_phead, *tmp; struct xmit_frame *pxmitframe = NULL; struct sta_priv *pstapriv = &padapter->stapriv; + struct xmit_priv *pxmitpriv = &padapter->xmitpriv; - spin_lock_bh(&psta->sleep_q.lock); + spin_lock_bh(&pxmitpriv->lock); xmitframe_phead = get_list_head(&psta->sleep_q); list_for_each_safe(xmitframe_plist, tmp, xmitframe_phead) { @@ -2372,7 +2374,7 @@ void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *pst } } - spin_unlock_bh(&psta->sleep_q.lock); + spin_unlock_bh(&pxmitpriv->lock); } void enqueue_pending_xmitbuf(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf) diff --git a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c index b5d5e922231c..15810438a472 100644 --- a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c +++ b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c @@ -502,7 +502,9 @@ s32 rtl8723bs_hal_xmit( rtw_issue_addbareq_cmd(padapter, pxmitframe); } + spin_lock_bh(&pxmitpriv->lock); err = rtw_xmitframe_enqueue(padapter, pxmitframe); + spin_unlock_bh(&pxmitpriv->lock); if (err != _SUCCESS) { rtw_free_xmitframe(pxmitpriv, pxmitframe); diff --git a/drivers/staging/rtl8723bs/include/rtw_mlme.h b/drivers/staging/rtl8723bs/include/rtw_mlme.h index c94fa7d8d5a9..1b343b434f4d 100644 --- a/drivers/staging/rtl8723bs/include/rtw_mlme.h +++ b/drivers/staging/rtl8723bs/include/rtw_mlme.h @@ -102,13 +102,17 @@ there are several "locks" in mlme_priv, since mlme_priv is a shared resource between many threads, like ISR/Call-Back functions, the OID handlers, and even timer functions. 
- Each struct __queue has its own locks, already. -Other items are protected by mlme_priv.lock. +Other items in mlme_priv are protected by mlme_priv.lock, while items in +xmit_priv are protected by xmit_priv.lock. To avoid possible dead lock, any thread trying to modifiying mlme_priv SHALL not lock up more than one locks at a time! +The only exception is that queue functions which take the __queue.lock +may be called with the xmit_priv.lock held. In this case the order +MUST always be first lock xmit_priv.lock and then call any queue functions +which take __queue.lock. */ diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c index 6759a6261500..3a2e4582db8e 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c @@ -1058,15 +1058,27 @@ service_callback(enum vchiq_reason reason, struct vchiq_header *header, DEBUG_TRACE(SERVICE_CALLBACK_LINE); + rcu_read_lock(); service = handle_to_service(handle); - if (WARN_ON(!service)) + if (WARN_ON(!service)) { + rcu_read_unlock(); return VCHIQ_SUCCESS; + } user_service = (struct user_service *)service->base.userdata; instance = user_service->instance; - if (!instance || instance->closing) + if (!instance || instance->closing) { + rcu_read_unlock(); return VCHIQ_SUCCESS; + } + + /* + * As hopping around different synchronization mechanism, + * taking an extra reference results in simpler implementation. + */ + vchiq_service_get(service); + rcu_read_unlock(); vchiq_log_trace(vchiq_arm_log_level, "%s - service %lx(%d,%p), reason %d, header %lx, instance %lx, bulk_userdata %lx", @@ -1097,6 +1109,7 @@ service_callback(enum vchiq_reason reason, struct vchiq_header *header, bulk_userdata); if (status != VCHIQ_SUCCESS) { DEBUG_TRACE(SERVICE_CALLBACK_LINE); + vchiq_service_put(service); return status; } } @@ -1105,10 +1118,12 @@ service_callback(enum vchiq_reason reason, struct vchiq_header *header, if (wait_for_completion_interruptible(&user_service->remove_event)) { vchiq_log_info(vchiq_arm_log_level, "%s interrupted", __func__); DEBUG_TRACE(SERVICE_CALLBACK_LINE); + vchiq_service_put(service); return VCHIQ_RETRY; } else if (instance->closing) { vchiq_log_info(vchiq_arm_log_level, "%s closing", __func__); DEBUG_TRACE(SERVICE_CALLBACK_LINE); + vchiq_service_put(service); return VCHIQ_ERROR; } DEBUG_TRACE(SERVICE_CALLBACK_LINE); @@ -1137,6 +1152,7 @@ service_callback(enum vchiq_reason reason, struct vchiq_header *header, header = NULL; } DEBUG_TRACE(SERVICE_CALLBACK_LINE); + vchiq_service_put(service); if (skip_completion) return VCHIQ_SUCCESS; diff --git a/drivers/staging/wfx/bus_spi.c b/drivers/staging/wfx/bus_spi.c index 55ffcd7c42e2..fa0ff66a457d 100644 --- a/drivers/staging/wfx/bus_spi.c +++ b/drivers/staging/wfx/bus_spi.c @@ -232,12 +232,11 @@ static int wfx_spi_probe(struct spi_device *func) return wfx_probe(bus->core); } -static int wfx_spi_remove(struct spi_device *func) +static void wfx_spi_remove(struct spi_device *func) { struct wfx_spi_priv *bus = spi_get_drvdata(func); wfx_release(bus->core); - return 0; } /* For dynamic driver binding, kernel does not use OF to match driver. 
It only diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c index 255500448ad3..e04fc666d218 100644 --- a/drivers/staging/wlan-ng/p80211netdev.c +++ b/drivers/staging/wlan-ng/p80211netdev.c @@ -255,7 +255,7 @@ static int p80211_convert_to_ether(struct wlandevice *wlandev, if (skb_p80211_to_ether(wlandev, wlandev->ethconv, skb) == 0) { wlandev->netdev->stats.rx_packets++; wlandev->netdev->stats.rx_bytes += skb->len; - netif_rx_ni(skb); + netif_rx(skb); return 0; } @@ -290,7 +290,7 @@ static void p80211netdev_rx_bh(struct tasklet_struct *t) dev->stats.rx_packets++; dev->stats.rx_bytes += skb->len; - netif_rx_ni(skb); + netif_rx(skb); continue; } else { if (!p80211_convert_to_ether(wlandev, skb)) diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c index 1ca320885fad..17a6f51d3089 100644 --- a/drivers/tee/optee/core.c +++ b/drivers/tee/optee/core.c @@ -158,6 +158,7 @@ void optee_remove_common(struct optee *optee) optee_unregister_devices(); optee_notif_uninit(optee); + teedev_close_context(optee->ctx); /* * The two devices have to be unregistered before we can free the * other resources. diff --git a/drivers/tee/optee/ffa_abi.c b/drivers/tee/optee/ffa_abi.c index 20a1b1a3d965..f744ab15bf2c 100644 --- a/drivers/tee/optee/ffa_abi.c +++ b/drivers/tee/optee/ffa_abi.c @@ -424,6 +424,7 @@ static struct tee_shm_pool_mgr *optee_ffa_shm_pool_alloc_pages(void) */ static void handle_ffa_rpc_func_cmd_shm_alloc(struct tee_context *ctx, + struct optee *optee, struct optee_msg_arg *arg) { struct tee_shm *shm; @@ -439,7 +440,7 @@ static void handle_ffa_rpc_func_cmd_shm_alloc(struct tee_context *ctx, shm = optee_rpc_cmd_alloc_suppl(ctx, arg->params[0].u.value.b); break; case OPTEE_RPC_SHM_TYPE_KERNEL: - shm = tee_shm_alloc(ctx, arg->params[0].u.value.b, + shm = tee_shm_alloc(optee->ctx, arg->params[0].u.value.b, TEE_SHM_MAPPED | TEE_SHM_PRIV); break; default: @@ -493,14 +494,13 @@ err_bad_param: } static void handle_ffa_rpc_func_cmd(struct tee_context *ctx, + struct optee *optee, struct optee_msg_arg *arg) { - struct optee *optee = tee_get_drvdata(ctx->teedev); - arg->ret_origin = TEEC_ORIGIN_COMMS; switch (arg->cmd) { case OPTEE_RPC_CMD_SHM_ALLOC: - handle_ffa_rpc_func_cmd_shm_alloc(ctx, arg); + handle_ffa_rpc_func_cmd_shm_alloc(ctx, optee, arg); break; case OPTEE_RPC_CMD_SHM_FREE: handle_ffa_rpc_func_cmd_shm_free(ctx, optee, arg); @@ -510,12 +510,12 @@ static void handle_ffa_rpc_func_cmd(struct tee_context *ctx, } } -static void optee_handle_ffa_rpc(struct tee_context *ctx, u32 cmd, - struct optee_msg_arg *arg) +static void optee_handle_ffa_rpc(struct tee_context *ctx, struct optee *optee, + u32 cmd, struct optee_msg_arg *arg) { switch (cmd) { case OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD: - handle_ffa_rpc_func_cmd(ctx, arg); + handle_ffa_rpc_func_cmd(ctx, optee, arg); break; case OPTEE_FFA_YIELDING_CALL_RETURN_INTERRUPT: /* Interrupt delivered by now */ @@ -582,7 +582,7 @@ static int optee_ffa_yielding_call(struct tee_context *ctx, * above. 
*/ cond_resched(); - optee_handle_ffa_rpc(ctx, data->data1, rpc_arg); + optee_handle_ffa_rpc(ctx, optee, data->data1, rpc_arg); cmd = OPTEE_FFA_YIELDING_CALL_RESUME; data->data0 = cmd; data->data1 = 0; @@ -619,9 +619,18 @@ static int optee_ffa_do_call_with_arg(struct tee_context *ctx, .data2 = (u32)(shm->sec_world_id >> 32), .data3 = shm->offset, }; - struct optee_msg_arg *arg = tee_shm_get_va(shm, 0); - unsigned int rpc_arg_offs = OPTEE_MSG_GET_ARG_SIZE(arg->num_params); - struct optee_msg_arg *rpc_arg = tee_shm_get_va(shm, rpc_arg_offs); + struct optee_msg_arg *arg; + unsigned int rpc_arg_offs; + struct optee_msg_arg *rpc_arg; + + arg = tee_shm_get_va(shm, 0); + if (IS_ERR(arg)) + return PTR_ERR(arg); + + rpc_arg_offs = OPTEE_MSG_GET_ARG_SIZE(arg->num_params); + rpc_arg = tee_shm_get_va(shm, rpc_arg_offs); + if (IS_ERR(rpc_arg)) + return PTR_ERR(rpc_arg); return optee_ffa_yielding_call(ctx, &data, rpc_arg); } @@ -793,7 +802,9 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev) { const struct ffa_dev_ops *ffa_ops; unsigned int rpc_arg_count; + struct tee_shm_pool *pool; struct tee_device *teedev; + struct tee_context *ctx; struct optee *optee; int rc; @@ -813,12 +824,12 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev) if (!optee) return -ENOMEM; - optee->pool = optee_ffa_config_dyn_shm(); - if (IS_ERR(optee->pool)) { - rc = PTR_ERR(optee->pool); - optee->pool = NULL; - goto err; + pool = optee_ffa_config_dyn_shm(); + if (IS_ERR(pool)) { + rc = PTR_ERR(pool); + goto err_free_optee; } + optee->pool = pool; optee->ops = &optee_ffa_ops; optee->ffa.ffa_dev = ffa_dev; @@ -829,7 +840,7 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev) optee); if (IS_ERR(teedev)) { rc = PTR_ERR(teedev); - goto err; + goto err_free_pool; } optee->teedev = teedev; @@ -837,50 +848,59 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev) optee); if (IS_ERR(teedev)) { rc = PTR_ERR(teedev); - goto err; + goto err_unreg_teedev; } optee->supp_teedev = teedev; rc = tee_device_register(optee->teedev); if (rc) - goto err; + goto err_unreg_supp_teedev; rc = tee_device_register(optee->supp_teedev); if (rc) - goto err; + goto err_unreg_supp_teedev; rc = rhashtable_init(&optee->ffa.global_ids, &shm_rhash_params); if (rc) - goto err; + goto err_unreg_supp_teedev; mutex_init(&optee->ffa.mutex); mutex_init(&optee->call_queue.mutex); INIT_LIST_HEAD(&optee->call_queue.waiters); optee_supp_init(&optee->supp); ffa_dev_set_drvdata(ffa_dev, optee); - rc = optee_notif_init(optee, OPTEE_DEFAULT_MAX_NOTIF_VALUE); - if (rc) { - optee_ffa_remove(ffa_dev); - return rc; + ctx = teedev_open(optee->teedev); + if (IS_ERR(ctx)) { + rc = PTR_ERR(ctx); + goto err_rhashtable_free; } + optee->ctx = ctx; + rc = optee_notif_init(optee, OPTEE_DEFAULT_MAX_NOTIF_VALUE); + if (rc) + goto err_close_ctx; rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES); - if (rc) { - optee_ffa_remove(ffa_dev); - return rc; - } + if (rc) + goto err_unregister_devices; pr_info("initialized driver\n"); return 0; -err: - /* - * tee_device_unregister() is safe to call even if the - * devices hasn't been registered with - * tee_device_register() yet. 
- */ + +err_unregister_devices: + optee_unregister_devices(); + optee_notif_uninit(optee); +err_close_ctx: + teedev_close_context(ctx); +err_rhashtable_free: + rhashtable_free_and_destroy(&optee->ffa.global_ids, rh_free_fn, NULL); + optee_supp_uninit(&optee->supp); + mutex_destroy(&optee->call_queue.mutex); +err_unreg_supp_teedev: tee_device_unregister(optee->supp_teedev); +err_unreg_teedev: tee_device_unregister(optee->teedev); - if (optee->pool) - tee_shm_pool_free(optee->pool); +err_free_pool: + tee_shm_pool_free(pool); +err_free_optee: kfree(optee); return rc; } diff --git a/drivers/tee/optee/notif.c b/drivers/tee/optee/notif.c index a28fa03dcd0e..05212842b0a5 100644 --- a/drivers/tee/optee/notif.c +++ b/drivers/tee/optee/notif.c @@ -121,5 +121,5 @@ int optee_notif_init(struct optee *optee, u_int max_key) void optee_notif_uninit(struct optee *optee) { - kfree(optee->notif.bitmap); + bitmap_free(optee->notif.bitmap); } diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h index 46f74ab07c7e..92bc47bef95f 100644 --- a/drivers/tee/optee/optee_private.h +++ b/drivers/tee/optee/optee_private.h @@ -53,7 +53,6 @@ struct optee_call_queue { struct optee_notif { u_int max_key; - struct tee_context *ctx; /* Serializes access to the elements below in this struct */ spinlock_t lock; struct list_head db; @@ -134,9 +133,10 @@ struct optee_ops { /** * struct optee - main service struct * @supp_teedev: supplicant device + * @teedev: client device * @ops: internal callbacks for different ways to reach secure * world - * @teedev: client device + * @ctx: driver internal TEE context * @smc: specific to SMC ABI * @ffa: specific to FF-A ABI * @call_queue: queue of threads waiting to call @invoke_fn @@ -152,6 +152,7 @@ struct optee { struct tee_device *supp_teedev; struct tee_device *teedev; const struct optee_ops *ops; + struct tee_context *ctx; union { struct optee_smc smc; struct optee_ffa ffa; diff --git a/drivers/tee/optee/smc_abi.c b/drivers/tee/optee/smc_abi.c index 449d6a72d289..c517d310249f 100644 --- a/drivers/tee/optee/smc_abi.c +++ b/drivers/tee/optee/smc_abi.c @@ -75,16 +75,6 @@ static int from_msg_param_tmp_mem(struct tee_param *p, u32 attr, p->u.memref.shm_offs = mp->u.tmem.buf_ptr - pa; p->u.memref.shm = shm; - /* Check that the memref is covered by the shm object */ - if (p->u.memref.size) { - size_t o = p->u.memref.shm_offs + - p->u.memref.size - 1; - - rc = tee_shm_get_pa(shm, o, NULL); - if (rc) - return rc; - } - return 0; } @@ -622,6 +612,7 @@ static void handle_rpc_func_cmd_shm_free(struct tee_context *ctx, } static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx, + struct optee *optee, struct optee_msg_arg *arg, struct optee_call_ctx *call_ctx) { @@ -651,7 +642,8 @@ static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx, shm = optee_rpc_cmd_alloc_suppl(ctx, sz); break; case OPTEE_RPC_SHM_TYPE_KERNEL: - shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED | TEE_SHM_PRIV); + shm = tee_shm_alloc(optee->ctx, sz, + TEE_SHM_MAPPED | TEE_SHM_PRIV); break; default: arg->ret = TEEC_ERROR_BAD_PARAMETERS; @@ -747,7 +739,7 @@ static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee, switch (arg->cmd) { case OPTEE_RPC_CMD_SHM_ALLOC: free_pages_list(call_ctx); - handle_rpc_func_cmd_shm_alloc(ctx, arg, call_ctx); + handle_rpc_func_cmd_shm_alloc(ctx, optee, arg, call_ctx); break; case OPTEE_RPC_CMD_SHM_FREE: handle_rpc_func_cmd_shm_free(ctx, arg); @@ -776,7 +768,7 @@ static void optee_handle_rpc(struct tee_context *ctx, switch 
(OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) { case OPTEE_SMC_RPC_FUNC_ALLOC: - shm = tee_shm_alloc(ctx, param->a1, + shm = tee_shm_alloc(optee->ctx, param->a1, TEE_SHM_MAPPED | TEE_SHM_PRIV); if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) { reg_pair_from_64(&param->a1, &param->a2, pa); @@ -954,57 +946,34 @@ static irqreturn_t notif_irq_thread_fn(int irq, void *dev_id) { struct optee *optee = dev_id; - optee_smc_do_bottom_half(optee->notif.ctx); + optee_smc_do_bottom_half(optee->ctx); return IRQ_HANDLED; } static int optee_smc_notif_init_irq(struct optee *optee, u_int irq) { - struct tee_context *ctx; int rc; - ctx = teedev_open(optee->teedev); - if (IS_ERR(ctx)) - return PTR_ERR(ctx); - - optee->notif.ctx = ctx; rc = request_threaded_irq(irq, notif_irq_handler, notif_irq_thread_fn, 0, "optee_notification", optee); if (rc) - goto err_close_ctx; + return rc; optee->smc.notif_irq = irq; return 0; - -err_close_ctx: - teedev_close_context(optee->notif.ctx); - optee->notif.ctx = NULL; - - return rc; } static void optee_smc_notif_uninit_irq(struct optee *optee) { - if (optee->notif.ctx) { - optee_smc_stop_async_notif(optee->notif.ctx); + if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF) { + optee_smc_stop_async_notif(optee->ctx); if (optee->smc.notif_irq) { free_irq(optee->smc.notif_irq, optee); irq_dispose_mapping(optee->smc.notif_irq); } - - /* - * The thread normally working with optee->notif.ctx was - * stopped with free_irq() above. - * - * Note we're not using teedev_close_context() or - * tee_client_close_context() since we have already called - * tee_device_put() while initializing to avoid a circular - * reference counting. - */ - teedev_close_context(optee->notif.ctx); } } @@ -1366,6 +1335,7 @@ static int optee_probe(struct platform_device *pdev) struct optee *optee = NULL; void *memremaped_shm = NULL; struct tee_device *teedev; + struct tee_context *ctx; u32 max_notif_value; u32 sec_caps; int rc; @@ -1446,9 +1416,15 @@ static int optee_probe(struct platform_device *pdev) optee->pool = pool; platform_set_drvdata(pdev, optee); + ctx = teedev_open(optee->teedev); + if (IS_ERR(ctx)) { + rc = PTR_ERR(ctx); + goto err_supp_uninit; + } + optee->ctx = ctx; rc = optee_notif_init(optee, max_notif_value); if (rc) - goto err_supp_uninit; + goto err_close_ctx; if (sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF) { unsigned int irq; @@ -1496,6 +1472,8 @@ err_disable_shm_cache: optee_unregister_devices(); err_notif_uninit: optee_notif_uninit(optee); +err_close_ctx: + teedev_close_context(ctx); err_supp_uninit: optee_supp_uninit(&optee->supp); mutex_destroy(&optee->call_queue.mutex); diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c index 72acb1f61849..4f478812cb51 100644 --- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c +++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c @@ -404,6 +404,10 @@ static void int3400_notify(acpi_handle handle, thermal_prop[3] = kasprintf(GFP_KERNEL, "EVENT=%d", therm_event); thermal_prop[4] = NULL; kobject_uevent_env(&priv->thermal->device.kobj, KOBJ_CHANGE, thermal_prop); + kfree(thermal_prop[0]); + kfree(thermal_prop[1]); + kfree(thermal_prop[2]); + kfree(thermal_prop[3]); } static int int3400_thermal_get_temp(struct thermal_zone_device *thermal, diff --git a/drivers/thermal/thermal_netlink.c b/drivers/thermal/thermal_netlink.c index a16dd4d5d710..73e68cce292e 100644 --- a/drivers/thermal/thermal_netlink.c +++ b/drivers/thermal/thermal_netlink.c @@ -419,11 +419,12 @@ static int 
thermal_genl_cmd_tz_get_trip(struct param *p) for (i = 0; i < tz->trips; i++) { enum thermal_trip_type type; - int temp, hyst; + int temp, hyst = 0; tz->ops->get_trip_type(tz, i, &type); tz->ops->get_trip_temp(tz, i, &temp); - tz->ops->get_trip_hyst(tz, i, &hyst); + if (tz->ops->get_trip_hyst) + tz->ops->get_trip_hyst(tz, i, &hyst); if (nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_ID, i) || nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_TYPE, type) || diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c index 0b1808e3a912..fa92f727fdf8 100644 --- a/drivers/tty/n_gsm.c +++ b/drivers/tty/n_gsm.c @@ -439,7 +439,7 @@ static u8 gsm_encode_modem(const struct gsm_dlci *dlci) modembits |= MDM_RTR; if (dlci->modem_tx & TIOCM_RI) modembits |= MDM_IC; - if (dlci->modem_tx & TIOCM_CD) + if (dlci->modem_tx & TIOCM_CD || dlci->gsm->initiator) modembits |= MDM_DV; return modembits; } @@ -448,7 +448,7 @@ static u8 gsm_encode_modem(const struct gsm_dlci *dlci) * gsm_print_packet - display a frame for debug * @hdr: header to print before decode * @addr: address EA from the frame - * @cr: C/R bit from the frame + * @cr: C/R bit seen as initiator * @control: control including PF bit * @data: following data bytes * @dlen: length of data @@ -548,7 +548,7 @@ static int gsm_stuff_frame(const u8 *input, u8 *output, int len) * gsm_send - send a control frame * @gsm: our GSM mux * @addr: address for control frame - * @cr: command/response bit + * @cr: command/response bit seen as initiator * @control: control byte including PF bit * * Format up and transmit a control frame. These do not go via the @@ -563,11 +563,15 @@ static void gsm_send(struct gsm_mux *gsm, int addr, int cr, int control) int len; u8 cbuf[10]; u8 ibuf[3]; + int ocr; + + /* toggle C/R coding if not initiator */ + ocr = cr ^ (gsm->initiator ? 0 : 1); switch (gsm->encoding) { case 0: cbuf[0] = GSM0_SOF; - cbuf[1] = (addr << 2) | (cr << 1) | EA; + cbuf[1] = (addr << 2) | (ocr << 1) | EA; cbuf[2] = control; cbuf[3] = EA; /* Length of data = 0 */ cbuf[4] = 0xFF - gsm_fcs_add_block(INIT_FCS, cbuf + 1, 3); @@ -577,7 +581,7 @@ static void gsm_send(struct gsm_mux *gsm, int addr, int cr, int control) case 1: case 2: /* Control frame + packing (but not frame stuffing) in mode 1 */ - ibuf[0] = (addr << 2) | (cr << 1) | EA; + ibuf[0] = (addr << 2) | (ocr << 1) | EA; ibuf[1] = control; ibuf[2] = 0xFF - gsm_fcs_add_block(INIT_FCS, ibuf, 2); /* Stuffing may double the size worst case */ @@ -611,7 +615,7 @@ static void gsm_send(struct gsm_mux *gsm, int addr, int cr, int control) static inline void gsm_response(struct gsm_mux *gsm, int addr, int control) { - gsm_send(gsm, addr, 1, control); + gsm_send(gsm, addr, 0, control); } /** @@ -1017,25 +1021,25 @@ static void gsm_control_reply(struct gsm_mux *gsm, int cmd, const u8 *data, * @tty: virtual tty bound to the DLCI * @dlci: DLCI to affect * @modem: modem bits (full EA) - * @clen: command length + * @slen: number of signal octets * * Used when a modem control message or line state inline in adaption * layer 2 is processed. Sort out the local modem state and throttles */ static void gsm_process_modem(struct tty_struct *tty, struct gsm_dlci *dlci, - u32 modem, int clen) + u32 modem, int slen) { int mlines = 0; u8 brk = 0; int fc; - /* The modem status command can either contain one octet (v.24 signals) - or two octets (v.24 signals + break signals). The length field will - either be 2 or 3 respectively. This is specified in section - 5.4.6.3.7 of the 27.010 mux spec. 
*/ + /* The modem status command can either contain one octet (V.24 signals) + * or two octets (V.24 signals + break signals). This is specified in + * section 5.4.6.3.7 of the 07.10 mux spec. + */ - if (clen == 2) + if (slen == 1) modem = modem & 0x7f; else { brk = modem & 0x7f; @@ -1092,6 +1096,7 @@ static void gsm_control_modem(struct gsm_mux *gsm, const u8 *data, int clen) unsigned int brk = 0; struct gsm_dlci *dlci; int len = clen; + int slen; const u8 *dp = data; struct tty_struct *tty; @@ -1111,6 +1116,7 @@ static void gsm_control_modem(struct gsm_mux *gsm, const u8 *data, int clen) return; dlci = gsm->dlci[addr]; + slen = len; while (gsm_read_ea(&modem, *dp++) == 0) { len--; if (len == 0) @@ -1127,7 +1133,7 @@ static void gsm_control_modem(struct gsm_mux *gsm, const u8 *data, int clen) modem |= (brk & 0x7f); } tty = tty_port_tty_get(&dlci->port); - gsm_process_modem(tty, dlci, modem, clen); + gsm_process_modem(tty, dlci, modem, slen); if (tty) { tty_wakeup(tty); tty_kref_put(tty); @@ -1451,6 +1457,9 @@ static void gsm_dlci_close(struct gsm_dlci *dlci) if (dlci->addr != 0) { tty_port_tty_hangup(&dlci->port, false); kfifo_reset(&dlci->fifo); + /* Ensure that gsmtty_open() can return. */ + tty_port_set_initialized(&dlci->port, 0); + wake_up_interruptible(&dlci->port.open_wait); } else dlci->gsm->dead = true; /* Unregister gsmtty driver,report gsmtty dev remove uevent for user */ @@ -1514,7 +1523,7 @@ static void gsm_dlci_t1(struct timer_list *t) dlci->mode = DLCI_MODE_ADM; gsm_dlci_open(dlci); } else { - gsm_dlci_close(dlci); + gsm_dlci_begin_close(dlci); /* prevent half open link */ } break; @@ -1593,6 +1602,7 @@ static void gsm_dlci_data(struct gsm_dlci *dlci, const u8 *data, int clen) struct tty_struct *tty; unsigned int modem = 0; int len = clen; + int slen = 0; if (debug & 16) pr_debug("%d bytes for tty\n", len); @@ -1605,12 +1615,14 @@ static void gsm_dlci_data(struct gsm_dlci *dlci, const u8 *data, int clen) case 2: /* Asynchronous serial with line state in each frame */ while (gsm_read_ea(&modem, *data++) == 0) { len--; + slen++; if (len == 0) return; } + slen++; tty = tty_port_tty_get(port); if (tty) { - gsm_process_modem(tty, dlci, modem, clen); + gsm_process_modem(tty, dlci, modem, slen); tty_kref_put(tty); } fallthrough; @@ -1748,7 +1760,12 @@ static void gsm_dlci_release(struct gsm_dlci *dlci) gsm_destroy_network(dlci); mutex_unlock(&dlci->mutex); - tty_hangup(tty); + /* We cannot use tty_hangup() because in tty_kref_put() the tty + * driver assumes that the hangup queue is free and reuses it to + * queue release_one_tty() -> NULL pointer panic in + * process_one_work(). + */ + tty_vhangup(tty); tty_port_tty_set(&dlci->port, NULL); tty_kref_put(tty); @@ -1800,10 +1817,10 @@ static void gsm_queue(struct gsm_mux *gsm) goto invalid; cr = gsm->address & 1; /* C/R bit */ + cr ^= gsm->initiator ? 
0 : 1; /* Flip so 1 always means command */ gsm_print_packet("<--", address, cr, gsm->control, gsm->buf, gsm->len); - cr ^= 1 - gsm->initiator; /* Flip so 1 always means command */ dlci = gsm->dlci[address]; switch (gsm->control) { @@ -3234,9 +3251,9 @@ static void gsmtty_throttle(struct tty_struct *tty) if (dlci->state == DLCI_CLOSED) return; if (C_CRTSCTS(tty)) - dlci->modem_tx &= ~TIOCM_DTR; + dlci->modem_tx &= ~TIOCM_RTS; dlci->throttled = true; - /* Send an MSC with DTR cleared */ + /* Send an MSC with RTS cleared */ gsmtty_modem_update(dlci, 0); } @@ -3246,9 +3263,9 @@ static void gsmtty_unthrottle(struct tty_struct *tty) if (dlci->state == DLCI_CLOSED) return; if (C_CRTSCTS(tty)) - dlci->modem_tx |= TIOCM_DTR; + dlci->modem_tx |= TIOCM_RTS; dlci->throttled = false; - /* Send an MSC with DTR set */ + /* Send an MSC with RTS set */ gsmtty_modem_update(dlci, 0); } diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index 8933ef1f83c0..efc72104c840 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c @@ -1329,7 +1329,7 @@ handle_newline: put_tty_queue(c, ldata); smp_store_release(&ldata->canon_head, ldata->read_head); kill_fasync(&tty->fasync, SIGIO, POLL_IN); - wake_up_interruptible_poll(&tty->read_wait, EPOLLIN); + wake_up_interruptible_poll(&tty->read_wait, EPOLLIN | EPOLLRDNORM); return; } } @@ -1561,7 +1561,7 @@ static void __receive_buf(struct tty_struct *tty, const unsigned char *cp, if (read_cnt(ldata)) { kill_fasync(&tty->fasync, SIGIO, POLL_IN); - wake_up_interruptible_poll(&tty->read_wait, EPOLLIN); + wake_up_interruptible_poll(&tty->read_wait, EPOLLIN | EPOLLRDNORM); } } @@ -1926,7 +1926,7 @@ static bool canon_copy_from_read_buf(struct tty_struct *tty, return false; canon_head = smp_load_acquire(&ldata->canon_head); - n = min(*nr + 1, canon_head - ldata->read_tail); + n = min(*nr, canon_head - ldata->read_tail); tail = ldata->read_tail & (N_TTY_BUF_SIZE - 1); size = min_t(size_t, tail + n, N_TTY_BUF_SIZE); @@ -1948,10 +1948,8 @@ static bool canon_copy_from_read_buf(struct tty_struct *tty, n += N_TTY_BUF_SIZE; c = n + found; - if (!found || read_buf(ldata, eol) != __DISABLED_CHAR) { - c = min(*nr, c); + if (!found || read_buf(ldata, eol) != __DISABLED_CHAR) n = c; - } n_tty_trace("%s: eol:%zu found:%d n:%zu c:%zu tail:%zu more:%zu\n", __func__, eol, found, n, c, tail, more); diff --git a/drivers/tty/serial/8250/8250_gsc.c b/drivers/tty/serial/8250/8250_gsc.c index 673cda3d011d..948d0a1c6ae8 100644 --- a/drivers/tty/serial/8250/8250_gsc.c +++ b/drivers/tty/serial/8250/8250_gsc.c @@ -26,7 +26,7 @@ static int __init serial_init_chip(struct parisc_device *dev) unsigned long address; int err; -#ifdef CONFIG_64BIT +#if defined(CONFIG_64BIT) && defined(CONFIG_IOSAPIC) if (!dev->irq && (dev->id.sversion == 0xad)) dev->irq = iosapic_serial_irq(dev); #endif diff --git a/drivers/tty/serial/8250/8250_pericom.c b/drivers/tty/serial/8250/8250_pericom.c index 025b055363c3..95ff10f25d58 100644 --- a/drivers/tty/serial/8250/8250_pericom.c +++ b/drivers/tty/serial/8250/8250_pericom.c @@ -117,7 +117,7 @@ static int pericom8250_probe(struct pci_dev *pdev, const struct pci_device_id *i uart.port.private_data = pericom; uart.port.iotype = UPIO_PORT; uart.port.uartclk = 921600 * 16; - uart.port.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF | UPF_SHARE_IRQ | UPF_MAGIC_MULTIPLIER; + uart.port.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF | UPF_SHARE_IRQ; uart.port.set_divisor = pericom_do_set_divisor; for (i = 0; i < nr && i < maxnr; i++) { unsigned int offset = (i == 3 && nr == 4) ? 
0x38 : i * 0x8; diff --git a/drivers/tty/serial/max3100.c b/drivers/tty/serial/max3100.c index 3c92d4e01488..516cff362434 100644 --- a/drivers/tty/serial/max3100.c +++ b/drivers/tty/serial/max3100.c @@ -805,7 +805,7 @@ static int max3100_probe(struct spi_device *spi) return 0; } -static int max3100_remove(struct spi_device *spi) +static void max3100_remove(struct spi_device *spi) { struct max3100_port *s = spi_get_drvdata(spi); int i; @@ -828,13 +828,12 @@ static int max3100_remove(struct spi_device *spi) for (i = 0; i < MAX_MAX3100; i++) if (max3100s[i]) { mutex_unlock(&max3100s_lock); - return 0; + return; } pr_debug("removing max3100 driver\n"); uart_unregister_driver(&max3100_uart_driver); mutex_unlock(&max3100s_lock); - return 0; } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c index dde0824b2fa5..3112b4a05448 100644 --- a/drivers/tty/serial/max310x.c +++ b/drivers/tty/serial/max310x.c @@ -1487,10 +1487,9 @@ static int max310x_spi_probe(struct spi_device *spi) return max310x_probe(&spi->dev, devtype, regmap, spi->irq); } -static int max310x_spi_remove(struct spi_device *spi) +static void max310x_spi_remove(struct spi_device *spi) { max310x_remove(&spi->dev); - return 0; } static const struct spi_device_id max310x_id_table[] = { diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c index 64e7e6c8145f..3a6c68e19c80 100644 --- a/drivers/tty/serial/sc16is7xx.c +++ b/drivers/tty/serial/sc16is7xx.c @@ -734,12 +734,15 @@ static irqreturn_t sc16is7xx_irq(int irq, void *dev_id) static void sc16is7xx_tx_proc(struct kthread_work *ws) { struct uart_port *port = &(to_sc16is7xx_one(ws, tx_work)->port); + struct sc16is7xx_port *s = dev_get_drvdata(port->dev); if ((port->rs485.flags & SER_RS485_ENABLED) && (port->rs485.delay_rts_before_send > 0)) msleep(port->rs485.delay_rts_before_send); + mutex_lock(&s->efr_lock); sc16is7xx_handle_tx(port); + mutex_unlock(&s->efr_lock); } static void sc16is7xx_reconf_rs485(struct uart_port *port) @@ -1440,11 +1443,9 @@ static int sc16is7xx_spi_probe(struct spi_device *spi) return sc16is7xx_probe(&spi->dev, devtype, regmap, spi->irq); } -static int sc16is7xx_spi_remove(struct spi_device *spi) +static void sc16is7xx_spi_remove(struct spi_device *spi) { sc16is7xx_remove(&spi->dev); - - return 0; } static const struct spi_device_id sc16is7xx_spi_id_table[] = { diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c index 3639bb6dc372..58013698635f 100644 --- a/drivers/tty/vt/vt_ioctl.c +++ b/drivers/tty/vt/vt_ioctl.c @@ -599,8 +599,8 @@ static int vt_setactivate(struct vt_setactivate __user *sa) if (vsa.console == 0 || vsa.console > MAX_NR_CONSOLES) return -ENXIO; - vsa.console = array_index_nospec(vsa.console, MAX_NR_CONSOLES + 1); vsa.console--; + vsa.console = array_index_nospec(vsa.console, MAX_NR_CONSOLES); console_lock(); ret = vc_allocate(vsa.console); if (ret) { @@ -845,6 +845,7 @@ int vt_ioctl(struct tty_struct *tty, return -ENXIO; arg--; + arg = array_index_nospec(arg, MAX_NR_CONSOLES); console_lock(); ret = vc_allocate(arg); console_unlock(); diff --git a/drivers/usb/common/ulpi.c b/drivers/usb/common/ulpi.c index 8f8405b0d608..5509d3847af4 100644 --- a/drivers/usb/common/ulpi.c +++ b/drivers/usb/common/ulpi.c @@ -130,6 +130,7 @@ static const struct attribute_group *ulpi_dev_attr_groups[] = { static void ulpi_dev_release(struct device *dev) { + of_node_put(dev->of_node); kfree(to_ulpi_dev(dev)); } @@ -247,12 +248,16 @@ static int ulpi_register(struct device *dev, struct ulpi 
*ulpi) return ret; ret = ulpi_read_id(ulpi); - if (ret) + if (ret) { + of_node_put(ulpi->dev.of_node); return ret; + } ret = device_register(&ulpi->dev); - if (ret) + if (ret) { + put_device(&ulpi->dev); return ret; + } dev_dbg(&ulpi->dev, "registered ULPI PHY: vendor %04x, product %04x\n", ulpi->id.vendor, ulpi->id.product); @@ -299,7 +304,6 @@ EXPORT_SYMBOL_GPL(ulpi_register_interface); */ void ulpi_unregister_interface(struct ulpi *ulpi) { - of_node_put(ulpi->dev.of_node); device_unregister(&ulpi->dev); } EXPORT_SYMBOL_GPL(ulpi_unregister_interface); diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c index c2bbf97a79be..d5bc36ca5b1f 100644 --- a/drivers/usb/core/port.c +++ b/drivers/usb/core/port.c @@ -602,11 +602,14 @@ int usb_hub_create_port_device(struct usb_hub *hub, int port1) return retval; } - find_and_link_peer(hub, port1); - retval = component_add(&port_dev->dev, &connector_ops); - if (retval) + if (retval) { dev_warn(&port_dev->dev, "failed to add component\n"); + device_unregister(&port_dev->dev); + return retval; + } + + find_and_link_peer(hub, port1); /* * Enable runtime pm and hold a refernce that hub_configure() diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h index 8a63da3ab39d..88c337bf564f 100644 --- a/drivers/usb/dwc2/core.h +++ b/drivers/usb/dwc2/core.h @@ -1418,6 +1418,7 @@ void dwc2_hsotg_core_connect(struct dwc2_hsotg *hsotg); void dwc2_hsotg_disconnect(struct dwc2_hsotg *dwc2); int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg, int testmode); #define dwc2_is_device_connected(hsotg) (hsotg->connected) +#define dwc2_is_device_enabled(hsotg) (hsotg->enabled) int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg); int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg, int remote_wakeup); int dwc2_gadget_enter_hibernation(struct dwc2_hsotg *hsotg); @@ -1454,6 +1455,7 @@ static inline int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg, int testmode) { return 0; } #define dwc2_is_device_connected(hsotg) (0) +#define dwc2_is_device_enabled(hsotg) (0) static inline int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg) { return 0; } static inline int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg, diff --git a/drivers/usb/dwc2/drd.c b/drivers/usb/dwc2/drd.c index 1b39c4776369..d8d6493bc457 100644 --- a/drivers/usb/dwc2/drd.c +++ b/drivers/usb/dwc2/drd.c @@ -130,8 +130,10 @@ static int dwc2_drd_role_sw_set(struct usb_role_switch *sw, enum usb_role role) already = dwc2_ovr_avalid(hsotg, true); } else if (role == USB_ROLE_DEVICE) { already = dwc2_ovr_bvalid(hsotg, true); - /* This clear DCTL.SFTDISCON bit */ - dwc2_hsotg_core_connect(hsotg); + if (dwc2_is_device_enabled(hsotg)) { + /* This clear DCTL.SFTDISCON bit */ + dwc2_hsotg_core_connect(hsotg); + } } else { if (dwc2_is_device_mode(hsotg)) { if (!dwc2_ovr_bvalid(hsotg, false)) diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index 7ff8fc8f79a9..06d0e88ec8af 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c @@ -43,6 +43,7 @@ #define PCI_DEVICE_ID_INTEL_ADLP 0x51ee #define PCI_DEVICE_ID_INTEL_ADLM 0x54ee #define PCI_DEVICE_ID_INTEL_ADLS 0x7ae1 +#define PCI_DEVICE_ID_INTEL_RPLS 0x7a61 #define PCI_DEVICE_ID_INTEL_TGL 0x9a15 #define PCI_DEVICE_ID_AMD_MR 0x163a @@ -85,8 +86,8 @@ static const struct acpi_gpio_mapping acpi_dwc3_byt_gpios[] = { static struct gpiod_lookup_table platform_bytcr_gpios = { .dev_id = "0000:00:16.0", .table = { - GPIO_LOOKUP("INT33FC:00", 54, "reset", GPIO_ACTIVE_HIGH), - GPIO_LOOKUP("INT33FC:02", 14, 
"cs", GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("INT33FC:00", 54, "cs", GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("INT33FC:02", 14, "reset", GPIO_ACTIVE_HIGH), {} }, }; @@ -119,6 +120,13 @@ static const struct property_entry dwc3_pci_intel_properties[] = { {} }; +static const struct property_entry dwc3_pci_intel_byt_properties[] = { + PROPERTY_ENTRY_STRING("dr_mode", "peripheral"), + PROPERTY_ENTRY_BOOL("snps,dis_u2_susphy_quirk"), + PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"), + {} +}; + static const struct property_entry dwc3_pci_mrfld_properties[] = { PROPERTY_ENTRY_STRING("dr_mode", "otg"), PROPERTY_ENTRY_STRING("linux,extcon-name", "mrfld_bcove_pwrsrc"), @@ -161,6 +169,10 @@ static const struct software_node dwc3_pci_intel_swnode = { .properties = dwc3_pci_intel_properties, }; +static const struct software_node dwc3_pci_intel_byt_swnode = { + .properties = dwc3_pci_intel_byt_properties, +}; + static const struct software_node dwc3_pci_intel_mrfld_swnode = { .properties = dwc3_pci_mrfld_properties, }; @@ -344,7 +356,7 @@ static const struct pci_device_id dwc3_pci_id_table[] = { (kernel_ulong_t) &dwc3_pci_intel_swnode, }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BYT), - (kernel_ulong_t) &dwc3_pci_intel_swnode, }, + (kernel_ulong_t) &dwc3_pci_intel_byt_swnode, }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MRFLD), (kernel_ulong_t) &dwc3_pci_intel_mrfld_swnode, }, @@ -409,6 +421,9 @@ static const struct pci_device_id dwc3_pci_id_table[] = { { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADLS), (kernel_ulong_t) &dwc3_pci_intel_swnode, }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_RPLS), + (kernel_ulong_t) &dwc3_pci_intel_swnode, }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL), (kernel_ulong_t) &dwc3_pci_intel_swnode, }, diff --git a/drivers/usb/dwc3/dwc3-xilinx.c b/drivers/usb/dwc3/dwc3-xilinx.c index e14ac15e24c3..a6f3a9b38789 100644 --- a/drivers/usb/dwc3/dwc3-xilinx.c +++ b/drivers/usb/dwc3/dwc3-xilinx.c @@ -99,7 +99,7 @@ static int dwc3_xlnx_init_zynqmp(struct dwc3_xlnx *priv_data) struct device *dev = priv_data->dev; struct reset_control *crst, *hibrst, *apbrst; struct phy *usb3_phy; - int ret; + int ret = 0; u32 reg; usb3_phy = devm_phy_optional_get(dev, "usb3-phy"); diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 520031ba38aa..a0c883f19a41 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -1291,6 +1291,19 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb, if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable) trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(stream_id); + /* + * As per data book 4.2.3.2TRB Control Bit Rules section + * + * The controller autonomously checks the HWO field of a TRB to determine if the + * entire TRB is valid. Therefore, software must ensure that the rest of the TRB + * is valid before setting the HWO field to '1'. In most systems, this means that + * software must update the fourth DWORD of a TRB last. + * + * However there is a possibility of CPU re-ordering here which can cause + * controller to observe the HWO bit set prematurely. + * Add a write memory barrier to prevent CPU re-ordering. 
+ */ + wmb(); trb->ctrl |= DWC3_TRB_CTRL_HWO; dwc3_ep_inc_enq(dep); @@ -4147,9 +4160,11 @@ static irqreturn_t dwc3_thread_interrupt(int irq, void *_evt) unsigned long flags; irqreturn_t ret = IRQ_NONE; + local_bh_disable(); spin_lock_irqsave(&dwc->lock, flags); ret = dwc3_process_event_buf(evt); spin_unlock_irqrestore(&dwc->lock, flags); + local_bh_enable(); return ret; } diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index 16f9e3423c9f..9315313108c9 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -1988,6 +1988,9 @@ unknown: if (w_index != 0x5 || (w_value >> 8)) break; interface = w_value & 0xFF; + if (interface >= MAX_CONFIG_INTERFACES || + !os_desc_cfg->interface[interface]) + break; buf[6] = w_index; count = count_ext_prop(os_desc_cfg, interface); diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 25ad1e97a458..1922fd02043c 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -1711,16 +1711,24 @@ static void ffs_data_put(struct ffs_data *ffs) static void ffs_data_closed(struct ffs_data *ffs) { + struct ffs_epfile *epfiles; + unsigned long flags; + ENTER(); if (atomic_dec_and_test(&ffs->opened)) { if (ffs->no_disconnect) { ffs->state = FFS_DEACTIVATED; - if (ffs->epfiles) { - ffs_epfiles_destroy(ffs->epfiles, - ffs->eps_count); - ffs->epfiles = NULL; - } + spin_lock_irqsave(&ffs->eps_lock, flags); + epfiles = ffs->epfiles; + ffs->epfiles = NULL; + spin_unlock_irqrestore(&ffs->eps_lock, + flags); + + if (epfiles) + ffs_epfiles_destroy(epfiles, + ffs->eps_count); + if (ffs->setup_state == FFS_SETUP_PENDING) __ffs_ep0_stall(ffs); } else { @@ -1767,14 +1775,27 @@ static struct ffs_data *ffs_data_new(const char *dev_name) static void ffs_data_clear(struct ffs_data *ffs) { + struct ffs_epfile *epfiles; + unsigned long flags; + ENTER(); ffs_closed(ffs); BUG_ON(ffs->gadget); - if (ffs->epfiles) { - ffs_epfiles_destroy(ffs->epfiles, ffs->eps_count); + spin_lock_irqsave(&ffs->eps_lock, flags); + epfiles = ffs->epfiles; + ffs->epfiles = NULL; + spin_unlock_irqrestore(&ffs->eps_lock, flags); + + /* + * potential race possible between ffs_func_eps_disable + * & ffs_epfile_release therefore maintaining a local + * copy of epfile will save us from use-after-free. 
+ */ + if (epfiles) { + ffs_epfiles_destroy(epfiles, ffs->eps_count); ffs->epfiles = NULL; } @@ -1922,12 +1943,15 @@ static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count) static void ffs_func_eps_disable(struct ffs_function *func) { - struct ffs_ep *ep = func->eps; - struct ffs_epfile *epfile = func->ffs->epfiles; - unsigned count = func->ffs->eps_count; + struct ffs_ep *ep; + struct ffs_epfile *epfile; + unsigned short count; unsigned long flags; spin_lock_irqsave(&func->ffs->eps_lock, flags); + count = func->ffs->eps_count; + epfile = func->ffs->epfiles; + ep = func->eps; while (count--) { /* pending requests get nuked */ if (ep->ep) @@ -1945,14 +1969,18 @@ static void ffs_func_eps_disable(struct ffs_function *func) static int ffs_func_eps_enable(struct ffs_function *func) { - struct ffs_data *ffs = func->ffs; - struct ffs_ep *ep = func->eps; - struct ffs_epfile *epfile = ffs->epfiles; - unsigned count = ffs->eps_count; + struct ffs_data *ffs; + struct ffs_ep *ep; + struct ffs_epfile *epfile; + unsigned short count; unsigned long flags; int ret = 0; spin_lock_irqsave(&func->ffs->eps_lock, flags); + ffs = func->ffs; + ep = func->eps; + epfile = ffs->epfiles; + count = ffs->eps_count; while(count--) { ep->ep->driver_data = ep; diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c index 36fa6ef0581b..097a709549d6 100644 --- a/drivers/usb/gadget/function/f_uac2.c +++ b/drivers/usb/gadget/function/f_uac2.c @@ -203,7 +203,7 @@ static struct uac2_input_terminal_descriptor io_in_it_desc = { .bDescriptorSubtype = UAC_INPUT_TERMINAL, /* .bTerminalID = DYNAMIC */ - .wTerminalType = cpu_to_le16(UAC_INPUT_TERMINAL_UNDEFINED), + .wTerminalType = cpu_to_le16(UAC_INPUT_TERMINAL_MICROPHONE), .bAssocTerminal = 0, /* .bCSourceID = DYNAMIC */ .iChannelNames = 0, @@ -231,7 +231,7 @@ static struct uac2_output_terminal_descriptor io_out_ot_desc = { .bDescriptorSubtype = UAC_OUTPUT_TERMINAL, /* .bTerminalID = DYNAMIC */ - .wTerminalType = cpu_to_le16(UAC_OUTPUT_TERMINAL_UNDEFINED), + .wTerminalType = cpu_to_le16(UAC_OUTPUT_TERMINAL_SPEAKER), .bAssocTerminal = 0, /* .bSourceID = DYNAMIC */ /* .bCSourceID = DYNAMIC */ diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c index 431d5a7d737e..00b3f6b3bb31 100644 --- a/drivers/usb/gadget/function/rndis.c +++ b/drivers/usb/gadget/function/rndis.c @@ -637,14 +637,17 @@ static int rndis_set_response(struct rndis_params *params, rndis_set_cmplt_type *resp; rndis_resp_t *r; + BufLength = le32_to_cpu(buf->InformationBufferLength); + BufOffset = le32_to_cpu(buf->InformationBufferOffset); + if ((BufLength > RNDIS_MAX_TOTAL_SIZE) || + (BufOffset + 8 >= RNDIS_MAX_TOTAL_SIZE)) + return -EINVAL; + r = rndis_add_response(params, sizeof(rndis_set_cmplt_type)); if (!r) return -ENOMEM; resp = (rndis_set_cmplt_type *)r->buf; - BufLength = le32_to_cpu(buf->InformationBufferLength); - BufOffset = le32_to_cpu(buf->InformationBufferOffset); - #ifdef VERBOSE_DEBUG pr_debug("%s: Length: %d\n", __func__, BufLength); pr_debug("%s: Offset: %d\n", __func__, BufOffset); @@ -919,6 +922,7 @@ struct rndis_params *rndis_register(void (*resp_avail)(void *v), void *v) params->resp_avail = resp_avail; params->v = v; INIT_LIST_HEAD(&params->resp_queue); + spin_lock_init(&params->resp_lock); pr_debug("%s: configNr = %d\n", __func__, i); return params; @@ -1012,12 +1016,14 @@ void rndis_free_response(struct rndis_params *params, u8 *buf) { rndis_resp_t *r, *n; + spin_lock(&params->resp_lock); list_for_each_entry_safe(r, n, 
&params->resp_queue, list) { if (r->buf == buf) { list_del(&r->list); kfree(r); } } + spin_unlock(&params->resp_lock); } EXPORT_SYMBOL_GPL(rndis_free_response); @@ -1027,14 +1033,17 @@ u8 *rndis_get_next_response(struct rndis_params *params, u32 *length) if (!length) return NULL; + spin_lock(&params->resp_lock); list_for_each_entry_safe(r, n, &params->resp_queue, list) { if (!r->send) { r->send = 1; *length = r->length; + spin_unlock(&params->resp_lock); return r->buf; } } + spin_unlock(&params->resp_lock); return NULL; } EXPORT_SYMBOL_GPL(rndis_get_next_response); @@ -1051,7 +1060,9 @@ static rndis_resp_t *rndis_add_response(struct rndis_params *params, u32 length) r->length = length; r->send = 0; + spin_lock(&params->resp_lock); list_add_tail(&r->list, &params->resp_queue); + spin_unlock(&params->resp_lock); return r; } diff --git a/drivers/usb/gadget/function/rndis.h b/drivers/usb/gadget/function/rndis.h index f6167f7fea82..6206b8b7490f 100644 --- a/drivers/usb/gadget/function/rndis.h +++ b/drivers/usb/gadget/function/rndis.h @@ -174,6 +174,7 @@ typedef struct rndis_params { void (*resp_avail)(void *v); void *v; struct list_head resp_queue; + spinlock_t resp_lock; } rndis_params; /* RNDIS Message parser and other useless functions */ diff --git a/drivers/usb/gadget/legacy/raw_gadget.c b/drivers/usb/gadget/legacy/raw_gadget.c index c5a2c734234a..d86c3a36441e 100644 --- a/drivers/usb/gadget/legacy/raw_gadget.c +++ b/drivers/usb/gadget/legacy/raw_gadget.c @@ -1004,7 +1004,7 @@ static int raw_process_ep_io(struct raw_dev *dev, struct usb_raw_ep_io *io, ret = -EBUSY; goto out_unlock; } - if ((in && !ep->ep->caps.dir_in) || (!in && ep->ep->caps.dir_in)) { + if (in != usb_endpoint_dir_in(ep->ep->desc)) { dev_dbg(&dev->gadget->dev, "fail, wrong direction\n"); ret = -EINVAL; goto out_unlock; diff --git a/drivers/usb/gadget/udc/max3420_udc.c b/drivers/usb/gadget/udc/max3420_udc.c index d2a2b20cc1ad..7d9bd16190c0 100644 --- a/drivers/usb/gadget/udc/max3420_udc.c +++ b/drivers/usb/gadget/udc/max3420_udc.c @@ -1292,7 +1292,7 @@ del_gadget: return err; } -static int max3420_remove(struct spi_device *spi) +static void max3420_remove(struct spi_device *spi) { struct max3420_udc *udc = spi_get_drvdata(spi); unsigned long flags; @@ -1304,8 +1304,6 @@ static void max3420_remove(struct spi_device *spi) kthread_stop(udc->thread_task); spin_unlock_irqrestore(&udc->lock, flags); - - return 0; } static const struct of_device_id max3420_udc_of_match[] = { diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c index 57d417a7c3e0..601829a6b4ba 100644 --- a/drivers/usb/gadget/udc/renesas_usb3.c +++ b/drivers/usb/gadget/udc/renesas_usb3.c @@ -2378,6 +2378,8 @@ static void handle_ext_role_switch_states(struct device *dev, switch (role) { case USB_ROLE_NONE: usb3->connection_state = USB_ROLE_NONE; + if (cur_role == USB_ROLE_HOST) + device_release_driver(host); if (usb3->driver) usb3_disconnect(usb3); usb3_vbus_out(usb3, false); diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c index 6ce886fb7bfe..2907fad04e2c 100644 --- a/drivers/usb/gadget/udc/udc-xilinx.c +++ b/drivers/usb/gadget/udc/udc-xilinx.c @@ -1615,6 +1615,8 @@ static void xudc_getstatus(struct xusb_udc *udc) break; case USB_RECIP_ENDPOINT: epnum = udc->setup.wIndex & USB_ENDPOINT_NUMBER_MASK; + if (epnum >= XUSB_MAX_ENDPOINTS) + goto stall; target_ep = &udc->ep[epnum]; epcfgreg = udc->read_fn(udc->addr + target_ep->offset); halt = epcfgreg & XUSB_EP_CFG_STALL_MASK; @@ -1682,6 +1684,10 @@ static void 
xudc_set_clear_feature(struct xusb_udc *udc) case USB_RECIP_ENDPOINT: if (!udc->setup.wValue) { endpoint = udc->setup.wIndex & USB_ENDPOINT_NUMBER_MASK; + if (endpoint >= XUSB_MAX_ENDPOINTS) { + xudc_ep0_stall(udc); + return; + } target_ep = &udc->ep[endpoint]; outinbit = udc->setup.wIndex & USB_ENDPOINT_DIR_MASK; outinbit = outinbit >> 7; diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c index 30de85a707fe..99a5523a79fb 100644 --- a/drivers/usb/host/max3421-hcd.c +++ b/drivers/usb/host/max3421-hcd.c @@ -1926,7 +1926,7 @@ error: return retval; } -static int +static void max3421_remove(struct spi_device *spi) { struct max3421_hcd *max3421_hcd; @@ -1947,7 +1947,6 @@ max3421_remove(struct spi_device *spi) free_irq(spi->irq, hcd); usb_put_hcd(hcd); - return 0; } static const struct of_device_id max3421_of_match_table[] = { diff --git a/drivers/usb/host/xen-hcd.c b/drivers/usb/host/xen-hcd.c index be09fd9bac58..19b8c7ed74cb 100644 --- a/drivers/usb/host/xen-hcd.c +++ b/drivers/usb/host/xen-hcd.c @@ -716,8 +716,9 @@ static int xenhcd_map_urb_for_request(struct xenhcd_info *info, struct urb *urb, return 0; } -static void xenhcd_gnttab_done(struct usb_shadow *shadow) +static void xenhcd_gnttab_done(struct xenhcd_info *info, unsigned int id) { + struct usb_shadow *shadow = info->shadow + id; int nr_segs = 0; int i; @@ -726,8 +727,10 @@ static void xenhcd_gnttab_done(struct usb_shadow *shadow) if (xenusb_pipeisoc(shadow->req.pipe)) nr_segs += shadow->req.u.isoc.nr_frame_desc_segs; - for (i = 0; i < nr_segs; i++) - gnttab_end_foreign_access(shadow->req.seg[i].gref, 0, 0UL); + for (i = 0; i < nr_segs; i++) { + if (!gnttab_try_end_foreign_access(shadow->req.seg[i].gref)) + xenhcd_set_error(info, "backend didn't release grant"); + } shadow->req.nr_buffer_segs = 0; shadow->req.u.isoc.nr_frame_desc_segs = 0; @@ -841,7 +844,9 @@ static void xenhcd_cancel_all_enqueued_urbs(struct xenhcd_info *info) list_for_each_entry_safe(urbp, tmp, &info->in_progress_list, list) { req_id = urbp->req_id; if (!urbp->unlinked) { - xenhcd_gnttab_done(&info->shadow[req_id]); + xenhcd_gnttab_done(info, req_id); + if (info->error) + return; if (urbp->urb->status == -EINPROGRESS) /* not dequeued */ xenhcd_giveback_urb(info, urbp->urb, @@ -942,8 +947,7 @@ static int xenhcd_urb_request_done(struct xenhcd_info *info) rp = info->urb_ring.sring->rsp_prod; if (RING_RESPONSE_PROD_OVERFLOW(&info->urb_ring, rp)) { xenhcd_set_error(info, "Illegal index on urb-ring"); - spin_unlock_irqrestore(&info->lock, flags); - return 0; + goto err; } rmb(); /* ensure we see queued responses up to "rp" */ @@ -952,11 +956,13 @@ static int xenhcd_urb_request_done(struct xenhcd_info *info) id = res.id; if (id >= XENUSB_URB_RING_SIZE) { xenhcd_set_error(info, "Illegal data on urb-ring"); - continue; + goto err; } if (likely(xenusb_pipesubmit(info->shadow[id].req.pipe))) { - xenhcd_gnttab_done(&info->shadow[id]); + xenhcd_gnttab_done(info, id); + if (info->error) + goto err; urb = info->shadow[id].urb; if (likely(urb)) { urb->actual_length = res.actual_length; @@ -978,6 +984,10 @@ static int xenhcd_urb_request_done(struct xenhcd_info *info) spin_unlock_irqrestore(&info->lock, flags); return more_to_do; + + err: + spin_unlock_irqrestore(&info->lock, flags); + return 0; } static int xenhcd_conn_notify(struct xenhcd_info *info) diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index dc357cabb265..2d378543bc3a 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -1091,6 +1091,7 @@ int xhci_resume(struct 
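/*
 * Illustrative summary of the xen-hcd hunks above: everything coming
 * from the backend (ring indices, grant releases) is treated as
 * untrusted, and a violation now latches a connection-wide error
 * instead of being skipped:
 *
 *	if (id >= XENUSB_URB_RING_SIZE) {
 *		xenhcd_set_error(info, "Illegal data on urb-ring");
 *		goto err;	// unlock and stop consuming responses
 *	}
 */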
xhci_hcd *xhci, bool hibernated) int retval = 0; bool comp_timer_running = false; bool pending_portevent = false; + bool reinit_xhc = false; if (!hcd->state) return 0; @@ -1107,10 +1108,11 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags); spin_lock_irq(&xhci->lock); - if ((xhci->quirks & XHCI_RESET_ON_RESUME) || xhci->broken_suspend) - hibernated = true; - if (!hibernated) { + if (hibernated || xhci->quirks & XHCI_RESET_ON_RESUME || xhci->broken_suspend) + reinit_xhc = true; + + if (!reinit_xhc) { /* * Some controllers might lose power during suspend, so wait * for controller not ready bit to clear, just as in xHC init. @@ -1143,12 +1145,17 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) spin_unlock_irq(&xhci->lock); return -ETIMEDOUT; } - temp = readl(&xhci->op_regs->status); } - /* If restore operation fails, re-initialize the HC during resume */ - if ((temp & STS_SRE) || hibernated) { + temp = readl(&xhci->op_regs->status); + /* re-initialize the HC on Restore Error, or Host Controller Error */ + if (temp & (STS_SRE | STS_HCE)) { + reinit_xhc = true; + xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp); + } + + if (reinit_xhc) { if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !(xhci_all_ports_seen_u0(xhci))) { del_timer_sync(&xhci->comp_mode_recovery_timer); @@ -1604,9 +1611,12 @@ static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag struct urb_priv *urb_priv; int num_tds; - if (!urb || xhci_check_args(hcd, urb->dev, urb->ep, - true, true, __func__) <= 0) + if (!urb) return -EINVAL; + ret = xhci_check_args(hcd, urb->dev, urb->ep, + true, true, __func__); + if (ret <= 0) + return ret ? ret : -EINVAL; slot_id = urb->dev->slot_id; ep_index = xhci_get_endpoint_index(&urb->ep->desc); @@ -3323,7 +3333,7 @@ static int xhci_check_streams_endpoint(struct xhci_hcd *xhci, return -EINVAL; ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__); if (ret <= 0) - return -EINVAL; + return ret ? ret : -EINVAL; if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) { xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion" " descriptor for ep 0x%x does not support streams\n", diff --git a/drivers/usb/misc/usb251xb.c b/drivers/usb/misc/usb251xb.c index 507deef1f709..04c4e3fed094 100644 --- a/drivers/usb/misc/usb251xb.c +++ b/drivers/usb/misc/usb251xb.c @@ -543,6 +543,9 @@ static int usb251xb_get_ofdata(struct usb251xb *hub, if (of_property_read_u16_array(np, "language-id", &hub->lang_id, 1)) hub->lang_id = USB251XB_DEF_LANGUAGE_ID; + if (of_property_read_u8(np, "boost-up", &hub->boost_up)) + hub->boost_up = USB251XB_DEF_BOOST_UP; + cproperty_char = of_get_property(np, "manufacturer", NULL); strlcpy(str, cproperty_char ? : USB251XB_DEF_MANUFACTURER_STRING, sizeof(str)); @@ -584,7 +587,6 @@ static int usb251xb_get_ofdata(struct usb251xb *hub, * may be as soon as needed. 
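 *
 * (Illustrative aside: with the boost-up hunk above, a board can now
 * override the default via a u8 device-tree property, e.g. the
 * hypothetical fragment
 *
 *	boost-up = /bits/ 8 <1>;
 *
 * while hubs whose DT omits the property keep USB251XB_DEF_BOOST_UP,
 * so existing device trees behave as before.)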
*/ hub->bat_charge_en = USB251XB_DEF_BATTERY_CHARGING_ENABLE; - hub->boost_up = USB251XB_DEF_BOOST_UP; hub->boost_57 = USB251XB_DEF_BOOST_57; hub->boost_14 = USB251XB_DEF_BOOST_14; hub->port_map12 = USB251XB_DEF_PORT_MAP_12; diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c index 29f4b87a9e74..2798fca71261 100644 --- a/drivers/usb/serial/ch341.c +++ b/drivers/usb/serial/ch341.c @@ -81,10 +81,10 @@ #define CH341_QUIRK_SIMULATE_BREAK BIT(1) static const struct usb_device_id id_table[] = { - { USB_DEVICE(0x1a86, 0x5512) }, { USB_DEVICE(0x1a86, 0x5523) }, { USB_DEVICE(0x1a86, 0x7522) }, { USB_DEVICE(0x1a86, 0x7523) }, + { USB_DEVICE(0x2184, 0x0057) }, { USB_DEVICE(0x4348, 0x5523) }, { USB_DEVICE(0x9986, 0x7523) }, { }, diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index 8a60c0d56863..a27f7efcec6a 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -51,6 +51,7 @@ static void cp210x_enable_event_mode(struct usb_serial_port *port); static void cp210x_disable_event_mode(struct usb_serial_port *port); static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x0404, 0x034C) }, /* NCR Retail IO Box */ { USB_DEVICE(0x045B, 0x0053) }, /* Renesas RX610 RX-Stick */ { USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */ { USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */ @@ -68,6 +69,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x0FCF, 0x1004) }, /* Dynastream ANT2USB */ { USB_DEVICE(0x0FCF, 0x1006) }, /* Dynastream ANT development board */ { USB_DEVICE(0x0FDE, 0xCA05) }, /* OWL Wireless Electricity Monitor CM-160 */ + { USB_DEVICE(0x106F, 0x0003) }, /* CPI / Money Controls Bulk Coin Recycler */ { USB_DEVICE(0x10A6, 0xAA26) }, /* Knock-off DCU-11 cable */ { USB_DEVICE(0x10AB, 0x10C5) }, /* Siemens MC60 Cable */ { USB_DEVICE(0x10B5, 0xAC70) }, /* Nokia CA-42 USB */ diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 4edebd14ef29..49c08f07c969 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -969,6 +969,7 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_VX_023_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_VX_034_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_101_PID) }, + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_159_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_1_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_2_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_3_PID) }, @@ -977,12 +978,14 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_6_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_7_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_8_PID) }, + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_235_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_257_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_1_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_2_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_3_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_4_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_313_PID) }, + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_320_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_324_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_346_1_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_346_2_PID) }, diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 
755858ca20ba..d1a9564697a4 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -1506,6 +1506,9 @@ #define BRAINBOXES_VX_023_PID 0x1003 /* VX-023 ExpressCard 1 Port RS422/485 */ #define BRAINBOXES_VX_034_PID 0x1004 /* VX-034 ExpressCard 2 Port RS422/485 */ #define BRAINBOXES_US_101_PID 0x1011 /* US-101 1xRS232 */ +#define BRAINBOXES_US_159_PID 0x1021 /* US-159 1xRS232 */ +#define BRAINBOXES_US_235_PID 0x1017 /* US-235 1xRS232 */ +#define BRAINBOXES_US_320_PID 0x1019 /* US-320 1xRS422/485 */ #define BRAINBOXES_US_324_PID 0x1013 /* US-324 1xRS422/485 1Mbaud */ #define BRAINBOXES_US_606_1_PID 0x2001 /* US-606 6 Port RS232 Serial Port 1 and 2 */ #define BRAINBOXES_US_606_2_PID 0x2002 /* US-606 6 Port RS232 Serial Port 3 and 4 */ diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 42420bfc983c..e7755d9cfc61 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -198,6 +198,8 @@ static void option_instat_callback(struct urb *urb); #define DELL_PRODUCT_5821E 0x81d7 #define DELL_PRODUCT_5821E_ESIM 0x81e0 +#define DELL_PRODUCT_5829E_ESIM 0x81e4 +#define DELL_PRODUCT_5829E 0x81e6 #define KYOCERA_VENDOR_ID 0x0c88 #define KYOCERA_PRODUCT_KPC650 0x17da @@ -1063,6 +1065,10 @@ static const struct usb_device_id option_ids[] = { .driver_info = RSVD(0) | RSVD(1) | RSVD(6) }, { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5821E_ESIM), .driver_info = RSVD(0) | RSVD(1) | RSVD(6) }, + { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5829E), + .driver_info = RSVD(0) | RSVD(6) }, + { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5829E_ESIM), + .driver_info = RSVD(0) | RSVD(6) }, { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */ { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) }, { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) }, @@ -1273,10 +1279,16 @@ static const struct usb_device_id option_ids[] = { .driver_info = NCTRL(2) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x7011, 0xff), /* Telit LE910-S1 (ECM) */ .driver_info = NCTRL(2) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x701a, 0xff), /* Telit LE910R1 (RNDIS) */ + .driver_info = NCTRL(2) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x701b, 0xff), /* Telit LE910R1 (ECM) */ + .driver_info = NCTRL(2) }, { USB_DEVICE(TELIT_VENDOR_ID, 0x9010), /* Telit SBL FN980 flashing device */ .driver_info = NCTRL(0) | ZLP }, { USB_DEVICE(TELIT_VENDOR_ID, 0x9200), /* Telit LE910S1 flashing device */ .driver_info = NCTRL(0) | ZLP }, + { USB_DEVICE(TELIT_VENDOR_ID, 0x9201), /* Telit LE910R1 flashing device */ + .driver_info = NCTRL(0) | ZLP }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff), .driver_info = RSVD(1) }, @@ -1649,6 +1661,8 @@ static const struct usb_device_id option_ids[] = { .driver_info = RSVD(2) }, { USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x1476, 0xff) }, /* GosunCn ZTE WeLink ME3630 (ECM/NCM mode) */ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1481, 0xff, 0x00, 0x00) }, /* ZTE MF871A */ + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1485, 0xff, 0xff, 0xff), /* ZTE MF286D */ + .driver_info = RSVD(5) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) }, diff --git a/drivers/usb/typec/tipd/core.c 
b/drivers/usb/typec/tipd/core.c index 6d27a5b5e3ca..7ffcda94d323 100644 --- a/drivers/usb/typec/tipd/core.c +++ b/drivers/usb/typec/tipd/core.c @@ -761,12 +761,12 @@ static int tps6598x_probe(struct i2c_client *client) ret = tps6598x_read32(tps, TPS_REG_STATUS, &status); if (ret < 0) - return ret; + goto err_clear_mask; trace_tps6598x_status(status); ret = tps6598x_read32(tps, TPS_REG_SYSTEM_CONF, &conf); if (ret < 0) - return ret; + goto err_clear_mask; /* * This fwnode has a "compatible" property, but is never populated as a @@ -855,7 +855,8 @@ err_role_put: usb_role_switch_put(tps->role_sw); err_fwnode_put: fwnode_handle_put(fwnode); - +err_clear_mask: + tps6598x_write64(tps, TPS_REG_INT_MASK1, 0); return ret; } diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c index f648f1c54a0f..d0f91078600e 100644 --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c @@ -1563,11 +1563,27 @@ static virtio_net_ctrl_ack handle_ctrl_mq(struct mlx5_vdpa_dev *mvdev, u8 cmd) switch (cmd) { case VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET: + /* This mq feature check aligns with pre-existing userspace + * implementation. + * + * Without it, an untrusted driver could fake a multiqueue config + * request down to a non-mq device that may cause kernel to + * panic due to uninitialized resources for extra vqs. Even with + * a well behaving guest driver, it is not expected to allow + * changing the number of vqs on a non-mq device. + */ + if (!MLX5_FEATURE(mvdev, VIRTIO_NET_F_MQ)) + break; + read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, (void *)&mq, sizeof(mq)); if (read != sizeof(mq)) break; newqps = mlx5vdpa16_to_cpu(mvdev, mq.virtqueue_pairs); + if (newqps < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN || + newqps > mlx5_vdpa_max_qps(mvdev->max_vqs)) + break; + if (ndev->cur_num_vqs == 2 * newqps) { status = VIRTIO_NET_OK; break; @@ -1897,11 +1913,25 @@ static u64 mlx5_vdpa_get_device_features(struct vdpa_device *vdev) return ndev->mvdev.mlx_features; } -static int verify_min_features(struct mlx5_vdpa_dev *mvdev, u64 features) +static int verify_driver_features(struct mlx5_vdpa_dev *mvdev, u64 features) { + /* Minimum features to expect */ if (!(features & BIT_ULL(VIRTIO_F_ACCESS_PLATFORM))) return -EOPNOTSUPP; + /* Double check features combination sent down by the driver. + * Fail invalid features due to absence of the depended feature. + * + * Per VIRTIO v1.1 specification, section 5.1.3.1 Feature bit + * requirements: "VIRTIO_NET_F_MQ Requires VIRTIO_NET_F_CTRL_VQ". + * By failing the invalid features sent down by untrusted drivers, + * we're assured the assumption made upon is_index_valid() and + * is_ctrl_vq_idx() will not be compromised. + */ + if ((features & (BIT_ULL(VIRTIO_NET_F_MQ) | BIT_ULL(VIRTIO_NET_F_CTRL_VQ))) == + BIT_ULL(VIRTIO_NET_F_MQ)) + return -EINVAL; + return 0; } @@ -1977,7 +2007,7 @@ static int mlx5_vdpa_set_driver_features(struct vdpa_device *vdev, u64 features) print_features(mvdev, features, true); - err = verify_min_features(mvdev, features); + err = verify_driver_features(mvdev, features); if (err) return err; diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c index 9846c9de4bfa..1ea525433a5c 100644 --- a/drivers/vdpa/vdpa.c +++ b/drivers/vdpa/vdpa.c @@ -393,7 +393,7 @@ static void vdpa_get_config_unlocked(struct vdpa_device *vdev, * If it does happen we assume a legacy guest. 
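 *
 * (Illustrative aside: "legacy" here means a transitional driver that
 * reads config space before negotiating features; passing 0 keeps that
 * behaviour. The switch to vdpa_set_features_unlocked() suggests this
 * path already holds the configuration lock, so the locked wrapper
 * would self-deadlock; that reading is an assumption based only on the
 * rename in this hunk.)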
*/ if (!vdev->features_valid) - vdpa_set_features(vdev, 0, true); + vdpa_set_features_unlocked(vdev, 0); ops->get_config(vdev, offset, buf, len); } diff --git a/drivers/vdpa/vdpa_user/iova_domain.c b/drivers/vdpa/vdpa_user/iova_domain.c index 2b1143f11d8f..0a4d93edc4c0 100644 --- a/drivers/vdpa/vdpa_user/iova_domain.c +++ b/drivers/vdpa/vdpa_user/iova_domain.c @@ -294,7 +294,7 @@ vduse_domain_alloc_iova(struct iova_domain *iovad, iova_pfn = alloc_iova_fast(iovad, iova_len, limit >> shift, true); - return iova_pfn << shift; + return (dma_addr_t)iova_pfn << shift; } static void vduse_domain_free_iova(struct iova_domain *iovad, diff --git a/drivers/vdpa/virtio_pci/vp_vdpa.c b/drivers/vdpa/virtio_pci/vp_vdpa.c index a57e381e830b..cce101e6a940 100644 --- a/drivers/vdpa/virtio_pci/vp_vdpa.c +++ b/drivers/vdpa/virtio_pci/vp_vdpa.c @@ -533,8 +533,8 @@ static void vp_vdpa_remove(struct pci_dev *pdev) { struct vp_vdpa *vp_vdpa = pci_get_drvdata(pdev); - vdpa_unregister_device(&vp_vdpa->vdpa); vp_modern_remove(&vp_vdpa->mdev); + vdpa_unregister_device(&vp_vdpa->vdpa); } static struct pci_driver vp_vdpa_driver = { diff --git a/drivers/vhost/iotlb.c b/drivers/vhost/iotlb.c index 670d56c879e5..40b098320b2a 100644 --- a/drivers/vhost/iotlb.c +++ b/drivers/vhost/iotlb.c @@ -57,6 +57,17 @@ int vhost_iotlb_add_range_ctx(struct vhost_iotlb *iotlb, if (last < start) return -EFAULT; + /* If the range being mapped is [0, ULONG_MAX], split it into two entries + * otherwise its size would overflow u64. + */ + if (start == 0 && last == ULONG_MAX) { + u64 mid = last / 2; + + vhost_iotlb_add_range_ctx(iotlb, start, mid, addr, perm, opaque); + addr += mid + 1; + start = mid + 1; + } + if (iotlb->limit && iotlb->nmaps == iotlb->limit && iotlb->flags & VHOST_IOTLB_FLAG_RETIRE) { diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 28ef323882fb..792ab5f23647 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -473,6 +473,7 @@ static void vhost_tx_batch(struct vhost_net *net, goto signal_used; msghdr->msg_control = &ctl; + msghdr->msg_controllen = sizeof(ctl); err = sock->ops->sendmsg(sock, msghdr, 0); if (unlikely(err < 0)) { vq_err(&nvq->vq, "Fail to batch sending packets\n"); diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index 851539807bc9..ec5249e8c32d 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -286,7 +286,7 @@ static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep) if (copy_from_user(&features, featurep, sizeof(features))) return -EFAULT; - if (vdpa_set_features(vdpa, features, false)) + if (vdpa_set_features(vdpa, features)) return -EINVAL; return 0; diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 59edb5a1ffe2..082380c03a3e 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -1170,6 +1170,11 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev, goto done; } + if (msg.size == 0) { + ret = -EINVAL; + goto done; + } + if (dev->msg_handler) ret = dev->msg_handler(dev, &msg); else @@ -1981,7 +1986,7 @@ static int vhost_update_used_flags(struct vhost_virtqueue *vq) return 0; } -static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event) +static int vhost_update_avail_event(struct vhost_virtqueue *vq) { if (vhost_put_avail_event(vq)) return -EFAULT; @@ -2527,7 +2532,7 @@ bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) return false; } } else { - r = vhost_update_avail_event(vq, vq->avail_idx); + r = vhost_update_avail_event(vq); if (r) { vq_err(vq, "Failed to update 
avail event index at %p: %d\n", vhost_avail_event(vq), r); diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c index d6ca1c7ad513..37f0b4274113 100644 --- a/drivers/vhost/vsock.c +++ b/drivers/vhost/vsock.c @@ -629,16 +629,18 @@ err: return ret; } -static int vhost_vsock_stop(struct vhost_vsock *vsock) +static int vhost_vsock_stop(struct vhost_vsock *vsock, bool check_owner) { size_t i; - int ret; + int ret = 0; mutex_lock(&vsock->dev.mutex); - ret = vhost_dev_check_owner(&vsock->dev); - if (ret) - goto err; + if (check_owner) { + ret = vhost_dev_check_owner(&vsock->dev); + if (ret) + goto err; + } for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) { struct vhost_virtqueue *vq = &vsock->vqs[i]; @@ -753,7 +755,12 @@ static int vhost_vsock_dev_release(struct inode *inode, struct file *file) * inefficient. Room for improvement here. */ vsock_for_each_connected_socket(vhost_vsock_reset_orphans); - vhost_vsock_stop(vsock); + /* Don't check the owner, because we are in the release path, so we + * need to stop the vsock device in any case. + * vhost_vsock_stop() can not fail in this case, so we don't need to + * check the return code. + */ + vhost_vsock_stop(vsock, false); vhost_vsock_flush(vsock); vhost_dev_stop(&vsock->dev); @@ -868,7 +875,7 @@ static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl, if (start) return vhost_vsock_start(vsock); else - return vhost_vsock_stop(vsock); + return vhost_vsock_stop(vsock, true); case VHOST_GET_FEATURES: features = VHOST_VSOCK_FEATURES; if (copy_to_user(argp, &features, sizeof(features))) diff --git a/drivers/video/backlight/ams369fg06.c b/drivers/video/backlight/ams369fg06.c index 8a4361e95a11..522dd81110b8 100644 --- a/drivers/video/backlight/ams369fg06.c +++ b/drivers/video/backlight/ams369fg06.c @@ -506,12 +506,11 @@ static int ams369fg06_probe(struct spi_device *spi) return 0; } -static int ams369fg06_remove(struct spi_device *spi) +static void ams369fg06_remove(struct spi_device *spi) { struct ams369fg06 *lcd = spi_get_drvdata(spi); ams369fg06_power(lcd, FB_BLANK_POWERDOWN); - return 0; } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c index 33f5d80495e6..0a57033ae31d 100644 --- a/drivers/video/backlight/corgi_lcd.c +++ b/drivers/video/backlight/corgi_lcd.c @@ -542,7 +542,7 @@ static int corgi_lcd_probe(struct spi_device *spi) return 0; } -static int corgi_lcd_remove(struct spi_device *spi) +static void corgi_lcd_remove(struct spi_device *spi) { struct corgi_lcd *lcd = spi_get_drvdata(spi); @@ -550,7 +550,6 @@ static int corgi_lcd_remove(struct spi_device *spi) lcd->bl_dev->props.brightness = 0; backlight_update_status(lcd->bl_dev); corgi_lcd_set_power(lcd->lcd_dev, FB_BLANK_POWERDOWN); - return 0; } static struct spi_driver corgi_lcd_driver = { diff --git a/drivers/video/backlight/ili922x.c b/drivers/video/backlight/ili922x.c index 328aba9cddad..e7b6bd827986 100644 --- a/drivers/video/backlight/ili922x.c +++ b/drivers/video/backlight/ili922x.c @@ -526,10 +526,9 @@ static int ili922x_probe(struct spi_device *spi) return 0; } -static int ili922x_remove(struct spi_device *spi) +static void ili922x_remove(struct spi_device *spi) { ili922x_poweroff(spi); - return 0; } static struct spi_driver ili922x_driver = { diff --git a/drivers/video/backlight/l4f00242t03.c b/drivers/video/backlight/l4f00242t03.c index 46f97d1c3d21..cc763cf15f53 100644 --- a/drivers/video/backlight/l4f00242t03.c +++ b/drivers/video/backlight/l4f00242t03.c @@ -223,12 +223,11 @@ static int 
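/*
 * A worked example for the vhost_iotlb hunk above (illustrative): with
 * start == 0 and last == ULONG_MAX, the entry size last - start + 1
 * wraps to 0 in 64-bit arithmetic, so the range is stored as two halves
 * with well-defined sizes:
 *
 *	vhost_iotlb_add_range_ctx(iotlb, 0, mid, addr, perm, opaque);
 *	// then fall through with start = mid + 1, addr += mid + 1
 *
 * where mid = ULONG_MAX / 2; lookups are unaffected because they match
 * on [start, last] intervals rather than on sizes.
 */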
l4f00242t03_probe(struct spi_device *spi) return 0; } -static int l4f00242t03_remove(struct spi_device *spi) +static void l4f00242t03_remove(struct spi_device *spi) { struct l4f00242t03_priv *priv = spi_get_drvdata(spi); l4f00242t03_lcd_power_set(priv->ld, FB_BLANK_POWERDOWN); - return 0; } static void l4f00242t03_shutdown(struct spi_device *spi) diff --git a/drivers/video/backlight/lms501kf03.c b/drivers/video/backlight/lms501kf03.c index f949b66dce1b..5c46df8022bf 100644 --- a/drivers/video/backlight/lms501kf03.c +++ b/drivers/video/backlight/lms501kf03.c @@ -364,12 +364,11 @@ static int lms501kf03_probe(struct spi_device *spi) return 0; } -static int lms501kf03_remove(struct spi_device *spi) +static void lms501kf03_remove(struct spi_device *spi) { struct lms501kf03 *lcd = spi_get_drvdata(spi); lms501kf03_power(lcd, FB_BLANK_POWERDOWN); - return 0; } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/video/backlight/ltv350qv.c b/drivers/video/backlight/ltv350qv.c index 5cbf621e48bd..b6d373af6e3f 100644 --- a/drivers/video/backlight/ltv350qv.c +++ b/drivers/video/backlight/ltv350qv.c @@ -255,12 +255,11 @@ static int ltv350qv_probe(struct spi_device *spi) return 0; } -static int ltv350qv_remove(struct spi_device *spi) +static void ltv350qv_remove(struct spi_device *spi) { struct ltv350qv *lcd = spi_get_drvdata(spi); ltv350qv_power(lcd, FB_BLANK_POWERDOWN); - return 0; } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/video/backlight/tdo24m.c b/drivers/video/backlight/tdo24m.c index 0de044dcafd5..fc6fbaf85594 100644 --- a/drivers/video/backlight/tdo24m.c +++ b/drivers/video/backlight/tdo24m.c @@ -397,12 +397,11 @@ static int tdo24m_probe(struct spi_device *spi) return 0; } -static int tdo24m_remove(struct spi_device *spi) +static void tdo24m_remove(struct spi_device *spi) { struct tdo24m *lcd = spi_get_drvdata(spi); tdo24m_power(lcd, FB_BLANK_POWERDOWN); - return 0; } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/video/backlight/tosa_lcd.c b/drivers/video/backlight/tosa_lcd.c index 38765544345b..23d6c6bf0f54 100644 --- a/drivers/video/backlight/tosa_lcd.c +++ b/drivers/video/backlight/tosa_lcd.c @@ -232,15 +232,13 @@ err_register: return ret; } -static int tosa_lcd_remove(struct spi_device *spi) +static void tosa_lcd_remove(struct spi_device *spi) { struct tosa_lcd_data *data = spi_get_drvdata(spi); i2c_unregister_device(data->i2c); tosa_lcd_tg_off(data); - - return 0; } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/video/backlight/vgg2432a4.c b/drivers/video/backlight/vgg2432a4.c index 3567b45f9ba9..bfc1913e8b55 100644 --- a/drivers/video/backlight/vgg2432a4.c +++ b/drivers/video/backlight/vgg2432a4.c @@ -233,11 +233,9 @@ static int vgg2432a4_probe(struct spi_device *spi) return 0; } -static int vgg2432a4_remove(struct spi_device *spi) +static void vgg2432a4_remove(struct spi_device *spi) { ili9320_remove(spi_get_drvdata(spi)); - - return 0; } static void vgg2432a4_shutdown(struct spi_device *spi) diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index f36829eeb5a9..2fc1b80a26ad 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -1025,7 +1025,7 @@ static void fbcon_init(struct vc_data *vc, int init) struct vc_data *svc = *default_mode; struct fbcon_display *t, *p = &fb_display[vc->vc_num]; int logo = 1, new_rows, new_cols, rows, cols; - int cap, ret; + int ret; if (WARN_ON(info_idx == -1)) return; @@ -1034,7 +1034,6 @@ static void fbcon_init(struct vc_data *vc, int init) con2fb_map[vc->vc_num] = info_idx; info = 
registered_fb[con2fb_map[vc->vc_num]]; - cap = info->flags; if (logo_shown < 0 && console_loglevel <= CONSOLE_LOGLEVEL_QUIET) logo_shown = FBCON_LOGO_DONTSHOW; @@ -1137,8 +1136,8 @@ static void fbcon_init(struct vc_data *vc, int init) ops->graphics = 0; #ifdef CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION - if ((cap & FBINFO_HWACCEL_COPYAREA) && - !(cap & FBINFO_HWACCEL_DISABLED)) + if ((info->flags & FBINFO_HWACCEL_COPYAREA) && + !(info->flags & FBINFO_HWACCEL_DISABLED)) p->scrollmode = SCROLL_MOVE; else /* default to something safe */ p->scrollmode = SCROLL_REDRAW; diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c index 0fa7ede94fa6..13083ad8d751 100644 --- a/drivers/video/fbdev/core/fbmem.c +++ b/drivers/video/fbdev/core/fbmem.c @@ -1160,6 +1160,8 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd, ret = fbcon_set_con2fb_map_ioctl(argp); break; case FBIOBLANK: + if (arg > FB_BLANK_POWERDOWN) + return -EINVAL; console_lock(); lock_fb_info(info); ret = fb_blank(info, arg); diff --git a/drivers/video/fbdev/omap/lcd_mipid.c b/drivers/video/fbdev/omap/lcd_mipid.c index a75ae0c9b14c..03cff39d392d 100644 --- a/drivers/video/fbdev/omap/lcd_mipid.c +++ b/drivers/video/fbdev/omap/lcd_mipid.c @@ -570,14 +570,12 @@ static int mipid_spi_probe(struct spi_device *spi) return 0; } -static int mipid_spi_remove(struct spi_device *spi) +static void mipid_spi_remove(struct spi_device *spi) { struct mipid_device *md = dev_get_drvdata(&spi->dev); mipid_disable(&md->panel); kfree(md); - - return 0; } static struct spi_driver mipid_spi_driver = { diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c index 1bec7a4422e8..aab67721263d 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c @@ -316,7 +316,7 @@ err_gpio: return r; } -static int lb035q02_panel_spi_remove(struct spi_device *spi) +static void lb035q02_panel_spi_remove(struct spi_device *spi) { struct panel_drv_data *ddata = spi_get_drvdata(spi); struct omap_dss_device *dssdev = &ddata->dssdev; @@ -328,8 +328,6 @@ static int lb035q02_panel_spi_remove(struct spi_device *spi) lb035q02_disconnect(dssdev); omap_dss_put_device(in); - - return 0; } static const struct of_device_id lb035q02_of_match[] = { diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c index dff9ebbadfc0..be9910ff6e62 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c @@ -327,7 +327,7 @@ err_gpio: return r; } -static int nec_8048_remove(struct spi_device *spi) +static void nec_8048_remove(struct spi_device *spi) { struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev); struct omap_dss_device *dssdev = &ddata->dssdev; @@ -341,8 +341,6 @@ static int nec_8048_remove(struct spi_device *spi) nec_8048_disconnect(dssdev); omap_dss_put_device(in); - - return 0; } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c index 8d8b5ff7d43c..a909b5385ca5 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c @@ -857,7 +857,7 @@ err_gpio: return r; } -static int 
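/*
 * Illustrative pattern behind this run of backlight/panel hunks,
 * assuming the spi core change this series tracks: spi_driver::remove
 * now returns void, so each driver simply drops its "return 0;". A
 * hypothetical converted driver looks like:
 *
 *	static void foo_remove(struct spi_device *spi)
 *	{
 *		struct foo *priv = spi_get_drvdata(spi);
 *
 *		foo_power_down(priv);	// no status to propagate; the SPI
 *	}				// core ignored the value anyway
 */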
acx565akm_remove(struct spi_device *spi) +static void acx565akm_remove(struct spi_device *spi) { struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev); struct omap_dss_device *dssdev = &ddata->dssdev; @@ -874,8 +874,6 @@ static int acx565akm_remove(struct spi_device *spi) acx565akm_disconnect(dssdev); omap_dss_put_device(in); - - return 0; } static const struct of_device_id acx565akm_of_match[] = { diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c index 595ebd8bd5dc..3c0f887d3092 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c @@ -425,7 +425,7 @@ err_reg: return r; } -static int td028ttec1_panel_remove(struct spi_device *spi) +static void td028ttec1_panel_remove(struct spi_device *spi) { struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev); struct omap_dss_device *dssdev = &ddata->dssdev; @@ -439,8 +439,6 @@ static int td028ttec1_panel_remove(struct spi_device *spi) td028ttec1_panel_disconnect(dssdev); omap_dss_put_device(in); - - return 0; } static const struct of_device_id td028ttec1_of_match[] = { diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c index afac1d9445aa..58bbba7c037f 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c @@ -564,7 +564,7 @@ err_regulator: return r; } -static int tpo_td043_remove(struct spi_device *spi) +static void tpo_td043_remove(struct spi_device *spi) { struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev); struct omap_dss_device *dssdev = &ddata->dssdev; @@ -580,8 +580,6 @@ static int tpo_td043_remove(struct spi_device *spi) omap_dss_put_device(in); sysfs_remove_group(&spi->dev.kobj, &tpo_td043_attr_group); - - return 0; } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig index 34f80b7a8a64..492fc26f0b65 100644 --- a/drivers/virtio/Kconfig +++ b/drivers/virtio/Kconfig @@ -105,7 +105,6 @@ config VIRTIO_BALLOON config VIRTIO_MEM tristate "Virtio mem driver" - default m depends on X86_64 depends on VIRTIO depends on MEMORY_HOTPLUG diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c index 00ac9db792a4..22f15f444f75 100644 --- a/drivers/virtio/virtio.c +++ b/drivers/virtio/virtio.c @@ -166,14 +166,13 @@ void virtio_add_status(struct virtio_device *dev, unsigned int status) } EXPORT_SYMBOL_GPL(virtio_add_status); -int virtio_finalize_features(struct virtio_device *dev) +/* Do some validation, then set FEATURES_OK */ +static int virtio_features_ok(struct virtio_device *dev) { - int ret = dev->config->finalize_features(dev); unsigned status; + int ret; might_sleep(); - if (ret) - return ret; ret = arch_has_restricted_virtio_memory_access(); if (ret) { @@ -202,8 +201,23 @@ int virtio_finalize_features(struct virtio_device *dev) } return 0; } -EXPORT_SYMBOL_GPL(virtio_finalize_features); +/** + * virtio_reset_device - quiesce device for removal + * @dev: the device to reset + * + * Prevents device from sending interrupts and accessing memory. + * + * Generally used for cleanup during driver / device removal. + * + * Once this has been invoked, caller must ensure that + * virtqueue_notify / virtqueue_kick are not in progress. 
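 *
 * (Illustrative usage, not part of the patch; names hypothetical:
 *
 *	virtio_reset_device(vdev);		// no more interrupts/DMA
 *	cancel_work_sync(&priv->work);		// flush driver-owned contexts
 *	vdev->config->del_vqs(vdev);		// vq callbacks now idle
 *
 * with priv->work standing in for whatever bh/workqueue contexts the
 * driver schedules.)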
+ * + * Note: this guarantees that vq callbacks are not in progress, however caller + * is responsible for preventing access from other contexts, such as a system + * call/workqueue/bh. Invoking virtio_break_device then flushing any such + * contexts is one way to handle that. + * */ void virtio_reset_device(struct virtio_device *dev) { dev->config->reset(dev); @@ -245,17 +259,6 @@ static int virtio_dev_probe(struct device *_d) driver_features_legacy = driver_features; } - /* - * Some devices detect legacy solely via F_VERSION_1. Write - * F_VERSION_1 to force LE config space accesses before FEATURES_OK for - * these when needed. - */ - if (drv->validate && !virtio_legacy_is_little_endian() - && device_features & BIT_ULL(VIRTIO_F_VERSION_1)) { - dev->features = BIT_ULL(VIRTIO_F_VERSION_1); - dev->config->finalize_features(dev); - } - if (device_features & (1ULL << VIRTIO_F_VERSION_1)) dev->features = driver_features & device_features; else @@ -266,13 +269,26 @@ static int virtio_dev_probe(struct device *_d) if (device_features & (1ULL << i)) __virtio_set_bit(dev, i); + err = dev->config->finalize_features(dev); + if (err) + goto err; + if (drv->validate) { + u64 features = dev->features; + err = drv->validate(dev); if (err) goto err; + + /* Did validation change any features? Then write them again. */ + if (features != dev->features) { + err = dev->config->finalize_features(dev); + if (err) + goto err; + } } - err = virtio_finalize_features(dev); + err = virtio_features_ok(dev); if (err) goto err; @@ -496,7 +512,11 @@ int virtio_device_restore(struct virtio_device *dev) /* We have a driver! */ virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER); - ret = virtio_finalize_features(dev); + ret = dev->config->finalize_features(dev); + if (ret) + goto err; + + ret = virtio_features_ok(dev); if (ret) goto err; diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c index 7767a7f0119b..76504559bc25 100644 --- a/drivers/virtio/virtio_vdpa.c +++ b/drivers/virtio/virtio_vdpa.c @@ -317,7 +317,7 @@ static int virtio_vdpa_finalize_features(struct virtio_device *vdev) /* Give virtio_ring a chance to accept features. */ vring_transport_features(vdev); - return vdpa_set_features(vdpa, vdev->features, false); + return vdpa_set_features(vdpa, vdev->features); } static const char *virtio_vdpa_bus_name(struct virtio_device *vdev) diff --git a/drivers/xen/gntalloc.c b/drivers/xen/gntalloc.c index 3fa40c723e8e..edb0acd0b832 100644 --- a/drivers/xen/gntalloc.c +++ b/drivers/xen/gntalloc.c @@ -169,20 +169,14 @@ undo: __del_gref(gref); } - /* It's possible for the target domain to map the just-allocated grant - * references by blindly guessing their IDs; if this is done, then - * __del_gref will leave them in the queue_gref list. They need to be - * added to the global list so that we can free them when they are no - * longer referenced. 
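 *
 * (Illustrative recap of the virtio_dev_probe() rework above:
 *
 *	dev->config->finalize_features(dev);	// write features once
 *	// drv->validate() may clear bits; if so, write them again
 *	virtio_features_ok(dev);		// only then set FEATURES_OK
 *
 * Because validate() now always sees finalized features, the old
 * special-case F_VERSION_1 pre-write for legacy little-endian config
 * access is no longer needed.)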
- */ - if (unlikely(!list_empty(&queue_gref))) - list_splice_tail(&queue_gref, &gref_list); mutex_unlock(&gref_mutex); return rc; } static void __del_gref(struct gntalloc_gref *gref) { + unsigned long addr; + if (gref->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) { uint8_t *tmp = kmap(gref->page); tmp[gref->notify.pgoff] = 0; @@ -196,21 +190,16 @@ static void __del_gref(struct gntalloc_gref *gref) gref->notify.flags = 0; if (gref->gref_id) { - if (gnttab_query_foreign_access(gref->gref_id)) - return; - - if (!gnttab_end_foreign_access_ref(gref->gref_id, 0)) - return; - - gnttab_free_grant_reference(gref->gref_id); + if (gref->page) { + addr = (unsigned long)page_to_virt(gref->page); + gnttab_end_foreign_access(gref->gref_id, 0, addr); + } else + gnttab_free_grant_reference(gref->gref_id); } gref_size--; list_del(&gref->next_gref); - if (gref->page) - __free_page(gref->page); - kfree(gref); } diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index 3729bea0c989..5c83d41766c8 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c @@ -134,12 +134,9 @@ struct gnttab_ops { */ unsigned long (*end_foreign_transfer_ref)(grant_ref_t ref); /* - * Query the status of a grant entry. Ref parameter is reference of - * queried grant entry, return value is the status of queried entry. - * Detailed status(writing/reading) can be gotten from the return value - * by bit operations. + * Read the frame number related to a given grant reference. */ - int (*query_foreign_access)(grant_ref_t ref); + unsigned long (*read_frame)(grant_ref_t ref); }; struct unmap_refs_callback_data { @@ -284,22 +281,6 @@ int gnttab_grant_foreign_access(domid_t domid, unsigned long frame, } EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access); -static int gnttab_query_foreign_access_v1(grant_ref_t ref) -{ - return gnttab_shared.v1[ref].flags & (GTF_reading|GTF_writing); -} - -static int gnttab_query_foreign_access_v2(grant_ref_t ref) -{ - return grstatus[ref] & (GTF_reading|GTF_writing); -} - -int gnttab_query_foreign_access(grant_ref_t ref) -{ - return gnttab_interface->query_foreign_access(ref); -} -EXPORT_SYMBOL_GPL(gnttab_query_foreign_access); - static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref, int readonly) { u16 flags, nflags; @@ -353,6 +334,16 @@ int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly) } EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref); +static unsigned long gnttab_read_frame_v1(grant_ref_t ref) +{ + return gnttab_shared.v1[ref].frame; +} + +static unsigned long gnttab_read_frame_v2(grant_ref_t ref) +{ + return gnttab_shared.v2[ref].full_page.frame; +} + struct deferred_entry { struct list_head list; grant_ref_t ref; @@ -382,12 +373,9 @@ static void gnttab_handle_deferred(struct timer_list *unused) spin_unlock_irqrestore(&gnttab_list_lock, flags); if (_gnttab_end_foreign_access_ref(entry->ref, entry->ro)) { put_free_entry(entry->ref); - if (entry->page) { - pr_debug("freeing g.e. %#x (pfn %#lx)\n", - entry->ref, page_to_pfn(entry->page)); - put_page(entry->page); - } else - pr_info("freeing g.e. %#x\n", entry->ref); + pr_debug("freeing g.e. 
%#x (pfn %#lx)\n", + entry->ref, page_to_pfn(entry->page)); + put_page(entry->page); kfree(entry); entry = NULL; } else { @@ -412,9 +400,18 @@ static void gnttab_handle_deferred(struct timer_list *unused) static void gnttab_add_deferred(grant_ref_t ref, bool readonly, struct page *page) { - struct deferred_entry *entry = kmalloc(sizeof(*entry), GFP_ATOMIC); + struct deferred_entry *entry; + gfp_t gfp = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL; const char *what = KERN_WARNING "leaking"; + entry = kmalloc(sizeof(*entry), gfp); + if (!page) { + unsigned long gfn = gnttab_interface->read_frame(ref); + + page = pfn_to_page(gfn_to_pfn(gfn)); + get_page(page); + } + if (entry) { unsigned long flags; @@ -435,11 +432,21 @@ static void gnttab_add_deferred(grant_ref_t ref, bool readonly, what, ref, page ? page_to_pfn(page) : -1); } +int gnttab_try_end_foreign_access(grant_ref_t ref) +{ + int ret = _gnttab_end_foreign_access_ref(ref, 0); + + if (ret) + put_free_entry(ref); + + return ret; +} +EXPORT_SYMBOL_GPL(gnttab_try_end_foreign_access); + void gnttab_end_foreign_access(grant_ref_t ref, int readonly, unsigned long page) { - if (gnttab_end_foreign_access_ref(ref, readonly)) { - put_free_entry(ref); + if (gnttab_try_end_foreign_access(ref)) { if (page != 0) put_page(virt_to_page(page)); } else @@ -1417,7 +1424,7 @@ static const struct gnttab_ops gnttab_v1_ops = { .update_entry = gnttab_update_entry_v1, .end_foreign_access_ref = gnttab_end_foreign_access_ref_v1, .end_foreign_transfer_ref = gnttab_end_foreign_transfer_ref_v1, - .query_foreign_access = gnttab_query_foreign_access_v1, + .read_frame = gnttab_read_frame_v1, }; static const struct gnttab_ops gnttab_v2_ops = { @@ -1429,7 +1436,7 @@ static const struct gnttab_ops gnttab_v2_ops = { .update_entry = gnttab_update_entry_v2, .end_foreign_access_ref = gnttab_end_foreign_access_ref_v2, .end_foreign_transfer_ref = gnttab_end_foreign_transfer_ref_v2, - .query_foreign_access = gnttab_query_foreign_access_v2, + .read_frame = gnttab_read_frame_v2, }; static bool gnttab_need_v2(void) diff --git a/drivers/xen/pci.c b/drivers/xen/pci.c index 2c890f4f2cbc..72d4e3f193af 100644 --- a/drivers/xen/pci.c +++ b/drivers/xen/pci.c @@ -264,7 +264,7 @@ struct xen_device_domain_owner { }; static DEFINE_SPINLOCK(dev_domain_list_spinlock); -static struct list_head dev_domain_list = LIST_HEAD_INIT(dev_domain_list); +static LIST_HEAD(dev_domain_list); static struct xen_device_domain_owner *find_device(struct pci_dev *dev) { diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c index 3c9ae156b597..0ca351f30a6d 100644 --- a/drivers/xen/pvcalls-front.c +++ b/drivers/xen/pvcalls-front.c @@ -337,8 +337,8 @@ static void free_active_ring(struct sock_mapping *map) if (!map->active.ring) return; - free_pages((unsigned long)map->active.data.in, - map->active.ring->ring_order); + free_pages_exact(map->active.data.in, + PAGE_SIZE << map->active.ring->ring_order); free_page((unsigned long)map->active.ring); } @@ -352,8 +352,8 @@ static int alloc_active_ring(struct sock_mapping *map) goto out; map->active.ring->ring_order = PVCALLS_RING_ORDER; - bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, - PVCALLS_RING_ORDER); + bytes = alloc_pages_exact(PAGE_SIZE << PVCALLS_RING_ORDER, + GFP_KERNEL | __GFP_ZERO); if (!bytes) goto out; diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c index e8bed1cb76ba..df6890681231 100644 --- a/drivers/xen/xenbus/xenbus_client.c +++ b/drivers/xen/xenbus/xenbus_client.c @@ -379,7 
+379,14 @@ int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr, unsigned int nr_pages, grant_ref_t *grefs) { int err; - int i, j; + unsigned int i; + grant_ref_t gref_head; + + err = gnttab_alloc_grant_references(nr_pages, &gref_head); + if (err) { + xenbus_dev_fatal(dev, err, "granting access to ring page"); + return err; + } for (i = 0; i < nr_pages; i++) { unsigned long gfn; @@ -389,23 +396,14 @@ int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr, else gfn = virt_to_gfn(vaddr); - err = gnttab_grant_foreign_access(dev->otherend_id, gfn, 0); - if (err < 0) { - xenbus_dev_fatal(dev, err, - "granting access to ring page"); - goto fail; - } - grefs[i] = err; + grefs[i] = gnttab_claim_grant_reference(&gref_head); + gnttab_grant_foreign_access_ref(grefs[i], dev->otherend_id, + gfn, 0); vaddr = vaddr + XEN_PAGE_SIZE; } return 0; - -fail: - for (j = 0; j < i; j++) - gnttab_end_foreign_access_ref(grefs[j], 0); - return err; } EXPORT_SYMBOL_GPL(xenbus_grant_ring);
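A closing sketch of the allocate-then-claim pattern the final xenbus hunk adopts (illustrative; it only rearranges the gnttab calls visible above):

	grant_ref_t gref_head;
	int err;

	err = gnttab_alloc_grant_references(nr_pages, &gref_head);
	if (err)
		return err;	/* nothing granted yet, nothing to unwind */

	for (i = 0; i < nr_pages; i++) {
		grefs[i] = gnttab_claim_grant_reference(&gref_head);
		gnttab_grant_foreign_access_ref(grefs[i], dev->otherend_id,
						gfn, 0);
	}

Reserving every reference up front means the per-page loop has no failure path, which is what lets the old "fail:" unwind, and its calls to gnttab_end_foreign_access_ref(), disappear; revoking grants the other end may still be using is exactly the pattern this series is removing.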